diff --git a/TensorMath.sln b/TensorMath.sln
index e3f3a86..04b4c9d 100644
--- a/TensorMath.sln
+++ b/TensorMath.sln
@@ -9,8 +9,6 @@ Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorMath", "src\TensorMat
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{BAA394CB-3D6F-4CE9-BAE8-56603DBE7793}"
EndProject
-Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorMath.Tests", "tests\TensorMath.Tests\TensorMath.Tests.fsproj", "{6D6C1F8A-1AFE-4BEE-A073-24515FCC6460}"
-EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "build", "build", "{7007FA68-0E95-42A0-B25C-A9BBA6071B34}"
EndProject
Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "build", "build\build.fsproj", "{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6}"
@@ -21,6 +19,20 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = ".ci", ".ci", "{CD408BBB-CFD
.github\workflows\deploy-gh-pages.yml = .github\workflows\deploy-gh-pages.yml
EndProjectSection
EndProject
+Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorMath.Backends.Reference", "src\TensorMath.Backends.Reference\TensorMath.Backends.Reference.fsproj", "{F973F65A-3E9F-4780-84A8-E10C5EEA86F1}"
+EndProject
+Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorMath.Tests", "tests\TensorMath.Tests\TensorMath.Tests.fsproj", "{6A7CBDA7-5E2B-4818-A152-DE64031ACCEA}"
+EndProject
+Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorMath.Backends.TestDuplicate", "tests\TensorMath.Backends.TestDuplicate\TensorMath.Backends.TestDuplicate.fsproj", "{03D729EA-CE26-4AF4-887E-4339E38DBF11}"
+EndProject
+Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "TensorMath.Backends.Torch", "src\TensorMath.Backends.Torch\TensorMath.Backends.Torch.fsproj", "{D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "bundles", "bundles", "{26AD5F3B-A910-4128-971C-FE9780005B1E}"
+EndProject
+Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "TensorMath-cpu", "bundles\TensorMath-cpu\TensorMath-cpu.fsproj", "{13198191-E9B3-44B7-8F25-8013F2020900}"
+EndProject
+Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "TensorMath-lite", "bundles\TensorMath-lite\TensorMath-lite.fsproj", "{3E208A02-EFBC-4450-A5EF-BEC5139F1E55}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -31,22 +43,47 @@ Global
{03276E48-6B47-463F-A5BC-59580A623ADB}.Debug|Any CPU.Build.0 = Debug|Any CPU
{03276E48-6B47-463F-A5BC-59580A623ADB}.Release|Any CPU.ActiveCfg = Release|Any CPU
{03276E48-6B47-463F-A5BC-59580A623ADB}.Release|Any CPU.Build.0 = Release|Any CPU
- {6D6C1F8A-1AFE-4BEE-A073-24515FCC6460}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {6D6C1F8A-1AFE-4BEE-A073-24515FCC6460}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {6D6C1F8A-1AFE-4BEE-A073-24515FCC6460}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {6D6C1F8A-1AFE-4BEE-A073-24515FCC6460}.Release|Any CPU.Build.0 = Release|Any CPU
{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6}.Debug|Any CPU.Build.0 = Debug|Any CPU
{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6}.Release|Any CPU.ActiveCfg = Release|Any CPU
{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6}.Release|Any CPU.Build.0 = Release|Any CPU
+ {F973F65A-3E9F-4780-84A8-E10C5EEA86F1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {F973F65A-3E9F-4780-84A8-E10C5EEA86F1}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {F973F65A-3E9F-4780-84A8-E10C5EEA86F1}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {F973F65A-3E9F-4780-84A8-E10C5EEA86F1}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6A7CBDA7-5E2B-4818-A152-DE64031ACCEA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6A7CBDA7-5E2B-4818-A152-DE64031ACCEA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6A7CBDA7-5E2B-4818-A152-DE64031ACCEA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6A7CBDA7-5E2B-4818-A152-DE64031ACCEA}.Release|Any CPU.Build.0 = Release|Any CPU
+ {03D729EA-CE26-4AF4-887E-4339E38DBF11}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {03D729EA-CE26-4AF4-887E-4339E38DBF11}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {03D729EA-CE26-4AF4-887E-4339E38DBF11}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {03D729EA-CE26-4AF4-887E-4339E38DBF11}.Release|Any CPU.Build.0 = Release|Any CPU
+ {D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F}.Release|Any CPU.Build.0 = Release|Any CPU
+ {13198191-E9B3-44B7-8F25-8013F2020900}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {13198191-E9B3-44B7-8F25-8013F2020900}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {13198191-E9B3-44B7-8F25-8013F2020900}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {13198191-E9B3-44B7-8F25-8013F2020900}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3E208A02-EFBC-4450-A5EF-BEC5139F1E55}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3E208A02-EFBC-4450-A5EF-BEC5139F1E55}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3E208A02-EFBC-4450-A5EF-BEC5139F1E55}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3E208A02-EFBC-4450-A5EF-BEC5139F1E55}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{03276E48-6B47-463F-A5BC-59580A623ADB} = {5317FD43-7D2F-4F4F-8444-B6AA1285C4BD}
- {6D6C1F8A-1AFE-4BEE-A073-24515FCC6460} = {BAA394CB-3D6F-4CE9-BAE8-56603DBE7793}
{D305E2AA-681F-47ED-87C8-7A9F6EA2F1A6} = {7007FA68-0E95-42A0-B25C-A9BBA6071B34}
+ {F973F65A-3E9F-4780-84A8-E10C5EEA86F1} = {5317FD43-7D2F-4F4F-8444-B6AA1285C4BD}
+ {6A7CBDA7-5E2B-4818-A152-DE64031ACCEA} = {BAA394CB-3D6F-4CE9-BAE8-56603DBE7793}
+ {03D729EA-CE26-4AF4-887E-4339E38DBF11} = {BAA394CB-3D6F-4CE9-BAE8-56603DBE7793}
+ {D03FFF26-A7AA-4C9F-B226-D0F07FD08A5F} = {5317FD43-7D2F-4F4F-8444-B6AA1285C4BD}
+ {13198191-E9B3-44B7-8F25-8013F2020900} = {26AD5F3B-A910-4128-971C-FE9780005B1E}
+ {3E208A02-EFBC-4450-A5EF-BEC5139F1E55} = {26AD5F3B-A910-4128-971C-FE9780005B1E}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {7A89F710-DE41-4B47-B450-17CCB1B3A0DC}
diff --git a/bundles/TensorMath-cpu/Empty.fs b/bundles/TensorMath-cpu/Empty.fs
new file mode 100644
index 0000000..aef8dbf
--- /dev/null
+++ b/bundles/TensorMath-cpu/Empty.fs
@@ -0,0 +1,4 @@
+namespace TensorMath
+
+// This project exists to bundle TensorMath and some default backends into a single package
+// See the TensorMath project for the main TensorMath code
\ No newline at end of file
diff --git a/bundles/TensorMath-cpu/TensorMath-cpu.fsproj b/bundles/TensorMath-cpu/TensorMath-cpu.fsproj
new file mode 100644
index 0000000..9d4d548
--- /dev/null
+++ b/bundles/TensorMath-cpu/TensorMath-cpu.fsproj
@@ -0,0 +1,13 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>net8.0</TargetFramework>
+    <RootNamespace>TensorMath_cpu</RootNamespace>
+    <IsPackable>true</IsPackable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <Compile Include="Empty.fs" />
+  </ItemGroup>
+
+</Project>
diff --git a/bundles/TensorMath-lite/Empty.fs b/bundles/TensorMath-lite/Empty.fs
new file mode 100644
index 0000000..aef8dbf
--- /dev/null
+++ b/bundles/TensorMath-lite/Empty.fs
@@ -0,0 +1,4 @@
+namespace TensorMath
+
+// This project exists to bundle TensorMath and some default backends into a single package
+// See the TensorMath project for the main TensorMath code
\ No newline at end of file
diff --git a/bundles/TensorMath-lite/TensorMath-lite.fsproj b/bundles/TensorMath-lite/TensorMath-lite.fsproj
new file mode 100644
index 0000000..309595b
--- /dev/null
+++ b/bundles/TensorMath-lite/TensorMath-lite.fsproj
@@ -0,0 +1,13 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>net8.0</TargetFramework>
+    <RootNamespace>TensorMath_lite</RootNamespace>
+    <IsPackable>true</IsPackable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <Compile Include="Empty.fs" />
+  </ItemGroup>
+
+</Project>
diff --git a/examples/what.fsx b/examples/what.fsx
new file mode 100644
index 0000000..0cdee61
--- /dev/null
+++ b/examples/what.fsx
@@ -0,0 +1,26 @@
+#!/usr/bin/env -S dotnet fsi
+
+#I "../tests/TensorMath.Tests/bin/Debug/net8.0"
+#r "TensorMath.dll"
+#r "TensorMath.Backends.Reference.dll"
+#r "TensorMath.Backends.Torch.dll"
+
+// Libtorch binaries
+// Option A: you can use a platform-specific nuget package
+#r "nuget: TorchSharp-cpu"
+// #r "nuget: TorchSharp-cuda-linux, 0.96.5"
+//#r "nuget: TorchSharp-cuda-windows" // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
+// Option B: you can use a local libtorch installation
+// System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
+
+
+open TensorMath
+
+
+dsharp.config(backend=Backend.Torch, device=Device.CPU)
+dsharp.seed(1)
+
+let t1 = dsharp.tensor [|1.; 2.; 3.; 4.; |]
+
+t1 * t1
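+
+// Evaluate and print the product explicitly (printfn "%A" works on any value)
+printfn "%A" (t1 * t1)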
+
diff --git a/src/TensorMath.Backends.Reference/Reference.RawTensor.fs b/src/TensorMath.Backends.Reference/Reference.RawTensor.fs
new file mode 100644
index 0000000..6d112f2
--- /dev/null
+++ b/src/TensorMath.Backends.Reference/Reference.RawTensor.fs
@@ -0,0 +1,2367 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+#if TEST_DUPLICATE_BACKEND
+namespace rec TensorMath.Backends.TestDuplicate
+#else
+namespace rec TensorMath.Backends.Reference
+#endif
+
+open System
+open TensorMath
+open TensorMath.Backends
+open TensorMath.Util
+
+#nowarn "77" // use of op_Explicit
+
+[<AutoOpen>]
+module internal Utils =
+ type RawTensor with
+ member x.GetTypedValues() : 'T[] = (x :?> RawTensorCPU<'T>).Values
+
+/// This is the base class for all RawTensorXyz types.
+/// All type-independent operations are implemented directly on this class.
+[<AbstractClass>]
+type RawTensorCPU<'T when 'T : equality and 'T :> scalar>(values: 'T[], shape: Shape, dtype: Dtype, device: Device) =
+ inherit RawTensor()
+ do if device.DeviceType = DeviceType.CUDA then failwithf "CUDA is not supported by the reference backend."
+
+ let mutable values = values
+ let mutable isMutable = false
+ let checkMutable() = if not isMutable then failwith "The tensor cannot be mutated."
+ override _.Shape = shape
+ override _.Dim = shape.Length
+ override _.Nelement = shapeLength shape
+ override _.Dtype = dtype
+ override _.Device = device
+ override _.DeviceType = device.DeviceType
+ override _.Handle = box values
+ override _.Backend =
+#if TEST_DUPLICATE_BACKEND
+ Backend.Register "TestDuplicate"
+#else
+ Backend.Reference
+#endif
+
+ member _.Values : 'T[] = values
+
+ member internal t.IndexToFlatIndex(index:int[]) =
+ indexToFlatIndex t.Shape index
+
+ member internal t.FlatIndexToIndex(flatIndex:int) =
+ flatIndexToIndex t.Shape flatIndex
+
+ member t.Item
+ with get ([<System.ParamArray>] index:int[]) =
+ // printfn "rawtensor shape %A item index %A" t.Shape index
+ if index.Length <> t.Dim then failwithf "Expecting a %id index" t.Dim
+ let vvv = t.Values[t.IndexToFlatIndex(index)]
+ vvv
+
+ and set ([<System.ParamArray>] index:int[]) v =
+ if index.Length <> t.Dim then failwithf "Expecting a %id index" t.Dim
+ t.Values[t.IndexToFlatIndex(index)] <- v
+
+ override t.GetItem(indexes:int[]) =
+ t[indexes] :> scalar
+
+ override t.GetSlice(fullBounds:int[,]) =
+ let fullBounds = Shape.completeSliceBounds t.Shape fullBounds
+ let shape = Shape.checkCanGetSlice t.Shape fullBounds
+ let array = Array.zeroCreate (shapeLength shape)
+ let mutable arrayi = 0
+ let rec slice (fullBounds:int[,]) externalCoords =
+ if fullBounds.GetLength(0) = 1 then
+ for i=fullBounds[0,0] to fullBounds[0,1] do
+ // printfn "inner %A" i
+ let globalCoords = Array.append externalCoords [|i|]
+ array[arrayi] <- t[globalCoords]
+ arrayi <- arrayi + 1
+ else
+ for i=fullBounds[0,0] to fullBounds[0,1] do
+ // printfn "outer %A" i
+ slice fullBounds[1..,*] (Array.append externalCoords [|i|])
+ slice fullBounds [||]
+ t.MakeLike(array, shape)
+
+ override t.Clone() = t.MakeLike(Array.copy t.Values, Array.copy t.Shape)
+
+ abstract member MakeLike: values: 'T[] * shape: Shape * ?device: Device -> RawTensor
+
+ override x.ComputeHash() = hash shape + hash values
+
+ override t.Expand(newShape) =
+ if newShape.Length = 1 && newShape[0] = 0 then t.MakeLike([||], newShape) else // Return zero-sized tensor if expanding to zero-sized tensor
+ if shape = newShape then t :> _ else
+ Shape.checkCanExpand shape newShape
+ let trim = newShape.Length - shape.Length
+ let exp = shapeLength newShape[0..trim-1]
+ let jshape = newShape[trim..]
+ let n = shapeLength newShape
+ let result = Array.zeroCreate n
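+ // Broadcast copy: a source dimension of size 1 expands by re-reading the same
+ // element (stride 0), while a dimension that already matches the target
+ // advances in lockstep (stride 1); strideD below encodes this per dimension.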
+ if jshape.Length = 0 then
+ // The expansion is everything
+ for jP = 0 to exp-1 do
+ result[jP] <- values[0]
+ else
+ for jP = 0 to exp-1 do
+ let rec loop ibase jbase d =
+ let strideD = if (shape[d] = jshape[d]) then 1 else 0
+ if d < jshape.Length-1 then
+ let mutable iD = 0
+ for jD = 0 to jshape[d]-1 do
+ let ibaseD = (ibase+iD)*shape[d+1]
+ let jbaseD = (jbase+jD)*jshape[d+1]
+ loop ibaseD jbaseD (d+1)
+ iD <- iD + strideD
+ else
+ let mutable iD = 0
+ // last loop does the actual copy fragments
+ for jD = 0 to jshape[d]-1 do
+ result[jbase+jD] <- values[ibase+iD]
+ iD <- iD + strideD
+ loop 0 (jP*jshape[0]) 0
+ t.MakeLike(result, newShape)
+
+ override t.ToValues() =
+ let shape = t.Shape
+ match t.Dim with
+ | 0 -> box values[0]
+ | 1 -> upcast Array.init shape[0] (fun i -> t[i])
+ | 2 -> upcast Array2D.init shape[0] shape[1] (fun i j -> t[i, j])
+ | 3 -> upcast Array3D.init shape[0] shape[1] shape[2] (fun i j k -> t[i, j, k])
+ | 4 -> upcast Array4D.init shape[0] shape[1] shape[2] shape[3] (fun i j k l -> t[i, j, k, l])
+ | 5 -> upcast Array5D.init shape[0] shape[1] shape[2] shape[3] shape[4] (fun i j k l m -> t[i, j, k, l, m])
+ | 6 -> upcast Array6D.init shape[0] shape[1] shape[2] shape[3] shape[4] shape[5] (fun i j k l m n -> t[i, j, k, l, m, n])
+ | _ -> ArrayND.init shape (fun idxs -> t[idxs])
+
+ override _.StackTs(tensors, dim) =
+ let values, shapes = tensors |> Array.map (fun t -> t.GetTypedValues(), t.Shape) |> Array.unzip
+ let n, shape1, shape2, newShape = Shape.checkCanStack shapes dim
+ let m1 = shapeLength shape1
+ let m2 = shapeLength shape2
+ let m = m1 * m2
+ let result = Array.zeroCreate (n * m)
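+ // Decode each output flat index i as (outer block j1, source tensor k, inner
+ // offset): chunk = i/m2 = j1*n + k, so i2 = chunk%n selects the tensor and
+ // (chunk/n)*m2 + i%m2 is the flat index within that tensor.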
+ for i=0 to (n*m)-1 do
+ let chunk = i/m2
+ let i2 = chunk%n
+ let j2 = (chunk/n)*m2+i%m2
+ result[i] <- values[i2][j2]
+
+ (tensors[0] :?> RawTensorCPU<'T>).MakeLike(result, newShape)
+
+ override t.UnstackT(dim) =
+ let shape = t.Shape
+ let shape1, shape2, unstackedShape = Shape.checkCanUnstack shape dim
+ let n = shape[dim]
+ let m1 = shapeLength shape1
+ let m2 = shapeLength shape2
+ let m = m1 * m2
+ let values = t.Values
+ let results = Array.init n (fun _ -> Array.zeroCreate m)
+ for i=0 to (n*m)-1 do
+ let chunk = i/m2
+ let i2 = chunk%n
+ let j2 = (chunk/n)*m2+i%m2
+ results[i2][j2] <- values[i]
+ results |> Array.map (fun rvalues -> t.MakeLike(rvalues, unstackedShape))
+
+ override t.CatTs(tensors, dim) =
+ let values, shapes = tensors |> Array.map (fun t -> t.GetTypedValues(), t.Shape) |> Array.unzip
+ let n, shape1, m2, shape3, outShape = Shape.checkCanCat shapes dim
+ let m1 = shapeLength shape1
+ let m3 = shapeLength shape3
+ let m = m1 * m2 * m3
+ let result = Array.zeroCreate m
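+ // Concatenate along `dim`: for each outer block j1, copy each tensor's
+ // contiguous run of d*m3 elements in turn.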
+ let mutable i = 0
+ for j1 = 0 to m1-1 do
+ for k = 0 to n-1 do
+ let d = shapes[k][dim]
+ let b = j1*m3*d
+ for j2 = 0 to d*m3-1 do
+ result[i+j2] <- values[k][b+j2]
+ i <- i + d*m3
+
+ t.MakeLike(result, outShape)
+
+ override t.SplitT(sizes, dim) =
+ let shape = t.Shape
+ let outShapes = Shape.checkCanSplit shape sizes dim
+ let n = sizes.Length
+ let shape1 = shape[0..dim-1]
+ let shape2 = shape[dim+1..]
+ let m1 = shapeLength shape1
+ let m3 = shapeLength shape2
+ let values = t.Values
+ let results = Array.init n (fun k -> Array.zeroCreate (m1 * sizes[k] * m3))
+ let mutable i = 0
+ for j1 = 0 to m1-1 do
+ for k = 0 to n-1 do
+ let d = sizes[k]
+ let b = j1*m3*d
+ for j2 = 0 to d*m3-1 do
+ results[k][b+j2] <- values[i+j2]
+ i <- i + d*m3
+
+ (results, outShapes) ||> Array.map2 (fun rvalues outShape ->
+ t.MakeLike(rvalues, outShape))
+
+ override t.PermuteT(permutation) =
+ let inversePermutation, newShape = Shape.checkCanPermute t.Shape permutation
+ let result = t.ZerosLike(newShape) :?> RawTensorCPU<'T>
+ let rec transpose (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ let transposedCoords = Array.permute (fun i -> inversePermutation[i]) globalCoords
+ result[transposedCoords] <- t[globalCoords]
+ else
+ for i=0 to shape[0]-1 do
+ transpose shape[1..] (Array.append externalCoords [|i|])
+ transpose t.Shape [||]
+ upcast result
+
+ override t.TransposeT(dim0, dim1) =
+ let permutation = [| 0 .. t.Shape.Length - 1 |]
+ permutation[dim0] <- dim1
+ permutation[dim1] <- dim0
+ t.PermuteT(permutation)
+
+ override t.TransposeT2() =
+ Shape.checkCanTranspose2d t.Dim
+ let tcols = t.Shape[1]
+ let result = Array2D.init t.Shape[1] t.Shape[0] (fun i j -> t.Values[j*tcols + i])
+ t.CreateLike(result)
+
+ override t.SqueezeT(dim) =
+ let result = Array.copy t.Values
+ t.MakeLike(result, Shape.squeeze dim t.Shape)
+
+ override t.UnsqueezeT(dim) =
+ let outputShape = Shape.checkCanUnsqueeze dim t.Shape
+ let result = Array.copy t.Values
+ t.MakeLike(result, outputShape)
+
+ override t.FlipT(dims:int[]) =
+ Shape.checkCanFlip t.Dim dims
+ match t.Dim with
+ | 0 -> t.Clone()
+ | _ ->
+ let result = t.ZerosLike(t.Shape) :?> RawTensorCPU<'T>
+ let rec flip (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ result[mirrorCoordinates globalCoords t.Shape dims] <- t[globalCoords]
+ else
+ for i=0 to shape[0]-1 do
+ flip shape[1..] (Array.append externalCoords [|i|])
+ flip t.Shape [||]
+ upcast result
+
+ override t.DilateT(dilations:int[]) =
+ Shape.checkCanDilate t.Dim dilations
+ match t.Dim with
+ | 0 -> t.Clone()
+ | _ ->
+ let result = t.ZerosLike(Shape.dilated t.Shape dilations) :?> RawTensorCPU<'T>
+ let rec dilate (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ result[dilatedCoordinates globalCoords dilations] <- t[globalCoords]
+ else
+ for i=0 to shape[0]-1 do
+ dilate shape[1..] (Array.append externalCoords [|i|])
+ dilate t.Shape [||]
+ upcast result
+
+ override t.UndilateT(dilations:int[]) =
+ match t.Dim with
+ | 0 -> t.Clone()
+ | _ ->
+ let result = t.ZerosLike(Shape.undilatedShape t.Shape dilations) :?> RawTensorCPU<'T>
+ let rec dilate (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ result[globalCoords] <- t[dilatedCoordinates globalCoords dilations]
+ else
+ for i=0 to shape[0]-1 do
+ dilate shape[1..] (Array.append externalCoords [|i|])
+ dilate result.Shape [||]
+ upcast result
+
+ override t.GatherT(dim:int, indices) =
+ Shape.checkCanGather t.Shape dim indices.Shape indices.Dtype
+ let indices = indices :?> RawTensorCPU<int>
+ let result = t.ZerosLike(indices.Shape) :?> RawTensorCPU<'T>
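+ // For every coordinate of the output (same shape as `indices`), replace the
+ // `dim` component with the index value and read that element from t.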
+ let rec gather (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ let globalCoordsIndices = Array.copy globalCoords
+ globalCoordsIndices[dim] <- indices[globalCoords]
+ result[globalCoords] <- t[globalCoordsIndices]
+ else
+ for i=0 to shape[0]-1 do
+ gather shape[1..] (Array.append externalCoords [|i|])
+ gather result.Shape [||]
+ upcast result
+
+ override t.ScatterT(dim:int, indices, destinationShape:Shape) =
+ Shape.checkCanScatter t.Shape dim indices.Shape indices.Dtype destinationShape
+ let indices = indices :?> RawTensorCPU<int>
+ let result = t.ZerosLike(destinationShape) :?> RawTensorCPU<'T>
+ let rec scatter (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ let globalCoordsIndices = Array.copy globalCoords
+ globalCoordsIndices[dim] <- indices[globalCoords]
+ result[globalCoordsIndices] <- t[globalCoords]
+ else
+ for i=0 to shape[0]-1 do
+ scatter shape[1..] (Array.append externalCoords [|i|])
+ scatter t.Shape [||]
+ upcast result
+
+ override t.ViewT(shape:Shape) =
+ Shape.checkCanView t.Shape shape
+ let result = Array.copy t.Values
+ t.MakeLike(result, shape)
+
+ override t.Cast(dtype: Dtype) =
+ if dtype = t.Dtype then
+ upcast t
+ else
+ let tflat = t.ViewT([|t.Nelement|]) // We flatten, cast, and return with the correct shape because .ToValues() in the next line does not support tensors with dimension > 4.
+ let values =
+ match t.Dtype with
+ // These special cases for byte and int8 are to ensure that values don't get truncated because RawTensor.Create cannot distinguish between byte and int8
+ | Dtype.Byte -> tflat.ToValues():?>byte[] |> Array.map int |> box
+ | Dtype.Int8 -> tflat.ToValues():?>int8[] |> Array.map int |> box
+ | _ -> tflat.ToValues()
+
+ RawTensor.Create(values, dtype=dtype, backend=t.Backend, device=t.Device).ViewT(t.Shape)
+
+ override t.MoveTo(device: Device) = t.MakeLike(values, shape, device=device)
+
+ override t.SetMutable() = isMutable <- true
+ override t.IsMutable = isMutable
+ member t.SetValues(tmp: RawTensor) = checkMutable(); values <- (tmp :?> RawTensorCPU<'T>).Values
+ override t.ClampInPlace(low, high) = t.SetValues <| t.ClampT(low, high)
+ override t.LtInPlace(t2) = t.SetValues <| t.LtTT(t2)
+ override t.GtInPlace(t2) = t.SetValues <| t.GtTT(t2)
+ override t.LeInPlace(t2) = t.SetValues <| t.LeTT(t2)
+ override t.GeInPlace(t2) = t.SetValues <| t.GeTT(t2)
+ override t.EqInPlace(t2) = t.SetValues <| t.EqTT(t2)
+ override t.NeqInPlace(t2) = t.SetValues <| t.NeqTT(t2)
+ override t.AddInPlace(t2, alpha) = t.SetValues <| t.AddTT(t2, ?alpha=alpha)
+ override t.AddScalarInPlace(t2) = t.SetValues <| t.AddTT0(t2)
+ override t.AddSliceInPlace(location, t2) = t.SetValues <| t.AddTTSlice(location, t2)
+ override t.SubInPlace(t2) = t.SetValues <| t.SubTT(t2)
+ override t.SubScalarInPlace(t2) = t.SetValues <| t.SubTT0(t2)
+ override t.MulInPlace(t2) = t.SetValues <| t.MulTT(t2)
+ override t.MulScalarInPlace(t2) = t.SetValues <| t.MulTT0(t2)
+ override t.DivInPlace(t2) = t.SetValues <| t.DivTT(t2)
+ override t.DivScalarInPlace(t2) = t.SetValues <| t.DivTT0(t2)
+ override t.PowInPlace(t2) = t.SetValues <| t.PowTT(t2)
+ override t.PowScalarInPlace(t2) = t.SetValues <| t.PowTT0(t2)
+ override t.MatMulInPlace(t2) = t.SetValues <| t.MatMulTT(t2)
+ override t.NegInPlace() = t.SetValues <| t.NegT()
+ override t.SignInPlace() = t.SetValues <| t.SignT()
+ override t.FloorInPlace() = t.SetValues <| t.FloorT()
+ override t.CeilInPlace() = t.SetValues <| t.CeilT()
+ override t.RoundInPlace() = t.SetValues <| t.RoundT()
+ override t.AbsInPlace() = t.SetValues <| t.AbsT()
+ override t.ReluInPlace() = t.SetValues <| t.ReluT()
+ override t.SoftplusInPlace() = t.SetValues <| t.SoftplusT()
+ override t.SigmoidInPlace() = t.SetValues <| t.SigmoidT()
+ override t.ExpInPlace() = t.SetValues <| t.ExpT()
+ override t.LogInPlace() = t.SetValues <| t.LogT()
+ override t.Log10InPlace() = t.SetValues <| t.Log10T()
+ override t.SqrtInPlace() = t.SetValues <| t.SqrtT()
+ override t.SinInPlace() = t.SetValues <| t.SinT()
+ override t.CosInPlace() = t.SetValues <| t.CosT()
+ override t.TanInPlace() = t.SetValues <| t.TanT()
+ override t.SinhInPlace() = t.SetValues <| t.SinhT()
+ override t.CoshInPlace() = t.SetValues <| t.CoshT()
+ override t.TanhInPlace() = t.SetValues <| t.TanhT()
+ override t.AsinInPlace() = t.SetValues <| t.AsinT()
+ override t.AcosInPlace() = t.SetValues <| t.AcosT()
+ override t.AtanInPlace() = t.SetValues <| t.AtanT()
+ override t.OnesInPlace() = t.SetValues <| t.OnesLike(t.Shape)
+ override t.RandomInPlace() = t.SetValues <| t.RandomLike(t.Shape)
+ override t.RandomNormalInPlace() = t.SetValues <| t.RandomNormalLike(t.Shape)
+ override t.RandomIntInPlace(low, high) = t.SetValues <| t.RandomIntLike(t.Shape, low, high)
+ override t.ZerosInPlace() = t.SetValues <| t.ZerosLike(t.Shape)
+
+// Defines the math-dependent operations for `RawTensorCPU<'T>` types using
+// generic inline code. Each implementing type (e.g. RawTensorFloat32)
+// instantiates and inlines these at concrete types.
+//
+// Most of the functions produce (value, shape) pairs for use in constructing an instance
+// of the final implementing type.
+[<RequireQualifiedAccess>]
+module internal RawTensorCPU =
+
+ /// Access the natural "0" value for the element of a CPU tensor type
+ let inline zero< ^T when ^T : (static member Zero : ^T) > = LanguagePrimitives.GenericZero< ^T >
+
+ /// Access the natural "1" value for the element of a CPU tensor type
+ let inline one< ^T when ^T : (static member One : ^T) > = LanguagePrimitives.GenericOne< ^T >
+
+ /// Get the scalar "0" tensor for a CPU tensor type
+ let inline Zero () : (^T[] * Shape) =
+ let values = [|zero< ^T > |]
+ (values, Shape.scalar)
+
+ /// Get the scalar "1" tensor for a CPU tensor type
+ let inline One() : (^T[] * Shape) =
+ let values = [| one< ^T > |]
+ (values, Shape.scalar)
+
+ /// Get the "0" tensor for a CPU tensor type of the given shape
+ let inline Zeros(shape:Shape) : (^T[] * Shape) =
+ let values = Array.zeroCreate (shapeLength shape)
+ (values, shape)
+
+ /// Get the "0" tensor for a CPU tensor type of the given shape
+ let inline Empty(shape:Shape) : (^T[] * Shape) = Zeros shape
+
+ let inline Ones(shape:Shape) =
+ let values = Array.create (shapeLength shape) one< ^T >
+ (values, shape)
+
+ let inline CreateFromFlatArray (values: System.Array, shape: Shape) : (^T[] * Shape) =
+ match values with
+ | :? ( ^T[]) as arr -> arr, shape
+ | _ -> invalidArg "value" (sprintf "Data unsuitable for RawTensorCPU of type %A" typeof< ^T >)
+
+ let inline Equals(t1: RawTensorCPU< ^T >, t2: RawTensor) =
+ if t1.Dtype <> t2.Dtype then
+ opNotSupported2 "Equals" t1.Dtype t2.Dtype
+ match t2 with
+ | :? RawTensorCPU< ^T > as t2 -> t1.Shape = t2.Shape && t1.Values = t2.Values
+ | _ -> invalidOp <| sprintf "Cannot compare RawTensors t1 (Shape=%A, Dtype=%A, Device=%A, Backend=%A) and t2 (Shape=%A, Dtype=%A, Device=%A, Backend=%A)" t1.Shape t1.Dtype t1.Device t1.Backend t2.Shape t2.Dtype t2.Device t2.Backend
+
+ let inline Full(shape:Shape, value: ^T) =
+ let result = Array.create (shapeLength shape) value
+ (result, shape)
+
+ let inline AllClose(t1: RawTensorCPU< ^T >, t2:RawTensor, relativeTolerance: ^T, absoluteTolerance: ^T) =
+ match t2 with
+ | :? RawTensorCPU< ^T > as t2 -> t1.Shape = t2.Shape && Array.allClose relativeTolerance absoluteTolerance t1.Values t2.Values
+ | _ -> invalidOp <| sprintf "Cannot compare RawTensors t1 (Shape=%A, Dtype=%A, Device=%A, Backend=%A) and t2 (Shape=%A, Dtype=%A, Device=%A, Backend=%A)" t1.Shape t1.Dtype t1.Device t1.Backend t2.Shape t2.Dtype t2.Device t2.Backend
+
+ let inline ClampT(t: RawTensorCPU< ^T>, low: RawTensor, high:RawTensor) : (^T[] * Shape) =
+ if low.Dim <> 0 || high.Dim <> 0 then failwithf "Expecting scalar low and high"
+ let tvalue = t.Values
+ let lowvalue = low.GetTypedValues()[0]
+ let highvalue = high.GetTypedValues()[0]
+ let result = Array.map (fun v -> (max (min v highvalue) lowvalue)) tvalue
+ (result, t.Shape)
+
+ let inline LtTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (<) t1value t2value
+ (result, t1.Shape)
+
+ let inline GtTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (>) t1value t2value
+ (result, t1.Shape)
+
+ let inline LeTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (<=) t1value t2value
+ (result, t1.Shape)
+
+ let inline GeTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (>=) t1value t2value
+ (result, t1.Shape)
+
+ let inline EqTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (=) t1value t2value
+ (result, t1.Shape)
+
+ let inline NeqTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (bool[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (<>) t1value t2value
+ (result, t1.Shape)
+
+ let inline MaxIndexT(t: RawTensorCPU< ^T >) =
+ t.FlatIndexToIndex(Seq.maxIndex t.Values)
+
+ let inline MinMaxReduceT op (t: RawTensorCPU< ^T >, dim, keepDim) : RawTensor * RawTensor =
+ let newShape = Shape.checkCanMinMaxReduce dim keepDim t.Shape
+ let shape = t.Shape
+ let shape1 = shape[0..dim-1]
+ let n = shape[dim]
+ let shape2 = shape[dim+1..]
+ let m1 = shapeLength shape1
+ let m3 = shapeLength shape2
+ let values = t.Values
+ let results = Array.zeroCreate (m1 * m3)
+ let indexes = Array.zeroCreate (m1 * m3)
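+ // Scan the reduced dimension for every (outer, inner) position; j3 = 0 seeds
+ // the running best, after which `op` ((<) for min, (>) for max) decides.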
+ for j1 = 0 to m1-1 do
+ for j2 = 0 to m3-1 do
+ let b = j1*m3 + j2
+ for j3 = 0 to n-1 do
+ let v = values[j1*n*m3+j3*m3+j2]
+ if op v results[b] || (j3 = 0) then
+ results[b] <- v
+ indexes[b] <- j3
+ let resultsT = t.MakeLike(results, newShape)
+ let indexesT = t.CreateLike(indexes, dtype=Dtype.Int32).ViewT(newShape)
+ resultsT, indexesT
+
+ let inline MinIndexT(t: RawTensorCPU< ^T >) =
+ t.FlatIndexToIndex(Seq.minIndex t.Values)
+
+ let inline AddTT(t1: RawTensorCPU< ^T >, t2: RawTensor, alpha: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (fun a b -> a + alpha * b) t1value t2value
+ (result, t1.Shape)
+
+ let inline AddTT0(t1: RawTensorCPU< ^T >, b: ^T, alpha: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let result = Array.map (fun a -> a + alpha * b) t1value
+ (result, t1.Shape)
+
+ let inline internal AddTTSlice(plus, t1: RawTensorCPU< ^T >, location:int[], t2: RawTensor) : (^T[] * Shape) =
+ Shape.checkCanAddSlice t1.Shape location t2.Shape
+ let t1value = t1.Values
+ let t2 = t2 :?> RawTensorCPU< ^T >
+ let result = Array.copy t1value
+ let shape2 = Shape.unsqueezeAs t2.Shape t1.Shape
+ let rec add (shape2:Shape) externalCoords =
+ if shape2.Length = 1 then
+ for i=0 to shape2[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ let t1Coords = Array.map2 (+) globalCoords location
+ let t1FlatIndex = t1.IndexToFlatIndex(t1Coords)
+ result[t1FlatIndex] <- plus result[t1FlatIndex] t2[globalCoords]
+ else
+ for i=0 to shape2[0]-1 do
+ add (shape2[1..]) (Array.append externalCoords [|i|])
+ add shape2 [||]
+ (result, t1.Shape)
+
+ let inline SubTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (-) t1value t2value
+ (result, t1.Shape)
+
+ let inline SubT0T(a: ^T, t2: RawTensor) : (^T[] * Shape) =
+ let t2value = t2.GetTypedValues()
+ let result = Array.map (fun b -> a - b) t2value
+ (result, t2.Shape)
+
+ let inline SubTT0(t1: RawTensorCPU< ^T >, b: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let result = Array.map (fun t -> t - b) t1value
+ (result, t1.Shape)
+
+ let inline MulTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (*) t1value t2value
+ (result, t1.Shape)
+
+ let inline MulTT0(t1: RawTensorCPU< ^T >, b: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let result = Array.map (fun a -> a * b) t1value
+ (result, t1.Shape)
+
+ let inline DivTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 (/) t1value t2value
+ (result, t1.Shape)
+
+ let inline DivT0T(a: ^T, t2: RawTensor) : (^T[] * Shape) =
+ let t2value = t2.GetTypedValues()
+ let result = Array.map (fun b -> a / b) t2value
+ (result, t2.Shape)
+
+ let inline DivTT0(t1: RawTensorCPU< ^T >, b: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let result = Array.map (fun a -> a / b) t1value
+ (result, t1.Shape)
+
+ let inline PowTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let t2value = t2.GetTypedValues()
+ let result = Array.map2 ( ** ) t1value t2value
+ (result, t1.Shape)
+
+ let inline PowT0T(a: ^T , t2: RawTensor) : (^T[] * Shape) =
+ let t2value = t2.GetTypedValues()
+ let result = Array.map (fun b -> a ** b) t2value
+ (result, t2.Shape)
+
+ let inline PowTT0(t1: RawTensorCPU< ^T >, b: ^T) : (^T[] * Shape) =
+ let t1value = t1.Values
+ let result = Array.map (fun a -> a ** b) t1value
+ (result, t1.Shape)
+
+ let inline MatMulTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ let (t1BatchPart, t1MatrixPart), (t2BatchPart, t2MatrixPart) = Shape.checkCanMatmul t1.Shape t2.Shape
+ if t1BatchPart <> t2BatchPart then failwithf "Cannot matrix multiply raw tensors with shapes %A, %A - mismatch batching" t1.Shape t2.Shape
+ let t1rows, t1cols = t1MatrixPart[0], t1MatrixPart[1]
+ let t2rows, t2cols = t2MatrixPart[0], t2MatrixPart[1]
+ let t1value = t1.Values
+ let t2value = (t2 :?> RawTensorCPU< ^T >).Values
+ let newShape = Array.append t1BatchPart [| t1rows; t2cols |]
+ let nb = shapeLength t1BatchPart
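+ // Naive batched matmul: for batch b and output cell (i, j), accumulate the
+ // dot product of row i of t1 with column j of t2 on the flat value arrays.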
+ let values = Array.initFlat3D nb t1rows t2cols (fun b i j -> Array.sumBy (fun k -> t1value[b*t1cols*t1rows + i*t1cols + k] * t2value[b*t2cols*t2rows + k*t2cols + j]) [|0..(t2rows-1)|] )
+ (values, newShape)
+
+ let inline BMMTT(t1: RawTensorCPU< ^T >, t2: RawTensor) : (^T[] * Shape) =
+ Shape.checkCanBMM t1.Shape t2.Shape |> ignore
+ MatMulTT(t1, t2)
+
+ // Returns the LU decomposition of this matrix. The return values are the LU matrix, the pivot indices, and a toggle indicating the parity of the row exchanges made during the decomposition: +1 if the number of exchanges was even, -1 if odd. Source: Atilim Gunes Baydin, FsAlg, 2015, https://github.com/gbaydin/FsAlg
+ let inline LUDecomposition (m: ^T[,]) =
+ let rows = m.GetLength(0)
+ let res = Array2D.copy m
+ let perm = Array.init rows (fun i -> i)
+ let mutable toggle = LanguagePrimitives.GenericOne<'T>
+ for j = 0 to rows - 2 do
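+ // Partial pivoting: find the largest absolute entry in column j at or below
+ // the diagonal and swap that row into the pivot position, recording the
+ // permutation and flipping the sign toggle on each exchange.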
+ let mutable colmax:'T = abs res[j, j]
+ let mutable prow = j
+ for i = j + 1 to rows - 1 do
+ let absresij = abs res[i, j]
+ if absresij > colmax then
+ colmax <- absresij
+ prow <- i
+ if prow <> j then
+ let tmprow = res[prow, 0..]
+ res[prow, 0..] <- res[j, 0..]
+ res[j, 0..] <- tmprow
+ let tmp = perm[prow]
+ perm[prow] <- perm[j]
+ perm[j] <- tmp
+ toggle <- -toggle
+ for i = j + 1 to rows - 1 do
+ res[i, j] <- res[i, j] / res[j, j]
+ for k = j + 1 to rows - 1 do
+ res[i, k] <- res[i, k] - res[i, j] * res[j, k]
+ res, perm, toggle
+
+ // Finds the array x satisfying lu * x = b by substitution, where `lu` is a combined LU matrix. Source: Atilim Gunes Baydin, FsAlg, 2015, https://github.com/gbaydin/FsAlg
+ let inline matrixSolveHelper (lu:^T[,]) (b:^T[]) =
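+ // Forward substitution through the unit-lower-triangular factor, then back
+ // substitution through the upper factor, both packed in the single `lu` array.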
+ let n = lu.GetLength 0
+ let x = Array.copy b
+ for i = 1 to n - 1 do
+ let mutable sum = x[i]
+ for j = 0 to i - 1 do
+ sum <- sum - lu[i, j] * x[j]
+ x[i] <- sum
+ x[n - 1] <- x[n - 1] / lu[n - 1, n - 1]
+ for i in (n - 2) .. -1 .. 0 do
+ let mutable sum = x[i]
+ for j = i + 1 to n - 1 do
+ sum <- sum - lu[i, j] * x[j]
+ x[i] <- sum / lu[i, i]
+ x
+
+ // Solves the system of linear equations ax = b, where the coefficients are given in matrix `a` and the right-hand side is vector `b`. The returned vector corresponds to x. Source: Atilim Gunes Baydin, FsAlg, 2015, https://github.com/gbaydin/FsAlg
+ let inline solve (a: ^T[,]) (b: ^T[]) =
+ let lu, perm, _ = LUDecomposition a
+ let bp = Array.init (a.GetLength(0)) (fun i -> b[perm[i]])
+ matrixSolveHelper lu bp
+
+ // Inverts a square matrix by LU-decomposing it and solving for each column of the identity. Source: Atilim Gunes Baydin, FsAlg, 2015, https://github.com/gbaydin/FsAlg
+ let inline inverseMatrix (m: ^T[,]) =
+ let rows = m.GetLength(0)
+ let res = Array2D.copy m
+ let lu, perm, _ = LUDecomposition m
+ let b:'T[] = Array.zeroCreate rows
+ for i = 0 to rows - 1 do
+ for j = 0 to rows - 1 do
+ if i = perm[j] then
+ b[j] <- LanguagePrimitives.GenericOne<'T>
+ else
+ b[j] <- LanguagePrimitives.GenericZero<'T>
+ let x = matrixSolveHelper lu b
+ res[0.., i] <- x
+ res
+
+ let inline InverseT(t: RawTensorCPU< ^T >) : RawTensorCPU< ^T > =
+ Shape.checkCanInvert t.Shape
+ let dim = t.Shape.Length
+ if dim = 2 then // One matrix
+ let tinv = inverseMatrix (t.ToArray() :?> ^T[,])
+ let tinvflat = [| for i=0 to tinv.GetLength(0)-1 do for j=0 to tinv.GetLength(1)-1 do yield tinv[i, j] |]
+ t.MakeLike(tinvflat, t.Shape) :?> RawTensorCPU<'T>
+ else // Batch of matrices
+ let tinvs =
+ t.UnstackT(0)
+ |> Array.map (fun v -> inverseMatrix (v.ToArray() :?> ^T[,]))
+ |> Array.map (fun v -> [| for i=0 to v.GetLength(0)-1 do for j=0 to v.GetLength(1)-1 do yield v[i, j] |])
+ |> Array.map (fun v -> t.MakeLike(v, [|t.Shape[1]; t.Shape[2]|]))
+ t.StackTs(tinvs, 0) :?> RawTensorCPU<'T>
+
+ let inline diagonal(square: ^T[,]) =
+ let n = square.GetLength(0)
+ if n <> square.GetLength(1) then failwith "Expecting a square array"
+ Array.init n (fun i -> square[i, i])
+
+ let inline prod(t: ^T[]) =
+ Array.fold (fun s x -> s * x) LanguagePrimitives.GenericOne<'T> t
+
+ let inline DetT(t: RawTensorCPU< ^T >) : RawTensorCPU< ^T > =
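+ // det(A) = sign(permutation) * product(diagonal of U), taken from the
+ // pivoted LU factorization computed above.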
+ Shape.checkCanDet t.Shape
+ let dim = t.Shape.Length
+ if dim = 2 then
+ let lu, _, toggle = LUDecomposition(t.ToArray() :?> ^T[,])
+ let d:^T = toggle * (prod (diagonal lu))
+ t.MakeLike([|d|], [||]) :?> RawTensorCPU<'T>
+ else
+ let tdets =
+ t.UnstackT(0)
+ |> Array.map (fun v -> let lu, _, toggle = LUDecomposition(v.ToArray() :?> ^T[,]) in lu, toggle)
+ |> Array.map (fun (lu, toggle) -> toggle * (prod (diagonal lu)))
+ |> Array.map (fun v -> t.MakeLike([|v|], [||]))
+ t.StackTs(tdets, 0) :?> RawTensorCPU<'T>
+
+ let inline SolveTT(a: RawTensorCPU< ^T >, b: RawTensor) : RawTensorCPU< ^T > =
+ let newShape = Shape.checkCanSolve a.Shape b.Shape
+ let dimA = a.Shape.Length
+ let dimB = b.Shape.Length
+ if dimA = 2 then
+ let n = a.Shape[0]
+ let amatrix = (a.ToArray() :?> ^T[,])
+ if dimB = 1 then
+ let bvector = (b.ToArray() :?> ^T[])
+ let s = solve amatrix bvector
+ a.MakeLike(s, newShape) :?> RawTensorCPU<'T>
+ else // dimB = 2
+ let cols =
+ b.UnstackT(1)
+ |> Array.map (fun v -> v.ToArray() :?> ^T[])
+ |> Array.map (fun v -> solve amatrix v)
+ |> Array.map (fun v -> a.MakeLike(v, [|n|]))
+ a.StackTs(cols, 1) :?> RawTensorCPU<'T>
+ else // dimA = 3
+ let n = a.Shape[1]
+ if dimB = 2 then
+ let aa = a.UnstackT(0)
+ let bb = b.UnstackT(0)
+ let ss =
+ Array.zip aa bb
+ |> Array.map (fun (aaa, bbb) ->
+ let amatrix = (aaa.ToArray() :?> ^T[,])
+ let bvector = (bbb.ToArray() :?> ^T[])
+ let s = solve amatrix bvector
+ a.MakeLike(s, [|n|]))
+ a.StackTs(ss, 0) :?> RawTensorCPU<'T>
+ else // dimB = 3
+ let aa = a.UnstackT(0)
+ let bb = b.UnstackT(0)
+ let ss =
+ Array.zip aa bb
+ |> Array.map (fun (aaa, bbb) ->
+ let amatrix = (aaa.ToArray() :?> ^T[,])
+ let cols =
+ bbb.UnstackT(1)
+ |> Array.map (fun v -> v.ToArray() :?> ^T[])
+ |> Array.map (fun v -> solve amatrix v)
+ |> Array.map (fun v -> a.MakeLike(v, [|n|]))
+ a.StackTs(cols, 1))
+ a.StackTs(ss, 0) :?> RawTensorCPU<'T>
+ // failwithf "Unsupported shapes %A %A" a.Shape b.Shape
+
+ let inline MaxPool1D(t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T > * RawTensorCPU< int > =
+ let batchSize, channels, inputSize, outputSize, outputShape =
+ Shape.checkCanMaxpool1d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ let indices = t1.ZerosLike(outputShape, dtype=Int32) :?> RawTensorCPU<int>
+ let minValue = t1[t1.MinIndexT()] - one
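+ // Seeding the running max at (global minimum - 1) guarantees any in-window
+ // input value replaces it on the first comparison.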
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v=0 to outputSize-1 do
+ let mutable maxvalue = minValue
+ let mutable maxindex = -1
+ for u=0 to kernelSize-1 do
+ let i = (v*stride) + u - padding
+ if i >= 0 && i < inputSize then
+ let value = t1[n, c, i]
+ if value > maxvalue then
+ maxvalue <- value
+ maxindex <- i
+ result[[|n; c; v|]] <- maxvalue
+ indices[[|n; c; v|]] <- maxindex
+ result, indices
+
+ let inline MaxPool2D(t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T > * RawTensorCPU< int > =
+ let batchSize, channels, (inputHeight, inputWidth), (kernelHeight, kernelWidth), (outputHeight, outputWidth), outputShape =
+ Shape.checkCanMaxpool2d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ let indices = t1.ZerosLike(outputShape, dtype=Int32) :?> RawTensorCPU<int>
+ let minValue = t1[t1.MinIndexT()] - one
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputHeight-1 do
+ for v1=0 to outputWidth-1 do
+ let mutable maxvalue = minValue
+ let mutable maxindexi0 = -1
+ let mutable maxindexi1 = -1
+ for u0=0 to kernelHeight-1 do
+ for u1=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ if i0 >= 0 && i0 < inputHeight && i1 >= 0 && i1 < inputWidth then
+ let value = t1[n, c, i0, i1]
+ if value > maxvalue then
+ maxvalue <- value
+ maxindexi0 <- i0
+ maxindexi1 <- i1
+ result[[|n; c; v0; v1|]] <- maxvalue
+ indices[[|n; c; v0; v1|]] <- indexToFlatIndex [|inputHeight; inputWidth|] [|maxindexi0; maxindexi1|]
+ result, indices
+
+ let inline MaxPool3D(t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T > * RawTensorCPU< int > =
+ let (batchSize, channels, (inputDepth, inputHeight, inputWidth), (kernelDepth, kernelHeight, kernelWidth), (outputDepth, outputHeight, outputWidth), outputShape) =
+ Shape.checkCanMaxpool3d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ let indices = t1.ZerosLike(outputShape, dtype=Int32) :?> RawTensorCPU<int>
+ let minValue = t1[t1.MinIndexT()] - one
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputDepth-1 do
+ for v1=0 to outputHeight-1 do
+ for v2=0 to outputWidth-1 do
+ let mutable maxvalue = minValue
+ let mutable maxindexi0 = -1
+ let mutable maxindexi1 = -1
+ let mutable maxindexi2 = -1
+ for u0=0 to kernelDepth-1 do
+ for u1=0 to kernelHeight-1 do
+ for u2=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ let i2 = (v2*stride[2]) + u2 - padding[2]
+ if i0 >= 0 && i0 < inputDepth && i1 >= 0 && i1 < inputHeight && i2 >= 0 && i2 < inputWidth then
+ let value = t1[n, c, i0, i1, i2]
+ if value > maxvalue then
+ maxvalue <- value
+ maxindexi0 <- i0
+ maxindexi1 <- i1
+ maxindexi2 <- i2
+ result[[|n; c; v0; v1; v2|]] <- maxvalue
+ indices[[|n; c; v0; v1; v2|]] <- indexToFlatIndex [|inputDepth; inputHeight; inputWidth|] [|maxindexi0; maxindexi1; maxindexi2|]
+ result, indices
+
+ let inline MaxUnpool1D(t1: RawTensorCPU< ^T >, indices: RawTensorCPU<int>, outputSize: int[]) : RawTensorCPU< ^T > =
+ let batchSize, channels, inputSize, outputShape =
+ Shape.checkCanMaxunpool1d t1.Dtype t1.Shape indices.Dtype indices.Shape outputSize
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for u=0 to inputSize-1 do
+ let i = indices[[|n; c; u|]]
+ result[[|n; c; i|]] <- t1[[|n; c; u|]]
+ result
+
+ let inline MaxUnpool2D(t1: RawTensorCPU< ^T >, indices: RawTensorCPU<int>, outputSize:int[]) : RawTensorCPU< ^T > =
+ let batchSize, channels, (inputHeight, inputWidth), outputShape =
+ Shape.checkCanMaxunpool2d t1.Dtype t1.Shape indices.Dtype indices.Shape outputSize
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for u0=0 to inputHeight-1 do
+ for u1=0 to inputWidth-1 do
+ let iflat = indices[[|n; c; u0; u1|]]
+ let i = flatIndexToIndex [|outputSize[2]; outputSize[3]|] iflat
+ result[[|n; c; i[0]; i[1]|]] <- t1[[|n; c; u0; u1|]]
+ result
+
+ let inline MaxUnpool3D(t1: RawTensorCPU< ^T >, indices: RawTensorCPU<int>, outputSize:int[]) : RawTensorCPU< ^T > =
+ let batchSize, channels, (inputDepth, inputHeight, inputWidth), outputShape =
+ Shape.checkCanMaxunpool3d t1.Dtype t1.Shape indices.Dtype indices.Shape outputSize
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for u0=0 to inputDepth-1 do
+ for u1=0 to inputHeight-1 do
+ for u2=0 to inputWidth-1 do
+ let iflat = indices[[|n; c; u0; u1; u2|]]
+ let i = flatIndexToIndex [|outputSize[2]; outputSize[3]; outputSize[4]|] iflat
+ result[[|n; c; i[0]; i[1]; i[2]|]] <- t1[[|n; c; u0; u1; u2|]]
+ result
+
+ let inline Conv1D(t1: RawTensorCPU< ^T >, t2: RawTensor, stride, padding) : RawTensorCPU< ^T > =
+ // t1: input, NxCxI (batchSize x inputChannels x inputLength)
+ // t2: filters, KxCxF (outputChannels x inputChannels x kernelLength)
+ let batchSize, inputChannels, kernelSize, outputChannels, outputSize, outputShape =
+ Shape.checkCanConv1d t1.DeviceType t2.DeviceType t1.Dtype t2.Dtype t1.Shape t2.Shape stride padding 1
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
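+ // Implement zero-padding by embedding t1 into a larger zero tensor with
+ // AddTTSlice, so the main loop needs no boundary checks.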
+ let t1 =
+ if padding = 0 then
+ t1
+ else
+ let tshape = Array.copy t1.Shape
+ tshape[2] <- t1.Shape[2] + padding * 2
+ let t = t1.ZerosLike(tshape)
+ t.AddTTSlice([|0; 0; padding|], t1) :?> RawTensorCPU< ^T >
+ let t2 = t2 :?> RawTensorCPU< ^T >
+ for n=0 to batchSize-1 do
+ for k=0 to outputChannels-1 do
+ for v=0 to outputSize-1 do
+ let mutable value = zero
+ for c=0 to inputChannels-1 do
+ for u=0 to kernelSize-1 do
+ value <- value + t2[k, c, u] * t1[n, c, (v*stride) + u]
+ result[[|n; k; v|]] <- value
+ result
+
+ let inline Conv2D(t1: RawTensorCPU< ^T >, t2: RawTensor, stride: int[], padding: int[]) : RawTensorCPU< ^T > =
+ // t1: input, NxCxHxW (batchSize x inputChannels x inputHeight x inputWidth)
+ // t2: filters, KxCxFxG (outputChannels x inputChannels x kernelHeight x kernelWidth)
+ let batchSize, inputChannels, (kernelHeight, kernelWidth), (outputChannels, outputHeight, outputWidth), outputShape =
+ Shape.checkCanConv2d t1.DeviceType t2.DeviceType t1.Dtype t2.Dtype t1.Shape t2.Shape stride padding [|1;1|]
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU< ^T>
+ let t1 =
+ if padding[0] = 0 && padding[1] = 0 then
+ t1
+ else
+ let tshape = Array.copy t1.Shape
+ tshape[2] <- t1.Shape[2] + padding[0] * 2
+ tshape[3] <- t1.Shape[3] + padding[1] * 2
+ let t = t1.ZerosLike(tshape)
+ t.AddTTSlice([|0; 0; padding[0]; padding[1]|], t1) :?> RawTensorCPU< ^T >
+ let t2 = t2 :?> RawTensorCPU< ^T >
+ for n=0 to batchSize-1 do
+ for k=0 to outputChannels-1 do
+ for v0=0 to outputHeight-1 do
+ for v1=0 to outputWidth-1 do
+ let mutable value = zero
+ for c=0 to inputChannels-1 do
+ for u0=0 to kernelHeight-1 do
+ for u1=0 to kernelWidth-1 do
+ value <- value + t2[k, c, u0, u1] * t1[n, c, (v0*stride[0])+u0, (v1*stride[1])+u1]
+ result[[|n; k; v0; v1|]] <- value
+ result
+
+ let inline Conv3D(t1: RawTensorCPU< ^T >, t2: RawTensor, stride: int[], padding: int[]) : RawTensorCPU< ^T > =
+ // t1: input, NxCxDxHxW (batchSize x inputChannels x inputDepth x inputHeight x inputWidth)
+ // t2: filters, KxCxExFxG (outputChannels x inputChannels x kernelDepth x kernelHeight x kernelWidth)
+ let batchSize, inputChannels, (kernelDepth, kernelHeight, kernelWidth), (outputChannels, outputDepth, outputHeight, outputWidth), outputShape =
+ Shape.checkCanConv3d t1.DeviceType t2.DeviceType t1.Dtype t2.Dtype t1.Shape t2.Shape stride padding [|1;1;1|]
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU< ^T>
+ let t1 =
+ if padding[0] = 0 && padding[1] = 0 && padding[2] = 0 then
+ t1
+ else
+ let tshape = Array.copy t1.Shape
+ tshape[2] <- t1.Shape[2] + padding[0] * 2
+ tshape[3] <- t1.Shape[3] + padding[1] * 2
+ tshape[4] <- t1.Shape[4] + padding[2] * 2
+ let t = t1.ZerosLike(tshape)
+ t.AddTTSlice([|0; 0; padding[0]; padding[1]; padding[2]|], t1) :?> RawTensorCPU< ^T >
+ let t2 = t2 :?> RawTensorCPU< ^T >
+ for n=0 to batchSize-1 do
+ for k=0 to outputChannels-1 do
+ for v0=0 to outputDepth-1 do
+ for v1=0 to outputHeight-1 do
+ for v2=0 to outputWidth-1 do
+ let mutable value = zero
+ for c=0 to inputChannels-1 do
+ for u0=0 to kernelDepth-1 do
+ for u1=0 to kernelHeight-1 do
+ for u2=0 to kernelWidth-1 do
+ // printfn "%A %A %A | %A %A %A" v0 v1 v2 u0 u1 u2
+ value <- value + t2[k, c, u0, u1, u2] * t1[n, c, (v0*stride[0])+u0, (v1*stride[1])+u1, (v2*stride[2])+u2]
+ result[[|n; k; v0; v1; v2|]] <- value
+ result
+
+ let inline AvgPool1D ofInt (t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T >=
+ let batchSize, channels, inputSize, outputSize, outputShape =
+ Shape.checkCanAvgpool1d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v=0 to outputSize-1 do
+ let mutable avg = zero
+ for u=0 to kernelSize-1 do
+ let i = (v*stride) + u - padding
+ if i >= 0 && i < inputSize then
+ let value = t1[n, c, i]
+ avg <- avg + value
+ result[[|n; c; v|]] <- avg / ofInt kernelSize
+ result
+
+ let inline AvgPool2D ofInt (t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T > =
+ let batchSize, channels, (inputHeight, inputWidth), (kernelHeight, kernelWidth), (outputHeight, outputWidth), outputShape =
+ Shape.checkCanAvgpool2d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ let kernelSize = kernelHeight * kernelWidth
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputHeight-1 do
+ for v1=0 to outputWidth-1 do
+ let mutable avg = zero
+ for u0=0 to kernelHeight-1 do
+ for u1=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ if i0 >= 0 && i0 < inputHeight && i1 >= 0 && i1 < inputWidth then
+ let value = t1[n, c, i0, i1]
+ avg <- avg + value
+ result[[|n; c; v0; v1|]] <- avg / ofInt kernelSize
+ result
+
+ let inline AvgPool3D ofInt (t1: RawTensorCPU< ^T >, kernelSize, stride, padding) : RawTensorCPU< ^T > =
+ let (batchSize, channels, (inputDepth, inputHeight, inputWidth), (kernelDepth, kernelHeight, kernelWidth), (outputDepth, outputHeight, outputWidth), outputShape) =
+ Shape.checkCanAvgpool3d t1.Dtype t1.Shape kernelSize stride padding
+ let result = t1.ZerosLike(outputShape) :?> RawTensorCPU<'T>
+ let kernelSize = kernelDepth * kernelHeight * kernelWidth
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputDepth-1 do
+ for v1=0 to outputHeight-1 do
+ for v2=0 to outputWidth-1 do
+ let mutable avg = zero
+ for u0=0 to kernelDepth-1 do
+ for u1=0 to kernelHeight-1 do
+ for u2=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ let i2 = (v2*stride[2]) + u2 - padding[2]
+ if i0 >= 0 && i0 < inputDepth && i1 >= 0 && i1 < inputHeight && i2 >= 0 && i2 < inputWidth then
+ let value = t1[n, c, i0, i1, i2]
+ avg <- avg + value
+ result[[|n; c; v0; v1; v2|]] <- avg / ofInt kernelSize
+ result
+
+ let inline AvgPoolReverse1D ofInt (t1: RawTensorCPU< ^T >, originalInput: RawTensor, kernelSize, stride, padding) : RawTensorCPU< ^T > =
+ let batchSize, channels, inputSize, outputSize, _outputShape =
+ Shape.checkCanAvgpool1d t1.Dtype originalInput.Shape kernelSize stride padding
+ let result = t1.ZerosLike(originalInput.Shape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v=0 to outputSize-1 do
+ for u=0 to kernelSize-1 do
+ let i = (v*stride) + u - padding
+ if i >= 0 && i < inputSize then
+ result[[|n; c; i|]] <- t1[[|n; c; v|]] / ofInt kernelSize
+ result
+
+ let inline AvgPoolReverse2D ofInt (t1: RawTensorCPU< ^T >, originalInput: RawTensor, kernelSize, stride, padding) : RawTensorCPU< ^T > =
+ let batchSize, channels, (inputHeight, inputWidth), (kernelHeight, kernelWidth), (outputHeight, outputWidth), _outputShape =
+ Shape.checkCanAvgpool2d t1.Dtype originalInput.Shape kernelSize stride padding
+ let kernelSize = kernelHeight * kernelWidth
+ let result = t1.ZerosLike(originalInput.Shape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputHeight-1 do
+ for v1=0 to outputWidth-1 do
+ for u0=0 to kernelHeight-1 do
+ for u1=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ if i0 >= 0 && i0 < inputHeight && i1 >= 0 && i1 < inputWidth then
+ result[[|n; c; i0; i1|]] <- t1[[|n; c; v0; v1|]] / ofInt kernelSize
+ result
+
+ let inline AvgPoolReverse3D ofInt (t1: RawTensorCPU< ^T >, originalInput: RawTensor, kernelSize, stride, padding) : RawTensorCPU< ^T > =
+ let batchSize, channels, (inputDepth, inputHeight, inputWidth), (kernelDepth, kernelHeight, kernelWidth), (outputDepth, outputHeight, outputWidth), _outputShape =
+ Shape.checkCanAvgpool3d t1.Dtype originalInput.Shape kernelSize stride padding
+ let kernelSize = kernelDepth * kernelHeight * kernelWidth
+ let result = t1.ZerosLike(originalInput.Shape) :?> RawTensorCPU<'T>
+ for n=0 to batchSize-1 do
+ for c=0 to channels-1 do
+ for v0=0 to outputDepth-1 do
+ for v1=0 to outputHeight-1 do
+ for v2=0 to outputWidth-1 do
+ for u0=0 to kernelDepth-1 do
+ for u1=0 to kernelHeight-1 do
+ for u2=0 to kernelWidth-1 do
+ let i0 = (v0*stride[0]) + u0 - padding[0]
+ let i1 = (v1*stride[1]) + u1 - padding[1]
+ let i2 = (v2*stride[2]) + u2 - padding[2]
+ if i0 >= 0 && i0 < inputDepth && i1 >= 0 && i1 < inputHeight && i2 >= 0 && i2 < inputWidth then
+ result[[|n; c; i0; i1; i2|]] <- t1[[|n; c; v0; v1; v2|]] / ofInt kernelSize
+ result
+
+ let inline NegT op (t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = Array.map op t.Values
+ (result, t.Shape)
+
+ let inline SumT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ if Array.isEmpty t.Values then ([|zero< ^T >|], Shape.scalar) else // Return a zero-valued scalar tensor if summing a zero-sized tensor (not holding any value). This is mirroring the behavior in PyTorch 1.5.1.
+ let result = Array.reduce (+) t.Values
+ ([|result|], [||])
+
+ let inline SumTDim(t: RawTensorCPU< ^T >, dim: int) : RawTensorCPU< ^T > =
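+ // Reduce along `dim` by slicing one index at a time with GetSlice and
+ // accumulating in place into a zero slice created in the summation dtype.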
+ let sBounds = Array2D.init t.Dim 3 (fun i j -> if j=0 then 0 elif j=1 then t.Shape[i]-1 else 0)
+ sBounds[dim, 1] <- 0
+ sBounds[dim, 2] <- 1
+ let s = t.ZerosLike(shape=t.Shape, dtype=t.Dtype.SummationType).GetSlice(sBounds) :?> RawTensorCPU<'T>
+ s.SetMutable()
+ for i=0 to t.Shape[dim]-1 do
+ sBounds[dim,0] <- i
+ sBounds[dim,1] <- i
+ sBounds[dim,2] <- 1
+ s.AddInPlace(t.GetSlice(sBounds).Cast(t.Dtype.SummationType))
+ s
+
+ let inline SignT op (t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map op
+ (result, t.Shape)
+
+ let inline FloorT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map floor
+ (result, t.Shape)
+
+ let inline CeilT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map ceil
+ (result, t.Shape)
+
+ let inline RoundT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map round
+ (result, t.Shape)
+
+ let inline AbsT op (t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map op
+ (result, t.Shape)
+
+ let inline ReluT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map (max zero< ^T >)
+ (result, t.Shape)
+
+ let inline SoftplusT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map (fun x -> (max zero< ^T > x) + log(one< ^T > + exp(-abs(x))))
+ (result, t.Shape)
+
+ let inline SigmoidT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map (fun v -> one / (one + exp -v))
+ (result, t.Shape)
+
+ let inline ExpT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map exp
+ (result, t.Shape)
+
+ let inline LogT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map log
+ (result, t.Shape)
+
+ let inline Log10T(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map log10
+ (result, t.Shape)
+
+ let inline SqrtT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map sqrt
+ (result, t.Shape)
+
+ let inline SinT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map sin
+ (result, t.Shape)
+
+ let inline CosT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map cos
+ (result, t.Shape)
+
+ let inline TanT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map tan
+ (result, t.Shape)
+
+ let inline SinhT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map sinh
+ (result, t.Shape)
+
+ let inline CoshT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map cosh
+ (result, t.Shape)
+
+ let inline TanhT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map tanh
+ (result, t.Shape)
+
+ let inline AsinT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map asin
+ (result, t.Shape)
+
+ let inline AcosT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map acos
+ (result, t.Shape)
+
+ let inline AtanT(t: RawTensorCPU< ^T >) : (^T[] * Shape) =
+ let result = t.Values |> Array.map atan
+ (result, t.Shape)
+
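+    // Random initializers: draw from the shared TensorMath.Util.Random source and
+    // convert to the element type with the supplied conversion function.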
+ let inline Random ofDouble (shape:Shape) : (^T[] * Shape) =
+ let values = Array.init (shapeLength shape) (fun _ -> ofDouble (TensorMath.Util.Random.Uniform()))
+ (values, shape)
+
+ let inline RandomNormal ofDouble (shape:Shape) : (^T[] * Shape) =
+ let values = Array.init (shapeLength shape) (fun _ -> ofDouble (TensorMath.Util.Random.Normal()))
+ (values, shape)
+
+ let inline RandomInt ofInt (shape:Shape) (low:int) (high:int) : (^T[] * Shape) =
+ let values = Array.init (shapeLength shape) (fun _ -> ofInt (TensorMath.Util.Random.Integer(low, high)))
+ (values, shape)
+
+/// The concrete implementation of RawTensor for Float32 data.
+type RawTensorFloat32(values: float32[], shape:Shape, device) =
+    inherit RawTensorCPU<float32>(values, shape, Dtype.Float32, device)
+ let create(values, shape) : RawTensor = upcast RawTensorFloat32(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorFloat32(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorFloat32(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, relativeTolerance, absoluteTolerance) = RawTensorCPU.AllClose(t1, t2, float32 relativeTolerance, float32 absoluteTolerance)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t.SoftplusT() = RawTensorCPU.SoftplusT(t) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toSingle(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toSingle(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toSingle()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toSingle()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toSingle(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toSingle()) |> create
+ override t1.PowTT(t2) = RawTensorCPU.PowTT(t1, t2) |> create
+ override t2.PowFromT0T(t1) = RawTensorCPU.PowT0T(t1.toSingle(), t2) |> create
+ override t1.PowTT0(t2) = RawTensorCPU.PowTT0(t1, t2.toSingle()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D (t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) =
+ let res = RawTensorCPU.SumT(t) |> create
+ match resultType with
+ | None -> res
+ | Some dtype -> res.Cast(dtype)
+ override t.SumTDim(dim, resultType) =
+ let res = RawTensorCPU.SumTDim(t, dim)
+ match resultType with
+ | None -> res :> _
+ | Some dtype -> res.Cast(dtype)
+ override t.SignT() = RawTensorCPU.SignT (sign >> float32) t |> create
+ override t.FloorT() = RawTensorCPU.FloorT(t) |> create
+ override t.CeilT() = RawTensorCPU.CeilT(t) |> create
+ override t.RoundT() = RawTensorCPU.RoundT(t) |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+ override t.SigmoidT() = RawTensorCPU.SigmoidT(t) |> create
+ override t.ExpT() = RawTensorCPU.ExpT(t) |> create
+ override t.LogT() = RawTensorCPU.LogT(t) |> create
+ override t.Log10T() = RawTensorCPU.Log10T(t) |> create
+ override t.SqrtT() = RawTensorCPU.SqrtT(t) |> create
+ override t.SinT() = RawTensorCPU.SinT(t) |> create
+ override t.CosT() = RawTensorCPU.CosT(t) |> create
+ override t.TanT() = RawTensorCPU.TanT(t) |> create
+ override t.SinhT() = RawTensorCPU.SinhT(t) |> create
+ override t.CoshT() = RawTensorCPU.CoshT(t) |> create
+ override t.TanhT() = RawTensorCPU.TanhT(t) |> create
+ override t.AsinT() = RawTensorCPU.AsinT(t) |> create
+ override t.AcosT() = RawTensorCPU.AcosT(t) |> create
+ override t.AtanT() = RawTensorCPU.AtanT(t) |> create
+ override t.InverseT() = RawTensorCPU.InverseT(t) :> _
+ override t.DetT() = RawTensorCPU.DetT(t) :> _
+ override a.SolveTT(b) = RawTensorCPU.SolveTT(a, b) :> _
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toSingle()) |> createOn device
+ static member Random(shape:Shape, device) = RawTensorCPU.Random float32 shape |> createOn device
+ static member RandomNormal(shape:Shape, device) = RawTensorCPU.RandomNormal float32 shape |> createOn device
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt float32 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
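+/// The concrete implementation of RawTensor for Float64 data.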
+type RawTensorFloat64(values: double[], shape:Shape, device) =
+    inherit RawTensorCPU<double>(values, shape, Dtype.Float64, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorFloat64(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorFloat64(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorFloat64(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, relativeTolerance, absoluteTolerance) = RawTensorCPU.AllClose(t1, t2, relativeTolerance, absoluteTolerance)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t.SoftplusT() = RawTensorCPU.SoftplusT(t) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toDouble() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toDouble() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toDouble(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toDouble(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toDouble()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toDouble()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toDouble(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toDouble()) |> create
+ override t1.PowTT(t2) = RawTensorCPU.PowTT(t1, t2) |> create
+ override t2.PowFromT0T(t1) = RawTensorCPU.PowT0T(t1.toDouble(), t2) |> create
+ override t1.PowTT0(t2) = RawTensorCPU.PowTT0(t1, t2.toDouble()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D double (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D double (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D double (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D double (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D double (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D double (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D (t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) =
+ let res = RawTensorCPU.SumT(t) |> create
+ match resultType with
+ | None -> res
+ | Some dtype -> res.Cast(dtype)
+ override t.SumTDim(dim, resultType) =
+ let res = RawTensorCPU.SumTDim(t, dim)
+ match resultType with
+ | None -> res :> _
+ | Some dtype -> res.Cast(dtype)
+ override t.SignT() = RawTensorCPU.SignT (sign >> double) t |> create
+ override t.FloorT() = RawTensorCPU.FloorT(t) |> create
+ override t.CeilT() = RawTensorCPU.CeilT(t) |> create
+ override t.RoundT() = RawTensorCPU.RoundT(t) |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+ override t.SigmoidT() = RawTensorCPU.SigmoidT(t) |> create
+ override t.ExpT() = RawTensorCPU.ExpT(t) |> create
+ override t.LogT() = RawTensorCPU.LogT(t) |> create
+ override t.Log10T() = RawTensorCPU.Log10T(t) |> create
+ override t.SqrtT() = RawTensorCPU.SqrtT(t) |> create
+ override t.SinT() = RawTensorCPU.SinT(t) |> create
+ override t.CosT() = RawTensorCPU.CosT(t) |> create
+ override t.TanT() = RawTensorCPU.TanT(t) |> create
+ override t.SinhT() = RawTensorCPU.SinhT(t) |> create
+ override t.CoshT() = RawTensorCPU.CoshT(t) |> create
+ override t.TanhT() = RawTensorCPU.TanhT(t) |> create
+ override t.AsinT() = RawTensorCPU.AsinT(t) |> create
+ override t.AcosT() = RawTensorCPU.AcosT(t) |> create
+ override t.AtanT() = RawTensorCPU.AtanT(t) |> create
+ override t.InverseT() = RawTensorCPU.InverseT(t) :> _
+ override t.DetT() = RawTensorCPU.DetT(t) :> _
+ override a.SolveTT(b) = RawTensorCPU.SolveTT(a, b) :> _
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toDouble()) |> createOn device
+ static member Random(shape:Shape, device) = RawTensorCPU.Random double shape |> createOn device
+ static member RandomNormal(shape:Shape, device) = RawTensorCPU.RandomNormal double shape |> createOn device
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt double shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
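+/// The concrete implementation of RawTensor for Int8 data.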
+type RawTensorInt8(values: int8[], shape:Shape, device) =
+    inherit RawTensorCPU<int8>(values, shape, Dtype.Int8, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorInt8(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorInt8(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorInt8(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
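+    // Integer tensors have no tolerance-based closeness; AllClose is exact equality.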
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSByte() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSByte() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toSByte(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toSByte(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toSByte()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toSByte()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toSByte(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toSByte()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D int8 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D int8 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D int8 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D int8 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D int8 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D int8 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D(t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
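+    // Sum integer tensors in Int64 to reduce the risk of overflow, then cast to the requested result type.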
+ override t.SumT(resultType) = t.Cast(Dtype.Int64).SumT(?resultType=resultType)
+ override t.SumTDim(dim, resultType) = t.Cast(Dtype.Int64).SumTDim(dim, ?resultType=resultType)
+ override t.SignT() = RawTensorCPU.SignT (sign >> int8) t |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toSByte()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Int8
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Int8
+ static member RandomInt(shape, low, high, device) = RawTensorCPU.RandomInt int8 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
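+/// The concrete implementation of RawTensor for Byte data.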
+type RawTensorByte(values: byte[], shape:Shape, device) =
+    inherit RawTensorCPU<byte>(values, shape, Dtype.Byte, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorByte(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorByte(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorByte(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toByte() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toByte() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toByte(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toByte(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toByte()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toByte()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toByte(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toByte()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D byte (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D byte (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D byte (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D byte (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D byte (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D byte (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D(t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
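+    // Byte negation wraps around two's complement: view as sbyte, negate, view back as byte.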
+    override t.NegT() = RawTensorCPU.NegT (sbyte >> (~-) >> byte) (t) |> create
+ override t.SumT(resultType) = t.Cast(Dtype.Int64).SumT(?resultType=resultType)
+ override t.SumTDim(dim, resultType) = t.Cast(Dtype.Int64).SumTDim(dim, ?resultType=resultType)
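+    // Bytes are unsigned, so the sign is 0 for 0 and 1 for anything positive.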
+ override t.SignT() = RawTensorCPU.SignT (min 1uy) t |> create
+ override t.AbsT() = RawTensorCPU.AbsT id t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toByte()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Byte
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Byte
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt byte shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
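+/// The concrete implementation of RawTensor for Int16 data.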
+type RawTensorInt16(values: int16[], shape:Shape, device) =
+    inherit RawTensorCPU<int16>(values, shape, Dtype.Int16, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorInt16(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorInt16(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorInt16(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt16() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt16() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toInt16(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toInt16(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toInt16()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toInt16()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toInt16(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toInt16()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D int16 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D int16 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D int16 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D int16 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D int16 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D int16 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D(t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) = t.Cast(Dtype.Int64).SumT(?resultType=resultType)
+ override t.SumTDim(dim, resultType) = t.Cast(Dtype.Int64).SumTDim(dim, ?resultType=resultType)
+ override t.SignT() = RawTensorCPU.SignT (sign >> int16) t |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toInt16()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Int16
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Int16
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt int16 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
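+/// The concrete implementation of RawTensor for Int32 data.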
+type RawTensorInt32(values: int32[], shape:Shape, device) =
+    inherit RawTensorCPU<int32>(values, shape, Dtype.Int32, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorInt32(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorInt32(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorInt32(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt32() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt32() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toInt32(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toInt32(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toInt32()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toInt32()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toInt32(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toInt32()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D int32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D int32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D int32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D int32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D int32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D int32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D(t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) = t.Cast(Dtype.Int64).SumT(?resultType=resultType)
+ override t.SumTDim(dim, resultType) = t.Cast(Dtype.Int64).SumTDim(dim, ?resultType=resultType)
+ override t.SignT() = RawTensorCPU.SignT (sign >> int32) t |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toInt32()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Int32
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Int32
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt int32 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
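+/// The concrete implementation of RawTensor for Int64 data.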
+type RawTensorInt64(values: int64[], shape:Shape, device) =
+    inherit RawTensorCPU<int64>(values, shape, Dtype.Int64, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorInt64(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorInt64(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorInt64(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt64() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toInt64() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toInt64(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toInt64(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toInt64()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toInt64()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toInt64(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toInt64()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+    override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+    override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU<int32>, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D int64 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D int64 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D int64 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D int64 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D int64 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D int64 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D(t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) =
+ let res = RawTensorCPU.SumT(t) |> create
+ match resultType with
+ | None -> res
+ | Some dtype -> res.Cast(dtype)
+ override t.SumTDim(dim, resultType) =
+ let res = RawTensorCPU.SumTDim(t, dim)
+ match resultType with
+ | None -> res :> _
+ | Some dtype -> res.Cast(dtype)
+ override t.SignT() = RawTensorCPU.SignT (sign >> int64) t |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toInt64()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Int64
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Int64
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt int64 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
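+/// The concrete implementation of RawTensor for Bool data.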
+type RawTensorBool(values: bool[], shape:Shape, device) =
+    inherit RawTensorCPU<bool>(values, shape, Dtype.Bool, device)
+
+ let create(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorBool(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, _relativeTolerance, _absoluteTolerance) = RawTensorCPU.Equals(t1, t2)
+ override t1.LtTT(t2) = t1.MakeLike(Array.map2 (<) t1.Values (t2.GetTypedValues()), t1.Shape)
+ override t1.GtTT(t2) = t1.MakeLike(Array.map2 (>) t1.Values (t2.GetTypedValues()), t1.Shape)
+ override t1.LeTT(t2) = t1.MakeLike(Array.map2 (<=) t1.Values (t2.GetTypedValues()), t1.Shape)
+ override t1.GeTT(t2) = t1.MakeLike(Array.map2 (>=) t1.Values (t2.GetTypedValues()), t1.Shape)
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> create
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> create
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
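+    // Boolean addition is logical OR; a false alpha masks out the addend.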
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toBool() | None -> true
+ t1.MakeLike(Array.map2 (||) t1.Values (Array.map (fun x -> alpha && x) (t2.GetTypedValues())), t1.Shape)
+ override t1.AddTT0(t2, alpha) =
+ let t2 = t2.toBool()
+ let alpha = match alpha with Some v -> v.toBool() | None -> true
+ let values = Array.map (fun a -> a || (alpha && t2)) t1.Values
+ t1.MakeLike(values, t1.Shape)
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((||), t1, location, t2) |> create
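+    // Boolean multiplication is logical AND.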
+ override t1.MulTT(t2) = t1.MakeLike(Array.map2 (&&) t1.Values (t2.GetTypedValues()), t1.Shape)
+ override t1.MulTT0(t2) =
+ let t2 = t2.toBool()
+ t1.MakeLike(Array.map (fun a -> a && t2) t1.Values, t1.Shape)
+    override t.SumT(resultType) = t.Cast(Dtype.Int64).SumT(?resultType=resultType)
+ override t.SumTDim(dim, resultType) = t.Cast(Dtype.Int64).SumTDim(dim, ?resultType=resultType)
+ override t.SignT() = t :> _
+
+ override t.ClampT(_low, _high) = opNotSupported "Clamp" t.Dtype
+ override t1.SubTT(t2) = opNotSupported2 "SubTT" t1.Dtype t2.Dtype
+ override t2.SubFromT0T(_t1) = opNotSupported "SubT0T" t2.Dtype
+ override t1.SubTT0(_t2) = opNotSupported "SubTT0" t1.Dtype
+ override t1.DivTT(t2) = opNotSupported2 "DivTT" t1.Dtype t2.Dtype
+ override t2.DivFromT0T(_t1) = opNotSupported "DivT0T" t2.Dtype
+ override t1.DivTT0(_t2) = opNotSupported "DivTT0" t1.Dtype
+ override t1.MatMulTT(t2) = opNotSupported2 "MatMulTT" t1.Dtype t2.Dtype
+ override t1.BMMTT(t2) = opNotSupported2 "BMMTT" t1.Dtype t2.Dtype
+ override t1.MaxPool1D(_kernelSize, _stride, _padding) = opNotSupported "MaxPool1D" t1.Dtype
+ override t1.MaxPool2D(_kernelSize, _stride, _padding) = opNotSupported "MaxPool2D" t1.Dtype
+ override t1.MaxPool3D(_kernelSize, _stride, _padding) = opNotSupported "MaxPool3D" t1.Dtype
+ override t1.MaxUnpool1D(_indices, _outputSize) = opNotSupported "MaxUnpool1D" t1.Dtype
+ override t1.MaxUnpool2D(_indices, _outputSize) = opNotSupported "MaxUnpool2D" t1.Dtype
+ override t1.MaxUnpool3D(_indices, _outputSize) = opNotSupported "MaxUnpool3D" t1.Dtype
+ override t1.Conv1D(t2, _stride, _padding) = opNotSupported2 "Conv1D" t1.Dtype t2.Dtype
+ override t1.Conv2D(t2, _stride, _padding) = opNotSupported2 "Conv2D" t1.Dtype t2.Dtype
+ override t1.Conv3D(t2, _stride, _padding) = opNotSupported2 "Conv3D" t1.Dtype t2.Dtype
+ override t1.AvgPool1D(_kernelSize, _stride, _padding) = opNotSupported "AvgPool1D" t1.Dtype
+ override t1.AvgPool2D(_kernelSize, _stride, _padding) = opNotSupported "AvgPool2D" t1.Dtype
+ override t1.AvgPool3D(_kernelSize, _stride, _padding) = opNotSupported "AvgPool3D" t1.Dtype
+ override t1.AvgPoolReverse1D(_originalInput, _kernelSize, _stride, _padding) = opNotSupported "AvgPoolReverse1D" t1.Dtype
+ override t1.AvgPoolReverse2D(_originalInput, _kernelSize, _stride, _padding) = opNotSupported "AvgPoolReverse2D" t1.Dtype
+ override t1.AvgPoolReverse3D(_originalInput, _kernelSize, _stride, _padding) = opNotSupported "AvgPoolReverse3D" t1.Dtype
+ override t.NegT() = opNotSupported "NegT" t.Dtype
+ override t.AbsT() = opNotSupported "AbsT" t.Dtype
+ override t.ReluT() = opNotSupported "ReluT" t.Dtype
+ override t.SoftplusT() = opNotSupported "SoftplusT" t.Dtype
+ override t1.PowTT(t2) = opNotSupported2 "PowTT" t1.Dtype t2.Dtype
+ override t2.PowFromT0T(_t1) = opNotSupported "PowT0T" t2.Dtype
+ override t1.PowTT0(_t2) = opNotSupported "PowTT0" t1.Dtype
+ override t.FloorT() = opNotSupported "FloorT" t.Dtype
+ override t.CeilT() = opNotSupported "CeilT" t.Dtype
+ override t.RoundT() = opNotSupported "RoundT" t.Dtype
+ override t.SigmoidT() = opNotSupported "SigmoidT" t.Dtype
+ override t.ExpT() = opNotSupported "ExpT" t.Dtype
+ override t.LogT() = opNotSupported "LogT" t.Dtype
+ override t.Log10T() = opNotSupported "Log10T" t.Dtype
+ override t.SqrtT() = opNotSupported "SqrtT" t.Dtype
+ override t.SinT() = opNotSupported "SinT" t.Dtype
+ override t.CosT() = opNotSupported "CosT" t.Dtype
+ override t.TanT() = opNotSupported "TanT" t.Dtype
+ override t.SinhT() = opNotSupported "SinhT" t.Dtype
+ override t.CoshT() = opNotSupported "CoshT" t.Dtype
+ override t.TanhT() = opNotSupported "TanhT" t.Dtype
+ override t.AsinT() = opNotSupported "AsinT" t.Dtype
+ override t.AcosT() = opNotSupported "AcosT" t.Dtype
+ override t.AtanT() = opNotSupported "AtanT" t.Dtype
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = ([| false |], Shape.scalar) |> createOn device
+ static member One(device) = ([| true |], Shape.scalar) |> createOn device
+ static member Zeros(shape:Shape, device) = (Array.zeroCreate (shapeLength shape), shape) |> createOn device
+ static member Empty(shape:Shape, device) = (Array.zeroCreate (shapeLength shape), shape) |> createOn device
+ static member Ones(shape:Shape, device) = (Array.create (shapeLength shape) true, shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toBool()) |> createOn device
+ static member Random(_shape:Shape, _device) = opNotSupported "Random" Dtype.Bool
+ static member RandomNormal(_shape:Shape, _device) = opNotSupported "RandomNormal" Dtype.Bool
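+    // Sampled integers are mapped to bools via System.Convert.ToBoolean (zero -> false, nonzero -> true).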
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt System.Convert.ToBoolean shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
+/// The concrete implementation of RawTensor for Float16 data.
+type RawTensorFloat16(values: float32[], shape:Shape, device) =
+ inherit RawTensorCPU(values, shape, Dtype.Float16, device)
+ let create(values, shape) : RawTensor = upcast RawTensorFloat16(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorFloat16(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorFloat16(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, relativeTolerance, absoluteTolerance) = RawTensorCPU.AllClose(t1, t2, float32 relativeTolerance, float32 absoluteTolerance)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t.SoftplusT() = RawTensorCPU.SoftplusT(t) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toSingle(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toSingle(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toSingle()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toSingle()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toSingle(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toSingle()) |> create
+ override t1.PowTT(t2) = RawTensorCPU.PowTT(t1, t2) |> create
+ override t2.PowFromT0T(t1) = RawTensorCPU.PowT0T(t1.toSingle(), t2) |> create
+ override t1.PowTT0(t2) = RawTensorCPU.PowTT0(t1, t2.toSingle()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D (t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) =
+ let res = RawTensorCPU.SumT(t) |> create
+ match resultType with
+ | None -> res
+ | Some dtype -> res.Cast(dtype)
+ override t.SumTDim(dim, resultType) =
+ let res = RawTensorCPU.SumTDim(t, dim)
+ match resultType with
+ | None -> res :> _
+ | Some dtype -> res.Cast(dtype)
+ override t.SignT() = RawTensorCPU.SignT (sign >> float32) t |> create
+ override t.FloorT() = RawTensorCPU.FloorT(t) |> create
+ override t.CeilT() = RawTensorCPU.CeilT(t) |> create
+ override t.RoundT() = RawTensorCPU.RoundT(t) |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+ override t.SigmoidT() = RawTensorCPU.SigmoidT(t) |> create
+ override t.ExpT() = RawTensorCPU.ExpT(t) |> create
+ override t.LogT() = RawTensorCPU.LogT(t) |> create
+ override t.Log10T() = RawTensorCPU.Log10T(t) |> create
+ override t.SqrtT() = RawTensorCPU.SqrtT(t) |> create
+ override t.SinT() = RawTensorCPU.SinT(t) |> create
+ override t.CosT() = RawTensorCPU.CosT(t) |> create
+ override t.TanT() = RawTensorCPU.TanT(t) |> create
+ override t.SinhT() = RawTensorCPU.SinhT(t) |> create
+ override t.CoshT() = RawTensorCPU.CoshT(t) |> create
+ override t.TanhT() = RawTensorCPU.TanhT(t) |> create
+ override t.AsinT() = RawTensorCPU.AsinT(t) |> create
+ override t.AcosT() = RawTensorCPU.AcosT(t) |> create
+ override t.AtanT() = RawTensorCPU.AtanT(t) |> create
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toSingle()) |> createOn device
+ static member Random(shape:Shape, device) = RawTensorCPU.Random float32 shape |> createOn device
+ static member RandomNormal(shape:Shape, device) = RawTensorCPU.RandomNormal float32 shape |> createOn device
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt float32 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
+/// The concrete implementation of RawTensor for BFloat16 data.
+type RawTensorBFloat16(values: float32[], shape:Shape, device) =
+ inherit RawTensorCPU(values, shape, Dtype.BFloat16, device)
+ let create(values, shape) : RawTensor = upcast RawTensorBFloat16(values, shape, device)
+ let createBool(values, shape) : RawTensor = upcast RawTensorBool(values, shape, device)
+ static let createOn device (values, shape) : RawTensor = upcast RawTensorBFloat16(values, shape, device)
+
+ override t.MakeLike(values, shape, newDevice) = upcast RawTensorBFloat16(values, shape, defaultArg newDevice device)
+ override t1.Equals(t2:RawTensor) = RawTensorCPU.Equals(t1, t2)
+ override t1.AllClose(t2:RawTensor, relativeTolerance, absoluteTolerance) = RawTensorCPU.AllClose(t1, t2, float32 relativeTolerance, float32 absoluteTolerance)
+ override t.ClampT(low, high) = RawTensorCPU.ClampT(t, low, high) |> create
+ override t.SoftplusT() = RawTensorCPU.SoftplusT(t) |> create
+ override t1.LtTT(t2) = RawTensorCPU.LtTT(t1, t2) |> createBool
+ override t1.GtTT(t2) = RawTensorCPU.GtTT(t1, t2) |> createBool
+ override t1.LeTT(t2) = RawTensorCPU.LeTT(t1, t2) |> createBool
+ override t1.GeTT(t2) = RawTensorCPU.GeTT(t1, t2) |> createBool
+ override t1.EqTT(t2) = RawTensorCPU.EqTT(t1, t2) |> createBool
+ override t1.NeqTT(t2) = RawTensorCPU.NeqTT(t1, t2) |> createBool
+ override t.MaxReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (>) (t, dim, keepDim)
+ override t.MinReduceT(dim, keepDim) = RawTensorCPU.MinMaxReduceT (<) (t, dim, keepDim)
+ override t.MaxIndexT() = RawTensorCPU.MaxIndexT(t)
+ override t.MinIndexT() = RawTensorCPU.MinIndexT(t)
+ override t1.AddTT(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT(t1, t2, alpha) |> create
+ override t1.AddTT0(t2, alpha) =
+ let alpha = match alpha with Some v -> v.toSingle() | None -> RawTensorCPU.one
+ RawTensorCPU.AddTT0(t1, t2.toSingle(), alpha) |> create
+ override t1.AddTTSlice(location:int[], t2) = RawTensorCPU.AddTTSlice((+), t1, location, t2) |> create
+ override t1.SubTT(t2) = RawTensorCPU.SubTT(t1, t2) |> create
+ override t2.SubFromT0T(t1) = RawTensorCPU.SubT0T(t1.toSingle(), t2) |> create
+ override t1.SubTT0(t2) = RawTensorCPU.SubTT0(t1, t2.toSingle()) |> create
+ override t1.MulTT(t2) = RawTensorCPU.MulTT(t1, t2) |> create
+ override t1.MulTT0(t2) = RawTensorCPU.MulTT0(t1, t2.toSingle()) |> create
+ override t1.DivTT(t2) = RawTensorCPU.DivTT(t1, t2) |> create
+ override t2.DivFromT0T(t1) = RawTensorCPU.DivT0T(t1.toSingle(), t2) |> create
+ override t1.DivTT0(t2) = RawTensorCPU.DivTT0(t1, t2.toSingle()) |> create
+ override t1.PowTT(t2) = RawTensorCPU.PowTT(t1, t2) |> create
+ override t2.PowFromT0T(t1) = RawTensorCPU.PowT0T(t1.toSingle(), t2) |> create
+ override t1.PowTT0(t2) = RawTensorCPU.PowTT0(t1, t2.toSingle()) |> create
+ override t1.MatMulTT(t2) = RawTensorCPU.MatMulTT(t1, t2) |> create
+ override t1.BMMTT(t2) = RawTensorCPU.BMMTT(t1, t2) |> create
+ override t1.MaxPool1D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool1D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool2D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool2D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxPool3D(kernelSize, stride, padding) = let result, indices = RawTensorCPU.MaxPool3D(t1, kernelSize, stride, padding) in result :> _, indices :> _
+ override t1.MaxUnpool1D(indices, outputSize) = RawTensorCPU.MaxUnpool1D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.MaxUnpool2D(indices, outputSize) = RawTensorCPU.MaxUnpool2D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.MaxUnpool3D(indices, outputSize) = RawTensorCPU.MaxUnpool3D(t1, indices :?> RawTensorCPU, outputSize) :> _
+ override t1.Conv1D(t2, stride, padding) = RawTensorCPU.Conv1D (t1, t2, stride, padding) :> _
+ override t1.Conv2D(t2, stride, padding) = RawTensorCPU.Conv2D (t1, t2, stride, padding) :> _
+ override t1.Conv3D(t2, stride, padding) = RawTensorCPU.Conv3D (t1, t2, stride, padding) :> _
+ override t1.AvgPool1D(kernelSize, stride, padding) = RawTensorCPU.AvgPool1D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool2D(kernelSize, stride, padding) = RawTensorCPU.AvgPool2D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPool3D(kernelSize, stride, padding) = RawTensorCPU.AvgPool3D float32 (t1, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse1D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse2D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) = RawTensorCPU.AvgPoolReverse3D float32 (t1, originalInput, kernelSize, stride, padding) :> _
+ override t.NegT() = RawTensorCPU.NegT (~-) (t) |> create
+ override t.SumT(resultType) =
+ let res = RawTensorCPU.SumT(t) |> create
+ match resultType with
+ | None -> res
+ | Some dtype -> res.Cast(dtype)
+ override t.SumTDim(dim, resultType) =
+ let res = RawTensorCPU.SumTDim(t, dim)
+ match resultType with
+ | None -> res :> _
+ | Some dtype -> res.Cast(dtype)
+ override t.SignT() = RawTensorCPU.SignT (sign >> float32) t |> create
+ override t.FloorT() = RawTensorCPU.FloorT(t) |> create
+ override t.CeilT() = RawTensorCPU.CeilT(t) |> create
+ override t.RoundT() = RawTensorCPU.RoundT(t) |> create
+ override t.AbsT() = RawTensorCPU.AbsT abs t |> create
+ override t.ReluT() = RawTensorCPU.ReluT(t) |> create
+ override t.SigmoidT() = RawTensorCPU.SigmoidT(t) |> create
+ override t.ExpT() = RawTensorCPU.ExpT(t) |> create
+ override t.LogT() = RawTensorCPU.LogT(t) |> create
+ override t.Log10T() = RawTensorCPU.Log10T(t) |> create
+ override t.SqrtT() = RawTensorCPU.SqrtT(t) |> create
+ override t.SinT() = RawTensorCPU.SinT(t) |> create
+ override t.CosT() = RawTensorCPU.CosT(t) |> create
+ override t.TanT() = RawTensorCPU.TanT(t) |> create
+ override t.SinhT() = RawTensorCPU.SinhT(t) |> create
+ override t.CoshT() = RawTensorCPU.CoshT(t) |> create
+ override t.TanhT() = RawTensorCPU.TanhT(t) |> create
+ override t.AsinT() = RawTensorCPU.AsinT(t) |> create
+ override t.AcosT() = RawTensorCPU.AcosT(t) |> create
+ override t.AtanT() = RawTensorCPU.AtanT(t) |> create
+ override t.InverseT() = opNotSupported "InverseT" t.Dtype
+ override t.DetT() = opNotSupported "DetT" t.Dtype
+ override a.SolveTT(_) = opNotSupported "SolveTT" a.Dtype
+
+ static member Seed(seed) = Random.Seed(seed)
+ static member Zero(device) = RawTensorCPU.Zero() |> createOn device
+ static member One(device) = RawTensorCPU.One() |> createOn device
+ static member Zeros(shape:Shape, device) = RawTensorCPU.Zeros(shape) |> createOn device
+ static member Empty(shape:Shape, device) = RawTensorCPU.Empty(shape) |> createOn device
+ static member Ones(shape:Shape, device) = RawTensorCPU.Ones(shape) |> createOn device
+ static member Full(shape:Shape, value:scalar, device) = RawTensorCPU.Full (shape, value.toSingle()) |> createOn device
+ static member Random(shape:Shape, device) = RawTensorCPU.Random float32 shape |> createOn device
+ static member RandomNormal(shape:Shape, device) = RawTensorCPU.RandomNormal float32 shape |> createOn device
+ static member RandomInt(shape:Shape, low:int, high:int, device) = RawTensorCPU.RandomInt float32 shape low high |> createOn device
+ static member CreateFromFlatArray(values:Array, shape, device) = RawTensorCPU.CreateFromFlatArray (values, shape) |> createOn device
+
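+// This file compiles as the reference backend by default; under TEST_DUPLICATE_BACKEND it compiles
+// again as a duplicate backend, letting the test suite exercise backend registration and dispatch.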
+#if TEST_DUPLICATE_BACKEND
+type TestDuplicateBackendTensorStatics() =
+#else
+type ReferenceBackendTensorStatics() =
+#endif
+
+ inherit BackendTensorStatics()
+
+ override _.GetDevices(deviceType) =
+ match deviceType with
+ | None -> [ Device.CPU (* ; Device.GPU *) ]
+        | Some DeviceType.CPU -> [ Device.CPU ]
+ //| Some DeviceType.CUDA -> [ Device.GPU ]
+ | Some _ -> []
+
+ override _.IsDeviceTypeAvailable (deviceType) = (match deviceType with DeviceType.CPU -> true | _ -> false)
+ override _.Seed(seed) = Random.Seed(seed)
+ override _.Zero(dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Zero(device)
+ | BFloat16 -> RawTensorBFloat16.Zero(device)
+ | Float32 -> RawTensorFloat32.Zero(device)
+ | Float64 -> RawTensorFloat64.Zero(device)
+ | Int8 -> RawTensorInt8.Zero(device)
+ | Byte -> RawTensorByte.Zero(device)
+ | Int16 -> RawTensorInt16.Zero(device)
+ | Int32 -> RawTensorInt32.Zero(device)
+ | Int64 -> RawTensorInt64.Zero(device)
+ | Bool -> RawTensorBool.Zero(device)
+ override _.One(dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.One(device)
+ | BFloat16 -> RawTensorBFloat16.One(device)
+ | Float32 -> RawTensorFloat32.One(device)
+ | Float64 -> RawTensorFloat64.One(device)
+ | Int8 -> RawTensorInt8.One(device)
+ | Byte -> RawTensorByte.One(device)
+ | Int16 -> RawTensorInt16.One(device)
+ | Int32 -> RawTensorInt32.One(device)
+ | Int64 -> RawTensorInt64.One(device)
+ | Bool -> RawTensorBool.One(device)
+ override _.Zeros(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Zeros(shape, device)
+ | BFloat16 -> RawTensorBFloat16.Zeros(shape, device)
+ | Float32 -> RawTensorFloat32.Zeros(shape, device)
+ | Float64 -> RawTensorFloat64.Zeros(shape, device)
+ | Int8 -> RawTensorInt8.Zeros(shape, device)
+ | Byte -> RawTensorByte.Zeros(shape, device)
+ | Int16 -> RawTensorInt16.Zeros(shape, device)
+ | Int32 -> RawTensorInt32.Zeros(shape, device)
+ | Int64 -> RawTensorInt64.Zeros(shape, device)
+ | Bool -> RawTensorBool.Zeros(shape, device)
+ override _.Empty(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Empty(shape, device)
+ | BFloat16 -> RawTensorBFloat16.Empty(shape, device)
+ | Float32 -> RawTensorFloat32.Empty(shape, device)
+ | Float64 -> RawTensorFloat64.Empty(shape, device)
+ | Int8 -> RawTensorInt8.Empty(shape, device)
+ | Byte -> RawTensorByte.Empty(shape, device)
+ | Int16 -> RawTensorInt16.Empty(shape, device)
+ | Int32 -> RawTensorInt32.Empty(shape, device)
+ | Int64 -> RawTensorInt64.Empty(shape, device)
+ | Bool -> RawTensorBool.Empty(shape, device)
+ override _.Ones(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Ones(shape, device)
+ | BFloat16 -> RawTensorBFloat16.Ones(shape, device)
+ | Float32 -> RawTensorFloat32.Ones(shape, device)
+ | Float64 -> RawTensorFloat64.Ones(shape, device)
+ | Int8 -> RawTensorInt8.Ones(shape, device)
+ | Byte -> RawTensorByte.Ones(shape, device)
+ | Int16 -> RawTensorInt16.Ones(shape, device)
+ | Int32 -> RawTensorInt32.Ones(shape, device)
+ | Int64 -> RawTensorInt64.Ones(shape, device)
+ | Bool -> RawTensorBool.Ones(shape, device)
+ override _.Full(shape:Shape, value:scalar, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Full(shape, value, device)
+ | BFloat16 -> RawTensorBFloat16.Full(shape, value, device)
+ | Float32 -> RawTensorFloat32.Full(shape, value, device)
+ | Float64 -> RawTensorFloat64.Full(shape, value, device)
+ | Int8 -> RawTensorInt8.Full(shape, value, device)
+ | Byte -> RawTensorByte.Full(shape, value, device)
+ | Int16 -> RawTensorInt16.Full(shape, value, device)
+ | Int32 -> RawTensorInt32.Full(shape, value, device)
+ | Int64 -> RawTensorInt64.Full(shape, value, device)
+ | Bool -> RawTensorBool.Full(shape, value, device)
+ override _.Random(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.Random(shape, device)
+ | BFloat16 -> RawTensorBFloat16.Random(shape, device)
+ | Float32 -> RawTensorFloat32.Random(shape, device)
+ | Float64 -> RawTensorFloat64.Random(shape, device)
+ | Int8 -> RawTensorInt8.Random(shape, device)
+ | Byte -> RawTensorByte.Random(shape, device)
+ | Int16 -> RawTensorInt16.Random(shape, device)
+ | Int32 -> RawTensorInt32.Random(shape, device)
+ | Int64 -> RawTensorInt64.Random(shape, device)
+ | Bool -> RawTensorBool.Random(shape, device)
+ override _.RandomNormal(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.RandomNormal(shape, device)
+ | BFloat16 -> RawTensorBFloat16.RandomNormal(shape, device)
+ | Float32 -> RawTensorFloat32.RandomNormal(shape, device)
+ | Float64 -> RawTensorFloat64.RandomNormal(shape, device)
+ | Int8 -> RawTensorInt8.RandomNormal(shape, device)
+ | Byte -> RawTensorByte.RandomNormal(shape, device)
+ | Int16 -> RawTensorInt16.RandomNormal(shape, device)
+ | Int32 -> RawTensorInt32.RandomNormal(shape, device)
+ | Int64 -> RawTensorInt64.RandomNormal(shape, device)
+ | Bool -> RawTensorBool.RandomNormal(shape, device)
+ override _.RandomInt(shape:Shape, low:int, high:int, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.RandomInt(shape, low, high, device)
+ | BFloat16 -> RawTensorBFloat16.RandomInt(shape, low, high, device)
+ | Float32 -> RawTensorFloat32.RandomInt(shape, low, high, device)
+ | Float64 -> RawTensorFloat64.RandomInt(shape, low, high, device)
+ | Int8 -> RawTensorInt8.RandomInt(shape, low, high, device)
+ | Byte -> RawTensorByte.RandomInt(shape, low, high, device)
+ | Int16 -> RawTensorInt16.RandomInt(shape, low, high, device)
+ | Int32 -> RawTensorInt32.RandomInt(shape, low, high, device)
+ | Int64 -> RawTensorInt64.RandomInt(shape, low, high, device)
+ | Bool -> RawTensorBool.RandomInt(shape, low, high, device)
+ override _.CreateFromFlatArray(values:Array, shape, dtype, device) =
+ match dtype with
+ | Float16 -> RawTensorFloat16.CreateFromFlatArray(values, shape, device)
+ | BFloat16 -> RawTensorBFloat16.CreateFromFlatArray(values, shape, device)
+ | Float32 -> RawTensorFloat32.CreateFromFlatArray(values, shape, device)
+ | Float64 -> RawTensorFloat64.CreateFromFlatArray(values, shape, device)
+ | Int8 -> RawTensorInt8.CreateFromFlatArray(values, shape, device)
+ | Byte -> RawTensorByte.CreateFromFlatArray(values, shape, device)
+ | Int16 -> RawTensorInt16.CreateFromFlatArray(values, shape, device)
+ | Int32 -> RawTensorInt32.CreateFromFlatArray(values, shape, device)
+ | Int64 -> RawTensorInt64.CreateFromFlatArray(values, shape, device)
+ | Bool -> RawTensorBool.CreateFromFlatArray(values, shape, device)
+
diff --git a/src/TensorMath.Backends.Reference/TensorMath.Backends.Reference.fsproj b/src/TensorMath.Backends.Reference/TensorMath.Backends.Reference.fsproj
new file mode 100644
index 0000000..ab19083
--- /dev/null
+++ b/src/TensorMath.Backends.Reference/TensorMath.Backends.Reference.fsproj
@@ -0,0 +1,16 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>netstandard2.1</TargetFramework>
+    <!-- a boolean property with value 'true'; its tag name was lost in extraction -->
+  </PropertyGroup>
+
+  <ItemGroup>
+    <!-- compile items and project references lost in extraction -->
+  </ItemGroup>
+
+</Project>
diff --git a/src/TensorMath.Backends.Torch/TensorMath.Backends.Torch.fsproj b/src/TensorMath.Backends.Torch/TensorMath.Backends.Torch.fsproj
new file mode 100644
index 0000000..565f37c
--- /dev/null
+++ b/src/TensorMath.Backends.Torch/TensorMath.Backends.Torch.fsproj
@@ -0,0 +1,21 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>net8.0</TargetFramework>
+    <!-- a boolean property with value 'true'; its tag name was lost in extraction -->
+  </PropertyGroup>
+
+  <ItemGroup>
+    <!-- compile items and package/project references lost in extraction -->
+  </ItemGroup>
+
+</Project>
diff --git a/src/TensorMath.Backends.Torch/TensorMath.Torch.fs b/src/TensorMath.Backends.Torch/TensorMath.Torch.fs
new file mode 100644
index 0000000..266a36c
--- /dev/null
+++ b/src/TensorMath.Backends.Torch/TensorMath.Torch.fs
@@ -0,0 +1,33 @@
+namespace TensorMath
+
+open TensorMath
+open TensorMath.Backends.Torch
+open TorchSharp
+
+[<AutoOpen>]
+module TorchExtensions =
+
+ type dsharp with
+
+        /// <summary>
+        /// Creates a new TensorMath tensor from the torch tensor.
+        /// </summary>
+ static member fromTorch(tt: torch.Tensor) =
+ Tensor.ofRawTensor(TorchRawTensor(tt))
+
+ type Tensor with
+        /// <summary>
+        /// Converts the primal of a tensor to a torch tensor.
+        /// </summary>
+        /// <remarks>
+        /// If the tensor does not use the Torch backend an exception is raised.
+        ///
+        /// Note that this operation takes the primal of the tensor. This means
+        /// code that converts to Torch tensors will not be differentiable using
+        /// TensorMath differentiation capabilities.
+        /// </remarks>
+ member t.toTorch() =
+ match t.primalRaw with
+ | :? TorchRawTensor as trt -> trt.TorchTensor
+ | _ -> failwith $"toTorch: the input is not a TensorMath.Backends.Torch tensor, its backend is {t.backend}"
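+    // Hypothetical usage sketch (helper names as defined above):
+    //   let t = dsharp.fromTorch(torch.ones(2L, 3L))
+    //   let torchTensor = t.toTorch()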
+
diff --git a/src/TensorMath.Backends.Torch/Torch.RawTensor.fs b/src/TensorMath.Backends.Torch/Torch.RawTensor.fs
new file mode 100644
index 0000000..41ee780
--- /dev/null
+++ b/src/TensorMath.Backends.Torch/Torch.RawTensor.fs
@@ -0,0 +1,1594 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace rec TensorMath.Backends.Torch
+
+open System
+open TensorMath
+open TensorMath.Backends
+open TensorMath.Util
+open TorchSharp
+
+type torch_cuda = torch.cuda
+type TorchShape = int64[]
+type TorchDevice = Torch.Device
+type Device = TensorMath.Device
+[<AutoOpen>]
+module internal Utils =
+
+ let int64s (b: int[]) = Array.map int64 b
+
+ let toTorchType dtype =
+ match dtype with
+ | Dtype.Bool -> torch.ScalarType.Bool
+ | Dtype.Int8 -> torch.ScalarType.Int8
+ | Dtype.Byte -> torch.ScalarType.Byte
+ | Dtype.Int16 -> torch.ScalarType.Int16
+ | Dtype.Int32 -> torch.ScalarType.Int32
+ | Dtype.Int64 -> torch.ScalarType.Int64
+ | Dtype.Float16 -> torch.ScalarType.Float16
+ | Dtype.BFloat16 -> torch.ScalarType.BFloat16
+ | Dtype.Float32 -> torch.ScalarType.Float32
+ | Dtype.Float64 -> torch.ScalarType.Float64
+
+ /// WARNING: TorchSharp Scalar creation is buggy and doesn't preserve types: https://github.com/xamarin/TorchSharp/issues/331
+ let toTorchScalar (x: scalar) =
+ match x.GetTypeCode() with
+ | TypeCode.Single -> Scalar.op_Implicit (x.toSingle())
+ | TypeCode.Double -> Scalar.op_Implicit (x.toDouble())
+ | TypeCode.Int32 -> Scalar.op_Implicit (x.toInt32())
+ | TypeCode.Int64 -> Scalar.op_Implicit (x.toInt64())
+ | TypeCode.Byte -> Scalar.op_Implicit (x.toByte())
+ | TypeCode.SByte -> Scalar.op_Implicit (x.toSByte())
+ | TypeCode.Int16 -> Scalar.op_Implicit (x.toInt16())
+ | TypeCode.Boolean -> Scalar.op_Implicit (x.toBool())
+ | t -> failwithf "unknown scalar type '%A'" t
+
+ let fromTorchType ttype =
+ match ttype with
+ | torch.ScalarType.Bool -> Dtype.Bool
+ | torch.ScalarType.Int8 -> Dtype.Int8
+ | torch.ScalarType.Byte -> Dtype.Byte
+ | torch.ScalarType.Int16 -> Dtype.Int16
+ | torch.ScalarType.Int32 -> Dtype.Int32
+ | torch.ScalarType.Int64 -> Dtype.Int64
+ | torch.ScalarType.Float32 -> Dtype.Float32
+ | torch.ScalarType.Float64 -> Dtype.Float64
+ | _ -> failwith "fromTorchType - other type"
+
+ let toTorchShape (shape: Shape) : TorchShape = int64s shape
+
+ let fromTorchShape (shape: int64[]) = shape |> Array.map int
+
+ type TensorMath.DeviceType with
+ member x.ToTorch : TorchSharp.DeviceType = enum (int x)
+
+ type TensorMath.Device with
+ member x.ToTorch = torch.Device(x.DeviceType.ToTorch, x.DeviceIndex)
+
+ let fromTorchDeviceType (x: TorchSharp.DeviceType) : TensorMath.DeviceType = enum (int x)
+
+ let fromTorchDevice (x: torch.Device) = TensorMath.Device(fromTorchDeviceType x.``type``, x.index)
+
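+    // DJB2-style hash combine: (h1 * 33) XOR h2.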
+ let inline combineHashes (h1 : int) (h2 : int) = ((h1 <<< 5) + h1) ^^^ h2
+
+ let torchMoveTo (tt: torch.Tensor) (device: Device) =
+ tt.``to``(device.ToTorch)
+
+ type RawTensor with
+ member x.TorchTensor = (x :?> TorchRawTensor).TorchTensor
+
+/// This is the base class for all RawTensorXyz types.
+/// All type-independent operations are implemented directly on this class.
+type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device) =
+
+ inherit RawTensor()
+
+    // Note, shape and dtype are stored as fields. These duplicate information in TorchTensor, but
+ // it is a little too costly to repeatedly re-extract this information.
+ //
+ // 'device' is not stored as a field, it is rarely accessed and can be fetched from TorchTensor
+
+#if DEBUG
+ // Check the invariants associated with the tensors
+ do
+ if tt.dtype <> toTorchType dtype then
+ failwithf "mismatched Torch tensor type, expected %A, got %A" (toTorchType dtype) tt.dtype
+
+ if int tt.device_type <> int device.DeviceType then
+ failwithf "mismatched Torch tensor device, expected %A, got %A" tt.device_type device.DeviceType
+
+ if int tt.device_index <> int device.DeviceIndex then
+ failwithf "mismatched Torch tensor index, expected %A, got %A" tt.device_index device.DeviceIndex
+
+ if toTorchShape shape <> tt.shape then
+ failwithf "mismatched Torch tensor shape, expected %A, got %A" (toTorchShape shape) tt.shape
+
+ let device = () // make sure 'device' isn't accessed in a member and stored as a field
+#endif
+ let mutable tt = tt
+ let mutable isMutable = false
+ let checkMutable() = if not isMutable then failwith "the tensor can't be mutated"
+ do ignore device
+
+ override _.Shape = shape
+ override _.Dim = shape.Length
+ override _.Nelement = shapeLength shape
+ override _.Dtype = dtype
+ override _.DeviceType : TensorMath.DeviceType = enum (int tt.device_type)
+ override t.Device = TensorMath.Device(t.DeviceType, tt.device_index)
+ override _.Backend = Backend.Torch
+ override _.Handle = box tt
+
+ new (tt: torch.Tensor) =
+ TorchRawTensor(tt, fromTorchShape tt.shape, fromTorchType tt.dtype, fromTorchDevice tt.device)
+
+ member t.MakeLike(tt, ?shape, ?dtype, ?device) : RawTensor =
+ upcast TorchRawTensor(tt, defaultArg shape t.Shape, defaultArg dtype t.Dtype, defaultArg device t.Device)
+
+ member _.TorchTensor = tt
+
+ override t.GetItem(indexes:int[]) =
+ Shape.checkCanIndex t.Shape indexes
+ if t.Shape.Length = 0 then t.ToScalar()
+ else t.MakeLike(tt=tt[indexes |> Array.map (fun v -> torch.TensorIndex.Single(int64 v))], shape=[||]).ToScalar()
+
+ override t.GetSlice(fullBounds:int[,]) =
+ let n = fullBounds.GetLength(0)
+ let newShape = Shape.checkCanGetSlice t.Shape fullBounds
+
+ let indices =
+ Array.init n (fun i ->
+ let start = fullBounds[i,0]
+ let stop = fullBounds[i,1] + 1
+ let len = stop - start
+ if fullBounds[i,2] = 1 && len = 1 then
+ torch.TensorIndex.Single(int64 start)
+ else
+ torch.TensorIndex.Slice(start=int64 start, stop=int64 stop))
+ let res = tt.index(indices)
+ t.MakeLike(tt=res, shape=newShape)
+
+ override t.Clone() =
+ t.MakeLike(tt.clone())
+
+ override t.ComputeHash() =
+ // Torch Tensors must be CPU before Data can be accessed
+ let tt = torchMoveTo tt Device.CPU
+
+ let shape = t.Shape
+ let mutable res = hash shape
+ let n = shapeLength shape
+        match dtype with
+        | Dtype.Int8 ->
+            let data = tt.data<sbyte>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (int32 data[int64 i])
+        | Dtype.Byte ->
+            let data = tt.data<byte>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (int32 data[int64 i])
+        | Dtype.Bool ->
+            let data = tt.data<bool>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (if data[int64 i] then 1 else 0)
+        | Dtype.Int16 ->
+            let data = tt.data<int16>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (int32 data[int64 i])
+        | Dtype.Int32 ->
+            let data = tt.data<int32>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (int32 data[int64 i])
+        | Dtype.Int64 ->
+            let data = tt.data<int64>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (int32 data[int64 i])
+        | Dtype.Float16 ->
+            for i in 0 .. n-1 do
+                res <- combineHashes res (hash (tt.ReadCpuFloat16(int64 i)))
+        | Dtype.BFloat16 ->
+            for i in 0 .. n-1 do
+                res <- combineHashes res (hash (tt.ReadCpuBFloat16(int64 i)))
+        | Dtype.Float32 ->
+            let data = tt.data<float32>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (hash data[int64 i])
+        | Dtype.Float64 ->
+            let data = tt.data<double>()
+            for i in 0 .. n-1 do
+                res <- combineHashes res (hash data[int64 i])
+ res
+
+ override t.Expand(newShape) =
+ t.MakeLike(tt.expand(toTorchShape newShape), shape=newShape)
+
+ override _.ToScalar() : scalar =
+ match dtype with
+ | Dtype.Bool -> tt.ToBoolean() :> scalar
+ | Dtype.Byte -> tt.ToByte() :> scalar
+ | Dtype.Int8 -> tt.ToSByte() :> scalar
+ | Dtype.Int16 -> tt.ToInt16() :> scalar
+ | Dtype.Int32 -> tt.ToInt32() :> scalar
+ | Dtype.Int64 -> tt.ToInt64() :> scalar
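+        // Float16/BFloat16 have no corresponding .NET scalar type; read them back as float32.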
+ | Dtype.Float16 -> tt.ToSingle() :> scalar
+ | Dtype.BFloat16 -> tt.ToSingle() :> scalar
+ | Dtype.Float32 -> tt.ToSingle() :> scalar
+ | Dtype.Float64 -> tt.ToDouble() :> scalar
+
+ member t.ToValuesTyped<'T>(conv: torch.Tensor -> 'T) : obj =
+ // Move the tensors to CPU for efficiency since we're accessing all the data anyway
+ let tt = torchMoveTo tt Device.CPU
+ match t.Shape with
+ | [| |] -> tt.ToScalar() |> box
+ | [| d0 |] -> upcast Array.init<'T> d0 (fun i -> tt[int64 i] |> conv)
+ | [| d0; d1 |] -> upcast Array2D.init<'T> d0 d1 (fun i j -> tt[int64 i, int64 j] |> conv)
+ | [| d0; d1; d2 |] -> upcast Array3D.init<'T> d0 d1 d2 (fun i j k -> tt[int64 i, int64 j, int64 k] |> conv)
+ | [| d0; d1; d2; d3 |] -> upcast Array4D.init<'T> d0 d1 d2 d3 (fun i j k l -> tt[int64 i, int64 j, int64 k, int64 l] |> conv)
+ | [| d0; d1; d2; d3; d4 |] -> upcast Array5D.init<'T> d0 d1 d2 d3 d4 (fun i j k l m -> tt[int64 i, int64 j, int64 k, int64 l, int64 m] |> conv)
+ | [| d0; d1; d2; d3; d4; d5 |] -> upcast Array6D.init<'T> d0 d1 d2 d3 d4 d5 (fun i j k l m n -> tt[int64 i, int64 j, int64 k, int64 l, int64 m, int64 n] |> conv)
+ | _ -> failwithf "Cannot get array for Tensor dimensions > 6. Consider slicing the Tensor. Shape: %A" t.Shape
+
+ override t.ToValues() =
+ match dtype with
+ | Dtype.Bool -> t.ToValuesTyped(fun s -> s.ToBoolean())
+ | Dtype.Byte -> t.ToValuesTyped(fun s -> s.ToByte())
+ | Dtype.Int8 -> t.ToValuesTyped(fun s -> s.ToSByte())
+ | Dtype.Int16 -> t.ToValuesTyped(fun s -> s.ToInt16())
+ | Dtype.Int32 -> t.ToValuesTyped(fun s -> s.ToInt32())
+ | Dtype.Int64 -> t.ToValuesTyped(fun s -> s.ToInt64())
+ | Dtype.Float16 -> t.ToValuesTyped(fun s -> s.ToSingle())
+ | Dtype.BFloat16 -> t.ToValuesTyped(fun s -> s.ToSingle())
+ | Dtype.Float32 -> t.ToValuesTyped(fun s -> s.ToSingle())
+ | Dtype.Float64 -> t.ToValuesTyped(fun s -> s.ToDouble())
+
+ member private _.ToRawDataViaDirectAccess< 'T when 'T: struct and 'T :> ValueType and 'T : (new : unit -> 'T) >() : 'T[] =
+ // Torch Tensors must be CPU before raw data can be accessed
+ let tt2 = torchMoveTo tt Device.CPU
+
+ let data = tt2.data<'T>()
+ let res = Array.zeroCreate<'T> (int32 tt2.NumberOfElements)
+ for i in 0 .. int32 tt2.NumberOfElements - 1 do
+ res[i] <- data[int64 i]
+ res
+
+ member t.ToRawData() : Array =
+ match dtype with
+        | Dtype.Bool -> t.ToRawDataViaDirectAccess<bool>() :> _
+        | Dtype.Byte -> t.ToRawDataViaDirectAccess<byte>() :> _
+        | Dtype.Int8 -> t.ToRawDataViaDirectAccess<sbyte>() :> _
+        | Dtype.Int16 -> t.ToRawDataViaDirectAccess<int16>() :> _
+        | Dtype.Int32 -> t.ToRawDataViaDirectAccess<int32>() :> _
+        | Dtype.Int64 -> t.ToRawDataViaDirectAccess<int64>() :> _
+        | Dtype.Float32 -> t.ToRawDataViaDirectAccess<float32>() :> _
+        | Dtype.Float64 -> t.ToRawDataViaDirectAccess<double>() :> _
+ | Dtype.Float16 ->
+ // Move the tensors to CPU for efficiency since we're accessing all the data anyway
+ let tt2 = torchMoveTo tt Device.CPU
+ Array.init (int32 tt2.NumberOfElements) (int64 >> tt2.ReadCpuFloat16) :> _
+ | Dtype.BFloat16 ->
+ // Move the tensors to CPU for efficiency since we're accessing all the data anyway
+ let tt2 = torchMoveTo tt Device.CPU
+ Array.init (int32 tt2.NumberOfElements) (int64 >> tt2.ReadCpuBFloat16) :> _
+
+ override _.StackTs(tensors, dim) =
+ let tts, shapes = tensors |> Array.map (fun t -> (t :?> TorchRawTensor).TorchTensor, t.Shape) |> Array.unzip
+ let _n, _shape1, _shape2, newShape = Shape.checkCanStack shapes dim
+ let result = torch.stack(tts, int64 dim)
+ (tensors[0] :?> TorchRawTensor).MakeLike(result, newShape)
+
+ override t.UnstackT(dim) =
+ let shape = t.Shape
+ let _shape1, _shape2, unstackedShape = Shape.checkCanUnstack shape dim
+ let results = tt.unbind(dim)
+ results |> Array.map (fun rvalues -> t.MakeLike(rvalues, shape=unstackedShape))
+
+ override t.CatTs(tensors, dim) =
+ let values, shapes = tensors |> Array.map (fun t -> t.TorchTensor, t.Shape) |> Array.unzip
+ let _n, _shape1, _m2, _shape3, outShape = Shape.checkCanCat shapes dim
+ let result = torch.cat(values, int64 dim)
+ t.MakeLike(result, outShape)
+
+ override t.SplitT(sizes, dim) =
+ let shape = t.Shape
+ let outShapes = Shape.checkCanSplit shape sizes dim
+ let results = tt.split(int64s sizes, int64 dim)
+ (results, outShapes) ||> Array.map2 (fun rvalues outShape ->
+ t.MakeLike(rvalues, shape=outShape))
+
+ override t.PermuteT(permutation) =
+ let _, newShape = Shape.checkCanPermute t.Shape permutation
+ let result = tt.permute(int64s permutation)
+ t.MakeLike(result, shape=newShape)
+
+ override t.TransposeT(dim0, dim1) =
+ Shape.checkCanTranspose t.Shape dim0 dim1
+ let result = tt.transpose(int64 dim0, int64 dim1)
+ let shape = result.shape |> Array.map int32
+ t.MakeLike(result, shape=shape)
+
+ override t.TransposeT2() =
+ Shape.checkCanTranspose2d t.Dim
+ let newShape = Shape.computeTranspose2d t.Shape
+ let result = tt.t()
+ t.MakeLike(result, shape=newShape)
+
+ override t.InverseT() =
+ Shape.checkCanInvert t.Shape
+ let result = tt.inverse()
+ t.MakeLike(result, shape=t.Shape)
+
+ override t.DetT() =
+ Shape.checkCanDet t.Shape
+ let result = torch.linalg.det(tt)
+ let shape = result.shape |> Array.map int32
+ t.MakeLike(result, shape=shape)
+
+ override t1.SolveTT(t2) =
+ let newShape = Shape.checkCanSolve t1.Shape t2.Shape
+ let result = torch.linalg.solve(tt, t2.TorchTensor)
+ t1.MakeLike(result, shape=newShape)
+
+ override t.SqueezeT(dim) =
+ let shape = t.Shape
+ let newShape = Shape.squeeze dim shape
+ let mutable res = tt
+ let mutable c = 0
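+        // 'c' tracks the corresponding dim index in 'res', which shifts down as dims are squeezed away.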
+ for i in 0 .. t.Dim - 1 do
+ if shape[i] = 1 && (dim = -1 || i = dim) then
+ res <- res.squeeze(int64 c)
+ else
+ c <- c + 1
+ t.MakeLike(res, shape=newShape)
+
+ override t.UnsqueezeT(dim) =
+ let outputShape = Shape.checkCanUnsqueeze dim t.Shape
+ t.MakeLike(tt.unsqueeze(int64 dim), shape=outputShape)
+
+ override t.FlipT(dims:int[]) =
+        // "flip_cuda" not implemented for 'Bool'
+ let result =
+ if dtype = Dtype.Bool then
+ tt.to_type(torch.ScalarType.Byte).flip(int64s dims).to_type(torch.ScalarType.Bool)
+ elif dtype = Dtype.Float16 || dtype = Dtype.BFloat16 then
+ tt.to_type(torch.ScalarType.Float32).flip(int64s dims).to_type(toTorchType dtype)
+ else
+ tt.flip(int64s dims)
+ t.MakeLike(result)
+
+ override t.DilateT(dilations:int[]) =
+ Shape.checkCanDilate t.Dim dilations
+ let outputShape = Shape.dilated t.Shape dilations
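+        // Dilate one dim at a time: scatter the existing entries into a zero tensor at positions index * dilations[i].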
+ let dims = dilations.Length
+ let mutable res = tt
+ for i=0 to dims-1 do
+ let s = res.shape
+ s[i] <- int64 outputShape[i]
+ let resnew = t.ZerosLike(fromTorchShape s)
+ let indices = Array.init t.Shape[i] id |> Array.map ((*) dilations[i] >> int64)
+ let mutable d = TorchInt64TensorOps().CreateFromFlatArray(indices, shape=[|t.Shape[i]|], device=t.Device)
+ for _=0 to i-1 do
+ d <- d.UnsqueezeT(0)
+ for _=i+1 to dims-1 do
+ d <- d.UnsqueezeT(d.Dim)
+ d <- d.Expand(fromTorchShape res.shape)
+ res <- resnew.TorchTensor.scatter(int64 i, d.TorchTensor, res)
+ t.MakeLike(res, outputShape)
+
+ override t.UndilateT(dilations:int[]) =
+ let shape = t.Shape
+ let outputShape = Shape.undilatedShape shape dilations
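+        // Undilation is strided slicing: keep every dilations[d]-th element along each dimension.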
+ let mutable res = tt
+ for d in 0 .. dilations.Length - 1 do
+ res <- res.slice(int64 d, 0L, int64 shape[d], int64 dilations[d])
+ t.MakeLike(res, outputShape)
+
+ override t.GatherT(dim:int, indices) =
+ Shape.checkCanGather t.Shape dim indices.Shape indices.Dtype
+
+ // NOTE: TensorMath currently expects indices as an Int32 tensor, Torch wants Int64
+ let indices = indices.Cast(Dtype.Int64)
+ let res =
+            // LibTorch Gather on float16/bfloat16 gives: method_name not implemented for 'BFloat16'
+ if dtype = Dtype.Float16 || dtype = Dtype.BFloat16 then
+ tt.to_type(torch.ScalarType.Float32).gather(int64 dim, indices.TorchTensor).to_type(toTorchType dtype)
+ else
+ t.TorchTensor.gather(int64 dim, indices.TorchTensor)
+ t.MakeLike(res, indices.Shape)
+
+ override t.ScatterT(dim:int, indices, destinationShape:Shape) =
+ Shape.checkCanScatter t.Shape dim indices.Shape indices.Dtype destinationShape
+ // NOTE: TensorMath currently expects indices as an Int32 tensor, Torch wants Int64
+ let indices = indices.Cast(Dtype.Int64)
+ let res = t.ZerosLike(destinationShape)
+ // LibTorch Scatter on float16/bfloat16 gives : method_name not implemented for 'BFloat16'
+ if dtype = Dtype.Float16 || dtype = Dtype.BFloat16 then
+ let res2 = res.TorchTensor.to_type(torch.ScalarType.Float32)
+ res2.scatter_(int64 dim, indices.TorchTensor, t.TorchTensor.to_type(torch.ScalarType.Float32)) |> ignore
+ t.MakeLike(res2.to_type(toTorchType dtype), destinationShape)
+ else
+ res.TorchTensor.scatter_(int64 dim, indices.TorchTensor, t.TorchTensor) |> ignore
+ res
+
+ override t.ViewT(shape:Shape) =
+ Shape.checkCanView t.Shape shape
+ t.MakeLike(tt.reshape(toTorchShape shape), shape=shape) // Use Reshape instead of View to ensure underlying non-contiguous libtorch tensors can be viewed. Internally Reshape uses View if possible, otherwise it copies data to a contiguous tensor and then views.
+
+ override t.Cast(newDtype: Dtype) =
+ if newDtype = dtype then
+ upcast t
+ else
+ let result = tt.to_type(toTorchType newDtype)
+ t.MakeLike(result, dtype=newDtype)
+
+ override t.MoveTo(device) =
+ if t.Device = device then (t :> _) else
+ let tt2 = torchMoveTo tt device
+ t.MakeLike(tt2, device=device)
+
+ override t.Equals(t2:RawTensor) : bool =
+ if dtype = t2.Dtype then
+ let r1 = (t.Shape = t2.Shape)
+ if not r1 then false else
+ let tt2 = t2.TorchTensor
+ let r2 = tt.Equals(tt2)
+ r2
+ else
+ opNotSupported2 "Equals" dtype t2.Dtype
+
+ override t.AllClose(t2:RawTensor, relativeTolerance, absoluteTolerance) =
+ if dtype = t2.Dtype then
+ match dtype with
+ | Dtype.IntegralOrBool -> t.Equals(t2)
+ | Dtype.Float16 | Dtype.BFloat16 ->
+ // Need because LibTorch 1.7.0 says "isfinite" not implemented for 'BFloat16'
+ tt.to_type(torch.ScalarType.Float32).allclose(t2.TorchTensor.to_type(torch.ScalarType.Float32), relativeTolerance, absoluteTolerance)
+ | _ -> tt.allclose(t2.TorchTensor, relativeTolerance, absoluteTolerance)
+ else
+ opNotSupported2 "Equals" dtype t2.Dtype
+
+ override t.ClampT(low, high) =
+ let result = tt.clamp(low.TorchTensor.ToScalar(), high.TorchTensor.ToScalar())
+ t.MakeLike(result)
+
+ override t1.LtTT(t2) =
+ let result = tt.lt(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t1.GtTT(t2) =
+ let result = tt.gt(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t1.LeTT(t2) =
+ let result = tt.le(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t1.GeTT(t2) =
+ let result = tt.ge(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t1.EqTT(t2) =
+ let result = tt.eq(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t1.NeqTT(t2) =
+ let result = tt.ne(t2.TorchTensor)
+ t1.MakeLike(result, dtype=Dtype.Bool)
+
+ override t.MaxReduceT(dim, keepDim) =
+ let (struct (maxValues, indexes)) = tt.max(int64 dim, keepdim=keepDim)
+ let newShape = Shape.checkCanMinMaxReduce dim keepDim t.Shape
+ let maxValuesResult = t.MakeLike(maxValues, shape=newShape)
+ let indexesResult = t.MakeLike(indexes, shape=newShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
+ maxValuesResult, indexesResult
+
+ override t.MaxIndexT() =
+ // LibTorch 1.7.0: Max on float16/bfloat16 causes grief
+ let tt =
+ if dtype = Dtype.Float16 || dtype = Dtype.BFloat16 then
+ tt.to_type(torch.ScalarType.Float32)
+ else
+ tt
+ let res = Array.zeroCreate t.Dim
+ let idxs = Array.zeroCreate t.Dim
+ let mutable values = tt
+ // repeatedly reduce, tracking the recorded index for the final maximum eventually selected
+ for i = t.Dim - 1 downto 0 do
+ let (struct (values2, indexes)) = values.max(int64 i)
+ values <- values2
+ idxs[i] <- indexes
+
+ for i = 0 to t.Dim - 1 do
+ let idx = idxs[i]
+
+ res[i] <-
+ match i with
+ | 0 -> idx.ToInt64()
+ | 1 -> idx[res[0]].ToInt64()
+ | 2 -> idx[res[0], res[1]].ToInt64()
+ | 3 -> idx[res[0], res[1], res[2]].ToInt64()
+ | 4 -> idx[res[0], res[1], res[2], res[3]].ToInt64()
+ | 5 -> idx[res[0], res[1], res[2], res[3], res[4]].ToInt64()
+ | 6 -> idx[res[0], res[1], res[2], res[3], res[4], res[5]].ToInt64()
+ | _ -> failwith "MaxIndexT > 6d nyi for torch"
+ res |> Array.map int32
+
+ override t.MinReduceT(dim, keepDim) =
+ let (struct (minValues, indexes)) = tt.min(int64 dim, keepdim=keepDim)
+ let newShape = Shape.checkCanMinMaxReduce dim keepDim t.Shape
+ let minValuesResult = t.MakeLike(minValues, shape=newShape)
+ let indexesResult = t.MakeLike(indexes, shape=newShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
+ minValuesResult, indexesResult
+
+ override t.MinIndexT() =
+ // LibTorch 1.7.0: Min on float16/bfloat16 causes grief
+ let tt =
+ if dtype = Dtype.Float16 || dtype = Dtype.BFloat16 then
+ tt.to_type(torch.ScalarType.Float32)
+ else
+ tt
+ let res = Array.zeroCreate t.Dim
+ let idxs = Array.zeroCreate t.Dim
+ let mutable values = tt
+ // repeatedly reduce, tracking the recorded index for the final minimum eventually selected
+ for i = t.Dim - 1 downto 0 do
+ let (struct (values2, indexes)) = values.min(int64 i)
+ values <- values2
+ idxs[i] <- indexes
+
+ for i = 0 to t.Dim - 1 do
+ let idx = idxs[i]
+
+ res[i] <-
+ match i with
+ | 0 -> idx.ToInt64()
+ | 1 -> idx[res[0]].ToInt64()
+ | 2 -> idx[res[0], res[1]].ToInt64()
+ | 3 -> idx[res[0], res[1], res[2]].ToInt64()
+ | 4 -> idx[res[0], res[1], res[2], res[3]].ToInt64()
+ | 5 -> idx[res[0], res[1], res[2], res[3], res[4]].ToInt64()
+ | 6 -> idx[res[0], res[1], res[2], res[3], res[4], res[5]].ToInt64()
+ | _ -> failwith "MinIndexT > 6d nyi for torch"
+ res |> Array.map int32
+
+ override t1.AddTT(t2, alpha) =
+ let result =
+ match alpha with
+ | Some v -> tt.add(t2.TorchTensor, toTorchScalar v)
+ | None -> tt.add(t2.TorchTensor)
+ t1.MakeLike(result)
+
+ override t1.AddTT0(t2: scalar, ?alpha: scalar) =
+ let result =
+ match alpha with
+ | Some v -> tt.add(toTorchScalar t2, toTorchScalar v)
+ | None -> tt.add(toTorchScalar t2)
+ t1.MakeLike(result)
+
+ override t1.AddTTSlice(location:int[], t2) =
+ Shape.checkCanAddSlice t1.Shape location t2.Shape
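+        // Clone t1, then narrow an aliasing view down to the target slice and add t2 in place.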
+ let shape1 = t1.Shape
+ let shape2 = t2.Shape
+ let expandedShape2 = Shape.unsqueezeAs shape2 shape1
+ let t2Expanded = t2.TorchTensor.expand(toTorchShape expandedShape2)
+ let res = tt.clone()
+ let mutable t1Slice = res // will share memory with res
+ for d in 0 .. location.Length - 1 do
+ let len2 = expandedShape2[d]
+ if location[d] <> 0 || len2 <> shape1[d] then
+ t1Slice <- t1Slice.narrow(int64 d, int64 location[d], int64 len2)
+ t1Slice.add_(t2Expanded) |> ignore
+ t1.MakeLike(res)
+
+ override t1.SubTT(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported2 "SubT" dtype t2.Dtype
+ | _ ->
+ let result = tt.sub(t2.TorchTensor)
+ t1.MakeLike(result)
+
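+    // Scalar-minus-tensor is computed as the negation of tensor-minus-scalar.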
+ override t2.SubFromT0T(t1:scalar) = t2.SubTT0(t1).NegT()
+
+ override t1.SubTT0(t2: scalar) =
+ //let t2v = t2.TorchTensor.ToScalar()
+ let result = tt.sub(toTorchScalar t2)
+ t1.MakeLike(result)
+
+ override t1.MulTT(t2) =
+ let result = tt.mul(t2.TorchTensor)
+ t1.MakeLike(result)
+
+ override t1.MulTT0(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "MulTT0" dtype
+ | _ ->
+ let result = tt.mul(toTorchScalar t2)
+ t1.MakeLike(result)
+
+ override t1.DivTT(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported2 "DivTT" dtype t2.Dtype
+ | _ ->
+ let result = tt.div(t2.TorchTensor)
+ // Torch uses "true division" mirroring Python 3
+ // https://www.python.org/dev/peps/pep-0238/
+ // https://pytorch.org/docs/stable/generated/torch.div.html
+ // also see https://github.com/DiffSharp/DiffSharp/issues/239
+ let outtype = Dtype.divisionType t1.Dtype t2.Dtype
+ t1.MakeLike(result.to_type(toTorchType outtype), dtype=outtype)
+
+ override t2.DivFromT0T(t1: scalar) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "DivT0T" dtype
+ | _ ->
+ let t1 = t2.FullLike(Shape.scalar, t1, dtype=t1.dtype)
+ let result = t1.TorchTensor.div(t2.TorchTensor)
+ // Torch uses "true division" mirroring Python 3
+ // https://www.python.org/dev/peps/pep-0238/
+ // https://pytorch.org/docs/stable/generated/torch.div.html
+ // also see https://github.com/DiffSharp/DiffSharp/issues/239
+ let outtype = widenScalarForDivision t2.Dtype t1.Dtype
+ t2.MakeLike(result.to_type(toTorchType outtype), dtype=outtype)
+
+ override t1.DivTT0(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "DivTT0" dtype
+ | _ ->
+ let t2 = toTorchScalar t2
+ // let t2 = t1.FullLike(Shape.scalar, t2, dtype=t1.Dtype)
+ let result = tt.div(t2)
+ // Torch uses "true division" mirroring Python 3
+ // https://www.python.org/dev/peps/pep-0238/
+ // https://pytorch.org/docs/stable/generated/torch.div.html
+ // also see https://github.com/DiffSharp/DiffSharp/issues/239
+ let outtype = widenScalarForDivision t1.Dtype (fromTorchType t2.Type)
+ t1.MakeLike(result.to_type(toTorchType outtype), dtype=outtype)
+
+ override t1.PowTT(t2) =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "PowTT" dtype
+ | _ ->
+ let result = tt.pow(t2.TorchTensor)
+ t1.MakeLike(result)
+
+ override t2.PowFromT0T(t1:scalar) =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "PowT0T" dtype
+ | _ ->
+ let t1 = t2.FullLike(Shape.scalar, t1)
+ let result = t1.Expand(t2.Shape).TorchTensor.pow(t2.TorchTensor)
+ t2.MakeLike(result)
+
+ override t1.PowTT0(t2:scalar) =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "PowTT0" dtype
+ | _ ->
+ let t2v = toTorchScalar t2
+ let result = tt.pow(t2v)
+ t1.MakeLike(result)
+
+ override t1.MatMulTT(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported2 "MatMulTT" dtype t2.Dtype
+ | _ ->
+ let (t1BatchPart, t1MatrixPart), (t2BatchPart, t2MatrixPart) = Shape.checkCanMatmul t1.Shape t2.Shape
+ if t1BatchPart <> t2BatchPart then failwithf "Cannot matrix multiply raw tensors with shapes %A, %A - mismatch batching" t1.Shape t2.Shape
+ let t1rows = t1MatrixPart[0]
+ let t2cols = t2MatrixPart[1]
+ let newShape = Array.append t1BatchPart [| t1rows; t2cols |]
+ let result =
+ // "addmm for CUDA tensors only supports floating-point types. Try converting the tensors with .float()" | const char *
+ match t1.DeviceType, dtype with
+ | TensorMath.DeviceType.CUDA, (Dtype.Integral as dtype) ->
+ let tt1 = tt.to_type(torch.ScalarType.Float64)
+ let tt2 = t2.TorchTensor.to_type(torch.ScalarType.Float64)
+ tt1.matmul(tt2).round().to_type(toTorchType dtype)
+ | _ ->
+ tt.matmul(t2.TorchTensor)
+ t1.MakeLike(result, newShape)
+
+ override t1.BMMTT(t2) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported2 "BMMTT" dtype t2.Dtype
+ | _ ->
+ let resultShape = Shape.checkCanBMM t1.Shape t2.Shape
+ let result =
+ // "addmm for CUDA tensors only supports floating-point types. Try converting the tensors with .float()" | const char *
+ match t1.DeviceType, dtype with
+ | TensorMath.DeviceType.CUDA, (Dtype.Integral as dtype) ->
+ let tt1 = tt.to_type(torch.ScalarType.Float64)
+ let tt2 = t2.TorchTensor.to_type(torch.ScalarType.Float64)
+ tt1.bmm(tt2).round().to_type(toTorchType dtype)
+ | _ ->
+ tt.bmm(t2.TorchTensor)
+ t1.MakeLike(result, resultShape)
+
+ override t1.Conv1D(t2, stride, padding) = // TODO: bias, dilation and groups
+ let _batchSize, _inputChannels, _kernelSize, _outputChannels, _outputSize, outputShape =
+ Shape.checkCanConv1d t1.DeviceType t2.DeviceType dtype t2.Dtype t1.Shape t2.Shape stride padding 1
+ let resultt =
+ // "conv1d for CUDA tensors only supports floating-point types."
+ match t1.DeviceType, dtype with
+ | TensorMath.DeviceType.CUDA, (Dtype.Integral as dtype) ->
+ torch.nn.functional.conv1d(tt.to_type(torch.ScalarType.Float64), t2.TorchTensor.to_type(torch.ScalarType.Float64), stride=int64 stride, padding=int64 padding, dilation=1L).round().to_type(toTorchType dtype)
+ | _ ->
+ torch.nn.functional.conv1d(tt, t2.TorchTensor, stride=int64 stride, padding=int64 padding, dilation=1L)
+ t1.MakeLike(resultt, shape=outputShape)
+
+ override t1.Conv2D(t2, strides, paddings) = // TODO: bias, dilation and groups
+ let _batchSize, _inputChannels, _kernelDimensions, _outputDimensions, outputShape =
+ Shape.checkCanConv2d t1.DeviceType t2.DeviceType dtype t2.Dtype t1.Shape t2.Shape strides paddings [| 1;1 |]
+ let resultt =
+ // "conv2d for CUDA tensors only supports floating-point types."
+ match t1.DeviceType, dtype with
+ | TensorMath.DeviceType.CUDA, (Dtype.Integral as dtype) ->
+ torch.nn.functional.conv2d(tt.to_type(torch.ScalarType.Float64), t2.TorchTensor.to_type(torch.ScalarType.Float64), strides=int64s strides, padding=int64s paddings).round().to_type(toTorchType dtype)
+ | _ ->
+ torch.nn.functional.conv2d(tt, t2.TorchTensor, strides=int64s strides, padding=int64s paddings)
+ t1.MakeLike(resultt, shape=outputShape)
+
+ override t1.Conv3D(t2, strides, paddings) = // TODO: bias, dilation and groups
+ let _batchSize, _inputChannels, _kernelDimensions, _outputDimensions, outputShape =
+ Shape.checkCanConv3d t1.DeviceType t2.DeviceType dtype t2.Dtype t1.Shape t2.Shape strides paddings [| 1;1;1 |]
+ let resultt =
+ // "conv2d for CUDA tensors only supports floating-point types."
+ match t1.DeviceType, dtype with
+ | TensorMath.DeviceType.CUDA, (Dtype.Integral as dtype) ->
+ torch.nn.functional.conv3d(tt.to_type(torch.ScalarType.Float64), t2.TorchTensor.to_type(torch.ScalarType.Float64), strides=int64s strides, padding=int64s paddings).round().to_type(toTorchType dtype)
+ | _ ->
+ torch.nn.functional.conv3d(tt, t2.TorchTensor, strides=int64s strides, padding=int64s paddings)
+ t1.MakeLike(resultt, shape=outputShape)
+
+ override t1.MaxPool1D(kernelSize, stride, padding) =
+ let _batchSize, _channels, _inputSize, _outputSize, outputShape =
+ Shape.checkCanMaxpool1d dtype t1.Shape kernelSize stride padding
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "MaxPool1D" dtype
+ | _ ->
+ let struct (resultt, indicest) = torch.nn.functional.max_pool1d_with_indices(tt, int64 kernelSize, stride=int64 stride, padding=int64 padding, dilation=1L)
+ // NOTE: TensorMath currently expects indices as an Int32 tensor
+ let indices = t1.MakeLike(indicest, shape=outputShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result, indices
+
+ override t1.MaxPool2D(kernelSize, strides, paddings) =
+ let _batchSize, _channels, _inputDimensions, _kernelDimensions, _outputDimensions, outputShape =
+ Shape.checkCanMaxpool2d dtype t1.Shape kernelSize strides paddings
+ let struct (resultt, indicest) = torch.nn.functional.max_pool2d_with_indices(tt, int64s kernelSize, strides=int64s strides, padding=int64s paddings)
+ // NOTE: TensorMath currently expects indices as an Int32 tensor, Torch wants Int64
+ let indices = t1.MakeLike(indicest, shape=outputShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result, indices
+
+ override t1.MaxPool3D(kernelSize, strides, paddings) =
+ let _batchSize, _channels, _inputDimensions, _kernelDimensions, _outputDimensions, outputShape =
+ Shape.checkCanMaxpool3d dtype t1.Shape kernelSize strides paddings
+ let struct (resultt, indicest) = torch.nn.functional.max_pool3d_with_indices(tt, int64s kernelSize, strides=int64s strides, padding=int64s paddings)
+
+ // NOTE: TensorMath currently expects indices as an Int32 tensor
+ let indices = t1.MakeLike(indicest, shape=outputShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result, indices
+
+ override t1.MaxUnpool1D(indices, outputSize) =
+ // NOTE: LibTorch has no torch::max_unpool1d, and so TorchSharp has no Tensor.MaxUnpool1D.
+ // We therefore unsqueeze to 4D and use MaxUnpool2D instead.
+ //let batchSize, channels, _inputSize, _outputShape = Shape.computeMaxUnpool1d t1.Shape outputSize
+ let t1X = t1.UnsqueezeT(2)
+ let indicesX = indices.UnsqueezeT(2)
+ let resulttX = t1X.MaxUnpool2D(indicesX, [| outputSize[0]; outputSize[1]; 1; outputSize[2] |])
+ let resultt = resulttX.SqueezeT(2)
+ resultt
+
+ override t1.MaxUnpool2D(indices, outputSize) =
+ let _batchSize, _channels, _inputDimensions, outputShape =
+ Shape.checkCanMaxunpool2d dtype t1.Shape indices.Dtype indices.Shape outputSize
+ // NOTE: TensorMath currently expects indices as an Int32 tensor
+ let indices = indices.Cast(Dtype.Int64)
+
+ // note, LibTorch only wants the last two elements of the output size passed in
+ // "There should be exactly two elements (height, width) in output_size (max_unpooling2d_shape_check at ...)"
+ let outputSize = outputSize[2..3]
+
+ // TODO: consider switching to the torch::nn module for MaxUnpool2d
+
+ let resultt = torch.nn.functional.max_unpool2d(tt, indices.TorchTensor, int64s outputSize)
+ t1.MakeLike(resultt, shape=outputShape)
+
+ override t1.MaxUnpool3D(indices, outputSize) =
+ let _batchSize, _channels, _inputDimensions, outputShape =
+ Shape.checkCanMaxunpool3d dtype t1.Shape indices.Dtype indices.Shape outputSize
+ // NOTE: TensorMath currently expects indices as an Int32 tensor
+ let indices = indices.Cast(Dtype.Int64)
+
+ // note, LibTorch only wants the last three elements of the output size passed in
+ // "There should be exactly three elements (depth, height, width) in output_size (max_unpooling3d_shape_check at ..\..\aten\src\ATen\native\MaxUnpooling.cpp:231)"
+ let outputSize = outputSize[2..4]
+
+ // NOTE: strides and padding must always be specified for torch::max_unpool3d C++ entry
+ // TODO: consider switching to the torch::nn module for MaxUnpool
+ let strides = outputSize |> Array.map (fun _ -> 1L)
+ let padding = outputSize |> Array.map (fun _ -> 0L)
+ let resultt = torch.nn.functional.max_unpool3d(tt, indices.TorchTensor, int64s outputSize, strides, padding)
+ t1.MakeLike(resultt, shape=outputShape)
+
+ override t1.AvgPool1D(kernelSize, stride, padding) =
+ let _batchSize, _channels, _inputSize, _outputSize, outputShape = Shape.checkCanAvgpool1d dtype t1.Shape kernelSize stride padding
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "AvgPool1D" dtype
+ | _ ->
+ let resultt = torch.nn.functional.avg_pool1d(tt, int64 kernelSize, stride=int64 stride, padding=int64 padding)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result
+
+ override t1.AvgPool2D(kernelSize, stride, padding) =
+ let _batchSize, _channels, _inputSize, _kernelSize, _outputSize, outputShape = Shape.checkCanAvgpool2d dtype t1.Shape kernelSize stride padding
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "AvgPool2D" dtype
+ | _ ->
+ let resultt = torch.nn.functional.avg_pool2d(tt, int64s kernelSize, strides=int64s stride, paddings=int64s padding)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result
+
+ override t1.AvgPool3D(kernelSize, stride, padding) =
+ let _batchSize, _channels, _inputSize, _kernelSize, _outputSize, outputShape = Shape.checkCanAvgpool3d dtype t1.Shape kernelSize stride padding
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "AvgPool3D" dtype
+ | _ ->
+ let resultt = torch.nn.functional.avg_pool3d(tt, int64s kernelSize, strides=int64s stride, paddings=int64s padding)
+ let result = t1.MakeLike(resultt, shape=outputShape)
+ result
+
+ override t1.AvgPoolReverse1D(originalInput, kernelSize, stride, padding) =
+ let t1X = t1.UnsqueezeT(2)
+ let originalInputX = originalInput.UnsqueezeT(2)
+ let resulttX = t1X.AvgPoolReverse2D(originalInputX, [| 1; kernelSize |], [| 1; stride |], [| 0; padding |])
+ let resultt = resulttX.SqueezeT(2)
+ resultt
+
+ override t1.AvgPoolReverse2D(originalInput, kernelSize, stride, padding) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "AvgPoolReverse2D" dtype
+ | _ ->
+ let resultt = torch.nn.functional.avg_pool2d_backward(tt, originalInput.TorchTensor, int64s kernelSize, strides=int64s stride, paddings=int64s padding)
+ let result = t1.MakeLike(resultt, shape=originalInput.Shape)
+ result
+
+ override t1.AvgPoolReverse3D(originalInput, kernelSize, stride, padding) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "AvgPoolReverse3D" dtype
+ | _ ->
+ let resultt = torch.nn.functional.avg_pool3d_backward(tt, originalInput.TorchTensor, int64s kernelSize, strides=int64s stride, paddings=int64s padding)
+ let result = t1.MakeLike(resultt, shape=originalInput.Shape)
+ result
+
+ override t.NegT() =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "NegT" dtype
+ | _ -> t.MakeLike(-tt)
+
+ override t.SumT(?resultType) =
+ let typeArg = match resultType with None -> Nullable() | Some dt -> Nullable(toTorchType dt)
+ let outType = match resultType with None -> dtype.SummationType | Some dt -> dt
+ t.MakeLike(tt.sum(typeArg), shape=Shape.scalar, dtype=outType)
+
+ override t.SumTDim(dim, ?resultType) =
+ let typeArg = match resultType with None -> Nullable() | Some dt -> Nullable(toTorchType dt)
+ let outType = match resultType with None -> dtype.SummationType | Some dt -> dt
+ let ret = tt.sum(dim=(int64 dim), ``type``=typeArg, keepdim=false) // keepdim is fixed to false as it is handled at Tensor level, not at RawTensor level
+ t.MakeLike(ret, shape=fromTorchShape ret.shape, dtype=outType)
+
+ override t.SignT() =
+ t.MakeLike(tt.sign())
+
+ override t.FloorT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "FloorT" dtype
+ | _ -> t.MakeLike(tt.floor())
+
+ override t.CeilT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "CeilT" dtype
+ | _ -> t.MakeLike(tt.ceil())
+
+ override t.RoundT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "RoundT" dtype
+ | _ -> t.MakeLike(tt.round())
+
+ override t.AbsT() =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "AbsT" dtype
+ | Dtype.Int8 -> t.Cast(Dtype.Int32).AbsT().Cast(Dtype.Int8) // TODO: there is odd behaviour from torch for abs on int8, may have been fixed in a later version?
+ | _ -> t.MakeLike(tt.abs ())
+
+ override t.SoftplusT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "SoftplusT" dtype
+ | _ -> t.MakeLike(tt.softplus())
+
+ override t.ReluT() =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "ReluT" dtype
+ | Dtype.Int8 -> t.Cast(Dtype.Int32).ReluT().Cast(Dtype.Int8) // TODO: there is odd behaviour from torch for relu on int8, may have been fixed in a later version?
+ | _ -> t.MakeLike(tt.relu())
+
+ override t.SigmoidT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "SigmoidT" dtype
+ | _ -> t.MakeLike(tt.sigmoid())
+
+ override t.ExpT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "ExpT" dtype
+ | _ -> t.MakeLike(tt.exp())
+
+ override t.LogT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "LogT" dtype
+ | _ -> t.MakeLike(tt.log())
+
+ override t.Log10T() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "Log10T" dtype
+ | _ -> t.MakeLike(tt.log10())
+
+ override t.SqrtT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "SqrtT" dtype
+ | _ -> t.MakeLike(tt.sqrt())
+
+ override t.SinT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "SinT" dtype
+ | _ -> t.MakeLike(tt.sin())
+
+ override t.CosT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "CosT" dtype
+ | _ -> t.MakeLike(tt.cos())
+
+ override t.TanT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "TanT" dtype
+ | _ -> t.MakeLike(tt.tan())
+
+ override t.SinhT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "SinhT" dtype
+ | _ -> t.MakeLike(tt.sinh())
+
+ override t.CoshT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "CoshT" dtype
+ | _ -> t.MakeLike(tt.cosh())
+
+ override t.TanhT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "TanhT" dtype
+ | _ -> t.MakeLike(tt.tanh())
+
+ override t.AsinT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "AsinT" dtype
+ | _ -> t.MakeLike(tt.asin())
+
+ override t.AcosT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "AcosT" dtype
+ | _ -> t.MakeLike(tt.acos())
+
+ override t.AtanT() =
+ match dtype with
+ | Dtype.IntegralOrBool -> opNotSupported "AtanT" dtype
+ | _ -> t.MakeLike(tt.atan())
+#if LATEST_TORCHSHARP
+ // Included to track new functionality available in TorchSharp
+ //
+ // These will be progressed to RawTensor and Tensor
+ member t.AdaptiveAvgPool1D(outputSize: int32) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "AdaptiveAvgPool1D" dtype
+ | _ -> t.MakeLike(tt.AdaptiveAvgPool1D(int64 outputSize))
+
+ member t.AdaptiveAvgPool2D(outputSizes: int32[]) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "AdaptiveAvgPool2D" dtype
+ | _ -> t.MakeLike(tt.AdaptiveAvgPool2D(int64s outputSizes))
+
+ member t.AdaptiveAvgPool3D(outputSizes: int32[]) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "AdaptiveAvgPool3D" dtype
+ | _ -> t.MakeLike(tt.AdaptiveAvgPool3D(int64s outputSizes))
+
+ member t.AdaptiveAvgPool3DBackward(originalInput: RawTensor) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported "AdaptiveAvgPool3DBackward" dtype
+ | _ -> t.MakeLike(tt.AdaptiveAvgPool3DBackward(originalInput.TorchTensor))
+
+ //member t.AvgPool1D(kernelSize: int32, stride: int32, padding: int32, ?ceil_mode: bool, ?count_include_pad: bool) =
+ // //let _batchSize, _channels, _inputSize, _outputSize, outputShape = Shape.checkCanAvgPool1d dtype t1.Shape kernelSize stride padding
+ // match dtype with
+ // | Dtype.Bool -> opNotSupported "AvgPool1D" dtype
+ // | _ ->
+ // let _resultt = tt.AvgPool1D(int64 kernelSize, stride=int64 stride, padding=int64 padding, ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad)
+ // failwith "tbd - outputShape"
+ // //t.MakeLike(resultt, shape=outputShape)
+
+ //member t.AvgPool2D(kernelSizes: int32[], strides: int32[], paddings: int32[], ?ceil_mode: bool, ?count_include_pad: bool) =
+ // failwith "tbd - TorchSharp signture being updated"
+ ////let _batchSize, _channels, _inputSize, _outputSize, outputShape = Shape.checkCanAvgPool1d dtype t1.Shape kernelSize stride padding
+ //match dtype with
+ //| Dtype.Bool -> opNotSupported "AvgPool2D" dtype
+ //| _ ->
+ //let _resultt = tt.AvgPool2D(int64s kernelSizes, stride=int64 stride, padding=int64 padding, ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad)
+ //failwith "tbd - outputShape"
+ ////t.MakeLike(resultt, shape=outputShape)
+
+ //member t.X(kernelSize: int32, stride: int32, padding: int32, ?ceil_mode: bool, ?count_include_pad: bool) =
+ // //let _batchSize, _channels, _inputSize, _outputSize, outputShape = Shape.checkCanAvgPool1d dtype t1.Shape kernelSize stride padding
+ // match dtype with
+ // | Dtype.Bool -> opNotSupported "AvgPool1D" dtype
+ // | _ ->
+ // let _resultt = tt.BitwiseAnd(int64 kernelSize, stride=int64 stride, padding=int64 padding, ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad)
+ // failwith "tbd - outputShape"
+ // //t.MakeLike(resultt, shape=outputShape)
+#endif
+
+ new (info: System.Runtime.Serialization.SerializationInfo, _context: System.Runtime.Serialization.StreamingContext) =
+ let dtype = info.GetValue("dtype", typeof) :?> Dtype
+ let shape = info.GetValue("shape", typeof) :?> Shape
+ let tt =
+ match dtype with
+ | Dtype.Bool ->
+ let data = info.GetValue("data", typeof) :?> bool[]
+ torch.tensor(data, dtype=toTorchType Dtype.Bool, dimensions=toTorchShape shape)
+ | Dtype.Byte ->
+ let data = info.GetValue("data", typeof) :?> byte[]
+ torch.tensor(data, dtype=toTorchType Dtype.Byte, dimensions=toTorchShape shape)
+ | Dtype.Int8 ->
+ let data = info.GetValue("data", typeof) :?> sbyte[]
+ torch.tensor(data, dtype=toTorchType Dtype.Int8, dimensions=toTorchShape shape)
+ | Dtype.Int16 ->
+ let data = info.GetValue("data", typeof) :?> int16[]
+ torch.tensor(data, dtype=toTorchType Dtype.Int16, dimensions=toTorchShape shape)
+ | Dtype.Int32 ->
+ let data = info.GetValue("data", typeof) :?> int32[]
+ torch.tensor(data, dtype=toTorchType Dtype.Int32, dimensions=toTorchShape shape)
+ | Dtype.Int64 ->
+ let data = info.GetValue("data", typeof) :?> int64[]
+ torch.tensor(data, dtype=toTorchType Dtype.Int64, dimensions=toTorchShape shape)
+ | Dtype.Float32 ->
+ let data = info.GetValue("data", typeof) :?> float32[]
+ torch.tensor(data, dtype=toTorchType Dtype.Float32, dimensions=toTorchShape shape)
+ | Dtype.Float64 ->
+ let data = info.GetValue("data", typeof) :?> double[]
+ torch.tensor(data, dtype=toTorchType Dtype.Float64, dimensions=toTorchShape shape)
+ | Dtype.Float16 ->
+ let data = info.GetValue("data", typeof) :?> float32[]
+ torch.tensor(data, dtype=toTorchType Dtype.Float16, dimensions=toTorchShape shape)
+ | Dtype.BFloat16 ->
+ let data = info.GetValue("data", typeof) :?> float32[]
+ torch.tensor(data, dtype=toTorchType Dtype.BFloat16, dimensions=toTorchShape shape)
+
+ TorchRawTensor(tt, shape, dtype, Device.CPU)
+
+ interface System.Runtime.Serialization.ISerializable with
+
+ //[SecurityPermissionAttribute(SecurityAction.Demand, SerializationFormatter = true)]
+ member t.GetObjectData(info, _context) =
+
+ // Torch Tensors must be CPU before they can access RawData
+ let tCpu = t.MoveTo(Device.CPU) :?> TorchRawTensor
+
+ info.AddValue("dtype", t.Dtype)
+ info.AddValue("shape", t.Shape)
+ info.AddValue("data", tCpu.ToRawData())
+
+
+ override _.ClampInPlace(low, high) =
+ // TODO - next version of TorchSharp will have in place version of this
+ checkMutable()
+ tt <- tt.clamp(low.TorchTensor.ToScalar(), high.TorchTensor.ToScalar())
+
+ override _.LtInPlace(t2) = checkMutable(); tt.lt_(t2.TorchTensor) |> ignore
+
+ override _.GtInPlace(t2) = checkMutable(); tt.gt_(t2.TorchTensor) |> ignore
+
+ override _.LeInPlace(t2) = checkMutable(); tt.le_(t2.TorchTensor) |> ignore
+
+ override _.GeInPlace(t2) = checkMutable(); tt.ge_(t2.TorchTensor) |> ignore
+
+ override _.EqInPlace(t2) = checkMutable(); tt.eq_(t2.TorchTensor) |> ignore
+
+ override _.NeqInPlace(t2) = checkMutable(); tt.ne_(t2.TorchTensor) |> ignore
+
+ override _.AddInPlace(t2, alpha) =
+ checkMutable()
+ match alpha with
+ | Some v -> tt.add_(t2.TorchTensor, toTorchScalar v) |> ignore
+ | None -> tt.add_(t2.TorchTensor) |> ignore
+
+ override _.AddScalarInPlace(t2) =
+ checkMutable()
+ tt.add_(toTorchScalar t2) |> ignore
+
+ // TODO - this should be faster
+ override t1.AddSliceInPlace(location, t2) =
+ checkMutable()
+ Shape.checkCanAddSlice t1.Shape location t2.Shape
+ let shape1 = t1.Shape
+ let shape2 = t2.Shape
+ let expandedShape2 = Shape.unsqueezeAs shape2 shape1
+ let t2Expanded = t2.TorchTensor.expand(toTorchShape expandedShape2)
+ let mutable t1Slice = tt // will share memory with res
+ for d in 0 .. location.Length - 1 do
+ let len2 = expandedShape2[d]
+ if location[d] <> 0 || len2 <> shape1[d] then
+ t1Slice <- t1Slice.narrow(int64 d, int64 location[d], int64 len2)
+ t1Slice.add_(t2Expanded) |> ignore
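+
+ // For intuition (illustrative only): adding a [|2;2|] tensor at location [|1;1|] of a
+ // [|4;4|] tensor narrows the destination view to rows 1..2 and columns 1..2, then does a
+ // single in-place add_ on that view, so no intermediate result tensor is allocated.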
+
+ override _.SubInPlace(t2) = checkMutable(); tt.sub_(t2.TorchTensor) |> ignore
+
+ override _.SubScalarInPlace(t2) = checkMutable(); tt.sub_(toTorchScalar t2) |> ignore
+
+ override _.MulInPlace(t2) = checkMutable(); tt.mul_(t2.TorchTensor) |> ignore
+
+ override _.MulScalarInPlace(t2) = checkMutable(); tt.mul_(toTorchScalar t2) |> ignore
+
+ override _.DivInPlace(t2) = checkMutable(); tt.div_(t2.TorchTensor) |> ignore
+
+ override _.DivScalarInPlace(t2) = checkMutable(); tt.div_(toTorchScalar t2) |> ignore
+
+ override _.PowInPlace(t2) = checkMutable(); tt.pow_(t2.TorchTensor) |> ignore
+
+ override _.PowScalarInPlace(t2) = checkMutable(); tt.pow_(toTorchScalar t2) |> ignore
+
+ override _.MatMulInPlace(t2) = checkMutable(); tt <- tt.matmul(t2.TorchTensor)
+
+ override _.NegInPlace() = checkMutable(); tt.neg_() |> ignore
+
+ override _.SignInPlace() = checkMutable(); tt.sign_() |> ignore
+
+ override _.FloorInPlace() = checkMutable(); tt.floor_() |> ignore
+
+ override _.CeilInPlace() = checkMutable(); tt.ceil_() |> ignore
+
+ override _.RoundInPlace() = checkMutable(); tt.round_() |> ignore
+
+ override _.AbsInPlace() = checkMutable(); tt.abs_() |> ignore
+
+ override _.ReluInPlace() = checkMutable(); tt.relu_() |> ignore
+
+ override _.SoftplusInPlace() = checkMutable(); tt <- tt.softplus()
+
+ override _.SigmoidInPlace() = checkMutable(); tt <- tt.sigmoid()
+
+ override _.ExpInPlace() = checkMutable(); tt <- tt.exp()
+
+ override _.LogInPlace() = checkMutable(); tt.log_() |> ignore
+
+ override _.Log10InPlace() = checkMutable(); tt.log10_() |> ignore
+
+ override _.SqrtInPlace() = checkMutable(); tt.sqrt_() |> ignore
+
+ override _.SinInPlace() = checkMutable(); tt.sin_() |> ignore
+
+ override _.CosInPlace() = checkMutable(); tt.cos_() |> ignore
+
+ override _.TanInPlace() = checkMutable(); tt.tan_() |> ignore
+
+ override _.SinhInPlace() = checkMutable(); tt.sinh_() |> ignore
+
+ override _.CoshInPlace() = checkMutable(); tt.cosh_() |> ignore
+
+ override _.TanhInPlace() = checkMutable(); tt.tanh_() |> ignore
+
+ override _.AsinInPlace() = checkMutable(); tt.asin_() |> ignore
+
+ override _.AcosInPlace() = checkMutable(); tt.acos_() |> ignore
+
+ override _.AtanInPlace() = checkMutable(); tt.atan_() |> ignore
+
+ // TODO - next version of TorchSharp will have in place version of this
+ override t.OnesInPlace() = checkMutable(); tt <- (RawTensor.Ones(shape, dtype, t.Device, Backend.Torch) :?> TorchRawTensor).TorchTensor
+
+ // TODO - next version of TorchSharp will have in place version of this
+ override t.ZerosInPlace() = checkMutable(); tt <- (RawTensor.Zeros(shape, dtype, t.Device, Backend.Torch) :?> TorchRawTensor).TorchTensor
+
+ // TODO - next version of TorchSharp will have in place version of this
+ override t.RandomInPlace() = checkMutable(); tt <- (RawTensor.Random(shape, dtype, t.Device, Backend.Torch) :?> TorchRawTensor).TorchTensor
+
+ // TODO - next version of TorchSharp will have in place version of this
+ override t.RandomNormalInPlace() = checkMutable(); tt <- (RawTensor.RandomNormal(shape, dtype, t.Device, Backend.Torch) :?> TorchRawTensor).TorchTensor
+
+ // TODO - next version of TorchSharp will have in place version of this
+ override t.RandomIntInPlace(low, high) = checkMutable(); tt <- (RawTensor.RandomInt(shape, low, high, dtype, t.Device, Backend.Torch) :?> TorchRawTensor).TorchTensor
+
+ override t.SetMutable() = isMutable <- true
+
+ override t.IsMutable = isMutable
+
+/// The parameterized implementation of the static ops. A generic class is used so that
+/// the correlation with .NET element types stays correct and systematic.
+type TorchTensorOps<'T, 'T2>
+ (dtype: Dtype, conv: 'T -> 'T2,
+ fromScalar: 'T2 -> torch.Tensor,
+ from: 'T2[] * TorchShape -> torch.Tensor,
+ zero: 'T,
+ one: 'T,
+ empty: TorchShape * Device -> torch.Tensor,
+ zeros: TorchShape * Device -> torch.Tensor,
+ ones: TorchShape * Device -> torch.Tensor,
+ random: TorchShape * Device -> torch.Tensor,
+ randomN: TorchShape * Device -> torch.Tensor,
+ randomIntegers: TorchShape * int * int * Device -> torch.Tensor,
+ valueFromScalar: scalar -> 'T,
+ scalarFromConvValue: 'T2 -> TorchSharp.Scalar) =
+
+ member _.Zero(device) = TorchRawTensor(torchMoveTo (fromScalar (conv zero)) device, Shape.scalar, dtype, device) :> RawTensor
+ member _.One(device) = TorchRawTensor(torchMoveTo (fromScalar (conv one)) device, Shape.scalar, dtype, device) :> RawTensor
+ member _.Empty(shape:Shape, device) = TorchRawTensor(empty(toTorchShape shape, device), shape, dtype, device) :> RawTensor
+ member _.Zeros(shape:Shape, device) = TorchRawTensor(zeros(toTorchShape shape, device), shape, dtype, device) :> RawTensor
+ member _.Ones(shape:Shape, device) = TorchRawTensor(ones(toTorchShape shape, device), shape, dtype, device) :> RawTensor
+ member _.Random(shape:Shape, device) = TorchRawTensor(random(toTorchShape shape, device), shape, dtype, device) :> RawTensor
+ member _.RandomNormal(shape:Shape, device) = TorchRawTensor(randomN(toTorchShape shape, device), shape, dtype, device) :> RawTensor
+ member _.RandomInt(shape, low, high, device) = TorchRawTensor(randomIntegers(toTorchShape shape, low, high, device), shape, dtype, device) :> RawTensor
+
+ member _.Full(shape:Shape, value:scalar, device) =
+ let t = empty(toTorchShape shape, device)
+ t.fill_(scalarFromConvValue (conv (valueFromScalar value))) |> ignore
+ TorchRawTensor(t, shape, dtype, device) :> RawTensor
+
+ member _.CreateFromFlatArray(values:Array, shape:Shape, device: Device) : RawTensor =
+ let values = values :?> 'T[] |> Array.map conv
+ // torch.InitializeDevice(device.ToTorch) |> ignore
+ let t =
+ match shape with
+ | [| |] -> fromScalar(values[0])
+ | _ -> from (values, toTorchShape shape)
+ let tt = torchMoveTo t device
+ TorchRawTensor(tt, shape, dtype, device) :> RawTensor
+
+type TorchFloat32TensorOps() =
+
+ inherit TorchTensorOps<single, single>(Dtype.Float32, id,
+ (fun v -> torch.tensor(float v, dtype=toTorchType Dtype.Float32)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Float32)),
+ 0.0f, 1.0f,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.rand(size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.randn(size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch)),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Float32, device=device.ToTorch).add_((float low).ToScalar())),
+ System.Convert.ToSingle,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchFloat64TensorOps() =
+
+ inherit TorchTensorOps<double, double>(Dtype.Float64, id,
+ (fun v -> torch.tensor(v, dtype=toTorchType Dtype.Float64)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Float64)),
+ 0.0, 1.0,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.rand(size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.randn(size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch)),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Float64, device=device.ToTorch).add_((double low).ToScalar())),
+ System.Convert.ToDouble,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchInt8TensorOps() =
+
+ inherit TorchTensorOps<sbyte, sbyte>(Dtype.Int8, sbyte,
+ (fun v -> torch.tensor(int64 v, dtype=toTorchType Dtype.Int8)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Int8)),
+ 0y, 1y,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Int8, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Int8, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Int8, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Int8),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Int8),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Int8, device=device.ToTorch).add_((sbyte low).ToScalar())),
+ System.Convert.ToSByte,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchInt16TensorOps() =
+
+ inherit TorchTensorOps<int16, int16>(Dtype.Int16, int16,
+ (fun v -> torch.tensor(int64 v, dtype=toTorchType Dtype.Int16)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Int16)),
+ 0s, 1s,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Int16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Int16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Int16, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Int16),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Int16),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Int16, device=device.ToTorch).add_((int16 low).ToScalar())),
+ System.Convert.ToInt16,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchInt32TensorOps() =
+
+ inherit TorchTensorOps<int32, int32>(Dtype.Int32, int32,
+ (fun v -> torch.tensor(int64 v, dtype=toTorchType Dtype.Int32)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Int32)),
+ 0, 1,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Int32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Int32, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Int32, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Int32),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Int32),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Int32, device=device.ToTorch).add_((int32 low).ToScalar())),
+ System.Convert.ToInt32,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchInt64TensorOps() =
+
+ inherit TorchTensorOps<int64, int64>(Dtype.Int64, int64,
+ (fun v -> torch.tensor(v, dtype=toTorchType Dtype.Int64)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Int64)),
+ 0L, 1L,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Int64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Int64, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Int64, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Int64),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Int64),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Int64, device=device.ToTorch).add_((int64 low).ToScalar())),
+ System.Convert.ToInt64,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchBoolTensorOps() =
+
+ inherit TorchTensorOps<bool, bool>(Dtype.Bool, id,
+ (fun v -> torch.tensor(v, dtype=toTorchType Dtype.Bool)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Bool)),
+ false, true,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Bool, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Bool, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Bool, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Bool),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Bool),
+ (fun (shape, low, high, device) -> torch.randint(min 2L (int64 (high-low)), size=shape, dtype=toTorchType Dtype.Bool, device=device.ToTorch).add_((low > 0).ToScalar())),
+ System.Convert.ToBoolean,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchByteTensorOps() =
+
+ inherit TorchTensorOps<byte, byte>(Dtype.Byte, id,
+ (fun v -> torch.tensor(int64 v, dtype=toTorchType Dtype.Byte)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Byte)),
+ 0uy, 1uy,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Byte, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Byte, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Byte, device=device.ToTorch)),
+ (fun _ -> opNotSupported "Random" Dtype.Byte),
+ (fun _ -> opNotSupported "RandomNormal" Dtype.Byte),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Byte, device=device.ToTorch).add_((byte low).ToScalar())),
+ System.Convert.ToByte,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchFloat16TensorOps() =
+
+ inherit TorchTensorOps<single, single>(Dtype.Float16, id,
+ (fun v -> torch.tensor(float v, dtype=toTorchType Dtype.Float16)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.Float16)),
+ 0.0f, 1.0f,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.rand(size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.randn(size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch)),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.Float16, device=device.ToTorch).add_((float low).ToScalar())),
+ System.Convert.ToSingle,
+ TorchSharp.Scalar.op_Implicit)
+
+
+type TorchBFloat16TensorOps() =
+
+ inherit TorchTensorOps<single, single>(Dtype.BFloat16, id,
+ (fun v -> torch.tensor(float v, dtype=toTorchType Dtype.BFloat16)),
+ (fun (data, shape) -> torch.tensor(data, shape, dtype=toTorchType Dtype.BFloat16)),
+ 0.0f, 1.0f,
+ (fun (shape, device) -> torch.empty(size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.zeros(size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.ones(size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.rand(size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch)),
+ (fun (shape, device) -> torch.randn(size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch)),
+ (fun (shape, low, high, device) -> torch.randint(int64 (high-low), size=shape, dtype=toTorchType Dtype.BFloat16, device=device.ToTorch).add_((float low).ToScalar())),
+ System.Convert.ToSingle,
+ TorchSharp.Scalar.op_Implicit)
+
+type TorchBackendTensorStatics() =
+ inherit BackendTensorStatics()
+
+ let torchFloat16 = TorchFloat16TensorOps()
+ let torchBFloat16 = TorchBFloat16TensorOps()
+ let torchFloat32 = TorchFloat32TensorOps()
+ let torchFloat64 = TorchFloat64TensorOps()
+ let torchInt8 = TorchInt8TensorOps()
+ let torchInt16 = TorchInt16TensorOps()
+ let torchInt32 = TorchInt32TensorOps()
+ let torchInt64 = TorchInt64TensorOps()
+ let torchByte = TorchByteTensorOps()
+ let torchBool = TorchBoolTensorOps()
+
+ let supported = Array.zeroCreate 32
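+ // Probe cache indexed by device type: 0 = not yet probed, 1 = a one-element tensor
+ // could be created on the device, 2 = the probe failed.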
+ let isSupported (deviceType: TensorMath.DeviceType) =
+ let n = int deviceType
+ match supported[n] with
+ | 0 ->
+ try
+ torch.empty([| 1L |], device= torch.Device(deviceType.ToTorch, index=0)) |> ignore
+ supported[n] <- 1
+ true
+ with _ ->
+ supported[n] <- 2
+ false
+ | 1 -> true
+ | _ -> false
+
+ override _.GetDevices(deviceType) =
+ [
+ match deviceType with
+ | None | Some TensorMath.DeviceType.CPU ->
+ yield Device.CPU
+ | _ -> ()
+
+ match deviceType with
+ | None | Some TensorMath.DeviceType.CUDA ->
+ if torch_cuda.is_available() then
+ let ncuda = torch_cuda.device_count()
+ for i in 0 .. ncuda - 1 do
+ yield (TensorMath.Device(TensorMath.DeviceType.CUDA, i))
+ | _ -> ()
+ // We don't report other devices in GetDevices yet, though they may be usable.
+ // There is currently no way in TorchSharp to get the device count for other device
+ // types; it has to be worked out via some other route.
+ ]
+
+ override _.IsDeviceTypeAvailable (deviceType) =
+ match deviceType with
+ | TensorMath.DeviceType.CPU -> true
+ | TensorMath.DeviceType.CUDA -> torch_cuda.is_available()
+ | _ -> isSupported deviceType
+
+ override _.Seed(seed) =
+ if torch_cuda.is_available() then
+ torch_cuda.manual_seed(int64 seed) |> ignore
+ torch.random.manual_seed(int64 seed) |> ignore
+
+ override _.Zero(dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Zero(device)
+ | BFloat16 -> torchBFloat16.Zero(device)
+ | Float32 -> torchFloat32.Zero(device)
+ | Float64 -> torchFloat64.Zero(device)
+ | Int8 -> torchInt8.Zero(device)
+ | Byte -> torchByte.Zero(device)
+ | Int16 -> torchInt16.Zero(device)
+ | Int32 -> torchInt32.Zero(device)
+ | Int64 -> torchInt64.Zero(device)
+ | Bool -> torchBool.Zero(device)
+
+ override _.One(dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.One(device)
+ | BFloat16 -> torchBFloat16.One(device)
+ | Float32 -> torchFloat32.One(device)
+ | Float64 -> torchFloat64.One(device)
+ | Int8 -> torchInt8.One(device)
+ | Byte -> torchByte.One(device)
+ | Int16 -> torchInt16.One(device)
+ | Int32 -> torchInt32.One(device)
+ | Int64 -> torchInt64.One(device)
+ | Bool -> torchBool.One(device)
+
+ override _.Zeros(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Zeros(shape, device)
+ | BFloat16 -> torchBFloat16.Zeros(shape, device)
+ | Float32 -> torchFloat32.Zeros(shape, device)
+ | Float64 -> torchFloat64.Zeros(shape, device)
+ | Int8 -> torchInt8.Zeros(shape, device)
+ | Byte -> torchByte.Zeros(shape, device)
+ | Int16 -> torchInt16.Zeros(shape, device)
+ | Int32 -> torchInt32.Zeros(shape, device)
+ | Int64 -> torchInt64.Zeros(shape, device)
+ | Bool -> torchBool.Zeros(shape, device)
+
+ override _.Empty(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Empty(shape, device)
+ | BFloat16 -> torchBFloat16.Empty(shape, device)
+ | Float32 -> torchFloat32.Empty(shape, device)
+ | Float64 -> torchFloat64.Empty(shape, device)
+ | Int8 -> torchInt8.Empty(shape, device)
+ | Byte -> torchByte.Empty(shape, device)
+ | Int16 -> torchInt16.Empty(shape, device)
+ | Int32 -> torchInt32.Empty(shape, device)
+ | Int64 -> torchInt64.Empty(shape, device)
+ | Bool -> torchBool.Empty(shape, device)
+
+ override _.Ones(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Ones(shape, device)
+ | BFloat16 -> torchBFloat16.Ones(shape, device)
+ | Float32 -> torchFloat32.Ones(shape, device)
+ | Float64 -> torchFloat64.Ones(shape, device)
+ | Int8 -> torchInt8.Ones(shape, device)
+ | Byte -> torchByte.Ones(shape, device)
+ | Int16 -> torchInt16.Ones(shape, device)
+ | Int32 -> torchInt32.Ones(shape, device)
+ | Int64 -> torchInt64.Ones(shape, device)
+ | Bool -> torchBool.Ones(shape, device)
+
+ override _.Full(shape:Shape, value:scalar, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Full(shape, value, device)
+ | BFloat16 -> torchBFloat16.Full(shape, value, device)
+ | Float32 -> torchFloat32.Full(shape, value, device)
+ | Float64 -> torchFloat64.Full(shape, value, device)
+ | Int8 -> torchInt8.Full(shape, value, device)
+ | Byte -> torchByte.Full(shape, value, device)
+ | Int16 -> torchInt16.Full(shape, value, device)
+ | Int32 -> torchInt32.Full(shape, value, device)
+ | Int64 -> torchInt64.Full(shape, value, device)
+ | Bool -> torchBool.Full(shape, value, device)
+
+ override _.Random(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.Random(shape, device)
+ | BFloat16 -> torchBFloat16.Random(shape, device)
+ | Float32 -> torchFloat32.Random(shape, device)
+ | Float64 -> torchFloat64.Random(shape, device)
+ | Int8 -> torchInt8.Random(shape, device)
+ | Byte -> torchByte.Random(shape, device)
+ | Int16 -> torchInt16.Random(shape, device)
+ | Int32 -> torchInt32.Random(shape, device)
+ | Int64 -> torchInt64.Random(shape, device)
+ | Bool -> torchBool.Random(shape, device)
+
+ override _.RandomNormal(shape:Shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.RandomNormal(shape, device)
+ | BFloat16 -> torchBFloat16.RandomNormal(shape, device)
+ | Float32 -> torchFloat32.RandomNormal(shape, device)
+ | Float64 -> torchFloat64.RandomNormal(shape, device)
+ | Int8 -> torchInt8.RandomNormal(shape, device)
+ | Byte -> torchByte.RandomNormal(shape, device)
+ | Int16 -> torchInt16.RandomNormal(shape, device)
+ | Int32 -> torchInt32.RandomNormal(shape, device)
+ | Int64 -> torchInt64.RandomNormal(shape, device)
+ | Bool -> torchBool.RandomNormal(shape, device)
+
+ override _.RandomInt(shape:Shape, low:int, high:int, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.RandomInt(shape, low, high, device)
+ | BFloat16 -> torchBFloat16.RandomInt(shape, low, high, device)
+ | Float32 -> torchFloat32.RandomInt(shape, low, high, device)
+ | Float64 -> torchFloat64.RandomInt(shape, low, high, device)
+ | Int8 -> torchInt8.RandomInt(shape, low, high, device)
+ | Byte -> torchByte.RandomInt(shape, low, high, device)
+ | Int16 -> torchInt16.RandomInt(shape, low, high, device)
+ | Int32 -> torchInt32.RandomInt(shape, low, high, device)
+ | Int64 -> torchInt64.RandomInt(shape, low, high, device)
+ | Bool -> torchBool.RandomInt(shape, low, high, device)
+
+ override _.CreateFromFlatArray(values:Array, shape, dtype, device) =
+ match dtype with
+ | Float16 -> torchFloat16.CreateFromFlatArray(values, shape, device)
+ | BFloat16 -> torchBFloat16.CreateFromFlatArray(values, shape, device)
+ | Float32 -> torchFloat32.CreateFromFlatArray(values, shape, device)
+ | Float64 -> torchFloat64.CreateFromFlatArray(values, shape, device)
+ | Int8 -> torchInt8.CreateFromFlatArray(values, shape, device)
+ | Byte -> torchByte.CreateFromFlatArray(values, shape, device)
+ | Int16 -> torchInt16.CreateFromFlatArray(values, shape, device)
+ | Int32 -> torchInt32.CreateFromFlatArray(values, shape, device)
+ | Int64 -> torchInt64.CreateFromFlatArray(values, shape, device)
+ | Bool -> torchBool.CreateFromFlatArray(values, shape, device)
+
diff --git a/src/TensorMath/Backend.fs b/src/TensorMath/Backend.fs
new file mode 100644
index 0000000..a5dfd5c
--- /dev/null
+++ b/src/TensorMath/Backend.fs
@@ -0,0 +1,79 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+/// Represents a backend for TensorMath tensors
+[<Struct>]
+type Backend =
+ /// The reference backend
+ | Reference
+ /// The LibTorch backend
+ | Torch
+ /// Reserved for future use
+ | Other of name: string * code: int
+
+ member internal x.Code =
+ match x with
+ | Reference -> 0x000
+ | Torch -> 0x0100
+ | Other (_name, code) -> (code + 3) <<< 8
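+
+ // For example Reference -> 0x000 and Torch -> 0x100, while the first backend created
+ // via Backend.Register gets Other(name, 1), i.e. code (1 + 3) <<< 8 = 0x400, keeping
+ // registered backends clear of the built-in code space.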
+
+ /// Get the name of the backend
+ member x.Name =
+ match x with
+ | Reference -> "Reference"
+ | Torch -> "Torch"
+ | Other (name, _) -> name
+
+ override x.ToString() = x.Name
+
+/// Contains functions and settings related to backend specifications.
+module Backend =
+ let mutable internal count = 0
+ let internal codes = System.Collections.Concurrent.ConcurrentDictionary<string, Backend>()
+
+ /// Register a new backend
+ let Register name =
+ codes.GetOrAdd(name, (fun _ ->
+ count <- count + 1
+ Backend.Other(name, count)))
+
+ /// Get or set the default backend used when creating tensors. Note, use dsharp.config(...) instead.
+ let mutable Default = Backend.Reference
+
+type BackendFunctionality<'T>() =
+ let mutable last = None
+ let backends = System.Collections.Concurrent.ConcurrentDictionary<int, 'T>()
+
+ member _.Get(?backend: Backend) =
+ let backend = defaultArg backend Backend.Default
+ let code = backend.Code
+ match last with
+ | Some (code2, v) when code = code2 -> v
+ | _ ->
+ match backends.TryGetValue(code) with
+ | true, v -> v
+ | false, _ ->
+ let res =
+ backends.GetOrAdd(code, fun _ ->
+ let name = "TensorMath.Backends." + backend.Name
+ let fullName = System.Reflection.Assembly.GetExecutingAssembly().FullName.Replace("TensorMath", name)
+ let asm =
+ try System.Reflection.Assembly.Load(fullName)
+ with e -> failwithf "Couldn't find assembly '%s', error = %s" fullName (e.ToString())
+ let typeName = sprintf "TensorMath.Backends.%s.%s%s" backend.Name backend.Name typeof<'T>.Name
+ let theType = asm.GetType(typeName)
+ if isNull theType then failwithf "Couldn't find type '%s' in assembly '%s'" typeName fullName
+ let b =
+ match System.Activator.CreateInstance(theType) with
+ | :? 'T as b -> b
+ | _ -> failwith "activation failed to return correct type"
+ b
+ )
+ last <- Some (code, res)
+ res
+
+ member _.Backends = backends
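+
+// As a concrete illustration of the resolution scheme above (names derived from the
+// code, not guaranteed shipping artifacts): BackendFunctionality<BackendTensorStatics>().Get(Backend.Torch)
+// loads the assembly "TensorMath.Backends.Torch" and activates the type
+// "TensorMath.Backends.Torch.TorchBackendTensorStatics". The assembly full name is
+// derived from this assembly's, so the backend package must match its version.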
diff --git a/src/TensorMath/Device.fs b/src/TensorMath/Device.fs
new file mode 100644
index 0000000..103cd4a
--- /dev/null
+++ b/src/TensorMath/Device.fs
@@ -0,0 +1,62 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+/// <summary>
+/// Represents the type of a device.
+/// </summary>
+///
+/// <remarks>
+/// The numeric values used are as for LibTorch.
+/// </remarks>
+///
+/// <namespacedoc>
+///   <summary>Contains fundamental types for the tensor programming model, including Tensor, Shape and dsharp.</summary>
+/// </namespacedoc>
+type DeviceType =
+ | CPU = 0
+ | CUDA = 1 // CUDA.
+ | MKLDNN = 2 // Reserved for explicit MKLDNN
+ | OPENGL = 3 // OpenGL
+ | OPENCL = 4 // OpenCL
+ | IDEEP = 5 // IDEEP.
+ | HIP = 6 // AMD HIP
+ | FPGA = 7 // FPGA
+ | MSNPU = 8 // MSNPU
+ | XLA = 9 // XLA / TPU
+
+/// Represents a device specification.
+[<Struct>]
+type Device =
+ | Device of DeviceType * int
+ member x.DeviceType = (let (Device(a,_)) = x in a)
+ member x.DeviceIndex = (let (Device(_,b)) = x in b)
+ static member CPU = Device(DeviceType.CPU, -1)
+ static member GPU = Device(DeviceType.CUDA, 0)
+
+ member internal x.Code = (int x.DeviceType <<< 4) + x.DeviceIndex
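+
+ // For example Device.CPU.Code = (0 <<< 4) + (-1) = -1 and Device.GPU.Code =
+ // (1 <<< 4) + 0 = 16, so distinct (type, index) pairs get distinct codes for
+ // small device indices.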
+
+ member internal x.Name =
+ (match x.DeviceType with
+ | DeviceType.CPU -> "cpu"
+ | DeviceType.CUDA -> "cuda"
+ | DeviceType.MKLDNN -> "mkldnn"
+ | DeviceType.OPENGL -> "opengl"
+ | DeviceType.OPENCL -> "opencl"
+ | DeviceType.IDEEP -> "ideep"
+ | DeviceType.HIP -> "hip"
+ | DeviceType.FPGA -> "fpga"
+ | DeviceType.MSNPU -> "msnpu"
+ | DeviceType.XLA -> "xla"
+ | _ -> failwith "unknown device type") + string x.DeviceIndex
+
+ override x.ToString() = x.Name
+
+/// Contains functions and settings related to device specifications.
+module Device =
+
+ /// Get or set the default device used when creating tensors. Note, use dsharp.config(...) instead.
+ let mutable Default : Device = Device.CPU
diff --git a/src/TensorMath/Dtype.fs b/src/TensorMath/Dtype.fs
new file mode 100644
index 0000000..f521434
--- /dev/null
+++ b/src/TensorMath/Dtype.fs
@@ -0,0 +1,129 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+/// Represents a storage type for elements of a tensor
+[<Struct>]
+type Dtype =
+ /// Store elements as 16-bit floating point numbers (bfloat16 variation)
+ | [<Experimental("BFloat16 support is experimental.")>]
+ BFloat16
+ /// Store elements as 16-bit floating point numbers
+ | [<Experimental("Float16 support is experimental.")>]
+ Float16
+ /// Store elements as 32-bit floating point numbers
+ | Float32
+ /// Store elements as 64-bit floating point numbers
+ | Float64
+ /// Store elements as 8-bit signed integers
+ | Int8
+ /// Store elements as 8-bit unsigned integers
+ | Byte
+ /// Store elements as 16-bit signed integers
+ | Int16
+ /// Store elements as 32-bit signed integers
+ | Int32
+ /// Store elements as 64-bit signed integers
+ | Int64
+ /// Store elements as booleans
+ | Bool
+
+ member internal x.Name =
+ match x with
+ | BFloat16 -> "BFloat16"
+ | Float16 -> "Float16"
+ | Float32 -> "Float32"
+ | Float64 -> "Float64"
+ | Int8 -> "Int8"
+ | Byte -> "Byte"
+ | Int16 -> "Int16"
+ | Int32 -> "Int32"
+ | Int64 -> "Int64"
+ | Bool -> "Bool"
+
+ /// Gets the natural result of the Sum(), SumToSize() and Sum(dim) operation on this dtype
+ member t.SummationType =
+ match t with
+ | Bool | Byte | Int8 | Int16 | Int32 | Int64 -> Dtype.Int64
+ | dt -> dt
+
+ override x.ToString() = x.Name
+
+/// Contains global functions and settings related to tensor element types, used when writing backends.
+[<AutoOpen>]
+module DtypeAutoOpens =
+
+ type Dtype with
+ /// Matches all floating point tensor element types
+ member x.IsFloatingPoint =
+ match x with
+ | Float16 | BFloat16 | Float32 | Float64 -> true
+ | _ -> false
+
+ /// Matches all integral tensor element types
+ member x.IsIntegral =
+ match x with
+ | Byte | Int8 | Int16 | Int32 | Int64 -> true
+ | _ -> false
+
+ /// Raise an exception indicating the given operation is not supported for the given tensor element type.
+ let opNotSupported msg (dtype: Dtype) =
+ invalidOp (sprintf "operation '%s' not permitted on tensors of type %A" msg dtype)
+
+ /// Raise an exception indicating the given operation is not supported for the given tensor device type.
+ let opNotSupportedOnDeviceType msg (dtype: Dtype) (deviceType: DeviceType) =
+ invalidOp (sprintf "operation '%s' not permitted on tensors of type %A on device type %A" msg dtype deviceType)
+
+ /// Raise an exception indicating the given binary operation is not supported for the two given tensor element types.
+ let opNotSupported2 msg (dtype1: Dtype) (dtype2: Dtype) =
+ invalidOp (sprintf "operation '%s' not permitted on tensors of type (%A, %A)" msg dtype1 dtype2)
+
+/// Contains functions and settings related to tensor element types
+module Dtype =
+
+ /// Matches all floating point tensor element types
+ let (|FloatingPoint|_|) (x: Dtype) = if x.IsFloatingPoint then Some() else None
+
+ /// Matches all integral tensor element types
+ let (|Integral|_|) (x: Dtype) = if x.IsIntegral then Some() else None
+
+ /// Matches all integral or boolean tensor element types
+ let (|IntegralOrBool|_|) x =
+ match x with
+ | Integral | Bool -> Some()
+ | _ -> None
+
+ /// Find the Dtype into which dtype1 and dtype2 can be widened
+ let widen (dtype1: Dtype) (dtype2: Dtype) =
+ if dtype1 = dtype2 then Some dtype1
+ else
+ match dtype1, dtype2 with
+ | Float64, _ | _, Float64 -> Some Float64
+ | Float32, _ | _, Float32 -> Some Float32
+ | BFloat16, _ | _, BFloat16 -> Some BFloat16
+ | Float16, _ | _, Float16 -> Some Float16
+ | Int64, _ | _, Int64 -> Some Int64
+ | Int32, _ | _, Int32 -> Some Int32
+ | Int16, _ | _, Int16 -> Some Int16
+ | Int8, Bool | Bool, Int8 -> Some Int8
+ | Byte, Bool | Bool, Byte -> Some Byte
+ | Int8, Int8 -> Some Int8
+ | Byte, Byte -> Some Byte
+ | Bool, Bool -> Some Bool
+ | Int8, Byte | Byte, Int8 -> None
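+ // For example: widen Int32 Float32 = Some Float32, widen Int16 Int64 = Some Int64,
+ // and widen Int8 Byte = None, since neither 8-bit type can represent the other's range.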
+
+ /// Get or set the default element type used when creating tensors. Only floating point types are supported as the default type. Note, use dsharp.config(...) instead.
+ let mutable Default = Dtype.Float32
+
+ /// Find the Dtype which would result from dividing tensors with dtype1 and dtype2
+ let divisionType (dtype1: Dtype) (dtype2: Dtype) =
+ match dtype1.IsFloatingPoint, dtype2.IsFloatingPoint with
+ | false, false -> Default
+ | false, true -> dtype2
+ | true, false -> dtype1
+ | true, true -> (widen dtype1 dtype2).Value
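+ // For example, Int32 / Int32 gives the default floating point type (Float32 unless
+ // reconfigured), while Float64 / Int32 gives Float64.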
+
+
diff --git a/src/TensorMath/Extensions.fs b/src/TensorMath/Extensions.fs
new file mode 100644
index 0000000..5352884
--- /dev/null
+++ b/src/TensorMath/Extensions.fs
@@ -0,0 +1,319 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath.Util
+
+open System
+open System.Collections.Generic
+open System.Collections.Specialized
+open System.Diagnostics.CodeAnalysis
+
+
+/// <summary>
+/// Contains extensions to the F# Array module.
+/// </summary>
+///
+/// <namespacedoc>
+///   <summary>Contains utilities and library extensions related to the TensorMath programming model.</summary>
+/// </namespacedoc>
+module Array =
+
+ /// Determines if all values of the first array lie within the given tolerances of the second array.
+ [<ExcludeFromCodeCoverage>]
+ let inline allClose (relativeTolerance:'T) (absoluteTolerance:'T) (array1:'T[]) (array2:'T[]) =
+ let dim1 = array1.Length
+ let dim2 = array2.Length
+ if dim1 <> dim2 then false
+ else (array1,array2) ||> Array.forall2 (fun a b -> abs(a-b) <= absoluteTolerance + relativeTolerance*abs(b))
+
+ /// Gets the cumulative sum of the input array.
+ [<ExcludeFromCodeCoverage>]
+ let inline cumulativeSum (a:_[]) = (Array.scan (+) LanguagePrimitives.GenericZero a)[1..]
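+ // e.g. cumulativeSum [| 1; 2; 3; 4 |] = [| 1; 3; 6; 10 |] (scan's leading zero is dropped)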
+
+ /// Gets the unique counts of the input array.
+ let getUniqueCounts (sorted:bool) (values:'T[]) =
+ let counts = Dictionary<'T, int>()
+ for v in values do
+ if counts.ContainsKey(v) then counts[v] <- counts[v] + 1 else counts[v] <- 1
+ if sorted then
+ counts |> Array.ofSeq |> Array.sortByDescending (fun (KeyValue(_, v)) -> v) |> Array.map (fun (KeyValue(k, v)) -> k, v) |> Array.unzip
+ else
+ counts |> Array.ofSeq |> Array.map (fun (KeyValue(k, v)) -> k, v) |> Array.unzip
+
+ // Create a 2D array using a flat representation
+ let initFlat2D i j f = Array.init (i*j) (fun ij -> f (ij/j) (ij%j))
+
+ // Create a 3D array using a flat representation
+ let initFlat3D i j k f = Array.init (i*j*k) (fun ijk -> f (ijk/j/k) ((ijk/k)%j) (ijk%k))
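+
+ // The flat layout is row-major; e.g. with initFlat2D 2 3 f, flat index ij maps to
+ // f (ij/3) (ij%3), so indices 0..2 form row 0 and 3..5 form row 1.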
+
+ let foralli f (arr: 'T[]) =
+ let mutable i = 0
+ let n = arr.Length
+ while i < n && f i arr[i] do
+ i <- i + 1
+ (i = n)
+
+ // Copied from https://github.com/dotnet/fsharp/pull/11888 contributed by Jan Dryk (uxsoft)
+ let insertManyAt (index: int) (values: seq<'T>) (source: 'T[]) : 'T[] =
+ if index < 0 || index > source.Length then invalidArg "index" "index must be within bounds of the array"
+
+ let valuesArray = Seq.toArray values
+ if valuesArray.Length = 0 then source
+ else
+ let length = source.Length + valuesArray.Length
+ let result = Array.zeroCreate length
+ if index > 0 then
+ Array.Copy(source, result, index)
+ Array.Copy(valuesArray, 0, result, index, valuesArray.Length)
+ if source.Length - index > 0 then
+ Array.Copy(source, index, result, index + valuesArray.Length, source.Length - index)
+ result
+
+ // Copied from https://github.com/dotnet/fsharp/pull/11888 contributed by Jan Dryk (uxsoft)
+ let removeAt (index: int) (source: 'T[]) : 'T[] =
+ if index < 0 || index >= source.Length then invalidArg "index" "index must be within bounds of the array"
+ let length = source.Length - 1
+ let result = Array.zeroCreate length
+ if index > 0 then
+ Array.Copy(source, result, index)
+ if length - index > 0 then
+ Array.Copy(source, index + 1, result, index, length - index)
+ result
+
+module Array4D =
+ /// Builds a new array whose elements are the results of applying the given function to each of the elements of the array.
+ let map mapping (array:'a[,,,]) =
+ Array4D.init (array.GetLength(0)) (array.GetLength(1)) (array.GetLength(2)) (array.GetLength(3)) (fun i j k l -> mapping array[i, j, k, l])
+
+// See https://github.com/dotnet/fsharp/issues/12013
+//type 'T array5d = 'T ``[,,,,]``
+//type 'T array6d = 'T ``[,,,,,]``
+
+module Array5D =
+ /// Creates a zero-initialized 5D array, returned as a System.Array.
+ let zeroCreate<'T> (length1:int) length2 length3 length4 length5 : Array =
+ System.Array.CreateInstance(typeof<'T>, [|length1;length2;length3;length4;length5|])
+
+ let get (array:Array) (index1:int) index2 index3 index4 index5 =
+ array.GetValue([|index1;index2;index3;index4;index5|])
+
+ let set (array:Array) (index1:int) index2 index3 index4 index5 value =
+ array.SetValue(value, [|index1;index2;index3;index4;index5|])
+
+ let length1 (array: Array) = array.GetLength(0)
+ let length2 (array: Array) = array.GetLength(1)
+ let length3 (array: Array) = array.GetLength(2)
+ let length4 (array: Array) = array.GetLength(3)
+ let length5 (array: Array) = array.GetLength(4)
+
+ let init<'T> (length1:int) length2 length3 length4 length5 (initializer:int->int->int->int->int->'T) : Array =
+ let arr = zeroCreate<'T> length1 length2 length3 length4 length5
+ for i1=0 to length1-1 do
+ for i2=0 to length2-1 do
+ for i3=0 to length3-1 do
+ for i4=0 to length4-1 do
+ for i5=0 to length5-1 do
+ set arr i1 i2 i3 i4 i5 (initializer i1 i2 i3 i4 i5)
+ arr
+
+ let create (length1:int) length2 length3 length4 length5 (initial:'T) = init length1 length2 length3 length4 length5 (fun _ _ _ _ _ -> initial)
+
+ let map mapping (array: Array) =
+ init (length1 array) (length2 array) (length3 array) (length4 array) (length5 array) (fun i1 i2 i3 i4 i5 -> mapping (get array i1 i2 i3 i4 i5))
+
+module Array6D =
+ let zeroCreate<'T> (length1:int) length2 length3 length4 length5 length6 : Array =
+ System.Array.CreateInstance(typeof<'T>, [|length1;length2;length3;length4;length5;length6|])
+
+ let get (array: Array) (index1: int) index2 index3 index4 index5 index6 =
+ array.GetValue([|index1;index2;index3;index4;index5;index6|])
+
+ let set (array: Array) (index1: int) index2 index3 index4 index5 index6 value =
+ array.SetValue(value, [|index1;index2;index3;index4;index5;index6|])
+
+ let length1 (array: Array) = array.GetLength(0)
+ let length2 (array: Array) = array.GetLength(1)
+ let length3 (array: Array) = array.GetLength(2)
+ let length4 (array: Array) = array.GetLength(3)
+ let length5 (array: Array) = array.GetLength(4)
+ let length6 (array: Array) = array.GetLength(5)
+
+ let init<'T> (length1: int) length2 length3 length4 length5 length6 (initializer: int->int->int->int->int->int->'T) =
+ let arr = zeroCreate<'T> length1 length2 length3 length4 length5 length6
+ for i1=0 to length1-1 do
+ for i2=0 to length2-1 do
+ for i3=0 to length3-1 do
+ for i4=0 to length4-1 do
+ for i5=0 to length5-1 do
+ for i6=0 to length6-1 do
+ set arr i1 i2 i3 i4 i5 i6 (initializer i1 i2 i3 i4 i5 i6)
+ arr
+
+ let create (length1: int) length2 length3 length4 length5 length6 (initial:'T) =
+ init length1 length2 length3 length4 length5 length6 (fun _ _ _ _ _ _ -> initial)
+
+ let map mapping (array: Array) =
+ init (length1 array) (length2 array) (length3 array) (length4 array) (length5 array) (length6 array) (fun i1 i2 i3 i4 i5 i6 -> mapping (get array i1 i2 i3 i4 i5 i6))
+
+
+// Notes about slicing 5d and 6d arrays if needed
+// #if SLICING
+// []
+// module Array5DExtensions =
+// type ``[,,,,]``<'T> with
+// member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) : ``[,,,,]``<'T> =
+// failwith "tbd"
+// member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) : 'T[,,,] =
+// failwith "tbd"
+//
+// let d = Array5D.zeroCreate 2 2 2 2 2
+// d[0..0,0..0,0..0,0..0,0..0]
+// d[0,0..0,0..0,0..0,0..0]
+// #endif
+
+
+module ArrayND =
+ /// Initializes an array with a given shape and initializer function.
+ let init (shape: int[]) (f: int[] -> 'T) : obj =
+ match shape with
+ | [| |] -> f [| |] :> _
+ | [| d1 |] -> Array.init d1 (fun i -> f [| i |]) :> _
+ | [| d1; d2 |] -> Array2D.init d1 d2 (fun i1 i2 -> f [| i1; i2 |]) :> _
+ | [| d1; d2; d3 |] -> Array3D.init d1 d2 d3 (fun i1 i2 i3 -> f [| i1; i2; i3 |]) :> _
+ | [| d1; d2; d3; d4 |] -> Array4D.init d1 d2 d3 d4 (fun i1 i2 i3 i4 -> f [| i1; i2; i3; i4 |]) :> _
+ | [| d1; d2; d3; d4; d5 |] -> Array5D.init d1 d2 d3 d4 d5 (fun i1 i2 i3 i4 i5 -> f [| i1; i2; i3; i4; i5 |]) :> _
+ | [| d1; d2; d3; d4; d5; d6 |] -> Array6D.init d1 d2 d3 d4 d5 d6 (fun i1 i2 i3 i4 i5 i6 -> f [| i1; i2; i3; i4; i5; i6 |]) :> _
+ | _ -> failwith "ArrayND.init not supported for dim > 6"
+
+ /// Initializes an array with a given shape and initializer function.
+ let zeroCreate (shape: int[]) : Array =
+ match shape with
+ | [| |] -> [| |] :> _
+ | [| d1 |] -> Array.zeroCreate d1 :> _
+ | [| d1; d2 |] -> Array2D.zeroCreate d1 d2 :> _
+ | [| d1; d2; d3 |] -> Array3D.zeroCreate d1 d2 d3 :> _
+ | [| d1; d2; d3; d4 |] -> Array4D.zeroCreate d1 d2 d3 d4 :> _
+ | [| d1; d2; d3; d4; d5 |] -> Array5D.zeroCreate d1 d2 d3 d4 d5
+ | [| d1; d2; d3; d4; d5; d6 |] -> Array6D.zeroCreate d1 d2 d3 d4 d5 d6
+ | _ -> failwith "ArrayND.zeroCreate not supported for dim > 6"
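+
+// Illustrative sketch (not part of the API): ArrayND dispatches on the rank of the
+// requested shape, so one index-based initializer covers ranks 0 to 6. The values
+// below are assumed, for documentation only:
+//
+//     let a = ArrayND.init [| 2; 3 |] (fun idx -> idx[0] * 10 + idx[1]) :?> int[,]
+//     // a[1,2] = 12; shapes of rank 5 and 6 dispatch to Array5D/Array6D above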
+
+/// Contains extensions to the F# Seq module.
+module Seq =
+
+ /// Gets the index of the maximum element of the sequence.
+ let maxIndex seq = seq |> Seq.mapi (fun i x -> i, x) |> Seq.maxBy snd |> fst
+
+ /// Gets the index of the minimum element of the sequence.
+ let minIndex seq = seq |> Seq.mapi (fun i x -> i, x) |> Seq.minBy snd |> fst
+
+ /// Indicates if all elements of the sequence are equal.
+ let allEqual (items:seq<'T>) =
+ let item0 = items |> Seq.head
+ items |> Seq.forall ((=) item0)
+
+ /// Gets the duplicate elements in the sequence.
+ let duplicates l =
+ l |> List.ofSeq
+ |> List.groupBy id
+ |> List.choose ( function
+ | _, x::_::_ -> Some x
+ | _ -> None )
+
+ /// Indicates if a sequence has duplicate elements.
+ let hasDuplicates l =
+ duplicates l |> List.isEmpty |> not
+
+ /// Like Seq.toArray but does not clone the array if the input is already an array
+ let inline toArrayQuick (xs: seq<'T>) =
+ match xs with
+ | :? ('T[]) as arr -> arr
+ | _ -> Seq.toArray xs
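+
+// Illustrative sketch of the helpers above (values assumed, for documentation only):
+//
+//     Seq.maxIndex [ 1.; 5.; 3. ]       // 1
+//     Seq.duplicates [ 1; 2; 1; 3; 2 ]  // [1; 2]
+//     Seq.hasDuplicates [ 1; 2; 3 ]     // false
+//
+// Note that allEqual calls Seq.head, so it assumes a non-empty input sequence.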
+
+/// Contains extensions related to .NET OrderedDictionary.
+module OrderedDictionary =
+
+ /// Gets a fresh array containing the keys of the dictionary.
+ let copyKeys (dictionary:OrderedDictionary) =
+ let keys = Array.zeroCreate dictionary.Count
+ dictionary.Keys.CopyTo(keys, 0)
+ keys
+
+/// Contains extensions related to .NET Dictionary.
+module Dictionary =
+
+ /// Gets a fresh array containing the keys of the dictionary.
+ let copyKeys (dictionary:Dictionary<'Key, 'Value>) =
+ let keys = Array.zeroCreate dictionary.Count
+ dictionary.Keys.CopyTo(keys, 0)
+ keys
+
+ /// Gets a fresh array containing the values of the dictionary.
+ let copyValues (dictionary:Dictionary<'Key, 'Value>) =
+ let values = Array.zeroCreate dictionary.Count
+ dictionary.Values.CopyTo(values, 0)
+ values
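+
+// Illustrative sketch: copyKeys/copyValues snapshot the dictionary into fresh arrays,
+// so mutating the dictionary afterwards does not affect the copies:
+//
+//     let d = Dictionary<string, int>()
+//     d["a"] <- 1
+//     let ks = Dictionary.copyKeys d   // [|"a"|]
+//     d["b"] <- 2                      // ks is unchanged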
+
+/// Contains auto-opened extensions to the F# programming model.
+[<AutoOpen>]
+module ExtensionAutoOpens =
+
+ /// Indicates if a value is not null.
+    [<ExcludeFromCodeCoverage>]
+ let inline notNull value = not (obj.ReferenceEquals(value, null))
+
+ /// Creates a non-jagged 3D array from jagged data.
+ let array3D data =
+ let data = data |> Array.ofSeq |> Array.map array2D
+ let r1, r2, r3 = data.Length, data[0].GetLength(0), data[0].GetLength(1)
+ for i in 0 .. r1-1 do
+ let q2 = data[i].GetLength(0)
+ let q3 = data[i].GetLength(1)
+ if q2 <> r2 || q3 <> r3 then
+ invalidArg "data" (sprintf "jagged input at position %d: first is _ x %d x %d, later is _ x %d x %d" i r2 r3 q2 q3)
+ Array3D.init r1 r2 r3 (fun i j k -> data[i][j,k])
+
+ /// Creates a non-jagged 4D array from jagged data.
+ let array4D data =
+ let data = data |> array2D |> Array2D.map array2D
+ let r1,r2,r3,r4 = data.GetLength(0), data.GetLength(1), data[0,0].GetLength(0), data[0,0].GetLength(1)
+ for i in 0 .. r1-1 do
+ for j in 0 .. r2-1 do
+ let q3 = data[i,j].GetLength(0)
+ let q4 = data[i,j].GetLength(1)
+ if q3 <> r3 || q4 <> r4 then
+                    invalidArg "data" (sprintf "jagged input at position (%d,%d): first is _ x _ x %d x %d, later is _ x _ x %d x %d" i j r3 r4 q3 q4)
+ Array4D.init r1 r2 r3 r4 (fun i j k m -> data[i,j][k,m])
+
+ let array5D data =
+ let data = data |> Array.ofSeq |> Array.map array4D
+ let r1,r2,r3,r4,r5 = data.Length, data[0].GetLength(0), data[0].GetLength(1), data[0].GetLength(2), data[0].GetLength(3)
+ for i in 0 .. r1-1 do
+ let q2 = data[i].GetLength(0)
+ let q3 = data[i].GetLength(1)
+ let q4 = data[i].GetLength(2)
+ let q5 = data[i].GetLength(3)
+ if q2 <> r2 || q3 <> r3 || q4 <> r4 || q5 <> r5 then
+ invalidArg "data" (sprintf "jagged input at position %d: first is _ x %d x %d x %d x %d, later is _ x %d x %d x %d x %d" i r2 r3 r4 r5 q2 q3 q4 q5)
+ Array5D.init r1 r2 r3 r4 r5 (fun i1 i2 i3 i4 i5 -> data[i1][i2,i3,i4,i5])
+
+ let array6D data =
+ let data = data |> array2D |> Array2D.map array4D
+ let r1,r2,r3,r4,r5,r6 = data.GetLength(0), data.GetLength(1), data[0,0].GetLength(0), data[0,0].GetLength(1), data[0,0].GetLength(2), data[0,0].GetLength(3)
+ for i in 0 .. r1-1 do
+            for j in 0 .. r2-1 do
+ let q3 = data[i,j].GetLength(0)
+ let q4 = data[i,j].GetLength(1)
+ let q5 = data[i,j].GetLength(2)
+ let q6 = data[i,j].GetLength(3)
+ if q3 <> r3 || q4 <> r4 || q5 <> r5 || q6 <> r6 then
+ invalidArg "data" (sprintf "jagged input at position (%d,%d): first is _ x _ x %d x %d x %d x %d, later is _ x _ x %d x %d x %d x %d" i j r3 r4 r5 r6 q3 q4 q5 q6)
+ Array6D.init r1 r2 r3 r4 r5 r6 (fun i1 i2 i3 i4 i5 i6 -> data[i1,i2][i3,i4,i5,i6])
+
+ /// Print the given value to the console using the '%A' printf format specifier
+ let print x = printfn "%A" x
+
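+// Illustrative sketch: the array3D..array6D constructors above validate that the
+// jagged input is rectangular before copying it into a single non-jagged array
+// (values assumed, for documentation only):
+//
+//     let a = array3D [ [ [1; 2]; [3; 4] ]; [ [5; 6]; [7; 8] ] ]   // int[,,], shape 2x2x2
+//     // array3D [ [ [1; 2] ]; [ [3; 4]; [5; 6] ] ] raises ArgumentException (jagged input)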
+
+[<assembly: System.Runtime.CompilerServices.InternalsVisibleTo("TensorMath.Tests")>]
+do()
diff --git a/src/TensorMath/Library.fs b/src/TensorMath/Library.fs
deleted file mode 100644
index 58a485d..0000000
--- a/src/TensorMath/Library.fs
+++ /dev/null
@@ -1,5 +0,0 @@
-namespace TensorMath
-
-module Say =
- let hello name =
- printfn "Hello %s" name
diff --git a/src/TensorMath/Op.AvgPool.fs b/src/TensorMath/Op.AvgPool.fs
new file mode 100644
index 0000000..24e8a50
--- /dev/null
+++ b/src/TensorMath/Op.AvgPool.fs
@@ -0,0 +1,101 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpAvgPoolExtensions =
+
+ type Tensor with
+        /// <summary>Applies a 1D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+ member a.avgpool1d(kernelSize:int, ?stride:int, ?padding:int(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let stride = defaultArg stride kernelSize
+ let padding = defaultArg padding 0
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ Shape.checkCanAvgpool1d a.dtype a.shape kernelSize stride padding |> ignore
+ TensorC(a.primalRaw.AvgPool1D(kernelSize, stride, padding(* , ceil_mode, count_include_pad *)))
+
+ member internal a.avgpoolReverse1d(originalInput:Tensor, kernelSize:int, ?stride:int, ?padding:int(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let stride = defaultArg stride kernelSize
+ let padding = defaultArg padding 0
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ TensorC(a.primalRaw.AvgPoolReverse1D(originalInput.primalRaw, kernelSize, stride, padding(* , ceil_mode, count_include_pad *)))
+
+        /// <summary>Applies a 2D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+        /// <param name="kernelSizes">The sizes of the window to average over.</param>
+        /// <param name="strides">The strides of the window. Default value is kernelSizes.</param>
+        /// <param name="paddings">The implicit zero paddings to be added on both sides.</param>
+        member a.avgpool2d(?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let kernelSizes, strides, paddings = Shape.resolve2dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ Shape.checkCanAvgpool2d a.dtype a.shape kernelSizes strides paddings |> ignore
+ TensorC(a.primalRaw.AvgPool2D(kernelSizes, strides, paddings(* , ceil_mode, count_include_pad *)))
+
+        member internal a.avgpoolReverse2d(originalInput:Tensor, ?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let kernelSizes, strides, paddings = Shape.resolve2dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ TensorC(a.primalRaw.AvgPoolReverse2D(originalInput.primalRaw, kernelSizes, strides, paddings(* , ceil_mode, count_include_pad *)))
+
+        /// <summary>Applies a 3D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+        /// <param name="kernelSizes">The sizes of the window to average over.</param>
+        /// <param name="strides">The strides of the window. Default value is kernelSizes.</param>
+        /// <param name="paddings">The implicit zero paddings to be added on both sides.</param>
+        member a.avgpool3d(?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let kernelSizes, strides, paddings = Shape.resolve3dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ Shape.checkCanAvgpool3d a.dtype a.shape kernelSizes strides paddings |> ignore
+ TensorC(a.primalRaw.AvgPool3D(kernelSizes, strides, paddings(* , ceil_mode, count_include_pad *)))
+
+        member internal a.avgpoolReverse3d(originalInput:Tensor, ?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ let kernelSizes, strides, paddings = Shape.resolve3dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings
+ //let ceil_mode = defaultArg ceil_mode false
+ //let count_include_pad= defaultArg count_include_pad true
+ TensorC(a.primalRaw.AvgPoolReverse3D(originalInput.primalRaw, kernelSizes, strides, paddings(* , ceil_mode, count_include_pad *)))
+
+ type dsharp with
+        /// <summary>Applies a 1D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="input">The input tensor.</param>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+        static member avgpool1d(input: Tensor, kernelSize:int, ?stride:int, ?padding:int(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+            input.avgpool1d(kernelSize=kernelSize, ?stride=stride, ?padding=padding(* , ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad *))
+
+        /// <summary>Applies a 2D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="input">The input tensor.</param>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+        /// <param name="kernelSizes">The sizes of the window to average over.</param>
+        /// <param name="strides">The strides of the window. Default value is kernelSizes.</param>
+        /// <param name="paddings">The implicit zero paddings to be added on both sides.</param>
+        static member avgpool2d(input: Tensor, ?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ input.avgpool2d(?kernelSize=kernelSize, ?stride=stride, ?padding=padding, ?kernelSizes=kernelSizes, ?strides=strides, ?paddings=paddings(* , ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad *))
+
+        /// <summary>Applies a 3D average pooling over an input signal composed of several input planes.</summary>
+        /// <param name="input">The input tensor.</param>
+        /// <param name="kernelSize">The size of the window to average over.</param>
+        /// <param name="stride">The stride of the window. Default value is kernelSize.</param>
+        /// <param name="padding">The implicit zero padding to be added on both sides.</param>
+        /// <param name="kernelSizes">The sizes of the window to average over.</param>
+        /// <param name="strides">The strides of the window. Default value is kernelSizes.</param>
+        /// <param name="paddings">The implicit zero paddings to be added on both sides.</param>
+        static member avgpool3d(input: Tensor, ?kernelSize:int, ?stride:int, ?padding:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>(* , ?ceil_mode: bool, ?count_include_pad: bool *)) =
+ input.avgpool3d(?kernelSize=kernelSize, ?stride=stride, ?padding=padding, ?kernelSizes=kernelSizes, ?strides=strides, ?paddings=paddings(* , ?ceil_mode=ceil_mode, ?count_include_pad=count_include_pad *))
+
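+// Illustrative usage sketch. It assumes a tensor-construction entry point such as
+// dsharp.tensor, which is not shown in this diff. For an input of shape [N; C; L],
+// avgpool1d with kernel size k, stride s and padding p yields shape
+// [N; C; (L + 2*p - k) / s + 1]:
+//
+//     let x = dsharp.tensor [[[1.; 2.; 3.; 4.]]]   // shape [1; 1; 4]
+//     let y = dsharp.avgpool1d(x, kernelSize=2)    // stride defaults to kernelSize
+//     // y = tensor([[[1.5000, 3.5000]]]), shape [1; 1; 2]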
diff --git a/src/TensorMath/Op.BMM.fs b/src/TensorMath/Op.BMM.fs
new file mode 100644
index 0000000..2438f38
--- /dev/null
+++ b/src/TensorMath/Op.BMM.fs
@@ -0,0 +1,22 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpBMMExtensions =
+
+ type Tensor with
+        /// <summary>Batched matrix product of two tensors. Tensors must be 3d tensors each containing the same number of matrices. If the tensor is a \(b \times n \times m\) tensor, and <paramref name="b" /> is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.</summary>
+        /// <param name="b">The second tensor.</param>
+ member a.bmm(b:Tensor) =
+ Shape.checkCanBMM a.shape b.shape |> ignore
+ TensorC(a.primalRaw.BMMTT(b.primalRaw))
+
+ type dsharp with
+        /// <summary>Batched matrix product of two tensors. Tensors <paramref name="a" /> and <paramref name="b" /> must be 3d tensors each containing the same number of matrices. If <paramref name="a" /> is a \(b \times n \times m\) tensor, and <paramref name="b" /> is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.</summary>
+        /// <param name="a">The first tensor.</param>
+        /// <param name="b">The second tensor.</param>
+ static member bmm(a:Tensor, b:Tensor) = a.bmm(b)
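+
+// Illustrative shape sketch, assuming a random-tensor constructor such as dsharp.randn
+// (not shown in this diff): bmm multiplies the b matching matrix pairs in two batches.
+//
+//     let x = dsharp.randn([2; 3; 4])
+//     let y = dsharp.randn([2; 4; 5])
+//     (x.bmm(y)).shape   // [|2; 3; 5|]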
diff --git a/src/TensorMath/Op.Det.fs b/src/TensorMath/Op.Det.fs
new file mode 100644
index 0000000..3776808
--- /dev/null
+++ b/src/TensorMath/Op.Det.fs
@@ -0,0 +1,17 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpDetExtensions =
+
+ type Tensor with
+ member a.det() =
+ Shape.checkCanDet a.shape
+ TensorC(a.primalRaw.DetT())
+
+ type dsharp with
+ static member det(a:Tensor) = a.det()
diff --git a/src/TensorMath/Op.Inv.fs b/src/TensorMath/Op.Inv.fs
new file mode 100644
index 0000000..3cdd435
--- /dev/null
+++ b/src/TensorMath/Op.Inv.fs
@@ -0,0 +1,17 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpInvExtensions =
+
+ type Tensor with
+ member a.inv() =
+ Shape.checkCanInvert a.shape
+ TensorC(a.primalRaw.InverseT())
+
+ type dsharp with
+ static member inv(a:Tensor) = a.inv()
diff --git a/src/TensorMath/Op.Norm.fs b/src/TensorMath/Op.Norm.fs
new file mode 100644
index 0000000..93302a5
--- /dev/null
+++ b/src/TensorMath/Op.Norm.fs
@@ -0,0 +1,30 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpNormExtensions =
+
+ type Tensor with
+ member a.norm(?order:float, ?dim:int, ?keepDim:bool) =
+ if not (a.dtype = Dtype.Float32 || a.dtype = Dtype.Float64) then failwithf "Vector norm is only supported for Float32 and Float64 dtypes."
+ let order = defaultArg order 2.
+ match order, dim with
+ | 1., None -> a.flatten().abs().sum()
+ | 1., Some(dim) -> a.abs().sum(dim=dim, ?keepDim=keepDim)
+ | 2., None -> let aa = a.flatten() in (aa * aa).sum().sqrt()
+ | 2., Some(dim) -> (a * a).sum(dim=dim, ?keepDim=keepDim).sqrt()
+ | System.Double.PositiveInfinity, None -> a.flatten().abs().max()
+ | System.Double.PositiveInfinity, Some(dim) -> a.abs().max(dim=dim, ?keepDim=keepDim)
+ | System.Double.NegativeInfinity, None -> a.flatten().abs().min()
+ | System.Double.NegativeInfinity, Some(dim) -> a.abs().min(dim=dim, ?keepDim=keepDim)
+ | 0., None -> a.ne(a.zerosLike()).cast(dtype=a.dtype).sum()
+ | 0., Some(dim) -> a.ne(a.zerosLike()).cast(dtype=a.dtype).sum(dim=dim, ?keepDim=keepDim)
+ | order, None -> a.abs().pow(order).sum().pow(1./order)
+ | order, Some(dim) -> a.abs().pow(order).sum(dim=dim, ?keepDim=keepDim).pow(1./order)
+
+ type dsharp with
+ static member norm(a:Tensor, ?order:float, ?dim:int, ?keepDim:bool) = a.norm(?order=order, ?dim=dim, ?keepDim=keepDim)
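+
+// Illustrative sketch of the order handling above, assuming a constructor such as
+// dsharp.tensor (not shown in this diff): order=1 sums absolute values, order=2 is the
+// Euclidean norm, +/-infinity take the max/min absolute value, order=0 counts nonzero
+// elements, and any other order p computes (sum |x|^p)^(1/p).
+//
+//     let v = dsharp.tensor [3.; -4.]
+//     v.norm()                                      // tensor(5.)
+//     v.norm(order=1.)                              // tensor(7.)
+//     v.norm(order=System.Double.PositiveInfinity)  // tensor(4.)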
diff --git a/src/TensorMath/Op.Outer.fs b/src/TensorMath/Op.Outer.fs
new file mode 100644
index 0000000..db42b81
--- /dev/null
+++ b/src/TensorMath/Op.Outer.fs
@@ -0,0 +1,24 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpOuterExtensions =
+
+ type Tensor with
+        /// <summary>Outer product of two tensors.</summary>
+        /// <param name="b">The second tensor.</param>
+ member a.outer(b:Tensor) =
+ match a.dim, b.dim with
+ | 1, 1 -> a.unsqueeze(1).matmul(b.unsqueeze(0))
+ | 2, 2 when a.shape[0] = b.shape[0] -> a.unsqueeze(2).bmm(b.unsqueeze(1)) // Batched outer product
+ | _ -> failwithf "Outer product unsupported for tensor shapes %A %A" a.shape b.shape
+
+ type dsharp with
+        /// <summary>Outer product of two tensors.</summary>
+        /// <param name="a">The first tensor.</param>
+        /// <param name="b">The second tensor.</param>
+ static member outer(a:Tensor, b:Tensor) = a.outer(b)
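+
+// Illustrative shape sketch, assuming a constructor such as dsharp.tensor (not shown in
+// this diff): 1-d inputs of shapes [n] and [m] are lifted to n-by-1 and 1-by-m matrices
+// and multiplied, giving shape [n; m]; 2-d inputs of shapes [b; n] and [b; m] take the
+// bmm path, giving a batched result of shape [b; n; m].
+//
+//     let u = dsharp.tensor [1.; 2.]
+//     let v = dsharp.tensor [3.; 4.; 5.]
+//     (u.outer(v)).shape   // [|2; 3|]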
diff --git a/src/TensorMath/Op.Solve.fs b/src/TensorMath/Op.Solve.fs
new file mode 100644
index 0000000..ba3bf93
--- /dev/null
+++ b/src/TensorMath/Op.Solve.fs
@@ -0,0 +1,17 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+[<AutoOpen>]
+module OpSolveExtensions =
+
+ type Tensor with
+ member a.solve(b:Tensor) =
+ let _ = Shape.checkCanSolve a.shape b.shape
+ TensorC(a.primalRaw.SolveTT(b.primalRaw))
+
+ type dsharp with
+ static member solve(a:Tensor, b:Tensor) = a.solve(b)
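+
+// Illustrative sketch for solve and the sibling det/inv extensions, assuming a
+// constructor such as dsharp.tensor (not shown in this diff): solve returns x such
+// that A x = b for a square A, or a batch of such systems.
+//
+//     let A = dsharp.tensor [[3.; 1.]; [1.; 2.]]
+//     let b = dsharp.tensor [9.; 8.]
+//     A.solve(b)          // tensor([2., 3.])
+//     A.det()             // tensor(5.)
+//     A.inv().matmul(b)   // same solution, but less numerically stable than solve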
diff --git a/src/TensorMath/Printer.fs b/src/TensorMath/Printer.fs
new file mode 100644
index 0000000..0b08513
--- /dev/null
+++ b/src/TensorMath/Printer.fs
@@ -0,0 +1,39 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+type Printer =
+ | Default
+ | Short
+ | Full
+ | Custom of threshold: int * edgeItems: int * precision: int
+
+ member p.threshold =
+ match p with
+ | Default -> 100
+ | Short -> 10
+ | Full -> System.Int32.MaxValue
+ | Custom(t, _, _) -> t
+
+ member p.edgeItems =
+ match p with
+ | Default -> 3
+ | Short -> 2
+ | Full -> -1
+ | Custom(_, e, _) -> e
+
+ member p.precision =
+ match p with
+ | Default -> 4
+ | Short -> 2
+ | Full -> 4
+ | Custom(_, _, p) -> p
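+
+// Illustrative sketch (values assumed): a Custom printer with threshold 6, edgeItems 2
+// and precision 3 abbreviates any dimension of size >= 6 to "v1, v2, ..., v5, v6" and
+// formats floats with 3 decimal places:
+//
+//     let p = Custom(threshold = 6, edgeItems = 2, precision = 3)
+//     p.threshold, p.edgeItems, p.precision   // (6, 2, 3)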
+
+/// Contains functions and settings related to print options.
+module Printer =
+
+ /// Get or set the default printer used when printing tensors. Note, use dsharp.config(...) instead.
+ let mutable Default : Printer = Printer.Default
\ No newline at end of file
diff --git a/src/TensorMath/RawTensor.fs b/src/TensorMath/RawTensor.fs
new file mode 100644
index 0000000..2784110
--- /dev/null
+++ b/src/TensorMath/RawTensor.fs
@@ -0,0 +1,918 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace rec TensorMath.Backends
+
+open System
+open TensorMath
+open TensorMath.Util
+
+/// <summary>
+///   Represents the static functionality for tensors implemented by a TensorMath backend.
+/// </summary>
+///
+/// <namespacedoc>
+///   <summary>Contains types and functionality related to backend implementations for TensorMath.</summary>
+/// </namespacedoc>
+[<AbstractClass>]
+type BackendTensorStatics() =
+ // cache for most recently accessed backend
+    static let hook = BackendFunctionality<BackendTensorStatics>()
+
+ /// Sets the seed for the default random number generator of the backend
+ abstract Seed: seed:int -> unit
+
+ /// Gets the scalar 0 tensor for the given device
+ abstract Zero: dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with arbitrary values for the given shape and device
+ abstract Empty: shape:Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with zeros for the given shape and device
+ abstract Zeros: shape:Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets the scalar 1 tensor for the given device
+ abstract One: dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with ones for the given shape and device
+ abstract Ones: shape:Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with the given value for the given shape and device
+ abstract Full: shape:Shape * value: scalar * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with random values for the given shape and device
+ abstract Random: shape:Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with random values from the normal distribution for the given shape and device
+ abstract RandomNormal: shape:Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets a tensor filled with random integers from the given range for the given shape and device
+ abstract RandomInt: shape:Shape * low:int * high:int * dtype: Dtype * device: Device -> RawTensor
+
+ /// Gets the devices supported by this backend
+ abstract GetDevices: ?deviceType: DeviceType -> Device list
+
+ /// Indicates if a device type is supported by this backend
+ abstract IsDeviceTypeAvailable: deviceType: DeviceType -> bool
+
+ /// Seed all backends with the given random seed, or a new seed based on the current time
+ /// if no seed is specified.
+ static member Seed(?seed:int) =
+ let seed = defaultArg seed (int DateTime.Now.Ticks)
+ Random.Seed(seed) // Do not remove. util.Random seed would be set by the Reference backend if it's currently loaded. However we still need to keep this here to ensure util.Random seed is set (it may be used in code other than the Reference backend).
+ for KeyValue(_, backend) in hook.Backends do
+ backend.Seed(seed)
+
+ /// Create a tensor of appropriate dtype from a scalar or array of appropriate values.
+ /// A backend type is delivered consistent with in-memory data - a type for dtype Int32 gets int32 data etc.
+ abstract CreateFromFlatArray: data: System.Array * shape: Shape * dtype: Dtype * device: Device -> RawTensor
+
+ /// Get the backend implementation for the given tensor element type and backend.
+ static member Get(?backend: Backend) =
+ hook.Get(?backend=backend)
+
+/// <summary>
+///   Represents a raw (i.e. non-differentiable immutable) tensor implemented by a TensorMath backend.
+/// </summary>
+///
+/// <remarks>
+///   Each backend will provide one or more .NET implementations of this type, which may in turn
+///   wrap handles to native implementations.
+/// </remarks>
+[<AbstractClass>]
+type RawTensor() =
+
+ /// Gets the shape of the tensor
+ abstract Shape: Shape
+
+ /// Gets the dimensionality of the tensor
+ abstract Dim: int
+
+ /// Gets the number of elements in the tensor
+ // TODO: int32 might not be enough for very large tensors
+ abstract Nelement: int
+
+ /// Gets the element storage type for the tensor
+ abstract Dtype: Dtype
+
+ /// Gets the device for the tensor
+ abstract Device: Device
+
+ /// Gets the device type for the tensor
+ abstract DeviceType: DeviceType
+
+ /// Gets the backend for the tensor
+ abstract Backend: Backend
+
+    /// Gets a handle to the underlying representation of the tensor. For example, if the Torch
+    /// backend is used this will be the corresponding TorchSharp TorchTensor.
+ abstract Handle: obj
+
+ override t.ToString() = t.Print()
+
+ /// Gets a tensor containing arbitrary values for the given shape and configuration
+ static member Empty(shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Empty(shape, dtype, device)
+
+ /// Gets the scalar zero tensor for the given configuration
+ static member Zero(?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Zero(dtype, device)
+
+ /// Gets the zero tensor for the given shape and configuration
+ static member Zeros(shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Zeros(shape, dtype, device)
+
+ /// Gets the scalar 1 tensor for the given configuration
+ static member One(?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.One(dtype, device)
+
+ /// Gets a tensor filled with 1 values for the given shape and configuration
+ static member Ones(shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Ones(shape, dtype, device)
+
+ /// Gets a tensor filled with the given value for the given shape and configuration
+ static member Full(shape:Shape, value, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Full(shape, value, dtype, device)
+
+ /// Gets a tensor filled with random values for the given shape and configuration
+ static member Random(shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.Random(shape, dtype, device)
+
+ /// Gets a tensor filled with random values from the normal distribution for the given shape and configuration
+ static member RandomNormal(shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.RandomNormal(shape, dtype, device)
+
+ /// Gets a tensor filled with random integer values from the given range for the given shape and configuration
+ static member RandomInt(shape:Shape, low, high, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+ let device = defaultArg device Device.Default
+ statics.RandomInt(shape, low, high, dtype, device)
+
+    /// <summary>
+    ///   Gets a tensor filled with values drawn from the given .NET object.
+    /// </summary>
+    ///
+    /// <remarks>
+    ///   The value may be a scalar, an array, or an array of tupled objects. If the dtype is not specified
+    ///   then it is inferred from the .NET type of the object.
+    /// </remarks>
+ static member Create(values: obj, ?dtype, ?device, ?backend) =
+ // We deliver consistent in-memory data to the backend - a dtype Int32 gets int32 etc.
+ let data, shape, dtype2 =
+ match dtype with
+ | Some Dtype.Int64 ->
+ let a,s = DataConverter.dataOfValuesForInt64 values
+ (a :> Array), s, Dtype.Int64
+ | Some Dtype.Int32 ->
+ let a,s = DataConverter.dataOfValuesForInt32 values
+ (a :> Array), s, Dtype.Int32
+ | Some Dtype.Int16 ->
+ let a,s = DataConverter.dataOfValuesForInt16 values
+ (a :> Array), s, Dtype.Int16
+ | Some Dtype.Int8 ->
+ let a,s = DataConverter.dataOfValuesForInt8 values
+ (a :> Array), s, Dtype.Int8
+ | Some Dtype.Byte ->
+ let a,s = DataConverter.dataOfValuesForByte values
+ (a :> Array), s, Dtype.Byte
+ | Some Dtype.Bool ->
+ let a,s = DataConverter.dataOfValuesForBool values
+ (a :> Array), s, Dtype.Bool
+ | Some Dtype.Float64 ->
+ let a,s = DataConverter.dataOfValuesForFloat64 values
+ (a :> Array), s, Dtype.Float64
+ | Some Dtype.Float32 ->
+ let a,s = DataConverter.dataOfValuesForFloat32 values
+ (a :> Array), s, Dtype.Float32
+ | Some Dtype.Float16 ->
+ let a,s = DataConverter.dataOfValuesForFloat32 values
+ (a :> Array), s, Dtype.Float16
+ | Some Dtype.BFloat16 ->
+ let a,s = DataConverter.dataOfValuesForFloat32 values
+ (a :> Array), s, Dtype.BFloat16
+ // If no dtype is given, use a dtype inferred from the given data. This is consistent with PyTorch's behavior.
+ | None ->
+                match values |> DataConverter.tryFlatArrayAndShape<float32> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Float32)
+                | _ ->
+                // Exception: If data is double and no dtype is given by the user, prefer a Float32 tensor
+                match values |> DataConverter.tryFlatArrayAndShape<double> with
+                | Some (values, shape) -> ((values |> Array.map float32 :> Array), shape, Dtype.Float32)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<int64> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Int64)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<int32> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Int32)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<int16> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Int16)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<bool> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Bool)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<byte> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Byte)
+                | _ ->
+                match values |> DataConverter.tryFlatArrayAndShape<int8> with
+                | Some (values, shape) -> ((values :> Array), shape, Dtype.Int8)
+                | _ ->
+                failwithf "Cannot create tensor from data: %A" values
+
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let device = defaultArg device Device.Default
+
+ statics.CreateFromFlatArray(data, shape, dtype2, device)
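+
+    // Illustrative sketch of the inference above (values assumed): with no dtype given,
+    // float32 data stays Float32, double data is converted down to Float32, and int data
+    // becomes Int32.
+    //
+    //     RawTensor.Create([| 1.0f; 2.0f |])   // dtype Float32
+    //     RawTensor.Create([| 1.0; 2.0 |])     // converted to float32, dtype Float32
+    //     RawTensor.Create([| 1; 2 |])         // dtype Int32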
+
+ static member CreateFromFlatArray(values: Array, shape:Shape, ?dtype, ?device, ?backend) =
+ let statics = BackendTensorStatics.Get(?backend=backend)
+ let dtype = defaultArg dtype Dtype.Default
+        let device = defaultArg device Device.Default
+ statics.CreateFromFlatArray(values, shape, dtype, device)
+
+ /// Gets a tensor filled with values drawn from the given .NET object for the
+ /// given configuration settings, defaulting to the configuration settings of the object tensor.
+ member t.CreateLike(values: obj, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Create(values, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with arbitrary values for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.EmptyLike(shape: Shape, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Empty(shape=shape, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a zero tensor for the given configuration settings, defaulting to the configuration settings of the object tensor
+ member t.ZeroLike(?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Zero(dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with zero values for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.ZerosLike(shape: Shape, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Zeros(shape=shape, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a scalar one tensor for the given configuration settings, defaulting to the configuration settings of the object tensor
+ member t.OneLike(?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.One(dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with one values for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.OnesLike(shape: Shape, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Ones(shape=shape, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with the given scalar value for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.FullLike(shape: Shape, value: scalar, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Full(shape, value, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with random values for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.RandomLike(shape: Shape, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.Random(shape=shape, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with random values from a normal distribution for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.RandomNormalLike(shape: Shape, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.RandomNormal(shape=shape, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Gets a tensor filled with random integer values from the given range for the given shape and configuration settings,
+ /// defaulting to the configuration settings of the object tensor
+ member t.RandomIntLike(shape: Shape, low:int, high:int, ?dtype: Dtype, ?device: Device, ?backend: Backend) =
+ RawTensor.RandomInt(shape=shape, low=low, high=high, dtype=defaultArg dtype t.Dtype, device=defaultArg device t.Device, backend=defaultArg backend t.Backend)
+
+ /// Clone the underlying storage of the tensor.
+ abstract Clone: unit -> RawTensor
+
+ /// Expand the shape of the tensor.
+ abstract Expand: newShape: Shape -> RawTensor
+
+ /// Stack the given tensors along the given dimension
+ abstract StackTs: tensors: RawTensor[] * dim:int -> RawTensor
+
+    /// Unstack the tensor along the given dimension, returning the constituent tensors
+ abstract UnstackT: dim:int -> RawTensor[]
+
+ /// Concatenate the given tensors along the given dimension
+ abstract CatTs: tensors: RawTensor[] * dim: int -> RawTensor
+
+    /// Split the tensor into parts of the given sizes along the given dimension
+ abstract SplitT: sizes: int[] * dim: int -> RawTensor[]
+
+    /// <summary>Get a slice of the given tensor.</summary>
+    ///
+    /// <remarks>
+    ///   The bounds are an Nx3 array. For each dimension, the first column is the start index,
+    ///   the second column is the end index, and the third column is 1/0 indicating whether the
+    ///   dimension should be removed from the result.
+    /// </remarks>
+ abstract GetSlice: fullBounds: int[,] -> RawTensor
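+    // Illustrative sketch (bounds values assumed): to take t[1..2, 0] from a 2-d tensor,
+    // keeping the first dimension and removing the second:
+    //
+    //     let fullBounds = array2D [ [1; 2; 0]    // dim 0: start=1, stop=2, keep
+    //                                [0; 0; 1] ]  // dim 1: start=0, stop=0, remove
+    //     t.GetSlice(fullBounds)   // result has shape [2]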
+
+ /// Gets a .NET object representing the value of the tensor at the given indexes
+    abstract GetItem: [<System.ParamArray>] indexes: int[] -> scalar
+
+ /// Gets a .NET object representing the value of a scalar tensor
+ abstract ToScalar: unit -> scalar
+
+ /// Get a .NET object for all the values in the tensor.
+ ///
+ /// The runtime type of the returned object is either a .NET scalar
+ /// or array corresponding to the shape and element type of the tensor.
+ abstract ToValues: unit -> obj
+
+ /// Compare two tensors for equality
+ abstract Equals: t2: RawTensor -> bool
+
+ /// Returns a tensor where the elements have each been cast to the given tensor element storage type.
+ abstract Cast: dtype: Dtype -> RawTensor
+
+ /// Returns a tensor moved to the given device.
+ abstract MoveTo: device: Device -> RawTensor
+
+ /// Returns a hash of the contents of the tensor. This operation may cause the
+ /// tensor to be moved to the CPU, and its entire contents iterated.
+ abstract ComputeHash: unit -> int
+
+ /// Indicates if the two tensors have the same shape and element type, and all corresponding values
+ /// are equal up to the given tolerances.
+ abstract AllClose: t2: RawTensor * relativeTolerance: float * absoluteTolerance: float -> bool
+
+ /// Returns a tensor with values constrained by the corresponding elements in the low/high tensors.
+ abstract ClampT: low: RawTensor * high: RawTensor -> RawTensor
+
+ /// Returns a tensor selecting the given indices from the given dimension and stacking those in the order specified.
+ abstract GatherT: dim: int * indices: RawTensor -> RawTensor
+
+ /// Returns a tensor with given destination shape where values are copied from the current tensor to locations specified by the dimension and indices.
+ abstract ScatterT: dim: int * indices: RawTensor * destinationShape: Shape -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract LtTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract GtTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract LeTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract GeTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract EqTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor comparing each element pairwise with the corresponding element in t2
+ abstract NeqTT: t2: RawTensor -> RawTensor
+
+ /// Returns a boolean tensor where each element indicates if the corresponding element in the tensor is an infinity value
+ abstract IsInfT: unit -> RawTensor
+
+ /// Returns a boolean tensor where each element indicates if the corresponding element in the tensor is a NaN value
+ abstract IsNaNT: unit -> RawTensor
+
+ /// Gets a tensor containing values and indexes of a maximum value of the tensor reducing along the given dimension
+ abstract MaxReduceT: dim: int * keepdim: bool -> RawTensor * RawTensor
+
+ /// Gets the index of a maximum value of the tensor
+ abstract MaxIndexT: unit -> int[]
+
+ /// Gets a tensor containing values and indexes of a minimum value of the tensor reducing along the given dimension
+ abstract MinReduceT: dim: int * keepdim: bool -> RawTensor * RawTensor
+
+ /// Gets the index of a minimum value of the tensor
+ abstract MinIndexT: unit -> int[]
+
+ /// Returns the element-wise addition of the two tensors
+ abstract AddTT: RawTensor * ?alpha: scalar -> RawTensor
+
+ /// Returns the element-wise addition of a tensor and a scalar
+ abstract AddTT0: b: scalar * ?alpha: scalar -> RawTensor
+
+ /// Adds a slice of t2 at the given location to the tensor
+ abstract AddTTSlice: location: int[] * t2: RawTensor -> RawTensor
+
+ /// Returns the element-wise subtraction of two tensors
+ abstract SubTT: t2: RawTensor -> RawTensor
+
+ /// Returns the element-wise subtraction of the scalar and a tensor, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract SubFromT0T: t1: scalar -> RawTensor
+
+ /// Returns the element-wise subtraction of the tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract SubTT0: t2: scalar -> RawTensor
+
+ /// Returns the element-wise multiplication of two tensors
+ abstract MulTT: t2: RawTensor -> RawTensor
+
+ /// Returns the element-wise multiplication of a tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract MulTT0: t2: scalar -> RawTensor
+
+ /// Returns the element-wise division of two tensors
+ abstract DivTT: t2: RawTensor -> RawTensor
+
+ /// Returns the element-wise division of a scalar by a tensor, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract DivFromT0T: t1: scalar -> RawTensor
+
+ /// Returns the element-wise division of a tensor by a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract DivTT0: t2: scalar -> RawTensor
+
+ /// Returns the element-wise exponentiation of two tensors
+ abstract PowTT: t2: RawTensor -> RawTensor
+
+ /// Returns the element-wise exponentiation of a scalar and a tensor, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract PowFromT0T: t1: scalar -> RawTensor
+
+ /// Returns the element-wise exponentiation of a tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract PowTT0: t2: scalar -> RawTensor
+
+ /// Returns the matrix multiplication of two tensors
+ abstract MatMulTT: t2: RawTensor -> RawTensor
+
+ /// Returns the batched matrix multiplication of two tensors
+ abstract BMMTT: t2: RawTensor -> RawTensor
+
+ /// Returns the 1D maxpool of a tensor and its chosen maximum indices
+ abstract MaxPool1D: kernelSize: int * stride: int * padding: int -> RawTensor * RawTensor
+
+ /// Returns the 2D maxpool of a tensor and its chosen maximum indices
+ abstract MaxPool2D: kernelSize: int[] * strides: int[] * padding: int[] -> RawTensor * RawTensor
+
+ /// Returns the 3D maxpool of a tensor and its chosen maximum indices
+ abstract MaxPool3D: kernelSize: int[] * strides: int[] * padding: int[] -> RawTensor * RawTensor
+
+ /// Returns the 1D maxunpool of a tensor using the given indices for locations of maximums
+ abstract MaxUnpool1D: indices: RawTensor * outputSize: int[] -> RawTensor
+
+ /// Returns the 2D maxunpool of a tensor using the given indices for locations of maximums
+ abstract MaxUnpool2D: indices: RawTensor * outputSize: int[] -> RawTensor
+
+ /// Returns the 3D maxunpool of a tensor using the given indices for locations of maximums
+ abstract MaxUnpool3D: indices: RawTensor * outputSize: int[] -> RawTensor
+
+ /// Returns the 1D avgpool of a tensor
+ abstract AvgPool1D: kernelSize: int * stride: int * padding: int (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+ /// Returns the 2D avgpool of a tensor
+ abstract AvgPool2D: kernelSize: int[] * stride: int[] * padding: int[] (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+    /// Returns the 3D avgpool of a tensor
+ abstract AvgPool3D: kernelSize: int[] * stride: int[] * padding: int[] (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+ /// Returns the reverse mode of a 1D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input
+ /// The originalInput parameter is only used for shape information
+ abstract AvgPoolReverse1D: originalInput: RawTensor * kernelSize: int * stride: int * padding: int (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+ /// Returns the reverse mode of a 2D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input
+ /// The originalInput parameter is only used for shape information
+ abstract AvgPoolReverse2D: originalInput: RawTensor * kernelSize: int[] * stride: int[] * padding: int[] (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+ /// Returns the reverse mode of a 3D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input
+ /// The originalInput parameter is only used for shape information
+ abstract AvgPoolReverse3D: originalInput: RawTensor * kernelSize: int[] * stride: int[] * padding: int[] (* * ceil_mode: bool * count_include_pad: bool *) -> RawTensor
+
+ /// Returns the 1D convolution of the tensor
+ abstract Conv1D: kernel: RawTensor * stride: int * padding: int -> RawTensor
+
+ /// Returns the 2D convolution of the tensor
+ abstract Conv2D: kernel: RawTensor * strides: int[] * padding: int[] -> RawTensor
+
+ /// Returns the 3D convolution of the tensor
+ abstract Conv3D: kernel: RawTensor * strides: int[] * padding: int[] -> RawTensor
+
+ /// Returns a view of the original tensor with its dimensions permuted
+ abstract PermuteT: permutation: int[] -> RawTensor
+
+ /// Returns the element-wise negation of the tensor
+ abstract NegT: unit -> RawTensor
+
+ /// Returns the scalar tensor for the summation of all elements in the tensor
+ abstract SumT: ?resultType: Dtype -> RawTensor
+
+ /// Returns the tensor representing the summation of the tensor along the given dimension
+ abstract SumTDim: dim: int * ?resultType: Dtype -> RawTensor
+
+ /// Returns the transpose of the tensor between the given dimensions
+ abstract TransposeT: dim0: int * dim1: int -> RawTensor
+
+ /// Returns the transpose of a 2D tensor
+ abstract TransposeT2: unit -> RawTensor
+
+ /// Returns the inverse of a single square matrix (2d tensor) or a batch of square matrices (3d tensor)
+ abstract InverseT: unit -> RawTensor
+
+ /// Returns the determinant of a square matrix
+ abstract DetT: unit -> RawTensor
+
+    /// Returns the solution of a single square system of linear equations with a unique solution, or a batch of several such systems
+ abstract SolveTT: RawTensor -> RawTensor
+
+ /// Returns the tensor with the same values and the given dimension removed. The given dimension must be of size 1.
+ abstract SqueezeT: dim: int -> RawTensor
+
+ /// Returns the tensor with the same values and a dimension of size 1 inserted before the given dimension.
+ abstract UnsqueezeT: dim: int -> RawTensor
+
+ /// Returns the flip of the tensor along the given dimensions
+ abstract FlipT: dims: int[] -> RawTensor
+
+ /// Returns the dilation of the tensor using the given dilations parameters
+ abstract DilateT: dilations: int[] -> RawTensor
+
+ /// Returns the reverse of the dilation of the tensor using the given dilations parameters
+ abstract UndilateT: dilations: int[] -> RawTensor
+
+ /// Returns the tensor with the same values viewed as a different shape
+ abstract ViewT: shape: Shape -> RawTensor
+
+ /// Returns the element-wise sign of the tensor
+ abstract SignT: unit -> RawTensor
+
+ /// Returns the element-wise integer floor of the tensor
+ abstract FloorT: unit -> RawTensor
+
+ /// Returns the element-wise integer ceiling of the tensor
+ abstract CeilT: unit -> RawTensor
+
+ /// Returns the element-wise rounding of the tensor
+ abstract RoundT: unit -> RawTensor
+
+ /// Returns the element-wise absolute value of the tensor
+ abstract AbsT: unit -> RawTensor
+
+ /// Returns the element-wise ReLU of the tensor
+ abstract ReluT: unit -> RawTensor
+
+ /// Returns the element-wise softplus of the tensor
+ abstract SoftplusT: unit -> RawTensor
+
+ /// Returns the element-wise sigmoid of the tensor
+ abstract SigmoidT: unit -> RawTensor
+
+ /// Returns the element-wise natural exponentiation of the tensor
+ abstract ExpT: unit -> RawTensor
+
+ /// Returns the element-wise natural logarithm of the tensor
+ abstract LogT: unit -> RawTensor
+
+ /// Returns the element-wise base10 logarithm of the tensor
+ abstract Log10T: unit -> RawTensor
+
+ /// Returns the element-wise square root of the tensor
+ abstract SqrtT: unit -> RawTensor
+
+ /// Returns the element-wise sine of the tensor
+ abstract SinT: unit -> RawTensor
+
+ /// Returns the element-wise cosine of the tensor
+ abstract CosT: unit -> RawTensor
+
+ /// Returns the element-wise tangent of the tensor
+ abstract TanT: unit -> RawTensor
+
+ /// Returns the element-wise sinh of the tensor
+ abstract SinhT: unit -> RawTensor
+
+ /// Returns the element-wise cosh of the tensor
+ abstract CoshT: unit -> RawTensor
+
+ /// Returns the element-wise tanh of the tensor
+ abstract TanhT: unit -> RawTensor
+
+ /// Returns the element-wise asin of the tensor
+ abstract AsinT: unit -> RawTensor
+
+    /// Returns the element-wise acos of the tensor
+ abstract AcosT: unit -> RawTensor
+
+ /// Returns the element-wise atan of the tensor
+ abstract AtanT: unit -> RawTensor
+
+ default t.IsInfT() =
+ match t.Dtype with
+ | Dtype.IntegralOrBool -> t.FullLike(t.Shape, false, dtype=Dtype.Bool)
+ | _ -> t.AbsT().EqTT(t.FullLike(t.Shape,System.Single.PositiveInfinity))
+
+ default t.IsNaNT() =
+ match t.Dtype with
+ | Dtype.IntegralOrBool -> t.FullLike(t.Shape, false, dtype=Dtype.Bool)
+ | _ -> t.NeqTT(t)
+
+ member t.Print(?postfix: string) =
+ // TODO: this code is not ideal and can be reimplemented to be cleaner and more efficient
+ let postfix = defaultArg postfix ""
+ if t.Nelement = 0 then sprintf "tensor([])%s" postfix
+ else
+ let threshold = Printer.Default.threshold
+ let edgeItems = Printer.Default.edgeItems
+ let precision = Printer.Default.precision
+
+ let vmin = t.GetItem(t.MinIndexT()).toDouble()
+ let vmax = t.GetItem(t.MaxIndexT()).toDouble()
+ let absMax = max (abs vmin) (abs vmax)
+ let precisionStr = (String.replicate precision "0")
+ let floatMaxStrLen1 = System.String.Format("{0:G"+precision.ToString()+"}", absMax).Length
+ let floatMaxStrLen2 = System.String.Format("{0:0."+precisionStr+"}", absMax).Length
+ let floatFormat1 = "{0,"+floatMaxStrLen1.ToString()+":G"+precision.ToString()+"}"
+ let floatFormat2 = "{0,"+floatMaxStrLen2.ToString()+":0."+precisionStr+"}"
+ let floatFormat3 = "{0,"+floatMaxStrLen2.ToString()+": 0."+precisionStr+";-0."+precisionStr+"}"
+ let floatNoDecimals = t.Dtype.IsFloatingPoint && (let tt = t.Cast(Dtype.Float64) in tt.CeilT().Equals(tt))
+ let floatNonNegative = t.Dtype.IsFloatingPoint && (let tt = t.Cast(Dtype.Float64) in tt.AbsT().Equals(tt))
+ let printFloat (v:float) =
+ if absMax >= 1.e8 || floatNoDecimals then
+ let p = System.String.Format(floatFormat1, v)
+ if p.Contains(".") || p.Contains("e") || p.Contains("E") || p.Contains("NaN") || p.Contains("Inf") || p.Contains("∞") then p else p + "."
+ elif floatNonNegative then
+ System.String.Format(floatFormat2, v)
+ else
+ System.String.Format(floatFormat3, v)
+
+ let intMaxStrLen = System.String.Format("{0:D}", int64 (if vmin < 0. then -absMax else absMax)).Length
+ let intFormat = "{0,"+intMaxStrLen.ToString()+":D}"
+ let printInt (v:int64) =
+ System.String.Format(intFormat, v)
+
+ let printVal (x:scalar) =
+ match x.GetTypeCode() with
+ | TypeCode.Single -> printFloat (x.toDouble())
+ | TypeCode.Double -> printFloat (x.toDouble())
+ | TypeCode.Int32 -> printInt (x.toInt64())
+ | TypeCode.Int64 -> printInt (x.toInt64())
+ | TypeCode.Byte -> printInt (x.toInt64())
+ | TypeCode.SByte -> printInt (x.toInt64())
+ | TypeCode.Int16 -> printInt (x.toInt64())
+ | TypeCode.Boolean -> if (x.toBool()) then " true" else "false"
+ | _ -> printFloat (x.toDouble()) // Handles Float16, BFloat16
+
+ let sb = System.Text.StringBuilder()
+ sb.Append("tensor(") |> ignore
+ match t.Dim with
+ | 0 ->
+ sb.Append(printVal (t.ToScalar())) |> ignore
+ | _ ->
+ let rec print (shape:Shape) externalCoords =
+ if shape.Length = 1 then
+ sb.Append("[") |> ignore
+ let mutable prefix = ""
+ if (shape[0] >= threshold) && (edgeItems*2 < shape[0]) then
+ for i=0 to edgeItems-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ sb.Append(prefix) |> ignore
+ sb.Append(printVal (t.GetItem(globalCoords))) |> ignore
+ prefix <- ", "
+ sb.Append(", ...") |> ignore
+ for i=shape[0]-edgeItems to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ sb.Append(prefix) |> ignore
+ sb.Append(printVal (t.GetItem(globalCoords))) |> ignore
+ // prefix <- ", "
+ else
+ for i=0 to shape[0]-1 do
+ let globalCoords = Array.append externalCoords [|i|]
+ sb.Append(prefix) |> ignore
+ sb.Append(printVal (t.GetItem(globalCoords))) |> ignore
+ prefix <- ", "
+ sb.Append("]") |> ignore
+ else
+ sb.Append("[") |> ignore
+ let mutable prefix = ""
+ let prefix2 = sprintf ",%s%s" (String.replicate (max 1 (shape.Length-1)) "\n ") (String.replicate (externalCoords.Length+1) " ")
+ if (shape[0] >= threshold) && (edgeItems*2 < shape[0]) then
+ for i=0 to edgeItems-1 do
+ sb.Append(prefix) |> ignore
+ print shape[1..] (Array.append externalCoords [|i|])
+ prefix <- prefix2
+ sb.Append(prefix) |> ignore
+ sb.Append("...") |> ignore
+ for i=shape[0]-edgeItems to shape[0]-1 do
+ sb.Append(prefix) |> ignore
+ print shape[1..] (Array.append externalCoords [|i|])
+ // prefix <- prefix2
+ else
+ for i=0 to shape[0]-1 do
+ sb.Append(prefix) |> ignore
+ print shape[1..] (Array.append externalCoords [|i|])
+ prefix <- prefix2
+ sb.Append("]") |> ignore
+ print t.Shape [||]
+ if t.Dtype <> Dtype.Default then
+ sb.Append ",dtype=" |> ignore
+ sb.Append (t.Dtype.ToString()) |> ignore
+ if t.Device <> Device.Default then
+ sb.Append ",device=" |> ignore
+ sb.Append (t.Device.ToString()) |> ignore
+ if t.Backend <> Backend.Default then
+ sb.Append ",backend=" |> ignore
+ sb.Append (t.Backend.ToString()) |> ignore
+ sb.Append(")") |> ignore
+ sb.Append(postfix) |> ignore
+ sb.ToString()
+
+ override x.Equals(yobj: obj) =
+ match yobj with
+ | :? RawTensor as y -> x.Equals(y)
+ | _ -> false
+
+ override x.GetHashCode() = x.ComputeHash()
+
+ interface System.IComparable with
+ member x.CompareTo(yobj) =
+ match yobj with
+ | :? RawTensor as y -> Unchecked.compare (x.ToScalar()) (y.ToScalar())
+ | _ -> failwithf "Cannot compare RawTensor with object of type %A" (yobj.GetType())
+
+ default t.GetItem(indexes) =
+ let t0 = t.GetSlice(Array2D.init indexes.Length 3 (fun i j -> if j = 0 || j = 1 then indexes[i] else 1))
+ t0.ToScalar()
+
+ /// Returns a .NET object for the value of a scalar tensor
+ override t.ToScalar() =
+ match t.Nelement with
+ | 1 -> t.ViewT([||]).ToValues() :?> scalar
+ | _ -> failwithf "Only one element tensors can be converted to scalars. This tensor has shape %A." t.Shape
+
+ /// Returns a .NET array object for the values of a non-scalar tensor
+ member t.ToArray() =
+ match t.Dim with
+ | 0 -> failwithf "Cannot convert scalar tensor to array"
+ | _ ->
+ match t.ToValues() with
+ | :? System.Array as a -> a
+ | _ -> failwithf "ToValues() should return an array but returned type %A" (t.GetType())
+
+ /// A backdoor to switch this tensor to be usable as a mutable tensor. You should have a unique handle to
+ /// this tensor for the entire time it is being used as a mutable tensor.
+ abstract SetMutable: unit -> unit
+
+ abstract IsMutable: bool
+
+    /// Modifies the tensor to have values constrained by the corresponding elements in the low/high tensors.
+ abstract ClampInPlace: low: RawTensor * high: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract LtInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract GtInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract LeInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract GeInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract EqInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by comparing each element pairwise with the corresponding element in t2
+ abstract NeqInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise addition of the two tensors
+ abstract AddInPlace: RawTensor * ?alpha: scalar -> unit
+
+    /// Modifies the tensor by the element-wise addition of a scalar
+ abstract AddScalarInPlace: b: scalar -> unit
+
+ /// Adds a slice of t2 at the given location to the tensor
+ abstract AddSliceInPlace: location: int[] * t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise subtraction of two tensors
+ abstract SubInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise subtraction of the tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract SubScalarInPlace: b: scalar -> unit
+
+ /// Modifies the tensor by the element-wise multiplication of two tensors
+ abstract MulInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise multiplication of a tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract MulScalarInPlace: b: scalar -> unit
+
+ /// Modifies the tensor by the element-wise division of two tensors
+ abstract DivInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise division of a tensor by a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract DivScalarInPlace: t2: scalar -> unit
+
+ /// Modifies the tensor by the element-wise exponentiation of two tensors
+ abstract PowInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise exponentiation of a tensor and a scalar, where the scalar is logically
+ /// broadcast to the same shape as the tensor
+ abstract PowScalarInPlace: t2: scalar -> unit
+
+ /// Modifies the tensor by the matrix multiplication of two tensors
+ abstract MatMulInPlace: t2: RawTensor -> unit
+
+ /// Modifies the tensor by the element-wise negation of the tensor
+ abstract NegInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise sign of the tensor
+ abstract SignInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise integer floor of the tensor
+ abstract FloorInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise integer ceiling of the tensor
+ abstract CeilInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise rounding of the tensor
+ abstract RoundInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise absolute value of the tensor
+ abstract AbsInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise ReLU of the tensor
+ abstract ReluInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise softplus of the tensor
+ abstract SoftplusInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise sigmoid of the tensor
+ abstract SigmoidInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise natural exponentiation of the tensor
+ abstract ExpInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise natural logarithm of the tensor
+ abstract LogInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise base-10 logarithm of the tensor
+ abstract Log10InPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise square root of the tensor
+ abstract SqrtInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise sine of the tensor
+ abstract SinInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise cosine of the tensor
+ abstract CosInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise tangent of the tensor
+ abstract TanInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise sinh of the tensor
+ abstract SinhInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise cosh of the tensor
+ abstract CoshInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise tanh of the tensor
+ abstract TanhInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise asin of the tensor
+ abstract AsinInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise acos of the tensor
+ abstract AcosInPlace: unit -> unit
+
+ /// Modifies the tensor by the element-wise atan of the tensor
+ abstract AtanInPlace: unit -> unit
+
+ /// Modifies the tensor by setting all values to one
+ abstract OnesInPlace: unit -> unit
+
+ /// Modifies the tensor by setting all values to zero
+ abstract ZerosInPlace: unit -> unit
+
+ /// Modifies the tensor by setting it to random values taken from a uniform distribution in [0, 1).
+ abstract RandomInPlace: unit -> unit
+
+ /// Modifies the tensor by setting its values to samples taken from a normal distribution with mean 0 and variance 1.
+ abstract RandomNormalInPlace: unit -> unit
+
+ /// Modifies the tensor by setting its values to random integers from the given range
+ abstract RandomIntInPlace: low:int * high:int -> unit
+
diff --git a/src/TensorMath/Scalar.fs b/src/TensorMath/Scalar.fs
new file mode 100644
index 0000000..61b0ef0
--- /dev/null
+++ b/src/TensorMath/Scalar.fs
@@ -0,0 +1,77 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+open System
+open System.Reflection
+
+/// Represents a scalar on the TensorMath programming model
+type scalar = System.IConvertible
+
+[<AutoOpen>]
+module ScalarExtensions =
+ type System.IConvertible with
+ member inline x.toSingle() = x.ToSingle(null)
+ member inline x.toDouble() = x.ToDouble(null)
+ member inline x.toInt64() = x.ToInt64(null)
+ member inline x.toInt32() = x.ToInt32(null)
+ member inline x.toInt16() = x.ToInt16(null)
+ member inline x.toSByte() = x.ToSByte(null)
+ member inline x.toByte() = x.ToByte(null)
+ member inline x.toBool() = x.toInt32() <> 0
+ member inline x.sub(y:scalar) : scalar = (x.toDouble() - y.toDouble()) :> scalar
+ member inline x.log() : scalar = x.toDouble() |> log :> scalar
+ member inline x.neg() : scalar = -x.toDouble() :> scalar
+ member inline x.dtype =
+ let ti = x.GetTypeCode()
+ match ti with
+ | TypeCode.Double -> Dtype.Float64
+ | TypeCode.Single -> Dtype.Float32
+ | TypeCode.Int32 -> Dtype.Int32
+ | TypeCode.Int64 -> Dtype.Int64
+ | TypeCode.SByte -> Dtype.Int8
+ | TypeCode.Byte -> Dtype.Byte
+ | TypeCode.Int16 -> Dtype.Int16
+ | TypeCode.Boolean -> Dtype.Bool
+ | _ -> failwithf "unknown scalar type '%A'" x
+
+ member inline x.cast(dtype) =
+ match dtype with
+ | Dtype.Float16 -> x.toSingle() :> scalar
+ | Dtype.BFloat16 -> x.toSingle() :> scalar
+ | Dtype.Float32 -> x.toSingle() :> scalar
+ | Dtype.Float64 -> x.toDouble() :> scalar
+ | Dtype.Int8 -> x.toSByte() :> scalar
+ | Dtype.Byte -> x.toByte() :> scalar
+ | Dtype.Int32 -> x.toInt32() :> scalar
+ | Dtype.Int64 -> x.toInt64() :> scalar
+ | Dtype.Int16 -> x.toInt16() :> scalar
+ | Dtype.Bool -> x.toBool() :> scalar
+
+ // Floating point scalars force integers to widen to the default floating point type
+ //
+ // For example:
+ // >>> import torch
+ // >>> (torch.tensor([1], dtype=torch.int32) * 2.5).dtype
+ // torch.float32
+ // >>> torch.set_default_dtype(torch.float16)
+ // >>> (torch.tensor([1], dtype=torch.int32) * 2.5).dtype
+ // torch.float16
+ // >>> (torch.tensor([1], dtype=torch.int32) * 2).dtype
+ // torch.int32
+ let tryWidenScalar (tensorDtype: Dtype) (scalar: scalar) =
+ match tensorDtype, scalar.GetTypeCode() with
+ | Dtype.Integral, (TypeCode.Double | TypeCode.Single) -> ValueSome Dtype.Default
+ | _, _ -> ValueNone
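+ // Illustrative examples (assuming Dtype.Integral matches the integer dtypes):
+ //   tryWidenScalar Dtype.Int32 (2.5 :> scalar)  // ValueSome Dtype.Default
+ //   tryWidenScalar Dtype.Int32 (2 :> scalar)    // ValueNone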
+
+ let widenScalarForDivision (tensorDtype: Dtype) (scalarDtype: Dtype) =
+ match tensorDtype.IsFloatingPoint, scalarDtype.IsFloatingPoint with
+ | false, false -> Dtype.Default
+ | false, true -> Dtype.Default
+ | true, false -> tensorDtype
+ | true, true -> tensorDtype
+
+
\ No newline at end of file
diff --git a/src/TensorMath/Shape.fs b/src/TensorMath/Shape.fs
new file mode 100644
index 0000000..41581b5
--- /dev/null
+++ b/src/TensorMath/Shape.fs
@@ -0,0 +1,883 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+namespace TensorMath
+
+open TensorMath.Util
+
+/// Represents the shape of a tensor.
+type Shape = int[]
+
+/// Contains functions and values related to tensor shapes.
+module rec Shape =
+
+ /// Gets the total number of elements in the shape.
+ let nelement (shape: Shape) =
+ if shape.Length = 0 then 1
+ else Array.reduce (*) shape
+
+ /// The shape for a scalar value.
+ let scalar : Shape = [| |]
+
+ /// Indicates if one shape contains another.
+ let contains (bigShape:Shape) (smallShape: Shape) =
+ if bigShape.Length <> smallShape.Length then failwithf "Expecting bigShape (%A) and smallShape (%A) to have the same number of dimensions" bigShape.Length smallShape.Length
+ Array.map2 (<=) smallShape bigShape |> Array.forall id
+
+ /// Checks if the given shapes are appropriate for a stack operation and returns information related to the resulting shape.
+ let checkCanStack (shapes:Shape[]) (dim: int) =
+ if not (Seq.allEqual shapes) then failwithf "Cannot stack tensors with different shapes: %A" shapes
+ let n = shapes.Length
+ if n = 0 then failwithf "Expecting a non-empty sequence of tensors"
+ let shape = shapes[0]
+ if dim < 0 || dim > shape.Length then failwithf "Expecting 0 <= dim (%A) <= %A" dim shape.Length
+ let shape1 = shape[0..dim-1]
+ let shape2 = shape[dim..]
+ let outputShape = [| yield! shape1; yield n; yield! shape2 |]
+ n, shape1, shape2, outputShape
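+ // For example (illustrative): stacking three tensors of shape [|2;3|] at dim 1
+ // gives n = 3, shape1 = [|2|], shape2 = [|3|] and outputShape = [|2;3;3|].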
+
+ /// Checks if the given shapes are appropriate for a GetSlice operation and returns information related to the resulting shape.
+ let checkCanGetSlice (shape: Shape) (fullBounds: int[,]) =
+ if Array2D.length1 fullBounds <> shape.Length then failwithf "Expecting %i-by-3 fullBounds" shape.Length
+ let outputShape =
+ [|for i=0 to (fullBounds.GetLength(0) - 1) do
+ let len = fullBounds[i,1] - fullBounds[i,0] + 1
+ if fullBounds[i, 2] = 1 then
+ if len > 1 then yield len // if len=1 then squeeze this dimension
+ else
+ yield len|]
+ outputShape
+
+ /// Checks if the given index is valid in the context of the given shape.
+ let checkCanIndex (shape: int[]) (index: int[]) =
+ if shape.Length <> index.Length then failwithf "Expecting shape (%A) and index (%A) to have the same length" shape index
+ let valid = Array.forall2 (fun s i -> (i < s) && (i >= 0)) shape index
+ if not valid then failwithf "index (%A) is not valid for shape (%A)" index shape
+
+ /// Computes the shape that results from a dilation operation.
+ let dilated (shape: Shape) (dilations: int[]) =
+ Array.map2 (fun n d -> n + (n - 1) * (d - 1)) shape dilations
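+ // For example (illustrative): dilated [|4|] [|2|] = [|7|], since each of the
+ // 3 gaps between the 4 elements receives d - 1 = 1 extra slot: 4 + 3*1 = 7.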
+
+ /// Checks if the given shapes are appropriate for a concatenation operation and returns information related to the resulting shape.
+ let checkCanCat (shapes: Shape[]) (dim: int) =
+ let n = shapes.Length
+ if n = 0 then invalidArg "tensors" "Expecting at least one tensor"
+ let shape = shapes[0]
+ if dim < 0 || dim >= shape.Length then invalidArg "dim" "invalid dimension"
+ let shape1 = shape[0..dim-1]
+ let shape3 = shape[dim+1..]
+ if shapes |> Array.exists (fun shapeOther -> shapeOther[0..dim-1] <> shape1 || shapeOther[dim+1..] <> shape3) then
+ invalidArg "tensors" "Expecting tensors with similar shapes"
+ let m2 = shapes |> Array.sumBy (fun shape -> shape[dim])
+ let outputShape = [| yield! shape1; yield m2; yield! shape3 |]
+ n, shape1, m2, shape3, outputShape
+
+ /// Checks if the given shapes are appropriate for a split operation and returns information related to the resulting shape.
+ let checkCanSplit (shape: Shape) (sizes: int[]) (dim: int) =
+ if dim < 0 || dim >= shape.Length then invalidArg "dim" "invalid dimension"
+ if Array.sum sizes <> shape[dim] then invalidArg "sizes" "the sum of sizes must equal the relevant dimension"
+ let shape1 = shape[0..dim-1]
+ let shape2 = shape[dim+1..]
+ let outputShapes = sizes |> Array.map (fun sz -> [| yield! shape1; yield sz; yield! shape2 |])
+ outputShapes
+
+ /// Checks if the given shapes are appropriate for an unstack operation and returns information related to the resulting shape.
+ let checkCanUnstack (shape: Shape) (dim: int) =
+ if shape.Length < 1 then failwith "Cannot unstack scalar Tensor (dim < 1)"
+ if dim < 0 || dim >= shape.Length then invalidArg "dim" "invalid dimension"
+ let shape1 = shape[0..dim-1]
+ let shape2 = shape[dim+1..]
+ let outputShape = Array.append shape1 shape2
+ shape1, shape2, outputShape
+
+ /// Checks if the given shapes are appropriate for a transpose operation and returns information related to the resulting shape.
+ let computeTranspose2d (shape: Shape) =
+ let nrows = shape[0]
+ let ncols = shape[1]
+ let outputShape = [| ncols; nrows |]
+ outputShape
+
+ /// Checks if the two device types are equal.
+ let checkDeviceTypes (deviceType1: DeviceType) (deviceType2: DeviceType) =
+ if deviceType1 <> deviceType2 then failwithf "Expecting input device types %A and %A to be the same" deviceType1 deviceType2
+
+ /// Checks if the two tensor element types are equal.
+ let checkDtypes (dtype1: Dtype) (dtype2: Dtype) =
+ if dtype1 <> dtype2 then failwithf "Expecting input tensor types %A and %A to be the same" dtype1 dtype2
+
+ /// Check if the tensor element type is appropriate for a convolution operation.
+ let private checkConvDType op (dtype: Dtype) =
+ match dtype with
+ | Dtype.Bool -> opNotSupported op dtype
+ | _ -> ()
+
+ /// Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.
+ let checkCanConv1d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1:Shape) (shape2:Shape) (stride: int) (padding: int) (dilation: int) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "conv1d" dtype1
+ if shape1.Length <> 3 || shape2.Length <> 3 then failwithf "Expecting two 3d tensors t1, t2 where t1 is input (NxCxI: batchSize x inputChannels x inputLength) and t2 is filters (KxCxF: outputChannels x inputChannels x kernelLength), received tensors with shapes %A, %A" shape1 shape2
+ if padding < 0 then failwithf "Expecting padding (%A) >= 0" padding
+ if stride < 1 then failwithf "Expecting stride (%A) >= 1" stride
+ if dilation < 1 then failwithf "Expecting dilation (%A) >= 1" dilation
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputLength = shape1[2]
+ let outputChannels = shape2[0]
+ let filtersChannels = shape2[1]
+ let kernelLength = shape2[2]
+ let inputLengthAfterPadding = inputLength + 2*padding
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ if kernelLength > inputLengthAfterPadding then failwithf "Expecting kernelLength (%A) <= inputLengthAfterPadding (%A)" kernelLength inputLengthAfterPadding
+ let outputSize = int (floor (float (inputLengthAfterPadding - kernelLength)/(float stride))) + 1
+ let outputShape = [|batchSize; outputChannels; outputSize|]
+ batchSize, inputChannels, kernelLength, outputChannels, outputSize, outputShape
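+ // Worked example (illustrative): inputLength = 10, padding = 1, kernelLength = 3,
+ // stride = 2 gives inputLengthAfterPadding = 12 and
+ // outputSize = floor((12 - 3) / 2) + 1 = 5.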
+
+ /// Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.
+ let checkCanConv2d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1: Shape) (shape2: Shape) (strides: int[]) (paddings: int[]) (dilations: int[]) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "conv2d" dtype1
+ if shape1.Length <> 4 || shape2.Length <> 4 then failwithf "Expecting two 4d tensors t1, t2 where t1 is input, NxCxHxW (batchSize x inputChannels x inputHeight x inputWidth) and t2 is filters, KxCxFxG (outputChannels x inputChannels x kernelHeight x kernelWidth), received tensors with shapes %A, %A" shape1 shape2
+ if strides.Length <> 2 then failwithf "Expecting strides (%A) to be a length-two array" strides
+ if paddings.Length <> 2 then failwithf "Expecting paddings (%A) to be a length-two array" paddings
+ if dilations.Length <> 2 then failwithf "Expecting dilations (%A) to be a length-two array" dilations
+ if paddings[0] < 0 || paddings[1] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if strides[0] < 1 || strides[1] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ if dilations[0] < 1 || dilations[1] < 1 then failwithf "Expecting all dilations (%A) >= 1" dilations
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputHeight = shape1[2]
+ let inputWidth = shape1[3]
+ let outputChannels = shape2[0]
+ let filtersChannels = shape2[1]
+ let kernelHeight = shape2[2]
+ let kernelWidth = shape2[3]
+ let inputHeightAfterPadding = inputHeight + 2*paddings[0]
+ let inputWidthAfterPadding = inputWidth + 2*paddings[1]
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ if kernelHeight > inputHeightAfterPadding then failwithf "Expecting kernelHeight (%A) <= inputHeightAfterPadding (%A)" kernelHeight inputHeightAfterPadding
+ if kernelWidth > inputWidthAfterPadding then failwithf "Expecting kernelWidth (%A) <= inputWidthAfterPadding (%A)" kernelWidth inputWidthAfterPadding
+ let outputHeight = int (floor (float (inputHeightAfterPadding - kernelHeight)/(float strides[0]))) + 1
+ let outputWidth = int (floor (float (inputWidthAfterPadding - kernelWidth)/(float strides[1]))) + 1
+ let outputShape = [|batchSize; outputChannels; outputHeight; outputWidth|]
+ batchSize, inputChannels, (kernelHeight, kernelWidth), (outputChannels, outputHeight, outputWidth), outputShape
+
+ /// Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.
+ let checkCanConv3d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1: Shape) (shape2: Shape) (strides: int[]) (paddings: int[]) (dilations: int[]) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "conv3d" dtype1
+ if shape1.Length <> 5 || shape2.Length <> 5 then failwithf "Expecting two 5d tensors t1, t2 where t1 is input, NxCxDxHxW (batchSize x inputChannels x inputDepth x inputHeight x inputWidth) and t2 is filters, KxCxExFxG (outputChannels x inputChannels x kernelDepth x kernelHeight x kernelWidth), received tensors with shapes %A, %A" shape1 shape2
+ if strides.Length <> 3 then failwithf "Expecting strides (%A) to be a length-three array" strides
+ if paddings.Length <> 3 then failwithf "Expecting paddings (%A) to be a length-three array" paddings
+ if dilations.Length <> 3 then failwithf "Expecting dilations (%A) to be a length-three array" dilations
+ if paddings[0] < 0 || paddings[1] < 0 || paddings[2] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if strides[0] < 1 || strides[1] < 1 || strides[2] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ if dilations[0] < 1 || dilations[1] < 1 || dilations[2] < 1 then failwithf "Expecting all dilations (%A) >= 1" dilations
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputDepth = shape1[2]
+ let inputHeight = shape1[3]
+ let inputWidth = shape1[4]
+ let outputChannels = shape2[0]
+ let filtersChannels = shape2[1]
+ let kernelDepth = shape2[2]
+ let kernelHeight = shape2[3]
+ let kernelWidth = shape2[4]
+ let inputDepthAfterPadding = inputDepth + 2*paddings[0]
+ let inputHeightAfterPadding = inputHeight + 2*paddings[1]
+ let inputWidthAfterPadding = inputWidth + 2*paddings[2]
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ if kernelDepth > inputDepthAfterPadding then failwithf "Expecting kernelDepth (%A) <= inputDepthAfterPadding (%A)" kernelDepth inputDepthAfterPadding
+ if kernelHeight > inputHeightAfterPadding then failwithf "Expecting kernelHeight (%A) <= inputHeightAfterPadding (%A)" kernelHeight inputHeightAfterPadding
+ if kernelWidth > inputWidthAfterPadding then failwithf "Expecting kernelWidth (%A) <= inputWidthAfterPadding (%A)" kernelWidth inputWidthAfterPadding
+ let outputDepth = int (floor (float (inputDepthAfterPadding - kernelDepth)/(float strides[0]))) + 1
+ let outputHeight = int (floor (float (inputHeightAfterPadding - kernelHeight)/(float strides[1]))) + 1
+ let outputWidth = int (floor (float (inputWidthAfterPadding - kernelWidth)/(float strides[2]))) + 1
+ let outputShape = [|batchSize; outputChannels; outputDepth; outputHeight; outputWidth|]
+ batchSize, inputChannels, (kernelDepth, kernelHeight, kernelWidth), (outputChannels, outputDepth, outputHeight, outputWidth), outputShape
+
+ /// Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape.
+ let checkCanConvTranspose1d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1: Shape) (shape2: Shape) (stride: int) (padding: int) (dilation: int) (outputPadding: int) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "convTranspose1d" dtype1
+ if shape1.Length <> 3 || shape2.Length <> 3 then failwithf "Expecting two 3d tensors t1, t2 where t1 is input (NxCxI: batchSize x inputChannels x inputLength) and t2 is filters (KxCxF: outputChannels x inputChannels x kernelLength), received tensors with shapes %A, %A" shape1 shape2
+ if padding < 0 then failwithf "Expecting padding (%A) >= 0" padding
+ if stride < 1 then failwithf "Expecting stride (%A) >= 1" stride
+ if dilation < 1 then failwithf "Expecting dilation (%A) >= 1" dilation
+ if outputPadding < 0 then failwithf "Expecting outputPadding (%A) >= 0" outputPadding
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputLength = shape1[2]
+ let outputChannels = shape2[1]
+ let filtersChannels = shape2[0]
+ let kernelLength = shape2[2]
+ let kernelShape = [|kernelLength|]
+ let kernelShapeAfterDilation = dilated kernelShape [|dilation|]
+ let kernelLength = kernelShapeAfterDilation[0]
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ let outputSize = stride * (inputLength - 1) + kernelLength - 2 * padding + outputPadding
+ let outputShape = [|batchSize; outputChannels; outputSize|]
+ batchSize, inputChannels, kernelLength, outputChannels, outputSize, outputShape
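+ // Worked example (illustrative): stride = 2, inputLength = 5, kernelLength = 3,
+ // padding = 0, outputPadding = 0 gives outputSize = 2*(5 - 1) + 3 = 11, the
+ // inverse of a conv1d mapping length 11 to length 5 with the same parameters.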
+
+ /// Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape.
+ let checkCanConvTranspose2d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1: Shape) (shape2: Shape) (strides: int[]) (paddings: int[]) (dilations: int[]) (outputPaddings: int[]) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "convTranspose2d" dtype1
+ if shape1.Length <> 4 || shape2.Length <> 4 then failwithf "Expecting two 4d tensors t1, t2 where t1 is input, NxCxHxW (batchSize x inputChannels x inputHeight x inputWidth) and t2 is filters, KxCxFxG (outputChannels x inputChannels x kernelHeight x kernelWidth), received tensors with shapes %A, %A" shape1 shape2
+ if strides.Length <> 2 then failwithf "Expecting strides (%A) to be a length-two array" strides
+ if paddings.Length <> 2 then failwithf "Expecting paddings (%A) to be a length-two array" paddings
+ if dilations.Length <> 2 then failwithf "Expecting dilations (%A) to be a length-two array" dilations
+ if outputPaddings.Length <> 2 then failwithf "Expecting outputPaddings (%A) to be a length-two array" outputPaddings
+ if paddings[0] < 0 || paddings[1] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if strides[0] < 1 || strides[1] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ if dilations[0] < 1 || dilations[1] < 1 then failwithf "Expecting all dilations (%A) >= 1" dilations
+ if outputPaddings[0] < 0 || outputPaddings[1] < 0 then failwithf "Expecting all outputPaddings (%A) >= 0" outputPaddings
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputHeight = shape1[2]
+ let inputWidth = shape1[3]
+ let outputChannels = shape2[1]
+ let filtersChannels = shape2[0]
+ let kernelHeight = shape2[2]
+ let kernelWidth = shape2[3]
+ let kernelShape = [|kernelHeight; kernelWidth|]
+ let kernelShapeAfterDilation = dilated kernelShape dilations
+ let kernelHeight = kernelShapeAfterDilation[0]
+ let kernelWidth = kernelShapeAfterDilation[1]
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ let outputHeight = strides[0] * (inputHeight - 1) + kernelHeight - 2 * paddings[0] + outputPaddings[0]
+ let outputWidth = strides[1] * (inputWidth - 1) + kernelWidth - 2 * paddings[1] + outputPaddings[1]
+ let outputShape = [|batchSize; outputChannels; outputHeight; outputWidth|]
+ batchSize, inputChannels, (kernelHeight, kernelWidth), (outputChannels, outputHeight, outputWidth), outputShape
+
+ /// Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape.
+ let checkCanConvTranspose3d (deviceType1: DeviceType) (deviceType2: DeviceType) (dtype1: Dtype) (dtype2: Dtype) (shape1: Shape) (shape2: Shape) (strides: int[]) (paddings: int[]) (dilations: int[]) (outputPaddings: int[]) =
+ checkDeviceTypes deviceType1 deviceType2
+ checkDtypes dtype1 dtype2
+ checkConvDType "convTranspose3d" dtype1
+ if shape1.Length <> 5 || shape2.Length <> 5 then failwithf "Expecting two 5d tensors t1, t2 where t1 is input, NxCxDxHxW (batchSize x inputChannels x inputDepth x inputHeight x inputWidth) and t2 is filters, KxCxExFxG (outputChannels x inputChannels x kernelDepth x kernelHeight x kernelWidth), received tensors with shapes %A, %A" shape1 shape2
+ if strides.Length <> 3 then failwithf "Expecting strides (%A) to be a length-three array" strides
+ if paddings.Length <> 3 then failwithf "Expecting paddings (%A) to be a length-three array" paddings
+ if dilations.Length <> 3 then failwithf "Expecting dilations (%A) to be a length-three array" dilations
+ if outputPaddings.Length <> 3 then failwithf "Expecting outputPaddings (%A) to be a length-three array" outputPaddings
+ if paddings[0] < 0 || paddings[1] < 0 || paddings[2] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if strides[0] < 1 || strides[1] < 1 || strides[2] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ if dilations[0] < 1 || dilations[1] < 1 || dilations[2] < 1 then failwithf "Expecting all dilations (%A) >= 1" dilations
+ if outputPaddings[0] < 0 || outputPaddings[1] < 0 || outputPaddings[2] < 0 then failwithf "Expecting all outputPaddings (%A) >= 0" outputPaddings
+ let batchSize = shape1[0]
+ let inputChannels = shape1[1]
+ let inputDepth = shape1[2]
+ let inputHeight = shape1[3]
+ let inputWidth = shape1[4]
+ let outputChannels = shape2[1]
+ let filtersChannels = shape2[0]
+ let kernelDepth = shape2[2]
+ let kernelHeight = shape2[3]
+ let kernelWidth = shape2[4]
+ let kernelShape = [|kernelDepth; kernelHeight; kernelWidth|]
+ let kernelShapeAfterDilation = dilated kernelShape dilations
+ let kernelDepth = kernelShapeAfterDilation[0]
+ let kernelHeight = kernelShapeAfterDilation[1]
+ let kernelWidth = kernelShapeAfterDilation[2]
+ if filtersChannels <> inputChannels then failwithf "Input and filters have different number of channels: %A, %A" inputChannels filtersChannels
+ let outputDepth = strides[0] * (inputDepth - 1) + kernelDepth - 2 * paddings[0] + outputPaddings[0]
+ let outputHeight = strides[1] * (inputHeight - 1) + kernelHeight - 2 * paddings[1] + outputPaddings[1]
+ let outputWidth = strides[2] * (inputWidth - 1) + kernelWidth - 2 * paddings[2] + outputPaddings[2]
+ let outputShape = [|batchSize; outputChannels; outputDepth; outputHeight; outputWidth|]
+ batchSize, inputChannels, (kernelDepth, kernelHeight, kernelWidth), (outputChannels, outputDepth, outputHeight, outputWidth), outputShape
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxOrAvgpool1d nm (dtype: Dtype) (shape: Shape) (kernelSize: int) (stride: int) (padding: int) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported nm dtype
+ | _ ->
+ if shape.Length <> 3 then failwithf "Expecting a 3d tensor (NxCxL: batchSize x inputChannels x inputLength), received tensor with shape %A" shape
+ if kernelSize < 1 then failwithf "Expecting kernelSize (%A) >= 1" kernelSize
+ if padding < 0 then failwithf "Expecting padding (%A) >= 0" padding
+ if padding > kernelSize/2 then failwithf "Expecting padding (%A) <= kernelSize (%A) / 2" padding kernelSize
+ if stride < 1 then failwithf "Expecting stride (%A) >= 1" stride
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputSize = shape[2]
+ let inputLengthAfterPadding = inputSize + 2*padding
+ if kernelSize > inputLengthAfterPadding then failwithf "Expecting kernelSize (%A) <= inputLengthAfterPadding (%A)" kernelSize inputLengthAfterPadding
+ let outputSize = int (floor (float (inputLengthAfterPadding - kernelSize)/(float stride))) + 1
+ let outputShape = [|batchSize; channels; outputSize|]
+ batchSize, channels, inputSize, outputSize, outputShape
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxpool1d dtype shape kernelSize stride padding =
+ checkCanMaxOrAvgpool1d "maxpool1d" dtype shape kernelSize stride padding
+
+ /// Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.
+ let checkCanAvgpool1d dtype shape kernelSize stride padding =
+ checkCanMaxOrAvgpool1d "maxpool1d" dtype shape kernelSize stride padding
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxOrAvgpool2d nm (dtype: Dtype) (shape: Shape) (kernelSize: int[]) (strides: int[]) (paddings: int[]) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported nm dtype
+ | _ ->
+ if shape.Length <> 4 then failwithf "Expecting a 4d tensor (NxCxHxW: batchSize x inputChannels x inputHeight x inputWidth), received tensor with shape %A" shape
+ if kernelSize[0] < 1 || kernelSize[1] < 1 then failwithf "Expecting all kernelSizes (%A) >= 1" kernelSize
+ if paddings[0] < 0 || paddings[1] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if paddings[0] > kernelSize[0]/2 || paddings[1] > kernelSize[1]/2 then failwithf "Expecting all paddings (%A) <= kernelSizes (%A) / 2" paddings kernelSize
+ if strides[0] < 1 || strides[1] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputHeight = shape[2]
+ let inputWidth = shape[3]
+ let kernelHeight = kernelSize[0]
+ let kernelWidth = kernelSize[1]
+ let inputHeightAfterPadding = inputHeight + 2*paddings[0]
+ let inputWidthAfterPadding = inputWidth + 2*paddings[1]
+ if kernelSize[0] > inputHeightAfterPadding then failwithf "Expecting kernelSize[0] (%A) <= inputHeightAfterPadding (%A)" kernelSize[0] inputHeightAfterPadding
+ if kernelSize[1] > inputWidthAfterPadding then failwithf "Expecting kernelSize[1] (%A) <= inputWidthAfterPadding (%A)" kernelSize[1] inputWidthAfterPadding
+ let outputHeight = int (floor (float (inputHeightAfterPadding - kernelHeight)/(float strides[0]))) + 1
+ let outputWidth = int (floor (float (inputWidthAfterPadding - kernelWidth)/(float strides[1]))) + 1
+ let outputShape = [|batchSize; channels; outputHeight; outputWidth|]
+ (batchSize, channels, (inputHeight, inputWidth), (kernelHeight, kernelWidth), (outputHeight, outputWidth), outputShape)
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxpool2d dtype shape kernelSize strides paddings =
+ checkCanMaxOrAvgpool2d "maxpool2d" dtype shape kernelSize strides paddings
+
+ /// Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.
+ let checkCanAvgpool2d dtype shape kernelSize strides paddings =
+ checkCanMaxOrAvgpool2d "avgpool2d" dtype shape kernelSize strides paddings
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxOrAvgpool3d nm (dtype: Dtype) (shape: Shape) (kernelSize: int[]) (strides: int[]) (paddings: int[]) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported nm dtype
+ | _ ->
+ if shape.Length <> 5 then failwithf "Expecting a 5d tensor (NxCxDxHxW: batchSize x inputChannels x inputDepth x inputHeight x inputWidth), received tensor with shape %A" shape
+ if kernelSize[0] < 1 || kernelSize[1] < 1 || kernelSize[2] < 1 then failwithf "Expecting all kernelSizes (%A) >= 1" kernelSize
+ if paddings[0] < 0 || paddings[1] < 0 || paddings[2] < 0 then failwithf "Expecting all paddings (%A) >= 0" paddings
+ if paddings[0] > kernelSize[0]/2 || paddings[1] > kernelSize[1]/2 || paddings[2] > kernelSize[2]/2 then failwithf "Expecting all paddings (%A) <= kernelSizes (%A) / 2" paddings kernelSize
+ if strides[0] < 1 || strides[1] < 1 || strides[2] < 1 then failwithf "Expecting all strides (%A) >= 1" strides
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputDepth = shape[2]
+ let inputHeight = shape[3]
+ let inputWidth = shape[4]
+ let kernelDepth = kernelSize[0]
+ let kernelHeight = kernelSize[1]
+ let kernelWidth = kernelSize[2]
+ let inputDepthAfterPadding = inputDepth + 2*paddings[0]
+ let inputHeightAfterPadding = inputHeight + 2*paddings[1]
+ let inputWidthAfterPadding = inputWidth + 2*paddings[2]
+ if kernelSize[0] > inputDepthAfterPadding then failwithf "Expecting kernelSize[0] (%A) <= inputDepthAfterPadding (%A)" kernelSize[0] inputDepthAfterPadding
+ if kernelSize[1] > inputHeightAfterPadding then failwithf "Expecting kernelSize[1] (%A) <= inputHeightAfterPadding (%A)" kernelSize[1] inputHeightAfterPadding
+ if kernelSize[2] > inputWidthAfterPadding then failwithf "Expecting kernelSize[2] (%A) <= inputWidthAfterPadding (%A)" kernelSize[2] inputWidthAfterPadding
+ let outputDepth = int (floor (float (inputDepthAfterPadding - kernelDepth)/(float strides[0]))) + 1
+ let outputHeight = int (floor (float (inputHeightAfterPadding - kernelHeight)/(float strides[1]))) + 1
+ let outputWidth = int (floor (float (inputWidthAfterPadding - kernelWidth)/(float strides[2]))) + 1
+ let outputShape = [|batchSize; channels; outputDepth; outputHeight; outputWidth|]
+ (batchSize, channels, (inputDepth, inputHeight, inputWidth), (kernelDepth, kernelHeight, kernelWidth), (outputDepth, outputHeight, outputWidth), outputShape)
+
+ /// Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.
+ let checkCanMaxpool3d dtype shape kernelSize strides paddings =
+ checkCanMaxOrAvgpool3d "maxpool3d" dtype shape kernelSize strides paddings
+
+ /// Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.
+ let checkCanAvgpool3d dtype shape kernelSize strides paddings =
+ checkCanMaxOrAvgpool3d "avgpool3d" dtype shape kernelSize strides paddings
+
+ /// Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape.
+ let checkCanMaxunpool1d (dtype: Dtype) (shape: Shape) (indicesDtype: Dtype) (indicesShape: Shape) (outputSize: int[]) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "maxunpool2d" dtype
+ | _ ->
+ if indicesDtype <> Dtype.Int32 then failwithf "Expecting indices to have type %A" Dtype.Int32
+ if outputSize.Length <> 3 then failwithf "Expecting outputSize (%A) to be 3-dimensional" outputSize
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputSize = shape[2]
+ if outputSize[0] <> indicesShape[0] || outputSize[1] <> indicesShape[1] then failwithf "Expecting the first two elements of outputSize (%A) and indicesShape (%A) to be the same" outputSize indicesShape
+ let outputShape = [|batchSize; channels; outputSize[2]|]
+ batchSize, channels, inputSize, outputShape
+
+ /// Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape.
+ let checkCanMaxunpool2d (dtype: Dtype) (shape: Shape) (indicesDtype: Dtype) (indicesShape: Shape) (outputSize: int[]) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "maxunpool2d" dtype
+ | _ ->
+ if indicesDtype <> Dtype.Int32 then failwithf "Expecting indices to have type %A" Dtype.Int32
+ if outputSize.Length <> 4 then failwithf "Expecting outputSize (%A) to be 4-dimensional" outputSize
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputHeight = shape[2]
+ let inputWidth = shape[3]
+ if outputSize[0] <> indicesShape[0] || outputSize[1] <> indicesShape[1] then failwithf "Expecting the first two elements of outputSize (%A) and indicesShape (%A) to be the same" outputSize indicesShape
+ let outputShape = [|batchSize; channels; outputSize[2]; outputSize[3]|]
+ batchSize, channels, (inputHeight, inputWidth), outputShape
+
+ /// Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape.
+ let checkCanMaxunpool3d (dtype: Dtype) (shape: Shape) (indicesDtype: Dtype) (indicesShape: Shape) (outputSize: int[]) =
+ match dtype with
+ | Dtype.Bool | Dtype.Integral -> opNotSupported "maxunpool2d" dtype
+ | _ ->
+ if indicesDtype <> Dtype.Int32 then failwithf "Expecting indices to have type %A" Dtype.Int32
+ if outputSize.Length <> 5 then failwithf "Expecting outputSize (%A) to be 5-dimensional" outputSize
+ let batchSize = shape[0]
+ let channels = shape[1]
+ let inputDepth = shape[2]
+ let inputHeight = shape[3]
+ let inputWidth = shape[4]
+ if outputSize[0] <> indicesShape[0] || outputSize[1] <> indicesShape[1] then failwithf "Expecting the first two elements of outputSize (%A) and indicesShape (%A) to be the same" outputSize indicesShape
+ let outputShape = [|batchSize; channels; outputSize[2]; outputSize[3]; outputSize[4]|]
+ batchSize, channels, (inputDepth, inputHeight, inputWidth), outputShape
+
+ /// Indicates if one shape can expand into another through the addition of broadcast dimensions.
+ let canExpand (oldShape: Shape) (newShape: Shape) =
+ newShape.Length >= oldShape.Length &&
+ let trim = newShape.Length - oldShape.Length
+ newShape[..trim-1] |> Array.forall (fun m -> m >= 1)
+ && (oldShape,newShape[trim..]) ||> Array.forall2 (fun n m -> n = 1 || n = m)
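+ // For example (illustrative): canExpand [|3;1|] [|2;3;4|] = true (a leading
+ // dimension is added and the trailing 1 broadcasts to 4), while
+ // canExpand [|2|] [|3|] = false.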
+
+ /// Checks if one shape can expand into another through the addition of broadcast dimensions.
+ let checkCanExpand (oldShape: Shape) (newShape: Shape) =
+ let isOK = canExpand oldShape newShape
+ if not isOK then failwithf "can't expand from shape %A to %A - each dimension must either be equal or expand from 1" oldShape newShape
+
+ /// Checks if the given shape is appropriate for a transpose operation and returns information related to the resulting shape.
+ let checkCanTranspose (shape: Shape) (dim0: int) (dim1: int) =
+ if dim0 < 0 || dim0 >= shape.Length then failwithf "Expecting 0 <= dim0 (%A) < shape.Length (%A)" dim0 shape.Length
+ if dim1 < 0 || dim1 >= shape.Length then failwithf "Expecting 0 <= dim1 (%A) < shape.Length (%A)" dim1 shape.Length
+
+ /// Checks if the given shape is appropriate for a transpose operation.
+ let checkCanTranspose2d (dim: int) =
+ if dim <> 2 then failwith "Expecting dim=2 when no specific dimensions are given to transpose. Consider using general transpose(dim0, dim1)."
+
+ /// Checks if the given shape is appropriate for a transpose operation.
+ let checkCanInvert (shape: Shape) =
+ let dim = shape.Length
+ if not (dim = 2 || dim = 3) then failwith "Expecting a 2d tensor (a square matrix) or a 3d tensor (a batch of square matrices)."
+ if dim = 2 then if shape[0] <> shape[1] then failwith "Expecting a square matrix"
+ if dim = 3 then if shape[1] <> shape[2] then failwith "Expecting square matrices"
+
+ /// Checks if the given shape is appropriate for a determinant operation.
+ let checkCanDet (shape: Shape) =
+ let dim = shape.Length
+ if not (dim = 2 || dim = 3) then failwith "Expecting a 2d tensor (a square matrix) or a 3d tensor (a batch of square matrices)."
+ if dim = 2 then if shape[0] <> shape[1] then failwith "Expecting a square matrix"
+ if dim = 3 then if shape[1] <> shape[2] then failwith "Expecting square matrices"
+
+ /// Checks if the given shapes are appropriate for a linear solve operation, and returns the resulting shape of the solution
+ let checkCanSolve (shapeA: Shape) (shapeB: Shape) =
+ let dimA = shapeA.Length
+ let dimB = shapeB.Length
+ let newShape =
+ if dimA = 2 then
+ let n = shapeA[0]
+ if n <> shapeA[1] then failwithf "Expecting A to be a square matrix, received A with shape %A." shapeA
+ if n <> shapeB[0] then failwithf "Expecting A and B to have the same number of rows (1st dimension), received A and B with shapes %A and %A." shapeA shapeB
+ if dimB = 1 then
+ // k = 1
+ [|n|]
+ elif dimB = 2 then
+ let k = shapeB[1]
+ [|n; k|]
+ else
+ failwithf "Expecting B to be a 1d or 2d tensor, received B with shape %A." shapeB
+ elif dimA = 3 then
+ let batchSize = shapeA[0]
+ if batchSize <> shapeB[0] then failwithf "Expecting A and B to have the same number of batch items (1st dimension), received A and B with shapes %A and %A." shapeA shapeB
+ let n = shapeA[1]
+ if n <> shapeA[2] then failwithf "Expecting A to be a batch of square matrices, received A with shape %A." shapeA
+ if n <> shapeB[1] then failwithf "Expecting the matrices in batches A and B to have the same number of rows (2nd dimension), received A and B with shapes %A and %A." shapeA shapeB
+ if dimB = 2 then
+ // k = 1
+ [|batchSize; n|]
+ elif dimB = 3 then
+ let k = shapeB[2]
+ [|batchSize; n; k|]
+ else
+ failwithf "Expecting B to be a 2d tensor (batch of vectors) or 3d tensor (a batch of matrices), received B with shape %A." shapeB
+ else
+ failwithf "Expecting A to be a 2d tensor (a square matrix) or a 3d tensor (a batch of square matrices), received A with shape %A." shapeA
+ newShape
+
+ /// Checks if the given shape is appropriate for a permute operation and returns information related to the resulting shape.
+ let checkCanPermute (shape: Shape) (permutation: int[]) =
+ if shape.Length <> permutation.Length then failwithf "Expecting tensor's shape (%A) and permutation (%A) to have the same dims" shape permutation
+ if Seq.hasDuplicates permutation then failwithf "Expecting permutation (%A) to have no duplicate values" permutation
+ let inversePermutation = Array.permute (fun i -> permutation[i]) [| 0.. shape.Length-1 |]
+ let newShape = Array.permute (fun i -> inversePermutation[i]) shape
+ inversePermutation, newShape
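+ // For example (illustrative): checkCanPermute [|2;3;4|] [|2;0;1|] returns
+ // inversePermutation [|1;2;0|] and newShape [|4;2;3|] (new dim 0 is old dim 2).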
+
+ /// Checks if the given shape is appropriate for a flip operation.
+ let checkCanFlip (dim: int) (dims: int[]) =
+ if dims.Length > dim then failwithf "Expecting dims (list of dimension indices to flip) of length at most the Tensor's number of dimensions, received %A, %A" dims.Length dim
+ if Seq.hasDuplicates dims then failwithf "Expecting dims (list of dimension indices to flip) without repetition, received %A" dims
+ if (Array.max dims) >= dim then failwithf "Expecting dims (list of dimension indices to flip) where all indices are less than the tensor dimension, received %A, %A" dims dim
+
+ /// Checks if the given shape is appropriate for a repeat operation.
+ let checkCanRepeat (shape: Shape) (dim: int) =
+ if shape[dim] <> 1 then failwithf "Expecting Tensor's shape (%A) at dim (%A) to be 1" shape dim
+
+ /// Checks if the given shape is appropriate for a dilate operation.
+ let checkCanDilate (dim: int) (dilations: int[]) =
+ if dilations.Length <> dim then failwithf "Expecting dilations (dilation to use in each dimension) of same length with Tensor's dimensions, received %A, %A" dilations.Length dim
+ if (Array.min dilations) < 1 then failwithf "Expecting dilations (dilation to use in each dimension) >= 1 where 1 represents no dilation, received %A" dilations
+
+ /// Checks if the given shape is appropriate for a gather operation.
+ let checkCanGather (shape: Shape) (dim: int) (indicesShape: Shape) (indicesDtype:Dtype) =
+ if shape.Length <> indicesShape.Length then failwithf "Expecting tensor (%A) and indices (%A) to have the same number of dimensions" shape indicesShape
+ if dim < 0 || dim > shape.Length-1 then failwithf "Expecting 0<= dim (%A) < tensor dim (%A)" dim shape.Length
+ if indicesShape[dim] < 1 then failwithf "Expecting indices shape at dim %A (%A) >= 1" dim indicesShape[dim]
+ if indicesDtype <> Dtype.Int32 then failwithf "Expecting indices to have type %A" Dtype.Int32
+
+ /// Checks if the given shape is appropriate for a scatter operation.
+ let checkCanScatter (shape: Shape) (dim: int) (indicesShape: Shape) (indicesDtype:Dtype) (destinationShape: Shape)=
+ if shape.Length <> indicesShape.Length then failwithf "Expecting tensor (%A) and indices (%A) to have the same number of dimensions" shape indicesShape
+ if shape.Length <> destinationShape.Length then failwithf "Expecting tensor (%A) and destination (%A) to have the same number of dimensions" shape destinationShape
+ if not (contains shape indicesShape) then failwithf "Expecting tensor shape (%A) to contain indices shape (%A)" shape indicesShape
+ if dim < 0 || dim > shape.Length-1 then failwithf "Expecting 0<= dim (%A) < tensor dim (%A)" dim shape.Length
+ if indicesDtype <> Dtype.Int32 then failwithf "Expecting indices to have type %A" Dtype.Int32
+
+ /// Checks if the given shape is appropriate for a view operation.
+ let checkCanView (shape1: Shape) (shape2: Shape) =
+ if nelement shape1 <> nelement shape2 then failwithf "Cannot view Tensor of shape %A as shape %A" shape1 shape2
+
+ /// Checks if the given shape is appropriate for a flatten operation.
+ let checkCanFlatten (shape: Shape) (startDim: int) (endDim: int) =
+ if startDim < 0 || startDim >= shape.Length then failwithf "Expecting 0 <= startDim (%A) < %A" startDim shape.Length
+ if endDim < 0 || endDim >= shape.Length then failwithf "Expecting 0 <= endDim (%A) < %A" endDim shape.Length
+ if endDim <= startDim then failwithf "Expecting startDim (%A) < endDim (%A)" startDim endDim
+
+ /// Checks if the given shape is appropriate for an addSlice operation.
+ let checkCanAddSlice (shape1: Shape) (location: int[]) (shape2: Shape) =
+ if not (contains shape1 shape2) then failwithf "Expecting shape1 to contain shape2, received %A, %A" shape1 shape2
+ if location.Length <> shape1.Length then failwithf "Expecting location of the same length as shape1, received %A, %A" (location.Length) shape1
+
+ /// Checks if the given shapes are appropriate for a matmul operation.
+ let checkCanMatmul (shape1: Shape) (shape2: Shape) =
+ if shape1.Length < 2 || shape2.Length < 2 then failwithf "Expecting tensors to have at least two dimensions, received tensors with shapes %A, %A" shape1 shape2
+ let aBatchPart, aMatrixPart = Array.splitAt (shape1.Length-2) shape1
+ let bBatchPart, bMatrixPart = Array.splitAt (shape2.Length-2) shape2
+ if aMatrixPart[1] <> bMatrixPart[0] then failwithf "Cannot matrix multiply tensors with shapes %A, %A - mismatch in matrix dimension" shape1 shape2
+ (aBatchPart, aMatrixPart), (bBatchPart, bMatrixPart)
+
+ /// Checks if the given shapes are appropriate for a batched matrix multiplication operation.
+ let checkCanBMM (shape1: Shape) (shape2: Shape) =
+ if shape1.Length <> 3 || shape2.Length <> 3 then failwithf "Expecting two 3d tensors, received tensors with shapes %A, %A" shape1 shape2
+ if shape1[0] <> shape2[0] then failwithf "Cannot batch matrix multiply tensors with shapes %A, %A - mismatch in batch dimension" shape1 shape2
+ let batchSize = shape1[0]
+ if shape1[2] <> shape2[1] then failwithf "Cannot batch matrix multiply tensors with shapes %A, %A - mismatch in matrix dimension" shape1 shape2
+ let outputShape = [|batchSize; shape1[1]; shape2[2]|]
+ outputShape
+
+ /// Checks if the given shape is appropriate for a dot product operation.
+ let checkCanDot (shape1: Shape) (shape2: Shape) =
+ if shape1.Length <> 1 || shape2.Length <> 1 then failwithf "Expecting two vectors (1d Tensors), received tensors with shapes %A, %A" shape1 shape2
+ if shape1[0] <> shape2[0] then failwithf "Cannot multiply vectors with different lengths %A, %A" shape1[0] shape2[0]
+
+ /// Checks if the given shape is appropriate for a pad operation.
+ let checkCanPad (shape: Shape) (paddings: int[]) =
+ if shape.Length <> paddings.Length then failwithf "Expecting shape (%A) and paddings (%A) to have the same length" shape paddings
+ if not (paddings |> Array.forall (fun p -> p >= 0)) then failwithf "Expecting all paddings (%A) >= 0" paddings
+
+ /// Checks if the given shape is appropriate for a dropout operation.
+ let checkCanDropout (p:double) =
+ if p < 0. || p > 1. then failwithf "Expecting 0 <= p <= 1, but received %A" p
+
+ /// Checks if the given shape is appropriate for a dropout2d operation.
+ let checkCanDropout2d (shape: Shape) (p:double) =
+ checkCanDropout p
+ if shape.Length <> 4 then failwithf "Expecting shape (%A) to be 4-dimensional (NxCxHxW: batchSize, inputChannels, inputHeight, inputWidth)" shape
+
+ /// Checks if the given shape is appropriate for a dropout3d operation.
+ let checkCanDropout3d (shape: Shape) (p:double) =
+ checkCanDropout p
+ if shape.Length <> 5 then failwithf "Expecting shape (%A) to be 5-dimensional (NxCxDxHxW: batchSize, inputChannels, inputDepth, inputHeight, inputWidth)" shape
+
+ /// Computes the shape that results from a squeeze operation.
+ let squeeze (dim: int) (shape: Shape) =
+ if dim = -1 then
+ [|for s in shape do if s <> 1 then yield s|]
+ elif shape[dim] = 1 then
+ [|for i=0 to shape.Length - 1 do
+ if i < dim then yield shape[i]
+ elif i > dim then yield shape[i]|]
+ else
+ shape
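+ // For example (illustrative): squeeze -1 [|1;2;1;3|] = [|2;3|] and
+ // squeeze 0 [|1;2;1;3|] = [|2;1;3|].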
+
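+ /// Checks if the given dimension is valid for a min/max reduction over the given shape and returns the resulting shape.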
+ let checkCanMinMaxReduce (dim: int) (keepDim: bool) (shape: Shape) =
+ if dim >= shape.Length || dim < 0 then failwithf "Expecting 0 <= dim (%A) < %A" dim shape.Length
+ let part1 = shape[..dim-1]
+ let part2 = shape[dim+1..]
+ [| yield! part1
+ if keepDim then yield 1
+ yield! part2 |]
+
+ /// Checks if the given shape is appropriate for an unsqueeze operation and returns the resulting shape.
+ let checkCanUnsqueeze (dim: int) (shape: Shape) =
+ if dim < 0 || dim > shape.Length then failwithf "Expecting dim in range [0, %A] but received %A" shape.Length dim
+ [|for i=0 to shape.Length do
+ if i < dim then yield shape[i]
+ elif i = dim then yield 1
+ else yield shape[i-1]|]
+
+ /// Computes the shape that results from an unsqueezeAs operation.
+ let unsqueezeAs (shape1: Shape) (shape2: Shape) =
+ if shape1.Length > shape2.Length then failwithf "Expecting shape1.Length (%A) <= shape2.Length (%A)" shape1.Length shape2.Length
+ let ones = Array.create (shape2.Length - shape1.Length) 1
+ Array.append ones shape1
+
+ /// Converts the given location to a three-element bounds array in the context of the given shape.
+ let locationToBounds (shape: Shape) (location: int[]) =
+ Array2D.init location.Length 3 (fun i j -> if j=0 then location[i] elif j=1 then location[i] + shape[i] - 1 else 0)
+
+ /// Computes the shape that results from a flatten operation.
+ let flatten (startDim: int) (endDim: int) (shape: Shape) =
+ let shape = [|for i in 0..shape.Length-1 do if (i < startDim) || (i > endDim) then shape[i] else -1|]
+ let mutable emitted = false
+ [|for s in shape do if s <> -1 then s elif not emitted then emitted <- true; -1|]
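+ // For example (illustrative): flatten 1 2 [|2;3;4;5|] = [|2;-1;5|]; the -1
+ // placeholder is resolved to 3*4 = 12 later, e.g. by `complete` below.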
+
+ /// Finds the shape into which `shape1` and `shape2` can be expanded.
+ let broadcast2 (shape1: Shape) (shape2: Shape) =
+ if canExpand shape1 shape2 || canExpand shape2 shape1 then
+ let n1 = shape1.Length
+ let n2 = shape2.Length
+ let mx = max n1 n2
+ let mn = mx - min n1 n2
+ Array.init mx (fun i ->
+ if i < mn then (if n1 > n2 then shape1[i] else shape2[i])
+ elif n1 > n2 then max shape1[i] shape2[i-mn]
+ else max shape1[i-mn] shape2[i])
+ else failwithf "shapes %A and %A are not related by broadcasting - each dimension must either be extra, equal, expand from 1" shape1 shape2
+
+ /// Finds the shape into which all the shapes can be expanded.
+ let broadcastShapes (shapes: Shape[]) = Array.reduce broadcast2 shapes
+
+ // /// Computes the shape that results from a pairwise dilation operation.
+ // let dilated2 (shape: Shape) (dilations: int[]) =
+ // Array.map2 (*) shape dilations
+
+ /// Computes the shape that results from an undilation operation.
+ let undilatedShape (shape: Shape) (dilations: int[]) =
+ Array.map2 (fun n d -> (n + d - 1) / d) shape dilations
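+ // For example (illustrative): undilatedShape [|7|] [|2|] = [|4|], inverting
+ // dilated [|4|] [|2|] = [|7|].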
+
+ /// Completes the given shape with respect to a tensor with the given number of elements.
+ let complete (nelement: int) (shape: Shape) =
+ if (shape |> Array.filter (fun x -> x < -1) |> Array.length) > 0 then failwithf "Invalid shape %A" shape
+ let numUnspecified = shape |> Array.filter ((=) -1) |> Array.length
+ if numUnspecified > 1 then
+ failwithf "Cannot complete shape %A, expecting at most one unspecified dimension (-1)" shape
+ elif numUnspecified = 0 then
+ shape
+ else
+ let divisor = shape |> Array.filter ((<>) -1) |> Shape.nelement
+ if nelement % divisor <> 0 then failwithf "Cannot complete shape %A to have %A elements" shape nelement
+ let missing = nelement / divisor
+ [|for d in shape do if d = -1 then yield missing else yield d|]
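+ // For example (illustrative): complete 24 [|2;-1;4|] = [|2;3;4|], since the
+ // specified dimensions account for 8 elements and 24/8 = 3.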
+
+ /// Completes the given shape dimension with respect to a concrete dimension.
+ let completeDim (dims:int) (dim:int) =
+ if dim < -dims || dim >= dims then failwithf "Expecting dim (%A) to be within the range [%A, %A)" dim (-dims) dims
+ if dim < 0 then dims+dim
+ else dim
+
+ /// Completes the given shape dimension with respect to a concrete dimension, for the unsqueeze operation.
+ let completeDimUnsqueeze (dims:int) (dim:int) =
+ if dim < (-1 - dims) || dim >= (dims + 1) then failwithf "Expecting dim (%A) to be within the range [%A, %A)" dim (-1 - dims) (dims + 1)
+ if dim < 0 then dims + dim + 1
+ else dim
+
+ /// Completes the new shape for an expand operation based on the current shape of the tensor.
+ let completeExpand (shape: Shape) (newShape: Shape) =
+ let trim = newShape.Length - shape.Length
+ newShape |> Array.mapi (fun i x -> if i>=trim && x = -1 then shape[i - trim] else x)
+
+ let completeSliceBounds (shape: Shape) (bounds:int[,]) =
+ let newBounds = Array2D.init (bounds.GetLength(0)) (bounds.GetLength(1))
+ (fun i j ->
+ if j = 0 || j = 1 then completeDim shape[i] bounds[i, j]
+ else bounds[i, j])
+ newBounds
+
+ let inline create (xs: seq<int>) = Seq.toArrayQuick xs
+
+ let resolve2dKernelSizes kernelSize kernelSizes =
+ match kernelSize, kernelSizes with
+ | Some _ , Some _ -> failwithf "Expecting only one of kernelSize, kernelSizes"
+ | Some k, None -> [|k; k|]
+ | None, Some k -> let k = k |> Array.ofSeq in if k.Length <> 2 then failwithf "Expecting kernelSizes to have length two" else k
+ | _ -> [|1; 1|]
+
+ let resolve3dKernelSizes kernelSize kernelSizes =
+ match kernelSize, kernelSizes with
+ | Some _ , Some _ -> failwithf "Expecting only one of kernelSize, kernelSizes"
+ | Some k, None -> [|k; k; k|]
+ | None, Some k -> let k = k |> Array.ofSeq in if k.Length <> 3 then failwithf "Expecting kernelSizes to have length three" else k
+ | _ -> [|1; 1; 1|]
+
+ let resolve2dConvSizes stride strides padding paddings dilation dilations =
+ let strides =
+ match stride, strides with
+ | Some _, Some _ -> failwithf "Expecting only one of stride, strides"
+ | Some s, None -> [|s; s|]
+ | None, Some s -> let s = s |> Array.ofSeq in if s.Length <> 2 then failwithf "Expecting strides to be 2-dimensional" else s
+ | _ -> [|1; 1|]
+ let paddings =
+ match padding, paddings with
+ | Some _ , Some _ -> failwithf "Expecting only one of padding, paddings"
+ | Some p, None -> [|p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 2 then failwithf "Expecting paddings to be 2-dimensional" else p
+ | _ -> [|0; 0|]
+ let dilations =
+ match dilation, dilations with
+ | Some _ , Some _ -> failwithf "Expecting only one of dilation, dilations"
+ | Some d, None -> [|d; d|]
+ | None, Some d -> let d = d |> Array.ofSeq in if d.Length <> 2 then failwithf "Expecting dilations to be 2-dimensional" else d
+ | _ -> [|1; 1|]
+ strides, paddings, dilations
+
+ let resolve3dConvSizes stride strides padding paddings dilation dilations =
+ let strides =
+ match stride, strides with
+ | Some _ , Some _ -> failwithf "Expecting only one of stride, strides"
+ | Some s, None -> [|s; s; s|]
+ | None, Some s -> let s = s |> Array.ofSeq in if s.Length <> 3 then failwithf "Expecting strides to be 3-dimensional" else s
+ | _ -> [|1; 1; 1|]
+ let paddings =
+ match padding, paddings with
+ | Some _ , Some _ -> failwithf "Expecting only one of padding, paddings"
+ | Some p, None -> [|p; p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 3 then failwithf "Expecting paddings to be 3-dimensional" else p
+ | _ -> [|0; 0; 0|]
+ let dilations =
+ match dilation, dilations with
+ | Some _ , Some _ -> failwithf "Expecting only one of dilation, dilations"
+ | Some d, None -> [|d; d; d|]
+ | None, Some d -> let d = d |> Array.ofSeq in if d.Length <> 3 then failwithf "Expecting dilations to be 3-dimensional" else d
+ | _ -> [|1; 1; 1|]
+ strides, paddings, dilations
+
+ let resolve2dConvOutputPadding outputPadding outputPaddings =
+ match outputPadding, outputPaddings with
+ | Some _ , Some _ -> failwithf "Expecting only one of outputPadding, outputPaddings"
+ | Some p, None -> [|p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 2 then failwithf "Expecting outputPaddings to be 2-dimensional" else p
+ | _ -> [|0; 0|]
+
+ let resolve3dConvOutputPadding outputPadding outputPaddings =
+ match outputPadding, outputPaddings with
+ | Some _ , Some _ -> failwithf "Expecting only one of outputPadding, outputPaddings"
+ | Some p, None -> [|p; p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 3 then failwithf "Expecting outputPaddings to be 3-dimensional" else p
+ | _ -> [|0; 0; 0|]
+
+ let resolve2dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings =
+ let kernelSizes =
+ match kernelSize, kernelSizes with
+ | Some _, Some _ -> failwithf "Expecting only one of kernelSize, kernelSizes"
+ | Some k, None -> [|k; k|]
+ | None, Some k -> let k = k |> Array.ofSeq in if k.Length <> 2 then failwithf "Expecting kernelSizes to be 2-dimensional" else k
+ | _ -> failwithf "Expecting either kernelSize or kernelSizes"
+
+ let strides =
+ match stride, strides with
+ | Some _, Some _ -> failwithf "Expecting only one of stride, strides"
+ | Some s, None -> [|s; s|]
+ | None, Some s -> let s = s |> Array.ofSeq in if s.Length <> 2 then failwithf "Expecting strides to be 2-dimensional" else s
+ | _ -> kernelSizes
+
+ let paddings =
+ match padding, paddings with
+ | Some _, Some _ -> failwithf "Expecting only one of padding, paddings"
+ | Some p, None -> [|p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 2 then failwithf "Expecting paddings to be 2-dimensional" else p
+ | _ -> [|0; 0|]
+ kernelSizes, strides, paddings
+
+ let resolve3dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings =
+ let kernelSizes =
+ match kernelSize, kernelSizes with
+ | Some _, Some _ -> failwithf "Expecting only one of kernelSize, kernelSizes"
+ | Some k, None -> [|k; k; k|]
+ | None, Some k -> let k = k |> Array.ofSeq in if k.Length <> 3 then failwithf "Expecting kernelSizes to be 3-dimensional" else k
+ | _ -> failwithf "Expecting either kernelSize or kernelSizes"
+ let strides =
+ match stride, strides with
+ | Some _, Some _ -> failwithf "Expecting only one of stride, strides"
+ | Some s, None -> [|s; s; s|]
+ | None, Some s -> let s = s |> Array.ofSeq in if s.Length <> 3 then failwithf "Expecting strides to be 3-dimensional" else s
+ | _ -> kernelSizes
+ let paddings =
+ match padding, paddings with
+ | Some _, Some _ -> failwithf "Expecting only one of padding, paddings"
+ | Some p, None -> [|p; p; p|]
+ | None, Some p -> let p = p |> Array.ofSeq in if p.Length <> 3 then failwithf "Expecting paddings to be 3-dimensional" else p
+ | _ -> [|0; 0; 0|]
+ kernelSizes, strides, paddings
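+ // Note that, unlike the convolution resolvers above, the pooling resolvers
+ // require a kernel size, and strides default to the kernel sizes, so pooling
+ // windows do not overlap unless strides are given explicitly.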
+
+
+[<AutoOpen>]
+module ShapeAutoOpens =
+
+ /// Gets the total number of elements in a shape.
+ let shapeLength (shape: Shape) = Shape.nelement shape
+
+ /// Checks whether the full bounds specify a scalar location.
+ let boundsIsScalar (bounds: int[,]) =
+ let mutable res = true
+ for i=0 to bounds.GetLength(0) - 1 do
+ res <- res && bounds[i,2] = 1
+ res
+
+ /// Converts the array of three-position bounds specifications to a location.
+ let boundsToLocation (bounds: int[,]) =
+ [|for i=0 to bounds.GetLength(0) - 1 do yield bounds[i, 0]|]
+
+ /// Converts the array of three-position bounds specifications to a shape, without squeezing out scalars.
+ let boundsToShape (bounds: int[,]) =
+ [|for i=0 to bounds.GetLength(0) - 1 do
+ let len = bounds[i, 1] - bounds[i, 0] + 1
+ yield len|]
+
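+ /// Converts the given shape to a full bounds specification covering every dimension.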
+ let shapeToFullBounds (shape: Shape) =
+ Array2D.init (shape.Length) 3 (fun i j -> if j=0 then 0 elif j=1 then shape[i]-1 else 0)
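+ // Each bounds row is laid out as [|min; max; given|]; for instance,
+ // shapeToFullBounds [|2; 3|] = array2D [[0; 1; 0]; [0; 2; 0]].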
+
+ /// Mirrors the coordinates in the given dimensions in the context of the given shape.
+ let mirrorCoordinates (coordinates: int[]) (shape: int[]) (mirrorDims: int[]) =
+ if coordinates.Length <> shape.Length then failwithf "Expecting coordinates and shape of the same dimension, received %A, %A" coordinates.Length shape.Length
+ let result = Array.copy coordinates
+ for d=0 to coordinates.Length-1 do
+ if mirrorDims |> Array.contains d then
+ result[d] <- abs (coordinates[d] - shape[d] + 1)
+ result
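+ // For instance, mirrorCoordinates [|1; 0|] [|4; 5|] [|0|] reflects the first
+ // coordinate within a dimension of size 4, giving [|2; 0|].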
+
+ /// Dilates the given coordinates.
+ let dilatedCoordinates (coordinates: int[]) (dilations: int[]) =
+ Array.map2 (*) coordinates dilations
+
+ /// Converts the given index to a flat index in the context of the given shape.
+ let indexToFlatIndex (shape: int[]) (index: int[]) =
+ Shape.checkCanIndex shape index
+ let mutable flatIndex = 0
+ for i=0 to index.Length - 1 do
+ let v = if i = index.Length - 1 then 1 else (Array.reduce (*) shape[i+1..])
+ flatIndex <- flatIndex + index[i] * v
+ flatIndex
+
+ /// Converts the given flat index to an index in the context of the given shape.
+ let flatIndexToIndex (shape: int[]) (flatIndex: int) =
+ let dim = shape.Length
+ let nelement = shapeLength shape
+ let index = Array.create dim 0
+ let mutable mul = nelement
+ let mutable fi = flatIndex
+ for i=dim downto 1 do
+ mul <- mul / shape[dim-i]
+ index[i-1] <- fi / mul
+ fi <- fi - index[i-1] * mul
+ index |> Array.rev
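+
+ // The two conversions are inverses under the row-major (C-order) layout used
+ // here; for instance, with shape [|2; 3|], indexToFlatIndex [|2; 3|] [|1; 2|]
+ // = 1*3 + 2 = 5 and flatIndexToIndex [|2; 3|] 5 = [|1; 2|].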
diff --git a/src/TensorMath/Tensor.Slicing.fs b/src/TensorMath/Tensor.Slicing.fs
new file mode 100644
index 0000000..fbf3016
--- /dev/null
+++ b/src/TensorMath/Tensor.Slicing.fs
@@ -0,0 +1,2695 @@
+// Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin)
+// and other contributors, see LICENSE in root of repository.
+//
+// BSD 2-Clause License. See LICENSE in root of repository.
+
+namespace TensorMath
+
+open TensorMath
+open System.Diagnostics.CodeAnalysis
+
+[<AutoOpen>]
+module SlicingExtensions =
+ type Tensor with
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option) =
+ // Dims: 1
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]]
+ t.GetSlice(bounds)
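+ // F# slice syntax lowers onto these overloads: t[1..3] calls
+ // t.GetSlice(Some 1, Some 3) and t[..3] calls t.GetSlice(None, Some 3), with
+ // omitted endpoints defaulting to the dimension's full extent.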
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int) =
+ // Dims: 1
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let bounds = array2D [[i0min; i0max; i0given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option) =
+ // Dims: 2
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int) =
+ // Dims: 2
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option) =
+ // Dims: 2
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int) =
+ // Dims: 2
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option) =
+ // Dims: 3
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int) =
+ // Dims: 3
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option) =
+ // Dims: 3
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int) =
+ // Dims: 3
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option) =
+ // Dims: 3
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int) =
+ // Dims: 3
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option) =
+ // Dims: 3
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int) =
+ // Dims: 3
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3:int) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3:int) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3:int) =
+ // Dims: 4
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3:int) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3:int) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3min:int option, i3max:int option) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3:int) =
+ // Dims: 4
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2min:int option, i2max:int option, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1:int, i2:int, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1min:int option, i1max:int option, i2:int, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2min:int option, i2max:int option, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3min:int option, i3max:int option, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3:int, i4min:int option, i4max:int option) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0:int, i1:int, i2:int, i3:int, i4:int) =
+ // Dims: 5
+ let i0given = 1
+ let i0min = i0
+ let i0max = i0
+ let i1given = 1
+ let i1min = i1
+ let i1max = i1
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4min:int option, i4max:int option, i5:int) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = 1
+ let i5min = i5
+ let i5max = i5
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3min:int option, i3max:int option, i4:int, i5:int) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let i5given = 1
+ let i5min = i5
+ let i5max = i5
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4min:int option, i4max:int option, i5:int) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = 1
+ let i5min = i5
+ let i5max = i5
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4:int, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ ///
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2min:int option, i2max:int option, i3:int, i4:int, i5:int) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = if i2min.IsSome && i2max.IsSome then 1 else 0
+ let i2min = defaultArg i2min 0
+ let i2max = defaultArg i2max (t.shape[2] - 1)
+ let i3given = 1
+ let i3min = i3
+ let i3max = i3
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let i5given = 1
+ let i5min = i5
+ let i5max = i5
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ /// <summary></summary>
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ /// <summary></summary>
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4min:int option, i4max:int option, i5:int) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = if i4min.IsSome && i4max.IsSome then 1 else 0
+ let i4min = defaultArg i4min 0
+ let i4max = defaultArg i4max (t.shape[4] - 1)
+ let i5given = 1
+ let i5min = i5
+ let i5max = i5
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ /// <summary></summary>
+ member t.GetSlice(i0min:int option, i0max:int option, i1min:int option, i1max:int option, i2:int, i3min:int option, i3max:int option, i4:int, i5min:int option, i5max:int option) =
+ // Dims: 6
+ let i0given = if i0min.IsSome && i0max.IsSome then 1 else 0
+ let i0min = defaultArg i0min 0
+ let i0max = defaultArg i0max (t.shape[0] - 1)
+ let i1given = if i1min.IsSome && i1max.IsSome then 1 else 0
+ let i1min = defaultArg i1min 0
+ let i1max = defaultArg i1max (t.shape[1] - 1)
+ let i2given = 1
+ let i2min = i2
+ let i2max = i2
+ let i3given = if i3min.IsSome && i3max.IsSome then 1 else 0
+ let i3min = defaultArg i3min 0
+ let i3max = defaultArg i3max (t.shape[3] - 1)
+ let i4given = 1
+ let i4min = i4
+ let i4max = i4
+ let i5given = if i5min.IsSome && i5max.IsSome then 1 else 0
+ let i5min = defaultArg i5min 0
+ let i5max = defaultArg i5max (t.shape[5] - 1)
+ let bounds = array2D [[i0min; i0max; i0given]; [i1min; i1max; i1given]; [i2min; i2max; i2given]; [i3min; i3max; i3given]; [i4min; i4max; i4given]; [i5min; i5max; i5given]]
+ t.GetSlice(bounds)
+ [<ExcludeFromCodeCoverage>]
+ /// <summary></summary>