Commit 7988987

Fixed vectorized solver but only for test data; next we do an actual processing set. Also added __init__.py files where needed.
jrhosk committed Dec 16, 2024
1 parent f7b36f5 commit 7988987
Showing 6 changed files with 1,892 additions and 3 deletions.
1 change: 1 addition & 0 deletions src/calviper/__init__.py

@@ -0,0 +1 @@
+from . import math
3 changes: 3 additions & 0 deletions src/calviper/math/__init__.py

@@ -0,0 +1,3 @@
+from . import loss
+from . import optimizer
+from . import solver
8 changes: 6 additions & 2 deletions src/calviper/math/optimizer.py

@@ -7,11 +7,15 @@ def __init__(self, alpha: float = 1e-3):

     @staticmethod
     def gradient(target: np.ndarray, model: np.ndarray, parameter: np.ndarray) -> np.ndarray:
-        cache_ = target * target.T
+        cache_ = target * model.conj()
+        #print(f"cache:\n{cache_}")

         numerator_ = np.matmul(cache_, parameter)
         denominator_ = np.matmul(model * model.conj(), parameter * parameter.conj())

+        #print(f"numerator:\n{numerator_}")
+        #print(f"denominator:\n{denominator_}")
+
         gradient_ = (numerator_ / denominator_) - parameter

         return gradient_
@@ -24,7 +28,7 @@ def loss(y: np.ndarray, y_pred: np.ndarray) -> float:
         :param y_pred: Predicted values.
         :return: Mean squared error.
         """
-        return np.mean(np.square(y_pred - y))
+        return np.mean(np.power(y_pred - y, 2))

     def step(self, parameter: np.ndarray, gradient: np.ndarray) -> np.ndarray:
         parameter = parameter + self.alpha * gradient
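Read as math, the corrected gradient is a per-antenna update. Taking target as the observed visibility matrix V, model as the model visibilities M, and parameter as the current gain vector g (roles inferred from the signature, not stated in the diff):

    $$\nabla_i = \frac{\sum_j V_{ij}\, M_{ij}^{*}\, g_j}{\sum_j |M_{ij}|^{2}\, |g_j|^{2}} - g_i$$

step then moves each gain a fraction alpha toward the closed-form per-antenna least-squares gain, a damped fixed-point update of the kind used in StEFCal-style gain solvers.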
1 change: 1 addition & 0 deletions src/calviper/math/solver/__init__.py

@@ -0,0 +1 @@
+from . import least_squares
7 changes: 6 additions & 1 deletion src/calviper/math/solver/least_squares.py

@@ -47,6 +47,7 @@ def _solve(self, vis, iterations, loss=mse, optimizer=None, alpha=0.1):

                 _step[i] = (_numerator / _denominator) - _gains[i]

+                print(f"step({i}): {_step[i]}")
                 _gains[i] = _gains[i] + alpha * _step[i]

         return _gains
@@ -66,12 +67,16 @@ def solve(self, vis, iterations, optimizer=MeanSquaredError(), stopping=1e-3):
         self.losses = []

         for n in range(iterations):
+            # Fill this in when I figure out the most optimal way to calculate the error given the
+            # input data structure.
+            #self.losses.append(optimizer.loss(y, y_pred))
+
             gradient_ = optimizer.gradient(
                 target=vis,
                 model=model_,
                 parameter=self.parameter
             )
-
+            #print(f"gradient({n}): {gradient_}")
             self.parameter = optimizer.step(
                 parameter=self.parameter,
                 gradient=gradient_
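To sanity-check the loop in solve, here is a self-contained sketch under stated assumptions: the MeanSquaredError body is transcribed from the optimizer.py diff above, while the toy setup (a 4-antenna unit model, gains_true, rng, the iteration count) is invented for illustration and is not part of calviper.

import numpy as np

# Hypothetical standalone mirror of the solve loop; the class body follows
# the optimizer.py diff above, everything else is illustrative only.
class MeanSquaredError:
    def __init__(self, alpha: float = 1e-3):
        self.alpha = alpha

    @staticmethod
    def gradient(target: np.ndarray, model: np.ndarray, parameter: np.ndarray) -> np.ndarray:
        cache_ = target * model.conj()
        numerator_ = np.matmul(cache_, parameter)
        denominator_ = np.matmul(model * model.conj(), parameter * parameter.conj())
        return (numerator_ / denominator_) - parameter

    def step(self, parameter: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        return parameter + self.alpha * gradient

# Toy test data: visibilities V_ij = g_i * conj(g_j) built from known gains
# with a unit model, the regime the commit message says was checked.
rng = np.random.default_rng(0)
gains_true = rng.normal(size=4) + 1j * rng.normal(size=4)
model = np.ones((4, 4), dtype=complex)
vis = np.outer(gains_true, gains_true.conj()) * model

optimizer = MeanSquaredError(alpha=0.1)
parameter = np.ones(4, dtype=complex)  # initial gain guess
for _ in range(200):
    gradient_ = optimizer.gradient(target=vis, model=model, parameter=parameter)
    parameter = optimizer.step(parameter=parameter, gradient=gradient_)

# For this toy case the iteration should settle near gains_true up to an
# overall phase, so compare amplitudes to check.
print(np.abs(parameter), np.abs(gains_true))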
[Diff for the sixth changed file, which accounts for the remaining ~1,875 added lines, did not load.]