
Commit 180e932
No idea what is going on ....
jrhosk committed Feb 25, 2025
1 parent 2bda9f8 commit 180e932
Showing 2 changed files with 6 additions and 11 deletions.
src/calviper/math/optimizer.py (9 changes: 3 additions & 6 deletions)

@@ -47,16 +47,13 @@ def gradient(target: np.ndarray, model: np.ndarray, parameter: np.ndarray) -> np
         for antenna_j in range(n_antennas):
             for p in [0, 1]:
                 for q in [0, 1]:
-                    #if antenna_i == antenna_j:
-                    #    continue
+                    if antenna_i == antenna_j:
+                        continue

                     numerator_[0, 0, p, antenna_i] += target[0, 0, p, q, antenna_i, antenna_j] * parameter[0, 0, q, antenna_j] * model[0, 0, p, q, antenna_i, antenna_j].conj()
                     denominator_[0, 0, p, antenna_i] += parameter[0, 0, q, antenna_j] * parameter[0, 0, q, antenna_j].conj() * model[0, 0, p, q, antenna_i, antenna_j].conj() * model[0, 0, p, q, antenna_i, antenna_j]
-                    #print(f"parameter[p={p}, j={antenna_j}]: {parameter[0, 0, q, antenna_j]}\tmodel_[p={p}, i={antenna_i}, j={antenna_j}]:{model[0, 0, p, q, antenna_i, antenna_j]}")
-
-            #print(f"\t --- denominator: {denominator_}")

-        gradient_ = (numerator_ / denominator_) - parameter
+        gradient_ = (numerator_ / denominator_).conj() - parameter

         return gradient_

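Net effect of this hunk: the `antenna_i == antenna_j` guard is now active rather than commented out, so autocorrelation terms no longer contribute to the sums, and the numerator/denominator ratio is conjugated before the current parameter is subtracted. A minimal single-polarization sketch of the same update rule, with illustrative names (`vis`, `model`, `gains`, `gradient_step`) that are not from the repository:

```python
import numpy as np

def gradient_step(vis: np.ndarray, model: np.ndarray, gains: np.ndarray) -> np.ndarray:
    """One update direction for a least-squares gain solve, vis[i, j] ~ g[i] * model[i, j] * conj(g[j])."""
    n_ant = gains.shape[0]
    numerator = np.zeros(n_ant, dtype=np.complex64)
    denominator = np.zeros(n_ant, dtype=np.complex64)

    for i in range(n_ant):
        for j in range(n_ant):
            if i == j:  # skip autocorrelation terms, as the commit now does
                continue
            numerator[i] += vis[i, j] * gains[j] * np.conj(model[i, j])
            denominator[i] += gains[j] * np.conj(gains[j]) * np.conj(model[i, j]) * model[i, j]

    # The commit conjugates the ratio before subtracting the current gains.
    return np.conj(numerator / denominator) - gains
```

Whether the conjugate belongs there depends on the phase convention used for `vis` (V_ij modeled as g_i M_ij conj(g_j) versus its conjugate), which the commit does not document; the commit message suggests this was still being worked out.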
src/calviper/math/solver/least_squares.py (8 changes: 3 additions & 5 deletions)

@@ -114,9 +114,7 @@ def solve(self, vis, iterations, optimizer=MeanSquaredError(), stopping=1e-3):
         eye = np.identity(n_antenna1, dtype=np.complex64)
         np.fill_diagonal(anti_eye, np.complex64(1., 0.))

-        #self.model_ = self.model_ #* eye
-        self.model_ = self.model_ * anti_eye
-        #self.model_ = self.model_ * np.random.uniform(low=0.0, high=1.0, size=self.model_.shape) * anti_eye
+        self.model_ = self.model_

         self.losses = []

@@ -142,8 +140,8 @@ def solve(self, vis, iterations, optimizer=MeanSquaredError(), stopping=1e-3):

             self.losses.append(optimizer.loss(y_pred, vis))

-            #if n % (iterations // 10) == 0:
-            #    logger.info(f"iteration: {n}\tloss: {np.abs(self.losses[-1])}")
+            if n % (iterations // 10) == 0:
+                logger.info(f"iteration: {n}\tloss: {np.abs(self.losses[-1])}")

             if self.losses[-1] < stopping:
                 logger.info(f"Iteration: ({n})\tStopping criterion reached: {self.losses[-1]}")
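Two observations on this file. In the first hunk the replacement line `self.model_ = self.model_` is a no-op, so the `anti_eye` masking is removed entirely and the `anti_eye` / `eye` setup above it appears to become dead code in this method. In the second hunk the re-enabled progress log computes `n % (iterations // 10)`; since `iterations // 10` is 0 for any `iterations < 10`, the modulo raises `ZeroDivisionError` on short runs. A self-contained sketch of a guarded variant (`log_progress` and `log_every` are illustrative names, not from the repository):

```python
import logging

import numpy as np

logger = logging.getLogger(__name__)

def log_progress(n: int, iterations: int, loss: complex) -> None:
    """Log roughly ten progress lines per run without risking n % 0."""
    log_every = max(1, iterations // 10)  # iterations // 10 is 0 when iterations < 10
    if n % log_every == 0:
        logger.info(f"iteration: {n}\tloss: {np.abs(loss)}")
```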
