
Commit a713d40
Fix typos; default learning_rate=None
1 parent 98e9d48


dadapy/feature_weighting.py (1 file changed, +6 -6 lines)
@@ -52,7 +52,7 @@ def with_check(*args, **kwargs):
         feature_selector: type[FeatureWeighting] = args[0]
         if feature_selector.maxk != feature_selector.N - 1:
             warnings.warn(
-                f"""maxk neighbors is not available for this functionality.\
+                f"""maxk neighbors is not available for this functionality.\n
                 It will be ignored and treated as the number of data-1, {feature_selector.N}""",
                 stacklevel=2,
             )
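The only change in this hunk is the escape at the end of the f-string's first line. In Python, a backslash immediately before the source newline acts as a line continuation even inside triple-quoted strings, so the old warning text was rendered as one run-on line; `\n` makes the break explicit. A minimal standalone sketch of the difference (not code from the repository):

```python
# A trailing backslash inside a triple-quoted string escapes the source
# newline, so the two lines are joined with no break at all:
joined = """maxk neighbors is not available.\
It will be ignored."""
assert "\n" not in joined  # -> "maxk neighbors is not available.It will be ignored."

# With an explicit \n, the escape inserts a line break; note the literal
# newline that follows it in the source is kept too, giving two breaks:
broken = """maxk neighbors is not available.\n
It will be ignored."""
assert broken.count("\n") == 2

print(joined)
print(broken)
```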
@@ -447,18 +447,18 @@ def return_backward_greedy_dii_elimination(
         initial_gammas: Union[np.ndarray, int, float] = None,
         lambd: float = None,
         n_epochs: int = 100,
-        learning_rate: float = 0.1,
+        learning_rate: float = None,
         constrain: bool = False,
         decaying_lr: bool = True,
     ):
-        """Do a stepwise backward eliminitaion of feature weights, always eleminiating the lowest weight,
-        and after each elimination GD otpmize the DII
+        """Do a stepwise backward elimination of feature weights, always eliminating the lowest weight;
+        after each elimination the DII is optimized by gradient descent using the remaining features

         Args:
             target_data: FeatureWeighting object, containing the groundtruth data
                 (D_groundtruth x N array, period (optional)) to be compared to.
             initial_gammas (np.ndarray or list): D(input) initial weights for the input features. No zeros allowed here
-            lambd (float): softmax scaling. If None (preferred) this chosen automatically with compute_optimial_lambda
+            lambd (float): softmax scaling. If None (preferred) this chosen automatically with compute_optimal_lambda
             n_epochs (int): number of epochs in each optimization cycle
             learning_rate (float): learning rate.
                 Has to be tuned, especially if constrain=True (otherwise optmization could fail)
@@ -468,8 +468,8 @@ def return_backward_greedy_dii_elimination(
                 - every 10 epochs the learning rate will be halfed

         Returns:
-            final_weights: np.ndarray, shape (D x D). Array of the optmized weights for each number of non-zero weights.
             final_diis: np.ndarray, shape (D). Array of the optmized DII for each of the according weights.
+            final_weights: np.ndarray, shape (D x D). Array of the optmized weights for each number of non-zero weights.

         History entries added to FeatureWeighting object:
             dii_per_epoch: np.ndarray, shape (D, n_epochs+1, D).
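For orientation, a hypothetical call sketch against the post-commit signature. Only the parameter names, the changed default, and the corrected Returns order come from this diff; the `coordinates=` constructor keyword and the exact handling of `learning_rate=None` are assumptions to verify against the dadapy documentation:

```python
# Hypothetical usage sketch, not code from this commit. The diff only shows
# the default changing from 0.1 to None; how None is resolved internally is
# an assumption, as is the FeatureWeighting constructor signature.
import numpy as np
from dadapy.feature_weighting import FeatureWeighting

rng = np.random.default_rng(0)
X_input = rng.standard_normal((200, 5))   # N=200 points, D=5 input features
X_truth = rng.standard_normal((200, 3))   # groundtruth space to compare against

f_input = FeatureWeighting(coordinates=X_input)
f_truth = FeatureWeighting(coordinates=X_truth)

# lambd=None (preferred) picks the softmax scaling via compute_optimal_lambda;
# learning_rate=None presumably defers the step-size choice to the optimizer.
final_diis, final_weights = f_input.return_backward_greedy_dii_elimination(
    target_data=f_truth,
    n_epochs=100,
    learning_rate=None,
    decaying_lr=True,     # halves the learning rate every 10 epochs
)
# Per the corrected docstring: final_diis has shape (D,), final_weights (D, D).
```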
