@@ -359,6 +359,13 @@ class PySRRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
359
359
Whether to use the frequency mentioned above in the tournament,
360
360
rather than just the simulated annealing.
361
361
Default is `True`.
362
+ adaptive_parsimony_scaling : float
363
+ If using the adaptive parsimony strategy (`use_frequency` and
364
+ `use_frequency_in_tournament`), this is how much to (exponentially)
365
+ weight the contribution. If you find that the search is only optimizing
366
+ the most complex expressions while the simpler expressions remain stagnant,
367
+ you should increase this value.
368
+ Default is `20.0`.
362
369
alpha : float
363
370
Initial temperature for simulated annealing
364
371
(requires `annealing` to be `True`).
@@ -408,6 +415,12 @@ class PySRRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
408
415
weight_simplify : float
409
416
Relative likelihood for mutation to simplify constant parts by evaluation
410
417
Default is `0.0020`.
418
+ weight_optimize : float
419
+ Constant optimization can also be performed as a mutation, in addition to
420
+ the normal strategy controlled by `optimize_probability` which happens
421
+ every iteration. Using it as a mutation is useful if you want to use
422
+ a large `ncyclesperiteration`, in which case optimization may not
+ otherwise happen very often.
423
+ Default is `0.0`.
411
424
crossover_probability : float
412
425
Absolute probability of crossover-type genetic operation, instead of a mutation.
413
426
Default is `0.066`.
@@ -664,6 +677,7 @@ def __init__(
664
677
parsimony = 0.0032 ,
665
678
use_frequency = True ,
666
679
use_frequency_in_tournament = True ,
680
+ adaptive_parsimony_scaling = 20.0 ,
667
681
alpha = 0.1 ,
668
682
annealing = False ,
669
683
early_stop_condition = None ,
@@ -678,6 +692,7 @@ def __init__(
678
692
weight_mutate_operator = 0.47 ,
679
693
weight_randomize = 0.00023 ,
680
694
weight_simplify = 0.0020 ,
695
+ weight_optimize = 0.0 ,
681
696
crossover_probability = 0.066 ,
682
697
skip_mutation_failures = True ,
683
698
migration = True ,
@@ -748,6 +763,7 @@ def __init__(
748
763
self .parsimony = parsimony
749
764
self .use_frequency = use_frequency
750
765
self .use_frequency_in_tournament = use_frequency_in_tournament
766
+ self .adaptive_parsimony_scaling = adaptive_parsimony_scaling
751
767
self .alpha = alpha
752
768
self .annealing = annealing
753
769
# - Evolutionary search parameters
@@ -760,6 +776,7 @@ def __init__(
760
776
self .weight_mutate_operator = weight_mutate_operator
761
777
self .weight_randomize = weight_randomize
762
778
self .weight_simplify = weight_simplify
779
+ self .weight_optimize = weight_optimize
763
780
self .crossover_probability = crossover_probability
764
781
self .skip_mutation_failures = skip_mutation_failures
765
782
# -- Migration parameters
@@ -1534,6 +1551,7 @@ def _run(self, X, y, mutated_params, weights, seed):
1534
1551
simplify = self .weight_simplify ,
1535
1552
randomize = self .weight_randomize ,
1536
1553
do_nothing = self .weight_do_nothing ,
1554
+ optimize = self .weight_optimize ,
1537
1555
)
1538
1556
1539
1557
# Call to Julia backend.
@@ -1569,6 +1587,7 @@ def _run(self, X, y, mutated_params, weights, seed):
1569
1587
warmup_maxsize_by = self .warmup_maxsize_by ,
1570
1588
use_frequency = self .use_frequency ,
1571
1589
use_frequency_in_tournament = self .use_frequency_in_tournament ,
1590
+ adaptive_parsimony_scaling = self .adaptive_parsimony_scaling ,
1572
1591
npop = self .population_size ,
1573
1592
ncycles_per_iteration = self .ncyclesperiteration ,
1574
1593
fraction_replaced = self .fraction_replaced ,
0 commit comments