Commit 465f35e

Merge pull request #208 from MilesCranmer/update-backend
Speed up evaluation with `turbo` parameter
2 parents a934083 + 496cedf

3 files changed: +56 -38 lines

pysr/sr.py (+50 -35)
@@ -476,6 +476,11 @@ class PySRRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
         algorithm than regularized evolution, but does cycles 15%
         faster. May be algorithmically less efficient.
         Default is `False`.
+    turbo: bool
+        (Experimental) Whether to use LoopVectorization.jl to speed up the
+        search evaluation. Certain operators may not be supported.
+        Does not support 16-bit precision floats.
+        Default is `False`.
     precision : int
         What precision to use for the data. By default this is `32`
         (float32), but you can select `64` or `16` as well, giving
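
For context, a minimal usage sketch of the new flag; the dataset, operator choices, and iteration count below are illustrative and not taken from this commit:

from pysr import PySRRegressor
import numpy as np

# Toy data: y = 2 * cos(x0) + x1^2
X = np.random.randn(100, 2)
y = 2 * np.cos(X[:, 0]) + X[:, 1] ** 2

# turbo=True asks the backend to evaluate expressions with
# LoopVectorization.jl; it is experimental, so switch back to
# turbo=False if an operator is unsupported or precision=16 is needed.
model = PySRRegressor(
    niterations=5,
    binary_operators=["+", "*"],
    unary_operators=["cos"],
    turbo=True,
)
model.fit(X, y)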
@@ -692,6 +697,7 @@ def __init__(
         batching=False,
         batch_size=50,
         fast_cycle=False,
+        turbo=False,
         precision=32,
         random_state=None,
         deterministic=False,
@@ -779,6 +785,7 @@ def __init__(
         self.batching = batching
         self.batch_size = batch_size
         self.fast_cycle = fast_cycle
+        self.turbo = turbo
         self.precision = precision
         self.random_state = random_state
         self.deterministic = deterministic
@@ -1518,25 +1525,22 @@ def _run(self, X, y, mutated_params, weights, seed):
             str(self.early_stop_condition) if self.early_stop_condition else None
         )
 
-        mutation_weights = np.array(
-            [
-                self.weight_mutate_constant,
-                self.weight_mutate_operator,
-                self.weight_add_node,
-                self.weight_insert_node,
-                self.weight_delete_node,
-                self.weight_simplify,
-                self.weight_randomize,
-                self.weight_do_nothing,
-            ],
-            dtype=float,
+        mutation_weights = SymbolicRegression.MutationWeights(
+            mutate_constant=self.weight_mutate_constant,
+            mutate_operator=self.weight_mutate_operator,
+            add_node=self.weight_add_node,
+            insert_node=self.weight_insert_node,
+            delete_node=self.weight_delete_node,
+            simplify=self.weight_simplify,
+            randomize=self.weight_randomize,
+            do_nothing=self.weight_do_nothing,
         )
 
         # Call to Julia backend.
         # See https://github.com/MilesCranmer/SymbolicRegression.jl/blob/master/src/OptionsStruct.jl
         options = SymbolicRegression.Options(
-            binary_operators=Main.eval(str(tuple(binary_operators)).replace("'", "")),
-            unary_operators=Main.eval(str(tuple(unary_operators)).replace("'", "")),
+            binary_operators=Main.eval(str(binary_operators).replace("'", "")),
+            unary_operators=Main.eval(str(unary_operators).replace("'", "")),
             bin_constraints=bin_constraints,
             una_constraints=una_constraints,
             complexity_of_operators=complexity_of_operators,
@@ -1545,45 +1549,47 @@ def _run(self, X, y, mutated_params, weights, seed):
             nested_constraints=nested_constraints,
             loss=custom_loss,
             maxsize=int(self.maxsize),
-            hofFile=_escape_filename(self.equation_file_),
+            output_file=_escape_filename(self.equation_file_),
             npopulations=int(self.populations),
             batching=self.batching,
-            batchSize=int(min([batch_size, len(X)]) if self.batching else len(X)),
-            mutationWeights=mutation_weights,
-            probPickFirst=self.tournament_selection_p,
-            ns=self.tournament_selection_n,
+            batch_size=int(min([batch_size, len(X)]) if self.batching else len(X)),
+            mutation_weights=mutation_weights,
+            tournament_selection_p=self.tournament_selection_p,
+            tournament_selection_n=self.tournament_selection_n,
             # These have the same name:
             parsimony=self.parsimony,
             alpha=self.alpha,
             maxdepth=maxdepth,
             fast_cycle=self.fast_cycle,
+            turbo=self.turbo,
             migration=self.migration,
-            hofMigration=self.hof_migration,
-            fractionReplacedHof=self.fraction_replaced_hof,
-            shouldOptimizeConstants=self.should_optimize_constants,
-            warmupMaxsizeBy=self.warmup_maxsize_by,
-            useFrequency=self.use_frequency,
-            useFrequencyInTournament=self.use_frequency_in_tournament,
+            hof_migration=self.hof_migration,
+            fraction_replaced_hof=self.fraction_replaced_hof,
+            should_optimize_constants=self.should_optimize_constants,
+            warmup_maxsize_by=self.warmup_maxsize_by,
+            use_frequency=self.use_frequency,
+            use_frequency_in_tournament=self.use_frequency_in_tournament,
             npop=self.population_size,
-            ncyclesperiteration=self.ncyclesperiteration,
-            fractionReplaced=self.fraction_replaced,
+            ncycles_per_iteration=self.ncyclesperiteration,
+            fraction_replaced=self.fraction_replaced,
             topn=self.topn,
             verbosity=self.verbosity,
             optimizer_algorithm=self.optimizer_algorithm,
             optimizer_nrestarts=self.optimizer_nrestarts,
-            optimize_probability=self.optimize_probability,
+            optimizer_probability=self.optimize_probability,
             optimizer_iterations=self.optimizer_iterations,
-            perturbationFactor=self.perturbation_factor,
+            perturbation_factor=self.perturbation_factor,
             annealing=self.annealing,
-            stateReturn=True,  # Required for state saving.
+            return_state=True,  # Required for state saving.
             progress=progress,
             timeout_in_seconds=self.timeout_in_seconds,
-            crossoverProbability=self.crossover_probability,
+            crossover_probability=self.crossover_probability,
             skip_mutation_failures=self.skip_mutation_failures,
             max_evals=self.max_evals,
-            earlyStopCondition=early_stop_condition,
+            early_stop_condition=early_stop_condition,
             seed=seed,
             deterministic=self.deterministic,
+            define_helper_functions=False,
         )
 
         # Convert data to desired precision
@@ -1603,7 +1609,16 @@ def _run(self, X, y, mutated_params, weights, seed):
         else:
             Main.weights = None
 
-        cprocs = 0 if multithreading else self.procs
+        if self.procs == 0 and not multithreading:
+            parallelism = "serial"
+        elif multithreading:
+            parallelism = "multithreading"
+        else:
+            parallelism = "multiprocessing"
+
+        cprocs = (
+            None if parallelism in ["serial", "multithreading"] else int(self.procs)
+        )
 
         # Call to Julia backend.
         # See https://github.com/MilesCranmer/SymbolicRegression.jl/blob/master/src/SymbolicRegression.jl
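
As I read the updated `_run` logic, the constructor arguments now map onto the backend's parallelism modes roughly as sketched below; the example values are illustrative and not part of this commit:

from pysr import PySRRegressor

# multithreading=True              -> parallelism="multithreading" (numprocs=None)
model_mt = PySRRegressor(multithreading=True)

# multithreading=False, procs > 0  -> parallelism="multiprocessing", numprocs=int(procs)
model_mp = PySRRegressor(procs=4, multithreading=False)

# multithreading=False, procs == 0 -> parallelism="serial" (numprocs=None)
model_serial = PySRRegressor(procs=0, multithreading=False)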
@@ -1614,8 +1629,8 @@ def _run(self, X, y, mutated_params, weights, seed):
             niterations=int(self.niterations),
             varMap=self.feature_names_in_.tolist(),
             options=options,
-            numprocs=int(cprocs),
-            multithreading=bool(multithreading),
+            numprocs=cprocs,
+            parallelism=parallelism,
             saved_state=self.raw_julia_state_,
             addprocs_function=cluster_manager,
         )

pysr/version.py (+2 -2)
@@ -1,2 +1,2 @@
-__version__ = "0.11.5"
-__symbolic_regression_jl_version__ = "0.12.6"
+__version__ = "0.11.6"
+__symbolic_regression_jl_version__ = "0.14.0"

test/test.py (+4 -1)
@@ -70,12 +70,13 @@ def test_linear_relation_weighted(self):
         print(model.equations_)
         self.assertLessEqual(model.get_best()["loss"], 1e-4)
 
-    def test_multiprocessing(self):
+    def test_multiprocessing_turbo(self):
         y = self.X[:, 0]
         model = PySRRegressor(
             **self.default_test_kwargs,
             procs=2,
             multithreading=False,
+            turbo=True,
             early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 1",
         )
         model.fit(self.X, y)
@@ -108,6 +109,8 @@ def test_multioutput_custom_operator_quiet_custom_complexity(self):
             verbosity=0,
             **self.default_test_kwargs,
             procs=0,
+            # Test custom operators with turbo:
+            turbo=True,
             # Test custom operators with constraints:
             nested_constraints={"square_op": {"square_op": 3}},
             constraints={"square_op": 10},

0 commit comments
