Commit

Unit test for the MOEA

chen0040 committed Jun 24, 2017
1 parent 7079736 commit 3b4708c
Showing 8 changed files with 507 additions and 23 deletions.
138 changes: 137 additions & 1 deletion README.md
@@ -1,2 +1,138 @@
# spark-opt-moea
Spark Multi-Objective Evolutionary Computation Framework

Spark implementation of Multi-Objective Evolutionary Computation Framework for Distributed Computing Numerical Optimization

# Features

The optimization is distributed so that computationally intensive cost evaluations can be spread across a computing cluster via Spark.
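Internally (see the `SparkNSGAII.evaluate` diff further down), each generation's population is shipped to the executors as an RDD and each solution is evaluated against a broadcast copy of the algorithm configuration. A minimal sketch of that pattern, using the names from this commit:

```java
// Sketch of the distributed evaluation step (mirrors SparkNSGAII.evaluate in this commit;
// assumes Solution and Mediator are Serializable and `mediator` carries the cost function and bounds).
Broadcast<Mediator> mediatorBroadcast = context.broadcast(mediator);
JavaRDD<Solution> rdd = context.parallelize(population.getSolutions());
List<Solution> evaluated = rdd
      .map(s -> { s.evaluate(mediatorBroadcast.getValue()); return s; })
      .collect();
population.getSolutions().clear();
population.getSolutions().addAll(evaluated);
```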

The following multi-objective EAs are supported:

* NSGA-II
* GDE-3


# Install

Add the following dependency to your POM file:

```xml
<dependency>
  <groupId>com.github.chen0040</groupId>
  <artifactId>java-moea</artifactId>
  <version>1.0.5</version>
</dependency>
```

# Usage

### NSGA-II for solving the NDND 2-Objective Problem

The sample code below shows how to use NSGA-II to solve the NDND 2-objective optimization problem.
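For reference, the two objectives implemented by the sample's cost function can be written as follows (reconstructed directly from the code, with x1, x2 in [0, 1] and a = 4 as in the code):

```latex
f_1(x) = 1 - e^{-4 x_1} \sin^4(5 \pi x_1)
g(x)   = \begin{cases} 4 - 3 e^{-2500 (x_2 - 0.2)^2}, & 0 < x_2 < 0.4 \\ 4 - 3 e^{-25 (x_2 - 0.7)^2}, & \text{otherwise} \end{cases}
h(x)   = \begin{cases} 1 - (f_1 / g)^4, & f_1 < g \\ 0, & \text{otherwise} \end{cases}, \qquad f_2(x) = g(x) \, h(x)
```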

```java
SparkNSGAII algorithm = new SparkNSGAII();
algorithm.setCostFunction((CostFunction) (x, objective_index, lowerBounds, upperBounds) -> {
   double f1 = 1 - Math.exp((-4) * x.get(0)) * Math.pow(Math.sin(5 * Math.PI * x.get(0)), 4);
   if (objective_index == 0) {
      // objective 0
      return f1;
   } else {
      // objective 1
      double f2, g, h;
      if (x.get(1) > 0 && x.get(1) < 0.4) {
         g = 4 - 3 * Math.exp(-2500 * (x.get(1) - 0.2) * (x.get(1) - 0.2));
      } else {
         g = 4 - 3 * Math.exp(-25 * (x.get(1) - 0.7) * (x.get(1) - 0.7));
      }
      double a = 4;
      if (f1 < g) {
         h = 1 - Math.pow(f1 / g, a);
      } else {
         h = 0;
      }
      f2 = g * h;
      return f2;
   }
});

algorithm.setDimension(2);
algorithm.setObjectiveCount(2);
algorithm.setLowerBounds(Arrays.asList(0.0, 0.0));
algorithm.setUpperBounds(Arrays.asList(1.0, 1.0));

algorithm.setPopulationSize(1000);
algorithm.setMaxGenerations(100);
algorithm.setDisplayEvery(10);

JavaSparkContext context = SparkContextFactory.createSparkContext("testing-1");
NondominatedPopulation pareto_front = algorithm.solve(context);
```
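`SparkContextFactory` is a helper from chen0040's spark-ml commons library (imported as `com.github.chen0040.sparkml.commons.SparkContextFactory` in the test file below). If you prefer not to depend on it, a plain Spark context works as well; the local master URL here is an assumption for single-machine testing:

```java
SparkConf conf = new SparkConf()
      .setAppName("testing-1")
      .setMaster("local[*]"); // assumption: run locally using all available cores
JavaSparkContext context = new JavaSparkContext(conf);
```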

`pareto_front` is the set of the best solutions found by the algorithm (i.e., the Pareto front).

To access the individual solutions in the Pareto front:

```java
for (int i = 0; i < pareto_front.size(); ++i) {
   Solution solution = pareto_front.get(i);
   // e.g., read the objective values: solution.getCost(0), solution.getCost(1)
}
```

To visualize the Pareto front:

```java
List<TupleTwo<Double, Double>> pareto_front_data = pareto_front.front2D();
ParetoFront chart = new ParetoFront(pareto_front_data, "Pareto Front");
chart.showIt(true);
```

### GDE-3 for solving the NDND 2-Objective Problem

GDE-3 differs from NSGA-II only in how offspring are generated: as the diff below shows, `SparkGDE3` extends `SparkNSGAII` and overrides `evolve` with a differential-evolution style update. The following sample code shows how to use GDE-3 to solve the same NDND 2-objective optimization problem:

```java
SparkGDE3 algorithm = new SparkGDE3();
algorithm.setCostFunction((CostFunction) (x, objective_index, lowerBounds, upperBounds) -> {
   double f1 = 1 - Math.exp((-4) * x.get(0)) * Math.pow(Math.sin(5 * Math.PI * x.get(0)), 4);
   if (objective_index == 0) {
      // objective 0
      return f1;
   } else {
      // objective 1
      double f2, g, h;
      if (x.get(1) > 0 && x.get(1) < 0.4) {
         g = 4 - 3 * Math.exp(-2500 * (x.get(1) - 0.2) * (x.get(1) - 0.2));
      } else {
         g = 4 - 3 * Math.exp(-25 * (x.get(1) - 0.7) * (x.get(1) - 0.7));
      }
      double a = 4;
      if (f1 < g) {
         h = 1 - Math.pow(f1 / g, a);
      } else {
         h = 0;
      }
      f2 = g * h;
      return f2;
   }
});

algorithm.setDimension(2);
algorithm.setObjectiveCount(2);
algorithm.setLowerBounds(Arrays.asList(0.0, 0.0));
algorithm.setUpperBounds(Arrays.asList(1.0, 1.0));

algorithm.setPopulationSize(100);
algorithm.setMaxGenerations(50);
algorithm.setDisplayEvery(10);

JavaSparkContext context = SparkContextFactory.createSparkContext("testing-1");
NondominatedPopulation pareto_front = algorithm.solve(context);

List<TupleTwo<Double, Double>> pareto_front_data = pareto_front.front2D();

ParetoFront chart = new ParetoFront(pareto_front_data, "Pareto Front for NDND");
chart.showIt(true);
```
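Both examples pass a `JavaSparkContext` into `solve`. Spark allows only one active context per JVM, so if you run several algorithms in the same program, create the context once and share it, for example:

```java
// Create one shared context, then hand it to each solver in turn.
JavaSparkContext context = SparkContextFactory.createSparkContext("testing-1");

SparkNSGAII nsga2 = new SparkNSGAII();
// ... configure nsga2 as shown above ...
NondominatedPopulation front1 = nsga2.solve(context);

SparkGDE3 gde3 = new SparkGDE3();
// ... configure gde3 as shown above ...
NondominatedPopulation front2 = gde3.solve(context);
```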

6 changes: 3 additions & 3 deletions pom.xml
@@ -39,8 +39,8 @@
</issueManagement>


-    <name>Recommender Algorithms for Spark</name>
-    <description>Recommender algorithms implemented in Java and for Spark</description>
+    <name>Multi-Objective Evolutionary Computation for Spark Distributed Optimization</name>
+    <description>Spark implementation of Multi-Objective Evolutionary Computation Framework for Distributed Computing Numerical Optimization</description>
<url>https://github.com/chen0040/spark-opt-moea</url>

<distributionManagement>
@@ -539,7 +539,7 @@
<dependency>
<groupId>com.github.chen0040</groupId>
<artifactId>java-moea</artifactId>
-      <version>1.0.4</version>
+      <version>1.0.5</version>
</dependency>
</dependencies>
</project>
8 changes: 2 additions & 6 deletions src/main/java/com/github/chen0040/spark/moea/SparkGDE3.java
@@ -19,13 +19,9 @@
public class SparkGDE3 extends SparkNSGAII {
private static final long serialVersionUID = -6713947168965879195L;

-   public SparkGDE3(JavaSparkContext context) {
-      super(context);
-   }
-

   @Override
-   public void evolve()
+   public void evolve(JavaSparkContext context)
{
int index = 0;
int populationSize = getPopulationSize();
@@ -56,7 +52,7 @@ public void evolve()
}
}

-      evaluate(children);
+      evaluate(children, context);

merge2(children);

71 changes: 58 additions & 13 deletions src/main/java/com/github/chen0040/spark/moea/SparkNSGAII.java
@@ -3,14 +3,22 @@

import com.github.chen0040.data.utils.TupleTwo;
import com.github.chen0040.moea.components.*;
import com.github.chen0040.moea.enums.CrossoverType;
import com.github.chen0040.moea.enums.MutationType;
import com.github.chen0040.moea.enums.ReplacementType;
import com.github.chen0040.moea.utils.CostFunction;
import com.github.chen0040.moea.utils.InvertedCompareUtils;
import com.github.chen0040.moea.utils.TournamentSelection;
import com.github.chen0040.moea.utils.TournamentSelectionResult;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

import java.util.ArrayList;
import java.util.List;


/**
@@ -32,18 +40,19 @@ public class SparkNSGAII extends Mediator {
@Setter(AccessLevel.NONE)
protected NondominatedSortingPopulation population = new NondominatedSortingPopulation();

-   @Setter(AccessLevel.NONE)
-   private JavaSparkContext context;
-
-   public SparkNSGAII(JavaSparkContext context){
-      this.context = context;
+   private int partitionCount = 50;
+
+   private Broadcast<Mediator> mediatorBroadcast;
+
+   public SparkNSGAII(){
   }

-   public NondominatedPopulation solve(){
-      initialize();
+   public NondominatedPopulation solve(JavaSparkContext context){
+      initialize(context);
      int maxGenerations = this.getMaxGenerations();
      for(int generation = 0; generation < maxGenerations; ++generation) {
-         evolve();
+         evolve(context);
if(displayEvery > 0 && generation % displayEvery == 0){
System.out.println("Generation #" + generation + "\tArchive size: " + archive.size());
}
@@ -52,18 +61,40 @@ public NondominatedPopulation solve(){
return archive;
}

-   public void initialize(){
+   public void initialize(JavaSparkContext context){
archive.setMediator(this);
archive.clear();

population.setMediator(this);
population.initialize();
-      evaluate(population);
+      evaluate(population, context);
population.sort();
currentGeneration = 0;


}

-   public void evolve()
+   // copies the tunable configuration into a plain, serializable Mediator so it can be broadcast to the executors
+   public Mediator mediator(){
+      Mediator mediator = new Mediator();
+      mediator.setObjectiveCount(getObjectiveCount());
+      mediator.setDimension(getDimension());
+      mediator.setMaxArchive(getMaxArchive());
+      mediator.setMaxGenerations(getMaxGenerations());
+      mediator.setLowerBounds(getLowerBounds());
+      mediator.setUpperBounds(getUpperBounds());
+      mediator.setRandomGenerator(getRandomGenerator());
+      mediator.setMutationRate(getMutationRate());
+      mediator.setCrossoverType(getCrossoverType());
+      mediator.setCrossoverRate(getCrossoverRate());
+      mediator.setCostFunction(getCostFunction());
+      mediator.setPopulationSize(getPopulationSize());
+      mediator.setMutationType(getMutationType());
+      mediator.setReplacementType(getReplacementType());
+      mediator.setInterpolation4DE(getInterpolation4DE());
+      return mediator;
+   }
+
+   public void evolve(JavaSparkContext context)
{
Population offspring = new Population();
offspring.setMediator(this);
@@ -97,7 +128,7 @@ public void evolve()
offspring.add(children._2());
}

-      evaluate(offspring);
+      evaluate(offspring, context);

ReplacementType replacementType = this.getReplacementType();
if(replacementType == ReplacementType.Generational) {
@@ -109,12 +140,26 @@ public void evolve()
currentGeneration++;
}

-   protected void evaluate(Population population) {
+   protected void evaluate(Population population, JavaSparkContext context) {
+
+      // broadcast a serializable copy of the configuration (cost function, bounds, etc.) once
+      if(mediatorBroadcast == null) {
+         mediatorBroadcast = context.broadcast(mediator());
+      }
+
+      JavaRDD<Solution> solutionJavaRDD = context.parallelize(population.getSolutions());
+      // note: coalesce returns a new RDD; it must be reassigned, or the partitionCount hint is silently dropped
+      solutionJavaRDD = solutionJavaRDD.coalesce(partitionCount);
+
+      // evaluate the cost function on the executors, then collect the evaluated solutions back to the driver
+      List<Solution> solutions = solutionJavaRDD.map(s -> {
+         s.evaluate(mediatorBroadcast.getValue());
+         return s;
+      }).collect();
+
+      population.getSolutions().clear();
+      population.getSolutions().addAll(solutions);

      for (int i = 0; i < population.size(); ++i)
      {
         Solution s = population.getSolutions().get(i);
-         s.evaluate(this);

         //System.out.println("cost1: " + s.getCost(0) + "\tcost2:" + s.getCost(1));

63 changes: 63 additions & 0 deletions …/SparkGDE3GuiTest4NDND.java
@@ -0,0 +1,63 @@
package com.github.chen0040.spark.moea;


import com.github.chen0040.data.utils.TupleTwo;
import com.github.chen0040.moea.components.NondominatedPopulation;
import com.github.chen0040.moea.utils.CostFunction;
import com.github.chen0040.plt.ParetoFront;
import com.github.chen0040.sparkml.commons.SparkContextFactory;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;


/**
* Created by xschen on 18/6/2017.
*/
public class SparkGDE3GuiTest4NDND {
   public static void main(String[] args) {
      JavaSparkContext context = SparkContextFactory.createSparkContext("testing-1");
      SparkGDE3 algorithm = new SparkGDE3();
      algorithm.setCostFunction((CostFunction) (x, objective_index, lowerBounds, upperBounds) -> {
         double f1 = 1 - Math.exp((-4) * x.get(0)) * Math.pow(Math.sin(5 * Math.PI * x.get(0)), 4);
         if (objective_index == 0) {
            // objective 0
            return f1;
         } else {
            // objective 1
            double f2, g, h;
            if (x.get(1) > 0 && x.get(1) < 0.4) {
               g = 4 - 3 * Math.exp(-2500 * (x.get(1) - 0.2) * (x.get(1) - 0.2));
            } else {
               g = 4 - 3 * Math.exp(-25 * (x.get(1) - 0.7) * (x.get(1) - 0.7));
            }
            double a = 4;
            if (f1 < g) {
               h = 1 - Math.pow(f1 / g, a);
            } else {
               h = 0;
            }
            f2 = g * h;
            return f2;
         }
      });
      algorithm.setDimension(2);
      algorithm.setObjectiveCount(2);
      algorithm.setLowerBounds(Arrays.asList(0.0, 0.0));
      algorithm.setUpperBounds(Arrays.asList(1.0, 1.0));

      algorithm.setPopulationSize(100);
      algorithm.setMaxGenerations(50);
      algorithm.setDisplayEvery(10);

      NondominatedPopulation pareto_front = algorithm.solve(context);

      List<TupleTwo<Double, Double>> pareto_front_data = pareto_front.front2D();

      ParetoFront chart = new ParetoFront(pareto_front_data, "Pareto Front for NDND");
      chart.showIt(true);
   }
}
