repo stringlengths 1 191 ⌀ | file stringlengths 23 351 | code stringlengths 0 5.32M | file_length int64 0 5.32M | avg_line_length float64 0 2.9k | max_line_length int64 0 288k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MicroRTS | MicroRTS-master/src/ai/evaluation/SimpleSqrtEvaluationFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.evaluation;
import rts.GameState;
import rts.PhysicalGameState;
import rts.units.*;
/**
*
* @author santi
*/
public class SimpleSqrtEvaluationFunction extends EvaluationFunction {
    public static float RESOURCE = 20;
    public static float RESOURCE_IN_WORKER = 10;
    public static float UNIT_BONUS_MULTIPLIER = 40.0f;

    /**
     * Returns the base score of {@code maxplayer} minus the base score of
     * {@code minplayer} (positive values favor the max player).
     */
    public float evaluate(int maxplayer, int minplayer, GameState gs) {
        return base_score(maxplayer,gs) - base_score(minplayer,gs);
    }

    /**
     * Base score of a player: stockpiled resources, resources carried by the
     * player's units, plus a per-unit bonus proportional to the unit's cost
     * scaled by the square root of its remaining hit-point fraction.
     */
    public float base_score(int player, GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        float score = gs.getPlayer(player).getResources()*RESOURCE;
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==player) {
                score += u.getResources() * RESOURCE_IN_WORKER;
                // Cast before dividing: the HP accessors appear to be integral
                // (SimpleSqrtEvaluationFunction2 takes sqrt of getHitPoints()
                // alone, suggesting int returns), so the plain quotient would
                // truncate to 0 for any damaged unit and 1 only at full health,
                // making the sqrt term all-or-nothing instead of a smooth ratio.
                score += UNIT_BONUS_MULTIPLIER * (u.getCost()*Math.sqrt( ((float)u.getHitPoints()) / u.getMaxHitPoints() ));
            }
        }
        return score;
    }

    /**
     * Upper bound of the evaluation: all free (neutral) resources plus the
     * richer player's resources (stockpiled, carried, and invested in unit
     * costs), scaled by UNIT_BONUS_MULTIPLIER.
     */
    public float upperBound(GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        int free_resources = 0;
        int player_resources[] = {gs.getPlayer(0).getResources(),gs.getPlayer(1).getResources()};
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==-1) free_resources+=u.getResources();  // -1 marks neutral units (resource piles)
            if (u.getPlayer()==0) {
                player_resources[0] += u.getResources();
                player_resources[0] += u.getCost();
            }
            if (u.getPlayer()==1) {
                player_resources[1] += u.getResources();
                player_resources[1] += u.getCost();
            }
        }
        return (free_resources + Math.max(player_resources[0],player_resources[1]))*UNIT_BONUS_MULTIPLIER;
    }
}
| 2,146 | 35.389831 | 115 | java |
MicroRTS | MicroRTS-master/src/ai/evaluation/SimpleSqrtEvaluationFunction2.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.evaluation;
import rts.GameState;
import rts.PhysicalGameState;
import rts.units.*;
/**
*
* @author santi
*
* This function is similar to SimpleSqrtEvaluationFunction, except that it detects when a player has won and returns a special, larger value.
*/
public class SimpleSqrtEvaluationFunction2 extends EvaluationFunction {
    public static float RESOURCE = 20;
    public static float RESOURCE_IN_WORKER = 10;
    public static float UNIT_BONUS_MULTIPLIER = 40.0f;

    /**
     * Like the base difference evaluation, but returns +/-VICTORY when exactly
     * one player has a zero base score (i.e., no units and no resources).
     */
    public float evaluate(int maxplayer, int minplayer, GameState gs) {
        float s1 = base_score(maxplayer,gs);
        float s2 = base_score(minplayer,gs);
        if (s1==0 && s2!=0) return -VICTORY;
        if (s1!=0 && s2==0) return VICTORY;
        return s1 - s2;
    }

    /**
     * Base score of a player (see SimpleSqrtEvaluationFunction). Returns
     * exactly 0 when the player owns no units at all, so evaluate() can detect
     * elimination.
     */
    public float base_score(int player, GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        float score = gs.getPlayer(player).getResources()*RESOURCE;
        boolean anyunit = false;
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==player) {
                anyunit = true;
                score += u.getResources() * RESOURCE_IN_WORKER;
                // Fixed a misplaced parenthesis: the previous expression was
                // Math.sqrt(hp)/maxHp, but the class doc states this function is
                // "similar to SimpleSqrtEvaluationFunction", whose intended term
                // is sqrt of the hp fraction. The cast avoids integer truncation.
                score += UNIT_BONUS_MULTIPLIER * (u.getCost()*Math.sqrt( ((float)u.getHitPoints()) / u.getMaxHitPoints() ));
            }
        }
        if (!anyunit) return 0;
        return score;
    }

    /**
     * Upper bound of the evaluation: all free resources plus the richer
     * player's total resources, scaled by UNIT_BONUS_MULTIPLIER.
     */
    public float upperBound(GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        int free_resources = 0;
        int player_resources[] = {gs.getPlayer(0).getResources(),gs.getPlayer(1).getResources()};
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==-1) free_resources+=u.getResources();
            if (u.getPlayer()==0) {
                player_resources[0] += u.getResources();
                player_resources[0] += u.getCost();
            }
            if (u.getPlayer()==1) {
                player_resources[1] += u.getResources();
                player_resources[1] += u.getCost();
            }
        }
        return (free_resources + Math.max(player_resources[0],player_resources[1]))*UNIT_BONUS_MULTIPLIER;
    }
}
| 2,285 | 34.71875 | 142 | java |
MicroRTS | MicroRTS-master/src/ai/evaluation/SimpleSqrtEvaluationFunction3.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.evaluation;
import rts.GameState;
import rts.PhysicalGameState;
import rts.units.*;
/**
*
* @author santi
*
* This function uses the same base evaluation as SimpleSqrtEvaluationFunction and SimpleSqrtEvaluationFunction2, but returns the (proportion*2)-1 of the total score on the board that belongs to one player.
* The advantage of this function is that evaluation is bounded between -1 and 1.
*/
public class SimpleSqrtEvaluationFunction3 extends EvaluationFunction {
    public static float RESOURCE = 20;
    public static float RESOURCE_IN_WORKER = 10;
    public static float UNIT_BONUS_MULTIPLIER = 40.0f;

    /**
     * Returns (2*s1/(s1+s2))-1, i.e., the max player's share of the total
     * on-board score mapped into [-1, 1].
     *
     * NOTE(review): when both scores are 0 this returns 0.5f, which is neither
     * the symmetric "tie" value (0) of the formula nor a bound of the claimed
     * [-1, 1] midpoint semantics — confirm whether consumers depend on it
     * before changing.
     */
    public float evaluate(int maxplayer, int minplayer, GameState gs) {
        float s1 = base_score(maxplayer,gs);
        float s2 = base_score(minplayer,gs);
        if (s1 + s2 == 0) return 0.5f;
        return (2*s1 / (s1 + s2))-1;
    }

    /**
     * Base score of a player (same formula as SimpleSqrtEvaluationFunction);
     * returns exactly 0 when the player has no units.
     */
    public float base_score(int player, GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        float score = gs.getPlayer(player).getResources()*RESOURCE;
        boolean anyunit = false;
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==player) {
                anyunit = true;
                score += u.getResources() * RESOURCE_IN_WORKER;
                // Cast before dividing: without it the integral quotient
                // truncates to 0 for any damaged unit (1 only at full health),
                // collapsing the sqrt term to all-or-nothing.
                score += UNIT_BONUS_MULTIPLIER * u.getCost()*Math.sqrt( ((float)u.getHitPoints())/u.getMaxHitPoints() );
            }
        }
        if (!anyunit) return 0;
        return score;
    }

    /** The proportional evaluation is bounded, so the upper bound is constant. */
    public float upperBound(GameState gs) {
        return 1.0f;
    }
}
| 1,665 | 32.32 | 206 | java |
MicroRTS | MicroRTS-master/src/ai/jni/JNIAI.java | package ai.jni;
import java.util.List;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.SimpleEvaluationFunction;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
* This AI does not actually have the ability to make its own decisions for actions.
* It is a helper class that we can pass actions that we have already decided upon
* externally (e.g., in Python code) in vector-format, and this class can convert
* them for us into the PlayerAction format.
*
* @author costa
*/
public class JNIAI extends AIWithComputationBudget implements JNIInterface {
    UnitTypeTable utt = null;
    double reward = 0.0;
    double oldReward = 0.0;
    boolean firstRewardCalculation = true;
    SimpleEvaluationFunction ef = new SimpleEvaluationFunction();
    int maxAttackRadius;

    /**
     * @param timeBudget       time budget, forwarded to the superclass
     * @param iterationsBudget iterations budget, forwarded to the superclass
     * @param a_utt            unit-type table used to decode vector actions
     */
    public JNIAI(int timeBudget, int iterationsBudget, UnitTypeTable a_utt) {
        super(timeBudget, iterationsBudget);
        utt = a_utt;
        maxAttackRadius = utt.getMaxAttackRange() * 2 + 1;
    }

    /**
     * Difference-style reward: evaluates the current state and returns the
     * change in evaluation since the previous call (0 on the very first call).
     */
    public double computeReward(int maxplayer, int minplayer, GameState gs) throws Exception {
        double current = ef.evaluate(maxplayer, minplayer, gs);
        if (firstRewardCalculation) {
            reward = 0;
            firstRewardCalculation = false;
        } else {
            reward = current - oldReward;
        }
        oldReward = current;
        return reward;
    }

    /**
     * Decodes an externally-decided action in vector format into a
     * PlayerAction, then pads every remaining idle unit with a NONE action of
     * duration 1.
     */
    public PlayerAction getAction(int player, GameState gs, int[][] action) throws Exception {
        PlayerAction decoded = PlayerAction.fromVectorAction(action, gs, utt, player, maxAttackRadius);
        decoded.fillWithNones(gs, player, 1);
        return decoded;
    }

    /** Returns the vector observation of {@code gs} from {@code player}'s point of view. */
    public int[][][] getObservation(int player, GameState gs) throws Exception {
        return gs.getVectorObservation(player);
    }

    @Override
    public void reset() {
        // Intentionally empty stub.
        // NOTE(review): firstRewardCalculation/oldReward are not reset here —
        // confirm whether episode resets are expected to re-create this object.
    }

    @Override
    public AI clone() {
        // Stub: this helper AI is not meant to be cloned.
        return null;
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        // Stub: no tunable parameters are exposed.
        return null;
    }

    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        // Stub: this class cannot decide actions on its own (see class doc);
        // actions must be supplied via getAction(player, gs, action).
        return null;
    }

    @Override
    public String computeInfo(int player, GameState gs) throws Exception {
        // Stub: no extra info is reported.
        return null;
    }
}
| 2,657 | 28.865169 | 98 | java |
MicroRTS | MicroRTS-master/src/ai/jni/JNIInterface.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.jni;
import rts.GameState;
import rts.PlayerAction;
/**
*
* @author costa
*/
public interface JNIInterface {
    /** Converts an externally-decided action in vector format into a PlayerAction for {@code player}. */
    public PlayerAction getAction(int player, GameState gs, int[][] action) throws Exception;
    /** Returns the vector observation of {@code gs} from {@code player}'s point of view. */
    public int[][][] getObservation(int player, GameState gs) throws Exception;
    /** Resets any per-episode state held by the implementation. */
    public void reset();
    /** Computes the reward for player {@code i} against player {@code j} in state {@code gs}. */
    public double computeReward(int i, int j, GameState gs) throws Exception;
    /** Returns an implementation-defined info string for {@code player} in state {@code gs}. */
    public String computeInfo(int player, GameState gs) throws Exception;
}
MicroRTS | MicroRTS-master/src/ai/jni/Response.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.jni;
/**
*
* @author costa
*/
public class Response {
    public int[][][] observation;
    public double[] reward;
    public boolean[] done;
    public String info;

    /** Creates a response holding the given step data (arrays are kept by reference, not copied). */
    public Response(int[][][] observation, double[] reward, boolean[] done, String info) {
        this.observation = observation;
        this.reward = reward;
        this.done = done;
        this.info = info;
    }

    /** Overwrites all fields in place so a single instance can be reused across steps. */
    public void set(int[][][] observation, double[] reward, boolean[] done, String info) {
        this.observation = observation;
        this.reward = reward;
        this.done = done;
        this.info = info;
    }
}
MicroRTS | MicroRTS-master/src/ai/jni/Responses.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.jni;
/**
*
* @author costa
*/
public class Responses {
    public int[][][][] observation;
    public double[][] reward;
    public boolean[][] done;

    /** Creates a batched response holding the given arrays by reference (no copies are made). */
    public Responses(int[][][][] observation, double[][] reward, boolean[][] done) {
        this.observation = observation;
        this.reward = reward;
        this.done = done;
    }

    /** Overwrites all fields in place so a single instance can be reused between steps. */
    public void set(int[][][][] observation, double[][] reward, boolean[][] done) {
        this.observation = observation;
        this.reward = reward;
        this.done = done;
    }
}
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/ActionInterdependenceModel.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import ai.machinelearning.bayes.featuregeneration.FeatureGenerator;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorComplex;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorEmpty;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorSimple;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.jdom.Element;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import util.XMLWriter;
/**
*
* @author santi
*
* This class actually implements a WRONG NaiveBayes equation, so, it should be removed
*/
public class ActionInterdependenceModel extends BayesianModel {
    int estimationMethod = ESTIMATION_COUNTS;   // raw counts vs. Laplace smoothing (constants in BayesianModel)
    double calibrationFactor = 0.0; // how much to correct (temper) probabilities after estimation; tuned by calibrateProbabilities
    double []prior_distribution;    // P(y): prior over all possible unit actions, indexed like allPossibleActions
    DiscreteCPD []distributions;    // one conditional distribution P(x_i|y) per feature
    boolean []selectedFeatures;     // feature mask from feature selection; null means "use all features"
    int Ysize = 0;                  // number of classes (possible unit actions)
    int YtypeSize = 0;              // number of unit-action *types*
    int Xsizes[];                   // domain size of each feature
    int []action_allowed_counts_prior; // number of times actions were allowed
    int [][]selected_allowed_action_prior; // [i][j]: number of times i was selected when j was also allowed
    List<Integer> allPossibleActionsTypes; // type of allPossibleActions.get(k), for every index k
    int []actiontypes_allowed_counts_prior; // number of times action types were allowed
    int [][]selected_allowed_actiontype_prior; // [i][j]: number of times i was selected when j was also allowed
    boolean consider_individual_actions = false; // include the per-action interdependence factor in prediction
    boolean consider_action_types = true;        // include the per-action-type interdependence factor in prediction
    /**
     * @param a_Xsizes           domain size of each feature
     * @param a_Ysize            number of possible unit actions (classes)
     * @param estimation         ESTIMATION_COUNTS or ESTIMATION_LAPLACE
     * @param a_correctionFactor initial calibration (tempering) factor in [0,1]
     */
    public ActionInterdependenceModel(int a_Xsizes[], int a_Ysize, int estimation, double a_correctionFactor, UnitTypeTable utt, FeatureGenerator fg, String a_name) {
        super(utt, fg, a_name);
        Ysize = a_Ysize;
        Xsizes = a_Xsizes;
        estimationMethod = estimation;
        calibrationFactor = a_correctionFactor;
        // calculate the action types (cached per action index, parallel to allPossibleActions):
        allPossibleActionsTypes = new ArrayList<>();
        for(UnitAction ua:allPossibleActions) {
            allPossibleActionsTypes.add(ua.getType());
        }
        YtypeSize = UnitAction.NUMBER_OF_ACTION_TYPES;
        clearTraining();
    }
    /** Returns a fresh, untrained model with the same configuration (training data is NOT copied). */
    public Object clone() {
        ActionInterdependenceModel c = new ActionInterdependenceModel(Xsizes, Ysize, estimationMethod, calibrationFactor, utt, featureGenerator, name);
        return c;
    }
    /** Discards all accumulated counts and re-creates empty per-feature CPDs. */
    public void clearTraining() {
        action_allowed_counts_prior = null;
        selected_allowed_action_prior = null;
        actiontypes_allowed_counts_prior = null;
        selected_allowed_actiontype_prior = null;
        if (Xsizes!=null) {
            int nfeatures = Xsizes.length;
            distributions = new DiscreteCPD[nfeatures];
            for(int i = 0;i<nfeatures;i++) {
                distributions[i] = new DiscreteCPD(Ysize, Xsizes[i]);
            }
        } else {
            distributions = null;
        }
    }
    /**
     * Trains the model from parallel lists of feature vectors (x_l), selected
     * action indexes (y_l) and full training instances (i_l): accumulates the
     * action prior, the per-feature CPDs P(x_i|y), and the "interdependence"
     * counts of which actions/action types were legal when action y was chosen.
     */
    public void train(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        int nfeatures = distributions.length;
        prior_distribution = new double[Ysize];
        action_allowed_counts_prior = new int[Ysize];
        selected_allowed_action_prior = new int[Ysize][Ysize];
        actiontypes_allowed_counts_prior = new int[YtypeSize];
        selected_allowed_actiontype_prior = new int[YtypeSize][YtypeSize];
        for(int i = 0;i<x_l.size();i++) {
            int []x = x_l.get(i);
            int y = y_l.get(i);
            // prior and per-feature CPD counts:
            prior_distribution[y]++;
            for(int j = 0;j<nfeatures;j++) {
                distributions[j].addObservation(y, x[j]);
            }
            // interdependence counts over the actions legal in this instance:
            List<Integer> l = i_l.get(i).getPossibleActions(allPossibleActions);
            for(int idx1:l) {
                action_allowed_counts_prior[idx1]++;
                if (idx1==y) {
                    for(int idx2:l) {
                        selected_allowed_action_prior[idx1][idx2]++;
                    }
                }
            }
            // same counts, collapsed to distinct action *types*:
            List<Integer> ltypes = new ArrayList<>();
            for(Integer ua:l) {
                int ua_type = allPossibleActionsTypes.get(ua);
                if (!ltypes.contains(ua_type)) ltypes.add(ua_type);
            }
            for(int idx1:ltypes) {
                actiontypes_allowed_counts_prior[idx1]++;
                if (idx1==allPossibleActionsTypes.get(y)) {
                    for(int idx2:ltypes) {
                        selected_allowed_actiontype_prior[idx1][idx2]++;
                    }
                }
            }
        }
        // normalize the prior (with +1 Laplace smoothing if requested):
        if (estimationMethod==ESTIMATION_COUNTS) {
            for(int i = 0;i<Ysize;i++) prior_distribution[i]/=x_l.size();
        } else {
            for(int i = 0;i<Ysize;i++) prior_distribution[i] = (prior_distribution[i]+1)/(x_l.size()+Ysize);
        }
    }
    /**
     * Grid-searches calibrationFactor over [0, 1] in steps of 0.05, keeping the
     * value that maximizes training-data log-likelihood. Stops at the first
     * decrease (the curve is assumed unimodal; see the inline comment below).
     */
    public void calibrateProbabilities(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        double best_c = 0;
        double best_ll = Double.NEGATIVE_INFINITY;
        for(double c = 0.0;c<=1.05;c+=0.05) {
            calibrationFactor = c;
            double loglikelihood = 0;
            for(int i = 0;i<x_l.size();i++) {
                Unit u = i_l.get(i).u;
                List<UnitAction> possibleUnitActions = u.getUnitActions(i_l.get(i).gs);
                List<Integer> possibleUnitActionIndexes = new ArrayList<>();
                for(UnitAction ua : possibleUnitActions) {
                    // attack actions are stored with *relative* coordinates in allPossibleActions:
                    if (ua.getType()==UnitAction.TYPE_ATTACK_LOCATION) {
                        ua = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, ua.getLocationX() - u.getX(), ua.getLocationY() - u.getY());
                    }
                    int idx = allPossibleActions.indexOf(ua);
                    if (idx<0) throw new Exception("Unknown action: " + ua);
                    possibleUnitActionIndexes.add(idx);
                }
                if (possibleUnitActions.size()>1) {
                    double predicted_distribution[] = predictDistribution(x_l.get(i), i_l.get(i));
                    predicted_distribution = filterByPossibleActionIndexes(predicted_distribution, possibleUnitActionIndexes);
                    int actual_y = y_l.get(i);
                    if (!possibleUnitActionIndexes.contains(actual_y)) continue;
                    int predicted_y = -1;
                    // NOTE(review): this shuffles possibleUnitActions, but the argmax loop
                    // below iterates possibleUnitActionIndexes, which is NOT shuffled — so
                    // the shuffle has no effect on tie-breaking; confirm intent.
                    Collections.shuffle(possibleUnitActions); // shuffle it, just in case there are ties, to prevent action ordering bias
                    for(int idx:possibleUnitActionIndexes) {
                        if (predicted_y==-1) {
                            predicted_y = idx;
                        } else {
                            if (predicted_distribution[idx]>predicted_distribution[predicted_y]) predicted_y = idx;
                        }
                    }
                    double ll = Math.log(predicted_distribution[actual_y]);
                    if (Double.isInfinite(ll)) {
                        // a zero probability for the observed action should be impossible; dump state and abort
                        System.out.println(Arrays.toString(predicted_distribution));
                        System.out.println(possibleUnitActionIndexes);
                        System.out.println(actual_y + " : " + allPossibleActions.get(actual_y));
                        System.exit(1);
                    }
                    loglikelihood += ll;
                }
            }
//            System.out.println("    ll (cf = " + c + ") = " + loglikelihood/x_l.size());
            if (loglikelihood>best_ll) {
                best_c = c;
                best_ll = loglikelihood;
            } else {
                // once this starts going down, it will always go down...
                break;
            }
        }
        System.out.println("best calibration factor = " + best_c);
        calibrationFactor = best_c;
    }
    /**
     * Keeps the top {@code fractionOfFeaturesToKeep} of the features, ranked by
     * gain ratio, and masks out the rest via selectedFeatures.
     */
    public void featureSelectionByGainRatio(List<int []> x_l, List<Integer> y_l, double fractionOfFeaturesToKeep) {
        List<Integer> featureIndexes = new ArrayList<>();
        List<Double> featureGR = new ArrayList<>();
        int nfeatures = distributions.length;
        selectedFeatures = new boolean[nfeatures];
        for(int i = 0;i<nfeatures;i++) {
            featureIndexes.add(i);
            featureGR.add(FeatureSelection.featureGainRatio(x_l, y_l, i));
            selectedFeatures[i] = false;
        }
        // sort features (descending by gain ratio):
        featureIndexes.sort(new Comparator<Integer>() {
            public int compare(Integer o1, Integer o2) {
                return Double.compare(featureGR.get(o2), featureGR.get(o1));
            }
        });
//        System.out.println("FS:");
        for(int i = 0;i<fractionOfFeaturesToKeep*nfeatures;i++) {
            selectedFeatures[featureIndexes.get(i)] = true;
//            System.out.println("    Selected " + featureIndexes.get(i) + " GR: " + featureGR.get(featureIndexes.get(i)));
        }
    }
    /** Predicts using the currently calibrated tempering factor. */
    public double[] predictDistribution(int []x, TrainingInstance ti) {
        return predictDistribution(x, ti, calibrationFactor);
    }
    /**
     * Predicts a distribution over all Ysize actions, restricted to the actions
     * legal in {@code ti}. Multiplies the prior, the per-feature factors, and
     * (optionally) the action-type / per-action interdependence factors, then
     * tempers the product by {@code correction} and normalizes.
     */
    public double[] predictDistribution(int []x, TrainingInstance ti, double correction) {
        List<Integer> l = ti.getPossibleActions(allPossibleActions);
        double d[] = new double[Ysize];
        double n_factors = 1; // this includes the prior
        // start with P(y)
        for(int i = 0;i<Ysize;i++) d[i] = 0;
        for(int i:l) {
            if (prior_distribution==null) {
                d[i] = 1;  // untrained model: uniform over legal actions
            } else {
                d[i] = prior_distribution[i];
            }
        }
        // add P(x|y)
        for(int i = 0;i<x.length;i++) {
            if (selectedFeatures==null || selectedFeatures[i]) {
                n_factors++;
                if (estimationMethod == ESTIMATION_COUNTS) {
                    for(int j:l) {
                        double d2[] = distributions[i].distribution(j);
                        d[j] *= d2[x[i]];
                    }
                } else {
                    for(int j:l) {
                        double d2[] = distributions[i].distributionLaplace(j, laplaceBeta);
                        double v = 1;
                        if (d2.length > x[i]) {
                            v = d2[x[i]];
                        } else {
                            // feature value outside the CPD's observed domain: fall back to uniform
                            v = 1.0/Ysize;
                        }
                        d[j] *= v;
                    }
                }
            }
        }
        // add P(legal(type(y_i))|type(y))
        if (consider_action_types && selected_allowed_actiontype_prior!=null) {
            List<Integer> ltypes = new ArrayList<>();
            for(Integer ua:l) {
                int ua_type = allPossibleActionsTypes.get(ua);
                if (!ltypes.contains(ua_type)) ltypes.add(ua_type);
            }
            n_factors += ltypes.size()-1;
//            n_factors += ltypes.size();
            for(int i:l) {
                int i_type = allPossibleActionsTypes.get(i);
                for(int j:ltypes) {
                    if (j!=i_type) {
                        if (estimationMethod == ESTIMATION_COUNTS) {
                            double p = selected_allowed_actiontype_prior[i_type][j] / (double)actiontypes_allowed_counts_prior[i_type];
                            d[i]*=p;
                        } else {
                            double p = (selected_allowed_actiontype_prior[i_type][j]+1) / (double)(actiontypes_allowed_counts_prior[i_type]+2);
                            d[i]*=p;
                        }
                    }
                }
            }
        }
        // add P(legal(y_i)|y)
        if (consider_individual_actions && selected_allowed_action_prior!=null) {
            n_factors += l.size()-1;
            for(int i:l) {
                for(int j:l) {
                    if (j!=i) {
                        if (estimationMethod == ESTIMATION_COUNTS) {
                            double p = selected_allowed_action_prior[i][j] / (double)action_allowed_counts_prior[i];
                            d[i]*=p;
                        } else {
                            double p = (selected_allowed_action_prior[i][j]+1) / (double)(action_allowed_counts_prior[i]+2);
                            d[i]*=p;
                        }
                    }
                }
            }
        }
        double accum = 0;
        for(int i = 0;i<Ysize;i++) {
            // Temper the product: raise to 1/k where k interpolates between 1
            // (correction = 0: raw product) and n_factors (correction = 1:
            // geometric mean of the factors), then renormalize below.
            d[i] = Math.pow(d[i], 1/(1*(1-correction)+ n_factors*correction));
            accum += d[i];
        }
        if (accum <= 0) {
            // if 0 accum, then just make uniform distribution:
            for(int i = 0;i<Ysize;i++) d[i] = 1.0/Ysize;
        } else {
            for(int i = 0;i<Ysize;i++) d[i] /= accum;
        }
        return d;
    }
    /**
     * Serializes the full model (configuration, prior, counts, and CPDs) to
     * XML. The element names and their order must match what load() expects.
     */
    public void save(XMLWriter w) throws Exception {
        w.tagWithAttributes(getClass().getSimpleName(),
            "estimationMethod=\"" +estimationMethod+ "\" " +
            "Ysize=\"" +Ysize+ "\" " +
            "calibrationFactor=\""+calibrationFactor+"\" " +
            "nfeatures=\""+distributions.length+"\" " +
            "featureGenerationClass=\""+featureGenerator.getClass().getSimpleName()+"\"");
        w.tag("Xsizes");
        for(int v:Xsizes) w.rawXML(v + " ");
        w.rawXML("\n");
        w.tag("/Xsizes");
        w.tag("priorDistribution");
        for(double v:prior_distribution) w.rawXML(v + " ");
        w.rawXML("\n");
        w.tag("/priorDistribution");
        if (selectedFeatures!=null) {
            w.tag("selectedFeatures");
            for(boolean v:selectedFeatures) w.rawXML(v + " ");
            w.rawXML("\n");
            w.tag("/selectedFeatures");
        }
        w.tag("action_allowed_counts_prior");
        for(int v:action_allowed_counts_prior) {
            w.rawXML(v + " ");
        }
        w.rawXML("\n");
        w.tag("/action_allowed_counts_prior");
        w.tag("selected_action_pairs_prior");
        for(int row[]:selected_allowed_action_prior) {
            for(int v:row) {
                w.rawXML(v + " ");
            }
            w.rawXML("\n");
        }
        w.tag("/selected_action_pairs_prior");
        w.tag("actiontypes_allowed_counts_prior");
        for(int v:actiontypes_allowed_counts_prior) {
            w.rawXML(v + " ");
        }
        w.rawXML("\n");
        w.tag("/actiontypes_allowed_counts_prior");
        w.tag("selected_allowed_actiontype_prior");
        for(int row[]:selected_allowed_actiontype_prior) {
            for(int v:row) {
                w.rawXML(v + " ");
            }
            w.rawXML("\n");
        }
        w.tag("/selected_allowed_actiontype_prior");
        for (DiscreteCPD distribution : distributions) {
            distribution.save(w);
        }
        w.tag("/" + getClass().getSimpleName());
        w.flush();
    }
    /** Deserializing constructor: rebuilds a model previously written by save(). */
    public ActionInterdependenceModel(Element e, UnitTypeTable utt, String a_name) throws Exception {
        super(utt, null, a_name);
        load(e);
    }
    /**
     * Restores all fields from the XML element produced by save(). The feature
     * generator is re-instantiated from the recorded class name.
     */
    public void load(Element e) throws Exception {
        if (!e.getName().equals(getClass().getSimpleName())) throw new Exception("Head tag "+e.getName()+" is not '"+getClass().getSimpleName()+"'!");
        // calculate the action types:
        allPossibleActionsTypes = new ArrayList<>();
        for(UnitAction ua:allPossibleActions) {
            allPossibleActionsTypes.add(ua.getType());
        }
        String fgclass = e.getAttributeValue("featureGenerationClass");
        if (fgclass.contains("FeatureGeneratorEmpty")) {
            featureGenerator = new FeatureGeneratorEmpty();
        } else if (fgclass.contains("FeatureGeneratorSimple")) {
            featureGenerator = new FeatureGeneratorSimple();
        } else if (fgclass.contains("FeatureGeneratorComplex")) {
            featureGenerator = new FeatureGeneratorComplex();
        }
        YtypeSize = UnitAction.NUMBER_OF_ACTION_TYPES;
        estimationMethod = Integer.parseInt(e.getAttributeValue("estimationMethod"));
        Ysize = Integer.parseInt(e.getAttributeValue("Ysize"));
        calibrationFactor = Double.parseDouble(e.getAttributeValue("calibrationFactor"));
        int nfeatures = Integer.parseInt(e.getAttributeValue("nfeatures"));
        Element xs_xml = e.getChild("Xsizes");
        {
            String text = xs_xml.getTextTrim();
            String []tokens = text.split(" ");
            Xsizes = new int[nfeatures];
            for(int i = 0;i<nfeatures;i++) Xsizes[i] = Integer.parseInt(tokens[i]);
        }
        Element pd_xml = e.getChild("priorDistribution");
        {
            String text = pd_xml.getTextTrim();
            String []tokens = text.split(" ");
            prior_distribution = new double[Ysize];
            for(int i = 0;i<Ysize;i++) prior_distribution[i] = Double.parseDouble(tokens[i]);
        }
        Element sf_xml = e.getChild("selectedFeatures");
        if (sf_xml!=null) {
            String text = sf_xml.getTextTrim();
            String []tokens = text.split(" ");
            selectedFeatures = new boolean[nfeatures];
            for(int i = 0;i<nfeatures;i++) selectedFeatures[i] = Boolean.parseBoolean(tokens[i]);
        } else {
            selectedFeatures = null;
        }
        {
            Element action_allowed_counts_prior_xml = e.getChild("action_allowed_counts_prior");
            String text = action_allowed_counts_prior_xml.getTextTrim();
            String []tokens = text.split(" ");
            action_allowed_counts_prior = new int[Ysize];
            for(int i = 0;i<Ysize;i++) action_allowed_counts_prior[i] = Integer.parseInt(tokens[i]);
        }
        {
            Element selected_action_pairs_prior_xml = e.getChild("selected_action_pairs_prior");
            String text = selected_action_pairs_prior_xml.getTextTrim();
            // the matrix is written as space/newline-separated values; skip empty tokens
            String tokens[] = text.split(" |\n");
            selected_allowed_action_prior = new int[Ysize][Ysize];
            for(int k = 0,i = 0;i<Ysize;i++) {
                for(int j = 0;j<Ysize;j++,k++) {
                    while(tokens[k].equals("")) k++;
                    selected_allowed_action_prior[i][j] = Integer.parseInt(tokens[k]);
                }
            }
        }
        {
            Element actiontypes_allowed_counts_prior_xml = e.getChild("actiontypes_allowed_counts_prior");
            String text = actiontypes_allowed_counts_prior_xml.getTextTrim();
            String []tokens = text.split(" ");
            actiontypes_allowed_counts_prior = new int[YtypeSize];
            for(int i = 0;i<YtypeSize;i++) actiontypes_allowed_counts_prior[i] = Integer.parseInt(tokens[i]);
        }
        {
            Element selected_allowed_actiontype_prior_xml = e.getChild("selected_allowed_actiontype_prior");
            String text = selected_allowed_actiontype_prior_xml.getTextTrim();
            String tokens[] = text.split(" |\n");
            selected_allowed_actiontype_prior = new int[YtypeSize][YtypeSize];
            for(int k = 0,i = 0;i<YtypeSize;i++) {
                for(int j = 0;j<YtypeSize;j++,k++) {
                    while(tokens[k].equals("")) k++;
                    selected_allowed_actiontype_prior[i][j] = Integer.parseInt(tokens[k]);
                }
            }
        }
        distributions = new DiscreteCPD[nfeatures];
        // NOTE(review): raw List from the JDOM API; elements are cast below.
        List cpd_xml_l = e.getChildren("DiscreteCPD");
        for(int i = 0;i<nfeatures;i++) {
            Element cpd_xml = (Element)cpd_xml_l.get(i);
            distributions[i] = new DiscreteCPD(cpd_xml);
        }
    }
    /**
     * Greedy forward feature selection: starting from the empty feature set,
     * repeatedly adds the single feature whose inclusion most improves the
     * 10-fold cross-validation log-likelihood, until no addition helps.
     */
    public void featureSelectionByCrossValidation(List<int[]> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        int nfeatures = distributions.length;
        System.out.println("featureSelectionByCrossValidation " + x_l.size());
        boolean bestSelection[] = new boolean[nfeatures];
        for(int i = 0;i<nfeatures;i++) bestSelection[i] = false;
        selectedFeatures = bestSelection;
        double best_score = FeatureSelection.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10).m_a;
//        double best_score = TestNaiveBayesAsInGame.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10, false, true).m_b;
        System.out.println("    loglikelihood with " + Arrays.toString(selectedFeatures) + ": " + best_score);
        boolean change;
        do {
            change = false;
            boolean bestLastSelection[] = bestSelection;
            for(int i = 0;i<nfeatures;i++) {
                if (!bestSelection[i]) {
                    boolean currentSelection[] = new boolean[nfeatures];
                    System.arraycopy(bestSelection, 0, currentSelection, 0, nfeatures);
                    currentSelection[i] = true;
                    selectedFeatures = currentSelection;
                    double score = FeatureSelection.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10).m_a;
//                    double score = TestNaiveBayesAsInGame.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10, false, true).m_b;
                    System.out.println("    loglikelihood with " + Arrays.toString(selectedFeatures) + ": " + score);
                    if (score > best_score) {
                        bestLastSelection = currentSelection;
                        best_score = score;
                        change = true;
                    }
                }
            }
            bestSelection = bestLastSelection;
        }while(change);
        selectedFeatures = bestSelection;
        System.out.println("Selected features: " + Arrays.toString(selectedFeatures));
    }
}
| 22,275 | 41.592734 | 166 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/BayesianModel.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import ai.machinelearning.bayes.featuregeneration.FeatureGenerator;
import java.util.ArrayList;
import java.util.List;
import ai.stochastic.UnitActionProbabilityDistribution;
import org.jdom.Element;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
import util.Sampler;
import util.XMLWriter;
/**
*
* @author santi
*/
public abstract class BayesianModel extends UnitActionProbabilityDistribution {
    public static final int ESTIMATION_COUNTS = 1;   // estimate probabilities from raw counts
    public static final int ESTIMATION_LAPLACE = 2;  // estimate with Laplace (additive) smoothing
    public static final double laplaceBeta = 1.0;    // smoothing strength used with ESTIMATION_LAPLACE
    // All unit actions a model can predict. NOTE(review): predictions are
    // indexed into this list (see filterByPossibleActions), so its order must
    // stay stable between training and prediction/serialization.
    protected List<UnitAction> allPossibleActions;
    protected FeatureGenerator featureGenerator;
    protected String name;
    /**
     * @param utt    unit-type table used to enumerate all possible unit actions
     * @param fg     feature generator that turns (state, unit) into feature vectors
     * @param a_name human-readable model name, returned by toString()
     */
    public BayesianModel(UnitTypeTable utt, FeatureGenerator fg, String a_name) {
        super(utt);
        allPossibleActions = generateAllPossibleUnitActions(utt);
        featureGenerator = fg;
        name = a_name;
    }
    @Override
    public abstract Object clone();
    /** Discards all accumulated training statistics. */
    public abstract void clearTraining();
    /** Trains from parallel lists of feature vectors, action indexes and training instances. */
    public abstract void train(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception;
    /** Optional post-training probability calibration; the default implementation does nothing. */
    public void calibrateProbabilities(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception
    {
    }
    public abstract void featureSelectionByCrossValidation(List<int[]> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception;
    public abstract void featureSelectionByGainRatio(List<int []> x_l, List<Integer> y_l, double fractionOfFeaturesToKeep);
    /** Predicts a distribution over all possible actions for unit {@code u} in state {@code gs}. */
    public double[] predictDistribution(Unit u, GameState gs) throws Exception
    {
        TrainingInstance ti = new TrainingInstance(gs, u.getID(), null);
        int []x = featureGenerator.generateFeaturesAsArray(ti);
        return predictDistribution(x, ti);
    }
    /** Like {@link #predictDistribution(Unit, GameState)}, but restricted/renormalized to {@code actions}. */
    public double[] predictDistribution(Unit u, GameState gs, List<UnitAction> actions) throws Exception
    {
        TrainingInstance ti = new TrainingInstance(gs, u.getID(), null);
        int []x = featureGenerator.generateFeaturesAsArray(ti);
        double []prediction = predictDistribution(x, ti);
        return filterByPossibleActions(prediction, u, actions);
    }
    public abstract double[] predictDistribution(int []x, TrainingInstance ti);
    /** Returns the index of the most probable action under the predicted distribution. */
    public int predictMax(int []x, TrainingInstance ti) {
        double d[] = predictDistribution(x, ti);
        int argmax = 0;
        for(int i = 1;i<d.length;i++) {
            if (d[i] > d[argmax]) argmax = i;
        }
        return argmax;
    }
    /** Samples an action index from the predicted distribution. */
    public int predictSample(int []x, TrainingInstance ti) throws Exception {
        double d[] = predictDistribution(x, ti);
        return Sampler.weighted(d);
    }
    /**
     * Zeroes out probabilities of actions not in {@code possibleUnitActionIndexes}
     * and renormalizes the remainder (result sums to 1 over the allowed indexes).
     */
    public double[] filterByPossibleActionIndexes(double[] predicted_distribution, List<Integer> possibleUnitActionIndexes) {
        double accum = 0;
        int n = predicted_distribution.length;
        double d[] = new double[n];
        for(int i = 0;i<n;i++) {
            if (possibleUnitActionIndexes.contains(i)) accum+=predicted_distribution[i];
        }
        for(int i = 0;i<n;i++) {
            if (possibleUnitActionIndexes.contains(i)) {
                d[i] = predicted_distribution[i]/accum;
            } else {
                d[i] = 0;
            }
        }
        return d;
    }
    /**
     * Projects a full distribution {@code d} (indexed like allPossibleActions)
     * onto the concrete action list {@code l} of unit {@code u}, renormalizing;
     * falls back to a uniform distribution when all projected mass is zero.
     */
    public double[] filterByPossibleActions(double []d, Unit u, List<UnitAction> l) {
        double []filtered = new double[l.size()];
        double total = 0;
        for(int i = 0;i<l.size();i++) {
            UnitAction ua = l.get(i);
            // translate the attack actions to relative coordinates:
            if (ua.getType()==UnitAction.TYPE_ATTACK_LOCATION) {
                ua = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, ua.getLocationX() - u.getX(), ua.getLocationY() - u.getY());
            }
            int idx = allPossibleActions.indexOf(ua);
            filtered[i] = d[idx];
            total += d[idx];
        }
        if (total>0) {
            for(int j = 0;j<l.size();j++) filtered[j]/=total;
        } else {
            // no mass on any legal action: fall back to uniform
            for(int j = 0;j<l.size();j++) filtered[j]=1.0/l.size();
        }
        return filtered;
    }
    /**
     * Enumerates every unit action the models can predict: NONE (duration 10),
     * MOVE/HARVEST/RETURN in the four directions, PRODUCE of each producible
     * type in each direction, and ATTACK_LOCATION at every relative offset
     * within the maximum attack range (circular: 0 &lt; dx^2+dy^2 &lt;= r^2).
     */
    public static List<UnitAction> generateAllPossibleUnitActions(UnitTypeTable utt) {
        List<UnitAction> l = new ArrayList<>();
        int maxAttackRange = 1;  // minimum considered range is 1 (melee)
        int directions[] = {UnitAction.DIRECTION_UP, UnitAction.DIRECTION_RIGHT, UnitAction.DIRECTION_DOWN, UnitAction.DIRECTION_LEFT};
        for(UnitType ut:utt.getUnitTypes()) {
            if (ut.attackRange > maxAttackRange) maxAttackRange = ut.attackRange;
        }
        l.add(new UnitAction(UnitAction.TYPE_NONE, 10));
        for(int d:directions) l.add(new UnitAction(UnitAction.TYPE_MOVE, d));
        for(int d:directions) l.add(new UnitAction(UnitAction.TYPE_HARVEST, d));
        for(int d:directions) l.add(new UnitAction(UnitAction.TYPE_RETURN, d));
        for(int d:directions) {
            for(UnitType ut:utt.getUnitTypes()) {
                if (!ut.producedBy.isEmpty()) {
                    l.add(new UnitAction(UnitAction.TYPE_PRODUCE, d, ut));
                }
            }
        }
        for(int ox = -maxAttackRange;ox<=maxAttackRange;ox++) {
            for(int oy = -maxAttackRange;oy<=maxAttackRange;oy++) {
                int d = (ox*ox) + (oy*oy);
                if (d>0 && d<=maxAttackRange*maxAttackRange) {
                    l.add(new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, ox, oy));
                }
            }
        }
        return l;
    }
    public abstract void save(XMLWriter w) throws Exception;
    public abstract void load(Element e) throws Exception;
    public String toString() {
        return name;
    }
}
| 6,240 | 33.103825 | 140 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/BayesianModelByUnitTypeWithDefaultModel.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import ai.machinelearning.bayes.TrainingInstance;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.jdom.Element;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
import util.XMLWriter;
/**
*
* @author santi
*/
public class BayesianModelByUnitTypeWithDefaultModel extends BayesianModel {
    // Prototype model; cloned to create each per-unit-type model and the default model.
    BayesianModel templateModel;
    // One specialized model per unit type encountered during training.
    HashMap<UnitType,BayesianModel> unitModels = new HashMap<>();
    // Fallback model trained on ALL the instances; used at prediction time for unit
    // types that have no specialized model.
    BayesianModel defaultModel;
    
    public BayesianModelByUnitTypeWithDefaultModel(UnitTypeTable utt, BayesianModel tm, String a_name) {
        super(utt, tm.featureGenerator, a_name);
        templateModel = tm;
    }
    
    /** Reconstructs a previously saved model (see {@link #save(XMLWriter)}) from XML. */
    public BayesianModelByUnitTypeWithDefaultModel(Element e, UnitTypeTable utt, BayesianModel tm, String a_name) throws Exception {
        super(utt, tm.featureGenerator, a_name);
        templateModel = tm;
        load(e);
    }
    
    public Object clone() {
        // note: returns an UNTRAINED copy (the per-unit-type and default models are not copied)
        return new BayesianModelByUnitTypeWithDefaultModel(utt, templateModel, name);
    }
    
    public void clearTraining() {
        for(BayesianModel model:unitModels.values()) model.clearTraining();
        // bug fix: defaultModel is only instantiated lazily on the first train/calibrate/load,
        // so calling clearTraining() before that used to throw a NullPointerException
        if (defaultModel!=null) defaultModel.clearTraining();
    }
    
    /**
     * Splits the three parallel training lists into per-unit-type sublists, keyed by the
     * type of the unit in each training instance. (Shared by train, calibrateProbabilities
     * and featureSelectionByCrossValidation, which used to duplicate this code.)
     */
    private void splitByUnitType(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l,
                                 HashMap<UnitType, List<int []>> x_out,
                                 HashMap<UnitType, List<Integer>> y_out,
                                 HashMap<UnitType, List<TrainingInstance>> i_out) {
        for(int i = 0;i<x_l.size();i++) {
            UnitType ut = i_l.get(i).u.getType();
            List<int []> x_l_ut = x_out.get(ut);
            if (x_l_ut==null) {
                x_l_ut = new ArrayList<>();
                x_out.put(ut, x_l_ut);
                y_out.put(ut, new ArrayList<>());
                i_out.put(ut, new ArrayList<>());
            }
            x_l_ut.add(x_l.get(i));
            y_out.get(ut).add(y_l.get(i));
            i_out.get(ut).add(i_l.get(i));
        }
    }
    
    /** Returns the model specialized for 'ut', cloning a fresh one from the template if needed. */
    private BayesianModel modelForType(UnitType ut) {
        BayesianModel model_ut = unitModels.get(ut);
        if (model_ut==null) {
            model_ut = (BayesianModel) templateModel.clone();
            unitModels.put(ut, model_ut);
        }
        return model_ut;
    }
    
    /** Lazily instantiates the default (all-unit-types) model from the template. */
    private BayesianModel ensureDefaultModel() {
        if (defaultModel==null) defaultModel = (BayesianModel) templateModel.clone();
        return defaultModel;
    }
    
    /**
     * Trains one model per unit type on that type's instances, plus the default model
     * on the whole training set.
     */
    public void train(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        HashMap<UnitType, List<int []>> x_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<Integer>> y_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<TrainingInstance>> i_l_ut_l = new HashMap<>();
        splitByUnitType(x_l, y_l, i_l, x_l_ut_l, y_l_ut_l, i_l_ut_l);
        for(UnitType ut:x_l_ut_l.keySet()) {
            modelForType(ut).train(x_l_ut_l.get(ut), y_l_ut_l.get(ut), i_l_ut_l.get(ut));
        }
        ensureDefaultModel().train(x_l, y_l, i_l);
    }
    
    /** Calibrates each per-unit-type model on its own instances, and the default model on all. */
    public void calibrateProbabilities(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        HashMap<UnitType, List<int []>> x_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<Integer>> y_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<TrainingInstance>> i_l_ut_l = new HashMap<>();
        splitByUnitType(x_l, y_l, i_l, x_l_ut_l, y_l_ut_l, i_l_ut_l);
        for(UnitType ut:x_l_ut_l.keySet()) {
            modelForType(ut).calibrateProbabilities(x_l_ut_l.get(ut), y_l_ut_l.get(ut), i_l_ut_l.get(ut));
        }
        ensureDefaultModel().calibrateProbabilities(x_l, y_l, i_l);
    }
    
    /**
     * Applies gain-ratio feature selection over the WHOLE dataset (not split per unit type,
     * matching the original behavior) to every already-existing unit model and to the default.
     */
    public void featureSelectionByGainRatio(List<int []> x_l, List<Integer> y_l, double fractionOfFeaturesToKeep) {
        for(BayesianModel model_ut:unitModels.values()) {
            model_ut.featureSelectionByGainRatio(x_l, y_l, fractionOfFeaturesToKeep);
        }
        ensureDefaultModel().featureSelectionByGainRatio(x_l, y_l, fractionOfFeaturesToKeep);
    }
    
    /** Runs cross-validation feature selection per unit type, and on all data for the default. */
    public void featureSelectionByCrossValidation(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        HashMap<UnitType, List<int []>> x_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<Integer>> y_l_ut_l = new HashMap<>();
        HashMap<UnitType, List<TrainingInstance>> i_l_ut_l = new HashMap<>();
        splitByUnitType(x_l, y_l, i_l, x_l_ut_l, y_l_ut_l, i_l_ut_l);
        for(UnitType ut:x_l_ut_l.keySet()) {
            modelForType(ut).featureSelectionByCrossValidation(x_l_ut_l.get(ut), y_l_ut_l.get(ut), i_l_ut_l.get(ut));
        }
        ensureDefaultModel().featureSelectionByCrossValidation(x_l, y_l, i_l);
    }
    
    /**
     * Predicts with the model specialized for the instance's unit type, or with the
     * default model when that type was never seen during training.
     */
    public double[] predictDistribution(int []x, TrainingInstance ti) {
        BayesianModel model_ut = unitModels.get(ti.u.getType());
        if (model_ut!=null) {
            return model_ut.predictDistribution(x, ti);
        } else {
            return defaultModel.predictDistribution(x, ti);
        }
    }
    
    /** Writes this model (every unit model plus the default) as nested XML elements. */
    public void save(XMLWriter w) throws Exception {
        w.tag(this.getClass().getSimpleName());
        for(UnitType ut:unitModels.keySet()) {
            w.tagWithAttributes("UnitType", "name=\""+ut.name+"\" ID=\""+ut.ID+"\"");
            unitModels.get(ut).save(w);
            w.tag("/UnitType");
        }
        w.tag("defaultModel");
        defaultModel.save(w);
        w.tag("/defaultModel");
        w.tag("/"+this.getClass().getSimpleName());
        w.flush();
    }
    
    /** Rebuilds the per-unit-type models and the default model from XML written by save(). */
    public void load(Element e) throws Exception {
        if (!e.getName().equals(this.getClass().getSimpleName())) throw new Exception("Head tag is not '"+this.getClass().getSimpleName()+"'!");
        List models = e.getChildren("UnitType");
        for(Object o:models) {
            Element ut_xml = (Element)o;
            UnitType ut = utt.getUnitType(ut_xml.getAttributeValue("name"));
            BayesianModel model = (BayesianModel) templateModel.clone();
            model.load((Element)(ut_xml.getChildren().get(0)));
            unitModels.put(ut, model);
        }
        Element dm_xml = e.getChild("defaultModel");
        defaultModel = (BayesianModel) templateModel.clone();
        defaultModel.load((Element)dm_xml.getChildren().get(0));
    }
}
| 8,462 | 38 | 144 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/CalibratedNaiveBayes.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import ai.machinelearning.bayes.featuregeneration.FeatureGenerator;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorComplex;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorEmpty;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorSimple;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.jdom.Element;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import util.XMLWriter;
/**
*
* @author santi
*
* This class actually implements a WRONG NaiveBayes equation, so, it should be removed
*/
public class CalibratedNaiveBayes extends BayesianModel {
    // NOTE(review): as the file header says, this class implements a WRONG Naive Bayes
    // equation (a geometric-mean style "calibration" of the likelihood product) and is
    // kept only for comparison; it should eventually be removed.
    
    // How conditional probabilities are estimated: raw counts or Laplace smoothing
    // (ESTIMATION_* constants come from the superclass).
    int estimationMethod = ESTIMATION_COUNTS;
    double calibrationFactor = 0.0; // how much to correct probabilities after estimation (0 = raw product, 1 = full 1/n root)
    double []prior_distribution;    // P(y), estimated from the training labels
    DiscreteCPD []distributions;    // one P(x_i|y) table per feature
    boolean []selectedFeatures;     // features used for prediction; null = use all
    int Ysize = 0;                  // number of action classes
    int Xsizes[];                   // number of possible values of each feature
    
    public CalibratedNaiveBayes(int a_Xsizes[], int a_Ysize, int estimation, double a_correctionFactor, UnitTypeTable utt, FeatureGenerator fg, String a_name) {
        super(utt, fg, a_name);
        Ysize = a_Ysize;
        Xsizes = a_Xsizes;
        estimationMethod = estimation;
        calibrationFactor = a_correctionFactor;
        clearTraining();
    }
    
    public Object clone() {
        // returns an UNTRAINED copy with the same configuration
        CalibratedNaiveBayes c = new CalibratedNaiveBayes(Xsizes, Ysize, estimationMethod, calibrationFactor, utt, featureGenerator, name);
        return c;
    }
    
    /** Resets all conditional probability tables (the prior is rebuilt by train()). */
    public void clearTraining() {
        int nfeatures = Xsizes.length;
        distributions = new DiscreteCPD[nfeatures];
        for(int i = 0;i<nfeatures;i++) {
            distributions[i] = new DiscreteCPD(Ysize, Xsizes[i]);
        }
    }
    
    /** Estimates the class prior and per-feature conditional distributions from the data. */
    public void train(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        int nfeatures = distributions.length;
        prior_distribution = new double[Ysize];
        for(int i = 0;i<x_l.size();i++) {
            int []x = x_l.get(i);
            int y = y_l.get(i);
            prior_distribution[y]++;
            for(int j = 0;j<nfeatures;j++) {
                distributions[j].addObservation(y, x[j]);
            }
        }
        if (estimationMethod==ESTIMATION_COUNTS) {
            for(int i = 0;i<Ysize;i++) prior_distribution[i]/=x_l.size();
        } else {
            // Laplace-smoothed prior:
            for(int i = 0;i<Ysize;i++) prior_distribution[i] = (prior_distribution[i]+1)/(x_l.size()+Ysize);
        }
    }
    
    /**
     * Grid-searches calibrationFactor over [0, 1] in steps of 0.05, keeping the value that
     * maximizes the loglikelihood of the given data (restricted to instances with more than
     * one legal action). The search stops as soon as the loglikelihood decreases.
     * Cleanup: removed dead code that computed an unused 'predicted_y' (and a shuffle that
     * only affected that dead computation).
     */
    public void calibrateProbabilities(List<int []> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        double best_c = 0;
        double best_ll = Double.NEGATIVE_INFINITY;
        for(double c = 0.0;c<=1.05;c+=0.05) {
            calibrationFactor = c;
            double loglikelihood = 0;
            for(int i = 0;i<x_l.size();i++) {
                Unit u = i_l.get(i).u;
                List<UnitAction> possibleUnitActions = u.getUnitActions(i_l.get(i).gs);
                List<Integer> possibleUnitActionIndexes = new ArrayList<>();
                for(UnitAction ua : possibleUnitActions) {
                    // attacks are indexed in attacker-relative coordinates:
                    if (ua.getType()==UnitAction.TYPE_ATTACK_LOCATION) {
                        ua = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, ua.getLocationX() - u.getX(), ua.getLocationY() - u.getY());
                    }
                    int idx = allPossibleActions.indexOf(ua);
                    if (idx<0) throw new Exception("Unknown action: " + ua);
                    possibleUnitActionIndexes.add(idx);
                }
                if (possibleUnitActions.size()>1) {
                    double predicted_distribution[] = predictDistribution(x_l.get(i), i_l.get(i));
                    predicted_distribution = filterByPossibleActionIndexes(predicted_distribution, possibleUnitActionIndexes);
                    int actual_y = y_l.get(i);
                    if (!possibleUnitActionIndexes.contains(actual_y)) continue;
                    double ll = Math.log(predicted_distribution[actual_y]);
                    if (Double.isInfinite(ll)) {
                        // the filtered distribution should never assign 0 to a legal action:
                        System.out.println(Arrays.toString(predicted_distribution));
                        System.out.println(possibleUnitActionIndexes);
                        System.out.println(actual_y + " : " + allPossibleActions.get(actual_y));
                        System.exit(1);
                    }
                    loglikelihood += ll;
                }
            }
            if (loglikelihood>best_ll) {
                best_c = c;
                best_ll = loglikelihood;
            } else {
                // once this starts going down, it will always go down...
                break;
            }
        }
        System.out.println("best calibration factor = " + best_c);
        calibrationFactor = best_c;
    }
    
    /** Keeps the top 'fractionOfFeaturesToKeep' features ranked by information gain ratio. */
    public void featureSelectionByGainRatio(List<int []> x_l, List<Integer> y_l, double fractionOfFeaturesToKeep) {
        List<Integer> featureIndexes = new ArrayList<>();
        List<Double> featureGR = new ArrayList<>();
        int nfeatures = distributions.length;
        selectedFeatures = new boolean[nfeatures];
        for(int i = 0;i<nfeatures;i++) {
            featureIndexes.add(i);
            featureGR.add(FeatureSelection.featureGainRatio(x_l, y_l, i));
            selectedFeatures[i] = false;
        }
        // sort features from highest to lowest gain ratio:
        featureIndexes.sort((o1, o2) -> Double.compare(featureGR.get(o2), featureGR.get(o1)));
        for(int i = 0;i<fractionOfFeaturesToKeep*nfeatures;i++) {
            selectedFeatures[featureIndexes.get(i)] = true;
        }
    }
    
    public double[] predictDistribution(int []x, TrainingInstance ti) {
        return predictDistribution(x, ti, calibrationFactor);
    }
    
    /**
     * Computes the (non-standard) calibrated posterior: starting from the prior (flat if
     * untrained), multiplies in the likelihood of each selected feature, then raises the
     * product to 1/(1*(1-correction) + n*correction), where n counts the factors used
     * (prior included). correction=0 leaves the product untouched; correction=1 turns it
     * into a geometric mean. The result is normalized; uniform if everything underflows to 0.
     */
    public double[] predictDistribution(int []x, TrainingInstance ti, double correction) {
        double d[] = new double[Ysize];
        for(int i = 0;i<Ysize;i++) {
            if (prior_distribution==null) {
                // untrained model: start from a flat (unnormalized) prior
                d[i] = 1;
            } else {
                d[i] = prior_distribution[i];
            }
        }
        double n_used_features = 1; // this includes the prior
        for(int i = 0;i<x.length;i++) {
            if (selectedFeatures==null || selectedFeatures[i]) {
                n_used_features++;
                if (estimationMethod == ESTIMATION_COUNTS) {
                    for(int j = 0;j<Ysize;j++) {
                        double d2[] = distributions[i].distribution(j);
                        d[j] *= d2[x[i]];
                    }
                } else {
                    for(int j = 0;j<Ysize;j++) {
                        // laplaceBeta is presumably defined in the superclass — confirm
                        double d2[] = distributions[i].distributionLaplace(j, laplaceBeta);
                        double v = 1;
                        if (d2.length > x[i]) {
                            v = d2[x[i]];
                        } else {
                            // feature value outside the range seen at training time:
                            v = 1.0/Ysize;
                        }
                        d[j] *= v;
                    }
                }
            }
        }
        double accum = 0;
        for(int i = 0;i<Ysize;i++) {
            d[i] = Math.pow(d[i], 1/(1*(1-correction)+ n_used_features*correction));
            accum += d[i];
        }
        if (accum <= 0) {
            // if 0 accum, then just make uniform distribution:
            for(int i = 0;i<Ysize;i++) d[i] = 1.0/Ysize;
        } else {
            for(int i = 0;i<Ysize;i++) d[i] /= accum;
        }
        return d;
    }
    
    /** Serializes configuration, prior, selected features and all CPDs as XML. */
    public void save(XMLWriter w) throws Exception {
        w.tagWithAttributes(this.getClass().getSimpleName(),
                            "estimationMethod=\"" +estimationMethod+ "\" " +
                            "Ysize=\"" +Ysize+ "\" " +
                            "calibrationFactor=\""+calibrationFactor+"\" " +
                            "nfeatures=\""+distributions.length+"\" " +
                            "featureGenerationClass=\""+featureGenerator.getClass().getSimpleName()+"\"");
        w.tag("Xsizes");
        for(int v:Xsizes) w.rawXML(v + " ");
        w.rawXML("\n");
        w.tag("/Xsizes");
        w.tag("priorDistribution");
        for(double v:prior_distribution) w.rawXML(v + " ");
        w.rawXML("\n");
        w.tag("/priorDistribution");
        if (selectedFeatures!=null) {
            w.tag("selectedFeatures");
            for(boolean v:selectedFeatures) w.rawXML(v + " ");
            w.rawXML("\n");
            w.tag("/selectedFeatures");
        }
        for (DiscreteCPD distribution : distributions) {
            distribution.save(w);
        }
        // bug fix: the closing tag was the hard-coded "/SimpleNaiveBayes", which did not match
        // the opening tag above and produced malformed XML that load() could not read back
        w.tag("/"+this.getClass().getSimpleName());
        w.flush();
    }
    
    /** Reconstructs a model previously written by save(). */
    public CalibratedNaiveBayes(Element e, UnitTypeTable utt, String a_name) throws Exception {
        super(utt, null, a_name);
        load(e);
    }
    
    public void load(Element e) throws Exception {
        // bug fix: the error message used to claim the expected tag was 'SimpleNaiveBayes'
        if (!e.getName().equals(this.getClass().getSimpleName())) throw new Exception("Head tag is not '"+this.getClass().getSimpleName()+"'!");
        String fgclass = e.getAttributeValue("featureGenerationClass");
        // note: an unknown generator class silently leaves featureGenerator null
        if (fgclass.contains("FeatureGeneratorEmpty")) {
            featureGenerator = new FeatureGeneratorEmpty();
        } else if (fgclass.contains("FeatureGeneratorSimple")) {
            featureGenerator = new FeatureGeneratorSimple();
        } else if (fgclass.contains("FeatureGeneratorComplex")) {
            featureGenerator = new FeatureGeneratorComplex();
        }
        estimationMethod = Integer.parseInt(e.getAttributeValue("estimationMethod"));
        Ysize = Integer.parseInt(e.getAttributeValue("Ysize"));
        int nfeatures = Integer.parseInt(e.getAttributeValue("nfeatures"));
        calibrationFactor = Double.parseDouble(e.getAttributeValue("calibrationFactor"));
        Element xs_xml = e.getChild("Xsizes");
        {
            String text = xs_xml.getTextTrim();
            String []tokens = text.split(" ");
            Xsizes = new int[nfeatures];
            for(int i = 0;i<nfeatures;i++) Xsizes[i] = Integer.parseInt(tokens[i]);
        }
        Element pd_xml = e.getChild("priorDistribution");
        {
            String text = pd_xml.getTextTrim();
            String []tokens = text.split(" ");
            prior_distribution = new double[Ysize];
            for(int i = 0;i<Ysize;i++) prior_distribution[i] = Double.parseDouble(tokens[i]);
        }
        Element sf_xml = e.getChild("selectedFeatures");
        if (sf_xml!=null) {
            String text = sf_xml.getTextTrim();
            String []tokens = text.split(" ");
            selectedFeatures = new boolean[nfeatures];
            for(int i = 0;i<nfeatures;i++) selectedFeatures[i] = Boolean.parseBoolean(tokens[i]);
        } else {
            selectedFeatures = null;
        }
        distributions = new DiscreteCPD[nfeatures];
        List cpd_xml_l = e.getChildren("DiscreteCPD");
        for(int i = 0;i<nfeatures;i++) {
            Element cpd_xml = (Element)cpd_xml_l.get(i);
            distributions[i] = new DiscreteCPD(cpd_xml);
        }
    }
    
    /**
     * Greedy forward feature selection: starting from the empty set, repeatedly adds the
     * single feature that most improves 10-fold cross-validation accuracy, stopping when
     * no feature improves the score.
     */
    public void featureSelectionByCrossValidation(List<int[]> x_l, List<Integer> y_l, List<TrainingInstance> i_l) throws Exception {
        int nfeatures = distributions.length;
        System.out.println("featureSelectionByCrossValidation " + x_l.size());
        boolean bestSelection[] = new boolean[nfeatures];
        for(int i = 0;i<nfeatures;i++) bestSelection[i] = false;
        selectedFeatures = bestSelection;
        double best_score = FeatureSelection.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10).m_a;
        System.out.println(" loglikelihood with " + Arrays.toString(selectedFeatures) + ": " + best_score);
        boolean change;
        do {
            change = false;
            boolean bestLastSelection[] = bestSelection;
            for(int i = 0;i<nfeatures;i++) {
                if (!bestSelection[i]) {
                    // try adding feature i to the current best selection:
                    boolean currentSelection[] = new boolean[nfeatures];
                    System.arraycopy(bestSelection, 0, currentSelection, 0, nfeatures);
                    currentSelection[i] = true;
                    selectedFeatures = currentSelection;
                    double score = FeatureSelection.crossValidation(this, x_l, y_l, i_l, allPossibleActions, 10).m_a;
                    System.out.println(" loglikelihood with " + Arrays.toString(selectedFeatures) + ": " + score);
                    if (score > best_score) {
                        bestLastSelection = currentSelection;
                        best_score = score;
                        change = true;
                    }
                }
            }
            bestSelection = bestLastSelection;
        }while(change);
        selectedFeatures = bestSelection;
        System.out.println("Selected features: " + Arrays.toString(selectedFeatures));
    }
}
| 14,468 | 40.458453 | 160 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/DiscreteCPD.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import org.jdom.Element;
import util.XMLWriter;
/**
*
* @author santi
*
* captures the probability P(Y|X)
*
*/
public class DiscreteCPD {
    public int Xvalues = 0;  // number of values of the conditioning variable X
    public int Yvalues = 0;  // number of values of the dependent variable Y
    public int counts[][];   // counts[x][y] = times Y=y was observed together with X=x
    
    public DiscreteCPD(int nX, int nY) {
        Xvalues = nX;
        Yvalues = nY;
        counts = new int[nX][nY];
    }
    
    /** Records one observation of the pair (X, Y). */
    public void addObservation(int X, int Y) {
        counts[X][Y]++;
    }
    
    /** Counts of each Y value, marginalized over all values of X. */
    public int[] marginalizedCounts() {
        int []marginalizedCounts = new int[Yvalues];
        for(int i = 0;i<Xvalues;i++) {
            for(int j = 0;j<Yvalues;j++) {
                marginalizedCounts[j] += counts[i][j];
            }
        }
        return marginalizedCounts;
    }
    
    /**
     * Maximum-likelihood estimate of P(Y).
     * Bug fix: with zero observations this used to return an all-NaN array (0/0);
     * it now falls back to the uniform distribution.
     */
    public double[] marginalizedDistribution() {
        int []marginalizedCounts = marginalizedCounts();
        double []distribution = new double[Yvalues];
        int accum = 0;
        for(int i = 0;i<Yvalues;i++) accum += marginalizedCounts[i];
        if (accum==0) {
            for(int i = 0;i<Yvalues;i++) distribution[i] = 1.0/Yvalues;
            return distribution;
        }
        for(int i = 0;i<Yvalues;i++) distribution[i] = ((double)marginalizedCounts[i])/accum;
        return distribution;
    }
    
    /** Laplace-smoothed estimate of P(Y) with pseudo-count 'beta' (assumes beta > 0). */
    public double[] marginalizedDistributionLaplace(double beta) {
        int []marginalizedCounts = marginalizedCounts();
        double []distribution = new double[Yvalues];
        int accum = 0;
        for(int i = 0;i<Yvalues;i++) accum += marginalizedCounts[i];
        for(int i = 0;i<Yvalues;i++) distribution[i] = ((double)marginalizedCounts[i] + beta)/(accum + beta*Yvalues);
        return distribution;
    }
    
    /**
     * Maximum-likelihood estimate of P(Y|X=Xvalue); uniform for an out-of-range X.
     * Bug fixes: an X value with zero observations used to return all-NaN (0/0), and a
     * negative X used to throw ArrayIndexOutOfBoundsException instead of hitting the
     * uniform fallback.
     */
    public double[] distribution(int Xvalue) {
        double []distribution = new double[Yvalues];
        if (Xvalue<0 || Xvalue>=Xvalues) {
            for(int i = 0;i<Yvalues;i++) distribution[i] = 1.0/Yvalues;
            return distribution;
        }
        int accum = 0;
        for(int i = 0;i<Yvalues;i++) accum += counts[Xvalue][i];
        if (accum==0) {
            // X never observed: return uniform instead of NaNs
            for(int i = 0;i<Yvalues;i++) distribution[i] = 1.0/Yvalues;
            return distribution;
        }
        for(int i = 0;i<Yvalues;i++) distribution[i] = ((double)counts[Xvalue][i])/accum;
        return distribution;
    }
    
    /**
     * Laplace-smoothed estimate of P(Y|X=Xvalue) with pseudo-count 'beta';
     * uniform for an out-of-range (including negative — bug fix) X.
     */
    public double[] distributionLaplace(int Xvalue, double beta) {
        double []distribution = new double[Yvalues];
        if (Xvalue<0 || Xvalue>=Xvalues) {
            for(int i = 0;i<Yvalues;i++) distribution[i] = 1.0/Yvalues;
            return distribution;
        }
        int accum = 0;
        for(int i = 0;i<Yvalues;i++) accum += counts[Xvalue][i];
        for(int i = 0;i<Yvalues;i++) distribution[i] = ((double)counts[Xvalue][i] + beta)/(accum + beta*Yvalues);
        return distribution;
    }
    
    /** Writes this CPD as a {@code <DiscreteCPD>} XML element (one row of counts per X value). */
    public void save(XMLWriter w) throws Exception {
        w.tagWithAttributes("DiscreteCPD","Xvalues=\""+Xvalues+"\" Yvalues=\""+Yvalues+"\"");
        for(int i = 0;i<Xvalues;i++) {
            for(int j = 0;j<Yvalues;j++) {
                w.rawXML(counts[i][j] + " ");
            }
            w.rawXML("\n");
        }
        w.tag("/DiscreteCPD");
    }
    
    /** Reconstructs a CPD previously written by {@link #save(XMLWriter)}. */
    public DiscreteCPD(Element e) throws Exception {
        if (!e.getName().equals("DiscreteCPD")) throw new Exception("Head tag is not 'DiscreteCPD'!");
        Xvalues = Integer.parseInt(e.getAttributeValue("Xvalues"));
        Yvalues = Integer.parseInt(e.getAttributeValue("Yvalues"));
        counts = new int[Xvalues][Yvalues];
        String text = e.getTextTrim();
        String tokens[] = text.split(" |\n");
        for(int k = 0,i = 0;i<Xvalues;i++) {
            for(int j = 0;j<Yvalues;j++,k++) {
                // skip empty tokens produced by consecutive separators:
                while(tokens[k].equals("")) k++;
                counts[i][j] = Integer.parseInt(tokens[k]);
            }
        }
    }
}
| 3,815 | 28.8125 | 117 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/FeatureSelection.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import ai.machinelearning.bayes.TrainingInstance;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import util.Pair;
/**
*
* @author santi
*/
public class FeatureSelection {
    public static int DEBUG = 0;
    
    /**
     * Cross-validation accuracy of 'model' for a candidate feature subset.
     * NOTE(review): X_reduced_l is built but never used — crossValidation is still fed the
     * full X_l, so 'features' currently has no effect on the returned score. Passing the
     * reduced vectors would require a model constructed for features.size() dimensions;
     * behavior preserved here pending confirmation of the intent.
     */
    public static double featureSetCrossValidationAccuracy(BayesianModel model, List<int []> X_l, List<Integer> Y_l, 
                                                           List<TrainingInstance> instances, List<UnitAction> allPossibleActions, 
                                                           List<Integer> features) throws Exception {
        // project each instance down to the candidate feature subset:
        List<int []> X_reduced_l = new ArrayList<>();
        for(int []x:X_l) {
            int []x_reduced = new int[features.size()];
            for(int i = 0;i<features.size();i++) {
                x_reduced[i] = x[features.get(i)];
            }
            X_reduced_l.add(x_reduced);
        }
        double accuracy = crossValidation(model, X_l, Y_l, instances, allPossibleActions, 10).m_a;
        return accuracy;
    }
    
    /**
     * Information gain ratio of 'feature' with respect to the labels Y
     * (information gain divided by the feature's intrinsic value); returns 0 when the
     * intrinsic value is 0, i.e. the feature takes a single value in the data.
     */
    public static double featureGainRatio(List<int []> X_l, List<Integer> Y_l, int feature) {
        // cardinalities of the feature and the label, inferred from the data:
        int n_x_values = 0;
        int n_y_values = 0;
        for(int i = 0;i<X_l.size();i++) {
            if (X_l.get(i)[feature]>=n_x_values) n_x_values = X_l.get(i)[feature]+1;
            if (Y_l.get(i)>=n_y_values) n_y_values = Y_l.get(i)+1;
        }
        int x_distribution[] = new int[n_x_values];
        List<Integer> y_x_distributions[] = new List[n_x_values];
        for(int i = 0;i<n_x_values;i++) {
            y_x_distributions[i] = new ArrayList<>();
        }
        for(int i = 0;i<X_l.size();i++) {
            int x = X_l.get(i)[feature];
            x_distribution[x]++;
            y_x_distributions[x].add(Y_l.get(i));
        }
        // label entropy, and label entropy conditioned on each feature value:
        double H = entropy(Y_l, n_y_values);
        double H_x[] = new double[n_x_values];
        for(int i = 0;i<n_x_values;i++) {
            if (x_distribution[i]>0) {
                H_x[i] = entropy(y_x_distributions[i], n_y_values);
            } else {
                H_x[i] = 0;
            }
        }
        double information_gain = H;
        double intrinsic_value = 0;
        for(int i = 0;i<n_x_values;i++) {
            double x_ratio = x_distribution[i]/(double)X_l.size();
            information_gain -= x_ratio * H_x[i];
            if (x_distribution[i]>0) intrinsic_value -= x_ratio*Math.log(x_ratio)/Math.log(2);
        }
        double information_gain_ratio = (intrinsic_value>0 ? information_gain / intrinsic_value : 0);
        return information_gain_ratio;
    }
    
    /** Shannon entropy (in bits) of a list of labels drawn from {0..nValues-1}. */
    public static double entropy(List<Integer> l, int nValues) {
        int histogram[] = new int[nValues];
        double total = 0;
        for(int v:l) {
            histogram[v]++;
            total ++;
        }
        double h = 0;
        for(int i = 0;i<nValues;i++) {
            double p = histogram[i]/total;
            if (histogram[i]>0) {
                h += -p * Math.log(p)/Math.log(2);
            }
        }
        return h;
    }
    
    /**
     * n-fold cross validation of 'model' over the dataset; returns (accuracy, average
     * loglikelihood). Only instances with more than one legal action are scored; attack
     * actions are translated to attacker-relative coordinates to match 'allPossibleActions'.
     * Bug fix: the tie-breaking shuffle now shuffles possibleUnitActionIndexes — the list
     * actually iterated when picking the argmax — instead of possibleUnitActions, which was
     * a no-op on the prediction.
     */
    public static Pair<Double,Double> crossValidation(BayesianModel model, List<int []> X_l, List<Integer> Y_l, 
                                         List<TrainingInstance> instances, 
                                         List<UnitAction> allPossibleActions,
                                         int nfolds) throws Exception 
    {
        Random r = new Random();
        List<Integer> folds[] = new List[nfolds];
        int nfeatures = X_l.get(0).length;
        int []Xsizes = new int[nfeatures];
        int Ysize = 0;
        UnitTypeTable utt = instances.get(0).gs.getUnitTypeTable();
        for(int i = 0;i<nfolds;i++) {
            folds[i] = new ArrayList<>();
        }
        // randomly assign each instance to a fold, and infer feature/label cardinalities:
        for(int i = 0;i<X_l.size();i++) {
            int fold = r.nextInt(nfolds);
            folds[fold].add(i);
            for(int j = 0;j<nfeatures;j++) {
                if (X_l.get(i)[j] >= Xsizes[j]) Xsizes[j] = X_l.get(i)[j]+1;
            }
            if (Y_l.get(i) >= Ysize) Ysize = Y_l.get(i)+1;
        }
        if (DEBUG>=1) System.out.println("Xsizes: " + Arrays.toString(Xsizes));
        if (DEBUG>=1) System.out.println("Ysize: " + Ysize);
        // per-unit-type aggregates over all folds:
        double correct_per_unit[] = new double[utt.getUnitTypes().size()];
        double total_per_unit[] = new double[utt.getUnitTypes().size()];
        double loglikelihood_per_unit[] = new double[utt.getUnitTypes().size()];
        for(int fold = 0;fold<nfolds;fold++) {
            if (DEBUG>=1) System.out.println("Evaluating fold " + (fold+1) + "/" + nfolds + ":");
            // prepare training and test set:
            List<int []> X_training = new ArrayList<>();
            List<Integer> Y_training = new ArrayList<>();
            List<TrainingInstance> i_training = new ArrayList<>();
            List<int []> X_test = new ArrayList<>();
            List<Integer> Y_test = new ArrayList<>();
            List<TrainingInstance> i_test = new ArrayList<>();
            for(int i = 0;i<nfolds;i++) {
                if (i==fold) {
                    for(int idx:folds[i]) {
                        X_test.add(X_l.get(idx));
                        Y_test.add(Y_l.get(idx));
                        i_test.add(instances.get(idx));
                    }
                } else {
                    for(int idx:folds[i]) {
                        X_training.add(X_l.get(idx));
                        Y_training.add(Y_l.get(idx));
                        i_training.add(instances.get(idx));
                    }
                }
            }
            if (DEBUG>=1) System.out.println(" training/test split is " + X_training.size() + "/" + X_test.size());
            // train the model:
            model.clearTraining();
            model.train(X_training, Y_training, i_training);
            // test the model:
            int fold_correct_per_unit[] = new int[utt.getUnitTypes().size()];
            int fold_total_per_unit[] = new int[utt.getUnitTypes().size()];
            double fold_loglikelihood_per_unit[] = new double[utt.getUnitTypes().size()];
            double numPossibleActionsAccum = 0;
            for(int i = 0;i<X_test.size();i++) {
                Unit u = i_test.get(i).u;
                List<UnitAction> possibleUnitActions = u.getUnitActions(i_test.get(i).gs);
                List<Integer> possibleUnitActionIndexes = new ArrayList<>();
                for(UnitAction ua : possibleUnitActions) {
                    // attacks are indexed in attacker-relative coordinates:
                    if (ua.getType()==UnitAction.TYPE_ATTACK_LOCATION) {
                        ua = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, ua.getLocationX() - u.getX(), ua.getLocationY() - u.getY());
                    }
                    int idx = allPossibleActions.indexOf(ua);
                    if (idx<0) throw new Exception("Unknown action: " + ua);
                    possibleUnitActionIndexes.add(idx);
                }
                if (possibleUnitActions.size()>1) {
                    numPossibleActionsAccum += possibleUnitActions.size();
                    double predicted_distribution[] = model.predictDistribution(X_test.get(i), i_test.get(i));
                    predicted_distribution = model.filterByPossibleActionIndexes(predicted_distribution, possibleUnitActionIndexes);
                    int actual_y = Y_test.get(i);
                    if (!possibleUnitActionIndexes.contains(actual_y)) {
                        System.out.println("Actual action in the dataset is not possible!");
                        continue;
                    }
                    int predicted_y = -1;
                    // shuffle the index list actually iterated below, so ties are broken at
                    // random rather than by action ordering (fixed: used to shuffle the wrong list)
                    Collections.shuffle(possibleUnitActionIndexes);
                    for(int idx:possibleUnitActionIndexes) {
                        if (predicted_y==-1) {
                            predicted_y = idx;
                        } else {
                            if (predicted_distribution[idx]>predicted_distribution[predicted_y]) predicted_y = idx;
                        }
                    }
                    if (predicted_y == actual_y) fold_correct_per_unit[u.getType().ID]++;
                    fold_total_per_unit[u.getType().ID]++;
                    double loglikelihood = Math.log(predicted_distribution[actual_y]);
                    if (Double.isInfinite(loglikelihood)) {
                        // the filtered distribution should never assign 0 to a legal action:
                        System.out.println(Arrays.toString(predicted_distribution));
                        System.out.println(possibleUnitActionIndexes);
                        System.out.println(actual_y + " : " + allPossibleActions.get(actual_y));
                        System.exit(1);
                    }
                    fold_loglikelihood_per_unit[u.getType().ID] += loglikelihood;
                }
            }
            double fold_accuracy_per_unit[] = new double[utt.getUnitTypes().size()];
            if (DEBUG>=1) System.out.println("Average possible actions: " + numPossibleActionsAccum/X_test.size());
            for(int i = 0;i<utt.getUnitTypes().size();i++) {
                fold_accuracy_per_unit[i] = fold_correct_per_unit[i]/(double)fold_total_per_unit[i];
                if (DEBUG>=1) System.out.println("Fold accuracy ("+utt.getUnitTypes().get(i).name+"): " + fold_accuracy_per_unit[i] + " (" + fold_correct_per_unit[i] + "/" + fold_total_per_unit[i] + ")");
                correct_per_unit[i] += fold_correct_per_unit[i];
                total_per_unit[i] += fold_total_per_unit[i];
            }
            for(int i = 0;i<utt.getUnitTypes().size();i++) {
                if (DEBUG>=1) System.out.println("Fold loglikelihood ("+utt.getUnitTypes().get(i).name+"): " + fold_loglikelihood_per_unit[i] + " (average: " + fold_loglikelihood_per_unit[i]/fold_total_per_unit[i] + ")");
                loglikelihood_per_unit[i] += fold_loglikelihood_per_unit[i];
            }
        }
        if (DEBUG>=1) System.out.println(" ---------- ");
        // aggregate over all folds:
        double correct = 0;
        double total = 0;
        double loglikelihood = 0;
        for(int i = 0;i<utt.getUnitTypes().size();i++) {
            double accuracy_per_unit = correct_per_unit[i]/(double)total_per_unit[i];
            if (DEBUG>=1) System.out.println("Final accuracy ("+utt.getUnitTypes().get(i).name+"): " + accuracy_per_unit + " (" + correct_per_unit[i] + "/" + total_per_unit[i] + ")");
            correct += correct_per_unit[i];
            total += total_per_unit[i];
        }
        for(int i = 0;i<utt.getUnitTypes().size();i++) {
            if (DEBUG>=1) System.out.println("Final loglikelihood ("+utt.getUnitTypes().get(i).name+"): " + loglikelihood_per_unit[i] + " (average: " + loglikelihood_per_unit[i]/total_per_unit[i] + ")");
            loglikelihood += loglikelihood_per_unit[i];
        }
        double accuracy = correct/total;
        if (DEBUG>=1) System.out.println("Final accuracy: " + accuracy);
        if (DEBUG>=1) System.out.println("Final loglikelihood: " + loglikelihood + " (average " + (loglikelihood/total) + ")");
        return new Pair<>(accuracy, loglikelihood / total);
    }
}
| 15,834 | 47.723077 | 221 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/TrainingInstance.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
/**
 * One supervised-learning example: a game state, the unit that acted in it,
 * and the action that unit chose. Attack-location actions are normalized to
 * coordinates relative to the unit rather than absolute map coordinates.
 *
 * @author santi
 */
public class TrainingInstance {
    public GameState gs;
    public Unit u;
    public UnitAction ua;
    
    /**
     * Builds an instance, resolving the acting unit by its ID and converting
     * attack-location actions to unit-relative coordinates.
     *
     * @throws Exception if no unit with the given ID exists in the game state
     */
    public TrainingInstance(GameState a_gs, long uID, UnitAction a_ua) throws Exception {
        gs = a_gs;
        u = gs.getUnit(uID);
        if (u==null) throw new Exception("Unit " + uID + " not found!");
        ua = a_ua;
        boolean isAttackLocation = ua!=null && ua.getType() == UnitAction.TYPE_ATTACK_LOCATION;
        if (isAttackLocation) {
            // store the target as an offset from the unit's own position:
            int dx = ua.getLocationX() - u.getX();
            int dy = ua.getLocationY() - u.getY();
            ua = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION, dx, dy);
        }
    }
    
    /**
     * Maps every action the unit can currently execute to its index in
     * {@code allPossibleActions}. Attack locations are made unit-relative
     * first, matching the normalization applied in the constructor.
     */
    public List<Integer> getPossibleActions(List<UnitAction> allPossibleActions) {
        List<Integer> indexes = new ArrayList<>();
        for(UnitAction candidate : u.getUnitActions(gs)) {
            if (candidate.getType()==UnitAction.TYPE_ATTACK_LOCATION) {
                candidate = new UnitAction(UnitAction.TYPE_ATTACK_LOCATION,
                                           candidate.getLocationX() - u.getX(),
                                           candidate.getLocationY() - u.getY());
            }
            indexes.add(allPossibleActions.indexOf(candidate));
        }
        return indexes;
    }
}
| 1,453 | 30.608696 | 129 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/featuregeneration/FeatureGenerator.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes.featuregeneration;
import ai.machinelearning.bayes.TrainingInstance;
import java.io.File;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.List;
import org.jdom.input.SAXBuilder;
import rts.GameState;
import rts.Trace;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
 * Base class for feature generators: utilities to load game traces, turn them
 * into training instances, and export feature vectors in Weka's ARFF format.
 * Subclasses implement {@link #generateFeatures(TrainingInstance)}.
 *
 * @author santi
 */
public abstract class FeatureGenerator {
    
    /**
     * Loads all the traces stored as ".xml" files in the given folder
     * (non-recursive).
     *
     * @throws Exception if the folder cannot be listed or a trace fails to parse
     */
    public static List<Trace> loadTraces(String tracesfolder) throws Exception {
        List<Trace> traces = new ArrayList<>();
        File folder = new File(tracesfolder);
        File[] files = folder.listFiles();
        // File.listFiles() returns null when the path does not exist or is not
        // a directory; fail with a clear message instead of a NullPointerException:
        if (files == null) throw new Exception("Could not list trace folder: " + tracesfolder);
        for(File file:files) {
            String fileName = file.getAbsolutePath();
            if (fileName.endsWith(".xml")) {
                Trace t = new Trace(new SAXBuilder().build(fileName).getRootElement());
                traces.add(t);
            }
        }
        return traces;
    }
    
    /**
     * Extracts one training instance per (unit, action) pair issued by the
     * winner of each trace, but only when the unit had more than one action
     * available (otherwise the decision carries no information).
     */
    public static List<TrainingInstance> generateInstances(List<Trace> traces) throws Exception {
        List<TrainingInstance> instances = new ArrayList<>();
        for(Trace t:traces) {
            GameState lastgs = t.getGameStateAtCycle(t.getLength());
            int winner = lastgs.winner();
            for(TraceEntry te:t.getEntries()) {
                GameState gs = t.getGameStateAtCycle(te.getTime());
                for(Pair<Unit,UnitAction> tmp:te.getActions()) {
                    // only record decisions where there was an actual choice:
                    if (tmp.m_a.getUnitActions(gs).size()>1) {
                        // imitate only the winning player:
                        if (tmp.m_a.getPlayer()==winner) {
                            TrainingInstance ti = new TrainingInstance(gs, tmp.m_a.getID(), tmp.m_b);
                            instances.add(ti);
                        }
                    }
                }
            }
        }
        return instances;
    }
    
    /**
     * Writes the ARFF header (@relation, one @attribute per feature, the class
     * attribute, and @data). Attribute types are inferred from the FIRST
     * instance: String-valued features become nominal attributes (with the set
     * of values seen across all instances), everything else becomes numeric.
     *
     * @throws Exception if 'features' is empty (no instance to infer the header from)
     */
    public static void writeARFFHeader(List<List<Object>> features, List<String> labels, String name, FileWriter fw) throws Exception {
        // explicit guard, since attribute types are inferred from features.get(0):
        if (features.isEmpty()) throw new Exception("writeARFFHeader: no instances to infer the header from");
        fw.write("@relation " + name + "\n");
        int nfeatures = features.get(0).size();
        for(int i = 0;i<nfeatures;i++) {
            List<Object> instance = features.get(0);
            if (instance.get(i) instanceof String) {
                // nominal attribute: collect the distinct values of feature i
                List<String> values = new ArrayList<>();
                for(List<Object> instance2:features) {
                    if (!values.contains(instance2.get(i))) values.add((String)instance2.get(i));
                }
                fw.write("@attribute f" + i + " {");
                boolean first = true;
                for(String v:values) {
                    if (first) {
                        fw.write("'" + v + "'");
                        first = false;
                    } else {
                        fw.write(",'" + v + "'");
                    }
                }
                fw.write("}\n");
            } else {
                fw.write("@attribute f" + i + " numeric\n");
            }
        }
        // class attribute: nominal over the distinct labels
        List<String> values = new ArrayList<>();
        for(String label:labels) {
            if (!values.contains(label)) values.add(label);
        }
        fw.write("@attribute class {");
        boolean first = true;
        for(String v:values) {
            if (first) {
                fw.write("'" + v + "'");
                first = false;
            } else {
                fw.write(",'" + v + "'");
            }
        }
        fw.write("}\n");
        fw.write("@data\n");
    }
    
    /**
     * Writes one ARFF data row: the feature values (Strings quoted), each
     * followed by a comma, then the quoted class label.
     */
    public static void translateToARFF(List<Object> features, String label, FileWriter fw) throws Exception {
        for(Object value:features) {
            if (value instanceof String) {
                fw.write("'" + value + "'");
            } else {
                fw.write(value.toString());
            }
            fw.write(",");
        }
        fw.write("'" + label + "'\n");
    }
    
    /**
     * Convenience wrapper around {@link #generateFeatures(TrainingInstance)}
     * for generators whose feature values are all Integers.
     */
    public int[] generateFeaturesAsArray(TrainingInstance ti) {
        List<Object> feature_vector = generateFeatures(ti);
        int []x = new int[feature_vector.size()];
        for(int i = 0;i<feature_vector.size();i++) {
            x[i] = (Integer)feature_vector.get(i);
        }
        return x;
    }
    
    /** Computes the feature vector describing a single training instance. */
    public abstract List<Object> generateFeatures(TrainingInstance ti);
}
| 4,509 | 32.161765 | 135 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/featuregeneration/FeatureGeneratorComplex.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes.featuregeneration;
import ai.machinelearning.bayes.TrainingInstance;
import java.util.ArrayList;
import java.util.List;
import rts.PhysicalGameState;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
 * Feature generator producing a detailed, discretized view of a unit's
 * situation: player resources, basic unit stats, the direction towards the
 * friendly and enemy unit centroids, a 28-cell encoding of the surrounding
 * area, and pairwise combinations of selected neighboring cells.
 * All generated feature values are Integers.
 *
 * @author santi
 */
public class FeatureGeneratorComplex extends FeatureGenerator {
    public List<Object> generateFeatures(TrainingInstance ti) {
        PhysicalGameState pgs = ti.gs.getPhysicalGameState();
        List<Object> features = new ArrayList<>();
        
        // feature 0: the acting player's stockpiled resources
        int player = ti.u.getPlayer();
        UnitTypeTable utt = ti.gs.getUnitTypeTable();
        features.add((Integer)ti.gs.getPlayer(player).getResources());

        // features 1-3: resources carried by the unit, hit points, attack range
        features.add(ti.u.getResources());
        features.add(ti.u.getHitPoints());
        features.add(ti.u.getAttackRange());
//        features.add(ti.u.getDamage());

        // unit type (disabled):
//        features.add(ti.u.getType().name);
//        features.add(ti.u.getType().ID);
        
        // average coordinates of friendly and enemy units (neutral units ignored):
        int total_friendly = 0;
        double x_friendly = 0;
        double y_friendly = 0;
        int total_enemy = 0;
        double x_enemy = 0;
        double y_enemy = 0;
        int have_barracks = 0;    // 1 if the player owns at least one Barracks
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==-1) {
                // neutral units
            } else if (u.getPlayer()==player) {
                x_friendly += u.getX();
                y_friendly += u.getY();
                total_friendly++;
                if (u.getType().name.equals("Barracks")) have_barracks = 1;
            } else {
                x_enemy += u.getX();
                y_enemy += u.getY();
                total_enemy++;
            }
        }
        // NOTE(review): if the enemy has no units left these divisions yield
        // NaN, and the direction below degrades to 0 after the int cast -
        // confirm this is acceptable to the models consuming these features.
        x_friendly/=total_friendly;
        y_friendly/=total_friendly;
        x_enemy/=total_enemy;
        y_enemy/=total_enemy;
        // discretize the direction towards each centroid into PI/4 sectors
        // (8 sectors), offset by half a sector so sectors are centered on the axes:
        x_friendly-=ti.u.getX();
        y_friendly-=ti.u.getY();
        x_enemy-=ti.u.getX();
        y_enemy-=ti.u.getY();
        double angle_friendly = Math.atan2(x_friendly, y_friendly);
        double angle_enemy = Math.atan2(x_enemy, y_enemy);
        double resolution = Math.PI/4;
        angle_friendly+=resolution/2;    // offset by half a sector
        angle_enemy+=resolution/2;    // offset by half a sector
        if (angle_friendly<0) angle_friendly+=Math.PI*2;
        if (angle_enemy<0) angle_enemy+=Math.PI*2;
        int direction_friendly = (int)(angle_friendly/(resolution));
        int direction_enemy = (int)(angle_enemy/(resolution));
//        System.out.println("quadrant: " + x_enemy + "," + y_enemy + " -> " + Math.atan2(x_enemy, y_enemy) + " -> " + direction_enemy);
        features.add(direction_friendly);
        features.add(direction_enemy);
        features.add(have_barracks);
        
        // surrounding area: a diamond of 28 cells around the unit (offsets below).
        // Each cell is encoded as: 0 = free, 1 = reserved by a pending move,
        // 2 = wall/off-map, 3..3+T-1 = friendly unit of that type ID,
        // 3+T..3+2T-1 = enemy unit of that type ID (T = number of unit types).
//        int xo[] = {-1, 0, 1, 0,  -2, 0, 2, 0,  -1, -1, 1, 1};
//        int yo[] = { 0,-1, 0, 1,   0,-2, 0, 2,  -1, 1,-1, 1};
        int surround_feature_start = features.size();
        int xo[] = { 0,  -2,-1, 0, 1, 2,  -2,-1, 0, 1, 2,  -3,-2,-1, 1, 2, 3,  -2,-1, 0, 1, 2,  -2,-1, 0, 1, 2,  0};
        int yo[] = {-3,  -2,-2,-2,-2,-2,  -1,-1,-1,-1,-1,   0, 0, 0, 0, 0, 0,   1, 1, 1, 1, 1,   2, 2, 2, 2, 2,  3 };
        int x = ti.u.getX();
        int y = ti.u.getY();
        int width = pgs.getWidth();
        int height = pgs.getHeight();
        int unitTypes = utt.getUnitTypes().size();
        for(int i = 0;i<xo.length;i++) {
            // note the offsets are SUBTRACTED from the unit position:
            int x2 = x - xo[i];
            int y2 = y - yo[i];
            Unit u = pgs.getUnitAt(x2, y2);
            if (u!=null) {
                if (u.getPlayer() == player) {
                    features.add(3 + u.getType().ID);
//                    features.add("friendly" + u.getType().name);
                } else {
                    features.add(3 + unitTypes + u.getType().ID);
//                    features.add(u.getType().name);
                }
            } else {
                if (x2<0 || y2<0 || x2>=width || y2>=height) {
                    features.add(2);
//                    features.add("wall");
                } else if (pgs.getTerrain(x2, y2) == PhysicalGameState.TERRAIN_NONE) {
                    if (ti.gs.free(x2, y2)) {
                        features.add(0);
//                        features.add("free");
                    } else {
                        features.add(1);
//                        features.add("reserved");
                    }
                } else {
                    features.add(2);
//                    features.add("wall");
                }
            }
        }
        
        // combined features: pairs of cell encodings packed in base (2T+3).
        // Indexes 8, 13, 14 and 19 into the surround block are the four cells
        // orthogonally adjacent to the unit; feature 1 is the unit's carried
        // resources (see above).
        features.add(((Integer)features.get(surround_feature_start+8)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+19));
        features.add(((Integer)features.get(surround_feature_start+13)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+14));
        features.add(((Integer)features.get(surround_feature_start+8)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+14));
        features.add(((Integer)features.get(surround_feature_start+14)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+19));
        features.add(((Integer)features.get(surround_feature_start+19)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+13));
        features.add(((Integer)features.get(surround_feature_start+13)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+8));
        features.add(((Integer)features.get(1)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+8));
        features.add(((Integer)features.get(1)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+13));
        features.add(((Integer)features.get(1)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+14));
        features.add(((Integer)features.get(1)) * (unitTypes*2+3) + (Integer)features.get(surround_feature_start+19));
        
        return features;
    }
}
| 6,437 | 41.635762 | 142 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/featuregeneration/FeatureGeneratorEmpty.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes.featuregeneration;
import ai.machinelearning.bayes.TrainingInstance;
import java.util.ArrayList;
import java.util.List;
/**
 * Baseline feature generator that produces no features at all: every training
 * instance is mapped to an empty (mutable) feature vector.
 *
 * @author santi
 */
public class FeatureGeneratorEmpty extends FeatureGenerator {
    public List<Object> generateFeatures(TrainingInstance ti) {
        // no features: hand back a fresh, empty, mutable list
        List<Object> features = new ArrayList<>();
        return features;
    }
}
| 549 | 24 | 79 | java |
MicroRTS | MicroRTS-master/src/ai/machinelearning/bayes/featuregeneration/FeatureGeneratorSimple.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.machinelearning.bayes.featuregeneration;
import ai.machinelearning.bayes.TrainingInstance;
import java.util.ArrayList;
import java.util.List;
import rts.PhysicalGameState;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
 * Feature generator producing a compact description of a unit's situation:
 * carried resources, direction towards the friendly and enemy unit centroids,
 * whether the player owns a Barracks, and the contents of the four cells two
 * steps away in each cardinal direction. All feature values are Integers.
 *
 * @author santi
 */
public class FeatureGeneratorSimple extends FeatureGenerator {
    public List<Object> generateFeatures(TrainingInstance ti) {
        List<Object> features = new ArrayList<>();
        int player = ti.u.getPlayer();
        PhysicalGameState pgs = ti.gs.getPhysicalGameState();
        
        // average coordinates of friendly and enemy units (neutral units ignored):
        int total_friendly = 0;
        double x_friendly = 0;
        double y_friendly = 0;
        int total_enemy = 0;
        double x_enemy = 0;
        double y_enemy = 0;
        int have_barracks = 0;    // 1 if the player owns at least one Barracks
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==-1) {
                // neutral units
            } else if (u.getPlayer()==player) {
                x_friendly += u.getX();
                y_friendly += u.getY();
                total_friendly++;
                if (u.getType().name.equals("Barracks")) have_barracks = 1;
            } else {
                x_enemy += u.getX();
                y_enemy += u.getY();
                total_enemy++;
            }
        }
        // NOTE(review): if the enemy has no units left these divisions yield
        // NaN, and the direction below degrades to 0 after the int cast -
        // confirm this is acceptable to the models consuming these features.
        x_friendly/=total_friendly;
        y_friendly/=total_friendly;
        x_enemy/=total_enemy;
        y_enemy/=total_enemy;
        // discretize the direction towards each centroid into PI/4 sectors
        // (8 sectors), offset by half a sector so sectors are centered on the axes:
        x_friendly-=ti.u.getX();
        y_friendly-=ti.u.getY();
        x_enemy-=ti.u.getX();
        y_enemy-=ti.u.getY();
        double angle_friendly = Math.atan2(x_friendly, y_friendly);
        double angle_enemy = Math.atan2(x_enemy, y_enemy);
        double resolution = Math.PI/4;
        angle_friendly+=resolution/2;    // offset by half a sector
        angle_enemy+=resolution/2;    // offset by half a sector
        if (angle_friendly<0) angle_friendly+=Math.PI*2;
        if (angle_enemy<0) angle_enemy+=Math.PI*2;
        int direction_friendly = (int)(angle_friendly/(resolution));
        int direction_enemy = (int)(angle_enemy/(resolution));
//        System.out.println("quadrant: " + x_enemy + "," + y_enemy + " -> " + Math.atan2(x_enemy, y_enemy) + " -> " + direction_enemy);
        features.add(ti.u.getResources());
        features.add(direction_friendly);
        features.add(direction_enemy);
        features.add(have_barracks);
        
        // contents of the four cells two steps away in each cardinal direction
        // (offsets are subtracted from the unit position). Each cell is encoded
        // as: 0 = free, 1 = reserved by a pending move, 2 = wall/off-map,
        // 3..3+T-1 = friendly unit of that type ID, 3+T..3+2T-1 = enemy unit
        // of that type ID (T = number of unit types).
        int xo[] = {-2, 0, 2, 0};
        int yo[] = { 0,-2, 0, 2};
        int x = ti.u.getX();
        int y = ti.u.getY();
        int width = ti.gs.getPhysicalGameState().getWidth();
        int height = ti.gs.getPhysicalGameState().getHeight();
        UnitTypeTable utt = ti.gs.getUnitTypeTable();
        int unitTypes = utt.getUnitTypes().size();
        for(int i = 0;i<xo.length;i++) {
            int x2 = x - xo[i];
            int y2 = y - yo[i];
            Unit u = ti.gs.getPhysicalGameState().getUnitAt(x2, y2);
            if (u!=null) {
                if (u.getPlayer() == player) {
                    features.add(3 + u.getType().ID);
//                    features.add("friendly" + u.getType().name);
                } else {
                    features.add(3 + unitTypes + u.getType().ID);
//                    features.add(u.getType().name);
                }
            } else {
                if (x2<0 || y2<0 || x2>=width || y2>=height) {
                    features.add(2);
//                    features.add("wall");
                } else if (ti.gs.getPhysicalGameState().getTerrain(x2, y2) == PhysicalGameState.TERRAIN_NONE) {
                    if (ti.gs.free(x2, y2)) {
                        features.add(0);
//                        features.add("free");
                    } else {
                        features.add(1);
//                        features.add("reserved");
                    }
                } else {
                    features.add(2);
//                    features.add("wall");
                }
            }
        }
        return features;
    }
}
| 4,402 | 35.38843 | 136 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/MCTSNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts;
import ai.evaluation.EvaluationFunction;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
/**
 * Common base class for all Monte Carlo Tree Search node implementations.
 *
 * @author santi
 */
public abstract class MCTSNode {
    public static Random r = new Random();
    
    public int type;    // 0 : max, 1 : min, -1: Game-over
    public MCTSNode parent;    
    public GameState gs;
    public int depth = 0;   // the depth in the tree
    
    public List<PlayerAction> actions;
    public List<MCTSNode> children;
    public double accum_evaluation = 0;
    public int visit_count = 0;
    
    // Bookkeeping fields that let the search algorithms avoid linear scans:
    //
    // creation_ID: 0 for the root of the tree, then incremented by one for
    //   each node in creation order. The abstraction AIs use it to quickly
    //   tell newly created children apart from pre-existing ones.
    // highest_children_creation_ID: the largest creation_ID among this node's
    //   children.
    // best_child_so_far: cached index of the current best child, exploited by
    //   e-greedy strategies. Since each iteration updates only one child's
    //   value, the new best child can be found in constant time by comparing
    //   the freshly updated child against this cached one. A value of -1
    //   means "not cached yet, recompute from scratch".
    public int creation_ID = -1;
    public int highest_children_creation_ID = -1;
    public int best_child_so_far = -1;
    
    /**
     * Recursively prints the subtree below this node (visit count, static
     * evaluation, average evaluation and the action leading to each child),
     * down to 'maxdepth' levels, indenting each level by one extra space.
     */
    public void showNode(int depth, int maxdepth, EvaluationFunction ef) {
        if (children == null) return;
        for (int idx = 0; idx < children.size(); idx++) {
            MCTSNode node = children.get(idx);
            StringBuilder pad = new StringBuilder();
            for (int level = 0; level < depth; level++) pad.append(" ");
            System.out.print(pad);
            double average = node.accum_evaluation / ((double)node.visit_count);
            System.out.println("child explored " + node.visit_count + " (EF: " + ef.evaluate(0, 1, node.gs) + ") Avg evaluation: " + average + " : " + actions.get(idx));
            if (depth < maxdepth) node.showNode(depth + 1, maxdepth, ef);
        }
    }
}
| 2,486 | 40.45 | 218 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/believestatemcts/AIWithBelieveState.java | package ai.mcts.believestatemcts;
import java.util.List;
import rts.GameState;
import rts.PartiallyObservableGameState;
import rts.units.Unit;
/**
 * Implemented by AIs that maintain a "believe state": a guess about enemy
 * units currently hidden by the fog of war.
 *
 * @author albertouri
 */
public interface AIWithBelieveState {

    /**
     * Initializes the believe state from the fully observable game state
     * {@code gs} and the corresponding partial observation {@code pogs}.
     */
    void setInitialBelieveState(int player, GameState gs, PartiallyObservableGameState pogs);

    /** Returns the units currently believed to exist (last known positions). */
    List<Unit> getBelieveUnits();

    /**
     * Scores how well {@code believeUnits} matches reality with a soft Jaccard
     * index in [0, 1]: visible units count fully towards the intersection,
     * while hidden units are matched to the closest believed unit of the same
     * type (or same ID) and weighted by (1 - normalized Manhattan distance).
     * NOTE(review): 'player' here is the index whose units are being compared
     * against the believe state (i.e. the opponent from the believer's point
     * of view) - confirm against callers.
     */
    static double getJaccardIndex(int player, GameState gs, PartiallyObservableGameState pogs, List<Unit> believeUnits) {
        // Jaccard Index = AB_intersection / (A + B - AB_intersection)
        // maxDist: an upper bound on the Manhattan distance between two cells
        double maxDist = gs.getPhysicalGameState().getWidth() + gs.getPhysicalGameState().getHeight() + 1;
        double AB_intersection = 0.0;
        double A = believeUnits.size(); // visible units in gs + believe units
        double B = 0.0; // all of 'player's units in gs
        // marks believed units already matched, so that two real units cannot
        // claim the same believed unit:
        boolean[] unitSeen = new boolean[believeUnits.size()];
        for (Unit u : gs.getUnits()) {
            if (u.getPlayer() == player) {
                B += 1.0;
                // if is visible count as intersection
                if (pogs.observable(u.getX(), u.getY())) {
                    AB_intersection += 1.0;
                    A += 1.0;
                } else { // else look if it is in the believe-state
                    Unit closestUnit = null;
                    double minDist = maxDist;
                    int id = -1;
                    for (int i = 0; i < believeUnits.size(); i++) {
                        if (unitSeen[i]) continue; // skip already-matched believed units
                        Unit bu = believeUnits.get(i);
                        // an exact ID match is definitive; otherwise keep the
                        // closest believed unit of the same type:
                        if (bu.getID() == u.getID()) {
                            id = i;
                            closestUnit = bu;
                            minDist = Math.abs(bu.getX() - u.getX()) + Math.abs(bu.getY() - u.getY());
                            break;
                        } else if (bu.getType() == u.getType()) {
                            double dist = Math.abs(bu.getX() - u.getX()) + Math.abs(bu.getY() - u.getY());
                            if (minDist > dist) {
                                id = i;
                                closestUnit = bu;
                                minDist = dist;
                            }
                        }
                    }
                    if (closestUnit != null) {
                        unitSeen[id] = true;
                        // partial credit, decaying linearly with distance:
                        double normDist = 1 - (minDist / maxDist);
                        AB_intersection += normDist;
                    }
                }
            }
        }
        double jaccardIndex = AB_intersection / (A + B - AB_intersection);
        if (jaccardIndex > 1.0) { // something went wrong: dump debug information
            System.out.println("### Jaccard Index bigger than 1.0 ###");
            System.out.println(AB_intersection + " / " + A + " + " + B + " - " + AB_intersection + "=" + jaccardIndex);
            System.out.println(gs.getPhysicalGameState());
            System.out.println(pogs.getPhysicalGameState());
            for (Unit u : gs.getUnits()) {
                if (u.getPlayer() == player) {
                    System.out.println("Adding to B " + u);
                    if (pogs.observable(u.getX(), u.getY())) {
                        System.out.println("Adding to A " + u);
                    }
                }
            }
        }
        return jaccardIndex;
    }
}
| 3,485 | 40.5 | 121 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/believestatemcts/BS1_NaiveMCTS.java | package ai.mcts.believestatemcts;
import ai.core.AI;
import ai.evaluation.EvaluationFunction;
import static ai.mcts.MCTSNode.r;
import ai.mcts.naivemcts.NaiveMCTS;
import ai.mcts.naivemcts.NaiveMCTSNode;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import rts.GameState;
import rts.PartiallyObservableGameState;
import rts.PlayerAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
 * NaiveMCTS player that samples a world from a simple believe state: only the
 * opponent buildings observed in the game state at frame 0 are remembered.
 * There are no new observations; a believed building is only dropped once its
 * tile becomes visible and the building is no longer there (i.e. destroyed).
 *
 * @author albertouri
 */
public class BS1_NaiveMCTS extends NaiveMCTS implements AIWithBelieveState {

    // full initial game state captured in preGameAnalysis (null if unavailable)
    GameState initialGameState;

    // list of units we "believe" exist (for now it's just "last seen" position)
    List<Unit> lastKnownPosition = new LinkedList<>();

    public BS1_NaiveMCTS(UnitTypeTable utt) {
        super(utt);
    }

    public BS1_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float discout_l, float e_g, float discout_g, float e_0, float discout_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, discout_l, e_g, discout_g, e_0, discout_0, policy, a_ef, fensa);
    }

    public BS1_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float e_g, float e_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, policy, a_ef, fensa);
    }

    public BS1_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float e_g, float e_0, int a_global_strategy, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, a_global_strategy, policy, a_ef, fensa);
    }

    @Override
    public AI clone() {
        return new BS1_NaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, epsilon_l, discount_l, epsilon_g, discount_g, epsilon_0, discount_0, playoutPolicy, ef, forceExplorationOfNonSampledActions);
    }

    /** Runs one game frame of search and returns the chosen action (empty if no unit can act). */
    @Override
    public final PlayerAction getAction(int player, GameState gs) throws Exception {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player, gs);
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    /**
     * Resets the search tree for a new frame. If the observation is partial,
     * the search runs on a fully observable world sampled from the believe state.
     */
    @Override
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        // Fix: record the player up front. Previously 'player' was read (by
        // the sampleWorld(...) call below) before being assigned, so the first
        // call used a stale/default value.
        player = a_player;
        if (initialGameState!=null && gs.getTime()==0) {
            setInitialBelieveState(a_player, initialGameState.clone(), new PartiallyObservableGameState(initialGameState, a_player));
        }
        if (gs instanceof PartiallyObservableGameState) {
            // create a sampling world from our believe-states
            gs = sampleWorld(player, (PartiallyObservableGameState) gs);
        }
        current_iteration = 0;
        tree = new NaiveMCTSNode(player, 1-player, gs, null, ef.upperBound(gs), current_iteration++, forceExplorationOfNonSampledActions);
        if (tree.moveGenerator==null) {
            max_actions_so_far = 0;
        } else {
            max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
        }
        gs_to_start_from = gs;
        epsilon_l = initial_epsilon_l;
        epsilon_g = initial_epsilon_g;
        epsilon_0 = initial_epsilon_0;
    }

    /**
     * Returns the index of the most visited child of the root, breaking ties
     * uniformly at random; -1 if the root has no children.
     */
    @Override
    public int getMostVisitedActionIdx() {
        total_actions_issued++;
        if (getTree().children == null) return -1;
        List<Integer> bestIdxs = new ArrayList<>();
        int bestScore = -1;
        for (int i = 0; i < getTree().children.size(); i++) {
            NaiveMCTSNode child = (NaiveMCTSNode) getTree().children.get(i);
            if (child.visit_count > bestScore) {
                // strictly better: restart the tie list
                bestIdxs.clear();
                bestIdxs.add(i);
                bestScore = child.visit_count;
            } else if (child.visit_count == bestScore) {
                // Fix: this branch previously repeated 'visit_count > bestScore'
                // and was therefore unreachable, so ties were never collected.
                bestIdxs.add(i);
            }
        }
        if (bestIdxs.isEmpty()) return -1;
        if (bestIdxs.size() == 1) return bestIdxs.get(0);
        // otherwise we have multiple best actions, choose one randomly.
        // Fix: return the chosen *child index*; previously this returned
        // 'r.nextInt(bestIdxs.size())' itself, which is an index into the tie
        // list, not into the children.
        return bestIdxs.get(r.nextInt(bestIdxs.size()));
    }

    public void reset()
    {
        initialGameState = null;
    }

    /** Stores a full copy of the initial game state for later believe-state setup. */
    public void preGameAnalysis(GameState gs, long milliseconds) throws Exception
    {
        initialGameState = gs.clone();
    }

    /** Remembers every non-moving enemy unit (building) that starts out hidden. */
    @Override
    public void setInitialBelieveState(int player, GameState gs, PartiallyObservableGameState pogs) {
        int opponent = 1 - player;
        // save list of enemy units that cannot move (buildings)
        for (Unit u : gs.getUnits()) {
            if (u.getPlayer() == opponent && !u.getType().canMove
                    && !pogs.observable(u.getX(), u.getY())) {
                lastKnownPosition.add(u);
            }
        }
    }

    @Override
    public List<Unit> getBelieveUnits() {
        List<Unit> l = new LinkedList<>(lastKnownPosition);
        return l;
    }

    /**
     * Builds a fully observable game state from the partial observation by
     * adding the believed (hidden) buildings. A believed building whose tile
     * is visible but empty, or occupied by a different unit type, is assumed
     * destroyed and removed from the believe state.
     */
    public GameState sampleWorld(int player, PartiallyObservableGameState gs) {
        GameState newWorld = gs.clone();
        List<Unit> toDelete = new ArrayList<>();
        for (Unit u : lastKnownPosition) {
            // if location not visible, add the believed unit to the sample:
            if (!gs.observable(u.getX(), u.getY())) {
                newWorld.getPhysicalGameState().addUnit(u);
            } else { // if visible and not present, remove from believe-state (i.e. it was killed)
                Unit observedUnit = newWorld.getPhysicalGameState().getUnitAt(u.getX(), u.getY());
                if (observedUnit == null || u.getType() != observedUnit.getType()) {
                    toDelete.add(u);
                }
            }
        }
        lastKnownPosition.removeAll(toDelete);
        return newWorld;
    }
}
| 6,502 | 35.948864 | 225 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/believestatemcts/BS2_NaiveMCTS.java | package ai.mcts.believestatemcts;
import ai.core.AI;
import ai.evaluation.EvaluationFunction;
import ai.mcts.naivemcts.NaiveMCTS;
import ai.mcts.naivemcts.NaiveMCTSNode;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import rts.GameState;
import rts.PartiallyObservableGameState;
import rts.PlayerAction;
import rts.UnitAction;
import rts.UnitActionAssignment;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
 * NaiveMCTS player with a richer believe state than BS1: it remembers the
 * last observed location of every opponent unit (mobile or not). A believed
 * unit is dropped as soon as its remembered location becomes visible again,
 * and each new observation is compared with the previous one to track units
 * moving in and out of the fog of war.
 *
 * @author albertouri
 */
public class BS2_NaiveMCTS extends NaiveMCTS implements AIWithBelieveState {

    // full initial game state captured in preGameAnalysis (null if unavailable)
    GameState initialGameState;

    // list of units we "believe" exist (for now it's just "last seen" position)
    List<Unit> lastKnownPosition = new LinkedList<>();

    // the partial observation from the previous frame; used to detect units
    // that moved into (or were killed inside) the fog of war
    PartiallyObservableGameState lastObservedGame;

    public BS2_NaiveMCTS(UnitTypeTable utt) {
        super(utt);
    }

    public BS2_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float discout_l, float e_g, float discout_g, float e_0, float discout_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, discout_l, e_g, discout_g, e_0, discout_0, policy, a_ef, fensa);
    }

    public BS2_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float e_g, float e_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, policy, a_ef, fensa);
    }

    public BS2_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
        float e_l, float e_g, float e_0, int a_global_strategy, AI policy, EvaluationFunction a_ef, boolean fensa) {
        super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, a_global_strategy, policy, a_ef, fensa);
    }

    @Override
    public AI clone() {
        return new BS2_NaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, epsilon_l, discount_l, epsilon_g, discount_g, epsilon_0, discount_0, playoutPolicy, ef, forceExplorationOfNonSampledActions);
    }

    /** Runs one game frame of search and returns the chosen action (empty if no unit can act). */
    @Override
    public final PlayerAction getAction(int player, GameState gs) throws Exception {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player, gs);
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    /**
     * Resets the search tree for a new frame. If the observation is partial,
     * the believe state is first updated with the new observation and the
     * search runs on a fully observable world sampled from it.
     */
    @Override
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        // Fix: record the player up front. Previously 'player' was read (by
        // updateBelieveState(...)/sampleWorld(...) below) before being
        // assigned, so the first call used a stale/default value — which in
        // this class flips the computed opponent index.
        player = a_player;
        if (initialGameState!=null && gs.getTime()==0) {
            setInitialBelieveState(a_player, initialGameState.clone(), new PartiallyObservableGameState(initialGameState, a_player));
        }
        if (gs instanceof PartiallyObservableGameState) {
            // create a sampling world from our believe-states
            updateBelieveState(player, (PartiallyObservableGameState) gs);
            gs = sampleWorld(player, (PartiallyObservableGameState) gs);
        }
        current_iteration = 0;
        tree = new NaiveMCTSNode(player, 1-player, gs, null, ef.upperBound(gs), current_iteration++, forceExplorationOfNonSampledActions);
        if (tree.moveGenerator==null) {
            max_actions_so_far = 0;
        } else {
            max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
        }
        gs_to_start_from = gs;
        epsilon_l = initial_epsilon_l;
        epsilon_g = initial_epsilon_g;
        epsilon_0 = initial_epsilon_0;
    }

    public void reset()
    {
        initialGameState = null;
    }

    /** Stores a full copy of the initial game state for later believe-state setup. */
    public void preGameAnalysis(GameState gs, long milliseconds) throws Exception
    {
        initialGameState = gs.clone();
    }

    /** Remembers every enemy unit that starts out hidden by the fog of war. */
    @Override
    public void setInitialBelieveState(int player, GameState gs, PartiallyObservableGameState pogs) {
        int opponent = 1 - player;
        // save all enemy's units that are not visible
        for (Unit u : gs.getUnits()) {
            if (u.getPlayer() == opponent && !pogs.observable(u.getX(), u.getY())) {
                lastKnownPosition.add(u);
            }
        }
        // save the initial observation as the baseline for updateBelieveState
        lastObservedGame = pogs;
    }

    @Override
    public List<Unit> getBelieveUnits() {
        List<Unit> l = new LinkedList<>(lastKnownPosition);
        return l;
    }

    /**
     * Builds a fully observable game state from the partial observation by
     * adding every believed unit whose remembered location is still hidden.
     * Believed units whose location is now visible are dropped.
     */
    public GameState sampleWorld(int player, PartiallyObservableGameState gs) {
        GameState newWorld = gs.clone();
        List<Unit> toDelete = new ArrayList<>();
        for (Unit u : lastKnownPosition) {
            // remove unit if location is visible; otherwise add it to the sample
            if (gs.observable(u.getX(), u.getY())) {
                toDelete.add(u);
            } else {
                newWorld.getPhysicalGameState().addUnit(u);
            }
        }
        lastKnownPosition.removeAll(toDelete);
        return newWorld;
    }

    /**
     * Reconciles the believe state with a new observation: enemy units that
     * just moved (or stayed) out of sight are remembered, and believed units
     * that became visible again (or whose tile a visible unit is moving into)
     * are forgotten.
     */
    public void updateBelieveState(int player, PartiallyObservableGameState gs) {
        int opponent = 1 - player;

        // Handling units from the previous observation that are now missing
        for (Unit u : lastObservedGame.getUnits()) {
            if (u.getPlayer() == opponent && gs.free(u.getX(), u.getY())) {
                // check for enemy units that moved into the fog-of-war
                UnitActionAssignment uaa = lastObservedGame.getActionAssignment(u);
                if (uaa != null && uaa.action.getType() == UnitAction.TYPE_MOVE) {
                    int offsx = 0;
                    int offsy = 0;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_UP) offsy = -1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_RIGHT) offsx = 1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_DOWN) offsy = 1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_LEFT) offsx = -1;
                    if (!gs.observable(u.getX() + offsx, u.getY() + offsy)) {
                        // the unit moved into an unobservable cell: remember it
                        lastKnownPosition.add(u.clone());
                    }
                } else { // unit was static
                    if (!gs.observable(u.getX(), u.getY())) { // is the location still observable?
                        if (!wasUnderAttack(u)) { // wasn't under attack (sometimes units kill each other)
                            lastKnownPosition.add(u.clone());
                        } else {
                            // probably killed out of sight; do not remember it
                        }
                    }
                }
            }
        }

        // Handling newly visible units
        for (Unit u : gs.getUnits()) {
            if (u.getPlayer() == opponent) {
                // a visible unit invalidates its own last known position
                // (remove-then-break is safe: iteration stops immediately)
                for (Unit bu : lastKnownPosition) {
                    if (bu.getID() == u.getID()) {
                        lastKnownPosition.remove(bu);
                        break;
                    }
                }
                // sometimes a visible unit starts to move or produce a unit on
                // top of a believed unit; forget the believed unit in that cell
                UnitActionAssignment uaa = gs.getActionAssignment(u);
                if (uaa != null && (uaa.action.getType() == UnitAction.TYPE_MOVE || uaa.action.getType() == UnitAction.TYPE_PRODUCE)) {
                    int offsx = u.getX();
                    int offsy = u.getY();
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_UP) offsy -= 1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_RIGHT) offsx += 1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_DOWN) offsy += 1;
                    if (uaa.action.getDirection() == UnitAction.DIRECTION_LEFT) offsx -= 1;
                    for (Unit bu : lastKnownPosition) {
                        if (bu.getX() == offsx && bu.getY() == offsy) {
                            lastKnownPosition.remove(bu);
                            break;
                        }
                    }
                }
            }
        }

        // at the end, update the last observed game state
        lastObservedGame = gs.clone();
    }

    /**
     * True if, in the previous observation, some unit had an attack targeting
     * this unit's cell (used to guess that a vanished unit was killed).
     */
    public boolean wasUnderAttack(Unit u) {
        for (UnitActionAssignment ua : lastObservedGame.getUnitActions().values()) {
            if (ua.action.getType() == UnitAction.TYPE_ATTACK_LOCATION
                    && ua.action.getLocationX() == u.getX() && ua.action.getLocationY() == u.getY()) {
                return true;
            }
        }
        return false;
    }
}
| 9,114 | 39.511111 | 225 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/believestatemcts/BS3_NaiveMCTS.java | package ai.mcts.believestatemcts;
import ai.core.AI;
import ai.evaluation.EvaluationFunction;
import static ai.mcts.MCTSNode.r;
import ai.mcts.naivemcts.NaiveMCTS;
import ai.mcts.naivemcts.NaiveMCTSNode;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import rts.GameState;
import rts.PartiallyObservableGameState;
import rts.PlayerAction;
import rts.UnitAction;
import rts.UnitActionAssignment;
import rts.units.Unit;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
/**
* The believe state stores the last observed location of an opponent's unit.
* If the locations is visible again we search for the nearest not visible location.
* We do inference on:
* - possible buildings not seen before (i.e. if we see a military unit, it should be a barracks)
* - when a new visible unit, remove closest last seen position
*
* @author albertouri
*/
public class BS3_NaiveMCTS extends NaiveMCTS implements AIWithBelieveState {
GameState initialGameState;
List<Unit> lastKnownPosition = new LinkedList<>();
List<Unit> inferedUnits = new LinkedList<>();
PartiallyObservableGameState lastObservedGame;
boolean[] typeSeen;
public BS3_NaiveMCTS(UnitTypeTable utt) {
super(utt);
}
public BS3_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e_l, float discout_l, float e_g, float discout_g, float e_0, float discout_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
super(available_time, max_playouts, lookahead, max_depth, e_l, discout_l, e_g, discout_g, e_0, discout_0, policy, a_ef, fensa);
}
public BS3_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e_l, float e_g, float e_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, policy, a_ef, fensa);
}
public BS3_NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e_l, float e_g, float e_0, int a_global_strategy, AI policy, EvaluationFunction a_ef, boolean fensa) {
super(available_time, max_playouts, lookahead, max_depth, e_l, e_g, e_0, a_global_strategy, policy, a_ef, fensa);
}
@Override
public AI clone() {
return new BS3_NaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, epsilon_l, discount_l, epsilon_g, discount_g, epsilon_0, discount_0, playoutPolicy, ef, forceExplorationOfNonSampledActions);
}
@Override
public final PlayerAction getAction(int player, GameState gs) throws Exception {
if (gs.canExecuteAnyAction(player)) {
startNewComputation(player, gs);
computeDuringOneGameFrame();
return getBestActionSoFar();
} else {
return new PlayerAction();
}
}
@Override
public void startNewComputation(int a_player, GameState gs) throws Exception {
if (initialGameState!=null && gs.getTime()==0) {
setInitialBelieveState(a_player, initialGameState.clone(), new PartiallyObservableGameState(initialGameState, a_player));
}
if (gs instanceof PartiallyObservableGameState) {
// create a sampling world from our believe-states
updateBelieveState(player, (PartiallyObservableGameState) gs);
gs = sampleWorld(player, (PartiallyObservableGameState) gs);
}
player = a_player;
current_iteration = 0;
tree = new NaiveMCTSNode(player, 1-player, gs, null, ef.upperBound(gs), current_iteration++, forceExplorationOfNonSampledActions);
if (tree.moveGenerator==null) {
max_actions_so_far = 0;
} else {
max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
}
gs_to_start_from = gs;
epsilon_l = initial_epsilon_l;
epsilon_g = initial_epsilon_g;
epsilon_0 = initial_epsilon_0;
}
@Override
public int getMostVisitedActionIdx() {
total_actions_issued++;
if (getTree().children == null) return -1;
List<Integer> bestIdxs = new ArrayList<>();
int bestScore = -1;
for (int i = 0; i < getTree().children.size(); i++) {
NaiveMCTSNode child = (NaiveMCTSNode) getTree().children.get(i);
if (child.visit_count > bestScore) {
bestIdxs.clear();
bestIdxs.add(i);
bestScore = child.visit_count;
} else if (child.visit_count > bestScore) {
bestIdxs.add(i);
}
}
if (bestIdxs.isEmpty()) return -1;
if (bestIdxs.size() == 1) return bestIdxs.get(0);
// otherwise we have multiple best actions, choose one randomly
System.out.println("Random action from " + bestIdxs.size());
return r.nextInt(bestIdxs.size());
}
public void reset()
{
initialGameState = null;
}
public void preGameAnalysis(GameState gs, long milliseconds) throws Exception
{
initialGameState = gs.clone();
}
@Override
public void setInitialBelieveState(int player, GameState gs, PartiallyObservableGameState pogs) {
int opponent = 1 - player;
// set initial typeSeen
typeSeen = new boolean[gs.getUnitTypeTable().getUnitTypes().size()]; // default value of false
// save all enemy's units that are not visible
for (Unit u : gs.getUnits()) {
if (u.getPlayer() == opponent && !pogs.observable(u.getX(), u.getY())) {
// System.out.println("First time seen " + u.getType().name);
lastKnownPosition.add(u.clone());
typeSeen[u.getType().ID] = true;
}
}
// save initila observed game
lastObservedGame = pogs.clone();
}
@Override
public List<Unit> getBelieveUnits() {
List<Unit> l = new LinkedList<>();
l.addAll(lastKnownPosition);
l.addAll(inferedUnits);
return l;
}
public GameState sampleWorld(int player, PartiallyObservableGameState gs) {
GameState newWorld = gs.clone();
List<Unit> toDelete = new ArrayList<>();
// add last known units in our world sampler
for (Unit u : lastKnownPosition) {
boolean validPosition = true;
if (gs.observable(u.getX(), u.getY())) {
// infered position was wrong, update it
validPosition = getClosestNotObservableLocationNear(u.getX(), u.getY(), gs, u);
}
if (validPosition) {
try {
newWorld.getPhysicalGameState().addUnit(u);
} catch (IllegalArgumentException e) {
System.err.println("IllegalArgumentException: " + e.getMessage());
System.err.println(newWorld.getPhysicalGameState());
System.err.println("adding unit: " + u);
System.err.println("Last known unit:");
System.err.println(lastKnownPosition);
}
} else {
toDelete.add(u);
}
}
lastKnownPosition.removeAll(toDelete);
toDelete.clear();
// add inferend units in our world sampler
for (Unit u : inferedUnits) {
boolean validPosition = true;
if (gs.observable(u.getX(), u.getY())) {
// infered position was wrong, update it
getClosestNotObservableLocationNear(u.getX(), u.getY(), gs, u);
}
if (validPosition) {
try {
newWorld.getPhysicalGameState().addUnit(u);
// System.out.println("Infered unit added: " + u.toString());
} catch (IllegalArgumentException e) {
System.err.println("IllegalArgumentException: " + e.getMessage());
System.err.println(newWorld.getPhysicalGameState());
System.err.println("adding unit: " + u);
System.err.println("Infered units:");
System.err.println(inferedUnits);
}
} else {
toDelete.add(u);
}
}
inferedUnits.removeAll(toDelete);
return newWorld;
}
public void updateBelieveState(int player, PartiallyObservableGameState gs) {
int opponent = 1 - player;
// Handling missing units form previous game state
for (Unit u : lastObservedGame.getUnits()) {
if (u.getPlayer() == opponent && gs.free(u.getX(), u.getY())) {
// check for enemy units that moved into the fog-of-war
UnitActionAssignment uaa = lastObservedGame.getActionAssignment(u);
if (uaa != null && uaa.action.getType() == UnitAction.TYPE_MOVE) {
int offsx = 0;
int offsy = 0;
if (uaa.action.getDirection() == UnitAction.DIRECTION_UP) offsy = -1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_RIGHT) offsx = 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_DOWN) offsy = 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_LEFT) offsx = -1;
if (!gs.observable(u.getX() + offsx, u.getY() + offsy)) {
// System.out.println("Enemy moved to fog of war!" + u.toString());
lastKnownPosition.add(u.clone());
}
} else { // unit was static
if (!gs.observable(u.getX(), u.getY())) { // is location still observable?
if (!wasUnderAttack(u)) { // wasn't under attack (sometimes units kill each other simultaneously)
// System.out.println("Enemy now is out of sight! " + u.toString());
lastKnownPosition.add(u.clone());
} else {
// System.out.println("Enemy unit killed out of sight! (probably they kill each other)");
}
}
}
}
}
// Handling new units frome previous game state
for (Unit u : gs.getUnits()) {
if (u.getPlayer() == opponent) {
// check if we have seen any new type of unit
if (!typeSeen[u.getType().ID]) {
// System.out.println("First time seen " + u.getType().name);
typeSeen[u.getType().ID] = true;
// we assume only one unit type can produce this unit
UnitType ut = u.getType().producedBy.get(0);
// if producer not seen, add to inferedUnits
if (!typeSeen[ut.ID]) {
Unit newUnit = new Unit(opponent, ut, 0, 0, 0);
// Search possible location for the new unit
boolean validPosition = getClosestNotObservableLocationNear(u.getX(), u.getY(), gs, newUnit);
if (validPosition) {
// System.out.println("New Infered unit: " + newUnit.toString());
// if possible location found, add it
inferedUnits.add(newUnit);
}
}
}
// check if an infered unit now is visible
List<Unit> toDelete = new ArrayList<>();
for (Unit iu : inferedUnits) {
if (u.getType() == iu.getType()) {
// System.out.println("Infered unit found: " + u.toString());
toDelete.add(iu);
}
}
inferedUnits.removeAll(toDelete);
// check if a new visible unit has a last known position
// new visible opponent if: previous locations wasn't observable or no visible opponent was moving there
if (!lastObservedGame.observable(u.getX(), u.getY()) || !wasVisibleOpponentMovingTo(opponent, u.getX(), u.getY())) {
// System.out.println("Opponent visible: " + u.toString());
Unit unitToRemove = null;
// look if unit was added to the lastKnownPosition
for (Unit observedUnit : lastKnownPosition) {
if (observedUnit.getID() == u.getID()) {
unitToRemove = observedUnit;
break;
}
}
if (unitToRemove != null) {
// System.out.println("Last known position removed: " + unitToRemove.toString());
lastKnownPosition.remove(unitToRemove);
}
}
// sometimes a visible unit start to move or produce a unit on top of a believe unit
// for those cases we need to relocate the believe unit
UnitActionAssignment uaa = gs.getActionAssignment(u);
if (uaa != null && (uaa.action.getType() == UnitAction.TYPE_MOVE || uaa.action.getType() == UnitAction.TYPE_PRODUCE)) {
int offsx = u.getX();
int offsy = u.getY();
if (uaa.action.getDirection() == UnitAction.DIRECTION_UP) offsy -= 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_RIGHT) offsx += 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_DOWN) offsy += 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_LEFT) offsx -= 1;
for (Unit bu : lastKnownPosition) {
if (bu.getX() == offsx && bu.getY() == offsy) {
// System.out.println("Updating conflict with move/produce");
boolean validPosition = getClosestNotObservableLocationNear(bu.getX(), bu.getY(), gs, bu);
if (!validPosition) lastKnownPosition.remove(bu);
break;
}
}
for (Unit bu : inferedUnits) {
if (bu.getX() == offsx && bu.getY() == offsy) {
// System.out.println("Updating conflict with move/produce");
boolean validPosition = getClosestNotObservableLocationNear(bu.getX(), bu.getY(), gs, bu);
if (!validPosition) inferedUnits.remove(bu);
break;
}
}
}
}
}
// at the end, update the last observed game state
lastObservedGame = gs.clone();
}
public boolean wasVisibleOpponentMovingTo(int opponent, int x, int y) {
if (!lastObservedGame.free(x, y)) return true; // unit was already there
for (Unit u : lastObservedGame.getUnits()) {
if (u.getPlayer() == opponent) {
UnitActionAssignment uaa = lastObservedGame.getActionAssignment(u);
if (uaa != null && uaa.action.getType() == UnitAction.TYPE_MOVE) {
int offsx = 0;
int offsy = 0;
if (uaa.action.getDirection() == UnitAction.DIRECTION_UP) offsy = -1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_RIGHT) offsx = 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_DOWN) offsy = 1;
if (uaa.action.getDirection() == UnitAction.DIRECTION_LEFT) offsx = -1;
if ((u.getX() + offsx) == x && (u.getY() + offsy) == y) return true;
}
}
}
return false;
}
public boolean wasUnderAttack(Unit u) {
for (UnitActionAssignment ua : lastObservedGame.getUnitActions().values()) {
if (ua.action.getType() == UnitAction.TYPE_ATTACK_LOCATION
&& ua.action.getLocationX() == u.getX() && ua.action.getLocationY() == u.getY()) {
return true;
}
}
return false;
}
// returns false if couldn't find a valid location
public boolean getClosestNotObservableLocationNear(int startX, int startY, PartiallyObservableGameState gs, Unit u) {
//searches outward in a spiral.
int x = startX;
int y = startY;
int length = 1;
int j = 0;
boolean first = true;
int dx = 0;
int dy = 1;
int maxLenght = Math.max(gs.getPhysicalGameState().getWidth(), gs.getPhysicalGameState().getHeight());
while (length < maxLenght) {
// look for a location that is not visible and free
if (x >= 0 && x < gs.getPhysicalGameState().getWidth()
&& y >= 0 && y < gs.getPhysicalGameState().getHeight()
&& !gs.observable(x, y) && gs.free(x, y) && believeFree(x, y)) {
u.setX(x);
u.setY(y);
return true;
}
//otherwise, move to another position
x = x + dx;
y = y + dy;
j++; //count how many steps we take in this direction
if (j == length) { //if we've reached the end, its time to turn
j = 0; //reset step counter
if (!first) length++; //increment step counter if needed
first = !first; //first=true for every other turn so we spiral out at the right rate
//turn counter clockwise 90 degrees:
if (dx == 0) {
dx = dy;
dy = 0;
} else {
dy = -dx;
dx = 0;
}
}
}
return false;
}
public boolean believeFree(int x, int y) {
for (Unit u : lastKnownPosition) {
if (u.getX() == x && u.getY() == y) return false;
}
for (Unit u : inferedUnits) {
if (u.getX() == x && u.getY() == y) return false;
}
return true;
}
}
| 18,452 | 42.215457 | 225 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/informedmcts/InformedNaiveMCTS.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.informedmcts;
import ai.*;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import ai.machinelearning.bayes.ActionInterdependenceModel;
import ai.machinelearning.bayes.BayesianModelByUnitTypeWithDefaultModel;
import ai.machinelearning.bayes.featuregeneration.FeatureGeneratorSimple;
import ai.stochastic.UnitActionProbabilityDistribution;
import ai.stochastic.UnitActionProbabilityDistributionAI;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.jdom.input.SAXBuilder;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
public class InformedNaiveMCTS extends AIWithComputationBudget implements InterruptibleAI {
public static int DEBUG = 0;
public EvaluationFunction ef;
UnitTypeTable utt;
Random r = new Random();
public AI playoutPolicy = new RandomBiasedAI();
UnitActionProbabilityDistribution bias;
long max_actions_so_far = 0;
GameState gs_to_start_from;
InformedNaiveMCTSNode tree;
int current_iteration = 0;
public int MAXSIMULATIONTIME = 1024;
public int MAX_TREE_DEPTH = 10;
int player;
public float epsilon_0 = 0.2f;
public float epsilon_l = 0.25f;
public float epsilon_g = 0.0f;
// these variables are for using a discount factor on the epsilon values above. My experiments indicate that things work better without discount
// So, they are just maintained here for completeness:
public float initial_epsilon_0 = 0.2f;
public float initial_epsilon_l = 0.25f;
public float initial_epsilon_g = 0.0f;
public float discount_0 = 0.999f;
public float discount_l = 0.999f;
public float discount_g = 0.999f;
public int global_strategy = InformedNaiveMCTSNode.E_GREEDY;
// statistics:
public long total_runs = 0;
public long total_cycles_executed = 0;
public long total_actions_issued = 0;
public long total_time = 0;
public InformedNaiveMCTS(UnitTypeTable a_utt) throws Exception {
this(100,-1,100,10,
0.3f,0.0f,0.4f,
new UnitActionProbabilityDistributionAI(
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-WR.xml").getRootElement(), a_utt,
new ActionInterdependenceModel(null, 0, 0, 0, a_utt, new FeatureGeneratorSimple(), ""), "AIM-WR"),
a_utt, "ActionInterdependenceModel-Acc-WR"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-WR.xml").getRootElement(), a_utt,
new ActionInterdependenceModel(null, 0, 0, 0, a_utt, new FeatureGeneratorSimple(), ""), "AIM-WR"),
new SimpleSqrtEvaluationFunction3(), a_utt);
}
public InformedNaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e1, float discout1,
float e2, float discout2,
float e3, float discout3,
AI policy,
UnitActionProbabilityDistribution a_bias,
EvaluationFunction a_ef,
UnitTypeTable a_utt) {
super(available_time, max_playouts);
utt = a_utt;
MAXSIMULATIONTIME = lookahead;
playoutPolicy = policy;
bias = a_bias;
MAX_TREE_DEPTH = max_depth;
initial_epsilon_l = epsilon_l = e1;
initial_epsilon_g = epsilon_g = e2;
initial_epsilon_0 = epsilon_0 = e3;
discount_l = discout1;
discount_g = discout2;
discount_0 = discout3;
ef = a_ef;
}
public InformedNaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e1, float e2, float e3,
AI policy,
UnitActionProbabilityDistribution a_bias,
EvaluationFunction a_ef,
UnitTypeTable a_utt) {
super(available_time, max_playouts);
utt = a_utt;
MAXSIMULATIONTIME = lookahead;
playoutPolicy = policy;
bias = a_bias;
MAX_TREE_DEPTH = max_depth;
initial_epsilon_l = epsilon_l = e1;
initial_epsilon_g = epsilon_g = e2;
initial_epsilon_0 = epsilon_0 = e3;
discount_l = 1.0f;
discount_g = 1.0f;
discount_0 = 1.0f;
ef = a_ef;
}
public InformedNaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
float e1, float e2, float e3, int a_global_strategy,
AI policy,
UnitActionProbabilityDistribution a_bias,
EvaluationFunction a_ef) {
super(available_time, max_playouts);
MAXSIMULATIONTIME = lookahead;
playoutPolicy = policy;
bias = a_bias;
MAX_TREE_DEPTH = max_depth;
initial_epsilon_l = epsilon_l = e1;
initial_epsilon_g = epsilon_g = e2;
initial_epsilon_0 = epsilon_0 = e3;
discount_l = 1.0f;
discount_g = 1.0f;
discount_0 = 1.0f;
global_strategy = a_global_strategy;
ef = a_ef;
}
public void reset() {
tree = null;
gs_to_start_from = null;
total_runs = 0;
total_cycles_executed = 0;
total_actions_issued = 0;
total_time = 0;
current_iteration = 0;
}
public AI clone() {
return new InformedNaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, epsilon_l, discount_l, epsilon_g, discount_g, epsilon_0, discount_0, playoutPolicy, bias, ef, utt);
}
public PlayerAction getAction(int player, GameState gs) throws Exception
{
if (gs.canExecuteAnyAction(player)) {
startNewComputation(player,gs.clone());
computeDuringOneGameFrame();
return getBestActionSoFar();
} else {
return new PlayerAction();
}
}
public void startNewComputation(int a_player, GameState gs) throws Exception {
player = a_player;
current_iteration = 0;
tree = new InformedNaiveMCTSNode(player, 1-player, gs, bias, null, ef.upperBound(gs), current_iteration++);
if (tree.moveGenerator!=null) {
max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
}
gs_to_start_from = gs;
epsilon_l = initial_epsilon_l;
epsilon_g = initial_epsilon_g;
epsilon_0 = initial_epsilon_0;
}
public void resetSearch() {
if (DEBUG>=2) System.out.println("Resetting search...");
tree = null;
gs_to_start_from = null;
}
public void computeDuringOneGameFrame() throws Exception {
if (DEBUG>=2) System.out.println("Search...");
long start = System.currentTimeMillis();
long end = start;
long count = 0;
while(true) {
if (!iteration(player)) break;
count++;
end = System.currentTimeMillis();
if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
}
// System.out.println("HL: " + count + " time: " + (System.currentTimeMillis() - start) + " (" + available_time + "," + max_playouts + ")");
total_time += (end - start);
total_cycles_executed++;
}
public boolean iteration(int player) throws Exception {
InformedNaiveMCTSNode leaf = tree.selectLeaf(player, 1-player, epsilon_l, epsilon_g, epsilon_0, global_strategy, MAX_TREE_DEPTH, current_iteration++);
if (leaf!=null) {
GameState gs2 = leaf.gs.clone();
simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
int time = gs2.getTime() - gs_to_start_from.getTime();
double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
leaf.propagateEvaluation(evaluation,null);
// update the epsilon values:
epsilon_0*=discount_0;
epsilon_l*=discount_l;
epsilon_g*=discount_g;
total_runs++;
// System.out.println(total_runs + " - " + epsilon_0 + ", " + epsilon_l + ", " + epsilon_g);
} else {
// no actions to choose from :)
System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
return false;
}
return true;
}
public PlayerAction getBestActionSoFar() {
int idx = getMostVisitedActionIdx();
if (idx==-1) {
if (DEBUG>=1) System.out.println("BiasedNaiveMCTS no children selected. Returning an empty asction");
return new PlayerAction();
}
if (DEBUG>=2) tree.showNode(0,1,ef);
if (DEBUG>=1) {
InformedNaiveMCTSNode best = (InformedNaiveMCTSNode) tree.children.get(idx);
System.out.println("BiasedNaiveMCTS selected children " + tree.actions.get(idx) + " explored " + best.visit_count + " Avg evaluation: " + (best.accum_evaluation/((double)best.visit_count)));
}
return tree.actions.get(idx);
}
public int getMostVisitedActionIdx() {
total_actions_issued++;
int bestIdx = -1;
InformedNaiveMCTSNode best = null;
if (DEBUG>=2) {
System.out.println("Number of playouts: " + tree.visit_count);
tree.printUnitActionTable();
}
if (tree.children==null) return -1;
for(int i = 0;i<tree.children.size();i++) {
InformedNaiveMCTSNode child = (InformedNaiveMCTSNode)tree.children.get(i);
if (DEBUG>=2) {
System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
}
// if (best == null || (child.accum_evaluation/child.visit_count)>(best.accum_evaluation/best.visit_count)) {
if (best == null || child.visit_count>best.visit_count) {
best = child;
bestIdx = i;
}
}
return bestIdx;
}
public int getHighestEvaluationActionIdx() {
total_actions_issued++;
int bestIdx = -1;
InformedNaiveMCTSNode best = null;
if (DEBUG>=2) {
System.out.println("Number of playouts: " + tree.visit_count);
tree.printUnitActionTable();
}
if (tree.children==null) return -1;
for(int i = 0;i<tree.children.size();i++) {
InformedNaiveMCTSNode child = (InformedNaiveMCTSNode)tree.children.get(i);
if (DEBUG>=2) {
System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
}
// if (best == null || (child.accum_evaluation/child.visit_count)>(best.accum_evaluation/best.visit_count)) {
if (best == null || (child.accum_evaluation/((double)child.visit_count))>(best.accum_evaluation/((double)best.visit_count))) {
best = child;
bestIdx = i;
}
}
return bestIdx;
}
public void simulate(GameState gs, int time) throws Exception {
boolean gameover = false;
do{
if (gs.isComplete()) {
gameover = gs.cycle();
} else {
gs.issue(playoutPolicy.getAction(0, gs));
gs.issue(playoutPolicy.getAction(1, gs));
}
}while(!gameover && gs.getTime()<time);
}
public InformedNaiveMCTSNode getTree() {
return tree;
}
public GameState getGameStateToStartFrom() {
return gs_to_start_from;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAX_TREE_DEPTH + ", " + epsilon_l + ", " + discount_l + ", " + epsilon_g + ", " + discount_g + ", " + epsilon_0 + ", " + discount_0 + ", " + playoutPolicy + ", " + bias + ", " + ef + ")";
}
@Override
public String statisticsString() {
return "Total runs: " + total_runs +
", runs per action: " + (total_runs/(float)total_actions_issued) +
", runs per cycle: " + (total_runs/(float)total_cycles_executed) +
", average time per cycle: " + (total_time/(float)total_cycles_executed) +
", max branching factor: " + max_actions_so_far;
}
@Override
public List<ParameterSpecification> getParameters() {
List<ParameterSpecification> parameters = new ArrayList<>();
parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
parameters.add(new ParameterSpecification("E_l",float.class,0.3));
parameters.add(new ParameterSpecification("Discount_l",float.class,1.0));
parameters.add(new ParameterSpecification("E_g",float.class,0.0));
parameters.add(new ParameterSpecification("Discount_g",float.class,1.0));
parameters.add(new ParameterSpecification("E_0",float.class,0.4));
parameters.add(new ParameterSpecification("Discount_0",float.class,1.0));
try {
String biasNames[] = {
"AIM-WR",
"AIM-LR",
"AIM-HR",
"AIM-RR",
"AIM-LSI500",
"AIM-LSI10000",
"AIM-NaiveMCTS500",
"AIM-NaiveMCTS10000",
};
UnitActionProbabilityDistribution biasOptions[] = {
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-WR.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-WR"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-LR.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-LR"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-HR.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-HR"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-RR.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-RR"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-LSI500.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-LSI500"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-LSI10000.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-LSI10000"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-NaiveMCTS500.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-NaiveMCTS500"),
new BayesianModelByUnitTypeWithDefaultModel(new SAXBuilder().build(
"data/bayesianmodels/pretrained/ActionInterdependenceModel-NaiveMCTS10000.xml").getRootElement(), utt,
new ActionInterdependenceModel(null, 0, 0, 0, utt, new FeatureGeneratorSimple(), ""), "AIM-NaiveMCTS10000"),
};
ParameterSpecification dp_ps = new ParameterSpecification("DefaultPolicy",AI.class, playoutPolicy);
ParameterSpecification tpb_ps = new ParameterSpecification("TreePolicyBias",UnitActionProbabilityDistribution.class, bias);
for(int i = 0;i<biasOptions.length;i++) {
dp_ps.addPossibleValue(new UnitActionProbabilityDistributionAI(biasOptions[i], utt, biasNames[i]));
tpb_ps.addPossibleValue(biasOptions[i]);
}
parameters.add(dp_ps);
parameters.add(tpb_ps);
} catch(Exception e) {
e.printStackTrace();
}
parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
return parameters;
}
public int getPlayoutLookahead() {
return MAXSIMULATIONTIME;
}
public void setPlayoutLookahead(int a_pola) {
MAXSIMULATIONTIME = a_pola;
}
public int getMaxTreeDepth() {
return MAX_TREE_DEPTH;
}
public void setMaxTreeDepth(int a_mtd) {
MAX_TREE_DEPTH = a_mtd;
}
public float getE_l() {
return epsilon_l;
}
public void setE_l(float a_e_l) {
epsilon_l = a_e_l;
}
public float getDiscount_l() {
return discount_l;
}
public void setDiscount_l(float a_discount_l) {
discount_l = a_discount_l;
}
public float getE_g() {
return epsilon_g;
}
public void setE_g(float a_e_g) {
epsilon_g = a_e_g;
}
public float getDiscount_g() {
return discount_g;
}
public void setDiscount_g(float a_discount_g) {
discount_g = a_discount_g;
}
public float getE_0() {
return epsilon_0;
}
public void setE_0(float a_e_0) {
epsilon_0 = a_e_0;
}
public float getDiscount_0() {
return discount_0;
}
public void setDiscount_0(float a_discount_0) {
discount_0 = a_discount_0;
}
public AI getDefaultPolicy() {
return playoutPolicy;
}
public void setDefaultPolicy(AI a_dp) {
playoutPolicy = a_dp;
}
public UnitActionProbabilityDistribution getTreePolicyBias() {
return bias;
}
public void setTreePolicyBias(UnitActionProbabilityDistribution a_bias) {
bias = a_bias;
}
public EvaluationFunction getEvaluationFunction() {
return ef;
}
public void setEvaluationFunction(EvaluationFunction a_ef) {
ef = a_ef;
}
}
| 20,224 | 36.874532 | 313 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/informedmcts/InformedNaiveMCTSNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.informedmcts;
import ai.mcts.MCTSNode;
import ai.stochastic.UnitActionProbabilityDistribution;
import java.math.BigInteger;
import java.util.*;
import rts.*;
import rts.units.Unit;
import util.Pair;
import util.Sampler;
/**
*
* @author santi
*/
public class InformedNaiveMCTSNode extends MCTSNode {
public static final int E_GREEDY = 0;
public static final int UCB1 = 1;
static public int DEBUG = 0;
public static float C = 0.05f; // exploration constant for UCB1
boolean hasMoreActions = true;
public PlayerActionGenerator moveGenerator;
HashMap<BigInteger,InformedNaiveMCTSNode> childrenMap = new LinkedHashMap<>(); // associates action codes with children
// Decomposition of the player actions in unit actions, and their contributions:
public List<InformedUnitActionTableEntry> unitActionTable;
double evaluation_bound; // this is the maximum positive value that the evaluation function can return
public BigInteger multipliers[];
UnitActionProbabilityDistribution model;
/**
 * Creates a tree node for the given game state. The state is fast-forwarded
 * until some player can act (or the game ends); then the per-unit action tables
 * (seeded with the prior distributions of 'a_bias') and the mixed-radix action
 * code multipliers are built for the player to move.
 *
 * @param maxplayer index of the maximizing player
 * @param minplayer index of the minimizing player
 * @param a_gs game state this node represents (may be cycled forward in place)
 * @param a_bias model used to compute prior distributions over unit actions
 * @param a_parent parent node, or null for the root
 * @param a_evaluation_bound maximum positive value the evaluation function can return
 * @param a_creation_ID iteration number at which this node was created
 */
public InformedNaiveMCTSNode(int maxplayer, int minplayer, GameState a_gs, UnitActionProbabilityDistribution a_bias, InformedNaiveMCTSNode a_parent, double a_evaluation_bound, int a_creation_ID) throws Exception {
    parent = a_parent;
    gs = a_gs;
    model = a_bias;
    if (parent == null) depth = 0;
    else depth = parent.depth + 1;
    evaluation_bound = a_evaluation_bound;
    creation_ID = a_creation_ID;

    // advance the game until at least one player can act, or the game is over:
    while (gs.winner() == -1 &&
           !gs.gameover() &&
           !gs.canExecuteAnyAction(maxplayer) &&
           !gs.canExecuteAnyAction(minplayer)) {
        gs.cycle();
    }
    if (gs.winner() != -1 || gs.gameover()) {
        // terminal node:
        type = -1;
    } else if (gs.canExecuteAnyAction(maxplayer)) {
        type = 0;
        initUnitActionTables(maxplayer);
    } else if (gs.canExecuteAnyAction(minplayer)) {
        type = 1;
        initUnitActionTables(minplayer);
    } else {
        type = -1;
        // fixed: this message previously named the wrong class ("BiasedNaiveMCTSNode")
        System.err.println("InformedNaiveMCTSNode: This should not have happened...");
    }
}

/**
 * Builds the move generator, the per-unit action tables (with prior
 * distributions from 'model') and the action-code multipliers for 'player'.
 * Extracted from the constructor, where it used to be duplicated verbatim for
 * the max and min branches.
 */
private void initUnitActionTables(int player) throws Exception {
    moveGenerator = new PlayerActionGenerator(gs, player);
    actions = new ArrayList<>();
    children = new ArrayList<>();
    unitActionTable = new LinkedList<>();
    multipliers = new BigInteger[moveGenerator.getChoices().size()];
    BigInteger baseMultiplier = BigInteger.ONE;
    int idx = 0;
    for (Pair<Unit, List<UnitAction>> choice : moveGenerator.getChoices()) {
        double[] prior_distribution = model.predictDistribution(choice.m_a, gs, choice.m_b);
        InformedUnitActionTableEntry ae = new InformedUnitActionTableEntry(choice.m_a, choice.m_b, prior_distribution);
        unitActionTable.add(ae);
        // mixed-radix encoding: multipliers[idx] is the weight of unit idx's action index
        multipliers[idx] = baseMultiplier;
        baseMultiplier = baseMultiplier.multiply(BigInteger.valueOf(ae.nactions));
        idx++;
    }
}
// Naive Sampling:
/**
 * One step of the tree policy (naive sampling): with probability epsilon_0 the
 * local per-unit MABs are used (possibly expanding a new child); otherwise a
 * previously expanded child is picked via the global MAB and the descent recurses.
 * Returns the leaf node where a playout should start.
 */
public InformedNaiveMCTSNode selectLeaf(int maxplayer, int minplayer, float epsilon_l, float epsilon_g, float epsilon_0, int global_strategy, int max_depth, int a_creation_ID) throws Exception {
    // terminal node, or maximum tree depth reached: stop the descent here
    if (unitActionTable == null || depth >= max_depth) return this;

    // note: r.nextFloat() is only drawn when there is at least one child (short-circuit)
    boolean useGlobalMAB = children.size() > 0 && r.nextFloat() >= epsilon_0;
    if (!useGlobalMAB) {
        // sample from the local MABs (this might recursively call "selectLeaf" internally):
        return selectLeafUsingLocalMABs(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
    }

    // sample from the global MAB among the children expanded so far:
    InformedNaiveMCTSNode next = null;
    if (global_strategy == E_GREEDY) {
        next = selectFromAlreadySampledEpsilonGreedy(epsilon_g);
    } else if (global_strategy == UCB1) {
        next = selectFromAlreadySampledUCB1(C);
    }
    return next.selectLeaf(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
}
/**
 * Epsilon-greedy selection over the already-expanded children: with
 * probability epsilon_g a uniformly random child is returned, otherwise the
 * child with the best average evaluation (highest at max nodes, lowest at min
 * nodes).
 */
public InformedNaiveMCTSNode selectFromAlreadySampledEpsilonGreedy(float epsilon_g) throws Exception {
    if (r.nextFloat() < epsilon_g) {
        // exploration: pick any child seen so far, uniformly at random
        return (InformedNaiveMCTSNode) children.get(r.nextInt(children.size()));
    }
    // exploitation: scan for the child with the best mean evaluation
    InformedNaiveMCTSNode chosen = null;
    for (MCTSNode candidate : children) {
        if (chosen == null) {
            chosen = (InformedNaiveMCTSNode) candidate;
            continue;
        }
        double candidateAvg = candidate.accum_evaluation / candidate.visit_count;
        double chosenAvg = chosen.accum_evaluation / chosen.visit_count;
        boolean better = (type == 0) ? (candidateAvg > chosenAvg)   // max node
                                     : (candidateAvg < chosenAvg);  // min node
        if (better) chosen = (InformedNaiveMCTSNode) candidate;
    }
    return chosen;
}
/**
 * UCB1 selection over the already-expanded children. The mean evaluation is
 * normalized into [0,1] using the evaluation bound (and flipped for min
 * nodes); note that in this variant the constant C weighs the exploitation
 * term, not the exploration term.
 */
public InformedNaiveMCTSNode selectFromAlreadySampledUCB1(float C) throws Exception {
    InformedNaiveMCTSNode argBest = null;
    double bestScore = 0;
    for (MCTSNode candidate : children) {
        double mean = ((double) candidate.accum_evaluation) / candidate.visit_count;
        // normalize into [0,1]; sign flipped at min nodes so "higher is better" everywhere:
        double normalized = (type == 0) ? (evaluation_bound + mean) / (2 * evaluation_bound)
                                        : (evaluation_bound - mean) / (2 * evaluation_bound);
        double exploration = Math.sqrt(Math.log((double) visit_count) / candidate.visit_count);
        double score = C * normalized + exploration;
        if (argBest == null || score > bestScore) {
            argBest = (InformedNaiveMCTSNode) candidate;
            bestScore = score;
        }
    }
    return argBest;
}
// Naive sampling from the local MABs: builds a player action by sampling one unit
// action per unit from an epsilon-greedy mixture of the learned prior distribution
// and the empirically best action, then descends into (or expands) the child node
// reached by the resulting action code.
public InformedNaiveMCTSNode selectLeafUsingLocalMABs(int maxplayer, int minplayer, float epsilon_l, float epsilon_g, float epsilon_0, int global_strategy, int max_depth, int a_creation_ID) throws Exception {
    PlayerAction pa2;
    BigInteger actionCode;

    // For each unit, rank the unitActions according to preference:
    List<double []> distributions = new LinkedList<>();
    List<Integer> notSampledYet = new LinkedList<>();
    for(InformedUnitActionTableEntry ate:unitActionTable) {
        double []dist = new double[ate.nactions];
        int bestIdx = -1;
        double bestEvaluation = 0;
        int visits = 0;
        for(int i = 0;i<ate.nactions;i++) {
            if (type==0) {
                // max node: prefer unvisited actions, then the highest average evaluation
                if (bestIdx==-1 ||
                    (visits!=0 && ate.visit_count[i]==0) ||
                    (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])>bestEvaluation)) {
                    bestIdx = i;
                    if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                    else bestEvaluation = 0;
                    visits = ate.visit_count[i];
                }
            } else {
                // min node: prefer unvisited actions, then the lowest average evaluation
                if (bestIdx==-1 ||
                    (visits!=0 && ate.visit_count[i]==0) ||
                    (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])<bestEvaluation)) {
                    bestIdx = i;
                    if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                    else bestEvaluation = 0;
                    visits = ate.visit_count[i];
                }
            }
            // model the distribution: every action gets epsilon_l times its prior mass...
            dist[i] = epsilon_l * ate.prior_distribution[i];
        }
        // ...and the greedy action additionally gets the remaining (1 - epsilon_l) mass:
        if (ate.visit_count[bestIdx]!=0) {
            dist[bestIdx] = (1-epsilon_l) + (epsilon_l * ate.prior_distribution[bestIdx]);
        } else {
            // some action is still unvisited: restrict sampling to the unvisited ones
            for(int j = 0;j<dist.length;j++)
                if (ate.visit_count[j]>0) dist[j] = 0;
        }
        if (DEBUG>=3) {
            System.out.println("e_l = " + epsilon_l);
            System.out.println(ate.actions);
            System.out.print("[ ");
            for(int i = 0;i<ate.nactions;i++) System.out.print("(" + ate.visit_count[i] + "," + ate.accum_evaluation[i]/ate.visit_count[i] + ")");
            System.out.println("]");
            System.out.println("Prior = " + Arrays.toString(ate.prior_distribution));
            System.out.println("Final = " + Arrays.toString(dist));
        }
        notSampledYet.add(distributions.size());
        distributions.add(dist);
    }

    // Select the best combination that results in a valid playeraction by epsilon-greedy sampling:
    // base_ru collects the resources already committed by units that are mid-action:
    ResourceUsage base_ru = new ResourceUsage();
    for(Unit u:gs.getUnits()) {
        UnitAction ua = gs.getUnitAction(u);
        if (ua!=null) {
            ResourceUsage ru = ua.resourceUsage(u, gs.getPhysicalGameState());
            base_ru.merge(ru);
        }
    }

    pa2 = new PlayerAction();
    actionCode = BigInteger.ZERO;
    pa2.setResourceUsage(base_ru.clone());
    while(!notSampledYet.isEmpty()) {
        // visit the units in random order; List.remove(int) removes by index and returns the unit index
        int i = notSampledYet.remove(r.nextInt(notSampledYet.size()));

        try {
            InformedUnitActionTableEntry ate = unitActionTable.get(i);
            int code;
            UnitAction ua;
            ResourceUsage r2;

            // try one at random:
            double []distribution = distributions.get(i);
            code = Sampler.weighted(distribution);
            ua = ate.actions.get(code);
            r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
            if (!pa2.getResourceUsage().consistentWith(r2, gs)) {
                // sample at random, eliminating the ones that have not worked so far:
                List<Double> dist_l = new ArrayList<>();
                List<Integer> dist_outputs = new ArrayList<>();
                for(int j = 0;j<distribution.length;j++) {
                    dist_l.add(distribution[j]);
                    dist_outputs.add(j);
                }
                do{
                    int idx = dist_outputs.indexOf(code);
                    dist_l.remove(idx);
                    dist_outputs.remove(idx);
                    code = (Integer)Sampler.weighted(dist_l, dist_outputs);
                    ua = ate.actions.get(code);
                    r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                }while(!pa2.getResourceUsage().consistentWith(r2, gs));
            }

            // DEBUG code:
            if (gs.getUnit(ate.u.getID())==null) throw new Error("Issuing an action to an inexisting unit!!!");

            pa2.getResourceUsage().merge(r2);
            pa2.addUnitAction(ate.u, ua);
            // fold this unit's action index into the mixed-radix action code:
            actionCode = actionCode.add(BigInteger.valueOf(code).multiply(multipliers[i]));
        } catch(Exception e) {
            e.printStackTrace();
        }
    }

    // descend into the existing child for this action code, or expand a new one:
    InformedNaiveMCTSNode pate = childrenMap.get(actionCode);
    if (pate==null) {
        actions.add(pa2);
        GameState gs2 = gs.cloneIssue(pa2);
        InformedNaiveMCTSNode node = new InformedNaiveMCTSNode(maxplayer, minplayer, gs2.clone(), model, this, evaluation_bound, a_creation_ID);
        childrenMap.put(actionCode,node);
        children.add(node);
        return node;
    }
    return pate.selectLeaf(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
}
/**
 * Returns the action-table entry for the given unit (identity comparison,
 * as everywhere else in this class). Throws an Error if the unit has no entry.
 */
public InformedUnitActionTableEntry getActionTableEntry(Unit u) {
    InformedUnitActionTableEntry found = null;
    for (InformedUnitActionTableEntry entry : unitActionTable) {
        if (entry.u == u) {
            found = entry;
            break;
        }
    }
    if (found == null) throw new Error("Could not find Action Table Entry!");
    return found;
}
/**
 * Backs up 'evaluation' from this node to the root, updating both the node
 * statistics and the per-unit action tables along the way.
 *
 * @param evaluation playout result to back up
 * @param child the child through which the evaluation arrived, or null when
 *              called on the leaf where the playout was run
 */
public void propagateEvaluation(double evaluation, InformedNaiveMCTSNode child) {
    accum_evaluation += evaluation;
    visit_count++;

    // update the unitAction table with the per-unit actions that led to 'child':
    if (child != null) {
        int idx = children.indexOf(child);
        PlayerAction pa = actions.get(idx);
        for (Pair<Unit, UnitAction> ua : pa.getActions()) {
            InformedUnitActionTableEntry actionTable = getActionTableEntry(ua.m_a);
            int actionIdx = actionTable.actions.indexOf(ua.m_b);
            if (actionIdx == -1) {
                // should never happen; previously the code fell through here and crashed
                // with an ArrayIndexOutOfBoundsException(-1). Log and skip instead:
                System.out.println("Looking for action: " + ua.m_b);
                System.out.println("Available actions are: " + actionTable.actions);
                continue;
            }
            actionTable.accum_evaluation[actionIdx] += evaluation;
            actionTable.visit_count[actionIdx]++;
        }
    }
    if (parent != null) {
        ((InformedNaiveMCTSNode) parent).propagateEvaluation(evaluation, this);
    }
}
/** Debug helper: prints visit counts and average evaluations for every unit action. */
public void printUnitActionTable() {
    for (InformedUnitActionTableEntry entry : unitActionTable) {
        System.out.println("Actions for unit " + entry.u);
        for (int idx = 0; idx < entry.nactions; idx++) {
            // note: unvisited actions print NaN/Infinity (0/0 or x/0 in doubles)
            double average = entry.accum_evaluation[idx] / entry.visit_count[idx];
            System.out.println("    " + entry.actions.get(idx) + " visited " + entry.visit_count[idx] + " with average evaluation " + average);
        }
    }
}
}
| 15,653 | 42.483333 | 217 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/informedmcts/InformedUnitActionTableEntry.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.informedmcts;
import java.util.List;
import rts.UnitAction;
import rts.units.Unit;
/**
*
* @author santi
*/
/**
 * Per-unit statistics table used by the informed naive MCTS: for one unit it
 * stores the candidate actions, a prior probability for each action (from a
 * learned model), and the accumulated evaluation / visit count gathered during
 * search.
 */
public class InformedUnitActionTableEntry {
    public Unit u;                       // the unit this entry refers to
    public int nactions = 0;             // number of candidate actions for the unit
    public List<UnitAction> actions;     // the candidate actions themselves
    public double[] prior_distribution;  // prior probability of each action (parallel to 'actions')
    public double[] accum_evaluation;    // accumulated evaluation per action
    public int[] visit_count;            // number of times each action was sampled

    /**
     * @param a_u the unit
     * @param a_actions candidate actions for the unit
     * @param a_prior prior distribution over the actions; must have one entry
     *                per action (checked with an assertion, i.e. under -ea)
     */
    public InformedUnitActionTableEntry(Unit a_u, List<UnitAction> a_actions, double []a_prior)
    {
        // validate before mutating any state:
        assert(a_prior.length == a_actions.size());
        u = a_u;
        actions = a_actions;
        nactions = actions.size();
        // Java zero-initializes freshly allocated arrays, so no explicit clearing loop is needed:
        accum_evaluation = new double[nactions];
        visit_count = new int[nactions];
        prior_distribution = a_prior;
    }
}
| 964 | 23.74359 | 96 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/mlps/MLPSMCTS.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.mlps;
import ai.*;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * MCTS agent whose tree policy is the combinatorial multi-armed bandit
 * sampling strategy implemented in {@link MLPSNode} (after Gai, Krishnamachari
 * and Jain, "Learning Multiuser Channel Allocations in Cognitive Radio
 * Networks"). Playouts are completed with a default policy (randomAI) and
 * scored with an evaluation function (ef).
 */
public class MLPSMCTS extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;
    public EvaluationFunction ef;               // evaluation function applied at the end of each playout

    Random r = new Random();
    public AI randomAI = new RandomBiasedAI();  // default policy used to complete playouts
    long max_actions_so_far = 0;                // largest branching factor observed (statistics only)

    GameState gs_to_start_from;                 // state from which the current search was started
    MLPSNode tree;                              // root of the current search tree
    int current_iteration = 0;                  // used as creation ID for new tree nodes

    public int MAXSIMULATIONTIME = 1024;        // maximum playout length, in game cycles
    public int MAX_TREE_DEPTH = 10;             // maximum depth the tree policy will descend to

    int playerForThisComputation;               // player the current search plans for
    double C = 0.05;                            // exploration/exploitation constant of the bandit policy

    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    public long total_time = 0;

    /**
     * Builds the agent with default parameters; the unit-type table is not
     * needed by this agent, so the parameter is ignored.
     */
    public MLPSMCTS(UnitTypeTable utt) {
        this(100,-1,100,10,0.05,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3());
    }

    /**
     * @param available_time time budget per decision in milliseconds (-1 = unlimited)
     * @param max_playouts iteration budget per decision (-1 = unlimited)
     * @param lookahead maximum playout length, in game cycles
     * @param max_depth maximum tree depth
     * @param a_C exploration/exploitation constant
     * @param policy default policy used for playouts
     * @param a_ef evaluation function used to score playout outcomes
     */
    public MLPSMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
                    double a_C,
                    AI policy, EvaluationFunction a_ef) {
        super(available_time,max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        C = a_C;
        ef = a_ef;
    }

    /** Discards the current search tree and clears all accumulated statistics. */
    public void reset() {
        tree = null;
        gs_to_start_from = null;
        total_runs = 0;
        total_cycles_executed = 0;
        total_actions_issued = 0;
        total_time = 0;
        current_iteration = 0;
    }

    /** Returns a fresh agent with the same configuration (the policy and evaluation function instances are shared). */
    public AI clone() {
        return new MLPSMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, C, randomAI, ef);
    }

    /** Runs a full search for this frame and returns the chosen action (empty if the player cannot act). */
    public PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    /** Initializes a fresh search tree rooted at 'gs' for player 'a_player'. */
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        playerForThisComputation = a_player;
        current_iteration = 0;
        float evaluation_bound = ef.upperBound(gs);
        tree = new MLPSNode(playerForThisComputation, 1-playerForThisComputation, gs, null, evaluation_bound, current_iteration++);
        if (tree.moveGenerator!=null) {
            max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
        }
        gs_to_start_from = gs;
    }

    /** Discards the current search tree. */
    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
    }

    /** Runs search iterations until either the time or the iteration budget is exhausted. */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        long end = start;
        long count = 0;
        while(true) {
            if (!iteration(playerForThisComputation)) break;
            count++;
            end = System.currentTimeMillis();
            if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
        }
        total_time += (end - start);
        total_cycles_executed++;
    }

    /**
     * One MCTS iteration: select a leaf, run a playout from it, and
     * back-propagate the (time-discounted) evaluation.
     *
     * @return false when no leaf could be selected (search cannot continue)
     */
    public boolean iteration(int player) throws Exception {
        MLPSNode leaf = tree.selectLeaf(player, 1-player, C, MAX_TREE_DEPTH, current_iteration++);
        if (leaf!=null) {
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);

            // discount by 0.99 every 10 cycles so earlier rewards weigh more:
            int time = gs2.getTime() - gs_to_start_from.getTime();
            double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);

            leaf.propagateEvaluation((float)evaluation,null);
            total_runs++;
        } else {
            // no actions to choose from :)
            System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
            return false;
        }
        return true;
    }

    /** Returns the most visited root action (the standard MCTS final-move rule), or an empty action if the root has no children. */
    public PlayerAction getBestActionSoFar() {
        int idx = getMostVisitedActionIdx();
        if (idx==-1) {
            if (DEBUG>=1) System.out.println("MLPSMCTS no children selected. Returning an empty asction");
            return new PlayerAction();
        }
        if (DEBUG>=2) tree.showNode(0,1,ef);
        if (DEBUG>=1) {
            MLPSNode best = (MLPSNode) tree.children.get(idx);
            System.out.println("MLPSMCTS selected children " + tree.actions.get(idx) + " explored " + best.visit_count + " Avg evaluation: " + (best.accum_evaluation/((double)best.visit_count)));
        }
        return tree.actions.get(idx);
    }

    /** Index of the root child with the highest visit count, or -1 if the root has no children. */
    public int getMostVisitedActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        MLPSNode best = null;
        if (DEBUG>=2) {
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        if (tree.children==null) return -1;
        for(int i = 0;i<tree.children.size();i++) {
            MLPSNode child = (MLPSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
            if (best == null || child.visit_count>best.visit_count) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }

    /** Index of the root child with the highest average evaluation, or -1 if the root has no children. */
    public int getHighestEvaluationActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        MLPSNode best = null;
        if (DEBUG>=2) {
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        // fixed: guard against a childless root (e.g. terminal state); previously this
        // method threw a NullPointerException here, unlike getMostVisitedActionIdx:
        if (tree.children==null) return -1;
        for(int i = 0;i<tree.children.size();i++) {
            MLPSNode child = (MLPSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
            if (best == null || (child.accum_evaluation/((double)child.visit_count))>(best.accum_evaluation/((double)best.visit_count))) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }

    /** Plays the game forward with the default policy until 'time' is reached or the game ends. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    /** Returns the current search tree root (may be null). */
    public MLPSNode getTree() {
        return tree;
    }

    /** Returns the state the current search was started from (may be null). */
    public GameState getGameStateToStartFrom() {
        return gs_to_start_from;
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAX_TREE_DEPTH + ", " + C + ", " + randomAI + ", " + ef + ")";
    }

    @Override
    public String statisticsString() {
        return "Total runs: " + total_runs +
               ", runs per action: " + (total_runs/(float)total_actions_issued) +
               ", runs per cycle: " + (total_runs/(float)total_cycles_executed) +
               ", averate time per cycle: " + (total_time/(float)total_cycles_executed) +
               ", max branching factor: " + max_actions_so_far;
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
        parameters.add(new ParameterSpecification("C",double.class,0.05));
        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    // plain getters and setters for the tunable parameters:

    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }

    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }

    public double getC() {
        return C;
    }

    public void setC(double a_c) {
        C = a_c;
    }

    public AI getDefaultPolicy() {
        return randomAI;
    }

    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 10,662 | 30.92515 | 195 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/mlps/MLPSNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.mlps;
import ai.mcts.MCTSNode;
import ai.montecarlo.lsi.Sampling.UnitActionTableEntry;
import java.util.*;
import rts.*;
import rts.units.Unit;
import util.Pair;
/**
*
* @author santi
*
* From: "Learning Multiuser Channel Allocations in Cognitive Radio Networks: A Combinatorial Multi-Armed Bandit Formulation"
* Yi Gai, Bhaskar Krishnamachari and Rahul Jain
*
* The original NLPS sampling strategy used the Hungarian algorithm to selct the best macro-arm
* at each cycle. However, that only works in their formulation. In the more general case that
* we consider here, the Hungarian algorithm is not applicable. Thus, I replaced that step by
* simply selecting the actino with the maximum score for each unit. If the actions do not interfere
* too much, this obtains the obtimal action most of the times, and it's O(n*m)
*
*/
public class MLPSNode extends MCTSNode {
    static public int DEBUG = 0;

    // currently never updated inside this class (always true)
    boolean hasMoreActions = true;
    public PlayerActionGenerator moveGenerator;
    HashMap<Long,MLPSNode> childrenMap = new LinkedHashMap<>();    // associates action codes with children
    // Decomposition of the player actions in unit actions, and their contributions:
    public List<UnitActionTableEntry> unitActionTable;
    // per-unit UCB score caches, parallel to unitActionTable (one double[] per unit, one slot per action):
    public List<double[]> UCBExplorationScores;
    public List<double[]> UCBExploitationScores;
    double evaluation_bound = 0;   // maximum positive value the evaluation function can return
    int max_nactions = 0;          // largest action count of any single unit ("M" in the exploration term)
    public long multipliers[];     // mixed-radix weights used to encode a player action as a single long

    // Fast-forwards the state until some player can act (or the game ends), then builds
    // the per-unit action tables and UCB score caches for the player to move.
    public MLPSNode(int maxplayer, int minplayer, GameState a_gs, MLPSNode a_parent, double bound, int a_creation_ID) throws Exception {
        parent = a_parent;
        gs = a_gs;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        evaluation_bound = bound;
        creation_ID = a_creation_ID;

        // advance the game until at least one player can act, or the game is over:
        while (gs.winner() == -1 &&
               !gs.gameover() &&
               !gs.canExecuteAnyAction(maxplayer) &&
               !gs.canExecuteAnyAction(minplayer)) {
            gs.cycle();
        }
        if (gs.winner() != -1 || gs.gameover()) {
            // terminal node:
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            // max node: build one action-table entry per unit that can act
            type = 0;
            moveGenerator = new PlayerActionGenerator(a_gs, maxplayer);
            actions = new ArrayList<>();
            children = new ArrayList<>();
            unitActionTable = new ArrayList<>();
            UCBExplorationScores = new ArrayList<>();
            UCBExploitationScores = new ArrayList<>();
            multipliers = new long[moveGenerator.getChoices().size()];
            long baseMultiplier = 1;
            int idx = 0;
            for (Pair<Unit, List<UnitAction>> choice : moveGenerator.getChoices()) {
                UnitActionTableEntry ae = new UnitActionTableEntry();
                ae.u = choice.m_a;
                ae.nactions = choice.m_b.size();
                if (ae.nactions>max_nactions) max_nactions= ae.nactions;
                ae.actions = choice.m_b;
                ae.accum_evaluation = new double[ae.nactions];
                ae.visit_count = new int[ae.nactions];
                for (int i = 0; i < ae.nactions; i++) {
                    ae.accum_evaluation[i] = 0;
                    ae.visit_count[i] = 0;
                }
                unitActionTable.add(ae);
                UCBExplorationScores.add(new double[ae.nactions]);
                UCBExploitationScores.add(new double[ae.nactions]);
                // mixed-radix encoding: multipliers[idx] is the weight of unit idx's action index
                multipliers[idx] = baseMultiplier;
                baseMultiplier*=ae.nactions;
                idx++;
            }
        } else if (gs.canExecuteAnyAction(minplayer)) {
            // min node: identical table construction, but for the min player
            type = 1;
            moveGenerator = new PlayerActionGenerator(a_gs, minplayer);
            actions = new ArrayList<>();
            children = new ArrayList<>();
            unitActionTable = new ArrayList<>();
            UCBExplorationScores = new ArrayList<>();
            UCBExploitationScores = new ArrayList<>();
            multipliers = new long[moveGenerator.getChoices().size()];
            long baseMultiplier = 1;
            int idx = 0;
            for (Pair<Unit, List<UnitAction>> choice : moveGenerator.getChoices()) {
                UnitActionTableEntry ae = new UnitActionTableEntry();
                ae.u = choice.m_a;
                ae.nactions = choice.m_b.size();
                if (ae.nactions>max_nactions) max_nactions= ae.nactions;
                ae.actions = choice.m_b;
                ae.accum_evaluation = new double[ae.nactions];
                ae.visit_count = new int[ae.nactions];
                for (int i = 0; i < ae.nactions; i++) {
                    ae.accum_evaluation[i] = 0;
                    ae.visit_count[i] = 0;
                }
                unitActionTable.add(ae);
                UCBExplorationScores.add(new double[ae.nactions]);
                UCBExploitationScores.add(new double[ae.nactions]);
                multipliers[idx] = baseMultiplier;
                baseMultiplier*=ae.nactions;
                idx++;
            }
        } else {
            type = -1;
            System.err.println("MLPSNode: This should not have happened...");
        }
    }

    // Average observed evaluation of 'action' in entry 'e' (0 if the action was never tried).
    public double actionExploitationValue(UnitActionTableEntry e, int action) {
        if (e.visit_count[action]==0) return 0;
        double exploitation = e.accum_evaluation[action] / e.visit_count[action];
        return exploitation;
    }

    // Exploration term of the UCB formula: M = max actions of any unit,
    // n = visits of this node, n_ij = visits of the action. Untried actions
    // get maximum priority.
    public double explorationValue(int M, int n, int n_ij) {
        if (n_ij == 0) return Double.MAX_VALUE;
        double exploration = M*Math.sqrt((M+1)*Math.log((double)n)/n_ij);
        return exploration;
    }

    // C is the UCB constant for exploration/exploitation
    public MLPSNode selectLeaf(int maxplayer, int minplayer, double C, int max_depth, int a_creation_ID) throws Exception {
        if (unitActionTable == null) return this;   // terminal node
        if (depth>=max_depth) return this;          // depth cutoff

        if (DEBUG>=1) System.out.println("MLPSNode.selectLeaf...");

        // For each unit, compute the UCB1 scores for each action:
        List<Integer> notSampledYetIDs = new LinkedList<>();
        for(int ate_idx = 0;ate_idx<unitActionTable.size();ate_idx++) {
            UnitActionTableEntry ate = unitActionTable.get(ate_idx);
            double []scoresExploitation = UCBExploitationScores.get(ate_idx);
            double []scoresExploration = UCBExplorationScores.get(ate_idx);
            for(int i = 0;i<ate.nactions;i++) {
                scoresExploitation[i] = actionExploitationValue(ate, i);
                scoresExploration[i] = explorationValue(max_nactions, visit_count, ate.visit_count[i]);
            }
            if (DEBUG>=3) {
                System.out.print("[ ");
                for(int i = 0;i<ate.nactions;i++) System.out.print("(" + ate.visit_count[i] + "," + scoresExploitation[i] + "," + scoresExploration[i] + ")");
                System.out.println("]");
            }
            notSampledYetIDs.add(ate_idx);
        }

        // Select the best combination that results in a valid playeraction by MLPS sampling (maximizing UCB1 score of each action):
        // base_ru collects the resources already committed by units that are mid-action:
        ResourceUsage base_ru = new ResourceUsage();
        for(Unit u:gs.getUnits()) {
            UnitAction ua = gs.getUnitAction(u);
            if (ua!=null) {
                ResourceUsage ru = ua.resourceUsage(u, gs.getPhysicalGameState());
                base_ru.merge(ru);
            }
        }

        PlayerAction best_pa = null;
        long best_actionCode = -1;
        double best_accumUCBScore = 0;
        // repeat the greedy construction several times (units are visited in random order,
        // so repetitions can produce different player actions) and keep the best-scoring one:
        for(int repeat = 0;repeat<10;repeat++) {
            PlayerAction pa2 = new PlayerAction();
            long actionCode = 0;
            double accumUCBScore = 0;
            double maxExplorationScore = 0;
            pa2.setResourceUsage(base_ru.clone());
            List<Integer> notSampledYetIDs2 = new LinkedList<>(notSampledYetIDs);
            while(!notSampledYetIDs2.isEmpty()) {
                if (DEBUG>=2) System.out.println("notSampledYet: " + notSampledYetIDs2);
                int i = r.nextInt(notSampledYetIDs2.size());
                i = notSampledYetIDs2.remove(i);   // removes by index, returns the unit index

                try {
                    UnitActionTableEntry ate = unitActionTable.get(i);
                    double []scoresExploitation = UCBExploitationScores.get(i);
                    double []scoresExploration = UCBExplorationScores.get(i);
                    int code = -1;
                    UnitAction ua;
                    ResourceUsage r2;
                    // select the best one:
                    for(int j = 0;j<ate.nactions;j++) {
                        if (code==-1) {
                            code = j;
                            continue;
                        }
                        double s1 = scoresExploitation[j] + C*Math.max(scoresExploration[j], maxExplorationScore);
                        double s2 = scoresExploitation[code] + C*Math.max(scoresExploration[code], maxExplorationScore);
                        if (s1>s2) code = j;
                    }
                    ua = ate.actions.get(code);
                    r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                    if (!pa2.getResourceUsage().consistentWith(r2, gs)) {
                        // preferred action conflicts with already-reserved resources;
                        // get the best next one:
                        List<Integer> actions = new ArrayList<>();
                        for(int j = 0;j<ate.nactions;j++) {
                            if (j!=code) actions.add(j);
                        }
                        if (DEBUG>=4) System.out.println("    unit " + i + ": trying " + code);
                        do{
                            code = -1;
                            for(Integer j:actions) {
                                if (code==-1) {
                                    code = j;
                                    continue;
                                }
                                double s1 = scoresExploitation[j] + C*Math.max(scoresExploration[j], maxExplorationScore);
                                double s2 = scoresExploitation[code] + C*Math.max(scoresExploration[code], maxExplorationScore);
                                if (s1>s2) code = j;
                            }
                            if (DEBUG>=4) System.out.println("    unit " + i + ": trying " + code);
                            actions.remove((Integer)code);   // remove by value, not by index
                            ua = ate.actions.get(code);
                            r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                        }while(!pa2.getResourceUsage().consistentWith(r2, gs));
                    }
                    if (DEBUG>=3) System.out.println("    unit " + i + ": " + code);
                    accumUCBScore += C*scoresExploitation[code];
                    maxExplorationScore = Math.max(scoresExploration[code], maxExplorationScore);
                    pa2.getResourceUsage().merge(r2);
                    pa2.addUnitAction(ate.u, ua);
                    // fold this unit's action index into the mixed-radix action code:
                    actionCode+= ((long)code)*multipliers[i];
                } catch(Exception e) {
                    e.printStackTrace();
                }
            }
            accumUCBScore+=maxExplorationScore;
            if (DEBUG>=1) System.out.println("  accumUCBScore: " + accumUCBScore);
            if (best_pa==null || accumUCBScore>best_accumUCBScore) {
                best_pa = pa2;
                best_accumUCBScore = accumUCBScore;
                best_actionCode = actionCode;
            }
        }

        // descend into the existing child for this action code, or expand a new one:
        MLPSNode pate = childrenMap.get(best_actionCode);
        if (pate==null) {
            actions.add(best_pa);
            GameState gs2 = gs.cloneIssue(best_pa);
            MLPSNode node = new MLPSNode(maxplayer, minplayer, gs2.clone(), this, evaluation_bound, a_creation_ID);
            childrenMap.put(best_actionCode,node);
            children.add(node);
            return node;
        }
        return pate.selectLeaf(maxplayer, minplayer, C, max_depth, a_creation_ID);
    }

    // Returns the action-table entry of unit u (identity comparison), or null if absent.
    public UnitActionTableEntry getActionTableEntry(Unit u) {
        for(UnitActionTableEntry e:unitActionTable) {
            if (e.u == u) return e;
        }
        return null;
    }

    // Backs up 'evaluation' from 'child' to the root, updating node statistics
    // and the per-unit action tables along the way.
    public void propagateEvaluation(float evaluation, MLPSNode child) {
        accum_evaluation += evaluation;
        visit_count++;

        // update the unitAction table:
        if (child != null) {
            int idx = children.indexOf(child);
            PlayerAction pa = actions.get(idx);
            for (Pair<Unit, UnitAction> ua : pa.getActions()) {
                UnitActionTableEntry actionTable = getActionTableEntry(ua.m_a);
                idx = actionTable.actions.indexOf(ua.m_b);
                if (idx==-1) {
                    // NOTE(review): if this branch triggers, the array accesses below
                    // will throw ArrayIndexOutOfBoundsException with idx == -1 —
                    // confirm whether skipping the update was intended instead.
                    System.out.println("Looking for action: " + ua.m_b);
                    System.out.println("Available actions are: " + actionTable.actions);
                }
                actionTable.accum_evaluation[idx] += evaluation;
                actionTable.visit_count[idx]++;
            }
        }
        if (parent != null) {
            ((MLPSNode)parent).propagateEvaluation(evaluation, this);
        }
    }

    // Debug helper: prints visit counts and average evaluations for every unit action.
    public void printUnitActionTable() {
        for (UnitActionTableEntry uat : unitActionTable) {
            System.out.println("Actions for unit " + uat.u);
            for (int i = 0; i < uat.nactions; i++) {
                System.out.println("    " + uat.actions.get(i) + " visited " + uat.visit_count[i] + " with average evaluation " + (uat.accum_evaluation[i] / uat.visit_count[i]));
            }
        }
    }
}
| 13,890 | 41.873457 | 177 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/NaiveMCTS.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import ai.*;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
public class NaiveMCTS extends AIWithComputationBudget implements InterruptibleAI {
public static int DEBUG = 0;
public EvaluationFunction ef;   // evaluation function used to score playout outcomes

Random r = new Random();
public AI playoutPolicy = new RandomBiasedAI();   // default policy used to complete playouts
protected long max_actions_so_far = 0;            // largest branching factor observed (statistics only)

protected GameState gs_to_start_from;   // state the current search was started from
protected NaiveMCTSNode tree;           // root of the current search tree
protected int current_iteration = 0;    // used as creation ID for new tree nodes

public int MAXSIMULATIONTIME = 1024;    // maximum playout length, in game cycles
public int MAX_TREE_DEPTH = 10;         // maximum depth of the search tree

protected int player;                   // player the current search plans for

public float epsilon_0 = 0.2f;          // probability of sampling from the local MABs instead of the global MAB
public float epsilon_l = 0.25f;         // exploration rate of the local (per-unit) MABs
public float epsilon_g = 0.0f;          // exploration rate of the global MAB

// these variables are for using a discount factor on the epsilon values above. My experiments indicate that things work better without discount
// So, they are just maintained here for completeness:
public float initial_epsilon_0 = 0.2f;
public float initial_epsilon_l = 0.25f;
public float initial_epsilon_g = 0.0f;
public float discount_0 = 0.999f;
public float discount_l = 0.999f;
public float discount_g = 0.999f;

public int global_strategy = NaiveMCTSNode.E_GREEDY;   // strategy of the global MAB (E_GREEDY or UCB1)

// if true, actions never sampled so far are preferred over already-sampled ones during selection:
public boolean forceExplorationOfNonSampledActions = true;

// statistics:
public long total_runs = 0;
public long total_cycles_executed = 0;
public long total_actions_issued = 0;
public long total_time = 0;
/**
 * Builds the agent with default settings (100ms budget, 100-cycle playouts,
 * depth 10, e_l=0.3, e_g=0.0, e_0=0.4, no epsilon discounting). The unit-type
 * table is not used by this agent.
 */
public NaiveMCTS(UnitTypeTable utt) {
    this(100, -1, 100, 10, 0.3f, 0.0f, 0.4f, new RandomBiasedAI(), new SimpleSqrtEvaluationFunction3(), true);
}
/**
 * Full constructor, including the per-iteration discount factors applied to
 * the epsilon parameters (1.0 = no discount).
 *
 * @param available_time time budget per decision in milliseconds (-1 = unlimited)
 * @param max_playouts iteration budget per decision (-1 = unlimited)
 * @param lookahead maximum playout length, in game cycles
 * @param max_depth maximum tree depth
 * @param e_l initial exploration rate of the local (per-unit) MABs
 * @param a_discount_l discount applied to epsilon_l (parameter renamed from the misspelled "discout_l")
 * @param e_g initial exploration rate of the global MAB
 * @param a_discount_g discount applied to epsilon_g
 * @param e_0 initial probability of sampling from the local MABs
 * @param a_discount_0 discount applied to epsilon_0
 * @param policy default policy used for playouts
 * @param a_ef evaluation function used to score playout outcomes
 * @param fensa if true, non-sampled actions are always explored before re-sampling known ones
 */
public NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
                 float e_l, float a_discount_l,
                 float e_g, float a_discount_g,
                 float e_0, float a_discount_0,
                 AI policy, EvaluationFunction a_ef,
                 boolean fensa) {
    super(available_time, max_playouts);
    MAXSIMULATIONTIME = lookahead;
    playoutPolicy = policy;
    MAX_TREE_DEPTH = max_depth;
    initial_epsilon_l = epsilon_l = e_l;
    initial_epsilon_g = epsilon_g = e_g;
    initial_epsilon_0 = epsilon_0 = e_0;
    discount_l = a_discount_l;
    discount_g = a_discount_g;
    discount_0 = a_discount_0;
    ef = a_ef;
    forceExplorationOfNonSampledActions = fensa;
}
/**
 * Constructor with explicit epsilon parameters and no epsilon discounting
 * (all discount factors fixed at 1).
 */
public NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth, float e_l, float e_g, float e_0, AI policy, EvaluationFunction a_ef, boolean fensa) {
    super(available_time, max_playouts);
    MAXSIMULATIONTIME = lookahead;
    MAX_TREE_DEPTH = max_depth;
    playoutPolicy = policy;
    ef = a_ef;
    forceExplorationOfNonSampledActions = fensa;
    // record both the working and the initial epsilon values:
    epsilon_l = initial_epsilon_l = e_l;
    epsilon_g = initial_epsilon_g = e_g;
    epsilon_0 = initial_epsilon_0 = e_0;
    // no discounting:
    discount_l = 1.0f;
    discount_g = 1.0f;
    discount_0 = 1.0f;
}
/**
 * Constructor that additionally selects the global-MAB strategy
 * (NaiveMCTSNode.E_GREEDY or NaiveMCTSNode.UCB1); no epsilon discounting.
 */
public NaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth, float e_l, float e_g, float e_0, int a_global_strategy, AI policy, EvaluationFunction a_ef, boolean fensa) {
    super(available_time, max_playouts);
    MAXSIMULATIONTIME = lookahead;
    MAX_TREE_DEPTH = max_depth;
    playoutPolicy = policy;
    global_strategy = a_global_strategy;
    ef = a_ef;
    forceExplorationOfNonSampledActions = fensa;
    // record both the working and the initial epsilon values:
    epsilon_l = initial_epsilon_l = e_l;
    epsilon_g = initial_epsilon_g = e_g;
    epsilon_0 = initial_epsilon_0 = e_0;
    // no discounting:
    discount_l = 1.0f;
    discount_g = 1.0f;
    discount_0 = 1.0f;
}
/**
 * Discards the current search tree and root state and zeroes every
 * accumulated statistic, returning the AI to a freshly-constructed state.
 */
public void reset() {
    // drop search state:
    tree = null;
    gs_to_start_from = null;
    current_iteration = 0;
    // zero out the lifetime statistics:
    total_runs = 0;
    total_cycles_executed = 0;
    total_actions_issued = 0;
    total_time = 0;
}
/**
 * Returns a new NaiveMCTS configured identically to this one.
 * Search state and statistics are not copied.
 */
public AI clone() {
    NaiveMCTS copy = new NaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET,
                                   MAXSIMULATIONTIME, MAX_TREE_DEPTH,
                                   epsilon_l, discount_l,
                                   epsilon_g, discount_g,
                                   epsilon_0, discount_0,
                                   playoutPolicy, ef,
                                   forceExplorationOfNonSampledActions);
    return copy;
}
/**
 * Computes one action for the given player: starts a fresh search on a clone
 * of the state, searches for one game frame's worth of budget, and returns
 * the best action found. Returns an empty action when the player has no
 * unit that can act.
 */
public PlayerAction getAction(int player, GameState gs) throws Exception {
    if (!gs.canExecuteAnyAction(player)) {
        // nothing for this player to do on this frame
        return new PlayerAction();
    }
    startNewComputation(player, gs.clone());
    computeDuringOneGameFrame();
    return getBestActionSoFar();
}
/**
 * Initializes a fresh search tree rooted at the given state for the given
 * player, and restores the epsilon values to their initial (undiscounted)
 * settings. Part of the InterruptibleAI protocol.
 *
 * @param a_player the player this search will compute an action for
 * @param gs       root state (caller is expected to pass a clone)
 */
public void startNewComputation(int a_player, GameState gs) throws Exception {
    player = a_player;
    current_iteration = 0;
    tree = new NaiveMCTSNode(player, 1-player, gs, null, ef.upperBound(gs), current_iteration++, forceExplorationOfNonSampledActions);
    // moveGenerator is null when the root turned out to be terminal:
    if (tree.moveGenerator==null) {
        max_actions_so_far = 0;
    } else {
        // track the largest branching factor observed (for statistics):
        max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
    }
    gs_to_start_from = gs;
    // reset the (possibly discounted) epsilons for the new search:
    epsilon_l = initial_epsilon_l;
    epsilon_g = initial_epsilon_g;
    epsilon_0 = initial_epsilon_0;
}
/**
 * Discards the current tree and root state without touching the accumulated
 * statistics. Part of the InterruptibleAI protocol.
 */
public void resetSearch() {
    if (DEBUG >= 2) {
        System.out.println("Resetting search...");
    }
    tree = null;
    gs_to_start_from = null;
}
/**
 * Runs search iterations until either the time budget or the iterations
 * budget is exhausted, whichever comes first (a budget of -1 means
 * "unlimited"). Updates the timing statistics. Part of the InterruptibleAI
 * protocol.
 */
public void computeDuringOneGameFrame() throws Exception {
    if (DEBUG>=2) System.out.println("Search...");
    long start = System.currentTimeMillis();
    long end = start;
    long count = 0;
    while(true) {
        // iteration() returns false when there is nothing left to explore:
        if (!iteration(player)) break;
        count++;
        end = System.currentTimeMillis();
        if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
        if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
    }
    //        System.out.println("HL: " + count + " time: " + (System.currentTimeMillis() - start) + " (" + available_time + "," + max_playouts + ")");
    total_time += (end - start);
    total_cycles_executed++;
}
/**
 * Performs one MCTS iteration: selects a leaf via naive sampling, runs a
 * playout from it with the default policy, evaluates the resulting state
 * (scaled by 0.99^(elapsed/10) so outcomes reached sooner weigh more),
 * back-propagates the evaluation, and applies the epsilon discounts.
 *
 * @param player the player being optimized for
 * @return false if no leaf could be selected (nothing left to explore)
 */
public boolean iteration(int player) throws Exception {
    NaiveMCTSNode leaf = tree.selectLeaf(player, 1-player, epsilon_l, epsilon_g, epsilon_0, global_strategy, MAX_TREE_DEPTH, current_iteration++);
    if (leaf!=null) {
        GameState gs2 = leaf.gs.clone();
        simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
        // elapsed game time from the root, used to discount the evaluation:
        int time = gs2.getTime() - gs_to_start_from.getTime();
        double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
        leaf.propagateEvaluation(evaluation,null);
        // update the epsilon values:
        epsilon_0*=discount_0;
        epsilon_l*=discount_l;
        epsilon_g*=discount_g;
        total_runs++;
        //            System.out.println(total_runs + " - " + epsilon_0 + ", " + epsilon_l + ", " + epsilon_g);
    } else {
        // no actions to choose from :)
        System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
        return false;
    }
    return true;
}
/**
 * Returns the player-action of the most-visited child of the root, or an
 * empty PlayerAction when the root has no explored children. Part of the
 * InterruptibleAI protocol.
 */
public PlayerAction getBestActionSoFar() {
    int idx = getMostVisitedActionIdx();
    if (idx==-1) {
        // fixed typo in the debug message ("asction" -> "action"):
        if (DEBUG>=1) System.out.println("NaiveMCTS no children selected. Returning an empty action");
        return new PlayerAction();
    }
    if (DEBUG>=2) tree.showNode(0,1,ef);
    if (DEBUG>=1) {
        NaiveMCTSNode best = (NaiveMCTSNode) tree.children.get(idx);
        System.out.println("NaiveMCTS selected children " + tree.actions.get(idx) + " explored " + best.visit_count + " Avg evaluation: " + (best.accum_evaluation/((double)best.visit_count)));
    }
    return tree.actions.get(idx);
}
/**
 * Returns the index of the root child with the highest visit count, or -1
 * when the root has no children. Also counts this call as one issued action
 * in the statistics.
 */
public int getMostVisitedActionIdx() {
    total_actions_issued++;
    if (DEBUG>=2) {
        System.out.println("Number of playouts: " + tree.visit_count);
        tree.printUnitActionTable();
    }
    if (tree.children==null) return -1;
    int bestIdx = -1;
    int bestVisits = -1;
    for (int i = 0; i < tree.children.size(); i++) {
        NaiveMCTSNode child = (NaiveMCTSNode) tree.children.get(i);
        if (DEBUG>=2) {
            System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
        }
        // strictly-greater comparison keeps the earliest child on ties,
        // exactly as before:
        if (child.visit_count > bestVisits) {
            bestVisits = child.visit_count;
            bestIdx = i;
        }
    }
    return bestIdx;
}
/**
 * Returns the index of the root child with the highest average evaluation,
 * or -1 when the root has no children. Also counts this call as one issued
 * action in the statistics.
 */
public int getHighestEvaluationActionIdx() {
    total_actions_issued++;
    if (DEBUG>=2) {
        System.out.println("Number of playouts: " + tree.visit_count);
        tree.printUnitActionTable();
    }
    // Guard against a root with no expanded children; getMostVisitedActionIdx
    // already had this check, so this makes the two selectors consistent and
    // avoids a NullPointerException on a terminal root.
    if (tree.children==null) return -1;
    int bestIdx = -1;
    NaiveMCTSNode best = null;
    for(int i = 0;i<tree.children.size();i++) {
        NaiveMCTSNode child = (NaiveMCTSNode)tree.children.get(i);
        if (DEBUG>=2) {
            System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
        }
        if (best == null || (child.accum_evaluation/((double)child.visit_count))>(best.accum_evaluation/((double)best.visit_count))) {
            best = child;
            bestIdx = i;
        }
    }
    return bestIdx;
}
/**
 * Advances the given state using the playout policy for both players until
 * the game ends or the given absolute game time is reached. The loop body
 * always executes at least once (do/while), as in the original.
 *
 * @param gs   state to advance in place
 * @param time absolute game time at which to stop the playout
 */
public void simulate(GameState gs, int time) throws Exception {
    boolean gameover;
    do {
        if (!gs.isComplete()) {
            // someone still needs an action: ask the playout policy for both players
            gs.issue(playoutPolicy.getAction(0, gs));
            gs.issue(playoutPolicy.getAction(1, gs));
            gameover = false;
        } else {
            gameover = gs.cycle();
        }
    } while (!gameover && gs.getTime() < time);
}
/** Returns the root of the current search tree (null before any computation). */
public NaiveMCTSNode getTree() {
    return tree;
}
/** Returns the game state the current search started from (null before any computation). */
public GameState getGameStateToStartFrom() {
    return gs_to_start_from;
}
/** Human-readable description of this AI including its full configuration. */
@Override
public String toString() {
    return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + "," + MAX_TREE_DEPTH + "," + epsilon_l + ", " + discount_l + ", " + epsilon_g + ", " + discount_g + ", " + epsilon_0 + ", " + discount_0 + ", " + playoutPolicy + ", " + ef + ")";
}
/** Summary of the lifetime search statistics accumulated by this AI. */
@Override
public String statisticsString() {
    return "Total runs: " + total_runs +
            ", runs per action: " + (total_runs/(float)total_actions_issued) +
            ", runs per cycle: " + (total_runs/(float)total_cycles_executed) +
            ", average time per cycle: " + (total_time/(float)total_cycles_executed) +
            ", max branching factor: " + max_actions_so_far;
}
/**
 * Declares the tunable parameters of this AI (names, types and defaults),
 * matching the getter/setter pairs below. Used by configuration GUIs and
 * experiment harnesses.
 */
@Override
public List<ParameterSpecification> getParameters() {
    List<ParameterSpecification> parameters = new ArrayList<>();
    parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
    parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
    parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
    parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
    parameters.add(new ParameterSpecification("E_l",float.class,0.3));
    parameters.add(new ParameterSpecification("Discount_l",float.class,1.0));
    parameters.add(new ParameterSpecification("E_g",float.class,0.0));
    parameters.add(new ParameterSpecification("Discount_g",float.class,1.0));
    parameters.add(new ParameterSpecification("E_0",float.class,0.4));
    parameters.add(new ParameterSpecification("Discount_0",float.class,1.0));
    parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, playoutPolicy));
    parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
    parameters.add(new ParameterSpecification("ForceExplorationOfNonSampledActions",boolean.class,true));
    return parameters;
}
// --- trivial getters/setters backing the ParameterSpecification entries above ---
public int getPlayoutLookahead() {
    return MAXSIMULATIONTIME;
}
public void setPlayoutLookahead(int a_pola) {
    MAXSIMULATIONTIME = a_pola;
}
public int getMaxTreeDepth() {
    return MAX_TREE_DEPTH;
}
public void setMaxTreeDepth(int a_mtd) {
    MAX_TREE_DEPTH = a_mtd;
}
public float getE_l() {
    return epsilon_l;
}
public void setE_l(float a_e_l) {
    epsilon_l = a_e_l;
}
public float getDiscount_l() {
    return discount_l;
}
public void setDiscount_l(float a_discount_l) {
    discount_l = a_discount_l;
}
public float getE_g() {
    return epsilon_g;
}
public void setE_g(float a_e_g) {
    epsilon_g = a_e_g;
}
public float getDiscount_g() {
    return discount_g;
}
public void setDiscount_g(float a_discount_g) {
    discount_g = a_discount_g;
}
public float getE_0() {
    return epsilon_0;
}
public void setE_0(float a_e_0) {
    epsilon_0 = a_e_0;
}
public float getDiscount_0() {
    return discount_0;
}
public void setDiscount_0(float a_discount_0) {
    discount_0 = a_discount_0;
}
public AI getDefaultPolicy() {
    return playoutPolicy;
}
public void setDefaultPolicy(AI a_dp) {
    playoutPolicy = a_dp;
}
public EvaluationFunction getEvaluationFunction() {
    return ef;
}
public void setEvaluationFunction(EvaluationFunction a_ef) {
    ef = a_ef;
}
public boolean getForceExplorationOfNonSampledActions() {
    return forceExplorationOfNonSampledActions;
}
public void setForceExplorationOfNonSampledActions(boolean fensa)
{
    forceExplorationOfNonSampledActions = fensa;
}
}
| 15,702 | 32.625268 | 296 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/NaiveMCTSNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import ai.mcts.MCTSNode;
import java.math.BigInteger;
import java.util.*;
import rts.*;
import rts.units.Unit;
import util.Pair;
import util.Sampler;
/**
*
* @author santi
*/
/**
 * A node of the NaiveMCTS search tree. In addition to the usual MCTS
 * statistics inherited from MCTSNode, each node keeps one local multi-armed
 * bandit (MAB) per unit that can act in this state (the unitActionTable),
 * plus a global MAB over the player-actions sampled so far (children /
 * childrenMap). Each sampled player-action is identified by a BigInteger code
 * computed from the index of the unit-action chosen for each unit (see
 * multipliers), so a combination sampled twice maps to the same child.
 *
 * @author santi
 */
public class NaiveMCTSNode extends MCTSNode {
    // strategies available for the global MAB:
    public static final int E_GREEDY = 0;
    public static final int UCB1 = 1;

    static public int DEBUG = 0;

    public static float C = 0.05f;  // exploration constant for UCB1

    // when true, local MABs put zero probability on already-visited arms
    // while at least one arm of that unit remains unsampled:
    boolean forceExplorationOfNonSampledActions = true;
    // NOTE(review): never modified in this class — presumably maintained elsewhere or vestigial
    boolean hasMoreActions = true;
    public PlayerActionGenerator moveGenerator;
    HashMap<BigInteger,NaiveMCTSNode> childrenMap = new LinkedHashMap<>();    // associates action codes with children

    // Decomposition of the player actions in unit actions, and their contributions:
    public List<UnitActionTableEntry> unitActionTable;

    double evaluation_bound;    // this is the maximum positive value that the evaluation function can return
    // multipliers[i] is the positional weight of unit i's choice in the action code
    public BigInteger multipliers[];

    /**
     * Builds a node for the given state. The state is fast-forwarded
     * (gs.cycle()) until the game ends or either player can act. Then type is
     * set: 0 = max player to move, 1 = min player to move, -1 = terminal.
     * For the player to move, one local MAB entry is created per unit with
     * pending choices, and the BigInteger multipliers used to encode sampled
     * player-actions are initialized.
     */
    public NaiveMCTSNode(int maxplayer, int minplayer, GameState a_gs, NaiveMCTSNode a_parent, double a_evaluation_bound, int a_creation_ID, boolean fensa) throws Exception {
        parent = a_parent;
        gs = a_gs;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        evaluation_bound = a_evaluation_bound;
        creation_ID = a_creation_ID;
        forceExplorationOfNonSampledActions = fensa;
        // fast-forward until someone can act or the game is over:
        while (gs.winner() == -1 &&
               !gs.gameover() &&
               !gs.canExecuteAnyAction(maxplayer) &&
               !gs.canExecuteAnyAction(minplayer)) {
            gs.cycle();
        }
        if (gs.winner() != -1 || gs.gameover()) {
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            type = 0;
            moveGenerator = new PlayerActionGenerator(gs, maxplayer);
            actions = new ArrayList<>();
            children = new ArrayList<>();
            unitActionTable = new LinkedList<>();
            multipliers = new BigInteger[moveGenerator.getChoices().size()];
            BigInteger baseMultiplier = BigInteger.ONE;
            int idx = 0;
            // one local MAB entry per unit that can act:
            for (Pair<Unit, List<UnitAction>> choice : moveGenerator.getChoices()) {
                UnitActionTableEntry ae = new UnitActionTableEntry();
                ae.u = choice.m_a;
                ae.nactions = choice.m_b.size();
                ae.actions = choice.m_b;
                ae.accum_evaluation = new double[ae.nactions];
                ae.visit_count = new int[ae.nactions];
                for (int i = 0; i < ae.nactions; i++) {
                    ae.accum_evaluation[i] = 0;
                    ae.visit_count[i] = 0;
                }
                unitActionTable.add(ae);
                multipliers[idx] = baseMultiplier;
                baseMultiplier = baseMultiplier.multiply(BigInteger.valueOf(ae.nactions));
                idx++;
            }
        } else if (gs.canExecuteAnyAction(minplayer)) {
            type = 1;
            moveGenerator = new PlayerActionGenerator(gs, minplayer);
            actions = new ArrayList<>();
            children = new ArrayList<>();
            unitActionTable = new LinkedList<>();
            multipliers = new BigInteger[moveGenerator.getChoices().size()];
            BigInteger baseMultiplier = BigInteger.ONE;
            int idx = 0;
            // same initialization as above, for the min player:
            for (Pair<Unit, List<UnitAction>> choice : moveGenerator.getChoices()) {
                UnitActionTableEntry ae = new UnitActionTableEntry();
                ae.u = choice.m_a;
                ae.nactions = choice.m_b.size();
                ae.actions = choice.m_b;
                ae.accum_evaluation = new double[ae.nactions];
                ae.visit_count = new int[ae.nactions];
                for (int i = 0; i < ae.nactions; i++) {
                    ae.accum_evaluation[i] = 0;
                    ae.visit_count[i] = 0;
                }
                unitActionTable.add(ae);
                multipliers[idx] = baseMultiplier;
                baseMultiplier = baseMultiplier.multiply(BigInteger.valueOf(ae.nactions));
                idx++;
            }
        } else {
            type = -1;
            System.err.println("NaiveMCTSNode: This should not have happened...");
        }
    }

    // Naive Sampling:
    /**
     * Descends the tree to select the leaf where the next playout will start.
     * With probability (1 - epsilon_0), and when at least one child exists,
     * samples from the global MAB (epsilon-greedy or UCB1 depending on
     * global_strategy) and recurses; otherwise samples a player-action from
     * the local MABs. Returns this node when it is terminal (no
     * unitActionTable) or the maximum depth has been reached.
     */
    public NaiveMCTSNode selectLeaf(int maxplayer, int minplayer, float epsilon_l, float epsilon_g, float epsilon_0, int global_strategy, int max_depth, int a_creation_ID) throws Exception {
        if (unitActionTable == null) return this;
        if (depth>=max_depth) return this;

        /*
        // DEBUG:
        for(PlayerAction a:actions) {
            for(Pair<Unit,UnitAction> tmp:a.getActions()) {
                if (!gs.getUnits().contains(tmp.m_a)) new Error("DEBUG!!!!");
                boolean found = false;
                for(UnitActionTableEntry e:unitActionTable) {
                    if (e.u == tmp.m_a) found = true;
                }
                if (!found) new Error("DEBUG 2!!!!!");
            }
        }
        */

        if (children.size()>0 && r.nextFloat()>=epsilon_0) {
            // sample from the global MAB:
            NaiveMCTSNode selected = null;
            if (global_strategy==E_GREEDY) selected = selectFromAlreadySampledEpsilonGreedy(epsilon_g);
            else if (global_strategy==UCB1) selected = selectFromAlreadySampledUCB1(C);
            return selected.selectLeaf(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
        } else {
            // sample from the local MABs (this might recursively call "selectLeaf" internally):
            return selectLeafUsingLocalMABs(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
        }
    }

    /**
     * Epsilon-greedy choice among the already-sampled children: with
     * probability (1 - epsilon_g) picks the child with the best average
     * evaluation (highest for a max node, lowest for a min node), otherwise a
     * uniformly random child.
     */
    public NaiveMCTSNode selectFromAlreadySampledEpsilonGreedy(float epsilon_g) throws Exception {
        if (r.nextFloat()>=epsilon_g) {
            NaiveMCTSNode best = null;
            for(MCTSNode pate:children) {
                if (type==0) {
                    // max node:
                    if (best==null || (pate.accum_evaluation/pate.visit_count)>(best.accum_evaluation/best.visit_count)) {
                        best = (NaiveMCTSNode)pate;
                    }
                } else {
                    // min node:
                    if (best==null || (pate.accum_evaluation/pate.visit_count)<(best.accum_evaluation/best.visit_count)) {
                        best = (NaiveMCTSNode)pate;
                    }
                }
            }
            return best;
        } else {
            // choose one at random from the ones seen so far:
            NaiveMCTSNode best = (NaiveMCTSNode)children.get(r.nextInt(children.size()));
            return best;
        }
    }

    /**
     * UCB1 choice among the already-sampled children, with the exploitation
     * term normalized into [0,1] using evaluation_bound (and flipped for min
     * nodes) and weighted by the exploration constant C.
     */
    public NaiveMCTSNode selectFromAlreadySampledUCB1(float C) throws Exception {
        NaiveMCTSNode best = null;
        double bestScore = 0;
        for(MCTSNode pate:children) {
            double exploitation = ((double)pate.accum_evaluation) / pate.visit_count;
            double exploration = Math.sqrt(Math.log((double)visit_count)/pate.visit_count);
            if (type==0) {
                // max node:
                exploitation = (evaluation_bound + exploitation)/(2*evaluation_bound);
            } else {
                exploitation = (evaluation_bound - exploitation)/(2*evaluation_bound);
            }
            //            System.out.println(exploitation + " + " + exploration);
            double tmp = C*exploitation + exploration;
            if (best==null || tmp>bestScore) {
                best = (NaiveMCTSNode)pate;
                bestScore = tmp;
            }
        }
        return best;
    }

    /**
     * Samples one player-action by combining per-unit choices drawn from the
     * local MABs (epsilon-greedy per unit), resolving resource conflicts by
     * re-sampling among the still-consistent unit-actions. The combination is
     * encoded into a BigInteger action code; if that code was already sampled,
     * selection recurses into the existing child, otherwise a new child node
     * is created and returned as the leaf.
     */
    public NaiveMCTSNode selectLeafUsingLocalMABs(int maxplayer, int minplayer, float epsilon_l, float epsilon_g, float epsilon_0, int global_strategy, int max_depth, int a_creation_ID) throws Exception {
        PlayerAction pa2;
        BigInteger actionCode;

        // For each unit, rank the unitActions according to preference:
        List<double []> distributions = new LinkedList<>();
        List<Integer> notSampledYet = new LinkedList<>();
        for(UnitActionTableEntry ate:unitActionTable) {
            double []dist = new double[ate.nactions];
            int bestIdx = -1;
            double bestEvaluation = 0;
            int visits = 0;
            for(int i = 0;i<ate.nactions;i++) {
                if (type==0) {
                    // max node:
                    if (bestIdx==-1 ||
                        (visits!=0 && ate.visit_count[i]==0) ||
                        (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])>bestEvaluation)) {
                        bestIdx = i;
                        if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                        else bestEvaluation = 0;
                        visits = ate.visit_count[i];
                    }
                } else {
                    // min node:
                    if (bestIdx==-1 ||
                        (visits!=0 && ate.visit_count[i]==0) ||
                        (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])<bestEvaluation)) {
                        bestIdx = i;
                        if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                        else bestEvaluation = 0;
                        visits = ate.visit_count[i];
                    }
                }
                // every arm gets the uniform epsilon_l share:
                dist[i] = epsilon_l/ate.nactions;
            }
            if (ate.visit_count[bestIdx]!=0) {
                // the greedy arm gets the remaining (1 - epsilon_l) mass:
                dist[bestIdx] = (1-epsilon_l) + (epsilon_l/ate.nactions);
            } else {
                if (forceExplorationOfNonSampledActions) {
                    // zero out all visited arms so an unsampled one must be chosen:
                    for(int j = 0;j<dist.length;j++)
                        if (ate.visit_count[j]>0) dist[j] = 0;
                }
            }

            if (DEBUG>=3) {
                System.out.print("[ ");
                for(int i = 0;i<ate.nactions;i++) System.out.print("(" + ate.visit_count[i] + "," + ate.accum_evaluation[i]/ate.visit_count[i] + ")");
                System.out.println("]");
                System.out.print("[ ");
                for (double v : dist) System.out.print(v + " ");
                System.out.println("]");
            }

            notSampledYet.add(distributions.size());
            distributions.add(dist);
        }

        // Select the best combination that results in a valid playeraction by epsilon-greedy sampling:
        ResourceUsage base_ru = new ResourceUsage();
        for(Unit u:gs.getUnits()) {
            UnitAction ua = gs.getUnitAction(u);
            if (ua!=null) {
                ResourceUsage ru = ua.resourceUsage(u, gs.getPhysicalGameState());
                base_ru.merge(ru);
            }
        }

        pa2 = new PlayerAction();
        actionCode = BigInteger.ZERO;
        pa2.setResourceUsage(base_ru.clone());
        // units are assigned in random order so no unit is systematically favored:
        while(!notSampledYet.isEmpty()) {
            int i = notSampledYet.remove(r.nextInt(notSampledYet.size()));

            try {
                UnitActionTableEntry ate = unitActionTable.get(i);
                int code;
                UnitAction ua;
                ResourceUsage r2;

                // try one at random:
                double []distribution = distributions.get(i);
                code = Sampler.weighted(distribution);
                ua = ate.actions.get(code);
                r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                if (!pa2.getResourceUsage().consistentWith(r2, gs)) {
                    // sample at random, eliminating the ones that have not worked so far:
                    List<Double> dist_l = new ArrayList<>();
                    List<Integer> dist_outputs = new ArrayList<>();
                    for(int j = 0;j<distribution.length;j++) {
                        dist_l.add(distribution[j]);
                        dist_outputs.add(j);
                    }
                    do{
                        int idx = dist_outputs.indexOf(code);
                        dist_l.remove(idx);
                        dist_outputs.remove(idx);
                        code = (Integer)Sampler.weighted(dist_l, dist_outputs);
                        ua = ate.actions.get(code);
                        r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                    }while(!pa2.getResourceUsage().consistentWith(r2, gs));
                }

                // DEBUG code:
                if (gs.getUnit(ate.u.getID())==null) throw new Error("Issuing an action to an inexisting unit!!!");

                pa2.getResourceUsage().merge(r2);
                pa2.addUnitAction(ate.u, ua);
                // fold this unit's choice into the combination's action code:
                actionCode = actionCode.add(BigInteger.valueOf(code).multiply(multipliers[i]));
            } catch(Exception e) {
                e.printStackTrace();
            }
        }

        NaiveMCTSNode pate = childrenMap.get(actionCode);
        if (pate==null) {
            // first time this combination is sampled: create a new child
            actions.add(pa2);
            GameState gs2 = gs.cloneIssue(pa2);
            NaiveMCTSNode node = new NaiveMCTSNode(maxplayer, minplayer, gs2.clone(), this, evaluation_bound, a_creation_ID, forceExplorationOfNonSampledActions);
            childrenMap.put(actionCode,node);
            children.add(node);
            return node;
        }

        return pate.selectLeaf(maxplayer, minplayer, epsilon_l, epsilon_g, epsilon_0, global_strategy, max_depth, a_creation_ID);
    }

    /** Returns the local MAB entry of the given unit; throws if there is none. */
    public UnitActionTableEntry getActionTableEntry(Unit u) {
        for(UnitActionTableEntry e:unitActionTable) {
            if (e.u == u) return e;
        }
        throw new Error("Could not find Action Table Entry!");
    }

    /**
     * Adds the evaluation to this node's statistics, updates the local MAB
     * entries corresponding to the player-action that led to "child" (when
     * child is non-null), and recurses up to the root.
     */
    public void propagateEvaluation(double evaluation, NaiveMCTSNode child) {
        accum_evaluation += evaluation;
        visit_count++;

        //        if (child!=null) System.out.println(evaluation);

        // update the unitAction table:
        if (child != null) {
            int idx = children.indexOf(child);
            PlayerAction pa = actions.get(idx);

            for (Pair<Unit, UnitAction> ua : pa.getActions()) {
                UnitActionTableEntry actionTable = getActionTableEntry(ua.m_a);
                idx = actionTable.actions.indexOf(ua.m_b);

                if (idx==-1) {
                    System.out.println("Looking for action: " + ua.m_b);
                    System.out.println("Available actions are: " + actionTable.actions);
                }

                actionTable.accum_evaluation[idx] += evaluation;
                actionTable.visit_count[idx]++;
            }
        }

        if (parent != null) {
            ((NaiveMCTSNode)parent).propagateEvaluation(evaluation, this);
        }
    }

    /** Debug helper: dumps every unit's local MAB (visits and averages) to stdout. */
    public void printUnitActionTable() {
        for (UnitActionTableEntry uat : unitActionTable) {
            System.out.println("Actions for unit " + uat.u);
            for (int i = 0; i < uat.nactions; i++) {
                System.out.println("   " + uat.actions.get(i) + " visited " + uat.visit_count[i] + " with average evaluation " + (uat.accum_evaluation[i] / uat.visit_count[i]));
            }
        }
    }
}
| 15,747 | 40.994667 | 207 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/TwoPhaseNaiveMCTS.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import ai.*;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * A NaiveMCTS variant that splits each decision's budget into two phases,
 * each with its own epsilon configuration and global MAB strategy. Phase 1
 * consumes a phase1_ratio fraction of the time/iterations budget, after which
 * the phase-2 epsilons are used.
 *
 * Fixes relative to the previous revision: delegated the duplicated
 * constructor, guarded against a terminal root (null moveGenerator / null
 * children), removed a lossy float cast when propagating evaluations, and
 * fixed two message typos ("asction", "averate").
 *
 * @author santi
 */
public class TwoPhaseNaiveMCTS extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;
    public EvaluationFunction ef;

    Random r = new Random();
    public AI randomAI = new RandomBiasedAI();   // default playout policy
    long max_actions_so_far = 0;                 // largest branching factor observed

    GameState gs_to_start_from;
    NaiveMCTSNode tree;
    int node_creation_ID = 0;
    int n_phase1_iterations_left = -1;   // this is set in the first cycle of execution for each action
    int n_phase1_milliseconds_left = -1; // this is set in the first cycle of execution for each action

    public int MAXSIMULATIONTIME = 1024;
    public int MAX_TREE_DEPTH = 10;

    int playerForThisComputation;

    // phase-1 sampling configuration:
    public float phase1_epsilon_l = 0.3f;
    public float phase1_epsilon_g = 0.0f;
    public float phase1_epsilon_0 = 1.0f;
    // phase-2 sampling configuration:
    public float phase2_epsilon_l = 0.3f;
    public float phase2_epsilon_g = 0.0f;
    public float phase2_epsilon_0 = 0.0f;
    public float phase1_ratio = 0.5f;    // fraction of the budget given to phase 1

    public int phase1_global_strategy = NaiveMCTSNode.E_GREEDY;
    public int phase2_global_strategy = NaiveMCTSNode.E_GREEDY;

    boolean forceExplorationOfNonSampledActions = true;

    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    public long total_time = 0;

    /** Default configuration: 100ms budget, phase1 = pure local sampling, phase2 = pure global. */
    public TwoPhaseNaiveMCTS(UnitTypeTable utt) {
        this(100,-1,100,10,
             0.3f, 0.0f, 1.0f,
             0.3f, 0.0f, 0.0f,
             0.5f,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3(), true);
    }

    /**
     * Full constructor. (el*, eg*, e0*) are the local/global/zero epsilons of
     * each phase; p1_ratio is the fraction of the budget spent in phase 1.
     */
    public TwoPhaseNaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
                             float el1, float eg1, float e01,
                             float el2, float eg2, float e02,
                             float p1_ratio,
                             AI policy, EvaluationFunction a_ef,
                             boolean fensa) {
        super(available_time, max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        phase1_epsilon_l = el1;
        phase1_epsilon_g = eg1;
        phase1_epsilon_0 = e01;
        phase2_epsilon_l = el2;
        phase2_epsilon_g = eg2;
        phase2_epsilon_0 = e02;
        phase1_ratio = p1_ratio;
        ef = a_ef;
        forceExplorationOfNonSampledActions = fensa;
    }

    /**
     * Constructor with explicit global MAB strategies per phase
     * (NaiveMCTSNode.E_GREEDY or NaiveMCTSNode.UCB1). Delegates to the full
     * constructor instead of duplicating every assignment.
     */
    public TwoPhaseNaiveMCTS(int available_time, int max_playouts, int lookahead, int max_depth,
                             float el1, float eg1, float e01, int a_gs1,
                             float el2, float eg2, float e02, int a_gs2,
                             float p1_ratio,
                             AI policy, EvaluationFunction a_ef,
                             boolean fensa) {
        this(available_time, max_playouts, lookahead, max_depth,
             el1, eg1, e01,
             el2, eg2, e02,
             p1_ratio, policy, a_ef, fensa);
        phase1_global_strategy = a_gs1;
        phase2_global_strategy = a_gs2;
    }

    /** Clears the search tree, phase counters and all accumulated statistics. */
    public void reset() {
        tree = null;
        gs_to_start_from = null;
        total_runs = 0;
        total_cycles_executed = 0;
        total_actions_issued = 0;
        total_time = 0;
        node_creation_ID = 0;
        n_phase1_iterations_left = -1;
        n_phase1_milliseconds_left = -1;
    }

    /** Returns a new TwoPhaseNaiveMCTS configured identically (search state is not copied). */
    public AI clone() {
        return new TwoPhaseNaiveMCTS(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH,
                                     phase1_epsilon_l, phase1_epsilon_g, phase1_epsilon_0,
                                     phase2_epsilon_l, phase2_epsilon_g, phase2_epsilon_0,
                                     phase1_ratio, randomAI, ef, forceExplorationOfNonSampledActions);
    }

    /** Computes one action: fresh search on a clone of the state, then best action found. */
    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    /**
     * Initializes a fresh tree rooted at the given state and splits the
     * budget between the two phases according to phase1_ratio.
     */
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        playerForThisComputation = a_player;
        node_creation_ID = 0;
        tree = new NaiveMCTSNode(playerForThisComputation, 1-playerForThisComputation, gs, null, ef.upperBound(gs), node_creation_ID++, forceExplorationOfNonSampledActions);
        // moveGenerator is null when the root turned out to be terminal
        // (same guard as in NaiveMCTS; previously this could NPE):
        if (tree.moveGenerator==null) {
            max_actions_so_far = 0;
        } else {
            max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
        }
        gs_to_start_from = gs;

        n_phase1_iterations_left = -1;
        n_phase1_milliseconds_left = -1;
        if (ITERATIONS_BUDGET>0) n_phase1_iterations_left = (int)(phase1_ratio * ITERATIONS_BUDGET);
        if (TIME_BUDGET>0) n_phase1_milliseconds_left = (int)(phase1_ratio * TIME_BUDGET);
    }

    /** Discards the current tree and phase counters without touching statistics. */
    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
        n_phase1_iterations_left = -1;
        n_phase1_milliseconds_left = -1;
    }

    /**
     * Runs iterations until the time or iterations budget is exhausted,
     * keeping the phase-1 millisecond countdown up to date.
     */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        long end = start;
        long count = 0;
        int n_phase1_milliseconds_left_initial = n_phase1_milliseconds_left;
        while(true) {
            if (n_phase1_milliseconds_left>0) n_phase1_milliseconds_left = n_phase1_milliseconds_left_initial - (int)(end - start);
            if (!iteration(playerForThisComputation)) break;
            count++;
            end = System.currentTimeMillis();
            if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
        }
        if (n_phase1_milliseconds_left>0) n_phase1_milliseconds_left = n_phase1_milliseconds_left_initial - (int)(end - start);
        total_time += (end - start);
        total_cycles_executed++;
    }

    /**
     * One MCTS iteration using the epsilon configuration of whichever phase
     * is currently active.
     *
     * @return false if no leaf could be selected (nothing left to explore)
     */
    public boolean iteration(int player) throws Exception {
        NaiveMCTSNode leaf;
        if (n_phase1_iterations_left>0 || n_phase1_milliseconds_left>0) {
            leaf = tree.selectLeaf(player, 1-player, phase1_epsilon_l, phase1_epsilon_g, phase1_epsilon_0, phase1_global_strategy, MAX_TREE_DEPTH, node_creation_ID++);
            n_phase1_iterations_left--;
        } else {
            leaf = tree.selectLeaf(player, 1-player, phase2_epsilon_l, phase2_epsilon_g, phase2_epsilon_0, phase2_global_strategy, MAX_TREE_DEPTH, node_creation_ID++);
        }

        if (leaf!=null) {
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
            int time = gs2.getTime() - gs_to_start_from.getTime();
            double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
            // propagateEvaluation takes a double; the previous float cast
            // needlessly discarded precision:
            leaf.propagateEvaluation(evaluation,null);
            total_runs++;
        } else {
            // no actions to choose from :)
            System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
            return false;
        }
        return true;
    }

    /** Action of the most-visited root child, or an empty action if none. */
    public PlayerAction getBestActionSoFar() {
        int idx = getMostVisitedActionIdx();
        if (idx==-1) {
            if (DEBUG>=1) System.out.println("TwoPhaseNaiveMCTS no children selected. Returning an empty action");
            return new PlayerAction();
        }
        if (DEBUG>=2) tree.showNode(0,1,ef);
        if (DEBUG>=1) {
            NaiveMCTSNode best = (NaiveMCTSNode) tree.children.get(idx);
            System.out.println("TwoPhaseNaiveMCTS selected children " + tree.actions.get(idx) + " explored " + best.visit_count + " Avg evaluation: " + (best.accum_evaluation/((double)best.visit_count)));
        }
        return tree.actions.get(idx);
    }

    /** Index of the root child with the highest visit count (-1 if none). */
    public int getMostVisitedActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        NaiveMCTSNode best = null;
        if (DEBUG>=2) {
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        // guard against a terminal root (same as NaiveMCTS):
        if (tree.children==null) return -1;
        for(int i = 0;i<tree.children.size();i++) {
            NaiveMCTSNode child = (NaiveMCTSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
            if (best == null || child.visit_count>best.visit_count) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }

    /** Index of the root child with the highest average evaluation (-1 if none). */
    public int getHighestEvaluationActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        NaiveMCTSNode best = null;
        if (DEBUG>=2) {
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        // guard against a terminal root (same as NaiveMCTS):
        if (tree.children==null) return -1;
        for(int i = 0;i<tree.children.size();i++) {
            NaiveMCTSNode child = (NaiveMCTSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
            if (best == null || (child.accum_evaluation/((double)child.visit_count))>(best.accum_evaluation/((double)best.visit_count))) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }

    /**
     * Advances the state with the playout policy for both players until the
     * game ends or the given absolute game time is reached.
     */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;

        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    /** Returns the root of the current search tree (null before any computation). */
    public NaiveMCTSNode getTree() {
        return tree;
    }

    /** Returns the game state the current search started from. */
    public GameState getGameStateToStartFrom() {
        return gs_to_start_from;
    }

    /** Human-readable description of this AI including its full configuration. */
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + "," + MAX_TREE_DEPTH + "," +
               phase1_epsilon_l + ", " + phase1_epsilon_g + ", " + phase1_epsilon_0 + ", " +
               phase2_epsilon_l + ", " + phase2_epsilon_g + ", " + phase2_epsilon_0 + ", " +
               phase1_ratio + ", " + randomAI + ", " + ef + ")";
    }

    /** Summary of the lifetime search statistics accumulated by this AI. */
    public String statisticsString() {
        // fixed typo: "averate" -> "average"
        return "Total runs: " + total_runs +
               ", runs per action: " + (total_runs/(float)total_actions_issued) +
               ", runs per cycle: " + (total_runs/(float)total_cycles_executed) +
               ", average time per cycle: " + (total_time/(float)total_cycles_executed) +
               ", max branching factor: " + max_actions_so_far;
    }

    /** Declares the tunable parameters of this AI, matching the accessors below. */
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();

        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));

        parameters.add(new ParameterSpecification("E1_l",float.class,0.3));
        parameters.add(new ParameterSpecification("E1_g",float.class,0.0));
        parameters.add(new ParameterSpecification("E1_0",float.class,1.0));
        parameters.add(new ParameterSpecification("E2_l",float.class,0.3));
        parameters.add(new ParameterSpecification("E2_g",float.class,0.0));
        parameters.add(new ParameterSpecification("E2_0",float.class,0.0));

        ParameterSpecification ps_ratio = new ParameterSpecification("Phase1_Ratio",float.class,0.5);
        ps_ratio.setRange(0.0, 1.0);
        parameters.add(ps_ratio);

        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));

        parameters.add(new ParameterSpecification("ForceExplorationOfNonSampledActions",boolean.class,true));

        return parameters;
    }

    // --- trivial getters/setters backing the ParameterSpecification entries above ---
    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }

    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }

    public float getE1_l() {
        return phase1_epsilon_l;
    }

    public void setE1_l(float a_e1_l) {
        phase1_epsilon_l = a_e1_l;
    }

    public float getE1_g() {
        return phase1_epsilon_g;
    }

    public void setE1_g(float a_e1_g) {
        phase1_epsilon_g = a_e1_g;
    }

    public float getE1_0() {
        return phase1_epsilon_0;
    }

    public void setE1_0(float a_e1_0) {
        phase1_epsilon_0 = a_e1_0;
    }

    public float getE2_l() {
        return phase2_epsilon_l;
    }

    public void setE2_l(float a_e2_l) {
        phase2_epsilon_l = a_e2_l;
    }

    public float getE2_g() {
        return phase2_epsilon_g;
    }

    public void setE2_g(float a_e2_g) {
        phase2_epsilon_g = a_e2_g;
    }

    public float getE2_0() {
        return phase2_epsilon_0;
    }

    public void setE2_0(float a_e2_0) {
        phase2_epsilon_0 = a_e2_0;
    }

    public float getPhase1_Ratio() {
        return phase1_ratio;
    }

    public void setPhase1_Ratio(float a_p1r) {
        phase1_ratio = a_p1r;
    }

    public AI getDefaultPolicy() {
        return randomAI;
    }

    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }

    public boolean getForceExplorationOfNonSampledActions() {
        return forceExplorationOfNonSampledActions;
    }

    public void setForceExplorationOfNonSampledActions(boolean fensa)
    {
        forceExplorationOfNonSampledActions = fensa;
    }
}
| 16,930 | 33.765914 | 204 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/TwoPhaseNaiveMCTSNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import static ai.mcts.MCTSNode.r;
import static ai.mcts.naivemcts.NaiveMCTSNode.DEBUG;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import rts.*;
import rts.units.Unit;
import util.Sampler;
/**
 * Tree node implementing two-phase naive sampling: identical to a
 * NaiveMCTSNode, except that each node picks its sampling parameters
 * (epsilon_0, epsilon_g, epsilon_l and the global bandit strategy) from one of
 * two configurations, depending on whether the node's own visit_count has
 * reached phase1_budget yet.
 *
 * @author santi
 */
public class TwoPhaseNaiveMCTSNode extends NaiveMCTSNode {

    /** All node state is initialized by the {@link NaiveMCTSNode} constructor. */
    public TwoPhaseNaiveMCTSNode(int maxplayer, int minplayer, GameState a_gs, NaiveMCTSNode a_parent, double a_evaluation_bound, int a_creation_ID, boolean fensa) throws Exception {
        super(maxplayer, minplayer, a_gs, a_parent, a_evaluation_bound, a_creation_ID, fensa);
    }

    // Naive Sampling:
    /**
     * Tree policy: descends the tree using naive sampling until a new leaf is
     * created (or the depth cutoff is hit) and returns that leaf.
     * The e*1/a_gs1 arguments are the phase-1 sampling settings and e*2/a_gs2
     * the phase-2 settings; each node chooses between them by comparing its own
     * visit_count against phase1_budget.
     */
    public TwoPhaseNaiveMCTSNode selectLeaf(int maxplayer, int minplayer, float el1, float eg1, float e01, int a_gs1,
                                            float el2, float eg2, float e02, int a_gs2,
                                            int phase1_budget,
                                            int max_depth, int a_creation_ID) throws Exception {
        if (unitActionTable == null) return this;  // terminal node: nothing to sample
        if (depth>=max_depth) return this;         // depth cutoff
        // pick the sampling parameters according to this node's phase:
        float epsilon_0 = (visit_count<phase1_budget ? e01 : e02);
        float epsilon_g = (visit_count<phase1_budget ? eg1 : eg2);
        int global_strategy = (visit_count<phase1_budget ? a_gs1 : a_gs2);
        if (children.size()>0 && r.nextFloat()>=epsilon_0) {
            // sample from the global MAB:
            TwoPhaseNaiveMCTSNode selected = null;
            if (global_strategy==E_GREEDY) selected = (TwoPhaseNaiveMCTSNode)selectFromAlreadySampledEpsilonGreedy(epsilon_g);
            else if (global_strategy==UCB1) selected = (TwoPhaseNaiveMCTSNode)selectFromAlreadySampledUCB1(C);
            return selected.selectLeaf(maxplayer, minplayer, el1, eg1, e01, a_gs1, el2, eg2, e02, a_gs2, phase1_budget, max_depth, a_creation_ID);
        } else {
            // sample from the local MABs (this might recursively call "selectLeaf" internally):
            return selectLeafUsingLocalMABs(maxplayer, minplayer, el1, eg1, e01, a_gs1, el2, eg2, e02, a_gs2, phase1_budget, max_depth, a_creation_ID);
        }
    }

    /**
     * Builds a player action by sampling one unit action per unit from the
     * per-unit ("local") MABs with an epsilon_l-greedy distribution, keeping
     * the combined action resource-consistent. If the resulting player action
     * was never sampled before, a new child node is created and returned;
     * otherwise the descent continues through the existing child.
     */
    public TwoPhaseNaiveMCTSNode selectLeafUsingLocalMABs(int maxplayer, int minplayer, float el1, float eg1, float e01, int a_gs1,
                                                          float el2, float eg2, float e02, int a_gs2,
                                                          int phase1_budget,
                                                          int max_depth, int a_creation_ID) throws Exception {
        PlayerAction pa2;
        BigInteger actionCode;
        float epsilon_l = (visit_count<phase1_budget ? el1 : el2);  // local epsilon depends on the phase
        // For each unit, rank the unitActions according to preference:
        List<double []> distributions = new LinkedList<>();
        List<Integer> notSampledYet = new LinkedList<>();
        for(UnitActionTableEntry ate:unitActionTable) {
            double []dist = new double[ate.nactions];
            int bestIdx = -1;
            double bestEvaluation = 0;
            int visits = 0;
            for(int i = 0;i<ate.nactions;i++) {
                if (type==0) {
                    // max node:
                    if (bestIdx==-1 ||
                        (visits!=0 && ate.visit_count[i]==0) ||
                        (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])>bestEvaluation)) {
                        bestIdx = i;
                        if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                        else bestEvaluation = 0;
                        visits = ate.visit_count[i];
                    }
                } else {
                    // min node:
                    if (bestIdx==-1 ||
                        (visits!=0 && ate.visit_count[i]==0) ||
                        (visits!=0 && (ate.accum_evaluation[i]/ate.visit_count[i])<bestEvaluation)) {
                        bestIdx = i;
                        if (ate.visit_count[i]>0) bestEvaluation = (ate.accum_evaluation[i]/ate.visit_count[i]);
                        else bestEvaluation = 0;
                        visits = ate.visit_count[i];
                    }
                }
                dist[i] = epsilon_l/ate.nactions;  // spread the epsilon_l mass uniformly
            }
            // put the remaining (1-epsilon_l) mass on the empirically best action;
            // if the best action is still unvisited, zero out every visited action
            // instead, so unvisited actions are sampled first:
            if (ate.visit_count[bestIdx]!=0) {
                dist[bestIdx] = (1-epsilon_l) + (epsilon_l/ate.nactions);
            } else {
                for(int j = 0;j<dist.length;j++)
                    if (ate.visit_count[j]>0) dist[j] = 0;
            }
            if (DEBUG>=3) {
                System.out.print("[ ");
                for(int i = 0;i<ate.nactions;i++) System.out.print("(" + ate.visit_count[i] + "," + ate.accum_evaluation[i]/ate.visit_count[i] + ")");
                System.out.println("]");
                System.out.print("[ ");
                for (double v : dist) System.out.print(v + " ");
                System.out.println("]");
            }
            notSampledYet.add(distributions.size());
            distributions.add(dist);
        }
        // Select the best combination that results in a valid playeraction by epsilon-greedy sampling:
        ResourceUsage base_ru = new ResourceUsage();
        for(Unit u:gs.getUnits()) {
            UnitAction ua = gs.getUnitAction(u);
            if (ua!=null) {
                ResourceUsage ru = ua.resourceUsage(u, gs.getPhysicalGameState());
                base_ru.merge(ru);
            }
        }
        pa2 = new PlayerAction();
        actionCode = BigInteger.ZERO;
        pa2.setResourceUsage(base_ru.clone());
        while(!notSampledYet.isEmpty()) {
            int i = notSampledYet.remove(r.nextInt(notSampledYet.size()));  // process units in random order
            try {
                UnitActionTableEntry ate = unitActionTable.get(i);
                int code;
                UnitAction ua;
                ResourceUsage r2;
                // try one at random:
                double []distribution = distributions.get(i);
                code = Sampler.weighted(distribution);
                ua = ate.actions.get(code);
                r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                if (!pa2.getResourceUsage().consistentWith(r2, gs)) {
                    // sample at random, eliminating the ones that have not worked so far:
                    List<Double> dist_l = new ArrayList<>();
                    List<Integer> dist_outputs = new ArrayList<>();
                    for(int j = 0;j<distribution.length;j++) {
                        dist_l.add(distribution[j]);
                        dist_outputs.add(j);
                    }
                    do{
                        int idx = dist_outputs.indexOf(code);
                        dist_l.remove(idx);
                        dist_outputs.remove(idx);
                        code = (Integer)Sampler.weighted(dist_l, dist_outputs);
                        ua = ate.actions.get(code);
                        r2 = ua.resourceUsage(ate.u, gs.getPhysicalGameState());
                    }while(!pa2.getResourceUsage().consistentWith(r2, gs));
                }
                // DEBUG code:
                if (gs.getUnit(ate.u.getID())==null) throw new Error("Issuing an action to an inexisting unit!!!");
                pa2.getResourceUsage().merge(r2);
                pa2.addUnitAction(ate.u, ua);
                // encode the combined action as a single BigInteger (mixed-radix code):
                actionCode = actionCode.add(BigInteger.valueOf(code).multiply(multipliers[i]));
            } catch(Exception e) {
                e.printStackTrace();
            }
        }
        TwoPhaseNaiveMCTSNode pate = (TwoPhaseNaiveMCTSNode)childrenMap.get(actionCode);
        if (pate==null) {
            // first time this player action is sampled: create a new child node
            actions.add(pa2);
            GameState gs2 = gs.cloneIssue(pa2);
            TwoPhaseNaiveMCTSNode node = new TwoPhaseNaiveMCTSNode(maxplayer, minplayer, gs2.clone(), this, evaluation_bound, a_creation_ID, forceExplorationOfNonSampledActions);
            childrenMap.put(actionCode,node);
            children.add(node);
            return node;
        }
        // child already existed: keep descending through it
        return pate.selectLeaf(maxplayer, minplayer, el1, eg1, e01, a_gs1, el2, eg2, e02, a_gs2, phase1_budget, max_depth, a_creation_ID);
    }
} | 8,751 | 46.308108 | 182 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/TwoPhaseNaiveMCTSPerNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import ai.*;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
 * NaiveMCTS variant whose sampling parameters switch between two
 * configurations ("phases") on a per-node basis: every tree node uses the
 * phase-1 epsilons/global strategy until that node has been visited
 * phase1_budget times, and the phase-2 configuration afterwards
 * (see TwoPhaseNaiveMCTSNode.selectLeaf).
 *
 * @author santi
 */
public class TwoPhaseNaiveMCTSPerNode extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;
    public EvaluationFunction ef;               // evaluation function applied at the end of playouts
    Random r = new Random();
    public AI randomAI = new RandomBiasedAI();  // playout (default) policy
    long max_actions_so_far = 0;                // largest root branching factor seen (statistics)
    GameState gs_to_start_from;                 // state the current search started from
    TwoPhaseNaiveMCTSNode tree;                 // root of the search tree
    int node_creation_ID = 0;                   // counter assigning unique IDs to created nodes
    public int MAXSIMULATIONTIME = 1024;        // playout length in game cycles
    public int MAX_TREE_DEPTH = 10;             // tree policy depth cutoff
    int playerForThisComputation;
    // phase-1 sampling parameters (used while a node's visit count < phase1_budget):
    public float phase1_epsilon_l = 0.3f;
    public float phase1_epsilon_g = 0.0f;
    public float phase1_epsilon_0 = 1.0f;
    // phase-2 sampling parameters (used once a node reaches phase1_budget visits):
    public float phase2_epsilon_l = 0.3f;
    public float phase2_epsilon_g = 0.0f;
    public float phase2_epsilon_0 = 0.0f;
    public int phase1_budget = 100;             // per-node visit count at which phase 2 starts
    public int phase1_global_strategy = NaiveMCTSNode.E_GREEDY;
    public int phase2_global_strategy = NaiveMCTSNode.E_GREEDY;
    boolean forceExplorationOfNonSampledActions = true;
    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    public long total_time = 0;
    /** Creates the AI with default budgets and policies (utt is unused). */
    public TwoPhaseNaiveMCTSPerNode(UnitTypeTable utt) {
        this(100,-1,100,10,
             0.3f, 0.0f, 1.0f,
             0.3f, 0.0f, 0.0f,
             100,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3(), true);
    }
    /** Full constructor keeping both phases on the default E_GREEDY global strategy. */
    public TwoPhaseNaiveMCTSPerNode(int available_time, int max_playouts, int lookahead, int max_depth,
                               float el1, float eg1, float e01,
                               float el2, float eg2, float e02,
                               int p1_budget,
                               AI policy, EvaluationFunction a_ef,
                               boolean fensa) {
        super(available_time, max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        phase1_epsilon_l = el1;
        phase1_epsilon_g = eg1;
        phase1_epsilon_0 = e01;
        phase2_epsilon_l = el2;
        phase2_epsilon_g = eg2;
        phase2_epsilon_0 = e02;
        phase1_budget = p1_budget;
        ef = a_ef;
        forceExplorationOfNonSampledActions = fensa;
    }
    /** Full constructor additionally selecting the global bandit strategy of each phase. */
    public TwoPhaseNaiveMCTSPerNode(int available_time, int max_playouts, int lookahead, int max_depth,
                               float el1, float eg1, float e01, int a_gs1,
                               float el2, float eg2, float e02, int a_gs2,
                               int p1_budget,
                               AI policy, EvaluationFunction a_ef,
                               boolean fensa) {
        super(available_time, max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        phase1_epsilon_l = el1;
        phase1_epsilon_g = eg1;
        phase1_epsilon_0 = e01;
        phase1_global_strategy = a_gs1;
        phase2_epsilon_l = el2;
        phase2_epsilon_g = eg2;
        phase2_epsilon_0 = e02;
        phase2_global_strategy = a_gs2;
        phase1_budget = p1_budget;
        ef = a_ef;
        forceExplorationOfNonSampledActions = fensa;
    }
    /** Discards the current search and clears all statistics counters. */
    public void reset() {
        tree = null;
        gs_to_start_from = null;
        total_runs = 0;
        total_cycles_executed = 0;
        total_actions_issued = 0;
        total_time = 0;
        node_creation_ID = 0;
    }
    /**
     * Returns a fresh instance with the same configuration.
     * NOTE(review): this uses the constructor without global strategies, so
     * phase1_global_strategy/phase2_global_strategy are NOT copied -- confirm
     * whether that is intended.
     */
    public AI clone() {
        return new TwoPhaseNaiveMCTSPerNode(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH,
                                     phase1_epsilon_l, phase1_epsilon_g, phase1_epsilon_0,
                                     phase2_epsilon_l, phase2_epsilon_g, phase2_epsilon_0,
                                     phase1_budget, randomAI, ef, forceExplorationOfNonSampledActions);
    }
    /** Runs one full search within the budget and returns the best action found. */
    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }
    /** Creates a fresh root node for a new search from "gs" (InterruptibleAI protocol). */
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        playerForThisComputation = a_player;
        node_creation_ID = 0;
        tree = new TwoPhaseNaiveMCTSNode(playerForThisComputation, 1-playerForThisComputation, gs, null, ef.upperBound(gs), node_creation_ID++, forceExplorationOfNonSampledActions);
        max_actions_so_far = Math.max(tree.moveGenerator.getSize(),max_actions_so_far);
        gs_to_start_from = gs;
    }
    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
    }
    /** Runs iterations until either the time or the iteration budget is exhausted. */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        long end = start;
        long count = 0;
        while(true) {
            if (!iteration(playerForThisComputation)) break;
            count++;
            end = System.currentTimeMillis();
            if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
        }
//        System.out.println("HL: " + count + " time: " + (System.currentTimeMillis() - start) + " (" + available_time + "," + max_playouts + ")");
        total_time += (end - start);
        total_cycles_executed++;
    }
    /**
     * One MCTS iteration: select a leaf (passing both phase configurations to
     * the node, which picks per-node), run a playout from it, and propagate
     * the time-discounted evaluation. Returns false if no leaf could be found.
     */
    public boolean iteration(int player) throws Exception {
        TwoPhaseNaiveMCTSNode leaf;
//            System.out.println("  " + n_phase1_iterations_left);
        leaf = tree.selectLeaf(player, 1-player, phase1_epsilon_l, phase1_epsilon_g, phase1_epsilon_0, phase1_global_strategy,
                               phase2_epsilon_l, phase2_epsilon_g, phase2_epsilon_0, phase2_global_strategy,
                               phase1_budget,
                               MAX_TREE_DEPTH, node_creation_ID++);
        if (leaf!=null) {
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
            int time = gs2.getTime() - gs_to_start_from.getTime();
            // discount the evaluation by 0.99 for every 10 cycles of playout depth:
            double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
            leaf.propagateEvaluation((float)evaluation,null);
            total_runs++;
//            System.out.println(total_runs + " - " + epsilon_0 + ", " + epsilon_l + ", " + epsilon_g);
        } else {
            // no actions to choose from :)
            System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
            return false;
        }
        return true;
    }
    /** Returns the most visited root action, or an empty action if the root has no children. */
    public PlayerAction getBestActionSoFar() {
        int idx = getMostVisitedActionIdx();
        if (idx==-1) {
            // (sic: "asction" typo in the debug message is preserved)
            if (DEBUG>=1) System.out.println("TwoPhaseNaiveMCTS no children selected. Returning an empty asction");
            return new PlayerAction();
        }
        if (DEBUG>=2) tree.showNode(0,1,ef);
        if (DEBUG>=1) {
            NaiveMCTSNode best = (NaiveMCTSNode) tree.children.get(idx);
            System.out.println("TwoPhaseNaiveMCTS selected children " + tree.actions.get(idx) + " explored " + best.visit_count + " Avg evaluation: " + (best.accum_evaluation/((double)best.visit_count)));
        }
        return tree.actions.get(idx);
    }
    /** Index of the root child with the highest visit count (-1 if the root has no children). */
    public int getMostVisitedActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        NaiveMCTSNode best = null;
        if (DEBUG>=2) {
//            for(Player p:gs_to_start_from.getPlayers()) {
//                System.out.println("Resources P" + p.getID() + ": " + p.getResources());
//            }
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        for(int i = 0;i<tree.children.size();i++) {
            NaiveMCTSNode child = (NaiveMCTSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
//            if (best == null || (child.accum_evaluation/child.visit_count)>(best.accum_evaluation/best.visit_count)) {
            if (best == null || child.visit_count>best.visit_count) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }
    /** Index of the root child with the highest average evaluation (-1 if the root has no children). */
    public int getHighestEvaluationActionIdx() {
        total_actions_issued++;
        int bestIdx = -1;
        NaiveMCTSNode best = null;
        if (DEBUG>=2) {
//            for(Player p:gs_to_start_from.getPlayers()) {
//                System.out.println("Resources P" + p.getID() + ": " + p.getResources());
//            }
            System.out.println("Number of playouts: " + tree.visit_count);
            tree.printUnitActionTable();
        }
        for(int i = 0;i<tree.children.size();i++) {
            NaiveMCTSNode child = (NaiveMCTSNode)tree.children.get(i);
            if (DEBUG>=2) {
                System.out.println("child " + tree.actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            }
//            if (best == null || (child.accum_evaluation/child.visit_count)>(best.accum_evaluation/best.visit_count)) {
            if (best == null || (child.accum_evaluation/((double)child.visit_count))>(best.accum_evaluation/((double)best.visit_count))) {
                best = child;
                bestIdx = i;
            }
        }
        return bestIdx;
    }
    /** Advances "gs" using the playout policy for both players until "time" or game over. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }
    public NaiveMCTSNode getTree() {
        return tree;
    }
    public GameState getGameStateToStartFrom() {
        return gs_to_start_from;
    }
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + "," + MAX_TREE_DEPTH + "," +
               phase1_epsilon_l + ", " + phase1_epsilon_g + ", " + phase1_epsilon_0 + ", " +
               phase2_epsilon_l + ", " + phase2_epsilon_g + ", " + phase2_epsilon_0 + ", " +
               phase1_budget + ", " + randomAI + ", " + ef + ")";
    }
    /** One-line summary of the accumulated statistics. (sic: "averate" typo preserved) */
    public String statisticsString() {
        return "Total runs: " + total_runs +
               ", runs per action: " + (total_runs/(float)total_actions_issued) +
               ", runs per cycle: " + (total_runs/(float)total_cycles_executed) +
               ", averate time per cycle: " + (total_time/(float)total_cycles_executed) +
               ", max branching factor: " + max_actions_so_far;
    }
    /** Lists the tunable parameters of this AI with their default values. */
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
        parameters.add(new ParameterSpecification("E1_l",float.class,0.3));
        parameters.add(new ParameterSpecification("E1_g",float.class,0.0));
        parameters.add(new ParameterSpecification("E1_0",float.class,1.0));
        parameters.add(new ParameterSpecification("E2_l",float.class,0.3));
        parameters.add(new ParameterSpecification("E2_g",float.class,0.0));
        parameters.add(new ParameterSpecification("E2_0",float.class,0.0));
        parameters.add(new ParameterSpecification("Phase1_Budget",int.class,100));
        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        parameters.add(new ParameterSpecification("ForceExplorationOfNonSampledActions",boolean.class,true));
        return parameters;
    }
    // --- accessors exposed through getParameters() ---
    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }
    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }
    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }
    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }
    public float getE1_l() {
        return phase1_epsilon_l;
    }
    public void setE1_l(float a_e1_l) {
        phase1_epsilon_l = a_e1_l;
    }
    public float getE1_g() {
        return phase1_epsilon_g;
    }
    public void setE1_g(float a_e1_g) {
        phase1_epsilon_g = a_e1_g;
    }
    public float getE1_0() {
        return phase1_epsilon_0;
    }
    public void setE1_0(float a_e1_0) {
        phase1_epsilon_0 = a_e1_0;
    }
    public float getE2_l() {
        return phase2_epsilon_l;
    }
    public void setE2_l(float a_e2_l) {
        phase2_epsilon_l = a_e2_l;
    }
    public float getE2_g() {
        return phase2_epsilon_g;
    }
    public void setE2_g(float a_e2_g) {
        phase2_epsilon_g = a_e2_g;
    }
    public float getE2_0() {
        return phase2_epsilon_0;
    }
    public void setE2_0(float a_e2_0) {
        phase2_epsilon_0 = a_e2_0;
    }
    public int getPhase1_Budget() {
        return phase1_budget;
    }
    public void setPhase1_Budget(int a_p1b) {
        phase1_budget = a_p1b;
    }
    public AI getDefaultPolicy() {
        return randomAI;
    }
    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }
    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }
    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
    public boolean getForceExplorationOfNonSampledActions() {
        return forceExplorationOfNonSampledActions;
    }
    public void setForceExplorationOfNonSampledActions(boolean fensa)
    {
        forceExplorationOfNonSampledActions = fensa;
    }
}
| 15,844 | 32.856838 | 204 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/naivemcts/UnitActionTableEntry.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.naivemcts;
import java.util.List;
import rts.UnitAction;
import rts.units.Unit;
/**
 * Per-unit statistics record for the local multi-armed bandits used by naive
 * sampling: for one unit it stores the candidate unit actions plus, per
 * action, the accumulated evaluation and visit count.
 *
 * @author santi
 */
public class UnitActionTableEntry {
    public Unit u;                     // the unit this entry refers to
    public int nactions = 0;           // number of entries in "actions"
    public List<UnitAction> actions;   // candidate unit actions for u
    public double[] accum_evaluation;  // accumulated playout evaluation per action
    public int[] visit_count;          // number of times each action was sampled
}
| 417 | 18 | 52 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/DownsamplingUCT.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import ai.core.AI;
import ai.RandomBiasedAI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
 * UCT variant that down-samples the set of player actions considered at each
 * tree node to at most MAXACTIONS, making the algorithm usable in states with
 * very large branching factors.
 *
 * @author santi
 */
public class DownsamplingUCT extends AIWithComputationBudget implements InterruptibleAI {
    public static final int DEBUG = 0;
    EvaluationFunction ef;               // evaluation function applied at the end of playouts
    Random r = new Random();
    AI randomAI = new RandomBiasedAI();  // playout policy
    long max_actions_so_far = 0;
    GameState gs_to_start_from;          // state the current search started from
    DownsamplingUCTNode tree;            // root of the search tree
    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    long MAXACTIONS = 100;               // max player actions sampled per tree node
    int MAXSIMULATIONTIME = 1024;        // playout length in game cycles
    int MAX_TREE_DEPTH = 10;             // tree policy depth cutoff
    int playerForThisComputation;
    /** Creates the AI with default budgets and policies (utt is unused). */
    public DownsamplingUCT(UnitTypeTable utt) {
        this(100,-1,100,100,10,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3());
    }
    /**
     * @param available_time time budget per decision in ms (-1: unlimited)
     * @param max_playouts   iteration budget per decision (-1: unlimited)
     * @param lookahead      playout length in game cycles
     * @param maxactions     maximum number of actions sampled per tree node
     * @param max_depth      tree policy depth cutoff
     * @param policy         playout policy
     * @param a_ef           state evaluation function
     */
    public DownsamplingUCT(int available_time, int max_playouts, int lookahead, long maxactions, int max_depth, AI policy, EvaluationFunction a_ef) {
        super(available_time, max_playouts);
        MAXACTIONS = maxactions;
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        ef = a_ef;
    }
    /** Prints average playouts per cycle/action (no-op until both counters are positive). */
    public void printStats() {
        if (total_cycles_executed>0 && total_actions_issued>0) {
            System.out.println("Average runs per cycle: " + ((double)total_runs)/total_cycles_executed);
            System.out.println("Average runs per action: " + ((double)total_runs)/total_actions_issued);
        }
    }
    /** Discards the current tree and start state (statistics counters are kept). */
    public void reset() {
        gs_to_start_from = null;
        tree = null;
    }
    /** Returns a fresh instance with the same configuration (statistics are not copied). */
    public AI clone() {
        return new DownsamplingUCT(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAXACTIONS, MAX_TREE_DEPTH, randomAI, ef);
    }
    /** Runs one full search within the budget and returns the best action found. */
    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }
    /** Creates a fresh root node for a new search from "gs" (InterruptibleAI protocol). */
    public void startNewComputation(int a_player, GameState gs) throws Exception {
        playerForThisComputation = a_player;
        float evaluation_bound = ef.upperBound(gs);
        tree = new DownsamplingUCTNode(playerForThisComputation, 1-playerForThisComputation, gs, null, MAXACTIONS, evaluation_bound);
        gs_to_start_from = gs;
    }
    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
    }
    /**
     * Runs playouts (select leaf / simulate / backpropagate) until either the
     * time or the iteration budget is exhausted.
     */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        long cutOffTime = (TIME_BUDGET>0 ? start + TIME_BUDGET:0);
        long end = start;
        long count = 0;
        while(true) {
            DownsamplingUCTNode leaf = tree.UCTSelectLeaf(playerForThisComputation, 1-playerForThisComputation, MAXACTIONS, cutOffTime, MAX_TREE_DEPTH);
            if (leaf!=null) {
                GameState gs2 = leaf.gs.clone();
                simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
                int time = gs2.getTime() - gs_to_start_from.getTime();
                // discount the evaluation by 0.99 for every 10 cycles of playout depth:
                double evaluation = ef.evaluate(playerForThisComputation, 1-playerForThisComputation, gs2)*Math.pow(0.99,time/10.0);
                // backpropagate the evaluation up to the root:
                while(leaf!=null) {
                    leaf.accum_evaluation += evaluation;
                    leaf.visit_count++;
                    leaf = leaf.parent;
                }
                total_runs++;
            } else {
                // no actions to choose from :)
                System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
                break;
            }
            count++;
            end = System.currentTimeMillis();
            if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
        }
        total_cycles_executed++;
    }
    /** Returns the most visited root action (random fallback if the root has no children). */
    public PlayerAction getBestActionSoFar() {
        total_actions_issued++;
        int mostVisitedIdx = -1;
        DownsamplingUCTNode mostVisited = null;
        for(int i = 0;i<tree.children.size();i++) {
            DownsamplingUCTNode child = tree.children.get(i);
            if (mostVisited == null || child.visit_count>mostVisited.visit_count) {
                mostVisited = child;
                mostVisitedIdx = i;
            }
        }
        if (mostVisitedIdx == -1) {
            System.err.println("DownsamplingUCT.getBestActionSoFar: mostVisitedIdx == -1!!! tree.children.size() = " + tree.children.size());
            return tree.moveGenerator.getRandom();
        }
        if (DEBUG>=2) tree.showNode(0,1);
        if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " selected children " + tree.actions.get(mostVisitedIdx) + " explored " + mostVisited.visit_count + " Avg evaluation: " + (mostVisited.accum_evaluation/((double)mostVisited.visit_count)));
//        printStats();
        return tree.actions.get(mostVisitedIdx);
    }
    /** Advances "gs" using the playout policy for both players until "time" or game over. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }
    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAXACTIONS + ", " + MAX_TREE_DEPTH + ", " + randomAI + ", " + ef + ")";
    }
    /** Lists the tunable parameters of this AI with their default values. */
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxActions",long.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }
    // --- accessors exposed through getParameters() ---
    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }
    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }
    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }
    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }
    public long getMaxActions() {
        return MAXACTIONS;
    }
    public void setMaxActions(long a_ma) {
        MAXACTIONS = a_ma;
    }
    public AI getDefaultPolicy() {
        return randomAI;
    }
    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }
    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }
    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 8,330 | 30.79771 | 263 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/DownsamplingUCTNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
/**
 * Node of the DownsamplingUCT tree. Behaves like a standard UCT node except
 * that at most MAXACTIONS player actions are kept per node (drawn at random
 * when the move generator offers more).
 *
 * @author santi
 */
public class DownsamplingUCTNode {
    public static int DEBUG = 0;
    static Random r = new Random();
//    static float C = 50;   // this is the constant that regulates exploration vs exploitation, it must be tuned for each domain
//    static float C = 5;   // this is the constant that regulates exploration vs exploitation, it must be tuned for each domain
    static float C = 0.05f;   // this is the constant that regulates exploration vs exploitation, it must be tuned for each domain
    public int type;    // 0 : max, 1 : min, -1: Game-over
    DownsamplingUCTNode parent;
    public GameState gs;
    int depth = 0;  // the depth in the tree
    boolean hasMoreActions = true;               // false once every downsampled action has a child
    PlayerActionGenerator moveGenerator;
    public List<PlayerAction> actions;           // downsampled action list (at most ~MAXACTIONS)
    public List<DownsamplingUCTNode> children;   // children, parallel to a prefix of "actions"
    float evaluation_bound = 0;                  // used to normalize the exploitation term
    float accum_evaluation = 0;
    int visit_count = 0;
    /**
     * Fast-forwards the state until some player can act (or the game ends),
     * then sets the node type (max, min, or game-over) and prepares a
     * randomized move generator for the player to move.
     */
    public DownsamplingUCTNode(int maxplayer, int minplayer, GameState a_gs, DownsamplingUCTNode a_parent, long MAXACTIONS, float bound) throws Exception {
        parent = a_parent;
        gs = a_gs;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        evaluation_bound = bound;
        // skip cycles in which neither player can act:
        while(gs.winner()==-1 &&
              !gs.gameover() &&
              !gs.canExecuteAnyAction(maxplayer) &&
              !gs.canExecuteAnyAction(minplayer)) gs.cycle();
        if (gs.winner()!=-1 || gs.gameover()) {
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            type = 0;
            moveGenerator = new PlayerActionGenerator(a_gs, maxplayer);
            moveGenerator.randomizeOrder();
        } else if (gs.canExecuteAnyAction(minplayer)) {
            type = 1;
            moveGenerator = new PlayerActionGenerator(a_gs, minplayer);
            moveGenerator.randomizeOrder();
        } else {
            type = -1;
            System.err.println("RTMCTSNode: This should not have happened...");
        }
    }
    /**
     * Tree policy: lazily downsamples this node's action list, expands one
     * unexpanded action if any remain, and otherwise descends through the
     * child maximizing C*exploitation + exploration, until the depth cutoff.
     */
    public DownsamplingUCTNode UCTSelectLeaf(int maxplayer, int minplayer, long MAXACTIONS, long cutOffTime, int max_depth) throws Exception {
        // Cut the tree policy at a predefined depth
        if (depth>=max_depth) return this;
        // Downsample the number of actions:
        if (moveGenerator!=null && actions==null) {
            actions = new ArrayList<>();
            children = new ArrayList<>();
            if (moveGenerator.getSize()>2*MAXACTIONS) {
                // far more actions than needed: draw MAXACTIONS at random
                for(int i = 0;i<MAXACTIONS;i++) {
                    actions.add(moveGenerator.getRandom());
                }
            } else {
                // enumerate (up to 2*MAXACTIONS), then discard random ones:
                PlayerAction pa = null;
                long count = 0;
                do{
                    pa = moveGenerator.getNextAction(cutOffTime);
                    if (pa!=null) {
                        actions.add(pa);
                        count++;
                        if (count>=2*MAXACTIONS) break; // this is needed since some times, moveGenerator.size() overflows
                    }
                }while(pa!=null);
                while(actions.size()>MAXACTIONS) actions.remove(r.nextInt(actions.size()));
            }
        }
        // Expansion: add a child for the next not-yet-expanded action
        if (hasMoreActions) {
            if (moveGenerator==null) return this;
            if (children.size()>=actions.size()) {
                hasMoreActions = false;
            } else {
                PlayerAction a = actions.get(children.size());
                GameState gs2 = gs.cloneIssue(a);
                DownsamplingUCTNode node = new DownsamplingUCTNode(maxplayer, minplayer, gs2.clone(), this, MAXACTIONS, evaluation_bound);
                children.add(node);
                return node;
            }
        }
        // Bandit policy:
        double best_score = 0;
        DownsamplingUCTNode best = null;
        for (DownsamplingUCTNode child : children) {
            double exploitation = ((double) child.accum_evaluation) / child.visit_count;
            double exploration = Math.sqrt(Math.log((double) visit_count) / child.visit_count);
            // normalize exploitation to [0,1] using the evaluation bound,
            // flipping the sign at min nodes:
            if (type == 0) {
                // max node:
                exploitation = (exploitation + evaluation_bound) / (2 * evaluation_bound);
            } else {
                exploitation = -(exploitation - evaluation_bound) / (2 * evaluation_bound);
            }
            double tmp = C * exploitation + exploration;
            if (best == null || tmp > best_score) {
                best = child;
                best_score = tmp;
            }
        }
        if (best==null) return this;
        return best.UCTSelectLeaf(maxplayer, minplayer, MAXACTIONS, cutOffTime, max_depth);
    }
    /** Debug helper: prints the subtree down to "maxdepth" with visit counts and average evaluations. */
    public void showNode(int depth, int maxdepth) {
        if (children!=null) {
            for(int i = 0;i<children.size();i++) {
                DownsamplingUCTNode child = children.get(i);
                for(int j = 0;j<depth;j++) System.out.print(" ");
                System.out.println("child " + actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
                if (depth<maxdepth) child.showNode(depth+1,maxdepth);
            }
        }
    }
}
| 5,682 | 39.304965 | 174 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCT.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import ai.core.AI;
import ai.RandomBiasedAI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * Monte Carlo Tree Search with UCT over full player actions.
 * Each iteration selects a leaf with the UCB1 tree policy, simulates a playout
 * of up to MAXSIMULATIONTIME cycles with the default policy, and backpropagates
 * the time-discounted evaluation of the resulting state.
 *
 * @author santi
 */
public class UCT extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;

    // Evaluation function applied to the state reached at the end of each playout:
    EvaluationFunction ef;
    Random r = new Random();
    // Default policy used to simulate playouts:
    AI randomAI = new RandomBiasedAI();
    long max_actions_so_far = 0;
    // State from which the current search was started:
    GameState gs_to_start_from;
    // Root of the current search tree:
    public UCTNode tree;

    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    long total_runs_this_move = 0;

    // Playout length, in game cycles:
    int MAXSIMULATIONTIME = 1024;
    // The tree policy is cut at this depth:
    int MAX_TREE_DEPTH = 10;
    int playerForThisComputation;

    /** Creates a UCT player with the default budgets, policy and evaluation function. */
    public UCT(UnitTypeTable utt) {
        this(100,-1,100,10,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3());
    }

    /**
     * @param available_time time budget per move in milliseconds (&lt;=0 disables the time cutoff)
     * @param max_playouts playout budget per move (&lt;=0 disables the playout cutoff)
     * @param lookahead playout length in game cycles
     * @param max_depth maximum depth of the tree policy
     * @param policy default policy used during playouts
     * @param a_ef evaluation function applied at the end of each playout
     */
    public UCT(int available_time, int max_playouts, int lookahead, int max_depth, AI policy, EvaluationFunction a_ef) {
        super(available_time, max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        ef = a_ef;
    }

    /** Returns a one-line summary of the accumulated playout statistics. */
    public String statisticsString() {
        return "Average runs per cycle: " + ((double)total_runs)/total_cycles_executed +
               ", Average runs per action: " + ((double)total_runs)/total_actions_issued;
    }

    /** Prints the accumulated playout statistics (only once some cycles/actions were executed). */
    public void printStats() {
        if (total_cycles_executed>0 && total_actions_issued>0) {
            System.out.println("Average runs per cycle: " + ((double)total_runs)/total_cycles_executed);
            System.out.println("Average runs per action: " + ((double)total_runs)/total_actions_issued);
        }
    }

    public void reset() {
        gs_to_start_from = null;
        tree = null;
        total_runs_this_move = 0;
    }

    public AI clone() {
        return new UCT(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, randomAI, ef);
    }

    /** Convenience entry point: runs one full search and returns the best action found. */
    public PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            // The player cannot act in this state:
            return new PlayerAction();
        }
    }

    public void startNewComputation(int a_player, GameState gs) throws Exception {
        float evaluation_bound = ef.upperBound(gs);
        playerForThisComputation = a_player;
        tree = new UCTNode(playerForThisComputation, 1-playerForThisComputation, gs, null, evaluation_bound);
        gs_to_start_from = gs;
        total_runs_this_move = 0;
    }

    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
        total_runs_this_move = 0;
    }

    /** Runs playouts until either the time budget or the iterations budget is exhausted. */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        int nPlayouts = 0;
        long cutOffTime = start + TIME_BUDGET;
        if (TIME_BUDGET<=0) cutOffTime = 0;
        while(true) {
            // ">=" so that exactly ITERATIONS_BUDGET playouts are executed (the previous
            // ">" ran one playout too many), consistent with UCTFirstPlayUrgency:
            if (cutOffTime>0 && System.currentTimeMillis() >= cutOffTime) break;
            if (ITERATIONS_BUDGET>0 && nPlayouts>=ITERATIONS_BUDGET) break;
            monteCarloRun(playerForThisComputation, cutOffTime);
            nPlayouts++;
        }
        total_cycles_executed++;
    }

    /**
     * Performs one MCTS iteration: selects a leaf with the tree policy, simulates a
     * playout from it, and backpropagates the time-discounted evaluation to the root.
     *
     * @return the evaluation obtained by the playout (0 if no leaf could be selected)
     */
    public double monteCarloRun(int player, long cutOffTime) throws Exception {
        UCTNode leaf = tree.UCTSelectLeaf(player, 1-player, cutOffTime, MAX_TREE_DEPTH);
        if (leaf!=null) {
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
            int time = gs2.getTime() - gs_to_start_from.getTime();
            // Discount the evaluation by how far into the future the playout ended:
            double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
            // Backpropagation:
            while(leaf!=null) {
                leaf.accum_evaluation += evaluation;
                leaf.visit_count++;
                leaf = leaf.parent;
            }
            total_runs++;
            total_runs_this_move++;
            return evaluation;
        } else {
            // no actions to choose from :)
            System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
            return 0;
        }
    }

    /**
     * Returns the action of the most-visited root child, breaking visit-count ties
     * by accumulated evaluation. Returns an empty action if the root has no children.
     */
    public PlayerAction getBestActionSoFar() {
        total_actions_issued++;
        if (tree.children==null) {
            if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " no children selected. Returning an empty action");
            return new PlayerAction();
        }
        int mostVisitedIdx = -1;
        UCTNode mostVisited = null;
        for(int i = 0;i<tree.children.size();i++) {
            UCTNode child = tree.children.get(i);
            if (mostVisited == null || child.visit_count>mostVisited.visit_count ||
                (child.visit_count==mostVisited.visit_count &&
                 child.accum_evaluation > mostVisited.accum_evaluation)) {
                mostVisited = child;
                mostVisitedIdx = i;
            }
        }
        // Guard BEFORE the debug prints: with an empty children list "mostVisited"
        // stays null and the prints below would throw a NullPointerException.
        if (mostVisitedIdx==-1) return new PlayerAction();
        if (DEBUG>=2) tree.showNode(0,1);
        if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " performed " + total_runs_this_move + " playouts.");
        if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " selected children " + tree.actions.get(mostVisitedIdx) + " explored " + mostVisited.visit_count + " Avg evaluation: " + (mostVisited.accum_evaluation/((double)mostVisited.visit_count)));
        return tree.actions.get(mostVisitedIdx);
    }

    /** Gets the best action, evaluates it 'N' times via simulation, and returns the average value. */
    public float getBestActionEvaluation(GameState gs, int player, int N) throws Exception {
        PlayerAction pa = getBestActionSoFar();
        if (pa==null) return 0;
        float accum = 0;
        for(int i = 0;i<N;i++) {
            GameState gs2 = gs.cloneIssue(pa);
            GameState gs3 = gs2.clone();
            simulate(gs3,gs3.getTime() + MAXSIMULATIONTIME);
            int time = gs3.getTime() - gs2.getTime();
            // Discount factor:
            accum += (float)(ef.evaluate(player, 1-player, gs3)*Math.pow(0.99,time/10.0));
        }
        return accum/N;
    }

    /** Advances 'gs' with the default policy until the game is over or cycle 'time' is reached. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAX_TREE_DEPTH + ", " + randomAI + ", " + ef + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }

    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }

    public AI getDefaultPolicy() {
        return randomAI;
    }

    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 9,579 | 31.147651 | 263 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCTFirstPlayUrgency.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import ai.core.AI;
import ai.RandomBiasedAI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * UCT variant with First Play Urgency (FPU): already-expanded children are
 * followed by the bandit policy as long as their UCB value is above the FPU
 * threshold; otherwise a new child is sampled at random
 * (see UCTNodeFirstPlayUrgency.UCTSelectLeaf).
 *
 * @author santi
 */
public class UCTFirstPlayUrgency extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;

    // Evaluation function applied to the state reached at the end of each playout:
    EvaluationFunction ef;
    Random r = new Random();
    // Default policy used to simulate playouts:
    AI randomAI = new RandomBiasedAI();
    long max_actions_so_far = 0;
    // State from which the current search was started:
    GameState gs_to_start_from;
    // Root of the current search tree:
    public UCTNodeFirstPlayUrgency tree;

    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    long total_runs_this_move = 0;

    // Playout length, in game cycles:
    int MAXSIMULATIONTIME = 1024;
    // The tree policy is cut at this depth:
    int MAX_TREE_DEPTH = 10;
    int playerForThisComputation;
    // First Play Urgency threshold passed down to every tree node:
    double FPUvalue = 0;

    /** Creates an FPU-UCT player with the default budgets, policy, evaluation and FPU = 0. */
    public UCTFirstPlayUrgency(UnitTypeTable utt) {
        this(100,-1,100,10,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3(),
             0.0);
    }

    /**
     * @param available_time time budget per move in milliseconds (&lt;=0 disables the time cutoff)
     * @param max_playouts playout budget per move (&lt;=0 disables the playout cutoff)
     * @param lookahead playout length in game cycles
     * @param max_depth maximum depth of the tree policy
     * @param policy default policy used during playouts
     * @param a_ef evaluation function applied at the end of each playout
     * @param a_FPUvalue First Play Urgency threshold
     */
    public UCTFirstPlayUrgency(int available_time, int max_playouts, int lookahead, int max_depth, AI policy, EvaluationFunction a_ef, double a_FPUvalue) {
        super(available_time, max_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        ef = a_ef;
        FPUvalue = a_FPUvalue;
    }

    /** Prints the accumulated playout statistics (only once some cycles/actions were executed). */
    public void printStats() {
        if (total_cycles_executed>0 && total_actions_issued>0) {
            System.out.println("Average runs per cycle: " + ((double)total_runs)/total_cycles_executed);
            System.out.println("Average runs per action: " + ((double)total_runs)/total_actions_issued);
        }
    }

    public void reset() {
        gs_to_start_from = null;
        tree = null;
        total_runs_this_move = 0;
    }

    public AI clone() {
        return new UCTFirstPlayUrgency(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, randomAI, ef, FPUvalue);
    }

    /** Convenience entry point: runs one full search and returns the best action found. */
    public PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            // The player cannot act in this state:
            return new PlayerAction();
        }
    }

    public void startNewComputation(int a_player, GameState gs) throws Exception {
        playerForThisComputation = a_player;
        float evaluation_bound = ef.upperBound(gs);
        tree = new UCTNodeFirstPlayUrgency(playerForThisComputation, 1-playerForThisComputation, gs, null, evaluation_bound, FPUvalue);
        gs_to_start_from = gs;
        total_runs_this_move = 0;
    }

    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
        total_runs_this_move = 0;
    }

    /** Runs playouts until either the time budget or the iterations budget is exhausted. */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        int nPlayouts = 0;
        long cutOffTime = start + TIME_BUDGET;
        if (TIME_BUDGET<=0) cutOffTime = 0;
        while(true) {
            if (cutOffTime>0 && System.currentTimeMillis() >= cutOffTime) break;
            if (ITERATIONS_BUDGET>0 && nPlayouts>=ITERATIONS_BUDGET) break;
            monteCarloRun(playerForThisComputation, cutOffTime);
            nPlayouts++;
        }
        total_cycles_executed++;
    }

    /**
     * Performs one MCTS iteration: selects a leaf with the FPU tree policy, simulates a
     * playout from it, and backpropagates the time-discounted evaluation to the root.
     *
     * @return the evaluation obtained by the playout, or the evaluation of the root
     *         state when no leaf could be selected
     */
    public double monteCarloRun(int player, long cutOffTime) throws Exception {
        UCTNodeFirstPlayUrgency leaf = tree.UCTSelectLeaf(player, 1-player, cutOffTime, MAX_TREE_DEPTH);
        if (leaf!=null) {
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
            int time = gs2.getTime() - gs_to_start_from.getTime();
            // Discount the evaluation by how far into the future the playout ended:
            double evaluation = ef.evaluate(player, 1-player, gs2)*Math.pow(0.99,time/10.0);
            // Backpropagation:
            while(leaf!=null) {
                leaf.accum_evaluation += evaluation;
                leaf.visit_count++;
                leaf = leaf.parent;
            }
            total_runs++;
            total_runs_this_move++;
            return evaluation;
        } else {
            // no actions to choose from! (this can happen in partialy observable games, when we do not see any enemy unit)
            return ef.evaluate(player, 1-player, gs_to_start_from);
        }
    }

    /**
     * Returns the action of the most-visited root child.
     * Returns an empty action if the root has no children.
     */
    public PlayerAction getBestActionSoFar() {
        total_actions_issued++;
        if (tree.children==null) {
            if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " no children selected. Returning an empty action");
            return new PlayerAction();
        }
        int mostVisitedIdx = -1;
        UCTNodeFirstPlayUrgency mostVisited = null;
        for(int i = 0;i<tree.children.size();i++) {
            UCTNodeFirstPlayUrgency child = tree.children.get(i);
            if (mostVisited == null || child.visit_count>mostVisited.visit_count) {
                mostVisited = child;
                mostVisitedIdx = i;
            }
        }
        // Guard BEFORE the debug prints: with an empty children list "mostVisited"
        // stays null and the DEBUG>=1 print below would throw a NullPointerException.
        if (mostVisitedIdx==-1) return new PlayerAction();
        if (DEBUG>=2) {
            System.out.println("--- Tree: ----");
            tree.showNode(0,1);
        }
        if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " performed " + total_runs_this_move + " playouts.");
        if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " selected children " + tree.actions.get(mostVisitedIdx) + " explored " + mostVisited.visit_count + " Avg evaluation: " + (mostVisited.accum_evaluation/((double)mostVisited.visit_count)));
        return tree.actions.get(mostVisitedIdx);
    }

    /** Gets the best action, evaluates it 'N' times via simulation, and returns the average value. */
    public float getBestActionEvaluation(GameState gs, int player, int N) throws Exception {
        PlayerAction pa = getBestActionSoFar();
        if (pa==null) return 0;
        float accum = 0;
        for(int i = 0;i<N;i++) {
            GameState gs2 = gs.cloneIssue(pa);
            GameState gs3 = gs2.clone();
            simulate(gs3,gs3.getTime() + MAXSIMULATIONTIME);
            int time = gs3.getTime() - gs2.getTime();
            // Discount factor:
            accum += (float)(ef.evaluate(player, 1-player, gs3)*Math.pow(0.99,time/10.0));
        }
        return accum/N;
    }

    /** Advances 'gs' with the default policy until the game is over or cycle 'time' is reached. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAX_TREE_DEPTH + ", " + randomAI + ", " + ef + ", " + FPUvalue + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget", int.class, 100));
        parameters.add(new ParameterSpecification("IterationsBudget", int.class, -1));
        parameters.add(new ParameterSpecification("PlayoutLookahead", int.class, 100));
        parameters.add(new ParameterSpecification("MaxTreeDepth", int.class, 10));
        parameters.add(new ParameterSpecification("DefaultPolicy", AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        parameters.add(new ParameterSpecification("FPU", double.class, 0.0));
        return parameters;
    }

    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }

    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }

    public AI getDefaultPolicy() {
        return randomAI;
    }

    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }

    public double getFPU() {
        return FPUvalue;
    }

    public void setFPU(double a_fpu) {
        FPUvalue = a_fpu;
    }
}
| 10,146 | 31.41853 | 263 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCTNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
/**
*
* @author santi
*/
/**
 * Node of the UCT search tree. Max nodes (type 0) expand actions of the max
 * player, min nodes (type 1) actions of the min player; type -1 marks a
 * game-over (terminal) node. Children are expanded one at a time from a
 * randomized PlayerActionGenerator before the bandit policy is applied.
 *
 * @author santi
 */
public class UCTNode {
    static Random r = new Random();
    // Constant regulating exploration vs exploitation; must be tuned for each domain.
    // Note that here it weighs the exploitation term of the UCB formula.
    public static float C = 0.05f;

    public int type;    // 0 : max, 1 : min, -1: Game-over
    UCTNode parent;
    public GameState gs;
    int depth = 0;      // the depth in the tree
    // False once the move generator has been exhausted:
    boolean hasMoreActions = true;
    PlayerActionGenerator moveGenerator;
    // actions.get(i) is the action that led to children.get(i):
    public List<PlayerAction> actions;
    public List<UCTNode> children;
    // Used to normalize evaluations into [0,1] in childValue():
    float evaluation_bound = 0;
    float accum_evaluation = 0;
    int visit_count = 0;

    /**
     * Builds a node for the given state, fast-forwarding the state until the
     * game ends or one of the players can act, and sets the node type accordingly.
     */
    public UCTNode(int maxplayer, int minplayer, GameState a_gs, UCTNode a_parent, float bound) throws Exception {
        parent = a_parent;
        gs = a_gs;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        evaluation_bound = bound;

        // Fast-forward until the game is over or either player can act:
        while(gs.winner()==-1 &&
              !gs.gameover() &&
              !gs.canExecuteAnyAction(maxplayer) &&
              !gs.canExecuteAnyAction(minplayer)) gs.cycle();
        if (gs.winner()!=-1 || gs.gameover()) {
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            type = 0;
            moveGenerator = new PlayerActionGenerator(a_gs, maxplayer);
            moveGenerator.randomizeOrder();
            actions = new ArrayList<>();
            children = new ArrayList<>();
        } else if (gs.canExecuteAnyAction(minplayer)) {
            type = 1;
            moveGenerator = new PlayerActionGenerator(a_gs, minplayer);
            moveGenerator.randomizeOrder();
            actions = new ArrayList<>();
            children = new ArrayList<>();
        } else {
            type = -1;
            System.err.println("RTMCTSNode: This should not have happened...");
        }
    }

    /**
     * Tree policy: descends the tree, expanding one untried action per node
     * before applying the UCB bandit policy, and returns the leaf to simulate
     * from (this node itself once max_depth is reached or nothing can be expanded).
     */
    public UCTNode UCTSelectLeaf(int maxplayer, int minplayer, long cutOffTime, int max_depth) throws Exception {
        // Cut the tree policy at a predefined depth
        if (depth>=max_depth) return this;

        // if non visited children, visit:
        if (hasMoreActions) {
            if (moveGenerator==null) return this;
            PlayerAction a = moveGenerator.getNextAction(cutOffTime);
            if (a!=null) {
                actions.add(a);
                GameState gs2 = gs.cloneIssue(a);
                UCTNode node = new UCTNode(maxplayer, minplayer, gs2.clone(), this, evaluation_bound);
                children.add(node);
                return node;
            } else {
                hasMoreActions = false;
            }
        }

        // Bandit policy:
        double best_score = 0;
        UCTNode best = null;
        for (UCTNode child : children) {
            double tmp = childValue(child);
            if (best==null || tmp>best_score) {
                best = child;
                best_score = tmp;
            }
        }
        if (best==null) return this;
        return best.UCTSelectLeaf(maxplayer, minplayer, cutOffTime, max_depth);
    }

    /**
     * UCB value of a child: the average evaluation normalized into [0,1]
     * (negated for min nodes), weighted by C, plus the exploration term.
     */
    public double childValue(UCTNode child) {
        double exploitation = ((double)child.accum_evaluation) / child.visit_count;
        double exploration = Math.sqrt(Math.log((double)visit_count)/child.visit_count);
        if (type==0) {
            // max node:
            exploitation = (evaluation_bound + exploitation)/(2*evaluation_bound);
        } else {
            exploitation = (evaluation_bound - exploitation)/(2*evaluation_bound);
        }
        double tmp = C*exploitation + exploration;
        return tmp;
    }

    /**
     * Prints each explored child (visit count, average evaluation, action),
     * indented by depth, recursing up to maxdepth.
     */
    public void showNode(int depth, int maxdepth) {
        for(int i = 0;i<children.size();i++) {
            UCTNode child = children.get(i);
            for(int j = 0;j<depth;j++) System.out.print(" ");
            System.out.println("child explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)) + " : " + actions.get(i));
            if (depth<maxdepth) child.showNode(depth+1,maxdepth);
        }
    }
}
| 5,148 | 35.51773 | 172 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCTNodeFirstPlayUrgency.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
/**
*
* @author santi
*/
/**
 * Node of the First Play Urgency UCT tree. Unlike plain UCTNode, already
 * expanded children are only followed if their UCB value is above the FPU
 * threshold; otherwise a new child is sampled at random from the move
 * generator (tracked in childrenMap to avoid duplicates).
 *
 * @author santi
 */
public class UCTNodeFirstPlayUrgency {
    public static int DEBUG = 0;
    static Random r = new Random();
    // Constant regulating exploration vs exploitation; must be tuned for each domain:
    public static float C = 0.05f;

    public int type;    // 0 : max, 1 : min, -1: Game-over
    UCTNodeFirstPlayUrgency parent;
    public GameState gs;
    int depth = 0;      // the depth in the tree
    boolean hasMoreActions = true;
    PlayerActionGenerator moveGenerator;
    // actions.get(i) is the action that led to children.get(i):
    public List<PlayerAction> actions;
    HashMap<Long,UCTNodeFirstPlayUrgency> childrenMap = new LinkedHashMap<>();    // associates action codes with children
    public List<UCTNodeFirstPlayUrgency> children;
    // Used to normalize evaluations into [0,1] in childValue():
    float evaluation_bound = 0;
    float accum_evaluation = 0;
    int visit_count = 0;
    // First Play Urgency threshold, propagated to every new child:
    double FPUvalue = 0;

    /**
     * Builds a node for the given state, fast-forwarding the state until the
     * game ends or one of the players can act, and sets the node type accordingly.
     */
    public UCTNodeFirstPlayUrgency(int maxplayer, int minplayer, GameState a_gs, UCTNodeFirstPlayUrgency a_parent, float bound, double a_FPUValue) throws Exception {
        parent = a_parent;
        gs = a_gs;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        evaluation_bound = bound;
        FPUvalue = a_FPUValue;

        // Fast-forward until the game is over or either player can act:
        while(gs.winner()==-1 &&
              !gs.gameover() &&
              !gs.canExecuteAnyAction(maxplayer) &&
              !gs.canExecuteAnyAction(minplayer)) gs.cycle();
        if (gs.winner()!=-1 || gs.gameover()) {
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            type = 0;
            moveGenerator = new PlayerActionGenerator(a_gs, maxplayer);
            moveGenerator.randomizeOrder();
            actions = new ArrayList<>();
            children = new ArrayList<>();
        } else if (gs.canExecuteAnyAction(minplayer)) {
            type = 1;
            moveGenerator = new PlayerActionGenerator(a_gs, minplayer);
            moveGenerator.randomizeOrder();
            actions = new ArrayList<>();
            children = new ArrayList<>();
        } else {
            type = -1;
            System.err.println("RTMCTSNode: This should not have happened...");
        }
    }

    /**
     * FPU tree policy: follows the best child while its UCB value exceeds the
     * FPU threshold; otherwise tries to expand a randomly sampled, not yet
     * expanded action. Returns the leaf to simulate from (null for terminal nodes).
     */
    public UCTNodeFirstPlayUrgency UCTSelectLeaf(int maxplayer, int minplayer, long cutOffTime, int max_depth) throws Exception {
        // Cut the tree policy at a predefined depth
        if (depth>=max_depth) return this;
        if (children==null) return null;

        // Bandit policy:
        double best_score = 0;
        UCTNodeFirstPlayUrgency best = null;
        if (DEBUG>=1) System.out.println("UCTNodeFirstPlayUrgency.UCTSelectLeaf:");
        for (UCTNodeFirstPlayUrgency child : children) {
            double tmp = childValue(child);
            if (DEBUG >= 1) System.out.println("    " + tmp);
            if (best == null || tmp > best_score) {
                best = child;
                best_score = tmp;
            }
        }

        // First Play Urgency:
        if (best!=null && best_score>FPUvalue) return best.UCTSelectLeaf(maxplayer, minplayer, cutOffTime, max_depth);

        // if none of the already visited children have an urgency above the threshold,
        // choose one at random:
        // TODO: here I should try not to repeat previously selected nodes. But this should work for now
        if (moveGenerator!=null) {
            PlayerAction a = moveGenerator.getRandom();
            long index = moveGenerator.getActionIndex(a);
            int attemptsLeft = 50;
            while(childrenMap.containsKey(index) && attemptsLeft>0) {
                a = moveGenerator.getRandom();
                index = moveGenerator.getActionIndex(a);
                attemptsLeft--;
            }
            if (attemptsLeft>0) {
                actions.add(a);
                GameState gs2 = gs.cloneIssue(a);
                UCTNodeFirstPlayUrgency node = new UCTNodeFirstPlayUrgency(maxplayer, minplayer, gs2.clone(), this, evaluation_bound, FPUvalue);
                children.add(node);
                childrenMap.put(index, node);
                return node;
            }
        }

        if (best==null) return this;
        return best.UCTSelectLeaf(maxplayer, minplayer, cutOffTime, max_depth);
    }

    /**
     * UCB value of a child: the average evaluation normalized into [0,1]
     * (negated for min nodes) plus the exploration term weighted by C.
     */
    public double childValue(UCTNodeFirstPlayUrgency child) {
        double exploitation = ((double)child.accum_evaluation) / child.visit_count;
        double exploration = Math.sqrt(Math.log((double)visit_count)/child.visit_count);
        if (type==0) {
            // max node:
            exploitation = (exploitation + evaluation_bound)/(2*evaluation_bound);
        } else {
            exploitation = - (exploitation - evaluation_bound)/(2*evaluation_bound);
        }
        double tmp = exploitation + C*exploration;
        return tmp;
    }

    /**
     * Prints each explored child (visit count, average evaluation, action),
     * indented by depth, recursing up to maxdepth.
     */
    public void showNode(int depth, int maxdepth) {
        for(int i = 0;i<children.size();i++) {
            UCTNodeFirstPlayUrgency child = children.get(i);
            for(int j = 0;j<depth;j++) System.out.print(" ");
            System.out.println("child explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)) + " : " + actions.get(i));
            if (depth<maxdepth) child.showNode(depth+1,maxdepth);
        }
    }
}
| 6,145 | 38.909091 | 172 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCTUnitActions.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import ai.core.AI;
import ai.RandomBiasedAI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * UCT variant that searches over individual unit actions rather than full player
 * actions: each tree node branches on the actions of a single unit (see
 * UCTUnitActionsNode), and the final PlayerAction is assembled by merging the
 * most-visited unit action at each level of the tree (getMostVisited).
 */
public class UCTUnitActions extends AIWithComputationBudget implements InterruptibleAI {
    public static final int DEBUG = 0;
    // Evaluation function applied to the state reached at the end of each playout:
    EvaluationFunction ef;
    Random r = new Random();
    // Default policy used to simulate playouts:
    AI randomAI = new RandomBiasedAI();
    long max_actions_so_far = 0;
    // State from which the current search was started:
    GameState gs_to_start_from;
    // Root of the current search tree:
    UCTUnitActionsNode tree;
    // The tree policy is cut at this depth:
    int MAX_TREE_DEPTH = 10;
    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;
    // Playout length, in game cycles:
    int MAXSIMULATIONTIME = 1024;
    int playerForThisComputation;

    /** Creates a player with the default budgets, policy and evaluation function. */
    public UCTUnitActions(UnitTypeTable utt) {
        this(100,-1,100,10,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3());
    }

    /**
     * @param available_time time budget per move in milliseconds (negative disables the time cutoff)
     * @param available_playouts playout budget per move (negative disables the playout cutoff)
     * @param lookahead playout length in game cycles
     * @param max_depth maximum depth of the tree policy
     * @param policy default policy used during playouts
     * @param a_ef evaluation function applied at the end of each playout
     */
    public UCTUnitActions(int available_time, int available_playouts, int lookahead, int max_depth, AI policy, EvaluationFunction a_ef) {
        super(available_time, available_playouts);
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        MAX_TREE_DEPTH = max_depth;
        ef = a_ef;
    }

    /** Prints the accumulated playout statistics (only once some cycles/actions were executed). */
    public void printStats() {
        if (total_cycles_executed>0 && total_actions_issued>0) {
            System.out.println("Average runs per cycle: " + ((double)total_runs)/total_cycles_executed);
            System.out.println("Average runs per action: " + ((double)total_runs)/total_actions_issued);
        }
    }

    public void reset() {
        gs_to_start_from = null;
        tree = null;
    }

    public AI clone() {
        return new UCTUnitActions(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAX_TREE_DEPTH, randomAI, ef);
    }

    /** Convenience entry point: runs one full search and returns the best action found. */
    public PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            // The player cannot act in this state:
            return new PlayerAction();
        }
    }

    public void startNewComputation(int a_player, GameState gs) {
        playerForThisComputation = a_player;
        float evaluation_bound = ef.upperBound(gs);
        tree = new UCTUnitActionsNode(playerForThisComputation, 1-playerForThisComputation, gs, null, evaluation_bound);
        gs_to_start_from = gs;
//        System.out.println(evaluation_bound);
    }

    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        tree = null;
        gs_to_start_from = null;
    }

    /**
     * Runs playouts until a budget is exhausted. Note that the budgets are only
     * checked AFTER each playout, so at least one playout is always executed.
     * NOTE(review): with TIME_BUDGET == 0 this stops after a single playout,
     * whereas UCT/UCTFirstPlayUrgency treat <=0 as "no time limit" — confirm intended.
     */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
//        long cutOffTime = (TIME_BUDGET>0 ? start + TIME_BUDGET:0);
        long end = start;
        long count = 0;
        while(true) {
            // One MCTS iteration: select a leaf, simulate a playout, backpropagate:
            UCTUnitActionsNode leaf = tree.UCTSelectLeaf(playerForThisComputation, 1-playerForThisComputation, MAX_TREE_DEPTH);
            if (leaf!=null) {
                GameState gs2 = leaf.gs.clone();
                simulate(gs2, gs2.getTime() + MAXSIMULATIONTIME);
                int time = gs2.getTime() - gs_to_start_from.getTime();
                // Discount the evaluation by how far into the future the playout ended:
                double evaluation = ef.evaluate(playerForThisComputation, 1-playerForThisComputation, gs2)*Math.pow(0.99,time/10.0);
                while(leaf!=null) {
                    leaf.accum_evaluation += evaluation;
                    leaf.visit_count++;
                    leaf = leaf.parent;
                }
                total_runs++;
            } else {
                // no actions to choose from :)
                System.err.println(this.getClass().getSimpleName() + ": claims there are no more leafs to explore...");
                break;
            }
            count++;
            end = System.currentTimeMillis();
            if (TIME_BUDGET>=0 && (end - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>=0 && count>=ITERATIONS_BUDGET) break;
        }
        total_cycles_executed++;
    }

    /**
     * Returns the best player action found so far, assembled by merging the
     * most-visited unit actions along the tree (see getMostVisited).
     * NOTE(review): "asction" typo in the debug message below.
     */
    public PlayerAction getBestActionSoFar() {
        if (tree.children==null) {
            if (DEBUG>=1) System.out.println(this.getClass().getSimpleName() + " no children selected. Returning an empty asction");
            return new PlayerAction();
        }
        return getMostVisited(tree, gs_to_start_from.getTime());
    }

    /**
     * Recursively merges the most-visited unit action of 'current' with the most
     * visited actions of its descendants, but only along max nodes (type 0) whose
     * state is still at game time 'time' (i.e. unit actions of the same frame).
     *
     * @return the merged action, or null when no further action can be merged
     */
    public PlayerAction getMostVisited(UCTUnitActionsNode current, int time) {
        if (current.type!=0 || current.gs.getTime()!=time) return null;
        int mostVisitedIdx = -1;
        UCTUnitActionsNode mostVisited = null;
        for(int i = 0;i<current.children.size();i++) {
            UCTUnitActionsNode child = current.children.get(i);
            if (mostVisited == null || child.visit_count>mostVisited.visit_count) {
                mostVisited = child;
                mostVisitedIdx = i;
            }
//            System.out.println(child.visit_count);
        }
        if (mostVisitedIdx==-1) return null;
        PlayerAction mostVisitedAction = current.actions.get(mostVisitedIdx);
        PlayerAction restOfAction = getMostVisited(mostVisited, time);
        if (restOfAction!=null) mostVisitedAction = mostVisitedAction.merge(restOfAction);
        return mostVisitedAction;
    }

    /** Advances 'gs' with the default policy until the game is over or cycle 'time' is reached. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + MAXSIMULATIONTIME + ", " + MAX_TREE_DEPTH + ", " + randomAI + ", " + ef + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxTreeDepth",int.class,10));
        parameters.add(new ParameterSpecification("DefaultPolicy",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public int getMaxTreeDepth() {
        return MAX_TREE_DEPTH;
    }

    public void setMaxTreeDepth(int a_mtd) {
        MAX_TREE_DEPTH = a_mtd;
    }

    public AI getDefaultPolicy() {
        return randomAI;
    }

    public void setDefaultPolicy(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 8,322 | 30.888889 | 176 | java |
MicroRTS | MicroRTS-master/src/ai/mcts/uct/UCTUnitActionsNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.mcts.uct;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.*;
import rts.units.Unit;
/**
*
* @author santi
*/
/**
 * A node in a UCT search tree where each edge corresponds to a single
 * <em>unit</em> action (obtained via {@code getPlayerActionsSingleUnit}),
 * rather than a complete player action. Node types: 0 = max node,
 * 1 = min node, -1 = terminal (game over).
 *
 * Fix vs. previous version: {@code showNode} declared two unused locals
 * ({@code mostVisitedIdx}, {@code mostVisited}); they have been removed.
 *
 * @author santi
 */
public class UCTUnitActionsNode {
    static Random r = new Random();   // NOTE(review): currently unused here; kept since it is package-visible

    // Constant that regulates exploration vs exploitation; must be tuned for each domain.
    static float C = 0.05f;

    public int type;    // 0 : max, 1 : min, -1: Game-over
    UCTUnitActionsNode parent;
    public GameState gs;
    int depth = 0;   // 0 at the root, parent.depth+1 otherwise

    public List<PlayerAction> actions;        // candidate actions for the single unit moving at this node
    public List<UCTUnitActionsNode> children; // children.get(i) corresponds to actions.get(i)
    float evaluation_bound = 0;   // bound used to normalize evaluations into [0,1] for the bandit policy
    float accum_evaluation = 0;   // sum of the evaluations backed up through this node
    int visit_count = 0;

    /**
     * Builds a node for the given state. The state is cycled forward until some
     * player can act or the game ends; then the node type is decided and, for
     * max/min nodes, the action list of the first idle unit of the moving
     * player is generated.
     *
     * @param maxplayer player to be maximized
     * @param minplayer player to be minimized
     * @param a_gs      game state of this node (may be cycled forward in place)
     * @param a_parent  parent node, or null for the root
     * @param bound     evaluation bound used to normalize exploitation terms
     */
    public UCTUnitActionsNode(int maxplayer, int minplayer, GameState a_gs, UCTUnitActionsNode a_parent, float bound) {
        parent = a_parent;
        if (parent==null) depth = 0;
        else depth = parent.depth+1;
        gs = a_gs;
        evaluation_bound = bound;
        PhysicalGameState pgs = a_gs.getPhysicalGameState();

        // fast-forward until someone can act or the game is over:
        while(gs.winner()==-1 &&
              !gs.gameover() &&
              !gs.canExecuteAnyAction(maxplayer) &&
              !gs.canExecuteAnyAction(minplayer)) gs.cycle();
        if (gs.winner()!=-1 || gs.gameover()) {
            type = -1;
        } else if (gs.canExecuteAnyAction(maxplayer)) {
            type = 0;
            actions = null;
            // expand the first unit of maxplayer that has no action assigned:
            for(Unit u:pgs.getUnits()) {
                if (u.getPlayer()==maxplayer) {
                    if (a_gs.getActionAssignment(u)==null) {
                        actions = a_gs.getPlayerActionsSingleUnit(u);
                        break;
                    }
                }
            }
            if (actions==null) System.err.println("UCTUnitActionNode: error when generating maxplayer node!");
            children = new ArrayList<>();
        } else if (gs.canExecuteAnyAction(minplayer)) {
            type = 1;
            actions = null;
            // expand the first unit of minplayer that has no action assigned:
            for(Unit u:pgs.getUnits()) {
                if (u.getPlayer()==minplayer) {
                    if (a_gs.getActionAssignment(u)==null) {
                        actions = a_gs.getPlayerActionsSingleUnit(u);
                        break;
                    }
                }
            }
            if (actions==null) System.err.println("UCTUnitActionNode: error when generating minplayer node!");
            children = new ArrayList<>();
        } else {
            type = -1;
            System.err.println("RTMCTSNode: This should not have happened...");
        }
    }

    /**
     * Tree policy: descends the tree selecting children by an UCB-style score
     * until reaching a node with unexpanded actions (which is then expanded and
     * returned) or the maximum depth.
     *
     * @return the leaf node from which a simulation should be run
     */
    public UCTUnitActionsNode UCTSelectLeaf(int maxplayer, int minplayer, int max_depth) {
        // Cut the tree policy at a predefined depth
        if (depth>=max_depth) return this;

        // if non visited children, visit:
        if (children==null || actions==null) return this;
        if (children.size()<actions.size()) {
            PlayerAction a = actions.get(children.size());
            if (a!=null) {
                GameState gs2 = gs.cloneIssue(a);
                UCTUnitActionsNode node = new UCTUnitActionsNode(maxplayer, minplayer, gs2.clone(), this, evaluation_bound);
                children.add(node);
                return node;
            }
        }

        // Bandit policy:
        double best_score = 0;
        UCTUnitActionsNode best = null;
        for (UCTUnitActionsNode child : children) {
            double exploitation = ((double) child.accum_evaluation) / child.visit_count;
            // NOTE(review): textbook UCB1 uses sqrt(log(N)/n); this computes
            // sqrt(log(N/n)) and weights exploitation (not exploration) by C.
            // Preserved as-is since C was presumably tuned for this formula.
            double exploration = Math.sqrt(Math.log(((double) visit_count) / child.visit_count));
            if (type == 0) {
                // max node: map evaluation from [-bound,bound] to [0,1]
                exploitation = (exploitation + evaluation_bound) / (2 * evaluation_bound);
            } else {
                // min node: same mapping, negated
                exploitation = -(exploitation - evaluation_bound) / (2 * evaluation_bound);
            }
            double tmp = C * exploitation + exploration;
            if (best == null || tmp > best_score) {
                best = child;
                best_score = tmp;
            }
        }
        if (best==null) return this;
        return best.UCTSelectLeaf(maxplayer, minplayer, max_depth);
    }

    /**
     * Prints this node's children (action, visit count, average evaluation) to
     * stdout, recursing up to {@code maxdepth} levels, indenting by depth.
     */
    public void showNode(int depth, int maxdepth) {
        for(int i = 0;i<children.size();i++) {
            UCTUnitActionsNode child = children.get(i);
            for(int j = 0;j<depth;j++) System.out.print(" ");
            System.out.println("child " + actions.get(i) + " explored " + child.visit_count + " Avg evaluation: " + (child.accum_evaluation/((double)child.visit_count)));
            if (depth<maxdepth) child.showNode(depth+1,maxdepth);
        }
    }
}
| 5,488 | 39.065693 | 170 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/MiniMaxResult.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax;
import rts.GameState;
import rts.PlayerAction;
/**
*
* @author santi
*/
/**
 * Value object bundling the result of a minimax search: the recommended
 * action, its evaluation, and the game state the evaluation was computed on.
 * Fix vs. previous version: {@code toString} now carries {@code @Override}.
 *
 * @author santi
 */
public class MiniMaxResult {
    public PlayerAction action;   // recommended action; null at leaf results (see ABCD leaf evaluation)
    public float evaluation;      // evaluation score associated with 'action'
    public GameState gs;          // state on which 'evaluation' was computed

    /**
     * @param a    recommended action (may be null for leaf evaluations)
     * @param e    evaluation assigned to that action
     * @param a_gs game state associated with the evaluation
     */
    public MiniMaxResult(PlayerAction a, float e, GameState a_gs) {
        action = a;
        evaluation = e;
        gs = a_gs;
    }

    @Override
    public String toString() {
        return evaluation + " : " + action;
    }
}
| 542 | 17.724138 | 67 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/ABCD/ABCD.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.ABCD;
import ai.abstraction.WorkerRush;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import ai.minimax.MiniMaxResult;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
/**
*
* @author santi:
*
* - This is the ABCD (Alpha-Beta considering durations)
* algorithm presented by Churchill and Buro at AIIDE 2012
* - In particular, this version uses the "alt" tree alteration technique to improve the
* estimation of the alphabeta values when there are simultaneous moves.
*/
public class ABCD extends AI {
    public static int DEBUG = 0;

    // search statistics, reset at each execution of minimax:
    int nLeaves = 0;
    int nNodes = 0;
    int max_depth_so_far = 0;
    long max_branching_so_far = 0;
    long max_leaves_so_far = 0;
    long max_nodes_so_far = 0;

    int MAXDEPTH = 4;           // maximum search depth, in tree nodes
    AI playoutAI;               // AI used to play the game out at the leaves
    int maxPlayoutTime = 100;   // playout length, in game cycles
    EvaluationFunction ef;      // evaluation applied to the state reached after a playout
    protected int defaultNONEduration = 8;   // duration of the NONE actions used to complete partial player actions

    /** Default configuration: depth 4, 100-cycle WorkerRush playouts. */
    public ABCD(UnitTypeTable utt) {
        this(4,
             new WorkerRush(utt, new AStarPathFinding()), 100,
             new SimpleSqrtEvaluationFunction3());
    }

    /**
     * @param md               maximum search depth
     * @param a_playoutAI      AI used for the playouts at the leaves
     * @param a_maxPlayoutTime playout length, in game cycles
     * @param a_ef             evaluation function applied after each playout
     */
    public ABCD(int md, AI a_playoutAI, int a_maxPlayoutTime, EvaluationFunction a_ef) {
        MAXDEPTH = md;
        playoutAI = a_playoutAI;
        maxPlayoutTime = a_maxPlayoutTime;
        ef = a_ef;
    }

    /** Clears the accumulated search statistics. */
    public void reset() {
        max_depth_so_far = 0;
        max_branching_so_far = 0;
        max_leaves_so_far = 0;
        max_nodes_so_far = 0;
    }

    public AI clone() {
        return new ABCD(MAXDEPTH, playoutAI, maxPlayoutTime, ef);
    }

    /**
     * Runs an ABCD search if the player can act and the game is not decided;
     * otherwise returns an empty action. Units left without an assigned action
     * by the search are padded with NONE actions of defaultNONEduration.
     */
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        if (gs.canExecuteAnyAction(player) && gs.winner()==-1) {
            PlayerAction pa = ABCD(player, gs, MAXDEPTH);
            pa.fillWithNones(gs, player, defaultNONEduration);
            return pa;
        } else {
            return new PlayerAction();
        }
    }

    /**
     * Top-level search entry point: initializes the alpha-beta window and the
     * per-run statistics, then launches the recursive search with the moving
     * player acting first at simultaneous nodes.
     */
    public PlayerAction ABCD(int player, GameState gs, int depthLeft) throws Exception {
        long start = System.currentTimeMillis();
        float alpha = -EvaluationFunction.VICTORY;
        float beta = EvaluationFunction.VICTORY;
        int maxplayer = player;
        int minplayer = 1 - player;
        if (DEBUG>=1) System.out.println("Starting ABCD... " + player);
        if (nLeaves>max_leaves_so_far) max_leaves_so_far = nLeaves;
        if (nNodes>max_nodes_so_far) max_nodes_so_far = nNodes;
        nLeaves = 0;
        nNodes = 0;
        MiniMaxResult bestMove = ABCD(gs, maxplayer, minplayer, alpha, beta, depthLeft, maxplayer);
        if (DEBUG>=1) System.out.println("ABCD: " + bestMove + " in " + (System.currentTimeMillis()-start));
        return bestMove.action;
    }

    /**
     * Recursive alpha-beta considering durations. At simultaneous-move nodes,
     * the player given by {@code nextPlayerInSimultaneousNode} moves first and
     * the roles alternate one level down (the "alt" tree alteration). At the
     * leaves (depth exhausted or game decided) the position is scored by
     * running a playout with {@code playoutAI} for up to
     * {@code maxPlayoutTime} cycles and applying {@code ef} to the result.
     */
    public MiniMaxResult ABCD(GameState gs, int maxplayer, int minplayer, float alpha, float beta, int depthLeft, int nextPlayerInSimultaneousNode) throws Exception {
//        System.out.println("realTimeMinimaxAB(" + alpha + "," + beta + ") at " + gs.getTime());
//        gs.dumpActionAssignments();
        nNodes++;
        if (depthLeft<=0 || gs.winner()!=-1) {
            nLeaves++;

            // Run the play out:
            GameState gs2 = gs.clone();
            AI playoutAI1 = playoutAI.clone();
            AI playoutAI2 = playoutAI.clone();
            int timeOut = gs2.getTime() + maxPlayoutTime;
            boolean gameover = false;
            while(!gameover && gs2.getTime()<timeOut) {
                if (gs2.isComplete()) {
                    gameover = gs2.cycle();
                } else {
                    gs2.issue(playoutAI1.getAction(0, gs2));
                    gs2.issue(playoutAI2.getAction(1, gs2));
                }
            }

//            System.out.println("Eval (at " + gs.getTime() + "): " + EvaluationFunction.evaluate(maxplayer, minplayer, gs));
//            System.out.println(gs);
            return new MiniMaxResult(null,ef.evaluate(maxplayer, minplayer, gs2), gs2);
        }

        // decide which player moves at this node:
        int toMove = -1;
        if (gs.canExecuteAnyAction(maxplayer)) {
            if (gs.canExecuteAnyAction(minplayer)) {
                // simultaneous node: serialize it with the "alt" ordering
                toMove = nextPlayerInSimultaneousNode;
                nextPlayerInSimultaneousNode = 1 - nextPlayerInSimultaneousNode;
            } else {
                toMove = maxplayer;
            }
        } else {
            if (gs.canExecuteAnyAction(minplayer)) toMove = minplayer;
        }

        if (toMove == maxplayer) {
            // max node: raise alpha, keep the best (highest) evaluation
            PlayerActionGenerator actions = new PlayerActionGenerator(gs, maxplayer);
            long l = actions.getSize();
            if (l>max_branching_so_far) max_branching_so_far = l;
            MiniMaxResult best = null;
            PlayerAction next = null;
            do{
                next = actions.getNextAction(-1);
                if (next!=null) {
                    GameState gs2 = gs.cloneIssue(next);
                    MiniMaxResult tmp = ABCD(gs2, maxplayer, minplayer, alpha, beta, depthLeft-1, nextPlayerInSimultaneousNode);
                    alpha = Math.max(alpha,tmp.evaluation);
                    if (best==null || tmp.evaluation>best.evaluation) {
                        best = tmp;
                        best.action = next;
                    }
                    if (beta<=alpha) return best;   // beta cut-off
                }
            }while(next!=null);
            return best;
        } else if (toMove == minplayer) {
            // min node: lower beta, keep the best (lowest) evaluation
            PlayerActionGenerator actions = new PlayerActionGenerator(gs, minplayer);
            long l = actions.getSize();
            if (l>max_branching_so_far) max_branching_so_far = l;
            MiniMaxResult best = null;
            PlayerAction next = null;
            do{
                next = actions.getNextAction(-1);
                if (next!=null) {
                    GameState gs2 = gs.cloneIssue(next);
                    MiniMaxResult tmp = ABCD(gs2, maxplayer, minplayer, alpha, beta, depthLeft-1, nextPlayerInSimultaneousNode);
                    beta = Math.min(beta,tmp.evaluation);
                    if (best==null || tmp.evaluation<best.evaluation) {
                        best = tmp;
                        best.action = next;
                    }
                    if (beta<=alpha) return best;   // alpha cut-off
                }
            }while(next!=null);
            return best;
        } else {
            // neither player can act: fast-forward the state, then recurse
            // without consuming depth
            GameState gs2 = gs.clone();
            while(gs2.winner()==-1 &&
                  !gs2.gameover() &&
                  !gs2.canExecuteAnyAction(maxplayer) &&
                  !gs2.canExecuteAnyAction(minplayer)) gs2.cycle();
            return ABCD(gs2, maxplayer, minplayer, alpha, beta, depthLeft, nextPlayerInSimultaneousNode);
        }
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + MAXDEPTH + ", " + playoutAI + ", " + maxPlayoutTime + ", " + ef + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters()
    {
        List<ParameterSpecification> parameters = new ArrayList<>();

        parameters.add(new ParameterSpecification("MaxDepth",int.class,4));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("PlayoutAI",AI.class, playoutAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));

        return parameters;
    }

    public int getMaxDepth() {
        return MAXDEPTH;
    }

    public void setMaxDepth(int a_md) {
        MAXDEPTH = a_md;
    }

    public int getPlayoutLookahead() {
        return maxPlayoutTime;
    }

    public void setPlayoutLookahead(int a_pola) {
        maxPlayoutTime = a_pola;
    }

    public AI getPlayoutAI() {
        return playoutAI;
    }

    public void setPlayoutAI(AI a_dp) {
        playoutAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 8,694 | 32.964844 | 166 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/ABCD/ABCDNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.ABCD;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import util.Pair;
/**
*
* @author santi
*/
public class ABCDNode {
    // Search-tree node used as a stack frame by IDABCD's iterative
    // (non-recursive) alpha-beta search.
    public int type; // -1: unknown, 0 : max, 1 : min, 2: simulation
    public int depth = 0;
    public GameState gs;
    public PlayerActionGenerator actions;   // lazily created when the node is first expanded
    public float alpha, beta;               // alpha-beta window inherited from the parent
    public Pair<PlayerAction,Float> best;   // best action/evaluation found so far at this node
    public int nextPlayerInSimultaneousNode = 0;   // who moves first when both players can act ("alt" ordering)

    /**
     * @param a_type  node type (-1 unknown, 0 max, 1 min, 2 simulation)
     * @param a_depth depth of the node in the tree
     * @param a_gs    game state at this node
     * @param a_alpha current alpha bound
     * @param a_beta  current beta bound
     * @param npsn    player to move first if this turns out to be a simultaneous node
     */
    public ABCDNode(int a_type, int a_depth, GameState a_gs, float a_alpha, float a_beta, int npsn) {
        type = a_type;
        depth = a_depth;
        gs = a_gs;
        alpha = a_alpha;
        beta = a_beta;
        nextPlayerInSimultaneousNode = npsn;
    }
}
| 839 | 23.705882 | 101 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/ABCD/IDABCD.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.ABCD;
import ai.abstraction.WorkerRush;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
import util.Pair;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
public class IDABCD extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;

    int MAX_DEPTH = 50;  // if search goes beyond this point, most likely we are done

    // accumulated search statistics (averages are stored as sum + count pairs):
    int avg_depth_so_far = 0;
    int count_depth_so_far = 0;
    long avg_branching_so_far = 0;
    int count_branching_so_far = 0;
    long avg_leaves_so_far = 0;
    int count_leaves_so_far = 0;
    long avg_nodes_so_far = 0;
    int count_nodes_so_far = 0;
    long max_potential_branching_so_far = 0;
    long avg_potential_branching_so_far = 0;
    int count_potential_branching_so_far = 0;

    // reset at each execution of minimax:
    int nPlayouts = 0; // different form "nLeaves", since this is not reset due to iterative deepening
    int nLeaves = 0;
    int nNodes = 0;
    int max_depth_so_far = 0;
    long max_branching_so_far = 0;
    long max_leaves_so_far = 0;
    long max_nodes_so_far = 0;

    AI playoutAI;               // AI used to play the game out at the leaves
    int maxPlayoutTime = 100;   // playout length, in game cycles
    EvaluationFunction ef;      // evaluation applied after each playout
    boolean performGreedyActionScan = false;   // whether to seed bestMove with a 1-ply greedy scan
    int max_consecutive_frames_searching_so_far = 0;

    // state carried across frames (the search is interruptible and resumable):
    GameState gs_to_start_from;
    int consecutive_frames_searching = 0;
    int last_depth = 1;
    int last_nleaves = 0;
    int last_nnodes = 0;
    int last_time_depth = 0;
    int time_depth = 0;        // deepest game-time horizon reached in the current pass
    int max_time_depth_so_far = 0;
    long avg_time_depth_so_far = 0;
    double count_time_depth_so_far = 0;
    boolean treeIsComplete = true;   // true if the last pass opened the whole game tree
    List<ABCDNode> stack;            // explicit search stack; null when no search is in progress
    Pair<PlayerAction,Float> lastResult;
    PlayerAction bestMove;
    int playerForThisComputation;

    /** Default configuration: 100ms budget, WorkerRush playouts, greedy scan on. */
    public IDABCD(UnitTypeTable utt) {
        this(100, -1,
             new WorkerRush(utt, new AStarPathFinding()), 100,
             new SimpleSqrtEvaluationFunction3(), true);
    }

    /**
     * @param tpc  time budget per call, in milliseconds (<=0 disables it)
     * @param ppc  playouts (iterations) budget per call (<=0 disables it)
     * @param a_playoutAI AI used for the playouts at the leaves
     * @param a_maxPlayoutTime playout length, in game cycles
     * @param a_ef evaluation function applied after each playout
     * @param a_performGreedyActionScan whether to seed bestMove with a quick 1-ply scan
     */
    public IDABCD(int tpc, int ppc, AI a_playoutAI, int a_maxPlayoutTime, EvaluationFunction a_ef, boolean a_performGreedyActionScan) {
        super(tpc, ppc);
        playoutAI = a_playoutAI;
        maxPlayoutTime = a_maxPlayoutTime;
        ef = a_ef;
        performGreedyActionScan = a_performGreedyActionScan;
    }

    /** Discards any in-progress search and clears all accumulated statistics. */
    @Override
    public void reset() {
        gs_to_start_from = null;
        consecutive_frames_searching = 0;
        stack = null;
        lastResult = null;
        bestMove = null;
        treeIsComplete = true;
        max_depth_so_far = 0;
        max_branching_so_far = 0;
        max_leaves_so_far = 0;
        max_nodes_so_far = 0;
        avg_depth_so_far = 0;
        count_depth_so_far = 0;
        avg_branching_so_far = 0;
        count_branching_so_far = 0;
        avg_leaves_so_far = 0;
        count_leaves_so_far = 0;
        avg_nodes_so_far = 0;
        count_nodes_so_far = 0;
        avg_time_depth_so_far = 0;
        count_time_depth_so_far = 0;
        max_time_depth_so_far = 0;
        max_potential_branching_so_far = 0;
        avg_potential_branching_so_far = 0;
        count_potential_branching_so_far = 0;
    }

    public AI clone() {
        return new IDABCD(TIME_BUDGET, ITERATIONS_BUDGET, playoutAI, maxPlayoutTime, ef, performGreedyActionScan);
    }

    /**
     * Convenience wrapper: starts a fresh computation, searches for one game
     * frame's worth of budget, and returns the best action found.
     */
    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    /** Resets the per-computation state so the next frame starts a new search. */
    public void startNewComputation(int a_player, GameState gs) throws Exception
    {
        consecutive_frames_searching = 0;
        stack = null;
        last_depth = 1;
        last_nleaves = 0;
        last_nnodes = 0;
        last_time_depth = 0;
        gs_to_start_from = gs;
        playerForThisComputation = a_player;
        bestMove = null;
    }

    /**
     * Runs iterative-deepening ABCD within this frame's time/iterations budget,
     * resuming any search left incomplete by a previous frame. Each completed
     * depth updates {@code bestMove}; an interrupted depth leaves its stack in
     * place to be resumed later.
     */
    public void computeDuringOneGameFrame() throws Exception {
        int maxplayer = playerForThisComputation;
        int minplayer = 1 - playerForThisComputation;
        int depth = 1;
        long startTime = System.currentTimeMillis();
        long cutOffTime = startTime + TIME_BUDGET;
//        System.out.println("ABCD search starts (consecutive_frames_searching: " + consecutive_frames_searching + ")");
        if (TIME_BUDGET<=0) cutOffTime = 0;
        nPlayouts = 0;

        if (bestMove==null && performGreedyActionScan) {
            // The first time, we just want to do a quick evaluation of all actions, to have a first idea of what is best:
            bestMove = greedyActionScan(gs_to_start_from,playerForThisComputation, cutOffTime, ITERATIONS_BUDGET);
//            System.out.println("greedyActionScan suggested action: " + bestMove);
        }
        if (cutOffTime>0 && System.currentTimeMillis() >= cutOffTime) {
//            if (bestMove == null) {
//                PlayerActionGenerator pag = new PlayerActionGenerator(gs_to_start_from,player);
//                return pag.getRandom();
//            }
//            return bestMove;
            return;
        }
        consecutive_frames_searching++;

//        System.out.println("Starting realTimeMinimaxABIterativeDeepening... (time " + gs.getTime() + ")");
        do {
            // if there is a paused search, resume at its depth instead of 1:
            if (stack!=null) depth = last_depth;
            if (DEBUG>=1) System.out.println(" next depth: " + depth);
//            if (depth==50) DEBUG = 2;
            long currentTime = System.currentTimeMillis();
            PlayerAction tmp = searchOutsideStack(gs_to_start_from, maxplayer, minplayer, depth, cutOffTime, ITERATIONS_BUDGET, false);
            if (DEBUG>=1) System.out.println(" Time taken: " + (System.currentTimeMillis() - currentTime) + ", nPlayouts: " + nPlayouts);
//            System.out.println(gs.getTime() + ", depth: " + depth + ", nPlayouts: " + nPlayouts + ", PA: " + tmp);
            if (tmp!=null) {
                bestMove = tmp;
                // the <200 condition is because sometimes, towards the end of the game, the tree is so
                // small, that opening it takes no time, and this loop incrases depth very fast, but
                // we don't want to record that, since it is meanigless. In fact, I should detect
                // when the tree has been open completely, and cancel this loop.
                if (//depth<200 &&
                    depth>max_depth_so_far) max_depth_so_far = depth;
            }
            if (stack.isEmpty()) {
                // search was completed:
                if (nLeaves>max_leaves_so_far) max_leaves_so_far = nLeaves;
                if (nNodes>max_nodes_so_far) max_nodes_so_far = nNodes;
                last_nleaves = nLeaves;
                last_nnodes = nNodes;
                last_time_depth = time_depth;
                stack = null;
                depth++;
                if (treeIsComplete || depth>MAX_DEPTH) {
//                    System.out.println("Tree is complete!");
                    break;
                }
            } else {
//                System.out.println("realTimeMinimaxABIterativeDeepening (lookahead = " + lookAhead + "): " + tmp + " interrupted after " + (System.currentTimeMillis()-runStartTime) + " (" + nLeaves + " leaves)"); System.out.flush();
            }
            nLeaves = 0;
            nNodes = 0;
            time_depth = 0;
            if (ITERATIONS_BUDGET>0 && nPlayouts>=ITERATIONS_BUDGET) break;
            if (cutOffTime>0 && System.currentTimeMillis() >= cutOffTime) break;
        }while(true);
        last_depth = depth;
//        if (bestMove == null) {
//            PlayerActionGenerator pag = new PlayerActionGenerator(gs_to_start_from,player);
//            return pag.getRandom();
//        }
//        return bestMove;
    }

    /**
     * Returns the best action found so far (falling back to a random action if
     * nothing was found), and folds this computation's counters into the
     * accumulated statistics.
     */
    public PlayerAction getBestActionSoFar() throws Exception {
        // statistics:
        avg_depth_so_far+=last_depth;
        count_depth_so_far++;
        avg_leaves_so_far += last_nleaves;
        count_leaves_so_far++;
        avg_nodes_so_far += last_nnodes;
        count_nodes_so_far++;
        avg_time_depth_so_far += last_time_depth;
        count_time_depth_so_far++;
        if (last_time_depth>max_time_depth_so_far) max_time_depth_so_far = last_time_depth;

        if (bestMove == null) {
            PlayerActionGenerator pag = new PlayerActionGenerator(gs_to_start_from,playerForThisComputation);
            return pag.getRandom();
        }
        return bestMove;
    }

    /**
     * 1-ply greedy scan: evaluates every player action directly with {@code ef}
     * (no playout) and returns the highest-scoring one found before the cutoff.
     * Used to guarantee a sensible bestMove before the deep search starts.
     */
    public PlayerAction greedyActionScan(GameState gs, int player, long cutOffTime, int maxPlayouts) throws Exception {
        PlayerAction best = null;
        float bestScore = 0;
        PlayerActionGenerator pag = new PlayerActionGenerator(gs,player);
        PlayerAction pa = null;

//        System.out.println(gs.getUnitActions());
//        System.out.println(pag);
        do{
            pa = pag.getNextAction(cutOffTime);
            if (pa!=null) {
                GameState gs2 = gs.cloneIssue(pa);
                float score = ef.evaluate(player, 1 - player, gs2);
                if (best==null || score>bestScore) {
                    best = pa;
                    bestScore = score;
                }
            }
            if (cutOffTime>0 && System.currentTimeMillis()>cutOffTime) return best;
        }while(pa!=null);
        return best;
    }

    /**
     * One depth-limited ABCD pass driven by an explicit stack (so it can be
     * paused at any point and resumed in a later frame). Node types: -1 =
     * undetermined, 0 = max, 1 = min, 2 = fast-forward simulation. Returns the
     * pass result when the stack empties, a provisional best if
     * {@code needAResult}, and null otherwise (search interrupted).
     */
    public PlayerAction searchOutsideStack(GameState initial_gs, int maxplayer, int minplayer, int depth, long cutOffTime, int maxPlayouts, boolean needAResult) throws Exception {
        ABCDNode head;
        if (stack==null) {
            // fresh pass: create the stack with a single root node
//            System.out.println("searchOutsideStack: stack is null (maxplayer: " + maxplayer + ")");
            nLeaves = 0;
            time_depth = 0;
            stack = new LinkedList<>();
            head = new ABCDNode(-1, 0, initial_gs, -EvaluationFunction.VICTORY, EvaluationFunction.VICTORY, 0);
            stack.add(head);
            treeIsComplete = true;
        } else {
            // resuming a previously interrupted pass
//            System.out.println("searchOutsideStack: stack is NOT null");
            if (stack.isEmpty()) return lastResult.m_a;
            head = stack.get(stack.size()-1);
//            System.out.println("searchOutsideStack: head type " + head.type);
        }
        while(!stack.isEmpty()) {
            if (cutOffTime>0 && System.currentTimeMillis()>=cutOffTime) break;
            if (maxPlayouts>0 && nPlayouts>=maxPlayouts) break;
//            System.out.print("Stack: [ ");
//            for(RTMiniMaxNode n:stack) System.out.print(" " + n.type + "(" + n.gs.getTime() + ") ");
//            System.out.println("]");
            ABCDNode current = stack.get(0);
            if (DEBUG>=2) {
                for(int i = 0;i<current.depth;i++) System.out.print(" ");
                System.out.println("Node: " + current.type);
            }
            switch(current.type) {
                case -1: // unknown node: either evaluate it as a leaf, or classify it
                    {
                        int winner = current.gs.winner();
                        boolean gameover = current.gs.gameover();
                        if (current.depth>=depth || winner != -1 || gameover) {
                            if (current.gs.getTime() - initial_gs.getTime() > time_depth) {
                                time_depth = current.gs.getTime() - initial_gs.getTime();
                            }
                            nLeaves++;
                            nNodes++;
                            nPlayouts++;
                            if (DEBUG>=2) {
                                for(int i = 0;i<current.depth;i++) System.out.print(" ");
                                System.out.println("playout!");
                            }
                            // Run the play out:
                            GameState gs2 = current.gs.clone();
                            AI playoutAI1 = playoutAI.clone();
                            AI playoutAI2 = playoutAI.clone();
                            int timeOut = gs2.getTime() + maxPlayoutTime;
                            if (!gs2.gameover()) treeIsComplete = false;
                            gameover = false;
                            while(!gameover && gs2.getTime()<timeOut) {
                                if (gs2.isComplete()) {
                                    gameover = gs2.cycle();
                                } else {
                                    gs2.issue(playoutAI1.getAction(0, gs2));
                                    gs2.issue(playoutAI2.getAction(1, gs2));
                                }
                            }
                            lastResult = new Pair<>(null, ef.evaluate(maxplayer, minplayer, gs2));
//                            System.out.println("last result from -1 node");
                            stack.remove(0);
                        } else {
                            // classify as max/min/simulation; simultaneous nodes
                            // use the "alt" alternation to pick who moves first
                            current.type = 2;
                            if (current.gs.canExecuteAnyAction(maxplayer)) {
                                if (current.gs.canExecuteAnyAction(minplayer)) {
                                    current.type = current.nextPlayerInSimultaneousNode;
                                    current.nextPlayerInSimultaneousNode = 1 - current.nextPlayerInSimultaneousNode;
                                } else {
                                    current.type = 0;
                                }
                            } else {
                                if (current.gs.canExecuteAnyAction(minplayer)) {
                                    current.type = 1;
                                }
                            }
                        }
                    }
                    break;
                case 0: // max node:
                    nNodes++;
                    if (current.actions == null) {
                        // first visit: generate this node's actions and push the first child
                        current.actions = new PlayerActionGenerator(current.gs, maxplayer);
                        current.actions.randomizeOrder();
                        long l = current.actions.getSize();
                        if (DEBUG>=2) {
                            for(int i = 0;i<current.depth;i++) System.out.print(" ");
                            System.out.println("PlayerGenerator moves: " + l + "(cutOffTime: " + cutOffTime + ")");
                        }
                        if (l > max_potential_branching_so_far) max_potential_branching_so_far = l;
                        avg_potential_branching_so_far+=l;
                        count_potential_branching_so_far++;
//                        while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
                        current.best = null;
                        PlayerAction next = current.actions.getNextAction(cutOffTime);
                        if (next != null) {
                            GameState gs2 = current.gs.cloneIssue(next);
                            stack.add(0, new ABCDNode(-1, current.depth + 1, gs2, current.alpha, current.beta, current.nextPlayerInSimultaneousNode));
                        } else {
                            // This can only happen if the getNextAction call times out...
                            break;
                        }
                    } else {
                        // a child just returned: fold its result in, then expand the next child or close
                        current.alpha = Math.max(current.alpha, lastResult.m_b);
                        if (current.best == null || lastResult.m_b > current.best.m_b) {
                            current.best = lastResult;
                            current.best.m_a = current.actions.getLastAction();
                        }
                        PlayerAction next = current.actions.getNextAction(cutOffTime);
                        if (DEBUG>=2) {
                            for(int i = 0;i<current.depth;i++) System.out.print(" ");
                            System.out.println("alpha: " + current.alpha + ", beta: " + current.beta + ", next: " + next);
                        }
                        if (current.beta <= current.alpha || next == null) {
                            // beta cut-off or no more actions: pop this node
                            lastResult = current.best;
                            stack.remove(0);
                            if (current.actions.getGenerated() > max_branching_so_far) {
                                max_branching_so_far = current.actions.getGenerated();
                            }
                            avg_branching_so_far += current.actions.getGenerated();
                            count_branching_so_far++;
                        } else {
                            GameState gs2 = current.gs.cloneIssue(next);
                            stack.add(0, new ABCDNode(-1, current.depth + 1, gs2, current.alpha, current.beta, current.nextPlayerInSimultaneousNode));
                        }
                    }
                    break;
                case 1: // min node:
                    nNodes++;
                    if (current.actions == null) {
                        // first visit: generate this node's actions and push the first child
                        current.actions = new PlayerActionGenerator(current.gs, minplayer);
                        current.actions.randomizeOrder();
                        long l = current.actions.getSize();
                        if (DEBUG>=2) {
                            for(int i = 0;i<current.depth;i++) System.out.print(" ");
                            System.out.println("PlayerGenerator moves: " + l);
                        }
                        if (l > max_potential_branching_so_far) max_potential_branching_so_far = l;
//                        while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
                        avg_potential_branching_so_far+=l;
                        count_potential_branching_so_far++;
                        current.best = null;
                        PlayerAction next = current.actions.getNextAction(cutOffTime);
                        if (next != null) {
                            GameState gs2 = current.gs.cloneIssue(next);
                            stack.add(0, new ABCDNode(-1, current.depth + 1, gs2, current.alpha, current.beta, current.nextPlayerInSimultaneousNode));
                        } else {
                            // This can only happen if the getNextAction call times out...
                            break;
                        }
                    } else {
                        // a child just returned: fold its result in, then expand the next child or close
                        current.beta = Math.min(current.beta, lastResult.m_b);
                        if (current.best == null || lastResult.m_b < current.best.m_b) {
                            current.best = lastResult;
                            current.best.m_a = current.actions.getLastAction();
                        }
                        PlayerAction next = current.actions.getNextAction(cutOffTime);
                        if (current.beta <= current.alpha || next == null) {
                            // alpha cut-off or no more actions: pop this node
                            lastResult = current.best;
                            stack.remove(0);
                            if (current.actions.getGenerated() > max_branching_so_far) {
                                max_branching_so_far = current.actions.getGenerated();
                            }
                            avg_branching_so_far += current.actions.getGenerated();
                            count_branching_so_far++;
                        } else {
                            GameState gs2 = current.gs.cloneIssue(next);
                            stack.add(0, new ABCDNode(-1, current.depth + 1, gs2, current.alpha, current.beta, current.nextPlayerInSimultaneousNode));
                        }
                    }
                    break;
                case 2: // simulation node: fast-forward until someone can act, then reclassify
                    nNodes++;
                    current.gs = current.gs.clone();
                    while (current.gs.winner() == -1 &&
                           !current.gs.gameover() &&
                           !current.gs.canExecuteAnyAction(maxplayer) &&
                           !current.gs.canExecuteAnyAction(minplayer)) {
                        current.gs.cycle();
                    }
                    current.type = -1;
                    break;
            }
        }

        if (stack.isEmpty()) {
//            System.out.println("searchOutsideStack: stack is empty, returning last result.");
            return lastResult.m_a;
        }
//        System.out.println("searchOutsideStack: stack is not empty.");
        if (needAResult) {
            if (head.best!=null) return head.best.m_a;
            return head.actions.getRandom();
        }
        return null;
    }

    /** Human-readable summary of the accumulated search statistics. */
    public String statisticsString() {
        return
            "avg depth: " + (avg_depth_so_far/(double)count_depth_so_far) +
            " , max depth: " + max_depth_so_far +
            " , avg branching factor: " + (avg_branching_so_far/(double)count_branching_so_far) +
            " , max branching factor: " + max_branching_so_far +
            " , avg potential branching factor: " + (avg_potential_branching_so_far/(double)count_potential_branching_so_far) +
            " , max potential branching factor: " + max_potential_branching_so_far +
            " , avg leaves: " + (avg_leaves_so_far/(double)count_leaves_so_far) +
            " , max leaves: " + max_leaves_so_far +
            " , avg nodes: " + (avg_nodes_so_far/(double)count_nodes_so_far) +
            " , max nodes: " + max_nodes_so_far +
            " , avg time depth: " + (avg_time_depth_so_far/(double)count_time_depth_so_far) +
            " , max time depth: " + max_time_depth_so_far;
    }

    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + playoutAI + ", " + maxPlayoutTime + ", " + ef + ", " + performGreedyActionScan + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters()
    {
        List<ParameterSpecification> parameters = new ArrayList<>();

        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutAI",AI.class, playoutAI));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        parameters.add(new ParameterSpecification("PerformGreedyActionScan",boolean.class,true));

        return parameters;
    }

    public AI getPlayoutAI() {
        return playoutAI;
    }

    public void setPlayoutAI(AI a_dp) {
        playoutAI = a_dp;
    }

    public int getPlayoutLookahead() {
        return maxPlayoutTime;
    }

    public void setPlayoutLookahead(int a_pola) {
        maxPlayoutTime = a_pola;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }

    public boolean getPerformGreedyActionScan() {
        return performGreedyActionScan;
    }

    public void setPerformGreedyActionScan(boolean a_pgas) {
        performGreedyActionScan = a_pgas;
    }
}
| 24,719 | 41.694301 | 234 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/RTMiniMax/IDRTMinimax.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.RTMiniMax;
import ai.evaluation.EvaluationFunctionForwarding;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
import util.Pair;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
public class IDRTMinimax extends AIWithComputationBudget implements InterruptibleAI {
public static int DEBUG = 0;
// reset at each execution of minimax:
static int minCT = -1;
static int maxCT = -1;
static int nLeaves = 0;
public long max_branching_so_far = 0;
public long max_leaves_so_far = 0;
int LOOKAHEAD = 40;
protected int defaultNONEduration = 8;
EvaluationFunction ef;
int max_depth_so_far = 0;
long max_potential_branching_so_far = 0;
int max_consecutive_frames_searching_so_far = 0;
GameState gs_to_start_from;
int consecutive_frames_searching = 0;
int last_lookAhead = 1;
List<RTMiniMaxNode> stack;
Pair<PlayerAction,Float> lastResult;
PlayerAction bestMove;
Random r = new Random();
int playerForThisComputation;
/**
 * Default configuration: 100ms time budget with SimpleSqrtEvaluationFunction3.
 * NOTE(review): {@code utt} is accepted for constructor-signature uniformity
 * with the other AIs in this package but is not used here.
 */
public IDRTMinimax(UnitTypeTable utt) {
    this(100, new SimpleSqrtEvaluationFunction3());
}
/**
 * @param available_time time budget per search call, in milliseconds
 * @param a_ef           evaluation function used to score leaf states
 */
public IDRTMinimax(int available_time, EvaluationFunction a_ef) {
    super(available_time, -1);   // -1: no iterations budget (time-bounded only)
    LOOKAHEAD = 1;   // iterative deepening starts from a 1-cycle lookahead
    ef = a_ef;
}
/** Discards any in-progress (resumable) search state. */
@Override
public void reset() {
    gs_to_start_from = null;
    consecutive_frames_searching = 0;
    stack = null;
    lastResult = null;
    bestMove = null;
}
/** Returns a fresh IDRTMinimax with the same time budget and evaluation function. */
@Override
public AI clone() {
    return new IDRTMinimax(TIME_BUDGET, ef);
}
/**
 * Convenience wrapper: starts a fresh computation on a clone of the state,
 * searches for one frame's worth of budget, and returns the best action found.
 * Returns an empty action if the player cannot act.
 */
public final PlayerAction getAction(int player, GameState gs) throws Exception
{
    if (gs.canExecuteAnyAction(player)) {
        startNewComputation(player,gs.clone());
        computeDuringOneGameFrame();
        return getBestActionSoFar();
    } else {
        return new PlayerAction();
    }
}
/** Resets the per-computation state so the next frame starts a brand-new search. */
@Override
public void startNewComputation(int a_player, GameState gs) throws Exception
{
    playerForThisComputation = a_player;
    stack = null;
    last_lookAhead = 1;
    gs_to_start_from = gs;
    bestMove = null;
}
/**
 * Iterative-deepening real-time minimax within this frame's time budget.
 * The deepening variable is the game-time lookahead (in cycles), not a node
 * depth; each completed iteration updates {@code bestMove}, and an interrupted
 * iteration leaves its stack in place so it can be resumed next frame.
 */
@Override
public void computeDuringOneGameFrame() throws Exception {
    int maxplayer = playerForThisComputation;
    int minplayer = 1 - playerForThisComputation;
    int lookAhead = 1;
    long startTime = System.currentTimeMillis();
    long cutOffTime = startTime + TIME_BUDGET;
    if (bestMove==null) {
        // The first time, we just want to do a quick evaluation of all actions, to have a first idea of what is best:
        bestMove = greedyActionScan(gs_to_start_from, playerForThisComputation, cutOffTime);
//        System.out.println("greedyActionScan suggested action: " + bestMove);
    }
    if (System.currentTimeMillis() >= cutOffTime) return;
    consecutive_frames_searching++;

//        System.out.println("Starting realTimeMinimaxABIterativeDeepening... (time " + gs.getTime() + ")");
    do {
//            System.out.println("next lookahead: " + lookAhead);
        if (stack==null) {
            // fresh iteration: reset the per-iteration counters
            if (nLeaves>max_leaves_so_far) max_leaves_so_far = nLeaves;
            minCT = -1;
            maxCT = -1;
            nLeaves = 0;
        } else {
            // resuming an interrupted iteration at its previous lookahead
            lookAhead = last_lookAhead;
        }
//            long runStartTime = System.currentTimeMillis();
        PlayerAction tmp = timeBoundedRealTimeMinimaxABOutsideStack(gs_to_start_from, maxplayer, minplayer, gs_to_start_from.getTime() + lookAhead, cutOffTime, false);
        if (tmp!=null) {
            bestMove = tmp;
            if (lookAhead>max_depth_so_far) max_depth_so_far = lookAhead;
        }
        if (stack.isEmpty()) {
            // search was completed:
            stack = null;
//                System.out.println("realTimeMinimaxABIterativeDeepening (lookahead = " + lookAhead + "): " + tmp + " in " + (System.currentTimeMillis()-runStartTime) + " (" + nLeaves + " leaves)"); System.out.flush();
            // grow the lookahead to the next "change time" observed in the
            // tree, or by at least 4 cycles, whichever is larger:
            int nextLookAhead = Math.max((minCT+1) - gs_to_start_from.getTime(), lookAhead+4);
//                System.out.println("minCT = " + minCT + ", maxCT = " + maxCT + " lookAhead : " + lookAhead + " -> " + nextLookAhead);
            if ((minCT==-1 && maxCT==-1) || nextLookAhead<=lookAhead) {
//                    return bestMove;
                return;
            } else {
                lookAhead = nextLookAhead;
            }
        } else {
//                System.out.println("realTimeMinimaxABIterativeDeepening (lookahead = " + lookAhead + "): " + tmp + " interrupted after " + (System.currentTimeMillis()-runStartTime) + " (" + nLeaves + " leaves)"); System.out.flush();
        }
    }while(System.currentTimeMillis() - startTime < TIME_BUDGET);
    last_lookAhead = lookAhead;
//        return bestMove;
    return;
}
public PlayerAction getBestActionSoFar() throws Exception {
    // Best move found by the deepest completed (or greedy, if nothing
    // completed yet) search of the current computation.
    return bestMove;
}
// One time-bounded slice of alpha-beta minimax run over an explicit stack
// (stack.get(0) is the node currently being processed, the root is at the end),
// so the search can be suspended at the cut-off time and resumed later.
// Returns the root's best action when the search completes, null when it was
// interrupted (unless needAResult is set, in which case the best-so-far or a
// random root action is returned).
public PlayerAction timeBoundedRealTimeMinimaxABOutsideStack(GameState initial_gs, int maxplayer, int minplayer, int lookAhead, long cutOffTime, boolean needAResult) throws Exception {
RTMiniMaxNode head;
if (stack==null) {
// start a new search rooted at 'initial_gs' (root is created as a max node)
stack = new LinkedList<>();
head = new RTMiniMaxNode(0,initial_gs,-EvaluationFunctionForwarding.VICTORY, EvaluationFunctionForwarding.VICTORY);
stack.add(head);
} else {
// resume a previously interrupted search
if (stack.isEmpty()) return lastResult.m_a;
head = stack.get(stack.size()-1);
}
while(!stack.isEmpty() && System.currentTimeMillis()<cutOffTime){
// System.out.print("Stack: [ ");
// for(RTMiniMaxNode n:stack) System.out.print(" " + n.type + "(" + n.gs.getTime() + ") ");
// System.out.println("]");
RTMiniMaxNode current = stack.get(0);
switch(current.type) {
case -1: // unknown node:
{
int winner = current.gs.winner();
if (current.gs.getTime()>=lookAhead || winner!=-1) {
// leaf: horizon reached or game decided -> evaluate and pop
if (winner==-1) {
// record earliest/latest next-change times; the caller uses minCT to
// choose the next iterative-deepening look-ahead
int CT = current.gs.getNextChangeTime();
if (minCT==-1 || CT<minCT) minCT = CT;
if (maxCT==-1 || CT>maxCT) maxCT = CT;
}
nLeaves++;
lastResult = new Pair<>(null, ef.evaluate(maxplayer, minplayer, current.gs));
stack.remove(0);
} else if (current.gs.canExecuteAnyAction(maxplayer)) {
current.type = 0;
} else if (current.gs.canExecuteAnyAction(minplayer)) {
current.type = 1;
} else {
// neither player can move: fast-forward the simulation
current.type = 2;
}
}
break;
case 0: // max node:
if (current.actions==null) {
// first visit: create the action generator and push the first child
current.actions = new PlayerActionGenerator(current.gs,maxplayer);
long l = current.actions.getSize();
if (l>max_potential_branching_so_far) max_potential_branching_so_far = l;
// while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
current.best = null;
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (next!=null) {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1,gs2,current.alpha, current.beta));
} else {
// This can only happen if the getNextAction call times out...
break;
}
} else {
// a child just finished and left its value in 'lastResult'
current.alpha = Math.max(current.alpha,lastResult.m_b);
if (current.best==null || lastResult.m_b>current.best.m_b) {
current.best = lastResult;
current.best.m_a = current.actions.getLastAction();
}
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (current.beta<=current.alpha || next == null) {
// alpha-beta cutoff, or no more children: propagate best value upward
lastResult = current.best;
stack.remove(0);
if (current.actions.getGenerated()>max_branching_so_far) max_branching_so_far = current.actions.getGenerated();
} else {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1,gs2,current.alpha, current.beta));
}
}
break;
case 1: // min node (mirror image of the max-node case):
if (current.actions==null) {
current.actions = new PlayerActionGenerator(current.gs,minplayer);
long l = current.actions.getSize();
if (l>max_potential_branching_so_far) max_potential_branching_so_far = l;
// while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
current.best = null;
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (next!=null) {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1,gs2,current.alpha, current.beta));
} else {
// This can only happen if the getNextAction call times out...
break;
}
} else {
current.beta = Math.min(current.beta,lastResult.m_b);
if (current.best==null || lastResult.m_b<current.best.m_b) {
current.best = lastResult;
current.best.m_a = current.actions.getLastAction();
}
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (current.beta<=current.alpha || next == null) {
lastResult = current.best;
stack.remove(0);
if (current.actions.getGenerated()>max_branching_so_far) max_branching_so_far = current.actions.getGenerated();
} else {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1,gs2,current.alpha, current.beta));
}
}
break;
case 2: // simulation node:
// no player can act: clone and cycle the state until someone can move
current.gs = current.gs.clone();
while(current.gs.winner()==-1 &&
!current.gs.gameover() &&
!current.gs.canExecuteAnyAction(maxplayer) &&
!current.gs.canExecuteAnyAction(minplayer)) current.gs.cycle();
current.type = -1;
break;
}
}
if (stack.isEmpty()) return lastResult.m_a;
// ran out of time: only return something if the caller insists
if (needAResult) {
if (head.best!=null) return head.best.m_a;
return head.actions.getRandom();
}
return null;
}
// One-ply greedy scan: evaluates every player action directly with the
// evaluation function and returns the highest scoring one. Used to have a
// reasonable default move before any deeper search completes. May return
// null if no action could be generated before 'cutOffTime'.
public PlayerAction greedyActionScan(GameState gs, int player, long cutOffTime) throws Exception {
    PlayerActionGenerator generator = new PlayerActionGenerator(gs, player);
    PlayerAction bestAction = null;
    float bestValue = 0;
    // System.out.println(gs.getUnitActions());
    // System.out.println(generator);
    for (PlayerAction candidate = generator.getNextAction(cutOffTime);
         candidate != null;
         candidate = generator.getNextAction(cutOffTime)) {
        float value = ef.evaluate(player, 1 - player, gs.cloneIssue(candidate));
        if (bestAction == null || value > bestValue) {
            bestAction = candidate;
            bestValue = value;
        }
        if (System.currentTimeMillis() > cutOffTime) break;
    }
    return bestAction;
}
// Human-readable summary of the search statistics accumulated so far.
public String statisticsString() {
    StringBuilder sb = new StringBuilder();
    sb.append("max depth: ").append(max_depth_so_far);
    sb.append(" , max branching factor (potential): ").append(max_branching_so_far);
    sb.append("(").append(max_potential_branching_so_far).append(")");
    sb.append(" , max leaves: ").append(max_leaves_so_far);
    sb.append(" , max consecutive frames: ").append(max_consecutive_frames_searching_so_far);
    return sb.toString();
}
@Override
public String toString() {
    // e.g. "IDRTMinimax(100, -1, SimpleSqrtEvaluationFunction3(...))"
    return String.format("%s(%s, %s, %s)", getClass().getSimpleName(), TIME_BUDGET, ITERATIONS_BUDGET, ef);
}
@Override
public List<ParameterSpecification> getParameters()
{
    // Tunable parameters exposed to the configuration GUI, with defaults.
    List<ParameterSpecification> specs = new ArrayList<>();
    specs.add(new ParameterSpecification("TimeBudget", int.class, 100));
    specs.add(new ParameterSpecification("IterationsBudget", int.class, -1));
    specs.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
    return specs;
}
// Returns the evaluation function used to score leaf game states.
public EvaluationFunction getEvaluationFunction() {
return ef;
}
// Replaces the evaluation function used to score leaf game states.
public void setEvaluationFunction(EvaluationFunction a_ef) {
ef = a_ef;
}
}
| 15,203 | 41.588235 | 250 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/RTMiniMax/IDRTMinimaxRandomized.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.RTMiniMax;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.LinkedList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
import util.Pair;
/**
*
* @author santi
*
* This class implements the idea of "randomized alpha-beta" search from Michael
* Buro's group into RTMM
*
*/
public class IDRTMinimaxRandomized extends IDRTMinimax {
int m_repeats = 10; // howmany times will we repeat the search for each action in the root node?
public IDRTMinimaxRandomized(UnitTypeTable utt) {
this(100, 10, new SimpleSqrtEvaluationFunction3());
}
public IDRTMinimaxRandomized(int tpc, int repeats, EvaluationFunction a_ef) {
super(tpc, a_ef);
}
public AI clone() {
return new IDRTMinimaxRandomized(TIME_BUDGET, m_repeats, ef);
}
public PlayerAction timeBoundedRealTimeMinimaxRandomizedABOutsideStack(GameState initial_gs, int maxplayer, int minplayer, int lookAhead, long cutOffTime, boolean needAResult) throws Exception {
RTMiniMaxNode head;
if (stack == null) {
stack = new LinkedList<>();
head = new RTMiniMaxRandomizedRootNode(initial_gs);
stack.add(head);
} else {
if (stack.isEmpty()) {
return lastResult.m_a;
}
head = stack.get(stack.size() - 1);
}
while (!stack.isEmpty() && System.currentTimeMillis() < cutOffTime) {
// System.out.print("Stack: [ ");
// for(RTMiniMaxNode n:stack) System.out.print(" " + n.type + "(" + n.gs.getTime() + ") ");
// System.out.println("]");
RTMiniMaxNode current = stack.get(0);
switch (current.type) {
case -1: // unknown node:
{
int winner = current.gs.winner();
if (current.gs.getTime() >= lookAhead || winner != -1) {
if (winner == -1) {
int CT = current.gs.getNextChangeTime();
if (minCT == -1 || CT < minCT) {
minCT = CT;
}
if (maxCT == -1 || CT > maxCT) {
maxCT = CT;
}
}
nLeaves++;
lastResult = new Pair<>(null, ef.evaluate(maxplayer, minplayer, current.gs));
stack.remove(0);
} else if (current.gs.canExecuteAnyAction(maxplayer)) {
if (stack.size() == 1
|| !current.gs.canExecuteAnyAction(minplayer)) {
current.type = 0;
} else {
// randomize which player we will consider next!
// this is the ONLY difference between this method and the starndard alpha-beta:
current.type = r.nextInt(2) + 1;
// System.out.println(current.type);
}
} else if (current.gs.canExecuteAnyAction(minplayer)) {
current.type = 1;
} else {
current.type = 2;
}
}
break;
case 3: // initial max node:
{
RTMiniMaxRandomizedRootNode currentRR = (RTMiniMaxRandomizedRootNode) current;
if (currentRR.actions == null) {
currentRR.actions = new PlayerActionGenerator(currentRR.gs, maxplayer);
currentRR.scores = new float[m_repeats];
currentRR.iterations_run = 0;
long l = currentRR.actions.getSize();
if (l > max_potential_branching_so_far) {
max_potential_branching_so_far = l;
}
// while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
currentRR.best = null;
PlayerAction next = currentRR.actions.getNextAction(cutOffTime);
// System.out.println("Randomized start!");
if (next != null) {
// System.out.println("- action: " + next.toString());
GameState gs2 = currentRR.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, -EvaluationFunction.VICTORY, EvaluationFunction.VICTORY));
} else {
// This can only happen if the getNextAction call times out...
break;
}
} else {
currentRR.scores[currentRR.iterations_run] = lastResult.m_b;
currentRR.iterations_run++;
if (currentRR.iterations_run < m_repeats) {
PlayerAction next = currentRR.actions.getLastAction();
if (next==null) {
System.out.println("getLastAction returned null!!! time: " + System.currentTimeMillis() + " cutOff: " + cutOffTime);
System.out.println("Action generator status:");
System.out.println(currentRR.actions);
}
GameState gs2 = currentRR.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, -EvaluationFunction.VICTORY, EvaluationFunction.VICTORY));
// System.out.println(" " + currentRR.iterations_run + " cycle: " + gs2.getTime());
} else {
// compute the score:
float mean = 0;
float std_dev = 0;
for (int i = 0; i < m_repeats; i++) {
mean += currentRR.scores[i];
}
mean /= (float) m_repeats;
for (int i = 0; i < m_repeats; i++) {
std_dev += (mean - currentRR.scores[i]) * (mean - currentRR.scores[i]);
}
std_dev /= (float) m_repeats;
std_dev = (float) Math.sqrt(std_dev);
float score = mean - std_dev;
lastResult.m_b = score;
/*
System.out.print(" [ ");
for (int i = 0; i < m_repeats; i++) {
System.out.print(currentRR.scores[i] + " ");
}
System.out.println("]\n - Randomized: " + mean + " +- " + std_dev);
*/
if (currentRR.best == null || lastResult.m_b > currentRR.best.m_b) {
currentRR.best = lastResult;
currentRR.best.m_a = currentRR.actions.getLastAction();
}
currentRR.iterations_run = 0;
PlayerAction next = currentRR.actions.getNextAction(cutOffTime);
if (next == null) {
lastResult = currentRR.best;
stack.remove(0);
if (currentRR.actions.getGenerated() > max_branching_so_far) {
max_branching_so_far = current.actions.getGenerated();
}
} else {
// System.out.println("- action: " + next.toString());
GameState gs2 = currentRR.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, -EvaluationFunction.VICTORY, EvaluationFunction.VICTORY));
}
}
}
}
break;
case 0: // max node:
if (current.actions == null) {
current.actions = new PlayerActionGenerator(current.gs, maxplayer);
long l = current.actions.getSize();
if (l > max_potential_branching_so_far) {
max_potential_branching_so_far = l;
}
// while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
current.best = null;
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (next != null) {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, current.alpha, current.beta));
} else {
// This can only happen if the getNextAction call times out...
break;
}
} else {
current.alpha = Math.max(current.alpha, lastResult.m_b);
if (current.best == null || lastResult.m_b > current.best.m_b) {
current.best = lastResult;
current.best.m_a = current.actions.getLastAction();
}
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (current.beta <= current.alpha || next == null) {
lastResult = current.best;
stack.remove(0);
if (current.actions.getGenerated() > max_branching_so_far) {
max_branching_so_far = current.actions.getGenerated();
}
} else {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, current.alpha, current.beta));
}
}
break;
case 1: // min node:
if (current.actions == null) {
current.actions = new PlayerActionGenerator(current.gs, minplayer);
long l = current.actions.getSize();
if (l > max_potential_branching_so_far) {
max_potential_branching_so_far = l;
}
// while(current.actions.size()>MAX_BRANCHING_FACTOR) current.actions.remove(r.nextInt(current.actions.size()));
current.best = null;
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (next != null) {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, current.alpha, current.beta));
} else {
// This can only happen if the getNextAction call times out...
break;
}
} else {
current.beta = Math.min(current.beta, lastResult.m_b);
if (current.best == null || lastResult.m_b < current.best.m_b) {
current.best = lastResult;
current.best.m_a = current.actions.getLastAction();
}
PlayerAction next = current.actions.getNextAction(cutOffTime);
if (current.beta <= current.alpha || next == null) {
lastResult = current.best;
stack.remove(0);
if (current.actions.getGenerated() > max_branching_so_far) {
max_branching_so_far = current.actions.getGenerated();
}
} else {
GameState gs2 = current.gs.cloneIssue(next);
stack.add(0, new RTMiniMaxNode(-1, gs2, current.alpha, current.beta));
}
}
break;
case 2: // simulation node:
current.gs = current.gs.clone();
while (current.gs.winner() == -1
&& !current.gs.gameover()
&& //current.gs.getTime()<lookAhead &&
!current.gs.canExecuteAnyAction(maxplayer)
&& !current.gs.canExecuteAnyAction(minplayer)) {
current.gs.cycle();
}
current.type = -1;
break;
}
}
if (stack.isEmpty()) {
return lastResult.m_a;
}
if (needAResult) {
if (head.best != null) {
return head.best.m_a;
}
return head.actions.getRandom();
}
return null;
}
public String toString() {
return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + m_repeats + ", " + ef + ")";
}
@Override
public List<ParameterSpecification> getParameters()
{
List<ParameterSpecification> parameters = super.getParameters();
parameters.add(new ParameterSpecification("Repeats",int.class,10));
return parameters;
}
public int getRepeats() {
return m_repeats;
}
public void setRepeats(int a_r) {
m_repeats = a_r;
}
}
| 14,500 | 46.544262 | 198 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/RTMiniMax/RTMiniMaxNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.RTMiniMax;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import util.Pair;
/**
*
* @author santi
*/
/**
 * A node of the stack-based real-time minimax search.
 *
 * @author santi
 */
public class RTMiniMaxNode {
    // Node role: -1 = not yet classified, 0 = max, 1 = min, 2 = simulation.
    public int type;
    public GameState gs;
    public PlayerActionGenerator actions;
    public float alpha, beta;
    public Pair<PlayerAction,Float> best;

    public RTMiniMaxNode(int a_type, GameState a_gs, float a_alpha, float a_beta) {
        this.gs = a_gs;
        this.type = a_type;
        this.beta = a_beta;
        this.alpha = a_alpha;
    }
}
| 686 | 21.9 | 83 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/RTMiniMax/RTMiniMaxRandomizedRootNode.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.RTMiniMax;
import ai.evaluation.EvaluationFunction;
import rts.GameState;
/**
*
* @author santi
*/
// Root node of the randomized real-time minimax search (node type 3): keeps the
// evaluations of the repeated randomized runs of the current root action.
public class RTMiniMaxRandomizedRootNode extends RTMiniMaxNode {
// number of randomized repetitions already run for the current root action
public int iterations_run = 0;
// evaluation obtained by each repetition (length set by the search's repeat count)
float scores[];
public RTMiniMaxRandomizedRootNode(GameState a_gs) {
// type 3 marks the randomized root; bounds start at the full [-VICTORY, VICTORY] window
super(3, a_gs, -EvaluationFunction.VICTORY, EvaluationFunction.VICTORY);
}
}
| 495 | 20.565217 | 80 | java |
MicroRTS | MicroRTS-master/src/ai/minimax/RTMiniMax/RTMinimax.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.minimax.RTMiniMax;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import ai.minimax.MiniMaxResult;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
// Real-time alpha-beta minimax over durative actions: searches up to a fixed
// look-ahead (in game cycles), evaluating leaves with an EvaluationFunction.
// Unlike turn-based minimax, nodes alternate max/min depending on which player
// can issue an action at that game time, with "simulation" fast-forwarding in
// between.
public class RTMinimax extends AI {
// reset at each execution of minimax:
// NOTE(review): these are static, so they are shared by ALL RTMinimax
// instances (and subclasses) — confirm this is intended before running
// several searches concurrently.
static int minCT = -1;
static int maxCT = -1;
static int nLeaves = 0;
public long max_branching_so_far = 0;
public long max_leaves_so_far = 0;
// search horizon, in game cycles past the current time
int LOOKAHEAD = 40;
// duration of the NONE actions used to pad units with nothing assigned
protected int defaultNONEduration = 8;
EvaluationFunction ef;
public RTMinimax(UnitTypeTable utt) {
this(50, new SimpleSqrtEvaluationFunction3());
}
public RTMinimax(int la, EvaluationFunction a_ef) {
LOOKAHEAD = la;
ef = a_ef;
}
@Override
public void reset() {
}
@Override
public AI clone() {
return new RTMinimax(LOOKAHEAD, ef);
}
// Runs a full alpha-beta search each frame (no time bound) and pads the
// resulting action with NONEs for units that received nothing.
@Override
public PlayerAction getAction(int player, GameState gs) throws Exception {
if (gs.canExecuteAnyAction(player) && gs.winner()==-1) {
PlayerAction pa = realTimeMinimaxAB(player, gs, LOOKAHEAD);
pa.fillWithNones(gs, player, defaultNONEduration);
return pa;
} else {
return new PlayerAction();
}
}
// One-ply greedy scan: evaluates each player action directly and returns the
// highest-scoring one, or the best found before 'cutOffTime'. May return null
// if no action was generated in time.
public PlayerAction greedyActionScan(GameState gs, int player, long cutOffTime) throws Exception {
PlayerAction best = null;
float bestScore = 0;
PlayerActionGenerator pag = new PlayerActionGenerator(gs,player);
PlayerAction pa = null;
// System.out.println(gs.getUnitActions());
// System.out.println(pag);
do{
pa = pag.getNextAction(cutOffTime);
if (pa!=null) {
GameState gs2 = gs.cloneIssue(pa);
float score = ef.evaluate(player, 1 - player, gs2);
if (best==null || score>bestScore) {
best = pa;
bestScore = score;
}
}
if (System.currentTimeMillis()>cutOffTime) return best;
}while(pa!=null);
return best;
}
// Top-level entry: resets the per-run statistics and launches the recursive
// search with a full [-VICTORY, VICTORY] alpha-beta window.
public PlayerAction realTimeMinimaxAB(int player, GameState gs, int lookAhead) {
long start = System.currentTimeMillis();
float alpha = -EvaluationFunction.VICTORY;
float beta = EvaluationFunction.VICTORY;
int maxplayer = player;
int minplayer = 1 - player;
System.out.println("Starting realTimeMinimaxAB...");
if (nLeaves>max_leaves_so_far) max_leaves_so_far = nLeaves;
minCT = -1;
maxCT = -1;
nLeaves = 0;
MiniMaxResult bestMove = realTimeMinimaxAB(gs, maxplayer, minplayer, alpha, beta, gs.getTime() + lookAhead, 0);
System.out.println("realTimeMinimax: " + bestMove + " in " + (System.currentTimeMillis()-start));
return bestMove.action;
}
// Recursive alpha-beta. A state is a leaf when the look-ahead horizon is
// reached or the game is decided; otherwise it branches for whichever player
// can act (max first), or fast-forwards the simulation when neither can.
public MiniMaxResult realTimeMinimaxAB(GameState gs, int maxplayer, int minplayer, float alpha, float beta, int lookAhead, int depth) {
// System.out.println("realTimeMinimaxAB(" + alpha + "," + beta + ") at " + gs.getTime());
// gs.dumpActionAssignments();
if (gs.getTime()>=lookAhead || gs.winner()!=-1) {
// leaf: record next-change-time statistics and evaluate
int CT = gs.getNextChangeTime();
if (minCT==-1 || CT<minCT) minCT = CT;
if (maxCT==-1 || CT>maxCT) maxCT = CT;
nLeaves++;
// System.out.println("Eval (at " + gs.getTime() + "): " + EvaluationFunction.evaluate(maxplayer, minplayer, gs));
// System.out.println(gs);
return new MiniMaxResult(null,ef.evaluate(maxplayer, minplayer, gs), gs);
}
if (gs.canExecuteAnyAction(maxplayer)) {
List<PlayerAction> actions_max = gs.getPlayerActions(maxplayer);
int l = actions_max.size();
if (l>max_branching_so_far) max_branching_so_far = l;
MiniMaxResult best = null;
// System.out.println("realTimeMinimaxAB.max: " + actions_max.size());
for(PlayerAction action_max:actions_max) {
GameState gs2 = gs.cloneIssue(action_max);
// System.out.println("action_max: " + action_max);
MiniMaxResult tmp = realTimeMinimaxAB(gs2, maxplayer, minplayer, alpha, beta, lookAhead, depth+1);
// System.out.println(action_max + " -> " + tmp.evaluation);
alpha = Math.max(alpha,tmp.evaluation);
if (best==null || tmp.evaluation>best.evaluation) {
best = tmp;
best.action = action_max;
}
// if (depth==0) {
// System.out.println(action_max + " -> " + tmp.evaluation);
// System.out.println(tmp.gs);
// }
if (beta<=alpha) return best;
}
return best;
} else if (gs.canExecuteAnyAction(minplayer)) {
List<PlayerAction> actions_min = gs.getPlayerActions(minplayer);
int l = actions_min.size();
if (l>max_branching_so_far) max_branching_so_far = l;
MiniMaxResult best = null;
// System.out.println("realTimeMinimaxAB.min: " + actions_min.size());
for(PlayerAction action_min:actions_min) {
GameState gs2 = gs.cloneIssue(action_min);
// System.out.println("action_min: " + action_min);
MiniMaxResult tmp = realTimeMinimaxAB(gs2, maxplayer, minplayer, alpha, beta, lookAhead, depth+1);
beta = Math.min(beta,tmp.evaluation);
if (best==null || tmp.evaluation<best.evaluation) {
best = tmp;
best.action = action_min;
}
if (beta<=alpha) return best;
}
return best;
} else {
// neither player can act: clone and cycle forward until someone can
GameState gs2 = gs.clone();
while(gs2.winner()==-1 &&
!gs2.gameover() &&
!gs2.canExecuteAnyAction(maxplayer) &&
!gs2.canExecuteAnyAction(minplayer)) gs2.cycle();
return realTimeMinimaxAB(gs2, maxplayer, minplayer, alpha, beta, lookAhead, depth+1);
}
}
public String toString() {
return getClass().getSimpleName() + "(" + LOOKAHEAD + ", " + ef + ")";
}
@Override
public List<ParameterSpecification> getParameters()
{
List<ParameterSpecification> parameters = new ArrayList<>();
parameters.add(new ParameterSpecification("LookAhead",int.class,50));
parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
return parameters;
}
public int getLookAhead() {
return LOOKAHEAD;
}
public void setLookAhead(int a_la) {
LOOKAHEAD = a_la;
}
public EvaluationFunction getEvaluationFunction() {
return ef;
}
public void setEvaluationFunction(EvaluationFunction a_ef) {
ef = a_ef;
}
}
| 7,563 | 33.226244 | 139 | java |
MicroRTS | MicroRTS-master/src/ai/montecarlo/MonteCarlo.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.montecarlo;
import ai.core.AI;
import ai.RandomBiasedAI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import rts.GameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * Flat Monte Carlo search: samples a (possibly capped) set of candidate player
 * actions, evaluates each with random playouts, and picks the action with the
 * highest average (time-discounted) evaluation.
 *
 * @author santi
 */
public class MonteCarlo extends AIWithComputationBudget implements InterruptibleAI {
    public static final int DEBUG = 0;
    EvaluationFunction ef;

    // Accumulated playout statistics for one candidate player action.
    public class PlayerActionTableEntry {
        PlayerAction pa;
        float accum_evaluation = 0;
        int visit_count = 0;
    }

    Random r = new Random();
    AI randomAI = new RandomBiasedAI();
    long max_actions_so_far = 0;

    PlayerActionGenerator moveGenerator;
    boolean allMovesGenerated = false;
    List<PlayerActionTableEntry> actions;
    GameState gs_to_start_from;
    int run = 0;
    int playerForThisComputation;

    // statistics:
    public long total_runs = 0;
    public long total_cycles_executed = 0;
    public long total_actions_issued = 0;

    long MAXACTIONS = 100;      // cap on candidate actions; <=0 means unlimited
    int MAXSIMULATIONTIME = 1024; // playout look-ahead, in game cycles

    public MonteCarlo(UnitTypeTable utt) {
        this(100, -1, 100,
             new RandomBiasedAI(),
             new SimpleSqrtEvaluationFunction3());
    }

    /** Unlimited number of candidate actions (MAXACTIONS = -1). */
    public MonteCarlo(int available_time, int playouts_per_cycle, int lookahead, AI policy, EvaluationFunction a_ef) {
        super(available_time, playouts_per_cycle);
        MAXACTIONS = -1;
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        ef = a_ef;
    }

    public MonteCarlo(int available_time, int playouts_per_cycle, int lookahead, long maxactions, AI policy, EvaluationFunction a_ef) {
        super(available_time, playouts_per_cycle);
        MAXACTIONS = maxactions;
        MAXSIMULATIONTIME = lookahead;
        randomAI = policy;
        ef = a_ef;
    }

    public void printStats() {
        if (total_cycles_executed>0 && total_actions_issued>0) {
            System.out.println("Average runs per cycle: " + ((double)total_runs)/total_cycles_executed);
            System.out.println("Average runs per action: " + ((double)total_runs)/total_actions_issued);
        }
    }

    public void reset() {
        moveGenerator = null;
        actions = null;
        gs_to_start_from = null;
        run = 0;
    }

    public AI clone() {
        return new MonteCarlo(TIME_BUDGET, ITERATIONS_BUDGET, MAXSIMULATIONTIME, MAXACTIONS, randomAI, ef);
    }

    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }

    public void startNewComputation(int a_player, GameState gs) throws Exception {
        if (DEBUG>=2) System.out.println("Starting a new search...");
        if (DEBUG>=2) System.out.println(gs);
        playerForThisComputation = a_player;
        gs_to_start_from = gs;
        moveGenerator = new PlayerActionGenerator(gs,playerForThisComputation);
        moveGenerator.randomizeOrder();
        allMovesGenerated = false;
        actions = null;
        run = 0;
    }

    public void resetSearch() {
        if (DEBUG>=2) System.out.println("Resetting search...");
        gs_to_start_from = null;
        moveGenerator = null;
        actions = null;
        run = 0;
    }

    /**
     * Builds the candidate-action table (once), then runs playouts round-robin
     * over the candidates until the time or iterations budget is exhausted.
     */
    public void computeDuringOneGameFrame() throws Exception {
        if (DEBUG>=2) System.out.println("Search...");
        long start = System.currentTimeMillis();
        int nruns = 0;
        // 0 means "no deadline" for the action generator
        long cutOffTime = (TIME_BUDGET>0 ? System.currentTimeMillis() + TIME_BUDGET : 0);

        if (actions==null) {
            actions = new ArrayList<>();
            if (MAXACTIONS>0 && moveGenerator.getSize()>2*MAXACTIONS) {
                // far more actions than the cap: just sample MAXACTIONS at random
                for(int i = 0;i<MAXACTIONS;i++) {
                    MonteCarlo.PlayerActionTableEntry pate = new MonteCarlo.PlayerActionTableEntry();
                    pate.pa = moveGenerator.getRandom();
                    actions.add(pate);
                }
                max_actions_so_far = Math.max(moveGenerator.getSize(),max_actions_so_far);
                if (DEBUG>=1) System.out.println("MontCarloAI (random action sampling) for player " + playerForThisComputation + " chooses between " + moveGenerator.getSize() + " actions [maximum so far " + max_actions_so_far + "] (cycle " + gs_to_start_from.getTime() + ")");
            } else {
                // generate everything, then randomly discard down to the cap
                PlayerAction pa;
                long count = 0;
                do{
                    pa = moveGenerator.getNextAction(cutOffTime);
                    if (pa!=null) {
                        MonteCarlo.PlayerActionTableEntry pate = new MonteCarlo.PlayerActionTableEntry();
                        pate.pa = pa;
                        actions.add(pate);
                        count++;
                        if (MAXACTIONS>0 && count>=2*MAXACTIONS) break; // this is needed since some times, moveGenerator.size() overflows
                    }
                }while(pa!=null);
                max_actions_so_far = Math.max(actions.size(),max_actions_so_far);
                if (DEBUG>=1) System.out.println("MontCarloAI (complete generation plus random reduction) for player " + playerForThisComputation + " chooses between " + actions.size() + " actions [maximum so far " + max_actions_so_far + "] (cycle " + gs_to_start_from.getTime() + ")");
                while(MAXACTIONS>0 && actions.size()>MAXACTIONS) actions.remove(r.nextInt(actions.size()));
            }
        }

        while(true) {
            if (TIME_BUDGET>0 && (System.currentTimeMillis() - start)>=TIME_BUDGET) break;
            if (ITERATIONS_BUDGET>0 && nruns>=ITERATIONS_BUDGET) break;
            monteCarloRun(playerForThisComputation, gs_to_start_from);
            nruns++;
        }

        total_cycles_executed++;
    }

    /**
     * One playout for the next candidate action (round-robin). The resulting
     * evaluation is discounted by 0.99^(time/10) so quicker outcomes score higher.
     */
    public void monteCarloRun(int player, GameState gs) throws Exception {
        int idx = run%actions.size();
        PlayerActionTableEntry pate = actions.get(idx);
        GameState gs2 = gs.cloneIssue(pate.pa);
        GameState gs3 = gs2.clone();
        simulate(gs3,gs3.getTime() + MAXSIMULATIONTIME);
        int time = gs3.getTime() - gs2.getTime();
        pate.accum_evaluation += ef.evaluate(player, 1-player, gs3)*Math.pow(0.99,time/10.0);
        pate.visit_count++;
        run++;
        total_runs++;
    }

    /** Returns the candidate action with the highest average evaluation. */
    public PlayerAction getBestActionSoFar() {
        // find the best:
        PlayerActionTableEntry best = null;
        for(PlayerActionTableEntry pate:actions) {
            if (best==null || (pate.accum_evaluation/pate.visit_count)>(best.accum_evaluation/best.visit_count)) {
                best = pate;
            }
        }
        if (best==null) {
            // BUG FIX: previously the fallback entry was built but never assigned
            // to 'best', so the code went on to dereference null below. Fall back
            // to a random action instead.
            MonteCarlo.PlayerActionTableEntry pate = new MonteCarlo.PlayerActionTableEntry();
            pate.pa = moveGenerator.getRandom();
            best = pate;
            System.err.println("MonteCarlo.getBestActionSoFar: best action was null!!! action.size() = " + actions.size());
        }
        if (DEBUG>=1) {
            System.out.println("Executed " + run + " runs");
            System.out.println("Selected action: " + best + " visited " + best.visit_count + " with average evaluation " + (best.accum_evaluation/best.visit_count));
        }
        total_actions_issued++;
        return best.pa;
    }

    /** Advances 'gs' with the playout policy until 'time' or the game ends. */
    public void simulate(GameState gs, int time) throws Exception {
        boolean gameover = false;
        do{
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(randomAI.getAction(0, gs));
                gs.issue(randomAI.getAction(1, gs));
            }
        }while(!gameover && gs.getTime()<time);
    }

    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + "," + ITERATIONS_BUDGET + "," + MAXSIMULATIONTIME + "," + MAXACTIONS + ", " + randomAI + ", " + ef + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters()
    {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("MaxActions",long.class,100));
        parameters.add(new ParameterSpecification("playoutAI",AI.class, randomAI));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    public int getPlayoutLookahead() {
        return MAXSIMULATIONTIME;
    }

    public void setPlayoutLookahead(int a_pola) {
        MAXSIMULATIONTIME = a_pola;
    }

    public long getMaxActions() {
        return MAXACTIONS;
    }

    public void setMaxActions(long a_ma) {
        MAXACTIONS = a_ma;
    }

    public AI getplayoutAI() {
        return randomAI;
    }

    public void setplayoutAI(AI a_dp) {
        randomAI = a_dp;
    }

    public EvaluationFunction getEvaluationFunction() {
        return ef;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        ef = a_ef;
    }
}
| 10,143 | 33.040268 | 286 | java |
MicroRTS | MicroRTS-master/src/ai/montecarlo/lsi/LSI.java | /*
* This class was contributed by: Antonin Komenda, Alexander Shleyfman and Carmel Domshlak
*/
package ai.montecarlo.lsi;
import ai.RandomBiasedAI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import rts.GameState;
import rts.PhysicalGameState;
import rts.PlayerAction;
import rts.PlayerActionGenerator;
import rts.ResourceUsage;
import rts.UnitAction;
import rts.UnitActionAssignment;
import rts.units.Unit;
import util.Pair;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.montecarlo.lsi.Sampling.AgentOrderingType;
import ai.montecarlo.lsi.Sampling.UnitActionTableEntry;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import rts.units.UnitTypeTable;
public class LSI extends AIWithComputationBudget {
public static final int DEBUG = 0;
private static final double NORMALIZATION_EPSILON = 0.01;
private Random rnd = new Random();
private int lookAhead;
private double split;
private EstimateType estimateType;
private EstimateReuseType estimateReuseType;
private GenerateType generateType;
private AgentOrderingType agentOrderingType;
private EvaluateType evaluateType;
private boolean eliteReuse;
private RelaxationType relaxationType;
private int relaxationLimit;
private boolean epochal;
private AI simulationAi;
private EvaluationFunction evalFunction;
private int nofPlays = 0;
private int nofNoops = 0;
private int nofSamples = 0;
private int nofPlayedUnits = 0;
private int nofActions = 0;
private Sampling sampling;
private LinkedHashMap<PlayerAction, Pair<Double, Integer>> elitePlayerActions = new LinkedHashMap<>();
private Set<Unit> nextEpochUnits = new HashSet<>();
private Set<Unit> epochUnits;
private int actionCount;
/**
 * Builds an LSI agent with the default configuration (100 simulations,
 * 100-tick lookahead, 25% of the budget spent on the generate stage,
 * RANDOM_TAIL estimation, sequential halving evaluation).
 *
 * NOTE(review): the UnitTypeTable argument is currently unused — presumably
 * kept only for the reflective AI-construction convention; confirm.
 *
 * @param utt unit type table (unused)
 */
public LSI(UnitTypeTable utt) {
    // EstimateType etc. are nested in LSI, so the qualifier is unnecessary here.
    this(100, 100, 0.25,
         EstimateType.RANDOM_TAIL, EstimateReuseType.ALL,
         GenerateType.PER_AGENT, Sampling.AgentOrderingType.ENTROPY,
         EvaluateType.HALVING, false,
         RelaxationType.NONE, 2,
         false,
         new RandomBiasedAI(),
         new SimpleSqrtEvaluationFunction3());
}
/**
 * Fully parameterized constructor.
 *
 * @param availableSimulationCount total number of playouts allowed per move (iteration budget)
 * @param lookAhead                maximum ticks simulated per playout
 * @param split                    fraction of the budget spent on the generate stage
 * @param estimateType             how per-unit action distributions are estimated
 * @param estimateReuseType        whether a playout updates one or all units' estimates
 * @param generateType             how candidate joint actions are sampled from the distributions
 * @param agentOrderingType        order in which units are assigned actions when sampling
 * @param evaluateType             how the candidate pool is evaluated (halving / best)
 * @param eliteReuse               reuse well-evaluated joint actions from the generate stage
 * @param relaxationType           optional agent-dropping scheme (pre/post stages)
 * @param relaxationLimit          number of agents kept when relaxing
 * @param epochal                  plan only for one "epoch" of units at a time
 * @param simulationAi             playout policy
 * @param evalFunction             state evaluation at the playout horizon
 */
public LSI(int availableSimulationCount, int lookAhead, double split,
           EstimateType estimateType, EstimateReuseType estimateReuseType, GenerateType generateType,
           AgentOrderingType agentOrderingType, EvaluateType evaluateType, boolean eliteReuse,
           RelaxationType relaxationType, int relaxationLimit, boolean epochal,
           AI simulationAi, EvaluationFunction evalFunction) {
    // LSI is iteration-bounded: no wall-clock budget (-1).
    super(-1, availableSimulationCount);
    this.lookAhead = lookAhead;
    this.split = split;
    this.estimateType = estimateType;
    this.estimateReuseType = estimateReuseType;
    this.generateType = generateType;
    this.agentOrderingType = agentOrderingType;
    this.evaluateType = evaluateType;
    this.eliteReuse = eliteReuse;
    this.relaxationType = relaxationType;
    this.relaxationLimit = relaxationLimit;
    this.epochal = epochal;
    this.simulationAi = simulationAi;
    this.evalFunction = evalFunction;
    this.sampling = new Sampling(agentOrderingType, lookAhead, simulationAi, evalFunction);
}
/**
 * Resets per-game state.
 * NOTE(review): the statistics counters (nofPlays, ...) and the epochal unit
 * sets are not cleared here — confirm whether that is intentional (statistics
 * appear to be meant to survive across games for statisticsString()).
 */
public void reset() {
    // intentionally empty
}
/**
 * Returns a fresh LSI instance with identical configuration.
 * This is a configuration copy, not a deep copy of any search state.
 */
public AI clone() {
    return new LSI(ITERATIONS_BUDGET, lookAhead, split,
                   estimateType, estimateReuseType, generateType, agentOrderingType, evaluateType,
                   eliteReuse, relaxationType, relaxationLimit, epochal, simulationAi, evalFunction);
}
/**
 * Selects a joint action for {@code player} using Linear Side Information search:
 * (1) generate stage — estimate a weight distribution over each unit's actions
 * using a {@code split} fraction of the simulation budget; (2) sample a pool of
 * candidate joint actions from those distributions; (3) evaluate stage — spend
 * the remaining budget on the pool (sequential halving or best-arm sampling)
 * and return the winner. Optional "relaxations" drop some units before or
 * after the stages, and "epochal" mode plans only for a subset of units per move.
 *
 * @param player    index of the player to move
 * @param gameState current state (playouts run on clones; this state is not advanced)
 * @return the chosen joint action; an empty PlayerAction if nothing is executable
 *         or the game is already decided
 */
public PlayerAction getAction(int player, GameState gameState) throws Exception {
    if (!gameState.canExecuteAnyAction(player) || gameState.winner() != -1) {
        return new PlayerAction();
    }

    // initialize per-move state
    sampling.resetSimulationCount();
    elitePlayerActions.clear();
    List<UnitActionTableEntry> unitActionTable = prepareUnitActionTable(gameState, player);
    Set<Unit> units = new HashSet<>();
    for (UnitActionTableEntry unitActionTableEntry : unitActionTable) {
        units.add(unitActionTableEntry.u);
    }

    // epochal unit subselection: plan only for units of the current epoch;
    // units that become ready later are queued for the next epoch
    if (epochal) {
        // init epochs
        if (epochUnits == null) {
            epochUnits = new HashSet<>(units);
        }

        // add units with finished actions for next (or current if empty) epoch
        for (UnitActionTableEntry entry : unitActionTable) {
            if (!epochUnits.contains(entry.u) && !nextEpochUnits.contains(entry.u)) {
                if (epochUnits.isEmpty()) {
                    epochUnits.add(entry.u);
                } else {
                    nextEpochUnits.add(entry.u);
                }
            }
        }

        // filter units not allowed for this epoch
        for (Iterator<UnitActionTableEntry> iterator = unitActionTable.iterator(); iterator.hasNext(); ) {
            UnitActionTableEntry entry = iterator.next();
            if (!epochUnits.contains(entry.u)) {
                iterator.remove();
            }
        }
    }

    // pre-relaxation (e.g., --LSI): randomly drop agents before estimation.
    // Indices are returned in DESCENDING order, so positional removal is safe.
    if (relaxationType == ai.montecarlo.lsi.LSI.RelaxationType.PRE_RANDOM) {
        java.util.List<Integer> indices = getRelaxedAgentIndicesRandom(unitActionTable);
        for (int index : indices) {
            unitActionTable.remove(index);
        }
    }

    PlayerAction playerAction = new PlayerAction();
    if (!unitActionTable.isEmpty()) {
        // generate joint actions for later evaluation (+ probability distributions on single actions)
        List<double[]> distributions = null;
        Set<PlayerAction> actionSet = null;
        if (estimateType.equals(EstimateType.ALL_COMBINATIONS)) {
            actionSet = sampling.generatePlayerActionAll(unitActionTable, player, gameState, true);

            // to be sure increase the sampling counter to be compatible with the LSI versions
            sampling.increaseSimulationCount(ITERATIONS_BUDGET * split);
        } else {
            // estimation phase: build one weight array per unit
            switch (estimateType) {
                case RANDOM:
                    distributions = stageGenerateRandom(player, gameState, unitActionTable);

                    // to be sure increase the sampling counter to be compatible with the LSI versions
                    sampling.increaseSimulationCount(ITERATIONS_BUDGET * split);
                    break;
                case NOOP_TAIL:
                    distributions = stageGenerateNoopTail(player, gameState, unitActionTable);
                    break;
                case RANDOM_TAIL:
                    distributions = stageGenerateRandomTail(player, gameState, unitActionTable);
                    break;
                case RANDOM_TAIL_ELITE:
                    distributions = stageGenerateRandomTailElite(player, gameState, unitActionTable);
                    break;
                default:
                    throw new RuntimeException("Unknown EstimateType");
            }

            // candidate generation: either relax (drop weak agents, then enumerate
            // all combinations) or sample candidates from the distributions
            switch (relaxationType) {
                case MAX:
                case MEAN:
                case MEDIAN:
                case MAX_ENT:
                case MIN_ENT:
                    actionSet = stageChoosePlayerActionsAllRelaxation(distributions, player, gameState, unitActionTable);
                    break;
                default:
                    actionSet = stageChoosePlayerActionByDist(distributions, player, gameState, unitActionTable);
                    break;
            }
        }

        // evaluate joint actions and pick one
        switch (evaluateType) {
            case HALVING:
                //playerAction = stageEvaluateHalving(actionSet, player, gameState);
                playerAction = stageEvaluateHalvingFill(actionSet, player, gameState);
                break;
            case HALVING_ELITE:
                playerAction = stageEvaluateEliteHalving(actionSet, player, gameState);
                break;
            case BEST:
                playerAction = stageEvaluateBest(actionSet, player, gameState);
                break;
        }

        // post-relaxation (e.g., for LSI--): strip some units' actions from the
        // already-chosen joint action
        switch (relaxationType) {
            case POST_RANDOM:
                // indices are descending, so positional removal is safe
                List<Integer> indices = getRelaxedAgentIndicesRandom(unitActionTable);
                for (Integer index : indices) {
                    playerAction.getActions().remove((int) index);
                }
                break;
            case POST_ENTROPY_MAX:
            case POST_ENTROPY_MIN:
            case POST_MAX_DIFF:
            case POST_MAX_TIME_NORMALIZE:
                if (unitActionTable.size() - relaxationLimit >= 1) {
                    int noToRemove = unitActionTable.size() - relaxationLimit;

                    // get evaluations per units
                    List<Pair<Integer, Double>> evaluatedIndices = new ArrayList<>();
                    for (int i = 0; i < unitActionTable.size(); i++) {
                        double evaluator = 0;
                        switch (relaxationType) {
                            case POST_MAX_DIFF:
                                evaluator = sampling.difference(unitActionTable, distributions, playerAction, i);
                                break;
                            case POST_MAX_TIME_NORMALIZE:
                                // best weight per unit, discounted by the action's duration
                                UnitActionTableEntry entry = unitActionTable.get(i);
                                evaluator = Double.NEGATIVE_INFINITY;
                                for (int j = 0; j < entry.nactions; j++) {
                                    double duration = entry.actions.get(j).ETA(entry.u);
                                    double eval = distributions.get(entry.idx)[j] / duration;
                                    if (eval > evaluator) {
                                        evaluator = eval;
                                    }
                                }
                                break;
                            case POST_ENTROPY_MAX:
                            case POST_ENTROPY_MIN:
                                evaluator = sampling.entropy(distributions.get(i));
                                break;
                            default:
                                throw new RuntimeException("Unknown relaxationType!");
                        }
                        evaluatedIndices.add(new Pair<>(i, evaluator));
                    }

                    // sort the units by their evaluations (ascending or descending
                    // depending on whether a high value means "keep" or "drop")
                    switch (relaxationType) {
                        case POST_ENTROPY_MAX:
                        case POST_MAX_DIFF:
                        case POST_MAX_TIME_NORMALIZE:
                            evaluatedIndices.sort(new Comparator<Pair<Integer, Double>>() {
                                @Override
                                public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                                    return p1.m_b.compareTo(p2.m_b);
                                }
                            });
                            break;
                        case POST_ENTROPY_MIN:
                            evaluatedIndices.sort(new Comparator<Pair<Integer, Double>>() {
                                @Override
                                public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                                    return p2.m_b.compareTo(p1.m_b);
                                }
                            });
                            break;
                        default:
                            throw new RuntimeException("Unknown relaxationType!");
                    }

                    // remove the single-actions of the weakest; sort the dropped
                    // indices descending so positional removal stays valid
                    evaluatedIndices = evaluatedIndices.subList(0, noToRemove);
                    evaluatedIndices.sort(new Comparator<Pair<Integer, Double>>() {
                        @Override
                        public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                            return p2.m_a.compareTo(p1.m_a);
                        }
                    });
                    for (Pair<Integer, Double> index : evaluatedIndices) {
                        playerAction.getActions().remove((int) index.m_a);
                    }
                }
                break;
        }
    }

    // epochal unit subselections
    if (epochal) {
        // remove used units from current epoch
        for (Pair<Unit, UnitAction> actionPair : playerAction.getActions()) {
            epochUnits.remove(actionPair.m_a);
        }

        // if there are no more units in this epoch use the next one
        if (epochUnits.isEmpty()) {
            epochUnits = new HashSet<>(nextEpochUnits);
            nextEpochUnits.clear();
        }
    }

    // update stat counters
    if (DEBUG>=1) System.out.println("GEMC: " + sampling.getSimulationCount());
    incrementActionCounter(playerAction, unitActionTable);

    // resulting joint-action
    return playerAction;
}
/**
 * Generate stage: estimates a weight over each unit's actions by evaluating the
 * neighbours of the all-NOOP joint action (one unit acts, every other unit NOOPs).
 * The generate-stage budget (ITERATIONS_BUDGET * split) is divided evenly among
 * the VALID neighbours; evaluations are then shifted to positive weights, with
 * invalid actions pinned at weight 0 so they are never sampled.
 *
 * @return one weight array per unit, in unitActionTable order
 */
private List<double[]> stageGenerateNoopTail(int player, GameState gameState, List<UnitActionTableEntry> unitActionTable)
        throws Exception {
    PlayerAction currentPA = new PlayerAction();
    currentPA.fillWithNones(gameState, player, 10);

    // count valid noop-neighbours so the budget is split only among them
    int reducedActionCount = actionCount;
    int i = 0;
    for (UnitActionTableEntry entry : unitActionTable) {
        for (UnitAction action : entry.actions) {
            PlayerAction neighbourPA = currentPA.clone();
            neighbourPA.getActions().set(i, new Pair<>(entry.u, action));
            if (!isPlayerActionValid(gameState, neighbourPA)) {
                reducedActionCount--;
            }
        }
        i++;
    }

    // init --> sample (noop, ..., noop)'s neighbours
    List<double[]> distributions = new ArrayList<>();
    i = 0;
    for (UnitActionTableEntry entry : unitActionTable) {
        double[] distribution = new double[entry.nactions];
        int idx = 0;
        double min = Double.POSITIVE_INFINITY;
        for (UnitAction action : entry.actions) {
            PlayerAction neighbourPA = currentPA.clone();
            neighbourPA.getActions().set(i, new Pair<>(entry.u, action));
            if (isPlayerActionValid(gameState, neighbourPA)) {
                double eval = sampling.evaluatePlayerAction(player, gameState, neighbourPA,
                        (int) (ITERATIONS_BUDGET * split / reducedActionCount));
                distribution[idx] = eval;
                if (eval < min) min = eval;
            } else {
                // marking invalid unit action
                distribution[idx] = Double.NEGATIVE_INFINITY;
            }
            idx++;
        }
        // normalize: shift evaluations to be >= NORMALIZATION_EPSILON
        for (int j = 0; j < distribution.length; j++) {
            if (Double.isInfinite(distribution[j])) {
                // invalid unit action: keep probability 0 so it is never generated.
                // BUG FIX: this must chain via "else if" below — the original fell
                // through into the else branch, re-shifting the 0 by -(min - eps)
                // and giving invalid actions nonzero (possibly negative) weight.
                distribution[j] = 0;
            } else if (distribution[j] == Double.MIN_VALUE) {
                // this action was not sampled at all (forbidden one)
                // keep it with the lowest probability
                distribution[j] = NORMALIZATION_EPSILON;
            } else {
                distribution[j] -= min - NORMALIZATION_EPSILON;
            }
            if (distribution[j] < 0 || Double.isNaN(distribution[j])) {
                System.err.println("Negative/NaN distribution!");
            }
        }
        distributions.add(distribution);
        i++;
    }
    return distributions;
}
/**
 * Generate stage (default LSI): round-robin over every (unit, action) pair,
 * fixing that action and completing the joint action by sampling the remaining
 * units (uniformly — the actionDist weights stay 0, which the sampler treats as
 * uniform; TODO confirm against Sampling.generatePlayerActionGivenDist). Each
 * completed joint action is evaluated with one playout and folded into the
 * action table (SINGLE: only the fixed action's average; ALL: every unit's
 * chosen action). Stops after ITERATIONS_BUDGET * split samples, then shifts
 * each unit's accumulated evaluations to positive weights.
 *
 * NOTE(review): the returned arrays ALIAS entry.accum_evaluation — later
 * in-place mutation of the distributions (e.g., Arrays.sort in the relaxation
 * stage) also mutates the action table.
 *
 * @return one weight array per unit, in unitActionTable order
 */
private List<double[]> stageGenerateRandomTail(int player, GameState gameState, List<UnitActionTableEntry> unitActionTable)
        throws Exception {
    List<double[]> distributions = new ArrayList<>();

    // all-zero per-unit weights; a single 1 is planted temporarily to force
    // the sampler to pick the action currently being estimated
    List<double[]> actionDist = new ArrayList<>();
    for (UnitActionTableEntry entry : unitActionTable) {
        double[] armsDist = new double[entry.nactions];
        actionDist.add(armsDist);
        for (int i = 0; i < armsDist.length; i++) {
            armsDist[i] = 0;
        }
    }

    int sample = 0;
    boolean completeOnce = false;

    // round-robin
    roundrobin:
    while (true) {
        // over all agents
        int agentIndex = 0;
        for (UnitActionTableEntry entry : unitActionTable) {
            // over all actions of the agent
            int actionIndex = 0;
            for (UnitAction action : entry.actions) {
                PlayerAction neighbourPA = new PlayerAction();

                // TODO: simplify down here
                // generate random order of the agents with the current one as first
                List<Integer> agentOrder = new LinkedList<>();
                for (int i = 0; i < unitActionTable.size(); i++) {
                    if (i != agentIndex) {
                        agentOrder.add(i);
                    }
                }
                Collections.shuffle(agentOrder);
                agentOrder.add(0, agentIndex);

                // generate valid random action with uniform distribution (0s are fine, because of .wighted implementation)
                actionDist.get(agentIndex)[actionIndex] = 1;
                neighbourPA = sampling.generatePlayerActionGivenDist(unitActionTable, player, gameState, actionDist, null);
                actionDist.get(agentIndex)[actionIndex] = 0;

                // reorder the actions in neighbourPA to be the same as in unitActionTable
                PlayerAction orderedNeighbourPA = new PlayerAction();
                for (UnitActionTableEntry agentTableEntry : unitActionTable) {
                    for (Pair<Unit, UnitAction> neighbourPair : neighbourPA.getActions()) {
                        if (neighbourPair.m_a.equals(agentTableEntry.u)) {
                            orderedNeighbourPA.addUnitAction(neighbourPair.m_a, neighbourPair.m_b);
                        }
                    }
                }
                neighbourPA = orderedNeighbourPA;

                // checks
                if (!isPlayerActionValid(gameState, neighbourPA)) {
                    throw new RuntimeException("Should generate only valid combinations!");
                }

                // evaluate & store (one playout per sample)
                double eval = sampling.evaluatePlayerAction(player, gameState, neighbourPA, 1);
                switch (estimateReuseType) {
                    case SINGLE:
                        // depends on actions in playerAction in the same order as in unitActionTable
                        updateActionEvalSingle(unitActionTable, neighbourPA, agentIndex, eval);
                        break;
                    case ALL:
                        // depends on actions in playerAction in the same order as in unitActionTable
                        updateActionEvalAll(unitActionTable, neighbourPA, agentIndex, eval);
                        break;
                    default:
                        throw new RuntimeException("Unknown EstimateReusingType");
                }

                sample++;
                if (sample >= ITERATIONS_BUDGET * split) {
                    break roundrobin;
                }
                actionIndex++;
            }
            agentIndex++;
        }
        completeOnce = true;
    }
    //System.out.println("  G: " + sample);

    if (!completeOnce) {
        System.err.println("Generate did not complete even one round! " + sample + " >= (" + ITERATIONS_BUDGET + " * " + split + ")");
    }

    // shift accumulated evaluations so all weights are positive;
    // never-sampled actions (still Double.MIN_VALUE) handling depends on eliteReuse
    for (UnitActionTableEntry entry : unitActionTable) {
        double min = Double.POSITIVE_INFINITY;
        for (double accumEval : entry.accum_evaluation) {
            if (accumEval < min) min = accumEval;
        }
        for (int j = 0; j < entry.accum_evaluation.length; j++) {
            if (entry.accum_evaluation[j] == Double.MIN_VALUE) {
                // this action was not sampled at all (forbidden one)
                // keep it with the lowest probability
                if (eliteReuse) {
                    entry.accum_evaluation[j] = NORMALIZATION_EPSILON;
                } else {
                    entry.accum_evaluation[j] -= min - NORMALIZATION_EPSILON;
                }
            } else {
                entry.accum_evaluation[j] -= min - NORMALIZATION_EPSILON;
            }
            if (entry.accum_evaluation[j] < 0 || Double.isNaN(entry.accum_evaluation[j])) {
                System.err.println("Negative/NaN distribution!");
            }
        }
        distributions.add(entry.accum_evaluation);
    }

    return distributions;
}
/**
 * Generate stage for pure random sampling: produces an all-zero weight array
 * per unit without spending any playouts.
 * NOTE(review): presumably the sampler treats all-zero weights as uniform —
 * confirm against Sampling.generatePlayerActionGivenDist.
 */
private List<double[]> stageGenerateRandom(int player, GameState gameState, List<UnitActionTableEntry> unitActionTable)
        throws Exception {
    List<double[]> result = new ArrayList<>(unitActionTable.size());
    for (UnitActionTableEntry tableEntry : unitActionTable) {
        result.add(new double[tableEntry.nactions]); // zero-filled by default
    }
    return result;
}
/**
 * Generate stage variant: like stageGenerateRandomTail, but completes the joint
 * action by drawing the other units' actions uniformly at random WITHOUT a
 * validity-aware sampler — invalid combinations are simply discarded (and do
 * not consume budget). When eliteReuse is on, every valid sampled joint action
 * is remembered with its running-average evaluation in elitePlayerActions for
 * later reuse by stageEvaluateEliteHalving.
 *
 * NOTE(review): `sample` only advances on VALID combinations — if valid
 * combinations are rare this loop can spin for a long time; confirm this is
 * acceptable for the intended maps.
 *
 * @return one weight array per unit (aliasing entry.accum_evaluation)
 */
private List<double[]> stageGenerateRandomTailElite(int player, GameState gameState, List<UnitActionTableEntry> unitActionTable)
        throws Exception {
    List<double[]> distributions = new ArrayList<>();

    int sample = 0;

    // round-robin
    while (sample < ITERATIONS_BUDGET * split) {
        // over all agents
        int agentIndex = 0;
        for (UnitActionTableEntry entry : unitActionTable) {
            // over all actions of the agent
            for (UnitAction action : entry.actions) {
                // random tail: uniform random action per other unit, then pin
                // the (agentIndex, action) pair under estimation
                PlayerAction neighbourPA = new PlayerAction();
                for (UnitActionTableEntry rndEntry : unitActionTable) {
                    neighbourPA.addUnitAction(rndEntry.u, rndEntry.actions.get(rnd.nextInt(rndEntry.nactions)));
                }
                neighbourPA.getActions().set(agentIndex, new Pair<>(entry.u, action));

                if (isPlayerActionValid(gameState, neighbourPA)) {
                    sample++;
                    double eval = sampling.evaluatePlayerAction(player, gameState, neighbourPA, 1);

                    if (eliteReuse) {
                        // store elite candidates (running average keyed by joint action)
                        if (elitePlayerActions.containsKey(neighbourPA)) {
                            Pair<Double, Integer> evalPair = elitePlayerActions.get(neighbourPA);
                            double newEval = (evalPair.m_a * evalPair.m_b + eval) / (evalPair.m_b + 1);
                            elitePlayerActions.put(neighbourPA, new Pair<>(newEval, evalPair.m_b + 1));
                        } else {
                            elitePlayerActions.put(neighbourPA, new Pair<>(eval, 1));
                        }
                    }

                    switch (estimateReuseType) {
                        case SINGLE:
                            updateActionEvalSingle(unitActionTable, neighbourPA, agentIndex, eval);
                            break;
                        case ALL:
                            updateActionEvalAll(unitActionTable, neighbourPA, agentIndex, eval);
                            break;
                        default:
                            throw new RuntimeException("Unknown EstimateReusingType");
                    }
                }
            }
            agentIndex++;
        }
    }

    // shift accumulated evaluations so all weights are positive
    for (UnitActionTableEntry entry : unitActionTable) {
        double min = Double.POSITIVE_INFINITY;
        for (double accumEval : entry.accum_evaluation) {
            if (accumEval < min) min = accumEval;
        }
        for (int j = 0; j < entry.accum_evaluation.length; j++) {
            entry.accum_evaluation[j] -= min - NORMALIZATION_EPSILON;
            if (entry.accum_evaluation[j] < 0 || Double.isNaN(entry.accum_evaluation[j])) {
                System.err.println("Negative/NaN distribution!");
            }
        }
        distributions.add(entry.accum_evaluation);
    }

    return distributions;
}
/**
 * Folds a playout evaluation into the running average of the ONE action the
 * given agent played. Requires playerAction's actions to be in the same order
 * as unitActionTable (position agentIndex holds that agent's action).
 *
 * @param unitActionTable per-unit action table with accumulators
 * @param playerAction    the evaluated joint action
 * @param agentIndex      position of the agent whose estimate is updated
 * @param eval            playout evaluation to fold in
 */
private void updateActionEvalSingle(List<UnitActionTableEntry> unitActionTable, PlayerAction playerAction, int agentIndex, double eval) {
    UnitActionTableEntry agentEntry = unitActionTable.get(agentIndex);
    UnitAction chosen = playerAction.getActions().get(agentIndex).m_b;
    for (int i = 0; i < agentEntry.actions.size(); i++) {
        if (agentEntry.actions.get(i).equals(chosen)) {
            int visits = agentEntry.visit_count[i];
            // incremental running average
            agentEntry.accum_evaluation[i] = (agentEntry.accum_evaluation[i] * visits + eval) / (visits + 1);
            agentEntry.visit_count[i] = visits + 1;
        }
    }
}
/**
 * Folds a playout evaluation into the running averages of EVERY agent's chosen
 * action (full estimate reuse). Requires playerAction's actions to be in the
 * same order as unitActionTable.
 *
 * @param agentIndex ignored; kept for signature symmetry with updateActionEvalSingle
 */
private void updateActionEvalAll(List<UnitActionTableEntry> unitActionTable, PlayerAction playerAction, int agentIndex, double eval) {
    int position = 0;
    for (UnitActionTableEntry agentEntry : unitActionTable) {
        UnitAction chosen = playerAction.getActions().get(position).m_b;
        for (int i = 0; i < agentEntry.actions.size(); i++) {
            if (agentEntry.actions.get(i).equals(chosen)) {
                int visits = agentEntry.visit_count[i];
                // incremental running average
                agentEntry.accum_evaluation[i] = (agentEntry.accum_evaluation[i] * visits + eval) / (visits + 1);
                agentEntry.visit_count[i] = visits + 1;
            }
        }
        position++;
    }
}
/**
 * Relaxation-based candidate generation: scores each agent's distribution with
 * the configured statistic (MAX / MEAN / MEDIAN / entropy), drops all but the
 * relaxationLimit best-scoring agents, and enumerates ALL combinations of the
 * remaining agents' actions as the candidate pool.
 *
 * NOTE(review): Arrays.sort(distribution) for MAX/MEDIAN sorts the weight
 * arrays IN PLACE; since these arrays can alias entry.accum_evaluation (see
 * stageGenerateRandomTail), the action-index-to-weight correspondence is
 * destroyed for later consumers — confirm this is intended.
 */
private Set<PlayerAction> stageChoosePlayerActionsAllRelaxation(List<double[]> distributions, int player, GameState gameState,
        List<UnitActionTableEntry> unitActionTable) throws Exception {
    if (relaxationLimit > 0 && unitActionTable.size() - relaxationLimit >= 1) {
        // score each agent's distribution
        List<Pair<Integer, Double>> choseActList = new LinkedList<>();
        for(int j = 0; j < distributions.size(); j++) {
            double [] distribution = distributions.get(j);
            double value;
            switch (relaxationType) {
                case MAX:
                    Arrays.sort(distribution);
                    value = distribution[distribution.length - 1];
                    break;
                case MEAN:
                    double sum = 0;
                    for (double val : distribution) {
                        sum += val;
                    }
                    value = sum / distribution.length;
                    break;
                case MEDIAN:
                    Arrays.sort(distribution);
                    if (distribution.length % 2 == 0) {
                        value = (distribution[distribution.length / 2] + distribution[distribution.length / 2 -1]) / 2.0;
                    } else {
                        value = distribution[(int) Math.floor(distribution.length / 2)];
                    }
                    break;
                case MAX_ENT:
                    value = sampling.entropy(distribution);
                    break;
                case MIN_ENT:
                    value = 1 / sampling.entropy(distribution);
                    //the distributions are normalized with a NORMALIZATION_EPSILON offset => entropy != 0
                    break;
                default:
                    throw new RuntimeException("Unknown RelaxationType!");
            }
            choseActList.add(new Pair<>(j, value));
        }

        // ascending by score: the prefix (lowest scores) is dropped
        choseActList.sort(new Comparator<Pair<Integer, Double>>() {
            @Override
            public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                double eval1 = p1.m_b;
                double eval2 = p2.m_b;
                return Double.compare(eval1, eval2);
            }
        });
        choseActList = choseActList.subList(0, choseActList.size() - relaxationLimit);

        // descending by index so positional removal stays valid
        choseActList.sort(new Comparator<Pair<Integer, Double>>() {
            @Override
            public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                double eval1 = p1.m_a;
                double eval2 = p2.m_a;
                return Double.compare(eval2, eval1);
            }
        });
        for (Pair<Integer, Double> pair : choseActList) {
            unitActionTable.remove((int) pair.m_a);
        }
    }

    return sampling.generatePlayerActionAll(unitActionTable, player, gameState, false);
}
/**
 * Samples the candidate pool for the evaluate stage. The pool size is grown
 * until sequential halving would allot exactly one playout per candidate per
 * layer, i.e. budget / count / ceil(log2(count)) == 1; then `count` candidates
 * are drawn from the estimated distributions (PER_AGENT: independently per
 * unit; ONE_DIST: from a single joint distribution). Duplicates collapse in
 * the returned set.
 *
 * NOTE(review): the local `actionCount` shadows the field of the same name;
 * also, if the expression never hits exactly 1 (e.g. budget of 0) this do/while
 * does not terminate — confirm ITERATIONS_BUDGET * (1 - split) is always
 * large enough.
 */
private Set<PlayerAction> stageChoosePlayerActionByDist(List<double[]> distributions, int player, GameState gameState,
        List<UnitActionTableEntry> unitActionTable) throws Exception {
    int budget = (int) (ITERATIONS_BUDGET * (1 - split));

    // find the pool size that saturates the halving budget
    int actionCount = 1;
    do {
        actionCount++;
    } while ((int) (budget / actionCount / Math.ceil(Sampling.log(actionCount, 2))) != 1);

    // TODO: should be map
    Set<PlayerAction> actionSet = new HashSet<>();
    for (int r = 0; r < actionCount; r++) {
        PlayerAction playerAction;
        switch (generateType) {
            case PER_AGENT:
                playerAction = sampling.generatePlayerActionGivenDist(unitActionTable, player, gameState, distributions, null);
                break;
            case ONE_DIST:
                playerAction = sampling.generatePlayerActionOneDist(unitActionTable, player, gameState, distributions);
                break;
            default:
                throw new RuntimeException("Unkonwn GenerateType");
        }
        if (!actionSet.contains(playerAction)) {
            actionSet.add(playerAction);
        }
    }
    return actionSet;
}
/**
 * Evaluate stage: sequential halving over the candidate pool. In each of
 * floor(log2(n)) rounds every surviving candidate receives an equal share of
 * playouts and the worse half is eliminated; the leftover ("residue") budget
 * is spread over the early rounds. Returns the best surviving candidate.
 *
 * NOTE(review): this variant appears unused — getAction calls
 * stageEvaluateHalvingFill instead; kept for reference/experiments.
 */
private PlayerAction stageEvaluateHalving(Set<PlayerAction> actionSet, int player, GameState gameState) throws Exception {
    int budget = (int) (ITERATIONS_BUDGET * (1 - split));

    List<Pair<PlayerAction, Double>> actionList = new LinkedList<>();
    for (PlayerAction playerAction : actionSet) {
        actionList.add(new Pair<>(playerAction, 0.0));
    }
    actionCount = actionList.size();

    // pre-compute how much of the budget an even split would leave unused
    double log2ceil = Math.ceil(Sampling.log(actionCount, 2));
    int rSup = log2int(actionCount);
    int residueActionCount = actionCount;
    int residueSampleCount = 0;
    for (int r = 0; r < rSup; r++) {
        int sampleCount = (int) (budget / residueActionCount / log2ceil);
        residueSampleCount += sampleCount * residueActionCount;
        residueActionCount /= 2;
    }
    int residue = budget - residueSampleCount;

    // halving rounds; the final round consumes whatever budget remains
    int sampleCountSum = 0;
    for (int r = 0; r < rSup - 1; r++) {
        int sampleCount = (int) (budget / actionList.size() / log2ceil);
        sampleCount += residue / actionList.size();
        residue -= residue / actionList.size() * actionList.size();
        actionList = sampling.halvedOriginalSampling(actionList, gameState, player, sampleCount, sampleCountSum);
        sampleCountSum += sampleCount;
    }
    actionList = sampling.halvedOriginalSampling(actionList, gameState, player,
            (budget - sampling.getSimulationCount()) / actionList.size(), sampleCountSum);

    if (DEBUG>=1) System.out.println("GEMC H " + ITERATIONS_BUDGET + " " + actionList.get(0).m_b + " " + sampleCountSum);
    return actionList.get(0).m_a;
}
/**
 * Evaluate stage actually used by getAction: sequential halving over the
 * candidate pool with "fill" semantics (halvedOriginalSamplingFill). Runs
 * floor(log2(n)) rounds, each giving every surviving candidate an equal share
 * of playouts plus a share of the residue budget, then keeping the better
 * half. Returns the candidate ranked first after the last round.
 */
private PlayerAction stageEvaluateHalvingFill(Set<PlayerAction> actionSet, int player, GameState gameState) throws Exception {
    int budget = (int) (ITERATIONS_BUDGET * (1 - split));

    List<Pair<PlayerAction, Double>> actionList = new LinkedList<>();
    for (PlayerAction playerAction : actionSet) {
        actionList.add(new Pair<>(playerAction, 0.0));
    }
    actionCount = actionList.size();

    // pre-compute the unused ("residue") part of an even budget split
    int noOfLayers = log2int(actionCount);
    int residueActionCount = actionCount;
    int residueSampleCount = 0;
    for (int r = 0; r < noOfLayers; r++) {
        int sampleCount = (int) (budget / residueActionCount / noOfLayers);
        residueSampleCount += sampleCount * residueActionCount;
        residueActionCount /= 2;
    }
    int residue = budget - residueSampleCount;

    // halving layers
    int sampleCountSum = 0;
    for (int r = 0; r < noOfLayers; r++) {
        int sampleCount = (int) (budget / actionList.size() / noOfLayers);
        sampleCount += residue / actionList.size();
        residue -= residue / actionList.size() * actionList.size();
        actionList = sampling.halvedOriginalSamplingFill(actionList, gameState, player, sampleCount, sampleCountSum);
        sampleCountSum += sampleCount;
    }

    if (DEBUG>=1) System.out.println("GEMC H " + ITERATIONS_BUDGET + " " + actionList.get(0).m_b + " " + sampleCountSum);
    return actionList.get(0).m_a;
}
/**
 * Evaluate stage with elite reuse: before halving, the best joint actions
 * remembered during the generate stage (elitePlayerActions, with their
 * running-average evaluations and visit counts) are merged into the candidate
 * pool, replacing duplicates so their estimates are not discarded. Then runs
 * sequential halving (halvedSampling) and returns the top-ranked candidate.
 */
private PlayerAction stageEvaluateEliteHalving(Set<PlayerAction> actionSet, int player, GameState gameState) throws Exception {
    // generate combinations
    int budget = (int) (ITERATIONS_BUDGET * (1 - split));

    List<Pair<PlayerAction, Pair<Double, Integer>>> actionList = new LinkedList<>();
    for (PlayerAction playerAction : actionSet) {
        actionList.add(new Pair<>(playerAction, new Pair<>(0.0, 0)));
    }

    if (eliteReuse) {
        // include elite combinations from estimation phase, best average first
        List<Entry<PlayerAction, Pair<Double, Integer>>> eliteEntries =
                new ArrayList<>(elitePlayerActions.entrySet());
        eliteEntries.sort(new Comparator<Entry<PlayerAction, Pair<Double, Integer>>>() {
            @Override
            public int compare(Entry<PlayerAction, Pair<Double, Integer>> e1,
                    Entry<PlayerAction, Pair<Double, Integer>> e2) {
                double eval1 = e1.getValue().m_a / e1.getValue().m_b;
                double eval2 = e2.getValue().m_a / e2.getValue().m_b;
                return Double.compare(eval2, eval1);
            }
        });
        while(!eliteEntries.isEmpty()) {
            Entry<PlayerAction, Pair<Double, Integer>> eliteEntry = eliteEntries.remove(0);
            if (actionSet.contains(eliteEntry.getKey())) {
                // duplicate: drop the zero-statistics copy, keep the elite one
                for (Iterator<Pair<PlayerAction, Pair<Double, Integer>>> iterator = actionList.iterator(); iterator.hasNext();) {
                    Pair<PlayerAction, Pair<Double, Integer>> searchEntry = iterator.next();
                    if (searchEntry.m_a.equals(eliteEntry.getKey())) {
                        iterator.remove();
                        break;
                    }
                }
            } else {
                actionSet.add(eliteEntry.getKey());
            }
            actionList.add(new Pair<>(eliteEntry.getKey(), eliteEntry.getValue()));
            if (actionList.size() >= actionCount) {
                break;
            }
        }
    }

    // halving: even budget split per layer plus residue, as in stageEvaluateHalving
    actionCount = actionList.size();
    double log2ceil = Math.ceil(Sampling.log(actionCount, 2));
    int rSup = log2int(actionCount);
    int residueActionCount = actionCount;
    int residueSampleCount = 0;
    for (int r = 0; r < rSup; r++) {
        int sampleCount = (int) (budget / residueActionCount / log2ceil);
        residueSampleCount += sampleCount * residueActionCount;
        residueActionCount /= 2;
    }
    int residue = budget - residueSampleCount;

    for (int r = 0; r < rSup - 1; r++) {
        int sampleCount = (int) (budget / actionList.size() / log2ceil);
        sampleCount += residue / actionList.size();
        residue -= residue / actionList.size() * actionList.size();
        actionList = sampling.halvedSampling(actionList, gameState, player, sampleCount);
    }
    // last round: spend whatever remains of the whole iteration budget
    actionList = sampling.halvedSampling(actionList, gameState, player,
            (ITERATIONS_BUDGET - sampling.getSimulationCount()) / actionList.size());

    if (DEBUG>=1) System.out.println("GEMC H " + ITERATIONS_BUDGET + " " + actionList.get(0).m_b);
    return actionList.get(0).m_a;
}
/**
 * Evaluate stage, greedy best-arm variant: seeds every candidate with one
 * playout, then repeatedly re-sorts the pool by average evaluation and gives
 * one more playout to the current best until the budget is spent. Returns the
 * head of the list (best average after the final sort).
 *
 * NOTE(review): the full sort on every iteration makes this
 * O(budget * k log k); a max-scan would suffice — left as-is because the final
 * selection relies on the list order produced by the last sort.
 */
private PlayerAction stageEvaluateBest(Set<PlayerAction> actionSet, int player, GameState gameState) throws Exception {
    int budget = (int) (ITERATIONS_BUDGET * (1 - split));

    List<Pair<PlayerAction, Pair<Double, Integer>>> actionList = new LinkedList<>();
    for (PlayerAction playerAction : actionSet) {
        actionList.add(new Pair<>(playerAction, new Pair<>(0.0, 0)));
    }
    actionCount = actionList.size();

    // seed: one playout per candidate (m_b.m_a = evaluation sum, m_b.m_b = visits)
    for (Pair<PlayerAction, Pair<Double, Integer>> pair : actionList) {
        double eval = sampling.evaluatePlayerAction(player, gameState, pair.m_a, 1);
        pair.m_b.m_a += eval;
        pair.m_b.m_b++;
    }

    // spend the rest of the budget on the current best candidate
    for (int i = 0; i <= budget - actionCount; i++) {
        actionList.sort(new Comparator<Pair<PlayerAction, Pair<Double, Integer>>>() {
            @Override
            public int compare(Pair<PlayerAction, Pair<Double, Integer>> p1, Pair<PlayerAction, Pair<Double, Integer>> p2) {
                double eval1 = p1.m_b.m_a / p1.m_b.m_b;
                double eval2 = p2.m_b.m_a / p2.m_b.m_b;
                return Double.compare(eval2, eval1);
            }
        });
        double eval = sampling.evaluatePlayerAction(player, gameState, actionList.get(0).m_a, 1);
        actionList.get(0).m_b.m_a += eval;
        actionList.get(0).m_b.m_b++;
    }

    if (DEBUG>=1) System.out.println("GEMC B " + ITERATIONS_BUDGET + " " + actionList.get(0).m_b);
    return actionList.get(0).m_a;
}
/**
 * Updates the aggregate counters reported by statisticsString() after a move:
 * number of plays, noops in the chosen action, playouts spent, units moved,
 * and the total number of candidate unit actions.
 */
private void incrementActionCounter(PlayerAction playerAction, List<UnitActionTableEntry> unitActionTableEntry) {
    if (DEBUG >= 1) {
        System.out.println("Selected action has " + playerAction.hasNamNoneActions() + " Noops.");
    }
    nofPlays++;
    nofNoops += playerAction.hasNamNoneActions();
    nofSamples += sampling.getSimulationCount();
    nofPlayedUnits += playerAction.getActions().size();
    for (UnitActionTableEntry tableEntry : unitActionTableEntry) {
        nofActions += tableEntry.nactions;
    }
}
/**
 * Floor of the base-2 logarithm of {@code n}.
 *
 * @param n positive integer
 * @throws IllegalArgumentException if {@code n <= 0}
 */
private int log2int(int n) {
    if (n <= 0) {
        throw new IllegalArgumentException();
    }
    int log = 0;
    while ((n >>= 1) != 0) {
        log++;
    }
    return log;
}
/**
 * Checks whether a candidate joint action is resource-consistent with the
 * current state: the resources it needs must not clash with those already
 * committed by actions executing in the state.
 * Side effect: stores the computed resource usage on {@code playerAction}.
 */
private boolean isPlayerActionValid(GameState gs, PlayerAction playerAction) {
    PhysicalGameState pgs = gs.getPhysicalGameState();

    // resources already committed by actions currently executing in the state
    ResourceUsage committed = new ResourceUsage();
    for (Unit unit : pgs.getUnits()) {
        UnitActionAssignment assignment = gs.getUnitActions().get(unit);
        if (assignment != null) {
            committed.merge(assignment.action.resourceUsage(unit, pgs));
        }
    }

    // resources the candidate joint action would consume
    ResourceUsage required = new ResourceUsage();
    for (Pair<Unit, UnitAction> pair : playerAction.getActions()) {
        required.merge(pair.m_b.resourceUsage(pair.m_a, pgs));
    }

    playerAction.setResourceUsage(required);
    return playerAction.consistentWith(committed, gs);
}
/**
 * Builds the per-unit action table: one entry per unit the player can move,
 * holding its candidate actions plus accumulator arrays for sampling.
 * Side effect: resets the {@code actionCount} field to the total number of
 * unit actions across all entries.
 */
private List<UnitActionTableEntry> prepareUnitActionTable(GameState gameState, int player) throws Exception {
    List<UnitActionTableEntry> table = new ArrayList<>();
    actionCount = 0;

    PlayerActionGenerator generator = new PlayerActionGenerator(gameState, player);
    int index = 0;
    for (Pair<Unit, List<UnitAction>> choice : generator.getChoices()) {
        UnitActionTableEntry entry = new UnitActionTableEntry();
        entry.idx = index;
        entry.u = choice.m_a;
        entry.actions = choice.m_b;
        entry.nactions = choice.m_b.size();
        entry.accum_evaluation = new double[entry.nactions];
        entry.visit_count = new int[entry.nactions]; // zero-initialized by Java
        // Double.MIN_VALUE marks "never sampled" for the normalization step
        Arrays.fill(entry.accum_evaluation, Double.MIN_VALUE);
        table.add(entry);
        index++;
        actionCount += entry.nactions;
    }

    return table;
}
/**
 * Uniformly picks which agents to drop so that at most {@code relaxationLimit}
 * remain. The dropped indices are returned in DESCENDING order so callers can
 * remove them from lists by position without invalidating later indices.
 *
 * @return descending list of table indices to drop (empty if nothing to drop)
 */
private List<Integer> getRelaxedAgentIndicesRandom(List<UnitActionTableEntry> unitActionTable) {
    int noToRemove = unitActionTable.size() - relaxationLimit;
    List<Integer> indices = new ArrayList<>();
    if (noToRemove > 0) {
        for (int i = 0; i < unitActionTable.size(); i++) {
            indices.add(i);
        }
        Collections.shuffle(indices);
        indices = indices.subList(0, noToRemove);
        // descending == ascending sort followed by reversal
        indices.sort(Collections.reverseOrder());
    }
    return indices;
}
/**
 * Debug dump of the per-unit action table (visit counts and average
 * evaluations) followed by the aggregated joint-action statistics.
 */
public void printState(List<UnitActionTableEntry> unitActionTable,
        HashMap<Long, PlayerActionTableEntry> playerActionTable) {
    System.out.println("Unit actions table:");
    for (UnitActionTableEntry entry : unitActionTable) {
        System.out.println("Actions for unit " + entry.u);
        for (int j = 0; j < entry.nactions; j++) {
            double avg = entry.accum_evaluation[j] / entry.visit_count[j];
            System.out.println("   " + entry.actions.get(j) + " visited " + entry.visit_count[j]
                    + " with average evaluation " + avg);
        }
    }
    System.out.println("Player actions:");
    for (PlayerActionTableEntry tableEntry : playerActionTable.values()) {
        System.out.println(tableEntry.pa + " visited " + tableEntry.visit_count + " with average evaluation "
                + (tableEntry.accum_evaluation / tableEntry.visit_count));
    }
}
/** Human-readable summary of the full LSI configuration. */
public String toString() {
    StringBuilder sb = new StringBuilder(getClass().getSimpleName());
    sb.append("(").append(ITERATIONS_BUDGET).append(", ").append(lookAhead)
      .append(", ").append(split).append(", ").append(estimateType)
      .append(", ").append(estimateReuseType).append(", ").append(generateType)
      .append(", ").append(agentOrderingType).append(", ").append(evaluateType)
      .append(", ").append(eliteReuse).append(", ").append(relaxationType)
      .append(", ").append(relaxationLimit).append(", ").append(epochal)
      .append(", ").append(simulationAi).append(", ").append(evalFunction)
      .append(")");
    return sb.toString();
}
/**
 * Tab-separated aggregate statistics: plays, noops, playouts, average units
 * moved per play, and average candidate actions per play.
 */
public String statisticsString() {
    double plays = (double) nofPlays;
    return nofPlays + "\t" + nofNoops + "\t" + nofSamples
            + "\t" + (nofPlayedUnits / plays)
            + "\t" + (nofActions / plays);
}
    /**
     * Declares the tunable parameters of this AI (name, type, default, and
     * the allowed values for the enum-typed ones) so that GUIs/experiment
     * runners can configure it generically.
     *
     * @return the list of parameter specifications, in declaration order
     */
    @Override
    public List<ParameterSpecification> getParameters()
    {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,500));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        // Split is a ratio, constrained to [0, 1].
        ParameterSpecification ps_split = new ParameterSpecification("Split",double.class,0.25);
        ps_split.setRange(0.0, 1.0);
        parameters.add(ps_split);
        ParameterSpecification ps_et = new ParameterSpecification("EstimateType",EstimateType.class,EstimateType.RANDOM_TAIL);
        ps_et.addPossibleValue(EstimateType.RANDOM_TAIL);
        ps_et.addPossibleValue(EstimateType.RANDOM_TAIL_ELITE);
        ps_et.addPossibleValue(EstimateType.NOOP_TAIL);
        ps_et.addPossibleValue(EstimateType.RANDOM);
        ps_et.addPossibleValue(EstimateType.ALL_COMBINATIONS);
        parameters.add(ps_et);
        ParameterSpecification ert_et = new ParameterSpecification("EstimateReuseType",EstimateReuseType.class,EstimateReuseType.ALL);
        ert_et.addPossibleValue(EstimateReuseType.ALL);
        ert_et.addPossibleValue(EstimateReuseType.SINGLE);
        parameters.add(ert_et);
        ParameterSpecification gt_et = new ParameterSpecification("GenerateType",GenerateType.class,GenerateType.PER_AGENT);
        gt_et.addPossibleValue(GenerateType.ONE_DIST);
        gt_et.addPossibleValue(GenerateType.PER_AGENT);
        parameters.add(gt_et);
        ParameterSpecification aot_et = new ParameterSpecification("AgentOrderingType",Sampling.AgentOrderingType.class,Sampling.AgentOrderingType.ENTROPY);
        aot_et.addPossibleValue(Sampling.AgentOrderingType.RANDOM);
        aot_et.addPossibleValue(Sampling.AgentOrderingType.ENTROPY);
        parameters.add(aot_et);
        ParameterSpecification et_et = new ParameterSpecification("EvaluateType",EvaluateType.class,EvaluateType.HALVING);
        et_et.addPossibleValue(EvaluateType.HALVING);
        et_et.addPossibleValue(EvaluateType.HALVING_ELITE);
        et_et.addPossibleValue(EvaluateType.BEST);
        parameters.add(et_et);
        parameters.add(new ParameterSpecification("EliteReuse",boolean.class,false));
        // RelaxationType exposes every enum constant as a possible value.
        ParameterSpecification rt_et = new ParameterSpecification("RelaxationType",RelaxationType.class,RelaxationType.NONE);
        rt_et.addPossibleValue(RelaxationType.NONE);
        rt_et.addPossibleValue(RelaxationType.PRE_RANDOM);
        rt_et.addPossibleValue(RelaxationType.EPOCH);
        rt_et.addPossibleValue(RelaxationType.MAX);
        rt_et.addPossibleValue(RelaxationType.MEAN);
        rt_et.addPossibleValue(RelaxationType.MEDIAN);
        rt_et.addPossibleValue(RelaxationType.MAX_ENT);
        rt_et.addPossibleValue(RelaxationType.MIN_ENT);
        rt_et.addPossibleValue(RelaxationType.POST_RANDOM);
        rt_et.addPossibleValue(RelaxationType.POST_ENTROPY_MAX);
        rt_et.addPossibleValue(RelaxationType.POST_ENTROPY_MIN);
        rt_et.addPossibleValue(RelaxationType.POST_MAX_DIFF);
        rt_et.addPossibleValue(RelaxationType.POST_MAX_TIME_NORMALIZE);
        parameters.add(rt_et);
        parameters.add(new ParameterSpecification("RelaxationLimit",int.class,2));
        // Epochal/SimulationAI defaults reflect this instance's current configuration.
        parameters.add(new ParameterSpecification("Epochal",boolean.class,epochal));
        parameters.add(new ParameterSpecification("SimulationAI",AI.class,simulationAi));
        parameters.add(new ParameterSpecification("EvaluationFunction",EvaluationFunction.class,new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }
    // --- accessors for the parameters declared in getParameters() ---

    /** Playout lookahead (game frames) used during simulations. */
    public int getPlayoutLookahead() {
        return lookAhead;
    }
    public void setPlayoutLookahead(int a_pola) {
        lookAhead = a_pola;
    }
    /** Split ratio; constrained to [0,1] in getParameters(). */
    public double getSplit() {
        return split;
    }
    public void setSplit(double a_split) {
        split = a_split;
    }
    /** How candidate actions are estimated (see EstimateType). */
    public EstimateType getEstimateType() {
        return estimateType;
    }
    public void setEstimateType(EstimateType a) {
        estimateType = a;
    }
    /** How previous estimates are reused (ALL or SINGLE). */
    public EstimateReuseType getEstimateReuseType() {
        return estimateReuseType;
    }
    public void setEstimateReuseType(EstimateReuseType a){
        estimateReuseType = a;
    }
    /** How player actions are generated (ONE_DIST or PER_AGENT). */
    public GenerateType getGenerateType() {
        return generateType;
    }
    public void setGenerateType(GenerateType a) {
        generateType = a;
    }
    /** Order in which agents are sampled (RANDOM or ENTROPY). */
    public AgentOrderingType getAgentOrderingType() {
        return agentOrderingType;
    }
    public void setAgentOrderingType(AgentOrderingType a) {
        agentOrderingType = a;
    }
    /** How the candidate set is evaluated/pruned (see EvaluateType). */
    public EvaluateType getEvaluateType() {
        return evaluateType;
    }
    public void setEvaluateType(EvaluateType a) {
        evaluateType = a;
    }
    /** Whether elite candidates are reused across iterations. */
    public boolean getEliteReuse() {
        return eliteReuse;
    }
    public void setEliteReuse(boolean a) {
        eliteReuse = a;
    }
    /** Agent-relaxation strategy (see RelaxationType). */
    public RelaxationType getRelaxationType() {
        return relaxationType;
    }
    public void setRelaxationType(RelaxationType a) {
        relaxationType = a;
    }
    /** Maximum number of agents kept when relaxation is applied. */
    public int getRelaxationLimit() {
        return relaxationLimit;
    }
    public void setRelaxationLimit(int a) {
        relaxationLimit = a;
    }
    /** Epochal mode flag. */
    public boolean getEpochal() {
        return epochal;
    }
    public void setEpochal(boolean a) {
        epochal = a;
    }
    /** AI used to control both players during playouts. */
    public AI getSimulationAI() {
        return simulationAi;
    }
    public void setSimulationAI(AI a) {
        simulationAi = a;
    }
    /** Evaluation function applied at the end of playouts. */
    public EvaluationFunction getEvaluationFunction() {
        return evalFunction;
    }
    public void setEvaluationFunction(EvaluationFunction a_ef) {
        evalFunction = a_ef;
    }
    // Configuration enums; the exact semantics of each constant are implemented
    // in the sampling/evaluation code earlier in this class.

    /** Strategy used to estimate candidate action values. */
    public enum EstimateType {
        RANDOM_TAIL, RANDOM_TAIL_ELITE, NOOP_TAIL, RANDOM, ALL_COMBINATIONS
    }
    /** Whether estimates are shared across all candidates or kept per candidate. */
    public enum EstimateReuseType {
        ALL, SINGLE
    }
    /** Whether player actions are sampled from one joint distribution or per agent. */
    public enum GenerateType {
        ONE_DIST, PER_AGENT
    }
    /** Candidate evaluation/pruning scheme (sequential halving variants or best-only). */
    public enum EvaluateType {
        HALVING, HALVING_ELITE, BEST
    }
    /**
     * Agent-relaxation scheme: NONE disables relaxation; PRE_* variants relax
     * before sampling, POST_* variants after.
     */
    public enum RelaxationType {
        NONE,
        PRE_RANDOM, EPOCH,
        MAX, MEAN, MEDIAN, MAX_ENT, MIN_ENT,
        POST_RANDOM, POST_ENTROPY_MAX, POST_ENTROPY_MIN, POST_MAX_DIFF, POST_MAX_TIME_NORMALIZE
    }
    /**
     * Bookkeeping record for a sampled player action: the joint action, the
     * per-unit action indices it was built from, and its accumulated
     * evaluation statistics (see printState()).
     */
    class PlayerActionTableEntry {
        long code;                  // hash/key identifying this joint action
        int selectedUnitActions[];  // per-unit action index chosen for each agent
        PlayerAction pa;            // the joint player action itself
        float accum_evaluation;     // sum of playout evaluations
        int visit_count;            // number of playouts of this action
    }
}
| 51,591 | 39.369327 | 164 | java |
MicroRTS | MicroRTS-master/src/ai/montecarlo/lsi/Sampling.java | /*
* This class was contributed by: Antonin Komenda, Alexander Shleyfman and Carmel Domshlak
*/
package ai.montecarlo.lsi;
import util.CartesianProduct;
import ai.core.AI;
import ai.evaluation.EvaluationFunction;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import rts.GameState;
import rts.PhysicalGameState;
import rts.PlayerAction;
import rts.ResourceUsage;
import rts.UnitAction;
import rts.UnitActionAssignment;
import rts.units.Unit;
import util.Pair;
import util.Sampler;
/**
 * Sampling utilities for LSI: playout-based evaluation of player actions and
 * several strategies for building resource-consistent joint player actions
 * from per-unit action distributions.
 * Contributed by Antonin Komenda, Alexander Shleyfman and Carmel Domshlak.
 */
public class Sampling {
    // Order in which agents are sampled (RANDOM or ENTROPY).
    private final AgentOrderingType agentOrderingType;
    // Number of game frames each playout simulates ahead.
    private final int lookAhead;
    // Evaluation applied to the state reached at the end of a playout.
    private final EvaluationFunction evalFunction;
    // AI controlling both players during playouts.
    private final AI simulationAi;
    // Number of playouts performed so far.
    private int simulationCount = 0;
    public Sampling(AgentOrderingType agentOrderingType, int lookAhead, AI simulationAi, EvaluationFunction evalFunction) {
        this.agentOrderingType = agentOrderingType;
        this.lookAhead = lookAhead;
        this.evalFunction = evalFunction;
        this.simulationAi = simulationAi;
    }
    /**
     * Estimates the value of {@code playerAction} for {@code player} by running
     * {@code numEval} playouts of {@code lookAhead} frames and averaging the
     * evaluations, each discounted by 0.99^(elapsedTime/10).
     */
    public double evaluatePlayerAction(int player, GameState gs, PlayerAction playerAction, int numEval) throws Exception {
        double evalMean = 0;
        for (int step = 0; step < numEval; step++) {
            GameState gs2 = gs.cloneIssue(playerAction);
            GameState gs3 = gs2.clone();
            simulate(gs3, gs3.getTime() + lookAhead);
            int time = gs3.getTime() - gs2.getTime();
            // Discount longer playouts slightly (0.99 per 10 frames).
            double eval = evalFunction.evaluate(player, 1 - player, gs3)*Math.pow(0.99, time / 10.0);
            // Incremental (running) mean over the playouts so far.
            evalMean = (step * evalMean + eval) / (step + 1);
        }
        return evalMean;
    }
    /**
     * Advances {@code gs} in place until the game ends or {@code lookaheadTime}
     * is reached, with {@code simulationAi} playing both sides.
     */
    private void simulate(GameState gs, int lookaheadTime) throws Exception {
        simulationCount++;
        boolean gameover = false;
        do {
            if (gs.isComplete()) {
                gameover = gs.cycle();
            } else {
                gs.issue(simulationAi.getAction(0, gs));
                gs.issue(simulationAi.getAction(1, gs));
            }
        } while (!gameover && gs.getTime() < lookaheadTime);
    }
    /**
     * Samples one resource-consistent PlayerAction, choosing each unit's action
     * from its distribution. Agents are processed in entropy/random order (or
     * {@code forcedAgentOrder} if non-null); when a sampled action conflicts
     * with already-committed resource usage, it is resampled from the remaining
     * options. The result's unit actions are reordered to match
     * {@code unitActionTable}'s order.
     */
    public PlayerAction generatePlayerActionGivenDist(List<UnitActionTableEntry> unitActionTable, int player,
            GameState gameState, List<double []> distributions, List<Integer> forcedAgentOrder) throws Exception {
        // Start from the resources already committed by in-progress unit actions.
        ResourceUsage base_ru = new ResourceUsage();
        PhysicalGameState pgs = gameState.getPhysicalGameState();
        for(Unit u:pgs.getUnits()) {
            UnitActionAssignment uaa = gameState.getUnitActions().get(u);
            if (uaa!=null) {
                ResourceUsage ru = uaa.action.resourceUsage(u, pgs);
                base_ru.merge(ru);
            }
        }
        PlayerAction pa = new PlayerAction();
        pa.setResourceUsage(base_ru.clone());
        // entropy-based agent ordering
        ArrayList<Pair<Integer, Double>> ent_list = new ArrayList<>(distributions.size());
        if (forcedAgentOrder == null) {
            for(int j = 0; j < distributions.size(); j++) {
                ent_list.add(new Pair<>(j, entropy(distributions.get(j))));
            }
            switch(agentOrderingType) {
                case RANDOM:
                    Collections.shuffle(ent_list);
                    break;
                case ENTROPY:
                    // Lowest-entropy (most decided) agents are sampled first.
                    ent_list.sort(new Comparator<Pair<Integer, Double>>() {
                        @Override
                        public int compare(Pair<Integer, Double> p1, Pair<Integer, Double> p2) {
                            return p1.m_b.compareTo(p2.m_b);
                        }
                    });
                    break;
                default:
                    throw new RuntimeException("Unknown AgentOrderingType");
            }
        } else {
            for (Integer agentIndex : forcedAgentOrder) {
                ent_list.add(new Pair<>(agentIndex, 0.0));
            }
        }
        for (Pair<Integer, Double> idx_of_dist : ent_list) {
            double [] distribution = distributions.get(idx_of_dist.m_a);
            UnitActionTableEntry ate = unitActionTable.get(idx_of_dist.m_a);
            int code = Sampler.weighted(distribution);
            UnitAction ua = ate.actions.get(code);
            ResourceUsage r2 = ua.resourceUsage(ate.u, pgs);
            if (!pa.getResourceUsage().consistentWith(r2, gameState)) {
                // sample at random, eliminating the ones that have not worked so far:
                List<Double> dist_l = new ArrayList<>();
                List<Integer> dist_outputs = new ArrayList<>();
                for(int j = 0; j < distribution.length; j++) {
                    dist_l.add(distribution[j]);
                    dist_outputs.add(j);
                }
                do {
                    int idx = dist_outputs.indexOf(code);
                    dist_l.remove(idx);
                    dist_outputs.remove(idx);
                    code = (Integer)Sampler.weighted(dist_l, dist_outputs);
                    ua = ate.actions.get(code);
                    r2 = ua.resourceUsage(ate.u, pgs);
                } while(!pa.getResourceUsage().consistentWith(r2, gameState));
            }
            pa.getResourceUsage().merge(r2);
            pa.addUnitAction(ate.u, ua);
        }
        // reorder the actions in neighbourPA to be the same as in unitActionTable
        PlayerAction orderedPA = new PlayerAction();
        for (UnitActionTableEntry agentTableEntry : unitActionTable) {
            for (Pair<Unit, UnitAction> pair : pa.getActions()) {
                if (pair.m_a.equals(agentTableEntry.u)) {
                    orderedPA.addUnitAction(pair.m_a, pair.m_b);
                }
            }
        }
        pa = orderedPA;
        return pa;
    }
    /**
     * Samples one resource-consistent PlayerAction treating all unit/action
     * pairs as a single joint distribution: each draw is proportional to an
     * action's weight over the total remaining mass ("density"). Inconsistent
     * actions are pruned from the tables; once a unit gets an action its whole
     * row is removed.
     */
    public PlayerAction generatePlayerActionOneDist(List<UnitActionTableEntry> unitActionTable, int player,
            GameState gameState, List<double[]> distributions) throws Exception {
        ResourceUsage base_ru = new ResourceUsage();
        PhysicalGameState pgs = gameState.getPhysicalGameState();
        for(Unit u:pgs.getUnits()) {
            UnitActionAssignment uaa = gameState.getUnitActions().get(u);
            if (uaa!=null) {
                ResourceUsage ru = uaa.action.resourceUsage(u, pgs);
                base_ru.merge(ru);
            }
        }
        PlayerAction pa = new PlayerAction();
        pa.setResourceUsage(base_ru.clone());
        // distTable: per unit, (sum of its action weights, mutable list of weights);
        // idxTable mirrors it with the original action indices.
        ArrayList<Pair<Integer,ArrayList<Integer>>> idxTable = new ArrayList<>();
        ArrayList<Pair<Double,ArrayList<Double>>> distTable = new ArrayList<>();
        int i = 0;
        for(double [] actionDist :distributions) {
            double sum = 0;
            ArrayList<Double> distList = new ArrayList<>();
            ArrayList<Integer> idxList = new ArrayList<>();
            for(int j = 0; j < actionDist.length; j++){
                distList.add(actionDist[j]);
                idxList.add(j);
                sum += actionDist[j];
            }
            Pair<Double,ArrayList<Double>> distPair = new Pair<>(sum, distList);
            Pair<Integer,ArrayList<Integer>> idxPair = new Pair<>(i, idxList);
            distTable.add(distPair);
            idxTable.add(idxPair);
            i++;
        }
        // Total remaining probability mass across all units.
        double density = 0;
        for (Pair<Double, ArrayList<Double>> sumAndDist: distTable){
            density += sumAndDist.m_a;
        }
        while(!distTable.isEmpty()) {
            Random gen = new Random();
            double random = gen.nextDouble() * density;
            for(int x = 0; x < distTable.size(); x++){
                if (random > distTable.get(x).m_a){
                    // Skip this unit's whole mass.
                    random -= distTable.get(x).m_a;
                }
                else{
                    for(int y = 0; y < distTable.get(x).m_b.size(); y++){
                        if (random > distTable.get(x).m_b.get(y)){
                            random -= distTable.get(x).m_b.get(y);
                        }
                        else{
                            UnitActionTableEntry ate = unitActionTable.get(idxTable.get(x).m_a);
                            UnitAction ua = ate.actions.get(idxTable.get(x).m_b.get(y));
                            ResourceUsage r2 = ua.resourceUsage(ate.u, pgs);
                            if (!pa.getResourceUsage().consistentWith(r2, gameState)) {
                                // Conflicting action: remove it and its mass, then redraw.
                                density -= distTable.get(x).m_b.get(y);
                                distTable.get(x).m_a -= distTable.get(x).m_b.get(y);
                                distTable.get(x).m_b.remove(y);
                                idxTable.get(x).m_b.remove(y);
                            } else {
                                // Commit the action and retire this unit's row.
                                density -= distTable.get(x).m_a;
                                distTable.remove(x);
                                idxTable.remove(x);
                                pa.getResourceUsage().merge(r2);
                                pa.addUnitAction(ate.u, ua);
                                break;
                            }
                        }
                    }
                    break;
                }
            }
        }
        return pa;
    }
    /**
     * Enumerates every resource-consistent combination of unit actions
     * (cartesian product of the per-unit options). TYPE_NONE actions are
     * excluded unless {@code includeNoops} is true. Returns at least one
     * (possibly empty) PlayerAction.
     */
    public Set<PlayerAction> generatePlayerActionAll(List<UnitActionTableEntry> unitActionTable, int player,
            GameState gameState, boolean includeNoops) throws Exception {
        ResourceUsage base_ru = new ResourceUsage();
        PhysicalGameState pgs = gameState.getPhysicalGameState();
        for(Unit u:pgs.getUnits()) {
            UnitActionAssignment uaa = gameState.getUnitActions().get(u);
            if (uaa!=null) {
                ResourceUsage ru = uaa.action.resourceUsage(u, pgs);
                base_ru.merge(ru);
            }
        }
        Set<PlayerAction> actionSet = new HashSet<>();
        List<Set<Integer>> definitionOfDomains = new ArrayList<>(unitActionTable.size());
        for (UnitActionTableEntry unitActionTableEntry : unitActionTable) {
            HashSet<Integer> domain = new HashSet<>();
            for (int i = 0; i < unitActionTableEntry.nactions; i++) {
                if (unitActionTableEntry.actions.get(i).getType() != UnitAction.TYPE_NONE || includeNoops) {
                    domain.add(i);
                }
            }
            definitionOfDomains.add(domain);
        }
        CartesianProduct<Integer> product = new CartesianProduct<>(definitionOfDomains);
        int size = product.size();
        for (int elementIndex = 0; elementIndex < size; elementIndex++) {
            List<Integer> element = product.element(elementIndex);
            PlayerAction pa = new PlayerAction();
            pa.setResourceUsage(base_ru.clone());
            boolean isValid = true;
            for (int i = 0; i < element.size(); i++) {
                int actionIndex = element.get(i);
                UnitActionTableEntry unitActionTableEntry = unitActionTable.get(i);
                UnitAction unitAction = unitActionTableEntry.actions.get(actionIndex);
                if (!pa.consistentWith(unitAction.resourceUsage(unitActionTableEntry.u, pgs), gameState)) {
                    isValid = false;
                    break;
                } else {
                    pa.addUnitAction(unitActionTableEntry.u, unitAction);
                }
            }
            if (isValid) {
                actionSet.add(pa);
            }
        }
        if (actionSet.size() == 0) {
            actionSet.add(new PlayerAction());
        }
        return actionSet;
    }
    /**
     * One round of sequential halving over accumulated (sum, count) statistics:
     * adds {@code num} playouts to each candidate, sorts by average descending,
     * and keeps the better half (plus one).
     */
    public List<Pair<PlayerAction, Pair<Double, Integer>>> halvedSampling(List<Pair<PlayerAction,Pair<Double,Integer>>> actionList, GameState gameState,
            int player, int num) throws Exception {
        for (Pair<PlayerAction, Pair<Double, Integer>> pair : actionList) {
            double eval = evaluatePlayerAction(player, gameState, pair.m_a, num);
            double oldEval = pair.m_b.m_a;
            int oldNum = pair.m_b.m_b;
            pair.m_b.m_a = oldEval + eval;
            pair.m_b.m_b = oldNum + num;
        }
        actionList.sort(new Comparator<Pair<PlayerAction, Pair<Double, Integer>>>() {
            @Override
            public int compare(Pair<PlayerAction, Pair<Double, Integer>> p1, Pair<PlayerAction, Pair<Double, Integer>> p2) {
                double eval1 = p1.m_b.m_a / p1.m_b.m_b;
                double eval2 = p2.m_b.m_a / p2.m_b.m_b;
                return Double.compare(eval2, eval1);
            }
        });
        return actionList.subList(0, actionList.size()/2 +1);
    }
    /**
     * Sequential-halving round keeping a running weighted average of the
     * evaluations; returns the better half (plus one).
     */
    public List<Pair<PlayerAction, Double>> halvedOriginalSampling(List<Pair<PlayerAction, Double>> actionList, GameState gameState,
            int player, int numEval, int numEvalPrevious) throws Exception {
        for (Pair<PlayerAction, Double> pair : actionList) {
            double eval = evaluatePlayerAction(player, gameState, pair.m_a, numEval);
            // Weighted average of previous and new evaluations.
            pair.m_b = (pair.m_b*numEvalPrevious + eval*numEval)/(numEvalPrevious + numEval);
        }
        actionList.sort(new Comparator<Pair<PlayerAction, Double>>() {
            @Override
            public int compare(Pair<PlayerAction, Double> p1, Pair<PlayerAction, Double> p2) {
                return p2.m_b.compareTo(p1.m_b);
            }
        });
        return actionList.subList(0, actionList.size()/2 +1);
    }
    /**
     * Same as halvedOriginalSampling but keeps exactly half (no +1).
     */
    public List<Pair<PlayerAction, Double>> halvedOriginalSamplingFill(List<Pair<PlayerAction, Double>> actionList, GameState gameState,
            int player, int numEval, int numEvalPrevious) throws Exception {
        for (Pair<PlayerAction, Double> pair : actionList) {
            double eval = evaluatePlayerAction(player, gameState, pair.m_a, numEval);
            pair.m_b = (pair.m_b*numEvalPrevious + eval*numEval)/(numEvalPrevious + numEval);
        }
        actionList.sort(new Comparator<Pair<PlayerAction, Double>>() {
            @Override
            public int compare(Pair<PlayerAction, Double> p1, Pair<PlayerAction, Double> p2) {
                return p2.m_b.compareTo(p1.m_b);
            }
        });
        return actionList.subList(0, actionList.size() / 2);
    }
    /**
     * Shannon entropy (base 2) of the distribution after normalizing it by its
     * sum; zero-weight entries contribute nothing.
     */
    public double entropy(double[] distribution) {
        double sum = 0;
        for (double prob : distribution) {
            sum += prob;
        }
        double ent = 0;
        for (double prob : distribution) {
            if (prob == 0) {
                continue;
            }
            ent += (-1) * (prob / sum) * log((prob / sum), 2);
        }
        return ent;
    }
    /**
     * Weight of the action chosen for agent {@code agentIndex} in
     * {@code playerAction} minus the weight of that agent's last action entry.
     */
    public double difference(List<UnitActionTableEntry> unitActionTable, List<double[]> distributions, PlayerAction playerAction, int agentIndex) {
        Pair<Unit, UnitAction> ute = playerAction.getActions().get(agentIndex);
        int j = 0;
        for (UnitAction ua : unitActionTable.get(agentIndex).actions) {
            if (ute.m_b.equals(ua)){
                break;
            }
            j++;
        }
        return distributions.get(agentIndex)[j] - distributions.get(agentIndex)[distributions.get(agentIndex).length - 1];
    }
    /** Resets the playout counter to zero. */
    public void resetSimulationCount() {
        simulationCount = 0;
    }
    /** @return number of playouts performed since the last reset */
    public int getSimulationCount() {
        return simulationCount;
    }
    /** Logarithm of {@code x} in the given {@code base}. */
    public static double log(double x, double base)
    {
        return Math.log(x) / Math.log(base);
    }
    /**
     * Per-unit table row: the unit, its candidate actions, and accumulated
     * visit/evaluation statistics.
     */
    public static class UnitActionTableEntry {
        public int idx;                    // position of this entry in the table
        public Unit u;                     // the unit this row belongs to
        public int nactions = 0;           // number of candidate actions
        public List<UnitAction> actions;   // candidate actions for the unit
        public double[] accum_evaluation;  // accumulated evaluation per action
        public int[] visit_count;          // times each action was sampled
    }
    /** Order in which agents are processed when sampling. */
    public enum AgentOrderingType {
        RANDOM, ENTROPY
    }
    /**
     * Adds {@code d} playouts to the counter. Note: the fractional part of
     * {@code d} is truncated by the implicit narrowing of the compound
     * assignment to an int field.
     */
    public void increaseSimulationCount(double d) {
        simulationCount += d;
    }
}
| 15,894 | 35.709007 | 152 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/PortfolioAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio;
import ai.RandomBiasedAI;
import ai.abstraction.LightRush;
import ai.abstraction.RangedRush;
import ai.abstraction.WorkerRush;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
*
* @author santi
*/
/**
 * Portfolio-based AI: simulates every pairing of portfolio strategies
 * (max player vs. min player) from the current state, averages the resulting
 * evaluations, and plays the strategy chosen by a minimax over those averages.
 */
public class PortfolioAI extends AIWithComputationBudget implements InterruptibleAI {
    public static int DEBUG = 0;
    // Number of game frames each playout simulates ahead.
    int LOOKAHEAD = 500;
    // The portfolio of candidate strategies.
    AI strategies[];
    // deterministic[i]: a deterministic-vs-deterministic pairing needs only one playout.
    boolean deterministic[];
    EvaluationFunction evaluation;
    GameState gs_to_start_from;
    // scores[i][j]: accumulated evaluation of strategy i (max) vs strategy j (min);
    // counts[i][j]: number of playouts of that pairing.
    double scores[][];
    int counts[][];
    int nplayouts = 0;
    int playerForThisComputation;

    /** Default portfolio: WorkerRush, LightRush, RangedRush (deterministic) + RandomBiasedAI. */
    public PortfolioAI(UnitTypeTable utt) {
        this(new AI[]{new WorkerRush(utt),
                      new LightRush(utt),
                      new RangedRush(utt),
                      new RandomBiasedAI()},
             new boolean[]{true,true,true,false},
             100, -1, 100,
             new SimpleSqrtEvaluationFunction3());
    }
    public PortfolioAI(AI s[], boolean d[], int time, int max_playouts, int la, EvaluationFunction e) {
        super(time, max_playouts);
        LOOKAHEAD = la;
        strategies = s;
        deterministic = d;
        evaluation = e;
    }
    @Override
    public void reset() {
    }
    /** Runs one full computation cycle and returns the chosen action (no-op if the player cannot act). */
    public final PlayerAction getAction(int player, GameState gs) throws Exception
    {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player,gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }
    /** Resets the score/count matrices for a fresh search from {@code gs}. */
    @Override
    public void startNewComputation(int a_player, GameState gs) {
        int n = strategies.length;
        scores = new double[n][n];
        counts = new int[n][n];
        playerForThisComputation = a_player;
        gs_to_start_from = gs;
        nplayouts = 0;
    }
    public void resetSearch() {
        scores = null;
        counts = null;
        gs_to_start_from = null;
    }
    /**
     * Runs playouts for every strategy pairing until the time/iteration budget
     * runs out, or until nothing changes (all pairings deterministic and
     * already sampled once).
     */
    @Override
    public void computeDuringOneGameFrame() throws Exception {
        int n = strategies.length;
        boolean timeout = false;
        long start = System.currentTimeMillis();
        do{
            boolean anyChange = false;
            for(int i = 0;i<n && !timeout;i++) {
                for(int j = 0;j<n && !timeout;j++) {
                    // Re-sample unless this pairing is fully deterministic and already sampled.
                    if (counts[i][j]==0 ||
                        !deterministic[i] ||
                        !deterministic[j]) {
                        anyChange = true;
                        AI ai1 = strategies[i].clone();
                        AI ai2 = strategies[j].clone();
                        GameState gs2 = gs_to_start_from.clone();
                        ai1.reset();
                        ai2.reset();
                        int timeLimit = gs2.getTime() + LOOKAHEAD;
                        boolean gameover = false;
                        while(!gameover && gs2.getTime()<timeLimit) {
                            if (gs2.isComplete()) {
                                gameover = gs2.cycle();
                            } else {
                                gs2.issue(ai1.getAction(playerForThisComputation, gs2));
                                gs2.issue(ai2.getAction(1-playerForThisComputation, gs2));
                            }
                        }
                        scores[i][j] += evaluation.evaluate(playerForThisComputation, 1-playerForThisComputation, gs2);
                        counts[i][j]++;
                        nplayouts++;
                    }
                    if (ITERATIONS_BUDGET>0 && nplayouts>=ITERATIONS_BUDGET) timeout = true;
                    if (TIME_BUDGET>0 && System.currentTimeMillis()>start+TIME_BUDGET) timeout = true;
                }
            }
            // when all the AIs are deterministic, as soon as we have done one play out with each, we are done
            if (!anyChange) break;
        }while(!timeout);
    }
    /**
     * Minimax over average scores: for each max strategy take its worst-case
     * (minimum) average against the min strategies, then play the strategy
     * with the best worst-case.
     */
    public PlayerAction getBestActionSoFar() throws Exception {
        int n = strategies.length;
        if (DEBUG>=1) {
            System.out.println("PortfolioAI, game cycle: " + gs_to_start_from.getTime());
            System.out.println("  counts:");
            for(int i = 0;i<n;i++) {
                System.out.print("    ");
                for(int j = 0;j<n;j++) {
                    System.out.print(counts[i][j] + "\t");
                }
                System.out.println("");
            }
            System.out.println("  scores:");
            for(int i = 0;i<n;i++) {
                System.out.print("    ");
                for(int j = 0;j<n;j++) {
                    System.out.print(scores[i][j]/counts[i][j] + "\t");
                }
                System.out.println("");
            }
        }
        // minimax:
        double bestMaxScore = 0;
        int bestMax = -1;
        for(int i = 0;i<n;i++) {
            double bestMinScore = 0;
            int bestMin = -1;
            for(int j = 0;j<n;j++) {
                double s = scores[i][j]/counts[i][j];
                if (bestMin==-1 || s<bestMinScore) {
                    bestMin = j;
                    bestMinScore = s;
                }
            }
            if (bestMax==-1 || bestMinScore>bestMaxScore) {
                bestMax = i;
                bestMaxScore = bestMinScore;
            }
        }
        if (DEBUG>=1) {
            System.out.println("PortfolioAI: selected " + bestMax + " with score: " + bestMaxScore);
        }
        // use the AI that obtained best results:
        AI ai = strategies[bestMax].clone();
        ai.reset();
        return ai.getAction(playerForThisComputation, gs_to_start_from);
    }
    @Override
    public AI clone() {
        return new PortfolioAI(strategies, deterministic, TIME_BUDGET, ITERATIONS_BUDGET, LOOKAHEAD, evaluation);
    }
    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + LOOKAHEAD + ", " + evaluation + ")";
    }
    /** Declares the tunable parameters for generic configuration. */
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
        parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
        parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
//        parameters.add(new ParameterSpecification("Strategies", AI[].class, strategies));
//        parameters.add(new ParameterSpecification("Deterministic", boolean[].class, deterministic));
        return parameters;
    }
    /** Playout lookahead (game frames). */
    public int getPlayoutLookahead() {
        return LOOKAHEAD;
    }
    public void setPlayoutLookahead(int a_pola) {
        LOOKAHEAD = a_pola;
    }
    /** Evaluation function applied at the end of playouts. */
    public EvaluationFunction getEvaluationFunction() {
        return evaluation;
    }
    public void setEvaluationFunction(EvaluationFunction a_ef) {
        evaluation = a_ef;
    }
}
| 7,857 | 31.605809 | 136 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/PGSAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.core.AI;
import ai.abstraction.pathfinding.PathFinding;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.ResourceUsage;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*
* This class implements "Portfolio Greedy Search", as presented by Churchill and Buro in the paper:
* "Portfolio Greedy Search and Simulation for Large-Scale Combat in StarCraft"
*
* Moreover, their original paper focused purely on combat, and thus their portfolio was very samll.
* Here:
* - getSeedPlayer does not make sense in general, since each unit type might have a different set of scripts, so it's ignored
* - the portfolios might be very large, since we have to include scripts for training, building, harvesting, etc.
* - new units might be created, so a script is selected as the "default" for those new units before hand
*
*/
public class PGSAI extends AIWithComputationBudget {
public static int DEBUG = 0;
int LOOKAHEAD = 500;
int I = 1; // number of iterations for improving a given player
int R = 1; // number of times to improve with respect to the response fo the other player
EvaluationFunction evaluation;
HashMap<UnitType, List<UnitScript>> scripts;
UnitTypeTable utt;
PathFinding pf;
UnitScript defaultScript;
long start_time = 0;
int nplayouts = 0;
public PGSAI(UnitTypeTable utt) {
this(100, -1, 100, 1, 1,
new SimpleSqrtEvaluationFunction3(),
utt,
new AStarPathFinding());
}
public PGSAI(int time, int max_playouts, int la, int a_I, int a_R, EvaluationFunction e, UnitTypeTable a_utt, PathFinding a_pf) {
super(time, max_playouts);
LOOKAHEAD = la;
I = a_I;
R = a_R;
evaluation = e;
utt = a_utt;
pf = a_pf;
UnitScript harvest = new UnitScriptHarvest(pf,utt);
UnitScript buildBarracks = new UnitScriptBuild(pf,utt.getUnitType("Barracks"));
UnitScript buildBase = new UnitScriptBuild(pf,utt.getUnitType("Base"));
UnitScript attack = new UnitScriptAttack(pf);
UnitScript idle = new UnitScriptIdle();
UnitScript trainWorker = new UnitScriptTrain(utt.getUnitType("Worker"));
UnitScript trainLight = new UnitScriptTrain(utt.getUnitType("Light"));
UnitScript trainHeavy = new UnitScriptTrain(utt.getUnitType("Heavy"));
UnitScript trainRanged = new UnitScriptTrain(utt.getUnitType("Ranged"));
defaultScript = idle;
scripts = new HashMap<>();
{
List<UnitScript> l = new ArrayList<>();
l.add(harvest);
l.add(buildBarracks);
l.add(buildBase);
l.add(attack);
l.add(idle);
scripts.put(utt.getUnitType("Worker"),l);
}
{
List<UnitScript> l = new ArrayList<>();
scripts.put(utt.getUnitType("Base"),l);
l.add(trainWorker);
l.add(idle);
}
{
List<UnitScript> l = new ArrayList<>();
scripts.put(utt.getUnitType("Barracks"),l);
l.add(trainLight);
l.add(trainHeavy);
l.add(trainRanged);
l.add(idle);
}
{
List<UnitScript> l = new ArrayList<>();
scripts.put(utt.getUnitType("Light"),l);
l.add(attack);
l.add(idle);
}
{
List<UnitScript> l = new ArrayList<>();
scripts.put(utt.getUnitType("Heavy"),l);
l.add(attack);
l.add(idle);
}
{
List<UnitScript> l = new ArrayList<>();
scripts.put(utt.getUnitType("Ranged"),l);
l.add(attack);
l.add(idle);
}
}
public void reset() {
}
public PlayerAction getAction(int player, GameState gs) throws Exception {
if (gs.winner()!=-1) return new PlayerAction();
if (!gs.canExecuteAnyAction(player)) return new PlayerAction();
if (DEBUG>=1) System.out.println("PGSAI " + player + "(MAX_TIME = " + TIME_BUDGET +", I: " + I + ", R: " + R + ")");
List<Unit> playerUnits = new ArrayList<>();
List<Unit> enemyUnits = new ArrayList<>();
for(Unit u:gs.getUnits()) {
if (u.getPlayer()==player) playerUnits.add(u);
else if (u.getPlayer()>=0) enemyUnits.add(u);
}
int n1 = playerUnits.size();
int n2 = enemyUnits.size();
UnitScript playerScripts[] = new UnitScript[n1];
UnitScript enemyScripts[] = new UnitScript[n2];
// Init the players:
for(int i = 0;i<n1;i++) playerScripts[i] = defaultScript(playerUnits.get(i), gs);
for(int i = 0;i<n2;i++) enemyScripts[i] = defaultScript(enemyUnits.get(i), gs);
// Note: here, the original algorithm does "getSeedPlayer", which only makes sense if the same scripts can be used for all the units
start_time = System.currentTimeMillis();
nplayouts = 0;
improve(player, playerScripts, playerUnits, enemyScripts, enemyUnits, gs);
for(int r = 0;r<R;r++) {
improve(1-player, enemyScripts, enemyUnits, playerScripts, playerUnits, gs);
improve(player, playerScripts, playerUnits, enemyScripts, enemyUnits, gs);
}
// generate the final Player Action:
PlayerAction pa = new PlayerAction();
ResourceUsage ra = gs.getResourceUsage();
for(int i = 0;i<n1;i++) {
Unit u = playerUnits.get(i);
if (gs.getUnitAction(u)==null) {
UnitScript s = playerScripts[i];
if (s != null) s = s.instantiate(u, gs);
if (s != null) {
UnitAction ua = s.getAction(u, gs);
if (ua!=null) {
ResourceUsage ra2 = ua.resourceUsage(u, gs.getPhysicalGameState());
if (ra.consistentWith(ra2, gs)) {
pa.addUnitAction(u, ua);
ra.merge(ra2);
// System.out.println(" u: " + u);
}
}
}
}
}
pa.fillWithNones(gs, player, 10);
//System.out.println("resources: " + ra.getResourcesUsed(player) + " / " + gs.getPlayer(player).getResources());
return pa;
}
public UnitScript defaultScript(Unit u, GameState gs) {
// the first script added per type is considered the default:
List<UnitScript> l = scripts.get(u.getType());
return l.get(0).instantiate(u, gs);
}
public void improve(int player,
UnitScript scriptsToImprove[], List<Unit> units,
UnitScript otherScripts[], List<Unit> otherUnits, GameState gs) throws Exception {
for(int i = 0;i<I;i++) {
if (DEBUG>=1) System.out.println("Improve player " + player + "(" + i + "/" + I + ")");
for(int u = 0;u<scriptsToImprove.length;u++) {
if (ITERATIONS_BUDGET>0 && nplayouts>=ITERATIONS_BUDGET) {
if (DEBUG>=1) System.out.println("nplayouts>=MAX_PLAYOUTS");
return;
}
if (TIME_BUDGET>0 && System.currentTimeMillis()>=start_time+TIME_BUDGET) {
if (DEBUG>=1) System.out.println("Time out!");
return;
}
Unit unit = units.get(u);
double bestEvaluation = 0;
UnitScript bestScript = null;
List<UnitScript> candidates = scripts.get(unit.getType());
for(UnitScript us:candidates) {
UnitScript s = us.instantiate(unit, gs);
if (s!=null) {
scriptsToImprove[u] = s;
double e = playout(player, scriptsToImprove, units, otherScripts, otherUnits, gs);
if (DEBUG>=2) System.out.println(" " + unit + " -> " + s.getClass().toString() + " -> " + e);
if (bestScript==null || e>bestEvaluation) {
bestScript = us;
bestEvaluation = e;
if (DEBUG>=2) System.out.println(" new best: " + e);
}
}
}
scriptsToImprove[u] = bestScript;
}
}
}
public double playout(int player,
UnitScript scripts1[], List<Unit> units1,
UnitScript scripts2[], List<Unit> units2, GameState gs) throws Exception {
// if (DEBUG>=1) System.out.println(" playout... " + LOOKAHEAD);
nplayouts++;
AI ai1 = new UnitScriptsAI(scripts1, units1, scripts, defaultScript);
AI ai2 = new UnitScriptsAI(scripts2, units2, scripts, defaultScript);
GameState gs2 = gs.clone();
ai1.reset();
ai2.reset();
int timeLimit = gs2.getTime() + LOOKAHEAD;
boolean gameover = false;
while(!gameover && gs2.getTime()<timeLimit) {
if (gs2.isComplete()) {
gameover = gs2.cycle();
} else {
gs2.issue(ai1.getAction(player, gs2));
gs2.issue(ai2.getAction(1-player, gs2));
}
}
double e = evaluation.evaluate(player, 1-player, gs2);
// if (DEBUG>=1) System.out.println(" done: " + e);
return e;
}
@Override
public AI clone() {
return new PGSAI(TIME_BUDGET, ITERATIONS_BUDGET, LOOKAHEAD, I, R, evaluation, utt, pf);
}
@Override
public String toString() {
return getClass().getSimpleName() + "(" + TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " + LOOKAHEAD + ", " + I + ", " + R + ", " + evaluation + ", " + pf + ")";
}
@Override
public List<ParameterSpecification> getParameters() {
List<ParameterSpecification> parameters = new ArrayList<>();
parameters.add(new ParameterSpecification("TimeBudget",int.class,100));
parameters.add(new ParameterSpecification("IterationsBudget",int.class,-1));
parameters.add(new ParameterSpecification("PlayoutLookahead",int.class,100));
parameters.add(new ParameterSpecification("I", int.class, 1));
parameters.add(new ParameterSpecification("R", int.class, 1));
parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
parameters.add(new ParameterSpecification("PathFinding", PathFinding.class, new AStarPathFinding()));
return parameters;
}
    // --- configuration accessors -------------------------------------------

    /** Number of game cycles simulated per playout. */
    public int getPlayoutLookahead() {
        return LOOKAHEAD;
    }
    public void setPlayoutLookahead(int a_pola) {
        LOOKAHEAD = a_pola;
    }
    /** "I" search parameter (see constructor / getParameters). */
    public int getI() {
        return I;
    }
    public void setI(int a) {
        I = a;
    }
    /** "R" search parameter (see constructor / getParameters). */
    public int getR() {
        return R;
    }
    public void setR(int a) {
        R = a;
    }
    /** Evaluation function used to score playout end states. */
    public EvaluationFunction getEvaluationFunction() {
        return evaluation;
    }
    public void setEvaluationFunction(EvaluationFunction a_ef) {
        evaluation = a_ef;
    }
    /** Pathfinding algorithm passed to the unit scripts. */
    public PathFinding getPathFinding() {
        return pf;
    }
    public void setPathFinding(PathFinding a_pf) {
        pf = a_pf;
    }
}
| 12,168 | 33.968391 | 168 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScript.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
/**
*
* @author santi
*/
/**
 * Base class for the hard-coded per-unit behaviors ("scripts") used by the
 * portfolio greedy search.  A script is bound to a concrete unit via
 * {@link #instantiate} and then queried each cycle for a low-level action.
 */
public abstract class UnitScript {
    /** Next low-level action for unit u in gs; subclasses return null once the underlying action has completed. */
    public abstract UnitAction getAction(Unit u, GameState gs);
    /** Binds this script to a concrete unit/state; subclasses return null when the script is not applicable. */
    public abstract UnitScript instantiate(Unit u, GameState gs);
}
| 494 | 23.75 | 79 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptAttack.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.AbstractAction;
import ai.abstraction.Attack;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
/**
*
* @author santi
*/
/**
 * Script that sends a unit to attack the closest enemy unit (by Manhattan
 * distance), using the supplied pathfinding algorithm to reach it.
 */
public class UnitScriptAttack extends UnitScript {
    AbstractAction action;  // the bound Attack action (set by instantiate)
    PathFinding pf;

    public UnitScriptAttack(PathFinding a_pf) {
        pf = a_pf;
    }

    public UnitAction getAction(Unit u, GameState gs) {
        // null signals the script has finished and should be re-instantiated
        return action.completed(gs) ? null : action.execute(gs);
    }

    public UnitScript instantiate(Unit u, GameState gs) {
        Unit target = closestEnemyUnit(u, gs);
        if (target == null) return null;  // no enemy left to attack
        UnitScriptAttack bound = new UnitScriptAttack(pf);
        bound.action = new Attack(u, target, pf);
        return bound;
    }

    /** Returns the enemy unit nearest to u (Manhattan distance), or null if none. */
    public Unit closestEnemyUnit(Unit u, GameState gs) {
        Unit best = null;
        int bestDistance = Integer.MAX_VALUE;
        for (Unit other : gs.getPhysicalGameState().getUnits()) {
            boolean isEnemy = other.getPlayer() >= 0 && other.getPlayer() != u.getPlayer();
            if (!isEnemy) continue;
            int d = Math.abs(other.getX() - u.getX()) + Math.abs(other.getY() - u.getY());
            if (d < bestDistance) {
                best = other;
                bestDistance = d;
            }
        }
        return best;
    }
}
| 1,748 | 26.328125 | 88 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptBuild.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.AbstractAction;
import ai.abstraction.Build;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import rts.PhysicalGameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
/**
*
* @author santi
*/
/**
 * Script that makes a worker construct a building of type {@code ut} at the
 * closest currently unoccupied cell (Manhattan distance from the worker).
 */
public class UnitScriptBuild extends UnitScript {
    AbstractAction action;  // the bound Build action (set by instantiate)
    PathFinding pf;
    UnitType ut;            // building type to construct

    public UnitScriptBuild(PathFinding a_pf, UnitType a_ut) {
        pf = a_pf;
        ut = a_ut;
    }

    public UnitAction getAction(Unit u, GameState gs) {
        // null signals the script has finished and should be re-instantiated
        return action.completed(gs) ? null : action.execute(gs);
    }

    public UnitScript instantiate(Unit u, GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        int pos = findBuildingPosition(u, pgs);
        if (pos == -1) return null;  // board completely full
        UnitScriptBuild bound = new UnitScriptBuild(pf, ut);
        // positions are encoded as x + y*width
        bound.action = new Build(u, ut, pos % pgs.getWidth(), pos / pgs.getWidth(), pf);
        return bound;
    }

    /** Returns the nearest cell (by Manhattan distance) with no unit on it, as x + y*width, or -1. */
    public int findBuildingPosition(Unit u, PhysicalGameState pgs) {
        int bestPos = -1;
        int bestScore = 0;
        // iterate x-outer / y-inner, same as the tie-breaking order callers rely on
        for (int x = 0; x < pgs.getWidth(); x++) {
            for (int y = 0; y < pgs.getHeight(); y++) {
                if (pgs.getUnitAt(x, y) != null) continue;
                // negated distance so "larger score" means "closer"
                int score = -(Math.abs(u.getX() - x) + Math.abs(u.getY() - y));
                if (bestPos == -1 || score > bestScore) {
                    bestPos = x + y * pgs.getWidth();
                    bestScore = score;
                }
            }
        }
        return bestPos;
    }
}
| 2,192 | 27.115385 | 91 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptHarvest.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.AbstractAction;
import ai.abstraction.Harvest;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Script that makes a worker shuttle between the closest resource patch and
 * the player's closest base.
 */
public class UnitScriptHarvest extends UnitScript {
    AbstractAction action;  // the bound Harvest action (set by instantiate)
    PathFinding pf;
    UnitTypeTable utt;      // needed to look up the "Resource" and "Base" types

    public UnitScriptHarvest(PathFinding a_pf, UnitTypeTable a_utt) {
        pf = a_pf;
        utt = a_utt;
    }

    public UnitAction getAction(Unit u, GameState gs) {
        // null signals the script has finished and should be re-instantiated
        return action.completed(gs) ? null : action.execute(gs);
    }

    public UnitScript instantiate(Unit u, GameState gs) {
        Unit resource = closestUnitOfType(u, gs, utt.getUnitType("Resource"), null);
        Unit base = closestUnitOfType(u, gs, utt.getUnitType("Base"), u.getPlayer());
        if (resource == null || base == null) return null;  // nothing to harvest or nowhere to return
        UnitScriptHarvest bound = new UnitScriptHarvest(pf, utt);
        bound.action = new Harvest(u, resource, base, pf);
        return bound;
    }

    /**
     * Returns the unit of {@code type} closest to u (Manhattan distance),
     * restricted to {@code player} when non-null; null if no match exists.
     */
    public Unit closestUnitOfType(Unit u, GameState gs, UnitType type, Integer player) {
        Unit best = null;
        int bestDistance = Integer.MAX_VALUE;
        for (Unit other : gs.getPhysicalGameState().getUnits()) {
            if (other.getType() != type) continue;
            if (player != null && other.getPlayer() != player) continue;
            int d = Math.abs(other.getX() - u.getX()) + Math.abs(other.getY() - u.getY());
            if (d < bestDistance) {
                best = other;
                bestDistance = d;
            }
        }
        return best;
    }
}
| 2,127 | 29.4 | 92 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptIdle.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.AbstractAction;
import ai.abstraction.Idle;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
/**
*
* @author santi
*/
/** Script that makes a unit do nothing (issues an Idle abstract action). */
public class UnitScriptIdle extends UnitScript {
    AbstractAction action;  // the bound Idle action (set by instantiate)

    public UnitScriptIdle() {
    }

    public UnitAction getAction(Unit u, GameState gs) {
        // null signals the script has finished and should be re-instantiated
        return action.completed(gs) ? null : action.execute(gs);
    }

    public UnitScript instantiate(Unit u, GameState gs) {
        UnitScriptIdle bound = new UnitScriptIdle();
        bound.action = new Idle(u);
        return bound;
    }
}
| 898 | 22.051282 | 79 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptTrain.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.abstraction.AbstractAction;
import ai.abstraction.Train;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
/**
*
* @author santi
*/
/** Script that makes a structure train one unit of type {@code ut}. */
public class UnitScriptTrain extends UnitScript {
    AbstractAction action;  // the bound Train action (set by instantiate)
    UnitType ut;            // unit type to train

    public UnitScriptTrain(UnitType a_ut) {
        ut = a_ut;
    }

    public UnitAction getAction(Unit u, GameState gs) {
        // null signals the script has finished and should be re-instantiated
        return action.completed(gs) ? null : action.execute(gs);
    }

    public UnitScript instantiate(Unit u, GameState gs) {
        UnitScriptTrain bound = new UnitScriptTrain(ut);
        bound.action = new Train(u, ut);
        return bound;
    }
}
| 982 | 22.404762 | 79 | java |
MicroRTS | MicroRTS-master/src/ai/portfolio/portfoliogreedysearch/UnitScriptsAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.portfolio.portfoliogreedysearch;
import ai.core.AI;
import ai.core.ParameterSpecification;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.ResourceUsage;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitType;
/**
*
* @author santi
*/
/**
 * An AI that drives every unit through an assigned {@link UnitScript}.  The
 * constructor maps the given units to the given scripts; units that appear
 * later (or whose script finishes) get the first script registered for their
 * type in {@code allScripts}, falling back to {@code defaultScript}.
 */
public class UnitScriptsAI extends AI {
    public static int DEBUG = 0;
    UnitScript scriptsInput[];   // original script array, kept for clone()/getParameters()
    List<Unit> unitsInput;       // original unit list, kept for clone()/getParameters()
    HashMap<Long,UnitScript> scripts = new HashMap<>();   // unit ID -> its current script
    HashMap<UnitType, List<UnitScript>> allScripts;       // per-type script pool for new units
    UnitScript defaultScript;    // fallback when the pool script cannot be instantiated

    /**
     * @param a_scripts script assigned to each unit in a_units (parallel to a_units)
     * @param a_units units controlled at construction time
     * @param a_allScripts available scripts per unit type (first entry used for new units)
     * @param a_defaultScript fallback script for units with no applicable pool script
     */
    public UnitScriptsAI(UnitScript a_scripts[], List<Unit> a_units,
                         HashMap<UnitType, List<UnitScript>> a_allScripts,
                         UnitScript a_defaultScript) {
        scriptsInput = a_scripts;
        unitsInput = a_units;
        for(int i = 0;i<a_scripts.length;i++) {
            scripts.put(a_units.get(i).getID(), a_scripts[i]);
        }
        allScripts = a_allScripts;
        defaultScript = a_defaultScript;
    }

    public void reset() {
    }

    /** Re-binds every stored script to its unit's current state in gs. */
    public void resetScripts(GameState gs) {
        // put() on an existing key does not structurally modify the map, so
        // replacing values while iterating the key set is safe here
        for(Long ID:scripts.keySet()) {
            UnitScript s = scripts.get(ID);
            scripts.put(ID, s.instantiate(gs.getUnit(ID), gs));
        }
    }

    /**
     * For every idle unit of this player, issues the action its script
     * proposes — but only when the action's resource usage is consistent with
     * what has already been reserved this cycle.
     */
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        PlayerAction pa = new PlayerAction();
        ResourceUsage ru = gs.getResourceUsage();
        for(Unit u:gs.getUnits()) {
            if (u.getPlayer()==player && gs.getUnitAction(u)==null) {
                UnitScript s = scripts.get(u.getID());
                if (s!=null) s = s.instantiate(u, gs);
                if (s==null) {
                    // new unit, or completed script
                    s = allScripts.get(u.getType()).get(0).instantiate(u, gs);
                    if (s==null) s = defaultScript.instantiate(u, gs);
                    scripts.put(u.getID(),s);
                }
                UnitAction ua = s.getAction(u, gs);
                if (ua!=null) {
                    ResourceUsage ru2 = ua.resourceUsage(u, gs.getPhysicalGameState());
                    if (ru.consistentWith(ru2, gs)) {
                        pa.addUnitAction(u, ua);
                        // reserve the resources so later units cannot double-spend
                        ru.merge(ru2);
                    }
                }
            }
        }
        //System.out.println("  UnitScriptsAI.getAction " + player + ", " + gs.getTime() + ": " + pa);
        pa.fillWithNones(gs, player, 10);
        return pa;
    }
    @Override
    public AI clone() {
        return new UnitScriptsAI(scriptsInput, unitsInput, allScripts, defaultScript);
    }
    @Override
    public String toString() {
        return getClass().getSimpleName() + "()";
    }
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("Scripts", List.class, scriptsInput));
        parameters.add(new ParameterSpecification("Units", List.class, unitsInput));
        parameters.add(new ParameterSpecification("AllScripts", HashMap.class, allScripts));
        parameters.add(new ParameterSpecification("DefaultScript", UnitScript.class, defaultScript));
        return parameters;
    }
}
| 3,617 | 31.017699 | 104 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/BasicConfigurableScript.java | package ai.puppet;
import ai.abstraction.pathfinding.FloodFillPathFinding;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.function.Predicate;
import ai.abstraction.pathfinding.PathFinding;
import ai.core.ParameterSpecification;
import rts.GameState;
import rts.PhysicalGameState;
import rts.Player;
import rts.PlayerAction;
import rts.units.Unit;
import rts.units.UnitType;
import rts.units.UnitTypeTable;
/** Choice points exposed by this script: which unit type to produce, and whether to expand to a new base. */
enum BasicChoicePoint{UNITTYPE, EXPAND}
public class BasicConfigurableScript extends ConfigurableScript<BasicChoicePoint> {
Random r = new Random();
UnitTypeTable utt;
UnitType workerType;
UnitType baseType;
UnitType barracksType;
UnitType lightType;
UnitType heavyType;
UnitType rangedType;
UnitType resourceType;
int resourcesUsed;
int nbases;
int nbarracks;
int nresources;
int ownresources;
int abandonedbases;
int freeresources;
int nworkers;
private static final int BASE_RESOURCE_RADIUS = 8;
    /** Convenience constructor: defaults to flood-fill pathfinding. */
    public BasicConfigurableScript(UnitTypeTable a_utt) {
        this(a_utt, new FloodFillPathFinding());
    }
    /**
     * @param a_utt unit-type table used to resolve the named unit types below
     * @param a_pf  pathfinding algorithm passed to the abstraction layer
     */
    public BasicConfigurableScript(UnitTypeTable a_utt, PathFinding a_pf) {
        super(a_pf);
        utt = a_utt;
        workerType = utt.getUnitType("Worker");
        baseType = utt.getUnitType("Base");
        barracksType = utt.getUnitType("Barracks");
        lightType = utt.getUnitType("Light");
        heavyType = utt.getUnitType("Heavy");
        rangedType = utt.getUnitType("Ranged");
        resourceType = utt.getUnitType("Resource");
        choicePoints = new EnumMap<>(BasicChoicePoint.class);
        choices = new EnumMap<>(BasicChoicePoint.class);
        choicePointValues = BasicChoicePoint.values();
        reset();  // populates choicePoints/choices with their defaults
    }
public ConfigurableScript<BasicChoicePoint> clone() {
BasicConfigurableScript sc = new BasicConfigurableScript(utt, pf);
sc.choices=choices.clone();
sc.choicePoints=choicePoints.clone();
sc.choicePointValues=choicePointValues.clone();
return sc;
}
/*
This is the main function of the AI. It is called at each game cycle with the most up to date game state and
returns which actions the AI wants to execute in this cycle.
The input parameters are:
- player: the player that the AI controls (0 or 1)
- gs: the current game state
This method returns the actions to be sent to each of the units in the gamestate controlled by the player,
packaged as a PlayerAction.
*/
public PlayerAction getAction(int player, GameState gs) {
PhysicalGameState pgs = gs.getPhysicalGameState();
Player p = gs.getPlayer(player);
resourcesUsed=gs.getResourceUsage().getResourcesUsed(player);
nworkers=0;
nbases = 0;
nbarracks = 0;
nresources = 0;
ownresources = 0;
abandonedbases = 0;
freeresources = 0;
for (Unit u2 : pgs.getUnits()) {
if (u2.getType() == workerType
&& u2.getPlayer() == p.getID()) {
nworkers++;
}
}
for (Unit u2 : pgs.getUnits()) {
if (u2.getType() == baseType
&& u2.getPlayer() == p.getID()) {
nbases++;
if(!pgs.getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->a.getType()==resourceType)
.reduce((a,b)->a||b).get()){
abandonedbases++;
}
}
if (u2.getType() == barracksType
&& u2.getPlayer() == p.getID()) {
nbarracks++;
}
if(u2.getType() == resourceType){
nresources++;
if(pgs.getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->a.getPlayer()==p.getID()&&a.getType()==baseType)
.reduce((a,b)->a||b).get()){
ownresources++;
}
if(!pgs.getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->a.getPlayer()!=(1-p.getID())&&a.getType()!=baseType)
.reduce((a,b)->a&&b).get()){
freeresources++;
}
}
}
// System.out.println(nbases+" "+abandonedbases+" "+ownresources);
// behavior of bases:
for (Unit u : pgs.getUnits()) {
if (u.getType() == baseType
&& u.getPlayer() == player
&& gs.getActionAssignment(u) == null) {
baseBehavior(u, p, pgs);
}
}
// behavior of melee units:
for (Unit u : pgs.getUnits()) {
if (u.getType().canAttack && !u.getType().canHarvest
&& u.getPlayer() == player
&& gs.getActionAssignment(u) == null) {
meleeUnitBehavior(u, p, gs);
}
}
// behavior of workers:
List<Unit> workers = new LinkedList<>();
for (Unit u : pgs.getUnits()) {
if (u.getType().canHarvest
&& u.getPlayer() == player) {
workers.add(u);
}
}
workersBehavior(workers, p, gs);
// behavior of barracks:
for (Unit u : pgs.getUnits()) {
if (u.getType() == barracksType
&& u.getPlayer() == player
&& gs.getActionAssignment(u) == null) {
barracksBehavior(u, p, pgs);
}
}
// This method simply takes all the unit actions executed so far, and packages them into a PlayerAction
return translateActions(player, gs);
}
public void baseBehavior(Unit u, Player p, PhysicalGameState pgs) {
if ((choices.get(BasicChoicePoint.UNITTYPE)==workerType.ID
// || nworkers < 4
// || nworkers < choices.get(BasicChoicePoint.NWORKERS)
)
&& p.getResources() >= workerType.cost + resourcesUsed) {
train(u, workerType);
resourcesUsed+=workerType.cost;
}
}
public void barracksBehavior(Unit u, Player p, PhysicalGameState pgs) {
UnitType toBuild=utt.getUnitType(choices.get(BasicChoicePoint.UNITTYPE));
if(!toBuild.canHarvest){
if (p.getResources() >= toBuild.cost + resourcesUsed) {
train(u, toBuild);
resourcesUsed+=toBuild.cost;
}
}
}
public int manDist(Unit u1,Unit u2){
return Math.abs(u2.getX() - u1.getX()) + Math.abs(u2.getY() - u1.getY());
}
    /**
     * Combat-unit behavior: attack the closest enemy, except that a unit which
     * outranges its closest enemy and is currently inside that enemy's attack
     * range tries to step one cell away first (simple kiting).
     */
    public void meleeUnitBehavior(Unit u, Player p, GameState gs) {
        // find the closest enemy unit (Manhattan distance)
        Unit closestEnemy = null;
        int closestDistance = 0;
        for (Unit u2 : gs.getPhysicalGameState().getUnits()) {
            if (u2.getPlayer() >= 0 && u2.getPlayer() != p.getID()) {
                int d = manDist(u,u2);
                if (closestEnemy == null || d < closestDistance) {
                    closestEnemy = u2;
                    closestDistance = d;
                }
            }
        }
        if (closestEnemy != null) {//TODO: check relative speeds and cooldowns
            // kite only when we outrange the enemy AND are inside its range
            if(closestEnemy.getAttackRange()<u.getAttackRange()
                    && sqDist(u,closestEnemy)<=closestEnemy.getAttackRange()*closestEnemy.getAttackRange()){
                int xDiff=u.getX()-closestEnemy.getX();//>0 enemy LEFT
                int yDiff=u.getY()-closestEnemy.getY();//>0 enemy UP
                int targetX=u.getX();
                int targetY=u.getY();
                // step one cell away along the dominant axis, staying on the map
                if (Math.abs(xDiff)> Math.abs(yDiff)){//run horizontally
                    if(xDiff>0 && targetX<gs.getPhysicalGameState().getWidth()-1)targetX=u.getX()+1;
                    else if(xDiff<0 && targetX>0) targetX=u.getX()-1;
                }else{
                    if(yDiff>0 && targetY<gs.getPhysicalGameState().getHeight()-1)targetY=u.getY()+1;
                    else if (yDiff<0 && targetY>0) targetY=u.getY()-1;
                }
                if(gs.free(targetX,targetY)){
                    move(u,targetX,targetY);
                }else{
                    // escape cell blocked: fight instead of standing still
                    attack(u, closestEnemy);
                }
            }else{
                attack(u, closestEnemy);
            }
        }
    }
public int sqDist(Unit u1, Unit u2){
int xDiff=Math.abs(u1.getX()-u2.getX());
int yDiff=Math.abs(u1.getY()-u2.getY());
return xDiff*xDiff+yDiff*yDiff;
}
    /**
     * Worker behavior: ensure a base exists, build barracks for non-abandoned
     * bases, optionally expand to a free resource patch (EXPAND choice), send
     * surplus workers to fight when UNITTYPE selects workers, and harvest with
     * the rest.  Mutates {@code resourcesUsed} to reserve committed resources.
     */
    public void workersBehavior(List<Unit> workers, Player p, GameState gs) {
        PhysicalGameState pgs=gs.getPhysicalGameState();
        if(workers.isEmpty())return;
        List<Unit> bases = new LinkedList<>();
        for (Unit u2 : pgs.getUnits()) {
            if (u2.getType() == baseType
                    && u2.getPlayer() == p.getID()) {
                bases.add(u2);
            }
        }
        List<Unit> freeWorkers = new LinkedList<>(workers);
        List<Integer> reservedPositions = new LinkedList<>();
        if (nbases == 0 && !freeWorkers.isEmpty()) {
            // build a base:
            if (p.getResources() >= baseType.cost + resourcesUsed) {
                Unit u = freeWorkers.remove(0);
                buildIfNotAlreadyBuilding(u,baseType,u.getX(),u.getY(),reservedPositions,p,pgs);
                resourcesUsed += baseType.cost;
            }
        }
        // one barracks per non-abandoned base, but only if combat units are wanted
        if (nbarracks < (nbases - abandonedbases) && !utt.getUnitType(choices.get(BasicChoicePoint.UNITTYPE)).canHarvest) {
            // build a barracks:
            if (p.getResources() >= barracksType.cost + resourcesUsed && !freeWorkers.isEmpty()) {
                Unit u = freeWorkers.remove(0);
                Unit b = bases.get(nbarracks);
                buildIfNotAlreadyBuilding(u,barracksType,b.getX(),b.getY(),reservedPositions,p,pgs);
                resourcesUsed += barracksType.cost;
            }
        }
        //expand
        if(choices.get(BasicChoicePoint.EXPAND)>0
                && nbarracks >= 1
                && (nbases - abandonedbases) <= 1
                && freeresources > 0
                && !freeWorkers.isEmpty()) {
            // System.out.println("should expand");
            // build a base:
            if (p.getResources() >= baseType.cost + resourcesUsed ) {
                //System.out.println("expanding");
                Unit u = freeWorkers.remove(0);
                //get closest resource that hasn't got bases around, or enemy units
                Unit closestFreeResource=findClosest(u,
                        (Unit unit) -> {
                            return unit.getType() == resourceType &&
                                    pgs.getUnitsAround(unit.getX(), unit.getY(), 10).stream()
                                    .map((a)->a.getPlayer()!=(1-p.getID())&&a.getType()!=baseType)
                                    .reduce((a,b)->a&&b).get();
                        },
                        pgs);
                if(closestFreeResource!=null){
                    buildIfNotAlreadyBuilding(u,baseType,closestFreeResource.getX(),closestFreeResource.getY(),reservedPositions,p,pgs);
                }
                resourcesUsed += baseType.cost;
            }else{
                //System.out.println("reserving");
                // cannot afford yet: still reserve the cost so nothing else spends it
                resourcesUsed+= baseType.cost;
            }
        }
        // when producing workers as an army, keep one harvester and send the rest to fight
        while(choices.get(BasicChoicePoint.UNITTYPE)==workerType.ID &&
                freeWorkers.size()>1)
            meleeUnitBehavior(freeWorkers.remove(0), p, gs);
        // harvest with all the free workers:
        for (Unit u : freeWorkers) {
            Unit closestBase = null;
            Unit closestResource = null;
            int closestDistance = 0;
            for (Unit u2 : pgs.getUnits()) {
                if (u2.getType().isResource) {
                    int d = Math.abs(u2.getX() - u.getX()) + Math.abs(u2.getY() - u.getY());
                    if (closestResource == null || d < closestDistance) {
                        closestResource = u2;
                        closestDistance = d;
                    }
                }
            }
            closestDistance = 0;
            for (Unit u2 : pgs.getUnits()) {
                if (u2.getType().isStockpile && u2.getPlayer()==p.getID()) {
                    int d = Math.abs(u2.getX() - u.getX()) + Math.abs(u2.getY() - u.getY());
                    if (closestBase == null || d < closestDistance) {
                        closestBase = u2;
                        closestDistance = d;
                    }
                }
            }
            if (closestResource != null && closestBase != null) {
                harvest(u, closestResource, closestBase);
            }
        }
    }
public Unit findClosest(Unit from, Predicate<Unit> predicate, PhysicalGameState pgs){
Unit closestUnit = null;
int closestDistance = 0;
for (Unit u2 : pgs.getUnits()) {
if (predicate.test(u2)) {
int d = Math.abs(u2.getX() - from.getX()) + Math.abs(u2.getY() - from.getY());
if (closestUnit == null || d < closestDistance) {
closestUnit = u2;
closestDistance = d;
}
}
}
return closestUnit;
}
public Unit findClosest(Unit from, UnitType targetType, PhysicalGameState pgs){
Unit closestUnit = null;
int closestDistance = 0;
for (Unit u2 : pgs.getUnits()) {
if (u2.getType() == targetType) {
int d = Math.abs(u2.getX() - from.getX()) + Math.abs(u2.getY() - from.getY());
if (closestUnit == null || d < closestDistance) {
closestUnit = u2;
closestDistance = d;
}
}
}
return closestUnit;
}
    /**
     * Returns the second-closest unit of {@code targetType}.
     * NOTE(review): if no unit of the type exists, the inner findClosest
     * receives a null "except" unit and dereferences it — potential NPE.
     */
    public Unit findSecondClosest(Unit from, UnitType targetType, PhysicalGameState pgs){
        return findClosest(from,targetType,findClosest(from,targetType,pgs),pgs);
    }
public Unit findClosest(Unit from, UnitType targetType, Unit except, PhysicalGameState pgs){
Unit closestUnit = null;
int closestDistance = 0;
for (Unit u2 : pgs.getUnits()) {
if (u2.getType() == targetType && u2.getID()!=except.getID()) {
int d = Math.abs(u2.getX() - from.getX()) + Math.abs(u2.getY() - from.getY());
if (closestUnit == null || d < closestDistance) {
closestUnit = u2;
closestDistance = d;
}
}
}
return closestUnit;
}
public Unit findClosest(Unit from, UnitType targetType, Player targetPlayer, PhysicalGameState pgs){
Unit closestUnit = null;
int closestDistance = 0;
for (Unit u2 : pgs.getUnits()) {
if (u2.getType() == targetType && u2.getPlayer()==targetPlayer.getID()) {
int d = Math.abs(u2.getX() - from.getX()) + Math.abs(u2.getY() - from.getY());
if (closestUnit == null || d < closestDistance) {
closestUnit = u2;
closestDistance = d;
}
}
}
return closestUnit;
}
public Unit findClosest(Unit from, Player targetPlayer, PhysicalGameState pgs){
Unit closestUnit = null;
int closestDistance = 0;
for (Unit u2 : pgs.getUnits()) {
if (u2.getPlayer()==targetPlayer.getID()) {
int d = Math.abs(u2.getX() - from.getX()) + Math.abs(u2.getY() - from.getY());
if (closestUnit == null || d < closestDistance) {
closestUnit = u2;
closestDistance = d;
}
}
}
return closestUnit;
}
@Override
public Collection<Options> getApplicableChoicePoints(int player, GameState gs) {
int nworkers=0;
int nbarracks=0;
int nbases=0;
int abandonedbases=0;
int ownresources = 0;
int nresources = 0;
int freeresources = 0;
for (Unit u2 : gs.getPhysicalGameState().getUnits()) {
if(u2.getPlayer() == player){
if (u2.getType() == workerType){
nworkers++;
}
if (u2.getType() == barracksType ) {
nbarracks++;
}
if (u2.getType() == baseType) {
nbases++;
if(!gs.getPhysicalGameState().getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->a.getType()==resourceType)
.reduce((a,b)->a||b).get()){
abandonedbases++;
}
}
}
if(u2.getType() == resourceType){
nresources++;
if(gs.getPhysicalGameState().getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->a.getPlayer()==player&&a.getType()==baseType)
.reduce((a,b)->a||b).get()){
ownresources++;
}
if(!gs.getPhysicalGameState().getUnitsAround(u2.getX(), u2.getY(), BASE_RESOURCE_RADIUS).stream()
.map((a)->/*a.getPlayer()==(1-player)&&*/a.getType()==baseType)
.reduce((a,b)->a||b).get()){
freeresources++;
}
}
}
List<Options> choices= new ArrayList<>();
if(nbarracks>0){//already have a barracks, build combat units
choices.add(new Options(BasicChoicePoint.UNITTYPE.ordinal(),new int[]{
lightType.ID,
rangedType.ID,
heavyType.ID}));
}else{
choices.add(new Options(BasicChoicePoint.UNITTYPE.ordinal(),new int[]{
workerType.ID,
lightType.ID,
rangedType.ID,
heavyType.ID}));
}
if(nbarracks<1 || (nbases - abandonedbases) > 1 || freeresources==0 ){//already have an extra base
choices.add(new Options(BasicChoicePoint.EXPAND.ordinal(),new int[]{0}));
}else if(ownresources==0){//no resources, force expansion
choices.add(new Options(BasicChoicePoint.EXPAND.ordinal(),new int[]{1}));
}else{
choices.add(new Options(BasicChoicePoint.EXPAND.ordinal(),new int[]{0,1}));
}
return choices;
}
@Override
public void initializeChoices() {
for(BasicChoicePoint c:choicePointValues){
switch(c){
case UNITTYPE:
choicePoints.put(c, new Options(c.ordinal(),new int[]{
lightType.ID,
workerType.ID,
rangedType.ID,
heavyType.ID}));
break;
case EXPAND:
choicePoints.put(c, new Options(c.ordinal(),new int[]{0,1}));
break;
}
}
}
public String toString(){
StringBuilder str = new StringBuilder(getClass().getSimpleName() + "(");
for(BasicChoicePoint c:BasicChoicePoint.values()){
str.append(c.toString()).append(",");
}
return str+")";
}
@Override
public List<ParameterSpecification> getParameters() {
List<ParameterSpecification> parameters = new ArrayList<>();
parameters.add(new ParameterSpecification("PathFinding", PathFinding.class, new FloodFillPathFinding()));
return parameters;
}
}
| 18,840 | 35.09387 | 133 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/CacheTable.java | package ai.puppet;
import java.util.Random;
import rts.GameState;
import util.Pair;
/**
 * Wraps a GameState together with an incrementally maintained Zobrist-style
 * hash keyed on the (depth, player, choice point, choice) tuples chosen along
 * the search path.  Used as the key of {@link CacheTable}.
 */
class PuppetGameState{
    GameState gs;
    int hash;  // current Zobrist hash; kept non-negative (all keys < 2^31)
    static final int MAX_DEPTH=100;
    static final int MAX_CHOICE_POINTS=10;
    static final int MAX_CHOICES=10;
    // zobrist[depth][player][choicePoint][choice]: random bitstrings XOR-ed into the hash
    static int zobrist[][][][]=new int[MAX_DEPTH][2][MAX_CHOICE_POINTS][MAX_CHOICES];
    static{
        Random rng=new Random();
        for(int depth=0;depth<MAX_DEPTH;depth++){
            for(int p=0;p<2;p++){
                for(int point=0;point<MAX_CHOICE_POINTS;point++){
                    for(int choice=0;choice<MAX_CHOICES;choice++){
                        zobrist[depth][p][point][choice]=rng.nextInt(Integer.MAX_VALUE);
                    }
                }
            }
        }
    }
    /** Root state: deep-copies gs and seeds the hash with a fresh random value. */
    public PuppetGameState(GameState gs) {
        this.gs=gs.clone();
        Random rng=new Random();
        hash=rng.nextInt(Integer.MAX_VALUE);
    }
    /** Shallow copy: shares the wrapped GameState and the hash. */
    public PuppetGameState(PuppetGameState gs) {
        this.gs=gs.gs;
        hash=gs.hash;
    }
    /** Child state: hash = parent's hash updated with both players' moves at this depth. */
    public PuppetGameState(PuppetGameState oldState, GameState newState, int depth, Move move1, Move move2) {
        this.gs=newState;
        hash=oldState.hash;
        hash=getHash(depth, move1, move2);
    }
    /** Hash after applying move1 (player 0, depth) and move2 (player 1, depth+1); does not mutate this. */
    int getHash(int depth, Move move1, Move move2)
    {
        int _hash = hash;
        for (Pair<Integer,Integer> c : move1.choices)
        {
            _hash ^= zobrist[depth][0][c.m_a][c.m_b];
        }
        for (Pair<Integer,Integer> c : move2.choices)
        {
            _hash ^= zobrist[depth+1][1][c.m_a][c.m_b];
        }
        return _hash;
    }
    /** Hash after applying a single player-0 move at the given depth; does not mutate this. */
    int getHash(int depth, Move move)
    {
        int _hash = hash;
        for (Pair<Integer,Integer> c : move.choices)
        {
            _hash ^= zobrist[depth][0][c.m_a][c.m_b];
        }
        return _hash;
    }
    /** Current hash value of this state. */
    int getHash()
    {
        return hash;
    }
}
/** A single slot of the transposition table; {@code _state} is null while the slot is empty. */
class CacheEntry{
    PuppetGameState _state;
    CacheEntry(PuppetGameState state){
        _state=state;
    }
    CacheEntry(){}
}
/**
 * Fixed-size, direct-mapped transposition table indexed by the Zobrist hash
 * of a PuppetGameState; on collision the previous occupant is overwritten.
 */
class CacheTable
{
    CacheEntry[] _entries;

    CacheTable(int size)
    {
        _entries = new CacheEntry[size];
        for (int slot = 0; slot < size; slot++) {
            _entries[slot] = new CacheEntry();
        }
    }

    /** Stores newState, overwriting whatever occupied its slot. origState is unused (kept for API compatibility). */
    void store(PuppetGameState origState, PuppetGameState newState)
    {
        int slot = newState.getHash() % _entries.length;
        _entries[slot]._state = newState;
    }

    /** Returns the cached entry for state advanced by (move1, move2) at depth, or null on miss/hash mismatch. */
    CacheEntry lookup(PuppetGameState state, int depth, Move move1, Move move2)
    {
        int hash = state.getHash(depth, move1, move2);
        CacheEntry entry = _entries[hash % _entries.length];
        if (entry._state == null) {
            return null;
        }
        return entry._state.getHash() == hash ? entry : null;
    }
}
| 2,258 | 20.932039 | 106 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/ConfigurableScript.java | package ai.puppet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import ai.abstraction.AbstractionLayerAI;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import util.Pair;
public abstract class ConfigurableScript<T extends Enum<T>> extends AbstractionLayerAI {
class Options{
int id;
int options[];
public Options(int id,int[] options){
this.id=id;
this.options=options;
}
public int numOptions(){
return options.length;
}
public int getOption(int o){
return options[o];
}
@Override
public Options clone(){
return new Options(id,options);
}
@Override
public String toString(){
return "("+id+",["+options+"])";
}
}
protected T[] choicePointValues;
protected EnumMap<T,Options> choicePoints ;
protected EnumMap<T,Integer> choices;
public ConfigurableScript(PathFinding a_pf) {
super(a_pf);
}
@Override
public void reset(){
initializeChoices();
setDefaultChoices();
}
public Collection<Options> getAllChoicePoints(){
return choicePoints.values();
}
public void setChoices(Collection<Pair<Integer,Integer>> choices){
for(Pair<Integer,Integer> c:choices){
this.choices.put(choicePointValues[c.m_a],c.m_b);
}
}
public void setDefaultChoices() {//first option is the default
for(T c:choicePointValues){
choices.put(c, choicePoints.get(c).getOption(0));
}
}
public ArrayList<ArrayList<Pair<Integer,Integer>>> getChoiceCombinations(int player, GameState gs){
Collection<Options> options= getApplicableChoicePoints(player,gs);
int[] reps=new int[options.size()+1];
for(int j=0;j<options.size();j++){
reps[j]=1;
int i=0;
for(Options o:options){
if(i++>=j){
reps[j]*=o.numOptions();
}
}
}
reps[options.size()]=1;
int count=1;
for(Options o:options){
count*=o.numOptions();
}
ArrayList<ArrayList<Pair<Integer, Integer>>> combinations =
new ArrayList<>(count);
for(int i=0;i<count;i++){
combinations.add(new ArrayList<>(options.size()));
}
int opt=0;
for (Options o:options) {
for (int choi = 0; choi < o.numOptions(); choi++) {
for(int k=0;k<reps[opt+1];k++){
int cycles=count/reps[opt];
for(int it=0;it<cycles;it++){
combinations.get(it*reps[opt]+choi*reps[opt+1]+k).add(new Pair<>(opt, o.getOption(choi)));
}
}
}
opt++;
}
return combinations;
}
public abstract ConfigurableScript<T> clone();
public abstract Collection<Options> getApplicableChoicePoints(int player, GameState gs);
public abstract void initializeChoices();
}
| 2,592 | 23.462264 | 100 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/PuppetBase.java | package ai.puppet;
import java.util.ArrayList;
import java.util.stream.Collectors;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.evaluation.EvaluationFunction;
import rts.GameState;
import rts.PlayerAction;
import util.Pair;
/**
 * Enumerates the legal choice combinations for one player, yielding one Move
 * per combination.  Supports move ordering (swapFront) and alpha-beta
 * cutoffs (ABcut).
 */
class MoveGenerator{
    ArrayList<ArrayList<Pair<Integer,Integer>>> choices;  // all combinations to enumerate
    int current = 0;                                      // cursor into choices
    int player;

    MoveGenerator(ArrayList<ArrayList<Pair<Integer,Integer>>> choices, int player){
        this.choices = choices;
        this.player = player;
    }

    /** True while combinations remain to be produced. */
    boolean hasNext(){
        return current < choices.size();
    }

    /** Move ordering: puts the combination equal to bestMove's at the front. */
    void swapFront(Move bestMove){
        int i = choices.indexOf(bestMove.choices);
        if (i > 0) {
            ArrayList<Pair<Integer,Integer>> oldFront = choices.get(0);
            choices.set(0, bestMove.choices);
            choices.set(i, oldFront);
        }
    }

    /** Returns the next combination wrapped as a Move and advances the cursor. */
    Move next(){
        Move m = new Move(choices.get(current), player);
        current++;
        return m;
    }

    /** Re-wraps the combination most recently produced by next(). */
    Move last(){
        return new Move(choices.get(current - 1), player);
    }

    /** Alpha-beta cutoff: exhausts the generator so hasNext() becomes false. */
    void ABcut(){
        current = choices.size();
    }
}
/** One assignment of option values to choice points for a single player. */
class Move{
    // (choice-point index, selected option value) pairs
    ArrayList<Pair<Integer,Integer>> choices;
    int player;
    public Move(ArrayList<Pair<Integer,Integer>> choices, int player){
        this.choices=choices;
        this.player=player;
    }
    /** Readable form: maps choice-point indices back to their enum names via the script. */
    public String toString(ConfigurableScript<?> script){
        return "choices: "+choices.stream().map(
                (Pair<Integer,Integer> p)->
                new Pair<>(script.choicePointValues[p.m_a].name(), p.m_b))
                .collect(Collectors.toList())+", player: "+player;
    }
}
/**
 * Common machinery for the "puppet search" AIs: holds the configurable script
 * whose choice points are searched over, the evaluation function, and two
 * layers of computation budget — a per-frame budget (inherited TIME_BUDGET /
 * ITERATIONS_BUDGET) and an overall plan budget (PLAN_TIME / PLAN_PLAYOUTS).
 */
public abstract class PuppetBase extends AIWithComputationBudget {
//	int MAX_TIME = 100;//ms
//	int MAX_ITERATIONS = -1;
	int PLAN_TIME;            // total ms allowed for a full plan (-1 = unlimited)
	int PLAN_PLAYOUTS;        // total playouts allowed for a full plan (-1 = unlimited)
	int STEP_PLAYOUT_TIME;    // game cycles simulated per playout step
	boolean PLAN;             // true when any plan-level budget is set
	EvaluationFunction eval;
	ConfigurableScript<?> script;
	int lastSearchFrame;      // game frame of the last search (-1 = none yet)
	long lastSearchTime;
	int frameLeaves = 0, totalLeaves = 0;            // playout counters: this frame / whole plan
	long frameStartTime=0,frameTime=0, totalTime = 0; // time counters: this frame / whole plan
	/**
	 * @param max_time_per_frame per-frame time budget (ms); -1 to disable
	 * @param max_playouts_per_frame per-frame playout budget; -1 to disable
	 * @param max_plan_time total plan time budget (ms); -1 to disable
	 * @param max_plan_playouts total plan playout budget; -1 to disable
	 * @param step_playout_time game cycles to simulate per playout
	 */
	PuppetBase(int max_time_per_frame, int max_playouts_per_frame,
			int max_plan_time, int max_plan_playouts,int step_playout_time,
			ConfigurableScript<?> script, EvaluationFunction evaluation) {
		super(max_time_per_frame,max_playouts_per_frame);
		// at least one per-frame budget must be active
		assert(max_time_per_frame>=0||max_playouts_per_frame>=0);
		PLAN_TIME=max_plan_time;
		PLAN_PLAYOUTS=max_plan_playouts;
		STEP_PLAYOUT_TIME=step_playout_time;
		PLAN= max_plan_time >= 0 || max_plan_playouts >= 0;
		this.script=script;
		eval=evaluation;
		lastSearchFrame=-1;
		lastSearchTime=-1;
	}
	/** Clears search bookkeeping and resets the script's choices to defaults. */
	@Override
	public void reset() {
		lastSearchFrame=-1;
		lastSearchTime=-1;
		script.reset();
		frameLeaves = 0; totalLeaves = 0;
		frameTime=0; totalTime = 0;
	}
	/** True when the overall plan budget (playouts or time) is exhausted. */
	boolean planBudgetExpired(){
		return (PLAN_PLAYOUTS>=0 && totalLeaves>=PLAN_PLAYOUTS)
				|| (PLAN_TIME>=0 && totalTime>PLAN_TIME);
	}
	/** True when this frame's budget (playouts or time) is exhausted. */
	boolean frameBudgetExpired(){
		return (ITERATIONS_BUDGET>=0 && frameLeaves>=ITERATIONS_BUDGET)
				|| (TIME_BUDGET>=0 && frameTime>TIME_BUDGET);
	}
	abstract void startNewComputation(int player, GameState gs) throws Exception;
	abstract void computeDuringOneGameFrame() throws Exception;
	abstract PlayerAction getBestActionSoFar() throws Exception;
	/** Advances gs in place for up to {@code time} cycles, or until game over, with ai1/ai2 in control. */
	static void simulate(GameState gs, AI ai1, AI ai2, int player1, int player2, int time)
			throws Exception {
		assert(player1!=player2);
		int timeOut = gs.getTime() + time;
		boolean gameover = gs.gameover();
		while(!gameover && gs.getTime()<timeOut) {
			if (gs.isComplete()) {
				gameover = gs.cycle();
			} else {
				gs.issue(ai1.getAction(player1, gs));
				gs.issue(ai2.getAction(player2, gs));
			}
		}
	}
	public int getPlanTimeBudget() {
		return PLAN_TIME;
	}
	public void setPlanTimeBudget(int a_ib) {
		PLAN_TIME = a_ib;
	}
	public int getPlanIterationsBudget() {
		return PLAN_PLAYOUTS;
	}
	public void setPlanIterationsBudget(int a_ib) {
		PLAN_PLAYOUTS = a_ib;
	}
}
| 3,822 | 23.986928 | 87 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/PuppetMCTSNode.java | package ai.puppet;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
/**
 * Node of the Puppet-MCTS tree. A "move" is an assignment to the script's
 * choice points. Choices are made in simultaneous pairs: a node with
 * prevMove == null is waiting for the first player's choice; its children
 * (prevMove != null) are waiting for the opponent's reply, and only once both
 * choices are known is the game state advanced by a script-vs-script playout.
 * Per-child statistics (visit counts, accumulated evaluations) are stored in
 * the parent's arrays, indexed by each child's {@code index}.
 */
public class PuppetMCTSNode {
    GameState gs;
    float C;//exploration constant
    PuppetMCTSNode parent;
    ConfigurableScript<?> script;
    float evaluation_bound; // used to normalize exploitation terms for UCB

    List<PuppetMCTSNode> children = new ArrayList<>();
    Move prevMove;//move that generated this state
    int nextPlayerInSimultaneousNode; // player who chooses first at this node
    Move[] actions; // legal choice combinations for the player to move here
    int[] visit_count;        // per-child visit counts (parallel to children)
    float[] accum_evaluation; // per-child accumulated evaluations
    int total_visit_count;
    int index; // this node's slot in its parent's arrays (-1 for the root)

    /** Dumps the principal variation (chain of best children) from this node down. */
    public String toString() {
        return bestChild() == null ? "" :
                " time:" + gs.getTime() + " " +
                        actions[bestChild().index].toString(script) + ", score: " + bestChild().score() + "\n" +
                        bestChild().toString();
    }

    /** Mean evaluation of this node, read from the parent's statistics. */
    float score() {
        assert (parent.visit_count[index] == total_visit_count);
        return parent.accum_evaluation[index] / total_visit_count;
    }

    public PuppetMCTSNode(
            GameState gs,
            ConfigurableScript<?> script,
            float C,
            int nextPlayerInSimultaneousNode,
            float bound,
            PuppetMCTSNode parent,
            Move prevMove,
            int index) {
        this.gs = gs;
        this.script = script;
        this.C = C;
        this.nextPlayerInSimultaneousNode = nextPlayerInSimultaneousNode;
        evaluation_bound = bound;
        this.parent = parent;
        this.prevMove = prevMove;
        this.index = index;
        // one candidate Move per legal choice combination for the player to move
        actions = script.getChoiceCombinations(toMove(), gs).stream().map(e -> new Move(e, toMove())).toArray(Move[]::new);
        visit_count = new int[actions.length];
        accum_evaluation = new float[actions.length];
        total_visit_count = 0;
    }

    /** Root-node constructor (no parent, no generating move). */
    public PuppetMCTSNode(
            GameState gs,
            ConfigurableScript<?> script,
            float C,
            int nextPlayerInSimultaneousNode,
            float bound) {
        this(gs, script, C, nextPlayerInSimultaneousNode, bound, null, null, -1);
    }

    /** Player whose choice combination is selected at this node. */
    int toMove() {
        if (prevMove == null) return nextPlayerInSimultaneousNode;
        else return (1 - prevMove.player);
    }

    /** Most-visited ("robust") child, or null if nothing has been expanded. */
    PuppetMCTSNode bestChild() {
        if (children.isEmpty()) return null;
        int best = -1;
        int best_visit_count = 0;
        for (int child = 0; child < children.size(); child++) {
            int tmp = visit_count[child];
            if (best == -1 || tmp > best_visit_count) {
                best = child;
                best_visit_count = tmp;
            }
        }
        return children.get(best);
    }

    /**
     * Descends the tree, expanding the first unexpanded child when one exists
     * and otherwise following UCB1; returns the leaf to be evaluated. The game
     * state is advanced (a STEP_PLAYOUT_TIME-cycle script-vs-script
     * simulation) only when the second player of a simultaneous pair expands.
     */
    PuppetMCTSNode selectLeaf(int STEP_PLAYOUT_TIME) throws Exception {
        // if non visited children, visit:
        if (children.size() < actions.length) {
            Move m = actions[children.size()];
            //if first player
            if (prevMove == null) {
                // record the first player's choice; state unchanged until the reply is known
                PuppetMCTSNode node = new PuppetMCTSNode(gs, script, C, 1 - nextPlayerInSimultaneousNode, evaluation_bound, this, m, children.size());
                children.add(node);
                return node.selectLeaf(STEP_PLAYOUT_TIME);
            } else//second player
            {
                if (gs.gameover()) return this;
                // both choices are known: play them against each other to get the successor state
                GameState gs2 = gs.clone();
                ConfigurableScript<?> sc1 = script.clone();
                sc1.reset();
                ConfigurableScript<?> sc2 = script.clone();
                sc2.reset();
                sc1.setChoices(prevMove.choices);
                sc2.setChoices(m.choices);
                PuppetBase.simulate(gs2, sc1, sc2, prevMove.player, m.player, STEP_PLAYOUT_TIME);
                PuppetMCTSNode node = new PuppetMCTSNode(gs2, script, C, nextPlayerInSimultaneousNode, evaluation_bound, this, null, children.size());//players alternate in 1-2-2-1
                children.add(node);
                return node;
            }
        } else//all children expanded, Bandit policy:
        {
            double best_score = 0;
            int best = -1;
            for (int child = 0; child < children.size(); child++) {
                // UCB1: normalized mean value + C * sqrt(ln(N) / n_child)
                double exploitation = ((double) accum_evaluation[child]) / visit_count[child];
                exploitation = exploitation / evaluation_bound;
                double exploration = Math.sqrt(Math.log((double) total_visit_count) / visit_count[child]);
                // System.out.println(exploitation + " + " + exploration);
                double tmp = exploitation + C * exploration;
                if (best == -1 || tmp > best_score) {
                    best = child;
                    best_score = tmp;
                }
            }
            if (best == -1) {
                return this;
            } else {
                return children.get(best).selectLeaf(STEP_PLAYOUT_TIME);
            }
        }
    }

    /**
     * Backpropagates evaluation {@code ev} (from {@code player}'s point of
     * view) up to the root, negating it at nodes owned by the opponent.
     */
    void update(float ev, int player) {
        total_visit_count++;
        if (parent != null) {
            parent.accum_evaluation[index] += (player() == player ? ev : -ev);
            parent.visit_count[index]++;
            parent.update(ev, player);
        }
    }

    /** Player that made the move leading to this node (-1 for the root). */
    int player() {
        return parent != null ? parent.actions[index].player : -1;
    }
}
| 4,208 | 25.639241 | 166 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/PuppetNoPlan.java | package ai.puppet;
import ai.abstraction.pathfinding.FloodFillPathFinding;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import ai.core.InterruptibleAI;
/**
 * Adapter that runs any {@link PuppetBase} search without a standing plan:
 * every call to getAction restarts the wrapped search on a clone of the
 * current state, runs it for one frame's budget, and immediately plays the
 * best action found. Budgets and parameters all delegate to the wrapped
 * search object.
 */
public class PuppetNoPlan extends AIWithComputationBudget implements InterruptibleAI {

    PuppetBase puppet; // the wrapped search; the single source of truth for budgets

    //By default use ABCD
    public PuppetNoPlan(UnitTypeTable utt) {
        this(new PuppetSearchAB(100, -1,
                -1, -1,
                100,
                new BasicConfigurableScript(utt, new FloodFillPathFinding()),
                new SimpleSqrtEvaluationFunction3()));
    }

    public PuppetNoPlan(PuppetBase puppet) {
        super(puppet.getTimeBudget(), puppet.getIterationsBudget());
        this.puppet = puppet;
    }

    /** Restart the search, run one frame's worth of computation, play its best action. */
    public final PlayerAction getAction(int player, GameState gs) throws Exception {
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player, gs.clone());
            computeDuringOneGameFrame();
            return getBestActionSoFar();
        } else {
            // no unit of this player can act this frame
            return new PlayerAction();
        }
    }

    @Override
    public void setTimeBudget(int a_tb) {
        puppet.setTimeBudget(a_tb);
    }

    @Override
    public int getTimeBudget() {
        return puppet.getTimeBudget();
    }

    @Override
    public int getIterationsBudget() {
        return puppet.getIterationsBudget();
    }

    @Override
    public void setIterationsBudget(int a_ib) {
        puppet.setIterationsBudget(a_ib);
    }

    @Override
    public void startNewComputation(int player, GameState gs) throws Exception {
        puppet.startNewComputation(player, gs);
    }

    @Override
    public void computeDuringOneGameFrame() throws Exception {
        puppet.computeDuringOneGameFrame();
    }

    @Override
    public PlayerAction getBestActionSoFar() throws Exception {
        return puppet.getBestActionSoFar();
    }

    @Override
    public void reset() {
        puppet.reset();
    }

    @Override
    public AI clone() {
        PuppetNoPlan clone = new PuppetNoPlan((PuppetBase) puppet.clone());
        return clone;
    }

    public String toString() {
        return getClass().getSimpleName() + "(" + puppet.toString() + ")";
    }

    @Override
    public String statisticsString() {
        return puppet.statisticsString();
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        return puppet.getParameters();
    }
}
| 2,557 | 23.834951 | 86 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/PuppetSearchAB.java | /**
*
*/
package ai.puppet;
import ai.abstraction.pathfinding.FloodFillPathFinding;
import java.util.Collection;
import java.util.Collections;
import java.util.Stack;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import util.Pair;
/**
* @author nbarriga
*
*/
/**
 * Puppet Search driven by an iterative-deepening alpha-beta (ABCD-style)
 * search over script choice-point combinations. The recursion is implemented
 * with an explicit stack of {@link ABCDNode}s so the search can be paused
 * when the per-frame budget runs out and resumed on the next frame. A
 * transposition table (TT) provides move ordering and a cache table (CT)
 * avoids re-simulating identical successor states.
 */
public class PuppetSearchAB extends PuppetBase {

    /** A move together with the alpha-beta score computed for it. */
    class Result {
        Move m;
        float score;

        public Result(Move m, float score) {
            this.m = m;
            this.score = score;
        }

        @Override
        public String toString() {
            return m.toString(script) + ", score: " + score;
        }
    }

    /** One entry of the explicit search stack: a node of the ABCD tree. */
    class ABCDNode {
        PuppetGameState gs;
        Move prevMove; // first player's move of the simultaneous pair; null while it is still being chosen
        float alpha;
        float beta;
        int depth;
        int nextPlayerInSimultaneousNode;
        MoveGenerator nextMoves;
        Result best;        // best result found so far at this node
        ABCDNode following; // child that produced 'best' (used to extract the plan)

        public ABCDNode(
                PuppetGameState gs,
                Move prevMove,
                float alpha,
                float beta,
                int depth,
                int nextPlayerInSimultaneousNode,
                Result best) {
            this.gs = gs;
            this.prevMove = prevMove;
            this.alpha = alpha;
            this.beta = beta;
            this.depth = depth;
            this.nextPlayerInSimultaneousNode = nextPlayerInSimultaneousNode;
            this.best = best;
            nextMoves = new MoveGenerator(script.getChoiceCombinations(toMove(), gs.gs), toMove());
            following = null;
        }

        /** Player whose move is chosen at this node. */
        int toMove() {
            if (prevMove == null) return nextPlayerInSimultaneousNode;
            else return (1 - prevMove.player);
        }

        boolean isMaxPlayer() {
            return toMove() == MAXPLAYER;
        }

        /** Folds a child's result into this node, tightening alpha/beta and cutting. */
        void setResult(Result result, ABCDNode node) {
            if (best == null) {
                best = result;
                following = node;
            } else if (isMaxPlayer()) {
                alpha = Math.max(alpha, best.score);
                if (result.score > best.score) {
                    best = result;
                    following = node;
                }
            } else if (!isMaxPlayer()) {
                beta = Math.min(beta, best.score);
                if (result.score < best.score) {
                    best = result;
                    following = node;
                }
            }
            if (alpha >= beta) {
                // window closed: prune the remaining siblings
                nextMoves.ABcut();
            }
        }

        public String toString() {
            return " time:" + gs.gs.getTime() + " " +/*prevMove+" best="+*/best + "\n" + (following != null ? following.toString() : "");
        }
    }

    /** The principal variation of the last completed search. */
    class Plan {
        ABCDNode node;

        Plan(ABCDNode node) {
            this.node = node;
        }

        Plan() {
            node = null;
        }

        /** Advances along the PV to the MAX node matching the current game time. */
        void update(GameState gs) {
            while (node != null &&
                    ((gs.getTime() - node.gs.gs.getTime()) > STEP_PLAYOUT_TIME || !node.isMaxPlayer())) {
                node = node.following;
            }
        }

        /** Choice-point assignments to execute now (empty when no valid plan). */
        Collection<Pair<Integer, Integer>> getChoices() {
            if (valid()) {
                return node.best.m.choices;
            } else {
                return Collections.emptyList();
            }
        }

        boolean valid() {
            return node != null && node.best != null;
        }

        public String toString() {
            return node != null ? node.toString() : "";
        }
    }

    protected int DEBUG = 0;
    protected int DEPTH;          // current iterative-deepening depth (2 plies per simultaneous pair)
    protected int MAXPLAYER = -1; // player the search maximizes for

    Stack<ABCDNode> stack = new Stack<>(); // explicit recursion stack (empty = depth finished)
    ABCDNode head;             // root of the search currently in progress
    ABCDNode lastFinishedHead; // root of the deepest fully completed iteration
    Plan currentPlan;
    TranspositionTable TT = new TranspositionTable(100000); // move ordering / bounds
    CacheTable CT = new CacheTable(100000);                 // caches simulated successor states

    public PuppetSearchAB(UnitTypeTable utt) {
        this(100, -1,
                5000, -1,
                100,
                new BasicConfigurableScript(utt, new FloodFillPathFinding()),
                new SimpleSqrtEvaluationFunction3());
    }

    /**
     * @param max_time_per_frame    ms budget per frame (-1 = unlimited)
     * @param max_playouts_per_frame leaf budget per frame (-1 = unlimited)
     * @param max_plan_time         ms budget for a complete plan (-1 = no standing plan limit)
     * @param max_plan_playouts     leaf budget for a complete plan (-1 = no standing plan limit)
     * @param playout_time          game cycles simulated per search step
     */
    public PuppetSearchAB(
            int max_time_per_frame, int max_playouts_per_frame,
            int max_plan_time, int max_plan_playouts,
            int playout_time,
            ConfigurableScript<?> script, EvaluationFunction evaluation) {
        super(max_time_per_frame, max_playouts_per_frame,
                max_plan_time, max_plan_playouts, playout_time,
                script, evaluation);
        currentPlan = new Plan();
    }

    @Override
    public void reset() {
        super.reset();
        currentPlan = new Plan();
        stack.clear();
        head = null;
        lastFinishedHead = null;
        DEPTH = 0;
        clearStats();
    }

    //todo:this clone method is broken (shares the plan and does not copy search state)
    @Override
    public AI clone() {
        PuppetSearchAB ps = new PuppetSearchAB(TIME_BUDGET, ITERATIONS_BUDGET, PLAN_TIME, PLAN_PLAYOUTS, STEP_PLAYOUT_TIME, script.clone(), eval);
        ps.currentPlan = currentPlan;
        ps.lastSearchFrame = lastSearchFrame;
        ps.lastSearchTime = lastSearchTime;
        return ps;
    }

    /**
     * Standing-plan mode: (re)start the search when needed, advance it by one
     * frame's budget, then execute the current plan through the script.
     */
    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        assert (PLAN) : "This method can only be called when using a standing plan";
        if (lastSearchFrame == -1 || stack.empty()//||(gs.getTime()-lastSearchFrame)>PLAN_VALIDITY
        ) {
            if (DEBUG >= 1) {
                System.out.println("Restarting after " + (gs.getTime() - lastSearchFrame) + " frames, "
                        + (System.currentTimeMillis() - lastSearchTime) + " ms");
            }
            startNewComputation(player, gs);
        }
        if (DEBUG >= 2) System.out.println("Starting ABCD at frame " + gs.getTime() + ", player " + player + " with " + TIME_BUDGET + " ms");
        if (!stack.empty()) {
            computeDuringOneGameFrame();
        }
        if (gs.canExecuteAnyAction(player) && gs.winner() == -1) {
            if (DEBUG >= 2) System.out.println("Issuing move using choices: " + currentPlan.getChoices());
            currentPlan.update(gs);
            script.setDefaultChoices();
            script.setChoices(currentPlan.getChoices());
            PlayerAction pa = script.getAction(player, gs);
            return pa;
        } else {
            return new PlayerAction();
        }
    }

    @Override
    public String statisticsString() {
        return "Average Number of Leaves: " + allLeaves / allSearches +
                ", Average Depth: " + allDepth / allSearches +
                ", Average Time: " + allTime / allSearches;
    }

    void clearStats() {
        allTime = allLeaves = allDepth = 0;
        // -1 so the first startNewComputation (which increments) leaves it at 0
        allSearches = -1;
    }

    // lifetime statistics aggregated across searches
    long allLeaves;
    long allTime;
    long allDepth;
    long allSearches;

    /** Resets the stack with a fresh root for {@code player} and rolls statistics over. */
    @Override
    public void startNewComputation(int player, GameState gs) {
        MAXPLAYER = player;
        lastSearchFrame = gs.getTime();
        lastSearchTime = System.currentTimeMillis();
        stack.clear();
        stack.push(new ABCDNode(
                new PuppetGameState(gs.clone()),
                null,
                -EvaluationFunction.VICTORY,
                EvaluationFunction.VICTORY,
                0,
                MAXPLAYER,
                null));
        head = stack.peek();
        allLeaves += totalLeaves;
        allTime += totalTime;
        allDepth += DEPTH;
        allSearches++;
        totalLeaves = 0;
        totalTime = 0;
        DEPTH = 0;
    }

    @Override
    public PlayerAction getBestActionSoFar() throws Exception {
        assert (!PLAN) : "This method can only be called when not using a standing plan";
        if (DEBUG >= 1) System.out.println("ABCD:\n" + currentPlan + " in "
                + (System.currentTimeMillis() - lastSearchTime) + " ms, leaves: " + totalLeaves);
        script.setDefaultChoices();
        script.setChoices(currentPlan.getChoices());
        return script.getAction(MAXPLAYER, head.gs.gs);
    }

    /**
     * Runs iterative deepening until the frame budget or the plan budget is
     * exhausted; deepens by 2 plies each time the stack empties (a finished
     * iteration). Harvests the plan from the deepest finished iteration.
     */
    @Override
    public void computeDuringOneGameFrame() throws Exception {
        frameStartTime = System.currentTimeMillis();
        long prev = frameStartTime;
        frameLeaves = 0;
        do {
            if (DEPTH == 0) {//just started
                DEPTH += 2;
                reached = false;
            } else if (stack.empty()) {//just finished a depth
                // stop deepening when the previous depth never reached a depth-limited leaf
                if (!reached) break;
                lastFinishedHead = head;
                if (DEBUG >= 2) System.out.println("ABCD:\n" + lastFinishedHead + " in "
                        + (System.currentTimeMillis() - lastSearchTime) + " ms, leaves: " + totalLeaves +
                        ", depth: " + DEPTH);
                DEPTH += 2;
                stack.push(new ABCDNode(
                        new PuppetGameState(head.gs),
                        null,
                        -EvaluationFunction.VICTORY,
                        EvaluationFunction.VICTORY,
                        0,
                        MAXPLAYER,
                        null));
                head = stack.peek();
                reached = false;
            } else {//continuing from last frame
            }
            // System.out.println("Depth:" +DEPTH);
            iterativeABCD(DEPTH);
            if (stack.empty()) {
                lastFinishedHead = head;
            }
            long next = System.currentTimeMillis();
            totalTime += next - prev;
            prev = next;
            frameTime = prev - frameStartTime;
        } while (!frameBudgetExpired() && !searchDone());
        if (!PLAN) {
            currentPlan = new Plan(lastFinishedHead);
        }
        if (searchDone()) {
            if (DEBUG >= 1) System.out.println(ttHits + "/" + ttQueries + " TT, " + ctHits + "/" + ctQueries + " CT");
            stack.clear();
            currentPlan = new Plan(lastFinishedHead);
            if (DEBUG >= 1) System.out.println("ABCD:\n" + currentPlan + " in "
                    + totalTime
                    + " ms, wall time: " + (System.currentTimeMillis() - lastSearchTime)
                    + " ms, leaves: " + totalLeaves);
        }
    }

    boolean searchDone() {
        return PLAN && planBudgetExpired();
    }

    // transposition-table / cache-table hit statistics and enable switches
    int ttHits = 0;
    int ttQueries = 0;
    int ctHits = 0;
    int ctQueries = 0;
    boolean tt = true, ct = true;
    boolean reached; // did the current iteration reach a depth-limited leaf?

    /**
     * Resumable alpha-beta over simultaneous move pairs, driven by the
     * explicit stack. Runs until the stack empties (iteration done), the
     * frame budget expires, or the plan budget is exhausted. Nodes with
     * prevMove == null choose the first player's move; their children choose
     * the reply and simulate both scripts to produce the successor state.
     */
    protected void iterativeABCD(int maxDepth) throws Exception {
        assert (maxDepth % 2 == 0);
        if (DEBUG >= 2) System.out.println("ABCD at " + head.gs.gs.getTime());
        while (!stack.isEmpty() && !frameBudgetExpired() && !searchDone()) {
            if (DEBUG >= 2) System.out.println(stack);
            ABCDNode current = stack.peek();
            if (current.prevMove == null) {//first side to choose move
                if (current.depth == maxDepth || current.gs.gs.gameover()) {//evaluate
                    if (DEBUG >= 2) System.out.println("eval");
                    if (current.depth == maxDepth) reached = true;
                    frameLeaves++;
                    totalLeaves++;
                    stack.pop();
                    ABCDNode parent = stack.peek();
                    Result result = new Result(parent.nextMoves.last(), eval.evaluate(MAXPLAYER, 1 - MAXPLAYER, current.gs.gs));
                    parent.setResult(result, current);
                } else if (current.nextMoves.hasNext()) {//check children
                    if (tt && current.nextMoves.current == 0) {//if first child, check TT first
                        Entry ttEntry = TT.lookup(current.gs);
                        ttQueries++;
                        if (ttEntry != null) {
                            // move ordering: try the TT's best move first
                            current.nextMoves.swapFront(ttEntry._bestMove);
                            ttHits++;
                            // System.out.println("first");
                        }
                    }
                    if (DEBUG >= 2) System.out.println("current.nextMoves.hasNext()");
                    stack.push(new ABCDNode(
                            current.gs,
                            current.nextMoves.next(),
                            current.alpha,
                            current.beta,
                            current.depth + 1,
                            1 - current.nextPlayerInSimultaneousNode,
                            null));
                } else {//all children checked, return up
                    stack.pop();
                    if (!stack.empty()) {
                        ABCDNode parent = stack.peek();
                        parent.setResult(new Result(parent.nextMoves.last(), current.best.score), current);
                        // TT.store(parent.gs, parent.depth, parent.prevMove, parent.best.m, parent.best.score, parent.alpha, parent.beta, maxDepth-parent.depth);
                    }
                    if (tt) TT.store(current.gs, current.best.m, current.best.score, current.alpha, current.beta, maxDepth - current.depth);
                }
            } else {//second side to choose move
                if (current.nextMoves.hasNext()) {//check children
                    if (tt && current.nextMoves.current == 0) {//if first child, check TT first
                        Entry ttEntry = TT.lookup(current.gs, current.depth, current.prevMove);
                        ttQueries++;
                        if (ttEntry != null) {
                            current.nextMoves.swapFront(ttEntry._bestMove);
                            ttHits++;
                            // System.out.println("second");
                        }
                    }
                    Move next = current.nextMoves.next();
                    PuppetGameState gs2 = null;
                    CacheEntry ctEntry;
                    if (ct) {
                        // reuse a previously simulated successor state when available
                        ctEntry = CT.lookup(current.gs, current.depth - 1, current.prevMove, next);
                        ctQueries++;
                        if (ctEntry != null) {
                            gs2 = ctEntry._state;
                            ctHits++;
                        }
                    }
                    if (gs2 == null) {
                        // both moves known: play the two scripts against each other
                        GameState gsTemp = current.gs.gs.clone();
                        ConfigurableScript<?> sc1 = script.clone();
                        sc1.reset();
                        ConfigurableScript<?> sc2 = script.clone();
                        sc2.reset();
                        sc1.setChoices(current.prevMove.choices);
                        sc2.setChoices(next.choices);
                        simulate(gsTemp, sc1, sc2, current.prevMove.player, next.player, STEP_PLAYOUT_TIME);
                        gs2 = new PuppetGameState(current.gs, gsTemp, current.depth - 1, current.prevMove, next);
                        if (ct) CT.store(current.gs, gs2);
                    }
                    stack.push(new ABCDNode(
                            gs2,
                            null,
                            current.alpha,
                            current.beta,
                            current.depth + 1,
                            current.nextPlayerInSimultaneousNode,
                            null));
                } else {//all children checked, return up
                    stack.pop();
                    ABCDNode parent = stack.peek();
                    parent.setResult(new Result(parent.nextMoves.last(), current.best.score), current);
                    // TT.store(parent.gs, parent.best.m, parent.best.score, parent.alpha, parent.beta, maxDepth-parent.depth);
                    if (tt) TT.store(current.gs, current.depth, current.prevMove, current.best.m, current.best.score, current.alpha, current.beta, maxDepth - current.depth);
                }
            }
            frameTime = System.currentTimeMillis() - frameStartTime;
        }
    }

    /*
    int max_time_per_frame, int max_playouts_per_frame,
    int max_plan_time, int max_plan_playouts,
    int playout_time,
    ConfigurableScript<?> script, EvaluationFunction evaluation
    */
    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" +
                TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " +
                PLAN_TIME + ", " + PLAN_PLAYOUTS + ", " +
                STEP_PLAYOUT_TIME + ", " +
                script + ", " + eval + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget", int.class, 100));
        parameters.add(new ParameterSpecification("IterationsBudget", int.class, -1));
        parameters.add(new ParameterSpecification("PlanTimeBudget", int.class, 5000));
        parameters.add(new ParameterSpecification("PlanIterationsBudget", int.class, -1));
        parameters.add(new ParameterSpecification("StepPlayoutTime", int.class, 100));
        // parameters.add(new ParameterSpecification("Script",ConfigurableScript.class, script));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    public int getStepPlayoutTime() {
        return STEP_PLAYOUT_TIME;
    }

    public void setStepPlayoutTime(int a_ib) {
        STEP_PLAYOUT_TIME = a_ib;
    }

    public EvaluationFunction getEvaluationFunction() {
        return eval;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        eval = a_ef;
    }
}
| 13,955 | 27.539877 | 154 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/PuppetSearchMCTS.java | package ai.puppet;
import ai.RandomBiasedAI;
import ai.abstraction.pathfinding.FloodFillPathFinding;
import java.util.Collection;
import java.util.Collections;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.evaluation.EvaluationFunction;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import util.Pair;
/**
 * Puppet Search using MCTS (UCT) over script choice-point combinations.
 * Builds a tree of {@link PuppetMCTSNode}s; leaves are evaluated after a
 * short rollout played by two copies of a rollout policy. Can run with a
 * standing plan (computed over several frames, then followed) or replan
 * every frame (via PuppetNoPlan).
 */
public class PuppetSearchMCTS extends PuppetBase {

    /** The best-child chain of a finished search, followed while executing. */
    class Plan {
        PuppetMCTSNode node;

        Plan() {
            node = null;
        }

        Plan(PuppetMCTSNode root) {
            node = root;
        }

        /** Advances along best children until a recent node owned by 'player'. */
        void update(GameState gs, int player) {
            while (valid() &&
                    ((gs.getTime() - node.gs.getTime()) > STEP_PLAYOUT_TIME || node.bestChild().player() != player)) {
                node = node.bestChild();
            }
        }

        /** Choice-point assignments to execute now (empty when no valid plan). */
        Collection<Pair<Integer, Integer>> getChoices() {
            if (valid()) {
                return node.actions[node.bestChild().index].choices;
            } else {
                return Collections.emptyList();
            }
        }

        boolean valid() {
            return node != null && node.bestChild() != null;
        }

        public String toString() {
            return node.toString();
        }
    }

    int DEBUG = 0;
    int EVAL_PLAYOUT_TIME; // game cycles the rollout policies play before a leaf is evaluated
    AI policy1, policy2;   // rollout policies (one clone per player)
    PuppetMCTSNode root;   // tree of the search in progress (null when following a finished plan)
    Plan currentPlan;
    float C;//UCT exploration constant

    public PuppetSearchMCTS(UnitTypeTable utt) {
        this(100, -1,
                5000, -1,
                100, 100,
                new RandomBiasedAI(),
                new BasicConfigurableScript(utt, new FloodFillPathFinding()),
                new SimpleSqrtEvaluationFunction3());
    }

    public PuppetSearchMCTS(int max_time_per_frame, int max_playouts_per_frame,
            int max_plan_time, int max_plan_playouts,
            int step_playout_time, int eval_playout_time,
            AI policy, ConfigurableScript<?> script, EvaluationFunction evaluation) {
        super(max_time_per_frame, max_playouts_per_frame,
                max_plan_time, max_plan_playouts, step_playout_time,
                script, evaluation);
        EVAL_PLAYOUT_TIME = eval_playout_time;
        this.policy1 = policy.clone();
        this.policy2 = policy.clone();
        currentPlan = new Plan();
        root = null;
    }

    @Override
    public void reset() {
        super.reset();
        policy1.reset();
        policy2.reset();
        currentPlan = new Plan();
        root = null;
        clearStats();
    }

    @Override
    public String statisticsString() {
        return "Average Number of Leaves: " +
                (allSearches > 0 ? allLeaves / allSearches : "-") +
                ", Average Time: " +
                (allSearches > 0 ? allTime / allSearches : "-");
    }

    void clearStats() {
        allTime = allLeaves = 0;
        // -1 so the first startNewComputation (which increments) leaves it at 0
        allSearches = -1;
    }

    // lifetime statistics aggregated across searches
    long allLeaves;
    long allTime;
    long allSearches;

    //todo:this clone method is broken (shares the plan and does not copy the tree)
    @Override
    public AI clone() {
        PuppetSearchMCTS clone = new PuppetSearchMCTS(TIME_BUDGET, ITERATIONS_BUDGET,
                PLAN_TIME, PLAN_PLAYOUTS, STEP_PLAYOUT_TIME, EVAL_PLAYOUT_TIME,
                policy1.clone(), script.clone(), eval);
        clone.currentPlan = currentPlan;
        clone.lastSearchFrame = lastSearchFrame;
        clone.lastSearchTime = lastSearchTime;
        return clone;
    }

    /** Map-width-dependent UCT exploration constant (hand-tuned values). */
    private void setC(GameState gs) {
        if (gs.getPhysicalGameState().getWidth() <= 8) {
            C = 1.0f;
        } else if (gs.getPhysicalGameState().getWidth() <= 16) {
            C = 10.0f;
        } else {
            C = 0.1f;
        }
    }

    /**
     * Standing-plan mode: (re)start the search when the tree is gone, expand
     * it by one frame's budget, then execute the current plan via the script.
     */
    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        assert (PLAN) : "This method can only be called when using a standing plan";
        setC(gs);
        //Reinitialize the tree
        if (lastSearchFrame == -1 || root == null//||(gs.getTime()-lastSearchFrame)>PLAN_VALIDITY
        ) {
            if (DEBUG >= 1) {
                System.out.println("Restarting after " + (gs.getTime() - lastSearchFrame) + " frames, "
                        + (System.currentTimeMillis() - lastSearchTime) + " ms (" + totalTime + " ms)");
            }
            startNewComputation(player, gs);
        }
        if (DEBUG >= 3) System.out.println("Starting MCTS at frame " + gs.getTime() + ", player " + player + " with " + TIME_BUDGET + " ms");
        //Expand the tree
        if (root != null) {
            computeDuringOneGameFrame();
        }
        //execute current plan
        if (gs.canExecuteAnyAction(player) && gs.winner() == -1) {
            currentPlan.update(gs, player);
            if (DEBUG >= 2) System.out.println("Issuing move using choices: " + currentPlan.getChoices());
            script.setDefaultChoices();
            script.setChoices(currentPlan.getChoices());
            PlayerAction pa = script.getAction(player, gs);
            return pa;
        } else {
            return new PlayerAction();
        }
    }

    /** Creates a fresh root for {@code player} and rolls statistics over. */
    @Override
    public void startNewComputation(int player, GameState gs) {
        setC(gs);
        lastSearchFrame = gs.getTime();
        lastSearchTime = System.currentTimeMillis();
        root = new PuppetMCTSNode(gs.clone(), script, C, player, eval.upperBound(gs));
        allLeaves += totalLeaves;
        allTime += totalTime;
        allSearches++;
        totalLeaves = 0;
        totalTime = 0;
    }

    @Override
    public PlayerAction getBestActionSoFar() throws Exception {
        // NOTE(review): typo in the assert message ("s" should be "a") — kept as-is here.
        assert (!PLAN) : "This method can only be called when not using s standing plan";
        if (DEBUG >= 1) System.out.println("Done. Moves:\n" + root + " in "
                + totalTime
                + " ms, wall time: " + (System.currentTimeMillis() - lastSearchTime)
                + " ms, playouts: " + totalLeaves);
        script.setDefaultChoices();
        script.setChoices(root.actions[root.bestChild().index].choices);
        return script.getAction(root.nextPlayerInSimultaneousNode, root.gs);
    }

    /** Runs MCTS iterations until the frame budget or the plan budget expires. */
    @Override
    public void computeDuringOneGameFrame() throws Exception {
        frameStartTime = System.currentTimeMillis();
        long prev = frameStartTime;
        frameLeaves = 0;
        if (DEBUG >= 2) System.out.println("Search...");
        do {
            monteCarloRun();
            long next = System.currentTimeMillis();
            totalTime += next - prev;
            prev = next;
            frameTime = next - frameStartTime;
        } while (!frameBudgetExpired() && !searchDone());
        if (searchDone()) {
            // plan budget consumed: freeze the tree into a plan and drop the root
            currentPlan = new Plan(root);
            root = null;
            if (DEBUG >= 1) System.out.println("Done. Updating Plan:\n" + currentPlan + " in "
                    + totalTime
                    + " ms, wall time: " + (System.currentTimeMillis() - lastSearchTime)
                    + " ms, playouts: " + totalLeaves);
        }
    }

    /** One MCTS iteration: select/expand a leaf, roll out with the policies, backpropagate. */
    void monteCarloRun() throws Exception {
        PuppetMCTSNode leaf = root.selectLeaf(STEP_PLAYOUT_TIME);
        float e;
        if (!leaf.gs.gameover()) {
            frameLeaves++;
            totalLeaves++;
            policy1.reset();
            policy2.reset();
            GameState gs2 = leaf.gs.clone();
            simulate(gs2, policy1, policy2, leaf.parent.player(), leaf.player(), EVAL_PLAYOUT_TIME);
            e = eval.evaluate(leaf.player(), 1 - leaf.player(), gs2);
        } else {
            e = eval.evaluate(leaf.player(), 1 - leaf.player(), leaf.gs);
        }
        leaf.update(e, leaf.player());
    }

    boolean searchDone() {
        return PLAN && planBudgetExpired();
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(" +
                TIME_BUDGET + ", " + ITERATIONS_BUDGET + ", " +
                PLAN_TIME + ", " + PLAN_PLAYOUTS + ", " + STEP_PLAYOUT_TIME + ", " + EVAL_PLAYOUT_TIME + ", " +
                policy1 + ", " + script + ", " + eval + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget", int.class, 100));
        parameters.add(new ParameterSpecification("IterationsBudget", int.class, -1));
        parameters.add(new ParameterSpecification("PlanTimeBudget", int.class, 5000));
        parameters.add(new ParameterSpecification("PlanIterationsBudget", int.class, -1));
        parameters.add(new ParameterSpecification("StepPlayoutTime", int.class, 100));
        parameters.add(new ParameterSpecification("EvalPlayoutTime", int.class, 100));
        parameters.add(new ParameterSpecification("Policy", AI.class, policy1));
        // parameters.add(new ParameterSpecification("Script",ConfigurableScript.class, script));
        parameters.add(new ParameterSpecification("EvaluationFunction", EvaluationFunction.class, new SimpleSqrtEvaluationFunction3()));
        return parameters;
    }

    public int getStepPlayoutTime() {
        return STEP_PLAYOUT_TIME;
    }

    public void setStepPlayoutTime(int a_ib) {
        STEP_PLAYOUT_TIME = a_ib;
    }

    public int getEvalPlayoutTime() {
        return EVAL_PLAYOUT_TIME;
    }

    public void setEvalPlayoutTime(int a_ib) {
        EVAL_PLAYOUT_TIME = a_ib;
    }

    public AI getPolicy() {
        return policy1;
    }

    public void setPolicy(AI a) throws Exception {
        policy1 = (AI) a.clone();
        policy2 = (AI) a.clone();
    }

    public EvaluationFunction getEvaluationFunction() {
        return eval;
    }

    public void setEvaluationFunction(EvaluationFunction a_ef) {
        eval = a_ef;
    }
}
| 8,896 | 27.516026 | 144 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/SingleChoiceConfigurableScript.java | package ai.puppet;
import ai.abstraction.pathfinding.FloodFillPathFinding;
import java.util.Collection;
import java.util.EnumMap;
import ai.abstraction.pathfinding.PathFinding;
import ai.core.AI;
import ai.core.ParameterSpecification;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
/** The one choice point exposed by {@link SingleChoiceConfigurableScript}. */
enum SingleChoice { SINGLE }

/**
 * A configurable script with exactly one choice point whose value selects
 * which of a fixed array of sub-scripts produces the player's actions.
 */
public class SingleChoiceConfigurableScript extends ConfigurableScript<SingleChoice> {

    AI scripts[];

    public SingleChoiceConfigurableScript(PathFinding a_pf, AI scripts[]) {
        super(a_pf);
        this.scripts = scripts;
        choicePoints = new EnumMap<>(SingleChoice.class);
        choices = new EnumMap<>(SingleChoice.class);
        choicePointValues = SingleChoice.values();
        reset();
    }

    @Override
    public void reset() {
        super.reset();
        for (AI subScript : scripts) {
            subScript.reset();
        }
    }

    /** The single choice point always applies, regardless of player or state. */
    @Override
    public Collection<Options> getApplicableChoicePoints(int player, GameState gs) {
        return getAllChoicePoints();
    }

    @Override
    public void initializeChoices() {
        // every sub-script index is a legal value for the SINGLE choice point
        int legal[] = new int[scripts.length];
        for (int i = 0; i < legal.length; i++) {
            legal[i] = i;
        }
        choicePoints.put(SingleChoice.SINGLE,
                new ai.puppet.ConfigurableScript.Options(SingleChoice.SINGLE.ordinal(), legal));
    }

    @Override
    public ConfigurableScript<SingleChoice> clone() {
        AI copies[] = new AI[scripts.length];
        for (int i = 0; i < scripts.length; i++) {
            copies[i] = scripts[i].clone();
        }
        SingleChoiceConfigurableScript copy = new SingleChoiceConfigurableScript(pf, copies);
        copy.choices = choices.clone();
        copy.choicePoints = choicePoints.clone();
        copy.choicePointValues = choicePointValues.clone();
        return copy;
    }

    /** Delegates to whichever sub-script the current choice selects. */
    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        return scripts[choices.get(SingleChoice.SINGLE)].getAction(player, gs);
    }

    public String toString() {
        String out = "SingleChoicePoint(";
        for (AI subScript : scripts) {
            out += subScript.toString() + ",";
        }
        return out + ")";
    }

    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("PathFinding", PathFinding.class, new FloodFillPathFinding()));
        parameters.add(new ParameterSpecification("Scripts", AI[].class, scripts));
        return parameters;
    }
}
| 2,515 | 26.347826 | 113 | java |
MicroRTS | MicroRTS-master/src/ai/puppet/TranspositionTable.java | package ai.puppet;
/**
 * One slot of the transposition table: the best move found for a position,
 * the position's full hash (used to detect slot collisions), its alpha-beta
 * value, the search height it was computed at, and whether that value is an
 * exact score or an upper/lower bound.
 */
class Entry {
    Move _bestMove;
    int _hash;
    float _value;
    int _height;
    boolean _exact;
    boolean _upper;

    /** Empty slot; fields are overwritten when a position is stored. */
    Entry() {
    }

    Entry(Move bestMove, int hash, float value, int height, boolean exact, boolean upper) {
        _bestMove = bestMove;
        _hash = hash;
        _value = value;
        _height = height;
        _exact = exact;
        _upper = upper;
    }
}
/**
 * Fixed-size, always-replace transposition table for the puppet alpha-beta
 * search. Positions are indexed by their hash modulo the table size; a full
 * hash is stored in each slot so collisions can be detected on lookup.
 *
 * Fix: the original computed the slot as {@code hash % _entries.length},
 * which is negative for negative hashes in Java and threw
 * ArrayIndexOutOfBoundsException. {@code Math.floorMod} always yields a
 * non-negative index.
 */
class TranspositionTable {

    Entry[] _entries;

    TranspositionTable(int size) {
        _entries = new Entry[size];
        for (int i = 0; i < size; i++) {
            _entries[i] = new Entry();
        }
    }

    /** Maps a full hash to a table slot; floorMod keeps negative hashes in range. */
    private int slot(int hash) {
        return Math.floorMod(hash, _entries.length);
    }

    /**
     * Fills {@code e} in place, classifying {@code value} against the
     * (alpha, beta) window: fail-low => upper bound, fail-high => lower
     * bound, inside the window => exact score.
     */
    private void fill(Entry e, Move bestMove, int hash, float value, float alpha, float beta, int height) {
        boolean exact, upper;
        if (value <= alpha) {
            // fail-low: 'value' is only an upper bound on the true score
            exact = false;
            upper = true;
        } else if (value >= beta) {
            // fail-high: 'value' is only a lower bound on the true score
            exact = false;
            upper = false;
        } else {
            exact = true;
            upper = false;
        }
        e._bestMove = bestMove;
        e._hash = hash;
        e._value = value;
        e._height = height;
        e._exact = exact;
        e._upper = upper;
    }

    /** Stores a result keyed by the state alone (first player to move). */
    void store(PuppetGameState origState, Move bestMove, float value, float alpha, float beta, int height) {
        int hash = origState.getHash();
        fill(_entries[slot(hash)], bestMove, hash, value, alpha, beta, height);
    }

    /** Stores a result keyed by state + depth + the first player's move (second player to move). */
    void store(PuppetGameState origState, int depth, Move move, Move bestMove, float value, float alpha, float beta, int height) {
        int hash = origState.getHash(depth, move);
        fill(_entries[slot(hash)], bestMove, hash, value, alpha, beta, height);
    }

    /** Looks up a second-player entry; null when the slot holds a different position. */
    Entry lookup(PuppetGameState state, int depth, Move move) {
        int hash = state.getHash(depth, move);
        Entry entry = _entries[slot(hash)];
        return entry._hash == hash ? entry : null;
    }

    /** Looks up a first-player entry; null when the slot holds a different position. */
    Entry lookup(PuppetGameState state) {
        int hash = state.getHash();
        Entry entry = _entries[slot(hash)];
        return entry._hash == hash ? entry : null;
    }
}
| 2,071 | 19.514851 | 125 | java |
MicroRTS | MicroRTS-master/src/ai/reward/AttackRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
*
* @author costa
*/
/**
 * Reward signal that pays {@link #ATTACK_REWARD} each time one of the max
 * player's units issues an attack-location action whose target cell is
 * occupied by a unit in the trace's game state. Never terminates the episode.
 */
public class AttackRewardFunction extends RewardFunctionInterface{
    public static float ATTACK_REWARD = 1;

    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        for (Pair<Unit, UnitAction> issued : te.getActions()) {
            Unit actor = issued.m_a;
            UnitAction action = issued.m_b;
            if (actor.getPlayer() != maxplayer) continue;
            if (action.getType() != UnitAction.TYPE_ATTACK_LOCATION) continue;
            // Only reward attacks aimed at an occupied cell.
            Unit target = te.getPhysicalGameState().getUnitAt(action.getLocationX(), action.getLocationY());
            if (target != null) {
                reward += ATTACK_REWARD;
            }
        }
    }

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }
}
| 1,027 | 24.073171 | 109 | java |
MicroRTS | MicroRTS-master/src/ai/reward/CloserToEnemyBaseRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.units.Unit;
/**
*
* @author costa
*/
public class CloserToEnemyBaseRewardFunction extends RewardFunctionInterface{
public static float ATTACK_REWARD = 1;
public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
reward = 0.0;
done = false;
int baseX = 0;
int baseY = 0;
boolean baseExists = false;
for(Unit t: te.getPhysicalGameState().getUnits()) {
if (t.getPlayer() == minplayer && t.getType().name.equals("Base")) {
baseExists = true;
baseX = t.getX();
baseY = t.getY();
break;
}
}
if (!baseExists) {
return;
}
double oldMinDistanceToEnemyBase = 2000000000;
for(Unit t: te.getPhysicalGameState().getUnits()) {
if (t.getPlayer() == maxplayer && (
t.getType().name.equals("Light") || t.getType().name.equals("Heavy") ||
t.getType().name.equals("Ranged") || t.getType().name.equals("Worker"))) {
// Euclidean distance
double distance = Math.sqrt(Math.pow((baseX-t.getX()), 2.0) + Math.pow((baseY-t.getY()), 2.0));
if (distance < oldMinDistanceToEnemyBase) {
oldMinDistanceToEnemyBase = distance;
}
}
}
double newMinDistanceToEnemyBase = 2000000000;
for(Unit t: afterGs.getPhysicalGameState().getUnits()) {
if (t.getPlayer() == maxplayer && (
t.getType().name.equals("Light") || t.getType().name.equals("Heavy") ||
t.getType().name.equals("Ranged") || t.getType().name.equals("Worker"))) {
// Euclidean distance
double distance = Math.sqrt(Math.pow((baseX-t.getX()), 2.0) + Math.pow((baseY-t.getY()), 2.0));
if (distance < newMinDistanceToEnemyBase) {
newMinDistanceToEnemyBase = distance;
}
}
}
reward = oldMinDistanceToEnemyBase - newMinDistanceToEnemyBase;
}
public double getReward() {
return reward;
}
public boolean isDone() {
return done;
}
}
| 2,422 | 32.652778 | 111 | java |
MicroRTS | MicroRTS-master/src/ai/reward/CloserToEnemyUnitRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.units.Unit;
/**
*
* @author costa
*/
/**
 * NOTE(review): despite its name, this class is a byte-for-byte copy of
 * CloserToEnemyBaseRewardFunction — it rewards the max player's mobile
 * units for getting closer to the min player's *Base*, not to the nearest
 * enemy *unit*. Confirm whether per-unit distance was intended; fixing it
 * would change the reward semantics of any experiment using this class.
 *
 * Reward = decrease of the minimum Euclidean distance from any max-player
 * Worker/Light/Heavy/Ranged unit to the enemy base between the trace-entry
 * state and the following state. 0 if the enemy has no base. Never marks
 * the episode as done.
 */
public class CloserToEnemyUnitRewardFunction extends RewardFunctionInterface{
    public static float ATTACK_REWARD = 1;
    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        // Find the min player's base; without one the reward stays 0.
        int baseX = 0;
        int baseY = 0;
        boolean baseExists = false;
        for(Unit t: te.getPhysicalGameState().getUnits()) {
            if (t.getPlayer() == minplayer && t.getType().name.equals("Base")) {
                baseExists = true;
                baseX = t.getX();
                baseY = t.getY();
                break;
            }
        }
        if (!baseExists) {
            return;
        }
        // Closest mobile max-player unit to the base BEFORE the transition.
        // 2000000000 is a sentinel larger than any on-map distance.
        double oldMinDistanceToEnemyBase = 2000000000;
        for(Unit t: te.getPhysicalGameState().getUnits()) {
            if (t.getPlayer() == maxplayer && (
                    t.getType().name.equals("Light") || t.getType().name.equals("Heavy") ||
                    t.getType().name.equals("Ranged") || t.getType().name.equals("Worker"))) {
                // Euclidean distance
                double distance = Math.sqrt(Math.pow((baseX-t.getX()), 2.0) + Math.pow((baseY-t.getY()), 2.0));
                if (distance < oldMinDistanceToEnemyBase) {
                    oldMinDistanceToEnemyBase = distance;
                }
            }
        }
        // Closest mobile max-player unit to the base AFTER the transition.
        double newMinDistanceToEnemyBase = 2000000000;
        for(Unit t: afterGs.getPhysicalGameState().getUnits()) {
            if (t.getPlayer() == maxplayer && (
                    t.getType().name.equals("Light") || t.getType().name.equals("Heavy") ||
                    t.getType().name.equals("Ranged") || t.getType().name.equals("Worker"))) {
                // Euclidean distance
                double distance = Math.sqrt(Math.pow((baseX-t.getX()), 2.0) + Math.pow((baseY-t.getY()), 2.0));
                if (distance < newMinDistanceToEnemyBase) {
                    newMinDistanceToEnemyBase = distance;
                }
            }
        }
        // Positive when the closest unit moved towards the base.
        reward = oldMinDistanceToEnemyBase - newMinDistanceToEnemyBase;
    }
    public double getReward() {
        return reward;
    }
    public boolean isDone() {
        return done;
    }
}
| 2,422 | 32.652778 | 111 | java |
MicroRTS | MicroRTS-master/src/ai/reward/ProduceBuildingRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
*
* @author costa
*/
/**
 * Pays {@link #BUILDING_PRODUCE_REWARD} each time the max player issues a
 * produce action for a building ("Barracks" or "Base"). Never terminates
 * the episode.
 */
public class ProduceBuildingRewardFunction extends RewardFunctionInterface{
    public static float BUILDING_PRODUCE_REWARD = 1;

    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        for (Pair<Unit, UnitAction> issued : te.getActions()) {
            UnitAction action = issued.m_b;
            if (issued.m_a.getPlayer() != maxplayer) continue;
            if (action.getType() != UnitAction.TYPE_PRODUCE) continue;
            if (action.getUnitType() == null) continue;
            String produced = action.getUnitType().name;
            if (produced.equals("Barracks") || produced.equals("Base")) {
                reward += BUILDING_PRODUCE_REWARD;
            }
        }
    }

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }
}
| 1,040 | 25.025 | 120 | java |
MicroRTS | MicroRTS-master/src/ai/reward/ProduceCombatUnitRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
*
* @author costa
*/
/**
 * Pays {@link #COMBAT_UNITS_PRODUCE_REWARD} each time the max player issues
 * a produce action for a combat unit ("Light", "Heavy" or "Ranged"). Never
 * terminates the episode.
 */
public class ProduceCombatUnitRewardFunction extends RewardFunctionInterface{
    public static float COMBAT_UNITS_PRODUCE_REWARD = 1;

    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        for (Pair<Unit, UnitAction> issued : te.getActions()) {
            UnitAction action = issued.m_b;
            if (issued.m_a.getPlayer() != maxplayer) continue;
            if (action.getType() != UnitAction.TYPE_PRODUCE) continue;
            if (action.getUnitType() == null) continue;
            String produced = action.getUnitType().name;
            if (produced.equals("Light") || produced.equals("Heavy") || produced.equals("Ranged")) {
                reward += COMBAT_UNITS_PRODUCE_REWARD;
            }
        }
    }

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }
}
| 1,093 | 26.35 | 152 | java |
MicroRTS | MicroRTS-master/src/ai/reward/ProduceWorkerRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
*
* @author costa
*/
/**
 * Pays {@link #WORKER_PRODUCE_REWARD} each time the max player issues a
 * produce action for a "Worker" unit. Never terminates the episode.
 */
public class ProduceWorkerRewardFunction extends RewardFunctionInterface{
    public static float WORKER_PRODUCE_REWARD = 1;

    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        for (Pair<Unit, UnitAction> issued : te.getActions()) {
            UnitAction action = issued.m_b;
            if (issued.m_a.getPlayer() != maxplayer) continue;
            if (action.getType() != UnitAction.TYPE_PRODUCE) continue;
            if (action.getUnitType() == null) continue;
            if (action.getUnitType().name.equals("Worker")) {
                reward += WORKER_PRODUCE_REWARD;
            }
        }
    }

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }
}
| 989 | 23.75 | 120 | java |
MicroRTS | MicroRTS-master/src/ai/reward/ResourceGatherRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.PhysicalGameState;
import rts.TraceEntry;
import rts.UnitAction;
import rts.units.Unit;
import util.Pair;
/**
*
* @author costa
*/
/**
 * Rewards the max player for resource gathering: one unit of reward per
 * harvest action and per return action issued in the trace entry. The
 * episode is considered finished once the map contains no Resource pile
 * with resources left.
 */
public class ResourceGatherRewardFunction extends RewardFunctionInterface{
    public static float RESOURCE_RETURN_REWARD = 1;
    public static float RESOURCE_HARVEST_REWARD = 1;

    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        for (Pair<Unit, UnitAction> issued : te.getActions()) {
            if (issued.m_a.getPlayer() != maxplayer) continue;
            int actionType = issued.m_b.getType();
            if (actionType == UnitAction.TYPE_HARVEST) {
                reward += RESOURCE_HARVEST_REWARD;
            } else if (actionType == UnitAction.TYPE_RETURN) {
                reward += RESOURCE_RETURN_REWARD;
            }
        }
        // Done once every Resource pile on the map is exhausted.
        done = true;
        PhysicalGameState pgs = afterGs.getPhysicalGameState();
        for (Unit u : pgs.getUnits()) {
            if (u.getType().name.equals("Resource") && u.getResources() > 0) {
                done = false;
                return;
            }
        }
    }

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }
}
| 1,467 | 27.230769 | 97 | java |
MicroRTS | MicroRTS-master/src/ai/reward/RewardFunctionInterface.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
/**
*
* @author costa
*/
/**
 * Base class for reward signals computed from a game transition: the trace
 * entry holding the issued actions and the game state that resulted from
 * them. Subclasses fill {@link #reward} and {@link #done} inside
 * {@link #computeReward}; callers then read them via the getters.
 */
public abstract class RewardFunctionInterface {
    // Reward produced by the most recent computeReward call.
    public double reward = 0.0;
    // Whether the most recent computeReward call ended the episode.
    public boolean done = false;

    /**
     * Computes reward/done for the transition described by {@code te}
     * (actions issued) and {@code afterGs} (resulting state), from the
     * point of view of {@code maxplayer} against {@code minplayer}.
     */
    public abstract void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs);

    public double getReward() {
        return reward;
    }

    public boolean isDone() {
        return done;
    }

    @Override
    public String toString() {
        return getClass().getSimpleName();
    }
}
| 618 | 18.967742 | 103 | java |
MicroRTS | MicroRTS-master/src/ai/reward/WinLossRewardFunction.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.reward;
import rts.GameState;
import rts.TraceEntry;
/**
*
* @author costa
*/
/**
 * Sparse terminal reward: 0 while the game is running; once it is over,
 * +1 if the max player won and -1 otherwise (losses and draws alike).
 */
public class WinLossRewardFunction extends RewardFunctionInterface{
    public void computeReward(int maxplayer, int minplayer, TraceEntry te, GameState afterGs) {
        reward = 0.0;
        done = false;
        if (!afterGs.gameover()) {
            return;
        }
        done = true;
        if (afterGs.winner() == maxplayer) {
            reward = 1.0;
        } else {
            reward = -1.0;
        }
    }
}
| 544 | 19.961538 | 95 | java |
MicroRTS | MicroRTS-master/src/ai/scv/SCV.java | package ai.scv;
import ai.abstraction.EconomyMilitaryRush;
import ai.abstraction.EconomyRush;
import ai.abstraction.HeavyDefense;
import ai.abstraction.LightDefense;
import ai.abstraction.RangedDefense;
import ai.abstraction.WorkerDefense;
import ai.RandomBiasedAI;
import ai.abstraction.LightRush;
import ai.abstraction.RangedRush;
import ai.abstraction.WorkerRush;
import ai.abstraction.WorkerRushPlusPlus;
import ai.abstraction.partialobservability.POHeavyRush;
import ai.abstraction.partialobservability.POLightRush;
import ai.abstraction.partialobservability.PORangedRush;
import ai.abstraction.partialobservability.POWorkerRush;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import ai.evaluation.SimpleSqrtEvaluationFunction3;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import rts.GameState;
import rts.PhysicalGameState;
import rts.Player;
import rts.PlayerAction;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import util.Pair;
//weka itens
import weka.core.Instance;
import weka.core.Instances;
import weka.classifiers.functions.SimpleLogistic;
import weka.core.DenseInstance;
import weka.core.SerializationHelper;
import weka.core.converters.ConverterUtils;
/* Strategy Generation for Multi-Unit Real-Time Games via Voting.
* ToG 2018.
*/
/**
 * SCV: a meta-AI that keeps a pool of scripted strategies, asks every
 * script in the pool for a full PlayerAction each frame, and issues, per
 * unit, the UnitAction proposed by the largest number of scripts. Every
 * 1000 game cycles it classifies the opponent with a pre-trained Weka
 * SimpleLogistic model (one model per map height) and re-selects the pool
 * using an offline table of LTD3 battle results.
 *
 * From: "Strategy Generation for Multi-Unit Real-Time Games via Voting",
 * ToG 2018 (see file header).
 */
public class SCV extends AIWithComputationBudget {
    // One row of the offline battle-results table: the LTD3 score that
    // `strategy` obtained against `enemy` on a map of height `tMapa`.
    protected class infBattles {
        Integer tMapa;
        String enemy, strategy;
        double ltd3;
    }
    AI strategies[]; // current voting pool; replaced by tryClassify/setNewStrategy
    int playerForThisComputation; // player index captured by startNewComputation
    GameState gs_to_start_from; // state captured by startNewComputation
    SimpleLogistic rf; // Weka opponent classifier; null until first getAction
    UnitTypeTable localUtt;
    Instances dataSet; // Weka dataset template used to build classification instances
    long tempoInicial; // timestamp of the last getAction call ("initial time")
    // strategy name -> (map height -> battle records for that strategy/map)
    HashMap<String, HashMap<Integer, List<infBattles> > > indice;
    int heightMap; // height of the current map; selects model and CSV files
    // This is the default constructor that microRTS will call
    public SCV(UnitTypeTable utt) {
        this(new AI[]{new WorkerRush(utt),
            new LightRush(utt),
            new RangedRush(utt),
            new RandomBiasedAI()}, 100, -1, utt);
    }
    /**
     * @param s initial strategy pool
     * @param time time budget in milliseconds
     * @param max_playouts iterations budget (-1 for unlimited)
     * @param utt unit type table of the game being played
     */
    public SCV(AI s[], int time, int max_playouts, UnitTypeTable utt) {
        super(time, max_playouts);
        strategies = s;
        localUtt = utt;
        indice = new HashMap();
    }
    @Override
    public void reset() {
    }
    /**
     * Main entry point: lazily loads the classifier and battle table on the
     * first call, possibly re-selects the strategy pool, then returns the
     * voted action (or an empty action if this player cannot act).
     */
    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        tempoInicial = System.currentTimeMillis();
        if(rf == null){
            // First call: the map height decides which model/table files to load.
            this.heightMap = gs.getPhysicalGameState().getHeight();
            loadModel();
            loadLtd3Battles();
        }
        tryClassify(player, gs);
        if (gs.canExecuteAnyAction(player)) {
            startNewComputation(player, gs);
            return getBestActionSoFar();
        } else {
            return new PlayerAction();
        }
    }
    /**
     * Loads the offline LTD3 battle-results CSV matching the current map
     * height and builds the lookup index from it. CSV row format:
     * ltd3;mapHeight;enemy;strategy (semicolon-separated).
     */
    protected void loadLtd3Battles() {
        ArrayList<infBattles> infTemp = new ArrayList<>();
        String linha;
        try {
            BufferedReader learArq;
            switch (this.heightMap) {
                case 8:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais8.csv")));
                    break;
                case 9:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais9.csv")));
                    break;
                case 16:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais16.csv")));
                    break;
                case 24:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais24.csv")));
                    break;
                case 32:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais32.csv")));
                    break;
                case 64:
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinais64.csv")));
                    break;
                default:
                    //map 128
                    learArq = new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("models/ltdsFinaisSCV.csv")));
                    break;
            }
            linha = learArq.readLine();
            while (linha != null) {
                infBattles bat = new infBattles();
                String[] itens = linha.split(";");
                bat.ltd3 = Double.parseDouble(itens[0]);
                bat.tMapa = Integer.decode(itens[1]);
                bat.enemy = itens[2];
                bat.strategy = itens[3];
                infTemp.add(bat);
                linha = learArq.readLine();
            }
            learArq.close();
        } catch (Exception e) {
            // Loading failure is logged and tolerated; the index stays empty.
            System.err.printf("Erro na abertura do arquivo: %s.\n", e.getMessage());
            System.out.println(e.toString());
        }
        buildIndice(infTemp);
    }
    /** Builds `indice`: strategy name -> map height -> its battle records. */
    protected void buildIndice(ArrayList<infBattles> infTemp){
        HashMap<Integer, List<infBattles>> batTemp;
        int cont = 0;
        for (infBattles bat : infTemp) {
            cont++;
            if(indice.containsKey(bat.strategy)){
                // Strategy already indexed: check whether this map height is too.
                batTemp = indice.get(bat.strategy);
                if(!batTemp.containsKey(bat.tMapa)){
                    // First record for this map height under this strategy.
                    ArrayList<infBattles> infT = new ArrayList<>();
                    infT.add(bat);
                    batTemp.put(bat.tMapa, infT);
                }else{
                    // Append the battle to the existing list.
                    batTemp.get(bat.tMapa).add(bat);
                }
            }else{
                // First record for this strategy.
                batTemp = new HashMap<>();
                ArrayList<infBattles> infT = new ArrayList<>();
                infT.add(bat);
                batTemp.put(bat.tMapa, infT);
                indice.put(bat.strategy, batTemp);
            }
        }
    }
    /** Captures the player and state that getBestActionSoFar will vote on. */
    public void startNewComputation(int a_player, GameState gs) {
        playerForThisComputation = a_player;
        gs_to_start_from = gs;
    }
    /**
     * Voting step: queries every strategy in the pool for a PlayerAction,
     * then, unit by unit, issues the UnitAction proposed by the most
     * strategies (first-seen action wins ties).
     */
    public PlayerAction getBestActionSoFar() throws Exception {
        int slength = strategies.length;
        PlayerAction[] pa = new PlayerAction[slength];
        ArrayList<TreeMap<Long, UnitAction>> s = new ArrayList<>();
        // One full-action proposal per strategy.
        for (int i = 0; i < slength; i++) {
            pa[i] = strategies[i].getAction(playerForThisComputation, gs_to_start_from);
        }
        PlayerAction pAux = pa[0];
        // Index each proposal by unit ID so proposals can be compared per unit.
        for (PlayerAction p : pa) {
            TreeMap<Long, UnitAction> sAux = new TreeMap<>();
            p.getActions().forEach((u) -> {
                sAux.put(u.m_a.getID(), u.m_b);
            });
            s.add(sAux);
        }
        PlayerAction resultado = new PlayerAction();
        ArrayList<UnitAction> vote = new ArrayList<>();
        // NOTE(review): this comparator returns 1 for every non-equal pair,
        // violating the Comparator contract (no antisymmetry/transitivity).
        // It is used here only to bucket identical UnitActions together,
        // which appears to work with TreeMap's insertion probing, but an
        // equals/hashCode-based map would be safer — confirm before reuse.
        TreeMap<UnitAction, Integer> contagem = new TreeMap<>(new Comparator<UnitAction>() {
            @Override
            public int compare(UnitAction u1, UnitAction u2) {
                if (u1.equals(u2)) {
                    return 0;
                } else {
                    return 1;
                }
            }
        });
        // Process one unit (the smallest remaining unit ID) per iteration.
        while (!s.get(0).isEmpty()) {
            // Collect each strategy's proposal for this unit.
            s.forEach((ua) -> {
                vote.add(ua.get(ua.firstKey()));
            });
            // Recover the Unit object for the current unit ID.
            Unit uAux = null;
            for (Pair<Unit, UnitAction> u : pAux.getActions()) {
                if (u.m_a.getID() == s.get(0).firstKey()) {
                    uAux = u.m_a;
                }
            }
            s.forEach((ua) -> {
                ua.remove(ua.firstKey());
            });
            // Tally the votes per distinct UnitAction.
            vote.stream().map((valor) -> {
                if (!contagem.containsKey(valor)) {
                    contagem.put(valor, 0);
                }
                return valor;
            }).forEachOrdered((valor) -> {
                contagem.put(valor, contagem.get(valor) + 1);
            });
            vote.clear();
            // Pick the action with the highest count, draining the tally map.
            Iterator<Map.Entry<UnitAction, Integer>> iterator = contagem.entrySet().iterator();
            Map.Entry<UnitAction, Integer> entry = iterator.next();
            Integer maior = entry.getValue();
            UnitAction action = entry.getKey();
            iterator.remove();
            while (iterator.hasNext()) {
                entry = iterator.next();
                Integer aux = entry.getValue();
                if (aux > maior) {
                    action = entry.getKey();
                    maior = aux;
                }
                iterator.remove();
            }
            resultado.addUnitAction(uAux, action);
        }
        return resultado;
    }
    @Override
    public AI clone() {
        // Shallow copy: the clone shares the strategy instances and the utt.
        return new SCV(strategies, TIME_BUDGET, ITERATIONS_BUDGET, localUtt);
    }
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> parameters = new ArrayList<>();
        parameters.add(new ParameterSpecification("TimeBudget", int.class, 100));
        parameters.add(new ParameterSpecification("IterationsBudget", int.class, -1));
        return parameters;
    }
    /**
     * Loads the SimpleLogistic opponent-classification model for the current
     * map height plus the ARFF dataset template, then runs one classification
     * on a dummy instance (apparently a warm-up call; its result is unused).
     */
    protected void loadModel() {
        dataSet = null;
        try {
            switch (heightMap) {
                case 8:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV8.model"));
                    break;
                case 9:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV9.model"));
                    break;
                case 16:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV16.model"));
                    break;
                case 24:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV24.model"));
                    break;
                case 32:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV32.model"));
                    break;
                case 64:
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV64.model"));
                    break;
                default:
                    //map 128
                    rf = (SimpleLogistic) SerializationHelper.read(getClass().getResourceAsStream("models/SimpleLogisticSCV.model"));
                    break;
            }
            ConverterUtils.DataSource source = new ConverterUtils.DataSource(getClass().getResourceAsStream("models/dadosEnemyDistModelTemplateSCV.arff"));
            dataSet = source.getDataSet();
            dataSet.setClassIndex(dataSet.numAttributes() - 1);
            // Dummy instance with all-zero features; see recordInfo for the
            // meaning of each attribute index.
            Instance avai = new DenseInstance(10);
            avai.setDataset(dataSet);
            avai.setValue(0, 0);
            avai.setValue(1, 0);
            avai.setValue(2, 0);
            avai.setValue(3, 0);
            avai.setValue(4, 0);
            avai.setValue(5, 0);
            avai.setValue(6, 0);
            avai.setValue(7, 8);
            avai.setValue(8, -1);
            double enemy = rf.classifyInstance(avai);
        } catch (FileNotFoundException ex) {
            Logger.getLogger(SCV.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println("Erro "+ex);
        } catch (Exception ex) {
            Logger.getLogger(SCV.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println("Erro "+ex);
        }
    }
    /**
     * At cycle 0 picks an initial strategy pool based only on the map size;
     * afterwards, every 1000 cycles, re-classifies the opponent and
     * re-selects the pool via recordInfo.
     */
    protected void tryClassify(int player, GameState gs) {
        int playerEnemy = 0;
        if (player == 0) {
            playerEnemy = 1;
        }
        if (gs.getTime() % 1000 == 0 && gs.getTime() != 0) {
            this.recordInfo(playerEnemy, player, gs, gs.getTime());
        } else if (gs.getTime() == 0) {
            // Hand-tuned initial pools per map height.
            PhysicalGameState pgs = gs.getPhysicalGameState();
            if (pgs.getHeight() == 8) {
                this.strategies = new AI[]{new WorkerRushPlusPlus(localUtt),
                    new WorkerDefense(localUtt)};
            } else if (pgs.getHeight() == 16) {
                this.strategies = new AI[]{new WorkerRushPlusPlus(localUtt)};
            } else if (pgs.getHeight() == 24) {
                this.strategies = new AI[]{new WorkerRushPlusPlus(localUtt),
                    new WorkerDefense(localUtt),
                    new LightDefense(localUtt)};
            } else if (pgs.getHeight() == 32) {
                this.strategies = new AI[]{
                    new POLightRush(localUtt),
                    new WorkerDefense(localUtt),
                    new EconomyMilitaryRush(localUtt)
                };
            } else if (pgs.getHeight() == 64) {
                this.strategies = new AI[]{//new POWorkerRush(localUtt),
                    new POLightRush(localUtt),
                    new EconomyMilitaryRush(localUtt),
                    new WorkerDefense(localUtt)};
            }else {
                this.strategies = new AI[]{
                    new EconomyMilitaryRush(localUtt)
                };
            }
        }
    }
    /**
     * Builds the classification feature vector (counts of the enemy's unit
     * types, game time, map width, and the path distance from our base to
     * the closest enemy), asks the model for a class distribution, and
     * switches the strategy pool accordingly.
     */
    private void recordInfo(int playerEnemy, int player, GameState gs, int time) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        Player pEn = gs.getPlayer(playerEnemy);
        Player pA = gs.getPlayer(player);
        SimpleSqrtEvaluationFunction3 ef = new SimpleSqrtEvaluationFunction3();
        Unit base = null;
        int nWorkers = 0;
        int nBases = 0;
        int nBarracks = 0;
        int nRanged = 0;
        int nLight = 0;
        int nHeavy = 0;
        // Count the enemy's units by type; remember our own first base.
        for (Unit u : pgs.getUnits()) {
            if (u.getType().name.equals("Base") && u.getPlayer() == player) {
                if (base == null) {
                    base = u;
                }
            }
            if (u.getType().name.equals("Base") && u.getPlayer() == playerEnemy) {
                ++nBases;
            }
            if (u.getType().name.equals("Barracks") && u.getPlayer() == playerEnemy) {
                ++nBarracks;
            }
            if (u.getType().name.equals("Worker") && u.getPlayer() == playerEnemy) {
                ++nWorkers;
            }
            if (u.getType().name.equals("Ranged") && u.getPlayer() == playerEnemy) {
                ++nRanged;
            }
            if (u.getType().name.equals("Light") && u.getPlayer() == playerEnemy) {
                ++nLight;
            }
            if (u.getType().name.equals("Heavy") && u.getPlayer() == playerEnemy) {
                ++nHeavy;
            }
        }
        // Attribute layout must match the ARFF template loaded in loadModel.
        Instance avai = new DenseInstance(10);
        avai.setDataset(dataSet);
        avai.setValue(0, nBases);
        avai.setValue(1, nBarracks);
        avai.setValue(2, nWorkers);
        avai.setValue(3, nLight);
        avai.setValue(4, nHeavy);
        avai.setValue(5, nRanged);
        avai.setValue(6, gs.getTime());
        avai.setValue(7, pgs.getWidth());
        if (base == null) {
            avai.setValue(8, -1); // -1 marks "we have no base"
        } else {
            avai.setValue(8, distRealUnitEneBase(base, pA, gs));
        }
        try {
            setNewStrategy(getStrategyByDistribution(rf.distributionForInstance(avai), pgs.getHeight()));
        } catch (Exception ex) {
            Logger.getLogger(SCV.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println("Erro na classificação="+ex);
        }
    }
    /**
     * A*-path distance from our base to the closest enemy unit, or -1 when
     * the enemy has no units. (The path to the winner is recomputed at the
     * end, so each call runs A* once more than strictly necessary.)
     */
    public int distRealUnitEneBase(Unit base, Player p, GameState gs) {
        AStarPathFinding aStar = new AStarPathFinding();
        PhysicalGameState pgs = gs.getPhysicalGameState();
        Unit closestEnemy = null;
        int closestDistance = 0;
        int d = 9999;
        for (Unit u2 : pgs.getUnits()) {
            if (u2.getPlayer() >= 0 && u2.getPlayer() != p.getID()) {
                if (u2 != null && base != null) {
                    d = aStar.findDistToPositionInRange(base, u2.getPosition(pgs), 1, gs, gs.getResourceUsage());
                    if (closestEnemy == null || d < closestDistance) {
                        closestEnemy = u2;
                        closestDistance = d;
                    }
                }
            }
        }
        if (closestEnemy == null) {
            return -1;
        } else {
            return aStar.findDistToPositionInRange(base, closestEnemy.getPosition(pgs), 1, gs, gs.getResourceUsage());
        }
    }
    /**
     * Manhattan distance from our base to the closest enemy unit
     * (0 when the enemy has no units). Cheaper variant of the A* version.
     */
    public int distUnitEneBase(Unit base, Player p, GameState gs) {
        PhysicalGameState pgs = gs.getPhysicalGameState();
        Unit closestEnemy = null;
        int closestDistance = 0;
        for (Unit u2 : pgs.getUnits()) {
            if (u2.getPlayer() >= 0 && u2.getPlayer() != p.getID()) {
                int d = Math.abs(u2.getX() - base.getX()) + Math.abs(u2.getY() - base.getY());
                if (closestEnemy == null || d < closestDistance) {
                    closestEnemy = u2;
                    closestDistance = d;
                }
            }
        }
        return closestDistance;
    }
    /**
     * Selects the strategy whose LTD3 scores, weighted by the classifier's
     * distribution over the five candidate enemy scripts, are highest on
     * the given map height.
     *
     * @param distrib class distribution; indices 0..4 correspond to
     *        POLightRush, POWorkerRush, PORangedRush, EconomyRush,
     *        POHeavyRush (must match the ARFF class attribute order)
     * @param alturaMapa map height used to index the battle table
     */
    protected String getStrategyByDistribution(double[] distrib, int alturaMapa) {
        String bestStrategy = "POWorkerRush";
        double bestPondValue = -1;
        for (String s : indice.keySet()) {
            double heavy=0, economy= 0, ranged =0, light = 0, worker = 0;
            for(infBattles i : indice.get(s).get(alturaMapa)){
                switch (i.enemy){
                    case "POHeavyRush":
                        heavy = i.ltd3;
                        break;
                    case "EconomyRush":
                        economy = i.ltd3;
                        break;
                    case "PORangedRush":
                        ranged = i.ltd3;
                        break;
                    case "POLightRush":
                        light = i.ltd3;
                        break;
                    case "POWorkerRush":
                        worker = i.ltd3;
                        break;
                    default:
                        System.err.println("Erro na seleção");
                }
            }
            // Weighted average of the strategy's score against each enemy.
            double pondTemp = (distrib[0]* light + distrib[1]*worker +distrib[2]* ranged + distrib[3]* economy + distrib[4]* heavy )/(distrib[0]+distrib[1]+distrib[2]+distrib[3]+distrib[4]);
            if(pondTemp > bestPondValue){
                bestPondValue = pondTemp;
                bestStrategy = s;
            }
        }
        return bestStrategy;
    }
    /**
     * Replaces the voting pool with fresh AI instances for every known
     * script name contained in the given (possibly composite) name string.
     */
    protected void setNewStrategy(String BagStrategy) {
        ArrayList<AI> newStrat = new ArrayList<>();
        if (BagStrategy.contains("POWorkerRush")) {
            newStrat.add(new POWorkerRush(localUtt));
        }
        if (BagStrategy.contains("WorkerRushPlusPlus")) {
            newStrat.add(new WorkerRushPlusPlus(localUtt));
        }
        if (BagStrategy.contains("POLightRush")) {
            newStrat.add(new POLightRush(localUtt));
        }
        if (BagStrategy.contains("EconomyRush")) {
            newStrat.add(new EconomyRush(localUtt));
        }
        if (BagStrategy.contains("RandomBiasedAI")) {
            newStrat.add(new RandomBiasedAI(localUtt));
        }
        if (BagStrategy.contains("POHeavyRush")) {
            newStrat.add(new POHeavyRush(localUtt));
        }
        if (BagStrategy.contains("PORangedRush")) {
            newStrat.add(new PORangedRush(localUtt));
        }
        if (BagStrategy.contains("LightDefense")) {
            newStrat.add(new LightDefense(localUtt));
        }
        if (BagStrategy.contains("RangedDefense")) {
            newStrat.add(new RangedDefense(localUtt));
        }
        if (BagStrategy.contains("WorkerDefense")) {
            newStrat.add(new WorkerDefense(localUtt));
        }
        if (BagStrategy.contains("EconomyMilitaryRush")) {
            newStrat.add(new EconomyMilitaryRush(localUtt));
        }
        if (BagStrategy.contains("HeavyDefense")) {
            newStrat.add(new HeavyDefense(localUtt));
        }
        this.strategies = new AI[newStrat.size()];
        for (int i = 0; i < newStrat.size(); i++) {
            this.strategies[i] = newStrat.get(i);
        }
    }
}
| 20,417 | 33.902564 | 190 | java |
MicroRTS | MicroRTS-master/src/ai/socket/GameVisualSimulationWithSocketAI.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.socket;
import ai.core.AI;
import ai.*;
import gui.PhysicalGameStatePanel;
import javax.swing.JFrame;
import rts.GameState;
import rts.PhysicalGameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Runs a visual microRTS match between a remote agent served over a socket
 * (XML protocol, localhost:9898) playing player 0 and RandomBiasedAI playing
 * player 1, stepping the simulation every PERIOD milliseconds.
 */
public class GameVisualSimulationWithSocketAI {
    public static void main(String args[]) throws Exception {
        final int MAXCYCLES = 5000; // hard cap on game length, in cycles
        final int PERIOD = 20;      // milliseconds between simulation steps

        UnitTypeTable types = new UnitTypeTable();
        PhysicalGameState map = PhysicalGameState.load("maps/16x16/basesWorkers16x16.xml", types);
        GameState state = new GameState(map, types);

        // Player 0 talks to an external process over the socket protocol.
        AI player0 = new SocketAI(100,0, "127.0.0.1", 9898, SocketAI.LANGUAGE_XML, types);
        AI player1 = new RandomBiasedAI();
        player0.reset();
        player1.reset();

        JFrame window = PhysicalGameStatePanel.newVisualizer(state,640,640,false,PhysicalGameStatePanel.COLORSCHEME_BLACK);

        // Give both AIs one second of pre-game analysis time.
        player0.preGameAnalysis(state, 1000, ".");
        player1.preGameAnalysis(state, 1000, ".");

        boolean finished = false;
        long nextStepAt = System.currentTimeMillis() + PERIOD;
        while (!finished && state.getTime() < MAXCYCLES) {
            if (System.currentTimeMillis() < nextStepAt) {
                // Not yet time for the next step: idle briefly.
                try {
                    Thread.sleep(1);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                continue;
            }
            state.issueSafe(player0.getAction(0, state));
            state.issueSafe(player1.getAction(1, state));
            // Advance the simulation one cycle and redraw.
            finished = state.cycle();
            window.repaint();
            nextStepAt += PERIOD;
        }
        System.out.println("Game Over");
    }
}
| 2,265 | 31.84058 | 117 | java |
MicroRTS | MicroRTS-master/src/ai/socket/JSONSocketWrapperAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.socket;
import ai.abstraction.WorkerRush;
import ai.core.AIWithComputationBudget;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.ServerSocket;
import java.net.Socket;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Exposes a microRTS {@link AIWithComputationBudget} through a line-based
 * socket protocol. Each client gets its own handler thread; note that all
 * clients share the single AI instance passed to {@link #runServer}.
 *
 * Protocol commands (one per line): "end", "budget t i",
 * "utt" + JSON line, "getAction p" + JSON game-state line,
 * "preGameAnalysis ms [folder]" + JSON game-state line, "gameOver w".
 *
 * Fix: the optional readWriteFolder argument of "preGameAnalysis" was
 * guarded by {@code tokens.length >= 2}, which is always true once
 * tokens[1] has been read, and then tokens[2] was accessed — crashing with
 * ArrayIndexOutOfBoundsException when the folder was omitted. The guard is
 * now {@code tokens.length >= 3}, which also makes the folder-null branch
 * reachable (the file's own TODO comments described exactly this bug).
 */
public class JSONSocketWrapperAI {
    public static int DEBUG = 0;

    /** Starts a demo server wrapping WorkerRush on port 9898. */
    public static void main(String[] args) throws Exception {
        DEBUG = 1;
        runServer(new WorkerRush(new UnitTypeTable()), 9898);
    }

    /**
     * Accepts clients forever on the given port, serving each on its own
     * thread. Blocks until the process exits (the "end" command).
     *
     * @param ai the AI instance shared by all clients
     * @param socket TCP port to listen on
     */
    public static void runServer(AIWithComputationBudget ai, int socket) throws Exception {
        if (DEBUG>=1) System.out.println("JSONSocketWrapperAI server is running.");
        int clientNumber = 0;
        ServerSocket listener = new ServerSocket(socket);
        try {
            while (true) {
                new SocketWrapperAI(listener.accept(), clientNumber++, ai).start();
            }
        } finally {
            listener.close();
        }
    }

    /** Per-client protocol handler thread. */
    private static class SocketWrapperAI extends Thread {
        Socket socket;
        int clientNumber = 0;
        int time_budget = 100;     // milliseconds per getAction call
        int iterations_budget = 0; // 0 = no iteration cap
        UnitTypeTable utt;         // set by the "utt" command before any getAction
        AIWithComputationBudget ai;

        public SocketWrapperAI(Socket socket, int clientNumber, AIWithComputationBudget a_ai) {
            this.socket = socket;
            this.clientNumber = clientNumber;
            ai = a_ai;
            if (DEBUG>=1) System.out.println("New connection with client# " + clientNumber + " at " + socket);
        }

        public void run() {
            try {
                // Decorate the streams so we can send characters
                // and not just bytes. Ensure output is flushed
                // after every newline.
                BufferedReader in = new BufferedReader(
                        new InputStreamReader(socket.getInputStream()));
                PrintWriter out = new PrintWriter(socket.getOutputStream(), true);

                // Send a welcome message to the client.
                out.println("JSONSocketWrapperAI: you are client #" + clientNumber);

                // Process commands line by line until the stream closes.
                while (true) {
                    String input = in.readLine();
                    if (input == null) break;
                    if (input.startsWith("end")) {
                        // Shuts down the whole server process.
                        System.exit(0);
                    } else if (input.startsWith("budget")) {
                        // "budget <time_ms> <iterations>": reconfigure and reset the AI.
                        String []tokens = input.split(" ");
                        time_budget = Integer.parseInt(tokens[1]);
                        iterations_budget = Integer.parseInt(tokens[2]);
                        if (DEBUG>=1) System.out.println("setting the budget to: " + time_budget + ", " + iterations_budget);
                        // reset the AI:
                        ai.reset();
                        ai.setTimeBudget(time_budget);
                        ai.setIterationsBudget(iterations_budget);
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("utt")) {
                        // "utt" followed by one JSON line with the unit type table.
                        input = in.readLine();
                        // parse the unit type table:
                        if (DEBUG>=1) System.out.println("setting the utt to: " + input);
                        utt = UnitTypeTable.fromJSON(input);
                        ai.reset(utt);
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("getAction")) {
                        // "getAction <player>" followed by one JSON game-state line.
                        String []tokens = input.split(" ");
                        int player = Integer.parseInt(tokens[1]);
                        if (DEBUG>=1) System.out.println("getAction for player " + player);
                        input = in.readLine();
                        if (DEBUG>=1) System.out.println("with game state: " + input);
                        // parse the game state:
                        GameState gs = GameState.fromJSON(input, utt);
                        if (DEBUG>=1) System.out.println(gs);
                        // generate an action and send it through the socket:
                        PlayerAction pa = ai.getAction(player, gs);
                        pa.toJSON(out);
                        out.append("\n");
                        out.flush();
                        if (DEBUG>=1) System.out.println("action sent!");
                    } else if (input.startsWith("preGameAnalysis")) {
                        // "preGameAnalysis <ms> [readWriteFolder]" + one JSON game-state line.
                        String []tokens = input.split(" ");
                        int milliseconds = Integer.parseInt(tokens[1]);
                        String readWriteFolder = null;
                        // The folder argument is optional; only read tokens[2]
                        // when the client actually sent a third token.
                        if (tokens.length >= 3) {
                            readWriteFolder = tokens[2];
                            // Strip surrounding quotes, if present.
                            if (readWriteFolder.startsWith("\"")) readWriteFolder = readWriteFolder.substring(1, readWriteFolder.length()-1);
                        }
                        if (DEBUG>=1) System.out.println("preGameAnalysis");
                        input = in.readLine();
                        if (DEBUG>=1) System.out.println("with game state: " + input);
                        // parse the game state:
                        GameState gs = GameState.fromJSON(input, utt);
                        if (DEBUG>=1) System.out.println(gs);
                        if (readWriteFolder != null) {
                            ai.preGameAnalysis(gs, milliseconds, readWriteFolder);
                        } else {
                            ai.preGameAnalysis(gs, milliseconds);
                        }
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("gameOver")) {
                        // "gameOver <winner>": notify the AI of the result.
                        String []tokens = input.split(" ");
                        int winner = Integer.parseInt(tokens[1]);
                        if (DEBUG>=1) System.out.println("gameOver " + winner);
                        ai.gameOver(winner);
                        out.append("ack\n");
                        out.flush();
                    }
                }
            } catch (Exception e) {
                System.out.println("Error handling client# " + clientNumber + ": " + e);
                e.printStackTrace();
            } finally {
                try {
                    socket.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                System.out.println("Connection with client# " + clientNumber + " closed");
            }
        }
    }
}
| 7,423 | 41.913295 | 141 | java |
MicroRTS | MicroRTS-master/src/ai/socket/SocketAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.socket;
import ai.core.AI;
import ai.core.AIWithComputationBudget;
import ai.core.ParameterSpecification;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.io.StringReader;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;
import org.jdom.Element;
import org.jdom.input.SAXBuilder;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import util.XMLWriter;
/**
*
* @author santi
*/
/**
 * AI that forwards every query (budget, unit-type table, game states) to a remote
 * agent over a TCP socket and returns the actions the remote agent replies with.
 * Messages are exchanged line-by-line, serialized either as XML or as JSON.
 */
public class SocketAI extends AIWithComputationBudget {
    public static int DEBUG;
    // Identifiers for the two supported wire formats:
    public static final int LANGUAGE_XML = 1;
    public static final int LANGUAGE_JSON = 2;
    // Whether serialized game states include constant data / compress the terrain:
    private boolean includeConstants = true, compressTerrain = false;
    UnitTypeTable utt;
    int communication_language = LANGUAGE_XML;
    String serverAddress = "127.0.0.1";
    int serverPort = 9898;
    Socket socket;
    BufferedReader in_pipe;  // server -> client messages
    PrintWriter out_pipe;    // client -> server messages
    /** Connects to the default server (127.0.0.1:9898) with a 100ms time budget. */
    public SocketAI(UnitTypeTable a_utt) {
        super(100,-1);
        utt = a_utt;
        try {
            connectToServer();
        }catch(Exception e) {
            e.printStackTrace();
        }
    }
    /** Connects to the given server address/port using the given wire format. */
    public SocketAI(int mt, int mi, String a_sa, int a_port, int a_language, UnitTypeTable a_utt) {
        super(mt, mi);
        serverAddress = a_sa;
        serverPort = a_port;
        communication_language = a_language;
        utt = a_utt;
        try {
            connectToServer();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    /** Wraps an already-connected socket (used by {@link #createFromExistingSocket}). */
    private SocketAI(int mt, int mi, UnitTypeTable a_utt, int a_language,
            boolean includeConstantsInState, boolean compressTerrain, Socket socket) {
        super(mt, mi);
        communication_language = a_language;
        utt = a_utt;
        this.includeConstants = includeConstantsInState;
        this.compressTerrain = compressTerrain;
        try {
            this.socket = socket;
            in_pipe = new BufferedReader(new InputStreamReader(socket.getInputStream()));
            out_pipe = new PrintWriter(socket.getOutputStream(), true);
            // Consume the initial welcoming messages from the server.
            // NOTE(review): this is a busy-wait spin until the first byte arrives; there is
            // no timeout, so a silent server hangs this constructor.
            while(!in_pipe.ready());
            while(in_pipe.ready()) in_pipe.readLine();
            if (DEBUG >= 1) {
                System.out.println("SocketAI: welcome message received");
            }
            reset();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    /**
     * Creates a SocketAI from an existing socket.
     *
     * @param mt The time budget in milliseconds.
     * @param mi The iterations budget (maximum number of iterations; -1 for unlimited).
     * @param a_utt The unit type table.
     * @param a_language The communication layer to use (LANGUAGE_XML or LANGUAGE_JSON).
     * @param includeConstantsInState whether serialized states include constant data.
     * @param compressTerrain whether serialized states compress the terrain.
     * @param socket The socket the ai will communicate over.
     */
    public static SocketAI createFromExistingSocket(int mt, int mi, rts.units.UnitTypeTable a_utt,
            int a_language, boolean includeConstantsInState, boolean compressTerrain,
            java.net.Socket socket) {
        return new SocketAI(mt, mi, a_utt, a_language, includeConstantsInState, compressTerrain,
                socket);
    }
    /** Opens the TCP connection, drains the welcome message, and sends the initial configuration. */
    public void connectToServer() throws Exception {
        // Make connection and initialize streams
        socket = new Socket(serverAddress, serverPort);
        in_pipe = new BufferedReader(new InputStreamReader(socket.getInputStream()));
        out_pipe = new PrintWriter(socket.getOutputStream(), true);
        // Consume the initial welcoming messages from the server
        // NOTE(review): busy-wait with no timeout, same as the socket-wrapping constructor.
        while(!in_pipe.ready());
        while(in_pipe.ready()) in_pipe.readLine();
        if (DEBUG>=1) System.out.println("SocketAI: welcome message received");
        reset();
    }
    /**
     * Sends the computation budget and the unit-type table to the server,
     * waiting for (and draining) an "ack" line after each message.
     */
    @Override
    public void reset() {
        try {
            // set the game parameters:
            out_pipe.append("budget ").append(String.valueOf(TIME_BUDGET)).append(" ").append(String.valueOf(ITERATIONS_BUDGET)).append("\n");
            out_pipe.flush();
            if (DEBUG>=1) System.out.println("SocketAI: budget sent, waiting for ack");
            // wait for ack:
            in_pipe.readLine();
            while(in_pipe.ready()) in_pipe.readLine();
            if (DEBUG>=1) System.out.println("SocketAI: ack received");
            // send the utt:
            out_pipe.append("utt\n");
            if (communication_language == LANGUAGE_XML) {
                XMLWriter w = new XMLWriter(out_pipe, " ");
                utt.toxml(w);
                w.flush();
                out_pipe.append("\n");
                out_pipe.flush();
            } else if (communication_language == LANGUAGE_JSON) {
                utt.toJSON(out_pipe);
                out_pipe.append("\n");
                out_pipe.flush();
            } else {
                throw new Exception("Communication language " + communication_language + " not supported!");
            }
            if (DEBUG>=1) System.out.println("SocketAI: UTT sent, waiting for ack");
            // wait for ack:
            in_pipe.readLine();
            // read any extra left-over lines
            while(in_pipe.ready()) in_pipe.readLine();
            if (DEBUG>=1) System.out.println("SocketAI: ack received");
        }catch(Exception e) {
            e.printStackTrace();
        }
    }
    /**
     * Serializes the current game state to the server and parses the PlayerAction
     * it replies with. Units left without an action by the server are filled with
     * NONE actions of duration 10 via fillWithNones.
     */
    @Override
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        // send the game state:
        out_pipe.append("getAction ").append(String.valueOf(player)).append("\n");
        if (communication_language == LANGUAGE_XML) {
            XMLWriter w = new XMLWriter(out_pipe, " ");
            gs.toxml(w, includeConstants, compressTerrain);
            w.getWriter().append("\n");
            w.flush();
            // wait to get an action (readLine below blocks until the server answers):
//            while(!in_pipe.ready()) {
//                Thread.sleep(0);
//                if (DEBUG>=1) System.out.println("waiting");
//            }
            // parse the action:
            String actionString = in_pipe.readLine();
            if (DEBUG >= 1) {
                System.out.println("action received from server: " + actionString);
            }
            Element action_e = new SAXBuilder().build(new StringReader(actionString))
                    .getRootElement();
            PlayerAction pa = PlayerAction.fromXML(action_e, gs, utt);
            pa.fillWithNones(gs, player, 10);
            return pa;
        } else if (communication_language == LANGUAGE_JSON) {
            gs.toJSON(out_pipe, includeConstants, compressTerrain);
            out_pipe.append("\n");
            out_pipe.flush();
            // wait to get an action:
            //while(!in_pipe.ready());
            // parse the action:
            String actionString = in_pipe.readLine();
//            System.out.println("action received from server: " + actionString);
            PlayerAction pa = PlayerAction.fromJSON(actionString, gs, utt);
            pa.fillWithNones(gs, player, 10);
            return pa;
        } else {
            throw new Exception("Communication language " + communication_language + " not supported!");
        }
    }
    @Override
    public void preGameAnalysis(GameState gs, long milliseconds) throws Exception {
        preGameAnalysis(gs, milliseconds, null);
    }
    /**
     * Sends a "preGameAnalysis" request (folder argument quoted, if present),
     * then the initial game state, and waits for an ack.
     */
    @Override
    public void preGameAnalysis(GameState gs, long milliseconds, String readWriteFolder)
            throws Exception {
        out_pipe.append("preGameAnalysis ").append(String.valueOf(milliseconds));
        if (readWriteFolder != null) {
            out_pipe.append(" \"").append(readWriteFolder).append("\"");
        }
        out_pipe.append("\n");
        switch (communication_language) {
            case LANGUAGE_XML:
                XMLWriter w = new XMLWriter(out_pipe, " ");
                gs.toxml(w);
                w.flush();
                out_pipe.append("\n");
                out_pipe.flush();
                // wait for ack:
                in_pipe.readLine();
                break;
            case LANGUAGE_JSON:
                gs.toJSON(out_pipe);
                out_pipe.append("\n");
                out_pipe.flush();
                // wait for ack:
                in_pipe.readLine();
                break;
            default:
                throw new Exception("Communication language " + communication_language + " not supported!");
        }
    }
    /** Notifies the server of the winner and waits for an ack. */
    @Override
    public void gameOver(int winner) throws Exception
    {
        // send the winner:
        out_pipe.append("gameOver ").append(String.valueOf(winner)).append("\n");
        out_pipe.flush();
        // wait for ack:
        in_pipe.readLine();
    }
    /** Returns a new SocketAI that opens its own connection with the same settings. */
    @Override
    public AI clone() {
        return new SocketAI(TIME_BUDGET, ITERATIONS_BUDGET, serverAddress, serverPort, communication_language, utt);
    }
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> l = new ArrayList<>();
        l.add(new ParameterSpecification("Server Address", String.class, "127.0.0.1"));
        l.add(new ParameterSpecification("Server Port", Integer.class, 9898));
        l.add(new ParameterSpecification("Language", Integer.class, LANGUAGE_XML));
        return l;
    }
}
| 9,903 | 33.269896 | 142 | java |
MicroRTS | MicroRTS-master/src/ai/socket/XMLSocketWrapperAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.socket;
import ai.abstraction.WorkerRush;
import ai.core.AIWithComputationBudget;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.io.StringReader;
import java.net.ServerSocket;
import java.net.Socket;
import org.jdom.input.SAXBuilder;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
import util.XMLWriter;
/**
*
* @author santi
*/
/**
 * Socket server that exposes a local {@link AIWithComputationBudget} to remote
 * clients over a line-based XML protocol. Each accepted connection is handled by
 * its own {@code SocketWrapperAI} thread, which answers the commands:
 * end, budget, utt, getAction, preGameAnalysis, gameOver.
 */
public class XMLSocketWrapperAI {
    public static int DEBUG = 0;
    /** Demo entry point: serves a WorkerRush on port 9898. */
    public static void main(String[] args) throws Exception {
        DEBUG = 1;
        runServer(new WorkerRush(new UnitTypeTable()), 9898);
    }
    /**
     * Accepts connections forever on the given port, handing each client a
     * fresh clone of {@code ai} on its own thread.
     *
     * @param ai the AI to expose; cloned per client
     * @param socket the TCP port to listen on
     */
    public static void runServer(AIWithComputationBudget ai, int socket) throws Exception {
        if (DEBUG>=1) System.out.println("XMLSocketWrapperAI server is running.");
        int clientNumber = 0;
        ServerSocket listener = new ServerSocket(socket);
        try {
            while (true) {
                new SocketWrapperAI(listener.accept(), clientNumber++, (AIWithComputationBudget)ai.clone()).start();
            }
        } finally {
            listener.close();
        }
    }
    /** Per-client handler thread implementing the line-based command protocol. */
    private static class SocketWrapperAI extends Thread {
        Socket socket;
        int clientNumber = 0;
        int time_budget = 100;       // milliseconds per getAction call
        int iterations_budget = 0;   // max iterations per getAction call (0 = unused)
        UnitTypeTable utt;
        AIWithComputationBudget ai;
        public SocketWrapperAI(Socket a_socket, int a_clientNumber, AIWithComputationBudget a_ai) {
            socket = a_socket;
            clientNumber = a_clientNumber;
            ai = a_ai;
            if (DEBUG>=1) System.out.println("New connection with client# " + a_clientNumber + " at " + a_socket);
        }
        public void run() {
            try {
                // Decorate the streams so we can send characters
                // and not just bytes. Ensure output is flushed
                // after every newline.
                BufferedReader in = new BufferedReader(
                        new InputStreamReader(socket.getInputStream()));
                PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
                // Send a welcome message to the client.
                out.println("XMLSocketWrapperAI: you are client #" + clientNumber);
                // Get messages from the client, line by line
                while (true) {
                    String input = in.readLine();
                    if (input == null) break;
                    if (input.startsWith("end")) {
                        // "end" terminates the whole server process, not just this client.
                        System.exit(0);
                    } else if (input.startsWith("budget")) {
                        String []tokens = input.split(" ");
                        time_budget = Integer.parseInt(tokens[1]);
                        iterations_budget = Integer.parseInt(tokens[2]);
                        if (DEBUG>=1) System.out.println("setting the budget to: " + time_budget + ", " + iterations_budget);
                        // reset the AI:
                        ai.reset();
                        ai.setTimeBudget(time_budget);
                        ai.setIterationsBudget(iterations_budget);
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("utt")) {
                        // The unit type table arrives as XML on the following line:
                        input = in.readLine();
                        if (DEBUG>=1) System.out.println("setting the utt to: " + input);
                        // parse the unit type table:
                        utt = UnitTypeTable.fromXML(new SAXBuilder().build(new StringReader(input)).getRootElement());
                        ai.reset(utt);
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("getAction")) {
                        String []tokens = input.split(" ");
                        int player = Integer.parseInt(tokens[1]);
                        if (DEBUG>=1) System.out.println("getAction for player " + player);
                        input = in.readLine();
                        if (DEBUG>=1) System.out.println("with game state: " + input);
                        // parse the game state:
                        GameState gs = GameState.fromXML(new SAXBuilder().build(new StringReader(input)).getRootElement(), utt);
                        if (DEBUG>=1) System.out.println(gs);
                        // generate an action and send it through the socket:
                        PlayerAction pa = ai.getAction(player, gs);
                        XMLWriter xml = new XMLWriter(out," ");
                        pa.toxml(xml);
                        xml.flush();
                        out.append("\n");
                        out.flush();
                        if (DEBUG>=1) System.out.println("action sent!");
                    } else if (input.startsWith("preGameAnalysis")) {
                        String []tokens = input.split(" ");
                        int milliseconds = Integer.parseInt(tokens[1]);
                        String readWriteFolder = null;
                        // BUGFIX: this previously tested tokens.length>=2, which is always
                        // true once tokens[1] has been read, and then accessed tokens[2],
                        // throwing ArrayIndexOutOfBoundsException when the client sent no
                        // folder argument. The optional folder is tokens[2], so the correct
                        // guard is tokens.length>=3.
                        if (tokens.length>=3) {
                            readWriteFolder = tokens[2];
                            // Strip the surrounding quotes added by the client:
                            if (readWriteFolder.startsWith("\"")) readWriteFolder = readWriteFolder.substring(1, readWriteFolder.length()-1);
                        }
                        if (DEBUG>=1) System.out.println("preGameAnalysis");
                        input = in.readLine();
                        if (DEBUG>=1) System.out.println("with game state: " + input);
                        // parse the game state:
                        GameState gs = GameState.fromXML(new SAXBuilder().build(new StringReader(input)).getRootElement(), utt);
                        if (DEBUG>=1) System.out.println(gs);
                        if (readWriteFolder != null) {
                            ai.preGameAnalysis(gs, milliseconds, readWriteFolder);
                        } else {
                            ai.preGameAnalysis(gs, milliseconds);
                        }
                        out.append("ack\n");
                        out.flush();
                    } else if (input.startsWith("gameOver")) {
                        String []tokens = input.split(" ");
                        int winner = Integer.parseInt(tokens[1]);
                        if (DEBUG>=1) System.out.println("gameOver " + winner);
                        ai.gameOver(winner);
                        out.append("ack\n");
                        out.flush();
                    }
                }
            } catch (Exception e) {
                System.out.println("Error handling client# " + clientNumber + ": " + e);
                e.printStackTrace();
            } finally {
                try {
                    socket.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                System.out.println("Connection with client# " + clientNumber + " closed");
            }
        }
    }
}
| 7,486 | 42.52907 | 141 | java |
MicroRTS | MicroRTS-master/src/ai/stochastic/UnitActionProbabilityDistribution.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.stochastic;
import java.util.List;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Base class for models that assign a probability to each legal action of a unit.
 * Subclasses implement {@link #predictDistribution}, which must return one
 * probability per entry of the supplied action list.
 */
public abstract class UnitActionProbabilityDistribution {
    // Unit-type table available to all subclasses.
    protected UnitTypeTable utt;

    public UnitActionProbabilityDistribution(UnitTypeTable unitTypeTable) {
        this.utt = unitTypeTable;
    }

    /** Returns a probability for each element of {@code actions}, in the same order. */
    public abstract double[] predictDistribution(Unit u, GameState gs, List<UnitAction> actions) throws Exception;
}
| 699 | 24 | 114 | java |
MicroRTS | MicroRTS-master/src/ai/stochastic/UnitActionProbabilityDistributionAI.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package ai.stochastic;
import ai.core.AI;
import ai.core.ParameterSpecification;
import java.util.List;
import java.util.Random;
import ai.stochastic.UnitActionProbabilityDistribution;
import java.util.ArrayList;
import rts.*;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import util.Sampler;
/**
*
* @author santi
*
*/
/**
 * AI that, each cycle, samples one action per idle unit from a
 * {@link UnitActionProbabilityDistribution} model, respecting the resources
 * already reserved by ongoing actions.
 */
public class UnitActionProbabilityDistributionAI extends AI {
    public static int DEBUG = 0;
    Random r = new Random();
    UnitActionProbabilityDistribution model;
    String modelName = ""; // name of the model for the toString method, so it can be identified
    UnitTypeTable utt;
    /** Default: a constant distribution with equal weight for all 6 action types. */
    public UnitActionProbabilityDistributionAI(UnitTypeTable utt) throws Exception {
        this(new UnitActionTypeConstantDistribution(utt,new double[]{1.0,1.0,1.0,1.0,1.0,1.0}),
             utt,
             "uniform");
    }
    public UnitActionProbabilityDistributionAI(UnitActionProbabilityDistribution a_model, UnitTypeTable a_utt, String a_modelName) {
        model = a_model;
        utt = a_utt;
        modelName = a_modelName;
    }
    @Override
    public String toString() {
        return this.getClass().getSimpleName()+"("+modelName+")";
    }
    @Override
    public void reset() {
    }
    public AI clone() {
        // Note: shares the model instance with this AI rather than copying it.
        return new UnitActionProbabilityDistributionAI(model, utt, modelName);
    }
    /**
     * Builds a PlayerAction by sampling, for every idle unit of {@code player},
     * one of its legal actions according to the model's distribution. Sampled
     * actions that conflict with already-reserved resources fall back to the
     * unit's NONE action.
     */
    public PlayerAction getAction(int player, GameState gs) throws Exception {
        if (gs.getUnitTypeTable() != utt) throw new Exception("UnitActionDistributionAI uses a UnitTypeTable different from the one used to play!");
        PhysicalGameState pgs = gs.getPhysicalGameState();
        PlayerAction pa = new PlayerAction();
        if (!gs.canExecuteAnyAction(player)) return pa;
        // Generate the reserved resources: merge the resource usage of every
        // action already in progress, so new actions cannot double-spend.
        for(Unit u:pgs.getUnits()) {
            UnitActionAssignment uaa = gs.getActionAssignment(u);
            if (uaa!=null) {
                ResourceUsage ru = uaa.action.resourceUsage(u, pgs);
                pa.getResourceUsage().merge(ru);
            }
        }
        for(Unit u:pgs.getUnits()) {
            if (u.getPlayer()==player) {
                if (gs.getActionAssignment(u)==null) {
                    // Idle unit: ask the model for a distribution over its legal actions.
                    List<UnitAction> l = u.getUnitActions(gs);
                    double []distribution = model.predictDistribution(u, gs, l);
                    // Locate the NONE action to use as a fallback.
                    // NOTE(review): if the action list contained no NONE action,
                    // `none` would stay null and be added as the action -- presumably
                    // getUnitActions always includes one; confirm.
                    UnitAction none = null;
                    for(UnitAction ua:l)
                        if (ua.getType()==UnitAction.TYPE_NONE) none = ua;
                    try {
                        // Sample an action index weighted by the distribution.
                        UnitAction ua = l.get(Sampler.weighted(distribution));
                        if (ua.resourceUsage(u, pgs).consistentWith(pa.getResourceUsage(), gs)) {
                            // Reserve its resources and commit it.
                            ResourceUsage ru = ua.resourceUsage(u, pgs);
                            pa.getResourceUsage().merge(ru);
                            pa.addUnitAction(u, ua);
                        } else {
                            // Resource conflict: do nothing this cycle.
                            pa.addUnitAction(u, none);
                        }
                    } catch (Exception ex) {
                        // Sampling failed (e.g. degenerate distribution): fall back to NONE.
                        ex.printStackTrace();
                        pa.addUnitAction(u, none);
                    }
                }
            }
        }
        return pa;
    }
    @Override
    public List<ParameterSpecification> getParameters()
    {
        List<ParameterSpecification> parameters = new ArrayList<>();
        try {
            parameters.add(new ParameterSpecification("Model",UnitActionProbabilityDistribution.class,
                new UnitActionTypeConstantDistribution(utt,new double[]{1.0,1.0,1.0,1.0,1.0,1.0})));
            parameters.add(new ParameterSpecification("ModelName",String.class,"uniformDistribution"));
        }catch(Exception e) {
            e.printStackTrace();
        }
        return parameters;
    }
    public UnitActionProbabilityDistribution getModel() {
        return model;
    }
    public void setModel(UnitActionProbabilityDistribution a) {
        model = a;
    }
    public String getModelName() {
        return modelName;
    }
    public void setModelName(String a) {
        modelName = a;
    }
}
| 4,441 | 29.847222 | 148 | java |
MicroRTS | MicroRTS-master/src/ai/stochastic/UnitActionTypeConstantDistribution.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.stochastic;
import java.util.List;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Action-probability model that weights each legal action by a fixed,
 * per-action-type weight supplied at construction time, normalizing the
 * weights of the legal actions into a probability distribution.
 */
public class UnitActionTypeConstantDistribution extends UnitActionProbabilityDistribution {
    double m_distribution[];
    /**
     * @param distribution one non-negative weight per action type; its length
     *        must equal UnitAction.NUMBER_OF_ACTION_TYPES
     */
    public UnitActionTypeConstantDistribution(UnitTypeTable a_utt, double distribution[]) throws Exception {
        super(a_utt);
        boolean valid = distribution != null
                && distribution.length == UnitAction.NUMBER_OF_ACTION_TYPES;
        if (!valid) {
            throw new Exception("distribution does not have the right number of elements!");
        }
        m_distribution = distribution;
    }
    @Override
    public double[] predictDistribution(Unit u, GameState gs, List<UnitAction> actions) throws Exception
    {
        int n = actions.size();
        double[] weights = new double[n];
        double total = 0;
        int idx = 0;
        // Look up the configured weight of each action's type.
        for (UnitAction action : actions) {
            double w = m_distribution[action.getType()];
            weights[idx++] = w;
            total += w;
        }
        if (total > 0) {
            // Normalize the weights into probabilities.
            for (int i = 0; i < n; i++) weights[i] /= total;
        } else {
            // All weights zero (or negative sum): fall back to a uniform distribution.
            double uniform = 1.0 / n;
            for (int i = 0; i < n; i++) weights[i] = uniform;
        }
        return weights;
    }
}
| 1,549 | 28.245283 | 172 | java |
MicroRTS | MicroRTS-master/src/ai/stochastic/UnitActionUniformDistribution.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.stochastic;
import java.util.List;
import rts.GameState;
import rts.UnitAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
*
* @author santi
*/
/**
 * Action-probability model that assigns equal probability to every legal action.
 */
public class UnitActionUniformDistribution extends UnitActionProbabilityDistribution {
    public UnitActionUniformDistribution(UnitTypeTable a_utt) throws Exception {
        super(a_utt);
    }
    /** Returns 1/n for each of the n supplied actions. */
    public double[] predictDistribution(Unit u, GameState gs, List<UnitAction> actions) throws Exception
    {
        int count = actions.size();
        double[] result = new double[count];
        int i = 0;
        while (i < count) {
            result[i] = 1.0 / count;
            i++;
        }
        return result;
    }
}
| 867 | 23.8 | 104 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DslAI.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator;
import ai.synthesis.dslForScriptGenerator.DSLCommandInterfaces.ICommand;
import ai.synthesis.dslForScriptGenerator.DSLCompiler.FunctionDSLCompiler;
import ai.synthesis.dslForScriptGenerator.DSLCompiler.MainDSLCompiler;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.abstraction.pathfinding.PathFinding;
import ai.core.AI;
import ai.core.ParameterSpecification;
import ai.synthesis.grammar.dslTree.interfacesDSL.iDSL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
import ai.synthesis.dslForScriptGenerator.DSLCompiler.IDSLCompiler;
import rts.UnitAction;
import util.Pair;
/**
*
* @author rubens
*/
/**
 * AI whose behavior is defined by a compiled DSL script: a list of ICommand
 * objects is executed in order every cycle, each one adding actions to the
 * accumulating PlayerAction.
 */
public class DslAI extends AI {
    List<ICommand> commands = new ArrayList<>();
    UnitTypeTable utt;
    String name;                 // human-readable script name, used by toString()
    iDSL script;                 // source DSL tree this AI was compiled from
    IDSLCompiler compiler = new MainDSLCompiler();
    public HashMap<Long, String> counterByFunction;
    public DslAI(UnitTypeTable utt) {
        this.utt = utt;
    }
    public List<ICommand> getCommands() {
        return commands;
    }
    public DslAI(UnitTypeTable utt, List<ICommand> commands, String name, iDSL script, HashMap<Long, String> counterByFunction) {
        this.utt = utt;
        this.commands = commands;
        this.name = name;
        this.script = script;
        this.counterByFunction = counterByFunction;
    }
    /**
     * Runs every compiled command in order, accumulating actions, then drops
     * actions assigned to units not owned by {@code player} and fills the
     * remaining units with NONE (wait) actions.
     */
    public PlayerAction getAction(int player, GameState gs) {
        PlayerAction currentActions = new PlayerAction();
        PathFinding pf = new AStarPathFinding();
        for (ICommand command : commands) {
            currentActions = command.getAction(gs, player, currentActions, pf, utt, counterByFunction);
            /*
            if (has_actions_strange(currentActions, player)) {
                System.out.println(" ¨¨¨¨ Code =" + script.translate());
                System.out.println(" ¨¨¨¨ Command =" + command.toString());
                System.out.println(" ¨¨¨¨ Actions =" + currentActions.toString());
            }
            */
        }
        currentActions = filterForStrangeUnits(currentActions, player);
        currentActions = fillWithWait(currentActions, player, gs, utt);
        return currentActions;
    }
    /**
     * Variant of getAction that pre-assigns NONE to every unit except {@code u},
     * so the commands effectively decide an action for that single unit.
     */
    public PlayerAction getActionSingleUnit(int player, GameState gs, Unit u) {
        PlayerAction currentActions = new PlayerAction();
        PathFinding pf = new AStarPathFinding();
        currentActions = fillWithWait(currentActions, player, gs, utt);
        currentActions.removeUnitAction(u, currentActions.getAction(u));
        //System.out.println("Idunit "+u.getID());
        //System.out.println("player1"+currentActions.getActions().toString());
        for (ICommand command : commands) {
            currentActions = command.getAction(gs, player, currentActions, pf, utt, counterByFunction);
        }
        //System.out.println("player2"+currentActions.getActions().toString());
        currentActions = fillWithWait(currentActions, player, gs, utt);
        //System.out.println("player3"+currentActions.getActions().toString());
        return currentActions;
    }
    @Override
    public void reset() {
    }
    @Override
    public void reset(UnitTypeTable utt) {
        super.reset(utt);
        this.utt = utt;
    }
    /** Clones by recompiling the original DSL tree (see buildCommandsIA). */
    @Override
    public AI clone() {
        return buildCommandsIA(utt, this.script, this.name);
    }
    /** Recompiles {@code code} into a fresh command list and wraps it in a new DslAI. */
    private AI buildCommandsIA(UnitTypeTable utt, iDSL code, String name) {
        FunctionDSLCompiler.counterCommands = 0;
        List<ICommand> commandsGP = compiler.CompilerCode(code, utt);
        HashMap<Long, String> counterByFunctionNew = new HashMap<Long, String>(counterByFunction);
        AI aiscript = new DslAI(utt, commandsGP, name , code, counterByFunctionNew);
        return aiscript;
    }
    @Override
    public List<ParameterSpecification> getParameters() {
        List<ParameterSpecification> list = new ArrayList<>();
        return list;
    }
    @Override
    public String toString() {
//        String nameCommand = "";
//        for (Iterator iterator = commands.iterator(); iterator.hasNext();) {
//            ICommand iCommand = (ICommand) iterator.next();
//            nameCommand += iCommand.toString();
//
//        }
        return name;
    }
    /** Assigns NONE actions (duration 10) to every unit left without an action. */
    private PlayerAction fillWithWait(PlayerAction currentActions, int player, GameState gs, UnitTypeTable utt) {
        currentActions.fillWithNones(gs, player, 10);
        return currentActions;
    }
    /** True if any assigned action targets a unit not owned by {@code player}. */
    private boolean has_actions_strange(PlayerAction currentActions, int player) {
        for (Pair<Unit, UnitAction> entry : currentActions.getActions()) {
            if (entry.m_a.getPlayer() != player) {
                return true;
            }
        }
        return false;
    }
    /** Returns a copy of the action set containing only units owned by {@code player}. */
    private PlayerAction filterForStrangeUnits(PlayerAction currentActions, int player) {
        PlayerAction cleanActions = new PlayerAction();
        for (Pair<Unit, UnitAction> entry : currentActions.getActions()) {
            if (entry.m_a.getPlayer() == player) {
                cleanActions.addUnitAction(entry.m_a, entry.m_b);
            }
        }
        return cleanActions;
    }
}
| 5,518 | 33.279503 | 129 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DslAIScript.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLBasicAction.AttackBasic;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLBasicAction.TrainBasic;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLBasicBoolean.AllyRange;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLBasicAction.HarvestBasic;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLBasicAction.MoveToUnitBasic;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLEnumerators.EnumPositionType;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLEnumerators.EnumPlayerTarget;
import ai.synthesis.dslForScriptGenerator.DSLCommandInterfaces.ICommand;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.ClosestEnemy;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.LessHealthyEnemy;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.PlayerTargetParam;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.PriorityPositionParam;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.QuantityParam;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.TypeConcrete;
import ai.abstraction.pathfinding.AStarPathFinding;
import ai.abstraction.pathfinding.PathFinding;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
*
* @author rubens
*/
/**
 * Hand-assembled example of a DSL script: the constructor wires up a fixed
 * command list (train workers, harvest, attack, and a range-conditional move)
 * and getAction executes those commands in order each cycle.
 */
public class DslAIScript {
    List<ICommand> commands = new ArrayList<>();
    List<ICommand> commandsforBoolean = new ArrayList<>();  // body of the AllyRange conditional
    UnitTypeTable utt;
    public DslAIScript(UnitTypeTable utt) {
        this.utt = utt;
        //build action
        //BuildBasic build = new BuildBasic();
        //build.addParameter(TypeConcrete.getTypeBarracks()); //add unit construct type
        //build.addParameter(new QuantityParam(1)); //add qtd unit
        //commands.add(build);
        //train action: train up to 20 workers at the base, preferring the enemy direction
        TrainBasic train = new TrainBasic();
        train.addParameter(TypeConcrete.getTypeBase()); //add unit construct type
        train.addParameter(TypeConcrete.getTypeWorker()); //add unit Type
        //train.addParameter(TypeConcrete.getTypeWorker()); //add unit Type
        train.addParameter(new QuantityParam(20)); //add qtd unit
        PriorityPositionParam pos = new PriorityPositionParam();
        pos.addPosition(EnumPositionType.EnemyDirection);
        //pos.addPosition(EnumPositionType.Left);
        //pos.addPosition(EnumPositionType.Right);
        //pos.addPosition(EnumPositionType.Down);
        train.addParameter(pos);
        commands.add(train);
        //harvest action: one worker gathers resources
        HarvestBasic harverst = new HarvestBasic();
        harverst.addParameter(TypeConcrete.getTypeWorker()); //add unit type
        harverst.addParameter(new QuantityParam(1)); //add qtd unit
        commands.add(harverst);
        //attack action: all units attack the closest enemy
        AttackBasic attack = new AttackBasic();
        attack.addParameter(TypeConcrete.getTypeUnits()); //add unit type
        PlayerTargetParam pt=new PlayerTargetParam();
        pt.addPlayer(EnumPlayerTarget.Enemy);
        attack.addParameter(pt);
        attack.addParameter(new ClosestEnemy()); //add behavior
        commands.add(attack);
        //Move action
//        MoveToUnitBasic moveToUnit = new MoveToUnitBasic();
//        moveToUnit.addParameter(TypeConcrete.getTypeUnits()); //add unit type
//        moveToUnit.addParameter(new ClosestEnemy()); //add behavior
//        commands.add(moveToUnit);
        //Move To coordinates
//        MoveToCoordinatesBasic moveToCoordinates = new MoveToCoordinatesBasic();
//        moveToCoordinates.addParameter(new CoordinatesParam(6,6)); //add unit type
//        moveToCoordinates.addParameter(TypeConcrete.getTypeUnits());
//        commands.add(moveToCoordinates);
        //BOOLEAN: if there is an enemy in ally range, move toward the least healthy enemy
        MoveToUnitBasic moveToUnit = new MoveToUnitBasic();
        moveToUnit.addParameter(TypeConcrete.getTypeUnits()); //add unit type
        pt=new PlayerTargetParam();
        pt.addPlayer(EnumPlayerTarget.Enemy);
        moveToUnit.addParameter(pt);
        moveToUnit.addParameter(new LessHealthyEnemy()); //add behavior
        commandsforBoolean = new ArrayList<>();
        commandsforBoolean.add(moveToUnit);
        AllyRange allyRangeBoolean = new AllyRange(commandsforBoolean);
        allyRangeBoolean.addParameter(TypeConcrete.getTypeUnits());
        commands.add(allyRangeBoolean);
        //System.out.println("t "+allyRangeBoolean.toString());
    }
    /** Executes every command in order, accumulating the resulting PlayerAction. */
    public PlayerAction getAction(int player, GameState gs) {
        PlayerAction currentActions = new PlayerAction();
        PathFinding pf = new AStarPathFinding();
        //simulate one WR
        for (ICommand command : commands) {
            currentActions = command.getAction(gs, player, currentActions, pf, utt, new HashMap<Long, String>());
        }
        return currentActions;
    }
}
| 5,213 | 43.186441 | 113 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/AbstractConditional.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import ai.synthesis.grammar.dslTree.interfacesDSL.iDSL;
import java.math.BigDecimal;
import java.util.List;
/**
*
* @author julian and rubens
*/
public abstract class AbstractConditional implements IConditional {
/* List of types
0 = conditional between two integers.
1 = conditional between two bigDecimals.
2 = conditional between two objects.
3 = conditional between one function + params and one integer;
4 = conditional between one function + params and one BigDecimal;
5 = conditional between two function;
6 = run a condition function (will be deprecated);
*/
protected int typeOfParam;
protected int param1, param2;
protected BigDecimal paramB1, paramB2;
protected Object ob1, ob2;
protected String function, function2;
protected List lParam1, lParam2;
protected iDSL dsl;
protected boolean hasDSLused;
protected boolean deny_boolean = false;
public AbstractConditional(int param1, int param2) {
this.param1 = param1;
this.param2 = param2;
}
public AbstractConditional(BigDecimal paramB1, BigDecimal paramB2) {
this.paramB1 = paramB1;
this.paramB2 = paramB2;
}
public AbstractConditional(Object ob1, Object ob2) {
this.ob1 = ob1;
this.ob2 = ob2;
}
public AbstractConditional(String function, List lParam1, int param1) {
this.param1 = param1;
this.function = function;
this.lParam1 = lParam1;
}
public AbstractConditional(String function, List lParam1, BigDecimal paramB1) {
this.paramB1 = paramB1;
this.function = function;
this.lParam1 = lParam1;
}
public AbstractConditional(String function, List lParam1, String function2, List lParam2) {
this.function = function;
this.function2 = function2;
this.lParam1 = lParam1;
this.lParam2 = lParam2;
}
public AbstractConditional(String function, List lParam1, iDSL dsl) {
this.function = function;
this.lParam1 = lParam1;
this.dsl = dsl;
this.hasDSLused = false;
}
public AbstractConditional(boolean deny_boolean, String function, List lParam1, iDSL dsl) {
this.function = function;
this.lParam1 = lParam1;
this.dsl = dsl;
this.hasDSLused = false;
this.deny_boolean = deny_boolean;
}
public void setDSLUsed(){
this.hasDSLused = true;
}
}
| 2,748 | 29.544444 | 95 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/AbstractUnitConditional.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import java.math.BigDecimal;
import java.util.List;
/**
 * Base class for unit-scoped DSL conditionals. Like its sibling
 * {@code AbstractConditional}, it only stores the operands of the comparison;
 * which fields are populated depends on the constructor used, identified by
 * {@code typeOfParam}:
 * <pre>
 * 0 - two integers
 * 1 - two BigDecimals
 * 2 - two objects
 * 3 - a function (plus its params) against an integer
 * 4 - a function (plus its params) against a BigDecimal
 * 5 - two functions
 * 6 - a bare condition function (will be deprecated)
 * </pre>
 *
 * @author julian and rubens
 */
public abstract class AbstractUnitConditional implements IUnitConditional {

    protected int typeOfParam;
    protected int param1, param2;
    protected BigDecimal paramB1, paramB2;
    protected Object ob1, ob2;
    protected String function, function2;
    protected List lParam1, lParam2;

    /** Conditional over two plain integers. */
    public AbstractUnitConditional(int param1, int param2) {
        this.param2 = param2;
        this.param1 = param1;
    }

    /** Conditional over two BigDecimal values. */
    public AbstractUnitConditional(BigDecimal paramB1, BigDecimal paramB2) {
        this.paramB2 = paramB2;
        this.paramB1 = paramB1;
    }

    /** Conditional over two arbitrary objects. */
    public AbstractUnitConditional(Object ob1, Object ob2) {
        this.ob2 = ob2;
        this.ob1 = ob1;
    }

    /** Conditional comparing a function call's result against an integer. */
    public AbstractUnitConditional(String function, List lParam1, int param1) {
        this.function = function;
        this.lParam1 = lParam1;
        this.param1 = param1;
    }

    /** Conditional comparing a function call's result against a BigDecimal. */
    public AbstractUnitConditional(String function, List lParam1, BigDecimal paramB1) {
        this.function = function;
        this.lParam1 = lParam1;
        this.paramB1 = paramB1;
    }

    /** Conditional comparing the results of two function calls. */
    public AbstractUnitConditional(String function, List lParam1, String function2, List lParam2) {
        this.function = function;
        this.lParam1 = lParam1;
        this.function2 = function2;
        this.lParam2 = lParam2;
    }

    /** Conditional that simply runs a single condition function. */
    public AbstractUnitConditional(String function, List lParam1) {
        this.function = function;
        this.lParam1 = lParam1;
    }
}
| 2,215 | 29.356164 | 99 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/ConditionalBiggerThen.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import ai.abstraction.pathfinding.PathFinding;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
 * "Greater than" conditional of the DSL. Each constructor records which
 * operand flavour is being compared by setting {@code typeOfParam}; the
 * legend lives in {@link AbstractConditional}.
 *
 * @author rubens
 */
public class ConditionalBiggerThen extends AbstractConditional {

    /** int &gt; int */
    public ConditionalBiggerThen(int param1, int param2) {
        super(param1, param2);
        this.typeOfParam = 0;
    }

    /** BigDecimal &gt; BigDecimal */
    public ConditionalBiggerThen(BigDecimal paramB1, BigDecimal paramB2) {
        super(paramB1, paramB2);
        this.typeOfParam = 1;
    }

    /** object &gt; object */
    public ConditionalBiggerThen(Object ob1, Object ob2) {
        super(ob1, ob2);
        this.typeOfParam = 2;
    }

    /** function(params) &gt; int */
    public ConditionalBiggerThen(String function, List lParam1, int param1) {
        super(function, lParam1, param1);
        this.typeOfParam = 3;
    }

    /** function(params) &gt; BigDecimal */
    public ConditionalBiggerThen(String function, List lParam1, BigDecimal paramB1) {
        super(function, lParam1, paramB1);
        this.typeOfParam = 4;
    }

    /** function(params) &gt; function(params) */
    public ConditionalBiggerThen(String function, List lParam1, String function2, List lParam2) {
        super(function, lParam1, function2, lParam2);
        this.typeOfParam = 5;
    }

    /** Runs a bare condition function (will be deprecated). */
    public ConditionalBiggerThen(String function, List lParam1) {
        super(function, lParam1);
        this.typeOfParam = 6;
    }

    /**
     * Evaluation is not implemented for this conditional yet.
     */
    @Override
    public boolean runConditional(GameState game, int player, PlayerAction currentPlayerAction,
            PathFinding pf, UnitTypeTable a_utt, HashMap<Long, String> counterByFunction) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| 1,968 | 29.765625 | 135 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/IConditional.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import java.util.HashMap;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import rts.PlayerAction;
import rts.units.UnitTypeTable;
/**
 * Contract for a DSL conditional that can be evaluated against a game state.
 *
 * @author julian and rubens
 */
public interface IConditional {

    /**
     * Evaluates this conditional for the given player in the given state.
     *
     * @param game current game state
     * @param player index of the player the conditional is evaluated for
     * @param currentPlayerAction actions already issued during this cycle
     * @param pf pathfinding algorithm available to the condition functions
     * @param a_utt unit type table of the current game
     * @param counterByFunction map passed through to the underlying condition
     *        functions (presumably tracks function usage by id — confirm)
     * @return the boolean result of the conditional
     */
    public boolean runConditional(GameState game, int player, PlayerAction currentPlayerAction,
            PathFinding pf, UnitTypeTable a_utt, HashMap<Long, String> counterByFunction);
}
| 704 | 27.2 | 118 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/IUnitConditional.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import java.util.HashMap;
import ai.abstraction.pathfinding.PathFinding;
import rts.GameState;
import rts.PlayerAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
 * Contract for a DSL conditional that may require a concrete unit in order
 * to be evaluated.
 *
 * @author julian and rubens
 */
public interface IUnitConditional {

    /**
     * @return true when this conditional needs a concrete {@link Unit} to be
     *         supplied to {@code runConditional}
     */
    public Boolean isNecessaryUnit();

    /**
     * Evaluates this conditional for the given player, with {@code un} as the
     * unit in context.
     *
     * @param game current game state
     * @param player index of the player the conditional is evaluated for
     * @param currentPlayerAction actions already issued during this cycle
     * @param pf pathfinding algorithm available to the condition functions
     * @param a_utt unit type table of the current game
     * @param un unit the conditional is evaluated against
     * @param counterByFunction map passed through to the underlying condition
     *        functions (presumably tracks function usage by id — confirm)
     * @return the boolean result of the conditional
     */
    public boolean runConditional(GameState game, int player, PlayerAction currentPlayerAction,
            PathFinding pf, UnitTypeTable a_utt, Unit un,HashMap<Long, String> counterByFunction);
}
| 772 | 28.730769 | 126 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/SimpleConditional.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional;
import ai.synthesis.dslForScriptGenerator.DSLBasicConditional.functions.IConditionalFunction;
import ai.abstraction.pathfinding.PathFinding;
import ai.synthesis.grammar.dslTree.interfacesDSL.iDSL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import rts.GameState;
import rts.PlayerAction;
import rts.units.Unit;
import rts.units.UnitTypeTable;
/**
*
* @author rubens
*/
public class SimpleConditional extends AbstractConditional implements IUnitConditional{
public SimpleConditional(boolean deny_boolean, String function, List lParam1, iDSL dsl) {
super(deny_boolean, function, lParam1, dsl);
}
@Override
public boolean runConditional(GameState game, int player, PlayerAction currentPlayerAction, PathFinding pf, UnitTypeTable a_utt, HashMap<Long, String> counterByFunction) {
List param = new ArrayList();
param.add(game);
param.add(player);
param.add(currentPlayerAction);
param.add(pf);
param.add(a_utt);
param.addAll(lParam1);
try {
IConditionalFunction fcond = (IConditionalFunction) Class.forName("ai.synthesis.dslForScriptGenerator.DSLBasicConditional.functions." + function).newInstance();
setDSLUsed();
if(this.deny_boolean){
return !fcond.runFunction(param,counterByFunction);
}
return fcond.runFunction(param,counterByFunction);
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException ex) {
Logger.getLogger(ConditionalBiggerThen.class.getName()).log(Level.SEVERE, null, ex);
}
return false;
}
@Override
public String toString() {
if (this.deny_boolean) {
return "SimpleConditional{not " +this.function+" "+ this.lParam1+ '}';
}
return "SimpleConditional{" +this.function+" "+ this.lParam1+ '}';
}
@Override
public boolean runConditional(GameState game, int player, PlayerAction currentPlayerAction, PathFinding pf, UnitTypeTable a_utt, Unit un,HashMap<Long, String> counterByFunction) {
List param = new ArrayList();
param.add(game);
param.add(player);
param.add(currentPlayerAction);
param.add(pf);
param.add(a_utt);
param.addAll(lParam1);
param.add(un);
try {
IConditionalFunction fcond = (IConditionalFunction) Class.forName("ai.synthesis.dslForScriptGenerator.DSLBasicConditional.functions." + function).newInstance();
setDSLUsed();
if(this.deny_boolean){
return !fcond.runFunction(param,counterByFunction);
}
return fcond.runFunction(param,counterByFunction);
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException ex) {
Logger.getLogger(ConditionalBiggerThen.class.getName()).log(Level.SEVERE, null, ex);
}
return false;
}
@Override
public Boolean isNecessaryUnit() {
for (Object object : lParam1) {
if(object instanceof String){
String param = (String) object;
if(param.equals("u")){
return true;
}
}
}
return false;
}
}
| 3,682 | 35.465347 | 184 | java |
MicroRTS | MicroRTS-master/src/ai/synthesis/dslForScriptGenerator/DSLBasicConditional/functions/AbstractConditionalFunction.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai.synthesis.dslForScriptGenerator.DSLBasicConditional.functions;
import ai.synthesis.dslForScriptGenerator.DSLCommand.DSLEnumerators.EnumTypeUnits;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.PriorityPositionParam;
import ai.synthesis.dslForScriptGenerator.IDSLParameters.IParameters;
import ai.synthesis.dslForScriptGenerator.DSLParametersConcrete.UnitTypeParam;
import ai.synthesis.dslForScriptGenerator.IDSLParameters.IPriorityPosition;
import java.util.ArrayList;
import java.util.List;
import rts.GameState;
import rts.PlayerAction;
import rts.units.Unit;
/**
 * Common scaffolding for the reflection-invoked condition functions of the
 * DSL. Holds the parameter list handed over by the DSL and offers helpers to
 * select a player's usable units and to extract typed entries from parameter
 * lists.
 *
 * @author rubens
 */
public abstract class AbstractConditionalFunction implements IConditionalFunction{

    protected List<IParameters> parameters = new ArrayList<>();
    private boolean hasDSLUsed;

    public AbstractConditionalFunction() {
        this.hasDSLUsed = false;
    }

    /** Flags that this function has been exercised through the DSL. */
    @Override
    public void setDSLUsed() {
        this.hasDSLUsed = true;
    }

    protected List<IParameters> getParameters() {
        return parameters;
    }

    /**
     * Units owned by {@code player} that are fully idle (no action queued in
     * {@code currentPlayerAction} and none assigned in the game state), carry
     * no resources, and whose type matches the unit-type parameters.
     */
    protected Iterable<Unit> getPotentialUnits(GameState game, PlayerAction currentPlayerAction, int player) {
        ArrayList<Unit> candidates = new ArrayList<>();
        for (Unit unit : game.getUnits()) {
            if (unit.getPlayer() != player) {
                continue;
            }
            if (currentPlayerAction.getAction(unit) != null) {
                continue;
            }
            if (game.getActionAssignment(unit) != null) {
                continue;
            }
            if (unit.getResources() == 0 && isUnitControlledByParam(unit)) {
                candidates.add(unit);
            }
        }
        return candidates;
    }

    /**
     * Units owned by {@code player} whose type matches the unit-type
     * parameters, regardless of whether they are currently busy.
     */
    protected Iterable<Unit> getPotentialUnitsSimpleWay(GameState game, PlayerAction currentPlayerAction, int player) {
        ArrayList<Unit> candidates = new ArrayList<>();
        for (Unit unit : game.getUnits()) {
            if (unit.getPlayer() == player && isUnitControlledByParam(unit)) {
                candidates.add(unit);
            }
        }
        return candidates;
    }

    /**
     * True when the unit's type id appears among the unit-type parameters.
     */
    protected boolean isUnitControlledByParam(Unit u) {
        for (UnitTypeParam typeParam : getTypeUnitFromParam()) {
            for (EnumTypeUnits allowed : typeParam.getParamTypes()) {
                if (u.getType().ID == allowed.code()) {
                    return true;
                }
            }
        }
        return false;
    }

    /** Collects every {@link UnitTypeParam} present in the parameter list. */
    protected List<UnitTypeParam> getTypeUnitFromParam() {
        List<UnitTypeParam> types = new ArrayList<>();
        for (IParameters candidate : getParameters()) {
            if (candidate instanceof UnitTypeParam) {
                types.add((UnitTypeParam) candidate);
            }
        }
        return types;
    }

    /** True when the given DSL parameter list contains a {@link Unit}. */
    protected boolean hasUnitInParam(List lParam1) {
        return getUnitFromParam(lParam1) != null;
    }

    /** First {@link Unit} found in the given DSL parameter list, or null. */
    protected Unit getUnitFromParam(List lParam1) {
        for (Object entry : lParam1) {
            if (entry instanceof Unit) {
                return (Unit) entry;
            }
        }
        return null;
    }

    /** First priority-position parameter in the list, or null when absent. */
    protected PriorityPositionParam getPriorityParam() {
        for (IParameters candidate : getParameters()) {
            if (candidate instanceof IPriorityPosition) {
                return (PriorityPositionParam) candidate;
            }
        }
        return null;
    }
}
| 3,623 | 30.241379 | 119 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.