repo stringlengths 1 191 ⌀ | file stringlengths 23 351 | code stringlengths 0 5.32M | file_length int64 0 5.32M | avg_line_length float64 0 2.9k | max_line_length int64 0 288k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/TextToNgram/TextToNgramRunner.java | package main.java.TextToNgram;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class TextToNgramRunner {
    /**
     * Entry point: takes a text file as input and creates the associated n-gram file.
     * @param args first argument declares the address of input file, second argument declares the address of output
     *             file and the optional third argument (int) indicates size of n-grams. e.g. args[2] = 3 means
     *             3-grams are needed; when omitted the package default is used.
     * @throws IllegalArgumentException when fewer than two arguments are given, when args[2]
     *                                  is not an integer, or when it is not positive
     */
    public static void main(String[] args){
        //args[0] input file
        //args[1] output file
        //args[2] n-gram size, default is 3 which means 3-grams are produced
        if(args.length <= 1)
            throw new IllegalArgumentException(Utils.packageExceptionPrefix + "Error: input & output files should be declared as the first two parameters.");
        int ngramSize = Utils.packageDefaultNgramSize;
        if(args.length <= 2)
            System.out.println(Utils.packageExceptionPrefix + "Warning: third argument is not declared. "
                    + Utils.packageDefaultNgramSize + "-grams are produced as default");
        else {
            // validate the n-gram size instead of letting a bare NumberFormatException
            // or a nonsensical non-positive size propagate into NgramUtility
            try {
                ngramSize = Integer.parseInt(args[2]);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException(Utils.packageExceptionPrefix
                        + "Error: third argument must be an integer n-gram size, got: " + args[2], e);
            }
            if (ngramSize <= 0)
                throw new IllegalArgumentException(Utils.packageExceptionPrefix
                        + "Error: n-gram size must be positive, got: " + ngramSize);
        }
        String inputFileAddress = args[0];
        String outputFileAddress = args[1];
        //create n-gram for each word
        NgramUtility processor = new NgramUtility();
        processor.CreateNgramFileFromTextFile(inputFileAddress, outputFileAddress, ngramSize);
    }
}
| 1,714 | 40.829268 | 157 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/TextToNgram/NgramContainer.java | package main.java.TextToNgram;
import main.java.PMI.FeatureHandler;
import main.java.Text.WordDictionary;
import main.java.Utility.Config;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class NgramContainer{
    // Ordered words of this n-gram; length fixed at construction time.
    private String[] members;
    /**
     * Creates an n-gram holding a defensive copy of the given members.
     * @param membersCopy the words to copy into this container
     */
    public NgramContainer(String[] membersCopy){
        members = membersCopy.clone();
    }
    /**
     * create a new NgramContainer object of a given size and initialize its members
     * @param ngramSize size of the required ngram, e.g. ngramSize=3 produces a tri-gram container
     */
    public NgramContainer(int ngramSize){
        members = new String[ngramSize];
        //initialize members array with the dummy placeholder value
        for (int i=0; i<members.length; ++i)
            members[i] = Config.packageOutputDummyValue;
    }
    /**
     * Use this method to get size of ngram e.g. count of words that this ngram can hold
     * @return size of ngram
     */
    public int getSize(){
        return members.length;
    }
    /**
     * Sets the word stored at the given position.
     * @throws IllegalArgumentException when index is out of range (consistent with getMemberValue)
     */
    public void setMemberValue(int index, String value){
        if(index >= members.length || index<0)
            throw new IllegalArgumentException(Utils.packageExceptionPrefix
                    + "class:NgramContainer, method:setMemberValue, index:" + index);
        members[index] = value;
    }
    /**
     * Returns the word stored at the given position.
     * @throws IllegalArgumentException when index is out of range
     */
    public String getMemberValue(int index){
        if(index>= members.length || index<0)
            throw new IllegalArgumentException(Utils.packageExceptionPrefix
                    + "class:NgramContainer, method:getMemberValue, index:" + index);
        return members[index];
    }
    /** @return the word at the center position (length/2) of this n-gram */
    public String getCenterValue(){
        return this.members[getIndexOfCenterMember()];
    }
    private int getIndexOfCenterMember() {
        return members.length / 2;
    }
    /**
     * Case-insensitive member-by-member comparison with another container.
     * NOTE(review): this is an overload, not an override of Object.equals(Object);
     * hash-based collections will NOT use it — confirm that is intended.
     */
    public boolean equals(NgramContainer matchingContainer){
        if(this.members.length != matchingContainer.members.length)
            return false;
        for(int i=0; i<members.length ; ++i)
            if(!this.members[i].equalsIgnoreCase(matchingContainer.members[i]))
                return false;
        return true;
    }
    /**
     * Like equals, but template positions holding FeatureHandler.nullTokenIdentifier
     * act as wildcards that match any word.
     */
    public boolean equalsWithTemplate(NgramContainer ngramTemplate) {
        if(this.members.length != ngramTemplate.members.length)
            return false;
        for(int i=0; i<members.length ; ++i)
            if(!ngramTemplate.members[i].equals(FeatureHandler.nullTokenIdentifier)
                    && !this.members[i].equalsIgnoreCase(ngramTemplate.members[i]))
                return false;
        return true;
    }
    /** @return true when the given word occurs (case-insensitively) in this n-gram */
    public boolean hasMember(String word){
        for (String member : members)
            if (member.equalsIgnoreCase(word))
                return true;
        return false;
    }
    /** @return the members joined with "," (empty string for a zero-length n-gram) */
    public String serialize(){
        // String.join produces the same "a,b,c" output as the previous manual loop
        return String.join(",", members);
    }
    /** @return true when the first member is still the dummy placeholder value */
    public boolean isBeginningOfLine(){
        return members[0].equalsIgnoreCase(Config.packageOutputDummyValue);
    }
    /**
     * Renders the n-gram as "( w1 w2 ... )" using the dictionary to map each
     * member id to its word.
     */
    public String getWordSet(WordDictionary dictionary){
        StringBuilder result = new StringBuilder("( ");
        for (String member : this.members)
            result.append(dictionary.getEntry(member)).append(" ");
        result.append(")");
        return result.toString();
    }
    /** @return true only when every member is a key of the given dictionary */
    public boolean isMemberOfDictionary(WordDictionary wordDictionary) {
        for (String word:members)
            if (! wordDictionary.containsKey(word))
                return false;
        return true;
    }
    /** @return a new n-gram of size length/2+1 holding the right half (center included) */
    public NgramContainer getRightPart(){
        NgramContainer result = new NgramContainer(this.getSize()/2 + 1);
        for (int index=result.getSize()-1, j=this.getSize()-1; index>=0 ; --index,--j)
            result.setMemberValue(index, this.getMemberValue(j));
        return result;
    }
    /** @return a new n-gram of size length/2+1 holding the left half (center included) */
    public NgramContainer getLeftPart(){
        NgramContainer result = new NgramContainer(this.getSize()/2 + 1);
        for (int index=0; index < result.getSize() ; ++index)
            result.setMemberValue(index, this.getMemberValue(index));
        return result;
    }
    /** @return a new n-gram holding the members from startIndex to the end */
    public NgramContainer getSubNgram(int startIndex){
        int size = this.getSize() - startIndex;
        NgramContainer result = new NgramContainer(size);
        for (int i=startIndex, j=0; i<this.getSize() ; ++i, ++j)
            result.setMemberValue(j, this.members[i]);
        return result;
    }
}
| 4,832 | 29.783439 | 98 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/TextToNgram/Utils.java | package main.java.TextToNgram;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Package-wide constants for the TextToNgram tools (default n-gram size,
 * exception message prefix and output formatting characters).
 */
public class Utils {
    public static final int packageDefaultNgramSize = 3;
    public static final String packageExceptionPrefix = "[main.java.TextToNgram]-";
    public static final String packageOutputDelimiter = " ";
    public static final String packageOutputNewLineCharacter = "\r\n";

    // Constants-only holder: prevent accidental instantiation.
    private Utils() {
    }
}
| 584 | 38 | 83 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/JuntoOutputConvert/MessagePrinter.java | package main.java.JuntoOutputConvert;
/**
 * Console helper for the JuntoOutputConvert tool: plain message printing plus
 * a fatal variant that shows usage help and terminates the process.
 */
public class MessagePrinter {
    // Usage help, printed one line at a time in this exact order.
    private static final String[] HELP_LINES = {
            "",
            "Input arguments format:",
            "\"-text [fileAddress]\" specifies the address of input junto file.",
            "\"-labels [fileAddress]\" specifies the address of labels dictionary file.",
            "\"-output [fileAddress]\" output file name."
    };
    /** Writes the message to standard output. */
    public static void Print (String msg) {
        System.out.println(msg);
    }
    /** Writes the message, shows the usage help, then exits with status 1. */
    public static void PrintAndDie(String msg) {
        Print(msg);
        printHelpMessage();
        System.exit(1);
    }
    private static void printHelpMessage() {
        for (String line : HELP_LINES)
            Print(line);
    }
}
| 636 | 26.695652 | 92 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/JuntoOutputConvert/JuntoOutputHandler.java | package main.java.JuntoOutputConvert;
import main.java.Utility.Config;
import main.java.Utility.LabelFileHandler;
import main.java.Utility.TextFileInput;
import main.java.Utility.TextFileOutput;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class JuntoOutputHandler {
    /**
     * Converts a junto label-propagation output file into the viterbi input
     * format: for every well-formed (6-column) junto line, one
     * "nodeId&lt;TAB&gt;probability" line is emitted per label index.
     * @param juntoFileAddress  tab-separated junto output file to read
     * @param labelsFileAddress labels dictionary file, used only to count labels
     * @param outputFileAddress destination file for the converted lines
     */
    public void convertJuntoOutputToViterbiFormat(String juntoFileAddress,
                                                  String labelsFileAddress,
                                                  String outputFileAddress){
        TextFileInput fileInput = new TextFileInput(juntoFileAddress);
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        try {
            String line;
            int labelCount = LabelFileHandler.countLabels(labelsFileAddress);
            float[] labels = new float[labelCount];
            while ((line = fileInput.readLine()) != null){
                String[] tokens = line.split("\\t");
                if (tokens.length != 6)
                    continue; // skip malformed junto lines
                //columns 1 & 4 (0-based: 0 & 3) are the node id and the label stream
                labels = this.initializeLabelsArray(labels);
                String nodeId = tokens[0];
                String labelsStream = tokens[3];
                tokens = labelsStream.split("\\s");
                // the stream alternates "labelId labelValue labelId labelValue ..."
                int index = 0;
                while (index < tokens.length){
                    String labelId = tokens[index++];
                    String labelValue = tokens[index++];
                    if (!labelId.equalsIgnoreCase("__DUMMY__")){
                        labels[Integer.parseInt(labelId)] = Float.parseFloat(labelValue);
                    }
                }
                StringBuilder outputStream = new StringBuilder();
                for (float label : labels)
                    outputStream.append(nodeId).append("\t").append(label)
                            .append(Config.outputNewLineCharacter);
                fileOutput.write(outputStream.toString());
            }
        } finally {
            // close both handles even when parsing throws mid-file
            fileOutput.close();
            fileInput.close();
        }
    }
    /** Resets every slot of the reusable label-probability array to zero. */
    private float[] initializeLabelsArray(float[] labels) {
        for (int i=0; i<labels.length ; ++i)
            labels[i] = 0;
        return labels;
    }
}
| 2,263 | 33.830769 | 86 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/JuntoOutputConvert/JuntoOutputConvertor.java | package main.java.JuntoOutputConvert;
import main.java.Utility.Defaults;
import java.util.Hashtable;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Command-line entry point that converts a junto output file into the viterbi
 * input format, driven by -text / -labels / -output arguments.
 */
public class JuntoOutputConvertor {
    // Filled in by processArguments(); all three are mandatory.
    private static String inputFileAddress, labelsFileAddress, outputFileAddress;
    public static void main(String[] args) {
        processArguments(args);
        new JuntoOutputHandler().convertJuntoOutputToViterbiFormat(inputFileAddress,
                labelsFileAddress, outputFileAddress);
    }
    /**
     * Parses "-name value" argument pairs into a lookup table, honors -h/-help,
     * and resolves the three required file addresses (dying when one is absent).
     */
    private static void processArguments(String[] args) {
        Hashtable<String, String> config = new Hashtable<String, String>(10, (float) 0.9);
        int i = 0;
        while (i < args.length) {
            // a "-name" token consumes the following token as its value
            if (args[i].startsWith("-") && i + 1 < args.length) {
                config.put(getCommandFromArg(args[i]), args[i + 1]);
                i += 2;
            } else {
                ++i;
            }
        }
        if (config.containsKey("h") || config.containsKey("help"))
            MessagePrinter.PrintAndDie("help->");
        inputFileAddress = Defaults.GetValueOrDie(config, "text");
        labelsFileAddress = Defaults.GetValueOrDie(config, "labels");
        outputFileAddress = Defaults.GetValueOrDie(config, "output");
    }
    /**
     * returns the command without the first character e.g. given "-command" output would be "command"
     * @param arg the command string
     * @return the lowercased command without the first "-" character
     */
    private static String getCommandFromArg(String arg) {
        return arg.substring(1).toLowerCase();
    }
}
| 1,824 | 35.5 | 102 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphConstructRunner.java | package main.java.Graph;
import main.java.Graph.Builder.*;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.PMI.FeatureHandler;
import main.java.Text.WordDictionary;
import main.java.Utility.*;
import java.util.Hashtable;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
//todo: read graph file concurrently
public class GraphConstructRunner {
    // File addresses collected from the command line by processArguments();
    // the ones initialized to null are optional and stay null when absent.
    private static String inputFileAddress,
            inputUnlabeledFileAddress,
            outputFileAddress,
            labelsInputFile,
            locationToLabelProbFile,
            dictionaryFile = null,
            featuresFile = null,
            dictionaryOfClassesFile = null,
            dictionaryOfPrepositionsFile = null;
    /**
     * Entry point: parses arguments (which also sets Config.runMode), then
     * dispatches to one of the three run modes, logging the total runtime
     * around the selected task.
     */
    public static void main(String[] args) {
        processArguments(args);
        Logger logger = new Logger(Config.defaultLogFileAddress);
        RuntimeAnalyzer totalRunAnalyzer;
        totalRunAnalyzer = logger.taskStarted("[GraphConstructRunner]- ");
        switch (Config.runMode){
            case Graph:
                runInGraphMode(logger);
                break;
            case EmpiricalTypeProbability:
                runInEmpiricalTypeLabelProbabilityMode(logger);
                break;
            case TypeProbability:
                runInTypeProbabilityMode(logger);
                break;
        }
        logger.taskFinished(totalRunAnalyzer, "[GraphConstructRunner]- ");
        logger.close();
    }
    /**
     * program is run in typeprobability mode so only .type2probability file will be created.
     * @param logger logger object to use
     */
    private static void runInTypeProbabilityMode(Logger logger) {
        IGraphBuilder graphBuilder = GraphBuilderFactory.getGraphBuilder(logger, Config.graphNgramType);
        GraphContainer graph;
        graph = graphBuilder.createGraphFromFileBaseForTypeProbabilityCalculation(inputFileAddress,
                labelsInputFile, locationToLabelProbFile);
        graphBuilder.saveFileAsNodeIdToTypeLevelProbabilities(graph,
                outputFileAddress + Defaults.exportTypeLevelProbabilitiesPostfix);
    }
    /**
     * program is run in type empirical probability mode so only .seed file will be created.
     * @param logger logger object to use
     */
    private static void runInEmpiricalTypeLabelProbabilityMode(Logger logger) {
        IGraphBuilder graphBuilder = GraphBuilderFactory.getGraphBuilder(logger, Config.graphNgramType);
        GraphContainer graph;
        graph = graphBuilder.createGraphFromFileBaseForMarginalsCalculation(inputFileAddress);
        graphBuilder.saveFileAsTypeLevelEmpiricalLabelProbabilities(graph,
                outputFileAddress + Defaults.exportTypeLevelEmpiricalLabelProbabilitiesPostfix);
    }
    /**
     * program is run in graph mode so .graph and .type2probability files will be created.
     * Optionally pre-seeds the graph with class/preposition dictionaries and,
     * when a word dictionary is supplied, dumps graph analytics at the end.
     * @param logger logger object to use
     */
    private static void runInGraphMode(Logger logger) {
        //todo: add pos graphbuilder here
        if (featuresFile != null)
            FeatureHandler.readFeaturesFromFile(featuresFile);
        IGraphBuilder graphBuilder = GraphBuilderFactory.getGraphBuilder(logger, Config.graphNgramType);
        GraphContainer graph = null;
        //todo: correct here, these should be run independently
        // NOTE(review): the prepositions dictionary is only honored when the
        // classes dictionary is also given — confirm that nesting is intended.
        if (dictionaryOfClassesFile != null){
            WordDictionary dictionaryOfClasses = new WordDictionary();
            dictionaryOfClasses.buildDictionaryFromFile(dictionaryOfClassesFile);
            if (dictionaryOfPrepositionsFile != null){
                WordDictionary dictionaryOfPrepositions = new WordDictionary();
                dictionaryOfPrepositions.buildDictionaryFromFile(dictionaryOfPrepositionsFile);
                graph = new GraphContainer(dictionaryOfClasses, dictionaryOfPrepositions);
            } else {
                graph = new GraphContainer(dictionaryOfClasses);
            }
        }
        graph = graphBuilder.createGraphFromFileMultiThread(graph, inputFileAddress, inputUnlabeledFileAddress);
        graphBuilder.saveGraphToFile(graph, outputFileAddress + Defaults.exportGraphPostfix);
        if (dictionaryFile != null){
            graph.getGraphAnalytics(dictionaryFile);
        }
    }
    /**
     * Parses the raw command line into the static file-address fields and the
     * global Config settings (run mode, POS-style input, k, edge-weight
     * threshold, graph node type). Dies with a help message when the run mode
     * or a required file argument is missing.
     */
    private static void processArguments(String[] args) {
        Hashtable<String, String> configTable = new Hashtable<String, String>(10, (float) 0.9);
        for (int i=0; i<args.length ; ++i){
            if (args[i].startsWith("-")){
                // the four mode flags take no value; every other "-name" token
                // consumes the next token as its value
                if (args[i].toLowerCase().equals("-typeprobability") || args[i].toLowerCase().equals("-graph") ||
                        args[i].toLowerCase().equals("-empirical") || args[i].toLowerCase().equals("-pos"))
                    configTable.put(getCommandFromArg(args[i]), "true");
                else if (i+1 < args.length){
                    configTable.put(getCommandFromArg(args[i]), args[++i]);
                }
            }
        }
        if (configTable.containsKey("h") || configTable.containsKey("help"))
            MessagePrinter.PrintAndDie("help->");
        // exactly one run mode must be selected; -graph wins over the others
        if (configTable.containsKey("graph"))
            Config.runMode = Config.RunModeType.Graph;
        else if (configTable.containsKey("typeprobability"))
            Config.runMode = Config.RunModeType.TypeProbability;
        else if (configTable.containsKey("empirical"))
            Config.runMode = Config.RunModeType.EmpiricalTypeProbability;
        else
            MessagePrinter.PrintAndDie("run mode must be specified using input argument -graph or -typeprobability or -empirical");
        inputFileAddress = Defaults.GetValueOrDie(configTable, "text");
        outputFileAddress = Defaults.GetValueOrDie(configTable, "output");
        // per-mode required arguments
        switch (Config.runMode){
            case Graph:
                inputUnlabeledFileAddress = Defaults.GetValueOrDie(configTable, "textu");
                break;
            case EmpiricalTypeProbability:
                break;
            case TypeProbability:
                labelsInputFile = Defaults.GetValueOrDie(configTable, "labels");
                locationToLabelProbFile = Defaults.GetValueOrDie(configTable, "marginals");
                break;
        }
        if (configTable.containsKey("pos"))
            Config.POSstyleInput = true;
        // optional arguments: missing ones leave the Config defaults untouched
        dictionaryFile = Defaults.GetValueOrDefault(configTable, "dictionary", null);
        featuresFile = Defaults.GetValueOrDefault(configTable, "features", null);
        dictionaryOfClassesFile = Defaults.GetValueOrDefault(configTable, "classdic", null);
        dictionaryOfPrepositionsFile = Defaults.GetValueOrDefault(configTable, "prepositiondic", null);
        String kValue = Defaults.GetValueOrDefault(configTable, "k", null);
        if (kValue != null)
            Config.setKnnDefaultSize(Integer.parseInt(kValue));
        String edgeWeightThreshold = Defaults.GetValueOrDefault(configTable, "threshold", null);
        if (edgeWeightThreshold != null)
            Config.edgeWeightThreshold = Float.parseFloat(edgeWeightThreshold);
        String nodeType = Defaults.GetValueOrDefault(configTable, "node", null);
        if (nodeType!=null){
            if (nodeType.equalsIgnoreCase("word"))
                Config.graphNgramType = GraphBuilderFactory.GraphNgramType.Word;
            else if (nodeType.equalsIgnoreCase("wordclass"))
                Config.graphNgramType = GraphBuilderFactory.GraphNgramType.WordClass;
        }
    }
    /**
     * returns the command without the first character e.g. given "-command" output would be "command"
     * @param arg the command string
     * @return the command without the first "-" character
     */
    private static String getCommandFromArg(String arg) {
        return arg.substring(1, arg.length()).toLowerCase();
    }
}
| 8,012 | 41.173684 | 131 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Concurrency/GraphThreadHandler.java | package main.java.Graph.Concurrency;
import main.java.Graph.GraphStructure.GraphContainer;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Worker thread that processes every node whose index is congruent to
 * {@code seed} modulo {@code step}, either building the PMI feature-score map
 * or populating edge weights on the shared graph.
 */
public class GraphThreadHandler extends Thread {
    private int seed,step;
    GraphContainer graph;
    private boolean calculatePMI;
    /**
     * @param seed         first node index handled by this thread
     * @param step         stride between node indices (typically the thread count)
     * @param graph        shared graph the thread operates on
     * @param calculatePMI true to build the feature-score map, false to populate edge values
     */
    public GraphThreadHandler(int seed, int step, GraphContainer graph, boolean calculatePMI){
        this.seed = seed;
        this.step = step;
        this.graph = graph;
        this.calculatePMI = calculatePMI;
    }
    /** Convenience constructor: defaults to edge-value population (no PMI pass). */
    public GraphThreadHandler(int seed, int step, GraphContainer graph){
        // delegate instead of duplicating the field assignments
        this(seed, step, graph, false);
    }
    @Override
    public void run(){
        if (calculatePMI){
            graph.buildFeatureScoreMapForNodes(seed, step);
            calculatePMI = false;
        } else
            graph.populateEdgeValues(seed, step);
    }
}
| 1,122 | 28.552632 | 94 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Concurrency/GraphWithPOSThreadHandler.java | package main.java.Graph.Concurrency;
import main.java.Graph.GraphStructure.GraphContainerWithPOS;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * POS-graph counterpart of GraphThreadHandler: processes every node whose
 * index is congruent to {@code seed} modulo {@code step}, either building the
 * PMI feature-score map or populating edge weights on the shared POS graph.
 */
public class GraphWithPOSThreadHandler extends Thread{
    private int seed,step;
    GraphContainerWithPOS graph;
    private boolean calculatePMI;
    /**
     * @param seed         first node index handled by this thread
     * @param step         stride between node indices (typically the thread count)
     * @param graph        shared POS graph the thread operates on
     * @param calculatePMI true to build the feature-score map, false to populate edge values
     */
    public GraphWithPOSThreadHandler(int seed, int step, GraphContainerWithPOS graph, boolean calculatePMI){
        this.seed = seed;
        this.step = step;
        this.graph = graph;
        this.calculatePMI = calculatePMI;
    }
    /** Convenience constructor: defaults to edge-value population (no PMI pass). */
    public GraphWithPOSThreadHandler(int seed, int step, GraphContainerWithPOS graph){
        // delegate instead of duplicating the field assignments
        this(seed, step, graph, false);
    }
    @Override
    public void run(){
        if (calculatePMI){
            graph.buildFeatureScoreMapForNodes(seed, step);
            calculatePMI = false;
        } else
            graph.populateEdgeValues(seed, step);
    }
}
| 1,170 | 29.815789 | 108 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/NodeWithPartOfSpeech.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.Defaults;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Graph node specialized for locations that carry part-of-speech tags.
 */
public class NodeWithPartOfSpeech extends Node<LocationWithPOSTags> {
    /** Node with id 0 and no label slots. */
    public NodeWithPartOfSpeech(NgramContainer value){
        super(0, value, 0);
    }
    /** Node with id 0 and {@code labelCount} label-probability slots. */
    public NodeWithPartOfSpeech(NgramContainer value, int labelCount){
        super(0, value, labelCount);
    }
    public NodeWithPartOfSpeech(int nodeId ,NgramContainer value, int labelCount) {
        super(nodeId, value, labelCount);
    }
    /** Records an occurrence location; null locations are silently ignored. */
    public void addLocation(LocationWithPOSTags location){
        if (location!=null)
            locationArrayList.add(location);
    }
    /**
     * Builds the 5-slot POS context for the occurrence at {@code index}:
     * one left-context tag, the three tags of the n-gram itself, and one
     * right-context tag.
     */
    public NgramContainer getContextPOSTags(int index){
        this.throwExceptionForInvalidLocationArrayListIndex(index);
        LocationWithPOSTags occurrence = this.locationArrayList.get(index);
        NgramContainer context = new NgramContainer(5);
        context.setMemberValue(0, occurrence.getLeftContextPOSTags().getMemberValue(0));
        // slots 1..3 mirror the tri-gram's own POS tags 0..2
        for (int i = 0; i < 3; ++i)
            context.setMemberValue(i + 1, occurrence.getNgramPOSTags().getMemberValue(i));
        context.setMemberValue(4, occurrence.getRightContextPOSTags().getMemberValue(1));
        return context;
    }
}
| 1,674 | 36.222222 | 94 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/LabelCountContainer.java | package main.java.Graph.GraphStructure;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Per-label occurrence statistics: the label's index, how often it has been
 * seen, and its cached empirical probability.
 */
public class LabelCountContainer {
    // Which label this container tracks.
    private int labelIndex;
    // Number of recorded occurrences of the label.
    private int count;
    // count / total observations; filled in by setEmpiricalProbability().
    private float empiricalProbability;
    /** Creates a zeroed container tracking label index 0. */
    public LabelCountContainer(){
        this(0);
    }
    /** Creates a zeroed container tracking the given label index. */
    public LabelCountContainer(int labelIndex) {
        this.labelIndex = labelIndex;
        this.count = 0;
        this.empiricalProbability = 0;
    }
    public int getLabelIndex() {
        return labelIndex;
    }
    public void setLabelIndex(int labelIndex) {
        this.labelIndex = labelIndex;
    }
    public int getCount() {
        return count;
    }
    public void setCount(int count) {
        this.count = count;
    }
    /** Records one more occurrence of this label. */
    public void incrementCount() {
        count += 1;
    }
    /** Computes and caches count / totalLabelCount. */
    public void setEmpiricalProbability(int totalLabelCount){
        this.empiricalProbability = (float) this.count / totalLabelCount;
    }
    public float getEmpiricalProbability(){
        return this.empiricalProbability;
    }
}
| 1,286 | 22.833333 | 84 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/Edge.java | package main.java.Graph.GraphStructure;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Directed, weighted edge of the similarity graph, pointing at its
 * destination node.
 */
public class Edge<LocationType extends Location> {
    private final float weight;
    private final Node<LocationType> destination;
    /**
     * Creates an edge with the given weight and destination.
     * NOTE(review): the constructor and getter use the raw {@code Node} type;
     * {@code Node<LocationType>} would be type-safe — confirm callers before changing.
     */
    public Edge(float iWeight, Node iDestination){
        this.weight = iWeight;
        this.destination = iDestination;
    }
    /** @return the edge weight */
    public float getWeight(){
        return weight;
    }
    /** @return the node this edge points at */
    public Node getDestination(){
        return destination;
    }
}
| 679 | 25.153846 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/LabelCountMap.java | package main.java.Graph.GraphStructure;
import main.java.Utility.Config;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Ordered (by label index) list of per-label occurrence counts for one node,
 * with on-demand empirical-probability computation and serialization.
 */
public class LabelCountMap {
    // Kept sorted ascending by label index via insertLabel().
    private final ArrayList<LabelCountContainer> labels;
    // Sum of all label occurrences, denominator of the empirical probabilities.
    private int totalLabelOccurrence;
    public LabelCountMap(){
        this.labels = new ArrayList<LabelCountContainer>();
        this.totalLabelOccurrence = 0;
    }
    /**
     * @return position of the given label index in the internal list, or -1
     *         when it has not been seen yet
     */
    public int indexOf(int labelIndex){
        int result = -1;
        for (int i=0; i<this.labels.size() ;++i){
            if (this.labels.get(i).getLabelIndex() == labelIndex){
                result = i;
                break;
            }
        }
        return result;
    }
    /** Records one occurrence of the label, inserting it on first sight. */
    public void increaseFrequency(int labelIndex){
        int index = indexOf(labelIndex);
        if (index < 0){
            index = insertLabel(labelIndex);
        }
        this.labels.get(index).incrementCount();
        ++this.totalLabelOccurrence;
    }
    /**
     * Inserts a fresh zero-count container at the position that keeps the list
     * sorted by label index.
     * @return the position the container was inserted at
     */
    public int insertLabel(int labelIndex) {
        int index = 0;
        for ( ; index<this.labels.size() ; ++index){
            if (this.labels.get(index).getLabelIndex() > labelIndex)
                break;
        }
        LabelCountContainer labelCountContainer = new LabelCountContainer(labelIndex);
        this.labels.add(index, labelCountContainer);
        return index;
    }
    /** Recomputes every label's empirical probability from the current totals. */
    public void updateEmpiricalProbabilities(){
        for (LabelCountContainer labelInfo:labels)
            labelInfo.setEmpiricalProbability(this.totalLabelOccurrence);
    }
    /**
     * Renders one "nodeId\tlabelIndex\tprobability" line per known label.
     * Uses a StringBuilder instead of repeated string concatenation.
     */
    public String serializeEmpiricalProbabilities(String nodeId){
        StringBuilder result = new StringBuilder();
        for (LabelCountContainer labelInfo:this.labels)
            result.append(nodeId).append("\t")
                    .append(labelInfo.getLabelIndex()).append("\t")
                    .append(labelInfo.getEmpiricalProbability())
                    .append(Config.outputNewLineCharacter);
        return result.toString();
    }
}
| 2,123 | 28.5 | 86 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/GraphContainerWithPOS.java | package main.java.Graph.GraphStructure;
import main.java.PMI.FeatureHandler;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.LocationToLabelFileHandler;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphContainerWithPOS extends GraphContainerAbstract<LocationWithPOSTags>{
    // Frequency statistics of POS-based features and of (node, POS-feature) pairs.
    protected NgramStatMap ngramStatMapForPOS;
    protected NgramPairStatMap ngramPairStatMapForPOS;
    // Slot i holds the graph responsible for ngrams of a given size; index
    // mapping is delegated to getIndexOfGraph() in the parent class.
    protected GraphContainerWithPOS[] ngramGraph;
    public GraphContainerWithPOS(){
        super();
    }
    public GraphContainerWithPOS(WordDictionary dictionaryOfClasses){
        super(dictionaryOfClasses);
    }
    public GraphContainerWithPOS(WordDictionary dictionaryOfClasses, WordDictionary dictionaryOfPrepositions){
        super(dictionaryOfClasses, dictionaryOfPrepositions);
    }
    // Extends the parent initialization with the two POS statistic maps.
    protected void initialize(){
        super.initialize();
        this.ngramStatMapForPOS = new NgramStatMap();
        this.ngramPairStatMapForPOS = new NgramPairStatMap();
    }
    @Override
    protected void initializeNgramGraphsArray() {
        this.ngramGraph = new GraphContainerWithPOS[5];
    }
    @Override
    protected void initializeNodeList() {
        this.nodeList = new ArrayList<Node<LocationWithPOSTags>>();
    }
    // NOTE(review): deliberately returns null — locations are built inline in
    // addNgramsToGraph instead; confirm no parent-class caller dereferences this.
    @Override
    protected LocationWithPOSTags newLocationObject(int sequence, int position) {
        return null;
    }
    @Override
    protected void storeSelfInGraphOfNgrams() {
        this.setGraphOfNgram(GraphContainerAbstract.defaultNgramSize, this); //set the tri-gram graph to self
    }
    /**
     * sets the reference to graph for a specified ngram size
     * @param ngramSize size of ngram
     * @param graph a given graph object
     */
    public void setGraphOfNgram(int ngramSize, GraphContainerWithPOS graph){
        this.ngramGraph[this.getIndexOfGraph(ngramSize)] = graph;
    }
    /**
     * gets the graph assigned to a given ngram size
     * @param ngramSize size of ngram
     * @return a GraphContainer object containing information on ngrams of a specified size
     */
    public GraphContainerWithPOS getGraphOfNgram(int ngramSize){
        return this.ngramGraph[this.getIndexOfGraph(ngramSize)];
    }
    /** Looks the ngram up in the graph responsible for ngrams of its size. */
    public int getCountOfNgram(NgramContainer ngram){
        return this.getGraphOfNgram(ngram.getSize()).getCountOfNgramInSelf(ngram);
    }
    // Drops the cross-graph references so the per-size graphs can be collected.
    public void removeRedundantData() {
        for (int i=0; i<ngramGraph.length ; ++i)
            this.ngramGraph[i] = null;
    }
    /**
     * Adds the ngrams of one sentence (sequence) to the graph, chaining each
     * location to its predecessor. Note the loop runs from 1 to length-2, so
     * the first and last ngram of the set are skipped (see the todo below —
     * this was an intentional change per the commented-out loop).
     */
    public void addNgramsToGraph(NgramContainer[] ngramSet, NgramContainer[] POSSet, int sequence) {
        LocationWithPOSTags currentLocation, previousLocation = null;
        int position = 0; //position of the word in current sentence (sequence)
        NgramContainer previousNgram = null;
        for (int i=1; i<ngramSet.length-1 ; ++i) {
            //todo: this was changed
            //for (NgramContainer ngram : ngramSet) {
            currentLocation = new LocationWithPOSTags(sequence, position, POSSet[i]);
            currentLocation.setPreviousLocation(previousLocation, previousNgram, ngramSet[i]);
            this.addNode(new Node<LocationWithPOSTags>(ngramSet[i]), currentLocation);//add node to graph or else update node frequency
            ++position;
            previousLocation = currentLocation;
            previousNgram = ngramSet[i];
        }
    }
    /**
     * Labeled variant: same traversal as above (first/last ngram skipped), but
     * each node also receives the label probabilities read from the
     * location-to-label mapping file for its (sequence, position).
     */
    public void addNgramsToGraph(NgramContainer[] ngramSet, NgramContainer[] POSSet, int sequence, int labelCount, LocationToLabelFileHandler fileInputLocationToLabelMapping){
        int position = 0;
        LocationWithPOSTags currentLocation, previousLocation = null;
        NgramContainer previousNgram = null;
        float[] labelProbabilitiesArray;
        Node<LocationWithPOSTags> tempNode;
        for (int i=1; i<ngramSet.length-1 ; ++i) {
            currentLocation = new LocationWithPOSTags(sequence, position, POSSet[i]);
            currentLocation.setPreviousLocation(previousLocation, previousNgram, ngramSet[i]);
            labelProbabilitiesArray = fileInputLocationToLabelMapping.getLabelProbabilitiesOf(sequence, position, labelCount);
            tempNode = new Node<LocationWithPOSTags>(ngramSet[i], labelCount);
            this.addNode(tempNode, currentLocation, labelProbabilitiesArray);//add node to graph or else update node frequency
            ++position;
            previousLocation = currentLocation;
            previousNgram = ngramSet[i];
        }
    }
    public NgramStatMap getNgramStatMapForPOS(){
        return this.ngramStatMapForPOS;
    }
    public NgramPairStatMap getNgramPairStatMapForPOS(){
        return this.ngramPairStatMapForPOS;
    }
    /** Accumulates POS-feature statistics over every node of the graph. */
    public void computeFeatureStats(){
        for(Node<LocationWithPOSTags> node:nodeList)
            this.computeFeatureStats(node);
    }
    //todo: call this method
    /**
     * For every occurrence of the node, extracts the features of its POS
     * context and records each POS feature both globally and paired with the
     * node's ngram.
     */
    protected void computeFeatureStats(Node<LocationWithPOSTags> node){
        NgramContainer[] featureArray;
        NgramContainer nodeContext;
        for (int i=0; i<node.getLocationArrayList().size() ; ++i){
            nodeContext = node.getContext(i);
            featureArray = FeatureHandler.extractFeaturesOfContext(nodeContext);
            for (NgramContainer aFeature : featureArray) {
                if (FeatureHandler.isPOSFeature(aFeature)) {
                    ngramStatMapForPOS.add(FeatureHandler.getMainPartOfNonSimpleFeature(aFeature));
                    ngramPairStatMapForPOS.add(node.getNgram(), FeatureHandler.getMainPartOfNonSimpleFeature(aFeature));
                }
            }
        }
    }
}
| 5,865 | 34.98773 | 175 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/NgramStatMapCell.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * One cell of an ngram statistics map: an ngram plus its integer counter.
 */
public class NgramStatMapCell {
    // The ngram this cell tracks; never reassigned after construction.
    private final NgramContainer ngram;
    // Occurrence counter, starts at zero.
    private int value;
    /** Creates a cell for the given ngram with a zeroed counter. */
    public NgramStatMapCell(NgramContainer ngram) {
        this.ngram = ngram;
        this.value = 0;
    }
    public int getValue() {
        return value;
    }
    public void setValue(int value) {
        this.value = value;
    }
    public NgramContainer getNgram() {
        return ngram;
    }
    /** Increments the counter by one. */
    public void increaseValue(){
        value += 1;
    }
}
| 800 | 21.25 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/Node.java | package main.java.Graph.GraphStructure;
import main.java.PMI.Struct.NodePairFeatureSetContainer;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.Config;
import main.java.Utility.DataTypeManipulator;
import main.java.Utility.Defaults;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * A graph vertex representing one n-gram observed in the analyzed corpus.
 * A node records every corpus occurrence of its n-gram (as LocationType entries),
 * the total occurrence count, per-label probability sums, a feature-score
 * container used for similarity computation, and weighted edges to other nodes.
 * NOTE(review): getContext() indexes members 0..2 of the n-gram, so the class
 * appears to assume trigram nodes — confirm before reusing for other n-gram sizes.
 */
public class Node<LocationType extends Location> {
    protected int nodeId;
    protected NgramContainer ngram;
    // one entry per occurrence of this n-gram in the corpus
    protected ArrayList<LocationType> locationArrayList;
    // outgoing edges; addEdgeWithSort keeps this sorted by descending weight
    protected ArrayList<Edge<LocationType>> edgeArrayList;
    // cached feature scores for this node, built externally (see setFeatureSetContainer)
    protected NodePairFeatureSetContainer featureSetContainer;
    // per-label occurrence counts / empirical probabilities for this node
    protected LabelCountMap labelCountMap;
    /**
     * number of occurrences of the associated ngram in analyzed text
     */
    protected int frequency = 0;
    /**
     * member at index i of this array stores the sum of probability of label[i] in all
     * occurrences of the ngram associated with this node.
     */
    protected float[] totalLabelProbability;

    /** @return the numeric id assigned to this node within its graph */
    public int getNodeId(){
        return nodeId;
    }

    /** @param id the numeric id to assign (set by the owning graph on insertion) */
    public void setNodeId(int id){
        this.nodeId = id;
    }

    /** @return the n-gram this node represents */
    public NgramContainer getNgram(){
        return ngram;
    }

    /**
     * Records one more occurrence location for this node. Null locations are
     * silently ignored (the graph passes null when no location is tracked).
     */
    public void addLocation(LocationType location){
        if (location!=null)
            locationArrayList.add(location);
    }

    /** @return the live list of recorded occurrence locations (not a copy) */
    public ArrayList<LocationType> getLocationArrayList(){
        return locationArrayList;
    }

    /** @return number of times this n-gram was seen (starts at 1 on construction) */
    public int getFrequency(){
        return frequency;
    }

    /** Creates a node with id 0 and no label-probability tracking. */
    public Node(NgramContainer value){
        Constructor(0, value, 0);
    }

    /** Creates a node with id 0 tracking {@code labelCount} label probabilities. */
    public Node(NgramContainer value, int labelCount){
        Constructor(0, value, labelCount);
    }

    /** Creates a fully specified node. */
    public Node(int nodeId ,NgramContainer value, int labelCount) {
        Constructor(nodeId, value, labelCount);
    }

    /**
     * Shared constructor body. Note that it calls increaseFrequency(), so a
     * freshly built node already has frequency == 1.
     */
    protected void Constructor(int nodeId ,NgramContainer value, int labelCount){
        this.edgeArrayList = new ArrayList<Edge<LocationType>>();
        this.locationArrayList = new ArrayList<LocationType>();
        this.nodeId = nodeId;
        this.ngram = value;
        this.totalLabelProbability = DataTypeManipulator.newInitializedFloatArray(labelCount);
        this.increaseFrequency();
        this.labelCountMap = new LabelCountMap();
    }

    /**
     * use this method to calculate Q(y) for each ngram, y stands for label
     * @return an array of float typeLabel probabilities of current node
     */
    public float[] getTypeLabelProbabilities(){
        float[] Q = new float[totalLabelProbability.length];
        for (int i=0; i<Q.length ; ++i)
            Q[i] = totalLabelProbability[i]/frequency;
        return Q;
    }

    /** Increments the occurrence count by one. */
    public void increaseFrequency(){
        ++frequency;
    }

    /**
     * Accumulates per-label probabilities from one occurrence into the running
     * totals. A null input is a no-op; a size mismatch is a programming error.
     * @param labelProbabilityArray per-label probabilities for one occurrence
     * @throws IllegalArgumentException if the array length differs from the
     *         label count this node was constructed with
     */
    public void addLabelProbability(float[] labelProbabilityArray){
        if (labelProbabilityArray == null)
            return;
        if (totalLabelProbability.length != labelProbabilityArray.length)
            throw new IllegalArgumentException(Defaults.packageExceptionPrefix
                    + "[invalid use of method: addLabelProbability] " +
                    "size of input array does not match with totalLabelProbability array");
        for (int i=0; i<totalLabelProbability.length ; ++i){
            totalLabelProbability[i] += labelProbabilityArray[i];
        }
    }

    /**
     * Equality based solely on the underlying n-gram.
     * NOTE(review): this is an overload, not an override of Object.equals(Object);
     * hash-based collections will not use it.
     */
    public boolean equals(Node<LocationType> matchingNode){
        return this.ngram.equals(matchingNode.ngram);
    }

    /**
     * get context of this trigram occurring in the index location
     * @param index index of the trigram occurrence
     * @return context of the given trigram
     */
    public NgramContainer getContext(int index){
        throwExceptionForInvalidLocationArrayListIndex(index);
        // 5-gram layout: [left-context word, trigram w0, w1, w2, right-context word]
        NgramContainer context = new NgramContainer(5);
        context.setMemberValue(0, this.locationArrayList.get(index).getLeftContext().getMemberValue(0));
        context.setMemberValue(1, this.ngram.getMemberValue(0));
        context.setMemberValue(2, this.ngram.getMemberValue(1));
        context.setMemberValue(3, this.ngram.getMemberValue(2));
        context.setMemberValue(4, this.locationArrayList.get(index).getRightContext().getMemberValue(1));
        return context;
    }

    /** Validates an occurrence index; throws IllegalArgumentException when out of range. */
    protected void throwExceptionForInvalidLocationArrayListIndex(int index) {
        if (index < 0 || index >= locationArrayList.size())
            throw new IllegalArgumentException(Defaults.packageExceptionPrefix
                    + "[invalid use of method: Node.getContext] "
                    + "index must be a non-negative integer less than the size of location array list. index="
                    + index + ", size=" + locationArrayList.size());
    }

    /** @return the live, weight-sorted edge list (not a copy) */
    public ArrayList<Edge<LocationType>> getEdgeArrayList(){
        return this.edgeArrayList;
    }

    /** Adds an edge to {@code destination}, keeping the edge list sorted by weight. */
    public void addEdge(Node<LocationType> destination ,float weight){
        this.addEdgeWithSort(destination, weight);
    }

    /**
     * Inserts the new edge at the position that keeps edgeArrayList in
     * descending-weight order. Synchronized on the list because edges are
     * populated concurrently (see GraphContainerAbstract seed/step workers).
     */
    protected void addEdgeWithSort(Node<LocationType> destination, float weight){
        int index;
        synchronized (this.edgeArrayList){
            for (index=0; index<this.edgeArrayList.size() ; ++index){
                if (weight > this.edgeArrayList.get(index).getWeight())
                    break;
            }
            this.edgeArrayList.add(index, new Edge<LocationType>(weight,destination));
        }
    }

    /**
     * Prunes the edge list to its K heaviest edges. Relies on the list already
     * being sorted by descending weight; removes from the tail to avoid shifting.
     */
    public void convertEdgesToKNN(int kValue){
        for (int index=this.edgeArrayList.size() - 1; index>=kValue ; --index)
            this.edgeArrayList.remove(index);
        //also edgeArrayList.subList(fromIndex, toIndex) method can be used
    }

    /** @return one line per edge: source-ngram TAB dest-ngram TAB weight */
    public String serialize(){
        String result = "";
        for (Edge<LocationType> edge : edgeArrayList)
            result += this.ngram.serialize()
                    + "\t" + edge.getDestination().getNgram().serialize()
                    + "\t" + edge.getWeight()
                    + Config.outputNewLineCharacter;
        return result;
    }

    /**
     * note: this method is only used for debugging purposes
     * @param dictionary a WordDictionary object used to map each wordId to its string representation
     * @return [source node in serialized form] [destination node in serialized form] Real(weight of edge connecting these nodes)
     */
    public String serializeAsWordSets(WordDictionary dictionary){
        String result = "";
        String myWordSet = this.ngram.getWordSet(dictionary);
        for (Edge<LocationType> edge : edgeArrayList)
            result += myWordSet
                    + "\t" + edge.getDestination().getNgram().getWordSet(dictionary)
                    + "\t" + edge.getWeight()
                    + Config.outputNewLineCharacter;
        return result;
    }

    /** @return one line per label: ngram TAB labelIndex TAB Q(label) */
    public String serializeTypeLabelProbabilities(){
        float[] typeProbabilitiesArray = this.getTypeLabelProbabilities();
        String result = "";
        for(int index=0; index<typeProbabilitiesArray.length ; ++index)
            result += this.ngram.serialize()
                    + "\t" + index
                    + "\t" + typeProbabilitiesArray[index]
                    + Config.outputNewLineCharacter;
        return result;
    }

    /** @return the externally built feature-score container, may be null until set */
    public NodePairFeatureSetContainer getFeatureSetContainer() {
        return featureSetContainer;
    }

    /** @param featureSetContainer precomputed feature scores for this node */
    public void setFeatureSetContainer(NodePairFeatureSetContainer featureSetContainer) {
        this.featureSetContainer = featureSetContainer;
    }

    /** Counts one observation of the label with the given index for this node. */
    public void incrementLabelCount(int labelIndex){
        this.labelCountMap.increaseFrequency(labelIndex);
    }

    /** Recomputes empirical label probabilities from the accumulated label counts. */
    public void updateLabelsEmpiricalProbabilities(){
        this.labelCountMap.updateEmpiricalProbabilities();
    }

    /** @return serialized empirical label probabilities, prefixed with this n-gram */
    public String serializeAsEmpiricalProbabilities() {
        return this.labelCountMap.serializeEmpiricalProbabilities(this.ngram.serialize());
    }

    /** @return true when this node's n-gram is contained in the given dictionary */
    public boolean isMemberOfDictionary(WordDictionary wordDictionary) {
        return this.getNgram().isMemberOfDictionary(wordDictionary);
    }
}
| 8,231 | 34.482759 | 129 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/LocationForUnigrams.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.Config;
import java.util.StringTokenizer;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Records one occurrence of a unigram inside the corpus: the sentence index
 * (sequence), the word offset within that sentence (position), and the two-word
 * left/right contexts surrounding the occurrence.
 * NOTE(review): this class largely duplicates Location but does not extend it.
 */
public class LocationForUnigrams {
    private NgramContainer leftContext, rightContext;
    private NgramContainer previousNode, nextNode;
    private int sequence;
    private int position;

    /** @return index of the sentence this occurrence belongs to */
    public int getSequence() {
        return sequence;
    }

    /** @param sequence index of the sentence this occurrence belongs to */
    public void setSequence(int sequence) {
        this.sequence = sequence;
    }

    /** @return zero-based word offset inside the sentence */
    public int getPosition() {
        return position;
    }

    /** @param position zero-based word offset inside the sentence */
    public void setPosition(int position) {
        this.position = position;
    }

    /** Convenience setter for both coordinates at once. */
    public void setSeqAndPos(int seq, int pos){
        setSequence(seq);
        setPosition(pos);
    }

    /** Allocates empty two-word contexts and zeroes both coordinates. */
    private void initialize(){
        this.leftContext = new NgramContainer(2);
        this.rightContext = new NgramContainer(2);
        this.sequence = 0;
        this.position = 0;
    }

    /** Creates a location at the given sentence/offset with empty contexts. */
    public LocationForUnigrams(int seq, int pos){
        this.initialize();
        this.sequence = seq;
        this.position = pos;
    }

    /** Copy constructor; copies coordinates only, contexts start empty. */
    public LocationForUnigrams(LocationForUnigrams oldCopy){
        this.initialize();
        this.sequence = oldCopy.sequence;
        this.position = oldCopy.position;
    }

    /** Creates a location at (0, 0) with empty contexts. */
    public LocationForUnigrams(){
        this.initialize();
    }

    /**
     * Parses one whitespace-separated line of the form
     * "sequence position labelId probability".
     * @param lineOfData the line to parse
     * @return the parsed record, or null when the line does not have exactly 4 tokens
     */
    public static LocationLabelProbability extractLocationFromString(String lineOfData){
        StringTokenizer tokens = new StringTokenizer(lineOfData, " \t");
        if (tokens.countTokens() != 4)
            return null;
        LocationLabelProbability parsed = new LocationLabelProbability();
        parsed.setSequence(Integer.parseInt(tokens.nextToken()));
        parsed.setPosition(Integer.parseInt(tokens.nextToken()));
        parsed.setLabelId(Integer.parseInt(tokens.nextToken()));
        parsed.setLabelProbability(Float.parseFloat(tokens.nextToken()));
        return parsed;
    }

    /**
     * Links two consecutive occurrences: fills the previous location's right
     * context from the current n-gram and this location's left context from the
     * previous n-gram. Does nothing when there is no previous location.
     */
    public void setPreviousLocation(Location previousLocation,
                                    NgramContainer previousNgram, NgramContainer currentNgram){
        if (previousLocation == null)
            return;
        NgramContainer prevRight = previousLocation.getRightContext();
        prevRight.setMemberValue(0, currentNgram.getMemberValue(1));
        prevRight.setMemberValue(1, currentNgram.getMemberValue(2));
        NgramContainer myLeft = this.getLeftContext();
        myLeft.setMemberValue(0, previousNgram.getMemberValue(0));
        myLeft.setMemberValue(1, previousNgram.getMemberValue(1));
    }

    /** @return the two-word context preceding this occurrence */
    public NgramContainer getLeftContext() {
        return leftContext;
    }

    /** @param leftContext the two-word context preceding this occurrence */
    public void setLeftContext(NgramContainer leftContext) {
        this.leftContext = leftContext;
    }

    /** @return the two-word context following this occurrence */
    public NgramContainer getRightContext() {
        return rightContext;
    }

    /** @param rightContext the two-word context following this occurrence */
    public void setRightContext(NgramContainer rightContext) {
        this.rightContext = rightContext;
    }

    /**
     * for debugging purposes
     * @return the left and right contexts in serialized, human-readable form
     */
    public String serializeLeftAndRightContext(){
        StringBuilder out = new StringBuilder();
        if (this.getLeftContext() != null)
            out.append("leftContext(").append(this.getLeftContext().serialize()).append(")");
        if (this.getRightContext() != null)
            out.append(Config.outputDelimiter).append("rightContext(").append(this.getRightContext().serialize()).append(")");
        return out.toString();
    }
}
| 3,869 | 29.96 | 106 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/NgramPairStatMap.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import java.util.Hashtable;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Counts co-occurrences of ordered n-gram pairs. The pair is keyed by the
 * concatenation "ngram1#ngram2" of the serialized forms, so (a,b) and (b,a)
 * are tracked separately.
 */
public class NgramPairStatMap{
    protected Hashtable<String, Integer> mapData;

    public NgramPairStatMap(){
        mapData = new Hashtable<String, Integer>(20);
    }

    /**
     * Records one co-occurrence of the given pair.
     * Bug fix: the previous implementation incremented a boxed local Integer
     * ({@code ++value}) without writing it back, so every pair's count stayed
     * at 1. The updated value is now stored back into the map.
     * NOTE(review): Hashtable is synchronized per call, but this get-then-put
     * is not atomic — confirm callers do not add concurrently.
     * @param ngram1 first member of the ordered pair
     * @param ngram2 second member of the ordered pair
     */
    public void add(NgramContainer ngram1, NgramContainer ngram2){
        String key = this.getStringFormOf(ngram1, ngram2);
        Integer value = mapData.get(key);
        if (value == null)
            mapData.put(key, 1);
        else
            mapData.put(key, value + 1);
    }

    /** Builds the map key for an ordered pair: "ngram1#ngram2". */
    protected String getStringFormOf(NgramContainer ngram1, NgramContainer ngram2) {
        return ngram1.serialize() + "#" + ngram2.serialize();
    }

    /**
     * @return how many times the ordered pair was recorded; 0 if never seen
     */
    public int getValueOf(NgramContainer ngram1, NgramContainer ngram2) {
        int result = 0;
        String key = this.getStringFormOf(ngram1, ngram2);
        if (mapData.containsKey(key))
            result = mapData.get(key);
        return result;
    }
}
| 1,298 | 29.209302 | 84 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/GraphContainerAbstract.java | package main.java.Graph.GraphStructure;
import main.java.PMI.FeatureHandler;
import main.java.PMI.Struct.NodePairFeatureSetContainer;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.*;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Base class for the similarity graph over n-grams: holds the node list, builds
 * PMI-based feature-score maps per node, populates edge weights pairwise, and
 * exports the graph in several file formats. Subclasses supply the concrete
 * node-list storage, location type, and n-gram counting strategy.
 */
public abstract class GraphContainerAbstract<LocationType extends Location> {
    public static final int defaultNgramSize = 3;
    /**
     * nodes of the graph
     */
    protected ArrayList<Node<LocationType>> nodeList;
    /**
     * total number of n-grams seen in input
     */
    protected int totalFrequency;
    /**
     * Use this array to store references to all graphs of the analyzed corpus.
     * Each member of this array having index=i represents the graph for (i+1)-grams
     */
    protected static final int nodeIdStartingIndex = 0;
    protected WordDictionary dictionaryOfClasses;
    protected WordDictionary dictionaryOfPrepositions;
    // edges with weight below this threshold are not added to the graph
    protected float edgeWeightThreshold;

    public GraphContainerAbstract(){
        initialize();
    }

    public GraphContainerAbstract(WordDictionary dictionaryOfClasses){
        this.initialize();
        this.dictionaryOfClasses = dictionaryOfClasses;
    }

    public GraphContainerAbstract(WordDictionary dictionaryOfClasses, WordDictionary dictionaryOfPrepositions){
        this.initialize();
        this.dictionaryOfClasses = dictionaryOfClasses;
        this.dictionaryOfPrepositions = dictionaryOfPrepositions;
    }

    /** Common constructor body: sets up storage and registers this graph. */
    protected void initialize() {
        initializeNodeList();
        totalFrequency = 0;
        this.edgeWeightThreshold = Config.edgeWeightThreshold;
        initializeNgramGraphsArray();
        storeSelfInGraphOfNgrams();
    }

    protected abstract void initializeNgramGraphsArray();
    protected abstract void initializeNodeList();
    protected abstract LocationType newLocationObject(int sequence, int position);
    protected abstract void storeSelfInGraphOfNgrams();

    /**
     * Finds a specified node using its ngram
     * @param iNode a given node
     * @return a non-negative integer represtenting the index of the node in the graph.
     *          If the specified node does not exist in the graph -1 is returned.
     */
    public int indexOf(Node<LocationType> iNode){
        int result = -1;
        Node<LocationType> currentNode;
        for(int i=0; i<nodeList.size() ; ++i){
            currentNode = nodeList.get(i);
            if (currentNode.equals(iNode)){
                result = i;
                break;
            }
        }
        return result;
    }

    /**
     * Adds a specified node to current graph. If node already exists adds a new location for the specified node.
     * Note: lookup is a linear scan, so bulk insertion is O(n^2) in the node count.
     * @param iNode the node to add to graph
     * @param location location of occurrence of node in text
     * @param labelProbabilityArray associated label probability for current occurrence of the node
     * @return index of the node in the node list
     */
    public int addNode(Node<LocationType> iNode, LocationType location, float[] labelProbabilityArray){
        int index = this.indexOf(iNode);
        int id;
        if(index < 0){
            id = nodeList.size() + GraphContainerAbstract.nodeIdStartingIndex;
            iNode.setNodeId(id);
            index = nodeList.size();
            nodeList.add(iNode);
        }else
            nodeList.get(index).increaseFrequency();
        nodeList.get(index).addLocation(location);
        nodeList.get(index).addLabelProbability(labelProbabilityArray);
        ++this.totalFrequency;
        return index;
    }

    /**
     * Adds a specified node to current graph. If node already exists adds a new location for the specified node.
     * @param iNode the node to add to graph
     * @param location location of occurrence of node in text
     * @return index of the node in the node list
     */
    public int addNode(Node<LocationType> iNode, LocationType location){
        return this.addNode(iNode, location, null);
    }

    /**
     * Adds a specified node to current graph. If node already exists adds a new location for the specified node.
     * @param iNode the node to add to graph
     * @return index of the node in the node list
     */
    public int addNode(Node<LocationType> iNode){
        return this.addNode(iNode, null, null);
    }

    /**
     * exports graph information to file as node and edge data
     * @param outputFileAddress address of the file to save graph information
     */
    public void exportGraphToFile(String outputFileAddress){
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        for (Node<LocationType> node : nodeList)
            fileOutput.write(node.serialize());
        fileOutput.close();
    }

    /**
     * Use this method to export graph data to file. Output format is as described below: </br>
     * [source-node word set] [destination-node word set] (Real number)edge-weight </br>
     * note: this method is only used for debug purposes
     * @param outputFileAddress address of the file to save graph information
     * @param dictionary a word dictionary which has wordId -> word mappings
     */
    public void exportGraphToFileAsWordSetsSimilarity(String outputFileAddress, WordDictionary dictionary){
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        for (Node<LocationType> node : nodeList)
            fileOutput.write(node.serializeAsWordSets(dictionary));
        fileOutput.close();
    }

    /** Exports one line per node: nodeId, serialized n-gram, frequency. */
    public void exportToFileAsIdMapping(String outputFileAddress){
        //todo: append a header section to the beginning of output file
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        String bufferData;
        for (Node<LocationType> node : nodeList) {
            bufferData = node.getNodeId() + Defaults.packageOutputDelimiter
                    + node.getNgram().serialize() + Defaults.packageOutputDelimiter
                    + node.getFrequency();
            fileOutput.writeLine(bufferData);
        }
        fileOutput.close();
    }

    /** Exports one line per node occurrence: nodeId, sequence, position. */
    public void exportToFileAsIdToLocationMapping(String outputFileAddress){
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        String bufferData;
        ArrayList<LocationType> locations;
        int currentNodeId;
        for (Node<LocationType> node : nodeList) {
            currentNodeId = node.getNodeId();
            locations = node.getLocationArrayList();
            for (Location location : locations) {
                bufferData = currentNodeId + Defaults.packageOutputDelimiter
                        + location.getSequence() + Defaults.packageOutputDelimiter
                        + location.getPosition();
                fileOutput.writeLine(bufferData);
            }
        }
        fileOutput.close();
    }

    /** Exports each node's type-level label probabilities (see Node.serializeTypeLabelProbabilities). */
    public void exportToFileAsIdToTypeLevelProbabilities(String outputFileAddress){
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        for (Node<LocationType> node : nodeList) {
            fileOutput.write(node.serializeTypeLabelProbabilities());
        }
        fileOutput.close();
    }

    /**
     * gets the index of the graph which stores information on ngrams of a specified size
     * @param ngramSize size of ngrams
     * @return zero-based index of graph in ngramGraph
     */
    protected int getIndexOfGraph(int ngramSize){
        return ngramSize-1;
    }

    /**
     * convert graph to a KNN-graph. In a KNN-graph some edges are removed so that each node only has a maximum
     * of K edges heading out of it. This is done in a way that K most valuable edges are preserved (e.g. only
     * K edges having the highest weights are preserved).</br>
     * note: Be careful, using this method will modify original graph data. Another version which modifies a clone
     * of the original graph can be implemented.
     * @param kValue value of K to use for pruning
     */
    public void convertToKNN(int kValue){
        for (Node<LocationType> node:nodeList)
            node.convertEdgesToKNN(kValue);
    }

    /**
     * Legacy single-threaded edge population: rebuilds node1's feature map for
     * every row and scores it against every later node.
     */
    public void populateEdgeValuesOld() {
        NodePairFeatureSetContainer featureScoreMap;
        Node<LocationType> node1,node2;
        RuntimeAnalyzer raTotal;
        raTotal = new RuntimeAnalyzer();
        raTotal.start("populateEdgeValues started");
        for (int i=0; i<nodeList.size() ; ++i){
            node1 = nodeList.get(i);
            featureScoreMap = new NodePairFeatureSetContainer();
            buildFeatureScoreMap(featureScoreMap, node1);
            for (int j=i+1; j<nodeList.size() ; ++j){
                node2 = nodeList.get(j);
                populateEdgeValue(featureScoreMap.makeCopy(), node1, node2);
            }
        }
        raTotal.finish("populateEdgeValues finished");
    }

    /**
     * Legacy partitioned variant: worker {@code seed} of {@code step} workers
     * handles rows seed, seed+step, seed+2*step, ...
     */
    public void populateEdgeValuesOld(int seed, int step) {
        NodePairFeatureSetContainer featureScoreMap;
        Node<LocationType> node1,node2;
        for (int i=seed; i<nodeList.size() ; i+=step){
            node1 = nodeList.get(i);
            featureScoreMap = new NodePairFeatureSetContainer();
            buildFeatureScoreMap(featureScoreMap, node1);
            for (int j=i+1; j<nodeList.size() ; ++j){
                node2 = nodeList.get(j);
                populateEdgeValue(featureScoreMap.makeCopy(), node1, node2);
            }
        }
    }

    /** Precomputes and caches the PMI feature map on every node. */
    public void buildFeatureScoreMapForNodes(){
        for (Node<LocationType> node:nodeList){
            node.setFeatureSetContainer(this.buildFeatureScoreMap(node));
        }
    }

    /** Partitioned variant of buildFeatureScoreMapForNodes (worker seed of step workers). */
    public void buildFeatureScoreMapForNodes(int seed, int step){
        Node<LocationType> node;
        for (int i=seed; i<nodeList.size() ; i+=step){
            node = nodeList.get(i);
            node.setFeatureSetContainer(this.buildFeatureScoreMap(node));
        }
    }

    /**
     * Populates edge weights for all node pairs using the feature maps cached
     * on the nodes (call buildFeatureScoreMapForNodes first).
     */
    public void populateEdgeValues() {
        Node<LocationType> node1,node2;
        RuntimeAnalyzer raTotal;
        raTotal = new RuntimeAnalyzer();
        raTotal.start("populateEdgeValues started");
        for (int i=0; i<nodeList.size() ; ++i){
            node1 = nodeList.get(i);
            for (int j=i+1; j<nodeList.size() ; ++j){
                node2 = nodeList.get(j);
                this.populateEdgeValue(node1, node2);
            }
        }
        raTotal.finish("populateEdgeValues finished");
    }

    /** Partitioned variant of populateEdgeValues (worker seed of step workers). */
    public void populateEdgeValues(int seed, int step) {
        Node<LocationType> node1,node2;
        for (int i=seed; i<nodeList.size() ; i+=step){
            node1 = nodeList.get(i);
            for (int j=i+1; j<nodeList.size() ; ++j){
                node2 = nodeList.get(j);
                populateEdgeValue(node1, node2);
            }
        }
    }

    /**
     * Fills an existing feature map with PMI scores (slot 0) for every feature
     * of every occurrence context of the node. Unlike the one-argument overload,
     * duplicates are re-scored rather than skipped.
     */
    protected NodePairFeatureSetContainer buildFeatureScoreMap(NodePairFeatureSetContainer featureScoreMap, Node<LocationType> node){
        int nodeIndex;
        NgramContainer[] featureArray, featureCombinedFormArray;
        double pmi;
        NgramContainer nodeContext;
        for (int i=0; i<node.getLocationArrayList().size() ; ++i){
            nodeContext = node.getContext(i);
            featureArray = FeatureHandler.extractFeaturesOfContext(nodeContext);
            featureCombinedFormArray = FeatureHandler.extractFeaturesInCombinedFormOfContext(nodeContext);
            for (int j=0; j<featureArray.length ; ++j){
                pmi = calculatePMIForPair(node.getNgram(), featureArray[j], featureCombinedFormArray[j]);
                nodeIndex = featureScoreMap.add(featureArray[j]);
                featureScoreMap.setScore(nodeIndex, 0, pmi);
            }
        }
        return featureScoreMap;
    }

    /**
     * Builds a fresh feature map with PMI scores (slot 0) for the node,
     * computing each distinct feature's PMI only once.
     */
    protected NodePairFeatureSetContainer buildFeatureScoreMap(Node<LocationType> node){
        int nodeIndex;
        NgramContainer[] featureArray, featureCombinedFormArray;
        double pmi;
        NgramContainer nodeContext;
        NodePairFeatureSetContainer featureScoreMap = new NodePairFeatureSetContainer();
        for (int i=0; i<node.getLocationArrayList().size() ; ++i){
            nodeContext = node.getContext(i);
            featureArray = FeatureHandler.extractFeaturesOfContext(nodeContext);
            featureCombinedFormArray = FeatureHandler.extractFeaturesInCombinedFormOfContext(nodeContext);
            for (int j=0; j<featureArray.length ; ++j){
                if (featureScoreMap.indexOf(featureArray[j]) < 0){
                    pmi = calculatePMIForPair(node.getNgram(), featureArray[j], featureCombinedFormArray[j]);
                    nodeIndex = featureScoreMap.add(featureArray[j]);
                    featureScoreMap.setScore(nodeIndex, 0, pmi);
                }
            }
        }
        return featureScoreMap;
    }

    /**
     * Computes similarity between two nodes from their cached feature maps and
     * adds a symmetric edge when it reaches the weight threshold.
     * Note: the negated "<" comparison means a NaN similarity also passes.
     */
    protected void populateEdgeValue(Node<LocationType> node1, Node<LocationType> node2) {
        float similarity = (float)node1.getFeatureSetContainer()
                .makeCopy()
                .measureSimilarity(node2.getFeatureSetContainer());
        if (! (similarity < edgeWeightThreshold) ){
            node1.addEdge(node2, similarity);
            node2.addEdge(node1, similarity);
        }
    }

    /**
     * Legacy variant: scores node2's features into slot 1 of a map already
     * holding node1's scores in slot 0, then measures similarity and adds a
     * symmetric edge when it reaches the weight threshold.
     */
    protected void populateEdgeValue(NodePairFeatureSetContainer featureScoreMap, Node<LocationType> node1, Node<LocationType> node2) {
        int nodeIndex;
        NgramContainer[] featureArray, featureCombinedFormArray;
        NgramContainer nodeContext;
        float pmi;
        for (int i=0; i<node2.getLocationArrayList().size() ; ++i){
            nodeContext = node2.getContext(i);
            featureArray = FeatureHandler.extractFeaturesOfContext(nodeContext);
            featureCombinedFormArray = FeatureHandler.extractFeaturesInCombinedFormOfContext(nodeContext);
            for (int j=0; j<featureArray.length ; ++j){
                pmi = (float)calculatePMIForPair(node2.getNgram(), featureArray[j], featureCombinedFormArray[j]);
                nodeIndex = featureScoreMap.add(featureArray[j]);
                featureScoreMap.setScore(nodeIndex, 1, pmi);
            }
        }
        float similarity = (float)featureScoreMap.measureSimilarity();
        if (! (similarity < edgeWeightThreshold) ){
            node1.addEdge(node2, similarity);
            node2.addEdge(node1, similarity);
        }
    }

    /**
     * Deprecated variant that rebuilds both nodes' feature scores from scratch
     * and adds the edge unconditionally (no threshold check).
     */
    protected void populateEdgeValueDeprecated(Node<LocationType> node1, Node<LocationType> node2) {
        NodePairFeatureSetContainer featureScoreMap = new NodePairFeatureSetContainer();
        int nodeIndex;
        NgramContainer[] featureArray, featureCombinedFormArray;
        float pmi;
        for (int i=0; i<node1.getLocationArrayList().size() ; ++i){
            featureArray = FeatureHandler.extractFeaturesOfContext(node1.getContext(i));
            featureCombinedFormArray = FeatureHandler.extractFeaturesInCombinedFormOfContext(node1.getContext(i));
            for (int j=0; j<featureArray.length ; ++j){
                pmi = (float)calculatePMIForPair(node1.getNgram(), featureArray[j], featureCombinedFormArray[j]);
                nodeIndex = featureScoreMap.add(featureArray[j]);
                featureScoreMap.setScore(nodeIndex, 0, pmi);
            }
        }
        for (int i=0; i<node2.getLocationArrayList().size() ; ++i){
            featureArray = FeatureHandler.extractFeaturesOfContext(node2.getContext(i));
            featureCombinedFormArray = FeatureHandler.extractFeaturesInCombinedFormOfContext(node2.getContext(i));
            for (int j=0; j<featureArray.length ; ++j){
                pmi = (float)calculatePMIForPair(node2.getNgram(), featureArray[j], featureCombinedFormArray[j]);
                nodeIndex = featureScoreMap.add(featureArray[j]);
                featureScoreMap.setScore(nodeIndex, 1, pmi);
            }
        }
        float similarity = (float)featureScoreMap.measureSimilarity();
        node1.addEdge(node2, similarity);
        node2.addEdge(node1, similarity);
    }

    /** Delegates PMI computation for an (ngram, feature) pair to FeatureHandler. */
    protected double calculatePMIForPair(NgramContainer ngram1, NgramContainer ngram2, NgramContainer combinedForm){
        return FeatureHandler.computePMIForPair(this.totalFrequency, ngram1, ngram2,combinedForm, this);
    }

    public abstract int getCountOfNgram(NgramContainer ngram);

    /**
     * Counts occurrences of an n-gram in this graph. For template n-grams all
     * matching nodes' frequencies are summed; otherwise the first exact match wins.
     */
    protected int getCountOfNgramInSelf(NgramContainer ngram){
        int result = 0;
        if (FeatureHandler.isTemplate(ngram)){
            for (Node<LocationType> node : nodeList) {
                if (node.getNgram().equalsWithTemplate(ngram)) {
                    result += node.getFrequency();
                }
            }
        }else {
            for (Node<LocationType> node : nodeList) {
                if (node.getNgram().equals(ngram)) {
                    result = node.getFrequency();
                    break;
                }
            }
        }
        return result;
    }

    public abstract void removeRedundantData();

    /**
     * @return the node stored at the given index
     * @throws IllegalArgumentException when the index is out of bounds
     */
    public Node<LocationType> getNodeAt(int nodeIndex){
        if (nodeIndex < nodeList.size())
            return this.nodeList.get(nodeIndex);
        else
            throw new IllegalArgumentException("nodeIndex out of ArrayList bounds in GraphContainerAbstract.getNodeAt method");
    }

    /** Recomputes empirical label probabilities on every node from its label counts. */
    public void updateNodesEmpiricalLabelProbabilities() {
        for (Node<LocationType> node:nodeList)
            node.updateLabelsEmpiricalProbabilities();
    }

    /** Exports every node's empirical label probabilities to the given file. */
    public void exportToFileAsEmpiricalProbabilities(String outputFileAddress) {
        TextFileOutput fileOutput = new TextFileOutput(outputFileAddress);
        for (Node<LocationType> node: nodeList)
            fileOutput.write(node.serializeAsEmpiricalProbabilities());
        fileOutput.close();
    }

    /**
     * Reports the percentage of nodes that are connected (directly or via one
     * hop through an already-marked neighbor) to labeled data, where "labeled"
     * means the node's n-gram appears in the given dictionary.
     * @param labeledNodesDictionaryFileAddress file to load the labeled-word dictionary from
     */
    public void getGraphAnalytics(String labeledNodesDictionaryFileAddress){
        WordDictionary labeledWordsDictionary = new WordDictionary();
        labeledWordsDictionary.buildDictionaryFromFile(labeledNodesDictionaryFileAddress);
        boolean[] isMemberOfLabeledData = DataTypeManipulator.newInitializedBooleanArray(nodeList.size());
        ArrayList<Edge<LocationType>> edges;
        Node<LocationType> currentNode;
        for (int index=0; index<nodeList.size() ;++index){
            currentNode = nodeList.get(index);
            if (!isMemberOfLabeledData[index] && currentNode.isMemberOfDictionary(labeledWordsDictionary))
                isMemberOfLabeledData[index] = true;
            edges = currentNode.getEdgeArrayList();
            if (isMemberOfLabeledData[index]) {
                for (Edge edge:edges)
                    isMemberOfLabeledData[edge.getDestination().getNodeId()] = true;
            } else {
                for (Edge edge:edges)
                    if (isMemberOfLabeledData[edge.getDestination().getNodeId()] ||
                            edge.getDestination().isMemberOfDictionary(labeledWordsDictionary)){
                        isMemberOfLabeledData[index] = true;
                        //this code can be injected for optimization isMemberOfLabeledData[edge.getDestination().getNodeId()] = true;
                        break;
                    }
            }
        }
        //output information
        int countOfNodesNotConnectedWithLabeledData = 0;
        for (boolean isLabeled:isMemberOfLabeledData)
            if (!isLabeled)
                ++countOfNodesNotConnectedWithLabeledData;
        // bug fix: the ratio was printed as a fraction in [0,1] while the message
        // claims a percentage; scale by 100 so the "%" suffix is correct
        float percentOfNotConnectedNodes = 100f * (float)countOfNodesNotConnectedWithLabeledData / (float)isMemberOfLabeledData.length;
        System.out.println(Defaults.packageExceptionPrefix + "[Info]: " + percentOfNotConnectedNodes
                + "% of nodes (" + countOfNodesNotConnectedWithLabeledData + " out of " + isMemberOfLabeledData.length
                + " nodes) are not connected to any labeled node.");
    }

    /**
     * Adds the n-grams of one sentence (sequence) to the graph, skipping the
     * first and last members of the set, and threads each occurrence's
     * left/right context through consecutive locations.
     */
    public void addNgramsToGraph(NgramContainer[] ngramSet, int sequence) {
        LocationType currentLocation, previousLocation = null;
        int position = 0; //position of the word in current sentence (sequence)
        NgramContainer previousNgram = null;
        for (int i=1; i<ngramSet.length-1 ; ++i) {
            //todo: this was changed
            //for (NgramContainer ngram : ngramSet) {
            currentLocation = this.newLocationObject(sequence, position);
            currentLocation.setPreviousLocation(previousLocation, previousNgram, ngramSet[i]);
            this.addNode(new Node<LocationType>(ngramSet[i]), currentLocation);//add node to graph or else update node frequency
            ++position;
            previousLocation = currentLocation;
            previousNgram = ngramSet[i];
        }
    }

    /**
     * Adds the n-grams of one sentence to the graph together with per-occurrence
     * label probabilities read from the location-to-label mapping file.
     * Note: unlike the two-argument overload, this iterates the whole set.
     */
    public void addNgramsToGraph(NgramContainer[] ngramSet, int sequence, int labelCount, LocationToLabelFileHandler fileInputLocationToLabelMapping){
        int position = 0;
        LocationType currentLocation, previousLocation = null;
        NgramContainer previousNgram = null;
        float[] labelProbabilitiesArray;
        Node<LocationType> tempNode;
        for (NgramContainer ngram : ngramSet) {
            currentLocation = this.newLocationObject(sequence, position);
            currentLocation.setPreviousLocation(previousLocation, previousNgram, ngram);
            labelProbabilitiesArray = fileInputLocationToLabelMapping.getLabelProbabilitiesOf(sequence, position, labelCount);
            tempNode = new Node<LocationType>(ngram, labelCount);
            this.addNode(tempNode, currentLocation, labelProbabilitiesArray);//add node to graph or else update node frequency
            ++position;
            previousLocation = currentLocation;
            previousNgram = ngram;
        }
    }

    public WordDictionary getDictionaryOfClasses() {
        return dictionaryOfClasses;
    }

    public WordDictionary getDictionaryOfPrepositions() {
        return dictionaryOfPrepositions;
    }

    /** Base implementation returns null; POS-aware subclasses override. */
    public NgramStatMap getNgramStatMapForPOS(){
        return null;
    }

    /** Base implementation returns null; POS-aware subclasses override. */
    public NgramPairStatMap getNgramPairStatMapForPOS(){
        return null;
    }
}
| 22,351 | 37.537931 | 150 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/LocationLabelProbability.java | package main.java.Graph.GraphStructure;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Plain data holder for one labeled corpus position: the sentence index
 * (sequence), word offset (position), the label id assigned there, and the
 * probability of that label.
 */
public class LocationLabelProbability {
    private int sequence;
    private int position;
    private int labelId;
    private float labelProbability;

    /** @return sentence index of this record */
    public int getSequence() {
        return sequence;
    }

    /** @param sequence sentence index of this record */
    public void setSequence(int sequence) {
        this.sequence = sequence;
    }

    /** @return word offset within the sentence */
    public int getPosition() {
        return position;
    }

    /** @param position word offset within the sentence */
    public void setPosition(int position) {
        this.position = position;
    }

    /** @return numeric id of the label assigned at this position */
    public int getLabelId() {
        return labelId;
    }

    /** @param labelId numeric id of the label assigned at this position */
    public void setLabelId(int labelId) {
        this.labelId = labelId;
    }

    /** @return probability of the assigned label */
    public float getLabelProbability() {
        return labelProbability;
    }

    /** @param labelProbability probability of the assigned label */
    public void setLabelProbability(float labelProbability) {
        this.labelProbability = labelProbability;
    }
}
| 1,090 | 22.212766 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/NgramStatMap.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import java.util.ArrayList;
import java.util.Hashtable;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Counts occurrences of individual n-grams, keyed by their serialized form.
 */
public class NgramStatMap {
    protected Hashtable<String, Integer> mapData;

    public NgramStatMap(){
        mapData = new Hashtable<String, Integer>(20);
    }

    /**
     * Records one occurrence of the given n-gram.
     * Bug fix: the previous implementation incremented a boxed local Integer
     * ({@code ++value}) without writing it back, so every n-gram's count stayed
     * at 1. The updated value is now stored back into the map.
     * NOTE(review): Hashtable is synchronized per call, but this get-then-put
     * is not atomic — confirm callers do not add concurrently.
     * @param ngram the n-gram to count
     */
    public void add(NgramContainer ngram){
        String key = this.getStringFormOf(ngram);
        Integer value = mapData.get(key);
        if (value == null)
            mapData.put(key, 1);
        else
            mapData.put(key, value + 1);
    }

    /** Builds the map key: the n-gram's serialized form. */
    protected String getStringFormOf(NgramContainer ngram) {
        return ngram.serialize();
    }

    /**
     * @return how many times the n-gram was recorded; 0 if never seen
     */
    public int getValueOf(NgramContainer ngram) {
        int result = 0;
        String key = this.getStringFormOf(ngram);
        if (mapData.containsKey(key))
            result = mapData.get(key);
        return result;
    }
}
| 1,201 | 26.318182 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/Location.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import main.java.TextToNgram.Utils;
import main.java.Utility.Config;
import main.java.Utility.DataTypeManipulator;
import java.util.StringTokenizer;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Position of an ngram occurrence inside the corpus: the zero-based sentence
 * index (sequence) and zero-based token index of the ngram's center word
 * (position), plus the two-word contexts immediately left and right of the
 * occurrence.
 */
public class Location {
    // two-word windows on each side of the ngram occurrence
    protected NgramContainer leftContext, rightContext;
    // zero-based sentence index within the corpus
    protected int sequence;
    // zero-based position of the ngram's center word in the sentence
    protected int position;
    public int getSequence() {
        return sequence;
    }
    public void setSequence(int sequence) {
        this.sequence = sequence;
    }
    public int getPosition() {
        return position;
    }
    public void setPosition(int position) {
        this.position = position;
    }
    /** Convenience setter for both coordinates at once. */
    public void setSeqAndPos(int seq, int pos){
        setSequence(seq);
        setPosition(pos);
    }
    // NOTE(review): this method is overridden by LocationWithPOSTags and is
    // invoked from the constructors below — an overridable call during
    // construction. The subclass relies on this dispatch to set up its own
    // context fields; do not make this private/final without auditing it.
    protected void initialize(){
        this.leftContext = new NgramContainer(2);
        this.rightContext = new NgramContainer(2);
        this.sequence = 0;
        this.position = 0;
    }
    public Location(int seq, int pos){
        this.initialize();
        this.sequence = seq;
        this.position = pos;
    }
    // Copy constructor: copies only the coordinates; fresh (empty) context
    // objects are created by initialize(), the source's contexts are NOT copied.
    public Location(Location oldCopy){
        this.initialize();
        this.sequence = oldCopy.sequence;
        this.position = oldCopy.position;
    }
    /**
     * Parses one line of a location-to-label-probability file.
     * Expected format (whitespace separated):
     * {@code sequence position labelId probability}
     * @param lineOfData the raw line to parse
     * @return the parsed record, or null when the line does not have exactly
     *         four tokens
     */
    public static LocationLabelProbability extractLocationFromString(String lineOfData){
        LocationLabelProbability loc;
        StringTokenizer stringTokenizer = new StringTokenizer(lineOfData, " \t");
        int countTokens = stringTokenizer.countTokens();
        if(countTokens == 4){
            loc = new LocationLabelProbability();
            int seq = Integer.parseInt(stringTokenizer.nextToken());
            int pos = Integer.parseInt(stringTokenizer.nextToken());
            int labelId = Integer.parseInt(stringTokenizer.nextToken());
            float probability = Float.parseFloat(stringTokenizer.nextToken());
            loc.setSequence(seq);
            loc.setPosition(pos);
            loc.setLabelId(labelId);
            loc.setLabelProbability(probability);
        }else {
            loc = null;
        }
        return loc;
    }
    /**
     * Stitches context windows between two adjacent ngram occurrences: the
     * previous location's right context is filled from the current ngram, and
     * this location's left context from the previous ngram. No-op when
     * previousLocation is null (start of a sentence).
     * NOTE(review): member indexes 0..2 assume tri-grams — confirm callers
     * always pass 3-grams here.
     */
    public void setPreviousLocation(Location previousLocation,
                                    NgramContainer previousNgram, NgramContainer currentNgram){
        if (previousLocation != null){
            previousLocation.getRightContext().setMemberValue(0, currentNgram.getMemberValue(1));
            previousLocation.getRightContext().setMemberValue(1, currentNgram.getMemberValue(2));
            this.getLeftContext().setMemberValue(0, previousNgram.getMemberValue(0));
            this.getLeftContext().setMemberValue(1, previousNgram.getMemberValue(1));
        }
    }
    public NgramContainer getLeftContext() {
        return leftContext;
    }
    public void setLeftContext(NgramContainer leftContext) {
        this.leftContext = leftContext;
    }
    public NgramContainer getRightContext() {
        return rightContext;
    }
    public void setRightContext(NgramContainer rightContext) {
        this.rightContext = rightContext;
    }
    /**
     * Serializes the left/right context windows, for debugging purposes only.
     * Note: when leftContext is null but rightContext is not, the result
     * starts with the output delimiter.
     * @return the serialized contexts, e.g. {@code leftContext(...)	rightContext(...)}
     */
    public String serializeLeftAndRightContext(){
        String result = "";
        if (this.getLeftContext() != null)
            result += "leftContext(" + this.getLeftContext().serialize() + ")";
        if (this.getRightContext() != null)
            result += Config.outputDelimiter + "rightContext(" + this.getRightContext().serialize() + ")";
        return result;
    }
}
| 3,797 | 29.878049 | 106 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/LocationWithPOSTags.java | package main.java.Graph.GraphStructure;
import main.java.TextToNgram.NgramContainer;
import main.java.Utility.LocationToLabelFileHandler;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * A {@link Location} that additionally carries part-of-speech tags: the POS
 * tags of the ngram itself plus two-tag POS contexts on each side.
 */
public class LocationWithPOSTags extends Location {
    // two-tag POS windows on each side of the ngram occurrence
    protected NgramContainer leftContextPOSTags, rightContextPOSTags;
    // POS tags of the ngram at this location
    protected NgramContainer ngramPOSTags;
    // Called virtually from the superclass constructors; also initializes the
    // POS context windows on top of the word contexts.
    protected void initialize(){
        super.initialize();
        this.leftContextPOSTags = new NgramContainer(2);
        this.rightContextPOSTags = new NgramContainer(2);
    }
    public LocationWithPOSTags(int seq, int pos, NgramContainer POSTags){
        super(seq, pos);
        // NOTE(review): super(seq, pos) already created these via the
        // overridden initialize(); the re-assignment below is redundant but
        // harmless — confirm before removing.
        this.leftContext = new NgramContainer(2);
        this.rightContext = new NgramContainer(2);
        this.ngramPOSTags = POSTags;
    }
    // NOTE(review): shallow copy — context and POS-tag objects are shared by
    // reference with oldCopy, so mutating them affects both instances.
    public LocationWithPOSTags(LocationWithPOSTags oldCopy){
        super(oldCopy);
        this.leftContext = oldCopy.getLeftContext();
        this.rightContext = oldCopy.getRightContext();
        this.ngramPOSTags = oldCopy.getNgramPOSTags();
    }
    public NgramContainer getLeftContextPOSTags(){
        return this.leftContextPOSTags;
    }
    public NgramContainer getRightContextPOSTags(){
        return this.rightContextPOSTags;
    }
    public NgramContainer getNgramPOSTags(){
        return this.ngramPOSTags;
    }
    /**
     * Stitches both the word contexts (via the superclass) and the POS-tag
     * contexts between two adjacent ngram occurrences. No-op when
     * previousLocation is null (start of a sentence).
     * NOTE(review): member indexes 0..2 assume tri-grams — confirm callers
     * always pass 3-grams here.
     */
    public void setPreviousLocation(LocationWithPOSTags previousLocation,
                                    NgramContainer previousNgram, NgramContainer currentNgram,
                                    NgramContainer previousPOSTag, NgramContainer currentPOSTag){
        if (previousLocation != null){
            super.setPreviousLocation(previousLocation, previousNgram, currentNgram);
            previousLocation.getRightContextPOSTags().setMemberValue(0, currentPOSTag.getMemberValue(1));
            previousLocation.getRightContextPOSTags().setMemberValue(1, currentPOSTag.getMemberValue(2));
            this.getLeftContextPOSTags().setMemberValue(0, previousPOSTag.getMemberValue(0));
            this.getLeftContextPOSTags().setMemberValue(1, previousPOSTag.getMemberValue(1));
        }
    }
}
| 2,341 | 36.174603 | 105 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/GraphStructure/GraphContainer.java | package main.java.Graph.GraphStructure;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import java.util.ArrayList;
//todo: ngram search should be modified after adding POS feature capability
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
//todo: add <Location extends Location> for GraphContainer
/**
 * Concrete ngram graph over plain {@link Location} nodes. Keeps references to
 * the sibling graphs built for the other supported ngram sizes so that ngram
 * frequency lookups can be delegated by size.
 */
public class GraphContainer extends GraphContainerAbstract<Location>{

    /** Per-size references to the ngram graphs; the index is derived from the ngram size. */
    protected GraphContainer[] ngramGraph;

    public GraphContainer(){
        super();
    }

    public GraphContainer(WordDictionary dictionaryOfClasses){
        super(dictionaryOfClasses);
    }

    public GraphContainer(WordDictionary dictionaryOfClasses, WordDictionary dictionaryOfPrepositions){
        super(dictionaryOfClasses, dictionaryOfPrepositions);
    }

    @Override
    protected void initializeNgramGraphsArray() {
        // one slot per supported ngram size
        this.ngramGraph = new GraphContainer[5];
    }

    @Override
    protected void initializeNodeList() {
        this.nodeList = new ArrayList<Node<Location>>();
    }

    @Override
    protected Location newLocationObject(int sequence, int position) {
        return new Location(sequence, position);
    }

    @Override
    protected void storeSelfInGraphOfNgrams() {
        // this instance holds the default-size (tri-gram) data, so register itself
        this.setGraphOfNgram(GraphContainerAbstract.defaultNgramSize, this);
    }

    /**
     * Registers the graph that holds ngrams of the given size.
     * @param ngramSize size of ngram
     * @param graph the graph object holding ngrams of that size
     */
    public void setGraphOfNgram(int ngramSize, GraphContainer graph){
        int index = this.getIndexOfGraph(ngramSize);
        this.ngramGraph[index] = graph;
    }

    /**
     * Looks up the graph registered for the given ngram size.
     * @param ngramSize size of ngram
     * @return the GraphContainer holding ngrams of the requested size
     */
    public GraphContainer getGraphOfNgram(int ngramSize){
        int index = this.getIndexOfGraph(ngramSize);
        return this.ngramGraph[index];
    }

    /**
     * Counts the occurrences of an ngram by delegating to the graph of the
     * ngram's own size.
     */
    public int getCountOfNgram(NgramContainer ngram){
        GraphContainer sizeGraph = this.getGraphOfNgram(ngram.getSize());
        return sizeGraph.getCountOfNgramInSelf(ngram);
    }

    /** Drops the per-size graph references so they become eligible for garbage collection. */
    public void removeRedundantData() {
        for (int index = ngramGraph.length - 1; index >= 0; --index)
            this.ngramGraph[index] = null;
    }
}
| 2,461 | 30.164557 | 109 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRFWordClassWithPOSImpl.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReaderWithPOSTags;
import main.java.Utility.Logger;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphBuilderStandardCRFWordClassWithPOSImpl extends GraphBuilderStandardCRFWithPOS {
public GraphBuilderStandardCRFWordClassWithPOSImpl(Logger logger){
super(logger);
}
protected String getSentence(CRFFileReaderWithPOSTags crfFileReader){
return crfFileReader.getWordClassSentence();
}
}
| 705 | 32.619048 | 97 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRFWordClassImpl.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReader;
import main.java.Utility.*;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphBuilderStandardCRFWordClassImpl extends GraphBuilderStandardCRF {
public GraphBuilderStandardCRFWordClassImpl(Logger logger){
super(logger);
}
protected String getSentence(CRFFileReader crfFileReader){
return crfFileReader.getWordClassSentence();
}
}
| 658 | 28.954545 | 83 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRFWordsImpl.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReader;
import main.java.Utility.*;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphBuilderStandardCRFWordsImpl extends GraphBuilderStandardCRF {
public GraphBuilderStandardCRFWordsImpl(Logger logger){
super(logger);
}
protected String getSentence(CRFFileReader crfFileReader){
return crfFileReader.getWordSentence();
}
}
| 645 | 28.363636 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/IGraphBuilder.java | package main.java.Graph.Builder;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.Graph.GraphStructure.GraphContainerAbstract;
import main.java.Text.WordDictionary;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
/**
 * Builds similarity graphs over the ngrams of a corpus and exports them in
 * several formats. Implementations differ in how they read sentences from the
 * CRF input (plain words vs. word classes).
 */
public interface IGraphBuilder {
    /**
     * Creates a graph of ngrams extracted from the sentences of a given text file.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    GraphContainer createGraphFromFileBase(String corpusFileAddress, int ngramSize);
    /**
     * Creates a graph of ngrams extracted from the sentences of a given text
     * file and adds this data to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    GraphContainer createGraphFromFileBase(GraphContainer graph, String corpusFileAddress, int ngramSize);
    /**
     * Calculates label probabilities for each tri-gram.
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of the labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing label probability for each location
     * @return a graph of nodes containing label probability data for each node
     */
    GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(String corpusFileAddress,
                                                                        String labelsFileAddress,
                                                                        String wordLocationLabelProbabilityFileAddress);
    /**
     * Calculates label probabilities for each tri-gram and adds the new data
     * to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of the labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing label probability for each location
     * @return a graph of nodes containing label probability data for each node
     */
    GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(GraphContainer graph,
                                                                        String corpusFileAddress,
                                                                        String labelsFileAddress,
                                                                        String wordLocationLabelProbabilityFileAddress);
    /**
     * Calculates the marginal probability for each tri-gram.
     * @param corpusFileAddress address of the text file to read from
     * @return a graph of nodes containing marginal probability data for each node
     */
    GraphContainer createGraphFromFileBaseForMarginalsCalculation(String corpusFileAddress);
    /**
     * Calculates the marginal probability for each tri-gram and adds the new
     * data to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @return a graph of nodes containing marginal probability data for each node
     */
    GraphContainer createGraphFromFileBaseForMarginalsCalculation(GraphContainer graph, String corpusFileAddress);
    /**
     * Creates the graph of tri-grams for a given corpus.
     * @deprecated This is the sequential version of
     * {@link #createGraphFromFileMultiThread(String)}. Be aware that running
     * this implementation requires considerably more time than the
     * multi-thread version.
     * @param corpusFileAddress address of input text
     * @param labelsFileAddress address of labels file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing location to
     *                                                label probability mappings.
     *                                                Each line of this file is formatted as:
     *                                                {@code #sequence #position #labelIndex (real)probability}
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainer createGraphFromFile(String corpusFileAddress,
                                       String labelsFileAddress,
                                       String wordLocationLabelProbabilityFileAddress);
    /**
     * Creates the graph of tri-grams for a given corpus.
     * @param corpusFileAddress address of input text
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainer createGraphFromFileMultiThread(String corpusFileAddress);
    /**
     * Creates the graph of tri-grams for a given labeled corpus plus an
     * unlabeled corpus.
     * @param corpusFileAddress address of input text
     * @param corpusUnlabeledFileAddress address of the unlabeled input text
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainer createGraphFromFileMultiThread(String corpusFileAddress,
                                                  String corpusUnlabeledFileAddress);
    /**
     * Same as {@link #createGraphFromFileMultiThread(String, String)} but adds
     * the new data to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of input text
     * @param corpusUnlabeledFileAddress address of the unlabeled input text
     * @return a graph of tri-grams of the given corpus
     */
    public GraphContainer createGraphFromFileMultiThread(GraphContainer graph, String corpusFileAddress,
                                                         String corpusUnlabeledFileAddress);
    /**
     * Exports graph nodes as node id to ngram mapping. Output format:<br>
     * {@code #nodeId [space separated ngram members]}
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToNgramMapping(GraphContainer graph, String outputFileAddress);
    /**
     * Exports graph data to file. Output format:<br>
     * {@code #source-nodeId #destination-nodeId (real)edge-weight}
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveGraphToFile(GraphContainer graph, String outputFileAddress);
    /**
     * Exports graph data to file. Output format:<br>
     * {@code [source-node word set] [destination-node word set] (real)edge-weight}
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     * @param dictionary a dictionary object mapping word-index to word
     */
    void saveGraphToFileAsWordSets(GraphContainer graph, String outputFileAddress, WordDictionary dictionary);
    /**
     * Exports graph nodes' data to file. Output format:<br>
     * {@code #nodeId #sequence #position}<br>
     * Sequence and position match the sentence number and the position of the
     * n-gram center word in the sentence; both indexes are zero-based.
     * @deprecated this method is only used for debugging purposes.
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToLocationMapping(GraphContainer graph, String outputFileAddress);
    /**
     * Exports the type probability information contained in the graph.
     * Output format:<br>
     * {@code nodeIdInSerializedForm [TAB] #labelId [TAB] (real)probability}
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToTypeLevelProbabilities(GraphContainer graph, String outputFileAddress);
    /**
     * Exports type marginal probabilities to a file.
     * Output format:<br>
     * {@code nodeIdInSerializedForm [TAB] #labelId [TAB] (real)probability}
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsTypeLevelEmpiricalLabelProbabilities(GraphContainer graph, String outputFileAddress);
}
| 8,533 | 51.679012 | 121 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRFWordsWithPOSImpl.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReader;
import main.java.CRF.CRFFileReaderWithPOSTags;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.Graph.GraphStructure.GraphContainerWithPOS;
import main.java.Graph.GraphStructure.NodeWithPartOfSpeech;
import main.java.TextToNgram.NgramContainer;
import main.java.TextToNgram.NgramUtility;
import main.java.Utility.Logger;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphBuilderStandardCRFWordsWithPOSImpl extends GraphBuilderStandardCRFWithPOS {
public GraphBuilderStandardCRFWordsWithPOSImpl(Logger logger){
super(logger);
}
protected String getSentence(CRFFileReaderWithPOSTags crfFileReader){
return crfFileReader.getWordSentence();
}
}
| 991 | 35.740741 | 93 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderImplOld.java | package main.java.Graph.Builder;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.Graph.GraphStructure.Node;
import main.java.Graph.GraphStructure.Location;
import main.java.Graph.Concurrency.GraphThreadHandler;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.TextToNgram.NgramUtility;
import main.java.Utility.*;
/**
* Use this class to create a weighted graph from a file containing n-grams of a text and export the resulting graph
* in a desired way.
*/
public class GraphBuilderImplOld {
    // K used when reducing the dense graph to a K-nearest-neighbor graph
    private static final int knnDefaultSize = 5;
    // receives task start/finish events for runtime logging
    private Logger logHandler;
    public GraphBuilderImplOld(Logger logger){
        this.logHandler = logger;
    }
    /**
     * Builds a graph from a file that already contains one ngram per line
     * (see NgramUtility.sentenceToNgram for the expected line format).
     * Invalid lines are skipped; sequence/position counters are advanced per
     * ngram, resetting position at each beginning-of-line ngram.
     * NOTE(review): nodeId is initialized to 1 and never incremented, so every
     * Node is constructed with id 1 — confirm GraphContainer.addNode assigns
     * the real node id internally.
     */
    public GraphContainer createGraphFromNgramFile(String ngramFileAddress){
        String line;
        NgramContainer ngram;
        NgramUtility ngramUtil = new NgramUtility();
        GraphContainer graph = new GraphContainer();
        Node tempNode;
        int nodeId = 1;
        int seq, pos;
        seq = -1;
        pos = 0;
        Location currentLocation;
        TextFileInput fileInput = new TextFileInput(ngramFileAddress);
        while ((line = fileInput.readLine()) != null) {
            ngram = ngramUtil.sentenceToNgram(line);
            if(ngram == null)
                continue;//invalid line, ignore
            currentLocation = new Location(seq,pos);
            if(ngram.isBeginningOfLine()){
                ++seq;
                pos = 0;
                currentLocation.setSeqAndPos(seq,pos);
            }
            tempNode = new Node(nodeId, ngram, 0);
            graph.addNode(tempNode,currentLocation, null);//add node to graph or else update node frequency
            ++pos;
        }
        fileInput.close();
        return graph;
    }
    /**
     * Like {@link #createGraphFromNgramFile(String)} but also attaches, to each
     * node, the label probabilities read from the location-to-label file
     * (lines formatted as: #sequence #position #labelIndex probability) and
     * stitches left/right contexts between consecutive ngrams.
     * NOTE(review): nodeId is never incremented here either — see note above.
     */
    public GraphContainer createGraphFromNgramFile(String ngramFileAddress, String labelsFileAddress,
                                   String wordLocationLabelProbabilityFileAddress){
        String line;
        NgramContainer ngram, previousNgram = null;
        NgramUtility ngramUtil = new NgramUtility();
        GraphContainer graph = new GraphContainer();
        Node tempNode;
        int nodeId = 1;
        int labelCount = LabelFileHandler.countLabels(labelsFileAddress);
        float[] labelProbabilitiesArray;
        int sequence, position;
        sequence = -1;
        position = 0;
        Location currentLocation, previousLocation = null;
        TextFileInput fileInput = new TextFileInput(ngramFileAddress);
        LocationToLabelFileHandler fileInputLocationToLabelMapping =
                new LocationToLabelFileHandler(wordLocationLabelProbabilityFileAddress);
        while ((line = fileInput.readLine()) != null) {
            ngram = ngramUtil.sentenceToNgram(line);
            if(ngram == null)
                continue;//invalid line, ignore
            currentLocation = new Location(sequence,position);
            if(ngram.isBeginningOfLine()){
                ++sequence;
                position = 0;
                currentLocation.setSeqAndPos(sequence,position);
                previousLocation = null;
            }
            currentLocation.setPreviousLocation(previousLocation, previousNgram, ngram);
            labelProbabilitiesArray = fileInputLocationToLabelMapping.getLabelProbabilitiesOf(sequence, position, labelCount);
            tempNode = new Node(nodeId, ngram, labelCount);
            graph.addNode(tempNode, currentLocation, labelProbabilitiesArray);//add node to graph or else update node frequency
            ++position;
            previousLocation = currentLocation;
            previousNgram = ngram;
        }
        fileInput.close();
        return graph;
    }
    /**
     * Extracts tri-grams directly from the corpus text (three physical lines
     * are consumed per sentence: the first two are skipped as duplicates) and
     * builds the node set with label probabilities attached.
     * NOTE(review): fileInput is never closed in this method, and nodeId is
     * never incremented — confirm both are intentional.
     */
    public GraphContainer createGraphFromFileBase(String corpusFileAddress,
                                                  String labelsFileAddress,
                                                  String wordLocationLabelProbabilityFileAddress){
        String line;
        NgramContainer[] ngramSet;
        TextFileInput fileInput = new TextFileInput(corpusFileAddress);
        int labelCount = LabelFileHandler.countLabels(labelsFileAddress);
        LocationToLabelFileHandler fileInputLocationToLabelMapping =
                new LocationToLabelFileHandler(wordLocationLabelProbabilityFileAddress);
        NgramUtility ngramUtility = new NgramUtility();
        int ngramSize = 3;
        Node tempNode;
        int nodeId = 1;
        int sequence = -1, position = 0;
        Location currentLocation, previousLocation = null;
        NgramContainer previousNgram = null;
        float[] labelProbabilitiesArray;
        GraphContainer graph = new GraphContainer();
        while ((line = fileInput.readLine()) != null) {
            //ignore first line
            //todo: if input file is corrected next line of code should be removed
            fileInput.readLine(); //ignore one line because of duplicate sentence
            line = fileInput.readLine();
            ngramSet = ngramUtility.extractNgramsFromSentenceDefaultWithEscapeCharacters(line, ngramSize);
            for(int i=0; i<ngramSet.length ; ++i){
                currentLocation = new Location(sequence,position);
                if(ngramSet[i].isBeginningOfLine()){
                    ++sequence;
                    position = 0;
                    currentLocation.setSeqAndPos(sequence,position);
                    previousLocation = null;
                }
                currentLocation.setPreviousLocation(previousLocation, previousNgram, ngramSet[i]);
                labelProbabilitiesArray = fileInputLocationToLabelMapping.getLabelProbabilitiesOf(sequence, position, labelCount);
                tempNode = new Node(nodeId, ngramSet[i], labelCount);
                graph.addNode(tempNode, currentLocation, labelProbabilitiesArray);//add node to graph or else update node frequency
                ++position;
                previousLocation = currentLocation;
                previousNgram = ngramSet[i];
            }
        }
        return graph;
    }
    /**
     * Extracts ngrams of the requested size from the corpus (same three-line
     * sentence layout as above) and builds a plain frequency graph, without
     * locations or label probabilities.
     * NOTE(review): fileInput is never closed in this method.
     */
    public GraphContainer createGraphFromFileBase(String corpusFileAddress, int ngramSize){
        String line;
        NgramContainer[] ngramSet;
        TextFileInput fileInput = new TextFileInput(corpusFileAddress);
        NgramUtility ngramUtility = new NgramUtility();
        Node tempNode;
        int nodeId = 1;
        GraphContainer graph = new GraphContainer();
        while ((line = fileInput.readLine()) != null) {
            //ignore first line
            //todo: if input file is corrected next line of code should be removed
            fileInput.readLine(); //ignore one line because of duplicate sentence
            line = fileInput.readLine();
            ngramSet = ngramUtility.extractNgramsFromSentenceDefaultWithEscapeCharacters(line, ngramSize);
            for(int i=0; i<ngramSet.length ; ++i){
                tempNode = new Node(ngramSet[i]);
                graph.addNode(tempNode);//add node to graph or else update node frequency
            }
        }
        return graph;
    }
    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * Builds the tri-gram graph plus 1/2/4/5-gram frequency graphs, computes
     * PMI-based edge weights in two multi-threaded passes, then reduces the
     * result to a KNN graph.
     * @param corpusFileAddress address of input text.
     * @param labelsFileAddress address of labels file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing location to
     *                                                label probability mappings.
     *                                                each line of this file is formatted as below: <br>
     *                                                #sequence #position #labelIndex (Real number)probability
     * @return a graph of tri-grams of the given corpus
     */
    public GraphContainer createGraphFromFileMultiThread(String corpusFileAddress,
                                                         String labelsFileAddress,
                                                         String wordLocationLabelProbabilityFileAddress){
        RuntimeAnalyzer ra;
        ra = logHandler.taskStarted("[GraphBuilderImplOld]- creating nodes of ngram graphs");
        GraphContainer baseGraph = this.createGraphFromFileBase(corpusFileAddress,labelsFileAddress,wordLocationLabelProbabilityFileAddress);
        GraphContainer unigramGraph = this.createGraphFromFileBase(corpusFileAddress, 1);
        GraphContainer bigramGraph = this.createGraphFromFileBase(corpusFileAddress, 2);
        GraphContainer fourgramGraph = this.createGraphFromFileBase(corpusFileAddress, 4);
        GraphContainer fivegramGraph = this.createGraphFromFileBase(corpusFileAddress, 5);
        logHandler.taskFinished(ra, "[GraphBuilderImplOld]- creating nodes of ngram graphs");
        ra = logHandler.taskStarted("[GraphBuilderImplOld]- calculating pmi values and assigning edge weights for nodes");
        // register the auxiliary ngram graphs on the base graph, then drop the
        // local references (the base graph now owns them)
        baseGraph.setGraphOfNgram(1 ,unigramGraph);
        baseGraph.setGraphOfNgram(2, bigramGraph);
        baseGraph.setGraphOfNgram(4, fourgramGraph);
        baseGraph.setGraphOfNgram(5, fivegramGraph);
        unigramGraph = null;
        bigramGraph = null;
        fourgramGraph = null;
        fivegramGraph = null;
        try{
            int threadCount = 8;
            //build feature score map in first run
            GraphThreadHandler[] threads = new GraphThreadHandler[threadCount];
            for (int i=0; i<threadCount ; ++i){
                threads[i] = new GraphThreadHandler(i, threadCount, baseGraph, true);
                threads[i].start();
            }
            for (int i=0; i<threadCount ; ++i){
                threads[i].join();
            }
            //measure similarity values of nodes and assign edge values
            for (int i=0; i<threadCount ; ++i){
                threads[i] = new GraphThreadHandler(i, threadCount, baseGraph);
                threads[i].start();
            }
            for (int i=0; i<threadCount ; ++i){
                threads[i].join();
            }
        }catch (InterruptedException ex){
            ex.printStackTrace();
        }
        logHandler.taskFinished(ra, "[GraphBuilderImplOld]- assigning edge values");
        ra = logHandler.taskStarted("[GraphBuilderImplOld]- converting graph to KNN form");
        baseGraph.convertToKNN(GraphBuilderImplOld.knnDefaultSize);
        logHandler.taskFinished(ra, "[GraphBuilderImplOld]- converting graph to KNN form");
        baseGraph.removeRedundantData();
        return baseGraph;
    }
    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * @deprecated This is the sequential version of GraphBuilderImplOld.createGraphFromFileMultiThread method. Be aware that,
     * Running this implementation will require considerable amount of time compared to multi-thread version.
     * @param corpusFileAddress address of input text.
     * @param labelsFileAddress address of labels file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing location to
     *                                                label probability mappings.
     *                                                each line of this file is formatted as below: <br>
     *                                                #sequence #position #labelIndex (Real number)probability
     * @return a graph of tri-grams of the given corpus
     */
    public GraphContainer createGraphFromFile(String corpusFileAddress,
                                              String labelsFileAddress,
                                              String wordLocationLabelProbabilityFileAddress){
        GraphContainer baseGraph = this.createGraphFromFileBase(corpusFileAddress,labelsFileAddress,wordLocationLabelProbabilityFileAddress);
        GraphContainer unigramGraph = this.createGraphFromFileBase(corpusFileAddress, 1);
        GraphContainer bigramGraph = this.createGraphFromFileBase(corpusFileAddress, 2);
        GraphContainer fourgramGraph = this.createGraphFromFileBase(corpusFileAddress, 4);
        GraphContainer fivegramGraph = this.createGraphFromFileBase(corpusFileAddress, 5);
        baseGraph.setGraphOfNgram(1 ,unigramGraph);
        baseGraph.setGraphOfNgram(2, bigramGraph);
        baseGraph.setGraphOfNgram(4, fourgramGraph);
        baseGraph.setGraphOfNgram(5, fivegramGraph);
        unigramGraph = null;
        bigramGraph = null;
        fourgramGraph = null;
        fivegramGraph = null;
        baseGraph.populateEdgeValuesOld();
        baseGraph.convertToKNN(GraphBuilderImplOld.knnDefaultSize);
        baseGraph.removeRedundantData();
        return baseGraph;
    }
    /**
     * Exports graph nodes as node id to ngram mapping. Output format: <br>
     * #nodeId [space separated ngram members]
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    public void saveFileAsNodeIdToNgramMapping(GraphContainer graph, String outputFileAddress){
        graph.exportToFileAsIdMapping(outputFileAddress);
    }
    /**
     * Exports graph data to file. Output format: <br>
     * #source-nodeId #destination-nodeId (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    public void saveGraphToFile(GraphContainer graph, String outputFileAddress){
        graph.exportGraphToFile(outputFileAddress);
    }
    /**
     * Exports graph data to file. Output format: <br>
     * [source-node word set] [destination-node word set] (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     * @param dictionary a dictionary object containing word-index to word mappings
     */
    public void saveGraphToFileAsWordSets(GraphContainer graph, String outputFileAddress, WordDictionary dictionary){
        graph.exportGraphToFileAsWordSetsSimilarity(outputFileAddress, dictionary);
    }
    /**
     * Exports graph nodes' data to file. Output format: <br>
     * #nodeId #sequence #position
     * <br>
     * sequence number and position number match to sentence number and position of the n-gram center word in sentence.
     * Both of these indexes are zero-based.
     * @deprecated this method is only used for debugging purposes.
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    public void saveFileAsNodeIdToLocationMapping(GraphContainer graph, String outputFileAddress){
        graph.exportToFileAsIdToLocationMapping(outputFileAddress);
    }
    /**
     * Exports type probability information contained in the graph.
     * Output format: <br>
     * #nodeId #labelId (Real number)probability
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    public void saveFileAsNodeIdToTypeLevelProbabilities(GraphContainer graph, String outputFileAddress){
        graph.exportToFileAsIdToTypeLevelProbabilities(outputFileAddress);
    }
}
| 15,369 | 40.428571 | 141 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/IGraphBuilderWithPOS.java | package main.java.Graph.Builder;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.Graph.GraphStructure.GraphContainerWithPOS;
import main.java.Text.WordDictionary;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public interface IGraphBuilderWithPOS {
    /**
     * Creates a graph of ngrams extracted from the sentences of a given text file.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    GraphContainerWithPOS createGraphFromFileBase(String corpusFileAddress, int ngramSize);
    /**
     * Creates a graph of ngrams extracted from the sentences of a given text file and adds this
     * data to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    GraphContainerWithPOS createGraphFromFileBase(GraphContainerWithPOS graph, String corpusFileAddress, int ngramSize);
    /**
     * Calculates label probabilities for each tri-gram of the corpus.
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing label probability for each location
     * @return a graph of nodes containing label probability data for each node
     */
    GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(String corpusFileAddress,
                                                                       String labelsFileAddress,
                                                                       String wordLocationLabelProbabilityFileAddress);
    /**
     * Calculates label probabilities for each tri-gram of the corpus and adds the new data to
     * a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing label probability for each location
     * @return a graph of nodes containing label probability data for each node
     */
    GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(GraphContainer graph,
                                                                       String corpusFileAddress,
                                                                       String labelsFileAddress,
                                                                       String wordLocationLabelProbabilityFileAddress);
    /**
     * Calculates the empirical (marginal) label probability for each tri-gram of the corpus.
     * @param corpusFileAddress address of the text file to read from
     * @return a graph of nodes containing marginal probability data for each node
     */
    GraphContainer createGraphFromFileBaseForMarginalsCalculation(String corpusFileAddress);
    /**
     * Calculates the empirical (marginal) label probability for each tri-gram of the corpus and
     * adds the new data to a previously built graph.
     * @param graph a given graph to add new data to
     * @param corpusFileAddress address of the text file to read from
     * @return a graph of nodes containing marginal probability data for each node
     */
    GraphContainer createGraphFromFileBaseForMarginalsCalculation(GraphContainer graph, String corpusFileAddress);
    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * @deprecated This is the sequential version of
     * {@link #createGraphFromFileMultiThread(String)}. Be aware that running this implementation
     * will require considerable amount of time compared to the multi-thread version.
     * @param corpusFileAddress address of input text.
     * @param labelsFileAddress address of labels file
     * @param wordLocationLabelProbabilityFileAddress address of the file containing location to
     *                                                label probability mappings.
     *                                                Each line of this file is formatted as:<br>
     *                                                #sequence #position #labelIndex (Real number)probability
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainerWithPOS createGraphFromFile(String corpusFileAddress,
                                              String labelsFileAddress,
                                              String wordLocationLabelProbabilityFileAddress);
    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * @param corpusFileAddress address of input text.
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainerWithPOS createGraphFromFileMultiThread(String corpusFileAddress);
    /**
     * Use this method to create the graph of tri-grams for a labeled plus an unlabeled corpus.
     * @param corpusFileAddress address of input text.
     * @param corpusUnlabeledFileAddress address of the second input file (the unlabeled set)
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainerWithPOS createGraphFromFileMultiThread(String corpusFileAddress,
                                                         String corpusUnlabeledFileAddress);
    /**
     * Use this method to create the graph of tri-grams for a labeled plus an unlabeled corpus,
     * optionally extending a previously built graph.
     * @param graph a given graph to add new data to, or null to start from an empty graph
     * @param corpusFileAddress address of input text.
     * @param corpusUnlabeledFileAddress address of the second input file (the unlabeled set)
     * @return a graph of tri-grams of the given corpus
     */
    GraphContainerWithPOS createGraphFromFileMultiThread(GraphContainerWithPOS graph, String corpusFileAddress,
                                                         String corpusUnlabeledFileAddress);
    /**
     * Use this method to export graph nodes as node id to ngram mapping. Output format:<br>
     * #nodeId [space separated ngram members]
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToNgramMapping(GraphContainerWithPOS graph, String outputFileAddress);
    /**
     * Use this method to export graph data to file. Output format:<br>
     * #source-nodeId #destination-nodeId (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveGraphToFile(GraphContainerWithPOS graph, String outputFileAddress);
    /**
     * Use this method to export graph data to file. Output format:<br>
     * [source-node word set] [destination-node word set] (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     * @param dictionary a dictionary object containing word-index to word mappings
     */
    void saveGraphToFileAsWordSets(GraphContainerWithPOS graph, String outputFileAddress, WordDictionary dictionary);
    /**
     * Use this method to export graph nodes' data to file. Output format:<br>
     * #nodeId #sequence #position<br>
     * sequence and position are the zero-based sentence number and the position of the n-gram
     * center word within that sentence.
     * @deprecated this method is only used for debugging purposes.
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToLocationMapping(GraphContainerWithPOS graph, String outputFileAddress);
    /**
     * Use this method to export type probability information contained in the graph.
     * Output format:<br>
     * nodeIdInSerializedForm [TAB] #labelId [TAB] (Real number)probability
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsNodeIdToTypeLevelProbabilities(GraphContainer graph, String outputFileAddress);
    /**
     * Use this method to export type marginal probabilities to a file.
     * Output format:<br>
     * nodeIdInSerializedForm [TAB] #labelId [TAB] (Real number)probability
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    void saveFileAsTypeLevelEmpiricalLabelProbabilities(GraphContainer graph, String outputFileAddress);
}
| 8,615 | 52.515528 | 121 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRF.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReader;
import main.java.Graph.Concurrency.GraphThreadHandler;
import main.java.Graph.GraphStructure.GraphContainer;
import main.java.Graph.GraphStructure.Location;
import main.java.Graph.GraphStructure.Node;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.TextToNgram.NgramUtility;
import main.java.Utility.*;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public abstract class GraphBuilderStandardCRF implements IGraphBuilder {
    /** N-gram window size of the main graph built by this class (tri-grams). */
    private static final int defaultNgramSize = 3;
    /** Sizes of the auxiliary n-gram graphs that get attached to the tri-gram base graph. */
    private static final int[] auxiliaryNgramSizes = {1, 2, 4, 5};
    /** Sink for progress/runtime messages of the (potentially long-running) build steps. */
    private final Logger logHandler;

    public GraphBuilderStandardCRF(Logger logger){
        this.logHandler = logger;
    }

    /**
     * Extracts the sentence used for ngram extraction from the record the reader is currently
     * positioned on (getNext() has already been called by the caller). Subclasses decide which
     * column of the CRF file (e.g. words or word classes) forms the sentence.
     * @param crfFileReader reader positioned on the current record
     * @return the sentence to extract ngrams from
     */
    protected abstract String getSentence(CRFFileReader crfFileReader);

    /**
     * Builds a fresh graph of ngrams extracted from the sentences of the given text file.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    public GraphContainer createGraphFromFileBase(String corpusFileAddress, int ngramSize){
        return this.createGraphFromFileBase(null, corpusFileAddress, ngramSize);
    }

    /**
     * Adds the ngrams of every sentence of the given text file to the supplied graph
     * (a new graph is created when {@code graph} is null).
     * @param graph graph to extend, or null to start from an empty graph
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return the graph containing one node per distinct ngram
     */
    public GraphContainer createGraphFromFileBase(GraphContainer graph, String corpusFileAddress, int ngramSize){
        if (graph == null)
            graph = new GraphContainer();
        NgramUtility ngramUtility = new NgramUtility();
        CRFFileReader crfFileReader = new CRFFileReader(corpusFileAddress);
        while (crfFileReader.hasNext()) {
            crfFileReader.getNext();
            String sentence = getSentence(crfFileReader);
            NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
            for (NgramContainer ngram : ngramSet) {
                graph.addNode(new Node<Location>(ngram));//add node to graph or else update node frequency
            }
        }
        crfFileReader.close();
        return graph;
    }

    /**
     * Builds a fresh main graph: ngram nodes together with the sentence index of every occurrence.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams with location information
     */
    public GraphContainer createGraphFromFileBaseForMainGraph(String corpusFileAddress, int ngramSize){
        return createGraphFromFileBaseForMainGraph(null, corpusFileAddress, ngramSize);
    }

    /**
     * Builds (or extends) the main graph: for every sentence the ngrams are added together with
     * the zero-based sentence index, so each node keeps track of the locations it occurs at.
     * @param graph graph to extend, or null to start from an empty graph
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return the graph containing ngram nodes with location information
     */
    public GraphContainer createGraphFromFileBaseForMainGraph(GraphContainer graph, String corpusFileAddress, int ngramSize){
        if (graph == null)
            graph = new GraphContainer();
        NgramUtility ngramUtility = new NgramUtility();
        int sequence = 0;//zero-based sentence index, stored as part of each node's location
        CRFFileReader crfFileReader = new CRFFileReader(corpusFileAddress);
        while (crfFileReader.hasNext()) {
            crfFileReader.getNext();
            String sentence = getSentence(crfFileReader);
            NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
            graph.addNgramsToGraph(ngramSet, sequence);
            ++sequence;
        }
        crfFileReader.close();//fix: the reader was previously never closed here (resource leak)
        return graph;
    }

    /**
     * Builds a fresh graph of tri-grams whose nodes carry label (type) probability information.
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of the labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress file mapping word locations to label probabilities
     * @return a graph whose nodes contain label probability data
     */
    public GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(String corpusFileAddress,
                                                                               String labelsFileAddress,
                                                                               String wordLocationLabelProbabilityFileAddress){
        return this.createGraphFromFileBaseForTypeProbabilityCalculation(null, corpusFileAddress, labelsFileAddress, wordLocationLabelProbabilityFileAddress);
    }

    /**
     * Builds (or extends) a graph of tri-grams whose nodes carry label (type) probability
     * information read from the location-to-label-probability file.
     * @param graph graph to extend, or null to start from an empty graph
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of the labels dictionary file (used to count labels)
     * @param wordLocationLabelProbabilityFileAddress file mapping word locations to label probabilities
     * @return a graph whose nodes contain label probability data
     */
    public GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(GraphContainer graph,
                                                                               String corpusFileAddress,
                                                                               String labelsFileAddress,
                                                                               String wordLocationLabelProbabilityFileAddress){
        if (graph == null)
            graph = new GraphContainer();
        NgramUtility ngramUtility = new NgramUtility();
        int labelCount = LabelFileHandler.countLabels(labelsFileAddress);
        LocationToLabelFileHandler fileInputLocationToLabelMapping =
                new LocationToLabelFileHandler(wordLocationLabelProbabilityFileAddress);
        int sequence = 0;//zero-based sentence index
        CRFFileReader crfFileReader = new CRFFileReader(corpusFileAddress);
        while (crfFileReader.hasNext()) {
            crfFileReader.getNext();
            String sentence = getSentence(crfFileReader);
            NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence(sentence, defaultNgramSize);
            graph.addNgramsToGraph(ngramSet, sequence, labelCount, fileInputLocationToLabelMapping);
            ++sequence;
        }
        crfFileReader.close();//fix: the reader was previously never closed (resource leak)
        return graph;
    }

    /**
     * Builds a fresh graph of tri-grams with empirical label (marginal) probabilities.
     * @param corpusFileAddress address of the labeled text file to read from
     * @return a graph whose nodes contain marginal probability data
     */
    public GraphContainer createGraphFromFileBaseForMarginalsCalculation(String corpusFileAddress){
        return this.createGraphFromFileBaseForMarginalsCalculation(null, corpusFileAddress);
    }

    /**
     * Builds (or extends) a graph of tri-grams and accumulates, per node, how often each gold
     * label occurs at the node's positions; finally converts the counts to empirical label
     * probabilities.
     * @param graph graph to extend, or null to start from an empty graph
     * @param corpusFileAddress address of the labeled text file to read from
     * @return a graph whose nodes contain marginal probability data
     */
    public GraphContainer createGraphFromFileBaseForMarginalsCalculation(GraphContainer graph, String corpusFileAddress){
        if (graph == null)
            graph = new GraphContainer();
        NgramUtility ngramUtility = new NgramUtility();
        CRFFileReader crfFileReader = new CRFFileReader(corpusFileAddress);
        while (crfFileReader.hasNext()) {
            crfFileReader.getNext();
            String sentence = getSentence(crfFileReader);
            ArrayList<Integer> labels = crfFileReader.getLabels();
            //was a magic literal 3; the class constant documents the tri-gram window
            NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence(sentence, defaultNgramSize);
            for(int i=0; i<ngramSet.length ; ++i){
                int nodeIndex = graph.addNode(new Node<Location>(ngramSet[i]));//add node to graph or else update node frequency
                graph.getNodeAt(nodeIndex).incrementLabelCount(labels.get(i)); //add label data to node
            }
        }
        crfFileReader.close();//fix: the reader was previously never closed (resource leak)
        graph.updateNodesEmpiricalLabelProbabilities();
        return graph;
    }

    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * @deprecated This is the sequential version of
     * {@link #createGraphFromFileMultiThread(String)}. Be aware that running this implementation
     * will require considerable amount of time compared to the multi-thread version.
     * @param corpusFileAddress address of input text.
     * @param labelsFileAddress address of labels file (not referenced by this implementation)
     * @param wordLocationLabelProbabilityFileAddress address of the file containing location to
     *                                                label probability mappings (not referenced
     *                                                by this implementation). Each line:<br>
     *                                                #sequence #position #labelIndex (Real number)probability
     * @return a graph of tri-grams of the given corpus
     */
    @Override
    public GraphContainer createGraphFromFile(String corpusFileAddress,
                                              String labelsFileAddress,
                                              String wordLocationLabelProbabilityFileAddress){
        GraphContainer baseGraph = this.createGraphFromFileBase(corpusFileAddress, defaultNgramSize);
        attachAuxiliaryNgramGraphs(baseGraph, corpusFileAddress);
        baseGraph.buildFeatureScoreMapForNodes();
        baseGraph.populateEdgeValues();
        baseGraph.convertToKNN(Config.getKnnDefaultSize());
        baseGraph.removeRedundantData();
        return baseGraph;
    }

    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * Node creation, PMI feature scores, edge weights, KNN sparsification and cleanup are run;
     * the two expensive passes use multiple worker threads.
     * @param corpusFileAddress address of input text.
     * @return a graph of tri-grams of the given corpus
     */
    @Override
    public GraphContainer createGraphFromFileMultiThread(String corpusFileAddress){
        RuntimeAnalyzer ra = logHandler.taskStarted("[GraphBuilder]- creating nodes of ngram graphs");
        GraphContainer baseGraph = this.createGraphFromFileBase(corpusFileAddress, defaultNgramSize);
        attachAuxiliaryNgramGraphs(baseGraph, corpusFileAddress);
        logHandler.taskFinished(ra, "[GraphBuilder]- creating nodes of ngram graphs");
        return finalizeGraph(baseGraph);
    }

    /**
     * Use this method to create the graph of tri-grams for a labeled plus an unlabeled corpus.
     * @param corpusFileAddress address of input text.
     * @param corpusUnlabeledFileAddress address of second input file which is supposed to be the unlabeled set
     * @return a graph of tri-grams of the given corpus
     */
    public GraphContainer createGraphFromFileMultiThread(String corpusFileAddress,
                                                         String corpusUnlabeledFileAddress){
        return createGraphFromFileMultiThread(null, corpusFileAddress, corpusUnlabeledFileAddress);
    }

    /**
     * Use this method to create the graph of tri-grams for a labeled plus an unlabeled corpus,
     * optionally extending a previously built graph.
     * @param graph a graph object to add graph data to, or null to start from an empty graph
     * @param corpusFileAddress address of input text.
     * @param corpusUnlabeledFileAddress address of second input file which is supposed to be the unlabeled set
     * @return a graph of tri-grams of the given corpus
     */
    @Override
    public GraphContainer createGraphFromFileMultiThread(GraphContainer graph, String corpusFileAddress, String corpusUnlabeledFileAddress) {
        RuntimeAnalyzer ra = logHandler.taskStarted("[GraphBuilder]- creating nodes of ngram graphs");
        GraphContainer baseGraph = this.createGraphFromFileBaseForMainGraph(graph, corpusFileAddress, defaultNgramSize);
        baseGraph = this.createGraphFromFileBaseForMainGraph(baseGraph, corpusUnlabeledFileAddress, defaultNgramSize);
        for (int size : auxiliaryNgramSizes) {
            GraphContainer ngramGraph = this.createGraphFromFileBase(corpusFileAddress, size);
            ngramGraph = this.createGraphFromFileBase(ngramGraph, corpusUnlabeledFileAddress, size);
            baseGraph.setGraphOfNgram(size, ngramGraph);
        }
        logHandler.taskFinished(ra, "[GraphBuilder]- creating nodes of ngram graphs");
        return finalizeGraph(baseGraph);
    }

    /** Attaches freshly built 1-, 2-, 4- and 5-gram graphs of the given corpus to the base graph. */
    private void attachAuxiliaryNgramGraphs(GraphContainer baseGraph, String corpusFileAddress){
        for (int size : auxiliaryNgramSizes) {
            baseGraph.setGraphOfNgram(size, this.createGraphFromFileBase(corpusFileAddress, size));
        }
    }

    /**
     * Applies the shared multi-threaded post-processing pipeline: PMI feature scores, edge
     * weights, KNN sparsification and cleanup of intermediate data.
     */
    private GraphContainer finalizeGraph(GraphContainer baseGraph){
        try{
            runThreadPass(baseGraph, true, "[GraphBuilder]- calculating pmi values");
            runThreadPass(baseGraph, false, "[GraphBuilder]- assigning edge weights for nodes");
        }catch (InterruptedException ex){
            Thread.currentThread().interrupt();//fix: restore the interrupt flag so callers can observe it
            ex.printStackTrace();
        }
        RuntimeAnalyzer ra = logHandler.taskStarted("[GraphBuilder]- converting graph to KNN form");
        baseGraph.convertToKNN(Config.getKnnDefaultSize());
        logHandler.taskFinished(ra, "[GraphBuilder]- converting graph to KNN form");
        baseGraph.removeRedundantData();
        return baseGraph;
    }

    /**
     * Runs one pass of worker threads over the graph and waits for all of them to finish.
     * @param baseGraph graph the workers operate on
     * @param buildFeatureScoreMap true for the first (PMI/feature score) pass,
     *                             false for the edge weight pass
     * @param taskName label used for the start/finish log messages
     * @throws InterruptedException if the calling thread is interrupted while joining workers
     */
    private void runThreadPass(GraphContainer baseGraph, boolean buildFeatureScoreMap, String taskName)
            throws InterruptedException {
        RuntimeAnalyzer ra = logHandler.taskStarted(taskName);
        int threadCount = Config.graphBuilderThreadCount;
        GraphThreadHandler[] threads = new GraphThreadHandler[threadCount];
        for (int i=0; i<threadCount ; ++i){
            threads[i] = buildFeatureScoreMap
                    ? new GraphThreadHandler(i, threadCount, baseGraph, true)
                    : new GraphThreadHandler(i, threadCount, baseGraph);
            threads[i].start();
        }
        for (int i=0; i<threadCount ; ++i){
            threads[i].join();
        }
        logHandler.taskFinished(ra, taskName);
    }

    /**
     * Use this method to export graph nodes as node id to ngram mapping. Output format:<br>
     * #nodeId [space separated ngram members]
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    @Override
    public void saveFileAsNodeIdToNgramMapping(GraphContainer graph, String outputFileAddress){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportToFileAsIdMapping(outputFileAddress);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }

    /**
     * Use this method to export graph data to file. Output format:<br>
     * #source-nodeId #destination-nodeId (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    @Override
    public void saveGraphToFile(GraphContainer graph, String outputFileAddress){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportGraphToFile(outputFileAddress);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }

    /**
     * Use this method to export graph data to file. Output format:<br>
     * [source-node word set] [destination-node word set] (Real number)edge-weight
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     * @param dictionary a dictionary object containing word-index to word mappings
     */
    @Override
    public void saveGraphToFileAsWordSets(GraphContainer graph, String outputFileAddress, WordDictionary dictionary){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportGraphToFileAsWordSetsSimilarity(outputFileAddress, dictionary);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }

    /**
     * Use this method to export graph nodes' data to file. Output format:<br>
     * #nodeId #sequence #position<br>
     * sequence and position are the zero-based sentence number and the position of the n-gram
     * center word within that sentence.
     * @deprecated this method is only used for debugging purposes.
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    @Override
    public void saveFileAsNodeIdToLocationMapping(GraphContainer graph, String outputFileAddress){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportToFileAsIdToLocationMapping(outputFileAddress);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }

    /**
     * Use this method to export type probability information contained in the graph.
     * Output format:<br>
     * #nodeId #labelId (Real number)probability
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    @Override
    public void saveFileAsNodeIdToTypeLevelProbabilities(GraphContainer graph, String outputFileAddress){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportToFileAsIdToTypeLevelProbabilities(outputFileAddress);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }

    /**
     * Use this method to export type marginal probabilities to a file.
     * Output format:<br>
     * nodeIdInSerializedForm [TAB] #labelId [TAB] (Real number)probability
     * @param graph the input graph
     * @param outputFileAddress name of the file to save output
     */
    @Override
    public void saveFileAsTypeLevelEmpiricalLabelProbabilities(GraphContainer graph, String outputFileAddress){
        RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
        graph.exportToFileAsEmpiricalProbabilities(outputFileAddress);
        logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
    }
}
| 19,537 | 43.404545 | 158 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderFactory.java | package main.java.Graph.Builder;
import main.java.Utility.Logger;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class GraphBuilderFactory {
    /** Type of token the produced builder extracts sentences from when forming ngrams. */
    public enum GraphNgramType{
        WordClass, Word
    }
    /**
     * Creates a graph builder whose sentence extraction matches the requested ngram type.
     * @param logger a logger object used for logging purposes inside the graphBuilder object
     * @param ngramType graph builder will extract sentences based on type of ngram
     * @return a generic purpose instance of graphBuilder
     * @throws IllegalArgumentException if no implementation is registered for the given type
     */
    public static IGraphBuilder getGraphBuilder(Logger logger, GraphNgramType ngramType){
        switch (ngramType){
            case WordClass:
                return new GraphBuilderStandardCRFWordClassImpl(logger);
            case Word:
                return new GraphBuilderStandardCRFWordsImpl(logger);
            default:
                //fix: previously fell through to "return null", deferring the failure to an NPE at the caller
                throw new IllegalArgumentException("Unsupported ngram type: " + ngramType);
        }
    }
}
| 1,051 | 29.941176 | 89 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Graph/Builder/GraphBuilderStandardCRFWithPOS.java | package main.java.Graph.Builder;
import main.java.CRF.CRFFileReader;
import main.java.CRF.CRFFileReaderWithPOSTags;
import main.java.Graph.Concurrency.GraphThreadHandler;
import main.java.Graph.Concurrency.GraphWithPOSThreadHandler;
import main.java.Graph.GraphStructure.*;
import main.java.Text.WordDictionary;
import main.java.TextToNgram.NgramContainer;
import main.java.TextToNgram.NgramUtility;
import main.java.Utility.*;
import java.util.ArrayList;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public abstract class GraphBuilderStandardCRFWithPOS implements IGraphBuilderWithPOS {
    //n-gram window size used by methods that do not take an explicit size (tri-grams)
    private static final int defaultNgramSize = 3;
    //sink for progress/runtime log messages of the build steps
    private Logger logHandler;
    /**
     * @param logger logger used to report progress of the (potentially long-running) build steps
     */
    public GraphBuilderStandardCRFWithPOS(Logger logger){
        this.logHandler = logger;
    }
    /**
     * Extracts the sentence used for ngram extraction from the record the reader is currently
     * positioned on (getNext() has already been called by the caller). Subclasses decide which
     * column of the CRF file forms the sentence.
     * @param crfFileReader reader positioned on the current record
     * @return the sentence to extract ngrams from
     */
    protected abstract String getSentence(CRFFileReaderWithPOSTags crfFileReader);
    /**
     * Builds a fresh graph of ngrams extracted from the sentences of the given text file.
     * Convenience overload that starts from an empty graph.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams
     */
    public GraphContainerWithPOS createGraphFromFileBase(String corpusFileAddress, int ngramSize){
        return this.createGraphFromFileBase(null, corpusFileAddress, ngramSize);
    }
public GraphContainerWithPOS createGraphFromFileBase(GraphContainerWithPOS graph, String corpusFileAddress, int ngramSize){
NgramContainer[] ngramSet;
NgramUtility ngramUtility = new NgramUtility();
if (graph == null)
graph = new GraphContainerWithPOS();
String sentence;
CRFFileReaderWithPOSTags crfFileReader = new CRFFileReaderWithPOSTags(corpusFileAddress);
while (crfFileReader.hasNext()) {
crfFileReader.getNext();
sentence = getSentence(crfFileReader);
ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
for (NgramContainer ngram : ngramSet) {
graph.addNode(new NodeWithPartOfSpeech(ngram));//add node to graph or else update node frequency
}
}
crfFileReader.close();
return graph;
}
    /**
     * Builds a fresh main graph (ngram nodes plus sentence locations) from the given file.
     * Convenience overload that starts from an empty graph.
     * @param corpusFileAddress address of the text file to read from
     * @param ngramSize size of ngrams to extract from each sentence
     * @return a graph of ngrams with location information
     */
    public GraphContainerWithPOS createGraphFromFileBaseForMainGraph(String corpusFileAddress, int ngramSize){
        return createGraphFromFileBaseForMainGraph(null, corpusFileAddress, ngramSize);
    }
public GraphContainerWithPOS createGraphFromFileBaseForMainGraph(GraphContainerWithPOS graph, String corpusFileAddress, int ngramSize){
NgramContainer[] ngramSet, ngramPOSSet;
NgramUtility ngramUtility = new NgramUtility();
int sequence = 0;//todo: this variable can be declared as a field
if (graph == null)
graph = new GraphContainerWithPOS();
String sentence;
CRFFileReaderWithPOSTags crfFileReader = new CRFFileReaderWithPOSTags(corpusFileAddress);
while (crfFileReader.hasNext()) {
crfFileReader.getNext();
sentence = getSentence(crfFileReader);
ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
ngramPOSSet = ngramUtility.extractNgramsFromSentence(crfFileReader.getPOSTagSentence(), ngramSize);
graph.addNgramsToGraph(ngramSet, ngramPOSSet, sequence);
++sequence;
}
return graph;
}
    /**
     * Builds a fresh graph of tri-grams whose nodes carry label (type) probability information.
     * Convenience overload that starts from an empty graph.
     * @param corpusFileAddress address of the text file to read from
     * @param labelsFileAddress address of the labels dictionary file
     * @param wordLocationLabelProbabilityFileAddress file mapping word locations to label probabilities
     * @return a graph whose nodes contain label probability data
     */
    public GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(String corpusFileAddress,
                                                                               String labelsFileAddress,
                                                                               String wordLocationLabelProbabilityFileAddress){
        return this.createGraphFromFileBaseForTypeProbabilityCalculation(null, corpusFileAddress, labelsFileAddress, wordLocationLabelProbabilityFileAddress);
    }
public GraphContainer createGraphFromFileBaseForTypeProbabilityCalculation(GraphContainer graph,
String corpusFileAddress,
String labelsFileAddress,
String wordLocationLabelProbabilityFileAddress){
if (graph == null)
graph = new GraphContainer();
int ngramSize = defaultNgramSize;
NgramContainer[] ngramSet;
NgramUtility ngramUtility = new NgramUtility();
int sequence = 0;
String sentence;
int labelCount = LabelFileHandler.countLabels(labelsFileAddress);
LocationToLabelFileHandler fileInputLocationToLabelMapping =
new LocationToLabelFileHandler(wordLocationLabelProbabilityFileAddress);
CRFFileReaderWithPOSTags crfFileReader = new CRFFileReaderWithPOSTags(corpusFileAddress);
while (crfFileReader.hasNext()) {
crfFileReader.getNext();
sentence = getSentence(crfFileReader);
ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
graph.addNgramsToGraph(ngramSet, sequence, labelCount, fileInputLocationToLabelMapping);
++sequence;
}
return graph;
}
    /**
     * Builds a fresh graph of tri-grams with empirical label (marginal) probabilities.
     * Convenience overload that starts from an empty graph.
     * @param corpusFileAddress address of the labeled text file to read from
     * @return a graph whose nodes contain marginal probability data
     */
    public GraphContainer createGraphFromFileBaseForMarginalsCalculation(String corpusFileAddress){
        return this.createGraphFromFileBaseForMarginalsCalculation(null, corpusFileAddress);
    }
public GraphContainer createGraphFromFileBaseForMarginalsCalculation(GraphContainer graph, String corpusFileAddress){
if (graph == null)
graph = new GraphContainer();
int ngramSize = 3;
NgramContainer[] ngramSet;
NgramUtility ngramUtility = new NgramUtility();
int nodeIndex;
String sentence;
ArrayList<Integer> labels;
CRFFileReaderWithPOSTags crfFileReader = new CRFFileReaderWithPOSTags(corpusFileAddress);
while (crfFileReader.hasNext()) {
crfFileReader.getNext();
sentence = getSentence(crfFileReader);
labels = crfFileReader.getLabels();
ngramSet = ngramUtility.extractNgramsFromSentence(sentence, ngramSize);
for(int i=0; i<ngramSet.length ; ++i){
nodeIndex = graph.addNode(new Node<Location>(ngramSet[i]));//add node to graph or else update node frequency
graph.getNodeAt(nodeIndex).incrementLabelCount(labels.get(i)); //add label data to node
}
}
graph.updateNodesEmpiricalLabelProbabilities();
return graph;
}
/**
* Use this method to create the graph of tri-grams for a given corpus.
* @deprecated This is the sequential version of GraphBuilder.createGraphFromFileMultiThread method. Be aware that,
* Running this implementation will require considerable amount of time compared to multi-thread version.
* @param corpusFileAddress address of input text.
* @param labelsFileAddress address of labels file
* @param wordLocationLabelProbabilityFileAddress address of the file containing location to
* label probability mappings.
* each line of this file is formatted as below: </br>
* #sequence #position #labelIndex (Real number)probability
* @return a graph of tri-grams of the given corpus
*/
@Override
public GraphContainerWithPOS createGraphFromFile(String corpusFileAddress,
String labelsFileAddress,
String wordLocationLabelProbabilityFileAddress){
GraphContainerWithPOS baseGraph = this.createGraphFromFileBase(corpusFileAddress, 3);
GraphContainerWithPOS unigramGraph = this.createGraphFromFileBase(corpusFileAddress, 1);
GraphContainerWithPOS bigramGraph = this.createGraphFromFileBase(corpusFileAddress, 2);
GraphContainerWithPOS fourgramGraph = this.createGraphFromFileBase(corpusFileAddress, 4);
GraphContainerWithPOS fivegramGraph = this.createGraphFromFileBase(corpusFileAddress, 5);
baseGraph.setGraphOfNgram(1 ,unigramGraph);
baseGraph.setGraphOfNgram(2, bigramGraph);
baseGraph.setGraphOfNgram(4, fourgramGraph);
baseGraph.setGraphOfNgram(5, fivegramGraph);
baseGraph.computeFeatureStats();
baseGraph.buildFeatureScoreMapForNodes();
baseGraph.populateEdgeValues();
baseGraph.convertToKNN(Config.getKnnDefaultSize());
baseGraph.removeRedundantData();
return baseGraph;
}
/**
* Use this method to create the graph of tri-grams for a given corpus
* @param corpusFileAddress address of input text.
* @return a graph of tri-grams of the given corpus
*/
@Override
public GraphContainerWithPOS createGraphFromFileMultiThread(String corpusFileAddress){
RuntimeAnalyzer ra;
ra = logHandler.taskStarted("[GraphBuilder]- creating nodes of ngram graphs");
GraphContainerWithPOS baseGraph = this.createGraphFromFileBase(corpusFileAddress, 3);
GraphContainerWithPOS unigramGraph = this.createGraphFromFileBase(corpusFileAddress, 1);
GraphContainerWithPOS bigramGraph = this.createGraphFromFileBase(corpusFileAddress, 2);
GraphContainerWithPOS fourgramGraph = this.createGraphFromFileBase(corpusFileAddress, 4);
GraphContainerWithPOS fivegramGraph = this.createGraphFromFileBase(corpusFileAddress, 5);
logHandler.taskFinished(ra, "[GraphBuilder]- creating nodes of ngram graphs");
baseGraph.setGraphOfNgram(1 ,unigramGraph);
baseGraph.setGraphOfNgram(2, bigramGraph);
baseGraph.setGraphOfNgram(4, fourgramGraph);
baseGraph.setGraphOfNgram(5, fivegramGraph);
baseGraph.computeFeatureStats();
try{
int threadCount = Config.graphBuilderThreadCount;
//build feature score map in first run
ra = logHandler.taskStarted("[GraphBuilder]- calculating pmi values");
GraphWithPOSThreadHandler[] threads = new GraphWithPOSThreadHandler[threadCount];
for (int i=0; i<threadCount ; ++i){
threads[i] = new GraphWithPOSThreadHandler(i, threadCount, baseGraph, true);
threads[i].start();
}
for (int i=0; i<threadCount ; ++i){
threads[i].join();
}
logHandler.taskFinished(ra, "[GraphBuilder]- calculating pmi values");
//measure similarity values of nodes and assign edge values
ra = logHandler.taskStarted("[GraphBuilder]- assigning edge weights for nodes");
for (int i=0; i<threadCount ; ++i){
threads[i] = new GraphWithPOSThreadHandler(i, threadCount, baseGraph);
threads[i].start();
}
for (int i=0; i<threadCount ; ++i){
threads[i].join();
}
logHandler.taskFinished(ra, "[GraphBuilder]- assigning edge weights for nodes");
}catch (InterruptedException ex){
ex.printStackTrace();
}
ra = logHandler.taskStarted("[GraphBuilder]- converting graph to KNN form");
baseGraph.convertToKNN(Config.getKnnDefaultSize());
logHandler.taskFinished(ra, "[GraphBuilder]- converting graph to KNN form");
baseGraph.removeRedundantData();
return baseGraph;
}
    /**
     * Use this method to create the graph of tri-grams for a given corpus.
     * Convenience overload: delegates to the three-argument variant with a null graph
     * argument, so a fresh graph is built from scratch instead of extending an existing one.
     * @param corpusFileAddress address of input text.
     * @param corpusUnlabeledFileAddress address of second input file which is supposed to be the unlabeled set
     * @return a graph of tri-grams of the given corpus
     */
    public GraphContainerWithPOS createGraphFromFileMultiThread(String corpusFileAddress,
                                                String corpusUnlabeledFileAddress){
        return createGraphFromFileMultiThread(null, corpusFileAddress, corpusUnlabeledFileAddress);
    }
/**
* Use this method to create the graph of tri-grams for a given corpus
* @param graph a graph object to add graph data to
* @param corpusFileAddress address of input text.
* @param corpusUnlabeledFileAddress address of second input file which is supposed to be the unlabeled set
* @return a graph of tri-grams of the given corpus
*/
@Override
public GraphContainerWithPOS createGraphFromFileMultiThread(GraphContainerWithPOS graph, String corpusFileAddress, String corpusUnlabeledFileAddress) {
RuntimeAnalyzer ra;
ra = logHandler.taskStarted("[GraphBuilder]- creating nodes of ngram graphs");
GraphContainerWithPOS baseGraph = this.createGraphFromFileBaseForMainGraph(graph, corpusFileAddress, 3);
baseGraph = this.createGraphFromFileBaseForMainGraph(baseGraph, corpusUnlabeledFileAddress, 3);
GraphContainerWithPOS unigramGraph = this.createGraphFromFileBase(corpusFileAddress, 1);
unigramGraph = this.createGraphFromFileBase(unigramGraph, corpusUnlabeledFileAddress, 1);
GraphContainerWithPOS bigramGraph = this.createGraphFromFileBase(corpusFileAddress, 2);
bigramGraph = this.createGraphFromFileBase(bigramGraph, corpusUnlabeledFileAddress, 2);
GraphContainerWithPOS fourgramGraph = this.createGraphFromFileBase(corpusFileAddress, 4);
fourgramGraph = this.createGraphFromFileBase(fourgramGraph, corpusUnlabeledFileAddress, 4);
GraphContainerWithPOS fivegramGraph = this.createGraphFromFileBase(corpusFileAddress, 5);
fivegramGraph = this.createGraphFromFileBase(fivegramGraph, corpusUnlabeledFileAddress, 5);
logHandler.taskFinished(ra, "[GraphBuilder]- creating nodes of ngram graphs");
baseGraph.setGraphOfNgram(1 ,unigramGraph);
baseGraph.setGraphOfNgram(2, bigramGraph);
baseGraph.setGraphOfNgram(4, fourgramGraph);
baseGraph.setGraphOfNgram(5, fivegramGraph);
baseGraph.computeFeatureStats();
try{
int threadCount = Config.graphBuilderThreadCount;
//build feature score map in first run
ra = logHandler.taskStarted("[GraphBuilder]- calculating pmi values");
GraphWithPOSThreadHandler[] threads = new GraphWithPOSThreadHandler[threadCount];
for (int i=0; i<threadCount ; ++i){
threads[i] = new GraphWithPOSThreadHandler(i, threadCount, baseGraph, true);
threads[i].start();
}
for (int i=0; i<threadCount ; ++i){
threads[i].join();
}
logHandler.taskFinished(ra, "[GraphBuilder]- calculating pmi values");
//measure similarity values of nodes and assign edge values
ra = logHandler.taskStarted("[GraphBuilder]- assigning edge weights for nodes");
for (int i=0; i<threadCount ; ++i){
threads[i] = new GraphWithPOSThreadHandler(i, threadCount, baseGraph);
threads[i].start();
}
for (int i=0; i<threadCount ; ++i){
threads[i].join();
}
logHandler.taskFinished(ra, "[GraphBuilder]- assigning edge weights for nodes");
}catch (InterruptedException ex){
ex.printStackTrace();
}
ra = logHandler.taskStarted("[GraphBuilder]- converting graph to KNN form");
baseGraph.convertToKNN(Config.getKnnDefaultSize());
logHandler.taskFinished(ra, "[GraphBuilder]- converting graph to KNN form");
baseGraph.removeRedundantData();
return baseGraph;
}
/**
* Use this method to export graph nodes as node id to ngram mapping. Output format is as described below: </br>
* #nodeId [space separated ngram members]
* @param graph the input graph
* @param outputFileAddress name of the file to save output
*/
@Override
public void saveFileAsNodeIdToNgramMapping(GraphContainerWithPOS graph, String outputFileAddress){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportToFileAsIdMapping(outputFileAddress);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
/**
* Use this method to export graph data to file. Output format is as described below: </br>
* #source-nodeId #destination-nodeId (Real number)edge-weight
* @param graph the input graph
* @param outputFileAddress name of the file to save output
*/
@Override
public void saveGraphToFile(GraphContainerWithPOS graph, String outputFileAddress){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportGraphToFile(outputFileAddress);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
/**
* Use this method to export graph data to file. Output format is as described below: </br>
* [source-node word set] [destination-node word set] (Real number)edge-weight
* @param graph the input graph
* @param outputFileAddress name of the file to save output
* @param dictionary a dictionary object containing <word-index to word>
*/
@Override
public void saveGraphToFileAsWordSets(GraphContainerWithPOS graph, String outputFileAddress, WordDictionary dictionary){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportGraphToFileAsWordSetsSimilarity(outputFileAddress, dictionary);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
/**
* Use this method to export graph nodes' data to file. Output format is as described below: </br>
* #nodeId #sequence #position
* </br>
* sequence number and position number match to sentence number and position of the n-gram center word in sentence.
* Both of these indexes are zero-based.
* @deprecated this method is only used for debugging purposes.
* @param graph the input graph
* @param outputFileAddress name of the file to save output
*/
@Override
public void saveFileAsNodeIdToLocationMapping(GraphContainerWithPOS graph, String outputFileAddress){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportToFileAsIdToLocationMapping(outputFileAddress);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
/**
* Use this method to export type probability information contained in the graph.
* Output format is as described below: </br>
* #nodeId #labelId (Real number)probability
* @param graph the input graph
* @param outputFileAddress name of the file to save output
*/
@Override
public void saveFileAsNodeIdToTypeLevelProbabilities(GraphContainer graph, String outputFileAddress){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportToFileAsIdToTypeLevelProbabilities(outputFileAddress);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
/**
* Use this method to export type marginal probabilities to a file.
* Output format is as described below: </br>
* nodeIdInSerializedForm [TAB] #labelId [TAB] (Real number)probability
* @param graph the input graph
* @param outputFileAddress name of the file to save output
*/
@Override
public void saveFileAsTypeLevelEmpiricalLabelProbabilities(GraphContainer graph, String outputFileAddress){
RuntimeAnalyzer sectionRunAnalyzer = logHandler.taskStarted("[GraphBuilder]- exporting graph data");
graph.exportToFileAsEmpiricalProbabilities(outputFileAddress);
logHandler.taskFinished(sectionRunAnalyzer, "[GraphBuilder]- exporting graph data");
}
}
| 20,208 | 44.515766 | 158 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/RuntimeAnalyzer.java | package main.java.Utility;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class RuntimeAnalyzer {
    // Timestamp (System.nanoTime) captured by the most recent call to start().
    // Calling finish() before start() throws a NullPointerException on unboxing.
    private Long startTime;

    /**
     * Marks the beginning of a timed task; prints and returns the start line.
     * @param message human-readable task name
     * @return the exact line that was printed
     */
    public String start(String message){
        startTime = System.nanoTime();
        String line = message + " task started.";
        System.out.println(line);
        return line;
    }

    /**
     * Marks the end of a timed task; prints and returns a line containing the
     * elapsed minutes/seconds since the last start() call.
     * @param message human-readable task name
     * @return the exact line that was printed
     */
    public String finish(String message){
        long elapsedNanos = Math.abs(System.nanoTime() - startTime);
        int totalSeconds = (int) (elapsedNanos / 1000000000);
        String line = message + " task finished in " + (totalSeconds / 60) + " min, "
                + (totalSeconds % 60) + " second(s)";
        System.out.println(line);
        return line;
    }

    /** Reports elapsed time like finish() without resetting the start timestamp. */
    public void mileStone(String message){
        finish(message);
    }
}
| 998 | 30.21875 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/Logger.java | package main.java.Utility;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class Logger {
    private BufferedWriter output;
    private String fileAddress;
    private Calendar calendar;
    // NOTE(review): SimpleDateFormat is not thread-safe; this logger assumes
    // single-threaded use — confirm before sharing a Logger between threads.
    private SimpleDateFormat dateTimeFormatter;

    /**
     * Opens the given log file in append mode and writes a separating blank line.
     * On failure only the error is printed; later writes will then fail.
     * @param logFileAddress file that log lines are appended to
     */
    public Logger(String logFileAddress){
        fileAddress = logFileAddress;
        try {
            output = new BufferedWriter(new FileWriter(fileAddress, true));
            this.writeLine();
        } catch (IOException e) {
            System.out.println("Error: an error occurred while creating the log file");
            System.out.println("output file: " + fileAddress);
            e.printStackTrace();
        }
        calendar = Calendar.getInstance();
        // Bug fix: the pattern was "YYYY/MM/DD HH:mm:ss". In SimpleDateFormat 'Y' is the
        // week-based year and 'D' the day-of-year, producing wrong dates (the day field
        // could read e.g. "365"). 'yyyy' (calendar year) and 'dd' (day-of-month) are intended.
        dateTimeFormatter = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
        // (An unused private RuntimeAnalyzer field previously initialized here was removed.)
    }

    /**
     * Appends one timestamped line to the log.
     * @param message text logged after the "yyyy/MM/dd HH:mm:ss - " prefix
     */
    public void writeLine(String message){
        calendar = Calendar.getInstance();
        this.write(dateTimeFormatter.format(calendar.getTime()) + " - " + message + Config.outputNewLineCharacter);
    }

    /** Appends an empty line (no timestamp). */
    public void writeLine(){
        this.write(Config.outputNewLineCharacter);
    }

    // Low-level write; any IOException prints diagnostics and terminates the JVM.
    private void write(String message){
        try{
            output.write(message);
        }catch(IOException ex){
            System.out.println("Error: there was an error in writing to output file");
            System.out.println("output file: " + fileAddress);
            ex.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Logs the start of a named task and returns a timer for it.
     * @param taskName human-readable task name
     * @return the RuntimeAnalyzer to later pass to taskFinished
     */
    public RuntimeAnalyzer taskStarted(String taskName){
        RuntimeAnalyzer ra = new RuntimeAnalyzer();
        this.writeLine(ra.start(taskName));
        return ra;
    }

    /** Logs completion (with elapsed time) of a task started via taskStarted. */
    public void taskFinished(RuntimeAnalyzer ra, String taskName){
        this.writeLine(ra.finish(taskName));
    }

    /** Flushes buffered log lines; the file handle is intentionally kept open. */
    public void close(){
        try{
            output.flush();
            //bw.close();
        }catch(IOException ex){
            System.out.println("Error: there was an error when closing the output file");
            System.out.println("output file: " + fileAddress);
            ex.printStackTrace();
            System.exit(1);
        }
    }

    public void finalize(){
        this.close();
        try{
            super.finalize();
        } catch (Throwable ex){
            System.out.println("Error: unexpected error when finalizing an object of class Logger");
            ex.printStackTrace();
        }
    }
}
| 2,826 | 30.065934 | 115 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/Defaults.java | package main.java.Utility;
import java.util.Hashtable;
public class Defaults {
    public static final String packageExceptionPrefix = "[Graph]-";
    public static final String packageOutputDelimiter = " ";
    public static final String exportFileType = "";
    public static final String exportIndexToNgramPostfix = ".index2ngram" + exportFileType;
    public static final String exportTypeLevelEmpiricalLabelProbabilitiesPostfix = ".seed" + exportFileType;
    public static final String exportGraphPostfix = ".graph" + exportFileType;
    public static final String exportTypeLevelProbabilitiesPostfix = ".type2probability" + exportFileType;
    public static final String exportIndexToLocationPostfix = ".index2location" + exportFileType;
    /*************/

    /**
     * Returns the value stored under {@code key}, terminating the whole program
     * (via MessagePrinter.PrintAndDie) when the key is absent.
     */
    public static String GetValueOrDie(Hashtable config, String key) {
        if (!config.containsKey(key)) {
            MessagePrinter.PrintAndDie("Must specify " + key + "");
        }
        return (String) config.get(key);
    }

    /** Returns the value stored under {@code key}, or {@code defaultVal} when absent. */
    public static String GetValueOrDefault(Hashtable config, String key, String defaultVal) {
        return config.containsKey(key) ? (String) config.get(key) : defaultVal;
    }

    /** Returns {@code valStr} itself, or {@code defaultVal} when it is null. */
    public static String GetValueOrDefault(String valStr, String defaultVal) {
        return valStr == null ? defaultVal : valStr;
    }

    /** Parses {@code valStr} as a double, or returns {@code defaultVal} when it is null. */
    public static double GetValueOrDefault(String valStr, double defaultVal) {
        return valStr == null ? defaultVal : Double.parseDouble(valStr);
    }

    /** Parses {@code valStr} as a boolean, or returns {@code defaultVal} when it is null. */
    public static boolean GetValueOrDefault(String valStr, boolean defaultVal) {
        return valStr == null ? defaultVal : Boolean.parseBoolean(valStr);
    }

    /** Parses {@code valStr} as an int, or returns {@code defaultVal} when it is null. */
    public static int GetValueOrDefault(String valStr, int defaultVal) {
        return valStr == null ? defaultVal : Integer.parseInt(valStr);
    }
}
| 2,064 | 29.820896 | 108 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/TextFileOutput.java | package main.java.Utility;
import java.io.*;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class TextFileOutput {
    private BufferedWriter bw;
    private String outputFile;

    /**
     * Opens (and truncates) the given file for buffered text output.
     * Any failure is reported on stdout and terminates the JVM with status 1.
     * @param outputFileAddress path of the file to create
     */
    public TextFileOutput(String outputFileAddress){
        outputFile = outputFileAddress;
        try {
            bw = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile)));
        }catch(FileNotFoundException ex){
            // Keeps the historical "outputfile" (no space) spelling of this message.
            System.out.println("Error: there was an error in creating output file");
            System.out.println("outputfile: " + outputFile);
            ex.printStackTrace();
            System.exit(1);
        }
    }

    /** Writes the given text verbatim; errors abort the JVM. */
    public void write(String data){
        try{
            bw.write(data);
        }catch(IOException ex){
            abort(ex, "Error: there was an error in writing to output file");
        }
    }

    /** Writes the given text followed by the configured line terminator. */
    public void writeLine(String line){
        write(line + Config.outputNewLineCharacter);
    }

    /** Emits an empty line. */
    public void nextLine(){
        writeLine("");
    }

    /** Flushes buffered output; the stream is intentionally left open (see commented close). */
    public void close(){
        try{
            bw.flush();
            //bw.close();
        }catch(IOException ex){
            abort(ex, "Error: there was an error when closing the output file");
        }
    }

    // Shared error path: print diagnostics and terminate the JVM.
    private void abort(IOException ex, String headline){
        System.out.println(headline);
        System.out.println("output file: " + outputFile);
        ex.printStackTrace();
        System.exit(1);
    }

    protected void finalize(){
        this.close();
        try{
            super.finalize();
        } catch (Throwable ex){
            System.out.println("Error: unexpected error when finalizing an object of class TextFileOutput");
            ex.printStackTrace();
        }
    }
}
| 2,046 | 28.666667 | 108 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/MessagePrinter.java | package main.java.Utility;
public class MessagePrinter {
    /** Prints a single message line to stdout. */
    public static void Print (String msg) {
        System.out.println(msg);
    }
    /**
     * Prints the message followed by the command-line usage help, then terminates
     * the JVM with exit status 1.
     * @param msg error description shown before the help text
     */
    public static void PrintAndDie(String msg) {
        Print(msg);
        printHelpMessage();
        System.exit(1);
    }
    // Prints the command-line usage reference: run modes and every recognized flag.
    // The help text below is emitted verbatim; keep strings unchanged when editing.
    private static void printHelpMessage() {
        Print("");
        Print("Input arguments format:");
        Print("");
        Print("Specify the run mode using -graph or -typeprobability or -marginals . In graph mode the complete graph is created " +
                "and .graph file is produced. In typeprobability mode only graph nodes are loaded and only the .type2probability file is " +
                "produced. In marginals mode only graph nodes are loaded and .seed file is produced.");
        Print("");
        Print("\"-text [fileAddress]\" specifies the address of input text file");
        Print("");
        Print("\"-textU [fileAddress]\" specifies the address of unlabeled input text file. this command should " +
                "only be specified when graph mode is selected.");
        Print("");
        Print("\"-output [fileAddress]\" output file name format, information on graph is stored in files" +
                " starting with this name.");
        Print("");
        Print("\"-labels [fileAddress]\" labels input file. Should be specified when typeprobability run mode is selected.");
        Print("");
        Print("\"-marginals [fileAddress]\" locationToLabelProbability input file. Should be specified when typeprobability run mode is selected.");
        Print("");
        Print("\"-dictionary [fileAddress]\" optional dictionary input file of labeled words.");
        Print("");
        Print("\"-features [fileAddress]\" optional features input file. Features specified in this file will" +
                " be used to extract features from each ngram when calculating main.java.PMI values. If this file is not specified " +
                "default features will be used.");
        Print("");
        Print("\"-node [word | wordclass]\" optional graph node type input. If word is specified, nodes of graph are created based on words and " +
                "if wordclass is specified nodes are created based on word classes. Default option is: " + Config.graphNgramType);
        Print("");
        Print("\"-k [positive integer]\" optional K value input.");
        Print("");
        Print("\"-threshold [positive float]\" optional threshold value for edge weight. Edges having weight of less than " +
                "the specified value, will be filtered out (i.e. will not be added to graph)");
        Print("");
        Print("\"-classdic [fileAddress]\" optional dictionary input file. This string specifies address of the classes" +
                " dictionary file which must be given when isClass feature is defined in the features file.");
        Print("");
        Print("\"-prepositiondic [fileAddress]\" optional dictionary input file. This string specifies address of the prepositions" +
                " dictionary file which must be given when isPreposition feature is defined in the features file.");
        Print("");
    }
}
| 3,175 | 54.719298 | 148 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/Mathematics.java | package main.java.Utility;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class Mathematics {
    /**
     * Computes the base-2 logarithm via the base-10 change-of-base identity.
     * @param value the argument (0 yields -Infinity, negatives yield NaN)
     * @return log2(value)
     */
    public static double log2(double value){
        return Math.log10(value) / Math.log10(2);
    }
}
| 406 | 28.071429 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/TextFileInput.java | package main.java.Utility;
import java.io.*;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class TextFileInput {
    private BufferedReader br;
    private String inputFile;

    /**
     * Opens the given text file for buffered reading.
     * Any failure is reported on stdout and terminates the JVM with status 1.
     * @param inputFileAddress path of the file to read
     */
    public TextFileInput(String inputFileAddress){
        inputFile = inputFileAddress;
        try {
            br = new BufferedReader(new InputStreamReader(new FileInputStream(inputFile)));
        }catch(FileNotFoundException ex){
            abort(ex, "Error: there was an error in opening input file");
        }
    }

    /**
     * Reads the next line of the file.
     * @return the next line without its terminator, or null at end of file
     */
    public String readLine(){
        String line = "";
        try{
            line = br.readLine();
        }catch (IOException ex){
            abort(ex, "Error: there was an error in reading input file contents");
        }
        return line;
    }

    /** Closes the underlying reader; errors abort the JVM. */
    public void close(){
        try{
            br.close();
        }catch(IOException ex){
            abort(ex, "Error: there was an error when closing the input file");
        }
    }

    // Shared error path: print diagnostics and terminate the JVM.
    private void abort(Exception ex, String headline){
        System.out.println(headline);
        System.out.println("input file: " + inputFile);
        ex.printStackTrace();
        System.exit(1);
    }

    protected void finalize(){
        this.close();
        try{
            super.finalize();
        } catch (Throwable ex){
            System.out.println("Error: unexpected error when finalizing an object of class TextFileInput");
            ex.printStackTrace();
        }
    }
}
| 1,892 | 29.532258 | 107 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/DataTypeManipulator.java | package main.java.Utility;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class DataTypeManipulator {
public static float[] initializeFloatArray(float[] inputArray){
for(int index=0; index<inputArray.length ; ++index)
inputArray[index] = 0;
return inputArray;
}
public static float[] newInitializedFloatArray(int sizeOfArray){
float[] result = new float[sizeOfArray];
result = DataTypeManipulator.initializeFloatArray(result);
return result;
}
public static boolean[] newInitializedBooleanArray(int sizeOfArray){
boolean[] result = new boolean[sizeOfArray];
result = DataTypeManipulator.initializeBooleanArray(result);
return result;
}
private static boolean[] initializeBooleanArray(boolean[] inputArray) {
for(int index=0; index<inputArray.length ; ++index)
inputArray[index] = false;
return inputArray;
}
}
| 1,166 | 33.323529 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/LocationToLabelFileHandler.java | package main.java.Utility;
import main.java.Graph.GraphStructure.Location;
import main.java.Graph.GraphStructure.LocationLabelProbability;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class LocationToLabelFileHandler {
    // Forward-only reader over the location-to-label-probability file.
    TextFileInput fileInput;
    // Cursor: the location the reader is currently positioned at (zero-based indexes).
    int currentSequence, currentPosition;
    // Last record parsed from the file; it may already belong to the *next* location,
    // in which case it is replayed at the start of the following call.
    LocationLabelProbability lastLocation;
    /**
     * Create a new instance of LocationToLabel file reader based on a given input file
     * @param fileAddress input file to work on
     */
    public LocationToLabelFileHandler(String fileAddress){
        fileInput = new TextFileInput(fileAddress);
        currentSequence = currentPosition = 0;
        lastLocation = null;
    }
    /**
     * Use this method to read all label probabilities associated with a specific location.</br>
     * note:currently this method can only be used to read probabilities in forward reading order. This means if you're
     * trying to read probabilities for location Y while you have previously read information for location X (and Y was
     * seen before X), you are not able to read information for location Y.
     * @param sequence identifier of the specified sequence (zero-based index)
     * @param position identifier of the specified position occurring in a sequence (zero-based index)
     * @param labelCount number of labels available
     * @return an array of float[labelCount] containing all label probabilities associated with the specified location,
     *         or null when the requested location was already passed (forward-only constraint)
     */
    public float[] getLabelProbabilitiesOf(int sequence, int position, int labelCount){
        // Forward-only: once the cursor has moved beyond the requested location we cannot rewind.
        if(isLocationPassed(sequence, position))
            return null;
        String line;
        float[] probabilityArray = DataTypeManipulator.newInitializedFloatArray(labelCount);
        boolean locationReached = false;
        if(currentSequence == sequence && currentPosition == position){
            locationReached = true;
            //use last data read in previous call of the method
            if(this.lastLocation != null){
                probabilityArray[this.lastLocation.getLabelId()] = this.lastLocation.getLabelProbability();
            }
        }
        while ((line = fileInput.readLine()) != null){
            lastLocation = Location.extractLocationFromString(line);
            if(lastLocation != null){
                // Skip forward until the first record of the requested location shows up.
                if(!locationReached && isLocationMatch(sequence, position)){
                    locationReached = true;
                }
                if(locationReached){
                    // A record for a *different* location marks the end of the requested one;
                    // remember it as the new cursor and stop (it is replayed on the next call).
                    if(!isLocationMatch(sequence,position)){
                        currentSequence = this.lastLocation.getSequence();
                        currentPosition = this.lastLocation.getPosition();
                        break;
                    }
                    probabilityArray[lastLocation.getLabelId()] = lastLocation.getLabelProbability();
                }
            }
        }
        return probabilityArray;
    }
    /**
     * given a location determines if the given location was passed during the previous iteration
     * @param sequence identifier of the specified sequence (zero-based index)
     * @param position identifier of the specified position in a given sequence (zero-based index)
     * @return true if the given location was passed in previous iterations
     */
    private boolean isLocationPassed(int sequence, int position) {
        return sequence < this.currentSequence || (sequence==this.currentSequence && position<this.currentPosition);
    }
    /**
     * given a location determines if the given location matches the current location or not
     * @param sequence identifier of the specified sequence (zero-based index)
     * @param position identifier of the specified position in a given sequence (zero-based index)
     * @return true if the current location is the same as given location
     */
    private boolean isLocationMatch(int sequence, int position) {
        return lastLocation.getSequence() == sequence && lastLocation.getPosition() == position;
    }
    /**
     * use this method to close the input file after work is finished
     */
    public void closeFile(){
        fileInput.close();
    }
    // Safety net: ensure the input file is released when the handler is collected.
    protected void finalize(){
        this.closeFile();
    }
}
| 4,440 | 41.701923 | 119 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/Config.java | package main.java.Utility;
import main.java.Graph.Builder.GraphBuilderFactory;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class Config {
    // Number of worker threads the multi-threaded graph builder spawns per pass.
    public static final int graphBuilderThreadCount = 8;
    // Line terminator used for every produced file.
    public static final String outputNewLineCharacter = "\r\n";
    // Field separator used in produced files.
    public static final String outputDelimiter = " ";
    public static final String defaultLogFileAddress = "graphConstruct.log";
    // When true, pmiSmoothingEpsilon is used to smooth PMI computation (avoids log(0)).
    public static final Boolean pmiSmoothing = true;
    public static final float pmiSmoothingEpsilon = (float) 0.0000001;
    // Dummy token used for the padding positions around a sentence during n-gram extraction.
    public static final String packageOutputDummyValue = "0";
    // K of the KNN graph; mutable only through setKnnDefaultSize, which keeps it positive.
    private static int knnDefaultSize = 5;
    // True when the input corpus carries POS-style annotations.
    public static boolean POSstyleInput = false;
    // Which pipeline stage to run; see RunModeType.
    public static RunModeType runMode = RunModeType.Graph;
    // Whether graph nodes are built from words or from word classes.
    public static GraphBuilderFactory.GraphNgramType graphNgramType = GraphBuilderFactory.GraphNgramType.WordClass;
    // Edges lighter than this weight are filtered out of the graph.
    public static float edgeWeightThreshold = 0;
    /** @return the current K used when converting the graph to KNN form */
    public static int getKnnDefaultSize() {
        return knnDefaultSize;
    }
    /**
     * Sets the K used for the KNN graph; terminates the program when the value is not positive.
     * @param knnDefaultSize new K value (must be > 0)
     */
    public static void setKnnDefaultSize(int knnDefaultSize) {
        if (knnDefaultSize <= 0)
            MessagePrinter.PrintAndDie("K value (as in KNN-graph) must be a positive integer! K=" + knnDefaultSize);
        else
            Config.knnDefaultSize = knnDefaultSize;
    }
    /** Pipeline run modes: full graph build, type-probability export, or empirical seed export. */
    public enum RunModeType{
        Graph, TypeProbability, EmpiricalTypeProbability
    }
}
| 1,578 | 35.72093 | 116 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/src/main/java/Utility/LabelFileHandler.java | package main.java.Utility;
/**
* Copyright: Masoud Kiaeeha, Mohammad Aliannejadi
* This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
* International License. To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc/4.0/.
*/
public class LabelFileHandler {
    /**
     * Counts the non-blank lines of the given file; each such line is assumed to
     * declare one label.
     * @param fileAddress path of the labels file
     * @return number of non-blank lines found
     */
    public static int countLabels(String fileAddress){
        TextFileInput fileInput = new TextFileInput(fileAddress);
        int count = 0;
        for (String line = fileInput.readLine(); line != null; line = fileInput.readLine()){
            if (!line.trim().isEmpty())
                ++count;
        }
        fileInput.close();
        return count;
    }
}
| 688 | 27.708333 | 81 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/tests/main/java/TextToNgram/NgramUtilityTest.java | package main.java.TextToNgram;
import main.java.Utility.Config;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Unit tests for {@code NgramUtility.extractNgramsFromSentence(String, int)} covering
 * the empty-sentence case and the dummy-token padding behavior for unigrams and bigrams.
 * Fix applied throughout: JUnit's {@code assertEquals(expected, actual)} had its
 * arguments reversed, which produced misleading failure messages; the expected value
 * is now passed first. Pass/fail semantics are unchanged.
 */
public class NgramUtilityTest {
    NgramUtility ngramUtility;
    @Before
    public void setUp() throws Exception{
        ngramUtility = new NgramUtility();
    }
    @Test
    public void testExtractNgramsFromSentenceForUnigramsNullString() throws Exception {
        // An empty sentence yields no n-grams at all.
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("", 1);
        assertEquals(null, ngramSet);
    }
    @Test
    public void testExtractNgramsFromSentenceForUnigrams1Words() throws Exception {
        // A single word is padded with one dummy token on each side.
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("19", 1);
        assertEquals(3, ngramSet.length);
        assertEquals(Config.packageOutputDummyValue, ngramSet[0].getMemberValue(0));
        assertEquals(Config.packageOutputDummyValue, ngramSet[2].getMemberValue(0));
        assertEquals("19", ngramSet[1].getMemberValue(0));
    }
    @Test
    public void testExtractNgramsFromSentenceForUnigrams2Words() throws Exception {
        // Two words keep their order between the two dummy padding tokens.
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("6 19", 1);
        assertEquals(4, ngramSet.length);
        assertEquals(Config.packageOutputDummyValue, ngramSet[0].getMemberValue(0));
        assertEquals(Config.packageOutputDummyValue, ngramSet[3].getMemberValue(0));
        assertEquals("6", ngramSet[1].getMemberValue(0));
        assertEquals("19", ngramSet[2].getMemberValue(0));
    }
    @Test
    public void testExtractNgramsFromSentenceForBigramsNullString() throws Exception {
        // An empty sentence yields no bigrams either.
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("", 2);
        assertEquals(null, ngramSet);
    }
    @Test
    public void testExtractNgramsFromSentenceForBigram1Words() throws Exception {
        // One word with bigram padding produces four bigrams.
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("19", 2);
        assertEquals(4, ngramSet.length);
        assertTrue(ngramSet[0].equals(new NgramContainer(new String[]{"0", "0"})));
        assertTrue(ngramSet[1].equals(new NgramContainer(new String[]{"0", "19"})));
        assertTrue(ngramSet[2].equals(new NgramContainer(new String[]{"19", "0"})));
        assertTrue(ngramSet[3].equals(new NgramContainer(new String[]{"0", "0"})));
    }
    @Test
    public void testExtractNgramsFromSentenceForBigram2Words() throws Exception {
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("6 19", 2);
        assertEquals(5, ngramSet.length);
        assertTrue(ngramSet[0].equals(new NgramContainer(new String[]{"0", "0"})));
        assertTrue(ngramSet[1].equals(new NgramContainer(new String[]{"0", "6"})));
        assertTrue(ngramSet[2].equals(new NgramContainer(new String[]{"6", "19"})));
        assertTrue(ngramSet[3].equals(new NgramContainer(new String[]{"19", "0"})));
        assertTrue(ngramSet[4].equals(new NgramContainer(new String[]{"0", "0"})));
    }
    @Test
    public void testExtractNgramsFromSentenceForBigram3Words() throws Exception {
        NgramContainer[] ngramSet = ngramUtility.extractNgramsFromSentence("6 19 54", 2);
        assertEquals(6, ngramSet.length);
        assertTrue(ngramSet[0].equals(new NgramContainer(new String[]{"0", "0"})));
        assertTrue(ngramSet[1].equals(new NgramContainer(new String[]{"0", "6"})));
        assertTrue(ngramSet[2].equals(new NgramContainer(new String[]{"6", "19"})));
        assertTrue(ngramSet[3].equals(new NgramContainer(new String[]{"19", "54"})));
        assertTrue(ngramSet[4].equals(new NgramContainer(new String[]{"54", "0"})));
        assertTrue(ngramSet[5].equals(new NgramContainer(new String[]{"0", "0"})));
    }
}
| 4,062 | 41.322917 | 89 | java |
g-ssl-crf | g-ssl-crf-master/src/GraphConstruct/tests/main/java/TextToNgram/NgramContainerTest.java | package main.java.TextToNgram;
import main.java.PMI.FeatureHandler;
import main.java.Text.WordDictionary;
import main.java.Utility.Config;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Created with IntelliJ IDEA.
* User: masouD
* Date: 1/22/14
* Time: 6:52 PM
* To change this template use File | Settings | File Templates.
*/
public class NgramContainerTest {
    // Fixture shared by most tests; rebuilt before each test in setUp().
    private NgramContainer ngram;

    @Before
    public void setUp() throws Exception {
        ngram = new NgramContainer(new String[] {"first", "second", "third"});
    }

    @Test
    public void testGetSize() throws Exception {
        // assertEquals takes (expected, actual); the original had them swapped
        // throughout this class, which produces misleading failure messages.
        assertEquals(3, ngram.getSize());
    }

    @Test
    public void testSetMemberValue() throws Exception {
        ngram.setMemberValue(0, "bar");
        assertEquals("bar", ngram.getMemberValue(0));
    }

    @Test
    public void testGetMemberValue() throws Exception {
        // Out-of-range indices must throw; reaching assertFalse(true) means
        // no exception was raised, so the test fails.
        try {
            ngram.getMemberValue(-1);
            assertFalse(true); // should have thrown
        } catch (Exception expected) {
            // expected: negative index rejected
        }
        try {
            ngram.getMemberValue(100);
            assertFalse(true); // should have thrown
        } catch (Exception expected) {
            // expected: index past the end rejected
        }
    }

    @Test
    public void testGetCenterIndex() throws Exception {
        // TODO: not yet implemented in the original suite.
    }

    @Test
    public void testGetCenterValue() throws Exception {
        // TODO: not yet implemented in the original suite.
    }

    @Test
    public void testEquals() throws Exception {
        //todo:write more tests here
        NgramContainer secondNgram = new NgramContainer(new String[] {"first", "second", "third"});
        assertTrue(ngram.equals(secondNgram));
        secondNgram.setMemberValue(0, "bar");
        assertFalse(ngram.equals(secondNgram));
        secondNgram = new NgramContainer(new String[] {"first", "second"});
        assertFalse(ngram.equals(secondNgram));
    }

    @Test
    public void testEqualsWithUnequalLengths() throws Exception {
        NgramContainer secondNgram = new NgramContainer(new String[] {"first", "second"});
        assertFalse(ngram.equals(secondNgram));
    }

    @Test
    public void testEqualsWithTemplate() throws Exception {
        // The null-token identifier behaves as a wildcard position in
        // template comparison.
        NgramContainer secondNgram = new NgramContainer(new String[] {"first",
                FeatureHandler.nullTokenIdentifier, FeatureHandler.nullTokenIdentifier});
        assertTrue(ngram.equalsWithTemplate(secondNgram));
        secondNgram.setMemberValue(2, "first");
        assertFalse(ngram.equalsWithTemplate(secondNgram));
        secondNgram.setMemberValue(2, "third");
        assertTrue(ngram.equalsWithTemplate(secondNgram));
    }

    @Test
    public void testEqualsWithTemplateWithUnequalLengths() throws Exception {
        NgramContainer secondNgram = new NgramContainer(new String[] {"first", "second"});
        assertFalse(ngram.equalsWithTemplate(secondNgram));
    }

    @Test
    public void testHasMember() throws Exception {
        // "ThIRd" must match "third": membership is case-insensitive.
        assertTrue(ngram.hasMember("second"));
        assertTrue(ngram.hasMember("ThIRd"));
        assertFalse(ngram.hasMember("malmal"));
        assertFalse(ngram.hasMember(""));
    }

    @Test
    public void testSerialize() throws Exception {
        assertEquals("first,second,third", ngram.serialize());
    }

    @Test
    public void testIsBeginningOfLine() throws Exception {
        assertFalse(ngram.isBeginningOfLine());
        ngram.setMemberValue(0, Config.packageOutputDummyValue);
        assertTrue(ngram.isBeginningOfLine());
    }

    @Test
    public void testGetWordSet() throws Exception {
        NgramContainer ngram = new NgramContainer(new String[] {"1", "2", "4"});
        WordDictionary dictionary = getSampleWordDictionary();
        assertEquals("( first second fourth )", ngram.getWordSet(dictionary));
    }

    // Builds the small id -> word dictionary used by the dictionary tests.
    private WordDictionary getSampleWordDictionary() {
        WordDictionary dictionary = new WordDictionary();
        dictionary.addEntry("1", "first");
        dictionary.addEntry("2", "second");
        dictionary.addEntry("4", "fourth");
        return dictionary;
    }

    @Test
    public void testIsMemberOfDictionary() throws Exception {
        NgramContainer ngram = new NgramContainer(new String[] {"1", "2", "4"});
        WordDictionary dictionary = getSampleWordDictionary();
        assertTrue(ngram.isMemberOfDictionary(dictionary));
        ngram.setMemberValue(1, "54");
        assertFalse(ngram.isMemberOfDictionary(dictionary));
    }

    @Test
    public void testGetRightPart() throws Exception {
        NgramContainer actualRightPart = new NgramContainer(new String[] {"second", "third"});
        assertTrue(ngram.getRightPart().equals(actualRightPart));
    }

    @Test
    public void testGetLeftPart() throws Exception {
        NgramContainer actualLeftPart = new NgramContainer(new String[] {"first", "second"});
        assertTrue(ngram.getLeftPart().equals(actualLeftPart));
    }
}
| 4,926 | 29.226994 | 99 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/config/Flags.java | package upenn.junto.config;
/**
* Copyright 2011 Partha Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Predicates for interpreting configuration-flag values (run mode and
 * vertex-name prefixes).
 */
public class Flags {

  /** Returns true iff the run mode is "original". */
  public static boolean IsOriginalMode(String mode) {
    // The comparison already yields a boolean; the `? true : false`
    // ternary in the original was redundant.
    return mode.equals("original");
  }

  /** Returns true iff the run mode is "modified". */
  public static boolean IsModifiedMode(String mode) {
    return mode.equals("modified");
  }

  /** Returns true iff the vertex name carries the column-node "C#" prefix. */
  public static boolean IsColumnNode(String nodeName) {
    return nodeName.startsWith("C#");
  }
}
| 1,022 | 26.648649 | 75 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/config/ConfigReader.java | package upenn.junto.config;
import upenn.junto.util.MessagePrinter;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Hashtable;
import java.util.StringTokenizer;
public class ConfigReader {

    /**
     * Reads a "key = value" config file into a fresh table.
     *
     * @param fName path of the config file
     * @return table of parsed key/value pairs
     */
    public static Hashtable<String,String> read_config(String fName) {
        Hashtable<String,String> retval = new Hashtable<String,String>(50);
        return (read_config(retval, fName));
    }

    /**
     * Parses fName line by line into retval. Each line is tokenized on
     * whitespace; a line contributes a pair only when it consists of exactly
     * three tokens ("key = value") and its first token does not start with
     * '#'. Every line read is echoed to stdout, as before. IO errors are
     * printed and the (possibly partial) table is returned.
     */
    public static Hashtable<String,String> read_config(Hashtable<String,String> retval, String fName) {
        // try-with-resources closes the reader even on error; the original
        // leaked the stream whenever an exception interrupted the read loop
        // (only the FileInputStream was closed, and only on success).
        try (BufferedReader br = new BufferedReader(
                 new InputStreamReader(new FileInputStream(fName)))) {
            String line;
            while ((line = br.readLine()) != null) {
                System.out.println(line);
                StringTokenizer st = new StringTokenizer(line);
                String key = "";
                String value = "";
                int i = 0;
                boolean noComment = true;
                while (noComment && st.hasMoreTokens()) {
                    String t = st.nextToken();
                    if (i == 0) {
                        if (t.startsWith("#"))
                            noComment = false;
                        else
                            key = t;
                    } else if (i == 2) {
                        value = t;
                    }
                    i++;
                }
                // exactly (( key = value )): record the pair
                if (i == 3) {
                    retval.put(key, value);
                }
            }
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
        return retval;
    }

    /**
     * Reads args[0] as a config file, then applies "key=value" overrides from
     * the remaining arguments; an argument with an empty value ("key=")
     * removes the key instead.
     */
    public static Hashtable<String,String> read_config(String[] args) {
        Hashtable<String,String> retVal = read_config(args[0]);
        for (int ai = 1; ai < args.length; ++ai) {
            String[] parts = args[ai].split("=");
            if (parts.length == 2 && parts[1].length() > 0) {
                System.out.println(parts[0] + " = " + parts[1]);
                retVal.put(parts[0], parts[1]);
            } else {
                retVal.remove(parts[0]);
                MessagePrinter.Print("Removing argument: " + parts[0] + "\n");
            }
        }
        return (retVal);
    }
}
| 2,290 | 23.634409 | 103 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/eval/GraphEval.java | package upenn.junto.eval;
import upenn.junto.graph.Graph;
import upenn.junto.graph.Vertex;
import java.util.Iterator;
public class GraphEval {

  /**
   * Fraction of test vertices whose MRR is exactly 1, i.e. whose gold label
   * is top-ranked. Returns NaN when the graph has no test vertices.
   */
  public static double GetAccuracy(Graph g) {
    int correct_doc_cnt = 0;
    int total_doc_cnt = 0;
    Iterator<String> vIter = g.vertices().keySet().iterator();
    while (vIter.hasNext()) {
      Vertex v = g.vertices().get(vIter.next());
      if (v.isTestNode()) {
        ++total_doc_cnt;
        if (v.GetMRR() == 1) {
          ++correct_doc_cnt;
        }
      }
    }
    return ((1.0 * correct_doc_cnt) / total_doc_cnt);
  }

  /** Mean MRR over test vertices (NaN when there are none). */
  public static double GetAverageTestMRR(Graph g) {
    double[] sumAndCount = SumMRR(g, true);
    return (sumAndCount[0] / sumAndCount[1]);
  }

  /** Mean MRR over seed (training) vertices (NaN when there are none). */
  public static double GetAverageTrainMRR(Graph g) {
    double[] sumAndCount = SumMRR(g, false);
    return (sumAndCount[0] / sumAndCount[1]);
  }

  /**
   * Sums MRR over one vertex subset. The original duplicated this loop
   * verbatim in GetAverageTestMRR and GetAverageTrainMRR.
   *
   * @param overTestNodes true: sum over test vertices; false: seed vertices
   * @return two-element array {mrrSum, vertexCount}
   */
  private static double[] SumMRR(Graph g, boolean overTestNodes) {
    double mrrSum = 0;
    int count = 0;
    Iterator<String> vIter = g.vertices().keySet().iterator();
    while (vIter.hasNext()) {
      Vertex v = g.vertices().get(vIter.next());
      boolean selected = overTestNodes ? v.isTestNode() : v.isSeedNode();
      if (selected) {
        mrrSum += v.GetMRR();
        ++count;
      }
    }
    return new double[] {mrrSum, count};
  }

  /** Root mean squared error over test vertices (NaN when there are none). */
  public static double GetRMSE(Graph g) {
    double totalMSE = 0;
    int totalCount = 0;
    Iterator<String> vIter = g.vertices().keySet().iterator();
    while (vIter.hasNext()) {
      Vertex v = g.vertices().get(vIter.next());
      if (v.isTestNode()) {
        totalMSE += v.GetMSE();
        ++totalCount;
      }
    }
    return (Math.sqrt((1.0 * totalMSE) / totalCount));
  }
}
| 2,246 | 23.423913 | 83 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/RyanAlphabet.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:mccallum@cs.umass.edu">mccallum@cs.umass.edu</a>
*/
package upenn.junto.util;
import java.util.ArrayList;
import java.io.*;
import java.util.*;
import gnu.trove.map.hash.TObjectIntHashMap;
public class RyanAlphabet implements Serializable {
    // Bidirectional mapping between entry objects and dense integer indices:
    // `map` gives entry -> index, `entries` gives index -> entry.
    TObjectIntHashMap map;
    ArrayList entries;
    // Once true, lookupIndex stops adding unseen entries.
    boolean growthStopped = false;
    // Runtime class all entries must share; fixed by the first entry added.
    Class entryClass = null;

    public RyanAlphabet(int capacity, Class entryClass) {
        this.map = new TObjectIntHashMap(capacity);
        this.entries = new ArrayList(capacity);
        this.entryClass = entryClass;
    }

    public RyanAlphabet(Class entryClass) {
        this(8, entryClass);
    }

    public RyanAlphabet(int capacity) {
        this(capacity, null);
    }

    public RyanAlphabet() {
        this(8, null);
    }

    // Shallow copy: the map and entry list are duplicated, but the entry
    // objects themselves are shared with the original.
    public Object clone() {
        //try {
        // Wastes effort, because we over-write ivars we create
        RyanAlphabet ret = new RyanAlphabet();
        ret.map = new TObjectIntHashMap(map);
        ret.entries = (ArrayList) entries.clone();
        ret.growthStopped = growthStopped;
        ret.entryClass = entryClass;
        return ret;
        //} catch (CloneNotSupportedException e) {
        //e.printStackTrace();
        //throw new IllegalStateException ("Couldn't clone InstanceList Vocabulary");
        //}
    }

    /** Return -1 if entry isn't present. */
    // NOTE(review): with Trove's default no-entry value, map.get() returns 0
    // (not -1) for an absent key, so an unseen entry looked up while growth
    // is stopped appears to yield 0 — verify against the Trove version in use.
    public int lookupIndex(Object entry, boolean addIfNotPresent) {
        if (entry == null)
            throw new IllegalArgumentException(
                    "Can't lookup \"null\" in an RyanAlphabet.");
        if (entryClass == null)
            entryClass = entry.getClass();
        else
            // Insist that all entries in the RyanAlphabet are of the same
            // class. This may not be strictly necessary, but will catch a
            // bunch of easily-made errors.
            if (entry.getClass() != entryClass)
                throw new IllegalArgumentException("Non-matching entry class, "
                        + entry.getClass() + ", was " + entryClass);
        int ret = map.get(entry);
        // New entries receive the next dense index (current list size).
        if (!map.containsKey(entry) && !growthStopped && addIfNotPresent) {
            //xxxx: not necessary, fangfang, Aug. 2003
            // if (entry instanceof String)
            // entry = ((String)entry).intern();
            ret = entries.size();
            map.put(entry, entries.size());
            entries.add(entry);
        }
        return ret;
    }

    public int lookupIndex(Object entry) {
        return lookupIndex(entry, true);
    }

    // Reverse lookup: index -> entry object.
    public Object lookupObject(int index) {
        return entries.get(index);
    }

    public Object[] toArray() {
        return entries.toArray();
    }

    // xxx This should disable the iterator's remove method...
    public Iterator iterator() {
        return entries.iterator();
    }

    // Batch reverse lookup of several indices at once.
    public Object[] lookupObjects(int[] indices) {
        Object[] ret = new Object[indices.length];
        for (int i = 0; i < indices.length; i++)
            ret[i] = entries.get(indices[i]);
        return ret;
    }

    // Batch forward lookup; optionally grows the alphabet.
    public int[] lookupIndices(Object[] objects, boolean addIfNotPresent) {
        int[] ret = new int[objects.length];
        for (int i = 0; i < objects.length; i++)
            ret[i] = lookupIndex(objects[i], addIfNotPresent);
        return ret;
    }

    // NOTE(review): delegates to Trove's THash.contains — confirm it tests
    // key membership (containsKey would be the unambiguous choice).
    public boolean contains(Object entry) {
        return map.contains(entry);
    }

    public int size() {
        return entries.size();
    }

    // Freeze the alphabet: subsequent lookups never add entries.
    public void stopGrowth() {
        growthStopped = true;
    }

    public void allowGrowth() {
        growthStopped = false;
    }

    public boolean growthStopped() {
        return growthStopped;
    }

    public Class entryClass() {
        return entryClass;
    }

    /** Return String representation of all RyanAlphabet entries, each
     separated by a newline. */
    public String toString() {
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < entries.size(); i++) {
            sb.append(entries.get(i).toString());
            sb.append('\n');
        }
        return sb.toString();
    }

    // Writes "index => entry" lines to stdout.
    public void dump() {
        dump(System.out);
    }

    public void dump(PrintStream out) {
        for (int i = 0; i < entries.size(); i++) {
            out.println(i + " => " + entries.get(i));
        }
    }

    // Writes "entry<TAB>index" lines to outputFile; IO errors are printed
    // and otherwise ignored.
    public void dump(String outputFile) {
        try {
            BufferedWriter bwr = new BufferedWriter(new FileWriter(outputFile));
            for (int i = 0; i < entries.size(); i++) {
                bwr.write(entries.get(i) + "\t" + map.get(entries.get(i)) + "\n");
            }
            bwr.close();
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }

    // Serialization
    private static final long serialVersionUID = 1;
    private static final int CURRENT_SERIAL_VERSION = 0;

    // Custom wire format: version, entry count, the entries in index order,
    // the growth flag, then the entry class. readObject must mirror this
    // order exactly.
    private void writeObject(ObjectOutputStream out) throws IOException {
        out.writeInt(CURRENT_SERIAL_VERSION);
        out.writeInt(entries.size());
        for (int i = 0; i < entries.size(); i++)
            out.writeObject(entries.get(i));
        out.writeBoolean(growthStopped);
        out.writeObject(entryClass);
    }

    private void readObject(ObjectInputStream in) throws IOException,
            ClassNotFoundException {
        int version = in.readInt();
        int size = in.readInt();
        entries = new ArrayList(size);
        map = new TObjectIntHashMap(size);
        // Rebuild the entry -> index map from the serialized entry order.
        for (int i = 0; i < size; i++) {
            Object o = in.readObject();
            map.put(o, i);
            entries.add(o);
        }
        growthStopped = in.readBoolean();
        entryClass = (Class) in.readObject();
    }

    // public String toString()
    // {
    // return Arrays.toString(map.keys());
    //}
}
| 5,804 | 26.77512 | 89 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/Constants.java | package upenn.junto.util;
import gnu.trove.map.hash.TObjectDoubleHashMap;
public class Constants {

  // Key strings for the continue / injection / termination probabilities.
  public static String _kContProb = "cont_prob";
  public static String _kInjProb = "inj_prob";
  public static String _kTermProb = "term_prob";

  /** Small additive constant used to keep ratios and logs finite. */
  public static double GetSmallConstant() {
    return 1e-12;
  }

  /** Placeholder label used when no real label applies. */
  public static String GetDummyLabel() {
    return "__DUMMY__";
  }

  /** Name prefix for document vertices. */
  public static String GetDocPrefix() {
    return "DOC_";
  }

  /** Name prefix for feature (column) vertices. */
  public static String GetFeatPrefix() {
    // previously "FEAT_"
    return "C#";
  }

  /** Identifier of the precision metric. */
  public static String GetPrecisionString() {
    return "precision";
  }

  /** Identifier of the MRR metric. */
  public static String GetMRRString() {
    return "mrr";
  }

  /** Identifier of the MDMBRR metric. */
  public static String GetMDBRRString() {
    return "mdmbrr";
  }

  /** Convergence threshold for iterative updates. */
  public static double GetStoppingThreshold() {
    return 0.001;
  }

  /** Distribution placing all mass (1.0) on the dummy label. */
  public static TObjectDoubleHashMap GetDummyLabelDist() {
    TObjectDoubleHashMap dist = new TObjectDoubleHashMap();
    dist.put(GetDummyLabel(), 1.0);
    return dist;
  }
}
| 1,020 | 19.019608 | 58 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/Defaults.java | package upenn.junto.util;
import java.util.Hashtable;
public class Defaults {

  /**
   * Returns the value stored under key in config; when the key is absent the
   * program is terminated via MessagePrinter.PrintAndDie.
   */
  public static String GetValueOrDie(Hashtable config, String key) {
    if (!config.containsKey(key)) {
      MessagePrinter.PrintAndDie("Must specify " + key + "");
    }
    return (String) config.get(key);
  }

  /** Returns valStr itself when non-null, otherwise defaultVal. */
  public static String GetValueOrDefault(String valStr, String defaultVal) {
    return (valStr != null) ? valStr : defaultVal;
  }

  /** Parses valStr as a double when non-null, otherwise returns defaultVal. */
  public static double GetValueOrDefault(String valStr, double defaultVal) {
    return (valStr != null) ? Double.parseDouble(valStr) : defaultVal;
  }

  /** Parses valStr as a boolean when non-null, otherwise returns defaultVal. */
  public static boolean GetValueOrDefault(String valStr, boolean defaultVal) {
    return (valStr != null) ? Boolean.parseBoolean(valStr) : defaultVal;
  }

  /** Parses valStr as an int when non-null, otherwise returns defaultVal. */
  public static int GetValueOrDefault(String valStr, int defaultVal) {
    return (valStr != null) ? Integer.parseInt(valStr) : defaultVal;
  }
}
| 1,079 | 21.978723 | 78 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/IoUtil.java | package upenn.junto.util;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class IoUtil {
  private static Logger logger = LogManager.getLogger(IoUtil.class);

  /**
   * Loads the distinct lines of fileName, preserving first-seen order.
   * IO failures are rethrown as RuntimeException.
   */
  public static ArrayList<String> LoadFile(String fileName) {
    ArrayList<String> retList = new ArrayList<String>();
    // HashSet membership test replaces the original O(n) list scan per line
    // (which made the whole load O(n^2)).
    HashSet<String> seen = new HashSet<String>();
    // try-with-resources: the original never closed the reader.
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        if (seen.add(line)) {
          retList.add(line);
        }
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    logger.info("Total " + retList.size() +
                " entries loaded from " + fileName);
    return (retList);
  }

  /**
   * Loads the distinct first (tab-separated) fields of fileName, preserving
   * first-seen order.
   */
  public static ArrayList<String> LoadFirstFieldFile(String fileName) {
    ArrayList<String> retList = new ArrayList<String>();
    HashSet<String> seen = new HashSet<String>();
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        String[] fields = line.split("\t");
        if (seen.add(fields[0])) {
          retList.add(fields[0]);
        }
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    logger.info("Total " + retList.size() +
                " entries loaded from " + fileName);
    return (retList);
  }

  /**
   * Loads an alphabet from "entry<TAB>index" lines; asserts (when -ea) that
   * the assigned indices match the stored ones.
   */
  public static RyanAlphabet LoadAlphabet(String fileName) {
    RyanAlphabet retAlpha = new RyanAlphabet();
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        String[] fields = line.split("\t");
        retAlpha.lookupIndex(fields[0], true);
        assert (retAlpha.lookupIndex(fields[0]) == Integer.parseInt(fields[1]));
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    logger.info("Total " + retAlpha.size() +
                " entries loaded from " + fileName);
    return (retAlpha);
  }
}
| 2,189 | 28.2 | 80 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/MessagePrinter.java | package upenn.junto.util;
public class MessagePrinter {

  /** Writes msg followed by a single '\n' to standard output. */
  public static void Print(String msg) {
    System.out.print(msg + "\n");
  }

  /**
   * Writes msg followed by a blank line to standard output, then terminates
   * the JVM with exit status 1. Never returns.
   */
  public static void PrintAndDie(String msg) {
    System.out.println(msg + "\n");
    System.exit(1);
  }
}
| 250 | 16.928571 | 46 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/ObjectDoublePair.java | package upenn.junto.util;
/**
* Used, e.g., to keep track of an Object and its associated score.
*/
public class ObjectDoublePair {
  // Pairs an arbitrary label object with its numeric score; both are fixed
  // at construction (no setters are provided).
  private Object label_;
  private double score_;

  public ObjectDoublePair(Object l, double s) {
    label_ = l;
    score_ = s;
  }

  /** The label half of the pair. */
  public Object GetLabel() {
    return label_;
  }

  /** The score half of the pair. */
  public double GetScore() {
    return score_;
  }
}
| 392 | 16.086957 | 67 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/CollectionUtil.java | package upenn.junto.util;
import gnu.trove.iterator.TObjectDoubleIterator;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Hashtable;
import java.util.Iterator;
public class CollectionUtil {

  /**
   * Returns the entries of m as (key, value) pairs sorted by value in
   * descending order.
   */
  public static ArrayList<ObjectDoublePair> ReverseSortMap(TObjectDoubleHashMap m) {
    ArrayList<ObjectDoublePair> lsps = new ArrayList<ObjectDoublePair>();
    TObjectDoubleIterator mi = m.iterator();
    while (mi.hasNext()) {
      mi.advance();
      lsps.add(new ObjectDoublePair(mi.key(), mi.value()));
    }
    ObjectDoublePairComparator lspComparator = new ObjectDoublePairComparator();
    Collections.sort(lsps, lspComparator);
    return (lsps);
  }

  // Orders pairs by descending score.
  protected static class ObjectDoublePairComparator implements Comparator<ObjectDoublePair> {
    public int compare(ObjectDoublePair p1, ObjectDoublePair p2) {
      double diff = p2.GetScore() - p1.GetScore();
      return (diff > 0 ? 1 : (diff < 0 ? -1 : 0));
    }
  }

  /** Parses "key1 v1 key2 v2 ..." into a fresh score map. */
  public static TObjectDoubleHashMap String2Map(String inp) {
    return (String2Map(null, inp));
  }

  /**
   * Parses "key1 v1 key2 v2 ..." (space separated, alternating key and
   * value) into retMap; a fresh map is allocated when retMap is null.
   */
  public static TObjectDoubleHashMap String2Map(TObjectDoubleHashMap retMap,
                                                String inp) {
    if (retMap == null) {
      retMap = new TObjectDoubleHashMap();
    }
    if (inp.length() > 0) {
      String[] fields = inp.split(" ");
      for (int i = 0; i < fields.length; i += 2) {
        retMap.put(fields[i], Double.parseDouble(fields[i + 1]));
      }
    }
    return (retMap);
  }

  public static String Map2String(TObjectDoubleHashMap m) {
    return (Map2String(m, null));
  }

  /**
   * Serializes m as "label1 score1 label2 score2 ..." in descending score
   * order. When an alphabet is supplied, labels that parse as integers are
   * translated back to their alphabet entries.
   */
  public static String Map2String(TObjectDoubleHashMap m, RyanAlphabet a) {
    // StringBuilder replaces the original's repeated String concatenation
    // (O(n^2) in output length); an unused local iterator was also dropped.
    StringBuilder retString = new StringBuilder();
    ArrayList<ObjectDoublePair> sortedMap = ReverseSortMap(m);
    int n = sortedMap.size();
    for (int i = 0; i < n; ++i) {
      String label = (String) sortedMap.get(i).GetLabel();
      if (a != null) {
        Integer li = String2Integer(label);
        if (li != null) {
          label = (String) a.lookupObject(li.intValue());
        }
      }
      retString.append(" ").append(label).append(" ").append(sortedMap.get(i).GetScore());
    }
    return retString.toString().trim();
  }

  /** Returns the integer value of str, or null when it does not parse. */
  public static Integer String2Integer(String str) {
    Integer retInt = null;
    try {
      // valueOf instead of the deprecated `new Integer(...)` constructor.
      retInt = Integer.valueOf(str);
    } catch (NumberFormatException nfe) {
      // not an integer: fall through and return null
    }
    return (retInt);
  }

  /** Renders m as one "key = value" line per entry (map iteration order). */
  public static String Map2StringPrettyPrint(Hashtable m) {
    StringBuilder retString = new StringBuilder();
    Iterator iter = m.keySet().iterator();
    while (iter.hasNext()) {
      String key = (String) iter.next();
      retString.append(key).append(" = ").append(m.get(key)).append("\n");
    }
    return retString.toString().trim();
  }

  /** Joins fields with delim; returns "" for an empty array. */
  public static String Join(String[] fields, String delim) {
    StringBuilder retString = new StringBuilder();
    for (int si = 0; si < fields.length; ++si) {
      if (si > 0) {
        retString.append(delim);
      }
      retString.append(fields[si]);
    }
    return retString.toString();
  }

  /** Returns the elements of l2 (in order) that are also keys of m1. */
  public static ArrayList<String> GetIntersection(TObjectDoubleHashMap m1,
                                                  ArrayList<String> l2) {
    ArrayList<String> retList = new ArrayList<String>();
    for (int i = 0; i < l2.size(); ++i) {
      if (m1.containsKey(l2.get(i))) {
        retList.add(l2.get(i));
      }
    }
    return (retList);
  }
}
| 3,518 | 26.708661 | 93 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/RyanFeatureVector.java | package upenn.junto.util;
import gnu.trove.map.hash.TIntDoubleHashMap;
import gnu.trove.iterator.TIntDoubleIterator;
import java.io.*;
import java.util.*;
public class RyanFeatureVector implements Comparable, Serializable {
  // A sparse feature vector stored as a singly linked list of
  // (index, value) nodes. Lists end in a sentinel node whose next == null
  // (typically created with index -1); most traversals therefore loop on
  // `curr.next != null` and additionally skip nodes with index < 0.
  public int index;
  public double value;
  public RyanFeatureVector next;

  public RyanFeatureVector(int i, double v, RyanFeatureVector n) {
    index = i;
    value = v;
    next = n;
  }

  // Prepends (index(feat), val) when the alphabet yields a non-negative
  // index; otherwise returns this list unchanged.
  // NOTE(review): assumes RyanAlphabet.lookupIndex can return a negative
  // value for unknown entries — confirm against the alphabet implementation.
  public RyanFeatureVector add(String feat, double val, RyanAlphabet dataAlphabet) {
    int num = dataAlphabet.lookupIndex(feat);
    if(num >= 0)
      return new RyanFeatureVector(num,val,this);
    return this;
  }

  // In-place prepend: this node takes the new (i1, v1) and its old contents
  // move into a freshly inserted successor node.
  public void add(int i1, double v1) {
    RyanFeatureVector new_node = new RyanFeatureVector(this.index, this.value, this.next);
    this.index = i1;
    this.value = v1;
    this.next = new_node;
  }

  // Concatenation: copies the non-sentinel nodes of fv1 and fv2 into a new
  // list (order is reversed relative to traversal, which does not matter
  // for bag-of-features semantics).
  public static RyanFeatureVector cat(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    RyanFeatureVector result = new RyanFeatureVector(-1,-1.0,null);
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value,result);
    }
    for(RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value,result);
    }
    return result;
  }

  // fv1 - fv2
  // Difference vector, represented by keeping fv1's entries and appending
  // fv2's entries with negated values (duplicates are not merged here).
  public static RyanFeatureVector getDistVector(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    RyanFeatureVector result = new RyanFeatureVector(-1, -1.0, null);
    for (RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if (curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index, curr.value, result);
    }
    for (RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if (curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index, -curr.value, result);
    }
    return result;
  }

  // fv1 + rate * fv2, with duplicate indices merged through an
  // intermediate index -> sum map.
  public static RyanFeatureVector getAddedVector(RyanFeatureVector fv1, RyanFeatureVector fv2, double rate) {
    TIntDoubleHashMap hm = new TIntDoubleHashMap();
    for (RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if (curr.index >= 0) {
        hm.put(curr.index, (hm.containsKey(curr.index) ? hm.get(curr.index) : 0) + curr.value);
      }
    }
    for (RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if (curr.index >= 0) {
        hm.put(curr.index, (hm.containsKey(curr.index) ? hm.get(curr.index) : 0) + rate * curr.value);
      }
    }
    RyanFeatureVector result = new RyanFeatureVector(-1, -1, null);
    TIntDoubleIterator hmIter = hm.iterator();
    while (hmIter.hasNext()) {
      hmIter.advance();
      result = new RyanFeatureVector(hmIter.key(), hmIter.value(), result);
    }
    return result;
  }

  // Dot product of two sparse vectors; each is first collapsed into an
  // index -> summed-value map so repeated indices accumulate.
  public static double dotProduct(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    double result = 0.0;
    TIntDoubleHashMap hm1 = new TIntDoubleHashMap();
    TIntDoubleHashMap hm2 = new TIntDoubleHashMap();
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm1.put(curr.index,hm1.get(curr.index)+curr.value);
    }
    for(RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm2.put(curr.index,hm2.get(curr.index)+curr.value);
    }
    int[] keys = hm1.keys();
    for(int i = 0; i < keys.length; i++) {
      double v1 = hm1.get(keys[i]);
      double v2 = hm2.get(keys[i]);
      result += v1*v2;
    }
    return result;
  }

  // Sum of the raw node values (duplicates counted separately).
  // NOTE(review): despite the name this is a plain sum, not a sum of
  // absolute values — negative entries reduce the result.
  public static double oneNorm(RyanFeatureVector fv1) {
    double sum = 0.0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      sum += curr.value;
    }
    return sum;
  }

  // Number of non-sentinel nodes in the list.
  public static int size(RyanFeatureVector fv1) {
    int sum = 0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      sum++;
    }
    return sum;
  }

  // Euclidean (L2) norm, computed after merging duplicate indices.
  public static double twoNorm(RyanFeatureVector fv1) {
    TIntDoubleHashMap hm = new TIntDoubleHashMap();
    double sum = 0.0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm.put(curr.index,hm.get(curr.index)+curr.value);
    }
    int[] keys = hm.keys();
    for(int i = 0; i < keys.length; i++)
      sum += Math.pow(hm.get(keys[i]),2.0);
    return Math.sqrt(sum);
  }

  public static RyanFeatureVector twoNormalize(RyanFeatureVector fv1) {
    return normalize(fv1,twoNorm(fv1));
  }

  public static RyanFeatureVector oneNormalize(RyanFeatureVector fv1) {
    return normalize(fv1,oneNorm(fv1));
  }

  // Returns a copy of fv1 with every value divided by norm (no guard
  // against norm == 0; callers are responsible).
  public static RyanFeatureVector normalize(RyanFeatureVector fv1, double norm) {
    RyanFeatureVector result = new RyanFeatureVector(-1,-1.0,null);
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value/norm,result);
    }
    return result;
  }

  // Recursive "index:value index:value ..." rendering, sentinel included.
  public String toString() {
    if (next == null)
      return "" + index + ":" + value;
    return index + ":" + value + " " + next.toString();
  }

  // Sorts this list in place by ascending index: collects the non-sentinel
  // nodes, sorts them, rebuilds the chain behind a fresh sentinel, then
  // copies the rebuilt head back into `this`.
  public void sort() {
    ArrayList features = new ArrayList();
    for(RyanFeatureVector curr = this; curr != null; curr = curr.next)
      if(curr.index >= 0)
        features.add(curr);
    Object[] feats = features.toArray();
    Arrays.sort(feats);
    RyanFeatureVector fv = new RyanFeatureVector(-1,-1.0,null);
    for(int i = feats.length-1; i >= 0; i--) {
      RyanFeatureVector tmp = (RyanFeatureVector)feats[i];
      fv = new RyanFeatureVector(tmp.index,tmp.value,fv);
    }
    this.index = fv.index;
    this.value = fv.value;
    this.next = fv.next;
  }

  // Natural ordering by feature index (used by sort()).
  public int compareTo(Object o) {
    RyanFeatureVector fv = (RyanFeatureVector)o;
    if(index < fv.index)
      return -1;
    if(index > fv.index)
      return 1;
    return 0;
  }

  // Dot product of this sparse vector against a dense weight array.
  // NOTE(review): the name is misspelled ("dotProdoct") but it is part of
  // the public interface, so renaming it would break existing callers.
  // Note also that this traversal uses `curr != null` (unlike the other
  // loops), so a node whose next is null is still visited here.
  public double dotProdoct(double[] weights) {
    double score = 0.0;
    for(RyanFeatureVector curr = this; curr != null; curr = curr.next) {
      if (curr.index >= 0)
        score += weights[curr.index]*curr.value;
    }
    return score;
  }
}
| 6,221 | 26.052174 | 111 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/ProbUtil.java | package upenn.junto.util;
import gnu.trove.iterator.TObjectDoubleIterator;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import java.util.ArrayList;
public class ProbUtil {

  // Builds a uniform distribution over the given labels: each label is
  // assigned probability 1/|labels|. Requires a non-empty label list
  // (checked only when assertions are enabled).
  public static TObjectDoubleHashMap GetUniformPrior(ArrayList<String> labels) {
    int totalLabels = labels.size();
    assert (totalLabels > 0);
    double prior = 1.0 / totalLabels;
    assert (prior > 0);
    TObjectDoubleHashMap retMap = new TObjectDoubleHashMap();
    for (int li = 0; li < totalLabels; ++li) {
      retMap.put(labels.get(li), prior);
    }
    return (retMap);
  }

  // this method returns result += mult * addDist
  // (scaled in-place accumulation; keys missing from result are inserted)
  public static void AddScores(TObjectDoubleHashMap result, double mult,
      TObjectDoubleHashMap addDist) {
    assert (result != null);
    assert (addDist != null);
    TObjectDoubleIterator iter = addDist.iterator();
    while (iter.hasNext()) {
      iter.advance();
      double adjVal = mult * iter.value();
      // System.out.println(">> adjVal: " + mult + " " + iter.key() + " " + iter.value() + " " + adjVal);
      result.adjustOrPutValue(iter.key(), adjVal, adjVal);
    }
  }

  // In-place division of every score in result by divisor (> 0 asserted).
  public static void DivScores(TObjectDoubleHashMap result, double divisor) {
    assert (result != null);
    assert (divisor > 0);
    TObjectDoubleIterator li = result.iterator();
    while (li.hasNext()) {
      li.advance();
      // System.out.println("Before: " + " " + li.key() + " " + li.value() + " " + divisor);
      double newVal = (1.0 * li.value()) / divisor;
      result.put(li.key(), newVal);
      // System.out.println("After: " + " " + li.key() + " " + result.get(li.key()) + " " + divisor);
    }
  }

  // In-place truncation: clears m and re-inserts only its keepTopK highest
  // scoring keys, and among those only the ones with strictly positive
  // score (so the result may hold fewer than keepTopK entries).
  public static void KeepTopScoringKeys(TObjectDoubleHashMap m, int keepTopK) {
    ArrayList<ObjectDoublePair> lsps = CollectionUtil.ReverseSortMap(m);
    // the array is sorted from large to small, so start
    // from beginning and retain only top scoring k keys.
    m.clear();
    int totalAdded = 0;
    int totalSorted = lsps.size();
    // for (int li = lsps.size() - 1; li >= 0 && totalAdded <= keepTopK; --li) {
    for (int li = 0; li < totalSorted && totalAdded < keepTopK; ++li) {
      ++totalAdded;
      if (lsps.get(li).GetScore() > 0) {
        m.put(lsps.get(li).GetLabel(), lsps.get(li).GetScore());
      }
    }
    // size of the new map is upper bounded by the max
    // number of entries requested
    assert (m.size() <= keepTopK);
  }

  // L1-normalizes m in place over all of its entries.
  public static void Normalize(TObjectDoubleHashMap m) {
    Normalize(m, Integer.MAX_VALUE);
  }

  // Optionally truncates m to its keepTopK best entries, then L1-normalizes
  // in place. When the total mass is not positive, m is left unchanged.
  public static void Normalize(TObjectDoubleHashMap m, int keepTopK) {
    // if the number of labels to retain are not the trivial
    // default value, then keep the top scoring k labels as requested
    if (keepTopK != Integer.MAX_VALUE) {
      KeepTopScoringKeys(m, keepTopK);
    }
    TObjectDoubleIterator mi = m.iterator();
    double denom = 0;
    while (mi.hasNext()) {
      mi.advance();
      denom += mi.value();
    }
    // assert (denom > 0);
    if (denom > 0) {
      mi = m.iterator();
      while (mi.hasNext()) {
        mi.advance();
        double newVal = mi.value() / denom;
        mi.setValue(newVal);
      }
    }
  }

  // Sum of all scores in m.
  public static double GetSum(TObjectDoubleHashMap m) {
    TObjectDoubleIterator mi = m.iterator();
    double sum = 0;
    while (mi.hasNext()) {
      mi.advance();
      sum += mi.value();
    }
    return (sum);
  }

  // L2 distance between the scaled maps m1Mult*m1 and m2Mult*m2.
  // NOTE(review): despite the "Squarred" in the name, the returned value is
  // Math.sqrt of the squared norm, i.e. the norm itself.
  public static double GetDifferenceNorm2Squarred(TObjectDoubleHashMap m1,
      double m1Mult, TObjectDoubleHashMap m2, double m2Mult) {
    TObjectDoubleHashMap diffMap = new TObjectDoubleHashMap();
    // copy m1 into the difference map
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      diffMap.put(iter.key(), m1Mult * iter.value());
    }
    iter = m2.iterator();
    while (iter.hasNext()) {
      iter.advance();
      diffMap.adjustOrPutValue(iter.key(), -1 * m2Mult * iter.value(), -1
          * m2Mult * iter.value());
    }
    double val = 0;
    iter = diffMap.iterator();
    while (iter.hasNext()) {
      iter.advance();
      val += iter.value() * iter.value();
    }
    return (Math.sqrt(val));
  }

  // KL (m1 || m2)
  // Both numerator and denominator are smoothed with a small constant so a
  // zero probability in m2 never produces an infinite divergence.
  public static double GetKLDifference(TObjectDoubleHashMap m1,
      TObjectDoubleHashMap m2) {
    double divergence = 0;
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      if (iter.value() > 0) {
        // if (!m2.containsKey(iter.key()) && m2.get(iter.key()) <= 0) {
        // divergence += Double.NEGATIVE_INFINITY;
        // } else {
        // add a small quantity to the numerator and denominator to avoid
        // infinite divergence
        divergence += iter.value()
            * Math.log((iter.value() + Constants.GetSmallConstant())
                / (m2.get(iter.key()) + Constants.GetSmallConstant()));
        // }
      }
    }
    return (divergence);
  }

  // Entropy(m1)
  // Shannon entropy in nats (natural logarithm); zero-probability entries
  // are skipped.
  public static double GetEntropy(TObjectDoubleHashMap m1) {
    double entropy = 0;
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      if (iter.value() > 0) {
        entropy += -1 * iter.value() * Math.log(iter.value());
      }
    }
    return (entropy);
  }
}
| 5,406 | 28.872928 | 107 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/util/GraphStats.java | package upenn.junto.util;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jgrapht.GraphPath;
import org.jgrapht.alg.KShortestPaths;
import org.jgrapht.graph.DefaultDirectedWeightedGraph;
import org.jgrapht.graph.DefaultWeightedEdge;
import upenn.junto.config.ConfigReader;
import upenn.junto.config.GraphConfigLoader;
import upenn.junto.graph.Graph;
import upenn.junto.graph.Vertex;
/**
 * Utilities for reporting basic statistics of a {@link Graph}: vertex/edge
 * counts, seed/test node counts, and degree statistics. Also contains an
 * (unreferenced within this class's public API) helper that estimates the
 * graph diameter from seed nodes via shortest paths.
 */
public class GraphStats {
  private static Logger logger = LogManager.getLogger(GraphStats.class);
  // Number of K-shortest paths generated.
  // NOTE(review): only referenced from code that is commented out in
  // GetDiameter below; currently unused at runtime.
  private static int _kPrime = -1;
  /**
   * Computes graph statistics and writes the report to {@code graphStatsFile}.
   * Any I/O failure is rethrown as an unchecked RuntimeException.
   */
  public static void PrintStats(Graph g, String graphStatsFile) {
    try {
      BufferedWriter swr = new BufferedWriter(new FileWriter(graphStatsFile));
      swr.write(PrintStats(g));
      swr.close();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  /**
   * Builds a multi-line, human-readable statistics report for {@code g}:
   * totals of seed/test/overlapping vertices, vertex and edge counts, and
   * average/min/max degree.
   *
   * <p>NOTE(review): on an empty graph the average degree divides by zero and
   * min/max degree remain at their sentinel values — confirm callers never
   * pass an empty graph.
   */
  public static String PrintStats(Graph g) {
    int totalSeedNodes = 0;
    int totalTestNodes = 0;
    int totalSeedAndTestNodes = 0;
    int totalEdges = 0;
    int totalVertices = 0;
    int maxDegree = Integer.MIN_VALUE;
    int minDegree = Integer.MAX_VALUE;
    for (String vName : g.vertices().keySet()) {
      Vertex v = g.vertices().get(vName);
      ++totalVertices;
      // degree = number of outgoing neighbor entries of this vertex
      int degree = v.GetNeighborNames().length;
      if (degree > maxDegree) { maxDegree = degree; }
      if (degree < minDegree) { minDegree = degree; }
      totalEdges += v.neighbors().size();
      if (v.isSeedNode()) { ++totalSeedNodes; }
      if (v.isTestNode()) { ++totalTestNodes; }
      if (v.isSeedNode() && v.isTestNode()) { ++totalSeedAndTestNodes; }
    }
    String retStr = "Total seed vertices: " + totalSeedNodes + "\n";
    retStr += "Total test vertices: " + totalTestNodes + "\n";
    retStr += "Total seed vertices which are also test vertices: " + totalSeedAndTestNodes + "\n";
    retStr += "Total vertices: " + totalVertices + "\n";
    retStr += "Total edges: " + totalEdges + "\n";
    retStr += "Average degree: " + (1.0 * totalEdges) / totalVertices + "\n";
    retStr += "Min degree: " + minDegree + "\n";
    retStr += "Max degree: " + maxDegree + "\n";
    return (retStr);
  }
  /**
   * Estimates the diameter of {@code g} using single shortest paths from every
   * seed node to every other vertex, and reports whether all vertices are
   * reachable from the seed nodes. Diameter here is measured in edge count of
   * the shortest path found (-1 if no seed node reaches anything).
   */
  private static String GetDiameter(
      DefaultDirectedWeightedGraph<Vertex,DefaultWeightedEdge> g) {
    String retDiaReport = "";
    // HashMap<Vertex,KShortestPaths<Vertex,DefaultWeightedEdge>> kShortestPathMap =
    // new HashMap<Vertex,KShortestPaths<Vertex,DefaultWeightedEdge>>();
    boolean isConnected = true;
    int diameter = -1;
    int totalProcessed = 0;
    Iterator<Vertex> vIter = g.vertexSet().iterator();
    while (vIter.hasNext()) {
      Vertex v = vIter.next();
      // only seed nodes are used as shortest-path sources
      if (!v.isSeedNode()) {
        continue;
      }
      ++totalProcessed;
      if (totalProcessed % 1000 == 0) {
        logger.info("Processed: " + totalProcessed + " curr_dia: " + diameter);
      }
      // k = 1: we only need the single shortest path to each target
      KShortestPaths<Vertex,DefaultWeightedEdge> ksp = new KShortestPaths(g, v, 1);
      // kShortestPathMap.put(v, new KShortestPaths(g, v, _kPrime));
      Iterator<Vertex> vIter2 = g.vertexSet().iterator();
      while (vIter2.hasNext()) {
        Vertex nv = vIter2.next();
        // skip self comparison
        if (v.equals(nv)) { continue; }
        List<GraphPath<Vertex,DefaultWeightedEdge>> paths = ksp.getPaths(nv);
        // no path at all => the graph is not connected from the seed nodes
        if (paths == null) { isConnected = false; }
        else if (paths.get(0).getEdgeList().size() > diameter) {
          diameter = paths.get(0).getEdgeList().size();
        }
      }
    }
    retDiaReport += "Connected(from_seed_nodes): " + (isConnected ? "true" : "false") + "\n";
    retDiaReport += "Diameter(from_seed_nodes): " + diameter + "\n";
    return (retDiaReport);
  }
  /**
   * Command-line entry point: reads a config, loads the graph it describes,
   * and prints the statistics report to the console.
   */
  public static void main(String[] args) {
    Hashtable config = ConfigReader.read_config(args);
    // load the graph
    Graph g = GraphConfigLoader.apply(config);
    MessagePrinter.Print(PrintStats(g));
  }
}
| 4,194 | 30.780303 | 98 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/app/ConfigTuner.java | package upenn.junto.app;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import gnu.trove.list.array.TDoubleArrayList;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import upenn.junto.config.ConfigReader;
import upenn.junto.util.CollectionUtil;
import upenn.junto.util.Constants;
import upenn.junto.util.Defaults;
import upenn.junto.util.MessagePrinter;
/**
 * Grid-search driver for tuning Junto algorithm hyperparameters.
 *
 * <p>Each tuning-config value may be a comma-separated list (e.g.
 * {@code mu1 = 1e-8,1,1e-2}); the cross product of all listed values is
 * expanded into individual run configurations. Every configuration is run via
 * {@link JuntoConfigRunner} with stdout/stderr redirected to a per-config log
 * file, and the best-scoring configuration per algorithm (by MRR) is reported
 * at the end, optionally followed by a final test run with the tuned values.
 */
public class ConfigTuner {
  /**
   * Expands comma-separated parameter values into the cross product of run
   * configurations. The returned list may contain duplicate configurations
   * (uniqueness is enforced later via output-file names in {@link #Run}).
   */
  private static ArrayList<Hashtable>
    GetAllCombinations(Hashtable tuningConfig) {
    ArrayList<Hashtable> configs = new ArrayList<Hashtable>();
    Iterator iter = tuningConfig.keySet().iterator();
    while (iter.hasNext()) {
      String paramKey = (String) iter.next();
      String paramVal = (String) tuningConfig.get(paramKey);
      // e.g. mu1 = 1e-8,1,1e-8
      String[] fields = paramVal.split(",");
      int currSize = configs.size();
      for (int fi = 0; fi < fields.length; ++fi) {
        // add the first configuration, if none exists
        if (configs.size() == 0) {
          configs.add(new Hashtable());
          ++currSize;
        }
        for (int ci = 0; ci < currSize; ++ci) {
          // the first value can be added to existing
          // configurations.
          if (fi == 0) {
            configs.get(ci).put(paramKey, fields[fi]);
          } else {
            // subsequent values fork a clone of each existing config
            Hashtable nc = (Hashtable) configs.get(ci).clone();
            nc.put(paramKey, fields[fi]);
            // append the new config to the end of the list
            configs.add(nc);
          }
        }
      }
    }
    System.out.println("Total config (non-unique) combinations: " + configs.size());
    return (configs);
  }
  /**
   * Runs every expanded configuration, tracking the best MRR per algorithm.
   * Console output of each run is redirected to a per-config log file under
   * {@code log_output_dir}; already-logged configs can be skipped via
   * {@code skip_existing_config}. After all runs, prints the best config per
   * algorithm and, if {@code final_config_file} is set, re-runs with the tuned
   * parameters merged with that file's options.
   */
  private static void Run(Hashtable tuningConfig) {
    // some essential options: terminate if they are not specified
    String idenStr = Defaults.GetValueOrDie(tuningConfig, "iden_str");
    String logDir = Defaults.GetValueOrDie(tuningConfig, "log_output_dir");
    String opDir = Defaults.GetValueOrDefault(
        (String) tuningConfig.get("output_dir"), null);
    boolean skipExistingConfigs =
      Defaults.GetValueOrDefault((String) tuningConfig.get("skip_existing_config"), false);
    // config file with post-tuning testing details (i.e. final test file etc.)
    String finalTestConfigFile = (String) tuningConfig.get("final_config_file");
    tuningConfig.remove("final_config_file");
    // generate all possible combinations (non unique)
    ArrayList<Hashtable> configs = GetAllCombinations(tuningConfig);
    ArrayList<ArrayList> results = new ArrayList<ArrayList>();
    HashSet<String> uniqueConfigs = new HashSet<String>();
    // map from algo to the current best scores and the corresponding config
    HashMap<String,Hashtable> algo2BestConfig = new HashMap<String,Hashtable>();
    TObjectDoubleHashMap algo2BestScore = new TObjectDoubleHashMap();
    // store console streams so they can be restored after each redirected run
    PrintStream consoleOut = System.out;
    PrintStream consoleErr = System.err;
    for (int ci = 0; ci < configs.size(); ++ci) {
      Hashtable c = configs.get(ci);
      // if this a post-tune config, then generate seed and test files
      if (Defaults.GetValueOrDefault((String) c.get("is_final_run"), false)) {
        String splitId = Defaults.GetValueOrDie(c, "split_id");
        c.put("seed_file", c.remove("seed_base") + "." + splitId + ".train");
        c.put("test_file", c.remove("test_base") + "." + splitId + ".test");
      }
      // output file name is considered a unique identifier of a configuration
      String outputFile = GetOutputFileName(c, opDir, idenStr);
      if (uniqueConfigs.contains(outputFile)) {
        continue;
      }
      uniqueConfigs.add(outputFile);
      if (opDir != null) {
        c.put("output_file", outputFile);
      }
      System.out.println("Working with config: " + c.toString());
      try {
        // reset System.out so that the log printed using System.out.println
        // is directed to the right log file
        String logFile = GetLogFileName(c, logDir, idenStr);
        // if the log file exists, then don't repeat
        File lf = new File(logFile);
        if (skipExistingConfigs && lf.exists()) {
          continue;
        }
        FileOutputStream fos = new FileOutputStream(new File(logFile));
        PrintStream ps = new PrintStream(fos);
        System.setOut(ps);
        System.setErr(ps);
        results.add(new ArrayList());
        JuntoConfigRunner.apply(c, results.get(results.size() - 1));
        UpdateBestConfig((String) c.get("algo"), algo2BestScore,
            algo2BestConfig, c, results.get(results.size() - 1));
        // reset System.out back to the original console value
        System.setOut(consoleOut);
        System.setErr(consoleErr);
        // close log file
        fos.close();
      } catch (FileNotFoundException fnfe) {
        fnfe.printStackTrace();
      } catch (IOException ioe) {
        ioe.printStackTrace();
      }
    }
    // print out the best parameters for each algorithm
    Iterator algoIter = algo2BestConfig.keySet().iterator();
    while (algoIter.hasNext()) {
      String algo = (String) algoIter.next();
      System.out.println("\n#################\n" +
          "BEST_CONFIG_FOR " + algo + " " +
          algo2BestScore.get(algo) + "\n" +
          CollectionUtil.Map2StringPrettyPrint(algo2BestConfig.get(algo)));
      // run test with tuned parameters, if requested
      if (finalTestConfigFile != null) {
        Hashtable finalTestConfig = (Hashtable) algo2BestConfig.get(algo).clone();
        // add additional config options from the file to the tuned params
        finalTestConfig = ConfigReader.read_config(finalTestConfig, finalTestConfigFile);
        JuntoConfigRunner.apply(finalTestConfig, null);
      }
    }
  }
  /**
   * Builds the (unique) output file name for a configuration; the name layout
   * depends on the algorithm. Dies via MessagePrinter for unknown algorithms.
   * NOTE(review): the initial value is a single space (" "), unlike the empty
   * string used in GetLogFileName — presumably unintentional, but harmless
   * since unknown algorithms terminate the process.
   */
  private static String GetOutputFileName(Hashtable c, String opDir, String idenStr) {
    String outputFile = " ";
    if (c.get("algo").equals("mad") ||
        c.get("algo").equals("lgc") ||
        c.get("algo").equals("am") ||
        c.get("algo").equals("lclp")) {
      outputFile = opDir + "/" + GetBaseName2(c, idenStr);
    } else if (c.get("algo").equals("maddl")) {
      outputFile = opDir + "/" +
        GetBaseName2(c, idenStr) +
        ".mu4_" + c.get("mu4");
    } else if (c.get("algo").equals("adsorption") || c.get("algo").equals("lp_zgl")) {
      outputFile = opDir + "/" + GetBaseName(c, idenStr);
    } else {
      MessagePrinter.PrintAndDie("output_1 file can't be empty!");
    }
    return (outputFile);
  }
  /**
   * Builds the per-config log file name, mirroring the naming scheme of
   * {@link #GetOutputFileName} with a "log." prefix.
   */
  private static String GetLogFileName(Hashtable c, String logDir, String idenStr) {
    String logFile = "";
    if (c.get("algo").equals("mad") ||
        c.get("algo").equals("lgc") ||
        c.get("algo").equals("am") ||
        c.get("algo").equals("lclp")) {
      logFile = logDir + "/" + "log." + GetBaseName2(c, idenStr);
    } else if (c.get("algo").equals("maddl")) {
      logFile = logDir + "/" +
        "log." +
        GetBaseName2(c, idenStr) +
        ".mu4_" + c.get("mu4");
    } else if (c.get("algo").equals("adsorption") || c.get("algo").equals("lp_zgl")) {
      logFile = logDir + "/" +
        "log." + GetBaseName(c, idenStr);
    } else {
      MessagePrinter.PrintAndDie("output_2 file can't be empty!");
    }
    return (logFile);
  }
  /**
   * Constructs a base file name encoding the identifying string plus every
   * tuning-relevant option present in the config (seeds per class, algorithm,
   * pruning thresholds, gaussian-kernel sigma, beta, split id, ...).
   */
  private static String GetBaseName(Hashtable c, String idenStr) {
    String base = idenStr;
    if (c.containsKey("max_seeds_per_class")) {
      base += ".spc_" + c.get("max_seeds_per_class");
    }
    base += "." + c.get("algo");
    if (c.containsKey("use_bipartite_optimization")) {
      base += ".bipart_opt_" + c.get("use_bipartite_optimization");
    }
    if (c.containsKey("top_k_neighbors")) {
      base += ".K_" + c.get("top_k_neighbors");
    }
    if (c.containsKey("prune_threshold")) {
      base += ".P_" + c.get("prune_threshold");
    }
    if (c.containsKey("high_prune_thresh")) {
      base += ".feat_prune_high_" + c.get("high_prune_thresh");
    }
    if (c.containsKey("keep_top_k_labels")) {
      base += ".top_labels_" + c.get("keep_top_k_labels");
    }
    if (c.containsKey("train_fract")) {
      base += ".train_fract_" + c.get("train_fract");
    }
    if (Defaults.GetValueOrDefault((String) c.get("set_gaussian_kernel_weights"), false)) {
      double sigmaFactor = Double.parseDouble(Defaults.GetValueOrDie(c, "gauss_sigma_factor"));
      base += ".gk_sig_" + sigmaFactor;
    }
    if (c.containsKey("algo") && (c.get("algo").equals("adsorption") ||
                                  c.get("algo").equals("mad") ||
                                  c.get("algo").equals("maddl"))) {
      double beta = Defaults.GetValueOrDefault((String) c.get("beta"), 2.0);
      base += ".beta_" + beta;
    }
    // if this a post-tune config, then generate seed and test files
    if (Defaults.GetValueOrDefault((String) c.get("is_final_run"), false)) {
      base += ".split_id_" + Defaults.GetValueOrDie(c, "split_id");
    }
    return (base);
  }
  /**
   * Extends {@link #GetBaseName} with the mu1/mu2/mu3 hyperparameters and the
   * normalization flag — used by algorithms that take these parameters.
   */
  private static String GetBaseName2(Hashtable c, String idenStr) {
    String base = GetBaseName(c, idenStr) +
      ".mu1_" + c.get("mu1") +
      ".mu2_" + c.get("mu2") +
      ".mu3_" + c.get("mu3") +
      ".norm_" + c.get("norm");
    return (base);
  }
  /**
   * Updates the per-algorithm best score/config bookkeeping from a run's
   * per-iteration result maps. The score used is the MRR entry of each
   * iteration's result map (index 0 is skipped — presumably a header/initial
   * entry, confirm against JuntoConfigRunner). When a new best is found, the
   * winning iteration number is recorded in the stored config under "iters".
   */
  private static void UpdateBestConfig(String algo, TObjectDoubleHashMap algo2BestScore,
      HashMap<String,Hashtable> algo2BestConfig, Hashtable config,
      ArrayList perIterMultiScores) {
    TDoubleArrayList perIterScores = new TDoubleArrayList();
    for (int i = 1; i < perIterMultiScores.size(); ++i) {
      TObjectDoubleHashMap r = (TObjectDoubleHashMap) perIterMultiScores.get(i);
      perIterScores.add(r.get(Constants.GetMRRString()));
    }
    if (perIterScores.size() > 0) {
      // System.out.println("SIZE: " + perIterScores.size());
      // find the index of the max per-iteration score
      int mi = 0;
      for (int i = 1; i < perIterScores.size(); ++i) {
        if (perIterScores.get(i) > perIterScores.get(mi)) {
          mi = i;
        }
      }
      // System.out.println("max_idx: " + mi + " " + perIterScores.toString());
      double maxScore = perIterScores.get(mi); // perIterScores.max();
      if (algo2BestScore.size() == 0 || algo2BestScore.get(algo) < maxScore) {
        // System.out.println("new best score: " + maxScore);
        // best iteration
        int bestIter = perIterScores.indexOf(maxScore) + 1;
        algo2BestScore.put(algo, maxScore);
        algo2BestConfig.put(algo, (Hashtable) config.clone());
        algo2BestConfig.get(algo).put("iters", bestIter);
      }
    }
  }
  /** Command-line entry point: reads the tuning config and runs the grid search. */
  public static void main(String[] args) {
    Hashtable tuningConfig = ConfigReader.read_config(args);
    Run(tuningConfig);
  }
}
| 11,087 | 35.715232 | 99 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/graph/CrossValidationGenerator.java | package upenn.junto.graph;
import upenn.junto.util.ObjectDoublePair;
import upenn.junto.util.Constants;
import upenn.junto.util.CollectionUtil;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
/**
 * Splits a graph's labeled instance vertices into train (seed) and test sets
 * by assigning each instance a pseudo-random score and taking the top
 * {@code trainFract} fraction as seeds. The random generator is seeded with a
 * fixed constant so splits are reproducible across runs.
 */
public class CrossValidationGenerator {
  // seed used to initialize the random number generator
  static long _kDeterministicSeed = 100;
  /**
   * Marks each eligible vertex of {@code g} as a seed or test node in place.
   * Eligible vertices are those whose name lacks the feature prefix and that
   * carry at least one gold label. Seed vertices additionally get their gold
   * labels copied into their injected-label distribution.
   *
   * @param g          graph whose vertices are mutated
   * @param trainFract fraction (0..1) of eligible instances to mark as seeds;
   *                   the count is rounded up via Math.ceil
   */
  public static void Split(Graph g, double trainFract) {
    Random r = new Random(_kDeterministicSeed);
    // Random r = new Random();
    TObjectDoubleHashMap instanceVertices = new TObjectDoubleHashMap();
    Iterator vIter = g.vertices().keySet().iterator();
    while (vIter.hasNext()) {
      Vertex v = g.vertices().get(vIter.next());
      // nodes without feature prefix and those with at least one
      // gold labels are considered valid instances
      if (!v.name().startsWith(Constants.GetFeatPrefix()) &&
          v.goldLabels().size() > 0) {
        // attach a random score used below to shuffle the instances
        instanceVertices.put(v, r.nextDouble());
      }
    }
    // sorting by the random scores yields a deterministic random permutation
    ArrayList<ObjectDoublePair> sortedRandomInstances =
      CollectionUtil.ReverseSortMap(instanceVertices);
    int totalInstances = sortedRandomInstances.size();
    double totalTrainInstances = Math.ceil(totalInstances * trainFract);
    for (int vi = 0; vi < totalInstances; ++vi) {
      Vertex v = (Vertex) sortedRandomInstances.get(vi).GetLabel();
      // mark train and test nodes
      if (vi < totalTrainInstances) {
        v.setIsSeedNode(true);
        // we expect that the gold labels for the node has already been
        // set, we only need to copy them as injected labels
        TObjectDoubleIterator goldLabIter = v.goldLabels().iterator();
        while (goldLabIter.hasNext()) {
          goldLabIter.advance();
          v.SetInjectedLabelScore((String) goldLabIter.key(), goldLabIter.value());
        }
      } else {
        v.setIsTestNode(true);
      }
    }
    // // for sanity check, count the number of train and test nodes
    // int totalTrainNodes = 0;
    // int totalTestNodes = 0;
    // for (int vi = 0; vi < totalInstances; ++vi) {
    // Vertex v = (Vertex) sortedRandomInstances.get(vi).GetLabel();
    // if (v.isSeedNode()) {
    // ++totalTrainNodes;
    // }
    // if (v.isTestNode()) {
    // ++totalTestNodes;
    // }
    // }
    // MessagePrinter.Print("Total train nodes: " + totalTrainNodes);
    // MessagePrinter.Print("Total test nodes: " + totalTestNodes);
  }
}
| 2,568 | 33.253333 | 83 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/graph/parallel/Edge2NodeFactoredHadoop.java | package upenn.junto.graph.parallel;
import upenn.junto.util.*;
import upenn.junto.graph.Vertex;
import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Hadoop MapReduce job that converts an edge-factored graph (one edge per
 * input line: {@code node1 <TAB> node2 <TAB> weight}) into a node-factored
 * representation: one output line per vertex containing its gold labels,
 * injected (seed) labels, estimated labels, neighbor list and random-walk
 * probabilities. Gold and seed labels are loaded from side files given on the
 * command line. Vertices with many neighbors are split across multiple output
 * lines of at most {@code kMaxNeighorsPerLine_} neighbors each.
 */
public class Edge2NodeFactoredHadoop {
	// field delimiter used throughout the input/output line formats
	private static String _kDelim = "\t";
	// max neighbors emitted per output line before the record is split
	private static int kMaxNeighorsPerLine_ = 1000;
	// beta used when computing Adsorption-style random-walk probabilities
	private static double _kBeta = 2.0;
	// message-type tags distinguishing the reducer's incoming values
	private static String neighMsgType = "-NEIGH-";
	private static String goldLabMsgType = "-GOLD-";
	private static String injLabMsgType = "-INJ-";
	/**
	 * Mapper: for each edge line, emits neighbor messages for both endpoints
	 * (the reverse direction only when the endpoints differ) plus gold/seed
	 * label messages for any endpoint present in the side-loaded label maps.
	 * NOTE(review): label messages are emitted once per incident edge, so the
	 * reducer may receive duplicates for high-degree vertices — presumably the
	 * Vertex setters simply overwrite; confirm.
	 */
	public static class Map extends MapReduceBase implements
			Mapper<LongWritable, Text, Text, Text> {
		private HashMap<String,String> goldLabels;
		private HashMap<String,String> seedLabels;
		/** Loads the gold and seed label side files before mapping begins. */
		public void configure(JobConf conf) {
			goldLabels = LoadLabels(conf.get("gold_label_file"));
			seedLabels = LoadLabels(conf.get("seed_label_file"));
		}
		/**
		 * Reads a tab-separated label file ({@code node label score}) from
		 * HDFS into a map node -> "label<TAB>score". Only the first entry per
		 * node is retained.
		 */
		private HashMap<String,String> LoadLabels(String fileName) {
			HashMap<String,String> m = new HashMap<String,String>();
			try {
				Path p = new Path(fileName);
				FileSystem fs = FileSystem.get(new Configuration());
				BufferedReader bfr = new BufferedReader(new InputStreamReader(
						fs.open(p)));
				String line;
				while ((line = bfr.readLine()) != null) {
					String[] fields = line.split(_kDelim);
					if (!m.containsKey(fields[0])) {
						m.put(fields[0], fields[1] + _kDelim + fields[2]);
					}
				}
				bfr.close();
			} catch (IOException e) {
				throw new RuntimeException(e);
			}
			return (m);
		}
		public void map(LongWritable key, Text value,
				OutputCollector<Text, Text> output, Reporter reporter)
				throws IOException {
			// ///
			// Constructing the vertex from the string representation
			// ///
			String line = value.toString();
			// node1 node2 edge_weight
			String[] fields = line.split(_kDelim);
			// source --> dest
			output.collect(new Text(fields[0]), new Text(neighMsgType + _kDelim
					+ fields[1] + _kDelim + fields[2]));
			if (goldLabels.containsKey(fields[0])) {
				output.collect(new Text(fields[0]),
						new Text(goldLabMsgType + _kDelim + goldLabels.get(fields[0])));
			}
			if (seedLabels.containsKey(fields[0])) {
				output.collect(new Text(fields[0]),
						new Text(injLabMsgType + _kDelim + seedLabels.get(fields[0])));
			}
			// dest --> source
			// generate this message only if source and destination
			// are different, as otherwise a similar message has already
			// been generated above.
			if (!fields[0].equals(fields[1])) {
				output.collect(new Text(fields[1]), new Text(neighMsgType
						+ _kDelim + fields[0] + _kDelim + fields[2]));
				if (goldLabels.containsKey(fields[1])) {
					output.collect(new Text(fields[1]),
							new Text(goldLabMsgType + _kDelim + goldLabels.get(fields[1])));
				}
				if (seedLabels.containsKey(fields[1])) {
					output.collect(new Text(fields[1]),
							new Text(injLabMsgType + _kDelim + seedLabels.get(fields[1])));
				}
			}
		}
	}
	/**
	 * Reducer: rebuilds a {@link Vertex} per key from the neighbor and label
	 * messages, normalizes its transition probabilities, strips the dummy
	 * label, computes the random-walk (inject/continue/abandon) probabilities
	 * with beta = {@code _kBeta}, and writes the node-factored line(s),
	 * splitting long neighbor lists across multiple lines that repeat all
	 * other fields.
	 */
	public static class Reduce extends MapReduceBase implements
			Reducer<Text, Text, Text, Text> {
		public void reduce(Text key, Iterator<Text> values,
				OutputCollector<Text, Text> output, Reporter reporter)
				throws IOException {
			String vertexId = key.toString();
			Vertex v = new Vertex(vertexId);
			while (values.hasNext()) {
				// neighbor/self edge_weight/inject_score
				String val = values.next().toString();
				String[] fields = val.split(_kDelim);
				String msgType = fields[0];
				String trgVertexId = fields[1];
				if (msgType.equals(neighMsgType)) {
					v.setNeighbor(trgVertexId, Double.parseDouble(fields[2]));
				} else if (msgType.equals(goldLabMsgType)) {
					v.setGoldLabel(trgVertexId, Double.parseDouble(fields[2]));
				} else if (msgType.equals(injLabMsgType)) {
					v.SetInjectedLabelScore(trgVertexId,
							Double.parseDouble(fields[2]));
				}
			}
			// normalize transition probabilities
			v.NormalizeTransitionProbability();
			// remove dummy labels
			v.SetInjectedLabelScore(Constants.GetDummyLabel(), 0);
			v.SetEstimatedLabelScore(Constants.GetDummyLabel(), 0);
			// calculate random walk probabilities
			v.CalculateRWProbabilities(_kBeta);
			// generate the random walk probability string of the node
			String rwProbStr = Constants._kInjProb + " "
					+ v.pinject() + " " + Constants._kContProb
					+ " " + v.pcontinue() + " "
					+ Constants._kTermProb + " "
					+ v.pabandon();
			// represent neighborhood information as a string
			Object[] neighNames = v.GetNeighborNames();
			String neighStr = "";
			int totalNeighbors = neighNames.length;
			for (int ni = 0; ni < totalNeighbors; ++ni) {
				// if the neighborhood string is already too long, then
				// print it out. It is possible to split the neighborhood
				// information of a node into multiple lines. However, all
				// other fields should be repeated in all the split lines.
				if (neighStr.length() > 0 && (ni % kMaxNeighorsPerLine_ == 0)) {
					// output format
					// id gold_label injected_labels estimated_labels neighbors
					// rw_probabilities
					output.collect(
							key,
							new Text(
									CollectionUtil.Map2String(v.goldLabels())
											+ _kDelim
											+ CollectionUtil.Map2String(v
													.injectedLabels())
											+ _kDelim
											+ CollectionUtil.Map2String(v
													.estimatedLabels())
											+ _kDelim + neighStr.trim()
											+ _kDelim + rwProbStr));
					// reset the neighborhood string
					neighStr = "";
				}
				neighStr += neighNames[ni] + " "
						+ v.GetNeighborWeight((String) neighNames[ni]) + " ";
			}
			// print out any remaining neighborhood information, plus all other
			// info
			if (neighStr.length() > 0) {
				// output format
				// id gold_label injected_labels estimated_labels neighbors
				// rw_probabilities
				output.collect(
						key,
						new Text(CollectionUtil.Map2String(v.goldLabels())
								+ _kDelim
								+ CollectionUtil.Map2String(v
										.injectedLabels())
								+ _kDelim
								+ CollectionUtil.Map2String(v
										.estimatedLabels()) + _kDelim
								+ neighStr.trim() + _kDelim + rwProbStr));
			}
		}
	}
	/**
	 * Job driver. Args: [0] edge-file input path, [1] gold label file,
	 * [2] seed label file, [3] output path.
	 */
	public static void main(String[] args) throws Exception {
		JobConf conf = new JobConf(Edge2NodeFactoredHadoop.class);
		conf.setJobName("edge2node_hadoop");
		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(Text.class);
		conf.setMapperClass(Map.class);
		// conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);
		conf.setInputFormat(TextInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);
		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		conf.set("gold_label_file", args[1]);
		conf.set("seed_label_file", args[2]);
		FileOutputFormat.setOutputPath(conf, new Path(args[3]));
		JobClient.runJob(conf);
	}
}
| 7,418 | 31.539474 | 71 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/graph/parallel/EdgeFactored2NodeFactored.java | package upenn.junto.graph.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Iterator;
import upenn.junto.graph.*;
import upenn.junto.util.*;
import upenn.junto.config.*;
/**
 * Converts an in-memory {@link Graph} into the node-factored text format used
 * by the Hadoop label-propagation jobs: one line per vertex containing its
 * gold labels, injected labels, estimated labels, neighbor list and
 * random-walk probabilities. Vertices with more than
 * {@code kMaxNeighorsPerLine_} neighbors are split across multiple lines,
 * with all non-neighbor fields repeated on each line.
 */
public class EdgeFactored2NodeFactored {
private static String kDelim_ = "\t";
// max neighbors emitted per output line before the record is split
private static int kMaxNeighorsPerLine_ = 100;
/**
* Entry point: loads the graph described by the config and, if
* "hadoop_graph_file" is set, writes the node-factored representation there.
*/
public static void main(String[] args) {
Hashtable config = ConfigReader.read_config(args);
Graph g = GraphConfigLoader.apply(config);
// save graph in file
if (config.containsKey("hadoop_graph_file")) {
WriteToFile(g, (String) config.get("hadoop_graph_file"));
}
}
/**
* Writes every vertex of {@code g} to {@code outputFile} in the
* node-factored format. As a side effect, each vertex's dummy label is
* zeroed out in its gold and estimated label distributions before writing.
* I/O failures are rethrown as unchecked RuntimeException.
*/
public static void WriteToFile(Graph g, String outputFile) {
try {
BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
Iterator<String> vIter = g.vertices().keySet().iterator();
while (vIter.hasNext()) {
String vName = vIter.next();
Vertex v = g.vertices().get(vName);
// remove dummy label from injected and estimated labels
v.setGoldLabel(Constants.GetDummyLabel(), 0.0);
v.SetEstimatedLabelScore(Constants.GetDummyLabel(), 0);
String rwProbStr =
Constants._kInjProb + " " + v.pinject() + " " +
Constants._kContProb + " " + v.pcontinue() + " " +
Constants._kTermProb + " " + v.pabandon();
// represent neighborhood information as a string
Object[] neighNames = v.GetNeighborNames();
String neighStr = "";
int totalNeighbors = neighNames.length;
for (int ni = 0; ni < totalNeighbors; ++ni) {
// if the neighborhood string is already too long, then
// print it out. It is possible to split the neighborhood
// information of a node into multiple lines. However, all
// other fields should be repeated in all the split lines.
if (neighStr.length() > 0 && (ni % kMaxNeighorsPerLine_ == 0)) {
WriteNodeLine(bw, v, neighStr, rwProbStr);
// reset the neighborhood string
neighStr = "";
}
neighStr += neighNames[ni] + " " +
v.GetNeighborWeight((String) neighNames[ni]) + " ";
}
// print out any remaining neighborhood information, plus all other info
if (neighStr.length() > 0) {
WriteNodeLine(bw, v, neighStr, rwProbStr);
}
}
bw.close();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
/**
* Writes one node-factored line for vertex {@code v}.
* Output format:
* id gold_labels injected_labels estimated_labels neighbors rw_probabilities
*/
private static void WriteNodeLine(BufferedWriter bw, Vertex v,
String neighStr, String rwProbStr) throws IOException {
bw.write(v.name() + kDelim_ +
CollectionUtil.Map2String(v.goldLabels()) + kDelim_ +
CollectionUtil.Map2String(v.injectedLabels()) + kDelim_ +
CollectionUtil.Map2String(v.estimatedLabels()) + kDelim_ +
neighStr.trim() + kDelim_ +
rwProbStr + "\n");
}
}
| 4,080 | 37.5 | 88 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/algorithm/parallel/LP_ZGL_Hadoop.java | package upenn.junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import upenn.junto.util.*;
import upenn.junto.config.*;
public class LP_ZGL_Hadoop {
private static String _kDelim = "\t";
public static class LP_ZGL_Map extends MapReduceBase
implements Mapper<LongWritable, Text, Text, Text> {
private Text word = new Text();
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
/////
// Constructing the vertex from the string representation
/////
String line = value.toString();
// id gold_label injected_labels estimated_labels neighbors rw_probabilities
String[] fields = line.split(_kDelim);
TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
boolean isSeedNode = fields[2].length() > 0 ? true : false;
// If the current node is a seed node but there is no
// estimate label information yet, then transfer the seed label
// to the estimated label distribution. Ideally, this is likely
// to be used in the map of the very first iteration.
if (isSeedNode && fields[3].length() == 0) {
fields[3] = fields[2];
}
// Send two types of messages:
// -- self messages which will store the injection labels and
// random walk probabilities.
// -- messages to neighbors about current estimated scores
// of the node.
//
// message to self
output.collect(new Text(fields[0]), new Text(line));
// message to neighbors
TObjectDoubleIterator neighIterator = neighbors.iterator();
while (neighIterator.hasNext()) {
neighIterator.advance();
// message (neighbor_node, current_node + DELIM + curr_node_label_scores
output.collect(new Text((String) neighIterator.key()),
new Text(fields[0] + _kDelim + fields[3]));
}
}
}
  /**
   * Reducer for one iteration of LP-ZGL label propagation.
   *
   * <p>Combines the node's self-message (full record) with the estimated
   * label scores sent by its neighbors. Seed nodes keep their injected
   * distribution (scaled by mu1); non-seed nodes receive the edge-weighted,
   * mu2-scaled mixture of their neighbors' label distributions, pruned to the
   * top {@code keepTopKLabels}. The result is normalized and written back in
   * the same line format so the output can feed the next iteration's mapper.
   */
  public static class LP_ZGL_Reduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
    // hyperparameters read from the job configuration
    private static double mu1;
    private static double mu2;
    private static int keepTopKLabels;
    public void configure(JobConf conf) {
      mu1 = Double.parseDouble(conf.get("mu1"));
      mu2 = Double.parseDouble(conf.get("mu2"));
      keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
    }
    public void reduce(Text key, Iterator<Text> values,
                       OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
      // new scores estimated for the current node
      TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();
      // set to true only if the message sent to itself is found.
      boolean isSelfMessageFound = false;
      String vertexId = key.toString();
      String vertexString = "";
      TObjectDoubleHashMap neighbors = null;
      TObjectDoubleHashMap randWalkProbs = null;
      // neighbor id -> serialized label-score string from that neighbor
      HashMap<String, String> neighScores =
        new HashMap<String, String>();
      int totalMessagesReceived = 0;
      boolean isSeedNode = false;
      // iterate over all the messages received at the node
      while (values.hasNext()) {
        ++totalMessagesReceived;
        String val = values.next().toString();
        String[] fields = val.split(_kDelim);
        // System.out.println("src: " + fields[0] + " dest: " + vertexId +
        // "MESSAGE>>" + val + "<<");
        // self-message check
        if (vertexId.equals(fields[0])) {
          isSelfMessageFound = true;
          vertexString = val;
          // System.out.println("Reduce: " + vertexId + " " + val + " " + fields.length);
          TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[2]);
          neighbors = CollectionUtil.String2Map(neighbors, fields[4]);
          randWalkProbs = CollectionUtil.String2Map(fields[5]);
          if (injLabels.size() > 0) {
            isSeedNode = true;
            // add injected labels to the estimated scores.
            ProbUtil.AddScores(newEstimatedScores,
                               mu1, injLabels);
          }
        } else {
          // an empty second field represents that the
          // neighbor has no valid label assignment yet.
          if (fields.length > 1) {
            neighScores.put(fields[0], fields[1]);
          }
        }
      }
      // terminate if message from self is not received.
      if (!isSelfMessageFound) {
        throw new RuntimeException("Self message not received for node " + vertexId);
      }
      // Add neighbor label scores to current node's label estimates only if the
      // current node is not a seed node. In case of seed nodes, clamp back the
      // injected label distribution, which is already done above when processing
      // the self messages
      if (!isSeedNode) {
        // collect neighbors label distributions and create one single
        // label distribution
        TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
        Iterator<String> neighIter = neighScores.keySet().iterator();
        while (neighIter.hasNext()) {
          String neighName = neighIter.next();
          // weight each neighbor's distribution by the connecting edge weight
          ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
                             mu2 * neighbors.get(neighName),
                             CollectionUtil.String2Map(neighScores.get(neighName)));
        }
        ProbUtil.Normalize(weightedNeigLablDist, keepTopKLabels);
        // now add the collective neighbor label distribution to
        // the estimate of the current node's labels.
        ProbUtil.AddScores(newEstimatedScores,
                           1.0, weightedNeigLablDist);
      }
      // normalize the scores
      ProbUtil.Normalize(newEstimatedScores);
      // now reconstruct the vertex representation (with the new estimated scores)
      // so that the output from the current mapper can be used as input in next
      // iteration's mapper.
      String[] vertexFields = vertexString.split(_kDelim);
      // drop the leading id field (field 0) — the Hadoop output key already
      // carries the vertex id, so the value starts at gold_labels; after the
      // shift, index 2 is the estimated-labels slot.
      String[] newVertexFields = new String[vertexFields.length - 1];
      for (int i = 1; i < vertexFields.length; ++i) {
        newVertexFields[i - 1] = vertexFields[i];
      }
      newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
      output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
    }
  }
public static void main(String[] args) throws Exception {
Hashtable config = ConfigReader.read_config(args);
String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
String currInputFilePat = baseInputFilePat;
String currOutputFilePat = "";
for (int iter = 1; iter <= numIterations; ++iter) {
JobConf conf = new JobConf(LP_ZGL_Hadoop.class);
conf.setJobName("lp_zgl_hadoop");
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(LP_ZGL_Map.class);
// conf.setCombinerClass(LP_ZGL_Reduce.class);
conf.setReducerClass(LP_ZGL_Reduce.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
// hyperparameters
conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
conf.set("keepTopKLabels",
Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
Integer.toString(Integer.MAX_VALUE)));
if (iter > 1) {
// output from last iteration is the input for current iteration
currInputFilePat = currOutputFilePat + "/*";
}
FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));
currOutputFilePat = baseOutputFilePat + "_" + iter;
FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));
JobClient.runJob(conf);
}
}
}
| 9,681 | 37.11811 | 103 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/algorithm/parallel/MADHadoop.java | package upenn.junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.jobcontrol.Job;
import upenn.junto.util.*;
import upenn.junto.config.*;
/**
 * Hadoop implementation of Modified Adsorption (MAD) label propagation.
 * Each iteration is one MapReduce job: the mapper turns every serialized
 * vertex into messages (one self message plus per-neighbor "labels" and
 * "edge_info" messages), and the reducer combines the messages received at
 * each vertex into a new estimated label distribution.
 */
public class MADHadoop {
  // field separator used both between vertex fields and inside messages
  private static String _kDelim = "\t";

  /**
   * Mapper: parses one tab-delimited vertex line and emits the messages the
   * reducer needs to update that vertex and its neighbors.
   */
  public static class MADHadoopMap extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text> {
    // NOTE(review): appears unused in this mapper.
    private Text word = new Text();

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, Text> output,
                    Reporter reporter) throws IOException {
      /////
      // Constructing the vertex from the string representation
      /////
      String line = value.toString();
      // id gold_label injected_labels estimated_labels neighbors rw_probabilities
      String[] fields = line.split(_kDelim);
      TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
      TObjectDoubleHashMap rwProbabilities = CollectionUtil.String2Map(fields[5]);
      // If the current node is a seed node but there is no
      // estimate label information yet, then transfer the seed label
      // to the estimated label distribution. Ideally, this is likely
      // to be used in the map of the very first iteration.
      boolean isSeedNode = fields[2].length() > 0 ? true : false;
      if (isSeedNode && fields[3].length() == 0) {
        fields[3] = fields[2];
      }
      // TODO(partha): move messages to ProtocolBuffers
      // Send two types of messages:
      // -- self messages which will store the injection labels and
      // random walk probabilities.
      // -- messages to neighbors about current estimated scores
      // of the node.
      //
      // message to self
      output.collect(new Text(fields[0]), new Text("labels" + _kDelim + line));
      // message to neighbors
      TObjectDoubleIterator neighIterator = neighbors.iterator();
      while (neighIterator.hasNext()) {
        neighIterator.advance();
        // message (neighbor_node, current_node + DELIM + curr_node_label_scores
        output.collect(new Text((String) neighIterator.key()),
                       new Text("labels" + _kDelim + fields[0] + _kDelim + fields[3]));
        // message (neighbor_node, curr_node + DELIM + curr_node_edge_weights + DELIM curr_node_cont_prob
        assert(neighbors.containsKey((String) neighIterator.key()));
        output.collect(new Text((String) neighIterator.key()),
                       new Text("edge_info" + _kDelim +
                                fields[0] + _kDelim +
                                neighbors.get((String) neighIterator.key()) + _kDelim +
                                rwProbabilities.get(Constants._kContProb)));
      }
    }
  }

  /**
   * Reducer: for each vertex, combines the seed-label term (weighted by mu1
   * and the injection probability), the neighbor-agreement term (weighted by
   * mu2 and the symmetrized edge weights), and the dummy-label term (weighted
   * by mu3 and the termination probability), then divides by the MAD
   * normalization constant and re-emits the vertex for the next iteration.
   */
  public static class MADHadoopReduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
    // MAD hyperparameters, read from the JobConf in configure().
    private static double mu1;
    private static double mu2;
    private static double mu3;
    private static int keepTopKLabels;  // max labels retained per vertex

    public void configure(JobConf conf) {
      mu1 = Double.parseDouble(conf.get("mu1"));
      mu2 = Double.parseDouble(conf.get("mu2"));
      mu3 = Double.parseDouble(conf.get("mu3"));
      keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
    }

    /**
     * Processes the "labels" and "edge_info" messages received at one vertex
     * and emits the updated vertex record.
     *
     * @throws RuntimeException if the self message is missing or a message
     *         has an unrecognized type tag
     */
    public void reduce(Text key, Iterator<Text> values,
                       OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
      // new scores estimated for the current node
      TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();
      // set to true only if the message sent to itself is found.
      boolean isSelfMessageFound = false;
      String vertexId = key.toString();
      String vertexString = "";
      TObjectDoubleHashMap neighbors = null;
      TObjectDoubleHashMap randWalkProbs = null;
      // neighbor id -> serialized label distribution of that neighbor
      HashMap<String, String> neighScores =
        new HashMap<String, String>();
      // reverse-direction edge weights W_ji and neighbor continue-probabilities,
      // harvested from "edge_info" messages
      TObjectDoubleHashMap incomingEdgeWeights = new TObjectDoubleHashMap();
      TObjectDoubleHashMap neighborContProb = new TObjectDoubleHashMap();
      int totalMessagesReceived = 0;
      // iterate over all the messages received at the node
      while (values.hasNext()) {
        ++totalMessagesReceived;
        String val = values.next().toString();
        String[] fields = val.split(_kDelim);
        // first field represents the type of message
        String msgType = fields[0];
        if (fields[0].equals("labels")) {
          // self-message check
          if (vertexId.equals(fields[1])) {
            isSelfMessageFound = true;
            vertexString = val;
            TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[3]);
            neighbors = CollectionUtil.String2Map(neighbors, fields[5]);
            randWalkProbs = CollectionUtil.String2Map(fields[6]);
            if (injLabels.size() > 0) {
              // add injected labels to the estimated scores.
              ProbUtil.AddScores(newEstimatedScores,
                                 mu1 * randWalkProbs.get(Constants._kInjProb),
                                 injLabels);
            }
          } else {
            // an empty third field represents that the
            // neighbor has no valid label assignment yet.
            if (fields.length > 2) {
              neighScores.put(fields[1], fields[2]);
            }
          }
        } else if (msgType.equals("edge_info")) {
          // edge_info neigh_vertex incoming_edge_weight cont_prob
          String neighId = fields[1];
          if (!incomingEdgeWeights.contains(neighId)) {
            incomingEdgeWeights.put(neighId, Double.parseDouble(fields[2]));
          }
          if (!neighborContProb.contains(neighId)) {
            neighborContProb.put(neighId, Double.parseDouble(fields[3]));
          }
        } else {
          throw new RuntimeException("Invalid message: " + val);
        }
      }
      // terminate if message from self is not received.
      if (!isSelfMessageFound) {
        throw new RuntimeException("Self message not received for node " + vertexId);
      }
      // collect neighbors' label distributions and create one single
      // label distribution
      TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
      Iterator<String> neighIter = neighScores.keySet().iterator();
      while (neighIter.hasNext()) {
        String neighName = neighIter.next();
        // symmetrized weight: p_i^cont * W_ij + p_j^cont * W_ji
        double mult = randWalkProbs.get(Constants._kContProb) * neighbors.get(neighName) +
          neighborContProb.get(neighName) * incomingEdgeWeights.get(neighName);
        ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
                           mu2 * mult,
                           CollectionUtil.String2Map(neighScores.get(neighName)));
      }
      // now add the collective neighbor label distribution to
      // the estimate of the current node's labels.
      ProbUtil.AddScores(newEstimatedScores,
                         1.0, weightedNeigLablDist);
      // add dummy label scores
      ProbUtil.AddScores(newEstimatedScores,
                         mu3 * randWalkProbs.get(Constants._kTermProb),
                         Constants.GetDummyLabelDist());
      if (keepTopKLabels < Integer.MAX_VALUE) {
        ProbUtil.KeepTopScoringKeys(newEstimatedScores, keepTopKLabels);
      }
      // divide by the per-vertex MAD normalization constant M_ii
      ProbUtil.DivScores(newEstimatedScores,
                         GetNormalizationConstant(neighbors, randWalkProbs,
                                                  incomingEdgeWeights, neighborContProb,
                                                  mu1, mu2, mu3));
      // now reconstruct the vertex representation (with the new estimated scores)
      // so that the output from the current mapper can be used as input in next
      // iteration's mapper.
      String[] vertexFields = vertexString.split(_kDelim);
      // replace estimated scores with the new ones.
      // Skip the first two fields as they contained the message header and
      // vertex id respectively.
      String[] newVertexFields = new String[vertexFields.length - 2];
      for (int i = 2; i < vertexFields.length; ++i) {
        newVertexFields[i - 2] = vertexFields[i];
      }
      newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
      output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
    }

    /**
     * Computes the MAD normalization constant M_ii for one vertex:
     * mu1 * p_i^inj + mu2 * sum_j (p_i^cont W_ij + p_j^cont W_ji) + mu3.
     * Note the method parameters deliberately shadow the static mu fields.
     */
    public double GetNormalizationConstant(
        TObjectDoubleHashMap neighbors,
        TObjectDoubleHashMap randWalkProbs,
        TObjectDoubleHashMap incomingEdgeWeights,
        TObjectDoubleHashMap neighborContProb,
        double mu1, double mu2, double mu3) {
      double mii = 0;
      double totalNeighWeight = 0;
      TObjectDoubleIterator nIter = neighbors.iterator();
      while (nIter.hasNext()) {
        nIter.advance();
        totalNeighWeight +=
          randWalkProbs.get(Constants._kContProb) * nIter.value();
        String neighName = (String) nIter.key();
        totalNeighWeight += neighborContProb.get(neighName) *
          incomingEdgeWeights.get(neighName);
      }
      // mu1 x p^{inj} +
      // 0.5 * mu2 x \sum_j (p_{i}^{cont} W_{ij} + p_{j}^{cont} W_{ji}) +
      // mu3
      mii = mu1 * randWalkProbs.get(Constants._kInjProb) +
        /*0.5 **/ mu2 * totalNeighWeight +
        mu3;
      return (mii);
    }
  }

  /**
   * Driver: chains the configured number of MAD iterations as Hadoop jobs,
   * wiring each iteration's output directory into the next one's input.
   */
  public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);
    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
    int numReducers = Defaults.GetValueOrDefault((String) config.get("num_reducers"), 10);
    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
      JobConf conf = new JobConf(MADHadoop.class);
      conf.setJobName("mad_hadoop");
      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(Text.class);
      conf.setMapperClass(MADHadoopMap.class);
      // conf.setCombinerClass(MADHadoopReduce.class);
      conf.setReducerClass(MADHadoopReduce.class);
      conf.setNumReduceTasks(numReducers);
      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);
      // hyperparameters, consumed in MADHadoopReduce.configure()
      conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
      conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
      conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
      conf.set("keepTopKLabels",
               Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
                                          Integer.toString(Integer.MAX_VALUE)));
      if (iter > 1) {
        // output from last iteration is the input for current iteration
        currInputFilePat = currOutputFilePat + "/*";
      }
      FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));
      currOutputFilePat = baseOutputFilePat + "_iter_" + iter;
      FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));
      JobClient.runJob(conf);
    }
  }
}
| 12,015 | 36.201238 | 108 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/upenn/junto/algorithm/parallel/AdsorptionHadoop.java | package upenn.junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import upenn.junto.util.*;
import upenn.junto.config.*;
/**
 * Hadoop implementation of the Adsorption label-propagation algorithm.
 * Each iteration is one MapReduce job: the mapper emits a self message (the
 * full vertex record) plus one label-score message per neighbor; the reducer
 * mixes the injected-label, neighbor, and dummy-label terms (weighted by
 * mu1/mu2/mu3 and the vertex's random-walk probabilities) into a new
 * normalized label distribution.
 */
public class AdsorptionHadoop {
  // field separator used both between vertex fields and inside messages
  private static String _kDelim = "\t";

  /**
   * Mapper: parses one tab-delimited vertex line and emits the messages the
   * reducer needs to update that vertex and its neighbors.
   */
  public static class Map extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text> {
    // NOTE(review): appears unused in this mapper.
    private Text word = new Text();

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, Text> output,
                    Reporter reporter) throws IOException {
      /////
      // Constructing the vertex from the string representation
      /////
      String line = value.toString();
      // id gold_label injected_labels estimated_labels neighbors rw_probabilities
      String[] fields = line.split(_kDelim);
      TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
      boolean isSeedNode = fields[2].length() > 0 ? true : false;
      // If the current node is a seed node but there is no
      // estimate label information yet, then transfer the seed label
      // to the estimated label distribution. Ideally, this is likely
      // to be used in the map of the very first iteration.
      if (isSeedNode && fields[3].length() == 0) {
        fields[3] = fields[2];
      }
      // Send two types of messages:
      // -- self messages which will store the injection labels and
      // random walk probabilities.
      // -- messages to neighbors about current estimated scores
      // of the node.
      //
      // message to self
      output.collect(new Text(fields[0]), new Text(line));
      // message to neighbors
      TObjectDoubleIterator neighIterator = neighbors.iterator();
      while (neighIterator.hasNext()) {
        neighIterator.advance();
        // message (neighbor_node, current_node + DELIM + curr_node_label_scores
        output.collect(new Text((String) neighIterator.key()),
                       new Text(fields[0] + _kDelim + fields[3]));
      }
    }
  }

  /**
   * Reducer: combines all messages received at one vertex into a new
   * estimated label distribution and re-emits the vertex record so it can
   * serve as next iteration's mapper input.
   */
  public static class Reduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
    // Adsorption hyperparameters, read from the JobConf in configure().
    private static double mu1;          // weight on the injected-label term
    private static double mu2;          // weight on the neighbor term
    private static double mu3;          // weight on the dummy-label term
    private static int keepTopKLabels;  // max labels kept during normalization

    public void configure(JobConf conf) {
      mu1 = Double.parseDouble(conf.get("mu1"));
      mu2 = Double.parseDouble(conf.get("mu2"));
      mu3 = Double.parseDouble(conf.get("mu3"));
      keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
    }

    /**
     * @throws RuntimeException if the vertex's self message is missing,
     *         since without it the vertex record cannot be reconstructed
     */
    public void reduce(Text key, Iterator<Text> values,
                       OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
      // new scores estimated for the current node
      TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();
      // set to true only if the message sent to itself is found.
      boolean isSelfMessageFound = false;
      String vertexId = key.toString();
      String vertexString = "";
      TObjectDoubleHashMap neighbors = null;
      TObjectDoubleHashMap randWalkProbs = null;
      // neighbor id -> serialized label distribution of that neighbor
      HashMap<String, String> neighScores =
        new HashMap<String, String>();
      int totalMessagesReceived = 0;
      // iterate over all the messages received at the node
      while (values.hasNext()) {
        ++totalMessagesReceived;
        String val = values.next().toString();
        String[] fields = val.split(_kDelim);
        // System.out.println("src: " + fields[0] + " dest: " + vertexId +
        // "MESSAGE>>" + val + "<<");
        // self-message check
        if (vertexId.equals(fields[0])) {
          isSelfMessageFound = true;
          vertexString = val;
          // System.out.println("Reduce: " + vertexId + " " + val + " " + fields.length);
          TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[2]);
          neighbors = CollectionUtil.String2Map(neighbors, fields[4]);
          randWalkProbs = CollectionUtil.String2Map(fields[5]);
          if (injLabels.size() > 0) {
            // add injected labels to the estimated scores.
            ProbUtil.AddScores(newEstimatedScores,
                               mu1 * randWalkProbs.get(Constants._kInjProb),
                               injLabels);
          }
        } else {
          // an empty second field represents that the
          // neighbor has no valid label assignment yet.
          if (fields.length > 1) {
            neighScores.put(fields[0], fields[1]);
          }
        }
      }
      // terminate if message from self is not received.
      if (!isSelfMessageFound) {
        throw new RuntimeException("Self message not received for node " + vertexId);
      }
      // collect neighbors label distributions and create one single
      // label distribution
      TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
      Iterator<String> neighIter = neighScores.keySet().iterator();
      while (neighIter.hasNext()) {
        String neighName = neighIter.next();
        // each neighbor weighted by mu2 * p^cont * edge weight
        ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
                           mu2 * randWalkProbs.get(Constants._kContProb) * neighbors.get(neighName),
                           CollectionUtil.String2Map(neighScores.get(neighName)));
      }
      ProbUtil.Normalize(weightedNeigLablDist);
      // now add the collective neighbor label distribution to
      // the estimate of the current node's labels.
      ProbUtil.AddScores(newEstimatedScores,
                         1.0, weightedNeigLablDist);
      // add dummy label scores
      ProbUtil.AddScores(newEstimatedScores,
                         mu3 * randWalkProbs.get(Constants._kTermProb),
                         Constants.GetDummyLabelDist());
      // normalize the scores
      ProbUtil.Normalize(newEstimatedScores, keepTopKLabels);
      // now reconstruct the vertex representation (with the new estimated scores)
      // so that the output from the current mapper can be used as input in next
      // iteration's mapper.
      String[] vertexFields = vertexString.split(_kDelim);
      // replace estimated scores with the new ones.
      // Drop the leading id field (it is re-emitted as the output key).
      String[] newVertexFields = new String[vertexFields.length - 1];
      for (int i = 1; i < vertexFields.length; ++i) {
        newVertexFields[i - 1] = vertexFields[i];
      }
      newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
      output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
    }
  }

  /**
   * Driver: chains the configured number of Adsorption iterations as Hadoop
   * jobs, wiring each iteration's output directory into the next one's input.
   */
  public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);
    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
      JobConf conf = new JobConf(AdsorptionHadoop.class);
      conf.setJobName("adsorption_hadoop");
      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(Text.class);
      conf.setMapperClass(Map.class);
      // conf.setCombinerClass(Reduce.class);
      conf.setReducerClass(Reduce.class);
      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);
      // hyperparameters, consumed in Reduce.configure()
      conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
      conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
      conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
      conf.set("keepTopKLabels",
               Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
                                          Integer.toString(Integer.MAX_VALUE)));
      if (iter > 1) {
        // output from last iteration is the input for current iteration
        currInputFilePat = currOutputFilePat + "/*";
      }
      FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));
      currOutputFilePat = baseOutputFilePat + "_" + iter;
      FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));
      JobClient.runJob(conf);
    }
  }
}
| 9,698 | 37.185039 | 96 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/config/Flags.java | package junto.config;
/**
* Copyright 2011 Partha Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Tests for configuration flags.
*/
public class Flags {

  /** Returns true iff {@code mode} selects the "original" algorithm variant. */
  public static boolean IsOriginalMode(String mode) {
    // `cond ? true : false` is redundant; return the boolean directly.
    return mode.equals("original");
  }

  /** Returns true iff {@code mode} selects the "modified" algorithm variant. */
  public static boolean IsModifiedMode(String mode) {
    return mode.equals("modified");
  }

  /** Returns true iff {@code nodeName} names a column (feature) node, marked by the "C#" prefix. */
  public static boolean IsColumnNode(String nodeName) {
    return (nodeName.startsWith("C#"));
  }
}
| 1,016 | 26.486486 | 75 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/config/ConfigReader.java | package junto.config;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Hashtable;
import java.util.StringTokenizer;
import junto.util.MessagePrinter;
/**
 * Reads whitespace-separated "key = value" configuration files into a
 * {@link Hashtable}, with optional command-line overrides.
 */
public class ConfigReader {

  /** Reads the config file at {@code fName} into a fresh hashtable. */
  public static Hashtable<String,String> read_config(String fName) {
    Hashtable<String,String> retval = new Hashtable<String,String>(50);
    return (read_config(retval, fName));
  }

  /**
   * Parses {@code fName} line by line into {@code retval}. Each line whose
   * first token does not start with '#' and that has exactly three tokens
   * ("key = value") contributes one entry; all other lines are ignored.
   * Every line read is echoed to stdout. On I/O failure the stack trace is
   * printed and the (possibly partial) table is returned, matching the
   * original best-effort behavior.
   *
   * @param retval table to populate (also returned)
   * @param fName  path of the config file to read
   * @return {@code retval}, with any successfully parsed entries added
   */
  public static Hashtable<String,String> read_config(Hashtable<String,String> retval, String fName) {
    BufferedReader br = null;
    try {
      // File reading preparation
      br = new BufferedReader(new InputStreamReader(new FileInputStream(fName)));
      // processing lines into lists
      String line;
      StringTokenizer st;
      line = br.readLine();
      String key = "";
      String value = "";
      while (line != null) {
        System.out.println(line);
        st = new StringTokenizer(line);
        // read this line: token 0 is the key, token 2 the value
        // (token 1 is expected to be "=").
        int i = 0;
        boolean noComment = true;
        while (noComment && (st.hasMoreTokens())) {
          String t = st.nextToken();
          if (i == 0) {
            if (t.startsWith("#"))
              noComment = false;
            else
              key = t;
          } else if (i == 2)
            value = t;
          i++;
        }
        // if we find a (( key = value )) line, add it to the HT
        if (i == 3) {
          retval.put(key, value);
        }
        line = br.readLine();
      }
    } catch (IOException ioe) {
      ioe.printStackTrace();
    } finally {
      // Close the reader even when an exception interrupts the read loop;
      // the previous version only closed the stream on the success path,
      // leaking the file handle on error.
      if (br != null) {
        try {
          br.close();
        } catch (IOException ignored) {
          // nothing sensible to do if close itself fails
        }
      }
    }
    return retval;
  }

  /**
   * Reads the config file named by {@code args[0]}, then applies overrides of
   * the form "key=value" from the remaining arguments. An argument with an
   * empty value ("key=") removes the key instead.
   */
  public static Hashtable<String,String> read_config(String[] args) {
    Hashtable<String,String> retVal = read_config(args[0]);
    for (int ai = 1; ai < args.length; ++ai) {
      String[] parts = args[ai].split("=");
      if (parts.length == 2 && parts[1].length() > 0) {
        System.out.println(parts[0] + " = " + parts[1]);
        retVal.put(parts[0], parts[1]);
      } else {
        retVal.remove(parts[0]);
        MessagePrinter.Print("Removing argument: " + parts[0] + "\n");
      }
    }
    return (retVal);
  }
}
| 2,279 | 23.255319 | 103 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/eval/GraphEval.java | package junto.eval;
import java.util.Iterator;
import junto.graph.Graph;
import junto.graph.Vertex;
/**
 * Evaluation metrics computed over the vertices of a label-propagation graph:
 * accuracy, mean reciprocal rank (MRR) over test or seed vertices, and RMSE.
 */
public class GraphEval {

  /** Fraction of test vertices whose MRR is exactly 1 (top label correct). */
  public static double GetAccuracy(Graph g) {
    int correctCount = 0;
    int testCount = 0;
    Iterator<String> nameIter = g.vertices().keySet().iterator();
    while (nameIter.hasNext()) {
      Vertex vertex = g.vertices().get(nameIter.next());
      if (!vertex.isTestNode()) {
        continue;
      }
      ++testCount;
      if (vertex.GetMRR() == 1) {
        ++correctCount;
      }
    }
    return ((1.0 * correctCount) / testCount);
  }

  /** Mean reciprocal rank averaged over all test vertices. */
  public static double GetAverageTestMRR(Graph g) {
    return averageMrr(g, true);
  }

  /** Mean reciprocal rank averaged over all seed (training) vertices. */
  public static double GetAverageTrainMRR(Graph g) {
    return averageMrr(g, false);
  }

  // Shared loop behind the two MRR averages; overTestNodes selects whether
  // test vertices or seed vertices contribute to the mean.
  private static double averageMrr(Graph g, boolean overTestNodes) {
    double mrrSum = 0;
    int nodeCount = 0;
    Iterator<String> nameIter = g.vertices().keySet().iterator();
    while (nameIter.hasNext()) {
      Vertex vertex = g.vertices().get(nameIter.next());
      boolean selected = overTestNodes ? vertex.isTestNode() : vertex.isSeedNode();
      if (selected) {
        mrrSum += vertex.GetMRR();
        ++nodeCount;
      }
    }
    return ((1.0 * mrrSum) / nodeCount);
  }

  /** Root mean squared error over all test vertices. */
  public static double GetRMSE(Graph g) {
    double mseSum = 0;
    int testCount = 0;
    Iterator<String> nameIter = g.vertices().keySet().iterator();
    while (nameIter.hasNext()) {
      Vertex vertex = g.vertices().get(nameIter.next());
      if (vertex.isTestNode()) {
        mseSum += vertex.GetMSE();
        ++testCount;
      }
    }
    return (Math.sqrt((1.0 * mseSum) / testCount));
  }
}
| 2,228 | 23.228261 | 83 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/RyanAlphabet.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:mccallum@cs.umass.edu">mccallum@cs.umass.edu</a>
*/
package junto.util;
import java.util.ArrayList;
import java.io.*;
import java.util.*;
import gnu.trove.map.hash.TObjectIntHashMap;
public class RyanAlphabet implements Serializable {
TObjectIntHashMap map;
ArrayList entries;
boolean growthStopped = false;
Class entryClass = null;
public RyanAlphabet(int capacity, Class entryClass) {
this.map = new TObjectIntHashMap(capacity);
this.entries = new ArrayList(capacity);
this.entryClass = entryClass;
}
public RyanAlphabet(Class entryClass) {
this(8, entryClass);
}
public RyanAlphabet(int capacity) {
this(capacity, null);
}
public RyanAlphabet() {
this(8, null);
}
public Object clone() {
//try {
// Wastes effort, because we over-write ivars we create
RyanAlphabet ret = new RyanAlphabet();
ret.map = new TObjectIntHashMap(map);
ret.entries = (ArrayList) entries.clone();
ret.growthStopped = growthStopped;
ret.entryClass = entryClass;
return ret;
//} catch (CloneNotSupportedException e) {
//e.printStackTrace();
//throw new IllegalStateException ("Couldn't clone InstanceList Vocabuary");
//}
}
/** Return -1 if entry isn't present. */
public int lookupIndex(Object entry, boolean addIfNotPresent) {
if (entry == null)
throw new IllegalArgumentException(
"Can't lookup \"null\" in an RyanAlphabet.");
if (entryClass == null)
entryClass = entry.getClass();
else
// Insist that all entries in the RyanAlphabet are of the same
// class. This may not be strictly necessary, but will catch a
// bunch of easily-made errors.
if (entry.getClass() != entryClass)
throw new IllegalArgumentException("Non-matching entry class, "
+ entry.getClass() + ", was " + entryClass);
int ret = map.get(entry);
if (!map.containsKey(entry) && !growthStopped && addIfNotPresent) {
//xxxx: not necessary, fangfang, Aug. 2003
// if (entry instanceof String)
// entry = ((String)entry).intern();
ret = entries.size();
map.put(entry, entries.size());
entries.add(entry);
}
return ret;
}
public int lookupIndex(Object entry) {
return lookupIndex(entry, true);
}
public Object lookupObject(int index) {
return entries.get(index);
}
public Object[] toArray() {
return entries.toArray();
}
// xxx This should disable the iterator's remove method...
public Iterator iterator() {
return entries.iterator();
}
public Object[] lookupObjects(int[] indices) {
Object[] ret = new Object[indices.length];
for (int i = 0; i < indices.length; i++)
ret[i] = entries.get(indices[i]);
return ret;
}
public int[] lookupIndices(Object[] objects, boolean addIfNotPresent) {
int[] ret = new int[objects.length];
for (int i = 0; i < objects.length; i++)
ret[i] = lookupIndex(objects[i], addIfNotPresent);
return ret;
}
public boolean contains(Object entry) {
return map.contains(entry);
}
public int size() {
return entries.size();
}
public void stopGrowth() {
growthStopped = true;
}
public void allowGrowth() {
growthStopped = false;
}
public boolean growthStopped() {
return growthStopped;
}
public Class entryClass() {
return entryClass;
}
/** Return String representation of all RyanAlphabet entries, each
separated by a newline. */
public String toString() {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < entries.size(); i++) {
sb.append(entries.get(i).toString());
sb.append('\n');
}
return sb.toString();
}
public void dump() {
dump(System.out);
}
public void dump(PrintStream out) {
for (int i = 0; i < entries.size(); i++) {
out.println(i + " => " + entries.get(i));
}
}
public void dump(String outputFile) {
try {
BufferedWriter bwr = new BufferedWriter(new FileWriter(outputFile));
for (int i = 0; i < entries.size(); i++) {
bwr.write(entries.get(i) + "\t" + map.get(entries.get(i)) + "\n");
}
bwr.close();
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
/**
 * Custom serialization: writes a version tag, the entry count, each entry,
 * the growth flag, and the entry class — in that exact order, mirrored by
 * readObject below.
 */
private void writeObject(ObjectOutputStream out) throws IOException {
  out.writeInt(CURRENT_SERIAL_VERSION);
  out.writeInt(entries.size());
  for (int i = 0; i < entries.size(); i++)
    out.writeObject(entries.get(i));
  out.writeBoolean(growthStopped);
  out.writeObject(entryClass);
}
/**
 * Custom deserialization matching writeObject's field order. Rebuilds the
 * entry list and the entry→index map from the serialized entries.
 * Note: the version tag is read but not otherwise used.
 */
private void readObject(ObjectInputStream in) throws IOException,
    ClassNotFoundException {
  int version = in.readInt();
  int size = in.readInt();
  entries = new ArrayList(size);
  map = new TObjectIntHashMap(size);
  for (int i = 0; i < size; i++) {
    Object o = in.readObject();
    // entries are re-indexed by position, restoring the original indices
    map.put(o, i);
    entries.add(o);
  }
  growthStopped = in.readBoolean();
  entryClass = (Class) in.readObject();
}
// public String toString()
// {
// return Arrays.toString(map.keys());
//}
}
| 5,798 | 26.746411 | 89 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/Constants.java | package junto.util;
import gnu.trove.map.hash.TObjectDoubleHashMap;
/**
 * Shared string keys and numeric constants used across the label-propagation
 * code. All accessors are pure and return fixed values.
 */
public class Constants {

  public static String _kContProb = "cont_prob";
  public static String _kInjProb = "inj_prob";
  public static String _kTermProb = "term_prob";

  // Backing values for the accessors below; private so callers keep going
  // through the methods.
  private static final double SMALL_CONSTANT = 1e-12;
  private static final String DUMMY_LABEL = "__DUMMY__";
  private static final String DOC_PREFIX = "DOC_";
  private static final String FEAT_PREFIX = "C#";
  private static final String PRECISION_STRING = "precision";
  private static final String MRR_STRING = "mrr";
  private static final String MDMBRR_STRING = "mdmbrr";
  private static final double STOPPING_THRESHOLD = 0.001;

  /** Tiny smoothing constant used to avoid division by zero / log(0). */
  public static double GetSmallConstant() {
    return SMALL_CONSTANT;
  }

  /** Placeholder label meaning "none of the known labels". */
  public static String GetDummyLabel() {
    return DUMMY_LABEL;
  }

  public static String GetDocPrefix() {
    return DOC_PREFIX;
  }

  /** Prefix marking feature vertices (historically "FEAT_"). */
  public static String GetFeatPrefix() {
    return FEAT_PREFIX;
  }

  public static String GetPrecisionString() {
    return PRECISION_STRING;
  }

  public static String GetMRRString() {
    return MRR_STRING;
  }

  public static String GetMDBRRString() {
    return MDMBRR_STRING;
  }

  public static double GetStoppingThreshold() {
    return STOPPING_THRESHOLD;
  }

  /** Distribution placing all probability mass on the dummy label. */
  public static TObjectDoubleHashMap GetDummyLabelDist() {
    TObjectDoubleHashMap dist = new TObjectDoubleHashMap();
    dist.put(Constants.GetDummyLabel(), 1.0);
    return dist;
  }
}
| 1,014 | 18.901961 | 58 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/Defaults.java | package junto.util;
import java.util.Hashtable;
/**
 * Helpers for reading configuration values with fallbacks. The
 * GetValueOrDefault overloads parse the string form when present and return
 * the default otherwise; GetValueOrDie terminates the program when a required
 * key is missing.
 */
public class Defaults {

  /** Returns config[key] as a String, exiting the process if the key is absent. */
  public static String GetValueOrDie(Hashtable config, String key) {
    if (!config.containsKey(key)) {
      MessagePrinter.PrintAndDie("Must specify " + key + "");
    }
    return ((String) config.get(key));
  }

  public static String GetValueOrDefault(String valStr, String defaultVal) {
    return (valStr != null) ? valStr : defaultVal;
  }

  public static double GetValueOrDefault(String valStr, double defaultVal) {
    return (valStr != null) ? Double.parseDouble(valStr) : defaultVal;
  }

  public static boolean GetValueOrDefault(String valStr, boolean defaultVal) {
    return (valStr != null) ? Boolean.parseBoolean(valStr) : defaultVal;
  }

  public static int GetValueOrDefault(String valStr, int defaultVal) {
    return (valStr != null) ? Integer.parseInt(valStr) : defaultVal;
  }
}
| 1,073 | 21.851064 | 78 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/IoUtil.java | package junto.util;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * File-loading helpers. Each method reads a whole file eagerly, logs the
 * number of entries loaded, and rethrows IO failures as RuntimeException.
 *
 * Fixed relative to the original: readers are closed even on failure
 * (try-with-resources; previously they leaked when an IOException was
 * thrown mid-read), and duplicate filtering uses a LinkedHashSet instead of
 * ArrayList.contains, turning accidental O(n^2) loads into O(n) while
 * preserving first-seen order.
 */
public class IoUtil {

  private static Logger logger = LogManager.getLogger(IoUtil.class);

  /** Loads one entry per line, dropping duplicates, preserving first-seen order. */
  public static ArrayList<String> LoadFile(String fileName) {
    LinkedHashSet<String> seen = new LinkedHashSet<String>();
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        seen.add(line);
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    ArrayList<String> retList = new ArrayList<String>(seen);
    logger.info("Total " + retList.size() +
        " entries loaded from " + fileName);
    return (retList);
  }

  /** Like LoadFile, but keeps only the first tab-separated field of each line. */
  public static ArrayList<String> LoadFirstFieldFile(String fileName) {
    LinkedHashSet<String> seen = new LinkedHashSet<String>();
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        String[] fields = line.split("\t");
        seen.add(fields[0]);
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    ArrayList<String> retList = new ArrayList<String>(seen);
    logger.info("Total " + retList.size() +
        " entries loaded from " + fileName);
    return (retList);
  }

  /**
   * Loads an alphabet dump written as "entry\tindex" lines (see
   * RyanAlphabet.dump(String)); asserts that the index assigned on reload
   * matches the index recorded in the file.
   */
  public static RyanAlphabet LoadAlphabet(String fileName) {
    RyanAlphabet retAlpha = new RyanAlphabet();
    try (BufferedReader bfr = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = bfr.readLine()) != null) {
        String[] fields = line.split("\t");
        retAlpha.lookupIndex(fields[0], true);
        assert (retAlpha.lookupIndex(fields[0]) == Integer.parseInt(fields[1]));
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    logger.info("Total " + retAlpha.size() +
        " entries loaded from " + fileName);
    return (retAlpha);
  }
}
| 2,183 | 28.12 | 80 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/MessagePrinter.java | package junto.util;
/** Minimal console-output helpers used throughout the toolkit. */
public class MessagePrinter {

  /** Prints {@code msg} followed by a bare '\n' to standard out. */
  public static void Print(String msg) {
    String line = msg + "\n";
    System.out.print(line);
  }

  /** Prints {@code msg} (plus a blank line) and terminates the JVM with status 1. */
  public static void PrintAndDie(String msg) {
    String line = msg + "\n";
    System.out.println(line);
    System.exit(1);
  }
}
| 244 | 16.5 | 46 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/ObjectDoublePair.java | package junto.util;
/**
* Used, e.g., to keep track of an Object and its associated score.
*/
/**
 * Used, e.g., to keep track of an Object and its associated score.
 * Immutable pairing of an arbitrary object (typically a label) with a
 * double-valued score.
 */
public class ObjectDoublePair {

  private final Object label;
  private final double score;

  public ObjectDoublePair(Object l, double s) {
    label = l;
    score = s;
  }

  public Object GetLabel() {
    return label;
  }

  public double GetScore() {
    return score;
  }
}
| 386 | 15.826087 | 67 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/CollectionUtil.java | package junto.util;
import gnu.trove.iterator.TObjectDoubleIterator;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Hashtable;
import java.util.Iterator;
/**
 * Utilities for converting between trove score maps, sorted lists, and their
 * string serializations.
 *
 * Fixed relative to the original: removed an unused iterator local in
 * Map2String(TObjectDoubleHashMap, RyanAlphabet), and replaced the
 * deprecated {@code new Integer(int)} constructor with
 * {@code Integer.valueOf} in String2Integer. No behavior changes.
 */
public class CollectionUtil {

  /** Returns the map's (key, value) pairs sorted by score, highest first. */
  public static ArrayList<ObjectDoublePair> ReverseSortMap(TObjectDoubleHashMap m) {
    ArrayList<ObjectDoublePair> lsps = new ArrayList<ObjectDoublePair>();
    TObjectDoubleIterator mi = m.iterator();
    while (mi.hasNext()) {
      mi.advance();
      lsps.add(new ObjectDoublePair(mi.key(), mi.value()));
    }
    ObjectDoublePairComparator lspComparator = new ObjectDoublePairComparator();
    Collections.sort(lsps, lspComparator);
    return (lsps);
  }

  /** Orders pairs by descending score. */
  protected static class ObjectDoublePairComparator implements Comparator<ObjectDoublePair> {
    @Override
    public int compare(ObjectDoublePair p1, ObjectDoublePair p2) {
      double diff = p2.GetScore() - p1.GetScore();
      return (diff > 0 ? 1 : (diff < 0 ? -1 : 0));
    }
  }

  /** Parses "key1 val1 key2 val2 ..." into a fresh score map. */
  public static TObjectDoubleHashMap String2Map(String inp) {
    return (String2Map(null, inp));
  }

  /**
   * Parses "key1 val1 key2 val2 ..." into retMap (allocated when null).
   * An empty string yields an empty map.
   */
  public static TObjectDoubleHashMap String2Map(TObjectDoubleHashMap retMap,
      String inp) {
    if (retMap == null) {
      retMap = new TObjectDoubleHashMap();
    }
    if (inp.length() > 0) {
      String[] fields = inp.split(" ");
      for (int i = 0; i < fields.length; i += 2) {
        retMap.put(fields[i], Double.parseDouble(fields[i + 1]));
      }
    }
    return (retMap);
  }

  /** Serializes the map as "key1 val1 key2 val2 ...", highest score first. */
  public static String Map2String(TObjectDoubleHashMap m) {
    return (Map2String(m, null));
  }

  /**
   * Serializes the map as "key1 val1 key2 val2 ..." in descending-score
   * order. When an alphabet is supplied and a key parses as an integer, the
   * key is replaced by the alphabet entry at that index.
   */
  public static String Map2String(TObjectDoubleHashMap m, RyanAlphabet a) {
    String retString = "";
    ArrayList<ObjectDoublePair> sortedMap = ReverseSortMap(m);
    int n = sortedMap.size();
    for (int i = 0; i < n; ++i) {
      String label = (String) sortedMap.get(i).GetLabel();
      if (a != null) {
        Integer li = String2Integer(label);
        if (li != null) {
          label = (String) a.lookupObject(li.intValue());
        }
      }
      retString += " " + label + " " + sortedMap.get(i).GetScore();
    }
    return (retString.trim());
  }

  /** Parses str as an int, returning null (not throwing) when it isn't one. */
  public static Integer String2Integer(String str) {
    Integer retInt = null;
    try {
      int ri = Integer.parseInt(str);
      retInt = Integer.valueOf(ri);
    } catch (NumberFormatException nfe) {
      // non-numeric input is expected; signal it with a null return
    }
    return (retInt);
  }

  /** Renders a Hashtable as "key = value" lines for human consumption. */
  public static String Map2StringPrettyPrint(Hashtable m) {
    String retString = "";
    Iterator iter = m.keySet().iterator();
    while (iter.hasNext()) {
      String key = (String) iter.next();
      retString += key + " = " + m.get(key) + "\n";
    }
    return (retString.trim());
  }

  /** Joins fields with delim; returns "" for an empty array. */
  public static String Join(String[] fields, String delim) {
    String retString = "";
    for (int si = 0; si < fields.length; ++si) {
      if (si > 0) {
        retString += delim + fields[si];
      } else {
        retString = fields[0];
      }
    }
    return (retString);
  }

  /** Returns the elements of l2 (in l2's order) that are keys of m1. */
  public static ArrayList<String> GetIntersection(TObjectDoubleHashMap m1,
      ArrayList<String> l2) {
    ArrayList<String> retList = new ArrayList<String>();
    for (int i = 0; i < l2.size(); ++i) {
      if (m1.containsKey(l2.get(i))) {
        retList.add(l2.get(i));
      }
    }
    return (retList);
  }
}
| 3,512 | 26.661417 | 93 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/RyanFeatureVector.java | package junto.util;
import gnu.trove.map.hash.TIntDoubleHashMap;
import gnu.trove.iterator.TIntDoubleIterator;
import java.io.*;
import java.util.*;
/**
 * Sparse feature vector stored as a singly linked list of (index, value)
 * nodes. Lists built by this class end in a sentinel node constructed as
 * (-1, -1.0, null); traversal loops below stop when {@code curr.next == null},
 * so a sentinel's payload is never read. Duplicate indices may appear in a
 * list; aggregate operations (dot products, norms) first collapse them into
 * a hash map.
 *
 * NOTE(review): several methods rely on trove hash maps returning their
 * default no-entry value (0) from get() for absent keys — confirm for the
 * trove version in use.
 */
public class RyanFeatureVector implements Comparable, Serializable {

  public int index;
  public double value;
  public RyanFeatureVector next;

  public RyanFeatureVector(int i, double v, RyanFeatureVector n) {
    index = i;
    value = v;
    next = n;
  }

  /**
   * Prepends (featureIndex, val) and returns the new head when {@code feat}
   * resolves to a non-negative index; otherwise returns this list unchanged.
   */
  public RyanFeatureVector add(String feat, double val, RyanAlphabet dataAlphabet) {
    int num = dataAlphabet.lookupIndex(feat);
    if(num >= 0)
      return new RyanFeatureVector(num,val,this);
    return this;
  }

  /**
   * In-place prepend: copies this head node one link down, then overwrites
   * this node with (i1, v1). Existing references to the head see the new pair.
   */
  public void add(int i1, double v1) {
    RyanFeatureVector new_node = new RyanFeatureVector(this.index, this.value, this.next);
    this.index = i1;
    this.value = v1;
    this.next = new_node;
  }

  /** Copies the payload nodes of fv1 then fv2 onto a fresh sentinel (order reversed). */
  public static RyanFeatureVector cat(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    RyanFeatureVector result = new RyanFeatureVector(-1,-1.0,null);
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value,result);
    }
    for(RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value,result);
    }
    return result;
  }

  // fv1 - fv2
  /** Difference vector: fv1's entries as-is plus fv2's entries negated. */
  public static RyanFeatureVector getDistVector(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    RyanFeatureVector result = new RyanFeatureVector(-1, -1.0, null);
    for (RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if (curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index, curr.value, result);
    }
    for (RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if (curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index, -curr.value, result);
    }
    return result;
  }

  /**
   * Returns fv1 + rate * fv2 with duplicate indices merged; node order of
   * the result follows the hash map's iteration order.
   */
  public static RyanFeatureVector getAddedVector(RyanFeatureVector fv1, RyanFeatureVector fv2, double rate) {
    TIntDoubleHashMap hm = new TIntDoubleHashMap();
    for (RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if (curr.index >= 0) {
        hm.put(curr.index, (hm.containsKey(curr.index) ? hm.get(curr.index) : 0) + curr.value);
      }
    }
    for (RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if (curr.index >= 0) {
        hm.put(curr.index, (hm.containsKey(curr.index) ? hm.get(curr.index) : 0) + rate * curr.value);
      }
    }
    RyanFeatureVector result = new RyanFeatureVector(-1, -1, null);
    TIntDoubleIterator hmIter = hm.iterator();
    while (hmIter.hasNext()) {
      hmIter.advance();
      result = new RyanFeatureVector(hmIter.key(), hmIter.value(), result);
    }
    return result;
  }

  /**
   * Sparse dot product: duplicate indices are first summed into hash maps;
   * iterating only hm1's keys suffices because keys absent from hm2
   * contribute 0 (trove's default no-entry value — see class note).
   */
  public static double dotProduct(RyanFeatureVector fv1, RyanFeatureVector fv2) {
    double result = 0.0;
    TIntDoubleHashMap hm1 = new TIntDoubleHashMap();
    TIntDoubleHashMap hm2 = new TIntDoubleHashMap();
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm1.put(curr.index,hm1.get(curr.index)+curr.value);
    }
    for(RyanFeatureVector curr = fv2; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm2.put(curr.index,hm2.get(curr.index)+curr.value);
    }
    int[] keys = hm1.keys();
    for(int i = 0; i < keys.length; i++) {
      double v1 = hm1.get(keys[i]);
      double v2 = hm2.get(keys[i]);
      result += v1*v2;
    }
    return result;
  }

  /** Sum of payload values (NOT absolute values, despite the L1-like name). */
  public static double oneNorm(RyanFeatureVector fv1) {
    double sum = 0.0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      sum += curr.value;
    }
    return sum;
  }

  /** Number of payload nodes (duplicates counted separately). */
  public static int size(RyanFeatureVector fv1) {
    int sum = 0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      sum++;
    }
    return sum;
  }

  /** Euclidean norm after merging duplicate indices. */
  public static double twoNorm(RyanFeatureVector fv1) {
    TIntDoubleHashMap hm = new TIntDoubleHashMap();
    double sum = 0.0;
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      hm.put(curr.index,hm.get(curr.index)+curr.value);
    }
    int[] keys = hm.keys();
    for(int i = 0; i < keys.length; i++)
      sum += Math.pow(hm.get(keys[i]),2.0);
    return Math.sqrt(sum);
  }

  /** Returns a copy scaled by 1/twoNorm(fv1). */
  public static RyanFeatureVector twoNormalize(RyanFeatureVector fv1) {
    return normalize(fv1,twoNorm(fv1));
  }

  /** Returns a copy scaled by 1/oneNorm(fv1). */
  public static RyanFeatureVector oneNormalize(RyanFeatureVector fv1) {
    return normalize(fv1,oneNorm(fv1));
  }

  /** Returns a copy with every payload value divided by {@code norm}. */
  public static RyanFeatureVector normalize(RyanFeatureVector fv1, double norm) {
    RyanFeatureVector result = new RyanFeatureVector(-1,-1.0,null);
    for(RyanFeatureVector curr = fv1; curr.next != null; curr = curr.next) {
      if(curr.index < 0)
        continue;
      result = new RyanFeatureVector(curr.index,curr.value/norm,result);
    }
    return result;
  }

  /** Space-separated "index:value" rendering, including the sentinel node. */
  public String toString() {
    if (next == null)
      return "" + index + ":" + value;
    return index + ":" + value + " " + next.toString();
  }

  /** Sorts this list in place by index, rewriting the head node's fields. */
  public void sort() {
    ArrayList features = new ArrayList();
    for(RyanFeatureVector curr = this; curr != null; curr = curr.next)
      if(curr.index >= 0)
        features.add(curr);
    Object[] feats = features.toArray();
    Arrays.sort(feats);
    RyanFeatureVector fv = new RyanFeatureVector(-1,-1.0,null);
    // rebuild back-to-front so the final list is in ascending index order
    for(int i = feats.length-1; i >= 0; i--) {
      RyanFeatureVector tmp = (RyanFeatureVector)feats[i];
      fv = new RyanFeatureVector(tmp.index,tmp.value,fv);
    }
    this.index = fv.index;
    this.value = fv.value;
    this.next = fv.next;
  }

  /** Orders nodes by index; used by sort() via Arrays.sort. */
  public int compareTo(Object o) {
    RyanFeatureVector fv = (RyanFeatureVector)o;
    if(index < fv.index)
      return -1;
    if(index > fv.index)
      return 1;
    return 0;
  }

  /**
   * Dot product of this vector with a dense weight array.
   * Name is misspelled ("dotProdoct") but kept for caller compatibility.
   */
  public double dotProdoct(double[] weights) {
    double score = 0.0;
    for(RyanFeatureVector curr = this; curr != null; curr = curr.next) {
      if (curr.index >= 0)
        score += weights[curr.index]*curr.value;
    }
    return score;
  }
}
| 6,215 | 26.026087 | 111 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/ProbUtil.java | package junto.util;
import gnu.trove.iterator.TObjectDoubleIterator;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import java.util.ArrayList;
/**
 * Helpers for manipulating label distributions represented as trove
 * object-to-double score maps.
 */
public class ProbUtil {

  /** Returns a distribution assigning 1/|labels| to every label. */
  public static TObjectDoubleHashMap GetUniformPrior(ArrayList<String> labels) {
    int totalLabels = labels.size();
    assert (totalLabels > 0);
    double prior = 1.0 / totalLabels;
    assert (prior > 0);
    TObjectDoubleHashMap retMap = new TObjectDoubleHashMap();
    for (int li = 0; li < totalLabels; ++li) {
      retMap.put(labels.get(li), prior);
    }
    return (retMap);
  }

  // this method returns result += mult * addDist
  public static void AddScores(TObjectDoubleHashMap result, double mult,
      TObjectDoubleHashMap addDist) {
    assert (result != null);
    assert (addDist != null);
    TObjectDoubleIterator iter = addDist.iterator();
    while (iter.hasNext()) {
      iter.advance();
      double adjVal = mult * iter.value();
      // System.out.println(">> adjVal: " + mult + " " + iter.key() + " " + iter.value() + " " + adjVal);
      result.adjustOrPutValue(iter.key(), adjVal, adjVal);
    }
  }

  /** Divides every score in result by divisor, in place. */
  public static void DivScores(TObjectDoubleHashMap result, double divisor) {
    assert (result != null);
    assert (divisor > 0);
    TObjectDoubleIterator li = result.iterator();
    while (li.hasNext()) {
      li.advance();
      // System.out.println("Before: " + " " + li.key() + " " + li.value() + " " + divisor);
      double newVal = (1.0 * li.value()) / divisor;
      result.put(li.key(), newVal);
      // System.out.println("After: " + " " + li.key() + " " + result.get(li.key()) + " " + divisor);
    }
  }

  /**
   * In-place truncation of m to its keepTopK highest-scoring keys.
   * Note: entries with non-positive scores still consume budget (totalAdded
   * increments for them) but are not re-inserted, so the result may hold
   * fewer than keepTopK keys even when m had enough entries.
   */
  public static void KeepTopScoringKeys(TObjectDoubleHashMap m, int keepTopK) {
    ArrayList<ObjectDoublePair> lsps = CollectionUtil.ReverseSortMap(m);
    // the array is sorted from large to small, so start
    // from beginning and retain only top scoring k keys.
    m.clear();
    int totalAdded = 0;
    int totalSorted = lsps.size();
    // for (int li = lsps.size() - 1; li >= 0 && totalAdded <= keepTopK; --li) {
    for (int li = 0; li < totalSorted && totalAdded < keepTopK; ++li) {
      ++totalAdded;
      if (lsps.get(li).GetScore() > 0) {
        m.put(lsps.get(li).GetLabel(), lsps.get(li).GetScore());
      }
    }
    // size of the new map is upper bounded by the max
    // number of entries requested
    assert (m.size() <= keepTopK);
  }

  /** Normalizes m to sum to 1, keeping all keys. */
  public static void Normalize(TObjectDoubleHashMap m) {
    Normalize(m, Integer.MAX_VALUE);
  }

  /**
   * Optionally truncates m to its keepTopK top keys, then normalizes in
   * place. When the scores sum to 0 the map is left unnormalized.
   */
  public static void Normalize(TObjectDoubleHashMap m, int keepTopK) {
    // if the number of labels to retain are not the trivial
    // default value, then keep the top scoring k labels as requested
    if (keepTopK != Integer.MAX_VALUE) {
      KeepTopScoringKeys(m, keepTopK);
    }
    TObjectDoubleIterator mi = m.iterator();
    double denom = 0;
    while (mi.hasNext()) {
      mi.advance();
      denom += mi.value();
    }
    // assert (denom > 0);
    if (denom > 0) {
      mi = m.iterator();
      while (mi.hasNext()) {
        mi.advance();
        double newVal = mi.value() / denom;
        mi.setValue(newVal);
      }
    }
  }

  /** Sum of all scores in m. */
  public static double GetSum(TObjectDoubleHashMap m) {
    TObjectDoubleIterator mi = m.iterator();
    double sum = 0;
    while (mi.hasNext()) {
      mi.advance();
      sum += mi.value();
    }
    return (sum);
  }

  /**
   * L2 norm of (m1Mult*m1 - m2Mult*m2).
   * NOTE(review): despite the "Squarred" name, this returns the square ROOT
   * of the sum of squares — callers appear to get the norm, not its square.
   */
  public static double GetDifferenceNorm2Squarred(TObjectDoubleHashMap m1,
      double m1Mult, TObjectDoubleHashMap m2, double m2Mult) {
    TObjectDoubleHashMap diffMap = new TObjectDoubleHashMap();
    // copy m1 into the difference map
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      diffMap.put(iter.key(), m1Mult * iter.value());
    }
    iter = m2.iterator();
    while (iter.hasNext()) {
      iter.advance();
      diffMap.adjustOrPutValue(iter.key(), -1 * m2Mult * iter.value(), -1
          * m2Mult * iter.value());
    }
    double val = 0;
    iter = diffMap.iterator();
    while (iter.hasNext()) {
      iter.advance();
      val += iter.value() * iter.value();
    }
    return (Math.sqrt(val));
  }

  // KL (m1 || m2)
  /** Smoothed KL divergence; a small constant avoids infinite terms. */
  public static double GetKLDifference(TObjectDoubleHashMap m1,
      TObjectDoubleHashMap m2) {
    double divergence = 0;
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      if (iter.value() > 0) {
        // if (!m2.containsKey(iter.key()) && m2.get(iter.key()) <= 0) {
        // divergence += Double.NEGATIVE_INFINITY;
        // } else {
        // add a small quantity to the numerator and denominator to avoid
        // infinite divergence
        divergence += iter.value()
            * Math.log((iter.value() + Constants.GetSmallConstant())
                / (m2.get(iter.key()) + Constants.GetSmallConstant()));
        // }
      }
    }
    return (divergence);
  }

  // Entropy(m1)
  /** Shannon entropy (natural log) over the positive entries of m1. */
  public static double GetEntropy(TObjectDoubleHashMap m1) {
    double entropy = 0;
    TObjectDoubleIterator iter = m1.iterator();
    while (iter.hasNext()) {
      iter.advance();
      if (iter.value() > 0) {
        entropy += -1 * iter.value() * Math.log(iter.value());
      }
    }
    return (entropy);
  }
}
| 5,400 | 28.839779 | 107 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/util/GraphStats.java | package junto.util;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import junto.config.ConfigReader;
import junto.config.GraphConfigLoader;
import junto.graph.Graph;
import junto.graph.Vertex;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jgrapht.GraphPath;
import org.jgrapht.alg.KShortestPaths;
import org.jgrapht.graph.DefaultDirectedWeightedGraph;
import org.jgrapht.graph.DefaultWeightedEdge;
/**
 * Reports summary statistics (vertex/edge counts, degree range, seed/test
 * overlap) for a junto Graph, to a file or as a string.
 */
public class GraphStats {

  private static Logger logger = LogManager.getLogger(GraphStats.class);

  // Number of K-shortest paths generated.
  // NOTE(review): only referenced from commented-out code in GetDiameter;
  // effectively unused at present.
  private static int _kPrime = -1;

  /**
   * Writes PrintStats(g) to graphStatsFile.
   * NOTE(review): the writer is not closed if a write fails — swr.close()
   * is skipped when swr.write throws.
   */
  public static void PrintStats(Graph g, String graphStatsFile) {
    try {
      BufferedWriter swr = new BufferedWriter(new FileWriter(graphStatsFile));
      swr.write(PrintStats(g));
      swr.close();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  /** Builds the multi-line statistics report by one pass over all vertices. */
  public static String PrintStats(Graph g) {
    int totalSeedNodes = 0;
    int totalTestNodes = 0;
    int totalSeedAndTestNodes = 0;
    int totalEdges = 0;
    int totalVertices = 0;
    int maxDegree = Integer.MIN_VALUE;
    int minDegree = Integer.MAX_VALUE;
    for (String vName : g.vertices().keySet()) {
      Vertex v = g.vertices().get(vName);
      ++totalVertices;
      int degree = v.GetNeighborNames().length;
      if (degree > maxDegree) { maxDegree = degree; }
      if (degree < minDegree) { minDegree = degree; }
      // counts directed edges: each undirected link contributes twice
      totalEdges += v.neighbors().size();
      if (v.isSeedNode()) { ++totalSeedNodes; }
      if (v.isTestNode()) { ++totalTestNodes; }
      if (v.isSeedNode() && v.isTestNode()) { ++totalSeedAndTestNodes; }
    }
    String retStr = "Total seed vertices: " + totalSeedNodes + "\n";
    retStr += "Total test vertices: " + totalTestNodes + "\n";
    retStr += "Total seed vertices which are also test vertices: " + totalSeedAndTestNodes + "\n";
    retStr += "Total vertices: " + totalVertices + "\n";
    retStr += "Total edges: " + totalEdges + "\n";
    retStr += "Average degree: " + (1.0 * totalEdges) / totalVertices + "\n";
    retStr += "Min degree: " + minDegree + "\n";
    retStr += "Max degree: " + maxDegree + "\n";
    return (retStr);
  }

  /**
   * Estimates connectivity and the longest shortest path, measured only
   * from seed vertices (single-source shortest paths per seed vertex).
   * Expensive; not called from main.
   */
  private static String GetDiameter(
      DefaultDirectedWeightedGraph<Vertex,DefaultWeightedEdge> g) {
    String retDiaReport = "";
    // HashMap<Vertex,KShortestPaths<Vertex,DefaultWeightedEdge>> kShortestPathMap =
    // new HashMap<Vertex,KShortestPaths<Vertex,DefaultWeightedEdge>>();
    boolean isConnected = true;
    int diameter = -1;
    int totalProcessed = 0;
    Iterator<Vertex> vIter = g.vertexSet().iterator();
    while (vIter.hasNext()) {
      Vertex v = vIter.next();
      if (!v.isSeedNode()) {
        continue;
      }
      ++totalProcessed;
      if (totalProcessed % 1000 == 0) {
        logger.info("Processed: " + totalProcessed + " curr_dia: " + diameter);
      }
      KShortestPaths<Vertex,DefaultWeightedEdge> ksp = new KShortestPaths(g, v, 1);
      // kShortestPathMap.put(v, new KShortestPaths(g, v, _kPrime));
      Iterator<Vertex> vIter2 = g.vertexSet().iterator();
      while (vIter2.hasNext()) {
        Vertex nv = vIter2.next();
        // skip self comparison
        if (v.equals(nv)) { continue; }
        List<GraphPath<Vertex,DefaultWeightedEdge>> paths = ksp.getPaths(nv);
        // no path from a seed vertex => graph not connected from seeds
        if (paths == null) { isConnected = false; }
        else if (paths.get(0).getEdgeList().size() > diameter) {
          diameter = paths.get(0).getEdgeList().size();
        }
      }
    }
    retDiaReport += "Connected(from_seed_nodes): " + (isConnected ? "true" : "false") + "\n";
    retDiaReport += "Diameter(from_seed_nodes): " + diameter + "\n";
    return (retDiaReport);
  }

  /** CLI entry point: loads the graph named by the config and prints stats. */
  public static void main(String[] args) {
    Hashtable config = ConfigReader.read_config(args);
    // load the graph
    Graph g = GraphConfigLoader.apply(config);
    MessagePrinter.Print(PrintStats(g));
  }
}
| 4,165 | 30.323308 | 98 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/app/ConfigTuner.java | package junto.app;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import junto.app.JuntoConfigRunner;
import junto.config.ConfigReader;
import junto.util.CollectionUtil;
import junto.util.Constants;
import junto.util.Defaults;
import junto.util.MessagePrinter;
import gnu.trove.list.array.TDoubleArrayList;
import gnu.trove.map.hash.TObjectDoubleHashMap;
/**
 * Grid-search driver: expands comma-separated config values into all
 * combinations, runs JuntoConfigRunner on each (redirecting stdout/stderr to
 * a per-config log file), tracks the best-scoring configuration per
 * algorithm, and optionally reruns the winner on a final test config.
 */
public class ConfigTuner {

  /**
   * Expands a config whose values may be comma-separated lists (e.g.
   * "mu1 = 1e-8,1,1e-2") into the cross product of single-valued configs.
   * The result may contain duplicates; callers dedupe via output-file names.
   */
  private static ArrayList<Hashtable>
      GetAllCombinations(Hashtable tuningConfig) {
    ArrayList<Hashtable> configs = new ArrayList<Hashtable>();
    Iterator iter = tuningConfig.keySet().iterator();
    while (iter.hasNext()) {
      String paramKey = (String) iter.next();
      String paramVal = (String) tuningConfig.get(paramKey);
      // e.g. mu1 = 1e-8,1,1e-8
      String[] fields = paramVal.split(",");
      int currSize = configs.size();
      for (int fi = 0; fi < fields.length; ++fi) {
        // add the first configuration, if none exists
        if (configs.size() == 0) {
          configs.add(new Hashtable());
          ++currSize;
        }
        for (int ci = 0; ci < currSize; ++ci) {
          // the first value can be added to existing
          // configurations.
          if (fi == 0) {
            configs.get(ci).put(paramKey, fields[fi]);
          } else {
            // subsequent values fork a clone of each existing config
            Hashtable nc = (Hashtable) configs.get(ci).clone();
            nc.put(paramKey, fields[fi]);
            // append the new config to the end of the list
            configs.add(nc);
          }
        }
      }
    }
    System.out.println("Total config (non-unique) combinations: " + configs.size());
    return (configs);
  }

  /**
   * Main tuning loop. For each unique config: builds output/log file names,
   * redirects System.out/err to the log, runs the algorithm, and updates the
   * per-algorithm best score. Restores the console streams afterwards.
   */
  private static void Run(Hashtable tuningConfig) {
    // some essential options terminate if they are note specified
    String idenStr = Defaults.GetValueOrDie(tuningConfig, "iden_str");
    String logDir = Defaults.GetValueOrDie(tuningConfig, "log_output_dir");
    String opDir = Defaults.GetValueOrDefault(
        (String) tuningConfig.get("output_dir"), null);
    boolean skipExistingConfigs =
        Defaults.GetValueOrDefault((String) tuningConfig.get("skip_existing_config"), false);
    // config file with post-tuning testing details (i.e. final test file etc.)
    String finalTestConfigFile = (String) tuningConfig.get("final_config_file");
    // removed so it is not expanded as a tunable parameter below
    tuningConfig.remove("final_config_file");
    // generate all possible combinations (non unique)
    ArrayList<Hashtable> configs = GetAllCombinations(tuningConfig);
    ArrayList<ArrayList> results = new ArrayList<ArrayList>();
    HashSet<String> uniqueConfigs = new HashSet<String>();
    // map from algo to the current best scores and the corresponding config
    HashMap<String,Hashtable> algo2BestConfig = new HashMap<String,Hashtable>();
    TObjectDoubleHashMap algo2BestScore = new TObjectDoubleHashMap();
    // store console
    PrintStream consoleOut = System.out;
    PrintStream consoleErr = System.err;
    for (int ci = 0; ci < configs.size(); ++ci) {
      Hashtable c = configs.get(ci);
      // if this a post-tune config, then generate seed and test files
      if (Defaults.GetValueOrDefault((String) c.get("is_final_run"), false)) {
        String splitId = Defaults.GetValueOrDie(c, "split_id");
        c.put("seed_file", c.remove("seed_base") + "." + splitId + ".train");
        c.put("test_file", c.remove("test_base") + "." + splitId + ".test");
      }
      // output file name is considered a unique identifier of a configuration
      String outputFile = GetOutputFileName(c, opDir, idenStr);
      if (uniqueConfigs.contains(outputFile)) {
        continue;
      }
      uniqueConfigs.add(outputFile);
      if (opDir != null) {
        c.put("output_file", outputFile);
      }
      System.out.println("Working with config: " + c.toString());
      try {
        // reset System.out so that the log printed using System.out.println
        // is directed to the right log file
        String logFile = GetLogFileName(c, logDir, idenStr);
        // if the log file exists, then don't repeat
        File lf = new File(logFile);
        if (skipExistingConfigs && lf.exists()) {
          continue;
        }
        FileOutputStream fos = new FileOutputStream(new File(logFile));
        PrintStream ps = new PrintStream(fos);
        System.setOut(ps);
        System.setErr(ps);
        results.add(new ArrayList());
        // the runner appends per-iteration score maps into the fresh list
        JuntoConfigRunner.apply(c, results.get(results.size() - 1));
        UpdateBestConfig((String) c.get("algo"), algo2BestScore,
            algo2BestConfig, c, results.get(results.size() - 1));
        // reset System.out back to the original console value
        System.setOut(consoleOut);
        System.setErr(consoleErr);
        // close log file
        fos.close();
      } catch (FileNotFoundException fnfe) {
        fnfe.printStackTrace();
      } catch (IOException ioe) {
        ioe.printStackTrace();
      }
    }
    // print out the best parameters for each algorithm
    Iterator algoIter = algo2BestConfig.keySet().iterator();
    while (algoIter.hasNext()) {
      String algo = (String) algoIter.next();
      System.out.println("\n#################\n" +
          "BEST_CONFIG_FOR " + algo + " " +
          algo2BestScore.get(algo) + "\n" +
          CollectionUtil.Map2StringPrettyPrint(algo2BestConfig.get(algo)));
      // run test with tuned parameters, if requested
      if (finalTestConfigFile != null) {
        Hashtable finalTestConfig = (Hashtable) algo2BestConfig.get(algo).clone();
        // add additional config options from the file to the tuned params
        finalTestConfig = ConfigReader.read_config(finalTestConfig, finalTestConfigFile);
        JuntoConfigRunner.apply(finalTestConfig, null);
      }
    }
  }

  /**
   * Derives the per-config output file name (also used as the config's
   * unique identifier). Dies on an unrecognized "algo" value.
   */
  private static String GetOutputFileName(Hashtable c, String opDir, String idenStr) {
    String outputFile = " ";
    if (c.get("algo").equals("mad") ||
        c.get("algo").equals("lgc") ||
        c.get("algo").equals("am") ||
        c.get("algo").equals("lclp")) {
      outputFile = opDir + "/" + GetBaseName2(c, idenStr);
    } else if (c.get("algo").equals("maddl")) {
      outputFile = opDir + "/" +
          GetBaseName2(c, idenStr) +
          ".mu4_" + c.get("mu4");
    } else if (c.get("algo").equals("adsorption") || c.get("algo").equals("lp_zgl")) {
      outputFile = opDir + "/" + GetBaseName(c, idenStr);
    } else {
      MessagePrinter.PrintAndDie("output_1 file can't be empty!");
    }
    return (outputFile);
  }

  /** Derives the per-config log file name; mirrors GetOutputFileName's cases. */
  private static String GetLogFileName(Hashtable c, String logDir, String idenStr) {
    String logFile = "";
    if (c.get("algo").equals("mad") ||
        c.get("algo").equals("lgc") ||
        c.get("algo").equals("am") ||
        c.get("algo").equals("lclp")) {
      logFile = logDir + "/" + "log." + GetBaseName2(c, idenStr);
    } else if (c.get("algo").equals("maddl")) {
      logFile = logDir + "/" +
          "log." +
          GetBaseName2(c, idenStr) +
          ".mu4_" + c.get("mu4");
    } else if (c.get("algo").equals("adsorption") || c.get("algo").equals("lp_zgl")) {
      logFile = logDir + "/" +
          "log." + GetBaseName(c, idenStr);
    } else {
      MessagePrinter.PrintAndDie("output_2 file can't be empty!");
    }
    return (logFile);
  }

  /**
   * Encodes the identifying config options (seed count, algorithm, graph
   * pruning, kernel settings, ...) into a file base name.
   */
  private static String GetBaseName(Hashtable c, String idenStr) {
    String base = idenStr;
    if (c.containsKey("max_seeds_per_class")) {
      base += ".spc_" + c.get("max_seeds_per_class");
    }
    base += "." + c.get("algo");
    if (c.containsKey("use_bipartite_optimization")) {
      base += ".bipart_opt_" + c.get("use_bipartite_optimization");
    }
    if (c.containsKey("top_k_neighbors")) {
      base += ".K_" + c.get("top_k_neighbors");
    }
    if (c.containsKey("prune_threshold")) {
      base += ".P_" + c.get("prune_threshold");
    }
    if (c.containsKey("high_prune_thresh")) {
      base += ".feat_prune_high_" + c.get("high_prune_thresh");
    }
    if (c.containsKey("keep_top_k_labels")) {
      base += ".top_labels_" + c.get("keep_top_k_labels");
    }
    if (c.containsKey("train_fract")) {
      base += ".train_fract_" + c.get("train_fract");
    }
    if (Defaults.GetValueOrDefault((String) c.get("set_gaussian_kernel_weights"), false)) {
      double sigmaFactor = Double.parseDouble(Defaults.GetValueOrDie(c, "gauss_sigma_factor"));
      base += ".gk_sig_" + sigmaFactor;
    }
    if (c.containsKey("algo") && (c.get("algo").equals("adsorption") ||
        c.get("algo").equals("mad") ||
        c.get("algo").equals("maddl"))) {
      double beta = Defaults.GetValueOrDefault((String) c.get("beta"), 2.0);
      base += ".beta_" + beta;
    }
    // if this a post-tune config, then generate seed and test files
    if (Defaults.GetValueOrDefault((String) c.get("is_final_run"), false)) {
      base += ".split_id_" + Defaults.GetValueOrDie(c, "split_id");
    }
    return (base);
  }

  /** GetBaseName plus the mu1/mu2/mu3/norm hyperparameters. */
  private static String GetBaseName2(Hashtable c, String idenStr) {
    String base = GetBaseName(c, idenStr) +
        ".mu1_" + c.get("mu1") +
        ".mu2_" + c.get("mu2") +
        ".mu3_" + c.get("mu3") +
        ".norm_" + c.get("norm");
    return (base);
  }

  /**
   * Extracts the per-iteration MRR scores from the runner's result list
   * (skipping index 0), finds the best iteration, and replaces the stored
   * best config for this algorithm if it improves on the current best.
   * The winning iteration count is recorded in the config under "iters".
   */
  private static void UpdateBestConfig(String algo, TObjectDoubleHashMap algo2BestScore,
      HashMap<String,Hashtable> algo2BestConfig, Hashtable config,
      ArrayList perIterMultiScores) {
    TDoubleArrayList perIterScores = new TDoubleArrayList();
    for (int i = 1; i < perIterMultiScores.size(); ++i) {
      TObjectDoubleHashMap r = (TObjectDoubleHashMap) perIterMultiScores.get(i);
      perIterScores.add(r.get(Constants.GetMRRString()));
    }
    if (perIterScores.size() > 0) {
      // System.out.println("SIZE: " + perIterScores.size());
      int mi = 0;
      for (int i = 1; i < perIterScores.size(); ++i) {
        if (perIterScores.get(i) > perIterScores.get(mi)) {
          mi = i;
        }
      }
      // System.out.println("max_idx: " + mi + " " + perIterScores.toString());
      double maxScore = perIterScores.get(mi); // perIterScores.max();
      if (algo2BestScore.size() == 0 || algo2BestScore.get(algo) < maxScore) {
        // System.out.println("new best score: " + maxScore);
        // best iteration
        int bestIter = perIterScores.indexOf(maxScore) + 1;
        algo2BestScore.put(algo, maxScore);
        algo2BestConfig.put(algo, (Hashtable) config.clone());
        algo2BestConfig.get(algo).put("iters", bestIter);
      }
    }
  }

  /** CLI entry point: reads the tuning config from args and runs the search. */
  public static void main(String[] args) {
    Hashtable tuningConfig = ConfigReader.read_config(args);
    Run(tuningConfig);
  }
}
| 11,088 | 35.476974 | 99 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/graph/CrossValidationGenerator.java | package junto.graph;
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import junto.graph.Graph;
import junto.graph.Vertex;
import junto.util.CollectionUtil;
import junto.util.Constants;
import junto.util.ObjectDoublePair;
/**
 * Splits the labeled instance nodes of a graph into seed (train) and test
 * sets.
 *
 * <p>Every vertex that is not a feature node and carries at least one gold
 * label is assigned a pseudo-random score; sorting by that score yields a
 * random permutation, of which the top {@code trainFract} fraction becomes
 * seed nodes (their gold labels are copied over as injected labels) while
 * the remainder are flagged as test nodes. A fixed seed keeps the split
 * reproducible across runs.
 */
public class CrossValidationGenerator {
  // seed used to initialize the random number generator (fixed so that the
  // train/test split is deterministic)
  static long _kDeterministicSeed = 100;

  /**
   * Marks each labeled instance vertex of {@code g} as either a seed node
   * or a test node, in place.
   *
   * @param g graph whose instance vertices are to be partitioned
   * @param trainFract fraction (0..1) of instances to use as seeds
   */
  public static void Split(Graph g, double trainFract) {
    Random rng = new Random(_kDeterministicSeed);
    // Draw one pseudo-random number per valid instance vertex. The draw
    // order follows the graph's vertex iteration order, which together with
    // the fixed seed makes the resulting split deterministic.
    TObjectDoubleHashMap instanceVertices = new TObjectDoubleHashMap();
    for (Iterator vIter = g.vertices().keySet().iterator(); vIter.hasNext();) {
      Vertex v = g.vertices().get(vIter.next());
      // Only non-feature nodes with at least one gold label count as
      // instances; everything else is left unmarked.
      boolean isInstance = !v.name().startsWith(Constants.GetFeatPrefix())
          && v.goldLabels().size() > 0;
      if (isInstance) {
        instanceVertices.put(v, rng.nextDouble());
      }
    }
    // Sorting by the random scores produces a random ordering of instances.
    ArrayList<ObjectDoublePair> sortedRandomInstances =
        CollectionUtil.ReverseSortMap(instanceVertices);
    int totalInstances = sortedRandomInstances.size();
    double totalTrainInstances = Math.ceil(totalInstances * trainFract);
    for (int vi = 0; vi < totalInstances; ++vi) {
      Vertex v = (Vertex) sortedRandomInstances.get(vi).GetLabel();
      if (vi >= totalTrainInstances) {
        // Everything past the train cutoff becomes a test node.
        v.setIsTestNode(true);
      } else {
        v.setIsSeedNode(true);
        // Gold labels are expected to be set already; replicate them as the
        // node's injected (seed) labels.
        TObjectDoubleIterator goldLabIter = v.goldLabels().iterator();
        while (goldLabIter.hasNext()) {
          goldLabIter.advance();
          v.SetInjectedLabelScore((String) goldLabIter.key(), goldLabIter.value());
        }
      }
    }
  }
}
| 2,598 | 32.320513 | 83 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/graph/parallel/Edge2NodeFactoredHadoop.java | package junto.graph.parallel;
import java.io.*;
import java.util.*;
import junto.graph.Vertex;
import junto.util.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Hadoop job (classic org.apache.hadoop.mapred API) that converts an
 * edge-factored graph file -- one "src TAB dest TAB weight" line per edge --
 * into a node-factored file where each output record describes one vertex:
 * its gold labels, injected (seed) labels, estimated labels, full
 * neighborhood string, and random-walk probabilities. This node-factored
 * format is the input consumed by the iterative propagation jobs (e.g.
 * MADHadoop, LP_ZGL_Hadoop).
 *
 * Command line (see main): edge_file gold_label_file seed_label_file out_dir
 */
public class Edge2NodeFactoredHadoop {
    // field separator used in both input and output records
    private static String _kDelim = "\t";
    // a vertex with more neighbors than this has its neighborhood split
    // across multiple output lines (all other fields are repeated per line)
    private static int kMaxNeighorsPerLine_ = 1000;
    // hyperparameter passed to Vertex.CalculateRWProbabilities
    private static double _kBeta = 2.0;
    // message-type tags prefixed to every value sent to the reducer
    private static String neighMsgType = "-NEIGH-";
    private static String goldLabMsgType = "-GOLD-";
    private static String injLabMsgType = "-INJ-";
    /**
     * Mapper: for each edge emits, keyed by each endpoint, (a) a neighbor
     * message carrying the other endpoint and the edge weight, and (b) the
     * endpoint's gold/seed label messages if present in the side files
     * loaded in configure().
     */
    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, Text> {
        // node id -> "label TAB score", loaded once per mapper
        private HashMap<String,String> goldLabels;
        private HashMap<String,String> seedLabels;
        public void configure(JobConf conf) {
            goldLabels = LoadLabels(conf.get("gold_label_file"));
            seedLabels = LoadLabels(conf.get("seed_label_file"));
        }
        /**
         * Reads a "node TAB label TAB score" file from HDFS into a map from
         * node id to "label TAB score". Only the FIRST label seen per node
         * is kept -- NOTE(review): this silently drops additional labels for
         * multi-label nodes; confirm inputs carry one label per node.
         */
        private HashMap<String,String> LoadLabels(String fileName) {
            HashMap<String,String> m = new HashMap<String,String>();
            try {
                Path p = new Path(fileName);
                FileSystem fs = FileSystem.get(new Configuration());
                BufferedReader bfr = new BufferedReader(new InputStreamReader(
                        fs.open(p)));
                String line;
                while ((line = bfr.readLine()) != null) {
                    String[] fields = line.split(_kDelim);
                    if (!m.containsKey(fields[0])) {
                        m.put(fields[0], fields[1] + _kDelim + fields[2]);
                    }
                }
                bfr.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            return (m);
        }
        /**
         * Emits neighbor and label messages for both endpoints of one edge.
         * Input value format: "node1 TAB node2 TAB edge_weight".
         */
        public void map(LongWritable key, Text value,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            // ///
            // Constructing the vertex from the string representation
            // ///
            String line = value.toString();
            // node1 node2 edge_weight
            String[] fields = line.split(_kDelim);
            // source --> dest
            output.collect(new Text(fields[0]), new Text(neighMsgType + _kDelim
                    + fields[1] + _kDelim + fields[2]));
            if (goldLabels.containsKey(fields[0])) {
                output.collect(new Text(fields[0]),
                        new Text(goldLabMsgType + _kDelim + goldLabels.get(fields[0])));
            }
            if (seedLabels.containsKey(fields[0])) {
                output.collect(new Text(fields[0]),
                        new Text(injLabMsgType + _kDelim + seedLabels.get(fields[0])));
            }
            // dest --> source
            // generate this message only if source and destination
            // are different, as otherwise a similar message has already
            // been generated above.
            // NOTE(review): this makes every edge symmetric; label messages
            // are emitted once per incident edge, so a node's label message
            // is duplicated for each of its edges (the reducer tolerates
            // this because the Vertex setters are idempotent overwrites).
            if (!fields[0].equals(fields[1])) {
                output.collect(new Text(fields[1]), new Text(neighMsgType
                        + _kDelim + fields[0] + _kDelim + fields[2]));
                if (goldLabels.containsKey(fields[1])) {
                    output.collect(new Text(fields[1]),
                            new Text(goldLabMsgType + _kDelim + goldLabels.get(fields[1])));
                }
                if (seedLabels.containsKey(fields[1])) {
                    output.collect(new Text(fields[1]),
                            new Text(injLabMsgType + _kDelim + seedLabels.get(fields[1])));
                }
            }
        }
    }
    /**
     * Reducer: rebuilds one Vertex from all its messages, normalizes its
     * transition probabilities, computes its random-walk probabilities, and
     * writes the node-factored record
     * "id TAB gold TAB injected TAB estimated TAB neighbors TAB rw_probs"
     * (the id comes from the output key). Very large neighborhoods are
     * split across multiple lines with all other fields repeated.
     */
    public static class Reduce extends MapReduceBase implements
            Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            String vertexId = key.toString();
            Vertex v = new Vertex(vertexId);
            while (values.hasNext()) {
                // neighbor/self edge_weight/inject_score
                String val = values.next().toString();
                String[] fields = val.split(_kDelim);
                String msgType = fields[0];
                String trgVertexId = fields[1];
                // Dispatch on the message-type tag added by the mapper.
                if (msgType.equals(neighMsgType)) {
                    v.setNeighbor(trgVertexId, Double.parseDouble(fields[2]));
                } else if (msgType.equals(goldLabMsgType)) {
                    v.setGoldLabel(trgVertexId, Double.parseDouble(fields[2]));
                } else if (msgType.equals(injLabMsgType)) {
                    v.SetInjectedLabelScore(trgVertexId,
                            Double.parseDouble(fields[2]));
                }
            }
            // normalize transition probabilities
            v.NormalizeTransitionProbability();
            // remove dummy labels
            v.SetInjectedLabelScore(Constants.GetDummyLabel(), 0);
            v.SetEstimatedLabelScore(Constants.GetDummyLabel(), 0);
            // calculate random walk probabilities
            v.CalculateRWProbabilities(_kBeta);
            // generate the random walk probability string of the node
            String rwProbStr = Constants._kInjProb + " "
                    + v.pinject() + " " + Constants._kContProb
                    + " " + v.pcontinue() + " "
                    + Constants._kTermProb + " "
                    + v.pabandon();
            // represent neighborhood information as a string
            Object[] neighNames = v.GetNeighborNames();
            String neighStr = "";
            int totalNeighbors = neighNames.length;
            for (int ni = 0; ni < totalNeighbors; ++ni) {
                // if the neighborhood string is already too long, then
                // print it out. It is possible to split the neighborhood
                // information of a node into multiple lines. However, all
                // other fields should be repeated in all the split lines.
                if (neighStr.length() > 0 && (ni % kMaxNeighorsPerLine_ == 0)) {
                    // output format
                    // id gold_label injected_labels estimated_labels neighbors
                    // rw_probabilities
                    output.collect(
                            key,
                            new Text(
                                    CollectionUtil.Map2String(v.goldLabels())
                                            + _kDelim
                                            + CollectionUtil.Map2String(v
                                                    .injectedLabels())
                                            + _kDelim
                                            + CollectionUtil.Map2String(v
                                                    .estimatedLabels())
                                            + _kDelim + neighStr.trim()
                                            + _kDelim + rwProbStr));
                    // reset the neighborhood string
                    neighStr = "";
                }
                neighStr += neighNames[ni] + " "
                        + v.GetNeighborWeight((String) neighNames[ni]) + " ";
            }
            // print out any remaining neighborhood information, plus all other
            // info
            if (neighStr.length() > 0) {
                // output format
                // id gold_label injected_labels estimated_labels neighbors
                // rw_probabilities
                output.collect(
                        key,
                        new Text(CollectionUtil.Map2String(v.goldLabels())
                                + _kDelim
                                + CollectionUtil.Map2String(v
                                        .injectedLabels())
                                + _kDelim
                                + CollectionUtil.Map2String(v
                                        .estimatedLabels()) + _kDelim
                                + neighStr.trim() + _kDelim + rwProbStr));
            }
        }
    }
    /**
     * Job driver. args: [0] edge-file input path/pattern, [1] gold label
     * file, [2] seed label file, [3] output directory.
     */
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(Edge2NodeFactoredHadoop.class);
        conf.setJobName("edge2node_hadoop");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);
        conf.setMapperClass(Map.class);
        // conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        conf.set("gold_label_file", args[1]);
        conf.set("seed_label_file", args[2]);
        FileOutputFormat.setOutputPath(conf, new Path(args[3]));
        JobClient.runJob(conf);
    }
}
| 7,401 | 31.323144 | 71 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/graph/parallel/EdgeFactored2NodeFactored.java | package junto.graph.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Iterator;
import junto.config.*;
import junto.graph.*;
import junto.util.*;
/**
 * Single-machine (non-MapReduce) converter from an edge-factored graph to
 * the node-factored text format consumed by the Hadoop propagation jobs:
 * "id TAB gold TAB injected TAB estimated TAB neighbors TAB rw_probs".
 * The graph is loaded via GraphConfigLoader from a config file and written
 * to the path given by the "hadoop_graph_file" config key.
 */
public class EdgeFactored2NodeFactored {
	// field separator for the output records
	private static String kDelim_ = "\t";
	// neighborhoods longer than this are split across multiple output
	// lines, with every other field repeated on each line
	private static int kMaxNeighorsPerLine_ = 100;
	/**
	 * Entry point: loads the graph described by the config arguments and,
	 * if "hadoop_graph_file" is configured, writes it in node-factored form.
	 */
	public static void main(String[] args) {
		Hashtable config = ConfigReader.read_config(args);
		Graph g = GraphConfigLoader.apply(config);
		// save graph in file
		if (config.containsKey("hadoop_graph_file")) {
			WriteToFile(g, (String) config.get("hadoop_graph_file"));
		}
	}
	/**
	 * Writes every vertex of {@code g} as one (or, for large neighborhoods,
	 * several) node-factored line(s) to {@code outputFile}.
	 * Wraps any IOException in a RuntimeException.
	 * NOTE(review): the writer is not closed if an exception is thrown
	 * before bw.close() -- a try-with-resources would be safer.
	 */
	public static void WriteToFile(Graph g, String outputFile) {
		try {
			BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
			Iterator<String> vIter = g.vertices().keySet().iterator();
			while (vIter.hasNext()) {
				String vName = vIter.next();
				Vertex v = g.vertices().get(vName);
				// remove dummy label from injected and estimated labels
				// NOTE(review): the first call below zeroes the dummy GOLD
				// label, not the injected one as this comment claims --
				// confirm whether setGoldLabel or SetInjectedLabelScore was
				// intended (compare Edge2NodeFactoredHadoop.Reduce, which
				// clears injected + estimated).
				v.setGoldLabel(Constants.GetDummyLabel(), 0.0);
				v.SetEstimatedLabelScore(Constants.GetDummyLabel(), 0);
				// random-walk probabilities serialized as
				// "inj_key p_inj cont_key p_cont term_key p_term"
				String rwProbStr =
					Constants._kInjProb + " " + v.pinject() + " " +
					Constants._kContProb + " " + v.pcontinue() + " " +
					Constants._kTermProb + " " + v.pabandon();
				// represent neighborhood information as a string
				Object[] neighNames = v.GetNeighborNames();
				String neighStr = "";
				int totalNeighbors = neighNames.length;
				for (int ni = 0; ni < totalNeighbors; ++ni) {
					// if the neighborhood string is already too long, then
					// print it out. It is possible to split the neighborhood
					// information of a node into multiple lines. However, all
					// other fields should be repeated in all the split lines.
					if (neighStr.length() > 0 && (ni % kMaxNeighorsPerLine_ == 0)) {
						// output format
						// id gold_label injected_labels estimated_labels neighbors rw_probabilities
						bw.write(v.name() + kDelim_ +
								CollectionUtil.Map2String(v.goldLabels()) + kDelim_ +
								CollectionUtil.Map2String(v.injectedLabels()) + kDelim_ +
								CollectionUtil.Map2String(v.estimatedLabels()) + kDelim_ +
								neighStr.trim() + kDelim_ +
								rwProbStr + "\n");
						// reset the neighborhood string
						neighStr = "";
					}
					// NOTE(review): local 'n' is never used -- candidate for
					// removal.
					Vertex n = g.vertices().get(neighNames[ni]);
					neighStr += neighNames[ni] + " " +
						v.GetNeighborWeight((String) neighNames[ni]) + " ";
				}
				// print out any remaining neighborhood information, plus all other info
				if (neighStr.length() > 0) {
					// output format
					// id gold_label injected_labels estimated_labels neighbors rw_probabilities
					bw.write(v.name() + kDelim_ +
							CollectionUtil.Map2String(v.goldLabels()) + kDelim_ +
							CollectionUtil.Map2String(v.injectedLabels()) + kDelim_ +
							CollectionUtil.Map2String(v.estimatedLabels()) + kDelim_ +
							neighStr.trim() + kDelim_ +
							rwProbStr + "\n");
				}
			}
			bw.close();
		} catch (IOException ioe) {
			throw new RuntimeException(ioe);
		}
	}
}
| 4,057 | 36.925234 | 88 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/algorithm/parallel/LP_ZGL_Hadoop.java | package junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import junto.config.*;
import junto.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Iterative Hadoop implementation of label propagation ("LP_ZGL" --
 * presumably the Zhu/Ghahramani/Lafferty formulation, matching the
 * non-parallel junto algorithm of the same name; confirm against that
 * implementation). Each MapReduce round performs one propagation
 * iteration over node-factored records of the form
 * "id TAB gold TAB injected TAB estimated TAB neighbors TAB rw_probs";
 * the reducer output of round i is the mapper input of round i+1.
 */
public class LP_ZGL_Hadoop {
	// field separator for all records and messages
	private static String _kDelim = "\t";
	/**
	 * Mapper: for each vertex record sends (a) the full record to itself
	 * (so the reducer can recover its injected labels and neighborhood)
	 * and (b) the vertex's current estimated label distribution to every
	 * neighbor.
	 */
	public static class LP_ZGL_Map extends MapReduceBase
		implements Mapper<LongWritable, Text, Text, Text> {
		// NOTE(review): 'word' is never used -- candidate for removal.
		private Text word = new Text();
		public void map(LongWritable key, Text value,
										OutputCollector<Text, Text> output,
										Reporter reporter) throws IOException {
			/////
			// Constructing the vertex from the string representation
			/////
			String line = value.toString();
			// id gold_label injected_labels estimated_labels neighbors rw_probabilities
			String[] fields = line.split(_kDelim);
			TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
			// a non-empty injected-labels field marks a seed node
			boolean isSeedNode = fields[2].length() > 0 ? true : false;
			// If the current node is a seed node but there is no
			// estimate label information yet, then transfer the seed label
			// to the estimated label distribution. Ideally, this is likely
			// to be used in the map of the very first iteration.
			if (isSeedNode && fields[3].length() == 0) {
				fields[3] = fields[2];
			}
			// Send two types of messages:
			// -- self messages which will store the injection labels and
			// random walk probabilities.
			// -- messages to neighbors about current estimated scores
			// of the node.
			//
			// message to self
			output.collect(new Text(fields[0]), new Text(line));
			// message to neighbors
			TObjectDoubleIterator neighIterator = neighbors.iterator();
			while (neighIterator.hasNext()) {
				neighIterator.advance();
				// message (neighbor_node, current_node + DELIM + curr_node_label_scores
				output.collect(new Text((String) neighIterator.key()),
											 new Text(fields[0] + _kDelim + fields[3]));
			}
		}
	}
	/**
	 * Reducer: recomputes one vertex's estimated label distribution as the
	 * normalized sum of (mu1 x its injected labels, for seed nodes) and
	 * (mu2 x edge-weighted neighbor label distributions, for non-seed
	 * nodes), then re-emits the full vertex record for the next iteration.
	 */
	public static class LP_ZGL_Reduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
		// weight on the injected (seed) label term
		private static double mu1;
		// weight on the neighbor-propagated label term
		private static double mu2;
		// sparsity cap applied when normalizing the neighbor distribution
		private static int keepTopKLabels;
		public void configure(JobConf conf) {
			mu1 = Double.parseDouble(conf.get("mu1"));
			mu2 = Double.parseDouble(conf.get("mu2"));
			keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
		}
		public void reduce(Text key, Iterator<Text> values,
											 OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			// new scores estimated for the current node
			TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();
			// set to true only if the message sent to itself is found.
			boolean isSelfMessageFound = false;
			String vertexId = key.toString();
			String vertexString = "";
			TObjectDoubleHashMap neighbors = null;
			// NOTE(review): randWalkProbs is parsed below but never read in
			// this reducer (LP-ZGL does not use random-walk probabilities).
			TObjectDoubleHashMap randWalkProbs = null;
			HashMap<String, String> neighScores =
				new HashMap<String, String>();
			// NOTE(review): totalMessagesReceived is counted but never used.
			int totalMessagesReceived = 0;
			boolean isSeedNode = false;
			// iterate over all the messages received at the node
			while (values.hasNext()) {
				++totalMessagesReceived;
				String val = values.next().toString();
				String[] fields = val.split(_kDelim);
				// System.out.println("src: " + fields[0] + " dest: " + vertexId +
				// "MESSAGE>>" + val + "<<");
				// self-message check
				if (vertexId.equals(fields[0])) {
					isSelfMessageFound = true;
					vertexString = val;
					// System.out.println("Reduce: " + vertexId + " " + val + " " + fields.length);
					TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[2]);
					neighbors = CollectionUtil.String2Map(neighbors, fields[4]);
					randWalkProbs = CollectionUtil.String2Map(fields[5]);
					if (injLabels.size() > 0) {
						isSeedNode = true;
						// add injected labels to the estimated scores.
						ProbUtil.AddScores(newEstimatedScores,
															 mu1, injLabels);
					}
				} else {
					// an empty second field represents that the
					// neighbor has no valid label assignment yet.
					if (fields.length > 1) {
						neighScores.put(fields[0], fields[1]);
					}
				}
			}
			// terminate if message from self is not received.
			if (!isSelfMessageFound) {
				throw new RuntimeException("Self message not received for node " + vertexId);
			}
			// Add neighbor label scores to current node's label estimates only if the
			// current node is not a seed node. In case of seed nodes, clamp back the
			// injected label distribution, which is already done above when processing
			// the self messages
			if (!isSeedNode) {
				// collect neighbors label distributions and create one single
				// label distribution
				TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
				Iterator<String> neighIter = neighScores.keySet().iterator();
				while (neighIter.hasNext()) {
					String neighName = neighIter.next();
					// weight each neighbor's distribution by mu2 x edge weight
					ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
														 mu2 * neighbors.get(neighName),
														 CollectionUtil.String2Map(neighScores.get(neighName)));
				}
				ProbUtil.Normalize(weightedNeigLablDist, keepTopKLabels);
				// now add the collective neighbor label distribution to
				// the estimate of the current node's labels.
				ProbUtil.AddScores(newEstimatedScores,
													 1.0, weightedNeigLablDist);
			}
			// normalize the scores
			ProbUtil.Normalize(newEstimatedScores);
			// now reconstruct the vertex representation (with the new estimated scores)
			// so that the output from the current mapper can be used as input in next
			// iteration's mapper.
			String[] vertexFields = vertexString.split(_kDelim);
			// replace estimated scores with the new ones.
			// Drop the leading id field (it is re-emitted as the output key);
			// index 2 of the remaining fields is the estimated-labels slot.
			String[] newVertexFields = new String[vertexFields.length - 1];
			for (int i = 1; i < vertexFields.length; ++i) {
				newVertexFields[i - 1] = vertexFields[i];
			}
			newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
			output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
		}
	}
	/**
	 * Job driver: reads config (hdfs_input_pattern, hdfs_output_base, iters,
	 * mu1, mu2, optional keep_top_k_labels) and chains one MapReduce job per
	 * iteration, feeding each round's output into the next round's input.
	 */
	public static void main(String[] args) throws Exception {
		Hashtable config = ConfigReader.read_config(args);
		String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
		String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
		int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
		String currInputFilePat = baseInputFilePat;
		String currOutputFilePat = "";
		for (int iter = 1; iter <= numIterations; ++iter) {
			JobConf conf = new JobConf(LP_ZGL_Hadoop.class);
			conf.setJobName("lp_zgl_hadoop");
			conf.setOutputKeyClass(Text.class);
			conf.setOutputValueClass(Text.class);
			conf.setMapperClass(LP_ZGL_Map.class);
			// conf.setCombinerClass(LP_ZGL_Reduce.class);
			conf.setReducerClass(LP_ZGL_Reduce.class);
			conf.setInputFormat(TextInputFormat.class);
			conf.setOutputFormat(TextOutputFormat.class);
			// hyperparameters
			conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
			conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
			conf.set("keepTopKLabels",
							 Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
																					Integer.toString(Integer.MAX_VALUE)));
			if (iter > 1) {
				// output from last iteration is the input for current iteration
				currInputFilePat = currOutputFilePat + "/*";
			}
			FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));
			currOutputFilePat = baseOutputFilePat + "_" + iter;
			FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));
			JobClient.runJob(conf);
		}
	}
}
| 9,664 | 36.901961 | 103 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/algorithm/parallel/MADHadoop.java | package junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import junto.config.*;
import junto.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.jobcontrol.Job;
/**
 * Iterative Hadoop implementation of MAD (Modified Adsorption -- name taken
 * from the class/job name; matches the non-parallel junto "mad" algorithm,
 * confirm against that implementation). Each MapReduce round performs one
 * update over node-factored records
 * "id TAB gold TAB injected TAB estimated TAB neighbors TAB rw_probs":
 * the new estimate of a node combines (mu1 x p_inj x injected labels),
 * (mu2 x continue-probability-weighted, symmetrized neighbor
 * distributions) and (mu3 x p_term x the dummy-label distribution), then
 * divides by a per-node normalization constant. Round i's reducer output
 * is round i+1's mapper input.
 */
public class MADHadoop {
	// field separator for all records and messages
	private static String _kDelim = "\t";
	/**
	 * Mapper: for each vertex record emits (a) a "labels"-tagged copy of the
	 * full record to itself, (b) a "labels" message with its current
	 * estimated distribution to every neighbor, and (c) an "edge_info"
	 * message to every neighbor carrying the edge weight and this node's
	 * continue probability (needed for the symmetrized mu2 term).
	 */
	public static class MADHadoopMap extends MapReduceBase
		implements Mapper<LongWritable, Text, Text, Text> {
		// NOTE(review): 'word' is never used -- candidate for removal.
		private Text word = new Text();
		public void map(LongWritable key, Text value,
										OutputCollector<Text, Text> output,
										Reporter reporter) throws IOException {
			/////
			// Constructing the vertex from the string representation
			/////
			String line = value.toString();
			// id gold_label injected_labels estimated_labels neighbors rw_probabilities
			String[] fields = line.split(_kDelim);
			TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
			TObjectDoubleHashMap rwProbabilities = CollectionUtil.String2Map(fields[5]);
			// If the current node is a seed node but there is no
			// estimate label information yet, then transfer the seed label
			// to the estimated label distribution. Ideally, this is likely
			// to be used in the map of the very first iteration.
			boolean isSeedNode = fields[2].length() > 0 ? true : false;
			if (isSeedNode && fields[3].length() == 0) {
				fields[3] = fields[2];
			}
			// TODO(partha): move messages to ProtocolBuffers
			// Send two types of messages:
			// -- self messages which will store the injection labels and
			// random walk probabilities.
			// -- messages to neighbors about current estimated scores
			// of the node.
			//
			// message to self
			output.collect(new Text(fields[0]), new Text("labels" + _kDelim + line));
			// message to neighbors
			TObjectDoubleIterator neighIterator = neighbors.iterator();
			while (neighIterator.hasNext()) {
				neighIterator.advance();
				// message (neighbor_node, current_node + DELIM + curr_node_label_scores
				output.collect(new Text((String) neighIterator.key()),
											 new Text("labels" + _kDelim + fields[0] + _kDelim + fields[3]));
				// message (neighbor_node, curr_node + DELIM + curr_node_edge_weights + DELIM curr_node_cont_prob
				assert(neighbors.containsKey((String) neighIterator.key()));
				output.collect(new Text((String) neighIterator.key()),
											 new Text("edge_info" + _kDelim +
																fields[0] + _kDelim +
																neighbors.get((String) neighIterator.key()) + _kDelim +
																rwProbabilities.get(Constants._kContProb)));
			}
		}
	}
	/**
	 * Reducer: applies one MAD update to a single vertex using the "labels"
	 * and "edge_info" messages gathered from itself and its neighbors, and
	 * re-emits the vertex record with the new estimated distribution.
	 */
	public static class MADHadoopReduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
		// weight on the injection (seed) term
		private static double mu1;
		// weight on the neighbor-smoothness term
		private static double mu2;
		// weight on the dummy-label (abandonment/regularization) term
		private static double mu3;
		// cap on the number of labels kept per node
		private static int keepTopKLabels;
		public void configure(JobConf conf) {
			mu1 = Double.parseDouble(conf.get("mu1"));
			mu2 = Double.parseDouble(conf.get("mu2"));
			mu3 = Double.parseDouble(conf.get("mu3"));
			keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
		}
		public void reduce(Text key, Iterator<Text> values,
											 OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			// new scores estimated for the current node
			TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();
			// set to true only if the message sent to itself is found.
			boolean isSelfMessageFound = false;
			String vertexId = key.toString();
			String vertexString = "";
			TObjectDoubleHashMap neighbors = null;
			TObjectDoubleHashMap randWalkProbs = null;
			// neighbor id -> its latest estimated label distribution (serialized)
			HashMap<String, String> neighScores =
				new HashMap<String, String>();
			// W_ji: weight of the edge coming in from each neighbor
			TObjectDoubleHashMap incomingEdgeWeights = new TObjectDoubleHashMap();
			// p_cont of each neighbor, needed for the symmetrized mu2 term
			TObjectDoubleHashMap neighborContProb = new TObjectDoubleHashMap();
			// NOTE(review): totalMessagesReceived is counted but never used.
			int totalMessagesReceived = 0;
			// iterate over all the messages received at the node
			while (values.hasNext()) {
				++totalMessagesReceived;
				String val = values.next().toString();
				String[] fields = val.split(_kDelim);
				// first field represents the type of message
				String msgType = fields[0];
				if (fields[0].equals("labels")) {
					// self-message check
					if (vertexId.equals(fields[1])) {
						isSelfMessageFound = true;
						vertexString = val;
						TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[3]);
						neighbors = CollectionUtil.String2Map(neighbors, fields[5]);
						randWalkProbs = CollectionUtil.String2Map(fields[6]);
						if (injLabels.size() > 0) {
							// add injected labels to the estimated scores.
							// (injection term: mu1 x p_inj x Y_injected)
							ProbUtil.AddScores(newEstimatedScores,
																 mu1 * randWalkProbs.get(Constants._kInjProb),
																 injLabels);
						}
					} else {
						// an empty third field represents that the
						// neighbor has no valid label assignment yet.
						if (fields.length > 2) {
							neighScores.put(fields[1], fields[2]);
						}
					}
				} else if (msgType.equals("edge_info")) {
					// edge_info neigh_vertex incoming_edge_weight cont_prob
					if (!incomingEdgeWeights.contains(neighId)) {
						incomingEdgeWeights.put(neighId, Double.parseDouble(fields[2]));
					}
					if (!neighborContProb.contains(neighId)) {
						neighborContProb.put(neighId, Double.parseDouble(fields[3]));
					}
				} else {
					throw new RuntimeException("Invalid message: " + val);
				}
			}
			// terminate if message from self is not received.
			if (!isSelfMessageFound) {
				throw new RuntimeException("Self message not received for node " + vertexId);
			}
			// collect neighbors' label distributions and create one single
			// label distribution
			TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
			Iterator<String> neighIter = neighScores.keySet().iterator();
			while (neighIter.hasNext()) {
				String neighName = neighIter.next();
				// symmetrized edge factor: p_i^cont * W_ij + p_j^cont * W_ji
				double mult = randWalkProbs.get(Constants._kContProb) * neighbors.get(neighName) +
					neighborContProb.get(neighName) * incomingEdgeWeights.get(neighName);
				ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
													 mu2 * mult,
													 CollectionUtil.String2Map(neighScores.get(neighName)));
			}
			// now add the collective neighbor label distribution to
			// the estimate of the current node's labels.
			ProbUtil.AddScores(newEstimatedScores,
												 1.0, weightedNeigLablDist);
			// add dummy label scores
			// (abandonment term: mu3 x p_term x dummy distribution)
			ProbUtil.AddScores(newEstimatedScores,
												 mu3 * randWalkProbs.get(Constants._kTermProb),
												 Constants.GetDummyLabelDist());
			if (keepTopKLabels < Integer.MAX_VALUE) {
				ProbUtil.KeepTopScoringKeys(newEstimatedScores, keepTopKLabels);
			}
			// divide by the per-node normalization constant M_ii
			ProbUtil.DivScores(newEstimatedScores,
												 GetNormalizationConstant(neighbors, randWalkProbs,
																									incomingEdgeWeights, neighborContProb,
																									mu1, mu2, mu3));
			// now reconstruct the vertex representation (with the new estimated scores)
			// so that the output from the current mapper can be used as input in next
			// iteration's mapper.
			String[] vertexFields = vertexString.split(_kDelim);
			// replace estimated scores with the new ones.
			// Skip the first two fields as they contained the message header and
			// vertex id respectively.
			String[] newVertexFields = new String[vertexFields.length - 2];
			for (int i = 2; i < vertexFields.length; ++i) {
				newVertexFields[i - 2] = vertexFields[i];
			}
			newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
			output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
		}
		/**
		 * Computes the per-node normalization constant
		 * M_ii = mu1 x p_inj + mu2 x sum_j (p_i^cont W_ij + p_j^cont W_ji) + mu3,
		 * matching the weights used to accumulate newEstimatedScores above.
		 */
		public double GetNormalizationConstant(
																					 TObjectDoubleHashMap neighbors,
																					 TObjectDoubleHashMap randWalkProbs,
																					 TObjectDoubleHashMap incomingEdgeWeights,
																					 TObjectDoubleHashMap neighborContProb,
																					 double mu1, double mu2, double mu3) {
			double mii = 0;
			double totalNeighWeight = 0;
			TObjectDoubleIterator nIter = neighbors.iterator();
			while (nIter.hasNext()) {
				nIter.advance();
				totalNeighWeight +=
					randWalkProbs.get(Constants._kContProb) * nIter.value();
				String neighName = (String) nIter.key();
				totalNeighWeight += neighborContProb.get(neighName) *
					incomingEdgeWeights.get(neighName);
			}
			// mu1 x p^{inj} +
			// 0.5 * mu2 x \sum_j (p_{i}^{cont} W_{ij} + p_{j}^{cont} W_{ji}) +
			// mu3
			// NOTE(review): the 0.5 factor mentioned in the comment above is
			// commented out in the code below -- confirm which is intended.
			mii = mu1 * randWalkProbs.get(Constants._kInjProb) +
				/*0.5 **/ mu2 * totalNeighWeight +
				mu3;
			return (mii);
		}
	}
	/**
	 * Job driver: reads config (hdfs_input_pattern, hdfs_output_base, iters,
	 * mu1/mu2/mu3, optional num_reducers and keep_top_k_labels) and chains
	 * one MapReduce job per iteration, feeding each round's output into the
	 * next round's input.
	 */
	public static void main(String[] args) throws Exception {
		Hashtable config = ConfigReader.read_config(args);
		String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
		String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
		int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
		int numReducers = Defaults.GetValueOrDefault((String) config.get("num_reducers"), 10);
		String currInputFilePat = baseInputFilePat;
		String currOutputFilePat = "";
		for (int iter = 1; iter <= numIterations; ++iter) {
			JobConf conf = new JobConf(MADHadoop.class);
			conf.setJobName("mad_hadoop");
			conf.setOutputKeyClass(Text.class);
			conf.setOutputValueClass(Text.class);
			conf.setMapperClass(MADHadoopMap.class);
			// conf.setCombinerClass(MADHadoopReduce.class);
			conf.setReducerClass(MADHadoopReduce.class);
			conf.setNumReduceTasks(numReducers);
			conf.setInputFormat(TextInputFormat.class);
			conf.setOutputFormat(TextOutputFormat.class);
			// hyperparameters
			conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
			conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
			conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
			conf.set("keepTopKLabels",
							 Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
																					Integer.toString(Integer.MAX_VALUE)));
			if (iter > 1) {
				// output from last iteration is the input for current iteration
				currInputFilePat = currOutputFilePat + "/*";
			}
			FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));
			currOutputFilePat = baseOutputFilePat + "_iter_" + iter;
			FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));
			JobClient.runJob(conf);
		}
	}
}
| 11,998 | 36.033951 | 108 | java |
g-ssl-crf | g-ssl-crf-master/bin/junto-master/src/main/java/junto/algorithm/parallel/AdsorptionHadoop.java | package junto.algorithm.parallel;
/**
* Copyright 2011 Partha Pratim Talukdar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import gnu.trove.map.hash.TObjectDoubleHashMap;
import gnu.trove.iterator.TObjectDoubleIterator;
import java.io.IOException;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import junto.config.*;
import junto.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Hadoop MapReduce implementation of the Adsorption label-propagation
 * algorithm. Each MapReduce iteration performs one round of estimated-label
 * updates over the graph: mappers broadcast each vertex's current label
 * estimates to its neighbors (plus a message to itself carrying the full
 * vertex state), and reducers combine the incoming messages into a new
 * normalized label distribution for the vertex.
 */
public class AdsorptionHadoop {

  // Field delimiter of the textual vertex representation; final because it
  // is never reassigned.
  private static final String _kDelim = "\t";

  /**
   * Mapper: parses a serialized vertex and emits (a) a self message carrying
   * the whole vertex record and (b) one message per neighbor carrying this
   * vertex's current estimated label scores.
   */
  public static class Map extends MapReduceBase
      implements Mapper<LongWritable, Text, Text, Text> {

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, Text> output,
                    Reporter reporter) throws IOException {
      /////
      // Constructing the vertex from the string representation
      /////
      String line = value.toString();

      // Record layout:
      // id  gold_label  injected_labels  estimated_labels  neighbors  rw_probabilities
      String[] fields = line.split(_kDelim);
      TObjectDoubleHashMap neighbors = CollectionUtil.String2Map(fields[4]);
      // A non-empty injected-labels field marks a seed node.
      boolean isSeedNode = fields[2].length() > 0;

      // If the current node is a seed node but there is no
      // estimate label information yet, then transfer the seed label
      // to the estimated label distribution. Ideally, this is likely
      // to be used in the map of the very first iteration.
      // NOTE(review): only the neighbor messages below see this transfer;
      // the self message forwards the original, unmodified line. The
      // reducer re-adds the injected labels itself, so this appears
      // intentional — confirm against the reducer logic.
      if (isSeedNode && fields[3].length() == 0) {
        fields[3] = fields[2];
      }

      // Send two types of messages:
      // -- self messages which will store the injection labels and
      //    random walk probabilities.
      // -- messages to neighbors about current estimated scores
      //    of the node.
      //
      // message to self
      output.collect(new Text(fields[0]), new Text(line));

      // message to neighbors
      TObjectDoubleIterator neighIterator = neighbors.iterator();
      while (neighIterator.hasNext()) {
        neighIterator.advance();
        // message: (neighbor_node, current_node + DELIM + curr_node_label_scores)
        output.collect(new Text((String) neighIterator.key()),
            new Text(fields[0] + _kDelim + fields[3]));
      }
    }
  }

  /**
   * Reducer: combines the self message (full vertex state) with the label
   * estimates received from neighbors into a new, normalized label
   * distribution, then re-emits the vertex in the same textual format so
   * that the output can be fed into the next iteration's mapper.
   */
  public static class Reduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {

    // Hyperparameters read from the job configuration in configure().
    private static double mu1;
    private static double mu2;
    private static double mu3;
    private static int keepTopKLabels;

    public void configure(JobConf conf) {
      mu1 = Double.parseDouble(conf.get("mu1"));
      mu2 = Double.parseDouble(conf.get("mu2"));
      mu3 = Double.parseDouble(conf.get("mu3"));
      keepTopKLabels = Integer.parseInt(conf.get("keepTopKLabels"));
    }

    public void reduce(Text key, Iterator<Text> values,
                       OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
      // new scores estimated for the current node
      TObjectDoubleHashMap newEstimatedScores = new TObjectDoubleHashMap();

      // set to true only if the message sent to itself is found.
      boolean isSelfMessageFound = false;

      String vertexId = key.toString();
      String vertexString = "";

      TObjectDoubleHashMap neighbors = null;
      TObjectDoubleHashMap randWalkProbs = null;
      // label-score strings received from each neighbor, keyed by neighbor id
      HashMap<String, String> neighScores =
          new HashMap<String, String>();

      // iterate over all the messages received at the node
      while (values.hasNext()) {
        String val = values.next().toString();
        String[] fields = val.split(_kDelim);

        // self-message check: a self message starts with this vertex's id
        // and carries the full vertex record.
        if (vertexId.equals(fields[0])) {
          isSelfMessageFound = true;
          vertexString = val;

          TObjectDoubleHashMap injLabels = CollectionUtil.String2Map(fields[2]);
          neighbors = CollectionUtil.String2Map(neighbors, fields[4]);
          randWalkProbs = CollectionUtil.String2Map(fields[5]);

          if (injLabels.size() > 0) {
            // add injected labels to the estimated scores, weighted by
            // mu1 and the injection probability of this node.
            ProbUtil.AddScores(newEstimatedScores,
                mu1 * randWalkProbs.get(Constants._kInjProb),
                injLabels);
          }
        } else {
          // an empty second field represents that the
          // neighbor has no valid label assignment yet.
          if (fields.length > 1) {
            neighScores.put(fields[0], fields[1]);
          }
        }
      }

      // terminate if message from self is not received.
      if (!isSelfMessageFound) {
        throw new RuntimeException("Self message not received for node " + vertexId);
      }

      // collect neighbors label distributions and create one single
      // label distribution
      TObjectDoubleHashMap weightedNeigLablDist = new TObjectDoubleHashMap();
      Iterator<String> neighIter = neighScores.keySet().iterator();
      while (neighIter.hasNext()) {
        String neighName = neighIter.next();
        ProbUtil.AddScores(weightedNeigLablDist, // newEstimatedScores,
            mu2 * randWalkProbs.get(Constants._kContProb) * neighbors.get(neighName),
            CollectionUtil.String2Map(neighScores.get(neighName)));
      }
      ProbUtil.Normalize(weightedNeigLablDist);

      // now add the collective neighbor label distribution to
      // the estimate of the current node's labels.
      ProbUtil.AddScores(newEstimatedScores,
          1.0, weightedNeigLablDist);

      // add dummy label scores weighted by the termination probability
      ProbUtil.AddScores(newEstimatedScores,
          mu3 * randWalkProbs.get(Constants._kTermProb),
          Constants.GetDummyLabelDist());

      // normalize the scores, retaining only the top-k labels
      ProbUtil.Normalize(newEstimatedScores, keepTopKLabels);

      // now reconstruct the vertex representation (with the new estimated scores)
      // so that the output from the current mapper can be used as input in next
      // iteration's mapper.
      String[] vertexFields = vertexString.split(_kDelim);

      // Drop the leading id field (the reducer key already carries it) and
      // replace the estimated-scores field with the new distribution.
      String[] newVertexFields = new String[vertexFields.length - 1];
      for (int i = 1; i < vertexFields.length; ++i) {
        newVertexFields[i - 1] = vertexFields[i];
      }
      newVertexFields[2] = CollectionUtil.Map2String(newEstimatedScores);
      output.collect(key, new Text(CollectionUtil.Join(newVertexFields, _kDelim)));
    }
  }

  /**
   * Entry point: runs the Adsorption job for the configured number of
   * iterations, chaining each iteration's output into the next input.
   *
   * @param args command-line arguments parsed by {@link ConfigReader}
   * @throws Exception if a Hadoop job fails
   */
  public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);

    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));

    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
      JobConf conf = new JobConf(AdsorptionHadoop.class);
      conf.setJobName("adsorption_hadoop");

      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(Text.class);

      conf.setMapperClass(Map.class);
      // conf.setCombinerClass(Reduce.class);
      conf.setReducerClass(Reduce.class);

      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);

      // hyperparameters forwarded to mappers/reducers via the job config
      conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
      conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
      conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
      conf.set("keepTopKLabels",
          Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
              Integer.toString(Integer.MAX_VALUE)));

      if (iter > 1) {
        // output from last iteration is the input for current iteration
        currInputFilePat = currOutputFilePat + "/*";
      }
      FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));

      currOutputFilePat = baseOutputFilePat + "_" + iter;
      FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));

      JobClient.runJob(conf);
    }
  }
}
| 9,681 | 36.968627 | 96 | java |
java-design-patterns | java-design-patterns-master/factory/src/test/java/com/iluwatar/factory/CoinFactoryTest.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.Test;
class CoinFactoryTest {

  /**
   * Verifies that the factory manufactures a {@link GoldCoin} when asked for
   * {@link CoinType#GOLD}.
   */
  @Test
  void shouldReturnGoldCoinInstance() {
    final var goldCoin = CoinFactory.getCoin(CoinType.GOLD);
    // assertInstanceOf reports the actual runtime type on failure, whereas
    // assertTrue(x instanceof Y) only reports "expected true".
    assertInstanceOf(GoldCoin.class, goldCoin);
  }
}
| 1,588 | 39.74359 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/test/java/com/iluwatar/factory/AppTest.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.Test;
class AppTest {

  /**
   * Smoke test: the example application must run to completion without
   * throwing any exception.
   */
  @Test
  void shouldExecuteWithoutExceptions() {
    assertDoesNotThrow(() -> App.main(new String[0]));
  }
}
| 1,532 | 38.307692 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/Coin.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
/**
* Coin interface.
*/
public interface Coin {
  /**
   * Returns a human-readable description of this coin.
   *
   * @return the coin description
   */
  String getDescription();
}
| 1,397 | 38.942857 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/App.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
import lombok.extern.slf4j.Slf4j;
/**
* Factory is an object for creating other objects. It provides a static method to
* create and return objects of varying classes, in order to hide the implementation logic
* and makes client code focus on usage rather than objects initialization and management.
*
* <p>In this example an alchemist manufactures coins. CoinFactory is the factory class and it
* provides a static method to create different types of coins.
*/
@Slf4j
public class App {

  /**
   * Program main entry point: manufactures one copper and one gold coin via
   * the factory and logs each coin's description.
   *
   * @param args command-line arguments (unused)
   */
  public static void main(String[] args) {
    LOGGER.info("The alchemist begins his work.");
    var copperCoin = CoinFactory.getCoin(CoinType.COPPER);
    var goldCoin = CoinFactory.getCoin(CoinType.GOLD);
    LOGGER.info(copperCoin.getDescription());
    LOGGER.info(goldCoin.getDescription());
  }
}
| 2,146 | 40.288462 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/CoinFactory.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
/**
* Factory of coins.
*/
/**
 * Factory of coins. A static utility class: it is never instantiated.
 */
public class CoinFactory {

  // Private constructor prevents instantiation of this utility class.
  private CoinFactory() {
  }

  /**
   * Factory method takes as a parameter the coin type and calls the
   * appropriate class.
   *
   * @param type the type of coin to manufacture
   * @return a newly constructed coin of the requested type
   */
  public static Coin getCoin(CoinType type) {
    return type.getConstructor().get();
  }
}
| 1,564 | 39.128205 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/GoldCoin.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
/**
* GoldCoin implementation.
*/
/**
 * GoldCoin implementation: a coin whose description identifies it as gold.
 */
public class GoldCoin implements Coin {

  static final String DESCRIPTION = "This is a gold coin.";

  /**
   * Returns the fixed gold-coin description.
   *
   * @return the description text
   */
  @Override
  public String getDescription() {
    return GoldCoin.DESCRIPTION;
  }
}
| 1,530 | 38.25641 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/CoinType.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
import java.util.function.Supplier;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
* Enumeration for different types of coins.
*/
public enum CoinType {

  COPPER(CopperCoin::new),
  GOLD(GoldCoin::new);

  // Supplier used to manufacture a coin of this type.
  private final Supplier<Coin> constructor;

  /**
   * Associates each coin type with the constructor of its implementation.
   *
   * @param constructor supplier producing a new coin instance
   */
  CoinType(Supplier<Coin> constructor) {
    this.constructor = constructor;
  }

  /**
   * Returns the supplier that constructs a coin of this type.
   *
   * @return the coin constructor supplier
   */
  public Supplier<Coin> getConstructor() {
    return constructor;
  }
}
| 1,620 | 36.697674 | 140 | java |
java-design-patterns | java-design-patterns-master/factory/src/main/java/com/iluwatar/factory/CopperCoin.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factory;
/**
* CopperCoin implementation.
*/
/**
 * CopperCoin implementation: a coin whose description identifies it as copper.
 */
public class CopperCoin implements Coin {

  static final String DESCRIPTION = "This is a copper coin.";

  /**
   * Returns the fixed copper-coin description.
   *
   * @return the description text
   */
  @Override
  public String getDescription() {
    return CopperCoin.DESCRIPTION;
  }
}
| 1,536 | 38.410256 | 140 | java |
java-design-patterns | java-design-patterns-master/event-sourcing/src/test/java/IntegrationTest.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import static com.iluwatar.event.sourcing.app.App.ACCOUNT_OF_DAENERYS;
import static com.iluwatar.event.sourcing.app.App.ACCOUNT_OF_JON;
import static org.junit.jupiter.api.Assertions.assertEquals;
import com.iluwatar.event.sourcing.event.AccountCreateEvent;
import com.iluwatar.event.sourcing.event.MoneyDepositEvent;
import com.iluwatar.event.sourcing.event.MoneyTransferEvent;
import com.iluwatar.event.sourcing.processor.DomainEventProcessor;
import com.iluwatar.event.sourcing.processor.JsonFileJournal;
import com.iluwatar.event.sourcing.state.AccountAggregate;
import java.math.BigDecimal;
import java.util.Date;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
* Integration Test for Event-Sourcing state recovery
* <p>
* Created by Serdar Hamzaogullari on 19.08.2017.
*/
class IntegrationTest {

  /**
   * The Domain event processor under test.
   */
  private DomainEventProcessor eventProcessor;

  /**
   * Initialize: create a fresh processor backed by a JSON file journal.
   */
  @BeforeEach
  void initialize() {
    eventProcessor = new DomainEventProcessor(new JsonFileJournal());
  }

  /**
   * Test state recovery: process a series of events, snapshot the account
   * balances, simulate a shutdown by wiping in-memory state, replay the
   * journal with a new processor, and verify the recovered balances match
   * the snapshot.
   */
  @Test
  void testStateRecovery() {
    eventProcessor.reset();

    // System.currentTimeMillis() is used directly instead of allocating a
    // java.util.Date just to read its epoch value.
    eventProcessor.process(new AccountCreateEvent(
        0, System.currentTimeMillis(), ACCOUNT_OF_DAENERYS, "Daenerys Targaryen"));
    eventProcessor.process(new AccountCreateEvent(
        1, System.currentTimeMillis(), ACCOUNT_OF_JON, "Jon Snow"));
    eventProcessor.process(new MoneyDepositEvent(
        2, System.currentTimeMillis(), ACCOUNT_OF_DAENERYS, new BigDecimal("100000")));
    eventProcessor.process(new MoneyDepositEvent(
        3, System.currentTimeMillis(), ACCOUNT_OF_JON, new BigDecimal("100")));
    eventProcessor.process(new MoneyTransferEvent(
        4, System.currentTimeMillis(), new BigDecimal("10000"), ACCOUNT_OF_DAENERYS,
        ACCOUNT_OF_JON));

    var accountOfDaenerysBeforeShotDown = AccountAggregate.getAccount(ACCOUNT_OF_DAENERYS);
    var accountOfJonBeforeShotDown = AccountAggregate.getAccount(ACCOUNT_OF_JON);

    // Simulate a shutdown: discard all in-memory aggregate state.
    AccountAggregate.resetState();

    // Recover by replaying the journal through a brand-new processor.
    eventProcessor = new DomainEventProcessor(new JsonFileJournal());
    eventProcessor.recover();

    var accountOfDaenerysAfterShotDown = AccountAggregate.getAccount(ACCOUNT_OF_DAENERYS);
    var accountOfJonAfterShotDown = AccountAggregate.getAccount(ACCOUNT_OF_JON);

    assertEquals(accountOfDaenerysBeforeShotDown.getMoney(),
        accountOfDaenerysAfterShotDown.getMoney());
    assertEquals(accountOfJonBeforeShotDown.getMoney(), accountOfJonAfterShotDown.getMoney());
  }
}
| 3,824 | 36.871287 | 140 | java |
java-design-patterns | java-design-patterns-master/event-sourcing/src/main/java/com/iluwatar/event/sourcing/processor/JsonFileJournal.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.event.sourcing.processor;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.iluwatar.event.sourcing.event.AccountCreateEvent;
import com.iluwatar.event.sourcing.event.DomainEvent;
import com.iluwatar.event.sourcing.event.MoneyDepositEvent;
import com.iluwatar.event.sourcing.event.MoneyTransferEvent;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
/**
* This is the implementation of event journal. This implementation serialize/deserialize the events
* with JSON and writes/reads them on a Journal.json file at the working directory.
*
* <p>Created by Serdar Hamzaogullari on 06.08.2017.
*/
public class JsonFileJournal extends EventJournal {

  // Cached journal lines, one serialized event per line, loaded on startup.
  private final List<String> events = new ArrayList<>();

  // Index of the next cached line to be returned by readNext().
  private int index = 0;

  /**
   * Instantiates a new Json file journal. Loads all previously persisted
   * events from Journal.json if it exists; otherwise starts with an empty
   * journal.
   */
  public JsonFileJournal() {
    file = new File("Journal.json");
    if (file.exists()) {
      try (var input = new BufferedReader(
          new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
        String line;
        while ((line = input.readLine()) != null) {
          events.add(line);
        }
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    } else {
      reset();
    }
  }

  /**
   * Write: serializes the event to JSON and appends it as one line to the
   * journal file.
   *
   * @param domainEvent the domain event
   */
  @Override
  public void write(DomainEvent domainEvent) {
    var mapper = new ObjectMapper();
    try (var output = new BufferedWriter(
        new OutputStreamWriter(new FileOutputStream(file, true), StandardCharsets.UTF_8))) {
      var eventString = mapper.writeValueAsString(domainEvent);
      output.write(eventString + "\r\n");
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Read the next domain event.
   *
   * @return the domain event, or {@code null} when the journal is exhausted
   */
  @Override
  public DomainEvent readNext() {
    if (index >= events.size()) {
      return null;
    }

    var event = events.get(index);
    index++;

    var mapper = new ObjectMapper();
    DomainEvent domainEvent;
    try {
      var jsonElement = mapper.readTree(event);
      // The concrete event type is dispatched on the stored class name.
      var eventClassName = jsonElement.get("eventClassName").asText();
      domainEvent = switch (eventClassName) {
        case "AccountCreateEvent" -> mapper.treeToValue(jsonElement, AccountCreateEvent.class);
        case "MoneyDepositEvent" -> mapper.treeToValue(jsonElement, MoneyDepositEvent.class);
        case "MoneyTransferEvent" -> mapper.treeToValue(jsonElement, MoneyTransferEvent.class);
        default -> throw new RuntimeException("Journal Event not recognized");
      };
    } catch (JsonProcessingException jsonProcessingException) {
      // Preserve the parse failure as the cause so the stack trace shows why
      // the JSON could not be converted (previously the cause was dropped).
      throw new RuntimeException("Failed to convert JSON", jsonProcessingException);
    }

    // Mark the event as a replay, not a live event — presumably so replaying
    // does not re-trigger real-time side effects; confirm in DomainEvent.
    domainEvent.setRealTime(false);
    return domainEvent;
  }
}
| 4,429 | 34.15873 | 140 | java |
java-design-patterns | java-design-patterns-master/event-sourcing/src/main/java/com/iluwatar/event/sourcing/processor/DomainEventProcessor.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.event.sourcing.processor;
import com.iluwatar.event.sourcing.event.DomainEvent;
/**
* This is the implementation of event processor. All events are processed by this class. This
* processor uses eventJournal to persist and recover events.
*
* <p>Created by Serdar Hamzaogullari on 06.08.2017.
*/
public class DomainEventProcessor {

  private final EventJournal eventJournal;

  /**
   * Creates a processor backed by the given journal.
   *
   * @param eventJournal the journal used to persist and replay events
   */
  public DomainEventProcessor(EventJournal eventJournal) {
    this.eventJournal = eventJournal;
  }

  /**
   * Applies the given event to the aggregate state and appends it to the journal.
   *
   * @param domainEvent the domain event to handle
   */
  public void process(DomainEvent domainEvent) {
    domainEvent.process();
    eventJournal.write(domainEvent);
  }

  /** Clears the underlying journal so the event history starts fresh. */
  public void reset() {
    eventJournal.reset();
  }

  /** Replays every journaled event in order, rebuilding the in-memory state. */
  public void recover() {
    for (var event = eventJournal.readNext(); event != null; event = eventJournal.readNext()) {
      event.process();
    }
  }
}
| 2,244 | 31.071429 | 140 | java |
java-design-patterns | java-design-patterns-master/event-sourcing/src/main/java/com/iluwatar/event/sourcing/processor/EventJournal.java | package com.iluwatar.event.sourcing.processor;
import com.iluwatar.event.sourcing.event.DomainEvent;
import java.io.File;
import lombok.extern.slf4j.Slf4j;
/**
* Base class for Journaling implementations.
*/
@Slf4j
public abstract class EventJournal {

  File file;

  /**
   * Appends the given event to the journal.
   *
   * @param domainEvent the domain event to persist
   */
  abstract void write(DomainEvent domainEvent);

  /**
   * Deletes the journal file, discarding all persisted events.
   *
   * <p>A failed deletion used to be silent; it is now logged so a stale journal
   * does not go unnoticed.
   */
  void reset() {
    if (file.delete()) {
      LOGGER.info("File cleared successfully............");
    } else if (file.exists()) {
      // delete() returned false while the file is still present: deletion failed.
      LOGGER.warn("Could not delete journal file: {}", file.getAbsolutePath());
    }
  }

  /**
   * Reads the next event from the journal.
   *
   * @return the next domain event, or {@code null} when the journal is exhausted
   */
  abstract DomainEvent readNext();
}
| 637 | 15.789474 | 59 | java |
java-design-patterns | java-design-patterns-master/event-sourcing/src/main/java/com/iluwatar/event/sourcing/app/App.java | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.event.sourcing.app;
import com.iluwatar.event.sourcing.event.AccountCreateEvent;
import com.iluwatar.event.sourcing.event.MoneyDepositEvent;
import com.iluwatar.event.sourcing.event.MoneyTransferEvent;
import com.iluwatar.event.sourcing.processor.DomainEventProcessor;
import com.iluwatar.event.sourcing.processor.JsonFileJournal;
import com.iluwatar.event.sourcing.state.AccountAggregate;
import java.math.BigDecimal;
import java.util.Date;
import lombok.extern.slf4j.Slf4j;
/**
* Event Sourcing: Instead of storing just the current state of the data in a domain, use an
* append-only store to record the full series of actions taken on that data. The store acts as the
* system of record and can be used to materialize the domain objects. This can simplify tasks in
* complex domains, by avoiding the need to synchronize the data model and the business domain,
* while improving performance, scalability, and responsiveness. It can also provide consistency for
* transactional data, and maintain full audit trails and history that can enable compensating
* actions.
*
* <p>This App class is an example usage of an Event Sourcing pattern. As an example, two bank
* accounts are created, then some money deposit and transfer actions are taken, so a new state of
* accounts is created. At that point, state is cleared in order to represent a system shut-down.
* After the shut-down, system state is recovered by re-creating the past events from event
* journals. Then state is printed so a user can view the last state is same with the state before a
* system shut-down.
*
* <p>Created by Serdar Hamzaogullari on 06.08.2017.
*/
@Slf4j
public class App {

  /** Account number of Daenerys. */
  public static final int ACCOUNT_OF_DAENERYS = 1;

  /** Account number of Jon. */
  public static final int ACCOUNT_OF_JON = 2;

  /**
   * Program entry point: runs the demo scenario, wipes the in-memory state to
   * simulate a shutdown, then rebuilds it by replaying the journal.
   *
   * @param args the input arguments (unused)
   */
  public static void main(String[] args) {
    var eventProcessor = new DomainEventProcessor(new JsonFileJournal());

    LOGGER.info("Running the system first time............");
    eventProcessor.reset();

    LOGGER.info("Creating the accounts............");
    // System.currentTimeMillis() replaces the legacy new Date().getTime() idiom;
    // both yield the current epoch-millisecond timestamp.
    eventProcessor.process(new AccountCreateEvent(
        0, System.currentTimeMillis(), ACCOUNT_OF_DAENERYS, "Daenerys Targaryen"));
    eventProcessor.process(new AccountCreateEvent(
        1, System.currentTimeMillis(), ACCOUNT_OF_JON, "Jon Snow"));

    LOGGER.info("Do some money operations............");
    eventProcessor.process(new MoneyDepositEvent(
        2, System.currentTimeMillis(), ACCOUNT_OF_DAENERYS, new BigDecimal("100000")));
    eventProcessor.process(new MoneyDepositEvent(
        3, System.currentTimeMillis(), ACCOUNT_OF_JON, new BigDecimal("100")));
    eventProcessor.process(new MoneyTransferEvent(
        4, System.currentTimeMillis(), new BigDecimal("10000"), ACCOUNT_OF_DAENERYS,
        ACCOUNT_OF_JON));

    LOGGER.info("...............State:............");
    LOGGER.info(AccountAggregate.getAccount(ACCOUNT_OF_DAENERYS).toString());
    LOGGER.info(AccountAggregate.getAccount(ACCOUNT_OF_JON).toString());

    LOGGER.info("At that point system had a shut down, state in memory is cleared............");
    AccountAggregate.resetState();

    LOGGER.info("Recover the system by the events in journal file............");
    eventProcessor = new DomainEventProcessor(new JsonFileJournal());
    eventProcessor.recover();

    LOGGER.info("...............Recovered State:............");
    LOGGER.info(AccountAggregate.getAccount(ACCOUNT_OF_DAENERYS).toString());
    LOGGER.info(AccountAggregate.getAccount(ACCOUNT_OF_JON).toString());
  }
}
| 4,992 | 41.313559 | 140 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.