package hudson.slaves;
import com.gargoylesoftware.htmlunit.WebResponse;
import hudson.model.*;
import hudson.security.ACL;
import hudson.security.ACLContext;
import jenkins.model.Jenkins;
import net.sf.json.JSONNull;
import net.sf.json.JSONObject;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.JenkinsRule;
import org.jvnet.hudson.test.MockAuthorizationStrategy;
import org.xml.sax.SAXException;
import java.io.IOException;
/**
* @author suren
*/
public class SlaveComputerTest {
@Rule
public JenkinsRule j = new JenkinsRule();
@Test
public void testGetAbsoluteRemotePath() throws Exception {
//default auth
Node nodeA = j.createOnlineSlave();
String path = ((DumbSlave) nodeA).getComputer().getAbsoluteRemotePath();
Assert.assertNotNull(path);
Assert.assertEquals(getRemoteFS(nodeA, null), path);
//user without Computer.CONNECT permission
String userAlice = "alice";
MockAuthorizationStrategy authStrategy = new MockAuthorizationStrategy();
authStrategy.grant(Computer.CONFIGURE, Jenkins.READ).everywhere().to(userAlice);
j.jenkins.setSecurityRealm(j.createDummySecurityRealm());
j.jenkins.setAuthorizationStrategy(authStrategy);
try(ACLContext context = ACL.as(User.getById(userAlice, true))) {
path = ((DumbSlave) nodeA).getComputer().getAbsoluteRemotePath();
Assert.assertNull(path);
Assert.assertNull(getRemoteFS(nodeA, userAlice));
}
//user with Computer.CONNECT permission
String userBob = "bob";
authStrategy.grant(Computer.CONNECT, Jenkins.READ).everywhere().to(userBob);
try(ACLContext context = ACL.as(User.getById(userBob, true))) {
path = ((DumbSlave) nodeA).getComputer().getAbsoluteRemotePath();
Assert.assertNotNull(path);
Assert.assertNotNull(getRemoteFS(nodeA, userBob));
}
}
/**
* Gets the remote FS path of a node through the JSON API.
* @param node slave node to query
* @param user the user to log in as, or null for anonymous access
* @return remote path, or null if it is not visible to the user
* @throws IOException in case of a communication problem.
* @throws SAXException in case of a response parsing problem.
*/
private String getRemoteFS(Node node, String user) throws Exception {
JenkinsRule.WebClient wc = j.createWebClient();
if(user != null) {
wc.login(user);
}
WebResponse response = wc.goTo("computer/" + node.getNodeName() + "/api/json",
"application/json").getWebResponse();
JSONObject json = JSONObject.fromObject(response.getContentAsString());
Object pathObj = json.get("absoluteRemotePath");
if(pathObj instanceof JSONNull) {
return null; // the value is null in here
} else {
return pathObj.toString();
}
}
}
|
package example;
//-*- mode:java; encoding:utf-8 -*-
// vim:set fileencoding=utf-8:
//@homepage@
import java.awt.*;
import java.awt.event.*;
import java.awt.geom.*;
import java.awt.image.*;
import javax.swing.*;
public final class MainPanel extends JPanel {
private MainPanel() {
super(new BorderLayout());
add(new PaintPanel());
setPreferredSize(new Dimension(320, 240));
}
public static void main(String... args) {
EventQueue.invokeLater(new Runnable() {
@Override public void run() {
createAndShowGUI();
}
});
}
public static void createAndShowGUI() {
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch (ClassNotFoundException | InstantiationException
| IllegalAccessException | UnsupportedLookAndFeelException ex) {
ex.printStackTrace();
}
JFrame frame = new JFrame("@title@");
frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
frame.getContentPane().add(new MainPanel());
frame.setResizable(false);
frame.pack();
frame.setLocationRelativeTo(null);
frame.setVisible(true);
}
}
class PaintPanel extends JPanel implements MouseMotionListener, MouseListener {
private Point startPoint = new Point();
private final transient BufferedImage backImage;
private static final TexturePaint TEXTURE = TextureFactory.createCheckerTexture(6, new Color(200, 150, 100, 50));
private final Rectangle r = new Rectangle(320, 240);
private final int[] pixels = new int[r.width * r.height];
private final transient MemoryImageSource source = new MemoryImageSource(r.width, r.height, pixels, 0, r.width);
private int penc;
protected PaintPanel() {
super();
addMouseMotionListener(this);
addMouseListener(this);
backImage = new BufferedImage(r.width, r.height, BufferedImage.TYPE_INT_ARGB);
Graphics2D g2 = backImage.createGraphics();
g2.setPaint(TEXTURE);
g2.fill(r);
g2.dispose();
}
@Override protected void paintComponent(Graphics g) {
super.paintComponent(g);
Graphics2D g2 = (Graphics2D) g.create();
g2.drawImage(backImage, 0, 0, this);
// creating a fresh Image from the MemoryImageSource on each paint re-reads the current
// pixels array, so explicit newPixels() notifications are not required here
g2.drawImage(createImage(source), 0, 0, this);
g2.dispose();
}
@Override public void mouseDragged(MouseEvent e) {
Point pt = e.getPoint();
double xDelta = e.getX() - startPoint.getX();
double yDelta = e.getY() - startPoint.getY();
double delta = Math.max(Math.abs(xDelta), Math.abs(yDelta));
double xIncrement = xDelta / delta;
double yIncrement = yDelta / delta;
double xStart = startPoint.x;
double yStart = startPoint.y;
for (int i = 0; i < delta; i++) {
Point p = new Point((int) xStart, (int) yStart);
//if (p.x < 0 || p.y < 0 || p.x >= r.width || p.y >= r.height) {
if (!r.contains(p)) {
break;
}
paintStamp(pixels, p, penc);
//source.newPixels(p.x - 2, p.y - 2, 4, 4);
xStart += xIncrement;
yStart += yIncrement;
}
startPoint = pt;
}
private void paintStamp(int[] pixels, Point p, int penc) {
//1x1:
//pixels[p.x + p.y * 320] = penc;
//3x3 square:
for (int n = -1; n <= 1; n++) {
for (int m = -1; m <= 1; m++) {
int t = p.x + n + (p.y + m) * r.width;
if (t >= 0 && t < r.width * r.height) { // linear index check; may wrap to the adjacent row at the left/right image edges
pixels[t] = penc;
}
}
}
repaint(p.x - 2, p.y - 2, 4, 4);
}
@Override public void mousePressed(MouseEvent e) {
startPoint = e.getPoint();
penc = e.getButton() == MouseEvent.BUTTON1 ? 0xFF000000 : 0x0;
}
@Override public void mouseMoved(MouseEvent e) { /* not needed */ }
@Override public void mouseExited(MouseEvent e) { /* not needed */ }
@Override public void mouseEntered(MouseEvent e) { /* not needed */ }
@Override public void mouseReleased(MouseEvent e) { /* not needed */ }
@Override public void mouseClicked(MouseEvent e) { /* not needed */ }
}
final class TextureFactory {
private static final Color DEFAULT_COLOR = new Color(100, 100, 100, 100);
private TextureFactory() { /* Singleton */ }
public static TexturePaint createCheckerTexture(int cs, Color color) {
int size = cs * cs;
BufferedImage img = new BufferedImage(size, size, BufferedImage.TYPE_INT_ARGB);
Graphics2D g2 = img.createGraphics();
g2.setPaint(color);
g2.fillRect(0, 0, size, size);
for (int i = 0; i * cs < size; i++) {
for (int j = 0; j * cs < size; j++) {
if ((i + j) % 2 == 0) {
g2.fillRect(i * cs, j * cs, cs, cs);
}
}
}
g2.dispose();
return new TexturePaint(img, new Rectangle(size, size));
}
public static TexturePaint createCheckerTexture(int cs) {
return createCheckerTexture(cs, DEFAULT_COLOR);
}
}
// class PaintPanel extends JPanel implements MouseMotionListener, MouseListener {
// private static final Color ERASER = new Color(0x0, true);
// private boolean isPen = true;
// private Point startPoint = new Point();
// private BufferedImage currentImage = null;
// private BufferedImage backImage = null;
// private TexturePaint texture = makeTexturePaint();
// protected PaintPanel() {
// super();
// addMouseMotionListener(this);
// addMouseListener(this);
// currentImage = new BufferedImage(320, 240, BufferedImage.TYPE_INT_ARGB);
// backImage = new BufferedImage(320, 240, BufferedImage.TYPE_INT_ARGB);
// Graphics2D g2 = backImage.createGraphics();
// g2.setPaint(texture);
// g2.fillRect(0, 0, 320, 240);
// g2.dispose();
// private static BufferedImage makeBGImage() {
// Color color = new Color(200, 150, 100, 50);
// int cs = 6;
// int sz = cs * cs;
// BufferedImage img = new BufferedImage(sz, sz, BufferedImage.TYPE_INT_ARGB);
// Graphics2D g2 = img.createGraphics();
// g2.setPaint(color);
// g2.fillRect(0, 0, sz, sz);
// for (int i = 0; i * cs < sz; i++) {
// for (int j = 0; j * cs < sz; j++) {
// if ((i + j) % 2 == 0) {
// g2.fillRect(i * cs, j * cs, cs, cs);
// g2.dispose();
// return img;
// private static TexturePaint makeTexturePaint() {
// BufferedImage img = makeBGImage();
// return new TexturePaint(img, new Rectangle(img.getWidth(), img.getHeight()));
// @Override protected void paintComponent(Graphics g) {
// super.paintComponent(g);
// //if (Objects.nonNull(backImage)) {
// g.drawImage(backImage, 0, 0, this);
// //if (Objects.nonNull(currentImage)) {
// g.drawImage(currentImage, 0, 0, this);
// @Override public void mouseDragged(MouseEvent e) {
// Point pt = e.getPoint();
// Graphics2D g2 = currentImage.createGraphics();
// g2.setStroke(new BasicStroke(3f));
// if (isPen) {
// g2.setPaint(Color.BLACK);
// } else {
// g2.setComposite(AlphaComposite.Clear);
// g2.setPaint(ERASER);
// g2.drawLine(startPoint.x, startPoint.y, pt.x, pt.y);
// g2.dispose();
// startPoint = pt;
// repaint();
// @Override public void mousePressed(MouseEvent e) {
// startPoint = e.getPoint();
// isPen = e.getButton() == MouseEvent.BUTTON1;
// @Override public void mouseMoved(MouseEvent e) {}
// @Override public void mouseExited(MouseEvent e) {}
// @Override public void mouseEntered(MouseEvent e) {}
// @Override public void mouseReleased(MouseEvent e) {}
// @Override public void mouseClicked(MouseEvent e) {}
|
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.rmi.Remote;
import java.rmi.RemoteException;
import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;
import java.rmi.server.UnicastRemoteObject;
import java.util.HashMap;
interface StorageInterface extends Remote {
// CALLS FROM CLIENT
boolean init(String local_path, String filesystem_path) throws RemoteException, UnknownHostException;
boolean close(String local_path) throws RemoteException;
boolean create(String path) throws RemoteException;
boolean create(String path, String blob) throws IOException;
boolean del(String path) throws RemoteException;
File get(String path) throws RemoteException;
}
public class StorageServer implements StorageInterface {
private static MetaDataInterface metaData;
private HashMap<String, String> users;
private StorageServer() {
this.users = new HashMap<String, String>();
}
public static void main(String args[]) {
try {
StorageServer obj = new StorageServer();
StorageInterface stub = (StorageInterface) UnicastRemoteObject.exportObject(obj, 0);
Registry registry = LocateRegistry.getRegistry();
registry.bind("StorageServer", stub);
metaData = (MetaDataInterface) registry.lookup("MetaDataServer");
} catch (Exception e) {
System.err.println(e.toString());
e.printStackTrace();
}
}
// CALLS FROM CLIENT
public boolean init(String local_path, String filesystem_path) throws UnknownHostException, RemoteException {
// On startup
// Example: init("/home/student/courses", "/courses"); -> Local dir maps into global namespace
// Must call add_storage_server on the metadata server
String clean_local_path = cleanPath(local_path);
String clean_filesystem_path = cleanPath(filesystem_path);
String hostname = InetAddress.getLocalHost().getHostName();
users.put(clean_local_path, clean_filesystem_path);
metaData.addStorageServer(hostname, clean_filesystem_path);
return create(clean_filesystem_path);
}
public boolean close(String local_path) throws RemoteException {
// On close
// Example: close("/home/student/courses"); -> Closes local share
// Must call del_storage_server on the metadata server
String clean_local_path = cleanPath(local_path);
String file_system_path = users.get(clean_local_path);
metaData.delStorageServer(file_system_path);
users.remove(clean_local_path);
return del(file_system_path);
}
public boolean create(String path) throws RemoteException {
// Example: create("/courses"); -> Creates a directory
String clean_path = cleanPath(path);
metaData.addStorageItem(clean_path);
return new File(clean_path).mkdirs();
}
public boolean create(String path, String blob) throws IOException {
String clean_path = cleanPath(path);
metaData.addStorageItem(clean_path);
Files.write(Paths.get(clean_path), blob.getBytes());
return true;
}
public boolean del(String path) throws RemoteException {
// Example: del("/courses"); -> Removes sub-tree
// Example: del("/courses/file1.txt"); -> Removes file
String clean_path = cleanPath(path);
metaData.delStorageItem(clean_path);
return delete(new File(clean_path));
}
private boolean delete(File f) {
if (f.isDirectory())
for (File c : f.listFiles())
delete(c);
return f.delete();
}
public File get(String path) {
// Example: get("/courses/file1.txt"); -> Downloads the file
String clean_path = cleanPath(path);
return new File(clean_path);
}
private String cleanPath(String path) { return path.replaceAll("^/", "").replaceAll("/$", ""); }
}
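// Illustrative sketch, not part of the original project: StorageServer compiles against a
// MetaDataInterface remote stub that is not included in this snippet. Inferred only from the
// calls made above (addStorageServer, delStorageServer, addStorageItem, delStorageItem), the
// interface presumably looks roughly like this; the real declaration may differ.
interface MetaDataInterface extends Remote {
void addStorageServer(String hostname, String filesystemPath) throws RemoteException;
void delStorageServer(String filesystemPath) throws RemoteException;
void addStorageItem(String path) throws RemoteException;
void delStorageItem(String path) throws RemoteException;
}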
|
//import java.util.Set;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import edu.uci.ics.jung.algorithms.cluster.WeakComponentClusterer;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistance;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistanceWoVer;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraShortestPath;
import edu.uci.ics.jung.graph.DirectedSparseGraph;
import edu.uci.ics.jung.graph.UndirectedSparseGraph;
import edu.uci.ics.jung.graph.util.Pair;
import gnu.getopt.*;
import java.io.BufferedReader;
//import java.io.FileNotFoundException;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.io.File;
import jaligner.Alignment;
//import org.jgrapht.alg.DijkstraShortestPath;
public class TransAssembly_allProbPaths {
private static final boolean DEBUG = true;
private static final SeqVertex ROOT = new SeqVertex(-1, "S",Integer.MAX_VALUE);
private static final SeqVertex T_VERTEX = new SeqVertex(-2, "E");
private static final int LINE_LEN = 60;
private static int LAST_ID = -1;
private static int LAST_REAL_ID = -1;
private static int MAX_DEPTH = 0;
private static double EDGE_THR = 0.02; // compares between each edge and its sister edges (u->v; vs all output of u, or all input of v)
private static double FLOW_THR = 0.02;// compares between each edge and its flow of its vertices (u->v; vs all input of u, or all output of v)
private static boolean NO_GRAPH_PRUNING = false;
private static final int COMP_AVG_COV_THR = 1;
private static final int INITIAL_EDGE_ABS_THR = 0;
private static int MIN_READ_SUPPORT_THR = 1;
private static int MIN_OUTPUT_SEQ;
// Paths Too Similar Settings
private static int MAX_DIFFS_SAME_PATH = 2;
private static int DIFFS_WINDOW_SIZE = 100;
private static int MAX_FINAL_DIFFS_IN_WINDOW = 5;
private static float MIN_PERCENT_IDENTITY_SAME_PATH = 98.0f;
private static int MAX_INTERNAL_GAP_SAME_PATH = 10;
private static boolean NO_PATH_MERGING = false; // disables use of the above
private static boolean NO_REMOVE_LOWER_RANKED_PATHS = false;
private static boolean NO_GRAPH_COMPACTION = false; // also assumes no path merging
// pasafly params
private static int MAX_VERTEX_DISCOVERY_TIME_DIFF_ALLOW_COMPARE = 0; // used in pasafly-mode //FIXME: should be zero, must investigate
private static boolean FAST_PASA = false;
private static int MAX_NUM_PATHS_PER_PASA_NODE = 10;
// Path alignment settings
// private static int MAX_INTERNAL_GAP_LENGTH = 20; // minimum cassette exon size that might be skipped in an alt-splice variant.
private static boolean SMITH_WATERMAN_ALIGN_FLAG = false;
private static int MAX_SEQ_LEN_DP_ALIGN = 10000;
private static boolean MISO_OUTPUT = true;
//private static boolean USE_PATH_ALIGNMENT = true;
private static int MAX_PAIR_DISTANCE = 0;
private static int PATH_REINFORCEMENT_DISTANCE_PERCENT = 25;
private static int PATH_REINFORCEMENT_DISTANCE = 0;
private static int MAX_NUM_PATHS_PER_NODE_INIT = 100;
private static int MAX_NUM_PATHS_PER_NODE_EXTEND = 25;
// read sequence to graph mapping criteria
private static int MAX_MM_ALLOWED = 0; // dynamically updated global (bad)
private static int MAX_MM_ALLOWED_CAP = 0; // dynamically updated global (bad)
private static double MAX_READ_SEQ_DIVERGENCE = 0.05;
private static final double MAX_READ_LOCAL_SEQ_DIVERGENCE = 0.1; // avoid very bad locally aligned regions along the way.
private static final int EXTREME_EDGE_FLOW_FACTOR = 200;
// path extension alternative options
private static final boolean USE_TRIPLETS = false; // do not use.
private static boolean ALL_POSSIBLE_PATHS = false; // most lenient form of path validation: all edge combinations allowed.
private static boolean LENIENT_PATH_CHECKING = false; // lenient: give benefit of doubt for connections that do not conflict
// path reinforcement check options
private static boolean ORIGINAL_PATH_EXTENSIONS = false; // examines paths from nodes to sinks
private static int KMER_SIZE = 0;
private static boolean GENERATE_FULL_SEQ_GRAPH = false;
private static boolean GENERATE_MIDDLE_DOT_FILES = false;
private static boolean COLLAPSE_SNPs = true;
private static boolean TRIPLET_LOCKING = true; //deprecated
private static boolean EXTENDED_TRIPLET = true; // deprecated
private static boolean TRIPLET_STRICT = false; // deprecated
private static boolean FRACTURE_UNRESOLVED_XSTRUCTURE = false; // potentially useful - deciding to just annotate these in the fasta headers.
private static boolean INFER_UNRESOLVED_XSTRUCTURE_PATHS = false; // harder problem than anticipated. Turn to true statistical inference.
private static boolean FIND_ALSO_DIFF_PATHS = false;
private static boolean USE_DEGENERATE_CODE = false;
private static String[] LETTERS = new String[]{"A","C","G","T"};
private static PrintStream ERR_STREAM;
private static boolean USE_STDERR = false;
private static boolean LOG_STDERR = false;
private static boolean NO_CLEANUP = false; // if set to true, input files are kept (not removed)
private static boolean RUN_ALL_VS_ALL_FILTER = false;
private static int ALL_VS_ALL_MAX_DP_LEN = 1000;
private static HashMap<PairPath,ArrayList<String>> LONG_READ_PATH_MAP; // PairPath => ArrayList(long_reads_names)
private static HashMap<String, PairPath> LONG_READ_NAME_TO_PPath; // string => PairPath
private static boolean cufflinksOpt = false; // minimum path set (Maria's code)
private static boolean CUFF_NO_EXTEND = false; // enable to report only disjoint chains w/o extensions
private static boolean EXPAND_LOOPS = true; //FIXME: parameterize this after testing.
private static boolean pasaFlyOpt = false; // PASA algorithm using pairpaths
private static boolean pasaFlyUniqueOpt = false; // variation on PASA
private static boolean ILLUSTRATE_FINAL_ASSEMBLIES = true;
private static boolean MAKE_PE_SE = false;
private static boolean NO_EM_REDUCE = false;
private static Float MIN_ISOFORM_PCT_LEN_OVERLAP = 30f;
private static Integer GENE_COUNTER = 0;
private static Float MIN_TOTAL_ISOFORM_EXPRESSION = 0f; // turning off for now, requires further investigation
private static Float MIN_RELATIVE_ISOFORM_EXPRESSION = 5f;
private static Integer READ_END_PATH_TRIM_LENGTH = 0;
private static String FILE = ""; // updated to 'file' value below.
private static Boolean TREAT_PAIRS_AS_SINGLE = false;
private static Integer PATH_COUNTER = 0;
private static Boolean USE_DP_READ_TO_VERTEX_ALIGN = true;
public static Comparator<SeqVertex> SeqVertexIDorderComparator = new Comparator<SeqVertex>() {
public int compare (SeqVertex v_a, SeqVertex v_b) {
// order vertices by ascending ID
Integer v_a_id = v_a.getID();
Integer v_b_id = v_b.getID();
if (v_a_id < v_b_id) {
return(-1);
}
else if (v_a_id > v_b_id) {
return(1);
}
else {
return(0);
}
}
};
private static Map<String, String> DEGENERATE_CODE = new HashMap<String, String>() {
private static final long serialVersionUID = 1L;
{
put("AG","R");
put("CT","Y");
put("CG","S");
put("AT","W");
put("GT","K");
put("AC","M");
put("CGT","B");
put("AGT","D");
put("ACT","H");
put("ACG","V");
put("ACGT","N");
}
};
private static Map<String, String> DEGENERATE_CODE_REV = new HashMap<String, String>() {
private static final long serialVersionUID = 1L;
{
put("R","AG");
put("Y","CT");
put("S","GC");
put("W","AT");
put("K","GT");
put("M","AC");
put("B","CGT");
put("D","AGT");
put("H","ACT");
put("V","ACG");
put("N","ACGT");
}
};
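// Illustrative only (hypothetical helper, not in the original class): DEGENERATE_CODE maps an
// alphabetically ordered set of nucleotides to its IUPAC ambiguity letter, so a SNP-bubble lookup
// would sort and deduplicate the observed bases first, e.g. {"G","A"} -> key "AG" -> "R".
private static String degenerateLetterFor(java.util.Collection<String> nucleotides) {
StringBuilder key = new StringBuilder();
for (String nt : new java.util.TreeSet<String>(nucleotides)) {
key.append(nt); // TreeSet iteration yields the bases sorted and deduplicated
}
return DEGENERATE_CODE.get(key.toString()); // null if the combination is not a recognized key
}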
//private static Map<String, AlignmentStats> NUM_MATCHES_HASH;
private static Map<String, AlignmentStats> NUM_MISMATCHES_HASH;
public static void main(String[] args) throws Exception
{
long totalNumReads = 0;
/*
for(int i = 0; i < args.length; i ++)
System.out.println(args[i]);
*/
String file = "";
boolean printUsage = false;
LongOpt[] longopts = new LongOpt[100]; // big enough we don't have to keep incrementing it as our option list grows.
longopts[0] = new LongOpt("help", LongOpt.NO_ARGUMENT, null, 'h');
longopts[1] = new LongOpt("use-degenerate-code", LongOpt.OPTIONAL_ARGUMENT, null, 1);
longopts[2] = new LongOpt("dont-collapse-snps", LongOpt.OPTIONAL_ARGUMENT, null, 'S');
longopts[3] = new LongOpt("generate-full-sequence-graphs", LongOpt.OPTIONAL_ARGUMENT, null, 'G');
longopts[4] = new LongOpt("stderr", LongOpt.OPTIONAL_ARGUMENT, null, 2);
StringBuffer sb = new StringBuffer(0);
longopts[5] = new LongOpt("edge-thr", LongOpt.OPTIONAL_ARGUMENT, sb, 'E');
longopts[6] = new LongOpt("flow-thr", LongOpt.OPTIONAL_ARGUMENT, sb, 'W');
longopts[7] = new LongOpt("min_per_id_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 3);
longopts[8] = new LongOpt("max_number_of_paths_per_node_init", LongOpt.OPTIONAL_ARGUMENT, null, 4);
longopts[9] = new LongOpt("min_per_align_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 5);
longopts[10] = new LongOpt("SW", LongOpt.NO_ARGUMENT, null, 6); //SMITH_WATERMAN_ALIGNMENT_FLAG
longopts[11] = new LongOpt("all_possible_paths", LongOpt.NO_ARGUMENT, null, 7); // hidden option, testing only
longopts[12] = new LongOpt("lenient_path_extension", LongOpt.NO_ARGUMENT, null, 8); // hidden for now
longopts[13] = new LongOpt("path_reinforcement_distance", LongOpt.OPTIONAL_ARGUMENT, null, 9);
longopts[14] = new LongOpt("original_path_extension", LongOpt.OPTIONAL_ARGUMENT, null, 10);
longopts[15] = new LongOpt("ZIPPER", LongOpt.NO_ARGUMENT, null, 11); // hidden for now
longopts[16] = new LongOpt("NO_MISO_OUTPUT", LongOpt.NO_ARGUMENT, null, 12); // hidden for now
longopts[17] = new LongOpt("max_diffs_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 13);
longopts[18] = new LongOpt("max_internal_gap_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 14);
longopts[19] = new LongOpt("generate_intermediate_dot_files", LongOpt.NO_ARGUMENT, null, 15);
longopts[20] = new LongOpt("triplet-lock", LongOpt.NO_ARGUMENT, null, 16); // deprecated
longopts[21] = new LongOpt("max_seq_len_DP_align", LongOpt.OPTIONAL_ARGUMENT, null, 17);
longopts[22] = new LongOpt("no_cleanup", LongOpt.NO_ARGUMENT, null, 18);
longopts[23] = new LongOpt("log_stderr", LongOpt.NO_ARGUMENT, null, 19);
longopts[24] = new LongOpt("__REDUCE", LongOpt.NO_ARGUMENT, null, 20); // deprecated
longopts[25] = new LongOpt("diffs_window_size", LongOpt.OPTIONAL_ARGUMENT, null, 21);
longopts[26] = new LongOpt("max_final_diffs_in_window", LongOpt.OPTIONAL_ARGUMENT, null, 22);
longopts[27] = new LongOpt("CuffFly", LongOpt.NO_ARGUMENT, null, 23);
longopts[28] = new LongOpt("no_path_merging", LongOpt.NO_ARGUMENT, null, 24);
longopts[29] = new LongOpt("no_pruning", LongOpt.NO_ARGUMENT, null, 25);
longopts[30] = new LongOpt("no_compaction", LongOpt.NO_ARGUMENT, null, 26);
longopts[31] = new LongOpt("triplet_strict", LongOpt.NO_ARGUMENT, null, 27);
longopts[32] = new LongOpt("extended_triplet", LongOpt.NO_ARGUMENT, null, 28);
longopts[33] = new LongOpt("MAKE_PE_SE", LongOpt.NO_ARGUMENT, null, 29);
longopts[34] = new LongOpt("cuff_no_extend", LongOpt.NO_ARGUMENT, null, 30);
longopts[35] = new LongOpt("PasaFly", LongOpt.NO_ARGUMENT, null, 31);
longopts[36] = new LongOpt("FAST_PASA", LongOpt.NO_ARGUMENT, null, 32);
longopts[37] = new LongOpt("max_num_paths_per_pasa_node", LongOpt.OPTIONAL_ARGUMENT, null, 33);
longopts[38] = new LongOpt("max_number_of_paths_per_node_extend", LongOpt.OPTIONAL_ARGUMENT, null, 34);
longopts[39] = new LongOpt("PasaFlyUnique", LongOpt.NO_ARGUMENT, null, 35);
longopts[40] = new LongOpt("NO_EM_REDUCE", LongOpt.NO_ARGUMENT, null, 36);
longopts[41] = new LongOpt("MIN_PCT_DOM_ISO_EXPR", LongOpt.OPTIONAL_ARGUMENT, null, 37);
longopts[42] = new LongOpt("READ_END_PATH_TRIM_LENGTH", LongOpt.OPTIONAL_ARGUMENT, null, 38);
longopts[43] = new LongOpt("TREAT_PAIRS_AS_SINGLE", LongOpt.OPTIONAL_ARGUMENT, null, 39);
longopts[44] = new LongOpt("no_remove_lower_ranked_paths", LongOpt.OPTIONAL_ARGUMENT, null, 40);
longopts[45] = new LongOpt("NO_DP_READ_TO_VERTEX_ALIGN", LongOpt.NO_ARGUMENT, null, 41);
longopts[46] = new LongOpt("MAX_READ_SEQ_DIVERGENCE", LongOpt.OPTIONAL_ARGUMENT, null, 42);
longopts[47] = new LongOpt("MIN_TOTAL_ISOFORM_EXPRESSION", LongOpt.OPTIONAL_ARGUMENT, null, 43);
Getopt g = new Getopt("TransAssembly", args, "L:F:N:C:V:SGDhO:R:",longopts);
int c;
if (MAX_SEQ_LEN_DP_ALIGN < ALL_VS_ALL_MAX_DP_LEN) {
ALL_VS_ALL_MAX_DP_LEN = MAX_SEQ_LEN_DP_ALIGN;
}
while ((c = g.getopt()) != -1)
{
switch(c)
{
case 1:
USE_DEGENERATE_CODE = true;
break;
case 2:
USE_STDERR = true;
break;
case 3:
MIN_PERCENT_IDENTITY_SAME_PATH = Float.parseFloat(g.getOptarg());
break;
case 4:
MAX_NUM_PATHS_PER_NODE_INIT = Integer.parseInt(g.getOptarg());
break;
case 5:
// no op
break;
case 6:
SMITH_WATERMAN_ALIGN_FLAG = true;
break;
case 7:
ALL_POSSIBLE_PATHS = true;
break;
case 8:
LENIENT_PATH_CHECKING = true;
break;
case 9:
PATH_REINFORCEMENT_DISTANCE = Integer.parseInt(g.getOptarg());
break;
case 10:
ORIGINAL_PATH_EXTENSIONS = true;
break;
case 11:
// available
break;
case 12:
MISO_OUTPUT = false; // hidden option, that will output in MISO format
break;
case 13:
MAX_DIFFS_SAME_PATH = Integer.parseInt(g.getOptarg());
break;
case 14:
MAX_INTERNAL_GAP_SAME_PATH = Integer.parseInt(g.getOptarg());
break;
case 15:
GENERATE_MIDDLE_DOT_FILES = true;
break;
case 16:
TRIPLET_LOCKING = true;
break;
case 17:
MAX_SEQ_LEN_DP_ALIGN = Integer.parseInt(g.getOptarg());
break;
case 18:
NO_CLEANUP = true;
break;
case 19:
LOG_STDERR = true;
break;
case 20:
//RUN_ALL_VS_ALL_FILTER = true;
break;
case 21:
DIFFS_WINDOW_SIZE = Integer.parseInt(g.getOptarg());
break;
case 22:
MAX_FINAL_DIFFS_IN_WINDOW = Integer.parseInt(g.getOptarg());
break;
case 23:
//min_path_set
cufflinksOpt = true;
break;
case 24:
// no path merging flag
NO_PATH_MERGING = true;
break;
case 25:
// turn off graph pruning (useful for testing purposes)
NO_GRAPH_PRUNING = true;
break;
case 26:
NO_GRAPH_COMPACTION = true;
NO_GRAPH_PRUNING = true;
break;
case 27:
TRIPLET_STRICT = true;
TRIPLET_LOCKING = true;
break;
case 28:
EXTENDED_TRIPLET = true;
TRIPLET_LOCKING = true;
break;
case 29:
MAKE_PE_SE = true;
break;
case 30:
CUFF_NO_EXTEND = true;
break;
case 31:
pasaFlyOpt = true;
break;
case 32:
FAST_PASA = true;
break;
case 33:
MAX_NUM_PATHS_PER_PASA_NODE = Integer.parseInt(g.getOptarg());
break;
case 34:
MAX_NUM_PATHS_PER_NODE_EXTEND = Integer.parseInt(g.getOptarg());
break;
case 35:
pasaFlyUniqueOpt = true;
break;
case 36:
NO_EM_REDUCE = true;
break;
case 37:
MIN_RELATIVE_ISOFORM_EXPRESSION = Float.parseFloat(g.getOptarg());
break;
case 38:
READ_END_PATH_TRIM_LENGTH = Integer.parseInt(g.getOptarg());
break;
case 39:
TREAT_PAIRS_AS_SINGLE = true;
break;
case 40:
NO_REMOVE_LOWER_RANKED_PATHS = true;
break;
case 41:
USE_DP_READ_TO_VERTEX_ALIGN = false;
break;
case 42:
MAX_READ_SEQ_DIVERGENCE = Float.parseFloat(g.getOptarg());
break;
case 43:
MIN_TOTAL_ISOFORM_EXPRESSION = Float.parseFloat(g.getOptarg());
break;
case 'S':
COLLAPSE_SNPs = false;
break;
case 'G':
GENERATE_FULL_SEQ_GRAPH = true;
break;
case 'h':
printUsage = true;
break;
case 'L':
MIN_OUTPUT_SEQ = Integer.parseInt(g.getOptarg());
break;
case 'F':
MAX_PAIR_DISTANCE = Integer.parseInt(g.getOptarg());
break;
case 'N':
totalNumReads = Long.parseLong(g.getOptarg());
break;
case 'V':
BFLY_GLOBALS.VERBOSE_LEVEL = Integer.parseInt(g.getOptarg());
break;
case 'C':
file = g.getOptarg();
FILE = file;
break;
case 'D':
FIND_ALSO_DIFF_PATHS = true;
break;
case 'O':
PATH_REINFORCEMENT_DISTANCE_PERCENT = Integer.parseInt(g.getOptarg());
break;
case 'R':
MIN_READ_SUPPORT_THR = Integer.parseInt(g.getOptarg());
break;
case 0:
switch(Integer.parseInt(sb.toString()))
{
case 'E':
// compares between each edge and its sister edges (u->v; vs all output of u, or all input of v)
EDGE_THR = Double.parseDouble(g.getOptarg());
break;
case 'W':
// compares between each edge and its flow of its vertices (u->v; vs all input of u, or all output of v)
FLOW_THR = Double.parseDouble(g.getOptarg());
break;
}
break;
case '?':
printUsage = true;
break;
default:
printUsage = true;
}
}
if (LOG_STDERR)
ERR_STREAM = new PrintStream(new FileOutputStream(file + ".err"));
debugMes("Started",10);
debugMes("using Path alignment for path comparisons", 5);
debugMes("combine paths if (identity=(numberOfMatches/shorterLen) > " + MIN_PERCENT_IDENTITY_SAME_PATH+"%" +
" or if we have <= " + MAX_DIFFS_SAME_PATH+ " mismatches) "
+ "and if we have internal gap lengths <= " + MAX_INTERNAL_GAP_SAME_PATH
, 5);
int path_checking_opt_count = 0;
if (LENIENT_PATH_CHECKING) {
debugMes("Path extension mode: lenient.", 5);
path_checking_opt_count++;
}
if (ORIGINAL_PATH_EXTENSIONS) {
debugMes("Path extension mode: original path extension.", 5);
path_checking_opt_count++;
}
if (ALL_POSSIBLE_PATHS) {
debugMes("Path extension mode: all possible paths.", 5);
path_checking_opt_count++;
}
if (path_checking_opt_count > 1) {
System.err.println("Error, cannot enable more than one path checking option.");
printUsage = true;
}
if (cufflinksOpt || pasaFlyOpt || pasaFlyUniqueOpt) {
debugMes("CuffFly or PasaFly selected - running in SE mode to avoid uncertain alignments that break DAG transitivity", 5);
MAKE_PE_SE = true;
}
printUsage = printUsage
|| file.equals("")
|| totalNumReads==0
|| MAX_PAIR_DISTANCE == 0
|| MIN_READ_SUPPORT_THR < 1;
if (printUsage)
{
System.err.println("");
System.err.println("
System.err.println("
System.err.println("# Required:");
System.err.println("# -N <int> total number of reads or fragment pairs");
System.err.println("# -L <int> min length for an assembled sequence to be reported");
System.err.println("# -F <int> maximum fragment length (extreme dist between paired ends)");
System.err.println("# -C <string> prefix for component/reads file");
System.err.println("
System.err.println("
System.err.println("
System.err.println("# Optional:");
System.err.println("
System.err.println("# Graph compaction:");
System.err.println("# --edge-thr=<double> sets the threshold for keeping the edge (u->v), compared to all *output* of u, or all *input* of v");
System.err.println("# (default: 0.05).");
System.err.println("# --flow-thr=<double> sets the threshold for keeping the edge (u->v), compared to all *input* of u, or all *output* of v");
System.err.println("# (default: 0.02).");
System.err.println("# --no_pruning disable pruning of graph based on above thresholds.");
System.err.println("# --no_compaction do not compact the graph");
System.err.println("
System.err.println("# SNP handling modes:");
System.err.println("# --use-degenerate-code use degenerate DNA code ");
System.err.println("# (default: don't use degenerate DNA code).");
System.err.println("# --dont-collapse-snps don't collapse SNPs into a single letter ");
System.err.println("# (default: collapse SNPs into a single letter).");
System.err.println("
System.err.println("# Read-specific parameters:");
System.err.println("# --max_number_of_paths_per_node_init <int> maximum number of unique pairpaths that can begin at a given node. (default: " + MAX_NUM_PATHS_PER_NODE_INIT + ")");
System.err.println("# --MAKE_PE_SE convert split pair paths into separate single paths.");
System.err.println("# --NO_DP_READ_TO_VERTEX_ALIGN do not use DP alignment to align reads to vertex, use faster gap-free alignment");
System.err.println("# Butterfly Path extension reinforcement requirements");
System.err.println(" The following options are ordered by decreasing stringency.");
System.err.println("# --original_path_extension examines paths from nodes to sinks, can be very slow");
System.err.println("# /compatible_path_extension/ *DEFAULT MODE* read (pair) must be compatible and contain defined minimum extension support for path reinforcement.");
System.err.println("# --lenient_path_extension only the terminal node pair(v-u) require read support");
System.err.println("# --all_possible_paths all edges are traversed, regardless of long-range read path support");
System.err.println("# --CuffFly cufflinks style assembly of minimum paths.");
System.err.println("# --cuff_no_extend do not extend max-matching pairpaths");
System.err.println("# -R <int> minimum read support threshold. Default: 2");
System.err.println("# -O <int> path reinforcement 'backwards overlap' percent of -F. Default: (" + PATH_REINFORCEMENT_DISTANCE_PERCENT + ") Not used in --lenient_path_extension mode.");
System.err.println("
System.err.println("# --path_reinforcement_distance=<int> path reinforcement distance specified directly instead of computing it based on the -O value above.");
//System.err.println("# --triplet-lock lock in local (triplet) paths at nodes where read paths are supported. (increases stringency)");
//System.err.println("# --extended_triplet (implies --triplet-lock) where read paths extend beyond a triplet, require path compatibility with extended read path prefix");
//System.err.println("# --triplet_strict (implies --triplet-lock) break reconstructed paths at unsupported triplets");
System.err.println("# --max_number_of_paths_per_node_extend <int> maximum number of paths that can be extended from a given node. (default: " + MAX_NUM_PATHS_PER_NODE_EXTEND + ")");
System.err.println("# --READ_END_PATH_TRIM_LENGTH <int> min length of read terminus to extend into a graph node for it to be added to the pair path node sequence. (default: " + READ_END_PATH_TRIM_LENGTH + ")");
System.err.println("# --TREAT_PAIRS_AS_SINGLE ignores pairing info.");
System.err.println("
System.err.println("
System.err.println("# PasaFly related parameters:");
System.err.println("# --PasaFly use PASA-style pair path assembly");
System.err.println("# --FAST_PASA faster PASA by using a banded-DP strategy (experimental)");
System.err.println("
System.err.println("# Similar path reduction criteria:");
System.err.println("# --SW use Smith-Waterman local alignment mode (by default, uses Needleman-Wunsch global alignment)");
System.err.println("# --max_diffs_same_path=<int> max allowed differences encountered between path sequences to combine them. (default: " + MAX_DIFFS_SAME_PATH + ")");
System.err.println("# --min_per_id_same_path=<float> min percent identity for two paths to be merged into single paths (default: " + MIN_PERCENT_IDENTITY_SAME_PATH + ")");
System.err.println("# --max_internal_gap_same_path=<int> maximum number of internal consecutive gap characters allowed for paths to be merged into single paths. (default: " + MAX_INTERNAL_GAP_SAME_PATH);
System.err.println("# --max_seq_len_DP_align=<int> path regions to be aligned that are longer than this length use simple zipper alignment. (default: " + MAX_SEQ_LEN_DP_ALIGN + ")");
System.err.println("# --no_path_merging disable path comparisons and merging based on above settings.");
//System.err.println("# --no_remove_lower_ranked_paths iteratively rank paths by unique read content, discard paths without unique support");
System.err.println("# --NO_EM_REDUCE run expectation maximization scheme to rank transcripts, remove lower ranking transcripts that yield no unique read content.");
System.err.println("# --MIN_PCT_DOM_ISO_EXPR=<float> min percent expression of a dominantly expressed isoform for a gene. Default: " + MIN_RELATIVE_ISOFORM_EXPRESSION);
System.err.println("# --MIN_PCT_TOTAL_ISO_EXPR=<float> min percent expression of total expression for that gene (sum of isoforms). Default: " + MIN_TOTAL_ISOFORM_EXPRESSION);
System.err.println("
System.err.println("# Misc: ");
System.err.println("# --max_number_of_paths_per_node total number of paths allowed per node (default=" + MAX_NUM_PATHS_PER_NODE_INIT + ")");
System.err.println("# --generate-full-sequence-graphs generate full sequence dot files");
System.err.println("# (default: generate dot files with start and end of each seq).");
System.err.println("# --generate_intermediate_dot_files generate dot files after each step of butterfly");
System.err.println("# (default: generate only a the start and end dot files).");
System.err.println("# --stderr prints the output to STDERR ");
System.err.println("# --log_stderr writes status info to fiule COMPONENT_PREFIX.err ");
System.err.println("# -V <int> verbosity level ");
System.err.println("# (default: 10 - progress of method + some stats)");
System.err.println("# (15 - like (10) + final paths to be added + additional loop info and dot files)");
System.err.println("# (20 - maximum verbosity)");
System.err.println("
System.err.println("
System.err.println("");
System.exit(1);
}
if (USE_STDERR && BFLY_GLOBALS.VERBOSE_LEVEL < 5) {
BFLY_GLOBALS.VERBOSE_LEVEL = 5; // default verbose level for stderr
}
// set calculated vars:
if (PATH_REINFORCEMENT_DISTANCE > 0) {
debugMes("path reinforcement distance set manually to: " + PATH_REINFORCEMENT_DISTANCE, 5);
}
else {
PATH_REINFORCEMENT_DISTANCE = (int) (PATH_REINFORCEMENT_DISTANCE_PERCENT/100.0 * MAX_PAIR_DISTANCE);
debugMes("path reinforcement distance computed based on "
+ PATH_REINFORCEMENT_DISTANCE_PERCENT + "% of max pair distance: "+ MAX_PAIR_DISTANCE
+ " = " + PATH_REINFORCEMENT_DISTANCE + " bases", 5);
}
if (!COLLAPSE_SNPs && USE_DEGENERATE_CODE)
USE_DEGENERATE_CODE = false;
Vector<Integer> rootIDs = new Vector<Integer>();
HashMap<Integer,Integer> outFlow = new HashMap<Integer, Integer>();
HashMap<Integer,Integer> inFlow = new HashMap<Integer, Integer>();
HashMap<Integer,String> kmers = new HashMap<Integer, String>();
LONG_READ_PATH_MAP = new HashMap<PairPath, ArrayList<String>>();
LONG_READ_NAME_TO_PPath = new HashMap<String, PairPath>();
PrintStream pout_diff = null;
PrintStream pout_all = new PrintStream(new FileOutputStream(file+".allProbPaths.fasta"));
debugMes("SECTION\n================\nParsing de Bruijn graph\n======================\n", 5);
debugMes("preProcessGraphFile: " + file + ".out", 10);
preProcessGraphFile(file+".out",outFlow, inFlow, kmers);
debugMes("SECTION\n==================\nbuildNewGraph\n========================\n", 5);
debugMes("buildNewGraphFirstLetter: " + file + ".out", 10);
DirectedSparseGraph<SeqVertex, SimpleEdge> graph = buildNewGraphUseKmers(file+".out",rootIDs,outFlow,inFlow,kmers);
SeqVertex.set_kmer_length(KMER_SIZE);
SeqVertex.set_graph(graph);
LAST_REAL_ID = LAST_ID;
debugMes("Graph is built",10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
describeNodes(graph);
}
HashMap<String,Integer> originalGraphKmerToNodeID = new HashMap<String,Integer>();
for (SeqVertex sv : graph.getVertices()) {
String kmer = sv.getName();
int id = sv.getID();
originalGraphKmerToNodeID.put(kmer, id);
debugMes("ORIGINAL GRAPH NODE: " + kmer + " with ID: " + id, 20);
}
String[] tmpFile = file.split("/");
String graphName = tmpFile[tmpFile.length-1];
//boolean createMiddleDotFiles = false;
boolean createMiddleDotFiles = GENERATE_MIDDLE_DOT_FILES;
if (createMiddleDotFiles)
writeDotFile(graph,file + "_deBruijn.A.dot", graphName, false);
// remember the original edge weights so we can relabel them later on in the final graph according to orig ids.
HashMap<String,Double> original_edge_weights_using_orig_kmers = new HashMap<String,Double>();
for (SimpleEdge e : graph.getEdges()) {
String from_kmer = graph.getSource(e).getName();
String to_kmer = graph.getDest(e).getName();
original_edge_weights_using_orig_kmers.put(from_kmer + "_" + to_kmer, e.getWeight());
}
if (! NO_GRAPH_PRUNING) {
debugMes("fixExtremeleyHighSingleEdges()", 1);
fixExtremelyHighSingleEdges(graph,outFlow,inFlow);
debugMes("removeLightEdges()", 1);
removeLightEdges(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_removeLightEdges_init.B.dot",graphName, false);
}
if (! NO_GRAPH_COMPACTION) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## Node descriptions before linear compaction:", 20);
describeVertices(graph);
}
debugMes("compactLinearPaths()", 1);
compactLinearPaths(graph);
//removeShortOrphanNodes(graph, MIN_OUTPUT_SEQ); // do this later
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## Node descriptions after linear compaction:", 20);
describeVertices(graph);
}
My_DFS dfs = new My_DFS(graph);
dfs.runDFS2();
if (createMiddleDotFiles)
writeDotFile(graph,file + "_compactLinearPaths_init.C.dot",graphName, false);
}
/*
if (! NO_GRAPH_PRUNING) {
boolean keep_pruning_graph = true;
int prune_compact_round = 0;
while (keep_pruning_graph) {
prune_compact_round++;
keep_pruning_graph = removeLightEdges(graph);
compactLinearPaths(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_prune_compact_round_" + prune_compact_round + ".D.dot",graphName, false);
}
}
*/
if (COLLAPSE_SNPs) {
if (USE_DEGENERATE_CODE)
removeSingleNtBubblesWithDegenerateCode(graph); // still needs updating //FIXME
else
removeSingleNtBubbles(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_SNPs_removed.H.dot",graphName, false);
/*
if (! NO_GRAPH_COMPACTION) {
compactLinearPaths(graph);
}
if (createMiddleDotFiles)
writeDotFile(graph,file + "_SNPs_removed_post-collapse.H.dot",graphName, false);
*/
}
debugMes("SECTION\n====================\nRemoving small components.\n====================\n", 5);
//remove small components
calcSubComponentsStats(graph);
if (graph.getVertexCount() == 0) {
debugMes("Warning: graph pruned to nothingness", 1);
//Runtime.getRuntime().exec("mv " + bfly_start_indicator_file + " " + bfly_end_indicator_file);
System.exit(0);
}
if (createMiddleDotFiles)
writeDotFile(graph,file + "_compactLinearPaths_removeSmallComp.D.dot",graphName, false);
HashMap<Integer, LocInGraph> originalVerIDsMapping = getOriginalVerIDsMappingHash(graph);
int numXstructs = countNumOfXstructures(graph);
if (numXstructs>0)
debugMes("number X structures = "+numXstructs,10);
// Done Compacting graph.
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis = new DijkstraDistance<SeqVertex, SimpleEdge>(graph, true);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 19) {
debugMes("\nSECTION\n=================\nNode descriptions before threading.\n===================\n", 5);
describeNodes(graph);
}
debugMes("\nSECTION\n====================\nThreading reads through the graph\n=========================\n", 5);
// maps individual reads to paths within the graph
// readNameHash: "actual read name" => Read object (see Read class)
HashMap<String, List<Read>> readNameHash = getReadStarts(graph,file+".reads",originalVerIDsMapping,rootIDs, originalGraphKmerToNodeID);
debugMes("\nSECTION\n==================\nPairing up the reads into PairPaths\n===========================\n", 5);
// note the logic for not doing the paired linking is handled under getReadStarts by just
// not using the /1 or /2 value.
// pair up reads into PathPairs
// combinedReadHash: start_vertex => (pair_path => count)
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash = getSuffStats_wPairs(graph,readNameHash,dijkstraDis);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Printing Pair Paths Before DAG Overlap Layout
printPairPaths(combinedReadHash, "PairPaths@Init");
}
/// Moving from Collapsed de Bruijn Graph to an Overlap Graph 'seqvertex_graph'
debugMes("SECTION\n======== Create DAG from Overlap Layout ============\n\n", 5);
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph = new DirectedSparseGraph<SeqVertex, SimpleEdge>();
HashMap<Integer, HashMap<PairPath, Integer>> seqvertex_combinedReadHash = create_DAG_from_OverlapLayout(seqvertex_graph, combinedReadHash, file, graphName, createMiddleDotFiles);
My_DFS dfs = new My_DFS(seqvertex_graph);
dfs.runDFS2();
if (createMiddleDotFiles)
writeDotFile(seqvertex_graph,file + "_vertex_DAG_postOverlapLayout.dot",graphName, false);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Printing Pair Paths
printPairPaths(seqvertex_combinedReadHash, "PairPaths@PostOverlapLayout");
}
debugMes("SECTION\n======= Reorganize Read Pairings =========\n\n", 5);
dijkstraDis = new DijkstraDistance<SeqVertex, SimpleEdge>(seqvertex_graph, true);
seqvertex_combinedReadHash = reorganizeReadPairings(seqvertex_graph, seqvertex_combinedReadHash, dijkstraDis);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
printPairPaths(seqvertex_combinedReadHash, "PairPaths@AfterPairReorganization");
//start working on one sub component at a time:
// look for loops, try to solve them
// if loops remain, move on to the next subComp.
//removeShortOrphanNodes(graph, MIN_OUTPUT_SEQ);
/* old way... suffers from suboptimal setting of node depth
*
int count_pairpaths_removed = handleRemainingCyclicReads(componentReadHash, graph);
debugMes("Removed " + count_pairpaths_removed + " reads that appeared to retain complex cycles", 10);
*/
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("
report_pairpath_counts(componentReadHash);
}
reduce_to_max_paths_per_node(componentReadHash, TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_NODE_INIT);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("
report_pairpath_counts(componentReadHash);
}
// examine uncertainty of paths within the graph by looking at triplet support
debugMes("### Extracting triplets from reads.", 10);
HashMap<Integer, List<List<Integer>>> tripletMapper = extractTripletsFromReads(componentReadHash);
HashMap<Integer,Boolean> xStructuresResolvedByTriplets = getXstructuresResolvedByTriplets(seqvertex_graph, comp, tripletMapper);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
// describe the locked down nodes
debugMes("\n### " + tripletMapper.size() + " nodes have locked-in triplet paths:", 10);
for (Integer central_node : tripletMapper.keySet()) {
debugMes("Triplet locks for: " + central_node + " : " + tripletMapper.get(central_node), 10);
}
}
if (INFER_UNRESOLVED_XSTRUCTURE_PATHS) {
debugMes("## INFERRING UNRESOLVED X STRUCTURE PATHS ##", 10);
infer_best_triplets_across_unresolved_Xstructure(seqvertex_combinedReadHash, seqvertex_graph, xStructuresResolvedByTriplets, tripletMapper);
}
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all = null;
/*
if (false) { // just for debugging //FIXME: make a debug parameter
// just reconstruct paths based on the input reads
FinalPaths_all = reconstruct_paths_from_reads(graph, componentReadHash);
}
else if (false) {
// sort descendingly by length, and greedily assemble compatible paths.
FinalPaths_all = reconstruct_paths_from_collapsed_reads(graph, componentReadHash);
}
else if (false) {
examine_compatible_paths_debugging_only(graph, componentReadHash);
System.exit(1);
}
else if (false) {
examine_out_of_order_depth_in_read_paths(graph, componentReadHash);
System.exit(1);
}
*/
if (cufflinksOpt || pasaFlyOpt) {
/*
// methods are very sensitive to out-of-order node depths in read paths:
int num_fractured_paths = handleRemainingCyclicReads(componentReadHash, graph);
debugMes("Needed to fracture: " + num_fractured_paths + " pair paths due to out-of-order node depths", 10);
*/
if (cufflinksOpt) {
FinalPaths_all = cuffMinPaths(seqvertex_graph, componentReadHash,dijkstraDis);
}
else if (pasaFlyOpt) {
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
FinalPaths_all = pasafly(seqvertex_graph, componentReadHash,dijkstraDis, tripletMapper, extendedTripletMapper);
}
}
else if (pasaFlyUniqueOpt) {
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
FinalPaths_all = pasaflyunique(seqvertex_graph, componentReadHash,dijkstraDis, tripletMapper, extendedTripletMapper);
}
else {
// Regular butterfly all probable paths mode:
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 16) {
debugMes("\n
for (Integer term_node : extendedTripletMapper.keySet()) {
debugMes("Complex prefix paths for: " + term_node + " : " + extendedTripletMapper.get(term_node), 16);
}
}
addSandT(seqvertex_graph,comp,componentReadHash);
FinalPaths_all = butterfly(seqvertex_graph, comp, componentReadHash, totalNumReads,
pout_all, dijkstraDis, dijkstraDisWoVer,
tripletMapper, extendedTripletMapper, xStructuresResolvedByTriplets);
//pathName = get_pathName_string(path, graph);
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : FinalPaths_all.keySet()) {
debugMes("FinalPath@BeforeFiltering: " + path, 15);
}
}
// remove short paths
FinalPaths_all = remove_short_seqs(FinalPaths_all, seqvertex_graph);
if (FinalPaths_all.isEmpty()) {
debugMes("No paths to pursue. Continue...", 15);
continue;
}
int numXstructsResolved = countNumOfXstructuresResolved(seqvertex_graph,comp,FinalPaths_all);
if (numXstructs>0)
debugMes("number X structures resolved = "+numXstructsResolved + " / " + numXstructs,10);
debugMes("ReadMappings BEFORE Path-to-orig_ID conversion:", 20);
HashMap<List<Integer>,HashMap<PairPath,Integer>> finalPathsToContainedReads = assignCompatibleReadsToPaths(FinalPaths_all, componentReadHash);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
// verbose dump of read support
for (List<Integer> final_path : finalPathsToContainedReads.keySet()) {
HashMap<PairPath,Integer> contained_reads = finalPathsToContainedReads.get(final_path);
debugMes("PRELIM_FINAL_PATH:\n" + final_path + "\ncontains:", 20);
int sum_support = 0;
for (PairPath pp : contained_reads.keySet()) {
Integer read_support = contained_reads.get(pp);
debugMes(pp + "\tcount: " + read_support, 20);
sum_support += read_support;
}
debugMes("Total support: " + sum_support + "\n", 20);
}
}
// remove those paths that didn't have reads assigned:
{
Set<List<Integer>> paths_to_remove = new HashSet<List<Integer>>();
for (List<Integer> path : FinalPaths_all.keySet()) {
if (! finalPathsToContainedReads.containsKey(path)) {
debugMes("-removing final path that was not assigned read support: " + path, 10);
paths_to_remove.add(path);
}
}
for (List<Integer> path : paths_to_remove) {
FinalPaths_all.remove(path);
}
}
if ( BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## ILLUSTRATING FINAL ASSEMBLIES", 20);
illustrateFinalPaths(FinalPaths_all, finalPathsToContainedReads);
}
// convert graph node IDs back to the original collapsed de Bruijn graph:
debugMes("Converting graph node IDs back to original IDs.", 15);
HashMap<List<Integer>,HashMap<PairPath,Integer>> finalPathsToContainedReads_all_orig_ids = new HashMap<List<Integer>,HashMap<PairPath,Integer>>();
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all_orig_ids = convert_to_orig_ids(FinalPaths_all,
finalPathsToContainedReads, finalPathsToContainedReads_all_orig_ids);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
// verbose dump of read support
debugMes("** Post-original ID conversion, path support:", 20);
for (List<Integer> final_path : finalPathsToContainedReads_all_orig_ids.keySet()) {
HashMap<PairPath,Integer> contained_reads = finalPathsToContainedReads_all_orig_ids.get(final_path);
debugMes("PRELIM_FINAL_PATH:\n" + final_path + "\ncontains:", 20);
int sum_support = 0;
for (PairPath pp : contained_reads.keySet()) {
Integer read_support = contained_reads.get(pp);
debugMes(pp + "\tcount: " + read_support, 20);
sum_support += read_support;
}
debugMes("Total support: " + sum_support + "\n", 20);
}
}
if ( (! NO_PATH_MERGING) && FinalPaths_all_orig_ids.size() > 1) {
// do CDHIT-like removal of highly similar but lesser supported paths.
debugMes("SECTION\n========= CD-HIT -like Removal of Too-Similar Sequences with Lesser Read Support =========\n\n", 5);
// alignment-based removal of lesser-supported paths that are too similar in sequence.
FinalPaths_all_orig_ids = reduce_cdhit_like(FinalPaths_all_orig_ids, graph, finalPathsToContainedReads_all_orig_ids);
}
// collect all results so far. (yes, Final is not so Final after all ... revisit naming of vars)
FinalPaths_FinalCollection.putAll(FinalPaths_all_orig_ids);
FinalCollection_ContainedReads.putAll(finalPathsToContainedReads_all_orig_ids);
} // end of for each component
if ( (! NO_PATH_MERGING) && FinalPaths_FinalCollection.size() > 1) {
// do CDHIT-like removal of highly similar but lesser supported paths.
debugMes("SECTION\n========= CD-HIT -like Removal of Too-Similar Sequences with Lesser Read Support =========\n\n", 5);
// alignment-based removal of lesser-supported paths that are too similar in sequence.
FinalPaths_FinalCollection = reduce_cdhit_like(FinalPaths_FinalCollection, graph, FinalCollection_ContainedReads);
}
// Gene-level grouping of transcripts
HashMap<List<Integer>,Integer> separate_gene_ids = group_paths_into_genes(FinalPaths_FinalCollection, graph);
// Filtering out lower-quality paths
HashMap<List<Integer>, Pair<Integer>> filtered_paths_to_keep = new HashMap<List<Integer>,Pair<Integer>>();
debugMes("Sep Gene IDs:" + separate_gene_ids, 10);
if ( (! NO_EM_REDUCE) && FinalPaths_FinalCollection.size() > 1) {
HashMap<List<Integer>, Pair<Integer>> EM_reduced_paths = run_EM_REDUCE(FinalPaths_FinalCollection, graph, FinalCollection_ContainedReads, separate_gene_ids);
filtered_paths_to_keep.putAll(EM_reduced_paths);
}
// by default, running both lower ranking path removal and EM-reduction, and combining positively-filtered entries.
if (! filtered_paths_to_keep.isEmpty()) {
FinalPaths_FinalCollection = filtered_paths_to_keep;
}
String component_name = pathName[pathName.length-1];
if (FinalPaths_FinalCollection==null || FinalPaths_FinalCollection.size() == 0) {
debugMes("No Butterfly Assemblies to report", 10);
return;
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : FinalPaths_FinalCollection.keySet()) {
debugMes("FinalPath@AfterFiltering: " + path, 15);
}
}
// get long read content:
HashMap<List<Integer>,ArrayList<String>> final_paths_to_long_read_content = new HashMap<List<Integer>,ArrayList<String>>();
if (! LONG_READ_PATH_MAP.isEmpty()) {
assign_long_read_content_to_final_paths(FinalPaths_FinalCollection, FinalCollection_ContainedReads, LONG_READ_PATH_MAP, final_paths_to_long_read_content);
}
// Output the fasta sequences
printFinalPaths(FinalPaths_FinalCollection, graph, pout_all, component_name, totalNumReads,
final_paths_to_long_read_content, separate_gene_ids);
totalNumPaths = FinalPaths_FinalCollection.size();
removeAllEdgesOfSandT(graph);
pout_all.close();
if (FIND_ALSO_DIFF_PATHS)
pout_diff.close();
debugMes("total number of paths reported = "+totalNumPaths+" from "+totalNumSuccComps +" components",1);
debugMes("Done",10);
if (LOG_STDERR)
ERR_STREAM.close();
}
private static HashMap<List<Integer>, Pair<Integer>> convert_to_orig_ids(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads_all_orig_ids) {
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all_orig_ids = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> final_path : finalPaths_all.keySet()) {
List<Integer> revised_path_orig_ids = new ArrayList<Integer>();
for (Integer seq_vertex_id : final_path ) {
SeqVertex sv = SeqVertex.retrieveSeqVertexByID(seq_vertex_id);
Integer orig_id = sv.getOrigButterflyID();
revised_path_orig_ids.add(orig_id);
}
FinalPaths_all_orig_ids.put(revised_path_orig_ids, finalPaths_all.get(final_path));
debugMes("-final_path: " + final_path + " now set to: " + revised_path_orig_ids, 15);
debugMes("-and set to contents: " + finalPaths_all.get(final_path), 20);
HashMap<PairPath, Integer> contained_reads = finalPathsToContainedReads.get(final_path);
for (PairPath pp : contained_reads.keySet()) {
PairPath updated_pp = pp.setOrigIds();
Integer read_count = contained_reads.get(pp);
debugMes("pp: " + pp + ", updated_pp: " + updated_pp + ", count: " + read_count, 20);
if (finalPathsToContainedReads_all_orig_ids.containsKey(revised_path_orig_ids)) {
HashMap<PairPath, Integer> localContainedReads = finalPathsToContainedReads_all_orig_ids.get(revised_path_orig_ids);
if (localContainedReads.containsKey(updated_pp)) {
int prev_count = localContainedReads.get(updated_pp);
localContainedReads.put(updated_pp, prev_count + read_count);
}
else {
localContainedReads.put(updated_pp, read_count);
}
}
else {
HashMap<PairPath, Integer> localContainedReads = new HashMap<PairPath, Integer>();
localContainedReads.put(updated_pp, read_count);
finalPathsToContainedReads_all_orig_ids.put(revised_path_orig_ids, localContainedReads);
}
}
}
return FinalPaths_all_orig_ids;
}
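	/**
	 * For each final path, collects the names of any long reads whose PairPaths appear
	 * among that path's contained reads, populating final_paths_to_long_read_content.
	 * Paths lacking a contained-read entry are skipped (see FIXME below).
	 */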
private static void assign_long_read_content_to_final_paths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<PairPath, ArrayList<String>> LONG_READ_PATH_MAP_local,
HashMap<List<Integer>, ArrayList<String>> final_paths_to_long_read_content) {
for (List<Integer> path : finalPaths_all.keySet()) {
if (finalPathsToContainedReads.get(path) == null) {
//FIXME: why does this happen? very rare.
continue;
}
for (PairPath pp : finalPathsToContainedReads.get(path).keySet()) {
if (LONG_READ_PATH_MAP_local.containsKey(pp)) {
if (! final_paths_to_long_read_content.containsKey(path)) {
final_paths_to_long_read_content.put(path, new ArrayList<String>());
}
final_paths_to_long_read_content.get(path).addAll(LONG_READ_PATH_MAP_local.get(pp));
}
}
}
return;
}
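	/**
	 * Retains only those final paths whose reconstructed sequence length is at least
	 * MIN_OUTPUT_SEQ; shorter assemblies are dropped from the returned map.
	 */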
private static HashMap<List<Integer>, Pair<Integer>> remove_short_seqs(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
HashMap<List<Integer>, Pair<Integer>> long_enough_paths = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
String seq = getPathSeq(graph, path);
if (seq.length() >=MIN_OUTPUT_SEQ) {
// retain it:
long_enough_paths.put(path, finalPaths_all.get(path));
}
}
return(long_enough_paths);
}
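	/**
	 * EM-based reduction of candidate transcripts: ranks the final paths by estimated
	 * expression (PathExpressionComparator), removes paths whose relative expression
	 * falls below the minimum threshold via remove_lesser_supported_paths_EM(), and
	 * returns the retained paths in the original map format.
	 */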
public static HashMap<List<Integer>, Pair<Integer>> run_EM_REDUCE (HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<List<Integer>,
HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<List<Integer>,Integer> separate_gene_ids) {
debugMes("SECTION\n======
List<List<Integer>> all_paths = new ArrayList<List<Integer>>(finalPaths_all.keySet());
// need sequence lengths
HashMap<List<Integer>,Integer> seqLengths = new HashMap<List<Integer>,Integer>();
for (List<Integer> path : all_paths) {
String seq = getPathSeq(graph,path);
seqLengths.put(path, seq.length());
}
//List<List<Integer>> retained_paths = new ArrayList<List<Integer>>();
// sort paths by pair-path support descendingly
PathExpressionComparator pc = new PathExpressionComparator(all_paths, finalPathsToContainedReads, seqLengths);
Collections.sort(all_paths, pc);
Collections.reverse(all_paths); // now descending according to read support.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Expression values for each candidate path:", 15);
for (List<Integer> path : all_paths) {
double expr = pc.get_expr(path);
double sum_frag_counts = pc.get_transcript_to_sum_frag_counts(path);
debugMes("Expr=" + expr + ", sum_exp_frags=" + sum_frag_counts + ", path: " + path, 15);
}
}
// sort by expr, remove those w/ < 5% of expression of dominant isoform.
List<List<Integer>> all_paths_min_rel_expr = remove_lesser_supported_paths_EM(all_paths, finalPathsToContainedReads, graph, pc, separate_gene_ids);
// convert back to earlier-style data structure for compatibility
HashMap<List<Integer>, Pair<Integer>> final_paths_map = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : all_paths_min_rel_expr) {
final_paths_map.put(path, finalPaths_all.get(path));
debugMes("EM_REDUCE retaining: " + path, 15);
}
return (final_paths_map);
}
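	/**
	 * Overlap-layout style reconstruction from read paths: removes contained read paths,
	 * flags dispersed repeat nodes, builds a path overlap graph (POG), breaks any cycles
	 * in it, converts the resulting path DAG back into a SeqVertex DAG, and remaps all
	 * PairPaths onto the refined node IDs, returning the updated combinedReadHash.
	 */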
private static HashMap<Integer, HashMap<PairPath, Integer>> create_DAG_from_OverlapLayout(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
String dot_file_prefix,
String graphName, boolean createMiddleDotFiles) {
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
		Collections.reverse(paths); // want descending by path length
// remove the contained reads
//contained_path_to_containers: (key= the path contained, value = list of all other paths that fully contain it)
HashMap<List<Integer>,List<List<Integer>>> contained_path_to_containers = new HashMap<List<Integer>,List<List<Integer>>>();
List<List<Integer>> noncontained_paths = remove_containments(paths, contained_path_to_containers);
debugMes("Noncontained paths: " + noncontained_paths, 15);
// find dispersed repeats ////
HashSet<Integer> dispersed_repeat_nodes = find_dispersed_repeat_nodes(noncontained_paths);
// build the overlap graph
// build a graph of compatible paths.
List<Path> path_list = new ArrayList<Path>();
for (List<Integer> p : noncontained_paths) {
path_list.add(new Path(p));
}
HashMap<String,PathOverlap> pathMatches = new HashMap<String,PathOverlap>();
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph = construct_path_overlap_graph(path_list, pathMatches,
dispersed_repeat_nodes, dot_file_prefix,
graphName, createMiddleDotFiles);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.dot", graphName);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
// output the path node listing
for (Path p : path_overlap_graph.getVertices()) {
debugMes("PathNodeDescription: " + p, 15);
}
}
// add read pairing information to the graph:
HashSet<SimplePathNodeEdge> pair_links = addPairPathsToOverlapGraph(path_overlap_graph, pairPathToReadSupport, contained_path_to_containers);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.PE_links_added.dot", graphName);
// Breaking cycles
int cycle_round = 0;
boolean breaking_cycles = true;
while (breaking_cycles) {
cycle_round++;
debugMes("// Breaking cycles in Path Overlap Graph (POG), Round: " + cycle_round, 10);
breaking_cycles = break_cycles_in_path_overlap_graph(path_overlap_graph);
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.cyclesRemoved.r" + cycle_round + ".dot", graphName);
}
// remove the pair_link edges before converting the overlap graph to a seq vertex graph, since
// edges in the overlap graph are intended to represent overlaps.
for (SimplePathNodeEdge spne : pair_links) {
if (path_overlap_graph.containsEdge(spne)) {
path_overlap_graph.removeEdge(spne);
}
}
// Convert the path DAG to a seq vertex DAG
HashMap<Path,PathWithOrig> orig_path_to_updated_path = convert_path_DAG_to_SeqVertex_DAG(path_overlap_graph,
pathMatches, seqvertex_graph, dot_file_prefix, graphName, createMiddleDotFiles);
// note, path_overlap_graph includes non-contained paths
// pairPathToReadSupport contains all paths
combinedReadHash = update_PairPaths_using_overlapDAG_refined_paths(orig_path_to_updated_path, pairPathToReadSupport, contained_path_to_containers);
return(combinedReadHash);
}
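	/**
	 * Intended to add pairing edges between path nodes whose member reads are mate pairs.
	 * Note: currently short-circuited (returns an empty set before adding any edges), so
	 * paired-end linking of the overlap graph is disabled.
	 */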
private static HashSet<SimplePathNodeEdge> addPairPathsToOverlapGraph(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
Map<PairPath, Integer> pairPathToReadSupport,
HashMap<List<Integer>, List<List<Integer>>> contained_path_to_containers) {
// data structure conversion
HashMap<List<Integer>,Path> list_to_path_hmap = new HashMap<List<Integer>,Path>();
for (Path p : path_overlap_graph.getVertices()) {
List<Integer> node_id_list = p.get_vertex_list();
list_to_path_hmap.put(node_id_list, p);
}
HashSet<SimplePathNodeEdge> pair_link_edges = new HashSet<SimplePathNodeEdge> ();
		if (true)
			return(pair_link_edges); // NOTE: PE pair-link edges are currently disabled via this early return; code below retained. //////////// DEBUG ////////////
// add pair links
for (PairPath pp : pairPathToReadSupport.keySet()) {
if (pp.hasSecondPath()) {
List<Integer> p1 = pp.getPath1();
List<Integer> p2 = pp.getPath2();
debugMes("# PE edges in overlap graph, targeting: " + p1 + " to " + p2, 15);
List<Path> p1_path_list = new ArrayList<Path>();
List<Path> p2_path_list = new ArrayList<Path>();
if (list_to_path_hmap.containsKey(p1) && list_to_path_hmap.containsKey(p2)) {
// add pairing edge between existing path nodes.
p1_path_list.add(list_to_path_hmap.get(p1));
p2_path_list.add(list_to_path_hmap.get(p2));
}
/*
else if (list_to_path_hmap.containsKey(p1)) {
p1_path_list.add(list_to_path_hmap.get(p1));
// get containment list for p2, add edges from p1 -> p2
List<List<Integer>> p2_contained_list = contained_path_to_containers.get(p2);
for (List<Integer> p2_container : p2_contained_list) {
Path p2_container_path = list_to_path_hmap.get(p2_container);
p2_path_list.add(p2_container_path);
}
}
else if (list_to_path_hmap.containsKey(p2)) {
p2_path_list.add(list_to_path_hmap.get(p2));
// get containment list for p1, and edges from p1-> p2
List<List<Integer>> p1_contained_list = contained_path_to_containers.get(p1);
for (List<Integer> p1_container : p1_contained_list) {
Path p1_container_path = list_to_path_hmap.get(p1_container);
p1_path_list.add(p1_container_path);
}
}
*/
int MAX_PAIR_LINKS = 3;
if (p1_path_list.size() < MAX_PAIR_LINKS && p2_path_list.size() < MAX_PAIR_LINKS) {
for (Path p1_path_node : p1_path_list) {
for (Path p2_path_node : p2_path_list) {
						// add edge to graph if it doesn't already exist:
if (path_overlap_graph.findEdge(p1_path_node, p2_path_node) == null) {
SimplePathNodeEdge spne = new SimplePathNodeEdge(1, p1_path_node.getPathNodeID(), p2_path_node.getPathNodeID());
path_overlap_graph.addEdge(spne, p1_path_node, p2_path_node);
debugMes("-adding PE read edge between: " + p1_path_node.getPathNodeID() + " and " + p2_path_node.getPathNodeID(), 15);
pair_link_edges.add(spne);
}
}
}
}
} // endif pp.hasSecondPath()
}
return(pair_link_edges);
}
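	/**
	 * Counts, for each node, how many read paths it occurs in, and returns the nodes
	 * occurring in at least MIN_OCCURRENCE_REPEAT_NODE paths as candidate dispersed
	 * repeat nodes.
	 */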
private static HashSet<Integer> find_dispersed_repeat_nodes(
List<List<Integer>> paths) {
HashMap<Integer,Integer> node_in_path_counter = new HashMap<Integer,Integer>();
for (List<Integer> path : paths) {
HashSet<Integer> node_found = new HashSet<Integer>();
for (Integer node_id : path) {
node_found.add(node_id);
}
Iterator<Integer> it = node_found.iterator();
while (it.hasNext()) {
Integer node_id = it.next();
if (node_in_path_counter.containsKey(node_id)) {
node_in_path_counter.put(node_id, node_in_path_counter.get(node_id)+1);
}
else {
node_in_path_counter.put(node_id, 1);
}
}
}
List<Integer> node_ids = new ArrayList<Integer>(node_in_path_counter.keySet());
final HashMap<Integer,Integer> node_counter = node_in_path_counter;
Collections.sort(node_ids, new Comparator<Integer>() {
public int compare (Integer a, Integer b) {
if (node_counter.get(a) < node_counter.get(b)) {
return(1);
}
else if (node_counter.get(a) > node_counter.get(b)) {
return(-1);
}
else {
return(0);
}
}
});
// pull out the repetitive ones
int MIN_OCCURRENCE_REPEAT_NODE = 10;
HashSet<Integer> repeat_nodes = new HashSet<Integer>();
for (Integer node_id : node_ids) {
int repeat_count = node_counter.get(node_id);
debugMes("Node[" + node_id + "] has repeat count: " + repeat_count, 15);
if (repeat_count >= MIN_OCCURRENCE_REPEAT_NODE) {
repeat_nodes.add(node_id);
}
}
return(repeat_nodes);
}
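	/**
	 * Translates every PairPath from the original node IDs to the refined node IDs produced
	 * by the overlap-DAG construction. Pairs that map uniquely on both ends are kept as pairs;
	 * ambiguously mapping ends are added as individual unpaired paths. Returns a
	 * combinedReadHash rebuilt from the updated PairPaths.
	 */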
private static HashMap<Integer, HashMap<PairPath, Integer>> update_PairPaths_using_overlapDAG_refined_paths(
HashMap<Path, PathWithOrig> orig_path_to_updated_path,
Map<PairPath, Integer> pairPathToReadSupport,
HashMap<List<Integer>, List<List<Integer>>> contained_path_to_containers) {
// get the old-to-new listing in List<Integer> format for use with PairPath objects
HashMap<List<Integer>,List<Integer>> old_to_new_path = new HashMap<List<Integer>,List<Integer>>();
for (Path orig_path : orig_path_to_updated_path.keySet()) {
List<Integer> orig_path_list = orig_path.get_vertex_list();
List<Integer> updated_path_list = orig_path_to_updated_path.get(orig_path).getVertexList();
old_to_new_path.put(orig_path_list, updated_path_list);
}
debugMes("Old-to-new-path mappings: " + old_to_new_path, 15);
// get list of all old/new path pairs
List<PathWithOrig> revised_paths = new ArrayList<PathWithOrig>(orig_path_to_updated_path.values());
// now, create new pair paths based on updated mappings.
HashMap<PairPath,Integer> updated_pairPaths = new HashMap<PairPath,Integer>();
HashMap<PairPath,PairPath> old_pp_to_new_pp = new HashMap<PairPath,PairPath>();
for (PairPath pp : pairPathToReadSupport.keySet()) {
Integer read_support = pairPathToReadSupport.get(pp);
debugMes("update_PairPaths_using_overlapDAG_refined_paths: orig_pp: " + pp + " has support: " + read_support, 20);
PairPath new_pp;
List<List<Integer>> p1_list = new ArrayList<List<Integer>>();
List<Integer> p1 = pp.getPath1();
if (old_to_new_path.containsKey(p1)) {
p1_list.add(old_to_new_path.get(p1));
}
else {
// might not be a unique path!! (eg. single original nodes now ending up in multiple places)
p1_list = get_all_possible_updated_path_mappings(p1, revised_paths);
debugMes("update_PairPaths_using_overlapDAG_refined_paths, p1: " + p1 + " mapped to: " + p1_list, 20);
}
List<List<Integer>> p2_list = new ArrayList<List<Integer>>();
if (pp.hasSecondPath()) {
List<Integer> p2 = pp.getPath2();
if (old_to_new_path.containsKey(p2)) {
p2 = old_to_new_path.get(p2);
p2_list.add(p2);
}
else {
p2_list = get_all_possible_updated_path_mappings(p2, revised_paths);
}
// create new pair lists
// restrict pair paths to those where each path maps uniquely
if (p1_list.size() == 1 && p2_list.size() == 1) {
List<Integer> p1_path = p1_list.get(0);
List<Integer> p2_path = p2_list.get(0);
new_pp = new PairPath(p1_path, p2_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp); // FIXME: need to allow for multiple mappings here wrt long reads
}
else {
				// add each path separately if not already seen
				// (check containment using the PairPath key, not the raw node list)
				for (List<Integer> p1_path : p1_list) {
					new_pp = new PairPath(p1_path);
					if (! updated_pairPaths.containsKey(new_pp)) {
						updated_pairPaths.put(new_pp, 1);
					}
				}
				for (List<Integer> p2_path : p2_list) {
					new_pp = new PairPath(p2_path);
					if (! updated_pairPaths.containsKey(new_pp)) {
						updated_pairPaths.put(new_pp, 1);
					}
				}
}
/* orig
for (List<Integer> p1_path : p1_list) {
for (List<Integer> p2_path : p2_list) {
new_pp = new PairPath(p1_path, p2_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp); // FIXME: need to allow for multiple mappings here wrt long reads
}
}
*/
}
else {
// only individual paths
for (List<Integer>p1_path : p1_list) {
new_pp = new PairPath(p1_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp);
}
}
}
//update_long_read_path_mappings(old_pp_to_new_pp);
HashMap<Integer, HashMap<PairPath, Integer>> new_combinedReadHash = construct_combinedReadhHash_from_PairPath_list(updated_pairPaths);
return(new_combinedReadHash);
}
private static void update_long_read_path_mappings(
HashMap<PairPath, PairPath> old_pp_to_new_pp) {
debugMes("LONG_READ_PATH_MAP is:" + LONG_READ_PATH_MAP, 10);
debugMes("LONG_READ_NAME_TO_PPath is : " + LONG_READ_NAME_TO_PPath, 10);
HashMap<PairPath,ArrayList<String>> updated_LONG_READ_PATH_MAP = new HashMap<PairPath,ArrayList<String>>(); // PairPath => ArrayList(long_reads_names)
HashMap<String, PairPath> updated_LONG_READ_NAME_TO_PPath = new HashMap<String,PairPath>(); // string => PairPath
for (String long_read_name : LONG_READ_NAME_TO_PPath.keySet()) {
PairPath pp = LONG_READ_NAME_TO_PPath.get(long_read_name);
PairPath updated_pp = old_pp_to_new_pp.get(pp);
updated_LONG_READ_NAME_TO_PPath.put(long_read_name, updated_pp);
if (! updated_LONG_READ_PATH_MAP.containsKey(updated_pp)) {
updated_LONG_READ_PATH_MAP.put(updated_pp, new ArrayList<String>());
}
updated_LONG_READ_PATH_MAP.get(updated_pp).add(long_read_name);
}
// replace old versions with updated versions.
LONG_READ_PATH_MAP = updated_LONG_READ_PATH_MAP;
LONG_READ_NAME_TO_PPath = updated_LONG_READ_NAME_TO_PPath;
debugMes("LONG_READ_PATH_MAP updated to:" + updated_LONG_READ_PATH_MAP, 10);
debugMes("LONG_READ_NAME_TO_PPath updated to : " + updated_LONG_READ_NAME_TO_PPath, 10);
return;
}
private static List<Integer> update_path_mappings(List<Integer> p1,
List<PathWithOrig> revised_paths) {
PathWithOrig pwo_needs_updating = new PathWithOrig(p1);
for (PathWithOrig pwo : revised_paths) {
PathWithOrig updated_pwo = pwo_needs_updating.align_path_by_orig_id(pwo);
if (updated_pwo != null) {
return(updated_pwo.getVertexList());
}
}
throw new RuntimeException("Unable to remap read: " + p1 + " given: " + revised_paths);
}
private static List<List<Integer>> get_all_possible_updated_path_mappings(
List<Integer> p1,
List<PathWithOrig> revised_paths) {
List<List<Integer>> all_path_mappings = new ArrayList<List<Integer>>();
PathWithOrig pwo_needs_updating = new PathWithOrig(p1);
for (PathWithOrig pwo : revised_paths) {
PathWithOrig updated_pwo = pwo_needs_updating.align_path_by_orig_id(pwo);
if (updated_pwo != null) {
List<Integer> updated_path = updated_pwo.getVertexList();
if (! all_path_mappings.contains(updated_path)) {
all_path_mappings.add(updated_path);
}
}
}
if (all_path_mappings.isEmpty()) {
throw new RuntimeException("Unable to remap read: " + p1 + " given: " + revised_paths);
}
else {
return(all_path_mappings);
}
}
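	/**
	 * Expands each path node of the path-overlap DAG into a chain of freshly numbered
	 * SeqVertex copies, wires overlapping path ends together via a DFS from the root path
	 * nodes, then iteratively "zips" duplicate vertices (zip_up / zip_down) until no further
	 * merges occur, verifying after each round that the SeqVertex graph remains acyclic.
	 * Returns the mapping from each original path to its updated vertex list.
	 */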
private static HashMap<Path,PathWithOrig> convert_path_DAG_to_SeqVertex_DAG(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<String, PathOverlap> pathMatches,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
String dot_file_prefix,
String graphName,
boolean createMiddleDotFiles) {
debugMes("SECTION\n======== Convert Path-DAG to SeqVertex-DAG ============\n\n", 5);
// init seqvertex graph to contain all nodes from expanded paths.
HashMap<Path,List<SeqVertex>> orig_path_to_SeqVertex_list = new HashMap<Path,List<SeqVertex>>();
HashMap<Path,PathWithOrig> orig_path_to_updated_path = new HashMap<Path,PathWithOrig>();
for (Path p : path_overlap_graph.getVertices()) {
List<Integer> node_id_list = p.get_vertex_list();
List<SeqVertex> vertex_listing = new ArrayList<SeqVertex>();
List<Integer> new_node_id_list = new ArrayList<Integer>();
for (Integer node_id : node_id_list) {
SeqVertex orig_vertex = SeqVertex.retrieveSeqVertexByID(node_id);
Integer new_v_id = getNextID();
SeqVertex new_v = new SeqVertex(new_v_id, orig_vertex);
vertex_listing.add(new_v);
seqvertex_graph.addVertex(new_v);
new_node_id_list.add(new_v_id);
}
orig_path_to_SeqVertex_list.put(p, vertex_listing);
PathWithOrig new_pwo = new PathWithOrig(p.getPathNodeID(), new_node_id_list, p.get_vertex_list());
debugMes("prep_for_DAG_collapse: " + new_pwo, 15);
orig_path_to_updated_path.put(p, new_pwo);
// add edges between the vertices
for (int i = 1; i < vertex_listing.size(); i++) {
SeqVertex prev_v = vertex_listing.get(i-1);
SeqVertex next_v = vertex_listing.get(i);
SimpleEdge se = new SimpleEdge(1, prev_v.getID(), next_v.getID());
seqvertex_graph.addEdge(se, prev_v, next_v);
}
}
// do a DFS-based graph reconstruction starting from a root node.
SeqVertex.set_graph(seqvertex_graph);
HashSet<Path> visited = new HashSet<Path>();
for (Path p : path_overlap_graph.getVertices()) {
if (path_overlap_graph.getPredecessorCount(p) == 0) {
// root node.
DFS_add_path_to_graph(p, seqvertex_graph, path_overlap_graph, pathMatches,
orig_path_to_SeqVertex_list, visited);
}
}
// before zippingUp
if (createMiddleDotFiles)
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_before_zippingUpSeqVertexGraph.dot", graphName, false);
} catch (Exception e) {
			// dot file output is diagnostic only; log and continue
e.printStackTrace();
}
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
// before zipping, after topo sort
if (createMiddleDotFiles)
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_before_zippingUpSeqVertexGraph.TopoSort.dot", graphName, false);
} catch (Exception e) {
			// dot file output is diagnostic only; log and continue
e.printStackTrace();
}
int zip_round = 0;
int sum_merged = 1;
while (sum_merged > 0) {
sum_merged = 0;
int count_zip_up_merged_in_round = 1;
while (count_zip_up_merged_in_round > 0) {
zip_round++;
debugMes("\n\n## Round: " + zip_round + " Zipping up.", 10);
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
init_replacement_vertices(seqvertex_graph);
// ensure DAG
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
count_zip_up_merged_in_round = zipper_collapse_DAG_zip_up(seqvertex_graph);
sum_merged += count_zip_up_merged_in_round;
debugMes("Zip up merged: " + count_zip_up_merged_in_round + " nodes.", 10);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles) {
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_zip_round_" + zip_round + "_zip_up.dot", graphName, false);
} catch (Exception e) {
					// dot file output is diagnostic only; log and continue
e.printStackTrace();
}
}
}
int count_zip_down_merged_in_round = 1;
while (count_zip_down_merged_in_round > 0) {
zip_round++;
debugMes("\n\n## Round: " + zip_round + " Zipping down.", 10);
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
init_replacement_vertices(seqvertex_graph);
// ensure DAG
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
count_zip_down_merged_in_round = zipper_collapse_DAG_zip_down(seqvertex_graph);
sum_merged += count_zip_down_merged_in_round;
debugMes("Zip down merged: " + count_zip_down_merged_in_round + " nodes.", 10);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles) {
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_zip_round_" + zip_round + "_zip_down.dot", graphName, false);
} catch (Exception e) {
					// dot file output is diagnostic only; log and continue
e.printStackTrace();
}
}
}
}
// DESTROY UNZIPPED DUP NODES
destroy_unzipped_duplicates_above(seqvertex_graph);
// test again. :)
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
// ensure DAG one last time
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
// update the paths based on their new vertices.
// get old to new vertex id mapping
HashMap<Integer,Integer> old_vertex_id_to_new_vertex_id = new HashMap<Integer,Integer>();
for (SeqVertex v : topo_sorted_vertices) {
Integer curr_vertex_id = v.getID();
if (v.__tmp_compressed_vertices.size() > 0) {
for (Integer old_vertex : v.__tmp_compressed_vertices) {
old_vertex_id_to_new_vertex_id.put(old_vertex, curr_vertex_id);
debugMes("Old_to_new_vertex_id_mapping: " + old_vertex + " => " + curr_vertex_id, 15);
}
}
else {
old_vertex_id_to_new_vertex_id.put(curr_vertex_id, curr_vertex_id);
debugMes("Old_to_new_vertex_id_mapping: " + curr_vertex_id + " => " + curr_vertex_id + " (stays same)", 15);
}
}
// update the old paths to the new paths
for (PathWithOrig pwo : orig_path_to_updated_path.values()) {
List<Integer> old_path = pwo.getVertexList();
List<Integer> new_path = new ArrayList<Integer>();
for (Integer id : old_path) {
if (old_vertex_id_to_new_vertex_id.containsKey(id)) {
Integer new_id = old_vertex_id_to_new_vertex_id.get(id);
new_path.add(new_id);
}
else {
throw new RuntimeException("Error, no new_id mapped from: " + id + ", in path: " + pwo);
}
}
pwo.update_vertex_list(new_path);
}
return(orig_path_to_updated_path);
}
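	/**
	 * Removes leftover duplicate root vertices: if a parentless vertex v shares its original
	 * Butterfly ID with another predecessor O of the same child, v is deleted and recorded as
	 * compressed into O so that subsequent path remapping still resolves.
	 */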
private static void destroy_unzipped_duplicates_above(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
debugMes("destroy_unzipped_duplicates_above()", 15);
// # O and v have the same orig ID, but v has no parents
// O v O
// # remove v here.
List<SeqVertex> start_vertices = new ArrayList<SeqVertex>();
for (SeqVertex v : seqvertex_graph.getVertices()) {
if (seqvertex_graph.getPredecessorCount(v) == 0) {
start_vertices.add(v);
}
}
if (start_vertices.size() == 0) {
return;
}
// check each C for the O
HashSet<SimpleEdge> edges_to_delete = new HashSet<SimpleEdge>();
HashSet<SeqVertex> vertices_to_delete = new HashSet<SeqVertex>();
for (SeqVertex v : start_vertices) {
SeqVertex target_merge_vertex = null;
for (SeqVertex c : seqvertex_graph.getSuccessors(v)) {
for (SeqVertex O : seqvertex_graph.getPredecessors(c)) {
if (v != O
&&
O.getOrigButterflyID() == v.getOrigButterflyID()
&&
seqvertex_graph.getPredecessorCount(O) > 0
) {
target_merge_vertex = O;
debugMes("-targeting " + v + " for deletion, replacing with proxy: " + O, 15);
SimpleEdge se = seqvertex_graph.findEdge(v, c);
edges_to_delete.add(se);
break;
}
}
if (target_merge_vertex != null) { break; }
}
if (target_merge_vertex != null) {
// do the merging
vertices_to_delete.add(v);
target_merge_vertex.__tmp_compressed_vertices.addElement(v.getID());
target_merge_vertex.__tmp_compressed_vertices.addElement(target_merge_vertex.getID()); //FIXME: make a new node instead of reusing existing node
if (v.__tmp_compressed_vertices.size() > 0) {
target_merge_vertex.__tmp_compressed_vertices.addAll(v.__tmp_compressed_vertices);
}
}
}
// remove targeted edges and vertices
for (SimpleEdge se : edges_to_delete) {
seqvertex_graph.removeEdge(se);
debugMes("-removing edge from graph: " + se, 15);
}
for (SeqVertex v : vertices_to_delete) {
debugMes("-removing vertex from graph: " + v, 15);
seqvertex_graph.removeVertex(v);
}
}
private static void init_replacement_vertices(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
for (SeqVertex v : seqvertex_graph.getVertices()) {
v.is_replacement_vertex = false;
}
return;
}
private static int zipper_collapse_DAG_zip_up(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
int count_total_zip_merged = 0;
// do bottom-up Zipping /////
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
Collections.reverse(topo_sorted_vertices);
for (SeqVertex v : topo_sorted_vertices) {
if (v.is_replacement_vertex) { continue; }
if (! seqvertex_graph.containsVertex(v)) { continue; }
count_total_zip_merged += zip_up(seqvertex_graph, v);
}
return(count_total_zip_merged);
}
private static int zipper_collapse_DAG_zip_down(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
int count_total_zip_merged = 0;
// do top-down zipping /////////////
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
for (SeqVertex v : topo_sorted_vertices) {
if (v.is_replacement_vertex) { continue; }
if (! seqvertex_graph.containsVertex(v)) { continue; }
count_total_zip_merged += zip_down(seqvertex_graph, v);
}
return(count_total_zip_merged);
}
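	/**
	 * Upward zip step for a single vertex: groups its predecessors by original Butterfly ID
	 * and attempts to merge each group of duplicates into one node. Returns the number of
	 * vertices merged.
	 */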
private static int zip_up(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
SeqVertex v) {
List<SeqVertex> pred_list = new ArrayList<SeqVertex>(seqvertex_graph.getPredecessors(v));
if (pred_list.size() <= 1) { return (0); } // must have multiple parents
debugMes("## zip_up()", 15);
// get list of parent nodes having the same original ID
HashMap<Integer,HashSet<SeqVertex>> pred_orig_id_to_vertex_list = new HashMap<Integer,HashSet<SeqVertex>>();
for (SeqVertex pred : pred_list) {
if (pred.is_replacement_vertex) { return(0); } // delay to next round.
if (! seqvertex_graph.containsVertex(pred)) { continue; }
Integer orig_pred_id = pred.getOrigButterflyID();
if (! pred_orig_id_to_vertex_list.containsKey(orig_pred_id)) {
pred_orig_id_to_vertex_list.put(orig_pred_id, new HashSet<SeqVertex>());
}
pred_orig_id_to_vertex_list.get(orig_pred_id).add(pred);
}
int count_zip_merged = 0;
for (HashSet<SeqVertex> pred_same_orig_id_set : pred_orig_id_to_vertex_list.values()) {
if (pred_same_orig_id_set.size() == 1) { continue; } // need multiple parents for merging
// merge them into a single node.
count_zip_merged += attempt_zip_merge_SeqVertices(pred_same_orig_id_set, seqvertex_graph, "min");
}
return(count_zip_merged);
}
private static int zip_down (
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
SeqVertex v) {
List<SeqVertex> child_list = new ArrayList<SeqVertex>(seqvertex_graph.getSuccessors(v));
		if (child_list.size() <= 1) { return (0); } // must have multiple children
debugMes("##zip_down()", 15);
// get list of children nodes having the same original ID
HashMap<Integer,HashSet<SeqVertex>> child_orig_id_to_vertex_list = new HashMap<Integer,HashSet<SeqVertex>>();
for (SeqVertex child : child_list) {
if (child.is_replacement_vertex) { return(0); } // delay to next round
if (! seqvertex_graph.containsVertex(child) ) { continue; }
Integer orig_child_id = child.getOrigButterflyID();
if (! child_orig_id_to_vertex_list.containsKey(orig_child_id)) {
child_orig_id_to_vertex_list.put(orig_child_id, new HashSet<SeqVertex>());
}
child_orig_id_to_vertex_list.get(orig_child_id).add(child);
}
int count_zip_merged = 0;
for (HashSet<SeqVertex> child_same_orig_id_set : child_orig_id_to_vertex_list.values()) {
			if (child_same_orig_id_set.size() == 1) { continue; } // need multiple children for merging
// merge them into a single node.
count_zip_merged += attempt_zip_merge_SeqVertices(child_same_orig_id_set, seqvertex_graph, "max");
}
return(count_zip_merged);
}
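	/**
	 * Merges a set of vertices sharing the same original Butterfly ID into a single replacement
	 * vertex, rewiring parent and child edges, provided the merge does not violate the relative
	 * depth ordering of the DAG (all parent depths must be less than all child depths). The
	 * replacement vertex records the IDs it absorbed in __tmp_compressed_vertices. Returns the
	 * number of vertices merged, or 0 if the merge was deferred or disallowed.
	 */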
private static int attempt_zip_merge_SeqVertices(HashSet<SeqVertex> pred_same_orig_id_set,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph, String dir) {
debugMes("attempt_zip_merge_SeqVertices(" + pred_same_orig_id_set + ")", 15);
Integer replacement_vertex_id = getNextID();
SeqVertex replacement_vertex_obj = null;
// get list of all parents and all children of the merge-node-targets
HashSet<SeqVertex> parent_vertices = new HashSet<SeqVertex>();
HashSet<SeqVertex> child_vertices = new HashSet<SeqVertex>();
HashSet<SimpleEdge> edges_to_delete = new HashSet<SimpleEdge>();
// track depths, must ensure we keep the relative ordering in the DAG
List<Integer> parent_depths = new ArrayList<Integer>();
List<Integer> child_depths = new ArrayList<Integer>();
List<Integer> target_depths = new ArrayList<Integer>();
for (SeqVertex v : pred_same_orig_id_set) {
int d = v.getNodeDepth();
if (d < 0) {
throw new RuntimeException("Error, seq vertex: " + v + " has negative depth setting");
}
target_depths.add(d);
debugMes("\tvertex: " + v + ", with depth: " + d, 25);
for (SeqVertex p : seqvertex_graph.getPredecessors(v)) {
if (p.is_replacement_vertex) { return (0); } // delay till next round
parent_vertices.add(p);
parent_depths.add(p.getNodeDepth());
debugMes("\t\tparent of v: " + v + " = " + p + " with depth: " + p.getNodeDepth(), 25);
// remove edge
SimpleEdge se = seqvertex_graph.findEdge(p, v);
edges_to_delete.add(se);
}
for (SeqVertex c: seqvertex_graph.getSuccessors(v)) {
if (c.is_replacement_vertex) { return (0); } // delay till next round
child_vertices.add(c);
child_depths.add(c.getNodeDepth());
// remove edge
SimpleEdge se = seqvertex_graph.findEdge(v, c);
edges_to_delete.add(se);
}
if (replacement_vertex_obj == null) {
replacement_vertex_obj = new SeqVertex(replacement_vertex_id, v);
}
}
if (parent_depths.size() > 0 && child_depths.size() > 0) {
debugMes("\tparent_depths" + parent_depths + ", child_depths: " + child_depths, 20);
// ensure can merge and retain depth ordering:
if ( ! (max_val(parent_depths) < min_val(child_depths) ) )
{
// cannot merge, since doing so would disrupt relative ordering of nodes
return(0);
}
}
// remove the graph edges:
for (SimpleEdge se : edges_to_delete) {
seqvertex_graph.removeEdge(se);
}
// remove the nodes themselves
List<Integer> merged_vertex_ids = new ArrayList<Integer>();
for (SeqVertex v : pred_same_orig_id_set) {
merged_vertex_ids.add(v.getID());
if (v.__tmp_compressed_vertices.size() > 0) {
merged_vertex_ids.addAll(v.__tmp_compressed_vertices);
}
seqvertex_graph.removeVertex(v);
}
// add new edges to parents
for (SeqVertex p : parent_vertices) {
SimpleEdge se = new SimpleEdge(1, p.getID(), replacement_vertex_obj.getID());
seqvertex_graph.addEdge(se, p, replacement_vertex_obj);
}
// add new edges to children
for (SeqVertex c : child_vertices) {
SimpleEdge se = new SimpleEdge(1, replacement_vertex_obj.getID(), c.getID());
seqvertex_graph.addEdge(se, replacement_vertex_obj, c);
}
Integer replacement_vertex_depth = (dir.equals("min")) ? min_val(target_depths) : max_val(target_depths);
String zipDir = (dir.equals("min")) ? "Up" : "Down";
replacement_vertex_obj.setDepth(replacement_vertex_depth);
replacement_vertex_obj.setNodeDepth(replacement_vertex_depth);
replacement_vertex_obj.is_replacement_vertex = true;
// fix local environment for this round
for (SeqVertex p : parent_vertices) {
p.is_replacement_vertex = true;
}
for (SeqVertex c : child_vertices) {
c.is_replacement_vertex = true;
}
replacement_vertex_obj.__tmp_compressed_vertices.addAll(merged_vertex_ids);
debugMes(zipDir + "ZipMerging nodes: " + pred_same_orig_id_set + " to " + replacement_vertex_obj, 15);
int count_merged = pred_same_orig_id_set.size();
return(count_merged);
}
private static int max_val(List<Integer> vals) {
Integer max_val = null;
for (Integer val : vals) {
if (max_val == null || val > max_val) {
max_val = val;
}
}
return(max_val);
}
private static int min_val(List<Integer> vals) {
Integer min_val = null;
for (Integer val : vals) {
if (min_val == null || val < min_val) {
min_val = val;
}
}
return(min_val);
}
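	/**
	 * Depth-first traversal of the path-overlap graph that connects the SeqVertex chains of
	 * adjacent path nodes: for each successor path, edges are added from the overlapping tail
	 * vertices of the current path's chain to the matching positions near the start of the
	 * successor's chain, then recursion continues from that successor.
	 */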
private static void DFS_add_path_to_graph(Path p,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<String, PathOverlap> pathMatches,
HashMap<Path, List<SeqVertex>> orig_path_to_SeqVertex_list,
HashSet<Path> visited) {
if (visited.contains(p)) {
// already done
return;
}
debugMes("\nDFS_path_to_graph: targeting: " + p, 15);
visited.add(p);
// Phase 1. find candidate adjacent paths for use in labeling nodes in this path.
List<Path> adjacent_untraversed_pathnodes = new ArrayList<Path>(); // for later, deciding next DFS entries
for (Path succ : path_overlap_graph.getSuccessors(p)) {
String pair_token = get_path_compare_token(p, succ);
PathOverlap po = pathMatches.get(pair_token);
int match_len = po.match_length;
// draw edge between curr last node and next node in the successor path
List<SeqVertex> curr_vertex_list = orig_path_to_SeqVertex_list.get(p);
List<SeqVertex> succ_vertex_list = orig_path_to_SeqVertex_list.get(succ);
boolean connect_all_matching_positions = true;
if (connect_all_matching_positions) {
for (int i = curr_vertex_list.size() - match_len, j = 0;
i < curr_vertex_list.size() && j < match_len;
i++,j++) {
SeqVertex curr_vertex = curr_vertex_list.get(i);
SeqVertex succ_vertex = succ_vertex_list.get(j+1);
SimpleEdge se = new SimpleEdge(1, curr_vertex.getID(), succ_vertex.getID());
seqvertex_graph.addEdge(se, curr_vertex, succ_vertex);
}
}
else {
// just the last one
SeqVertex curr_vertex = curr_vertex_list.get(curr_vertex_list.size()-1);
SeqVertex succ_vertex = succ_vertex_list.get(match_len); // linking up the prev to next+1
SimpleEdge se = new SimpleEdge(1, curr_vertex.getID(), succ_vertex.getID());
seqvertex_graph.addEdge(se, curr_vertex, succ_vertex);
}
DFS_add_path_to_graph(succ,
seqvertex_graph,
path_overlap_graph,
pathMatches,
orig_path_to_SeqVertex_list,
visited);
}
return;
}
/* orig code
private static void DFS_add_path_to_graph(Path p,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<Path, PathWithOrig> orig_path_to_updated_path,
HashMap<String, PathOverlap> pathMatches) {
debugMes("\nDFS_path_to_graph: targeting: " + p, 15);
//////////////////////////////////////////////////////////////////////////////////
// Phase 1. find candidate adjacent paths for use in labeling nodes in this path.
List<Path> adjacent_untraversed_pathnodes = new ArrayList<Path>(); // for later, deciding next DFS entries
// find a predecessor or successor that has the greatest overlap
// and is already part of the new graph
PathWithOrig best_predecessor_path = null;
PathOverlap best_predecessor_overlap = null;
for (Path pred : path_overlap_graph.getPredecessors(p)) {
String pair_token = get_path_compare_token(pred, p);
PathOverlap po = pathMatches.get(pair_token);
if (orig_path_to_updated_path.containsKey(pred)) {
// candidate for use as template for node assignment.
if (best_predecessor_overlap == null || best_predecessor_overlap.match_score < po.match_score) {
best_predecessor_overlap = po;
best_predecessor_path = orig_path_to_updated_path.get(pred);
}
}
else {
pred._tmp_score = po.match_score;
adjacent_untraversed_pathnodes.add(pred);
}
}
PathWithOrig best_successor_path = null;
PathOverlap best_successor_overlap = null;
for (Path succ : path_overlap_graph.getSuccessors(p)) {
String pair_token = get_path_compare_token(p, succ);
PathOverlap po = pathMatches.get(pair_token);
if (orig_path_to_updated_path.containsKey(succ)) {
// candidate for use as template for node assignment
if (best_successor_overlap == null || best_successor_overlap.match_score < po.match_score) {
best_successor_overlap = po;
best_successor_path = orig_path_to_updated_path.get(succ);
}
}
else {
succ._tmp_score = po.match_score;
adjacent_untraversed_pathnodes.add(succ);
}
}
///////////////////////////////////////////////////////////////////////
// Phase 2: Refine labeling
// init the new path:
List<Integer> new_path = new ArrayList<Integer>();
for (Integer i : p.get_vertex_list()) {
new_path.add(-1);
}
PathWithOrig new_pwo = new PathWithOrig(p.getPathNodeID(), new_path, p.get_vertex_list());
if (best_predecessor_path == null && best_successor_path == null) {
debugMes("-dfs_msg: no best predecessor or successor path, so adding orig path from scratch.", 15);
// start building the graph here.
List<Integer> updated_path = new ArrayList<Integer>();
SeqVertex prev_vertex = null;
for (Integer orig_node_id : new_pwo.getOrigVertexList()) {
SeqVertex v = SeqVertex.retrieveSeqVertexByID(orig_node_id);
Integer next_v_id = getNextID();
SeqVertex new_v = new SeqVertex(next_v_id,v);
seqvertex_graph.addVertex(new_v);
updated_path.add(next_v_id);
if (prev_vertex != null) {
// add new edge
SimpleEdge se = new SimpleEdge(1, prev_vertex.getID(), next_v_id);
seqvertex_graph.addEdge(se, prev_vertex, new_v);
}
prev_vertex = new_v;
}
new_pwo.update_vertex_list(updated_path);
}
else {
List<Integer> updated_path = new_pwo.getVertexList(); // original vertex list
// update nodes based on best matching predecessor
if (best_predecessor_path != null) {
debugMes("-dfs_msg: updating path " + p + " based on best predecessor: " + best_predecessor_path, 15);
List<Integer> predecessor_node_ids = best_predecessor_path.getVertexList();
for (int i = 0, j = predecessor_node_ids.size() - best_predecessor_overlap.match_length;
i < best_predecessor_overlap.match_length && j < predecessor_node_ids.size();
i++, j++) {
if (updated_path.get(i) != -1 && updated_path.get(i) != predecessor_node_ids.get(j)) {
throw new RuntimeException("conflict in path assignments: " + updated_path + ", " + best_predecessor_path);
}
updated_path.set(i, predecessor_node_ids.get(j));
}
}
// update nodes based on best matching successor
if (best_successor_path != null) {
debugMes("-dfs_msg: updating path " + p + " based on best successor: " + best_successor_path, 15);
List<Integer> successor_node_ids = best_successor_path.getVertexList();
for (int i = updated_path.size() - best_successor_overlap.match_length, j = 0;
j < best_successor_overlap.match_length && i < updated_path.size();
i++, j++) {
if (updated_path.get(i) != -1 && updated_path.get(i) != successor_node_ids.get(j)) {
throw new RuntimeException("conflict in path assignments: " + updated_path + "," + best_successor_path);
}
updated_path.set(i, successor_node_ids.get(j));
}
}
// add new nodes and edges for those that are path-specific here.
for (int i = 0; i < updated_path.size(); i++) {
List<Integer> orig_path = new_pwo.getOrigVertexList();
if (updated_path.get(i) == -1) {
// need new node:
SeqVertex orig_v = SeqVertex.retrieveSeqVertexByID(orig_path.get(i));
Integer new_node_id = getNextID();
SeqVertex new_v = new SeqVertex(new_node_id, orig_v);
seqvertex_graph.addVertex(new_v);
updated_path.set(i, new_node_id);
}
}
// ensure edges exist among this node set:
for (int i = 1; i < updated_path.size(); i++) {
SeqVertex prev_vert = SeqVertex.retrieveSeqVertexByID(updated_path.get(i-1));
SeqVertex curr_vert = SeqVertex.retrieveSeqVertexByID(updated_path.get(i));
SimpleEdge se = seqvertex_graph.findEdge(prev_vert, curr_vert);
if (se == null) {
se = new SimpleEdge(1, prev_vert.getID(), curr_vert.getID());
seqvertex_graph.addEdge(se, prev_vert, curr_vert);
}
}
}
orig_path_to_updated_path.put(p, new_pwo);
debugMes("-dfs_msg: newly added path is: " + new_pwo, 15);
////////////////////////////////////////////////////////////////
// phase 3: DFS to next best overlapping adjacent edge.
// get list of all edges not yet traversed
// sort by match score
// DFS them in order of match score
Collections.sort(adjacent_untraversed_pathnodes, new Comparator<Path>() {
public int compare (Path a, Path b) {
if (a._tmp_score < b._tmp_score) {
return(1);
}
else if (a._tmp_score > b._tmp_score) {
return(-1);
}
else {
return(0);
}
}
});
for (Path next_p : adjacent_untraversed_pathnodes) {
if (! orig_path_to_updated_path.containsKey(next_p)) {
DFS_add_path_to_graph(next_p, seqvertex_graph, path_overlap_graph, orig_path_to_updated_path, pathMatches);
}
}
return;
}
*/
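	/**
	 * Detects cycles in the path-overlap graph (via Dijkstra reachability checks) and greedily
	 * removes the edges involved in the most loops until no loops remain. Returns true if any
	 * edge was removed.
	 */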
private static boolean break_cycles_in_path_overlap_graph(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph) {
DijkstraShortestPath<Path, SimplePathNodeEdge> dp = new DijkstraShortestPath<Path, SimplePathNodeEdge>(path_overlap_graph);
Set<Set<SimplePathNodeEdge>> curLoops = new HashSet<Set<SimplePathNodeEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (Path p : path_overlap_graph.getVertices()) {
for (Path s : path_overlap_graph.getSuccessors(p))
{
if (dp.getDistance(s, p)!=null) // there is a connection between p->s->....->p
{
					// loopPath has all edges from p back to itself through s
List<SimplePathNodeEdge> loopPath = dp.getPath(s, p);
					// s is a successor of p, so add the p->s edge too to complete the full loop.
loopPath.add(0, path_overlap_graph.findEdge(p, s));
// Collect the loop edge set.
Set<SimplePathNodeEdge> loopPath_set = new HashSet<SimplePathNodeEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("Found loop: " + loopPath_set, 15);
}
}
}
}
if (curLoops.isEmpty())
return false; // no cycles to break
// process found loops
Set<SimplePathNodeEdge> allRelevantEdges = new HashSet<SimplePathNodeEdge>();
for (Set<SimplePathNodeEdge> loopPath_set : curLoops)
for (SimplePathNodeEdge e : loopPath_set)
{
e.increaseNumOfLoopsBy1();
allRelevantEdges.add(e);
}
// break complex loops
boolean res = false;
if (!allRelevantEdges.isEmpty()){
Comparator<SimplePathNodeEdge> numLoopsComparator = new NumPathNodeLoopsEdgeComparator();
PriorityQueue<SimplePathNodeEdge> edgesQ = new PriorityQueue<SimplePathNodeEdge>(allRelevantEdges.size(), numLoopsComparator);
edgesQ.addAll(allRelevantEdges);
//while there are still loops
// find the next edge that can be removed to reduce the number of loops
// updated queue: remove all edges, and update their loop content
SimplePathNodeEdge nextEtoRemove;
while ( (!curLoops.isEmpty()) && (! edgesQ.isEmpty()) )
{
//FIXME: there was a situation where curLoops was not empty,
// but edgesQ was, so I added edgesQ to the while condition. Investigate why this might happen.
// In this case, a node was involved in a self loop and a double-loop.
nextEtoRemove = edgesQ.poll();
if (path_overlap_graph.getSource(nextEtoRemove) == null
|| path_overlap_graph.getDest(nextEtoRemove) == null
|| nextEtoRemove.getNumOfLoopsInvolved() <= 0) {
continue;
}
debugMes("removing the edge " + path_overlap_graph.getSource(nextEtoRemove).getPathNodeID() + "->" +
path_overlap_graph.getDest(nextEtoRemove).getPathNodeID() + " that appears in "
+nextEtoRemove.getNumOfLoopsInvolved() + " loops",15);
// remove the loops that have this edge from curLoops
Set<Set<SimplePathNodeEdge>> removeLoops = new HashSet<Set<SimplePathNodeEdge>>();
for (Set<SimplePathNodeEdge> loopPath_set : curLoops)
if (loopPath_set.contains(nextEtoRemove))
{
debugMes("the loop "+ loopPath_set+" is now solved",15);
removeLoops.add(loopPath_set);
// update the number of loops involved in each edge
for (SimplePathNodeEdge e : loopPath_set)
e.decreaseNumOfLoopsBy1();
}
for (Set<SimplePathNodeEdge> loopPath_set : removeLoops)
curLoops.remove(loopPath_set);
//update the queue. remove all, and insert again if numLoops>0.
				SimplePathNodeEdge[] relEdges = edgesQ.toArray(new SimplePathNodeEdge[0]);
edgesQ.clear();
for (SimplePathNodeEdge otherE : relEdges)
if (otherE.getNumOfLoopsInvolved()>0)
edgesQ.add(otherE);
// remove this edge
path_overlap_graph.removeEdge(nextEtoRemove);
res = true;
}
}
return res;
}
private static void writeDotFile(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
String output_filename, String graphName) {
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(output_filename));
p.println("digraph G {");
Path toVertex;
			// write out each vertex and its outgoing edges
for (Path vertex : path_overlap_graph.getVertices())
{ //go over all vertices
String verDesc = ""+vertex.getPathNodeID() +" [label=\"" + vertex.getPathNodeID() + "\"]";
p.println("\t" + verDesc);
for (SimplePathNodeEdge edge : path_overlap_graph.getOutEdges(vertex)) //get all edges of vertex->?
{
toVertex = path_overlap_graph.getDest(edge);
p.println("\t" + vertex.getPathNodeID() + "->" + toVertex.getPathNodeID());
}
}
p.println("}");
p.close();
} catch (FileNotFoundException e) {
			// unable to create the dot file; log and continue
e.printStackTrace();
}
}
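	/**
	 * Builds the path-overlap graph: each non-contained read path becomes a vertex, and a
	 * directed edge is added from path J to path I whenever I extends J to the right
	 * (repeat nodes allowed), with the overlap stored in pathMatches keyed by the
	 * "J;I" pair token.
	 */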
private static DirectedSparseGraph<Path, SimplePathNodeEdge> construct_path_overlap_graph(
List<Path> path_list, HashMap<String, PathOverlap> pathMatches, HashSet<Integer> dispersed_repeat_nodes, String dot_file_prefix, String graphName, boolean createMiddleDotFiles) {
// draw an edge between each pathNode B and the pathNode A to which B has a best-matching extension to the right.
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph = new DirectedSparseGraph<Path, SimplePathNodeEdge>();
for (Path p : path_list) {
path_overlap_graph.addVertex(p);
}
// identify repeat nodes.
HashSet<Integer> repeat_node_ids = new HashSet<Integer>();
if (! dispersed_repeat_nodes.isEmpty()) {
repeat_node_ids.addAll(dispersed_repeat_nodes);
}
for (Path path : path_list) {
HashMap<Integer,Integer> repeat_nodes_and_counts = Path.getRepeatNodesAndCounts(path.get_vertex_list());
for (Integer i : repeat_nodes_and_counts.keySet()) {
repeat_node_ids.add(i);
}
}
boolean store_best_extension_match_only = false;
for (int i = 0; i < path_list.size(); i++) {
int best_match = 0;
int best_matching_path_idx = -1;
List<Integer> best_precursor_j_indices = new ArrayList<Integer>();
for (int j = 0; j < path_list.size(); j++) {
if (i==j) {continue;}
PathOverlap path_overlap = Path.pathB_extends_pathA_allowRepeats(path_list.get(i).get_vertex_list(),
path_list.get(j).get_vertex_list(),
repeat_node_ids);
int extension_matches = path_overlap.match_score;
if (extension_matches <= 0) {
continue;
}
// i extends j
// got a match.
String path_pair_token = get_path_compare_token(path_list.get(j), path_list.get(i));
pathMatches.put(path_pair_token, path_overlap);
debugMes("PathNode Overlap Detected: [overlap: " + path_overlap.match_length + "] "
+ path_list.get(j) + " extended by " + path_list.get(i), 15);
if (! store_best_extension_match_only) {
// add edge
best_precursor_j_indices.add(j);
}
else {
// examine for best extension
if (extension_matches > best_match) {
best_match = extension_matches;
best_matching_path_idx = j;
best_precursor_j_indices.clear();
best_precursor_j_indices.add(j);
}
else if (extension_matches == best_match) {
best_precursor_j_indices.add(j);
}
}
}
// add edges between overlapping and compatible paths:
if (best_precursor_j_indices.size() > 0) {
for (Integer precursor_index : best_precursor_j_indices) {
String path_pair_token = get_path_compare_token(path_list.get(precursor_index), path_list.get(i));
PathOverlap po = pathMatches.get(path_pair_token);
debugMes("extension of: " + path_list.get(precursor_index) + " by " + path_list.get(i)
+ " has " + po.match_score + " terminal matches.", 15);
// i extends j
SimplePathNodeEdge spne = new SimplePathNodeEdge(po.match_score,
path_list.get(precursor_index).getPathNodeID(),
path_list.get(i).getPathNodeID());
path_overlap_graph.addEdge(spne, path_list.get(precursor_index), path_list.get(i));
}
}
else {
debugMes("path " + path_list.get(i) + " extends no path", 15);
}
}
return(path_overlap_graph);
}
private static String get_path_compare_token(Path pathA, Path pathB) {
String token = pathA.getPathNodeID() + ";" + pathB.getPathNodeID();
return(token);
}
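	/**
	 * Given paths sorted by descending length, returns only those not fully contained in an
	 * already-retained path; containments are recorded in contained_path_to_containers
	 * (contained path -> list of containing paths).
	 */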
private static List<List<Integer>> remove_containments(
List<List<Integer>> paths, HashMap<List<Integer>,
List<List<Integer>>> contained_path_to_containers) {
// paths should already be sorted by descending length
List<List<Integer>> noncontained_paths = new ArrayList<List<Integer>>();
for (List<Integer> path : paths) {
boolean contained = false;
for (List<Integer> chosen_path : noncontained_paths) {
if (Path.pathA_contains_pathB_allowRepeats(chosen_path, path)) {
contained = true;
// store containment info
if (! contained_path_to_containers.containsKey(path)) {
contained_path_to_containers.put(path, new ArrayList<List<Integer>>());
}
contained_path_to_containers.get(path).add(chosen_path);
}
}
if (! contained) {
noncontained_paths.add(path);
}
}
return(noncontained_paths);
}
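	/**
	 * Rebuilds a new graph from the read paths (longest first), adding each path's edges only
	 * if they do not introduce a cycle, and returns the resulting acyclic graph.
	 */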
private static DirectedSparseGraph<SeqVertex, SimpleEdge> construct_acyclic_graph(
DirectedSparseGraph<SeqVertex, SimpleEdge> orig_graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
		Collections.reverse(paths); // want descending by path length
DirectedSparseGraph<SeqVertex, SimpleEdge> new_graph =
new DirectedSparseGraph<SeqVertex,SimpleEdge>();
List<List<Integer>> cycle_inducing_paths = new ArrayList<List<Integer>>();
HashSet<SimpleEdge> cycle_inducing_edges = new HashSet<SimpleEdge>();
//DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(orig_graph);
for (List<Integer> path : paths) {
add_path_to_graph_disallow_cycles(orig_graph, new_graph, path, cycle_inducing_paths, cycle_inducing_edges);
}
debugMes("\n\nAll loop-inducing edges are: " + cycle_inducing_edges + "\n\ncontained in loop-inducing paths: " + cycle_inducing_paths, 10);
return(new_graph);
}
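	/**
	 * Adds one read path to the new graph, inserting vertices as needed and adding each edge
	 * only if a reachability check shows it would not close a cycle; offending edges and paths
	 * are recorded in the supplied collections.
	 */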
private static void add_path_to_graph_disallow_cycles(
DirectedSparseGraph<SeqVertex, SimpleEdge> orig_graph,
DirectedSparseGraph<SeqVertex, SimpleEdge> new_graph,
List<Integer> path,
List<List<Integer>> cycle_inducing_paths,
HashSet<SimpleEdge> cycle_inducing_edges) {
debugMes("-adding path to new graph: " + path, 10);
if (path.size() == 1) {
SeqVertex v = SeqVertex.retrieveSeqVertexByID(path.get(0));
new_graph.addVertex(v);
}
boolean cycle_inducing_path = false;
for (int i = 1; i < path.size(); i++) {
SeqVertex prev_node = SeqVertex.retrieveSeqVertexByID(path.get(i-1));
SeqVertex next_node = SeqVertex.retrieveSeqVertexByID(path.get(i));
boolean both_nodes_already_exist_in_graph = true;
if (! new_graph.containsVertex(prev_node)) {
new_graph.addVertex(prev_node);
both_nodes_already_exist_in_graph = false;
}
if (! new_graph.containsVertex(next_node)) {
new_graph.addVertex(next_node);
both_nodes_already_exist_in_graph = false;
}
boolean add_edge = false;
SimpleEdge se = orig_graph.findEdge(prev_node, next_node);
if (both_nodes_already_exist_in_graph) {
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(new_graph);
if (dp.getDistance(next_node, prev_node) != null) {
// adding an edge between prev->next node would create a cycle!!!
cycle_inducing_path = true;
cycle_inducing_edges.add(se);
debugMes("\t** cycle-inducing edge found: " + prev_node + " to " + next_node, 10);
}
else {
// add edge to graph.
add_edge = true;
}
}
else {
add_edge = true;
}
if (add_edge) {
new_graph.addEdge(se, prev_node, next_node);
}
}
if (cycle_inducing_path) {
cycle_inducing_paths.add(path);
debugMes("\t$$ cycle inducing path: " + path, 15);
}
}
private static boolean graph_contains_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(graph);
// These should be only those repeats that aren't evident in the individual read paths,
// since the read-evident repeats were unrolled earlier.
Set<Set<SimpleEdge>> curLoops = new HashSet<Set<SimpleEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (SeqVertex v : graph.getVertices())
{
for (SeqVertex v2 : graph.getSuccessors(v))
{
if (dp.getDistance(v2, v)!=null) // there is a connection between v->v2->... ->v
{
//path has all edges from v to itself thru v2
List<SimpleEdge> loopPath = dp.getPath(v2, v);
// v2 is successor of v, so let's just add the v->v2 edge too, complete the full loop.
loopPath.add(0, graph.findEdge(v, v2));
// capture the path IDs for debugMes reporting below.
List<Integer> pathIDs = new ArrayList<Integer>();
for (SimpleEdge e : loopPath)
pathIDs.add(graph.getDest(e).getID());
// Collect the loop edge set.
Set<SimpleEdge> loopPath_set = new HashSet<SimpleEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("adding the loop path "+pathIDs+" to the curLoops",12);
}else
{
debugMes("not adding the loop path "+pathIDs+" to the curLoops",12);
}
}
}
}
if (curLoops.isEmpty()) {
return(false);
}
else {
return(true);
}
}
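	/**
	 * Attempts to restructure each repeat-containing PairPath against the longest compatible
	 * read path (templates examined longest first) and rebuilds the combined read hash from
	 * the restructured PairPaths. Note: currently throws if a PairPath cannot be restructured
	 * against any template (see the sanity check below).
	 */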
private static HashMap<Integer, HashMap<PairPath, Integer>> reassign_read_paths_according_to_longer_path_compatibility(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
debugMes("\nSECTION\n========= Ressigning Repeat-containing Read Paths Based On Longer Path Compatibility ==========\n\n",5);
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
		Collections.reverse(paths); // want descending by path length
// convert to list of PathWithOrig
List<PathWithOrig> path_with_orig_list = new ArrayList<PathWithOrig>();
for (List<Integer> path : paths) {
path_with_orig_list.add(new PathWithOrig(path));
}
// iterate through pair paths and see if they require reassignment.
HashMap<PairPath,Integer> updated_pairpath_hmap = new HashMap<PairPath,Integer>();
for (PairPath pp : pairPaths) {
Integer read_support = pairPathToReadSupport.get(pp);
PairPathWOrig ppwo = new PairPathWOrig(pp);
boolean restructured_flag = false;
for (PathWithOrig template_pwo : path_with_orig_list) {
if (template_pwo.size() < ppwo.size()){
break;
}
PairPathWOrig ppwo_restructured = ppwo.restructure_according_to_repeat_path(template_pwo);
if (ppwo_restructured != null) {
updated_pairpath_hmap.put(ppwo_restructured.getPairPath(), read_support);
if (! ppwo_restructured.equals(ppwo)) {
debugMes("PPWO restructured from: " + ppwo + " to " + ppwo_restructured, 15);
}
restructured_flag = true;
break;
}
}
			if (! restructured_flag) {
				// stick with the original one.
				// Failure to restructure against any template (including the pair path's own entries in
				// path_with_orig_list) is treated as a fatal error; the if(true) guard keeps the fallback
				// statement below compilable even though it is currently unreachable.
				if (true) {
					throw new RuntimeException("error, not restructured: " + path_with_orig_list + " and target: " + ppwo);
				}
				updated_pairpath_hmap.put(pp, read_support);
			}
}
HashMap<Integer, HashMap<PairPath, Integer>> new_combined_read_hash = construct_combinedReadhHash_from_PairPath_list(updated_pairpath_hmap);
return(new_combined_read_hash);
}
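	/**
	 * Rebuilds the (first node ID -> (PairPath -> read support)) structure from a flat
	 * PairPath -> read support map.
	 */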
private static HashMap<Integer, HashMap<PairPath, Integer>> construct_combinedReadhHash_from_PairPath_list(
HashMap<PairPath, Integer> pairpath_hmap) {
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash = new HashMap<Integer, HashMap<PairPath, Integer>>();
for (PairPath pp : pairpath_hmap.keySet()) {
Integer read_support = pairpath_hmap.get(pp);
Integer first_id = pp.getFirstID();
HashMap<PairPath,Integer> pp_map;
if (combinedReadHash.containsKey(first_id)) {
pp_map = combinedReadHash.get(first_id);
}
else {
pp_map = new HashMap<PairPath,Integer>();
combinedReadHash.put(first_id, pp_map);
}
pp_map.put(pp, read_support);
}
return(combinedReadHash);
}
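	/**
	 * Debugging aid: walks every read path and reports those whose node depths decrease or that revisit a
	 * node, i.e. paths that conflict with the current DAG ordering, along with summary counts.
	 */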
private static void examine_out_of_order_depth_in_read_paths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
debugMes("\n\nSECTION\n==== examining node depths of read paths in DAG ======\n\n", 5);
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
int count_reads_ok = 0;
int count_reads_conflict = 0;
for (List<Integer> path : paths) {
List<List<Integer>> read_parts = new ArrayList<List<Integer>>();
List<Integer> part = new ArrayList<Integer>();
List<List<Integer>> node_depths_tracker = new ArrayList<List<Integer>>();
ArrayList<Integer> node_depths_list = new ArrayList<Integer>();
Iterator<Integer> it = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (it.hasNext()) {
Integer node_id = it.next();
SeqVertex v = getSeqVertex(graph, node_id);
if (v._node_depth < prev_depth || node_visitor.containsKey(node_id)) {
// problem...
// fracture here.
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
}
node_depths_list.add(v._node_depth);
prev_depth = v._node_depth;
node_visitor.put(node_id, true);
part.add(node_id);
}
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
node_depths_tracker.add(node_depths_list);
if (read_parts.size() > 1) {
debugMes("DAG-conflicting path: " + path + " with node_depths: " + node_depths_tracker + " into " + read_parts.size() + ": " + read_parts, 10);
count_reads_conflict++;
}
else {
count_reads_ok++;
}
}
debugMes("\n\nNum reads ok: " + count_reads_ok + "\nNum reads conflicted depths: " + count_reads_conflict, 10);
}
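	/**
	 * Debugging aid: reports read paths that are not compatible with any of the NUM_TOP_LONGEST_PATHS
	 * longest read paths.
	 */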
private static void examine_compatible_paths_debugging_only(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
// this method is only for debugging purposes.
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
List<List<Integer>> longest_paths = new ArrayList<List<Integer>>();
int NUM_TOP_LONGEST_PATHS = 2; // change to the number of longest paths to compare to.
int counter = 0;
for (List<Integer> path : paths) {
counter++;
if (counter <= NUM_TOP_LONGEST_PATHS) {
longest_paths.add(path);
}
else {
boolean compatible = false;
for (List<Integer> longer_path : longest_paths) {
if (PairPath.individual_paths_are_compatible(path, longer_path)) {
compatible = true;
break;
}
}
if (! compatible) {
debugMes("LongPathIncompat: " + path, 10);
}
}
}
}
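	/**
	 * Builds candidate transcripts by collapsing the set of compatible read paths into a minimal covering
	 * set of paths (used when testing unpaired-mode reconstruction).
	 */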
private static HashMap<List<Integer>, Pair<Integer>> reconstruct_paths_from_collapsed_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
List<List<Integer>> collapsed_paths = Path.collapse_compatible_paths_to_min_set(paths);
for (List<Integer> path : collapsed_paths) {
// only doing pp path1 since testing for cufflinks here in unpaired mode.
			transcripts.put(path, new Pair<Integer>(1, 1));
}
return(transcripts);
}
private static HashMap<List<Integer>, Pair<Integer>> reconstruct_paths_from_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
for (PairPath pp : pairPaths) {
// only doing pp path1 since testing for cufflinks here in unpaired mode.
			transcripts.put(pp.getPath1(), new Pair<Integer>(1, 1));
}
return(transcripts);
}
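	/**
	 * Iteratively ranks the final paths by how much not-yet-claimed read content each one explains,
	 * selecting the top path per round and marking its reads as used. Note: as currently written, the
	 * method ultimately retains every input path; the ranking is computed and logged but not used to
	 * drop any path.
	 */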
private static HashMap<List<Integer>, Pair<Integer>> remove_lower_ranked_paths_without_unique_read_content(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads) {
HashMap<PairPath,Boolean> pp_used = new HashMap<PairPath,Boolean>();
HashSet<List<Integer>> all_paths = new HashSet<List<Integer>>();
// init the list of candidate paths
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
paths.add(path);
all_paths.add(path);
}
HashMap<List<Integer>,Integer> seqLengthMap = paths_to_seq_lengths(graph, all_paths);
HashSet<List<Integer>> priority_paths_with_unique_read_content = new HashSet<List<Integer>>();
int round = 0;
while (! paths.isEmpty()) {
round++;
UniquePathContentComparator unique_path_content_comparator = new UniquePathContentComparator(paths,
pp_used, finalPathsToContainedReads,
seqLengthMap);
paths = unique_path_content_comparator.remove_paths_without_unique_read_content(paths);
if (! paths.isEmpty()) {
Collections.sort(paths, unique_path_content_comparator);
Collections.reverse(paths);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Round[" + round + "] Paths Prioritized by Unique Read Content", 15);
for (List<Integer> path : paths) {
debugMes("Round[" + round + "] Unique=" + unique_path_content_comparator.unique_count(path) + ", path: " + path, 15);
}
					debugMes("Round[" + round + "] SELECTING Unique=" + unique_path_content_comparator.unique_count(paths.get(0))
							+ ", path: " + paths.get(0) + "\n\n", 15);
}
List<Integer> p = paths.remove(0);
priority_paths_with_unique_read_content.add(p);
// mark contained paths as seen
for (PairPath pp : finalPathsToContainedReads.get(p).keySet()) {
pp_used.put(pp, true);
}
}
}
HashMap<List<Integer>, Pair<Integer>> paths_to_keep = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : all_paths) {
paths_to_keep.put(path, finalPaths_all.get(path));
debugMes("PathRankingFilter retaining: " + path, 15);
}
return(paths_to_keep);
}
private static HashMap<List<Integer>, Integer> paths_to_seq_lengths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashSet<List<Integer>> all_paths) {
HashMap<List<Integer>, Integer> seqLengthMap = new HashMap<List<Integer>,Integer>();
for (List<Integer> path : all_paths)
{
String seq = getPathSeq(graph,path);
seqLengthMap.put(path, seq.length());
}
return(seqLengthMap);
}
private static void removeShortOrphanNodes(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
int min_seq_length) {
List<SeqVertex> vertices_to_remove = new ArrayList<SeqVertex>();
for (SeqVertex v : graph.getVertices()) {
if (graph.getPredecessorCount(v) == 0
&&
graph.getSuccessorCount(v) == 0
&&
v.getName().length() < min_seq_length)
{
vertices_to_remove.add(v);
}
}
for (SeqVertex v : vertices_to_remove) {
debugMes("Removing short seq orphaned vertex: " + v + " from graph. Seq too short to generate a contig of min length.", 12);
graph.removeVertex(v);
}
}
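	/**
	 * Re-threads paired reads through the (possibly restructured) graph: if the two paths of a PairPath
	 * can be combined into a single consistent path they are stored as one entry, otherwise each path is
	 * stored separately. Returns a new read hash keyed by first node ID.
	 */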
private static HashMap<Integer, HashMap<PairPath, Integer>> reorganizeReadPairings(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
describeNodes(graph);
HashMap<Integer,HashMap<PairPath,Integer>> newCombinedReadHash = new HashMap<Integer,HashMap<PairPath,Integer>> ();
for (HashMap<PairPath, Integer> pairs_n_counts : combinedReadHash.values()) {
for (PairPath pp : pairs_n_counts.keySet()) {
int read_support = pairs_n_counts.get(pp);
if (pp.hasSecondPath()) {
PairPath combinedPath = combinePaths(graph, pp.getPath1(), pp.getPath2(), dijkstraDis);
if (! combinedPath.isEmpty()) {
storePairPathByFirstVertex(combinedPath, newCombinedReadHash, read_support);
debugMes("OK pp update to new DAG: " + pp + " => " + combinedPath, 15);
}
else {
// store the read path separately
PairPath pp1 = new PairPath(pp.getPath1());
storePairPathByFirstVertex(pp1, newCombinedReadHash, read_support);
PairPath pp2 = new PairPath(pp.getPath2());
storePairPathByFirstVertex(pp2, newCombinedReadHash, read_support);
debugMes("Warning... pp: " + pp + " needed to be split into: " + pp1 + " and " + pp2, 15);
}
}
else {
storePairPathByFirstVertex(pp, newCombinedReadHash, read_support);
}
}
}
return(newCombinedReadHash);
}
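	/**
	 * Adds the given PairPath to the hash keyed by its first vertex ID, incrementing its read support.
	 */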
private static void storePairPathByFirstVertex(PairPath pp,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, int read_support) {
Integer firstV = pp.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); //init
if (!combinedReadHash.get(firstV).containsKey(pp))
combinedReadHash.get(firstV).put(pp, 0); //add pairpath
Integer counts = combinedReadHash.get(firstV).get(pp);
combinedReadHash.get(firstV).put(pp, counts + read_support); // increment counts for pairpath
debugMes("we have "+ combinedReadHash.get(firstV).get(pp)+" reads supporting the path: " + pp,18);
}
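	/**
	 * Resolves remaining terminal self-loops: where a vertex's sole predecessor and sole successor are one
	 * and the same vertex sharing its original node ID, that repeat vertex is cloned, a new edge to the
	 * clone is added (copying the loop edge weight), the repeat vertex's outgoing non-self edges are
	 * duplicated onto the clone, and affected reads are reassigned.
	 * Returns the number of self loops unrolled.
	 */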
private static int unroll_remaining_terminal_self_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
int num_self_loops_unrolled = 0;
List<SeqVertex> all_vertices = new ArrayList<SeqVertex>(graph.getVertices());
HashMap<Integer,Boolean> unrolled_terminal_vertices = new HashMap<Integer,Boolean>();
for (SeqVertex v : all_vertices) {
debugMes("Examining node: " + v.getShortSeqWconnectingIDs(graph), 12);
Collection<SeqVertex> successors = graph.getSuccessors(v);
Collection<SeqVertex> preds = graph.getPredecessors(v);
if (successors.size() == 1 && preds.size() == 1
&& successors.containsAll(preds)) {
// a terminal repeat node.
SeqVertex repeat_vertex = successors.iterator().next();
if (repeat_vertex.getOrigButterflyID() != v.getOrigButterflyID()) {
debugMes("not a terminal self loop, skipping..."
+ v, 12);
continue;
}
debugMes("Removing terminal self loop at vertex: " + v, 12);
SeqVertex new_v = new SeqVertex(getNextID(), repeat_vertex); // this constructor sets orig_id so it's the same.
SimpleEdge loop_edge = graph.findEdge(v, repeat_vertex);
graph.addVertex(new_v);
SimpleEdge new_edge = new SimpleEdge(loop_edge.getWeight(), v.getID(), new_v.getID());
graph.addEdge(new_edge, v, new_v);
new_edge.increment_repeat_unroll_weight(2);
num_self_loops_unrolled++;
unrolled_terminal_vertices.put(v.getID(), true);
unrolled_terminal_vertices.put(repeat_vertex.getID(), true); // flag for possible read reassignment
// copy over any outgoing non-self edges from the repeat vertex
for (SeqVertex succ : graph.getSuccessors(repeat_vertex)) {
if (succ.getOrigButterflyID() != new_v.getOrigButterflyID()) {
SimpleEdge se = graph.findEdge(repeat_vertex, succ);
SimpleEdge new_se = new SimpleEdge(se.getWeight(), new_v.getID(), succ.getID());
graph.addEdge(new_se, new_v, succ);
new_se.increment_repeat_unroll_weight(2);
unrolled_terminal_vertices.put(succ.getID(), true); // flag for possible reassignment
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
try {
writeDotFile(graph, "__terminal_loop_unroll-preReadReassignment.dot", "ladeda", false);
} catch (Exception e) {
				// dot file output is a debugging aid only; report the failure and continue
e.printStackTrace();
}
}
int num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, unrolled_terminal_vertices,
null, false);
debugMes("num paths with terminal self loop vertices and paths redefined: " + num_paths_redefined, 12);
/*
if (num_paths_redefined > 0)
redefine_all_graph_edges(graph, combinedReadHash);
*/
return(num_self_loops_unrolled);
}
private static void describeVertices(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("## Node descriptions:", 10);
List<SeqVertex> vertices = new ArrayList<SeqVertex>(graph.getVertices());
Collections.sort(vertices, SeqVertexIDorderComparator);
for (SeqVertex v : vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 10);
}
}
private static void describeNodes(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex v : graph.getVertices()) {
debugMes("NODE_DESCR: " + v.getShortSeqWconnectingIDs(graph), 5);
}
}
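	/**
	 * Iteratively unrolls repeats that are evident within the read paths: repeat-containing PairPaths are
	 * prioritized, repeated node occurrences (other than a path's final node) are cloned into new vertices,
	 * edges are added along the restructured paths, and reads are reassigned through the updated graph.
	 * This repeats until no read-evident repeats remain, followed by extra refinement rounds that exploit
	 * the unrolled edge weights. Returns false if no repeats were detected in the reads.
	 */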
private static boolean unroll_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, My_DFS dfs) {
debugMes("\n\nUNROLLING LOOPS IN READS\n\n", 5);
HashMap<Integer,Boolean> all_repeat_related_nodes = new HashMap<Integer,Boolean>();
// get the repeat nodes assigned to the pp containing it the greatest number of times.
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
if (repeat_node_id_to_longest_path.isEmpty()) {
debugMes("\t** no repeats detected in the reads. No repeat unrolling needed here.", 10);
return(false);
}
int unroll_loop_counter = 0;
while (repeat_node_id_to_longest_path.size() > 0) {
unroll_loop_counter++;
debugMes("\n\n## Unrolling loops, round: " + unroll_loop_counter, 10);
if (repeat_node_id_to_longest_path.size() > 0) {
debugMes("\n\nFound : " + repeat_node_id_to_longest_path.size() + " repeat nodes.", 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12) {
for (Integer node_id : repeat_node_id_to_longest_path.keySet()) {
PairPath pp = repeat_node_id_to_longest_path.get(node_id);
System.err.println("Repeat Node: " + node_id + " found in longest pp: " + pp);
}
}
}
// unroll each repeat unit
HashMap<Integer,Boolean> restructured_nodes = new HashMap<Integer,Boolean>();
// prioritize the paths according to the number of different repeat nodes.
HashMap<PairPath,Float> pp_uniq_repeat_count_hmap = new HashMap<PairPath,Float>();
for (Integer repeat_node : repeat_node_id_to_longest_path.keySet()) {
all_repeat_related_nodes.put(repeat_node, true);
PairPath pp = repeat_node_id_to_longest_path.get(repeat_node);
float repeat_count_sum = pp.getRepeatNodesAndCountSum();
// also prioritize by path length
int max_path_length = pp.getMaxPathLength();
repeat_count_sum += (float)max_path_length/1000.0; // simple way to augment score based on length of longest path.
if (! pp_uniq_repeat_count_hmap.containsKey(pp)) {
pp_uniq_repeat_count_hmap.put(pp, repeat_count_sum); // all repeat nodes counted, do once per pp
}
}
final HashMap<PairPath,Float> pp_uniq_repeat_count_hmap_copy = new HashMap<PairPath,Float>(pp_uniq_repeat_count_hmap);
List<PairPath> pp_list_ordered_by_repeat_counts = new ArrayList<PairPath>(pp_uniq_repeat_count_hmap.keySet());
Collections.sort(pp_list_ordered_by_repeat_counts, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
float rpt_node_count_A = pp_uniq_repeat_count_hmap_copy.get(pp_A);
float rpt_node_count_B = pp_uniq_repeat_count_hmap_copy.get(pp_B);
if (rpt_node_count_A > rpt_node_count_B)
return -1;
else if (rpt_node_count_A < rpt_node_count_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pp_list_ordered_by_repeat_counts) {
debugMes("Unrolling repeats in pp: " + pp, 12);
for (List<Integer> path : pp.get_paths()) {
if (path.size() == 0) // empty path in a pp
continue;
// if the path contains any previously restructured node, skip it.
if (Path.contains_any_node_id(path, restructured_nodes.keySet())) {
debugMes("\t-postponing unroll since contains restructured node.\n", 12);
continue;
}
debugMes("Unrolling repeats in pp: " + pp + " with repeat nodes: " + pp.getRepeatNodesAndCounts(), 12);
// find the repeat nodes and restructure the graph.
List<SeqVertex> path_vertices = new ArrayList<SeqVertex>();
HashMap<Integer,Boolean> seen_repeat_node = new HashMap<Integer,Boolean>();
boolean restructured_flag = false;
int node_pos = 0;
for (Integer node_id : path) {
node_pos++;
SeqVertex v = getSeqVertex(graph, node_id);
if (node_pos != path.size()) {
// don't do the very last node if it's a repeat... leave for another pp to unravel it.
if (seen_repeat_node.containsKey(node_id)) {
// ok - tackling this repeat node.
// make a copy of the vertex
v = new SeqVertex(getNextID(), v); // this constructor sets orig_id so it's the same.
debugMes("\tcopying node: " + node_id + " to " + v.getID(), 12);
all_repeat_related_nodes.put(v.getID(), true);
graph.addVertex(v);
restructured_nodes.put(node_id, true);
restructured_flag = true;
}
else {
if (repeat_node_id_to_longest_path.containsKey(node_id)) {
seen_repeat_node.put(node_id, true);
}
}
} // endif last node of path
path_vertices.add(v);
} //end of for node_id : path
if (restructured_flag) {
// add edges between neighboring nodes
for (int i = 0; i < path_vertices.size()-1; i++) {
SeqVertex before_node = path_vertices.get(i);
SeqVertex after_node = path_vertices.get(i+1);
SimpleEdge edge = graph.findEdge(before_node, after_node);
if (edge == null) {
// add it.
// get the original edge and copy the weight over.
SimpleEdge orig_edge = graph.findEdge(getSeqVertex(graph, before_node.getOrigButterflyID()), getSeqVertex(graph, after_node.getOrigButterflyID()));
double oldW = 1;
if (orig_edge != null)
oldW = orig_edge.getWeight();
edge = new SimpleEdge(oldW, before_node.getID(), after_node.getID());
graph.addEdge(edge, before_node, after_node);
}
edge.increment_repeat_unroll_weight(unroll_loop_counter); // was 1
}
// describe the new vertex list:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("# Restructured path described:", 20);
for (SeqVertex v : path_vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 20);
}
}
// Verify that this path can be properly reassigned in the graph.
debugMes("\nVerifying that restructured path: " + path + " is rethreaded through the graph with fewer repeat units.", 12);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
String orig_path_seq = getPathSeq(graph, path);
String new_path_seq = getPathSeq(graph, updated_path);
if (! orig_path_seq.equals(new_path_seq)) {
throw new RuntimeException("Error, updated path seq != orig path seq:\n>Orig\n" + orig_path_seq + "\n>New\n" + new_path_seq);
}
else {
debugMes("* old and new path seqs are identical. validated. " + new_path_seq, 15);
}
if (updated_path == null
||
( path.size() > 1 &&
(updated_path.equals(path)
||
Path.countNumNodesNotUnique(path) <= Path.countNumNodesNotUnique(updated_path)
)
)
) {
throw new RuntimeException("Repeat-unrolled path: " + path
+ " " + Path.getRepeatNodesAndCounts(path)
+ " was not properly restructured: "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path));
}
else {
debugMes("\tVerification OK: path:" + path + " " + Path.getRepeatNodesAndCounts(path) +
" => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path) + "\n", 12);
}
} // end if restructured_flag
} // end for path
} // end for pp
// reassign repeat nodes to their new nodes in the graph
debugMes("\n\n## Post-unroll round: " + unroll_loop_counter + ", reassigning_repeat_nodes_in_reads\n", 10);
// restrict unrolling to just those containing as of yet unrolled repeats
int num_paths_updated = reassign_repeat_nodes_in_reads(graph, combinedReadHash,
restructured_nodes, null, true);
if (num_paths_updated == 0)
throw new RuntimeException("Error, no paths were updated after this round of repeat unrolling");
debugMes("\n\nNumber of paths refined: " + num_paths_updated, 10);
//redefine_all_graph_edges(graph, combinedReadHash); // prune out the now unsupported edges post reassignment.
dfs.runDFS2(); // reassign depths given new nodes added.
			if (true) { // always write a per-round dot file for debugging
String filename = FILE + ".repeatUnroll_" + unroll_loop_counter + ".dot";
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(filename));
writeDotFile(graph,p,"repeatUnroll_" + unroll_loop_counter);
p.close();
} catch (FileNotFoundException e) {
					// dot file output is a debugging aid only; report the failure and continue
e.printStackTrace();
}
}
// look for remaining repeats
repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
} // end while repeats
// all repeats are now unrolled.
// further refine paths in case better scoring path for a read is to be found.
int num_paths_redefined = 1;
		HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list = new HashMap<PairPath,Boolean>(); // now we use it. Graph isn't changing, so won't expect paths to change unless they have a better placement given the unrolled edge weights.
int refinement_round = 0;
while (num_paths_redefined > 0) {
refinement_round++;
// reassign paths for all reads containing restructured nodes.
num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, all_repeat_related_nodes,
pp_remains_unchanged_skip_list, false);
debugMes("unroll_loops::PATH_REFINEMENT_ROUND: " + refinement_round + " NUMBER_PATHS_REDEFINED: " + num_paths_redefined, 10);
/*
if (num_paths_redefined > 0
|| refinement_round == 1
)
{ // be sure to do this at least once!!!
redefine_all_graph_edges(graph, combinedReadHash);
}
*/
}
return(true);
}
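	/**
	 * Companion to unroll_loops(): targets repeat occurrences at the final position of a read path, which
	 * were deliberately left alone earlier. Terminal repeat nodes are cloned, the needed edges added, and
	 * reads reassigned until no repeats remain. Returns false if no such repeats were detected.
	 */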
private static boolean unroll_remaining_terminal_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
// should be targeting only those repeat nodes that show up at final positions in a read.
debugMes("\n\nUNROLLING REMAINING LOOPS IN READS\n\n", 5);
HashMap<Integer,Boolean> all_repeat_related_nodes = new HashMap<Integer,Boolean>();
// get the repeat nodes assigned to the pp containing it the greatest number of times.
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = find_repeat_containing_pairpaths(combinedReadHash);
if (repeat_node_id_to_longest_path.isEmpty()) {
debugMes("\t** no repeats detected in the reads. No repeat unrolling needed here.", 10);
return(false);
}
int unroll_loop_counter = 0;
while (repeat_node_id_to_longest_path.size() > 0) {
unroll_loop_counter++;
debugMes("\n\n## Unrolling remaining terminal loops, round: " + unroll_loop_counter, 10);
if (repeat_node_id_to_longest_path.size() > 0) {
debugMes("\n\nFound : " + repeat_node_id_to_longest_path.size() + " repeat nodes.", 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12) {
for (Integer node_id : repeat_node_id_to_longest_path.keySet()) {
PairPath pp = repeat_node_id_to_longest_path.get(node_id);
System.err.println("Repeat Node: " + node_id + " found in longest pp: " + pp);
}
}
}
// unroll each repeat unit
HashMap<Integer,Boolean> restructured_nodes = new HashMap<Integer,Boolean>();
// prioritize the paths according to the number of different repeat nodes.
HashMap<PairPath,Float> pp_uniq_repeat_count_hmap = new HashMap<PairPath,Float>();
for (Integer repeat_node : repeat_node_id_to_longest_path.keySet()) {
all_repeat_related_nodes.put(repeat_node, true);
PairPath pp = repeat_node_id_to_longest_path.get(repeat_node);
float repeat_count_sum = pp.getRepeatNodesAndCountSum();
// also prioritize by path length
int max_path_length = pp.getMaxPathLength();
repeat_count_sum += (float)max_path_length/1000.0; // simple way to augment score based on length of longest path.
if (! pp_uniq_repeat_count_hmap.containsKey(pp)) {
pp_uniq_repeat_count_hmap.put(pp, repeat_count_sum); // all repeat nodes counted, do once per pp
}
}
final HashMap<PairPath,Float> pp_uniq_repeat_count_hmap_copy = new HashMap<PairPath,Float>(pp_uniq_repeat_count_hmap);
List<PairPath> pp_list_ordered_by_repeat_counts = new ArrayList<PairPath>(pp_uniq_repeat_count_hmap.keySet());
Collections.sort(pp_list_ordered_by_repeat_counts, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
float rpt_node_count_A = pp_uniq_repeat_count_hmap_copy.get(pp_A);
float rpt_node_count_B = pp_uniq_repeat_count_hmap_copy.get(pp_B);
if (rpt_node_count_A > rpt_node_count_B)
return -1;
else if (rpt_node_count_A < rpt_node_count_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pp_list_ordered_by_repeat_counts) {
debugMes("Unrolling repeats in pp: " + pp, 12);
for (List<Integer> path : pp.get_paths()) {
if (path.size() == 0) // empty path in a pp
continue;
// if the path contains any previously restructured node, skip it.
if (Path.contains_any_node_id(path, restructured_nodes.keySet())) {
debugMes("\t-postponing unroll since contains restructured node.\n", 12);
continue;
}
debugMes("Unrolling repeats in pp: " + pp + " with repeat nodes: " + pp.getRepeatNodesAndCounts(), 12);
// find the repeat nodes and restructure the graph.
List<SeqVertex> path_vertices = new ArrayList<SeqVertex>();
HashMap<Integer,Boolean> seen_repeat_node = new HashMap<Integer,Boolean>();
boolean restructured_flag = false;
int node_pos = 0;
for (Integer node_id : path) {
node_pos++;
SeqVertex v = getSeqVertex(graph, node_id);
if (seen_repeat_node.containsKey(node_id)) {
// ok - tackling this repeat node.
// this had better be the very last node of the path, or else our logic is wrong here!!
if (node_pos != path.size())
throw new RuntimeException("Error, remaining repeat is not at last node of path: " + path);
all_repeat_related_nodes.put(node_id, true); // important, track the node ID getting restructured.
// make a copy of the vertex
v = new SeqVertex(getNextID(), v); // this constructor sets orig_id so it's the same.
debugMes("\tcopying node: " + node_id + " to " + v.getID(), 12);
graph.addVertex(v);
restructured_nodes.put(node_id, true);
restructured_flag = true;
}
else {
if (repeat_node_id_to_longest_path.containsKey(node_id)) {
seen_repeat_node.put(node_id, true);
}
}
path_vertices.add(v);
} //end of for node_id : path
if (restructured_flag) {
// add edges between neighboring nodes
for (int i = 0; i < path_vertices.size()-1; i++) {
SeqVertex before_node = path_vertices.get(i);
SeqVertex after_node = path_vertices.get(i+1);
SimpleEdge edge = graph.findEdge(before_node, after_node);
if (edge == null) {
// again, this had better be an edge to the last node in the path
if (i + 2 != path.size())
throw new RuntimeException("Error, trying to add new edge between "
+ before_node + " and " + after_node + " and not at end of path.");
// add it.
// get the original edge and copy the weight over.
SimpleEdge orig_edge = graph.findEdge(getSeqVertex(graph, before_node.getOrigButterflyID()), getSeqVertex(graph, after_node.getOrigButterflyID()));
double oldW = 1;
if (orig_edge != null)
oldW = orig_edge.getWeight();
edge = new SimpleEdge(oldW, before_node.getID(), after_node.getID());
graph.addEdge(edge, before_node, after_node);
edge.increment_repeat_unroll_weight(2); // further encourage reads to take this path where possible.
}
}
// describe the new vertex list:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("# Restructured path described:", 20);
for (SeqVertex v : path_vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 20);
}
}
// Verify that this path can be properly reassigned in the graph.
debugMes("\nVerifying that restructured path: " + path + " is rethreaded through the graph with fewer repeat units.", 12);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
if (updated_path == null
||
( path.size() > 1 &&
(updated_path.equals(path)
||
(Path.countNumNodesNotUnique(updated_path) != 0)
)
)
) {
throw new RuntimeException("Remaining terminal repeat-unrolled path: " + path
+ " " + Path.getRepeatNodesAndCounts(path)
+ " was not properly restructured: "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path));
}
else {
debugMes("\tVerification OK: path:" + path + " " + Path.getRepeatNodesAndCounts(path) +
" => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path) + "\n", 12);
}
} // end if restructured_flag
} // end for path
} // end for pp
			if (true) { // always write a per-round dot file for debugging
String filename = FILE + ".TerminalRepeatUnroll_" + unroll_loop_counter + ".dot";
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(filename));
writeDotFile(graph,p,"TerminalRepeatUnroll_" + unroll_loop_counter);
p.close();
} catch (FileNotFoundException e) {
					// dot file output is a debugging aid only; report the failure and continue
e.printStackTrace();
}
}
// reassign repeat nodes to their new nodes in the graph
debugMes("\n\n## Post-terminal-repeat-unroll round: " + unroll_loop_counter + ", reassigning_repeat_nodes_in_reads\n", 10);
int num_paths_updated = reassign_repeat_nodes_in_reads(graph, combinedReadHash,
restructured_nodes, null, true);
if (num_paths_updated == 0)
throw new RuntimeException("Error, no paths were updated after this round of repeat unrolling");
debugMes("\n\nNumber of paths refined: " + num_paths_updated, 10);
// look for remaining repeats
repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
}
/*
// prune the extra edges that are now not supported by the repeat-unrolled reads.
debugMes("\n\n## Post-unroll round: " + unroll_loop_counter + ", redefine_all_graph_edges()\n", 10);
redefine_all_graph_edges(graph, combinedReadHash);
*/
int num_paths_redefined = 1;
		HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list = new HashMap<PairPath,Boolean>(); // now we use it. Graph isn't changing, so won't expect paths to change unless they have a better placement given the unrolled edge weights.
int refinement_round = 0;
while (num_paths_redefined > 0) {
refinement_round++;
num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, all_repeat_related_nodes,
pp_remains_unchanged_skip_list, false);
debugMes("unroll_remaining_terminal_loops::PATH_REFINEMENT_ROUND: " + refinement_round + " NUMBER_PATHS_REDEFINED: " + num_paths_redefined, 10);
/*
if (num_paths_redefined > 0) {
redefine_all_graph_edges(graph, combinedReadHash);
}
*/
}
return(true);
}
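	/**
	 * Recomputes edge support from the current read paths: edges traversed by reads have their weights reset
	 * to the summed read support of the PairPaths containing them, and edges not supported by any read are
	 * removed from the graph.
	 */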
private static void redefine_all_graph_edges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
// capture the edges supported by the repeat-unrolled paths
HashMap<SimpleEdge, Integer> edge_pp_counter = new HashMap<SimpleEdge, Integer>();
HashMap<SimpleEdge,String> edge_text = new HashMap<SimpleEdge,String>();
for (HashMap<PairPath, Integer> hmap_pp : combinedReadHash.values()) {
for (PairPath pp : hmap_pp.keySet()) {
for (List<Integer> path : pp.get_paths()) {
Integer read_support = hmap_pp.get(pp);
for (int i = 0; i < path.size() - 1; i++) {
SeqVertex from_v = getSeqVertex(graph, path.get(i));
SeqVertex to_v = getSeqVertex(graph, path.get(i+ 1));
SimpleEdge se = graph.findEdge(from_v, to_v);
if (se == null) {
throw new RuntimeException("Error, should have edge between " + from_v + " and " + to_v + " but could not be found in graph.");
}
int edge_support = read_support;
if (edge_pp_counter.containsKey(se)) {
edge_support = edge_pp_counter.get(se) + read_support;
}
edge_pp_counter.put(se, edge_support);
edge_text.put(se, "edge:"+from_v + "<->" + to_v + " support: " + edge_support);
}
}
}
}
// purge all edges not found in repeat-unrolled reads
// and reset weight of edges according to the read support of pairpaths containing them.
ArrayList<SimpleEdge> all_edges = new ArrayList<SimpleEdge>(graph.getEdges());
for (SimpleEdge se : all_edges) {
if (edge_pp_counter.containsKey(se)) {
debugMes("-Retaining edge: " + edge_text.get(se), 15);
se.setWeight(edge_pp_counter.get(se));
}
else {
debugMes("-Pruning edge: " + se, 20);
graph.removeEdge(se);
}
}
}
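	/**
	 * For every PairPath that touches a restructured node, attempts to rethread each of its paths through
	 * the graph with a lower repeat score; modified paths replace the originals in combinedReadHash.
	 * PairPaths that did not change can be recorded in the optional skip list so later rounds ignore them.
	 * Returns the number of PairPaths that were updated (or flagged as not remappable).
	 */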
private static int reassign_repeat_nodes_in_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
HashMap<Integer, Boolean> restructured_nodes,
HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list, // dont want it, can use null
Boolean restrict_to_unrolled_repeat_containing_paths
) {
Set<Integer> restructured_node_ids = restructured_nodes.keySet();
debugMes("Restructured nodes list: " + restructured_node_ids, 15);
List<PairPath> orig_pps = new ArrayList<PairPath>();
List<PairPath> updated_pps = new ArrayList<PairPath>();
List<Integer> orig_counts = new ArrayList<Integer>();
// iterate through all the reads and reassign where necessary.
int pp_counter = 0;
int total_pp = count_pp_in_combinedReadHash(combinedReadHash);
for (HashMap<PairPath,Integer> pp_hmap : combinedReadHash.values()) {
for (PairPath pp : pp_hmap.keySet()) {
if (pp_remains_unchanged_skip_list != null && pp_remains_unchanged_skip_list.containsKey(pp))
continue;
pp_counter++;
System.err.print("\rpp[" + pp_counter + "] / " + total_pp + " = " + (int) ( (float)pp_counter/total_pp * 100) + " % ");
PairPath updated_pp = new PairPath(pp);
Integer orig_count = pp_hmap.get(pp);
boolean pp_updated_flag = false;
boolean pp_not_remapped_flag = false;
for (List<Integer> path : updated_pp.get_paths()) {
if (restrict_to_unrolled_repeat_containing_paths) {
if (Path.getRepeatNodesAndCounts(path).size() == 0) {
						continue; // don't try to remap it.
}
}
if (Path.contains_any_node_id(path, restructured_node_ids)) {
debugMes("Attempting to reassign repeat-node containing path: " + path, 15);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
if (updated_path == null) {
// shouldn't happen now.
pp_not_remapped_flag = true;
}
else {
if ( (! updated_path.equals(path))
//Path.countNumNodesNotUnique(path) > Path.countNumNodesNotUnique(updated_path)
&&
score_path_by_repeats(updated_path, graph) < score_path_by_repeats(path, graph)
) {
debugMes("REASSIGNED_PATH: " + path + " " + Path.getRepeatNodesAndCounts(path)
+ " => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path), 15);
// see if we just moved from a non-self terminal repeat to some other internal repeat arrangement:
if (Path.hasTerminalNonSelfRepeat(path) && (! Path.hasTerminalNonSelfRepeat(updated_path))
&&
Path.countNumNodesNotUnique(path) <= Path.countNumNodesNotUnique(updated_path) ) {
//FIXME: better understand this extremely rare edge case
// just remove the terminal repeat node, since we were unable to resolve it
debugMes("WARNING: terminal repeat node containing path just rearranged to include alternate repeat structures that should have already been resolved earlier.", 15);
path.remove(path.size()-1);
}
else {
path.clear();
path.addAll(updated_path);
}
pp_updated_flag = true;
}
else {
debugMes("Path " + path + " " + Path.getRepeatNodesAndCounts(path)
+ " remains unchanged or repeat count stayed the same => "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path), 15);
HashMap<Integer,Integer> rpt_nodes = Path.getRepeatNodesAndCounts(path);
if (rpt_nodes.size() > 0) {
debugMes("\t** path still contains repeat nodes: " + rpt_nodes, 15);
}
}
}
}
}
if (pp_updated_flag || pp_not_remapped_flag) {
if (pp_updated_flag) {
updated_pps.add(updated_pp);
orig_pps.add(pp);
orig_counts.add(orig_count);
debugMes("PATH updated for : " + pp + " to " + updated_pp + " orig_first: " + pp.getFirstID() + ", updated_pp.first: " + updated_pp.getFirstID(), 15);
}
else if (pp_not_remapped_flag) {
orig_pps.add(pp);
updated_pps.add(null);
orig_counts.add(-1);
}
}
else {
if (pp_remains_unchanged_skip_list != null) {
pp_remains_unchanged_skip_list.put(pp, true);
}
}
}
}
// reorganize any changes in the combinedReadHash based on original node identifiers
for (int i = 0; i < updated_pps.size(); i++) {
PairPath updated_pp = updated_pps.get(i);
// only use the orig_pp to get the first node, since data structures revolve around the actual hashmap objs.
PairPath orig_pp = orig_pps.get(i);
Integer orig_count = orig_counts.get(i);
debugMes("Reorganizing combined read hash for: orig: " + orig_pp + " to updated_pp: " + updated_pp, 15);
// remove the orig pp
Integer orig_first_node = orig_pp.getFirstID();
combinedReadHash.get(orig_first_node).remove(orig_pp);
if (combinedReadHash.get(orig_first_node).size() == 0) {
combinedReadHash.remove(orig_first_node);
}
if (updated_pp != null) {
// add the new pp
Integer new_first_node = updated_pp.getFirstID();
if (combinedReadHash.containsKey(new_first_node)) {
combinedReadHash.get(new_first_node).put(updated_pp, orig_count);
}
else {
combinedReadHash.put(new_first_node, new HashMap<PairPath,Integer>());
combinedReadHash.get(new_first_node).put(updated_pp, orig_count);
}
}
}
return(updated_pps.size());
}
private static int count_pp_in_combinedReadHash(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
int count = 0;
for (HashMap<PairPath,Integer> pp_hmap : combinedReadHash.values()) {
for (PairPath pp : pp_hmap.keySet()) {
count++;
}
}
return(count);
}
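	/**
	 * Re-threads a single read path through the restructured graph, preferring the reconstruction with the
	 * fewest repeated nodes as scored by score_path_by_repeats(). Throws if no complete re-threading exists.
	 */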
private static List<Integer> reassign_restructured_path_in_graph(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
// find a complete path in the graph.
// prefer the one with the fewest number of repeated nodes.
if (path.size() == 1) {
			return(path); //FIXME: should try to assign it to its best repeat node if it's a repeat, to keep it tidy.
}
//int repeat_cap = Path.countNumOrigNodesNotUnique(path);
int repeat_cap = Path.countNumNodesNotUnique(path);
		debugMes("reassign_restructured_path_in_graph(" + path + ") with cap of " + repeat_cap + " num local repeats.",15);
int max_num_local_repeats = repeat_cap;
List<Integer> chosen_thus_far = new ArrayList<Integer>();
PATH_COUNTER = 0; // init global
HashMap<String,List<List<Integer>>> memoize_best_path = new HashMap<String,List<List<Integer>>>();
List<List<Integer>> complete_path = recursively_explore_graph_paths(graph, chosen_thus_far, path,
0, 0,
max_num_local_repeats, memoize_best_path);
if (complete_path != null) {
return(complete_path.get(0)); // take the first one of tied entries.
}
throw new RuntimeException("Error, couldn't remap path: " + path + " within the graph");
/*
debugMes("WARNING: couldn't remap path: " + path + " within the graph", 12);
return(null); // no remapping
*/
}
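	/**
	 * Recursive search behind reassign_restructured_path_in_graph(): extends chosen_thus_far one node at a
	 * time, considering the target node and any vertices recorded as copies of it, and returns the tied set
	 * of lowest-scoring reconstructions (memoized by position and last node). Returns null when the local
	 * repeat cap is exceeded or no extension is possible.
	 */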
private static List<List<Integer>> recursively_explore_graph_paths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> chosen_thus_far,
List<Integer> path,
Integer num_repeat_nodes,
Integer num_out_of_order,
int MAX_NUM_LOCAL_REPEATS,
HashMap<String,List<List<Integer>>> memoize_best_path) {
// int MAX_SEARCHES_FOR_PATH_REFINEMENT = 5; //FIXME: make this a global and command-line parameter
debugMes("recursively_explore_graph_paths(): pathLen: " + path.size() + ", chosen thus far: " + chosen_thus_far, 20);
String curr_node_pos_token = null;
if (chosen_thus_far.size() > 0) {
curr_node_pos_token = "" + chosen_thus_far.size() + "_" + chosen_thus_far.get(chosen_thus_far.size()-1);
if (memoize_best_path.containsKey(curr_node_pos_token)) {
return(Path.clone(memoize_best_path.get(curr_node_pos_token)));
}
}
boolean local_debug = false;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 25)
local_debug = true;
if (num_repeat_nodes > MAX_NUM_LOCAL_REPEATS) {
debugMes("\t** terminating extension, max num local repeats encountered: " + num_repeat_nodes, 20);
return (null);
}
		List<List<Integer>> min_repeat_reconstructed_path_list = new ArrayList<List<Integer>>();
		Float min_repeat_reconstructed_path_repeat_score = null;
		List<List<Integer>> all_possible_path_reconstructions_seen = new ArrayList<List<Integer>>(); // for debugging purposes.
if (local_debug) {
System.err.println("RECURSIVELY_EXPLORE_GRAPH_PATHS: chosen_thus_far: " + chosen_thus_far + ", path: " + path);
}
if (chosen_thus_far.size() == path.size()) {
debugMes("\trecursion base case, found path: " + path, 20);
// return empty list of paths to signal base case.
return(Path.create_empty_path_list());
//return(Path.create_list_of_paths_from_single_node_id(chosen_thus_far.get(chosen_thus_far.size()-1))); // done, return last node.
}
Integer target_path_node_id = path.get(chosen_thus_far.size());
SeqVertex target_path_node = getSeqVertex(graph, target_path_node_id);
Integer current_orig_node_id = target_path_node.getOrigButterflyID();
// get the last node chosen thus far
SeqVertex last_node = null;
if (chosen_thus_far.size() > 0) {
last_node = getSeqVertex(graph, chosen_thus_far.get(chosen_thus_far.size()-1));
debugMes("EXTENDING FROM LAST_NODE: " + last_node + ", searching for an origID: " + current_orig_node_id, 20);
}
//List<SeqVertex> candidate_vertices = SeqVertex.getAllNodesHavingOriginalID(current_orig_node_id);
// only pursue multiple candidates if it's an unrolled repeat vertex in the target path
// if it's a repeat vertex that was already unrolled, add the unrolled node here.
List<SeqVertex> candidate_vertices = SeqVertex.getAllNodesHavingOriginalID(target_path_node_id);
if (candidate_vertices.isEmpty()) {
candidate_vertices.add(target_path_node);
}
if (last_node == null) {
/*
if (candidate_vertices.size() > MAX_SEARCHES_FOR_PATH_REFINEMENT) {
debugMes("Not seeding on repetitive node, skipping this path: " + path, 12); //FIXME: instead, redo seeding on non-repetitive node of this path.
return(null);
}
*/
debugMes("Initial candidate vertices based on orig_id: " + current_orig_node_id + " are " + candidate_vertices, 20);
}
else {
// restrict the list to those vertices that are direct successors to last node
// ensure there's an edge between previously chosen node and this one.
candidate_vertices = last_node.getListOfSuccessors(graph, candidate_vertices);
debugMes("\tFiltered candidate vertices for extension from: " + last_node.getID() + " are " + candidate_vertices, 20);
// now sort them by unrolled edge weights.
//if (candidate_vertices.size() > MAX_SEARCHES_FOR_PATH_REFINEMENT) {
HashMap<Integer,Double> unrolled_weights = new HashMap<Integer,Double>();
for (SeqVertex v : candidate_vertices) {
SimpleEdge se = graph.findEdge(last_node, v);
double unrolled_weight = se.get_repeat_unroll_weight();
unrolled_weights.put(v.getID(), unrolled_weight);
}
final HashMap<Integer,Double> unrolled_weights_map = new HashMap<Integer,Double>(unrolled_weights);
Collections.sort(candidate_vertices, new Comparator<SeqVertex>() {
public int compare (SeqVertex v_a, SeqVertex v_b) {
// want most highly supported pairpaths to sort descendingly
Integer v_a_id = v_a.getID();
Integer v_b_id = v_b.getID();
if (unrolled_weights_map.get(v_a_id) < unrolled_weights_map.get(v_b_id)) {
return(-1);
}
else if (unrolled_weights_map.get(v_a_id) > unrolled_weights_map.get(v_b_id)) {
return(1);
}
else {
return(0);
}
}
});
//candidate_vertices = candidate_vertices.subList(0, MAX_SEARCHES_FOR_PATH_REFINEMENT);
//debugMes("-restricting recursive search from " + last_node.getID() + " to " + candidate_vertices, 15);
}
if (local_debug) {
System.err.println("RECURSIVELY_EXPLORE_GRAPH_PATHS: candidate_vertices with orig_node_id: "
+ current_orig_node_id + " are: " + candidate_vertices);
}
int search_refinement_count = 0;
for (SeqVertex v : candidate_vertices) {
int local_num_repeats = num_repeat_nodes;
int local_num_out_of_order = num_out_of_order;
// all repeats should have been expanded by now.
if (chosen_thus_far.contains(v.getID())) {
if (local_debug) {
System.err.println("already chose id: " + v.getID());
}
local_num_repeats++;
}
if (last_node != null && v.getNodeDepth() < last_node.getNodeDepth()) {
local_num_out_of_order++;
}
// Take this node and explore other extensions.
search_refinement_count++;
/*
if (search_refinement_count > MAX_SEARCHES_FOR_PATH_REFINEMENT)
break;
*/
chosen_thus_far.add(v.getID());
List<List<Integer>> tied_reconstructed_paths = recursively_explore_graph_paths(graph, chosen_thus_far, path,
local_num_repeats, local_num_out_of_order, MAX_NUM_LOCAL_REPEATS,
memoize_best_path);
if (tied_reconstructed_paths != null) {
// include current node in path before scoring
tied_reconstructed_paths = Path.prepend_node_id_to_paths(chosen_thus_far.get(chosen_thus_far.size()-1), tied_reconstructed_paths); // add current node to the lowest repeat extension.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
all_possible_path_reconstructions_seen.addAll(tied_reconstructed_paths); // for debugging
}
debugMes("\nAll Paths and scores:", 15);
for (List<Integer> reconstructed_path : tied_reconstructed_paths) {
float repeated_node_score = score_path_by_repeats(reconstructed_path, graph);
debugMes("score:" + repeated_node_score + " " + reconstructed_path + " " + Path.getRepeatNodesAndCounts(reconstructed_path), 15);
if (min_repeat_reconstructed_path_repeat_score == null
|| Math.abs(min_repeat_reconstructed_path_repeat_score - repeated_node_score) < 0.00001) // consider a tie
{
if (min_repeat_reconstructed_path_repeat_score == null || repeated_node_score < min_repeat_reconstructed_path_repeat_score ) {
min_repeat_reconstructed_path_repeat_score = repeated_node_score;
}
min_repeat_reconstructed_path_list.add(reconstructed_path);
}
else if (repeated_node_score < min_repeat_reconstructed_path_repeat_score) {
min_repeat_reconstructed_path_list.clear(); // reset since have lower score (better)
min_repeat_reconstructed_path_list.add(reconstructed_path);
min_repeat_reconstructed_path_repeat_score = repeated_node_score;
}
}
}
chosen_thus_far.remove(chosen_thus_far.size()-1); // remove the last element added in prep for the next one.
}
if (! min_repeat_reconstructed_path_list.isEmpty()) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("\nALL CANDIDATE PATHS SEEN AT " + curr_node_pos_token + ":", 15);
if (all_possible_path_reconstructions_seen.size() > 1) {
debugMes("MULTIPLE CANDIDATE PATHS SEEN AT NODE", 15);
}
for (List<Integer> candidate_path : all_possible_path_reconstructions_seen) {
float candidate_path_score = score_path_by_repeats(candidate_path, graph);
debugMes("score: " + candidate_path_score + " " + candidate_path + " " + Path.getRepeatNodesAndCounts(candidate_path), 15);
}
}
debugMes("\nMinRepeat tied paths of length: " + min_repeat_reconstructed_path_list.get(0).size() + " with score: " + min_repeat_reconstructed_path_repeat_score + ":", 15);
for (List<Integer> reconstructed_path : min_repeat_reconstructed_path_list) {
debugMes(reconstructed_path + " " + Path.getRepeatNodesAndCounts(reconstructed_path), 15);
}
memoize_best_path.put(curr_node_pos_token, Path.clone(min_repeat_reconstructed_path_list));
return(min_repeat_reconstructed_path_list);
}
return(null); // no min repeat paths to report.
}
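	/**
	 * Scores a reconstructed path for repeat resolution (lower is better): the count of repeated nodes,
	 * plus a small penalty per out-of-order node depth, minus a small bonus for traversing
	 * repeat-unrolled edges.
	 */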
private static float score_path_by_repeats(
List<Integer> reconstructed_path,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
float OUT_OF_ORDER_PENALTY_FACTOR = 1000;
float UNROLLED_EDGE_USE_SCORE_FACTOR = 100;
// found a good path.
float repeated_node_score = Path.countNumNodesNotUnique(reconstructed_path);
// further penalize by the number of out-of-order nodes
repeated_node_score += Path.countNumOutOfOrder(graph, reconstructed_path) / OUT_OF_ORDER_PENALTY_FACTOR;
// take into account use of repeat-unrolled edges
double unrolled_edge_weight_sum = Path.getUnrolledEdgeWeightSum(graph, reconstructed_path);
repeated_node_score -= unrolled_edge_weight_sum / UNROLLED_EDGE_USE_SCORE_FACTOR;
return(repeated_node_score);
}
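	/**
	 * Maps each repeat node ID to the PairPath that contains it the greatest number of times.
	 * The _ignoreLastNode variant below does the same but ignores a repeat occurring only at the final
	 * position of a path.
	 */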
private static HashMap<Integer, PairPath> find_repeat_containing_pairpaths (
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = new HashMap<Integer, PairPath>();
HashMap<Integer, Integer> repeat_node_id_to_max_repeat_count = new HashMap<Integer,Integer>();
for (HashMap<PairPath, Integer> pp_n_counts : combinedReadHash.values()) {
for (PairPath pp : pp_n_counts.keySet()) {
// get all pp containing a repeat node.
HashMap<Integer, Integer> repeat_node_ids_n_counts = pp.getRepeatNodesAndCounts();
// assign each repeat node to the path that contains it as a repeat the greatest number of occurrences.
if (repeat_node_ids_n_counts.size() > 0) {
debugMes("repeat_node_ids_n_counts: " + repeat_node_ids_n_counts + " , pp: " + pp + ", counts: " + pp_n_counts.get(pp), 14);
for (Integer node_id : repeat_node_ids_n_counts.keySet()) {
Integer count = repeat_node_ids_n_counts.get(node_id);
if ( (! repeat_node_id_to_max_repeat_count.containsKey(node_id))
||
repeat_node_id_to_max_repeat_count.get(node_id) < count) {
repeat_node_id_to_longest_path.put(node_id, pp);
repeat_node_id_to_max_repeat_count.put(node_id, count);
}
}
}
}
}
return(repeat_node_id_to_longest_path);
}
private static HashMap<Integer, PairPath> find_repeat_containing_pairpaths_ignoreLastNode (
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = new HashMap<Integer, PairPath>();
HashMap<Integer, Integer> repeat_node_id_to_max_repeat_count = new HashMap<Integer,Integer>();
for (HashMap<PairPath, Integer> pp_n_counts : combinedReadHash.values()) {
for (PairPath pp : pp_n_counts.keySet()) {
// get all pp containing a repeat node.
HashMap<Integer, Integer> repeat_node_ids_n_counts = pp.getRepeatNodesAndCounts_ignoreLastNode();
// assign each repeat node to the path that contains it as a repeat the greatest number of occurrences.
if (repeat_node_ids_n_counts.size() > 0) {
debugMes("repeat_node_ids_n_counts: " + repeat_node_ids_n_counts + " , pp: " + pp + ", counts: " + pp_n_counts.get(pp), 14);
for (Integer node_id : repeat_node_ids_n_counts.keySet()) {
Integer count = repeat_node_ids_n_counts.get(node_id);
if ( (! repeat_node_id_to_max_repeat_count.containsKey(node_id))
||
repeat_node_id_to_max_repeat_count.get(node_id) < count) {
repeat_node_id_to_longest_path.put(node_id, pp);
repeat_node_id_to_max_repeat_count.put(node_id, count);
}
}
}
}
}
return(repeat_node_id_to_longest_path);
}
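	/**
	 * For X-structure nodes that were not resolved from read evidence, pairs predecessors with successors
	 * in iteration order to infer (predecessor, X-node, successor) triplets and records them in tripletMapper.
	 */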
private static void infer_best_triplets_across_unresolved_Xstructure(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, Boolean> xStructuresResolvedByTriplets,
HashMap<Integer, List<List<Integer>>> tripletMapper) {
for (Integer xstructure_node : xStructuresResolvedByTriplets.keySet()) {
if (! xStructuresResolvedByTriplets.get(xstructure_node)) {
debugMes("Examining unresolved X structure at: " + xstructure_node, 10);
SeqVertex v = getSeqVertex(graph, xstructure_node);
Iterator<SeqVertex> predecessors = graph.getPredecessors(v).iterator();
Iterator<SeqVertex> successors = graph.getSuccessors(v).iterator();
while(predecessors.hasNext()) {
SeqVertex p = predecessors.next();
if (successors.hasNext()) {
SeqVertex s = successors.next();
						List<Integer> triplet = new ArrayList<Integer>();
triplet.add(p.getID());
triplet.add(xstructure_node);
triplet.add(s.getID());
if (! tripletMapper.containsKey(xstructure_node))
tripletMapper.put(xstructure_node, new ArrayList<List<Integer>>());
tripletMapper.get(xstructure_node).add(triplet);
debugMes("INFERRING triplet for UNRESOLVED X STRUCTURE (" + xstructure_node + ") -> " + triplet, 10);
}
}
}
}
}
/*
private static void printGraph (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex v : graph.getVertices()) {
System.out.println("Vertex: " + v.getID() + ", seq: " + v.getName());
}
}
*/
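	/**
	 * Caps the number of PairPaths retained per start node: paths are sorted by read support (descending)
	 * and any beyond max_num_paths_per_start_node are removed.
	 */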
private static void reduce_to_max_paths_per_node(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
int max_num_paths_per_start_node) {
for (Integer start_node : componentReadHash.keySet() ) {
final HashMap<PairPath,Integer> pp_to_counts = componentReadHash.get(start_node);
List<PairPath> pair_paths_list = new ArrayList<PairPath>(pp_to_counts.keySet());
if (pair_paths_list.size() > max_num_paths_per_start_node) {
Collections.sort(pair_paths_list, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
// want most highly supported pairpaths to sort descendingly
int read_support_A = pp_to_counts.get(pp_A);
int read_support_B = pp_to_counts.get(pp_B);
if (read_support_A > read_support_B)
return -1;
else if (read_support_A < read_support_B)
return 1;
else
return 0;
}
});
List<PairPath> to_remove = pair_paths_list.subList(max_num_paths_per_start_node, pair_paths_list.size());
for (PairPath pp : to_remove)
componentReadHash.get(start_node).remove(pp);
}
}
}
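	/**
	 * Debugging report: for each start node, prints its PairPaths sorted by read support, along with
	 * per-node and total PairPath counts.
	 */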
private static void report_pairpath_counts(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
System.out.println("***** PairPath Counts *****");
int count_of_total_pps = 0;
for (Integer start_node : componentReadHash.keySet() ) {
int count_pp_at_node = 0;
String indiv_read_support_text = "";
final HashMap<PairPath,Integer> pp_to_counts = componentReadHash.get(start_node);
debugMes("componentReadHash, start node: " + start_node + " has size: " + pp_to_counts.size(), 12);
List<PairPath> pair_paths_list = new ArrayList<PairPath>(pp_to_counts.keySet());
for (PairPath pp : pair_paths_list) {
//debugMes("CHECKING-A: " + pp, 12);
Integer read_support = componentReadHash.get(start_node).get(pp);
//debugMes("CHECKING-B: " + pp + " has read support: " + read_support, 12);
//debugMes("pp: " + pp + " has read support: " + read_support, 12);
if (read_support == null) {
componentReadHash.get(start_node).put(pp, 1); //FIXME: shouldn't have null entries here.
debugMes("\tERROR: no support for pp: " + pp, 12);
}
}
Collections.sort(pair_paths_list, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
// want most highly supported pairpaths to sort descendingly
int read_support_A = pp_to_counts.get(pp_A);
int read_support_B = pp_to_counts.get(pp_B);
if (read_support_A > read_support_B)
return -1;
else if (read_support_A < read_support_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pair_paths_list) {
int read_support = componentReadHash.get(start_node).get(pp);
indiv_read_support_text += "\t" + pp + " has read support: " + read_support + "\n";
count_pp_at_node++;
}
System.out.println("Node: " + start_node + " has " + count_pp_at_node + " pairpaths stored:\n" + indiv_read_support_text);
count_of_total_pps += count_pp_at_node;
}
System.out.println("## Total number of pairpaths: " + count_of_total_pps);
}
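	/**
	 * Finds PairPaths that still conflict with the DAG (a decreasing node depth or a revisited node),
	 * removes them, fragments them into DAG-consistent pieces, and re-adds each piece as an unpaired
	 * PairPath. Returns the number of reads fragmented.
	 */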
private static int handleRemainingCyclicReads(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int count_of_fractured_reads = 0;
HashMap<PairPath,Integer> readParts = new HashMap<PairPath,Integer>();
for (Integer start_node : componentReadHash.keySet() ) {
List<PairPath> to_purge = new ArrayList<PairPath>();
for (PairPath pp : componentReadHash.get(start_node).keySet()) {
// check for fractured path
boolean fractured = false;
for (List<Integer> path : pp.get_paths()) {
Iterator<Integer> i = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (i.hasNext()) {
Integer node_id = i.next();
SeqVertex v = getSeqVertex(graph, node_id);
Integer depth = v.getNodeDepth();
if (prev_depth > depth || node_visitor.containsKey(node_id)) {
fractured = true;
break;
}
prev_depth = depth;
node_visitor.put(node_id, true);
}
if (fractured)
break;
}
if (fractured) {
to_purge.add(pp);
}
else {
// double check there's no cycle here:
if (pp.max_count_occurrence_individual_node_in_path(pp) > 1) {
throw new RuntimeException("Error, path:" + pp + " involves an undetected cycle");
}
}
}
if (! to_purge.isEmpty()) {
for (PairPath pp : to_purge) {
count_of_fractured_reads++;
debugMes("DAG_CONFLICTING_READ_FRAGMENTED: " + pp, 10);
componentReadHash.get(start_node).remove(pp);
List<List<Integer>> parts = fragment_DAG_conflicting_pairpath(pp, graph);
for (List<Integer> read_part : parts) {
PairPath part_pairpath = new PairPath(read_part);
if (readParts.containsKey(part_pairpath)) {
readParts.put(part_pairpath, readParts.get(part_pairpath) + 1);
}
else {
readParts.put(part_pairpath, new Integer(1));
}
}
}
}
}
// add the fragments back in.
for (PairPath pp : readParts.keySet()) {
Integer first_id = pp.getFirstID();
if (! componentReadHash.containsKey(first_id)) {
componentReadHash.put(first_id, new HashMap<PairPath,Integer>());
}
if (! componentReadHash.get(first_id).containsKey(pp)) {
componentReadHash.get(first_id).put(pp, 1);
}
else {
componentReadHash.get(first_id).put(pp, componentReadHash.get(first_id).get(pp)+1);
}
}
return count_of_fractured_reads;
}
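/**
 * Splits a DAG-conflicting PairPath into sub-paths, breaking whenever the node depth
 * decreases or a node is revisited within the same path. Each returned list of node ids
 * becomes a separate read fragment.
 */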
private static List<List<Integer>> fragment_DAG_conflicting_pairpath(
PairPath pp, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
List<List<Integer>> read_parts = new ArrayList<List<Integer>>();
List<Integer> part = new ArrayList<Integer>();
List<List<Integer>> node_depths_tracker = new ArrayList<List<Integer>>();
for (List<Integer> path : pp.get_paths()) {
if (path.isEmpty())
continue;
ArrayList<Integer> node_depths_list = new ArrayList<Integer>();
Iterator<Integer> it = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (it.hasNext()) {
Integer node_id = it.next();
SeqVertex v = getSeqVertex(graph, node_id);
if (v._node_depth < prev_depth || node_visitor.containsKey(node_id)) {
// problem...
// fracture here.
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
}
node_depths_list.add(v._node_depth);
prev_depth = v._node_depth;
node_visitor.put(node_id, true);
part.add(node_id);
}
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
node_depths_tracker.add(node_depths_list);
}
debugMes("FRACTURED pairpath: " + pp + " with node_depths: " + node_depths_tracker + " into " + read_parts.size() + ": " + read_parts, 10);
return read_parts;
}
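/**
 * PASA-style assembly over the full set of PairPaths: sorts PairPaths by node depth,
 * removes containments and transitivity-breaking "uncertain" entries, builds the
 * compatibility DAG, runs the left-to-right trellis to extract the highest scoring path,
 * and then nucleates additional paths (using the right-to-left trellis) on PairPaths not
 * yet represented, prioritized by read support.
 * @return reconstructed transcript paths (node id lists) mapped to placeholder Pair(1,1) values
 */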
private static HashMap<List<Integer>, Pair<Integer>> pasafly(
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper) {
debugMes("Beginning PasaFly",10);
PasaVertex.max_top_paths_to_store = TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_PASA_NODE;
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsSortedList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
// check first node
// use node depth in graph
// check first node
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// if here,
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsSortedList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsSortedList) {
debugMes("\t" + p, 15);
}
}
ArrayList<PasaVertex> pasaVerticesSortedList = new ArrayList<PasaVertex>();
for (PairPath pp : pairPathsSortedList) { // already sorted
int count = pairPathToReadSupport.get(pp);
pasaVerticesSortedList.add(new PasaVertex(pp, count));
}
PasaVertex [] pasaVerticesSortedArr = pasaVerticesSortedList.toArray(new PasaVertex[pasaVerticesSortedList.size()]);
PairPath[] pairPathsSortedArr = pairPathsSortedList.toArray(new PairPath[pairPathsSortedList.size()]);
// EXAMINE CONTAINMENTS
// init
ArrayList<PairPath> pairPathsContainmentsRemoved = new ArrayList<PairPath>(pairPathsSortedList);
ArrayList<PasaVertex> pasaVerticesContainmentsRemoved = new ArrayList<PasaVertex>(pasaVerticesSortedList);
debugMes("Assigning pairpath containments.", 10);
List<Integer> containments = assignPasaPairPathContainments(graph, dijkstraDis, pasaVerticesSortedArr); // vertices updated to include containment info.
debugMes("REMOVING CONTAINMENTS: " + containments, 10);
for(int i = 0; i < containments.size(); i++)
{
pasaVerticesContainmentsRemoved.remove(pasaVerticesSortedArr[containments.get(i)]);
pairPathsContainmentsRemoved.remove(pairPathsSortedArr[containments.get(i)]);
}
// EXAMINE UNCERTAINTIES THAT BREAK TRANSITIVITY
HashSet<Integer> vertices = extract_vertex_list_from_PairPaths(pairPathsContainmentsRemoved);
PairPath [] pairPathsContainmentsRemovedArr = pairPathsContainmentsRemoved.toArray(new PairPath[pairPathsContainmentsRemoved.size()]);
PasaVertex [] pasaVerticesContainmentsRemovedArr = pasaVerticesContainmentsRemoved.toArray(new PasaVertex[pasaVerticesContainmentsRemoved.size()]);
boolean[][] dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsContainmentsRemovedArr);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("PASA Consistency DAG
System.out.println(boolean_matrix_toString(dag));
}
ArrayList<PairPath> pairPathsUncertainRemoved = new ArrayList<PairPath>(pairPathsContainmentsRemoved);
ArrayList<PasaVertex> pasaVerticesUncertainRemoved = new ArrayList<PasaVertex>(pasaVerticesContainmentsRemoved);
debugMes("Identifying uncertain entries that break transitivities.", 10);
// identify and remove uncertain entries (those that break transitive compatibility relationships)
ArrayList<Integer> uncertain = getUncertainRequireOverlap(dag, pairPathsContainmentsRemovedArr, graph, dijkstraDis);
debugMes("Uncertain indices include: " + uncertain, 10);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pasaVerticesUncertainRemoved.remove(pasaVerticesContainmentsRemovedArr[uncertain.get(i)]);
pairPathsUncertainRemoved.remove(pairPathsContainmentsRemovedArr[uncertain.get(i)]);
}
HashSet<Integer> vertices_after_removed_uncertainties = extract_vertex_list_from_PairPaths(pairPathsUncertainRemoved);
if (vertices_after_removed_uncertainties.size() < vertices.size()) {
int missing_node_count = vertices.size() - vertices_after_removed_uncertainties.size();
debugMes("WARNING, MISSING: " + missing_node_count + " of " + vertices.size() + " nodes after removing uncertainties", 10);
for (Integer v : vertices) {
if (! vertices_after_removed_uncertainties.contains(v)) {
debugMes("WARNING, MISSING NODE: After removing uncertainties, missing node from graph: " + v, 10);
}
}
}
PasaVertex[] pasaVerticesUncertainRemovedArr = pasaVerticesUncertainRemoved.toArray(new PasaVertex[pasaVerticesUncertainRemoved.size()]);
PairPath[] pairPathsUncertainRemovedArr = pairPathsUncertainRemoved.toArray(new PairPath[pairPathsUncertainRemoved.size()]);
//print pair paths
debugMes("PAIR PATHS remaining after uncertainties removed
for(int i = 0; i < pairPathsUncertainRemovedArr.length; i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathsUncertainRemovedArr[i], 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsUncertainRemovedArr); // already identified containments
//print dag
debugMes("DAG after uncertainties removed
//2.2 check transitivity
if(!checkTransitivityRequireOverlap(dag, pairPathsUncertainRemovedArr, graph, dijkstraDis))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
else {
debugMes("Transitivity of compatibility graph validates.", 10);
}
// track the final vertex identifiers
HashMap<PairPath,Integer> finalVertexPositions = new HashMap<PairPath,Integer>();
for (int i = 0; i < pairPathsUncertainRemovedArr.length; i++) {
finalVertexPositions.put(pairPathsUncertainRemovedArr[i], i);
}
debugMes("build_PASA_trellis_left_to_right()", 10);
build_PASA_trellis_left_to_right(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis,
pairPathToReadSupport, tripletMapper, extendedTripletMapper);
// get highest scoring path:
debugMes("Identifying highest scoring PASA path.", 10);
ScoredPath best = null;
for (int i = 0; i < pasaVerticesUncertainRemovedArr.length; i++) {
ScoredPath sp = pasaVerticesUncertainRemovedArr[i].get_highest_scoring_fromPath();
if (best == null || sp.score > best.score) {
best = sp;
}
}
debugMes("Best score: " + best.score + ", containing entries: " + best.paths, 10);
List<Integer> best_path_vertex_list = Path.collapse_compatible_pair_paths(best.paths);
HashMap<List<Integer>, Pair<Integer>> final_transcripts = new HashMap<List<Integer>, Pair<Integer>>();
final_transcripts.put(best_path_vertex_list, new Pair<Integer>(1,1));
// remove those pairpaths included in the best path
List<PairPath> toRemove = new ArrayList<PairPath>();
for (PairPath pp : finalVertexPositions.keySet()) {
if (pp.isCompatibleAndContainedBySinglePath(best_path_vertex_list))
toRemove.add(pp);
}
for (PairPath pp : toRemove)
finalVertexPositions.remove(pp);
toRemove.clear();
// Now, extract the top combined path that contains each missing transcript
// Prioritize according to paired path support, and break ties according to representing the most additional missing entries.
debugMes("build_PASA_trellis_right_to_left()", 10);
build_PASA_trellis_right_to_left(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis, pairPathToReadSupport, tripletMapper);
List<PairPath> unrepresented_pairpaths = new ArrayList<PairPath>(finalVertexPositions.keySet());
final HashMap<PairPath,Integer>pairPathToReadSupportFixed = new HashMap<PairPath,Integer>(pairPathToReadSupport);
Collections.sort(unrepresented_pairpaths, new Comparator<PairPath>() {
public int compare(PairPath a, PairPath b) {
int count_a = pairPathToReadSupportFixed.get(a);
int count_b = pairPathToReadSupportFixed.get(b);
if (count_a == count_b) {
return(0);
}
else if (count_a > count_b) {
return(-1);
}
else {
return(1);
}
}
});
Iterator<PairPath> it = unrepresented_pairpaths.iterator();
while (it.hasNext() && ! finalVertexPositions.isEmpty()) {
PairPath pp = it.next();
if (! finalVertexPositions.containsKey(pp)) {
// recovered in a previous round
continue;
}
debugMes("Nucleating next pasa path on PP: " + pp + ", having read support: " + pairPathToReadSupportFixed.get(pp), 10);
// get the highest scoring chain that contains pp
int index = finalVertexPositions.get(pp);
List<ScoredPath> sp_from = pasaVerticesUncertainRemovedArr[index].get_all_highest_scoring_fromPath();
List<ScoredPath> sp_to = pasaVerticesUncertainRemovedArr[index].get_all_highest_scoring_toPath();
debugMes("Best combined partial paths containing pairpath: " + pp + " include: (From): "
+ sp_from + ", (To): " + sp_to, 10);
List<PairPath> combined_pp_list = new ArrayList<PairPath>();
if (sp_from.size() > 1 || sp_to.size() > 1) {
// find the combination that covers the most currently unrepresented pairpaths
combined_pp_list = find_paired_paths_with_greatest_map_support(sp_from, sp_to, finalVertexPositions);
}
else {
// single path each.
combined_pp_list.addAll(sp_from.get(0).paths);
combined_pp_list.addAll(sp_to.get(0).paths);
}
List<Integer> combined_path_vertex_list = Path.collapse_compatible_pair_paths(combined_pp_list);
final_transcripts.put(combined_path_vertex_list, new Pair<Integer>(1,1));
// remove those pairpaths included in the best path
for (PairPath p : finalVertexPositions.keySet()) {
if (p.isCompatibleAndContainedBySinglePath(combined_path_vertex_list))
toRemove.add(p);
}
for (PairPath p : toRemove)
finalVertexPositions.remove(p);
toRemove.clear();
}
return(final_transcripts);
}
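/**
 * Variant of pasafly() that keeps only a single top-scoring path per PASA vertex and
 * iterates: each round rebuilds the compatibility DAG, extracts the single best scoring
 * path, records it as a transcript, and removes all PairPaths compatibly contained by
 * that path, repeating until no PairPaths remain.
 */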
private static HashMap<List<Integer>, Pair<Integer>> pasaflyunique (
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis, HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper) {
debugMes("Beginning PasaFlyUnique",10);
PasaVertex.max_top_paths_to_store = 1; //TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_PASA_NODE;
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsSortedList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
// check first node
// use node depth in graph
// check first node
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsSortedList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsSortedList) {
debugMes("\t" + p, 15);
}
}
// start assembling
HashMap<List<Integer>, Pair<Integer>> final_transcripts = new HashMap<List<Integer>, Pair<Integer>>();
int round = 0;
while (! pairPathsSortedList.isEmpty()) {
round++;
debugMes("\n\nPasaFlyUnique, Round: " + round, 10);
ArrayList<PasaVertex> pasaVerticesSortedList = new ArrayList<PasaVertex>();
for (PairPath pp : pairPathsSortedList) { // already sorted
int count = pairPathToReadSupport.get(pp);
pasaVerticesSortedList.add(new PasaVertex(pp, count));
}
PasaVertex [] pasaVerticesSortedArr = pasaVerticesSortedList.toArray(new PasaVertex[pasaVerticesSortedList.size()]);
PairPath[] pairPathsSortedArr = pairPathsSortedList.toArray(new PairPath[pairPathsSortedList.size()]);
// EXAMINE CONTAINMENTS
// init
ArrayList<PairPath> pairPathsContainmentsRemoved = new ArrayList<PairPath>(pairPathsSortedList);
ArrayList<PasaVertex> pasaVerticesContainmentsRemoved = new ArrayList<PasaVertex>(pasaVerticesSortedList);
debugMes("Assigning pairpath containments.", 10);
List<Integer> containments = assignPasaPairPathContainments(graph, dijkstraDis, pasaVerticesSortedArr); // vertices updated to include containment info.
debugMes("REMOVING CONTAINMENTS: " + containments, 10);
for(int i = 0; i < containments.size(); i++)
{
pasaVerticesContainmentsRemoved.remove(pasaVerticesSortedArr[containments.get(i)]);
pairPathsContainmentsRemoved.remove(pairPathsSortedArr[containments.get(i)]);
}
// EXAMINE UNCERTAINTIES THAT BREAK TRANSITIVITY
PairPath [] pairPathsContainmentsRemovedArr = pairPathsContainmentsRemoved.toArray(new PairPath[pairPathsContainmentsRemoved.size()]);
PasaVertex [] pasaVerticesContainmentsRemovedArr = pasaVerticesContainmentsRemoved.toArray(new PasaVertex[pasaVerticesContainmentsRemoved.size()]);
boolean[][] dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsContainmentsRemovedArr);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("PASA Consistency DAG
System.out.println(boolean_matrix_toString(dag));
}
ArrayList<PairPath> pairPathsUncertainRemoved = new ArrayList<PairPath>(pairPathsContainmentsRemoved);
ArrayList<PasaVertex> pasaVerticesUncertainRemoved = new ArrayList<PasaVertex>(pasaVerticesContainmentsRemoved);
debugMes("Identifying uncertain entries that break transitivities.", 10);
// identify and remove uncertain entries (those that break transitive compatibility relationships)
ArrayList<Integer> uncertain = getUncertainRequireOverlap(dag, pairPathsContainmentsRemovedArr, graph, dijkstraDis);
debugMes("Uncertain indices include: " + uncertain, 10);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pasaVerticesUncertainRemoved.remove(pasaVerticesContainmentsRemovedArr[uncertain.get(i)]);
pairPathsUncertainRemoved.remove(pairPathsContainmentsRemovedArr[uncertain.get(i)]);
}
PasaVertex[] pasaVerticesUncertainRemovedArr = pasaVerticesUncertainRemoved.toArray(new PasaVertex[pasaVerticesUncertainRemoved.size()]);
PairPath[] pairPathsUncertainRemovedArr = pairPathsUncertainRemoved.toArray(new PairPath[pairPathsUncertainRemoved.size()]);
//print pair paths
debugMes("PAIR PATHS remaining after uncertainties removed
for(int i = 0; i < pairPathsUncertainRemovedArr.length; i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathsUncertainRemovedArr[i], 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsUncertainRemovedArr); // already identified containments
//print dag
debugMes("DAG after uncertainties removed
// examine neighboring DAG
for (int i = 0; i < pairPathsUncertainRemovedArr.length-1; i++) {
if (! dag[i][i+1]) {
debugMes("NeighborDagCheck: PairPath: [" + i + "] "+ pairPathsUncertainRemovedArr[i]
+ "\n\tnot compatible with: [" + (i+1) + "] " + pairPathsUncertainRemovedArr[i+1], 10);
}
}
//2.2 check transitivity
if(!checkTransitivityRequireOverlap(dag, pairPathsUncertainRemovedArr, graph, dijkstraDis))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
else {
debugMes("Transitivity of compatibility graph validates.", 10);
}
debugMes("build_PASA_trellis_left_to_right()", 10);
build_PASA_trellis_left_to_right(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis, pairPathToReadSupport, tripletMapper, extendedTripletMapper);
// get highest scoring path:
debugMes("Identifying highest scoring PASA path.", 10);
ScoredPath best = null;
for (int i = 0; i < pasaVerticesUncertainRemovedArr.length; i++) {
ScoredPath sp = pasaVerticesUncertainRemovedArr[i].get_highest_scoring_fromPath();
if (best == null || sp.score > best.score) {
best = sp;
}
}
debugMes("Best score: " + best.score + ", containing entries: " + best.paths, 10);
List<Integer> best_path_vertex_list = Path.collapse_compatible_pair_paths(best.paths);
final_transcripts.put(best_path_vertex_list, new Pair<Integer>(1,1));
debugMes("Reconstructed path is: " + best_path_vertex_list, 10);
// remove those pairpaths included in the best path
List<PairPath> toRemove = new ArrayList<PairPath>();
for (PairPath pp : pairPathsSortedList) {
if (pp.isCompatibleAndContainedBySinglePath(best_path_vertex_list)) {
toRemove.add(pp);
debugMes("compatibly_contained_by_reconstructed_path: " + pp, 10);
}
else {
debugMes("NotCompatibleRetainedForNextRound: " + pp, 10);
}
}
for (PairPath pp : toRemove)
pairPathsSortedList.remove(pp);
}
return(final_transcripts);
}
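/**
 * Among all combinations of one "from" and one "to" scored partial path, picks the pair
 * whose union covers the greatest number of PairPaths still present in finalVertexPositions
 * (i.e. not yet represented by an output transcript), and returns the combined PairPath list.
 */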
private static List<PairPath> find_paired_paths_with_greatest_map_support(
List<ScoredPath> sp_from, List<ScoredPath> sp_to,
HashMap<PairPath, Integer> finalVertexPositions) {
int best_count = -1;
List<PairPath> combined = new ArrayList<PairPath>();
for (ScoredPath spA : sp_from) {
HashSet<PairPath> hA = new HashSet<PairPath>();
for (PairPath h : spA.paths) {
if (finalVertexPositions.containsKey(h)) {
hA.add(h);
}
}
for (ScoredPath spB : sp_to) {
HashSet<PairPath> hB = new HashSet<PairPath>(hA);
for (PairPath b : spB.paths) {
if (finalVertexPositions.containsKey(b)) {
hB.add(b);
}
}
if (hB.size() > best_count) {
best_count = hB.size();
combined.clear();
combined.addAll(spA.paths);
combined.addAll(spB.paths);
}
}
}
return(combined);
}
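/**
 * Dynamic-programming sweep from left to right over the depth-sorted PASA vertices:
 * for each vertex i, tries to extend the stored "from" paths of every compatible earlier
 * vertex j that shares at least one node with i, scoring extensions by accumulated read
 * support plus containment counts and rejecting extensions that violate triplet support.
 */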
private static void build_PASA_trellis_left_to_right(
PasaVertex[] pasaVerticesArr,
boolean[][] dag,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
Map<PairPath, Integer> pairPathToReadSupport, HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper
) {
for (int i = 1; i < pasaVerticesArr.length; i++) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = i - 1; j >= 0; j--) {
PasaVertex iJ = pasaVerticesArr[j];
if (! dag[j][i]) {
// see if too far apart
if (twoPairPathsAreTooFarAwayInGraph(iV.pp, iJ.pp, graph)) {
if (FAST_PASA)
break;
}
else {
continue; // must conflict
}
}
// require that they share a node in common
if (! iJ.pp.haveAnyNodeInCommon(iV.pp))
continue;
// see if we can extend paths in iJ to include pairpath represented by iV
final List<ScoredPath> sp_list = iJ.get_fromPaths();
for (ScoredPath sp : sp_list) {
// is there sufficient read support for extending this path?
//debugMes("\nnote, sp_list is of size: " + sp_list.size(), 10);
//debugMes("\nAdding path list to [iV] from [iJ] " + sp.paths, 10);
List<PairPath> extendedList = new ArrayList<PairPath>();
extendedList.addAll(sp.paths);
extendedList.add(iV.pp);
if (! violates_triplet_support(tripletMapper, extendedList)) {
iV.push_fromPaths(new ScoredPath(extendedList, (sp.score + iV.readSupport + iV.num_contained)));
sp.path_extended = true;
}
}
}
}
}
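/**
 * Builds the local predecessor/successor relationships implied by the PairPaths in
 * extendedList and, for every interior node with triplet constraints, checks that the
 * (prev, node, next) triplet is among the supported triplets in tripletMapper.
 * @return true if any triplet constraint is violated by the extended path list
 */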
private static boolean violates_triplet_support(
HashMap<Integer, List<List<Integer>>> tripletMapper,
List<PairPath> extendedList) {
HashMap<Integer,Integer> prev_node = new HashMap<Integer,Integer>();
HashMap<Integer,Integer> next_node = new HashMap<Integer,Integer>();
for (PairPath pp : extendedList) {
Integer prev = -1;
for (List<Integer> path : pp.get_paths()) {
for (Integer node : path) {
if (prev >= 0) {
prev_node.put(node, prev);
next_node.put(prev, node);
}
prev = node;
}
}
}
for (Integer center : prev_node.keySet()) {
if (tripletMapper.containsKey(center) && next_node.containsKey(center)) {
Integer left = prev_node.get(center);
Integer right = next_node.get(center);
List<Integer> curr_triplet = new ArrayList<Integer>();
curr_triplet.add(left);
curr_triplet.add(center);
curr_triplet.add(right);
List<List<Integer>> triplets = tripletMapper.get(center);
if (! tripletSupported(triplets, curr_triplet)) {
debugMes("PASA TRIPLET CHECK WARNING: triplet: " + curr_triplet + " violates available triplets: " + triplets + " and so path list is not valid: " + extendedList, 15);
return(true); // yes, violates
}
}
}
return false; // no violation found.
}
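/**
 * Mirror image of build_PASA_trellis_left_to_right(): sweeps right to left, extending
 * the "to" paths of compatible downstream vertices with the current vertex's PairPath,
 * again requiring a shared node and triplet-support consistency.
 */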
private static void build_PASA_trellis_right_to_left (
PasaVertex[] pasaVerticesArr,
boolean[][] dag,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
Map<PairPath, Integer> pairPathToReadSupport, HashMap<Integer, List<List<Integer>>> tripletMapper
) {
for (int i = pasaVerticesArr.length-2; i >= 0; i--) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = i +1; j < pasaVerticesArr.length; j++) {
PasaVertex iJ = pasaVerticesArr[j];
if (! dag[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(iV.pp, iJ.pp, graph)) {
if (FAST_PASA)
break;
}
else {
continue; // must conflict
}
}
// require that they share a node in common
if (! iJ.pp.haveAnyNodeInCommon(iV.pp))
continue;
// see if we can extend paths in iJ to include pairpath represented by iV
final List<ScoredPath> sp_list = iJ.get_toPaths();
for (ScoredPath sp : sp_list) {
// is there sufficient read support for extending this path?
// * implement later on * //
List<PairPath> extendedList = new ArrayList<PairPath>();
extendedList.addAll(sp.paths);
extendedList.add(iV.pp);
if (! violates_triplet_support(tripletMapper, extendedList)) {
iV.push_toPaths(new ScoredPath(extendedList, (sp.score + iV.readSupport + iV.num_contained)));
}
}
}
}
}
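/**
 * Renders a boolean matrix as rows of 0/1 values for debug output; matrices larger than
 * 200x200 are not rendered.
 */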
private static String boolean_matrix_toString (boolean [][] dag) {
//print dag
if (dag.length > 200) {
debugMes("dag matrix too large to print in a useful way.", 10);
return("");
}
String dag_text = "";
for(int i = 0; i < dag.length; i++)
{
for(int j = 0; j < dag.length; j++)
{
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\r[" + i + "," + j + "] " );
dag_text += ((dag[i][j]) ? 1: 0) + " ";
}
dag_text += "\n";
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10)
System.err.println();
return(dag_text);
}
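/**
 * Marks every PASA vertex whose PairPath is compatible with and contained by another
 * vertex's PairPath; the containing vertex accumulates the contained read support in
 * num_contained.
 * @return indices (into pasaVerticesArr) of the contained vertices, to be removed by the caller
 */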
private static List<Integer> assignPasaPairPathContainments(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PasaVertex[] pasaVerticesArr
) {
HashMap<Integer,Boolean> containments = new HashMap<Integer,Boolean>();
for (int i = 0; i < pasaVerticesArr.length; i++) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = 0; j < pasaVerticesArr.length; j++) {
if (i==j)
continue;
PasaVertex iJ = pasaVerticesArr[j];
if (iJ.pp.haveAnyNodeInCommon(iV.pp) && (iV.pp.isCompatibleAndContainedByPairPath(iJ.pp, graph, dijkstraDis))) {
iJ.num_contained += iV.readSupport;
containments.put(i, true);
debugMes("Containment: " + iV.pp + " is contained by: " + iJ.pp, 10);
}
}
}
List<Integer> containment_list = new ArrayList<Integer>(containments.keySet());
return(containment_list);
}
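/**
 * For every final path, collects all PairPaths from the read hash that are compatible
 * with and contained by that path, along with their read support.
 * @return final path => (contained PairPath => read support)
 */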
private static HashMap<List<Integer>, HashMap<PairPath, Integer>> assignCompatibleReadsToPaths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
debugMes("\n\n## assignCompatibleReadsToPaths()", 20);
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathToContainedReads = new HashMap<List<Integer>, HashMap<PairPath, Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
for (HashMap<PairPath,Integer> read_map : combinedReadHash.values()) {
for (PairPath p : read_map.keySet()) {
if (p.isCompatibleAndContainedBySinglePath(path)) {
if (! pathToContainedReads.containsKey(path)) {
pathToContainedReads.put(path, new HashMap<PairPath, Integer>());
}
debugMes("assignCompatibleReadsToPaths: " + p + " is compatible with " + path, 20);
pathToContainedReads.get(path).put(p, read_map.get(p));
}
else {
debugMes("assignCompatibleReadsToPaths: " + p + " is NOT compatible with " + path, 20);
}
}
}
}
return (pathToContainedReads);
}
private static boolean containsNull(PairPath pp1)
{
return pp1.getFirstID() == null;
}
/**
* Given two pair paths, determines consistency/compatibility of two pair paths for the partial order.
* @param pp1
* @param pp2
* @param graph
* @param dijkstraDis
* @return 0 if they are not consistent, 1 if pp1 comes before pp2, -1 if pp2 comes before pp1
*/
private static int isConsistent(PairPath pp1, PairPath pp2, DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
pp1 = pp1.trimSinkNodes();
pp2 = pp2.trimSinkNodes();
debugMes("isConsistent? " + pp1 + pp2, 15);
if (pp1.equals(pp2)) { return (0); }
// If have nodes in common, require compatibility in overlapping path parts:
if(pp1.haveAnyNodeInCommon(pp2))
{
debugMes("\tHave nodes in common.", 15);
if(!pp1.isCompatible(pp2))
{
debugMes("\tNot compatible.", 15);
return 0;
}
}
// iterate through every node in pp1 and check if it's consistent
List<Integer> path1 = pp1.getPath1();
// see that pp1:path1 nodes are consistent with pp2
Iterator<Integer> itr = path1.iterator();
for(int i = 0; i < path1.size(); i++)
{
Integer n = (Integer)(itr.next());
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " is not consistent with node: " + n, 15);
return 0;
}
}
if(pp1.hasSecondPath())
{
// see if pp1:path2 are consistent with pp2
List<Integer> path2 = pp1.getPath2();
itr = path2.iterator();
for(int i = 0; i < path2.size(); i++)
{
Integer n = (Integer)(itr.next());
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " second path is not consistent with node: " + n, 15);
return 0;
}
}
}
// require that v1 comes before v2 in the partial order
SeqVertex v1 = getSeqVertex(graph, pp1.getFirstID());
SeqVertex v2 = getSeqVertex(graph, pp2.getFirstID());
if (v1.equals(v2)) {
// check their last nodes.
SeqVertex lv1 = getSeqVertex(graph, pp1.getLastID());
SeqVertex lv2 = getSeqVertex(graph, pp2.getLastID());
if (lv1.equals(lv2)) {
// must have same first and same last node, so potential differences in-between but otherwise compatible afaict
int consistent = (pp1.hashCode() < pp2.hashCode()) ? 1 : 0;
debugMes("\tfirst vertex node: " + pp1.getFirstID() + " and last node " + pp1.getLastID() + " are equal, so defining consistency based on hashcode comparison.", 15);
return(consistent); // just use consistent ordering to define proper DAG connectability
}
else {
// first node equivalent, last node not equivalent
int ancestral = SeqVertex.isAncestral(lv1, lv2, dijkstraDis);
debugMes("\tfirst nodes same: " + pp1.getFirstID() + ", but last nodes are different: " +
pp1.getLastID() + " vs. " + pp2.getLastID() + ", and SeqVertex.isAncestral = " + ancestral, 15);
return( (ancestral>0) ? 1:0);
}
}
else {
// first node not equivalent.
int ancestral = SeqVertex.isAncestral(v1,v2,dijkstraDis);
debugMes("\tpairpaths are compatible, examining relative orientation of first vertices: "
+ v1.getID() + " vs. " + v2.getID() + ", ancestral = " + ancestral, 15);
return( (ancestral > 0) ? 1 : 0);
}
}
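/**
 * Boolean analogue of isConsistent(): returns true only when the two PairPaths share at
 * least one node, are mutually compatible over the overlap, and pp1's first/last vertices
 * precede (or equal) pp2's in the partial order. Containments of pp2 within pp1 are
 * treated as compatible here and filtered out separately.
 */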
private static boolean isOverlappingAndDirectionallyConsistent(PairPath pp1, PairPath pp2, DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
pp1 = pp1.trimSinkNodes();
pp2 = pp2.trimSinkNodes();
debugMes("isOverlappingAndDirectionallyConsistent? " + pp1 + pp2, 15);
if (pp1.equals(pp2)) { return (true); }
// If have nodes in common, require compatibility in overlapping path parts:
if(pp1.haveAnyNodeInCommon(pp2))
{
debugMes("\tHave nodes in common.", 15);
if(!pp1.isCompatible(pp2))
{
debugMes("\tNot compatible.", 15);
return false;
}
// DO allow containments to be compatible
if (pp2.isCompatibleAndContainedByPairPath(pp1, graph, dijkstraDis)) {
debugMes("\tpp2 isCompatibleAndContainedBy pp1, setting true (containments removed later on).", 15);
return(true);
}
}
else {
debugMes("\tNo node overlap, so not compatible.", 15);
return(false);
}
// iterate through every node in pp1 and check if it's consistent
List<Integer> path1 = pp1.getPath1();
// see that pp1:path1 nodes are consistent with pp2
Iterator<Integer> itr = path1.iterator();
for(int i = 0; i < path1.size(); i++)
{
Integer n = itr.next();
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " is not consistent with node: " + n, 15);
return false;
}
}
if(pp1.hasSecondPath())
{
// see if pp1:path2 are consistent with pp2
List<Integer> path2 = pp1.getPath2();
itr = path2.iterator();
for(int i = 0; i < path2.size(); i++)
{
Integer n = (Integer)(itr.next());
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " second path is not consistent with node: " + n, 15);
return false;
}
}
}
// require that v1 comes before v2 in the partial order
SeqVertex v1 = getSeqVertex(graph, pp1.getFirstID());
SeqVertex v2 = getSeqVertex(graph, pp2.getFirstID());
if (v1.equals(v2)) {
// check their last nodes.
SeqVertex lv1 = getSeqVertex(graph, pp1.getLastID());
SeqVertex lv2 = getSeqVertex(graph, pp2.getLastID());
if (lv1.equals(lv2)) {
// must have same first and same last node, so potential differences in-between but otherwise compatible afaict
return(true);
}
else {
// first node equivalent, last node not equivalent
int ancestral = SeqVertex.isAncestral(lv1, lv2, dijkstraDis);
debugMes("\tfirst nodes same: " + pp1.getFirstID() + ", but last nodes are different: " +
pp1.getLastID() + " vs. " + pp2.getLastID() + ", and SeqVertex.isAncestral = " + ancestral, 15);
return(ancestral>0);
}
}
else {
// first node not equivalent.
int ancestral = SeqVertex.isAncestral(v1,v2,dijkstraDis);
debugMes("\tpairpaths are compatible, examining relative orientation of first vertices: "
+ v1.getID() + " vs. " + v2.getID() + ", ancestral = " + ancestral, 15);
return(ancestral > 0);
}
}
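/**
 * Verifies transitivity of the 0/1 consistency matrix: for every triplet (i, j, k),
 * i->j and j->k must imply i->k. Logs the offending triplet (with original indices) and
 * returns false on the first violation.
 */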
private static boolean checkTransitivity(int[][] adj, PairPath[] pairPathArr, HashMap<PairPath,Integer> pairPathToIntVal)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length; i++)
{
for(int j = 0; j < adj.length; j++)
{
for(int k = 0; k < adj.length; k++)
{
if (adj[i][j] == 1 && adj[j][k] == 1 && adj[i][k] == 0) {
Integer orig_i = pairPathToIntVal.get(pairPathArr[i]);
Integer orig_j = pairPathToIntVal.get(pairPathArr[j]);
Integer orig_k = pairPathToIntVal.get(pairPathArr[k]);
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + orig_i + "," + orig_j + "," + orig_k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
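/** Boolean-matrix overload of checkTransitivity(); identical logic over a boolean adjacency matrix. */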
private static boolean checkTransitivity(boolean[][] adj, PairPath[] pairPathArr, HashMap<PairPath,Integer> pairPathToIntVal)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length; i++)
{
for(int j = 0; j < adj.length; j++)
{
for(int k = 0; k < adj.length; k++)
{
if (adj[i][j] && adj[j][k] && ! adj[i][k]) {
Integer orig_i = pairPathToIntVal.get(pairPathArr[i]);
Integer orig_j = pairPathToIntVal.get(pairPathArr[j]);
Integer orig_k = pairPathToIntVal.get(pairPathArr[k]);
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + orig_i + "," + orig_j + "," + orig_k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
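/**
 * Like checkTransitivity(), but only requires adj[i][k] when pairPathArr[i] and
 * pairPathArr[k] actually share a node; also short-circuits (under FAST_PASA) when
 * incompatible pairs are too far apart in the graph to ever overlap.
 */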
private static boolean checkTransitivityRequireOverlap(
boolean[][] adj,
PairPath[] pairPathArr,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length-2; i++)
{
for(int j = i+1; j < adj.length-1; j++)
{
// see if j is too far away from i, can then go to next i
if (! adj[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph))
if (FAST_PASA)
break;
}
for(int k = j+1; k < adj.length; k++)
{
// see if k is too far away from j, can then go to next j
if (! adj[j][k]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph))
if (FAST_PASA)
break;
}
if (adj[i][j] && adj[j][k] && pairPathArr[i].haveAnyNodeInCommon(pairPathArr[k]) && ! adj[i][k]) {
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + i + "," + j + "," + k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
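/**
 * Collects "uncertain" central entries j that break transitivity of the 0/1 consistency
 * matrix (i is consistent with j and j with k, but i is not consistent with k); the
 * returned indices are removed from the pair path list by the caller.
 */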
private static ArrayList<Integer> getUncertain(int[][] adj, PairPath[] pairPathArr)
{
// DAG: i -> j -> k
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length; i++)
{
if (toRemove.contains(i)) { continue; }
// move j to previous shared first node with i in the list:
int j = i;
if (j > 0 ) {
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
j--;
}
}
for(; j < adj.length; j++)
{
if(toRemove.contains(j))
continue;
// start k at first node sharing the same first ID as j
int k = j;
if (k > 0) {
while (k>0 && pairPathArr[j].getFirstID().equals(pairPathArr[k-1].getFirstID())) {
k--;
}
}
for(; k < adj.length; k++)
{
if(toRemove.contains(k))
continue;
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
if (adj[i][j] == 1 && adj[j][k] == 1)
{
if (adj[i][k] == 0) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
break; // go to next j
}
}
}
}
}
return toRemove;
}
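/** Boolean-matrix overload of getUncertain(); same transitivity-breaking detection over a boolean adjacency matrix. */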
private static ArrayList<Integer> getUncertain(boolean[][] adj, PairPath[] pairPathArr)
{
// DAG: i -> j -> k
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length; i++)
{
if (toRemove.contains(i)) { continue; }
// move j to previous shared first node with i in the list:
int j = i;
if (j > 0 ) {
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
j--;
}
}
for(; j < adj.length; j++)
{
if(toRemove.contains(j))
continue;
// start k at first node sharing the same first ID as j
int k = j;
if (k > 0) {
while (k>0 && pairPathArr[j].getFirstID().equals(pairPathArr[k-1].getFirstID())) {
k--;
}
}
for(; k < adj.length; k++)
{
if(toRemove.contains(k))
continue;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\r[" + i + "," + j + "," + k + "] ");
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
if (adj[i][j] == true && adj[j][k] == true)
{
if (adj[i][k] == false) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
break; // go to next j
}
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return toRemove;
}
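/**
 * Identifies "uncertain" PairPaths: central entries j such that i->j and j->k are
 * compatible but i and k share a node and are not compatible, which would break
 * transitivity of the compatibility DAG. Such entries are returned for removal.
 * Distance-based short-circuiting (FAST_PASA) skips triplets that cannot overlap.
 */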
private static ArrayList<Integer> getUncertainRequireOverlap(
boolean[][] adj,
PairPath[] pairPathArr,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
// DAG: i -> j -> k
int debug_node = 301;
boolean local_debug = false;
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length-2; i++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + "?" + "," + "?" + "]\n" );
if (toRemove.contains(i)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + "?" + "," + "?" + "] Iprev-Captured\n");
continue;
}
boolean tooFar = false;
for(int j = i + 1; j < adj.length-1; j++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + "?" + "]\n");
if(toRemove.contains(j)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + "?" + "] Jprev-Captured\n");
continue;
}
// see if j is too far away from i, can then go to next i
if (! adj[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph))
tooFar = true;
else if (tooFar)
debugMes("CHANGED from too far to within distance again: I:" + pairPathArr[i] + " J:" + pairPathArr[j], 10);
boolean tooFar2 = false;
for(int k = j + 1; k < adj.length; k++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] " +
"[" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]\n" );
if(toRemove.contains(k)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] Kprev-Captured\n");
continue;
}
// see if k is too far away from j, can then go to next j
if (! adj[j][k]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph))
tooFar2 = true;
else if (tooFar2)
debugMes("CHANGED from too far to within distance again: I:" + pairPathArr[i] + " J:" + pairPathArr[j], 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "," + k + "] ");
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
}
if (adj[i][j] == true && adj[j][k] == true)
{
if (pairPathArr[i].haveAnyNodeInCommon(pairPathArr[k]) && adj[i][k] == false) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
if (pairPathArr[j].isCompatibleAndContainedByPairPath(pairPathArr[i]) || pairPathArr[j].isCompatibleAndContainedByPairPath(pairPathArr[k])) {
throw new RuntimeException("ERROR, containment encountered where containments should have been removed.");
}
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] J-Captured\n");
break; // no more need to analyze k in this i-j-k series.
}
else {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] OK\n");
}
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return toRemove;
}
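/**
 * Integer (0/1) compatibility matrix used by cuffMinPaths(): dag[i][j] is set to 1 when
 * isConsistent() says pairPathArr[i] precedes pairPathArr[j]; comparisons for j begin at
 * the first entry sharing i's start node, and the diagonal is temporarily marked
 * compatible and cleared by the caller before bipartite matching.
 */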
private static int[][] getPairPathDAG(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PairPath[] pairPathArr)
{
debugMes("getPairPathDAG:", 10);
int[][] dag = new int[pairPathArr.length][pairPathArr.length];
for (int[] row : dag)
Arrays.fill(row, 0); // init to no connection.
for (int i = 0; i < pairPathArr.length; i++)
{
if (pairPathArr[i].isEmpty())
continue;
//start comparisons to j where j starts at least at the same position as path i.
int j = i;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15 && j>0) {
Integer i_first_id = pairPathArr[i].getFirstID();
Integer jm1_first_id = pairPathArr[j-1].getFirstID();
debugMes("-comparing first IDs for :[" + i +"," + j + "-1]: " + i_first_id + " to " + jm1_first_id, 15);
}
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
Integer i_first_id = pairPathArr[i].getFirstID();
Integer jm1_first_id = pairPathArr[j-1].getFirstID();
debugMes("-comparing first IDs for :[" + i +"," + j + "-1]: " + i_first_id + " to " + jm1_first_id, 15);
}
j--;
}
for (; j < pairPathArr.length; j++)
{
int value;
if (i == j)
value = 1; // make compatible for now. Remove self-compatibility before maximal matching
else {
value = isConsistent(pairPathArr[i], pairPathArr[j], graph, dijkstraDis);
// nope, below doesn't work - maximal matching does require the full set of compatibilities.
//boolean val = isOverlappingAndDirectionallyConsistent(pairPathArr[i], pairPathArr[j], graph, dijkstraDis);
//value = (val) ? 1 : 0;
}
dag[i][j] = value;
debugMes("Comparing node " + i +" " + pairPathArr[i] + " with node " + j +" " + pairPathArr[j] + "Result: " + dag[i][j],15);
debugMes("DAG[" + i + "," + j + "]=" + dag[i][j], 15);
if (j < i && value == 1 && ! pairPathArr[i].getFirstID().equals(pairPathArr[j].getFirstID())) {
debugMes("\tWARNING: ConsistencyListUnordered: [" + i + "," + j + "] " + pairPathArr[i] + pairPathArr[j], 10); // perhaps should be more serious - throw exception?
//throw(new RuntimeException("ERROR: ConsistencyListUnordered: [" + i + "," + j + "] " + pairPathArr[i] + pairPathArr[j]));
}
}
}
return dag;
}
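/**
 * Builds the boolean compatibility matrix over the depth-sorted PairPaths, where
 * dag[i][j] is true when pairPathArr[i] and pairPathArr[j] overlap and are directionally
 * consistent (i before j). Only the upper triangle (j > i) is computed.
 */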
private static boolean[][] getPairPathConsistencyDAG(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PairPath[] pairPathArr)
{
debugMes("getPairPathDAG:", 10);
boolean[][] dag = new boolean[pairPathArr.length][pairPathArr.length];
for (boolean[] row : dag)
Arrays.fill(row, false); // init to no connection.
// i -> j
for (int i = 0; i < pairPathArr.length-1; i++)
{
PairPath pp_i = pairPathArr[i];
boolean tooFar = false;
for (int j = i + 1; j < pairPathArr.length; j++)
{
PairPath pp_j = pairPathArr[j];
boolean compatible = isOverlappingAndDirectionallyConsistent(pp_i, pp_j, graph, dijkstraDis);
dag[i][j] =compatible;
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph) && compatible) {
debugMes("HOW CAN THESE BE TOO FAR AWAY AND STILL COMPATIBLE? " + pp_i + " vs. " + pp_j, 10);
debugMes(report_node_depths(pp_i, graph), 10);
debugMes(report_node_depths(pp_j, graph), 10);
}
if (! compatible) {
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph)) {
tooFar = true;
}
else if (tooFar)
debugMes("NOT_TOO_FAR_AFTER_ALL: [" + i + "," + j + "]", 10);
debugMes("Comparing node " + i +" " + pp_i + " with node " + j +" " + pp_j + "Result: " + compatible,15);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\rDAG[" + i + "," + j + "]=" + dag[i][j]);
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return dag;
}
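/**
 * Debug helper: lists each path of the PairPath together with the node depth of every
 * node, used when diagnosing unexpected compatibility or distance results.
 */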
private static String report_node_depths(PairPath pp,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int path_counter = 0;
String node_depth_text = "";
for (List<Integer> path : pp.get_paths()) {
path_counter++;
node_depth_text += "path pt" + path_counter + ": " + path + "\n";
for ( Integer node_id : path) {
SeqVertex v = getSeqVertex(graph, node_id);
node_depth_text += "\tnode: " + node_id + " depth: " + v._node_depth + "\n";
}
}
return node_depth_text;
}
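/**
 * Greedily extends a chain of vertex ids on both ends: at each step it picks the
 * compatible PairPath with the highest read support that reaches beyond the current
 * terminal node without re-introducing nodes already in the chain, and prepends or
 * appends the new nodes. Extension stops at sink/source ids (negative values).
 */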
private static ArrayList<Integer> extendChain(ArrayList<Integer> extractedVerticesIDs,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Map<PairPath, Integer> pairPathToReadSupp, DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis)
{
debugMes("Extending Chain", 10);
ArrayList<Integer> extractedVerticesIDExtended = new ArrayList<Integer>();
extractedVerticesIDExtended.addAll(extractedVerticesIDs);
Integer firstID = extractedVerticesIDs.get(0);
Integer lastID = extractedVerticesIDs.get(extractedVerticesIDs.size() - 1);
// extend from left:
Integer lastIDVisited = firstID;
boolean canExtend = true;
if(lastIDVisited < 0) // if first ID indicates a source vertex, cannot extend further
canExtend = false;
// Greedily extend from first ID
while(canExtend == true)
{
int best = 0;
List<Integer> best_extension_path = null;
canExtend = false;
firstID = extractedVerticesIDExtended.get(0);
lastIDVisited = firstID;
if(lastIDVisited < 0) // if first ID indicates a source vertex, cannot extend further
break;
for(PairPath p : pairPathToReadSupp.keySet())
{
if(p.isEmpty())
continue;
int support = pairPathToReadSupp.get(p);
p = p.trimSinkNodes();
if (! p.isCompatible(extractedVerticesIDExtended)) { continue; }
if (! p.containsID(lastIDVisited)) { continue; }
List<Integer> extensionPath;
if (p.getPath1().contains(lastIDVisited))
extensionPath = p.getPath1();
else if (p.getPath2().contains(lastIDVisited))
extensionPath = p.getPath2();
else
throw (new RuntimeException("error, pairpath " + p + " is missing required id " + lastIDVisited));
if(extensionPath.get(0).equals(lastIDVisited))
continue; // nothing to extend left with since left-most node is same as current path trying to extend.
// see if encountering nodes already in the list, indicative of a loop
Integer loc_in_list = extensionPath.indexOf(lastIDVisited);
List<Integer> extension_nodes = extensionPath.subList(0, loc_in_list);
if (PairPath.haveAnyNodeInCommon(extension_nodes, extractedVerticesIDExtended))
continue;
// must have a candidate for extension.
if(support > best)
{
best = support;
best_extension_path = extensionPath;
canExtend = true;
}
} // end of testing for extension
if(canExtend == false)
break;
debugMes("Left-extension of : " + extractedVerticesIDExtended + " with " + best_extension_path, 10);
Integer loc_in_list = best_extension_path.indexOf(lastIDVisited);
extractedVerticesIDExtended.addAll(0, best_extension_path.subList(0, loc_in_list));
}
//System.exit(0);
// extend to right
lastIDVisited = lastID;
canExtend = true;
if(lastIDVisited < 0) // if last ID indicates a sink vertex, cannot extend further
canExtend = false;
// Greedily extend from last ID
while(canExtend == true)
{
int best = 0;
List<Integer> best_extension_path = null;
canExtend = false;
lastID = extractedVerticesIDExtended.get(extractedVerticesIDExtended.size()-1);
lastIDVisited = lastID;
if(lastIDVisited < 0) // if last ID indicates a sink vertex, cannot extend further
break;
for(PairPath p : pairPathToReadSupp.keySet())
{
if(p.isEmpty())
continue;
int support = pairPathToReadSupp.get(p);
p = p.trimSinkNodes();
if (! p.isCompatible(extractedVerticesIDExtended)) { continue; }
if (! p.containsID(lastIDVisited)) { continue; }
List<Integer> extensionPath;
if (p.getPath1().contains(lastIDVisited))
extensionPath = p.getPath1();
else if (p.getPath2().contains(lastIDVisited))
extensionPath = p.getPath2();
else
throw (new RuntimeException("error, pairpath " + p + " is missing required id " + lastIDVisited));
if(extensionPath.get(extensionPath.size()-1).equals(lastIDVisited))
continue;
// see if encountering nodes already in the list, indicative of a loop
Integer loc_in_list = extensionPath.indexOf(lastIDVisited);
List<Integer> extension_nodes = extensionPath.subList(loc_in_list+1, extensionPath.size());
if (PairPath.haveAnyNodeInCommon(extension_nodes, extractedVerticesIDExtended))
continue;
// must have a candidate for extension.
if(support > best)
{
best = support;
best_extension_path = extensionPath;
canExtend = true;
}
} // end of testing for extension
if(canExtend == false)
break;
debugMes("Right-extension of : " + extractedVerticesIDExtended + " with " + best_extension_path, 10);
Integer loc_in_list = best_extension_path.indexOf(lastIDVisited);
debugMes("\tloc of " + lastIDVisited + " in best extension list: " + best_extension_path + " is " + loc_in_list, 15);
extractedVerticesIDExtended.addAll(best_extension_path.subList(loc_in_list+1, best_extension_path.size()));
//System.out.println("Path after " + extractedVerticesIDs.toString());
}
return extractedVerticesIDExtended;
}
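/**
 * Cufflinks-style minimum path cover: sorts PairPaths by node depth, removes containments
 * and transitivity-breaking uncertain entries, builds the pairwise consistency DAG, and
 * uses bipartite maximum matching to chain compatible PairPaths into a minimal set of
 * chains, which are then extracted (and later extended) into candidate transcripts.
 */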
private static HashMap<List<Integer>, Pair<Integer>> cuffMinPaths(final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis)
{
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
debugMes("Beginning cuffMinPaths",10);
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsList) {
debugMes("\t" + p, 15);
}
}
HashSet<Integer> vertices = extract_vertex_list_from_PairPaths(pairPathsList);
PairPath[] pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
HashSet<List<Integer>> cuff_input_paths = new HashSet<List<Integer>>();
HashMap<PairPath,Integer> pairPathToIntVal = new HashMap<PairPath,Integer>();
debugMes("All PairPaths sorted by DFS", 10);
for (int i = 0; i < pairPathArr.length; i++) {
pairPathToIntVal.put(pairPathArr[i], i);
debugMes("CuffFly Input PairPath: " + pairPathArr[i] + " <index: " + i + ">", 10);
cuff_input_paths.add(pairPathArr[i].getPath1());
}
//0. remove containments
List<Integer> containments = getContainments(pairPathArr);
for (int i = 0; i < containments.size(); i++) {
pairPathsList.remove(pairPathArr[containments.get(i)]);
}
// refresh after removing containments.
pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
//1. build pair path graph
int[][] dag = getPairPathDAG(graph, dijkstraDis, pairPathArr);
//print dag
debugMes("DAG
for(int i = 0; i < dag.length; i++)
{
String dag_text = "";
for(int j = 0; j < dag.length; j++)
{
dag_text += dag[i][j] + " ";
}
debugMes(dag_text, 10);
}
//2.1 remove uncertain pair paths
ArrayList<Integer> uncertain = getUncertain(dag, pairPathArr);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pairPathsList.remove(pairPathArr[uncertain.get(i)]);
}
HashSet<Integer> vertices_after_removed_uncertainties = extract_vertex_list_from_PairPaths(pairPathsList);
if (vertices_after_removed_uncertainties.size() < vertices.size()) {
int missing_node_count = vertices.size() - vertices_after_removed_uncertainties.size();
debugMes("WARNING, MISSING: " + missing_node_count + " of " + vertices.size() + " nodes after removing uncertainties", 10);
for (Integer v : vertices) {
if (! vertices_after_removed_uncertainties.contains(v)) {
debugMes("WARNING, MISSING NODE: After removing uncertainties, missing node from graph: " + v, 10);
}
}
}
// refresh again, after now removing the uncertain entries.
pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
//print pair paths
		debugMes("PAIR PATHS:", 10);
for(int i = 0; i < pairPathsList.size(); i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathArr[i].toString() + " <index: " + pairPathToIntVal.get(pairPathArr[i]) + ">", 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathDAG(graph, dijkstraDis, pairPathArr);
//print dag
		debugMes("DAG (after uncertainty removal):", 10);
for(int i = 0; i < dag.length; i++)
{
String dag_text = "";
for(int j = 0; j < dag.length; j++)
{
dag_text += dag[i][j] + " ";
}
debugMes(dag_text, 10);
}
//2.2 check transitivity
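		// The chain decomposition below (a Cufflinks-style minimum path cover obtained via maximum
		// bipartite matching) is only guaranteed to be minimal when the precedence relation is
		// transitive: if i can precede j and j can precede k, then i must also be able to precede k.
		// That is why non-transitivity is treated as a hard error here.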
if(!checkTransitivity(dag, pairPathArr, pairPathToIntVal))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
// remove self-matches
for (int i = 0; i < dag.length; i++) {
dag[i][i] = 0;
}
//2.3 get matching
BipartiteMatching bp = new BipartiteMatching(dag.length, dag.length, dag);
bp.maxMatching();
int[] rightMatching = bp.getRightMatching();
int[] leftMatching = bp.getLeftMatching();
		debugMes("Matching:", 10);
bp.printRightMatching();
//2.4 get chains from matching
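		// Chains are read off the matching arrays: an index i with rightMatching[i] == -1 (presumably
		// meaning pair path i has no matched predecessor) starts a new chain, and leftMatching is then
		// followed to append each matched successor. Illustrative example (not from the original code):
		// with pair paths A, B, C and matches A->B and B->C, a single chain [A, B, C] is recovered,
		// i.e. one transcript candidate covering all three pair paths.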
ArrayList<ArrayList<PairPath>> chains = new ArrayList<ArrayList<PairPath>>();
boolean[] addedToChain = new boolean[rightMatching.length];
Arrays.fill(addedToChain, false);
ArrayList<PairPath> curChain;
for (int i = 0; i < rightMatching.length; i++) {
if (rightMatching[i] == -1) {
// start new chain
ArrayList<PairPath> chain = new ArrayList<PairPath>();
chains.add(chain);
chain.add(pairPathArr[i]);
int j = i;
while (leftMatching[j] != -1) {
j = leftMatching[j];
chain.add(pairPathArr[j]);
}
}
}
// report the chain info.
HashMap<Integer,Boolean> seen = new HashMap<Integer,Boolean>();
debugMes("Number of chains: " + chains.size(),10);
for(int j = 0; j < chains.size(); j++)
{
curChain = chains.get(j);
System.out.println("Chain: " + j);
for(int k = 0; k < chains.get(j).size(); k++)
{
PairPath p = chains.get(j).get(k);
Integer p_pos = pairPathToIntVal.get(p);
String seen_text = (seen.containsKey(p_pos)) ? " *** ERROR, ALREADY INCLUDED IN ANOTHER CHAIN *** " : "";
debugMes(p + " Pos:[" + p_pos + "] " + seen_text,10);
			seen.put(p_pos, Boolean.TRUE);
}
debugMes("",10);
}
// 3. foreach chain:
// 3.1. extract nodes from chains
DijkstraShortestPath dsp = new DijkstraShortestPath(graph);
for(int i = 0; i < chains.size(); i++)
{
HashSet<Integer> extracted = new HashSet<Integer>();
ArrayList<SeqVertex> extractedVertices = new ArrayList<SeqVertex>();
List<List<Integer>> chain_i_path_list = new ArrayList<List<Integer>>();
curChain = chains.get(i);
for(int j = 0; j < curChain.size(); j++)
{
chain_i_path_list.add(curChain.get(j).getPath1());
/*
extracted.addAll(curChain.get(j).getPath1());
if(curChain.get(j).hasSecondPath())
extracted.addAll(curChain.get(j).getPath2());
*/
}
List<List<Integer>> chain_i_collapsed_paths = Path.collapse_compatible_paths_to_min_set(chain_i_path_list);
for (List<Integer> collapsed_path : chain_i_collapsed_paths) {
			transcripts.put(collapsed_path, new Pair<Integer>(1, 1));
}
/*
for(Integer id : extracted)
{
extractedVertices.add(getSeqVertex(graph, id));
}
//extractedVerticesIDs.addAll(extracted);
// 3.2. sort according to topological order of BTFL graph
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
String node_id_list_text = "";
for (SeqVertex v : extractedVertices) {
node_id_list_text += v.getID() + " ";
}
debugMes("Extracted vertices for chain: " + i + " and sorted is: " + node_id_list_text + "\n", 10);
// Fill in any gaps
// 3.3. path=[L_1]; For each i in 1:length(node_list)
int j = 0;
int num_vertices = extractedVertices.size(); // note, extractedVertices grows in size during iterations below.
while(j < num_vertices - 1)
{
//System.out.println(j);
SeqVertex current = extractedVertices.get(j);
SeqVertex next = extractedVertices.get(j + 1);
// 3.3.1 if L_i == L_(i+1) then nothing
// -There are no duplicates since extractedVertices was created from
// building the set of extracted vertex IDs
// 3.3.2 else if exists an edge from L_i to L_(i+1) then nothing(?)
if(graph.getSuccessors(current).contains(next)) {
j++;
continue;
}
// 3.3.3 else find a single path (p = L_i,..., L_(i+1)):
// append all P_j (j=2:end) to our path
//List<SimpleEdge> sp = org.jgrapht.alg.DijkstraShortestPath.findPathBetween((Graph)graph, current, next);
List<SimpleEdge> sp = dsp.getPath(current, next);
			debugMes("Found shortest path between " + current.getID() + " and " + next.getID() + ":", 10);
ArrayList<SeqVertex> toAdd = new ArrayList<SeqVertex>();
for(SimpleEdge edge : sp) {
SeqVertex v = graph.getDest(edge);
toAdd.add(v);
debugMes("\t" + v.getID(), 10);
}
toAdd.remove(next);
extractedVertices.addAll(toAdd);
j++;
}
ArrayList<Integer> extractedVerticesIDs = new ArrayList<Integer>();
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
for(SeqVertex v: extractedVertices)
{
//System.out.println("Adding vertex with ID: " + v.getID());
extractedVerticesIDs.add(v.getID());
}
boolean extend_paths = ! CUFF_NO_EXTEND; // just for debugging purposes
if (extend_paths) {
ArrayList<Integer> extended = extendChain(extractedVerticesIDs, graph, pairPathToReadSupport, dijkstraDis);
transcripts.put(extended, new Pair(new Integer(1), new Integer(1)));
}
else {
transcripts.put(extractedVerticesIDs, new Pair(new Integer(1), new Integer(1)));
}
*/
} // end of foreach chain
debugMes("Cuff-based reconstructions of transcripts:", 10);
for (List<Integer> p : transcripts.keySet()) {
debugMes("CuffFly Output Path: " + p, 10);
if (cuff_input_paths.contains(p))
debugMes("\t** Original cuffpath",10);
else
debugMes("\t** NOT Original cuffpath", 10);
}
return transcripts;
}
private static HashSet<Integer> extract_vertex_list_from_PairPaths(
ArrayList<PairPath> pairPathsList) {
HashSet<Integer> vertices = new HashSet<Integer>();
for (PairPath pp : pairPathsList) {
for (List<Integer> path: pp.get_paths()) {
for (Integer node : path) {
vertices.add(node);
}
}
}
return(vertices);
}
private static List<Integer> getContainments(PairPath[] pairPathArr) {
HashSet<Integer> containments = new HashSet<Integer>();
for (int i = 0; i < pairPathArr.length; i++) {
for (int j = 0; j < pairPathArr.length; j++) {
if (i == j)
continue;
if (pairPathArr[i].isCompatibleAndContainedByPairPath(pairPathArr[j])) {
debugMes("CONTAINMENT: " + pairPathArr[i] + " is contained by " + pairPathArr[j], 10);
containments.add(i);
}
}
}
		List<Integer> containment_list = new ArrayList<Integer>(containments);
return(containment_list);
}
private static void populate_pairpaths_and_readsupport(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
Set<PairPath> pairPaths,
Map<PairPath, Integer> pairPathToReadSupport) {
for(Integer i : combinedReadHash.keySet())
{
Set<PairPath> temp = combinedReadHash.get(i).keySet();
for(PairPath j: temp)
{
if(j != null)
{
Integer read_count_support = combinedReadHash.get(i).get(j);
j = j.trimSinkNodes();
if (MAKE_PE_SE) { // separate the paths:
// convert split paths into individual paths to avoid uncertainties.
PairPath first_part = new PairPath(j.getPath1());
if (! pairPaths.contains(first_part)) {
pairPaths.add(first_part);
pairPathToReadSupport.put(first_part, read_count_support);
}
else {
// already there, just increment the read count support
pairPathToReadSupport.put(first_part, pairPathToReadSupport.get(first_part)+read_count_support);
}
if (j.hasSecondPath()) {
PairPath second_part = new PairPath(j.getPath2());
if (! pairPaths.contains(second_part)) {
pairPaths.add(second_part);
pairPathToReadSupport.put(second_part, read_count_support);
}
else {
// already there, just increment the read count support
pairPathToReadSupport.put(second_part, pairPathToReadSupport.get(second_part)+read_count_support);
}
}
} else {
// using pair paths instead of the split pairs (original)
pairPaths.add(j);
pairPathToReadSupport.put(j, read_count_support);
}
}
}
}
}
private static HashMap<List<Integer>, Pair<Integer>> butterfly (DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
long totalNumReads,
PrintStream pout_all,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis,
DijkstraDistanceWoVer<SeqVertex,SimpleEdge> dijkstraDisWoVer,
HashMap<Integer, List<List<Integer>>> tripletMapper,
HashMap<Integer, List<List<Integer>>> extendedTripletMapper,
HashMap<Integer,Boolean> xStructuresResolvedByTriplets
)
{
Pair<HashMap<List<Integer>,Pair<Integer>>> FinalPathsPair = getAllProbablePaths(graph,comp,
combinedReadHash,dijkstraDis,dijkstraDisWoVer,
tripletMapper,extendedTripletMapper, xStructuresResolvedByTriplets);
HashMap<List<Integer>,Pair<Integer>> FinalPaths_diff = FinalPathsPair.getFirst();
HashMap<List<Integer>,Pair<Integer>> FinalPaths_all = FinalPathsPair.getSecond();
return FinalPaths_all;
}
/**
	 * given the graph, find all single-nt bubbles and keep the majority-vote branch;
	 * add the weights of the removed branch to the kept path, and record the removed node in the prevIDs
* v -> v1 -> vend
* v -> v2 -> vend
* @param graph
*/
private static void removeSingleNtBubbles(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
SeqVertex v1=null ,v2 = null, vend = null;
SeqVertex vToKeep=null ,vToRemove = null;
SimpleEdge e1ToKeep = null, e1ToRemove = null;
SimpleEdge e2ToKeep = null, e2ToRemove = null;
Vector<SeqVertex> removeV = new Vector<SeqVertex>();
Collection<SeqVertex> allV = new HashSet<SeqVertex>();
allV.addAll(graph.getVertices());
for (SeqVertex v : allV)
{
if (removeV.contains(v))
continue;
if (graph.getSuccessorCount(v)==2)
{
Collection<SeqVertex> children = graph.getSuccessors(v);
Iterator<SeqVertex> iter = children.iterator();
v1 = iter.next();
v2 = iter.next();
int len1 = v1.getNameKmerAdj().length();
int len2 = v2.getNameKmerAdj().length();
debugMes("SNP_collapse candidates: " + v1 + " len: " + len1 + " and " + v2 + " len: " + len2, 15);
if (len1==KMER_SIZE && len2==KMER_SIZE &&
graph.getSuccessorCount(v1)==1 &&
graph.getSuccessorCount(v2)==1 &&
getSingleSuccessor(graph,v2).equals(getSingleSuccessor(graph,v1)))
{
vend = getSingleSuccessor(graph,v1);
if (graph.findEdge(v, v1).getWeight() > graph.findEdge(v, v2).getWeight())
					{ //keep v1, lose v2
vToKeep = v1;
vToRemove = v2;
}else
					{ //keep v2, lose v1
vToKeep = v2;
vToRemove = v1;
}
e1ToKeep = graph.findEdge(v, vToKeep);
e2ToKeep = graph.findEdge(vToKeep, vend);
e1ToRemove = graph.findEdge(v, vToRemove);
e2ToRemove = graph.findEdge(vToRemove, vend);
debugMes("SNP_collapse: merging the node "+vToRemove.getID()+" to the node "+vToKeep.getID(),15);
SeqVertex newV = new SeqVertex(getNextID(), vToKeep.getName());
newV.copyTheRest(vToKeep);
newV.addToPrevIDs(vToKeep,vToRemove,LAST_REAL_ID);
graph.addVertex(newV);
graph.addEdge(new SimpleEdge(e1ToKeep.getWeight() + e1ToRemove.getWeight(), v.getID(), newV.getID()), v, newV);
graph.addEdge(new SimpleEdge(e2ToKeep.getWeight() + e2ToRemove.getWeight(), newV.getID(), vend.getID()), newV,vend);
removeV.add(vToRemove);
removeV.add(vToKeep);
}
}
}
for (SeqVertex rv : removeV)
{
debugMes("removing the single nt variation vertex "+rv.getID(),20);
graph.removeVertex(rv);
}
}
/**
	 * given the graph, find all single-nt bubbles and replace the two variant nodes with a single
	 * node carrying the degenerate (ambiguity) code; sum the edge weights and record both prevIDs
* v -> v1 -> vend
* v -> v2 -> vend
* @param graph
* @throws Exception
*/
private static void removeSingleNtBubblesWithDegenerateCode(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) throws Exception {
SeqVertex v1=null ,v2 = null, vend = null;
SimpleEdge eTop1 = null, eTop2 = null;
SimpleEdge eBottom1 = null, eBottom2 = null;
Vector<SeqVertex> removeV = new Vector<SeqVertex>();
Collection<SeqVertex> allV = new HashSet<SeqVertex>();
allV.addAll(graph.getVertices());
for (SeqVertex v : allV)
{
if (removeV.contains(v))
continue;
if (graph.getSuccessorCount(v)==2)
{
Collection<SeqVertex> children = graph.getSuccessors(v);
Iterator<SeqVertex> iter = children.iterator();
v1 = iter.next();
v2 = iter.next();
int len1 = v1.getName().length();
int len2 = v2.getName().length();
if (len1==1 && len2==1 &&
graph.getSuccessorCount(v1)==1 &&
graph.getSuccessorCount(v2)==1 &&
getSingleSuccessor(graph,v2).equals(getSingleSuccessor(graph,v1)))
{
vend = getSingleSuccessor(graph,v1);
String key;
if (String.CASE_INSENSITIVE_ORDER.compare(v1.getName(),v2.getName())<0)
key = v1.getName()+v2.getName();
else
key = v2.getName()+v1.getName();
String name = getDegenerateRepresentation(key);
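					// 'name' is presumably the IUPAC ambiguity code for the two alternative bases,
					// e.g. "AC" -> "M", "AG" -> "R", "CT" -> "Y" (stated as an assumption here; the
					// actual mapping lives in getDegenerateRepresentation()).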
SeqVertex newV = new SeqVertex(getNextID(), name);
if (graph.findEdge(v, v1).getWeight() > graph.findEdge(v, v2).getWeight())
newV.copyTheRest(v1);
else
newV.copyTheRest(v2);
eTop1 = graph.findEdge(v, v1);
eBottom1 = graph.findEdge(v1, vend);
eTop2 = graph.findEdge(v, v2);
eBottom2 = graph.findEdge(v2, vend);
debugMes("merging the nodes "+v1.getID()+" and the node "+v2.getID()+" to the node "+newV,18);
newV.addToPrevIDs(v1,v2,LAST_REAL_ID);
newV.setFrequencies(v1.getName(),eTop1.getWeight(),v2.getName(),eTop2.getWeight());
graph.addVertex(newV);
graph.addEdge(new SimpleEdge(eTop1.getWeight() + eTop2.getWeight(), v.getID(), newV.getID()), v, newV);
graph.addEdge(new SimpleEdge(eBottom1.getWeight() + eBottom2.getWeight(), newV.getID(), vend.getID()), newV,vend);
removeV.add(v1);
removeV.add(v2);
}
}
}
for (SeqVertex rv : removeV)
{
debugMes("removing the single nt variation vertex "+rv.getID(),20);
graph.removeVertex(rv);
}
}
/**
* return the single successor of this node in this graph
* @param graph
	 * @param v
* @return
*/
private static SeqVertex getSingleSuccessor(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
Collection<SeqVertex> children = graph.getSuccessors(v);
if (children.size()!=1)
return null;
SeqVertex vout = children.iterator().next();
return vout;
}
/**
	 * find edges whose support is extremely high compared to both sides (caused by a single very abundant kmer), and fix their support
	 * @param graph
	 * @param outFlow
	 * @param inFlow
*/
private static void fixExtremelyHighSingleEdges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<Integer,Integer> outFlow, HashMap<Integer,Integer> inFlow) {
debugMes("fixExtremelyHighSingleEdges()", 5);
for (SimpleEdge e : graph.getEdges())
{
double supp =e.getWeight();
Integer sourceID = graph.getSource(e).getID();
Integer targetID = graph.getDest(e).getID();
Integer inFlowToSource = inFlow.get(sourceID);
Integer outFlowOfTarget = outFlow.get(targetID);
if (inFlowToSource!= null && outFlowOfTarget!= null &&
supp > inFlowToSource*EXTREME_EDGE_FLOW_FACTOR && supp > outFlowOfTarget*EXTREME_EDGE_FLOW_FACTOR)
{
double newSupp = Math.max(inFlowToSource, outFlowOfTarget);
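				// Worked example (illustrative; EXTREME_EDGE_FLOW_FACTOR is defined elsewhere): with
				// inFlowToSource = 10, outFlowOfTarget = 12 and a factor of 10, an edge of weight 500
				// exceeds both 100 and 120, so its support is clamped down to max(10, 12) = 12.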
debugMes("the support of edge "+sourceID+"->"+targetID+" has changed from "+supp+" to "+newSupp,20);
e.setWeight(newSupp);
}
}
}
/**
	 * given the graph and the final paths, count the X-structures whose incoming/outgoing node pairings are fully resolved by the final paths.
* @param graph
* @param comp
* @param finalPaths
* @return
*/
private static int countNumOfXstructuresResolved(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp, HashMap<List<Integer>,Pair<Integer>> finalPaths) {
int res = 0;
for (SeqVertex v : comp)
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
{
//this is an x-structure
int maxPaths = Math.max(graph.inDegree(v), graph.outDegree(v));
Integer bef,after;
int vid = v.getID();
HashMap<Pair<Integer>,Integer> befAndAfterNodes = new HashMap<Pair<Integer>, Integer>();
Pair<Integer> key;
for (List<Integer> path : finalPaths.keySet())
{
int index = path.indexOf(vid);
if (index!=-1 && index!=0 && index!=path.size()-1) // vid is not the first or the last
{
bef = path.get(index-1);
after = path.get(index+1);
key = new Pair<Integer>(bef,after);
if (!befAndAfterNodes.containsKey(key))
befAndAfterNodes.put(key,1);
else
befAndAfterNodes.put(key,befAndAfterNodes.get(key)+1);
}
}
String triplets = "";
for (Pair<Integer> befAndAfterNode : befAndAfterNodes.keySet()) {
Integer before1 = (Integer) befAndAfterNode.getFirst();
Integer after1 = (Integer) befAndAfterNode.getSecond();
triplets += "[" + before1 + "-" + vid + "-" + after1 + "=" + befAndAfterNodes.get(befAndAfterNode) + "] ";
}
if (befAndAfterNodes.keySet().size()==maxPaths)
{
debugMes("vertex "+v.getID()+" IS resolved in an X-structure: " + triplets, 10);
res++;
}
else {
debugMes("vertex " + v.getID() + " is NOT resolved in an X-structure: " + triplets, 10);
}
}
}
return res;
}
private static HashMap<Integer,Boolean> getXstructuresResolvedByTriplets (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer, List<List<Integer>>> tripletMapper) {
HashMap<Integer,Boolean> xStructuresResolvedByTriplets = new HashMap<Integer,Boolean>();
for (SeqVertex v : comp)
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
{
//this is an x-structure
Integer vertex_id = v.getID();
if (tripletMapper.containsKey(vertex_id)) {
debugMes("vertex " + vertex_id + " IS resolved in an X-structure: " + tripletMapper.get(vertex_id), 10);
xStructuresResolvedByTriplets.put(vertex_id, true);
}
else {
debugMes("vertex " + v.getID() + " is UN-resolved X-structure. ", 10);
xStructuresResolvedByTriplets.put(vertex_id, false);
}
}
}
return (xStructuresResolvedByTriplets);
}
public static class FinalPaths implements Comparable<FinalPaths> {
List<Integer> path;
String sequence;
public FinalPaths (List<Integer> p, String s) {
path = p;
sequence = s;
}
public int compareTo(FinalPaths f) {
if (this.sequence.length() > f.sequence.length()) {
return(-1);
}
else if (this.sequence.length() < f.sequence.length()) {
return(1);
}
else {
return(0);
}
}
}
/**
* Print all final paths
* @param finalPaths
* @param graph
	 * @param p
	 * @param name
	 * @param totalNumReads
	 * @param final_paths_to_long_read_content
* @param separate_gene_ids
* @throws FileNotFoundException
*/
private static void printFinalPaths(
HashMap<List<Integer>,Pair<Integer>> finalPaths,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p,
String name,
long totalNumReads,
HashMap<List<Integer>,ArrayList<String>> final_paths_to_long_read_content,
HashMap<List<Integer>, Integer> separate_gene_ids)
throws FileNotFoundException
{
debugMes("Final Paths: " + finalPaths.size(), 10);
		DecimalFormat df = new DecimalFormat("#.##");
name = name.replace(".graph", "");
HashMap<Integer,Integer> local_gene_id_mapping = new HashMap<Integer,Integer>();
HashMap<Integer,Integer> local_seq_counter = new HashMap<Integer,Integer>();
for (List<Integer> path : finalPaths.keySet()) {
String seq = getPathSeq(graph,path);
//print this path
int gene_id;
int iso_id;
if (separate_gene_ids.containsKey(path)) {
int local_gene_id = separate_gene_ids.get(path);
if (local_gene_id_mapping.containsKey(local_gene_id)) {
// seen this gene before
gene_id = local_gene_id_mapping.get(local_gene_id);
iso_id = local_seq_counter.get(gene_id) + 1;
local_seq_counter.put(gene_id, iso_id);
}
else {
gene_id = ++GENE_COUNTER;
local_gene_id_mapping.put(local_gene_id, gene_id);
iso_id = 1;
// storing isoform id for the first time.
local_seq_counter.put(gene_id, iso_id);
}
}
else {
// no gene clusters, each is unique:
gene_id = ++GENE_COUNTER;
iso_id = 1;
}
String seqName = name + "_g" + gene_id + "_i" + iso_id;
String pathName = get_pathName_string(path, graph);
seqName += " len="+seq.length() + " path="+ pathName;
// Report the long read content information.
if (final_paths_to_long_read_content.containsKey(path)) {
// then got list of long read names assigned to this final path
HashMap<PairPath, ArrayList<String>> long_read_paths_to_name_list = new HashMap<PairPath, ArrayList<String>>();
for (String long_read_name : final_paths_to_long_read_content.get(path)) {
PairPath pp = LONG_READ_NAME_TO_PPath.get(long_read_name);
if (!long_read_paths_to_name_list.containsKey(pp)) {
ArrayList<String> a = new ArrayList<String>();
a.add(long_read_name);
long_read_paths_to_name_list.put(pp, a);
}
else {
long_read_paths_to_name_list.get(pp).add(long_read_name);
}
}
seqName = seqName + " long_read_mappings: " + long_read_paths_to_name_list;
}
debugMes("\nFinal path reported: " + seqName, 10);
p.print(getSeqFasta(seq, seqName));
}
}
/*
private static HashMap<List<Integer>, Boolean> remove_transcripts_with_insufficent_read_support(
Set<List<Integer>> pathSet, HashMap<List<Integer>,Integer> seqLengthMap,
HashMap<List<Integer>, Boolean> remove_low_expr_isoforms) {
for (List<Integer> path : pathSet) {
float path_frag_count = pc.get_transcript_to_sum_frag_counts(path);
debugMes("PATH_TO_FRAG_COUNT: " + path_frag_count + ", FRAGS_PER_TRANS_LEN: " + path_frag_count/seqLengthMap.get(path)*100, 10);
}
return remove_low_expr_isoforms;
}
*/
private static Object seqLengthMap() {
// TODO Auto-generated method stub
return null;
}
/*
private static HashMap<List<Integer>, Float> get_pct_expr_isoforms_of_genes(
HashMap<List<Integer>, Integer> separate_gene_ids) {
// This relies on having run the EM to estimate relative expression.
if (pc == null) {
throw new RuntimeException("removal of low isoforms requires EM was run, but pc is null");
}
HashMap<List<Integer>, Float> pct_expr_isoform = new HashMap<List<Integer>,Float>();
HashMap<Integer,Float> max_gene_expr_per_gene = new HashMap<Integer,Float>();
for (List<Integer> transcript : separate_gene_ids.keySet()) {
Integer gene_id = separate_gene_ids.get(transcript);
Float expr = pc.get_expr(transcript);
if (max_gene_expr_per_gene.containsKey(gene_id)) {
if (max_gene_expr_per_gene.get(gene_id) < expr)
max_gene_expr_per_gene.put(gene_id, expr);
}
else
max_gene_expr_per_gene.put(gene_id, expr);
}
for (List<Integer> transcript : separate_gene_ids.keySet()) {
Integer gene_id = separate_gene_ids.get(transcript);
Float expr = pc.get_expr(transcript);
Float max_gene_expr = max_gene_expr_per_gene.get(gene_id);
float pct_isoform_expr = expr/max_gene_expr * 100;
debugMes("Relative expression: " + pct_isoform_expr + ", gene: " + gene_id + ", path: " + transcript, 10);
pct_expr_isoform.put(transcript, pct_isoform_expr);
}
return(pct_expr_isoform);
}
*/
private static HashMap<List<Integer>, Integer> group_paths_into_genes(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("Grouping paths into genes", 10);
HashMap<Integer,Integer> node_length_map = new HashMap<Integer,Integer>(); // track node lengths for seq pair comparisons
HashMap<List<Integer>, Integer> seqLengthMap = new HashMap<List<Integer>,Integer>();
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
// get node lengths
for (List<Integer> path : finalPaths_all.keySet())
{
String seq = getPathSeq(graph,path);
seqLengthMap.put(path, seq.length());
//System.out.println(seq);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
for (Integer nodeID : path) {
if (nodeID > 0) {
int node_length = getSeqVertex(graph, nodeID).getName().length();
node_length_map.put(nodeID, node_length);
}
else
node_length_map.put(nodeID, 0);
}
}
UndirectedSparseGraph<List<Integer>, String> sparseGraph = new UndirectedSparseGraph<List<Integer>, String>();
List<List<Integer>> paths = new ArrayList<List<Integer>>(finalPaths_all.keySet());
for (int i = 0; i <= paths.size()-2; i++) {
List<Integer> path_i = paths.get(i);
if (! sparseGraph.containsVertex(path_i))
sparseGraph.addVertex(path_i);
int path_i_len = 0;
for (Integer node : path_i) {
path_i_len += node_length_map.get(node);
}
for (int j = i + 1; j <= paths.size()-1; j++) {
List<Integer> path_j = paths.get(j);
if (! sparseGraph.containsVertex(path_j))
sparseGraph.addVertex(path_j);
int path_j_len = 0;
int nodes_same_length = 0;
for (Integer node : path_j) {
path_j_len += node_length_map.get(node);
if (path_i.contains(node))
nodes_same_length += node_length_map.get(node);
}
float iso_pct_overlap = Math.max((float)nodes_same_length / path_i_len * 100,
(float)nodes_same_length / path_j_len * 100);
debugMes("Isoform_overlap: Path_i:" + path_i + ", Path_j: " + path_j + ", overlap = " + iso_pct_overlap + "%", 10);
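				// Worked example (illustrative): if path_i spans 1000 bases, path_j spans 500 bases, and
				// their shared nodes total 400 bases, then iso_pct_overlap = max(400/1000, 400/500) * 100 = 80%,
				// so the two paths are linked into one gene whenever MIN_ISOFORM_PCT_LEN_OVERLAP <= 80.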
if ( iso_pct_overlap >= MIN_ISOFORM_PCT_LEN_OVERLAP) {
sparseGraph.addEdge("e_" + i + "_" + j, path_i, path_j);
debugMes("IsoformEdge linking: " + path_i + " to " + path_j, 10);
}
}
}
HashMap<List<Integer>, Integer> gene_grouping = new HashMap<List<Integer>, Integer>();
WeakComponentClusterer<List<Integer>, String> compClus = new WeakComponentClusterer<List<Integer>,String>();
Set<Set<List<Integer>>> comps = compClus.transform(sparseGraph);
debugMes("IsoformClustering, number of clusters = " + comps.size(), 10);
// add the singletons back in
HashMap<List<Integer>,Boolean> inCluster = new HashMap<List<Integer>,Boolean>();
if (comps.size() == 0) {
// all related
for (List<Integer> path : finalPaths_all.keySet()) {
gene_grouping.put(path, 1);
}
return(gene_grouping);
}
int cluster_count = 0;
for (Set<List<Integer>> cluster : comps) {
cluster_count++;
for (List<Integer> path : cluster) {
gene_grouping.put(path, cluster_count);
debugMes("GeneCluster[" + cluster_count + "] contains: " + path, 10);
inCluster.put(path, true);
}
}
for (List<Integer> path : paths) {
if (! inCluster.containsKey(path)) {
cluster_count++;
gene_grouping.put(path, cluster_count);
}
}
return(gene_grouping);
}
/**
* given a path in the graph, return its sequence
* @param graph
* @param path
* @return
*/
private static String getPathSeq(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
String seq = "";
Boolean first_node = true;
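		// Adjacent nodes overlap by (k-1) bases, so after the first node only the non-overlapping
		// suffix of each node sequence is appended.
		// Illustrative example (assuming k = 3): nodes "ATGCA" and "CAGGT" concatenate to "ATGCAGGT".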
for (Integer nodeID : path) {
if (nodeID>=0) {
String node_seq = getSeqVertex(graph, nodeID).getName();
if (! first_node) {
node_seq = node_seq.substring(SeqVertex.get_kmer_length() -1);
}
first_node = false;
seq += node_seq;
//System.out.println("Node: " + nodeID + " has seq: " + node_seq);
}
}
return seq;
}
/**
* For each path of a read pair, ask how many reads support it.
* @param graph
* @param readNameHash
* @param dijkstraDis
* @return
*/
private static HashMap<Integer, HashMap<PairPath, Integer>> getSuffStats_wPairs(
DirectedSparseGraph<SeqVertex,SimpleEdge> graph,
HashMap<String, List<Read>> readNameHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis) {
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash = new HashMap<Integer,HashMap<PairPath,Integer>> ();
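		// combinedReadHash maps: ID of the first vertex of a read's (pair) path -> (PairPath -> number
		// of reads supporting that path). Grouping by first vertex lets downstream code quickly fetch
		// all read paths that start at a given node.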
Set<String> usedReads = new HashSet<String>();
List<Read> curList = null;
int numReadsUsed = 0;
int numSingletons = 0;
int numPairs = 0;
int numPairsDiscarded = 0;
for (String name : readNameHash.keySet())
{
if (usedReads.contains(name))
				continue; // ignoring redundancy in the read set?
curList = readNameHash.get(name);
if (curList.size()==1)
{//single read
// ** Single Read Processing **
Read r = curList.get(0);
PairPath path = new PairPath(r.getPathIDs());
Integer firstV = path.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); // init
if (!combinedReadHash.get(firstV).containsKey(path))
combinedReadHash.get(firstV).put(path, 0);
Integer counts = combinedReadHash.get(firstV).get(path);
combinedReadHash.get(firstV).put(path,++counts); // count read having this path
numReadsUsed++;
debugMes("we have "+combinedReadHash.get(firstV).get(path)+" reads supporting the path: "+path,19);
numSingletons++;
// examine for long read.
if (name.startsWith("LR$|")) { // r.getSeq().length() >= MIN_LONG_READ_LENGTH) {
LONG_READ_NAME_TO_PPath.put(name, path);
debugMes("LONG_READ_IDENTIFIED: " + name + " , path: " + path, 12);
if (! LONG_READ_PATH_MAP.containsKey(path)) {
ArrayList<String> nameList = new ArrayList<String>();
nameList.add(name);
LONG_READ_PATH_MAP.put(path, nameList);
}
else {
ArrayList<String> nameList = (ArrayList<String>) LONG_READ_PATH_MAP.get(path);
nameList.add(name);
}
}
}else { // paired read
// ** Paired Read Processing **
Read r1 = curList.get(0);
List<Integer> path1 = r1.getPathIDs();
Read r2 = curList.get(1);
List<Integer> path2 = r2.getPathIDs();
PairPath combinedPath = new PairPath(path1, path2);
/* move this to after repeat unrolling.
* for now, just keep it simple, store paths.
*
PairPath combinedPath = combinePaths(graph,path1,path2,dijkstraDis);
if (combinedPath.isEmpty())
{
debugMes("the paths "+path1+" and "+path2+" couldn't be combined",15);
numPairsDiscarded++;
continue;
}
*/
Integer firstV = combinedPath.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); //init
if (!combinedReadHash.get(firstV).containsKey(combinedPath))
combinedReadHash.get(firstV).put(combinedPath, 0); //add pairpath
Integer counts = combinedReadHash.get(firstV).get(combinedPath);
combinedReadHash.get(firstV).put(combinedPath,++counts); // increment counts for pairpath
debugMes("we have "+combinedReadHash.get(firstV).get(combinedPath)+" reads supporting the path: "+combinedPath,18);
numReadsUsed++;
numPairs++;
}
usedReads.add(name);
}
debugMes("number of reads used = "+numReadsUsed,15);
debugMes("## Read PathPair results: " + numSingletons + " singletons, "
+ " num pairs: " + numPairs + ", num pairs discarded: " + numPairsDiscarded, 10);
return combinedReadHash;
}
/**
* Given the graph, and two paths of the two reads, combine them into a single path
* @param graph
* @param path1
* @param path2
* @param dijkstraDis
* @return
*/
private static PairPath combinePaths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> path1, List<Integer> path2, DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis) {
debugMes("combinePaths: " + path1 + ", " + path2, 15);
SeqVertex firstV1 = getSeqVertex(graph,path1.get(0));
SeqVertex lastV1 = getSeqVertex(graph,path1.get(path1.size()-1));
SeqVertex firstV2 = getSeqVertex(graph,path2.get(0));
SeqVertex lastV2 = getSeqVertex(graph,path2.get(path2.size()-1));
PairPath path = new PairPath();
if (path1.containsAll(path2))
path.setPath1(path1);
else if (path2.containsAll(path1))
path.setPath2(path2); // note, gets moved to path1 later.
//path1 --> path2
else if (SeqVertex.isAncestral(lastV1, firstV2,dijkstraDis)>0
&&
! lastV1.equals(firstV2)
)
{
path.setPath1(path1);
path.setPath2(path2);
}
//path2 --> path1
else if (SeqVertex.isAncestral(lastV2, firstV1,dijkstraDis)>0
&&
! lastV2.equals(firstV1)
)
{
path.setPath1(path2);
path.setPath2(path1);
}
else if (SeqVertex.isAncestral(firstV2,firstV1,dijkstraDis)==0 &&
SeqVertex.isAncestral(lastV2,lastV1,dijkstraDis)==0)
{
//there is no consistent path between read1 and read2
}
// Overlapping paths
//path1(partial) -> path2
else if (SeqVertex.isAncestral(firstV1,firstV2,dijkstraDis)>0 &&
path1.indexOf(firstV2.getID())>=0)
{
int i = path1.indexOf(firstV2.getID());
path.setPath1(path1.subList(0, i));
path.addToPath1(path2);
}
//path2(partial) -> path1
else if (SeqVertex.isAncestral(firstV2,firstV1,dijkstraDis)>0 &&
path2.indexOf(firstV1.getID())>=0)
{
int i = path2.indexOf(firstV1.getID());
path.setPath1(path2.subList(0, i));
path.addToPath1(path1);
}
if (path.getPath1().isEmpty() && !path.getPath2().isEmpty())
path.movePath2To1();
// Try to impute connecting paths from path 1 to path 2
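		// Illustrative example (not from the original code): for a PairPath ([1,2], [5,6]), if node 2
		// has exactly one successor that can still reach node 5 (say 3), node 3 likewise has only
		// node 4, and node 4 leads to node 5, the two halves are merged into the single path
		// [1,2,3,4,5,6]. If at any step zero or more than one successor can reach the start of the
		// second path, imputation is abandoned and the PairPath is left split.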
if ((! path.getPath1().isEmpty()) && (! path.getPath2().isEmpty())) {
SeqVertex fV1 = getSeqVertex(graph,path.getPath1().get(0));
SeqVertex lV1 = getSeqVertex(graph,path.getPath1().get(path.getPath1().size()-1));
SeqVertex fV2 = getSeqVertex(graph,path.getPath2().get(0));
SeqVertex lV2 = getSeqVertex(graph,path.getPath2().get(path.getPath2().size()-1));
debugMes("Examining imputation of path connecting pairpath: " + path + " nodes "+ lV1.getID() + " to " + fV2.getID(), 20);
if (SeqVertex.isAncestral(lV1, fV2, dijkstraDis) > 0) {
// note could return false if have a sequencing gap
boolean canExtend = true;
SeqVertex v = lV1;
// walk towards fV2
List<Integer> intervening_vertex_ids = new ArrayList<Integer>();
boolean impute = true;
while (canExtend) {
SeqVertex next = null;
int count_connectable = 0;
for (SeqVertex successor : graph.getSuccessors(v)) {
if (SeqVertex.isAncestral(successor, fV2, dijkstraDis) > 0) {
count_connectable++;
next = successor;
}
}
if (next != null && count_connectable == 1) {
if (fV2.equals(next)) {
// reached fV2
break;
}
else {
intervening_vertex_ids.add(next.getID());
}
v = next;
}
else {
// either no connection or too many potential connections
canExtend = false;
impute = false;
}
}
if (impute) {
debugMes("Could Impute path connecting" + path + " containing intervening nodes: " + intervening_vertex_ids, 16);
if (! intervening_vertex_ids.isEmpty()) {
path.getPath1().addAll(intervening_vertex_ids);
}
path.getPath1().addAll(path.getPath2());
path.getPath2().clear();
}
else {
debugMes("Could not impute intervening nodes", 20);
}
}
}
return path;
}
/**
* Count how many vertices we have with in degree >1 & out degree >1
* @param graph
* @return
*/
private static int countNumOfXstructures(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int res = 0;
for (SeqVertex v : graph.getVertices())
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
res++;
}
return res;
}
private static void printPairPaths(HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash){
printPairPaths(combinedReadHash, "PAIRPATH");
}
private static void printPairPaths(HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
String out_token){
for(Map.Entry<Integer, HashMap<PairPath, Integer>> entry : combinedReadHash.entrySet())
{
debugMes("Start Vertex:" + entry.getKey(), 10);
for(Map.Entry<PairPath, Integer> paths : entry.getValue().entrySet())
{
//System.out.println(entry.getValue());
debugMes(out_token + ": " + paths, 10);
}
}
}
/**
* Given the graph and the hash with all reads, find all probable paths from S to T.
* @param graph
* @param comp
* @param combinedReadHash
* @param dijkstraDis
* @param dijkstraDisWoVer
*/
private static Pair<HashMap<List<Integer>, Pair<Integer>>> getAllProbablePaths (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis,
DijkstraDistanceWoVer<SeqVertex,SimpleEdge> dijkstraDisWoVer,
HashMap<Integer, List<List<Integer>>> tripletMapper,
HashMap<Integer, List<List<Integer>>> extendedTripletMapper,
HashMap<Integer,Boolean> xStructuresResolvedByTriplets
) {
		debugMes("\nSECTION: getAllProbablePaths()\n", 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("PairPaths to assemble:", 15);
printPairPaths(combinedReadHash, "PairPaths@BflyStart");
}
// paths that are constructed by tracing paths of reads through the graph
HashMap<SeqVertex,List<List<Integer>>> Paths = new HashMap<SeqVertex,List<List<Integer>>>();
// this holds reads of path until V + reads starting at V
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads = new HashMap<List<Integer>,HashMap<PairPath,Integer>>();
HashMap<List<Integer>, HashSet<PairPath>> PathContainedReads = new HashMap<List<Integer>,HashSet<PairPath>>();
HashMap<List<Integer>,Boolean> Extensions = new HashMap<List<Integer>,Boolean>();
// final paths to be captured and reported.
HashMap<List<Integer>,Pair<Integer>> FinalPaths_diff = new HashMap<List<Integer>,Pair<Integer>>();
HashMap<List<Integer>,Pair<Integer>> FinalPaths_all = new HashMap<List<Integer>,Pair<Integer>>();
/*
ROOT.setDFS_FinishingTime(Integer.MAX_VALUE);
T_VERTEX.setDFS_FinishingTime(-1);
*/
ROOT.setDepth(-1);
T_VERTEX.setDepth(Integer.MAX_VALUE);
/*
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
PriorityQueue<SeqVertex> C = new PriorityQueue<SeqVertex>(comp.size(),finishingTimeComparator);
*/
PriorityQueue<SeqVertex> BflyQueue = new PriorityQueue<SeqVertex>(comp.size(), new SeqVertexNodeDepthComparator());
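		// Vertices are dequeued in (presumably) increasing order of node depth, so a node is normally
		// reached only after the paths leading into it have been built; the parents_all_visited()
		// check below guards the cases where the depth ordering alone is not sufficient.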
// start the path listing out with a path containing the ROOT node only.
BflyQueue.add(ROOT);
List<Integer> tmpL = new ArrayList<Integer>();
tmpL.add(ROOT.getID());
ArrayList<List<Integer>> tmpPathList = new ArrayList<List<Integer>>();
tmpPathList.add(tmpL);
Paths.put(ROOT, tmpPathList);
SeqVertex v;
int total_num_nodes = comp.size();
// beginning path constructions
HashMap<Integer,Boolean> node_ID_visited = new HashMap<Integer,Boolean>();
int num_nodes = 0;
String Crep;
while (!BflyQueue.isEmpty())
{
/*
if (BFLY_GLOBALS.VERBOSE_LEVEL>=20)
{
Crep = "[";
for (SeqVertex vp : C)
Crep = Crep + "" +vp.getID()+":"+vp.getDFS_FinishingTime()+",";
Crep += "]";
debugMes("C = "+Crep,10);
}
*/
debugMes("QUEUE IS: " + BflyQueue, 12);
v = BflyQueue.poll();
if (v.getID() > 0) {
// if it has successors that haven't been visited yet, delay targeting it.
List<SeqVertex> delay_tackle_vertices = new ArrayList<SeqVertex>();
while ( (! BflyQueue.isEmpty()) && (! parents_all_visited(v, node_ID_visited, graph) ) ) {
debugMes("* delaying tackling vertex: " + v.getID() + " since a parent hasn't been visited yet.", 12);
delay_tackle_vertices.add(v);
v = BflyQueue.poll();
}
if (BflyQueue.isEmpty() && ! parents_all_visited(v, node_ID_visited, graph)) {
throw new RuntimeException("ERROR, queue ran out of nodes and current node has unvisited parents.");
}
if (! delay_tackle_vertices.isEmpty()) {
// add them back to the queue
BflyQueue.addAll(delay_tackle_vertices);
}
}
// track the nodes we visit, avoid looping by extending from a node encountered earlier. Loops should be handled long before here.
if (node_ID_visited.containsKey(v.getID())) {
debugMes("** already visited node in queue: " + v.getID(), 5);
continue;
}
else {
node_ID_visited.put(v.getID(), true);
}
			debugMes("\n\nTackling vertex: " + v.getID() + "\n", 10);
num_nodes++;
float pct_done = (float) num_nodes / total_num_nodes * 100;
debugMes("\tbutterfly pct done: " + num_nodes + " / " + total_num_nodes + " = " + pct_done + "% pct done.", 5);
// get read paths that start at vertex V
HashMap<PairPath,Integer> readsStartingAtV = combinedReadHash.get(v.getID());
if (readsStartingAtV == null) {
				debugMes("ReadsStartingAtV_START_BFLY, Node: " + v.getID() + " EMPTY", 15);
}
else {
for (PairPath read : readsStartingAtV.keySet()) {
debugMes("ReadsStartingAtV_START_BFLY, Node: " + v.getID() + " read: " + read, 15);
}
}
// prep data structures required.
// go over all paths of P[v], add all reads that start at v
debugMes("Exploring extension of: " + Paths.get(v).size() + " paths that end at vertex: " + v.getID(), 5);
//describe paths:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("\n== Current Paths Constructed Up To Vertex: " + v.getID() + " :", 15);
for (List<Integer> path: Paths.get(v)) {
debugMes("PathPartialReconstruction@[" + v.getID() + "] : " + path, 15);
}
}
for (List<Integer> path : Paths.get(v))
{
if (!PathReads.containsKey(path))
PathReads.put(path, new HashMap<PairPath,Integer>()); // init
if (! PathContainedReads.containsKey(path))
PathContainedReads.put(path, new HashSet<PairPath>());
if (readsStartingAtV!=null && !readsStartingAtV.isEmpty())
{
debugMes("\nAdding the reads " +readsStartingAtV +" to the path "+ path, 17);
PathReads.get(path).putAll(readsStartingAtV);
/*
// verify:
for (PairPath pp : readsStartingAtV.keySet()) {
debugMes("VERIFYING: " + v.getID() + " ReadStartingAtV: " + pp + " = " + PathReads.get(path).get(pp), 10);
}
*/
// path that ends at V is associated with all reads that start at V
}
//keep track of all extensions
Extensions.put(path, false);
/*
for (PairPath read : PathReads.get(path).keySet()) {
debugMes("PATH: " + path + " initially stocked with read: " + read, 10);
}
*/
}
// Examine each path, try to extend by successor u
// go over all descendants of v
for (SeqVertex u : graph.getSuccessors(v))
{
				debugMes("\n\nExamining extension of vertex " + v.getID()
						+ " by successor " + u.getID()
						+ "\n", 15);
if (! (comp.contains(u) || u.equals(T_VERTEX))) {
debugMes("component either lacks: " + u.getID() + " or at sink", 12);
continue; // only examine successor vertices that are contained within this subcomponent
}
int path_counter = 0;
boolean vExtendedToU = false;
List<List<Integer>> paths_ending_at_v = new ArrayList<List<Integer>>(Paths.get(v));
debugMes("Count of paths ending at v: " + v.getID() + " = " + paths_ending_at_v.size(), 12);
// sort paths by pair-path support descendingly
PathReadSupportComparator local_pc = new PathReadSupportComparator(PathReads);
Collections.sort(paths_ending_at_v, local_pc);
Collections.reverse(paths_ending_at_v); // now descending according to read support.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : paths_ending_at_v)
{
debugMes("path_ending_at_v: " + path, 15);
}
}
// Examine each of the growing paths that end at vertex V for extension.
// extend V by U if:
// a. in TRIPLET LOCK mode and there exists a read-supported structure w-v-u
// b. extended triplet exists and the growing path is consistent with the
// complex read path consistent with [a.b.c]..w-v-u
// c. haven't reached the limit of number of paths artificially capped //FIXME: re-examine this.
// d. ALL POSSIBLE PATHS parameter set
// e. path has enough read support: look back the number of nodes before U in this growing path
// that yield at least the required path support sequence distance, and ensure read paths
// demonstrate compatibility/containment
for (List<Integer> path : paths_ending_at_v)
{
// remember, only looking at extensions that contain 'u' now.
debugMes("\n\n# [PathCounter(" + u.getID() + ")=" + path_counter + " Examining potential extension of path ending at node V: " + v.getID()
+ " by successor: " + u.getID()
+ ", via path=" + path, 15);
Boolean path_wvu_acceptable = true; // by default
Boolean extended_triplet_path_compatible = false;
if (path.size() >= 3) {
Integer w = path.get(path.size()-2); // create triplet w-v-u
if (tripletMapper.containsKey(v.getID())
&& tripletMapper.get(v.getID()).size() > 1) // at least partially resolved structure via read path
{
List<Integer> triplet = new ArrayList();
triplet.add(w); // left
triplet.add(v.getID()); // central
triplet.add(u.getID()); // right
List<List<Integer>> triplet_list = tripletMapper.get(v.getID());
if (tripletSupported(triplet_list, triplet)){
// Hurray, got triplet support
debugMes("Triplet Path: " + triplet + " *IS* supported by reads.", 15);
path_wvu_acceptable = true;
// do extended triplet search.
// path must be compatible with at least one of the complex prefix paths
// ensuring compatible with the larger path context that may extend beyond a triplet
List<Integer> pathWu = new ArrayList<Integer>(); // pathWu = path with u
pathWu.addAll(path);
pathWu.add(u.getID());
// extended triplet search
for (List<Integer> prefix_path : extendedTripletMapper.get(u.getID())) {
PairPath ppath = new PairPath(prefix_path);
if (ppath.isCompatibleAndContainedBySinglePath(pathWu)) {
debugMes("EXTENDED_TRIPLET_SEARCH: " + ppath + " compared to " + pathWu + " True", 15);
extended_triplet_path_compatible = true;
break;
}
else {
debugMes("EXTENDED_TRIPLET_SEARCH: " + ppath + " compared to " + pathWu + " False", 15);
}
}
}
else {
// lock down node, don't allow alternative structures not supported by reads here.
debugMes("Triplet Path: " + triplet + " is *NOT* supported by reads.", 15);
path_wvu_acceptable = false;
}
}
else {
						debugMes("TripletMapper doesn't contain node: " + v.getID(), 15);
// if node v is at center of X-structure and there are no valid triplets, disable extension
if (FRACTURE_UNRESOLVED_XSTRUCTURE && xStructuresResolvedByTriplets.containsKey(v.getID())) {
debugMes("Node " + v.getID() + " is at center of X structure and no triplet support detected. FractureUnresolvedX set, so Disabling extension.", 10);
path_wvu_acceptable = false;
}
}
}
else {
debugMes("path " + path + " is too short to check for triplet support.", 15);
}
HashMap<PairPath,Integer> readsOfPathUntilV = PathReads.get(path); //this holds reads of path until V + reads starting at V
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("ReadsOfPathUntilV: PATH: " + path, 15);
for (PairPath pp : readsOfPathUntilV.keySet())
debugMes("ReadsOfPathUntiV: READ: " + pp, 15);
}
if (
path_wvu_acceptable // path_wvu only matters under triplet locking mode.
&&
// place restriction on the number of paths pursued here.
// if extended triplet compatible, be sure to explore it... dont want to lose a good path here.
(extended_triplet_path_compatible || path_counter <= MAX_NUM_PATHS_PER_NODE_EXTEND)
&&
(
ALL_POSSIBLE_PATHS
||
pathHasEnoughReadSupport(readsOfPathUntilV,path,u,graph,dijkstraDisWoVer)
||
u.getID() < 0 // a sink node, if path made it this far, sink can be added.
)
)
{
path_counter++;
// add [path,u] to paths of u. Each vertex contains all the paths that led up to it.
if (!Paths.containsKey(u))
Paths.put(u, new ArrayList<List<Integer>>());
List<Integer> pathWu = new ArrayList<Integer>(); // pathWu = path with u
pathWu.addAll(path);
pathWu.add(u.getID());
if (!Paths.get(u).contains(pathWu)){
debugMes("\nSuccessful extension of " + u.getID() + " to generate path " +pathWu, 15);
Paths.get(u).add(pathWu);
}
//update reads of [path,u] : includes all reads that are consistent with path Wu up to and including u
updateReadsOfPath(PathReads,PathContainedReads, pathWu,readsOfPathUntilV,u.getID(),graph,dijkstraDis);
//update extension
Extensions.put(path, true);
vExtendedToU = true;
}
else {
debugMes("No extension of path " + path + " by " + u, 15);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
boolean pathHasEnoughSupport = pathHasEnoughReadSupport(readsOfPathUntilV,path,u,graph,dijkstraDisWoVer);
debugMes("\tpath_counter = " + path_counter + ", path_wvu_acceptable=" + path_wvu_acceptable
+ " pathHasEnoughReadSupport=" + pathHasEnoughSupport, 15);
}
}
} // end of extending paths that end at V by U
if (!BflyQueue.contains(u))
{
debugMes(u.getID()+" was added to the queue",17);
BflyQueue.add(u);
}
//if v didn't extend to u, and we have an edge there, add (v,u) as a new path
if ( (!vExtendedToU) )
{
debugMes("the edge (v-u) was not used in any extension: "+v.getID()+"->"+u.getID(),15);
if (!Paths.containsKey(u))
Paths.put(u, new ArrayList<List<Integer>>());
List<Integer> vuPath = new ArrayList<Integer>();
vuPath.add(v.getID());
vuPath.add(u.getID());
Paths.get(u).add(vuPath);
//add the reads
if (!PathReads.containsKey(vuPath))
PathReads.put(vuPath, new HashMap<PairPath,Integer>());
if (readsStartingAtV!=null && !readsStartingAtV.isEmpty())
{
debugMes("adding the reads " +readsStartingAtV +" to the path "+ vuPath, 17);
PathReads.get(vuPath).putAll(readsStartingAtV);
updateReadsOfPath(PathReads, PathContainedReads, vuPath,readsStartingAtV,u.getID(),graph,dijkstraDis);
}
}
} // end of exploration of successors U of V
//report the paths that were not extended AND remove them from Paths
List<List<Integer>> removePaths = new ArrayList<List<Integer>>();
for (List<Integer> path : Paths.get(v))
{
SeqVertex lastV = getSeqVertex(graph, path.get(path.size()-1));
if (!lastV.equals(T_VERTEX) && Extensions.get(path)!=null && !Extensions.get(path))
{
if (getSeqPathLength(graph,path)>MIN_OUTPUT_SEQ)
{
FinalPaths_all.put(path,new Pair<Integer>(getSuppCalculation(PathReads.get(path)),0));
debugMes("the unextended path: "+path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path)) +" support",15);
}
removePaths.add(path);
}
}
for (List<Integer> path : removePaths)
{
				debugMes("path "+ path +" wasn't extended and is captured accordingly.",15);
Paths.get(v).remove(path);
Extensions.remove(path);
}
}
// end of path constructions
for (List<Integer> path : Paths.get(T_VERTEX))
{
int pathSeqLen = getSeqPathLength(graph,path);
if (pathSeqLen>MIN_OUTPUT_SEQ)
{
// adding to path collection
FinalPaths_all.put(path,new Pair<Integer>(getSuppCalculation(PathReads.get(path)),0));
if (path.get(0).intValue() == ROOT.getID())
debugMes("the finished path: "+ path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path))+" support",15);
else
debugMes("the finished (from middle unextended) path: "+ path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path)) +" support",15);
}
else {
debugMes("sequence for path: " + path + " is too short: " + pathSeqLen, 15);
}
}
if (FinalPaths_all.size() > 1)
FinalPaths_all = remove_identical_subseqs(FinalPaths_all, graph, PathReads);
return new Pair<HashMap<List<Integer>, Pair<Integer>>>(FinalPaths_diff,FinalPaths_all);
}
private static boolean parents_all_visited(SeqVertex v,
HashMap<Integer, Boolean> node_ID_visited,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex pred : graph.getPredecessors(v)) {
if (pred.getID() > 0 && ! node_ID_visited.containsKey(pred.getID())) {
return(false);
}
}
return(true);
}
private static List<List<Integer>> remove_lesser_supported_paths_EM(
List<List<Integer>> all_paths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathReads,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PathExpressionComparator pc,
HashMap<List<Integer>, Integer> separate_gene_ids) {
// determine max expr per gene
HashMap<Integer,Float> gene_to_max_expr = new HashMap<Integer,Float>();
HashMap<Integer,List<Integer>> gene_to_highest_expr_isoform = new HashMap<Integer,List<Integer>>();
for (List<Integer> path : all_paths) {
if (separate_gene_ids.containsKey(path)) {
Integer gene_id = separate_gene_ids.get(path);
float expr = pc.get_expr(path);
if (gene_to_max_expr.containsKey(gene_id)) {
if (gene_to_max_expr.get(gene_id) < expr) {
gene_to_max_expr.put(gene_id, expr);
gene_to_highest_expr_isoform.put(gene_id, path);
}
}
else {
gene_to_max_expr.put(gene_id, expr);
gene_to_highest_expr_isoform.put(gene_id, path);
}
}
}
List<List<Integer>> paths_to_keep = new ArrayList<List<Integer>>();
// retain only those genes that have at least x% expr of the dominant isoform.
for (List<Integer> path : all_paths) {
boolean keep = true;
if (separate_gene_ids.containsKey(path)) {
Integer gene_id = separate_gene_ids.get(path);
float expr = pc.get_expr(path);
float max_iso_expr = gene_to_max_expr.get(gene_id);
float pct_iso_expr = expr / max_iso_expr * 100;
if (path == gene_to_highest_expr_isoform.get(gene_id)) { // always retain highest expressed isoform.
// keep it.
debugMes("Keeping TOP isoform: " + path + " as having _highest_ expr=" + expr + " and "+ pct_iso_expr + "% dom. iso expr for gene.", 15);
}
else if (
expr * 100 >= MIN_TOTAL_ISOFORM_EXPRESSION
&&
pct_iso_expr >= MIN_RELATIVE_ISOFORM_EXPRESSION) {
// keep it.
debugMes("Keeping isoform: " + path + " as having expr=" + expr + " and "+ pct_iso_expr + "% dom. iso expr for gene.", 15);
}
else {
keep = false;
debugMes("*Excluding isoform: " + path + " as having expr=" + expr + " and " + pct_iso_expr + "% dom. iso expr for gene.", 15);
}
}
if (keep) {
paths_to_keep.add(path);
}
}
return(paths_to_keep);
}
private static List<List<Integer>> remove_lesser_supported_paths(
List<List<Integer>> paths_ending_at_v,
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathReads) {
// note, should be already sorted by priority from high-to-low
debugMes("\n## Removing lesser-supported paths that end at V", 10);
HashMap<List<Integer>, List<PairPath>> path_to_compatible_reads = new HashMap<List<Integer>, List<PairPath>>();
// compute compatibility and containments
for (List<Integer> path : paths_ending_at_v) {
path_to_compatible_reads.put(path, new ArrayList<PairPath>());
for (PairPath pp : pathReads.get(path).keySet()) {
if (pp.isCompatible(path)) {
path_to_compatible_reads.get(path).add(pp);
}
}
}
List<List<Integer>> best_paths = new ArrayList<List<Integer>>();
// examine them hierarchically and see if lower supported paths continue to add any unique read content
HashSet<PairPath> all_PairPaths = new HashSet<PairPath>();
for (List<Integer> path : paths_ending_at_v) {
List<PairPath> compat_reads = path_to_compatible_reads.get(path);
int count_unique = 0;
for (PairPath pp : compat_reads) {
if (! all_PairPaths.contains(pp)) {
count_unique++;
all_PairPaths.add(pp);
}
}
debugMes("Unique contribution of pairpath: " + count_unique + " of total: " + compat_reads.size() + " from path: " + path, 10);
if (count_unique > 0)
best_paths.add(path);
else
debugMes("\tdiscarding path due to lack of unique read (pairpath) content: " + path, 10);
}
return(best_paths);
}
private static HashMap<List<Integer>, Pair<Integer>> verifyTripletSupportAcrossPaths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<Integer, List<List<Integer>>> tripletMapper,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
HashMap<List<Integer>, Pair<Integer>> triplet_reinforced_paths = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
if (path.size() < 3) {
continue;
}
debugMes("Verifying triplets for path: " + path, 10);
// iterate through triplets
ArrayList<Pair<Integer>> triplet_reinforced_regions = new ArrayList<Pair<Integer>>();
int begin = 0;
for (int i = 1; i < path.size()-1; i++) {
Integer central_id = path.get(i);
Integer left_id = path.get(i-1);
Integer right_id = path.get(i+1);
List<Integer> adjacency_path = new ArrayList<Integer>();
adjacency_path.add(left_id);
adjacency_path.add(central_id);
adjacency_path.add(right_id);
if (tripletMapper.containsKey(central_id)) {
					debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + " OK", 10);
}
else {
debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + "*** MISSING ***", 10);
triplet_reinforced_regions.add(new Pair<Integer>(begin, i));
begin = i;
}
}
triplet_reinforced_regions.add(new Pair<Integer>(begin, path.size()-1));
for (Pair<Integer> subpath_range : triplet_reinforced_regions) {
Integer start_node_index = subpath_range.getFirst();
Integer stop_node_index = subpath_range.getSecond();
debugMes("Processing Triplet-Reinforced region: " + path.subList(start_node_index, stop_node_index + 1), 10);
// see if the start node looks like a hub
Integer start_node_id = path.get(start_node_index);
SeqVertex start_node = getSeqVertex(graph, path.get(start_node_index));
if (start_node_id >= 0 && graph.getSuccessorCount(start_node) > 1) {
start_node_index++;
}
Integer stop_node_id = path.get(stop_node_index);
SeqVertex stop_node = getSeqVertex(graph, stop_node_id);
if (stop_node_id >= 0 && graph.getPredecessorCount(stop_node) > 1) {
					stop_node_index--;
}
if (start_node_index <= stop_node_index) {
List<Integer> refined_triplet_path = path.subList(start_node_index, stop_node_index+1);
debugMes("Refined triplet-reinforced path= " + refined_triplet_path, 10);
debugMes("Start node: " + refined_triplet_path.get(0) + " has successor count: " + graph.getSuccessorCount(getSeqVertex(graph, refined_triplet_path.get(0))), 10);
debugMes("End node: " + refined_triplet_path.get(refined_triplet_path.size()-1) + " has predecessor count: " + graph.getPredecessorCount(getSeqVertex(graph, refined_triplet_path.get(refined_triplet_path.size()-1))), 10);
refined_triplet_path = ensure_path_has_sinks(refined_triplet_path);
triplet_reinforced_paths.put(refined_triplet_path, new Pair<Integer>(1,1));
}
}
}
return(triplet_reinforced_paths);
}
/**
* given these paths, and reads, re-calc the FPKM of each path
* @param FinalPaths
* @param PathReads
*/
private static void illustrateFinalPaths(
HashMap<List<Integer>, Pair<Integer>> FinalPaths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
for (List<Integer> path : FinalPaths.keySet())
{
debugMes("\nPATH: " + path, 15);
Integer supp = 0;
Integer totalCounts = 0;
HashMap<PairPath, Integer> containedReads = PathReads.get(path);
String ascii_illustration = getPathMappingAsciiIllustration(path, containedReads);
debugMes("\nPath Illustration:\n\n" + ascii_illustration + "\n", 5);
//TODO: enable printing at lower verbose level, but note that crazy long paths cause serious performance problems for generating these illustrations... some fine-tuning definitely required there.
}
}
/**
* Go over all final paths, and combine those that are too similar.
* @param graph
* @param FinalPaths
* @param PathReads
* @param topOrderInts
*/
/**
* check for similar paths that end at V, and start at different nodes
* remove the shortest of the two
* @param graph
* @param v
* @param Paths
* @param PathReads
* @param Extensions
* @param topOrderInts
*/
/*
private static List<List<Integer>> combineSimilarPathsThatEndAtV(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v,
HashMap<SeqVertex, List<List<Integer>>> Paths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads,
HashMap<List<Integer>, Boolean> Extensions) {
// new Throwable().printStackTrace();
int vertex_id = v.getID();
int total_num_paths = Paths.get(v).size();
debugMes("method: combineSimilarPathsThatEndAtV(" + vertex_id + ") with "+total_num_paths+ " paths", 10);
debugMes("paths are: "+Paths.get(v),17);
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
List<Integer> removedPathsIndices = new ArrayList<Integer>();
String path1S="", path2S="";
Iterator<List<Integer>> i1, i2;
int index1, index2, rIndex;
int pathCount1 = 0;
int pathCount2 = 0;
if (total_num_paths<=1)
return null;
// all-vs-all comparison among the paths ending at v
for (i1=Paths.get(v).iterator() ; i1.hasNext() ; )
{
List<Integer> path1 = i1.next();
path1S = getPathSeq(graph, path1);
index1 = path1S.length()-1;
pathCount1++;
pathCount2 = 0;
if (removedPathsIndices.contains(pathCount1)) {
continue;
}
boolean gotToi1 = false;
for (i2=Paths.get(v).iterator() ; i2.hasNext() ; )
{
List<Integer> path2 = i2.next();
pathCount2++;
debugMes("\r*V[" + vertex_id + "] Comparing " + total_num_paths + " paths, pairs:(" + pathCount1 + "," + pathCount2 + ") ", 15);
while (!gotToi1 && i2.hasNext())
{
if (path2.equals(path1))
gotToi1 = true;
path2 = i2.next();
pathCount2++;
}
if (path2.equals(path1))
break;
// one of these paths were removed already
if (removedPathsIndices.contains(pathCount2)) {
continue;
}
path2S = getPathSeq(graph, path2);
index2 = path2S.length()-1;
debugMes("checking for similarity the two paths: "+path1+
"(len="+path1S.length()+");"+path2+"(len="+path2S.length()+")",15);
if (twoPathsAreTooSimilar(graph, path1, path2))
{
debugMes("they are too similar!",15);
//remove the shorter path
rIndex = removeTheLesserSupportedPath(path1S,path2S,path1,path2,removeSimilarPaths,PathReads);
if (rIndex == 1)// the first path was removed
removedPathsIndices.add(pathCount1);
else
removedPathsIndices.add(pathCount2);
}
}
}
for (List<Integer> path2Remove : removeSimilarPaths)
{
debugMes("The path "+path2Remove+" was removed because it was too close to another path",12);
Paths.get(v).remove(path2Remove);
Extensions.remove(path2Remove);
}
return(removeSimilarPaths);
}
*/
/**
 * Compare the sequences of the two paths and return true if they are at least
 * MIN_PERCENT_IDENTITY_SAME_PATH percent identical (subject to the other similarity criteria).
 * @param graph
 * @param path1
 * @param path2
 * @return
 */
private static boolean twoPathsAreTooSimilar(
DirectedSparseGraph<SeqVertex,
SimpleEdge> graph,
List<Integer> path1,
List<Integer> path2
) {
debugMes("\n\n****\n\nchecking twoPathsAreTooSimilar (" + path1 + "," + path2 + ")\n****\n\n", 15);
if (! PairPath.haveAnyNodeInCommon(path1, path2)) {
debugMes("paths: " + path1 + path2 + " have no node in common, cannot be too similar.", 15);
return(false); // if no node in common, then they shouldn't be too similar.
}
AlignmentStats numTotalMismatchesAndGaps = getPrevCalcNumMismatches(graph, path1, path2);
int shorterLen = Math.min(getSeqPathLength(graph,path1),getSeqPathLength(graph,path2));
float path_per_id = 100 - (float)numTotalMismatchesAndGaps.mismatches/shorterLen * 100;
boolean tooSimilar = isThisTooSimilar(numTotalMismatchesAndGaps.mismatches, numTotalMismatchesAndGaps.max_internal_gap_length, path_per_id);
DecimalFormat df = new DecimalFormat("#.##");
debugMes("\n\n====\nRunning PATH alignment of : " + path1 + " to " + path2 + " :: numMM:" + numTotalMismatchesAndGaps.mismatches
+ ", max_internal_gap: " + numTotalMismatchesAndGaps.max_internal_gap_length
+ ", path_per_id = " + df.format(path_per_id) + ", tooSimilar: " + tooSimilar, 15);
debugMes(numTotalMismatchesAndGaps.toString(), 18);
// compare to doing a full sequence alignment:
if (false) {
String path1_seq = getPathSeq(graph, path1);
String path2_seq = getPathSeq(graph, path2);
Alignment alignment;
if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
alignment = NWalign.run_SW_alignment("A", path1_seq, "B", path2_seq, 4, -5, 10, 1);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
alignment = NWalign.run_NW_alignment("A", path1_seq, "B", path2_seq, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
}
AlignmentStats a = new AlignmentStats(alignment);
debugMes("\n\n====\nSEQUENCE_ALIGNMENT_RESULTS:\n" + a.toString(), 15);
debugMes (new jaligner.formats.Pair().format(alignment), 15);
}
return(tooSimilar);
}
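/*
 * Illustrative sketch (hypothetical numbers, not part of the original code): how the percent
 * identity above works out. With 5 mismatches and a shorter path sequence length of 500 bp:
 *
 *   float path_per_id = 100 - (float) 5 / 500 * 100;   // = 99.0
 *
 * The pair is then flagged as too similar only if isThisTooSimilar() also finds the mismatch
 * count and maximum internal gap length within their configured limits.
 */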
/**
 * For path1 and path2, find the last node that they share by walking both paths backwards,
 * using the topological (finishing-time) order of the nodes to decide which path to advance.
 * Returns -1 if no shared node is found.
 * @param graph
 * @param path1
 * @param path2
 * @return
 */
private static Integer findLastSharedNode(DirectedSparseGraph<SeqVertex,SimpleEdge> graph,
List<Integer> path1,
List<Integer> path2) {
path1 = PairPath.trimSinkNodes(path1);
path2 = PairPath.trimSinkNodes(path2);
if (path1.isEmpty() || path2.isEmpty()) {
return(-1);
}
List<SeqVertex> reversePath1 = getReverseSeqVertexPath(graph,path1);
List<SeqVertex> reversePath2 = getReverseSeqVertexPath(graph,path2);
Iterator<SeqVertex> p1_iter = reversePath1.iterator();
Iterator<SeqVertex> p2_iter = reversePath2.iterator();
SeqVertex p1_v = p1_iter.next();
SeqVertex p2_v = p2_iter.next();
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
while (p1_v != p2_v )
{
if (finishingTimeComparator.compare(p1_v,p2_v)>=0) {
if (p1_iter.hasNext())
p1_v = p1_iter.next();
else
break;
}
else if (p2_iter.hasNext())
p2_v = p2_iter.next();
else
break;
}
return (p1_v==p2_v)? p1_v.getID() : -1;
}
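/*
 * Illustrative sketch (hypothetical node IDs, not part of the original code): for
 * path1 = [3, 7, 12, 20] and path2 = [5, 7, 15, 20] the reversed walks both start at node 20,
 * so findLastSharedNode(graph, path1, path2) returns 20. When the current vertices differ, the
 * walk advances the one with the later finishing time; if either iterator runs out before a
 * shared vertex is found, -1 is returned.
 */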
/**
* given the graph and a list of integers, return the reverse list of seqVertices
* @param graph
* @param path
* @return
*/
private static List<SeqVertex> getReverseSeqVertexPath(DirectedSparseGraph<SeqVertex,SimpleEdge> graph, List<Integer> path) {
List<SeqVertex> res = new ArrayList<SeqVertex>();
for (int i=path.size()-1; i>=0 ; i--) {
res.add(getSeqVertex(graph, path.get(i)));
}
return res;
}
/**
 * Given two paths, return the alignment stats (mismatches, gaps, etc.) between them.
 * If this calculation hasn't been done before, compute it and cache the result.
 * @param graph
 * @param path1
 * @param path2
 * @return
 */
private static AlignmentStats getPrevCalcNumMismatches (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> path1, List<Integer> path2) {
debugMes("getPrevCalcNumMismatches: Path1: " + path1 + " Path2: " + path2, 15);
// Not penalizing end gaps
boolean is_at_start_of_graph = ( ((! path1.isEmpty()) && path1.get(0) == -1) || ( (! path2.isEmpty()) && path2.get(0) == -1) );
boolean is_at_end_of_graph = ( ((! path1.isEmpty()) && path1.get(path1.size()-1) == -2) || ( (! path2.isEmpty()) && (path2.get(path2.size()-1) == -2)) );
String P1_s = path1+"";
String P2_s = path2+"";
Comparator<String> stringComp = String.CASE_INSENSITIVE_ORDER;
int compRes = stringComp.compare(P1_s, P2_s);
String key = (compRes>=0)? P1_s+";"+P2_s : P2_s+";"+P1_s;
if (NUM_MISMATCHES_HASH.containsKey(key)) {
AlignmentStats a = NUM_MISMATCHES_HASH.get(key);
// Already computed it, use the cached value
debugMes("key: " + key + ", cached as: " + a.toString(), 15);
NUM_MISMATCHES_HASH.put(key, a);
return(a);
}
// both paths are effectively empty
else if ( (path1.isEmpty() || (path1.size() == 1 && path1.get(0) < 0))
&&
(path2.isEmpty() || (path2.size() == 1 && path2.get(0) < 0))
) {
AlignmentStats a = new AlignmentStats();
NUM_MISMATCHES_HASH.put(key, a);
return(a);
}
// paths are identical
else if (path1.equals(path2)) {
AlignmentStats a = new AlignmentStats();
// perfect matches, no gaps.
String path1s = getPathSeq(graph, path1);
a.matches = path1s.length();
a.alignment_length = path1s.length();
debugMes("paths are equivalent: Path1:" + path1 + ", Path2:" + path2 + " and have alignment stats:" + a.toString(), 15);
NUM_MISMATCHES_HASH.put(key, a); // cache results
return(a);
}
// empty path 1, but have path 2
else if (path1.isEmpty() || (path1.size() == 1 && path1.get(0) < 0)) {
AlignmentStats a = new AlignmentStats();
if (! (is_at_start_of_graph || is_at_end_of_graph)) {
Integer path2_seq_len = getPathSeq(graph, path2).length();
path2_seq_len -= KMER_SIZE - 1;
a.max_internal_gap_length = path2_seq_len;
a.gaps = path2_seq_len;
}
debugMes("empty path1 vs " + path2 + " = " + a.toString(), 15);
return(a);
}
// empty path 2, but have path 1
else if (path2.isEmpty() || (path2.size() == 1 && path2.get(0) < 0)) {
AlignmentStats a = new AlignmentStats();
if (! (is_at_start_of_graph || is_at_end_of_graph)) {
Integer path1_seq_len = getPathSeq(graph, path1).length();
path1_seq_len -= KMER_SIZE - 1;
a.max_internal_gap_length = path1_seq_len;
a.gaps = path1_seq_len;
}
debugMes("path1 : " + path1 + " vs empty path2 = " + a.toString(), 15);
return(a);
}
/*
else if (path1.get(path1.size() -1) == path2.get(path2.size() -1) ) {
// last elements are the same
debugMes("paths have same last node: Path1:" + path1 + ", Path2:" + path2, 15);
AlignmentStats nodeAlignStats = getPrevCalcNumMismatches(graph,
path1.subList(path1.size()-1, path1.size()),
path2.subList(path2.size()-1, path2.size()));
debugMes("Scores for last node comparison: " + path1 + path2 + nodeAlignStats.toString(), 15);
// get prefix alignment stats
List<Integer> subP1_list = path1.subList(0, path1.size()-1);
List<Integer> subP2_list = path2.subList(0, path2.size()-1);
AlignmentStats remainingAlignmentStats = getPrevCalcNumMismatches(graph, subP1_list, subP2_list);
debugMes("prefix alignment stats for: " + subP1_list + subP2_list + remainingAlignmentStats.toString(), 15);
remainingAlignmentStats = remainingAlignmentStats.increment_alignment_stats(nodeAlignStats);
debugMes("summing the alignment scores for : " + path1 + path2 + remainingAlignmentStats.toString(), 15);
NUM_MISMATCHES_HASH.put(key, remainingAlignmentStats);
return(remainingAlignmentStats);
}
*/
else {
// No empty path, and alignments haven't been scored/cached yet.
Integer lastSharedNode = findLastSharedNode(graph,path1,path2);
if (lastSharedNode!=-1) { // there is a shared node
// run sequence comparison on suffix part, recurse for prefix part if node in common.
debugMes("Paths " + path1 + path2 + " share node " + lastSharedNode, 15);
Integer p1V2index = path1.indexOf(lastSharedNode);
Integer p2V2index = path2.indexOf(lastSharedNode);
// get path up to but not including the shared node.
List<Integer> prefix_path_1 = path1.subList(0, p1V2index);
List<Integer> prefix_path_2 = path2.subList(0, p2V2index);
debugMes("getting prefix alignment for " + prefix_path_1 + prefix_path_2, 15);
// recurse to get the prefix alignment
AlignmentStats prefixStats = getPrevCalcNumMismatches(graph, prefix_path_1, prefix_path_2);
debugMes("path prefix alignment stats for: " + prefix_path_1 + " and " + prefix_path_2 + " : " + prefixStats.toString(), 15);
// get alignment for shared node pair
List<Integer> shared_node_path1 = path1.subList(p1V2index, p1V2index+1);
List<Integer> shared_node_path2 = path2.subList(p2V2index, p2V2index+1);
AlignmentStats sharedNodeStats = getPrevCalcNumMismatches(graph, shared_node_path1, shared_node_path2);
// add matches for the current alignment
List<Integer> suffix_path_1 = new ArrayList<Integer>();
List<Integer> suffix_path_2 = new ArrayList<Integer>();
if (p1V2index < path1.size()-1) {
suffix_path_1 = path1.subList(p1V2index+1,path1.size());
}
if (p2V2index < path2.size()-1) {
suffix_path_2 = path2.subList(p2V2index+1,path2.size());
}
debugMes("getting suffix alignment for: " + suffix_path_1 + suffix_path_2, 15);
AlignmentStats suffixStats = getPrevCalcNumMismatches(graph, suffix_path_1, suffix_path_2);
debugMes("suffix alignment stats: " + suffixStats, 15);
suffixStats = suffixStats.increment_alignment_stats(sharedNodeStats);
suffixStats = suffixStats.increment_alignment_stats(prefixStats);
debugMes("combining suffix and prefix alignment stats: " + suffixStats, 15);
NUM_MISMATCHES_HASH.put(key, suffixStats);
return(suffixStats);
}
else {
// no commonly shared node.
// do the actual alignment.
debugMes("-no shared node, alignment not cached, computing: " + path1 + " to " + path2, 12);
String path1s = getPathSeq(graph, path1);
String path2s = getPathSeq(graph, path2);
debugMes("-path1s length: " + path1s.length() + ", path2s length: " + path2s.length(), 12);
//TODO: If one path sequence is a substring of the other, no reason to do an alignment.
// this can be known based on the path list comparison, without needing to do a string comparison.
//align the two seqs
AlignmentStats stats;
if ( (path1s.length() > MAX_SEQ_LEN_DP_ALIGN && path2s.length() > MAX_SEQ_LEN_DP_ALIGN)
||
path1s.length() > 100000 // problems can arise in the alignment code if either seq is longer than this
||
path2s.length() > 100000
) {
// heuristic... if seqs are this long, it's highly unlikely they'd be too similar anyway
stats = ZipperAlignment.doZipperAlignment("A", path1s, "B", path2s);
}
// very short sequence in path1 or path2
else if ( (path1s.length() < 10 && path2s.length() > 20)
||
(path1s.length() > 20 && path2s.length() < 10) )
{
if (is_at_start_of_graph) {
stats = ZipperAlignment.doZipperAlignmentAnchorRight("A", path1s, "B", path2s);
}
else if (is_at_end_of_graph) {
stats = ZipperAlignment.doZipperAlignmentAnchorLeft("A", path1s, "B", path2s);
}
else {
// let ZipperAlignment try to figure it out
stats = ZipperAlignment.doZipperAlignment("A", path1s, "B", path2s);
}
}
else if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
Alignment alignment = NWalign.run_SW_alignment("A", path1s, "B", path2s, 4, -5, 10, 1);
debugMes (new jaligner.formats.Pair().format(alignment), 17);
stats = new AlignmentStats(alignment);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
Alignment alignment = NWalign.run_NW_alignment("A", path1s, "B", path2s, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
debugMes (new jaligner.formats.Pair().format(alignment), 17);
stats = new AlignmentStats(alignment);
}
int alignment_length = stats.alignment_length;
int matches = stats.matches;
int mismatches = stats.mismatches;
int gaps = stats.gaps;
int right_gap_len = stats.right_gap_length;
int left_gap_len = stats.left_gap_length;
int max_internal_gap_length = stats.max_internal_gap_length;
float percent_A_in_alignment = (float) stats.get_count_of_bases_in_aligned_region("A") / (path1s.length()) * 100;
float percent_B_in_alignment = (float) stats.get_count_of_bases_in_aligned_region("B") / (path2s.length()) * 100;
debugMes("Percent A in alignment = " + stats.get_count_of_bases_in_aligned_region("A") + " / " + path1s.length() + " = " + percent_A_in_alignment + "%",15);
debugMes("Percent B in alignment = " + stats.get_count_of_bases_in_aligned_region("B") + " / " + path2s.length() + " = " + percent_B_in_alignment + "%",15);
float max_percent_aligned = Math.max(percent_A_in_alignment, percent_B_in_alignment);
float percent_identity = (float)matches/(matches+mismatches) * 100;
float percent_gapped = (float)gaps/alignment_length * 100;
debugMes("Matches: " + matches + ", Mismatches: " + mismatches + ", gaps: " + gaps + ", align_len: " + alignment_length,15);
debugMes("percent_identity: " + percent_identity + ", percent_gapped: " + percent_gapped,15);
debugMes("max_percent_aligned: " + max_percent_aligned,15);
debugMes("max internal gap length: " + max_internal_gap_length + "\n",15);
int total_significant_diffs = 0;
if (is_at_start_of_graph || is_at_end_of_graph) {
total_significant_diffs = mismatches + gaps;
debugMes("(start or end of graph) Total number of significant alignment diffs = (mismatches: " + mismatches
+ " + internal_gaps: " + gaps
+ ") = " + total_significant_diffs, 15);
// the max internal gap length calculation ignores the left gap length when at the start of the graph
if (is_at_start_of_graph) {
stats.left_gap_length = 0;
if (! is_at_end_of_graph) {
// deal with right-gap in alignment stats
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.right_gap_length);
total_significant_diffs += stats.right_gap_length;
stats.gaps += stats.right_gap_length;
}
}
if (is_at_end_of_graph) {
stats.right_gap_length = 0;
if (! is_at_start_of_graph) {
// deal with left-gap in alignment stats
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.left_gap_length);
total_significant_diffs += stats.left_gap_length;
stats.gaps += stats.left_gap_length;
}
}
}
else {
total_significant_diffs = mismatches + gaps + left_gap_len + right_gap_len; // all gaps count TODO: ignore right gap length if at end of graph
debugMes("(internal of graph) Total number of significant alignment diffs = (mismatches: " + mismatches
+ " + internal_gaps: " + gaps
+ " + left_gap_len: " + left_gap_len
+ " + right_gap_len: "+ right_gap_len
+ " = " + total_significant_diffs, 15);
// adjust max internal gap length value based on left or right gap lengths, since this is an internal node
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.left_gap_length);
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.right_gap_length);
}
stats.total_not_matched = total_significant_diffs; // update based on above.
debugMes("AlignmentStats: " + stats.toString(), 15);
NUM_MISMATCHES_HASH.put(key, stats);
return NUM_MISMATCHES_HASH.get(key);
}
}
}
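/*
 * Illustrative sketch (hypothetical paths, not part of the original code): how the shared-node
 * split above decomposes an alignment. For path1 = [1, 4, 9] and path2 = [2, 4, 9] the last
 * shared node is 9, so the stats are assembled recursively as
 *
 *   prefix:  getPrevCalcNumMismatches(graph, [1, 4], [2, 4])
 *   shared:  getPrevCalcNumMismatches(graph, [9], [9])   // identical, all matches
 *   suffix:  getPrevCalcNumMismatches(graph, [], [])     // empty here
 *
 * and summed with increment_alignment_stats(); only path pairs with no shared node fall
 * through to an actual sequence alignment.
 */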
/**
 * Given these alignment stats, decide whether the two sequences are too similar.
 * FIXME - find a better criterion.
 * @param numMM - number of differences (mismatches plus gaps)
 * @param max_internal_gap_length
 * @param percent_identity
 * @return
 */
private static boolean isThisTooSimilar(int numMM, int max_internal_gap_length, float percent_identity) { // number of differences, not just mismatches, includes gaps
DecimalFormat df = new DecimalFormat("#.##");
boolean too_similar = ( max_internal_gap_length <= MAX_INTERNAL_GAP_SAME_PATH
&&
( numMM <= MAX_DIFFS_SAME_PATH || percent_identity >= MIN_PERCENT_IDENTITY_SAME_PATH));
debugMes("the two paths have these stats: numMM="+numMM
+ ", max_internal_gap_length=" + max_internal_gap_length
+ ", identity="+df.format(percent_identity)+"%"
+ ", tooSimilar: " + too_similar,15);
return (too_similar); // true means the two paths are considered too similar to keep both
}
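/*
 * Illustrative sketch (hypothetical threshold values, not part of the original code): with
 * MAX_INTERNAL_GAP_SAME_PATH = 10, MAX_DIFFS_SAME_PATH = 5 and
 * MIN_PERCENT_IDENTITY_SAME_PATH = 98.0f,
 *
 *   isThisTooSimilar(3, 2, 97.5f)    // true:  gap <= 10 and only 3 diffs
 *   isThisTooSimilar(12, 2, 99.0f)   // true:  gap <= 10 and identity above threshold
 *   isThisTooSimilar(12, 20, 99.0f)  // false: internal gap exceeds the cap
 */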
/**
 * Given two paths (and their sequences), remove the one with less read support.
 * If the read support is equal, remove the one with the shorter sequence.
 * @param path1S
 * @param path2S
 * @param path1
 * @param path2
 * @param removeSimilarPaths
 * @param PathReads
 */
private static int removeTheLesserSupportedPath(String path1S, String path2S,
List<Integer> path1, List<Integer> path2, List<List<Integer>> removeSimilarPaths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
List<Integer> path2remove,path2keep;
int sum1=0,sum2=0;
if (PathReads.get(path1)!=null)
for (Integer s : PathReads.get(path1).values())
sum1+=s;
if (PathReads.get(path2)!=null)
for (Integer s : PathReads.get(path2).values())
sum2+=s;
debugMes("Scoring paths based on read support. Path: " + path1 + " has " + sum1 + " read support, and " + path2 + " has " + sum2 + " read support.", 15);
if (sum1<sum2)
{
path2remove = path1;
path2keep = path2;
}
else if (sum1 > sum2)
{
path2remove = path2;
path2keep = path1;
}
else {
// same read support, so choose the longer sequence over the shorter one.
if (path1S.length() >= path2S.length())
{
path2remove = path2;
path2keep = path1;
}
else
{
path2remove = path1;
path2keep = path2;
}
}
debugMes("removing path "+path2remove+" and keeping path "+path2keep,15);
if (!removeSimilarPaths.contains(path2remove))
removeSimilarPaths.add(path2remove);
if (PathReads.get(path2remove)!=null)
{
if (PathReads.get(path2keep)==null)
PathReads.put(path2keep, new HashMap<PairPath,Integer>());
// no longer assuming ownership of the other's reads, as this causes problems!
//PathReads.get(path2keep).putAll(PathReads.get(path2remove));
PathReads.remove(path2remove);
}
return (path2remove==path1)? 1:2;
}
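/*
 * Illustrative sketch (hypothetical support counts, not part of the original code): if path1 has
 * 12 supporting reads and path2 has 30, path1 is removed and 1 is returned; with equal read
 * support the shorter sequence loses, so for path1S.length() = 800 vs path2S.length() = 750,
 * path2 is removed and 2 is returned.
 */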
/**
* Given this path, ask whether it has enough support, either by last triplet, or by length
* @param readsOfPathUntilV - reads of this path, so far
* @param path - the path so far
* @param u - the extension to the path
* @param graph
* @param dijkstraDisWoVer
* @return
*/
private static boolean pathHasEnoughReadSupport(
HashMap<PairPath, Integer> readsOfPathUntilV,
List<Integer> path,
SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
List<Integer> pathWU = new ArrayList<Integer>(path);
pathWU.add(u.getID());
List<Integer> subPath = new ArrayList<Integer>();
subPath.add(0, u.getID());
SeqVertex v = getSeqVertex(graph,path.get(path.size()-1));
if (LENIENT_PATH_CHECKING) {
// nodes u and v exist within a read pair path, and the read(s) are compatible with this tentative path.
return(pathHasTerminalCompatibleReadSupport(path, v, u, graph, readsOfPathUntilV, dijkstraDisWoVer));
}
else if (USE_TRIPLETS) // never do it this way; option turned off permanently but retained for legacy's sake.
{
subPath.add(0, v.getID());
if (path.size()>1)
subPath.add(0,path.get(path.size()-2));
return (subPathHasEnoughReadSupport(pathWU, readsOfPathUntilV, subPath, graph, dijkstraDisWoVer));
}
else{
// default method
int lookBack = PATH_REINFORCEMENT_DISTANCE;
int lenSoFar = u.getNameKmerAdj().length();
for (int j = path.size()-1 ; j>=0 && lenSoFar < lookBack; j--) {
SeqVertex vLast = getSeqVertex(graph, path.get(j));
subPath.add(0, vLast.getID());
lenSoFar += vLast.getNameKmerAdj().length();
}
return (subPathHasEnoughReadSupport(pathWU, readsOfPathUntilV, subPath, graph, dijkstraDisWoVer));
}
}
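/*
 * Illustrative sketch (hypothetical values, not part of the original code): in the default mode
 * above, with PATH_REINFORCEMENT_DISTANCE = 75, nodes are prepended to subPath from the end of
 * the current path backwards until roughly 75 bp of k-mer-adjusted sequence has been collected,
 * and the extension by u is accepted only if subPathHasEnoughReadSupport() finds at least
 * MIN_READ_SUPPORT_THR reads compatible with that sub-path.
 */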
/**
 * Check that the given sub-path has at least MIN_READ_SUPPORT_THR supporting reads.
 * A supporting read is a read that enforces this sub-path.
 * @param fullPathWU
 * @param readsOfPathUntilV
 * @param subPath
 * @param graph
 * @param dijkstraDisWoVer
 * @return
 */
private static boolean subPathHasEnoughReadSupport(
List<Integer> fullPathWU,
HashMap<PairPath, Integer> readsOfPathUntilV,
List<Integer> subPath,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
int local_debug_level = 17;
debugMes("-checking if subPath has enough read support. Exploring sub path: " + subPath, local_debug_level);
// note, subpath contains U as final node.
Integer last_subpath_id = subPath.get(subPath.size()-1);
Integer first_subpath_id = subPath.get(0);
if (first_subpath_id < 0)
first_subpath_id = subPath.get(1); // don't want a sink node
int numberReadsSupporting = 0;
for (PairPath pPath : readsOfPathUntilV.keySet())
{
debugMes("-readsOfPathUntilV: " + pPath, local_debug_level);
boolean thisReadOK = true;
if (ORIGINAL_PATH_EXTENSIONS) {
// Examining within the context of the entire graph
for (Integer vTempID : subPath) {
if (thisReadOK)
thisReadOK = thisReadOK &&
readEnforcesVertex(graph, dijkstraDisWoVer, pPath, getSeqVertex(graph, vTempID));
}
}
else {
if (! (
pPath.containsID(last_subpath_id)
//&& (first_subpath_id == -1 || pPath.containsID(first_subpath_id) )) // triplet came in with a sink node attached, not a true triplet //FIXME: should check this earlier than this routine.
&& pPath.isCompatible(fullPathWU)
&& pPath.node_is_contained_or_possibly_in_gap(first_subpath_id, graph, dijkstraDisWoVer)
)
)
{
// require pPath contains the first and last ID of the subpath
thisReadOK = false;
}
debugMes("\t-checking if pp: " + pPath + " supports extension of " + fullPathWU + " => " + thisReadOK, local_debug_level);
/*
// COMPATIBLE_PATH_EXTENSIONS MODE, NOW THE DEFAULT
boolean subPathContained = pPath.containsSubPath(subPath);
boolean pathWUcompatible = pPath.isCompatible(fullPathWU);
debugMes("CPATEXT: subPath: " + subPath + " contained by read: " + pPath.get_paths() + " : " + subPathContained, local_debug_level);
debugMes("CPATEXT: pathWU: " + fullPathWU + " compatible with read: " + pPath.get_paths() + " : " + pathWUcompatible, local_debug_level);
thisReadOK = (subPathContained && pathWUcompatible);
*/
}
debugMes("examining subPath: " + subPath + " for reinforcement by read: " + pPath.get_paths() + " :" + thisReadOK, local_debug_level);
if (thisReadOK)
{
numberReadsSupporting+=readsOfPathUntilV.get(pPath);
debugMes("the read "+pPath+"("+readsOfPathUntilV.get(pPath)+") enforces the sub-path ("+subPath+")",local_debug_level);
if (numberReadsSupporting >= MIN_READ_SUPPORT_THR) {
break; // no reason to spend time searching more.
}
} else
debugMes("the read "+pPath+"("+readsOfPathUntilV.get(pPath)+") does not enforce the sub-path ("+subPath+")",local_debug_level);
}
debugMes("-found: " + numberReadsSupporting + " reads supporting subpath.", local_debug_level);
boolean res = (numberReadsSupporting>=MIN_READ_SUPPORT_THR);
if (res)
debugMes("the sub-path ("+subPath+") has PASSED", local_debug_level);
else
debugMes("the sub-path ("+subPath+") has NOT PASSED",local_debug_level);
return res;
}
/**
 * Check whether the given read (pair path) enforces vertex v: the read either contains v,
 * or removing v would disconnect the read from the source/sink or break the gap between its mates.
 * @param graph
 * @param dijkstraDisWoVer
 * @param pPath
 * @param v
 * @return
 */
private static boolean readEnforcesVertex(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer,
PairPath pPath, SeqVertex v) {
// read contains vertex v
if (v==null || pPath.containsID(v.getID()) || v.equals(ROOT) || v.equals(T_VERTEX))
return true;
// is path from the root (sink) to first vertex of the read disabled by removing vertex v of subpath?
SeqVertex firstV = getSeqVertex(graph, pPath.getFirstID());
if (dijkstraDisWoVer.getDistanceWoVer(ROOT, firstV,v)==null)
return true;
// is path from last vertex of the read to the terminal node (sink) disabled by removing vertex v of subpath?
SeqVertex lastV = getSeqVertex(graph, pPath.getLastID());
if (dijkstraDisWoVer.getDistanceWoVer(lastV, T_VERTEX,v)==null)
return true;
if (pPath.hasSecondPath())
{
// is path from beginning to end of read's pair-path disrupted by removing vertex v of subpath?
//last of first path
lastV = getSeqVertex(graph, pPath.getLastID_path1());
//first of second path
firstV = getSeqVertex(graph, pPath.getFirstID_path2());
if (dijkstraDisWoVer.getDistanceWoVer(lastV,firstV,v)==null)
return true;
}
return false;
}
private static boolean pathHasTerminalCompatibleReadSupport(
List<Integer> path,
SeqVertex v, SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<PairPath, Integer> readsOfPathUntilV,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
List<Integer> tentativePath = new Vector<Integer>(path);
tentativePath.add(u.getID());
Integer v_id = v.getID();
Integer u_id = u.getID();
List<Integer> subPath = new ArrayList<Integer>();
subPath.add(v_id);
subPath.add(u_id);
int num_compatible_paths = 0;
for (PairPath pPath : readsOfPathUntilV.keySet()) {
//if (pPath.containsID(v_id) && pPath.containsID(u_id)) {
if (pPath.containsSubPath(subPath)) {
debugMes("Checking for compatibility. Path: " + tentativePath + " with " + pPath, 18);
// got both terminal path vertices. Check for read compatibility.
if (pPath.isCompatible(path)) {
debugMes("\tPaths ARE compatible.", 18);
num_compatible_paths += readsOfPathUntilV.get(pPath);
}
}
}
debugMes("\t" + num_compatible_paths + " read (pair) paths were found to be compatible.", 18);
if (num_compatible_paths >= MIN_READ_SUPPORT_THR) { // note, not using this as triplet support here.
//TODO: rename triplet support var
return(true);
}
else {
return(false);
}
}
private static boolean vertexPairHasDiscontinuousPathSupport(
List<Integer> path,
SeqVertex v, SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<PairPath, Integer> readsOfPathUntilV,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
debugMes("\n\nCurrent path being checked for LENIENT path extension: " + path, 18);
debugMes("Performing LENIENT path checking between (v,u):\nv: " + v + "\nu: " + u, 18);
// look for u-v where u is last node of one pairpath, and v-u is the start of another path
// find a pairpath that ends in v
// find another pairpath that starts with v-u
boolean last_vertex_found_as_v = false;
boolean first_vertices_found_as_vu = false;
for (PairPath pPath : readsOfPathUntilV.keySet()) {
debugMes("\t-pairPath: " + pPath, 18);
SeqVertex last_vertex = getSeqVertex(graph, pPath.getLastID());
debugMes("\t-Last vertex: " + last_vertex.getID(), 18);
if (last_vertex.equals(v)) {
last_vertex_found_as_v = true;
debugMes("\t\t-found last vertex as (v)", 18);
}
List<Integer> first_path = pPath.getPath1();
if (first_path.size() > 1) {
SeqVertex first_vertex = getSeqVertex(graph, first_path.get(0));
SeqVertex second_vertex = getSeqVertex(graph, first_path.get(1));
debugMes("\t-First,Second: " + first_vertex.getID() + "," + second_vertex.getID(), 18);
if (first_vertex.equals(v) && second_vertex.equals(u)) {
first_vertices_found_as_vu = true;
debugMes("\t\t-found first vertices as (vu)", 18);
}
}
if (first_vertices_found_as_vu && last_vertex_found_as_v) {
debugMes("\t* FOUND LENIENT EXTENSION", 18);
return(true);
}
}
debugMes("\t* no LENIENT extension possible", 18);
return(false); // no evidence for discontinuous support.
}
/**
* Check whether the pairPath is consistent with the node i
* @param pPath
* @param i
* @param graph
* @param dijkstraDis
* @return
*/
private static boolean readIsConsistentWithNode(PairPath pPath, Integer i,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
// if (isReadCircular(graph, pPath))
// return false;
if (pPath.containsID(i) || i<0)
return true;
SeqVertex vI = getSeqVertex(graph, i);
SeqVertex firstV = getSeqVertex(graph, pPath.getFirstID());
// i --> firstV
if (SeqVertex.isAncestral(vI, firstV, dijkstraDis)>0)
return true;
SeqVertex lastV = getSeqVertex(graph, pPath.getLastID());
// lastV --> i
if (SeqVertex.isAncestral(lastV,vI,dijkstraDis)>0)
return true;
if (pPath.hasSecondPath())
{
// see if node could be internal to the pair path
//last of first path
lastV = getSeqVertex(graph, pPath.getLastID_path1());
//first of second path
firstV = getSeqVertex(graph, pPath.getFirstID_path2());
// lastV --> i --> firstV
if (SeqVertex.isAncestral(lastV,vI,dijkstraDis)>0 && SeqVertex.isAncestral(vI, firstV, dijkstraDis)>0)
return true;
}
// not compatible if got here.
return false;
}
/**
* given the graph and a list of nodes, calc the length of the seq of this path
* @param graph
* @param path
* @return
*/
private static int getSeqPathLength(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
/*
int len = 0;
for (Integer vid : path)
if (vid>=0)
len +=getSeqVertex(graph, vid).getNameKmerAdj().length();
return len;
*/
String pathSeq = getPathSeq(graph, path);
return(pathSeq.length());
}
/**
* return the number of paths
* @param paths
* @return number of paths
*/
private static int getPathsSize(
HashMap<SeqVertex, List<List<Integer>>> paths) {
int res = 0;
for (SeqVertex key : paths.keySet())
{
res+=paths.get(key).size();
}
return res;
}
/**
* returns true iff these two nucleotides are equal
* @param n1
* @param n2
* @return
*/
private static boolean areTwoNucleotidesEqual(String n1, String n2)
{
if (n1.equals(n2))
return true;
if (USE_DEGENERATE_CODE &&
((DEGENERATE_CODE_REV.containsKey(n1) && DEGENERATE_CODE_REV.get(n1).contains(n2)) ||
(DEGENERATE_CODE_REV.containsKey(n2) && DEGENERATE_CODE_REV.get(n2).contains(n1))))
return true;
return false;
}
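/*
 * Illustrative sketch (assumes standard IUPAC entries in DEGENERATE_CODE_REV, which is not
 * verified here): with USE_DEGENERATE_CODE enabled,
 *
 *   areTwoNucleotidesEqual("A", "A")  // true: exact match
 *   areTwoNucleotidesEqual("R", "A")  // true if DEGENERATE_CODE_REV.get("R") contains "A"
 *   areTwoNucleotidesEqual("A", "C")  // false
 */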
/**
* return the degenerate code representation of the given key
* @param key
* @return
* @throws Exception
*/
private static String getDegenerateRepresentation(String key) throws Exception {
if (DEGENERATE_CODE.containsKey(key))
return DEGENERATE_CODE.get(key);
else
throw new Exception("the letters "+key+" do not have a degenerate representation\n");
}
/**
* sum the counts of all the reads in this hash
* @param readHash
* @return
*/
private static Integer getSuppCalculation(HashMap<PairPath, Integer> readHash) {
Integer res = 0;
for (PairPath key : readHash.keySet())
res = res + readHash.get(key);
return res;
}
/**
* Given the new path (with u), and the set of reads that supported the path until v
* update the set of reads that support the new path
* @param PathReads
* @param pathContainedReads
* @param pathWu
* @param readsOfPathUntilV
* @param i
* @param dijkstraDis
* @param graph
*/
private static void updateReadsOfPath(HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads,
HashMap<List<Integer>, HashSet<PairPath>> PathContainedReads, List<Integer> pathWu,
HashMap<PairPath, Integer> readsOfPathUntilV, Integer i, DirectedSparseGraph<SeqVertex, SimpleEdge> graph, DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
debugMes("updateReadsOfPath: " + pathWu, 17);
List<Integer> pathMinusU = new ArrayList<Integer>(pathWu);
pathMinusU.remove(pathMinusU.size()-1); // remove the U of the pathWu
// init read path list as needed.
if (!PathReads.containsKey(pathWu))
PathReads.put(pathWu, new HashMap<PairPath,Integer>());
if (! PathContainedReads.containsKey(pathWu)) {
PathContainedReads.put(pathWu, new HashSet<PairPath>());
}
int count_total = 0;
int count_contained_propagated = 0;
for (PairPath pPath : readsOfPathUntilV.keySet())
{
count_total++;
if (!PathReads.get(pathWu).containsKey(pPath)) { // only if this read doesn't exist in the PathReads for this pathWu
// if this read is consistent with pathWu, then add it
//if (readIsConsistentWithNode(pPath,i,graph,dijkstraDis))
if (PathContainedReads.get(pathMinusU).contains(pPath)) {
// then PathWU must contain it as well.
PathContainedReads.get(pathWu).add(pPath);
PathReads.get(pathWu).put(pPath,readsOfPathUntilV.get(pPath));
//debugMes("path is contained by pathMinusU: " + pPath, 10);
count_contained_propagated++;
}
else {
if (pPath.isCompatible(pathWu)) { // semi-expensive operation
if (pPath.isCompatibleAndContainedBySinglePath(pathWu)) {
PathContainedReads.get(pathWu).add(pPath);
}
debugMes("read "+pPath+" is consistent with "+i, 17);
PathReads.get(pathWu).put(pPath,readsOfPathUntilV.get(pPath));
}else{
debugMes("read "+pPath+" is not consistent with "+i, 17);
}
}
}
}
float pct_contained_propagated = (float) count_contained_propagated/count_total*100;
debugMes("pct_contained_propagated: " + pct_contained_propagated + "%", 10);
}
// /**
// * return true iff this read is circular.
// * A read is considered circular if its gap includes a circle
// * (the vertex at the end of path1 is inside a circle or the first vertex of path 2 is inside a circle).
// * @param graph
// * @param readPath
// * @return
// */
// private static boolean isReadCircular(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, PairPath readPath)
// if (!readPath.hasSecondPath())
// return false;
// if (getSeqVertex(graph, readPath.getLastID()).isInCircle() || //lastID of first path is circular
// getSeqVertex(graph, readPath.getFirstID_path2()).isInCircle()) // firstID of second path is circular
// debugMes("the read "+readPath+" is circular",10);
// return true;
// } else
// return false;
/**
* Return the reads, hashed by their starting vertex
* @param graph
* @param filename
* @param originalVerIDsMapping
* @param rootIDs
* @param originalGraphKmerToNodeID
* @return
* @throws IOException
*/
private static HashMap<String, List<Read>> getReadStarts(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
String filename,
HashMap<Integer, LocInGraph> originalVerIDsMapping,
Vector<Integer> rootIDs,
HashMap<String, Integer> originalGraphKmerToNodeID)
throws IOException {
BufferedReader fileB = new BufferedReader(new FileReader(filename));
HashMap<String, List<Read>> readNameHash = new HashMap<String, List<Read>>();
String l = fileB.readLine(); // read header of component
int numReadsNotMapped = 0;
int numReadsMapped = 0;
int line_counter = 0;
while (fileB.ready())
{
l = fileB.readLine();
if (l.isEmpty())
continue;
line_counter++;
if (line_counter % 1000 == 0 && BFLY_GLOBALS.VERBOSE_LEVEL >= 10)
System.err.print("\rmapped read [" + line_counter + "]");
else if (BFLY_GLOBALS.VERBOSE_LEVEL >= 11) {
System.err.print("\rmapped read [" + line_counter + "]");
}
// Component 0
// >@42MRYAAXX100104:7:100:1000:103#0 11 101393 36 101418 GAAAGACTGTCACCCTTGAGGTGGAGTCCTCTGACACTATTGACAATGTCAAGAGCAAAATCCAAGACAAGGAAGG
debugMes("Read: " + l, 20);
String[] fields = l.split("\t");
fields[0] = fields[0].replaceFirst(">", "");
List<Integer> pathIDS = null;
Read r = new Read();
pathIDS = readAndMapSingleRead(fields,originalVerIDsMapping,graph,r,false,originalGraphKmerToNodeID);
//debugMes("Threaded Read As: " + r.getName() + " : " + pathIDS, 19);
if (pathIDS==null || (pathIDS!=null && pathIDS.isEmpty()))
{
numReadsNotMapped++;
debugMes("Read could not be threaded: " + r.getName(), 12);
}else
{
//add to readNameHash
if (!readNameHash.containsKey(r.getName()))
readNameHash.put(r.getName(), new ArrayList<Read>());
readNameHash.get(r.getName()).add(r);
numReadsMapped++;
//System.err.println(r.getName());
debugMes("Threaded Read as: " + r.getName() + " : " + pathIDS, 17);
debugMes("ReadPath@Init: " + r.getName() + " : " + pathIDS, 12);
}
}
// debugMes("number of reads not found in graph = "+numReadsNotMapped +" of a total of "+(numReadsNotMapped+numReadsMapped),10);
debugMes("number of reads threaded = "+numReadsMapped
+" (from total of "+(numReadsNotMapped+numReadsMapped)+") which came from "
+ readNameHash.keySet().size() + " pairs",10);
if (numReadsNotMapped > .5*(numReadsNotMapped+numReadsMapped))
debugMes("PROBLEM: less than half of the reads were mapped to this graph ("
+numReadsMapped+"/"+(numReadsNotMapped+numReadsMapped)+")",10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 18) {
for (String readName : readNameHash.keySet()) {
String descr = "Read name to pairing info: " + readName + " => ";
List<Read> read_list = readNameHash.get(readName);
for (Read r : read_list) {
descr += r.getPathIDs();
}
debugMes(descr, 15);
}
}
return readNameHash;
}
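/*
 * Illustrative sketch (hypothetical read names, not part of the original code): after threading,
 * readNameHash groups the threaded fragments of a pair under their shared name, e.g.
 *
 *   "readX" -> [ Read(path [3, 7, 12]), Read(path [15, 20]) ]
 *
 * presumably so the two mates can later be combined into a single PairPath.
 */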
/**
* given this read, try and map it to the graph. if rev= true, do it in reverse.
* @param fields
* @param originalVerIDsMapping
* @param graph
* @param r
* @param rev
* @param originalGraphKmerToNodeID
* @return
*/
private static List<Integer> readAndMapSingleRead(String[] fields,
HashMap<Integer, LocInGraph> originalVerIDsMapping,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Read r, boolean rev,
HashMap<String, Integer> originalGraphKmerToNodeID) {
List<Integer> pathIDS = new ArrayList<Integer>();
LocInGraph fromV;
Integer startInRead,endInRead,fromOrigV;
String name;
String seq;
name = fields[0];
if (! TREAT_PAIRS_AS_SINGLE) {
if (name.endsWith("/1") || name.endsWith("/2")
|| name.endsWith("\1") || name.endsWith("\2")
|| name.endsWith(":1") || name.endsWith(":2")
)
name = name.substring(0, name.length()-2);
}
startInRead = Integer.parseInt(fields[1]);
//endInRead = Integer.parseInt(fields[3])+KMER_SIZE-1;
endInRead = Integer.parseInt(fields[3])+KMER_SIZE; //FIXME: should be as above, but chrysalis appears to be consistently off by one here...
fromOrigV = Integer.parseInt(fields[2]);
fromV = originalVerIDsMapping.get(fromOrigV);
seq = fields[6]; //there is an empty field before the seq.
r.init(name,seq, fromV, startInRead, endInRead, pathIDS);
if (endInRead >= seq.length()) {
debugMes("read " + name + " has sequence length that is shorter than supposed endInRead marking(" + endInRead + "): " + seq, 0);
return pathIDS;
}
debugMes("Read: " + name + " has start: " + startInRead + ", end: " + endInRead + " and sequence: " + seq, 20);
seq = seq.substring(startInRead, endInRead+1);
debugMes("after extracting substring: " + seq, 20);
// in case original node ID was trimmed from graph, try anchoring the sequence from the first
// recognizable retained node ID
if (fromV == null) {
debugMes("Original node ID : " + fromOrigV + " no longer exists ... walking the sequence to try to anchor it to the refined graph:", 20);
for (int i = 1 + 1; i <= seq.length() - KMER_SIZE; i++) {
String kmer = seq.substring(i, i+KMER_SIZE);
if (originalGraphKmerToNodeID.containsKey(kmer)) {
int ID = originalGraphKmerToNodeID.get(kmer);
fromV = originalVerIDsMapping.get(ID);
if (fromV != null) {
debugMes("Anchored read to graph at position " + (i + startInRead) + " with kmer " + kmer, 20);
seq = seq.substring(i);
break;
}
}
}
if (fromV != null) {
debugMes("recovered mapping of read " + name, 20);
}
else {
debugMes("couldn't recover mapping of read: " + name, 20);
}
}
if (fromV!=null)// && toV!=null)
{
Path_n_MM_count best_path = findPathInGraph(graph,seq,fromV,name);
if (best_path != null) {
pathIDS = best_path.path;
//System.err.println("read path: " + pathIDS);
if (READ_END_PATH_TRIM_LENGTH > 0) {
// do some read path trimming at beginning and end of path if little support
pathIDS = best_path.get_trimmed_path(READ_END_PATH_TRIM_LENGTH);
}
if (! pathIDS.isEmpty()) {
r.init(name,seq, fromV, startInRead, endInRead, pathIDS);
String decorator = (pathIDS.size() < best_path.path.size()) ? " ****** " : "";
debugMes("Read " + name + " seq " + seq + " threaded as: " + best_path.toString() + ", trimmed to: " + pathIDS + decorator, 17);
}
else {
debugMes("Trimmed path for read: " + name + " threaded as: " + best_path.toString() + " is empty", 19);
}
String pathSeq = getPathSeq(graph, pathIDS);
if (false) { // for debugging
if (pathSeq.indexOf(seq) < 0) {
throw new RuntimeException("Error, read seq: " + seq + "\n does not match threaded seq:\n" + pathSeq);
}
else {
debugMes("Read seq with len=" + seq.length() + " : " + seq
+ " matches to path seq with len=" + pathSeq.length() + " : " + pathSeq, 15);
}
}
}
}else
debugMes("read "+name+" was not mapped to graph. original node doesn't exist anymore ("+fromOrigV+")",19);
return pathIDS;
}
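/*
 * Illustrative sketch (field layout inferred from the parsing above, values hypothetical): an
 * input line such as
 *
 *   >readX/1 <tab> 11 <tab> 101393 <tab> 36 <tab> 101418 <tab> <tab> GAAAGACT...
 *
 * is split so that fields[0] is the read name (leading '>' stripped, trailing "/1" dropped
 * unless TREAT_PAIRS_AS_SINGLE), fields[1] = startInRead, fields[2] = original vertex ID,
 * fields[3] + KMER_SIZE = endInRead, and fields[6] (after the empty field) = the read sequence.
 */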
/**
* Given the graph, and the read, find the path of the read in the graph
* @param graph
* @param seq
* @param fromV
* @return
*/
private static Path_n_MM_count findPathInGraph(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, String seq,
LocInGraph fromV, String readName) {
int local_verbose_level = BFLY_GLOBALS.VERBOSE_LEVEL;
if (readName.startsWith("LR$") && BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
BFLY_GLOBALS.VERBOSE_LEVEL = 20;
}
List<Integer> path = new ArrayList<Integer>();
SeqVertex fromVer = getSeqVertex(graph, fromV.getNodeID());
debugMes("findPathInGraph: V:" + fromV.getNodeID() + ", with seq: " + fromVer.getName(), 20);
debugMes("trying to start the mapping to node "+fromVer.getID() + " at position: " + fromV.getIndexInNode(), 20);
MAX_MM_ALLOWED_CAP = (int) Math.ceil(seq.length() * MAX_READ_SEQ_DIVERGENCE);
MAX_MM_ALLOWED = MAX_MM_ALLOWED_CAP;
debugMes("\n\nThreading read: " + readName + ", length: " + seq.length()
+ ", allowing for " + MAX_MM_ALLOWED + " max mismatches.", 17);
debugMes("Read: " + readName + " sequence is:\n" + seq, 20);
Integer totalNumMM = 0;
HashMap<String,Path_n_MM_count> best_path_memoization = new HashMap<String,Path_n_MM_count> (); // use DP
Path_n_MM_count best_path_mapping = updatePathRecursively(graph,fromVer.getID(),seq,0, fromV.getIndexInNode(),
totalNumMM, readName, best_path_memoization);
BFLY_GLOBALS.VERBOSE_LEVEL = local_verbose_level;
if (best_path_mapping != null) {
debugMes("FINAL BEST PATH for " + readName + " is " + best_path_mapping.path + " with total mm: " + best_path_mapping.mismatch_count, 15);
return(best_path_mapping);
}
else {
debugMes("NO_READ_MAPPING_FOUND_FOR: " + readName + "\n\n", 15);
return(null); // no such path found.
}
}
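/*
 * Illustrative sketch (hypothetical numbers, not part of the original code): with
 * MAX_READ_SEQ_DIVERGENCE = 0.05f and a 200 bp read,
 *
 *   MAX_MM_ALLOWED_CAP = (int) Math.ceil(200 * 0.05f);  // = 10
 *
 * so the recursive threading abandons any extension whose cumulative mismatch count would
 * exceed 10 for this read.
 */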
/**
 * Recursively thread the read sequence through the graph starting from the given vertex,
 * returning the best-scoring path (fewest mismatches) or null if no path meets the thresholds.
 * @param graph
 * @param fromV_id
 * @param seq
 * @param locInSeq
 * @param locInNode
 * @param totalNumMM
 * @param readName
 * @param best_path_memoization
 */
private static Path_n_MM_count updatePathRecursively(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Integer fromV_id, String seq, int locInSeq, int locInNode,
Integer totalNumMM,String readName, HashMap<String, Path_n_MM_count> best_path_memoization) {
int MIN_SEQ_LENGTH_TEST_DIVERGENCE = 20;
int MAX_LEFT_END_GAPS = 5;
SeqVertex fromV = getSeqVertex(graph, fromV_id);
Integer numMM = totalNumMM; // init for each node check
String verSeq = fromV.getName(); //important - full name, not kmer-adjusted name.
debugMes("updatePathRecursively(readName=" + readName +
", locInSeq: " + locInSeq + " / " + (seq.length() -1) +
", locInNode: " + locInNode + " / " + (verSeq.length() -1) +
", totalNumMm: " + totalNumMM, 20);
int startI = locInNode;
int j=locInSeq, i = startI;
String read_vertex_start_pos_token = "" + fromV.getID() + "_" + locInNode + "_" + locInSeq;
if (best_path_memoization.containsKey(read_vertex_start_pos_token)) {
Path_n_MM_count best_path = best_path_memoization.get(read_vertex_start_pos_token);
if (best_path == null) {
debugMes("MEMOIZATION: indicates this path was a dead end. Not trying again.", 20);
return(null);
}
else {
debugMes("MEMOIZATION: already stored best path at: " + read_vertex_start_pos_token + " = " + best_path, 20);
// return a copy, critically important!!!
Path_n_MM_count best_path_copy = new Path_n_MM_count(best_path);
return(best_path_copy);
}
}
debugMes("\ntrying to continue the mapping to node "+ fromV.getShortSeqWID(), 19);
int length_to_align = Math.min(verSeq.length() - i, seq.length() - j);
debugMes("-ALIGNING READ SEQ (" + readName + ")\n" + seq.substring(j, j+length_to_align) + " " + j
+ "\nTo VERTEX (" + fromV.getShortSeqWID() + ") SEQ:\n" + verSeq.substring(i, i+length_to_align) + " " + i, 20);
debugMes("Note, rest of read sequence is:\n" + seq.substring(j), 21);
// zipper align
boolean failed_alignment = false;
Integer mm_encountered_here = 0;
for (; i>=0 && i<verSeq.length() && j<seq.length() ; i++,j++)
{
String readLetter = ""+seq.charAt(j);
String verLetter = ""+verSeq.charAt(i);
String mismatchFlag = (areTwoNucleotidesEqual(readLetter,verLetter)) ? "" : "XXX mismatch XXX";
debugMes("Comparing read bases: " + i + ":" + readLetter + ", " + j + ":" + verLetter + " " + mismatchFlag, 21);
if (!areTwoNucleotidesEqual(readLetter,verLetter))
{
//we have a mismatch
numMM++;
mm_encountered_here++;
if ( (numMM > MAX_MM_ALLOWED)
||
(i >= MIN_SEQ_LENGTH_TEST_DIVERGENCE && (mm_encountered_here/(float)(i)) > MAX_READ_LOCAL_SEQ_DIVERGENCE)
)
{
failed_alignment = true;
debugMes("shortcircuiting the zipper test, too many MM or execeeding local seq divergence", 20);
break; // no point in looking further.
}
//TODO: look at mismatch density here as well.
}
} // end of mapping read within node
if (! failed_alignment) {
debugMes("zipper alignment mm: " + mm_encountered_here, 20);
}
// retain zipper info in case it's better than any DP alignment score
int zipper_i = i;
int zipper_j = j;
int zipper_mm = mm_encountered_here;
// use DP alignment if variation is encountered above. (trying simplest/fastest strategy first)
boolean short_DP_test_passes = true;
int MIN_LENGTH_TEST_DP = 100;
if (USE_DP_READ_TO_VERTEX_ALIGN && length_to_align > MIN_LENGTH_TEST_DP && mm_encountered_here > 1) {
debugMes("Running short DP test", 20);
j=locInSeq;
i = startI;
Alignment alignment = NWalign.run_NW_alignment(
"Vertex", verSeq.substring(i, i+MIN_LENGTH_TEST_DP),
"Read", seq.substring(j, j+MIN_LENGTH_TEST_DP),
4, -5, 10, 1);
debugMes ("DP test:\n" + new jaligner.formats.Pair().format(alignment), 17);
AlignmentStats stats = new AlignmentStats(alignment);
mm_encountered_here = stats.mismatches + stats.gaps + stats.left_gap_length;
float pct_divergence = mm_encountered_here/(float)(MIN_LENGTH_TEST_DP);
if ( pct_divergence > MAX_READ_LOCAL_SEQ_DIVERGENCE) {
debugMes("DP test indicates excessive divergence: " + pct_divergence, 20);
short_DP_test_passes = false;
// leave failed alignment status as is.
}
// retain earlier zipper stats, regardless of whether or not we go into full DP below.
i = zipper_i;
j = zipper_j;
mm_encountered_here = zipper_mm;
}
int vertex_num_right_end_gaps = 0;
int read_num_right_end_gaps = 0;
int max_left_gaps = 0;
if (USE_DP_READ_TO_VERTEX_ALIGN && verSeq.length() > 2 && mm_encountered_here > 1 && short_DP_test_passes) {
debugMes(" *Trying again using full DP alignment:", 20);
// reset i and j
j=locInSeq;
i = startI;
// try aligning the full vertex sequence w/ extended ref sequence in case it contains small deletions.
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of vertex to read", 17);
/*
Alignment alignment = NWalign.run_NW_alignment(
"Vertex", verSeq.substring(i, i+length_to_align),
"Read", seq.substring(j, j+length_to_align),
4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
*/
int read_length_to_align = (int) (verSeq.length() * 1.05f);
if (read_length_to_align + j > seq.length()) {
read_length_to_align = seq.length() - j;
}
int bandwidth = (int) (MAX_READ_LOCAL_SEQ_DIVERGENCE * read_length_to_align);
Alignment alignment = NWalign.run_NW_banded_alignment(
"Vertex", verSeq.substring(i),
"Read", seq.substring(j, j+read_length_to_align),
4, -5, 10, 1, bandwidth);
debugMes (new jaligner.formats.Pair().format(alignment), 17);
AlignmentStats stats = new AlignmentStats(alignment);
mm_encountered_here = stats.mismatches + stats.gaps + stats.left_gap_length + stats.right_gap_length;
// check for right end gap in sequence
String name1 = alignment.getName1();
char[] vertex_align = alignment.getSequence1();
char[] read_align = alignment.getSequence2();
if (name1.equals("Read")) {
char[] swap = vertex_align;
vertex_align = read_align;
read_align = swap;
}
vertex_num_right_end_gaps = AlignmentStats.get_num_right_end_gaps(vertex_align);
read_num_right_end_gaps = AlignmentStats.get_num_right_end_gaps(read_align);
max_left_gaps = Math.max(AlignmentStats.get_num_left_end_gaps(vertex_align),
AlignmentStats.get_num_left_end_gaps(read_align));
debugMes("vertex end gaps: " + vertex_num_right_end_gaps, 20);
debugMes("read end gaps: " + read_num_right_end_gaps, 20);
i = verSeq.length(); // aligning to whole vertex sequence.
j += read_length_to_align;
if (vertex_num_right_end_gaps > 0) {
// read extends beyond vertex sequence
j -= vertex_num_right_end_gaps;
mm_encountered_here -= vertex_num_right_end_gaps;
}
else if (read_num_right_end_gaps > 0) {
// vertex extends beyond end of read.
mm_encountered_here -= read_num_right_end_gaps;
}
/*
// if read end gaps, extend to the end of the vertex
if (read_num_right_end_gaps > 0) {
for (int r = read_num_right_end_gaps; r>0 && j < seq.length(); r--) {
String readLetter = ""+seq.charAt(j++);
String verLetter = ""+verSeq.charAt(verSeq.length()-r);
if (!areTwoNucleotidesEqual(readLetter,verLetter)) {
mm_encountered_here++;
debugMes("walking read end gap: V[" + verLetter + "] vs. R[" + readLetter + "] ** conflict ** " , 20);
}
else {
debugMes("walking read end gap: V[" + verLetter + "] vs. R[" + readLetter + "] OK " , 20);
}
}
}
else if (vertex_num_right_end_gaps > 0) {
// over-ran the vertex sequence
// shrink the sequence by the amount extended beyond the vertex
j -= vertex_num_right_end_gaps;
mm_encountered_here -= vertex_num_right_end_gaps;
debugMes("because of vertex end gaps, walking read back by " + vertex_num_right_end_gaps + " bases.", 20);
if (i != verSeq.length()) {
debugMes("** ERROR: i=" + i + ", but verSeq.length() = " + verSeq.length(), 20);
}
}
*/
debugMes("mismatches encountered: " + mm_encountered_here, 20);
if (mm_encountered_here >= zipper_mm && zipper_i == verSeq.length()) {
debugMes("Zipper alignment mm: " + zipper_mm + " <= DP mm: " + mm_encountered_here +
", so defaulting to earlier zipper alignment.", 20);
i = zipper_i;
j = zipper_j;
mm_encountered_here = zipper_mm;
max_left_gaps = 0;
// retain any failed alignment status
}
else {
failed_alignment = false; // reset it as needed given that DP was ok.
}
numMM = mm_encountered_here + totalNumMM;
}
// note, i and j are now 1 more than the array index due to the last i++,j++ of the loop above.
float current_alignment_divergence = numMM / (float) j;
debugMes("alignment divergence up to seq pos " + j +
" = mm: " + numMM +
", div:" + current_alignment_divergence, 20);
float local_vertex_alignment_divergence = mm_encountered_here / (float) i;
debugMes("local vertex alignment divergence = " + mm_encountered_here + " / " + i + " = " + local_vertex_alignment_divergence, 20);
// examine the alignment at this vertex to see if it passes our requirements
if (i >= MIN_SEQ_LENGTH_TEST_DIVERGENCE && local_vertex_alignment_divergence >= MAX_READ_LOCAL_SEQ_DIVERGENCE) {
failed_alignment = true;
debugMes("local divergence exceeds max allowed: " + MAX_READ_LOCAL_SEQ_DIVERGENCE, 20);
}
if (max_left_gaps > MAX_LEFT_END_GAPS ) {
failed_alignment = true;
}
if (
// cumulative alignment stats up to and including vertex do not meet thresholds
(current_alignment_divergence > MAX_READ_SEQ_DIVERGENCE ||
numMM > MAX_MM_ALLOWED )
||
// alignment to current vertex fails
failed_alignment
)
{
debugMes("read "+readName+" has too many mismatches ("+numMM+") or too many left gaps (" + max_left_gaps + ")",19);
if (failed_alignment) {
// store it so we don't try again from this position in the sequence and at this vertex position.
best_path_memoization.put(read_vertex_start_pos_token, null);
}
return(null); // go back and try alternative vertex if available
}
else if (j==seq.length() || graph.getSuccessors(fromV) == null)
{
// Reached end of the read being threaded (or ran out of vertices to explore)
if (graph.getSuccessors(fromV) == null) {
// tack on unaligned terminus of sequence as mismatches
mm_encountered_here += seq.length() - j;
}
// reached base case for recursion.
debugMes("Reached end of read sequence. Read" + readName + " with length: " + seq.length()
+ " and base [" + j + "] ends at position [" + i + "] within node: "
+ fromV.getID() + " totaling " + mm_encountered_here + " mismatches. ", 19);
Path_n_MM_count best_path = new Path_n_MM_count(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
else if (i==verSeq.length()) // move to the next ver
{
// Reached end of vertex
// vertex sequence fully traversed, examine children vertices
// Going on to recursive path mapping for read
debugMes("Reached end of node sequence. Read" + readName
+ " base [" + j + "] ends at position [" + i + "] within node: "
+ fromV.getID() + " totaling " + mm_encountered_here + " mismatches. ", 19);
// get list of the next vertices to explore.
// examine edges prioritized by weight
List<Integer> continueVersIds = new ArrayList<Integer>();
List<SeqVertex> continueVers = new ArrayList<SeqVertex>();
// just use simple successor vertices
if (graph.getSuccessors(fromV) != null) {
for (SeqVertex to_v : graph.getSuccessors(fromV)) {
boolean check_initial_vertex_chars_match_seq = false;
if (check_initial_vertex_chars_match_seq) {
debugMes("-checking that next characters match up: " + to_v.getNameKmerAdj().charAt(0) + " vs. " + seq.charAt(j), 21);
if (to_v.getNameKmerAdj().charAt(0)==seq.charAt(j)) {
continueVers.add(to_v);
continueVersIds.add(to_v.getID());
}
}
else {
continueVers.add(to_v);
continueVersIds.add(to_v.getID());
}
}
}
debugMes("-reached end of vertex: " + fromV.getID() + ", exploring next vertices for continued path extension: " + continueVersIds, 19);
Path_n_MM_count best_path = null;
boolean tied_best = false;
debugMes("Pursuing extension from : " + fromV.getShortSeqWID() + " to successors: " + continueVers, 19);
List<Path_n_MM_count> all_best_paths_explored = new ArrayList<Path_n_MM_count>();
for (Integer successor_vertex_id : continueVersIds) {
debugMes("Exploring extension from node: " + fromV.getID() + " to node: " + successor_vertex_id, 20);
Path_n_MM_count best_extension = updatePathRecursively(graph,successor_vertex_id,
seq,
j,
KMER_SIZE-1, numMM, readName,
best_path_memoization);
// Back from Recursive Call.
// Evaluate best paths from the successors.
/* testing for local sequence divergence within the alignment itself
// first, check to see if it's an extension worth considering, given our local sequence divergence restrictions.
if (best_extension != null
&&
best_extension.mismatch_count / (float) (seq.length() -1 - j) > MAX_READ_LOCAL_SEQ_DIVERGENCE) {
debugMes("\tencountered max read sequence divergence: " + best_extension.mismatch_count / (float) (seq.length() -1 - j)
+ ", disallowing extension: ." + best_extension, 19);
best_extension = null; // nullify the current best extension from successor_vertex_id
}
*/
if (best_extension == null) {
debugMes("\n\tFailed extension from " + fromV.getID() + " to : " + successor_vertex_id + ".", 19);
}
else {
// have a best extension
all_best_paths_explored.add(best_extension);
debugMes(readName + " best path so far from vertex: " + fromV.getID()
+ " to : " + successor_vertex_id
+ " = " + best_extension.path +
", with total mm: " + best_extension.mismatch_count, 20);
if (best_path == null
||
(best_extension.mismatch_count <= best_path.mismatch_count) ) {
// test for tie condition
if (best_path != null) {
if (best_extension.mismatch_count == best_path.mismatch_count) {
tied_best = true;
debugMes("WARNING, Tied paths from vertex [V" + fromV_id +
" ]: \nPath A:\n" + best_extension +
"\nvs. Path B:\n" + best_path, 15);
}
else
tied_best = false;
}
best_path = best_extension;
}
}
} // end of successor vertex search.
debugMes("Done with exploring paths from vertex: " + fromV.getID(), 20);
debugMes("Paths and scores found are: ", 20);
for (Path_n_MM_count pmm: all_best_paths_explored) {
debugMes("\texplored path: " + pmm.path + " w/ mm: " + pmm.mismatch_count, 20);
}
if (best_path != null) {
debugMes("\tAND best selected was: " + best_path.path + " w/ mm: " + best_path.mismatch_count, 20);
}
if (best_path != null) {
if (tied_best) {
debugMes("WARNING: TIED_READ_PATH", 15);
boolean TRUNCATE_TIED_PATH = false;
if (TRUNCATE_TIED_PATH) {
// truncate
Path_n_MM_count truncated_path = new Path_n_MM_count(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, truncated_path);
return(new Path_n_MM_count(truncated_path));
}
else {
// add current node and local mismatches encountered here.
best_path.add_path_n_mm(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
}
else {
// not a tie
// add current node and local mismatches encountered here.
best_path = new Path_n_MM_count(best_path);
best_path.add_path_n_mm(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
}
else {
best_path_memoization.put(read_vertex_start_pos_token, null);
return(null); // no extension possible.
}
}
// should never end up here
throw(new RuntimeException("should never end up here, supposedly. i="+i
+ ", j=" + j + " ver length = " + verSeq.length() + " and readSeq length = " + seq.length() ));
}
/**
* create a hash mapping every original vertex id (including ids subsumed during compaction) to its location in the current graph
* @param graph
* @return the hash of original node id -> LocInGraph
*/
private static HashMap<Integer, LocInGraph> getOriginalVerIDsMappingHash(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
// clear double entries in the prevID list - not sure why they happen?
for (SeqVertex v : graph.getVertices())
v.clearDoubleEntriesToPrevIDs();
HashMap<Integer, LocInGraph> hash = new HashMap<Integer,LocInGraph>();
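// maps each original (pre-compaction) node id to the compacted vertex that now contains it,
// together with the offset of that original node within the containing vertex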
for (SeqVertex v : graph.getVertices())
{
// update the node tracker
SeqVertex.nodeTracker.put(v.getID(), v); // beware - shouldn't have to do this, but finding myself having to due to getSeqVertex(id) not returning the correct vertex (with full sequence attached).
debugMes("Graph vertex: " + v.getID() + " has sequence: " + v.getNameKmerAdj(), 22);
Integer loc = 0;
Integer vid = v.getID();
// if the node id is new, then the real start is in the vector
if (vid>LAST_REAL_ID)
loc = loc-1;
else
{
debugMes("adding to "+vid+": Location of original node "+vid+" in index "+loc,20);
hash.put(vid, new LocInGraph(vid,loc));
}
for (Vector<Integer> vec : v.getPrevVerIDs())
{
loc++;
for (Integer id : vec)
{
debugMes("adding to "+id+": Location of original node "+v.getID()+" in index "+loc,20);
hash.put(id, new LocInGraph(v.getID(),loc));
}
}
}
return hash;
}
/**
* go over the graph file and accumulate the in-flow and out-flow of each node, recording each node's kmer
* @param filename the component graph file
* @param outFlow per-node outgoing flow (filled in)
* @param inFlow per-node incoming flow (filled in)
* @param kmers per-node kmer sequence (filled in)
* @throws IOException
*/
private static void preProcessGraphFile(String filename,
HashMap<Integer, Integer> outFlow, HashMap<Integer, Integer> inFlow, HashMap<Integer,String> kmers) throws IOException {
BufferedReader fileB = new BufferedReader(new FileReader(filename));
String l = fileB.readLine(); // read header of component
Integer from, to, supp;
while (fileB.ready())
{
l = fileB.readLine();
// 0 -1 3 ATTGAAAGCAAGTTTTCTTCGAAT 0
// 1 0 3 TTGAAAGCAAGTTTTCTTCGAATT 0
// to from supp kmer (last column unused)
String[] fields = l.split("\t");
from = Integer.parseInt(fields[1]);
to = Integer.parseInt(fields[0]);
supp = Integer.parseInt((fields[2]));
String kmer = fields[3];
if (!outFlow.containsKey(from))
outFlow.put(from, supp);
else
outFlow.put(from, outFlow.get(from)+supp);
if (!inFlow.containsKey(to))
inFlow.put(to, supp);
else
inFlow.put(to, inFlow.get(to)+supp);
kmers.put(to,kmer);
}
}
/**
* given the filename, build a graph out of the connected components.
* Each vertex is created with its full kmer; where a vertex has an outgoing edge, only the kmer's first letter is ultimately retained for it during later processing.
* @param filename the component graph file
* @param rootIDs collects the ids of root vertices (records with no real predecessor)
* @param outFlow out flow for all vertices
* @param inFlow in flow for all vertices
* @param kmers per-node kmer sequence, as collected by preProcessGraphFile
* @return the constructed graph
* @throws IOException
*/
private static DirectedSparseGraph<SeqVertex, SimpleEdge> buildNewGraphUseKmers(
String filename,
Vector<Integer> rootIDs, HashMap<Integer,Integer> outFlow,
HashMap<Integer,Integer> inFlow,
HashMap<Integer, String> kmers)
throws IOException
{
BufferedReader fileB = new BufferedReader(new FileReader(filename));
DirectedSparseGraph<SeqVertex, SimpleEdge> graph =
new DirectedSparseGraph<SeqVertex,SimpleEdge>();
String l = fileB.readLine(); // read header of component
Integer from, to;
double supp;
int linecount = 0;
while (fileB.ready())
{
linecount++;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 18 && linecount % 17 == 0) {
System.err.print("\r[" + linecount + "] ");
}
l = fileB.readLine();
// 0 -1 3 ATTGAAAGCAAGTTTTCTTCGAAT 0
// 1 0 3 TTGAAAGCAAGTTTTCTTCGAATT 0
// to from supp kmer (last column unused)
String[] fields = l.split("\t");
from = Integer.parseInt(fields[1]);
to = Integer.parseInt(fields[0]);
supp = Double.parseDouble((fields[2]));
if (supp < INITIAL_EDGE_ABS_THR )
continue;
// just tracking node ID values.
if (from>LAST_ID)
LAST_ID = from;
if (to>LAST_ID)
LAST_ID = to;
String kmer = fields[3];
if (KMER_SIZE == 0) {
KMER_SIZE = kmer.length();
debugMes("KMER_SIZE=" + KMER_SIZE, 5);
}
else if (KMER_SIZE != kmer.length()) {
throw new RuntimeException("Error, discrepancy among kmer lengths. Stored: " + KMER_SIZE + ", found: " + kmer.length() + "\n" + l);
}
SeqVertex fromV = getSeqVertex(graph, from);
if (fromV==null && from>=0)
{
//fromV = new SeqVertex(from,firstLetter.get(from)+""+kmer.substring(0,KMER_SIZE-1));
fromV = new SeqVertex(from,kmers.get(from));
graph.addVertex(fromV);
}
boolean isRoot = (from<0 || fromV==null);
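// a negative 'from' id marks a record with no real predecessor; its target vertex becomes a root of the component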
SeqVertex toV = getSeqVertex(graph, to); // important to call this after possibly creating fromV, in case fromV == toV (otherwise, creating that vertex twice!) // bugfix
if (isRoot)
{
if (toV==null)
{
toV = new SeqVertex(to, kmer, supp);
graph.addVertex(toV);
rootIDs.add(to);
}
}
else
{
if (toV==null)
{
toV = new SeqVertex(to, kmer);
graph.addVertex(toV);
}
SimpleEdge e = new SimpleEdge(supp, fromV.getID(), toV.getID());
graph.addEdge(e, fromV, toV);
}
}
return graph;
}
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
String output_filename, String graphname, boolean printFullSeq) throws Exception
{
PrintStream p = new PrintStream(new FileOutputStream(output_filename));
writeDotFile(graph,p,graphname,printFullSeq);
p.close();
}
/**
* write to dot file with shortened seqs
* @param graph
* @param p
* @param name
*/
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p, String name)
{
writeDotFile(graph,p,name,false);
}
/**
* Write the graph to a dot file; heavy vertices and edges are highlighted in bold red.
* @param graph
* @param p
* @param name
* @param printFullSeq whether to print each vertex's full sequence instead of the shortened form
*/
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p, String name,boolean printFullSeq)
{
SeqVertex.set_graph(graph);
//p.println("digraph "+name+"{");
p.println("digraph G {");
SeqVertex toVertex;
int weight;
//for each edge decide its color
for (SeqVertex vertex : graph.getVertices())
{ //go over all vertices
debugMes("Vertex: " + vertex.getShortSeqWconnectingIDs(graph), 15);
String verDesc = ""+vertex.getID()+" [label=\"";
if (printFullSeq)
verDesc = verDesc.concat(""+vertex.getLongtSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]\"");
else {
boolean include_discovery_time = true;
if (include_discovery_time) {
verDesc = verDesc.concat(""+vertex.getShortSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]"
+ "[T:" + vertex._dfsDiscoveryTime + "]\"");
}
else {
verDesc = verDesc.concat(""+vertex.getShortSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]\"");
}
}
if (vertex.getWeightAvg()>25)
verDesc = verDesc.concat(" ,style=bold,color=\"#AF0000\"");
verDesc = verDesc.concat("]");
if (!vertex.equals(T_VERTEX) && !vertex.equals(ROOT))
p.println(verDesc);
for (SimpleEdge edge : graph.getOutEdges(vertex)) //get all edges of vertex->?
{
toVertex = graph.getDest(edge);
weight = (int) Math.round(edge.getWeight());
String edgeStyle = "[label="+ weight +"]";
if (weight>20)
edgeStyle = "[style=bold,label="+ weight +",color=\"#AF0000\"]";
if (!toVertex.equals(T_VERTEX) && !vertex.equals(ROOT))
p.println(vertex.getID() + "->" + toVertex.getID() +edgeStyle);
}
}
p.println("}");
}
/**
* Compact the given graph:
* for each vertex v1 with out-degree 1 whose single successor v2 has in-degree 1, merge v2 into v1 and reconnect v2's outgoing edges
* @param graph
*/
private static boolean compactLinearPaths(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nCOMPACTING THE GRAPH\n=================",5);
//compact vertices
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
boolean changed = false;
for (SeqVertex v1 : graph.getVertices())
{
// debugMes("looking at vertex: "+v1);
while (!v1.equals(ROOT) && graph.outDegree(v1)==1 )
{
// get out edge, should only have one.
SimpleEdge e = null;
for (SimpleEdge ei : graph.getOutEdges(v1))
e = ei;
// get the vertex attached.
SeqVertex v2 = graph.getDest(e);
if (graph.inDegree(v2)!=1 || v2.isToBeDeleted() || v2.equals(T_VERTEX) || v1.equals(v2)) {
// avoiding loops and vertices that are in-branched.
break;
}
debugMes("Found potential edge: "+e +" between "+v1 +" and "+v2,20);
v1.concatVertex(v2, e.getWeight(),LAST_REAL_ID);
debugMes("removing vertex "+v2+" was concatenated into "+v1,20);
removeVertices.add(v2);
v2.setToBeDeleted(true);
changed = true;
removeEdges.clear();
for (SimpleEdge e2 : graph.getOutEdges(v2))
{
SeqVertex v3 = graph.getDest(e2);
debugMes("Want to move edge " + e2 + "("+v2 +"->"+v3+") to ("+v1+"->"+v3,20);
SimpleEdge newEdge = new SimpleEdge(e2, v1.getID(), v3.getID());
graph.addEdge(newEdge, v1, v3);
debugMes("\tadding edge: " + v1 + " to " + v3, 20);
removeEdges.add(e2);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge " + re + "("+graph.getSource(re) +"->"+graph.getDest(re)+")",20);
graph.removeEdge(re);
}
debugMes("removing edge " + e + "("+v1 +"->"+v2+")",20);
graph.removeEdge(e);
}
}
//remove all vertices that we don't want
for (SeqVertex v : removeVertices)
{
graph.removeVertex(v);
}
return changed;
}
/**
* remove light edges from the graph. return true if something has changed
* @param graph
* @return
*/
private static boolean removeLightEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("removeLightEdges()", 10);
boolean comp = false ; //removeLightCompEdges(graph);
boolean in = removeLightInEdges(graph);
boolean out = removeLightOutEdges(graph);
boolean flow = removeLightFlowEdges(graph);
return comp || in || out || flow;
}
/**
* Given a graph, go over all vertices and remove incoming or outgoing edges whose weight falls below
* FLOW_THR (e.g. 2%) of the vertex's average coverage.
* @param graph
* @return true if graph was changed.
*/
private static boolean removeLightFlowEdges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("SECTION\n=================\nREMOVING LIGHT FLOW EDGES\n=================",5);
boolean changed = false;
Collection<SeqVertex> all_vertices = graph.getVertices();
int vertex_count = 0;
int num_total_vertices = all_vertices.size();
for (SeqVertex v : all_vertices)
{
vertex_count++;
debugMes("Analyzing vertex: " + v.getID() + ", entry " + vertex_count + " of " + num_total_vertices, 25);
int inDegree = graph.inDegree(v);
int outDegree = graph.outDegree(v);
debugMes("\thas inDegree: " + inDegree + ", outDegree: " + outDegree, 25);
if (inDegree==0 && outDegree==0) {
debugMes("\tSkipping vertex.", 25);
continue;
}
int totalIn = 0, totalOut = 0;
for (SimpleEdge e : graph.getInEdges(v))
totalIn+=e.getWeight();
for (SimpleEdge e : graph.getOutEdges(v))
totalOut+=e.getWeight();
debugMes("FLOW: total in for vertex "+v+" is "+totalIn + ", total out is "+totalOut+", averageCov="+v.getWeightAvg(),20);
Collection<SimpleEdge> removeEdges = new HashSet<SimpleEdge>();
// out edges
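// e.g., if FLOW_THR is 0.02 and the vertex's average coverage is 100, any out-edge with weight below 2 is pruned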
for (SimpleEdge e : graph.getOutEdges(v))
{
double e_avg_flow_thr_thresh = v.getWeightAvg() * FLOW_THR;
if ( e.getWeight() < e_avg_flow_thr_thresh) {
debugMes("EDGE_PRUNING::removeLightFlowEdges() removing low flow OUT edge " + e
+ " from "+ graph.getSource(e)+" to "+graph.getDest(e) +
", FLOW_THR=" + FLOW_THR +
", e_avg_flow_thr_thresh=: " + e_avg_flow_thr_thresh, 15);
removeEdges.add(e);
}
}
// in edges
for (SimpleEdge e : graph.getInEdges(v))
{
double e_avg_flow_thr_thresh = v.getWeightAvg() * FLOW_THR;
if (e.getWeight() < e_avg_flow_thr_thresh) {
debugMes("EDGE_PRUNING::removeLightFlowEdges() removing low flow IN edge " + e
+ " from "+ graph.getSource(e)+" to "+graph.getDest(e) +
", FLOW_THR=" + FLOW_THR +
", e.weight=" + e.getWeight() + " < e_avg_flow_thr_thresh=: " + e_avg_flow_thr_thresh, 15);
removeEdges.add(e);
}
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
changed = true;
}
debugMes("\tdone analyzing vertex: " + v.getID(), 25);
}
debugMes("== done removing Light Flow Edges.", 25);
return changed;
}
/**
* go over the graph, and remove incoming edges whose weight is at most EDGE_THR of a vertex's total incoming flow
* @param graph
*/
private static boolean removeLightInEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nREMOVING LIGHT In EDGES\n=================",5);
boolean somethingChanged = false;
Queue<SeqVertex> allCurVers = new LinkedList<SeqVertex>(graph.getVertices());
SeqVertex v = null;
while ((v = allCurVers.poll())!=null)
{
if (graph.inDegree(v)<=1)
continue;
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
// skip edges at simple cycles
if (atSimpleCycle(graph, v)) {
continue;
}
int totalIn = 0;
for (SimpleEdge inE : graph.getInEdges(v))
{
totalIn+=inE.getWeight();
}
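// e.g., if EDGE_THR is 0.05 and the total incoming flow is 100, any in-edge with weight <= 5 is removed below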
for (SimpleEdge inE : graph.getInEdges(v))
{
double e_edge_thr = totalIn*EDGE_THR;
if (inE.getWeight() <= e_edge_thr)
{
debugMes("EDGE_PRUNING::removeLightInEdges() removing the edge: "+
graph.getSource(inE)+" " + graph.getSource(inE).getName() +
" -> " +
graph.getDest(inE)+ " " + graph.getDest(inE).getName() +
" (weight: "+inE.getWeight()+" <= e_edge_thr: " + e_edge_thr +
", EDGE_THR=" + EDGE_THR, 15);
removeEdges.add(inE);
somethingChanged = true;
}
}
for (SimpleEdge e : removeEdges)
graph.removeEdge(e);
}
return somethingChanged;
}
private static boolean atSimpleCycle(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
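// a 'simple cycle' here means a two-node cycle: some immediate neighbor u of v with both v->u and u->v present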
for (SimpleEdge e : graph.getInEdges(v)) {
if (graph.findEdge(v, graph.getSource(e)) != null)
return(true);
}
for (SimpleEdge e: graph.getOutEdges(v)) {
if (graph.findEdge(graph.getDest(e), v) != null)
return(true);
}
return(false);
}
/**
* go over the graph, and remove outgoing edges whose weight is at most EDGE_THR of a vertex's total outgoing flow
* @param graph
*/
private static boolean removeLightOutEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nREMOVING LIGHT OUT EDGES\n=================",5);
boolean somethingChanged = false;
Queue<SeqVertex> allCurVers = new LinkedList<SeqVertex>(graph.getVertices());
SeqVertex v = null;
while ((v = allCurVers.poll())!=null)
{
if (graph.outDegree(v)<=1)
continue;
// skip edges at simple cycles
if (atSimpleCycle(graph, v)) {
continue;
}
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
int totalOut = 0;
for (SimpleEdge outE : graph.getOutEdges(v))
{
totalOut+=outE.getWeight();
}
for (SimpleEdge outE : graph.getOutEdges(v))
{
double e_edge_thr = totalOut * EDGE_THR;
if (outE.getWeight() <= e_edge_thr)
{
debugMes("EDGE_PRUNING::removeLightOutEdges() removing the edge: " +
graph.getSource(outE)+ " " + graph.getSource(outE).getName() +
" -> " +
graph.getDest(outE)+ " " + graph.getDest(outE).getName() +
" (weight: "+outE.getWeight()+" <= e_edge_thr: " + e_edge_thr +
", EDGE_THR=" + EDGE_THR, 15);
removeEdges.add(outE);
somethingChanged = true;
}
}
for (SimpleEdge e : removeEdges)
graph.removeEdge(e);
}
return somethingChanged;
}
/**
* Return the SeqVertex with the given id within the given graph.
* @param graph
* @param id
* @return
*/
private static SeqVertex getSeqVertex(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, int id)
{
SeqVertex v = SeqVertex.retrieveSeqVertexByID(id);
if (graph.containsVertex(v)) {
return(v);
}
else {
return(null); // note, SeqVertex stores all vertices even if removed from the graph, so let's ensure it's still in the graph.
}
/* orig code too slow for large graphs
for (SeqVertex v : graph.getVertices())
{
if (v.getID() == id)
return v;
}
return null;
*/
}
/**
* Given the string seq, return it in fasta format
* @param seq - seq
* @param name - seq name
* @return
*/
private static String getSeqFasta(String seq,String name){
String res = "";
res = res.concat(">"+name+"\n");
int i=0;
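// e.g., if LINE_LEN is 60, a 150-base sequence is written as the header line followed by lines of 60, 60, and 30 bases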
for (; i<seq.length()-LINE_LEN ; i+=LINE_LEN)
{
res = res.concat(seq.substring(i, i+LINE_LEN)+"\n");
}
res = res.concat(seq.substring(i)+"\n");
return res;
}
/**
* return the next available vertex id.
* @return
*/
private static int getNextID() {
LAST_ID++;
return LAST_ID;
}
/**
* return a topological order on the graph's vertices.
* @param graph
* @return list of nodes.
*/
private static List<SeqVertex> getTopologicalOrder(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, My_DFS dfs) {
Map<SeqVertex,Number> finished = dfs.getFinishing();
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
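// Assumption: the comparator orders vertices by decreasing DFS finishing time, so polling the queue
// yields a topological order of the (acyclic) graph.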
debugMes("getTopologicalOrder(), Vertex count: " + graph.getVertexCount(), 18);
PriorityQueue<SeqVertex> fQueue = new PriorityQueue<SeqVertex>(graph.getVertexCount(),finishingTimeComparator );
for (SeqVertex v : finished.keySet())
{
fQueue.add(v);
}
List<SeqVertex> order = new ArrayList<SeqVertex>();
while (!fQueue.isEmpty())
{
order.add(fQueue.poll());
}
return order;
}
/**
* Go over each sub component of the given graph and calculate its total and average coverage,
* removing components that consist of a single short node or whose average coverage falls below COMP_AVG_COV_THR.
* @param graph
*/
private static void calcSubComponentsStats(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
Set<Set<SeqVertex>> comps = divideIntoComponents(graph);
int numComp = comps.size();
for (Set<SeqVertex> comp : comps)
{
//now we have one comp in hand
Vector<Double> allW = new Vector<Double>();
int compID = -1;
for (SeqVertex v : comp)
{
if (compID==-1)
compID = v.getID();
allW.addAll(0, v.getWeights());
for (SimpleEdge outE : graph.getOutEdges(v))
{
allW.add(0, outE.getWeight());
}
}
SeqVertex v1 = getSeqVertex(graph, compID);
if (allW.size()==0 || (comp.size()==1 && v1.getName().length()<MIN_OUTPUT_SEQ))
{
//this is a single node with a single letter
debugMes("removing component with node "+compID+" which has only one node with short seq "+v1.getName(),20);
graph.removeVertex(v1);
numComp = numComp-1;
continue;
}
int t=0;
for (Double w: allW)
t+=w;
//System.err.println("t=" + t + ", allW.size()=" + allW.size());
float avgCov = (float)t/allW.size();
debugMes("SubComp: "+compID+" has "+ comp.size() +" nodes; total coverage: "+t+" average: "+avgCov,20);
if (avgCov<COMP_AVG_COV_THR-0.5) //FIXME: added 0.5 for testing with low cov seq
{
debugMes("removing component with node "+compID+" which has only average coverage of "+
avgCov+ " < "+COMP_AVG_COV_THR,20);
for (SeqVertex v : comp)
graph.removeVertex(v);
numComp = numComp-1;
}
}
debugMes("number of good components: "+numComp,10);
}
/**
* divide the graph into its components
* @param graph
* @return set of components
*/
private static Set<Set<SeqVertex>> divideIntoComponents(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
WeakComponentClusterer<SeqVertex, SimpleEdge> compClus = new WeakComponentClusterer<SeqVertex, SimpleEdge>();
Set<Set<SeqVertex>> comps = compClus.transform(graph);
int comp_counter = 0;
for (Set<SeqVertex> s : comps) {
debugMes("\nComponentDivision: " + comp_counter + " contains the following vertices:", 10);
for (SeqVertex v : s) {
debugMes("node_id: " + v.getID(), 10);
}
comp_counter++;
}
return comps;
}
/**
* connect the source node to each node with indegree=0,
* connect each node with outdegree=0 to the target node
* Also add reads from the root to each of the nodes, and from the ends too.
* @param graph
* @param comp the current component
* @param combinedReadHash
*/
private static void addSandT(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Set<SeqVertex> comp, HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash)
{
graph.addVertex(ROOT);
graph.addVertex(T_VERTEX);
SimpleEdge e=null;
// for (SeqVertex v : graph.getVertices())
for (SeqVertex v : comp)
{
if (graph.inDegree(v)==0 && !v.equals(ROOT) && !v.equals(T_VERTEX)) // connect S to this vertex
{
double w = v.getFirstWeight();
if (w==-1) // single letter node?
{
debugMes("got a single letter node here.. "+v,20);
w = 1;
}
e = new SimpleEdge(w, ROOT.getID(), v.getID());
graph.addEdge(e, ROOT, v);
debugMes("Adding edge from S to "+v,20);
PairPath pathD = new PairPath();
pathD.addToPath1(ROOT.getID());
pathD.addToPath1(v.getID());
if (!combinedReadHash.containsKey(ROOT.getID()))
combinedReadHash.put(ROOT.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(ROOT.getID()).put(pathD, MIN_READ_SUPPORT_THR);
/*
for (SeqVertex v2 : graph.getSuccessors(v))
{
PairPath pathD = new PairPath();
pathD.addToPath1(ROOT.getID());
pathD.addToPath1(v.getID());
pathD.addToPath1(v2.getID());
if (!combinedReadHash.containsKey(ROOT.getID()))
combinedReadHash.put(ROOT.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(ROOT.getID()).put(pathD, MIN_READ_SUPPORT_THR);
}
*/
}
if (graph.outDegree(v)==0 && !v.equals(T_VERTEX) && !v.equals(ROOT)) // connect this vertex to T
{
double w = v.getLastWeight();
if (w==-1)
w=1;
e = new SimpleEdge(w, v.getID(), T_VERTEX.getID());
graph.addEdge(e, v, T_VERTEX);
debugMes("Adding edge from "+v+" to T",20);
PairPath pathD = new PairPath();
pathD.addToPath1(v.getID());
pathD.addToPath1(T_VERTEX.getID());
if (!combinedReadHash.containsKey(v.getID()))
combinedReadHash.put(v.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(v.getID()).put(pathD, MIN_READ_SUPPORT_THR);
/*
for (SeqVertex v2 : graph.getPredecessors(v))
{
PairPath pathD = new PairPath();
pathD.addToPath1(v2.getID());
pathD.addToPath1(v.getID());
pathD.addToPath1(T_VERTEX.getID());
if (!combinedReadHash.containsKey(v2.getID()))
combinedReadHash.put(v2.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(v2.getID()).put(pathD, MIN_READ_SUPPORT_THR);
}
*/
}
}
}
/**
* given the graph, remove all edges of S and T
* @param graph
*/
private static void removeAllEdgesOfSandT(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
Set<SimpleEdge> removeEdges = new HashSet<SimpleEdge>();
if (graph.containsVertex(ROOT))
for (SimpleEdge e : graph.getOutEdges(ROOT))
removeEdges.add(e);
if (graph.containsVertex(T_VERTEX))
for (SimpleEdge e : graph.getInEdges(T_VERTEX))
removeEdges.add(e);
for (SimpleEdge re : removeEdges)
graph.removeEdge(re);
}
private static boolean dealWithLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
boolean res = false;
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(graph);
// These should be only those repeats that aren't evident in the individual read paths,
// since the read-evident repeats were unrolled earlier.
Set<Set<SimpleEdge>> curLoops = new HashSet<Set<SimpleEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (SeqVertex v : comp)
{
for (SeqVertex v2 : graph.getSuccessors(v))
{
if (dp.getDistance(v2, v)!=null) // there is a connection between v->v2->... ->v
{
//path has all edges from v to itself thru v2
List<SimpleEdge> loopPath = dp.getPath(v2, v);
// v2 is successor of v, so let's just add the v->v2 edge too, complete the full loop.
loopPath.add(0, graph.findEdge(v, v2));
// capture the path IDs for debugMes reporting below.
List<Integer> pathIDs = new ArrayList<Integer>();
for (SimpleEdge e : loopPath)
pathIDs.add(graph.getDest(e).getID());
// Collect the loop edge set.
Set<SimpleEdge> loopPath_set = new HashSet<SimpleEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("adding the loop path "+pathIDs+" to the curLoops",12);
}else
{
debugMes("not adding the loop path "+pathIDs+" to the curLoops",12);
}
}
}
}
if (curLoops.isEmpty())
return res;
// process found loops
Set<SimpleEdge> allRelevantEdges = new HashSet<SimpleEdge>();
for (Set<SimpleEdge> loopPath_set : curLoops)
for (SimpleEdge e : loopPath_set)
{
e.increaseNumOfLoopsBy1();
allRelevantEdges.add(e);
}
// break complex loops
if (!allRelevantEdges.isEmpty()){
Comparator<Object> numLoopsComparator = new numLoopsEdgeComparator(graph);
PriorityQueue<SimpleEdge> edgesQ = new PriorityQueue<SimpleEdge>(allRelevantEdges.size(), numLoopsComparator);
edgesQ.addAll(allRelevantEdges);
//while there are still loops
// find the next edge that can be removed to reduce the number of loops
// updated queue: remove all edges, and update their loop content
SimpleEdge nextEtoRemove;
while ( (!curLoops.isEmpty()) && (! edgesQ.isEmpty()) )
{
//FIXME: there was a situation where curLoops was not empty,
// but edgesQ was, so I added edgesQ to the while condition. Investigate why this might happen.
// In this case, a node was involved in a self loop and a double-loop.
nextEtoRemove = edgesQ.poll();
if (graph.getSource(nextEtoRemove) == null
|| graph.getDest(nextEtoRemove) == null
|| nextEtoRemove.getNumOfLoopsInvolved() <= 0) {
continue;
}
debugMes("removing the edge " + graph.getSource(nextEtoRemove).getID() + "->" +
graph.getDest(nextEtoRemove).getID() + " that appears in "
+nextEtoRemove.getNumOfLoopsInvolved() + " loops",15);
// remove the loops that have this edge from curLoops
Set<Set<SimpleEdge>> removeLoops = new HashSet<Set<SimpleEdge>>();
for (Set<SimpleEdge> loopPath_set : curLoops)
if (loopPath_set.contains(nextEtoRemove))
{
debugMes("the loop "+ loopPath_set+" is now solved",15);
removeLoops.add(loopPath_set);
// update the number of loops involved in each edge
for (SimpleEdge e : loopPath_set)
e.decreaseNumOfLoopsBy1();
}
for (Set<SimpleEdge> loopPath_set : removeLoops)
curLoops.remove(loopPath_set);
//update the queue. remove all, and insert again if numLoops>0.
SimpleEdge[] relEdges = (SimpleEdge[]) edgesQ.toArray(new SimpleEdge[0]);
edgesQ.clear();
for (SimpleEdge otherE : relEdges)
if (otherE.getNumOfLoopsInvolved()>0)
edgesQ.add(otherE);
// remove this edge
graph.removeEdge(nextEtoRemove);
res = true;
}
}
return res;
}
/**
* given the graph and the node with the self loop,
* find the reads that support this loop, and multiply this vertex as many times as needed, and then remap these reads.
* @param graph
* @param v
* @param combinedReadHash
* @param newVers
*/
private static void dealWithSelfLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, Set<SeqVertex> newVers) {
int vid = v.getID();
int maxNumOfOccurrences = 0;
HashMap<PairPath, Integer> relevantReads = new HashMap<PairPath, Integer>();
for (Integer startV : combinedReadHash.keySet())
{
for (PairPath path: combinedReadHash.get(startV).keySet())
{
int numOcc = path.numOccurrences(vid);
if (numOcc>0)
{
Integer count = combinedReadHash.get(startV).get(path);
if (count == null)
debugMes("stop here",10);
relevantReads.put(path,count);
}
if ( numOcc> maxNumOfOccurrences) //this read includes this vertex
{
debugMes("the read "+path+" includes the vertex "+vid+" "+numOcc+" times",19);
maxNumOfOccurrences = numOcc;
}
}
}
// remove the self loop
SimpleEdge removeE = graph.findEdge(v, v);
double oldW = removeE.getWeight();
List<Integer> newVerIDs = new ArrayList<Integer>();
newVerIDs.add(vid);
graph.removeEdge(removeE);
debugMes("removing the edge between "+ v +" and itself",20);
// multiply this node maxNumOfOccurrences times
int upID = vid;
int downID = -1;
ArrayList<SimpleEdge> removeEdges = new ArrayList<SimpleEdge>();
for (int i=2; i<=maxNumOfOccurrences; i++)
{
if (downID!=-1)
upID = downID;
downID = getNextID();
newVerIDs.add(downID);
SeqVertex newV = new SeqVertex(downID, v);
debugMes("adding the new vertex "+newV.getID(),20);
newV.setOrigButterflyID(v.getID());
graph.addVertex(newV);
SeqVertex upV = getSeqVertex(graph, upID);
newVers.add(newV);
for (SeqVertex vOut : graph.getSuccessors(upV))
{
debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),20);
graph.addEdge(new SimpleEdge(graph.findEdge(v, vOut), newV.getID(), vOut.getID()), newV, vOut);
}
debugMes("adding an edge between "+upID+" and "+newV.getID(),20);
graph.addEdge(new SimpleEdge(oldW, upV.getID(), newV.getID()), upV, newV);
}
// moved to the end of loop added in the new loop opening process - Feb 2013
SeqVertex newV = getSeqVertex(graph, downID);
for (SeqVertex vOut : graph.getSuccessors(v))
{
if (!newVerIDs.contains(vOut.getID())){
debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),0);
SimpleEdge e = graph.findEdge(v, vOut);
graph.addEdge(new SimpleEdge(e, newV.getID(), vOut.getID()), newV, vOut);
debugMes("removing the edge between "+ v.getID() +" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
//remove edges:
for (SimpleEdge re : removeEdges){
graph.removeEdge(re);
}
List<Integer> loopVIDs = new ArrayList<Integer>();
loopVIDs.add(vid);
List<List<Integer>> newVerIDsList = new ArrayList<List<Integer>>();
newVerIDsList.add(newVerIDs);
updateReadsAfterLoopOpening(combinedReadHash,relevantReads,loopVIDs,newVerIDsList,maxNumOfOccurrences);
}
/**
* Given the combinedReadHash, and the relevant reads, update their paths.
* @param combinedReadHash
* @param relevantReads
* @param loopVIDs
* @param newVerIDs
* @param maxNumOfOccurrences
*/
private static void updateReadsAfterLoopOpening(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
HashMap<PairPath, Integer> relevantReads, List<Integer> loopVIDs,
List<List<Integer>> newVerIDs, int maxNumOfOccurrences) {
for (PairPath path: relevantReads.keySet())
{
Integer origFirstV = path.getFirstID();
Integer origCount = combinedReadHash.get(origFirstV).get(path);
List<Integer> newPath1 = new ArrayList<Integer>(path.getPath1());
List<Integer> newPath2 = new ArrayList<Integer>(path.getPath2());
if (loopVIDs.size()==1)
{
updatePathOfSelfLoop(newPath1,loopVIDs,newVerIDs.get(0),maxNumOfOccurrences);
updatePathOfSelfLoop(newPath2,loopVIDs,newVerIDs.get(0),maxNumOfOccurrences);
} else
{
updatePathOfDoubleLoop(newPath1,loopVIDs,newVerIDs.get(0),newVerIDs.get(1),maxNumOfOccurrences);
updatePathOfDoubleLoop(newPath2,loopVIDs,newVerIDs.get(0),newVerIDs.get(1),maxNumOfOccurrences);
}
// path hasn't changed
if (path.getPath1().equals(newPath1) && path.getPath2().equals(newPath2))
continue;
// both are empty now
if (newPath1.isEmpty() && newPath2.isEmpty())
combinedReadHash.get(origFirstV).remove(path);
// at least one has changed
PairPath newKey;
if (newPath1.isEmpty())
newKey = new PairPath(newPath2,new ArrayList<Integer>());
else if (newPath2.isEmpty())
newKey = new PairPath(newPath1,new ArrayList<Integer>());
else
newKey = new PairPath(newPath1,newPath2);
Integer firstV = newKey.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath, Integer>());
if (combinedReadHash.get(firstV).containsKey(newKey))
{
Integer oldCount = combinedReadHash.get(firstV).get(newKey);
combinedReadHash.get(firstV).put(newKey,oldCount+origCount);
combinedReadHash.get(firstV).remove(path);
}else
{
combinedReadHash.get(firstV).put(newKey,origCount);
}
// remove the old loop-containing path
combinedReadHash.get(origFirstV).remove(path);
}
}
/**
* given a path, the vertex id of the self loop, and the new vertices' ids, update the path.
* if the path starts or ends inside the loop, trim this part of the path, and leave only the outside info.
* @param path
* @param loopVIDs
* @param newVerIDs
* @param maxNumOcc
*/
private static void updatePathOfSelfLoop(List<Integer> path, List<Integer> loopVIDs,
List<Integer> newVerIDs,int maxNumOcc) {
int vid = loopVIDs.get(0).intValue();
String origPath = ""+path;
Set<Integer> loopVs = new HashSet<Integer>();
loopVs.add(vid);
boolean changed = false;
if (path.contains(vid))
{
if (path.get(0).intValue()==vid)
{ //starts inside the loop
changed = true;
if (path.get(path.size()-1).intValue()==vid)
//starts and ends inside the loop
if (path.size()==maxNumOcc)
{
for (int i=1 ; i<=path.size()-1 ; i++)
path.set(i,newVerIDs.get(i));
changed = true;
}else
path.clear();
else
updatePathToRemoveLoopNodes(path,loopVs);
}else
{ // starts and ends outside the loop
for (int i=1 ; i<=path.size()-1 ; i++)
{
if (path.get(i).intValue()==vid)
{
int j = newVerIDs.indexOf(path.get(i-1));
if (j>=0)
{
path.set(i, newVerIDs.get(j+1));
changed = true;
}
}
}
}
}
if (changed)
debugMes("path changed from "+origPath+" to "+path,20);
}
/**
* remove the integers that are inside the loop
* @param path
* @param loopVs
*/
private static void updatePathToRemoveLoopNodes(List<Integer> path,
Set<Integer> loopVs) {
List<Integer> indicesToRemove = new ArrayList<Integer>();
for (int i=0 ; i<=path.size()-1 ; i++)
if (loopVs.contains(path.get(i)))
indicesToRemove.add(i);
Collections.sort(indicesToRemove);
Collections.reverse(indicesToRemove);
for (Integer i : indicesToRemove)
path.remove(i.intValue());
}
/**
* given the graph and the two nodes forming the double loop,
* find the reads that support this loop, and multiply these vertices as many times as needed
* @param graph
* @param t_v1
* @param t_v2
* @param combinedReadHash
* @param newVers
*/
private static void dealWithDoubleLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex t_v1,
SeqVertex t_v2,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, Set<SeqVertex> newVers)
{
int v1_id=-1; // the one inside the regular flow
int v2_id=-1; // the addition
//CONTINUE HERE!
if (graph.getSuccessorCount(t_v1)>1)
{
v1_id = t_v1.getID();
v2_id = t_v2.getID();
} else if (graph.getSuccessorCount(t_v2)>1)
{
v1_id = t_v2.getID();
v2_id = t_v1.getID();
}
if (v1_id==-1)
{
//FIXME - decide randomly, doesn't solve the loops right. (ignores input edges to t_v1)
v1_id = t_v1.getID();
v2_id = t_v2.getID();
}
debugMes("\n\ndealWithDoubleLoops: v1 = " + v1_id + ", v2 = " + v2_id, 15);
// count the number of times v2 appears within reads
HashMap<PairPath, Integer> relevantReads = new HashMap<PairPath, Integer>();
int maxNumOfOccurrences = 0;
for (Integer startV : combinedReadHash.keySet())
{
for (PairPath path: combinedReadHash.get(startV).keySet())
{
int numOcc2 = path.numOccurrences(v2_id);
debugMes("read-to-vertex-count: " + path + " contains vertex: " + v2_id + " this many times: " + numOcc2, 15);
if (numOcc2>0)
{
Integer count = combinedReadHash.get(startV).get(path);
if (count == null)
{
// shouldn't happen
for (PairPath path2: combinedReadHash.get(startV).keySet())
{
debugMes("path: "+path2+" with hashCode "+path2.hashCode(),15);
debugMes("path: "+path2+" with value "+combinedReadHash.get(startV).get(path2),15);
}
}
relevantReads.put(path,count);
if ( numOcc2> maxNumOfOccurrences) //this read includes this vertex
{
debugMes("MAX:the read "+path+" includes the vertex "+v2_id+" "+numOcc2+" times",15);
maxNumOfOccurrences = numOcc2;
}
}
}
}
// the loop is v1 (v2,v1)*
//if we count how many times v2 appears, then the number of appearances of v1 is one more.
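// Unrolling sketch: with maxNumOfOccurrences = 2 for v2, the cycle v1 <-> v2 roughly becomes the chain
// v1 -> v2 -> v1' -> v2' -> v1'', and reads that traversed the loop are re-threaded through the copies.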
SeqVertex v1 = getSeqVertex(graph, v1_id);
SeqVertex v2 = getSeqVertex(graph, v2_id);
List<Integer> newVerIDs_v1 = new ArrayList<Integer>();
List<Integer> newVerIDs_v2 = new ArrayList<Integer>();
newVerIDs_v1.add(v1_id);
newVerIDs_v2.add(v2_id);
// remove the self loop
SimpleEdge removeE = graph.findEdge(v2, v1);
double oldW = removeE.getWeight();
double oldW2 = graph.findEdge(v1, v2).getWeight();
graph.removeEdge(removeE);
debugMes("removing the edge between "+ v2_id +" and "+v1_id,15);
// multiply this node maxNumOfOccurrences times
int up_v1 = v1_id;
if (maxNumOfOccurrences>=1) //multiply only v1
{
SeqVertex newV = new SeqVertex(getNextID(), v1);
newV.setOrigButterflyID(v1_id);
graph.addVertex(newV);
newVerIDs_v1.add(newV.getID());
newVers.add(newV);
// // removed in the new loop opening process - Feb 2013
// for (SeqVertex vOut : graph.getSuccessors(v1))
// if (!vOut.equals(v2))
// debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),20);
// graph.addEdge(new SimpleEdge(graph.findEdge(v1, vOut)), newV, vOut);
debugMes("adding an edge between "+v2_id+" and "+newV.getID(),20);
graph.addEdge(new SimpleEdge(oldW, v2.getID(), newV.getID()), v2, newV);
up_v1 = newV.getID();
}
int up_v2 = v2_id;
//int down_v1 = -1;
int down_v2 = -1;
int down_v1 = up_v1;
for (int i=2; i<=maxNumOfOccurrences; i++) // multiple v2-v1
{
if (down_v1!=-1)
{
// on next iteration
up_v1 = down_v1;
}
down_v1 = getNextID();
down_v2 = getNextID();
newVerIDs_v1.add(down_v1);
newVerIDs_v2.add(down_v2);
SeqVertex newV1 = new SeqVertex(down_v1, v1);
newV1.setOrigButterflyID(v1_id);
SeqVertex newV2 = new SeqVertex(down_v2, v2);
newV2.setOrigButterflyID(v2_id);
//debugMes("i="+i+"("+maxNumOfOccurrences+") adding newV1:"+newV1+" newV2:"+newV2,10);
graph.addVertex(newV1);
graph.addVertex(newV2);
newVers.add(newV1);
newVers.add(newV2);
SeqVertex upV = getSeqVertex(graph, up_v1);
/*
SeqVertex orig_upV = getSeqVertex(graph, v1_id);
if (i==maxNumOfOccurrences){// this cirteria added in the new loop opening process - Feb 2013 // moved down below as bugfix, since need to do this for no-repeats too. Sept 2014
for (SeqVertex vOut : graph.getSuccessors(orig_upV))
{
if (!newVerIDs_v2.contains(vOut.getID()))
{
debugMes("adding an edge between "+newV1.getID()+" and "+vOut.getID(),20);
SimpleEdge e = graph.findEdge(orig_upV, vOut);
graph.addEdge(new SimpleEdge(e), newV1, vOut);
debugMes("removing an edge between "+orig_upV.getID()+" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
}
*/
// // removed in the new loop opening process - Feb 2013
// for (SeqVertex vIn : graph.getPredecessors(getSeqVertex(graph, up_v2)))
// if (!newVerIDs_v1.contains(vIn.getID()))
// debugMes("$$adding an edge between "+vIn.getID()+" and "+down_v2,10);
// graph.addEdge(new SimpleEdge(graph.findEdge(vIn, getSeqVertex(graph, up_v2))), vIn, newV2);
debugMes("adding an edge between "+up_v1+" and "+newV2.getID(),20);
graph.addEdge(new SimpleEdge(oldW, upV.getID(), newV2.getID()), upV, newV2);
debugMes("adding an edge between "+newV2.getID()+" and "+newV1.getID(),20);
graph.addEdge(new SimpleEdge(oldW2, newV2.getID(), newV1.getID()), newV2, newV1);
}
List<Integer> loopVIDs = new ArrayList<Integer>();
loopVIDs.add(v1_id);
loopVIDs.add(v2_id);
List<List<Integer>> newVerIDs = new ArrayList<List<Integer>>();
newVerIDs.add(newVerIDs_v1);
newVerIDs.add(newVerIDs_v2);
// relocate original V1's out-edges
SeqVertex orig_upV = getSeqVertex(graph, v1_id);
SeqVertex down_v1_vertex = getSeqVertex(graph, down_v1);
ArrayList<SimpleEdge> removeEdges = new ArrayList<SimpleEdge>();
for (SeqVertex vOut : graph.getSuccessors(orig_upV))
{
if (!newVerIDs_v2.contains(vOut.getID()))
{
debugMes("adding an edge between "+down_v1_vertex.getID()+" and "+vOut.getID(),20);
SimpleEdge e = graph.findEdge(orig_upV, vOut);
graph.addEdge(new SimpleEdge(e, down_v1_vertex.getID(), vOut.getID()), down_v1_vertex, vOut);
debugMes("removing an edge between "+orig_upV.getID()+" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
//remove edges:
for (SimpleEdge re : removeEdges){
graph.removeEdge(re);
}
updateReadsAfterLoopOpening(combinedReadHash,relevantReads,loopVIDs,newVerIDs,maxNumOfOccurrences);
}
/**
* given a path, the vid of the loop vertices, and the new vertices' id, update the path
* if the path starts or ends inside the loop, trim this part of the path, and leave only the outside info.
* @param path
* @param loopVIDs
* @param newVerIDsV1
* @param newVerIDsV2
* @param maxNumOfOccurrences
*/
private static void updatePathOfDoubleLoop(
List<Integer> path,
List<Integer> loopVIDs,
List<Integer> newVerIDsV1,
List<Integer> newVerIDsV2,
int maxNumOfOccurrences) {
int v1_id = loopVIDs.get(0).intValue();
int v2_id = loopVIDs.get(1).intValue();
debugMes("updatePathOfDoubleLoop, read: " + path + ", loop vertices: " + v1_id + ", " + v2_id, 15);
if (path.isEmpty())
return;
boolean changed = false;
String origPath = ""+path;
Set<Integer> loopVs = new HashSet<Integer>();
loopVs.add(v1_id);
loopVs.add(v2_id);
int firstV = path.get(0).intValue();
int lastV = path.get(path.size()-1).intValue();
if (path.contains(v2_id))
{
if (firstV==v1_id || firstV==v2_id)
{
changed = true;
if (firstV==v1_id || lastV==v2_id)
// the whole path is inside the loop
if ((firstV==v1_id && lastV==v1_id && path.size()==maxNumOfOccurrences*2+1) ||
(firstV==v2_id && lastV==v2_id && path.size()==maxNumOfOccurrences*2-1) ||
(firstV==v1_id && lastV==v2_id && path.size()==maxNumOfOccurrences*2) ||
(firstV==v2_id && lastV==v1_id && path.size()==maxNumOfOccurrences*2) ) // all path is in the loop, but there is only one new path that matches
{
changed = updateSinglePathWithDoubleLoopNodes(path,v1_id,v2_id,newVerIDsV1,newVerIDsV2);
}else
path.clear();
else
{// only the start is inside the loop
updatePathToRemoveLoopNodes(path, loopVs);
changed = true;
}
}else
{ // start and ends outside the loop
changed = updateSinglePathWithDoubleLoopNodes(path,v1_id,v2_id,newVerIDsV1,newVerIDsV2);
}
}
if (changed)
debugMes("\tpath changed from "+origPath+" to "+path,15);
else
debugMes("\tpath remains unchanged.", 15);
}
/**
* given this path, and the loop info, update the path to its single option.
* @param path
* @param v1_id
* @param v2_id
* @param newVerIDsV1
* @param newVerIDsV2
* @return
*/
private static boolean updateSinglePathWithDoubleLoopNodes(
List<Integer> path, int v1_id, int v2_id, List<Integer> newVerIDsV1,
List<Integer> newVerIDsV2) {
boolean changed = false;
for (int i=1 ; i<=path.size()-1 ; i++)
{
if (path.get(i).intValue()==v1_id)
{
int j = newVerIDsV2.indexOf(path.get(i-1));
if (j>=0)
{
path.set(i, newVerIDsV1.get(j+1));
changed = true;
}
} else if (path.get(i).intValue()==v2_id)
{
int j = newVerIDsV1.indexOf(path.get(i-1));
if (j>=1)
{
path.set(i, newVerIDsV2.get(j));
changed = true;
}
}
}
return changed;
}
/**
* print out the given debug message, but only if DEBUG is on and verbosityLevel is within the configured VERBOSE_LEVEL
* @param mes Message
* @param verbosityLevel verbosity threshold at which this message is emitted
*/
private static void debugMes(String mes, int verbosityLevel)
{
//TODO: use general logging that can be leveraged across all classes.
if (DEBUG && verbosityLevel<=BFLY_GLOBALS.VERBOSE_LEVEL)
{
if (USE_STDERR)
System.err.println(mes);
else if (LOG_STDERR)
ERR_STREAM.println(mes);
}
}
/*
* combine prefixes (this describes the compactPrefixRecursive routines, currently commented out further below):
* calc for each v it's "depth" in terms of length of strings (from them on)
* draw all v's with the same depth
* sort on their set of parents
* draw all v's with same depth and same set of parents
* find subsets of those with same prefix
* create new node with prefix, connect accordingly.
* add the rest (those that removed the prefix) back into queue, with new depths
* @param graph
* @param compaction_round
* @return
*/
/**
* Given the graph, go over all vertices, and calculate their depth, as in distance from the roots
* (maximal or minimal??) = doesn't matter as long as it's consistent. I chose maximal.
* @param graph
*/
private static void setVerticesDepths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
My_DFS dfs = new My_DFS(graph);
dfs.runDFS2();
List<SeqVertex> topBottom = getTopologicalOrder(graph, dfs);
for (SeqVertex v : topBottom)
{
if (graph.inDegree(v)==0)
{
v.setDepth(0);
}
else
{
int d = -1;
for (SeqVertex tv : graph.getPredecessors(v))
{
if (tv.getDepth() + tv.getNameKmerAdj().length() >d)
d=tv.getDepth() + tv.getNameKmerAdj().length();
}
v.setDepth(d);
// MAX_DEPTH global var being set here, used by prefix compaction method.
if (d>MAX_DEPTH)
MAX_DEPTH = d;
}
}
}
/**
* Given the graph, and the vertex v, return a sorted list of its parents
* @param graph
* @param v
* @return
*/
private static List<SeqVertex> getSortedParentList(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
List<SeqVertex> res = new ArrayList<SeqVertex>(graph.getPredecessors(v));
SeqComparator verComp = new SeqComparator();
Collections.sort(res, verComp);
return res;
}
/**
* Given the graph, and the candidate nodes, look for shared prefixes of a single letter,
* and move on.
* @param graph
* @param candidateNodes
* @param updateQueue
*/
/*
private static boolean compactPrefixRecursive(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Collection<SeqVertex> candidateNodes,
Collection<SeqVertex> updatedNodes) {
debugMes("** compactPrefixRecursive: " + candidateNodes, 20);
boolean changed = false;
for (String l : LETTERS)
{
Collection<SeqVertex> vWithL = getVerticesWithFirstLetter(candidateNodes,l);
if (vWithL.size()<=1)
continue;
debugMes("vWithL set based on l=" + l + ": " + candidateNodes, 20);
// if there is a letter that has more than one vertex, create a new vertex with this letter
changed = true;
SeqVertex newV = new SeqVertex(getNextID(), l);
// retain the identity of the vertices being collapsed here.
newV.addIDsAsFirstPrevIDs(vWithL,LAST_REAL_ID);
Collection<SeqVertex> new_vWithL = new HashSet<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
for (SeqVertex v_in_vWithL : vWithL)
{
if (!graph.containsVertex(v_in_vWithL))
continue;
// create a new vertex with the first prevID as id
SeqVertex newReplaceV_in_vWithL;
if (!v_in_vWithL.getPrevVerIDs().isEmpty() // node already subsumed another complex node
&&
v_in_vWithL.getPrevVerIDs().firstElement().size()>1)
{
// just copying over the info into a new node
newReplaceV_in_vWithL = new SeqVertex(getNextID(), v_in_vWithL.getName());
newReplaceV_in_vWithL.copyTheRest(v_in_vWithL);
debugMes("compactPrefixRecursive/complex: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
else {
newReplaceV_in_vWithL = v_in_vWithL.generateNewVerWithFirstIDasID();
debugMes("compactPrefixRecursive/simple: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
// move all edges from and to the orig, to the new
if (!newReplaceV_in_vWithL.equals(v_in_vWithL)) // they will be equal if the v_withL has no prevIDs, and only his original id
{
for (SimpleEdge e : graph.getOutEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), newReplaceV_in_vWithL, graph.getDest(e));
}
for (SimpleEdge e : graph.getInEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), graph.getSource(e), newReplaceV_in_vWithL);
}
}
//replace it's location within vWithL
new_vWithL.add(newReplaceV_in_vWithL);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge "+re+" between "+graph.getSource(re)+" and "+graph.getDest(re),20);
graph.removeEdge(re);
}
for (SeqVertex rv : vWithL)
{
if (!new_vWithL.contains(rv))
{
debugMes("removing vertex (because new_vWithL doesn't contain it) "+rv,20);
graph.removeVertex(rv);
}
}
vWithL = new_vWithL;
graph.addVertex(newV);
debugMes("pulled the first letter from all vertices in "+vWithL+" to the new vertex "+newV,20);
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
for (SeqVertex v1 : vWithL)
{
// if (removeVertices.contains(v1) || !graph.containsVertex(v1))
if (v1.isToBeDeleted() || !graph.containsVertex(v1))
continue;
removeEdges.clear();
v1.increaseDepthByOne();
////////////////////////////////////
// reassign incoming edges to newV
////////////////////////////////////
for (SimpleEdge edgeToRemove : graph.getInEdges(v1))
{
double w2 = edgeToRemove.getWeight();
SimpleEdge newE2 = null;
SeqVertex v3 = graph.getSource(edgeToRemove);
if (graph.findEdge(v3,newV)==null)
{
newE2 = new SimpleEdge(w2);
graph.addEdge(newE2, v3,newV); // edge reassignment
debugMes("adding edge "+newE2+" between "+v3+" and "+newV,20);
}else
{
newE2 = graph.findEdge(v3,newV);
if (w2>newE2.getWeight())
{
//FIXME ?? do we want to add up the weights?
debugMes("setting edge "+newE2+"'s weight from "+newE2.getWeight()+" to "+w2,20);
newE2.setWeight(w2);
}
}
removeEdges.add(edgeToRemove);
debugMes("removed edge "+edgeToRemove+" between "+graph.getSource(edgeToRemove)+" and "+graph.getDest(edgeToRemove),20);
}
///////////////////////////////////
// handle outgoing edges (newE1)
////////////////////////////////////
if (v1.getName().length()==1)
{
// single base successor node, just remove it and reassign out-edge.
v1.removeFirstLetter();
//go over all edges going out of v1, and move them to exit newV
for (SeqVertex v0 : graph.getSuccessors(v1))
{
double w = graph.findEdge(v1,v0).getWeight();
graph.addEdge(new SimpleEdge(w), newV,v0); // edge reassignments.
debugMes("adding edge "+w+" between "+newV+" and "+v0,20);
}
debugMes("vertex "+v1+" is going to be removed",20);
removeVertices.add(v1);
v1.setToBeDeleted(true);
}
else if (v1.getName().length()<=KMER_SIZE && graph.outDegree(v1)==0) {
// short terminal node less than a kmer size.
// why do we need to handle this special use case?
SeqVertex newV1; // needed only if this node is less than K in length
v1.removeFirstLetter();
Collection<SeqVertex> upV = graph.getPredecessors(v1);
if (v1.getID()<=LAST_REAL_ID)
{
newV1 = new SeqVertex(getNextID(),v1.getName());
graph.addVertex(newV1);
removeVertices.add(v1);
v1.setToBeDeleted(true);
} else
newV1 = v1; // what scenario?
//go over all edges going into v1, and move them to exit newV
if (upV.size()==1) // why only handling the case of a single parent?
{
for (SeqVertex upV1 : upV) // only one here, no iteration needed
{
SimpleEdge oldE = graph.findEdge(upV1, v1);
double w = oldE.getWeight();
graph.addEdge(new SimpleEdge(w), newV,newV1);
removeEdges.add(oldE); // already done above?
debugMes("adding edge "+w+" between "+newV+" and "+newV1,20);
debugMes("removing edge "+w+" between "+upV1+" and "+v1,20);
graph.addEdge(new SimpleEdge(1), v1, newV1);
}
}
}
else {
// all other cases.
double w = v1.removeFirstLetter();
SimpleEdge newE1 = new SimpleEdge(w);
graph.addEdge(newE1, newV,v1);
debugMes("adding edge "+newE1+" between "+newV+" and "+v1,20);
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
}
}
//try this out
updatedNodes.clear();
Set<SeqVertex> toAddTo_vWithL = new HashSet<SeqVertex>();
// removing vertices targeted for deletion.
for (SeqVertex rv : removeVertices)
{
graph.removeVertex(rv);
debugMes("removed vertex "+rv,20);
if (vWithL.contains(rv))
vWithL.remove(rv);
if (candidateNodes.contains(rv))
candidateNodes.remove(rv);
}
// the restructuring to newV could result in new children available for further compaction.
// check for other children of newV that are at the same depth and candidates for further compaction:
for (SeqVertex vChild : graph.getSuccessors(newV))
if (!vWithL.contains(vChild) && vChild.hasAllSameParents(graph, vWithL))
//vChild.getDepth()==curDepth)
toAddTo_vWithL.add(vChild);
for (SeqVertex vToAdd : toAddTo_vWithL)
vWithL.add(vToAdd);
for (SeqVertex vToAdd : vWithL)
{
updatedNodes.add(vToAdd);
}
if (vWithL.size()>1)
compactPrefixRecursive(graph, vWithL, updatedNodes);
}
return changed;
}
*/
/*
private static boolean compactPrefixRecursive_v2(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Collection<SeqVertex> candidateNodes,
Collection<SeqVertex> updatedNodes) {
debugMes("** compactPrefixRecursive: " + candidateNodes, 20);
boolean changed = false;
for (String l : LETTERS)
{
Collection<SeqVertex> vWithL = getVerticesWithFirstLetter(candidateNodes,l);
if (vWithL.size()<=1)
continue;
debugMes("vWithL set based on l=" + l + ": " + candidateNodes, 20);
// if there is a letter that has more than one vertex, create a new vertex with this letter
changed = true;
SeqVertex newV = new SeqVertex(getNextID(), l);
// retain the identity of the vertices being collapsed here.
newV.addIDsAsFirstPrevIDs(vWithL,LAST_REAL_ID);
// copy the current vertex list over to a new set of nodes.
Collection<SeqVertex> new_vWithL = new HashSet<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
for (SeqVertex v_in_vWithL : vWithL)
{
if (!graph.containsVertex(v_in_vWithL))
continue;
// create a new vertex with the first prevID as id
SeqVertex newReplaceV_in_vWithL;
if (!v_in_vWithL.getPrevVerIDs().isEmpty() // node already subsumed another complex node
&&
v_in_vWithL.getPrevVerIDs().firstElement().size()>1)
{
// just copying over the info into a new node
newReplaceV_in_vWithL = new SeqVertex(getNextID(), v_in_vWithL.getName());
newReplaceV_in_vWithL.copyTheRest(v_in_vWithL);
debugMes("compactPrefixRecursive/complex: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
else {
newReplaceV_in_vWithL = v_in_vWithL.generateNewVerWithFirstIDasID();
debugMes("compactPrefixRecursive/simple: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
// move all edges from and to the orig, to the new
if (!newReplaceV_in_vWithL.equals(v_in_vWithL)) // they will be equal if the v_withL has no prevIDs, and only his original id
{
for (SimpleEdge e : graph.getOutEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), newReplaceV_in_vWithL, graph.getDest(e));
}
for (SimpleEdge e : graph.getInEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), graph.getSource(e), newReplaceV_in_vWithL);
}
}
//replace it's location within vWithL
new_vWithL.add(newReplaceV_in_vWithL);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge "+re+" between "+graph.getSource(re)+" and "+graph.getDest(re),20);
graph.removeEdge(re);
}
// remove the original vertices.
for (SeqVertex rv : vWithL)
{
if (!new_vWithL.contains(rv))
{
debugMes("removing vertex (because new_vWithL doesn't contain it) "+rv,20);
graph.removeVertex(rv);
}
}
vWithL = new_vWithL;
graph.addVertex(newV);
debugMes("pulled the first letter from all vertices in "+vWithL+" to the new vertex "+newV,20);
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
for (SeqVertex v1 : vWithL)
{
if (v1.isToBeDeleted() || !graph.containsVertex(v1))
continue;
removeEdges.clear();
v1.increaseDepthByOne();
////////////////////////////////////
// reassign incoming edges to newV
////////////////////////////////////
for (SimpleEdge edgeToRemove : graph.getInEdges(v1))
{
double w2 = edgeToRemove.getWeight();
SimpleEdge newE2 = null;
SeqVertex v3 = graph.getSource(edgeToRemove);
if (graph.findEdge(v3,newV)==null)
{
newE2 = new SimpleEdge(w2);
graph.addEdge(newE2, v3,newV); // edge reassignment
debugMes("adding edge "+newE2+" between "+v3+" and "+newV,20);
}else
{
newE2 = graph.findEdge(v3,newV);
if (w2>newE2.getWeight())
{
//FIXME ?? do we want to add up the weights?
debugMes("setting edge "+newE2+"'s weight from "+newE2.getWeight()+" to "+w2,20);
newE2.setWeight(w2);
}
}
removeEdges.add(edgeToRemove);
debugMes("removed edge "+edgeToRemove+" between "+graph.getSource(edgeToRemove)+" and "+graph.getDest(edgeToRemove),20);
}
///////////////////////////////////
// handle outgoing edges (newE1)
////////////////////////////////////
if (v1.getName().length()==1)
{
// single base successor node, just remove it and reassign out-edge.
v1.removeFirstLetter();
//go over all edges going out of v1, and move them to exit newV
for (SeqVertex v0 : graph.getSuccessors(v1))
{
double w = graph.findEdge(v1,v0).getWeight();
graph.addEdge(new SimpleEdge(w), newV,v0); // edge reassignments.
debugMes("adding edge "+w+" between "+newV+" and "+v0,20);
}
debugMes("vertex "+v1+" is going to be removed",20);
removeVertices.add(v1);
v1.setToBeDeleted(true);
}
else {
// all other cases.
double w = v1.removeFirstLetter();
SimpleEdge newE1 = new SimpleEdge(w);
graph.addEdge(newE1, newV,v1);
debugMes("adding edge "+newE1+" between "+newV+" and "+v1,20);
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
}
}
//try this out
updatedNodes.clear();
Set<SeqVertex> toAddTo_vWithL = new HashSet<SeqVertex>();
int curDepth = -1;
// use this curDepth to decide if to add the children or not.
if (!removeVertices.isEmpty())
for (SeqVertex ver : vWithL)
// if (!removeVertices.contains(ver))
if (!ver.isToBeDeleted())
curDepth = ver.getDepth();
// removing vertices targeted for deletion.
for (SeqVertex rv : removeVertices)
{
graph.removeVertex(rv);
debugMes("removed vertex "+rv,20);
if (vWithL.contains(rv))
vWithL.remove(rv);
if (candidateNodes.contains(rv))
candidateNodes.remove(rv);
}
// the restructuring to newV could result in new children available for further compaction.
// check for other children of newV that are at the same depth and candidates for further compaction:
for (SeqVertex vChild : graph.getSuccessors(newV)) {
//debugMes("vChild: " + vChild+ ", vWithL: " + vWithL, 5);
if (!vWithL.contains(vChild) && vChild.hasAllSameParents(graph, vWithL)) {
//vChild.getDepth()==curDepth)
toAddTo_vWithL.add(vChild);
}
}
for (SeqVertex vToAdd : toAddTo_vWithL)
vWithL.add(vToAdd);
for (SeqVertex vToAdd : vWithL)
{
updatedNodes.add(vToAdd);
}
if (vWithL.size()>1)
compactPrefixRecursive_v2(graph, vWithL, updatedNodes);
}
return changed;
}
*/
/**
* Given the set of nodes, return the subset of nodes whose name starts with the given letter l
* @param candidateNodes the nodes to scan
* @param l the first letter to match
* @return the nodes whose names start with l
*/
/*
private static Collection<SeqVertex> getVerticesWithFirstLetter(
Collection<SeqVertex> candidateNodes, String l) {
Collection<SeqVertex> res = new HashSet<SeqVertex>();
for (SeqVertex v : candidateNodes)
{
if (v.getName().startsWith(l))
res.add(v);
}
return res;
}
*/
// retrieve path list from first unshared node till the end (minus the final vertex)
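// Example: path_to_search=[1,2,3,4,-2], path_to_index=[1,2,-2] returns [3,4];
// node 3 is the first node of path_to_search absent from path_to_index, and the final (sink)
// vertex is excluded because iteration stops at size()-2.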
public static List<Integer> get_unshared_path_terminus(List<Integer> path_to_search, List<Integer> path_to_index) {
debugMes("Path to search: " + path_to_search, 19);
debugMes("Path to index: " + path_to_index, 19);
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (Integer x : path_to_index) {
path_index.put(x, new Boolean(true));
}
int unshared_path_pos = path_to_search.size(); // sentinel (path length): means no unshared node was found
for (int i = 0; i <= path_to_search.size()-2; i++) {
if (! path_index.containsKey( path_to_search.get(i) ) ) {
unshared_path_pos = i;
break;
}
}
List<Integer> unique_terminal_path = new Vector<Integer>();
for (int i = unshared_path_pos; i <= path_to_search.size() -2; i++) {
unique_terminal_path.add(path_to_search.get(i));
}
debugMes("Unique terminal path: " + unique_terminal_path, 19);
return(unique_terminal_path);
}
// see if any node (excluding the final entry of each path) is shared between the lists
public static boolean paths_have_node_in_common (List<Integer> pathA, List<Integer> pathB) {
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (int i = 0; i < pathA.size() - 1; i++) {
path_index.put(pathA.get(i), new Boolean(true));
}
for (int i = 0; i < pathB.size() -1; i++) {
if (path_index.containsKey( pathB.get(i))) {
return(true);
}
}
return(false);
}
// see if any node other than the very last one is shared between the lists;
// negative (sink) node IDs in pathA are skipped unless include_sinks is set
public static boolean paths_have_any_node_in_common (List<Integer> pathA, List<Integer> pathB, boolean include_sinks) {
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (int i = 0; i < pathA.size() - 1; i++) {
Integer node = pathA.get(i);
if ( (! include_sinks) && node < 0) {
continue; // sink node
}
path_index.put(node, new Boolean(true));
}
for (int i = 0; i < pathB.size() -1; i++) {
Integer node = pathB.get(i);
if (path_index.containsKey( node)) {
// debugMes("Found node: " + node + " in common between paths: " + pathA + " and " + pathB, 10);
return(true);
}
}
return(false);
}
public static String getPathMappingAsciiIllustration (
final List<Integer> finalPath,
HashMap<PairPath,Integer> readPathsHashmap
) {
String ascii_illustration = "";
for (int i = 0; i < finalPath.size(); i++) {
ascii_illustration += "=";
}
ascii_illustration += " PATH: " + finalPath + "\n";
List<PairPath> readPaths = new ArrayList<PairPath>(readPathsHashmap.keySet());
Collections.sort(readPaths, new Comparator<PairPath>() { // sort illustration by first node position in path
public int compare(PairPath a, PairPath b) {
Integer b_index = finalPath.indexOf(b.getFirstID());
Integer a_index = finalPath.indexOf(a.getFirstID());
return(a_index - b_index);
}
});
for (PairPath read : readPaths) {
char chars[] = new char[finalPath.size()];
for (int i = 0; i < chars.length; i++) {
chars[i] = ' ';
}
for (List<Integer> readPath : read.get_paths()) {
for (Integer vertex_id : readPath) {
int index = finalPath.indexOf(vertex_id);
if (index >= 0) {
chars[index] = '=';
}
}
}
for (int i = 0; i < chars.length; i++) {
ascii_illustration += chars[i];
}
int read_counts = readPathsHashmap.get(read);
ascii_illustration += " Read: " + read.get_paths() + " read_support: " + read_counts + "\n";
}
return(ascii_illustration);
}
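// Sum the read counts over all pair-paths assigned to the given final path.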
public static int count_pairpath_support(List<Integer> path, HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
HashMap<PairPath,Integer> pairPath_map = PathReads.get(path);
int sum_reads = 0;
for (PairPath p : pairPath_map.keySet()) {
int read_count = pairPath_map.get(p);
sum_reads += read_count;
}
return(sum_reads);
}
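// For every read path of length >= 3, record each consecutive (left, central, right) node
// triplet keyed by the central node ID; duplicates are stored only once per central node.
// Example: read path [5,7,9,11] contributes [5,7,9] under key 7 and [7,9,11] under key 9.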
public static HashMap<Integer, List<List<Integer>>> extractTripletsFromReads(HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
HashMap<Integer, List<List<Integer>>> tripletMapper = new HashMap<Integer, List<List<Integer>>>();
for (Integer vertex_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_hmap = combinedReadHash.get(vertex_id);
for (PairPath pp : pp_hmap.keySet()) {
List<List<Integer>> paths = pp.get_paths();
for (List<Integer> read_path : paths) {
if (read_path.size() < 3) {
continue;
}
// iterate through triplets
for (int i = 1; i < read_path.size()-1; i++) {
Integer central_id = read_path.get(i);
Integer left_id = read_path.get(i-1);
Integer right_id = read_path.get(i+1);
List<Integer> adjacency_path = new ArrayList<Integer>();
adjacency_path.add(left_id);
adjacency_path.add(central_id);
adjacency_path.add(right_id);
if (tripletMapper.containsKey(central_id)) {
List<List<Integer>> triplet_list = tripletMapper.get(central_id);
if (! triplet_list.contains(adjacency_path)) {
triplet_list.add(adjacency_path);
debugMes("Adding triplet adjacency_path to central node: " + central_id + " => " + adjacency_path, 17);
}
else {
debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + " already captured.", 17);
}
}
else {
List<List<Integer>> triplet_list = new ArrayList<List<Integer>>();
triplet_list.add(adjacency_path);
tripletMapper.put(central_id, triplet_list);
debugMes("Setting initial triplet adjacency_path for central node: " + central_id + " => " + adjacency_path, 17);
}
}
}
}
}
return(tripletMapper);
}
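// For every read path of length >= 3, record each prefix of length >= 3 keyed by its last node,
// then drop prefixes that are fully contained (as suffixes) within longer prefixes at the same node.
// Example: read path [2,4,6,8] yields prefix [2,4,6] under key 6 and [2,4,6,8] under key 8.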
public static HashMap<Integer, List<List<Integer>>> extractComplexPathPrefixesFromReads(HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
debugMes("-capturing path prefixes", 15);
HashMap<Integer, List<List<Integer>>> nodeToComplexPathPrefix = new HashMap<Integer, List<List<Integer>>>();
for (Integer vertex_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_hmap = combinedReadHash.get(vertex_id);
for (PairPath pp : pp_hmap.keySet()) {
List<List<Integer>> paths = pp.get_paths();
for (List<Integer> read_path : paths) {
if (read_path.size() < 3) {
continue;
}
// iterate through prefixes
for (int i = read_path.size()-1; i >= 2; i--) {
Integer node_id = read_path.get(i);
List<Integer> prefix_path = read_path.subList(0, i+1);
if (! nodeToComplexPathPrefix.containsKey(node_id)) {
nodeToComplexPathPrefix.put(node_id, new ArrayList<List<Integer>>());
}
if (! nodeToComplexPathPrefix.get(node_id).contains(prefix_path)) {
nodeToComplexPathPrefix.get(node_id).add(prefix_path);
}
}
}
}
}
debugMes("-removing prefixes that are subpaths of other prefixes", 15);
// remove paths that are subpaths
for (Integer node_id : nodeToComplexPathPrefix.keySet()) {
List<List<Integer>> prefixes = nodeToComplexPathPrefix.get(node_id);
List<List<Integer>> prefixes_to_purge = new ArrayList<List<Integer>>();
for (List<Integer> prefix : prefixes) {
for (List<Integer> prefix2 : prefixes) {
if (prefix != prefix2 && prefix2.size() > prefix.size()
&& Path.share_suffix_fully_contained(prefix, prefix2)) {
prefixes_to_purge.add(prefix);
}
}
}
for (List<Integer> prefix : prefixes_to_purge) {
prefixes.remove(prefix);
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 17) {
for (List<List<Integer>> extendedTripletPathsList : nodeToComplexPathPrefix.values()) {
for (List<Integer> extendedTripletPath : extendedTripletPathsList) {
debugMes("EXTENDED_TRIPLET_CAPTURED: " + extendedTripletPath, 17);
}
}
}
return(nodeToComplexPathPrefix);
}
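// Return true if the query triplet exactly matches (all three node IDs) one of the triplets
// previously recorded for the central node.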
public static Boolean tripletSupported(List<List<Integer>> triplet_list, List<Integer> triplet) {
for (List<Integer> t_list : triplet_list) {
debugMes("Checking triplet list: " + t_list + " comparing to query triplet: " + triplet, 15);
if (t_list.get(0).equals(triplet.get(0))
&&
t_list.get(1).equals(triplet.get(1))
&&
t_list.get(2).equals(triplet.get(2))
) {
return(true);
}
}
return(false);
}
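// Return a copy of the path with the source sink ID (-1) prepended and the terminal sink ID (-2)
// appended when they are not already present; the input list is left unmodified.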
public static List<Integer> ensure_path_has_sinks(List<Integer> path) {
List<Integer> new_path = new ArrayList<Integer>(path);
if (new_path.get(0) != -1) {
new_path.add(0, -1);
}
if (new_path.get(new_path.size()-1) != -2) {
new_path.add(-2);
}
return(new_path);
}
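// Greedy CD-HIT style reduction: sort final paths by sequence length (longest first), then for
// each still-unfiltered pair, drop the lesser-supported path whenever twoPathsAreTooSimilar()
// judges the two path sequences to lack sufficient variation.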
public static HashMap<List<Integer>, Pair<Integer>> reduce_cdhit_like (
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads) {
debugMes("\n\n**** CD-HIT style path collapsing at end of run.\n\n", 15);
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
DecimalFormat df = new DecimalFormat("
for (List<Integer> path : FinalPaths_all.keySet())
{
String seq = getPathSeq(graph,path);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
}
MAX_SEQ_LEN_DP_ALIGN = ALL_VS_ALL_MAX_DP_LEN; // temporarily replace
Collections.sort(path_vec); // sort paths by length of sequence descendingly
// examine sequence CD-HIT -style, remove those that lack sufficient variation
HashMap<FinalPaths,Boolean> filtered = new HashMap<FinalPaths,Boolean>();
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
for (int i = 0; i < path_vec.size()-1; i++) {
if (filtered.containsKey(path_vec.get(i))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_i = path_vec.get(i).path;
List<Integer> path_i_w_sinks = ensure_path_has_sinks(path_i);
for (int j = i + 1; j < path_vec.size(); j++) {
if (filtered.containsKey(path_vec.get(j))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "] ");
}
List<Integer> path_j = path_vec.get(j).path;
List<Integer> path_j_w_sinks = ensure_path_has_sinks(path_j);
String seq_i = path_vec.get(i).sequence;
String seq_j = path_vec.get(j).sequence;
/*
int index1 = seq_i.length()-1;
int index2 = seq_j.length()-1;
debugMes("ALL-VS-ALL: (" + i + "," + j + " of " + path_vec.size() + ") checking for similarity the two paths: "+path_i+
"(len="+seq_i.length()+");"+path_j+"(len="+seq_j.length()+")",10);
*/
//if (finalSeqsAreTooSimilar(seq_i, seq_j)) {
if (twoPathsAreTooSimilar(graph, path_i_w_sinks, path_j_w_sinks)) {
debugMes("\n\n*** REDUCE: they are TOO SIMILAR! ***\n\n",10);
int rIndex = removeTheLesserSupportedPath(seq_i, seq_j, path_i, path_j, removeSimilarPaths, PathReads);
//int rIndex = removeTheShorterPath(path1S,path2S,path1,path2,removeSimilarPaths,PathReads);
if (rIndex == 1) {// the first path was removed
filtered.put(path_vec.get(i), true);
debugMes("\tRemoving (" + i + ") seq in pair", 18);
}
else {
filtered.put(path_vec.get(j), true);
debugMes("\tRemoving (" + j + ") second seq in pair", 18);
}
}
else
debugMes("\n\n*** REDUCE: they are PLENTY DIFFERENT ***\n\n", 15);
}
}
for (FinalPaths path2Remove : filtered.keySet())
{
debugMes("REDUCE-STAGE: The final path "+path2Remove+" was removed because it was too close to another path",10);
FinalPaths_all.remove(path2Remove.path);
}
ALL_VS_ALL_MAX_DP_LEN = MAX_SEQ_LEN_DP_ALIGN; // back to original setting
return(FinalPaths_all);
}
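// Remove any final path whose reconstructed sequence is an exact substring of another
// still-unfiltered final path's sequence.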
public static HashMap<List<Integer>, Pair<Integer>> remove_identical_subseqs (
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads) {
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
DecimalFormat df = new DecimalFormat("
int count = 0;
for (List<Integer> path : FinalPaths_all.keySet())
{
count++;
debugMes("-reconstructing sequence for path[: " + count + " of " + FinalPaths_all.keySet().size() + "]: " + path, 15);
String seq = getPathSeq(graph,path);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
}
debugMes("\n\n**** Removing identical subsequences among: " + path_vec.size() + " paths.\n\n", 10);
Collections.sort(path_vec); // sort paths by length of sequence descendingly
// examine sequence CD-HIT -style, remove those that lack sufficient variation
HashMap<FinalPaths,Boolean> filtered = new HashMap<FinalPaths,Boolean>();
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
for (int i = 0; i < path_vec.size()-1; i++) {
if (filtered.containsKey(path_vec.get(i))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_i = path_vec.get(i).path;
List<Integer> path_i_w_sinks = ensure_path_has_sinks(path_i);
for (int j = i + 1; j < path_vec.size(); j++) {
if (filtered.containsKey(path_vec.get(j))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_j = path_vec.get(j).path;
List<Integer> path_j_w_sinks = ensure_path_has_sinks(path_j);
String seq_i = path_vec.get(i).sequence;
String seq_j = path_vec.get(j).sequence;
int index1 = seq_i.length()-1;
int index2 = seq_j.length()-1;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "] ");
}
else {
debugMes("ALL-VS-ALL: (" + i + "," + j + " of " + path_vec.size() + ") checking for identical subseqs between the two paths: "+path_i+
"(len="+seq_i.length()+");"+path_j+"(len="+seq_j.length()+")",16);
}
if (seq_i.indexOf(seq_j) >= 0) {
filtered.put(path_vec.get(j), true);
debugMes("\t** Removing (" + j + ") seq in pair, contains " + i, 15);
}
else if (seq_j.indexOf(seq_i) >= 0) {
filtered.put(path_vec.get(i), true);
debugMes("\t** Removing (" + i + ") seq in pair, contains " + j, 15);
}
}
}
for (FinalPaths path2Remove : filtered.keySet())
{
debugMes("REDUCE-STAGE: The final path "+path2Remove+" was removed because it was too close to another path",10);
FinalPaths_all.remove(path2Remove.path);
}
return(FinalPaths_all);
}
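// For very long sequences, fall back to a fast zipper alignment and call the pair too similar
// when at most 2 mismatches are found; otherwise run a Smith-Waterman or Needleman-Wunsch
// alignment and compare the maximum number of differences in a sliding window against
// MAX_FINAL_DIFFS_IN_WINDOW.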
public static Boolean finalSeqsAreTooSimilar (String seq_i, String seq_j) {
// note, to check for perfectly identical sequence clusters, could run cd-hit like so:
// cd-hit-est -o cdhit -c 1 -i comp.allProbPaths.fasta -p 1 -d 0 -b 1
if ( (seq_i.length() > MAX_SEQ_LEN_DP_ALIGN && seq_j.length() > MAX_SEQ_LEN_DP_ALIGN)
||
seq_i.length() > 100000 // problems can arise in the alignment code if either seq is longer
||
seq_j.length() > 100000) {
// zipper: Just get rid of those that are truly nearly identical.
AlignmentStats stats = ZipperAlignment.doZipperAlignment("A", seq_i, "B", seq_j);
int mismatches = stats.mismatches;
debugMes("-zipper reports: " + mismatches + " mismatches between seqs.", 18);
if (mismatches <= 2)
return(true);
else
return(false);
}
else {
Alignment alignment;
if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
alignment = NWalign.run_SW_alignment("A", seq_i, "B", seq_j, 4, -5, 10, 1);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
alignment = NWalign.run_NW_alignment("A", seq_i, "B", seq_j, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
}
int max_diffs_in_window = AlignmentStats.get_max_diffs_in_window(alignment, DIFFS_WINDOW_SIZE);
debugMes (new jaligner.formats.Pair().format(alignment), 10);
debugMes("Max diffs found in alignment window: " + max_diffs_in_window, 10);
if (max_diffs_in_window <= MAX_FINAL_DIFFS_IN_WINDOW) {
return (true);
}
}
return(false);
}
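// Build the path annotation used in the fasta header. In MISO output mode each node is written
// as "id:start-end" in transcript coordinates (subtracting the (KMER_SIZE-1) overlap for all but
// the first node); otherwise the plain node ID list is reported. SNP positions and frequencies
// are appended when USE_DEGENERATE_CODE is enabled.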
private static String get_pathName_string (List<Integer> path,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
/*
int startI = 0, endI = path.size();
if (path.get(0)== ROOT.getID())
startI++;
if (path.get(path.size()-1)== T_VERTEX.getID())
endI--;
String pathName;
if (MISO_OUTPUT) {
pathName = "[";
int iSeqL=0,j=0;
for (int vi=startI; vi<endI; vi++){
iSeqL = getSeqVertex(graph, path.get(vi)).getName().length();
pathName = pathName + path.get(vi)+":"+j+"-"+(j+iSeqL-1);
if (vi<endI-1)
pathName = pathName.concat(" ");
j+=iSeqL;
}
pathName = pathName +"]";
} else
pathName = ""+path.subList(startI, endI);
*/
int startI = 0, endI = path.size();
if (path.get(0)== ROOT.getID())
startI++;
if (path.get(path.size()-1)== T_VERTEX.getID())
endI--;
String pathName,degenString="";
SeqVertex v;
if (MISO_OUTPUT) {
DecimalFormat f0 = new DecimalFormat("
pathName = "[";
int iSeqL=0,j=0;
for (int vi=startI; vi<endI; vi++){
v = getSeqVertex(graph, path.get(vi));
iSeqL = v.getName().length();
if (vi != startI){
iSeqL -= (KMER_SIZE-1);
}
//int node_id = v.getOrigButterflyID();
int node_id = v.getID();
String path_node = "" + node_id;
/*
if (xStructuresResolvedByTriplets.containsKey(node_id) && ! xStructuresResolvedByTriplets.get(node_id)) {
path_node = "@" + path_node + "@!";
}
*/
pathName = pathName + path_node +":"+j+"-"+(j+iSeqL-1);
if (vi<endI-1)
pathName = pathName.concat(" ");
if (v.getDegenerativeFreq().size()>0)
{
for (int di=0; di<v.getDegenerativeFreq().size(); di++)
{
degenString = degenString + "{("+(j+v.getDegenerativeLocations().elementAt(di))+")";
degenString = degenString + v.getDegenerativeLetters().elementAt(di).elementAt(0)+":"+
f0.format(v.getDegenerativeFreq().elementAt(di).elementAt(0))+" ";
degenString = degenString + v.getDegenerativeLetters().elementAt(di).elementAt(1)+":"+
f0.format(v.getDegenerativeFreq().elementAt(di).elementAt(1)) +"}";
}
}
j+=iSeqL;
}
pathName = pathName +"]";
} else
pathName = ""+path.subList(startI, endI);
if (USE_DEGENERATE_CODE) {
pathName = pathName + " SNPs="+degenString;
}
pathName += " " + path;
return(pathName);
}
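// Restrict the global read hash to pair-paths that share at least one node with the given
// subcomponent, keeping the original start-node keying.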
public static HashMap<Integer,HashMap<PairPath,Integer>> getComponentReads(Integer component_id, Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
HashSet<Integer> node_ids_in_component = new HashSet<Integer>();
for (SeqVertex v : comp) {
node_ids_in_component.add(v.getID());
}
List<Integer> component_node_list = new ArrayList<Integer>(node_ids_in_component);
HashMap<Integer,HashMap<PairPath,Integer>> componentReadHash = new HashMap<Integer,HashMap<PairPath,Integer>>();
for (Integer node_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_map = combinedReadHash.get(node_id);
for (PairPath p : pp_map.keySet()) {
if (p.haveAnyNodeInCommon(component_node_list)) {
if (! componentReadHash.containsKey(node_id)) {
componentReadHash.put(node_id, new HashMap<PairPath,Integer>());
}
componentReadHash.get(node_id).put(p, pp_map.get(p));
debugMes("Subcomponent: " + component_id + ", adding pairpath: " + p, 15);
}
}
}
return(componentReadHash);
}
/* bad idea... Cannot rely on node depth positions as they're imperfectly ordered
public static List<Integer> constructSinglePathFromPairPathList (List<PairPath> pairpath_list,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis
) {
HashSet<Integer> extracted = new HashSet<Integer>();
ArrayList<SeqVertex> extractedVertices = new ArrayList<SeqVertex>();
for (PairPath pp : pairpath_list) {
extracted.addAll(pp.getPath1());
if(pp.hasSecondPath())
extracted.addAll(pp.getPath2());
}
for(Integer id : extracted)
{
extractedVertices.add(getSeqVertex(graph, id));
}
//extractedVerticesIDs.addAll(extracted);
// sort according to topological order of BTFL graph
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
String node_id_list_text = "";
for (SeqVertex v : extractedVertices) {
node_id_list_text += v.getID() + " ";
}
debugMes("Extracted sorted vertices: " + node_id_list_text + "\n", 10);
DijkstraShortestPath dsp = new DijkstraShortestPath(graph);
// Fill in any gaps
// path=[L_1]; For each i in 1:length(node_list)
int j = 0;
int num_vertices = extractedVertices.size(); // note, extractedVertices grows in size during iterations below.
while(j < num_vertices - 1)
{
//System.out.println(j);
SeqVertex current = extractedVertices.get(j);
SeqVertex next = extractedVertices.get(j + 1);
// 3.3.1 if L_i == L_(i+1) then nothing
// -There are no duplicates since extractedVertices was created from
// building the set of extracted vertex IDs
// 3.3.2 else if exists an edge from L_i to L_(i+1) then nothing(?)
if(graph.getSuccessors(current).contains(next)) {
j++;
continue;
}
// 3.3.3 else find a single path (p = L_i,..., L_(i+1)):
// append all P_j (j=2:end) to our path
//List<SimpleEdge> sp = org.jgrapht.alg.DijkstraShortestPath.findPathBetween((Graph)graph, current, next);
List<SimpleEdge> sp = dsp.getPath(current, next);
debugMes("Found shortest path between " + current.getID() + " and " + next.getID() + ":", 10);
ArrayList<SeqVertex> toAdd = new ArrayList<SeqVertex>();
for(SimpleEdge edge : sp) {
SeqVertex v = graph.getDest(edge);
toAdd.add(v);
debugMes("\t" + v.getID(), 10);
}
toAdd.remove(next);
extractedVertices.addAll(toAdd);
j++;
}
ArrayList<Integer> extractedVerticesIDs = new ArrayList<Integer>();
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
for(SeqVertex v: extractedVertices)
{
//System.out.println("Adding vertex with ID: " + v.getID());
extractedVerticesIDs.add(v.getID());
}
return(extractedVerticesIDs);
}
*/
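// Compare node depths: report the pair-paths as too far apart when the first node of pp_j lies
// more than MAX_VERTEX_DISCOVERY_TIME_DIFF_ALLOW_COMPARE depth units beyond the last node of pp_i.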
public static boolean twoPairPathsAreTooFarAwayInGraph (PairPath pp_i, PairPath pp_j, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
SeqVertex last_vertex_i = getSeqVertex(graph, pp_i.getLastID());
SeqVertex first_vertex_j = getSeqVertex(graph, pp_j.getFirstID());
int last_vertex_i_start_time = last_vertex_i._node_depth;
int first_vertex_j_start_time = first_vertex_j._node_depth;
/*
System.err.println("Pairpaths: " + pp_i + " vs. " + pp_j);
System.err.println("Start times: nodes [" + pp_i.getLastID() + "," + pp_j.getFirstID() + "], times: " + i_start_time + " vs. " + j_start_time);
*/
if (first_vertex_j_start_time - last_vertex_i_start_time > MAX_VERTEX_DISCOVERY_TIME_DIFF_ALLOW_COMPARE)
return(true);
else
return(false);
}
}
// End TransAssembly.java
|
package org.dspace.app.itemexport;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.PrintWriter;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.core.Context;
import org.dspace.core.Constants;
import org.dspace.core.Utils;
/**
Item exporter to create simple AIPs for DSpace content.
Currently exports individual items, or entire collections.
For instructions on use, see printUsage() method.
ItemExport creates the simple AIP package that the importer also
uses. It consists of:
/exportdir/42/ (one directory per item)
/ dublin_core.xml - qualified dublin core in RDF schema
/ contents - text file, listing one file per line
/ file1 - files contained in the item
/ file2
/ ...
issues
-doesn't handle special characters in metadata
(needs to turn &'s into &amp;, etc.)
*/
public class ItemExport
{
public static void main(String [] argv)
throws Exception
{
if( argv.length < 4 )
{
printUsage();
return;
}
String typeString = argv[0];
String idString = argv[1];
String destDirName = argv[2];
String seqNumString = argv[3];
int myID = Integer.parseInt( idString );
int seqStart = Integer.parseInt( seqNumString );
int myType;
if( typeString.equals("ITEM") )
{
myType = Constants.ITEM;
}
else if( typeString.equals("COLLECTION") )
{
myType = Constants.COLLECTION;
}
else
{
printUsage();
return;
}
Context c = new Context();
c.setIgnoreAuthorization( true );
if( myType == Constants.ITEM )
{
// it's only a single item
Item myItem = Item.find( c, myID );
exportItem( c, myItem, destDirName, seqStart);
}
else
{
System.out.println("Exporting from collection " + myID );
// it's a collection, so do a bunch of items
Collection myCollection = Collection.find( c, myID );
ItemIterator i = myCollection.getItems();
exportItem(c, i, destDirName, seqStart);
}
File destDir = new File( destDirName );
c.complete();
}
private static void printUsage()
{
System.out.println("Output simple AIPs, given collection or item ID");
System.out.println("Usage: ITEM|COLLECTION ID dest_dir sequence_number");
System.out.println(" dest_dir = destination of archive files");
System.out.println(" sequence_number = 0, or some other number to start naming the archive directories");
System.out.println(" first item dir is sequence_number, then sequence_number+1, etc.");
}
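// Illustrative invocation (IDs and paths are examples only):
//   java org.dspace.app.itemexport.ItemExport COLLECTION 42 /exportdir 0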
private static void exportItem( Context c, ItemIterator i, String destDirName, int seqStart )
throws Exception
{
int mySequenceNumber = seqStart;
System.out.println("Beginning export");
while( i.hasNext() )
{
System.out.println("Exporting item to " + mySequenceNumber );
exportItem(c, i.next(), destDirName, mySequenceNumber);
mySequenceNumber++;
}
}
private static void exportItem( Context c, Item myItem, String destDirName, int seqStart)
throws Exception
{
File destDir = new File( destDirName );
if( destDir.exists() )
{
// now create a subdirectory
File itemDir = new File ( destDir + "/" + seqStart );
System.out.println("Exporting Item " + myItem.getID() + " to " + itemDir);
if( itemDir.exists() )
{
throw new Exception("Directory " + destDir + "/" + seqStart + " already exists!");
}
else
{
if( itemDir.mkdir() )
{
// make it this far, now start exporting
writeMetadata ( c, myItem, itemDir );
writeBitstreams( c, myItem, itemDir );
writeHandle ( c, myItem, itemDir );
}
else
{
throw new Exception("Error, can't make dir " + itemDir);
}
}
}
else
{
throw new Exception("Error, directory " + destDirName + " doesn't exist!");
}
}
// output the item's dublin core into the item directory
private static void writeMetadata( Context c, Item i, File destDir )
throws Exception
{
File outFile = new File( destDir, "dublin_core.xml" );
System.out.println("Attempting to create file " + outFile);
if( outFile.createNewFile() )
{
PrintWriter out = new PrintWriter( new FileWriter( outFile ) );
DCValue dcorevalues[] = i.getDC(Item.ANY, Item.ANY, Item.ANY);
out.println("<dublin_core>");
for(int j = 0; j < dcorevalues.length; j++)
{
DCValue dcv = dcorevalues[j];
String qualifier = dcv.qualifier;
if( qualifier == null ) { qualifier = "none"; }
String output = " <dcvalue element=\"" + dcv.element + "\" " +
"qualifier=\"" + qualifier + "\">" +
dcv.value +
"</dcvalue>";
out.println( output );
}
out.println("</dublin_core>");
out.close();
}
else
{
throw new Exception( "Cannot create dublin_core.xml in " + destDir );
}
}
// create the file 'handle' which contains the handle assigned to the item
private static void writeHandle( Context c, Item i, File destDir )
throws Exception
{
String filename = "handle";
File outFile = new File( destDir, filename );
if( outFile.createNewFile() )
{
PrintWriter out = new PrintWriter( new FileWriter( outFile ) );
out.println( i.getHandle() );
// close the contents file
out.close();
}
else
{
throw new Exception( "Cannot create file " + filename + " in " + destDir );
}
}
// create both the bitstreams and the contents file
private static void writeBitstreams( Context c, Item i, File destDir )
throws Exception
{
File outFile = new File( destDir, "contents" );
if( outFile.createNewFile() )
{
PrintWriter out = new PrintWriter( new FileWriter( outFile ) );
Bundle [] bundles = i.getBundles();
for( int j = 0; j < bundles.length; j++ )
{
// currently one bitstream per bundle!
Bitstream b = (bundles[j].getBitstreams())[0];
String myName = b.getName();
// write the manifest file entry
out.println( myName );
InputStream is = b.retrieve();
File fout = new File( destDir, myName );
if( fout.createNewFile() )
{
FileOutputStream fos = new FileOutputStream(fout);
Utils.bufferedCopy( is, fos );
}
else
{
throw new Exception("File " + fout + " already exists!" );
}
}
// close the contents file
out.close();
}
else
{
throw new Exception( "Cannot create contents in " + destDir );
}
}
}
|
//import java.util.Set;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import edu.uci.ics.jung.algorithms.cluster.WeakComponentClusterer;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistance;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistanceWoVer;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraShortestPath;
import edu.uci.ics.jung.graph.DirectedSparseGraph;
import edu.uci.ics.jung.graph.UndirectedSparseGraph;
import edu.uci.ics.jung.graph.util.Pair;
import gnu.getopt.*;
import java.io.BufferedReader;
//import java.io.FileNotFoundException;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.io.File;
import jaligner.Alignment;
//import org.jgrapht.alg.DijkstraShortestPath;
public class TransAssembly_allProbPaths {
private static final boolean DEBUG = true;
private static final SeqVertex ROOT = new SeqVertex(-1, "S",Integer.MAX_VALUE);
private static final SeqVertex T_VERTEX = new SeqVertex(-2, "E");
private static final int LINE_LEN = 60;
private static int LAST_ID = -1;
private static int LAST_REAL_ID = -1;
private static int MAX_DEPTH = 0;
private static double EDGE_THR = 0.05; // compares between each edge and its sister edges (u->v; vs all output of u, or all input of v)
private static double FLOW_THR = 0.02;// compares between each edge and its flow of its vertices (u->v; vs all input of u, or all output of v)
private static boolean NO_GRAPH_PRUNING = false;
private static final int COMP_AVG_COV_THR = 1;
private static final int INITIAL_EDGE_ABS_THR = 0;
private static int MIN_READ_SUPPORT_THR = 1;
private static int MIN_OUTPUT_SEQ;
// Paths Too Similar Settings
private static int MAX_DIFFS_SAME_PATH = 2;
private static int DIFFS_WINDOW_SIZE = 100;
private static int MAX_FINAL_DIFFS_IN_WINDOW = 5;
private static float MIN_PERCENT_IDENTITY_SAME_PATH = 98.0f;
private static int MAX_INTERNAL_GAP_SAME_PATH = 10;
private static boolean NO_PATH_MERGING = false; // disables use of the above
private static boolean NO_REMOVE_LOWER_RANKED_PATHS = false;
private static boolean NO_GRAPH_COMPACTION = false; // also assumes no path merging
// pasafly params
private static int MAX_VERTEX_DISCOVERY_TIME_DIFF_ALLOW_COMPARE = 0; // used in pasafly-mode //FIXME: should be zero, must investigate
private static boolean FAST_PASA = false;
private static int MAX_NUM_PATHS_PER_PASA_NODE = 10;
// Path alignment settings
// private static int MAX_INTERNAL_GAP_LENGTH = 20; // minimum cassette exon size that might be skipped in an alt-splice variant.
private static boolean SMITH_WATERMAN_ALIGN_FLAG = false;
private static int MAX_SEQ_LEN_DP_ALIGN = 10000;
private static boolean MISO_OUTPUT = true;
//private static boolean USE_PATH_ALIGNMENT = true;
private static int MAX_PAIR_DISTANCE = 0;
private static int PATH_REINFORCEMENT_DISTANCE_PERCENT = 25;
private static int PATH_REINFORCEMENT_DISTANCE = 0;
private static int MAX_NUM_PATHS_PER_NODE_INIT = 100;
private static int MAX_NUM_PATHS_PER_NODE_EXTEND = 25;
// read sequence to graph mapping criteria
private static int MAX_MM_ALLOWED = 0; // dynamically updated global (bad)
private static int MAX_MM_ALLOWED_CAP = 0; // dynamically updated global (bad)
private static double MAX_READ_SEQ_DIVERGENCE = 0.05;
private static final double MAX_READ_LOCAL_SEQ_DIVERGENCE = 0.1; // avoid very bad locally aligned regions along the way.
private static final int EXTREME_EDGE_FLOW_FACTOR = 200;
// path extension alternative options
private static final boolean USE_TRIPLETS = false; // do not use.
private static boolean ALL_POSSIBLE_PATHS = false; // most lenient form of path validation: all edge combinations allowed.
private static boolean LENIENT_PATH_CHECKING = false; // lenient: give benefit of doubt for connections that do not conflict
// path reinforcement check options
private static boolean ORIGINAL_PATH_EXTENSIONS = false; // examines paths from nodes to sinks
private static int KMER_SIZE = 0;
private static boolean GENERATE_FULL_SEQ_GRAPH = false;
private static boolean GENERATE_MIDDLE_DOT_FILES = false;
private static boolean COLLAPSE_SNPs = true;
private static boolean TRIPLET_LOCKING = true; //deprecated
private static boolean EXTENDED_TRIPLET = true; // deprecated
private static boolean TRIPLET_STRICT = false; // deprecated
private static boolean FRACTURE_UNRESOLVED_XSTRUCTURE = false; // potentially useful - deciding to just annotate these in the fasta headers.
private static boolean INFER_UNRESOLVED_XSTRUCTURE_PATHS = false; // harder problem than anticipated. Turn to true statistical inference.
private static boolean FIND_ALSO_DIFF_PATHS = false;
private static boolean USE_DEGENERATE_CODE = false;
private static String[] LETTERS = new String[]{"A","C","G","T"};
private static PrintStream ERR_STREAM;
private static boolean USE_STDERR = false;
private static boolean LOG_STDERR = false;
private static boolean NO_CLEANUP = false; // if set to true, input files are kept rather than removed
private static boolean RUN_ALL_VS_ALL_FILTER = false;
private static int ALL_VS_ALL_MAX_DP_LEN = 1000;
private static HashMap<PairPath,ArrayList<String>> LONG_READ_PATH_MAP; // PairPath => ArrayList(long_reads_names)
private static HashMap<String, PairPath> LONG_READ_NAME_TO_PPath; // string => PairPath
private static boolean cufflinksOpt = false; // minimum path set (Maria's code)
private static boolean CUFF_NO_EXTEND = false; // enable to report only disjoint chains w/o extensions
private static boolean EXPAND_LOOPS = true; //FIXME: parameterize this after testing.
private static boolean pasaFlyOpt = false; // PASA algorithm using pairpaths
private static boolean pasaFlyUniqueOpt = false; // variation on PASA
private static boolean ILLUSTRATE_FINAL_ASSEMBLIES = true;
private static boolean MAKE_PE_SE = false;
private static boolean NO_EM_REDUCE = false;
private static Float MIN_ISOFORM_PCT_LEN_OVERLAP = 30f;
private static Integer GENE_COUNTER = 0;
private static Float MIN_RELATIVE_ISOFORM_EXPRESSION = 5f;
private static Integer READ_END_PATH_TRIM_LENGTH = 0;
private static String FILE = ""; // updated to 'file' value below.
private static Boolean TREAT_PAIRS_AS_SINGLE = false;
private static Integer PATH_COUNTER = 0;
private static Boolean USE_DP_READ_TO_VERTEX_ALIGN = true;
public static Comparator<SeqVertex> SeqVertexIDorderComparator = new Comparator<SeqVertex>() {
public int compare (SeqVertex v_a, SeqVertex v_b) {
// order vertices by ascending ID
Integer v_a_id = v_a.getID();
Integer v_b_id = v_b.getID();
if (v_a_id < v_b_id) {
return(-1);
}
else if (v_a_id > v_b_id) {
return(1);
}
else {
return(0);
}
}
};
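// IUPAC nucleotide ambiguity codes: each key is a combination of observed bases and the value is
// its single-letter degenerate symbol; the reverse map below restores the base combination.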
private static Map<String, String> DEGENERATE_CODE = new HashMap<String, String>() {
private static final long serialVersionUID = 1L;
{
put("AG","R");
put("CT","Y");
put("CG","S");
put("AT","W");
put("GT","K");
put("AC","M");
put("CGT","B");
put("AGT","D");
put("ACT","H");
put("ACG","V");
put("ACGT","N");
}
};
private static Map<String, String> DEGENERATE_CODE_REV = new HashMap<String, String>() {
private static final long serialVersionUID = 1L;
{
put("R","AG");
put("Y","CT");
put("S","GC");
put("W","AT");
put("K","GT");
put("M","AC");
put("B","CGT");
put("D","AGT");
put("H","ACT");
put("V","ACG");
put("N","ACGT");
}
};
//private static Map<String, AlignmentStats> NUM_MATCHES_HASH;
private static Map<String, AlignmentStats> NUM_MISMATCHES_HASH;
public static void main(String[] args) throws Exception
{
long totalNumReads = 0;
/*
for(int i = 0; i < args.length; i ++)
System.out.println(args[i]);
*/
String file = "";
boolean printUsage = false;
LongOpt[] longopts = new LongOpt[100]; // big enough we don't have to keep incrementing it as our option list grows.
longopts[0] = new LongOpt("help", LongOpt.NO_ARGUMENT, null, 'h');
longopts[1] = new LongOpt("use-degenerate-code", LongOpt.OPTIONAL_ARGUMENT, null, 1);
longopts[2] = new LongOpt("dont-collapse-snps", LongOpt.OPTIONAL_ARGUMENT, null, 'S');
longopts[3] = new LongOpt("generate-full-sequence-graphs", LongOpt.OPTIONAL_ARGUMENT, null, 'G');
longopts[4] = new LongOpt("stderr", LongOpt.OPTIONAL_ARGUMENT, null, 2);
StringBuffer sb = new StringBuffer(0);
longopts[5] = new LongOpt("edge-thr", LongOpt.OPTIONAL_ARGUMENT, sb, 'E');
longopts[6] = new LongOpt("flow-thr", LongOpt.OPTIONAL_ARGUMENT, sb, 'W');
longopts[7] = new LongOpt("min_per_id_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 3);
longopts[8] = new LongOpt("max_number_of_paths_per_node_init", LongOpt.OPTIONAL_ARGUMENT, null, 4);
longopts[9] = new LongOpt("min_per_align_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 5);
longopts[10] = new LongOpt("SW", LongOpt.NO_ARGUMENT, null, 6); //SMITH_WATERMAN_ALIGNMENT_FLAG
longopts[11] = new LongOpt("all_possible_paths", LongOpt.NO_ARGUMENT, null, 7); // hidden option, testing only
longopts[12] = new LongOpt("lenient_path_extension", LongOpt.NO_ARGUMENT, null, 8); // hidden for now
longopts[13] = new LongOpt("path_reinforcement_distance", LongOpt.OPTIONAL_ARGUMENT, null, 9);
longopts[14] = new LongOpt("original_path_extension", LongOpt.OPTIONAL_ARGUMENT, null, 10);
longopts[15] = new LongOpt("ZIPPER", LongOpt.NO_ARGUMENT, null, 11); // hidden for now
longopts[16] = new LongOpt("NO_MISO_OUTPUT", LongOpt.NO_ARGUMENT, null, 12); // hidden for now
longopts[17] = new LongOpt("max_diffs_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 13);
longopts[18] = new LongOpt("max_internal_gap_same_path", LongOpt.OPTIONAL_ARGUMENT, null, 14);
longopts[19] = new LongOpt("generate_intermediate_dot_files", LongOpt.NO_ARGUMENT, null, 15);
longopts[20] = new LongOpt("triplet-lock", LongOpt.NO_ARGUMENT, null, 16); // deprecated
longopts[21] = new LongOpt("max_seq_len_DP_align", LongOpt.OPTIONAL_ARGUMENT, null, 17);
longopts[22] = new LongOpt("no_cleanup", LongOpt.NO_ARGUMENT, null, 18);
longopts[23] = new LongOpt("log_stderr", LongOpt.NO_ARGUMENT, null, 19);
longopts[24] = new LongOpt("__REDUCE", LongOpt.NO_ARGUMENT, null, 20); // deprecated
longopts[25] = new LongOpt("diffs_window_size", LongOpt.OPTIONAL_ARGUMENT, null, 21);
longopts[26] = new LongOpt("max_final_diffs_in_window", LongOpt.OPTIONAL_ARGUMENT, null, 22);
longopts[27] = new LongOpt("CuffFly", LongOpt.NO_ARGUMENT, null, 23);
longopts[28] = new LongOpt("no_path_merging", LongOpt.NO_ARGUMENT, null, 24);
longopts[29] = new LongOpt("no_pruning", LongOpt.NO_ARGUMENT, null, 25);
longopts[30] = new LongOpt("no_compaction", LongOpt.NO_ARGUMENT, null, 26);
longopts[31] = new LongOpt("triplet_strict", LongOpt.NO_ARGUMENT, null, 27);
longopts[32] = new LongOpt("extended_triplet", LongOpt.NO_ARGUMENT, null, 28);
longopts[33] = new LongOpt("MAKE_PE_SE", LongOpt.NO_ARGUMENT, null, 29);
longopts[34] = new LongOpt("cuff_no_extend", LongOpt.NO_ARGUMENT, null, 30);
longopts[35] = new LongOpt("PasaFly", LongOpt.NO_ARGUMENT, null, 31);
longopts[36] = new LongOpt("FAST_PASA", LongOpt.NO_ARGUMENT, null, 32);
longopts[37] = new LongOpt("max_num_paths_per_pasa_node", LongOpt.OPTIONAL_ARGUMENT, null, 33);
longopts[38] = new LongOpt("max_number_of_paths_per_node_extend", LongOpt.OPTIONAL_ARGUMENT, null, 34);
longopts[39] = new LongOpt("PasaFlyUnique", LongOpt.NO_ARGUMENT, null, 35);
longopts[40] = new LongOpt("NO_EM_REDUCE", LongOpt.NO_ARGUMENT, null, 36);
longopts[41] = new LongOpt("MIN_PCT_DOM_ISO_EXPR", LongOpt.OPTIONAL_ARGUMENT, null, 37);
longopts[42] = new LongOpt("READ_END_PATH_TRIM_LENGTH", LongOpt.OPTIONAL_ARGUMENT, null, 38);
longopts[43] = new LongOpt("TREAT_PAIRS_AS_SINGLE", LongOpt.OPTIONAL_ARGUMENT, null, 39);
longopts[44] = new LongOpt("no_remove_lower_ranked_paths", LongOpt.OPTIONAL_ARGUMENT, null, 40);
longopts[45] = new LongOpt("NO_DP_READ_TO_VERTEX_ALIGN", LongOpt.NO_ARGUMENT, null, 41);
longopts[46] = new LongOpt("MAX_READ_SEQ_DIVERGENCE", LongOpt.OPTIONAL_ARGUMENT, null, 42);
Getopt g = new Getopt("TransAssembly", args, "L:F:N:C:V:SGDhO:R:",longopts);
int c;
if (MAX_SEQ_LEN_DP_ALIGN < ALL_VS_ALL_MAX_DP_LEN) {
ALL_VS_ALL_MAX_DP_LEN = MAX_SEQ_LEN_DP_ALIGN;
}
while ((c = g.getopt()) != -1)
{
switch(c)
{
case 1:
USE_DEGENERATE_CODE = true;
break;
case 2:
USE_STDERR = true;
break;
case 3:
MIN_PERCENT_IDENTITY_SAME_PATH = Float.parseFloat(g.getOptarg());
break;
case 4:
MAX_NUM_PATHS_PER_NODE_INIT = Integer.parseInt(g.getOptarg());
break;
case 5:
// no op
break;
case 6:
SMITH_WATERMAN_ALIGN_FLAG = true;
break;
case 7:
ALL_POSSIBLE_PATHS = true;
break;
case 8:
LENIENT_PATH_CHECKING = true;
break;
case 9:
PATH_REINFORCEMENT_DISTANCE = Integer.parseInt(g.getOptarg());
break;
case 10:
ORIGINAL_PATH_EXTENSIONS = true;
break;
case 11:
// available
break;
case 12:
MISO_OUTPUT = false; // hidden option, that will output in MISO format
break;
case 13:
MAX_DIFFS_SAME_PATH = Integer.parseInt(g.getOptarg());
break;
case 14:
MAX_INTERNAL_GAP_SAME_PATH = Integer.parseInt(g.getOptarg());
break;
case 15:
GENERATE_MIDDLE_DOT_FILES = true;
break;
case 16:
TRIPLET_LOCKING = true;
break;
case 17:
MAX_SEQ_LEN_DP_ALIGN = Integer.parseInt(g.getOptarg());
break;
case 18:
NO_CLEANUP = true;
break;
case 19:
LOG_STDERR = true;
break;
case 20:
//RUN_ALL_VS_ALL_FILTER = true;
break;
case 21:
DIFFS_WINDOW_SIZE = Integer.parseInt(g.getOptarg());
break;
case 22:
MAX_FINAL_DIFFS_IN_WINDOW = Integer.parseInt(g.getOptarg());
break;
case 23:
//min_path_set
cufflinksOpt = true;
break;
case 24:
// no path merging flag
NO_PATH_MERGING = true;
break;
case 25:
// turn off graph pruning (useful for testing purposes)
NO_GRAPH_PRUNING = true;
break;
case 26:
NO_GRAPH_COMPACTION = true;
NO_GRAPH_PRUNING = true;
break;
case 27:
TRIPLET_STRICT = true;
TRIPLET_LOCKING = true;
break;
case 28:
EXTENDED_TRIPLET = true;
TRIPLET_LOCKING = true;
break;
case 29:
MAKE_PE_SE = true;
break;
case 30:
CUFF_NO_EXTEND = true;
break;
case 31:
pasaFlyOpt = true;
break;
case 32:
FAST_PASA = true;
break;
case 33:
MAX_NUM_PATHS_PER_PASA_NODE = Integer.parseInt(g.getOptarg());
break;
case 34:
MAX_NUM_PATHS_PER_NODE_EXTEND = Integer.parseInt(g.getOptarg());
break;
case 35:
pasaFlyUniqueOpt = true;
break;
case 36:
NO_EM_REDUCE = true;
break;
case 37:
MIN_RELATIVE_ISOFORM_EXPRESSION = Float.parseFloat(g.getOptarg());
break;
case 38:
READ_END_PATH_TRIM_LENGTH = Integer.parseInt(g.getOptarg());
break;
case 39:
TREAT_PAIRS_AS_SINGLE = true;
break;
case 40:
NO_REMOVE_LOWER_RANKED_PATHS = true;
break;
case 41:
USE_DP_READ_TO_VERTEX_ALIGN = false;
break;
case 42:
MAX_READ_SEQ_DIVERGENCE = Float.parseFloat(g.getOptarg());
break;
case 'S':
COLLAPSE_SNPs = false;
break;
case 'G':
GENERATE_FULL_SEQ_GRAPH = true;
break;
case 'h':
printUsage = true;
break;
case 'L':
MIN_OUTPUT_SEQ = Integer.parseInt(g.getOptarg());
break;
case 'F':
MAX_PAIR_DISTANCE = Integer.parseInt(g.getOptarg());
break;
case 'N':
totalNumReads = Long.parseLong(g.getOptarg());
break;
case 'V':
BFLY_GLOBALS.VERBOSE_LEVEL = Integer.parseInt(g.getOptarg());
break;
case 'C':
file = g.getOptarg();
FILE = file;
break;
case 'D':
FIND_ALSO_DIFF_PATHS = true;
break;
case 'O':
PATH_REINFORCEMENT_DISTANCE_PERCENT = Integer.parseInt(g.getOptarg());
break;
case 'R':
MIN_READ_SUPPORT_THR = Integer.parseInt(g.getOptarg());
break;
case 0:
switch(Integer.parseInt(sb.toString()))
{
case 'E':
// compares between each edge and its sister edges (u->v; vs all output of u, or all input of v)
EDGE_THR = Double.parseDouble(g.getOptarg());
break;
case 'W':
// compares between each edge and its flow of its vertices (u->v; vs all input of u, or all output of v)
FLOW_THR = Double.parseDouble(g.getOptarg());
break;
}
break;
case '?':
printUsage = true;
break;
default:
printUsage = true;
}
}
if (LOG_STDERR)
ERR_STREAM = new PrintStream(new FileOutputStream(file + ".err"));
debugMes("Started",10);
debugMes("using Path alignment for path comparisons", 5);
debugMes("combine paths if (identity=(numberOfMatches/shorterLen) > " + MIN_PERCENT_IDENTITY_SAME_PATH+"%" +
" or if we have <= " + MAX_DIFFS_SAME_PATH+ " mismatches) "
+ "and if we have internal gap lengths <= " + MAX_INTERNAL_GAP_SAME_PATH
, 5);
int path_checking_opt_count = 0;
if (LENIENT_PATH_CHECKING) {
debugMes("Path extension mode: lenient.", 5);
path_checking_opt_count++;
}
if (ORIGINAL_PATH_EXTENSIONS) {
debugMes("Path extension mode: original path extension.", 5);
path_checking_opt_count++;
}
if (ALL_POSSIBLE_PATHS) {
debugMes("Path extension mode: all possible paths.", 5);
path_checking_opt_count++;
}
if (path_checking_opt_count > 1) {
System.err.println("Error, cannot enable more than one path checking option.");
printUsage = true;
}
if (cufflinksOpt || pasaFlyOpt || pasaFlyUniqueOpt) {
debugMes("CuffFly or PasaFly selected - running in SE mode to avoid uncertain alignments that break DAG transitivity", 5);
MAKE_PE_SE = true;
}
printUsage = printUsage
|| file.equals("")
|| totalNumReads==0
|| MAX_PAIR_DISTANCE == 0
|| MIN_READ_SUPPORT_THR < 1;
if (printUsage)
{
System.err.println("");
System.err.println("
System.err.println("
System.err.println("# Required:");
System.err.println("# -N <int> total number of reads or fragment pairs");
System.err.println("# -L <int> min length for an assembled sequence to be reported");
System.err.println("# -F <int> maximum fragment length (extreme dist between paired ends)");
System.err.println("# -C <string> prefix for component/reads file");
System.err.println("
System.err.println("
System.err.println("
System.err.println("# Optional:");
System.err.println("
System.err.println("# Graph compaction:");
System.err.println("# --edge-thr=<double> sets the threshold for keeping the edge (u->v), compared to all *output* of u, or all *input* of v");
System.err.println("# (default: 0.05).");
System.err.println("# --flow-thr=<double> sets the threshold for keeping the edge (u->v), compared to all *input* of u, or all *output* of v");
System.err.println("# (default: 0.02).");
System.err.println("# --no_pruning disable pruning of graph based on above thresholds.");
System.err.println("# --no_compaction do not compact the graph");
System.err.println("
System.err.println("# SNP handling modes:");
System.err.println("# --use-degenerate-code use degenerate DNA code ");
System.err.println("# (default: don't use degenerate DNA code).");
System.err.println("# --dont-collapse-snps don't collapse SNPs into a single letter ");
System.err.println("# (default: collapse SNPs into a single letter).");
System.err.println("
System.err.println("# Read-specific parameters:");
System.err.println("# --max_number_of_paths_per_node_init <int> maximum number of unique pairpaths that can begin at a given node. (default: " + MAX_NUM_PATHS_PER_NODE_INIT + ")");
System.err.println("# --MAKE_PE_SE convert split pair paths into separate single paths.");
System.err.println("# --NO_DP_READ_TO_VERTEX_ALIGN do not use DP alignment to align reads to vertex, use faster gap-free alignment");
System.err.println("# Butterfly Path extension reinforcement requirements");
System.err.println(" The following options are ordered by decreasing stringency.");
System.err.println("# --original_path_extension examines paths from nodes to sinks, can be very slow");
System.err.println("# /compatible_path_extension/ *DEFAULT MODE* read (pair) must be compatible and contain defined minimum extension support for path reinforcement.");
System.err.println("# --lenient_path_extension only the terminal node pair(v-u) require read support");
System.err.println("# --all_possible_paths all edges are traversed, regardless of long-range read path support");
System.err.println("# --CuffFly cufflinks style assembly of minimum paths.");
System.err.println("# --cuff_no_extend do not extend max-matching pairpaths");
System.err.println("# -R <int> minimum read support threshold. Default: 2");
System.err.println("# -O <int> path reinforcement 'backwards overlap' percent of -F. Default: (" + PATH_REINFORCEMENT_DISTANCE_PERCENT + ") Not used in --lenient_path_extension mode.");
System.err.println("
System.err.println("# --path_reinforcement_distance=<int> path reinforcement distance specified directly instead of computing it based on the -O value above.");
//System.err.println("# --triplet-lock lock in local (triplet) paths at nodes where read paths are supported. (increases stringency)");
//System.err.println("# --extended_triplet (implies --triplet-lock) where read paths extend beyond a triplet, require path compatibility with extended read path prefix");
//System.err.println("# --triplet_strict (implies --triplet-lock) break reconstructed paths at unsupported triplets");
System.err.println("# --max_number_of_paths_per_node_extend <int> maximum number of paths that can be extended from a given node. (default: " + MAX_NUM_PATHS_PER_NODE_EXTEND + ")");
System.err.println("# --READ_END_PATH_TRIM_LENGTH <int> min length of read terminus to extend into a graph node for it to be added to the pair path node sequence. (default: " + READ_END_PATH_TRIM_LENGTH + ")");
System.err.println("# --TREAT_PAIRS_AS_SINGLE ignores pairing info.");
System.err.println("
System.err.println("
System.err.println("# PasaFly related parameters:");
System.err.println("# --PasaFly use PASA-style pair path assembly");
System.err.println("# --FAST_PASA faster PASA by using a banded-DP strategy (experimental)");
System.err.println("
System.err.println("# Similar path reduction criteria:");
System.err.println("# --SW use Smith-Waterman local alignment mode (by default, uses Needleman-Wunsch global alignment)");
System.err.println("# --max_diffs_same_path=<int> max allowed differences encountered between path sequences to combine them. (default: " + MAX_DIFFS_SAME_PATH + ")");
System.err.println("# --min_per_id_same_path=<float> min percent identity for two paths to be merged into single paths (default: " + MIN_PERCENT_IDENTITY_SAME_PATH + ")");
System.err.println("# --max_internal_gap_same_path=<int> maximum number of internal consecutive gap characters allowed for paths to be merged into single paths. (default: " + MAX_INTERNAL_GAP_SAME_PATH);
System.err.println("# --max_seq_len_DP_align=<int> path regions to be aligned that are longer than this length use simple zipper alignment. (default: " + MAX_SEQ_LEN_DP_ALIGN + ")");
System.err.println("# --no_path_merging disable path comparisons and merging based on above settings.");
//System.err.println("# --no_remove_lower_ranked_paths iteratively rank paths by unique read content, discard paths without unique support");
System.err.println("# --NO_EM_REDUCE run expectation maximization scheme to rank transcripts, remove lower ranking transcripts that yield no unique read content.");
System.err.println("# --MIN_PCT_DOM_ISO_EXPR=<float> min percent expression of a dominantly expressed isoform for a gene. Default: " + MIN_RELATIVE_ISOFORM_EXPRESSION);
System.err.println("
System.err.println("# Misc: ");
System.err.println("# --max_number_of_paths_per_node total number of paths allowed per node (default=" + MAX_NUM_PATHS_PER_NODE_INIT + ")");
System.err.println("# --generate-full-sequence-graphs generate full sequence dot files");
System.err.println("# (default: generate dot files with start and end of each seq).");
System.err.println("# --generate_intermediate_dot_files generate dot files after each step of butterfly");
System.err.println("# (default: generate only a the start and end dot files).");
System.err.println("# --stderr prints the output to STDERR ");
System.err.println("# --log_stderr writes status info to fiule COMPONENT_PREFIX.err ");
System.err.println("# -V <int> verbosity level ");
System.err.println("# (default: 10 - progress of method + some stats)");
System.err.println("# (15 - like (10) + final paths to be added + additional loop info and dot files)");
System.err.println("# (20 - maximum verbosity)");
System.err.println("
System.err.println("
System.err.println("");
System.exit(1);
}
if (USE_STDERR && BFLY_GLOBALS.VERBOSE_LEVEL < 5) {
BFLY_GLOBALS.VERBOSE_LEVEL = 5; // default verbose level for stderr
}
// set calculated vars:
if (PATH_REINFORCEMENT_DISTANCE > 0) {
debugMes("path reinforcement distance set manually to: " + PATH_REINFORCEMENT_DISTANCE, 5);
}
else {
PATH_REINFORCEMENT_DISTANCE = (int) (PATH_REINFORCEMENT_DISTANCE_PERCENT/100.0 * MAX_PAIR_DISTANCE);
debugMes("path reinforcement distance computed based on "
+ PATH_REINFORCEMENT_DISTANCE_PERCENT + "% of max pair distance: "+ MAX_PAIR_DISTANCE
+ " = " + PATH_REINFORCEMENT_DISTANCE + " bases", 5);
}
if (!COLLAPSE_SNPs && USE_DEGENERATE_CODE)
USE_DEGENERATE_CODE = false;
Vector<Integer> rootIDs = new Vector<Integer>();
HashMap<Integer,Integer> outFlow = new HashMap<Integer, Integer>();
HashMap<Integer,Integer> inFlow = new HashMap<Integer, Integer>();
HashMap<Integer,String> kmers = new HashMap<Integer, String>();
LONG_READ_PATH_MAP = new HashMap<PairPath, ArrayList<String>>();
LONG_READ_NAME_TO_PPath = new HashMap<String, PairPath>();
PrintStream pout_diff = null;
PrintStream pout_all = new PrintStream(new FileOutputStream(file+".allProbPaths.fasta"));
debugMes("SECTION\n================\nParsing de Bruijn graph\n======================\n", 5);
debugMes("preProcessGraphFile: " + file + ".out", 10);
preProcessGraphFile(file+".out",outFlow, inFlow, kmers);
debugMes("SECTION\n==================\nbuildNewGraph\n========================\n", 5);
debugMes("buildNewGraphFirstLetter: " + file + ".out", 10);
DirectedSparseGraph<SeqVertex, SimpleEdge> graph = buildNewGraphUseKmers(file+".out",rootIDs,outFlow,inFlow,kmers);
SeqVertex.set_kmer_length(KMER_SIZE);
SeqVertex.set_graph(graph);
LAST_REAL_ID = LAST_ID;
debugMes("Graph is built",10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
describeNodes(graph);
}
HashMap<String,Integer> originalGraphKmerToNodeID = new HashMap<String,Integer>();
for (SeqVertex sv : graph.getVertices()) {
String kmer = sv.getName();
int id = sv.getID();
originalGraphKmerToNodeID.put(kmer, id);
debugMes("ORIGINAL GRAPH NODE: " + kmer + " with ID: " + id, 20);
}
String[] tmpFile = file.split("/");
String graphName = tmpFile[tmpFile.length-1];
//boolean createMiddleDotFiles = false;
boolean createMiddleDotFiles = GENERATE_MIDDLE_DOT_FILES;
if (createMiddleDotFiles)
writeDotFile(graph,file + "_deBruijn.A.dot", graphName, false);
// remember the original edge weights so we can relabel them later on in the final graph according to orig ids.
HashMap<String,Double> original_edge_weights_using_orig_kmers = new HashMap<String,Double>();
for (SimpleEdge e : graph.getEdges()) {
String from_kmer = graph.getSource(e).getName();
String to_kmer = graph.getDest(e).getName();
original_edge_weights_using_orig_kmers.put(from_kmer + "_" + to_kmer, e.getWeight());
}
if (! NO_GRAPH_PRUNING) {
debugMes("fixExtremeleyHighSingleEdges()", 1);
fixExtremelyHighSingleEdges(graph,outFlow,inFlow);
debugMes("removeLightEdges()", 1);
removeLightEdges(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_removeLightEdges_init.B.dot",graphName, false);
}
if (! NO_GRAPH_COMPACTION) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## Node descriptions before linear compaction:", 20);
describeVertices(graph);
}
debugMes("compactLinearPaths()", 1);
compactLinearPaths(graph);
//removeShortOrphanNodes(graph, MIN_OUTPUT_SEQ); // do this later
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## Node descriptions after linear compaction:", 20);
describeVertices(graph);
}
My_DFS dfs = new My_DFS(graph);
dfs.runDFS2();
if (createMiddleDotFiles)
writeDotFile(graph,file + "_compactLinearPaths_init.C.dot",graphName, false);
}
/*
if (! NO_GRAPH_PRUNING) {
boolean keep_pruning_graph = true;
int prune_compact_round = 0;
while (keep_pruning_graph) {
prune_compact_round++;
keep_pruning_graph = removeLightEdges(graph);
compactLinearPaths(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_prune_compact_round_" + prune_compact_round + ".D.dot",graphName, false);
}
}
*/
if (COLLAPSE_SNPs) {
if (USE_DEGENERATE_CODE)
removeSingleNtBubblesWithDegenerateCode(graph); // still needs updating //FIXME
else
removeSingleNtBubbles(graph);
if (createMiddleDotFiles)
writeDotFile(graph,file + "_SNPs_removed.H.dot",graphName, false);
/*
if (! NO_GRAPH_COMPACTION) {
compactLinearPaths(graph);
}
if (createMiddleDotFiles)
writeDotFile(graph,file + "_SNPs_removed_post-collapse.H.dot",graphName, false);
*/
}
debugMes("SECTION\n====================\nRemoving small components.\n====================\n", 5);
//remove small components
calcSubComponentsStats(graph);
if (graph.getVertexCount() == 0) {
debugMes("Warning: graph pruned to nothingness", 1);
//Runtime.getRuntime().exec("mv " + bfly_start_indicator_file + " " + bfly_end_indicator_file);
System.exit(0);
}
if (createMiddleDotFiles)
writeDotFile(graph,file + "_compactLinearPaths_removeSmallComp.D.dot",graphName, false);
HashMap<Integer, LocInGraph> originalVerIDsMapping = getOriginalVerIDsMappingHash(graph);
int numXstructs = countNumOfXstructures(graph);
if (numXstructs>0)
debugMes("number X structures = "+numXstructs,10);
// Done Compacting graph.
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis = new DijkstraDistance<SeqVertex, SimpleEdge>(graph, true);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 19) {
debugMes("\nSECTION\n=================\nNode descriptions before threading.\n===================\n", 5);
describeNodes(graph);
}
debugMes("\nSECTION\n====================\nThreading reads through the graph\n=========================\n", 5);
// maps individual reads to paths within the graph
// readNameHash: "actual read name" => Read object (see Read class)
HashMap<String, List<Read>> readNameHash = getReadStarts(graph,file+".reads",originalVerIDsMapping,rootIDs, originalGraphKmerToNodeID);
debugMes("\nSECTION\n==================\nPairing up the reads into PairPaths\n===========================\n", 5);
// note the logic for not doing the paired linking is handled under getReadStarts by just
// not using the /1 or /2 value.
// pair up reads into PathPairs
// combinedReadHash: start_vertex => (pair_path => count)
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash = getSuffStats_wPairs(graph,readNameHash,dijkstraDis);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Printing Pair Paths Before DAG Overlap Layout
printPairPaths(combinedReadHash, "PairPaths@Init");
}
/// Moving from Collapsed de Bruijn Graph to an Overlap Graph 'seqvertex_graph'
debugMes("SECTION\n======== Create DAG from Overlap Layout ============\n\n", 5);
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph = new DirectedSparseGraph<SeqVertex, SimpleEdge>();
HashMap<Integer, HashMap<PairPath, Integer>> seqvertex_combinedReadHash = create_DAG_from_OverlapLayout(seqvertex_graph, combinedReadHash, file, graphName, createMiddleDotFiles);
My_DFS dfs = new My_DFS(seqvertex_graph);
dfs.runDFS2();
if (createMiddleDotFiles)
writeDotFile(seqvertex_graph,file + "_vertex_DAG_postOverlapLayout.dot",graphName, false);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Printing Pair Paths
printPairPaths(seqvertex_combinedReadHash, "PairPaths@PostOverlapLayout");
}
debugMes("SECTION\n======= Reorganize Read Pairings =========\n\n", 5);
dijkstraDis = new DijkstraDistance<SeqVertex, SimpleEdge>(seqvertex_graph, true);
seqvertex_combinedReadHash = reorganizeReadPairings(seqvertex_graph, seqvertex_combinedReadHash, dijkstraDis);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
printPairPaths(seqvertex_combinedReadHash, "PairPaths@AfterPairReorganization");
//start working on one sub component at a time:
// look for loops, try to solve them
// if loops remain, move on to the next subComp.
//removeShortOrphanNodes(graph, MIN_OUTPUT_SEQ);
/* old way... suffers from suboptimal setting of node depth
*
int count_pairpaths_removed = handleRemainingCyclicReads(componentReadHash, graph);
debugMes("Removed " + count_pairpaths_removed + " reads that appeared to retain complex cycles", 10);
*/
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("
report_pairpath_counts(componentReadHash);
}
reduce_to_max_paths_per_node(componentReadHash, TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_NODE_INIT);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("
report_pairpath_counts(componentReadHash);
}
// examine uncertainty of paths within the graph by looking at triplet support
debugMes("### Extracting triplets from reads.", 10);
HashMap<Integer, List<List<Integer>>> tripletMapper = extractTripletsFromReads(componentReadHash);
HashMap<Integer,Boolean> xStructuresResolvedByTriplets = getXstructuresResolvedByTriplets(seqvertex_graph, comp, tripletMapper);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
// describe the locked down nodes
debugMes("\n### " + tripletMapper.size() + " nodes have locked-in triplet paths:", 10);
for (Integer central_node : tripletMapper.keySet()) {
debugMes("Triplet locks for: " + central_node + " : " + tripletMapper.get(central_node), 10);
}
}
if (INFER_UNRESOLVED_XSTRUCTURE_PATHS) {
debugMes("## INFERRING UNRESOLVED X STRUCTURE PATHS ##", 10);
infer_best_triplets_across_unresolved_Xstructure(seqvertex_combinedReadHash, seqvertex_graph, xStructuresResolvedByTriplets, tripletMapper);
}
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all = null;
/*
if (false) { // just for debugging //FIXME: make a debug parameter
// just reconstruct paths based on the input reads
FinalPaths_all = reconstruct_paths_from_reads(graph, componentReadHash);
}
else if (false) {
// sort descendingly by length, and greedily assemble compatible paths.
FinalPaths_all = reconstruct_paths_from_collapsed_reads(graph, componentReadHash);
}
else if (false) {
examine_compatible_paths_debugging_only(graph, componentReadHash);
System.exit(1);
}
else if (false) {
examine_out_of_order_depth_in_read_paths(graph, componentReadHash);
System.exit(1);
}
*/
if (cufflinksOpt || pasaFlyOpt) {
/*
// methods are very sensitive to out-of-order node depths in read paths:
int num_fractured_paths = handleRemainingCyclicReads(componentReadHash, graph);
debugMes("Needed to fracture: " + num_fractured_paths + " pair paths due to out-of-order node depths", 10);
*/
if (cufflinksOpt) {
FinalPaths_all = cuffMinPaths(seqvertex_graph, componentReadHash,dijkstraDis);
}
else if (pasaFlyOpt) {
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
FinalPaths_all = pasafly(seqvertex_graph, componentReadHash,dijkstraDis, tripletMapper, extendedTripletMapper);
}
}
else if (pasaFlyUniqueOpt) {
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
FinalPaths_all = pasaflyunique(seqvertex_graph, componentReadHash,dijkstraDis, tripletMapper, extendedTripletMapper);
}
else {
// Regular butterfly all probable paths mode:
debugMes("### Extracting complex path prefixes from reads.", 10);
HashMap<Integer, List<List<Integer>>> extendedTripletMapper = extractComplexPathPrefixesFromReads(componentReadHash);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 16) {
debugMes("\n
for (Integer term_node : extendedTripletMapper.keySet()) {
debugMes("Complex prefix paths for: " + term_node + " : " + extendedTripletMapper.get(term_node), 16);
}
}
addSandT(seqvertex_graph,comp,componentReadHash);
FinalPaths_all = butterfly(seqvertex_graph, comp, componentReadHash, totalNumReads,
pout_all, dijkstraDis, dijkstraDisWoVer,
tripletMapper, extendedTripletMapper, xStructuresResolvedByTriplets);
//pathName = get_pathName_string(path, graph);
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : FinalPaths_all.keySet()) {
debugMes("FinalPath@BeforeFiltering: " + path, 15);
}
}
// remove short paths
FinalPaths_all = remove_short_seqs(FinalPaths_all, seqvertex_graph);
if (FinalPaths_all.isEmpty()) {
debugMes("No paths to pursue. Continue...", 15);
continue;
}
int numXstructsResolved = countNumOfXstructuresResolved(seqvertex_graph,comp,FinalPaths_all);
if (numXstructs>0)
debugMes("number X structures resolved = "+numXstructsResolved + " / " + numXstructs,10);
debugMes("ReadMappings BEFORE Path-to-orig_ID conversion:", 20);
HashMap<List<Integer>,HashMap<PairPath,Integer>> finalPathsToContainedReads = assignCompatibleReadsToPaths(FinalPaths_all, componentReadHash);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
// verbose dump of read support
for (List<Integer> final_path : finalPathsToContainedReads.keySet()) {
HashMap<PairPath,Integer> contained_reads = finalPathsToContainedReads.get(final_path);
debugMes("PRELIM_FINAL_PATH:\n" + final_path + "\ncontains:", 20);
int sum_support = 0;
for (PairPath pp : contained_reads.keySet()) {
Integer read_support = contained_reads.get(pp);
debugMes(pp + "\tcount: " + read_support, 20);
sum_support += read_support;
}
debugMes("Total support: " + sum_support + "\n", 20);
}
}
// remove those paths that didn't have reads assigned:
{
Set<List<Integer>> paths_to_remove = new HashSet<List<Integer>>();
for (List<Integer> path : FinalPaths_all.keySet()) {
if (! finalPathsToContainedReads.containsKey(path)) {
debugMes("-removing final path that was not assigned read support: " + path, 10);
paths_to_remove.add(path);
}
}
for (List<Integer> path : paths_to_remove) {
FinalPaths_all.remove(path);
}
}
if ( BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("## ILLUSTRATING FINAL ASSEMBLIES", 20);
illustrateFinalPaths(FinalPaths_all, finalPathsToContainedReads);
}
// convert graph node IDs back to the original collapsed de Bruijn graph:
debugMes("Converting graph node IDs back to original IDs.", 15);
HashMap<List<Integer>,HashMap<PairPath,Integer>> finalPathsToContainedReads_all_orig_ids = new HashMap<List<Integer>,HashMap<PairPath,Integer>>();
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all_orig_ids = convert_to_orig_ids(FinalPaths_all, finalPathsToContainedReads, finalPathsToContainedReads_all_orig_ids);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
// verbose dump of read support
debugMes("** Post-original ID conversion, path support:", 20);
for (List<Integer> final_path : finalPathsToContainedReads_all_orig_ids.keySet()) {
HashMap<PairPath,Integer> contained_reads = finalPathsToContainedReads_all_orig_ids.get(final_path);
debugMes("PRELIM_FINAL_PATH:\n" + final_path + "\ncontains:", 20);
int sum_support = 0;
for (PairPath pp : contained_reads.keySet()) {
Integer read_support = contained_reads.get(pp);
debugMes(pp + "\tcount: " + read_support, 20);
sum_support += read_support;
}
debugMes("Total support: " + sum_support + "\n", 20);
}
}
if ( (! NO_PATH_MERGING) && FinalPaths_all_orig_ids.size() > 1) {
// do CDHIT-like removal of highly similar but lesser supported paths.
debugMes("SECTION\n========= CD-HIT -like Removal of Too-Similar Sequences with Lesser Read Support =========\n\n", 5);
// alignment-based removal of lesser-supported paths that are too similar in sequence.
FinalPaths_all_orig_ids = reduce_cdhit_like(FinalPaths_all_orig_ids, graph, finalPathsToContainedReads_all_orig_ids);
}
// collect all results so far. (yes, Final is not so Final after all ... revisit naming of vars)
FinalPaths_FinalCollection.putAll(FinalPaths_all_orig_ids);
FinalCollection_ContainedReads.putAll(finalPathsToContainedReads_all_orig_ids);
} // end of for each component
if ( (! NO_PATH_MERGING) && FinalPaths_FinalCollection.size() > 1) {
// do CDHIT-like removal of highly similar but lesser supported paths.
debugMes("SECTION\n========= CD-HIT -like Removal of Too-Similar Sequences with Lesser Read Support =========\n\n", 5);
// alignment-based removal of lesser-supported paths that are too similar in sequence.
FinalPaths_FinalCollection = reduce_cdhit_like(FinalPaths_FinalCollection, graph, FinalCollection_ContainedReads);
}
// Gene-level grouping of transcripts
HashMap<List<Integer>,Integer> separate_gene_ids = group_paths_into_genes(FinalPaths_FinalCollection, graph);
// Filtering out lower-quality paths
HashMap<List<Integer>, Pair<Integer>> filtered_paths_to_keep = new HashMap<List<Integer>,Pair<Integer>>();
debugMes("Sep Gene IDs:" + separate_gene_ids, 10);
if ( (! NO_EM_REDUCE) && FinalPaths_FinalCollection.size() > 1) {
HashMap<List<Integer>, Pair<Integer>> EM_reduced_paths = run_EM_REDUCE(FinalPaths_FinalCollection, graph, FinalCollection_ContainedReads, separate_gene_ids);
filtered_paths_to_keep.putAll(EM_reduced_paths);
}
// if filtering produced a non-empty set, use it in place of the unfiltered collection.
if (! filtered_paths_to_keep.isEmpty()) {
FinalPaths_FinalCollection = filtered_paths_to_keep;
}
String component_name = pathName[pathName.length-1];
if (FinalPaths_FinalCollection==null || FinalPaths_FinalCollection.size() == 0) {
debugMes("No Butterfly Assemblies to report", 10);
return;
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : FinalPaths_FinalCollection.keySet()) {
debugMes("FinalPath@AfterFiltering: " + path, 15);
}
}
// get long read content:
HashMap<List<Integer>,ArrayList<String>> final_paths_to_long_read_content = new HashMap<List<Integer>,ArrayList<String>>();
if (! LONG_READ_PATH_MAP.isEmpty()) {
assign_long_read_content_to_final_paths(FinalPaths_FinalCollection, FinalCollection_ContainedReads, LONG_READ_PATH_MAP, final_paths_to_long_read_content);
}
// Output the fasta sequences
printFinalPaths(FinalPaths_FinalCollection, graph, pout_all, component_name, totalNumReads,
final_paths_to_long_read_content, separate_gene_ids);
totalNumPaths = FinalPaths_FinalCollection.size();
removeAllEdgesOfSandT(graph);
pout_all.close();
if (FIND_ALSO_DIFF_PATHS)
pout_diff.close();
debugMes("total number of paths reported = "+totalNumPaths+" from "+totalNumSuccComps +" components",1);
debugMes("Done",10);
if (LOG_STDERR)
ERR_STREAM.close();
}
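// Maps each final path (and its supporting PairPaths) from the post-overlap-layout
// vertex IDs back to the original collapsed de Bruijn graph IDs, merging read counts
// when distinct PairPaths collapse onto the same original-ID PairPath.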
private static HashMap<List<Integer>, Pair<Integer>> convert_to_orig_ids(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads_all_orig_ids) {
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all_orig_ids = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> final_path : finalPaths_all.keySet()) {
List<Integer> revised_path_orig_ids = new ArrayList<Integer>();
for (Integer seq_vertex_id : final_path ) {
SeqVertex sv = SeqVertex.retrieveSeqVertexByID(seq_vertex_id);
Integer orig_id = sv.getOrigButterflyID();
revised_path_orig_ids.add(orig_id);
}
FinalPaths_all_orig_ids.put(revised_path_orig_ids, finalPaths_all.get(final_path));
debugMes("-final_path: " + final_path + " now set to: " + revised_path_orig_ids, 15);
debugMes("-and set to contents: " + finalPaths_all.get(final_path), 20);
HashMap<PairPath, Integer> contained_reads = finalPathsToContainedReads.get(final_path);
for (PairPath pp : contained_reads.keySet()) {
PairPath updated_pp = pp.setOrigIds();
Integer read_count = contained_reads.get(pp);
debugMes("pp: " + pp + ", updated_pp: " + updated_pp + ", count: " + read_count, 20);
if (finalPathsToContainedReads_all_orig_ids.containsKey(revised_path_orig_ids)) {
HashMap<PairPath, Integer> localContainedReads = finalPathsToContainedReads_all_orig_ids.get(revised_path_orig_ids);
if (localContainedReads.containsKey(updated_pp)) {
int prev_count = localContainedReads.get(updated_pp);
localContainedReads.put(updated_pp, prev_count + read_count);
}
else {
localContainedReads.put(updated_pp, read_count);
}
}
else {
HashMap<PairPath, Integer> localContainedReads = new HashMap<PairPath, Integer>();
localContainedReads.put(updated_pp, read_count);
finalPathsToContainedReads_all_orig_ids.put(revised_path_orig_ids, localContainedReads);
}
}
}
return FinalPaths_all_orig_ids;
}
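// For each final path, collects the names of long reads whose PairPaths are contained
// in that path, using the supplied PairPath => long-read-name map.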
private static void assign_long_read_content_to_final_paths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<PairPath, ArrayList<String>> LONG_READ_PATH_MAP_local,
HashMap<List<Integer>, ArrayList<String>> final_paths_to_long_read_content) {
for (List<Integer> path : finalPaths_all.keySet()) {
if (finalPathsToContainedReads.get(path) == null) {
//FIXME: why does this happen? very rare.
continue;
}
for (PairPath pp : finalPathsToContainedReads.get(path).keySet()) {
if (LONG_READ_PATH_MAP_local.containsKey(pp)) {
if (! final_paths_to_long_read_content.containsKey(path)) {
final_paths_to_long_read_content.put(path, new ArrayList<String>());
}
final_paths_to_long_read_content.get(path).addAll(LONG_READ_PATH_MAP_local.get(pp));
}
}
}
return;
}
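// Drops candidate paths whose reconstructed sequence is shorter than MIN_OUTPUT_SEQ.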
private static HashMap<List<Integer>, Pair<Integer>> remove_short_seqs(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
HashMap<List<Integer>, Pair<Integer>> long_enough_paths = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
String seq = getPathSeq(graph, path);
if (seq.length() >=MIN_OUTPUT_SEQ) {
// retain it:
long_enough_paths.put(path, finalPaths_all.get(path));
}
}
return(long_enough_paths);
}
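// EM-style reduction: estimates per-path expression from assigned read support and
// sequence length, sorts paths by support (descending), and removes paths whose
// expression is too low relative to the dominant isoform within the same gene grouping.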
public static HashMap<List<Integer>, Pair<Integer>> run_EM_REDUCE (HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<List<Integer>,
HashMap<PairPath, Integer>> finalPathsToContainedReads,
HashMap<List<Integer>,Integer> separate_gene_ids) {
debugMes("SECTION\n======
List<List<Integer>> all_paths = new ArrayList<List<Integer>>(finalPaths_all.keySet());
// need sequence lengths
HashMap<List<Integer>,Integer> seqLengths = new HashMap<List<Integer>,Integer>();
for (List<Integer> path : all_paths) {
String seq = getPathSeq(graph,path);
seqLengths.put(path, seq.length());
}
//List<List<Integer>> retained_paths = new ArrayList<List<Integer>>();
// sort paths by pair-path support descendingly
PathExpressionComparator pc = new PathExpressionComparator(all_paths, finalPathsToContainedReads, seqLengths);
Collections.sort(all_paths, pc);
Collections.reverse(all_paths); // now descending according to read support.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Expression values for each candidate path:", 15);
for (List<Integer> path : all_paths) {
double expr = pc.get_expr(path);
double sum_frag_counts = pc.get_transcript_to_sum_frag_counts(path);
debugMes("Expr=" + expr + ", sum_exp_frags=" + sum_frag_counts + ", path: " + path, 15);
}
}
// sort by expr, remove those w/ < 5% of expression of dominant isoform.
List<List<Integer>> all_paths_min_rel_expr = remove_lesser_supported_paths_EM(all_paths, finalPathsToContainedReads, graph, pc, separate_gene_ids);
// convert back to earlier-style data structure for compatibility
HashMap<List<Integer>, Pair<Integer>> final_paths_map = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : all_paths_min_rel_expr) {
final_paths_map.put(path, finalPaths_all.get(path));
debugMes("EM_REDUCE retaining: " + path, 15);
}
return (final_paths_map);
}
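// Overlap-layout step: collapses PairPaths into non-contained read paths, builds a
// path overlap graph, breaks any cycles, converts the resulting path DAG into a
// SeqVertex DAG (stored in seqvertex_graph), and re-expresses the PairPaths in the
// new vertex IDs.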
private static HashMap<Integer, HashMap<PairPath, Integer>> create_DAG_from_OverlapLayout(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph, HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, String dot_file_prefix,
String graphName, boolean createMiddleDotFiles) {
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
// remove the contained reads
//contained_path_to_containers: (key= the path contained, value = list of all other paths that fully contain it)
HashMap<List<Integer>,List<List<Integer>>> contained_path_to_containers = new HashMap<List<Integer>,List<List<Integer>>>();
List<List<Integer>> noncontained_paths = remove_containments(paths, contained_path_to_containers);
debugMes("Noncontained paths: " + noncontained_paths, 15);
// find dispersed repeats ////
HashSet<Integer> dispersed_repeat_nodes = find_dispersed_repeat_nodes(noncontained_paths);
// build the overlap graph
// build a graph of compatible paths.
List<Path> path_list = new ArrayList<Path>();
for (List<Integer> p : noncontained_paths) {
path_list.add(new Path(p));
}
HashMap<String,PathOverlap> pathMatches = new HashMap<String,PathOverlap>();
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph = construct_path_overlap_graph(path_list, pathMatches,
dispersed_repeat_nodes, dot_file_prefix,
graphName, createMiddleDotFiles);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.dot", graphName);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
// output the path node listing
for (Path p : path_overlap_graph.getVertices()) {
debugMes("PathNodeDescription: " + p, 15);
}
}
// add read pairing information to the graph:
HashSet<SimplePathNodeEdge> pair_links = addPairPathsToOverlapGraph(path_overlap_graph, pairPathToReadSupport, contained_path_to_containers);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.PE_links_added.dot", graphName);
// Breaking cycles
int cycle_round = 0;
boolean breaking_cycles = true;
while (breaking_cycles) {
cycle_round++;
debugMes("// Breaking cycles in Path Overlap Graph (POG), Round: " + cycle_round, 10);
breaking_cycles = break_cycles_in_path_overlap_graph(path_overlap_graph);
if (createMiddleDotFiles)
writeDotFile(path_overlap_graph, dot_file_prefix + "_POG.cyclesRemoved.r" + cycle_round + ".dot", graphName);
}
// remove the pair_link edges before converting the overlap graph to a seq vertex graph, since
// edges in the overlap graph are intended to represent overlaps.
for (SimplePathNodeEdge spne : pair_links) {
if (path_overlap_graph.containsEdge(spne)) {
path_overlap_graph.removeEdge(spne);
}
}
// Convert the path DAG to a seq vertex DAG
HashMap<Path,PathWithOrig> orig_path_to_updated_path = convert_path_DAG_to_SeqVertex_DAG(path_overlap_graph,
pathMatches, seqvertex_graph, dot_file_prefix, graphName, createMiddleDotFiles);
// note, path_overlap_graph includes non-contained paths
// pairPathToReadSupport contains all paths
combinedReadHash = update_PairPaths_using_overlapDAG_refined_paths(orig_path_to_updated_path, pairPathToReadSupport, contained_path_to_containers);
return(combinedReadHash);
}
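// Adds paired-end linkage edges between path nodes whose member paths come from the
// two ends of the same PairPath. Note: currently short-circuited (returns immediately)
// while PE linking is disabled for debugging.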
private static HashSet<SimplePathNodeEdge> addPairPathsToOverlapGraph(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
Map<PairPath, Integer> pairPathToReadSupport,
HashMap<List<Integer>, List<List<Integer>>> contained_path_to_containers) {
// data structure conversion
HashMap<List<Integer>,Path> list_to_path_hmap = new HashMap<List<Integer>,Path>();
for (Path p : path_overlap_graph.getVertices()) {
List<Integer> node_id_list = p.get_vertex_list();
list_to_path_hmap.put(node_id_list, p);
}
HashSet<SimplePathNodeEdge> pair_link_edges = new HashSet<SimplePathNodeEdge> ();
if (true)
return(pair_link_edges); // turning off PE //////////// DEBUG ////////////
// add pair links
for (PairPath pp : pairPathToReadSupport.keySet()) {
if (pp.hasSecondPath()) {
List<Integer> p1 = pp.getPath1();
List<Integer> p2 = pp.getPath2();
debugMes("# PE edges in overlap graph, targeting: " + p1 + " to " + p2, 15);
List<Path> p1_path_list = new ArrayList<Path>();
List<Path> p2_path_list = new ArrayList<Path>();
if (list_to_path_hmap.containsKey(p1) && list_to_path_hmap.containsKey(p2)) {
// add pairing edge between existing path nodes.
p1_path_list.add(list_to_path_hmap.get(p1));
p2_path_list.add(list_to_path_hmap.get(p2));
}
/*
else if (list_to_path_hmap.containsKey(p1)) {
p1_path_list.add(list_to_path_hmap.get(p1));
// get containment list for p2, add edges from p1 -> p2
List<List<Integer>> p2_contained_list = contained_path_to_containers.get(p2);
for (List<Integer> p2_container : p2_contained_list) {
Path p2_container_path = list_to_path_hmap.get(p2_container);
p2_path_list.add(p2_container_path);
}
}
else if (list_to_path_hmap.containsKey(p2)) {
p2_path_list.add(list_to_path_hmap.get(p2));
// get containment list for p1, and edges from p1-> p2
List<List<Integer>> p1_contained_list = contained_path_to_containers.get(p1);
for (List<Integer> p1_container : p1_contained_list) {
Path p1_container_path = list_to_path_hmap.get(p1_container);
p1_path_list.add(p1_container_path);
}
}
*/
int MAX_PAIR_LINKS = 3;
if (p1_path_list.size() < MAX_PAIR_LINKS && p2_path_list.size() < MAX_PAIR_LINKS) {
for (Path p1_path_node : p1_path_list) {
for (Path p2_path_node : p2_path_list) {
// add edge to graph if it doesn't already exist:
if (path_overlap_graph.findEdge(p1_path_node, p2_path_node) == null) {
SimplePathNodeEdge spne = new SimplePathNodeEdge(1, p1_path_node.getPathNodeID(), p2_path_node.getPathNodeID());
path_overlap_graph.addEdge(spne, p1_path_node, p2_path_node);
debugMes("-adding PE read edge between: " + p1_path_node.getPathNodeID() + " and " + p2_path_node.getPathNodeID(), 15);
pair_link_edges.add(spne);
}
}
}
}
} // endif pp.hasSecondPath()
}
return(pair_link_edges);
}
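// Counts, for each graph node, how many distinct read paths contain it; nodes occurring
// in at least MIN_OCCURRENCE_REPEAT_NODE paths are flagged as dispersed repeats.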
private static HashSet<Integer> find_dispersed_repeat_nodes(
List<List<Integer>> paths) {
HashMap<Integer,Integer> node_in_path_counter = new HashMap<Integer,Integer>();
for (List<Integer> path : paths) {
HashSet<Integer> node_found = new HashSet<Integer>();
for (Integer node_id : path) {
node_found.add(node_id);
}
Iterator<Integer> it = node_found.iterator();
while (it.hasNext()) {
Integer node_id = it.next();
if (node_in_path_counter.containsKey(node_id)) {
node_in_path_counter.put(node_id, node_in_path_counter.get(node_id)+1);
}
else {
node_in_path_counter.put(node_id, 1);
}
}
}
List<Integer> node_ids = new ArrayList<Integer>(node_in_path_counter.keySet());
final HashMap<Integer,Integer> node_counter = node_in_path_counter;
Collections.sort(node_ids, new Comparator<Integer>() {
public int compare (Integer a, Integer b) {
if (node_counter.get(a) < node_counter.get(b)) {
return(1);
}
else if (node_counter.get(a) > node_counter.get(b)) {
return(-1);
}
else {
return(0);
}
}
});
// pull out the repetitive ones
int MIN_OCCURRENCE_REPEAT_NODE = 10;
HashSet<Integer> repeat_nodes = new HashSet<Integer>();
for (Integer node_id : node_ids) {
int repeat_count = node_counter.get(node_id);
debugMes("Node[" + node_id + "] has repeat count: " + repeat_count, 15);
if (repeat_count >= MIN_OCCURRENCE_REPEAT_NODE) {
repeat_nodes.add(node_id);
}
}
return(repeat_nodes);
}
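// Rewrites every PairPath in terms of the refined (overlap-DAG) vertex IDs. PairPaths
// whose ends each map to a single refined path keep their pairing and read support;
// ambiguously mapping ends are split into separate single-path entries.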
private static HashMap<Integer, HashMap<PairPath, Integer>> update_PairPaths_using_overlapDAG_refined_paths(
HashMap<Path, PathWithOrig> orig_path_to_updated_path,
Map<PairPath, Integer> pairPathToReadSupport,
HashMap<List<Integer>, List<List<Integer>>> contained_path_to_containers) {
// get the old-to-new listing in List<Integer> format for use with PairPath objects
HashMap<List<Integer>,List<Integer>> old_to_new_path = new HashMap<List<Integer>,List<Integer>>();
for (Path orig_path : orig_path_to_updated_path.keySet()) {
List<Integer> orig_path_list = orig_path.get_vertex_list();
List<Integer> updated_path_list = orig_path_to_updated_path.get(orig_path).getVertexList();
old_to_new_path.put(orig_path_list, updated_path_list);
}
debugMes("Old-to-new-path mappings: " + old_to_new_path, 15);
// get list of all old/new path pairs
List<PathWithOrig> revised_paths = new ArrayList<PathWithOrig>(orig_path_to_updated_path.values());
// now, create new pair paths based on updated mappings.
HashMap<PairPath,Integer> updated_pairPaths = new HashMap<PairPath,Integer>();
HashMap<PairPath,PairPath> old_pp_to_new_pp = new HashMap<PairPath,PairPath>();
for (PairPath pp : pairPathToReadSupport.keySet()) {
Integer read_support = pairPathToReadSupport.get(pp);
debugMes("update_PairPaths_using_overlapDAG_refined_paths: orig_pp: " + pp + " has support: " + read_support, 20);
PairPath new_pp;
List<List<Integer>> p1_list = new ArrayList<List<Integer>>();
List<Integer> p1 = pp.getPath1();
if (old_to_new_path.containsKey(p1)) {
p1_list.add(old_to_new_path.get(p1));
}
else {
// might not be a unique path!! (eg. single original nodes now ending up in multiple places)
p1_list = get_all_possible_updated_path_mappings(p1, revised_paths);
debugMes("update_PairPaths_using_overlapDAG_refined_paths, p1: " + p1 + " mapped to: " + p1_list, 20);
}
List<List<Integer>> p2_list = new ArrayList<List<Integer>>();
if (pp.hasSecondPath()) {
List<Integer> p2 = pp.getPath2();
if (old_to_new_path.containsKey(p2)) {
p2 = old_to_new_path.get(p2);
p2_list.add(p2);
}
else {
p2_list = get_all_possible_updated_path_mappings(p2, revised_paths);
}
// create new pair lists
// restrict pair paths to those where each path maps uniquely
if (p1_list.size() == 1 && p2_list.size() == 1) {
List<Integer> p1_path = p1_list.get(0);
List<Integer> p2_path = p2_list.get(0);
new_pp = new PairPath(p1_path, p2_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp); // FIXME: need to allow for multiple mappings here wrt long reads
}
else {
// add each path separately if not already seen
for (List<Integer> p1_path : p1_list) {
if (! updated_pairPaths.containsKey(p1_path)) {
new_pp = new PairPath(p1_path);
updated_pairPaths.put(new_pp, 1);
}
}
for (List<Integer> p2_path : p2_list) {
if (! updated_pairPaths.containsKey(p2_path)) {
new_pp = new PairPath(p2_path);
updated_pairPaths.put(new_pp, 1);
}
}
}
/* orig
for (List<Integer> p1_path : p1_list) {
for (List<Integer> p2_path : p2_list) {
new_pp = new PairPath(p1_path, p2_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp); // FIXME: need to allow for multiple mappings here wrt long reads
}
}
*/
}
else {
// only individual paths
for (List<Integer>p1_path : p1_list) {
new_pp = new PairPath(p1_path);
updated_pairPaths.put(new_pp, read_support);
old_pp_to_new_pp.put(pp, new_pp);
}
}
}
//update_long_read_path_mappings(old_pp_to_new_pp);
HashMap<Integer, HashMap<PairPath, Integer>> new_combinedReadHash = construct_combinedReadhHash_from_PairPath_list(updated_pairPaths);
return(new_combinedReadHash);
}
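// Remaps the long-read bookkeeping (LONG_READ_PATH_MAP and LONG_READ_NAME_TO_PPath)
// from old PairPaths to their updated counterparts.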
private static void update_long_read_path_mappings(
HashMap<PairPath, PairPath> old_pp_to_new_pp) {
debugMes("LONG_READ_PATH_MAP is:" + LONG_READ_PATH_MAP, 10);
debugMes("LONG_READ_NAME_TO_PPath is : " + LONG_READ_NAME_TO_PPath, 10);
HashMap<PairPath,ArrayList<String>> updated_LONG_READ_PATH_MAP = new HashMap<PairPath,ArrayList<String>>(); // PairPath => ArrayList(long_reads_names)
HashMap<String, PairPath> updated_LONG_READ_NAME_TO_PPath = new HashMap<String,PairPath>(); // string => PairPath
for (String long_read_name : LONG_READ_NAME_TO_PPath.keySet()) {
PairPath pp = LONG_READ_NAME_TO_PPath.get(long_read_name);
PairPath updated_pp = old_pp_to_new_pp.get(pp);
updated_LONG_READ_NAME_TO_PPath.put(long_read_name, updated_pp);
if (! updated_LONG_READ_PATH_MAP.containsKey(updated_pp)) {
updated_LONG_READ_PATH_MAP.put(updated_pp, new ArrayList<String>());
}
updated_LONG_READ_PATH_MAP.get(updated_pp).add(long_read_name);
}
// replace old versions with updated versions.
LONG_READ_PATH_MAP = updated_LONG_READ_PATH_MAP;
LONG_READ_NAME_TO_PPath = updated_LONG_READ_NAME_TO_PPath;
debugMes("LONG_READ_PATH_MAP updated to:" + updated_LONG_READ_PATH_MAP, 10);
debugMes("LONG_READ_NAME_TO_PPath updated to : " + updated_LONG_READ_NAME_TO_PPath, 10);
return;
}
private static List<Integer> update_path_mappings(List<Integer> p1,
List<PathWithOrig> revised_paths) {
PathWithOrig pwo_needs_updating = new PathWithOrig(p1);
for (PathWithOrig pwo : revised_paths) {
PathWithOrig updated_pwo = pwo_needs_updating.align_path_by_orig_id(pwo);
if (updated_pwo != null) {
return(updated_pwo.getVertexList());
}
}
throw new RuntimeException("Unable to remap read: " + p1 + " given: " + revised_paths);
}
private static List<List<Integer>> get_all_possible_updated_path_mappings(
List<Integer> p1,
List<PathWithOrig> revised_paths) {
List<List<Integer>> all_path_mappings = new ArrayList<List<Integer>>();
PathWithOrig pwo_needs_updating = new PathWithOrig(p1);
for (PathWithOrig pwo : revised_paths) {
PathWithOrig updated_pwo = pwo_needs_updating.align_path_by_orig_id(pwo);
if (updated_pwo != null) {
List<Integer> updated_path = updated_pwo.getVertexList();
if (! all_path_mappings.contains(updated_path)) {
all_path_mappings.add(updated_path);
}
}
}
if (all_path_mappings.isEmpty()) {
throw new RuntimeException("Unable to remap read: " + p1 + " given: " + revised_paths);
}
else {
return(all_path_mappings);
}
}
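// Expands each path node into a chain of fresh SeqVertex copies, wires overlapping
// paths together via DFS, then iteratively "zips" together adjacent copies that share
// the same original vertex ID (zip-up toward parents, zip-down toward children) until
// no further merges are possible, verifying the graph remains a DAG throughout.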
private static HashMap<Path,PathWithOrig> convert_path_DAG_to_SeqVertex_DAG(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<String, PathOverlap> pathMatches,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
String dot_file_prefix,
String graphName,
boolean createMiddleDotFiles) {
debugMes("SECTION\n======== Convert Path-DAG to SeqVertex-DAG ============\n\n", 5);
// init seqvertex graph to contain all nodes from expanded paths.
HashMap<Path,List<SeqVertex>> orig_path_to_SeqVertex_list = new HashMap<Path,List<SeqVertex>>();
HashMap<Path,PathWithOrig> orig_path_to_updated_path = new HashMap<Path,PathWithOrig>();
for (Path p : path_overlap_graph.getVertices()) {
List<Integer> node_id_list = p.get_vertex_list();
List<SeqVertex> vertex_listing = new ArrayList<SeqVertex>();
List<Integer> new_node_id_list = new ArrayList<Integer>();
for (Integer node_id : node_id_list) {
SeqVertex orig_vertex = SeqVertex.retrieveSeqVertexByID(node_id);
Integer new_v_id = getNextID();
SeqVertex new_v = new SeqVertex(new_v_id, orig_vertex);
vertex_listing.add(new_v);
seqvertex_graph.addVertex(new_v);
new_node_id_list.add(new_v_id);
}
orig_path_to_SeqVertex_list.put(p, vertex_listing);
PathWithOrig new_pwo = new PathWithOrig(p.getPathNodeID(), new_node_id_list, p.get_vertex_list());
debugMes("prep_for_DAG_collapse: " + new_pwo, 15);
orig_path_to_updated_path.put(p, new_pwo);
// add edges between the vertices
for (int i = 1; i < vertex_listing.size(); i++) {
SeqVertex prev_v = vertex_listing.get(i-1);
SeqVertex next_v = vertex_listing.get(i);
SimpleEdge se = new SimpleEdge(1, prev_v.getID(), next_v.getID());
seqvertex_graph.addEdge(se, prev_v, next_v);
}
}
// do a DFS-based graph reconstruction starting from a root node.
SeqVertex.set_graph(seqvertex_graph);
HashSet<Path> visited = new HashSet<Path>();
for (Path p : path_overlap_graph.getVertices()) {
if (path_overlap_graph.getPredecessorCount(p) == 0) {
// root node.
DFS_add_path_to_graph(p, seqvertex_graph, path_overlap_graph, pathMatches,
orig_path_to_SeqVertex_list, visited);
}
}
// before zippingUp
if (createMiddleDotFiles)
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_before_zippingUpSeqVertexGraph.dot", graphName, false);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
// before zipping, after topo sort
if (createMiddleDotFiles)
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_before_zippingUpSeqVertexGraph.TopoSort.dot", graphName, false);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
int zip_round = 0;
int sum_merged = 1;
while (sum_merged > 0) {
sum_merged = 0;
int count_zip_up_merged_in_round = 1;
while (count_zip_up_merged_in_round > 0) {
zip_round++;
debugMes("\n\n## Round: " + zip_round + " Zipping up.", 10);
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
init_replacement_vertices(seqvertex_graph);
// ensure DAG
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
count_zip_up_merged_in_round = zipper_collapse_DAG_zip_up(seqvertex_graph);
sum_merged += count_zip_up_merged_in_round;
debugMes("Zip up merged: " + count_zip_up_merged_in_round + " nodes.", 10);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles) {
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_zip_round_" + zip_round + "_zip_up.dot", graphName, false);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
int count_zip_down_merged_in_round = 1;
while (count_zip_down_merged_in_round > 0) {
zip_round++;
debugMes("\n\n## Round: " + zip_round + " Zipping down.", 10);
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
init_replacement_vertices(seqvertex_graph);
// ensure DAG
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
count_zip_down_merged_in_round = zipper_collapse_DAG_zip_down(seqvertex_graph);
sum_merged += count_zip_down_merged_in_round;
debugMes("Zip down merged: " + count_zip_down_merged_in_round + " nodes.", 10);
// draw the dot file for the path overlap graph:
if (createMiddleDotFiles) {
try {
writeDotFile(seqvertex_graph, dot_file_prefix + "_zip_round_" + zip_round + "_zip_down.dot", graphName, false);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
}
// test again. :)
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
// ensure DAG one last time
topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
// update the paths based on their new vertices.
// get old to new vertex id mapping
HashMap<Integer,Integer> old_vertex_id_to_new_vertex_id = new HashMap<Integer,Integer>();
for (SeqVertex v : topo_sorted_vertices) {
Integer curr_vertex_id = v.getID();
if (v.__tmp_compressed_vertices.size() > 0) {
for (Integer old_vertex : v.__tmp_compressed_vertices) {
old_vertex_id_to_new_vertex_id.put(old_vertex, curr_vertex_id);
debugMes("Old_to_new_vertex_id_mapping: " + old_vertex + " => " + curr_vertex_id, 15);
}
}
else {
old_vertex_id_to_new_vertex_id.put(curr_vertex_id, curr_vertex_id);
debugMes("Old_to_new_vertex_id_mapping: " + curr_vertex_id + " => " + curr_vertex_id + " (stays same)", 15);
}
}
// update the old paths to the new paths
for (PathWithOrig pwo : orig_path_to_updated_path.values()) {
List<Integer> old_path = pwo.getVertexList();
List<Integer> new_path = new ArrayList<Integer>();
for (Integer id : old_path) {
if (old_vertex_id_to_new_vertex_id.containsKey(id)) {
Integer new_id = old_vertex_id_to_new_vertex_id.get(id);
new_path.add(new_id);
}
else {
throw new RuntimeException("Error, no new_id mapped from: " + id + ", in path: " + pwo);
}
}
pwo.update_vertex_list(new_path);
}
return(orig_path_to_updated_path);
}
private static void init_replacement_vertices(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
for (SeqVertex v : seqvertex_graph.getVertices()) {
v.is_replacement_vertex = false;
}
return;
}
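// One bottom-up pass over the DAG (reverse topological order), attempting zip_up
// merges at every vertex; returns the number of vertices merged in this pass.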
private static int zipper_collapse_DAG_zip_up(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
int count_total_zip_merged = 0;
// do bottom-up Zipping /////
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
Collections.reverse(topo_sorted_vertices);
for (SeqVertex v : topo_sorted_vertices) {
if (v.is_replacement_vertex) { continue; }
if (! seqvertex_graph.containsVertex(v)) { continue; }
count_total_zip_merged += zip_up(seqvertex_graph, v);
}
return(count_total_zip_merged);
}
private static int zipper_collapse_DAG_zip_down(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph) {
int count_total_zip_merged = 0;
// do top-down zipping /////////////
if (graph_contains_loops(seqvertex_graph)) {
throw new RuntimeException("Error, detected cycles in seqvertex_graph, so not a DAG as expected!");
}
List<SeqVertex> topo_sorted_vertices = TopologicalSort.topoSortSeqVerticesDAG(seqvertex_graph);
for (SeqVertex v : topo_sorted_vertices) {
if (v.is_replacement_vertex) { continue; }
if (! seqvertex_graph.containsVertex(v)) { continue; }
count_total_zip_merged += zip_down(seqvertex_graph, v);
}
return(count_total_zip_merged);
}
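// If vertex v has multiple parents derived from the same original vertex, attempts to
// merge those parents into a single replacement vertex.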
private static int zip_up(
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
SeqVertex v) {
List<SeqVertex> pred_list = new ArrayList<SeqVertex>(seqvertex_graph.getPredecessors(v));
if (pred_list.size() <= 1) { return (0); } // must have multiple parents
debugMes("## zip_up()", 15);
// get list of parent nodes having the same original ID
HashMap<Integer,HashSet<SeqVertex>> pred_orig_id_to_vertex_list = new HashMap<Integer,HashSet<SeqVertex>>();
for (SeqVertex pred : pred_list) {
if (pred.is_replacement_vertex) { return(0); } // delay to next round.
if (! seqvertex_graph.containsVertex(pred)) { continue; }
Integer orig_pred_id = pred.getOrigButterflyID();
if (! pred_orig_id_to_vertex_list.containsKey(orig_pred_id)) {
pred_orig_id_to_vertex_list.put(orig_pred_id, new HashSet<SeqVertex>());
}
pred_orig_id_to_vertex_list.get(orig_pred_id).add(pred);
}
int count_zip_merged = 0;
for (HashSet<SeqVertex> pred_same_orig_id_set : pred_orig_id_to_vertex_list.values()) {
if (pred_same_orig_id_set.size() == 1) { continue; } // need multiple parents for merging
// merge them into a single node.
count_zip_merged += attempt_zip_merge_SeqVertices(pred_same_orig_id_set, seqvertex_graph, "min");
}
return(count_zip_merged);
}
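// Symmetric to zip_up: if vertex v has multiple children derived from the same original
// vertex, attempts to merge those children into a single replacement vertex.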
private static int zip_down (
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
SeqVertex v) {
List<SeqVertex> child_list = new ArrayList<SeqVertex>(seqvertex_graph.getSuccessors(v));
if (child_list.size() <= 1) { return (0); } // must have multiple children
debugMes("##zip_down()", 15);
// get list of children nodes having the same original ID
HashMap<Integer,HashSet<SeqVertex>> child_orig_id_to_vertex_list = new HashMap<Integer,HashSet<SeqVertex>>();
for (SeqVertex child : child_list) {
if (child.is_replacement_vertex) { return(0); } // delay to next round
if (! seqvertex_graph.containsVertex(child) ) { continue; }
Integer orig_child_id = child.getOrigButterflyID();
if (! child_orig_id_to_vertex_list.containsKey(orig_child_id)) {
child_orig_id_to_vertex_list.put(orig_child_id, new HashSet<SeqVertex>());
}
child_orig_id_to_vertex_list.get(orig_child_id).add(child);
}
int count_zip_merged = 0;
for (HashSet<SeqVertex> child_same_orig_id_set : child_orig_id_to_vertex_list.values()) {
if (child_same_orig_id_set.size() == 1) { continue; } // need multiple children for merging
// merge them into a single node.
count_zip_merged += attempt_zip_merge_SeqVertices(child_same_orig_id_set, seqvertex_graph, "max");
}
return(count_zip_merged);
}
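// Merges a set of vertices sharing the same original ID into one replacement vertex,
// rewiring all parent and child edges to it. The merge is skipped (returns 0) if it
// would violate the relative depth ordering between parents and children.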
private static int attempt_zip_merge_SeqVertices(HashSet<SeqVertex> pred_same_orig_id_set,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph, String dir) {
debugMes("attempt_zip_merge_SeqVertices(" + pred_same_orig_id_set + ")", 15);
Integer replacement_vertex_id = getNextID();
SeqVertex replacement_vertex_obj = null;
// get list of all parents and all children of the merge-node-targets
HashSet<SeqVertex> parent_vertices = new HashSet<SeqVertex>();
HashSet<SeqVertex> child_vertices = new HashSet<SeqVertex>();
HashSet<SimpleEdge> edges_to_delete = new HashSet<SimpleEdge>();
// track depths, must ensure we keep the relative ordering in the DAG
List<Integer> parent_depths = new ArrayList<Integer>();
List<Integer> child_depths = new ArrayList<Integer>();
List<Integer> target_depths = new ArrayList<Integer>();
for (SeqVertex v : pred_same_orig_id_set) {
int d = v.getNodeDepth();
if (d < 0) {
throw new RuntimeException("Error, seq vertex: " + v + " has negative depth setting");
}
target_depths.add(d);
for (SeqVertex p : seqvertex_graph.getPredecessors(v)) {
parent_vertices.add(p);
parent_depths.add(p.getNodeDepth());
// remove edge
SimpleEdge se = seqvertex_graph.findEdge(p, v);
edges_to_delete.add(se);
}
for (SeqVertex c: seqvertex_graph.getSuccessors(v)) {
child_vertices.add(c);
child_depths.add(c.getNodeDepth());
// remove edge
SimpleEdge se = seqvertex_graph.findEdge(v, c);
edges_to_delete.add(se);
}
if (replacement_vertex_obj == null) {
replacement_vertex_obj = new SeqVertex(replacement_vertex_id, v);
}
}
if (parent_depths.size() > 0 && child_depths.size() > 0) {
// ensure can merge and retain depth ordering:
if ( ! (max_val(parent_depths) < min_val(child_depths) ) )
{
// cannot merge, since doing so would disrupt relative ordering of nodes
return(0);
}
}
// remove the graph edges:
for (SimpleEdge se : edges_to_delete) {
seqvertex_graph.removeEdge(se);
}
// remove the nodes themselves
List<Integer> merged_vertex_ids = new ArrayList<Integer>();
for (SeqVertex v : pred_same_orig_id_set) {
merged_vertex_ids.add(v.getID());
if (v.__tmp_compressed_vertices.size() > 0) {
merged_vertex_ids.addAll(v.__tmp_compressed_vertices);
}
seqvertex_graph.removeVertex(v);
}
// add new edges to parents
for (SeqVertex p : parent_vertices) {
SimpleEdge se = new SimpleEdge(1, p.getID(), replacement_vertex_obj.getID());
seqvertex_graph.addEdge(se, p, replacement_vertex_obj);
}
// add new edges to children
for (SeqVertex c : child_vertices) {
SimpleEdge se = new SimpleEdge(1, replacement_vertex_obj.getID(), c.getID());
seqvertex_graph.addEdge(se, replacement_vertex_obj, c);
}
Integer replacement_vertex_depth = (dir.equals("min")) ? min_val(target_depths) : max_val(target_depths);
String zipDir = (dir.equals("min")) ? "Up" : "Down";
replacement_vertex_obj.setDepth(replacement_vertex_depth);
replacement_vertex_obj.setNodeDepth(replacement_vertex_depth);
replacement_vertex_obj.is_replacement_vertex = true;
// fix local environment for this round
for (SeqVertex p : parent_vertices) {
p.is_replacement_vertex = true;
}
for (SeqVertex c : child_vertices) {
c.is_replacement_vertex = true;
}
replacement_vertex_obj.__tmp_compressed_vertices.addAll(merged_vertex_ids);
debugMes(zipDir + "ZipMerging nodes: " + pred_same_orig_id_set + " to " + replacement_vertex_obj, 15);
int count_merged = pred_same_orig_id_set.size();
return(count_merged);
}
private static int max_val(List<Integer> vals) {
Integer max_val = null;
for (Integer val : vals) {
if (max_val == null || val > max_val) {
max_val = val;
}
}
return(max_val);
}
private static int min_val(List<Integer> vals) {
Integer min_val = null;
for (Integer val : vals) {
if (min_val == null || val < min_val) {
min_val = val;
}
}
return(min_val);
}
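// Recursive depth-first traversal of the path overlap graph: for each successor path,
// adds edges in the SeqVertex graph linking the overlapping suffix of the current path
// to the corresponding positions of the successor path.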
private static void DFS_add_path_to_graph(Path p,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<String, PathOverlap> pathMatches,
HashMap<Path, List<SeqVertex>> orig_path_to_SeqVertex_list,
HashSet<Path> visited) {
if (visited.contains(p)) {
// already done
return;
}
debugMes("\nDFS_path_to_graph: targeting: " + p, 15);
visited.add(p);
// Phase 1. find candidate adjacent paths for use in labeling nodes in this path.
List<Path> adjacent_untraversed_pathnodes = new ArrayList<Path>(); // for later, deciding next DFS entries
for (Path succ : path_overlap_graph.getSuccessors(p)) {
String pair_token = get_path_compare_token(p, succ);
PathOverlap po = pathMatches.get(pair_token);
int match_len = po.match_length;
// draw edge between curr last node and next node in the successor path
List<SeqVertex> curr_vertex_list = orig_path_to_SeqVertex_list.get(p);
List<SeqVertex> succ_vertex_list = orig_path_to_SeqVertex_list.get(succ);
boolean connect_all_matching_positions = true;
if (connect_all_matching_positions) {
for (int i = curr_vertex_list.size() - match_len, j = 0;
i < curr_vertex_list.size() && j < match_len;
i++,j++) {
SeqVertex curr_vertex = curr_vertex_list.get(i);
SeqVertex succ_vertex = succ_vertex_list.get(j+1);
SimpleEdge se = new SimpleEdge(1, curr_vertex.getID(), succ_vertex.getID());
seqvertex_graph.addEdge(se, curr_vertex, succ_vertex);
}
}
else {
// just the last one
SeqVertex curr_vertex = curr_vertex_list.get(curr_vertex_list.size()-1);
SeqVertex succ_vertex = succ_vertex_list.get(match_len); // linking up the prev to next+1
SimpleEdge se = new SimpleEdge(1, curr_vertex.getID(), succ_vertex.getID());
seqvertex_graph.addEdge(se, curr_vertex, succ_vertex);
}
DFS_add_path_to_graph(succ,
seqvertex_graph,
path_overlap_graph,
pathMatches,
orig_path_to_SeqVertex_list,
visited);
}
return;
}
/* orig code
private static void DFS_add_path_to_graph(Path p,
DirectedSparseGraph<SeqVertex, SimpleEdge> seqvertex_graph,
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
HashMap<Path, PathWithOrig> orig_path_to_updated_path,
HashMap<String, PathOverlap> pathMatches) {
debugMes("\nDFS_path_to_graph: targeting: " + p, 15);
//////////////////////////////////////////////////////////////////////////////////
// Phase 1. find candidate adjacent paths for use in labeling nodes in this path.
List<Path> adjacent_untraversed_pathnodes = new ArrayList<Path>(); // for later, deciding next DFS entries
// find a predecessor or successor that has the greatest overlap
// and is already part of the new graph
PathWithOrig best_predecessor_path = null;
PathOverlap best_predecessor_overlap = null;
for (Path pred : path_overlap_graph.getPredecessors(p)) {
String pair_token = get_path_compare_token(pred, p);
PathOverlap po = pathMatches.get(pair_token);
if (orig_path_to_updated_path.containsKey(pred)) {
// candidate for use as template for node assignment.
if (best_predecessor_overlap == null || best_predecessor_overlap.match_score < po.match_score) {
best_predecessor_overlap = po;
best_predecessor_path = orig_path_to_updated_path.get(pred);
}
}
else {
pred._tmp_score = po.match_score;
adjacent_untraversed_pathnodes.add(pred);
}
}
PathWithOrig best_successor_path = null;
PathOverlap best_successor_overlap = null;
for (Path succ : path_overlap_graph.getSuccessors(p)) {
String pair_token = get_path_compare_token(p, succ);
PathOverlap po = pathMatches.get(pair_token);
if (orig_path_to_updated_path.containsKey(succ)) {
// candidate for use as template for node assignment
if (best_successor_overlap == null || best_successor_overlap.match_score < po.match_score) {
best_successor_overlap = po;
best_successor_path = orig_path_to_updated_path.get(succ);
}
}
else {
succ._tmp_score = po.match_score;
adjacent_untraversed_pathnodes.add(succ);
}
}
///////////////////////////////////////////////////////////////////////
// Phase 2: Refine labeling
// init the new path:
List<Integer> new_path = new ArrayList<Integer>();
for (Integer i : p.get_vertex_list()) {
new_path.add(-1);
}
PathWithOrig new_pwo = new PathWithOrig(p.getPathNodeID(), new_path, p.get_vertex_list());
if (best_predecessor_path == null && best_successor_path == null) {
debugMes("-dfs_msg: no best predecessor or successor path, so adding orig path from scratch.", 15);
// start building the graph here.
List<Integer> updated_path = new ArrayList<Integer>();
SeqVertex prev_vertex = null;
for (Integer orig_node_id : new_pwo.getOrigVertexList()) {
SeqVertex v = SeqVertex.retrieveSeqVertexByID(orig_node_id);
Integer next_v_id = getNextID();
SeqVertex new_v = new SeqVertex(next_v_id,v);
seqvertex_graph.addVertex(new_v);
updated_path.add(next_v_id);
if (prev_vertex != null) {
// add new edge
SimpleEdge se = new SimpleEdge(1, prev_vertex.getID(), next_v_id);
seqvertex_graph.addEdge(se, prev_vertex, new_v);
}
prev_vertex = new_v;
}
new_pwo.update_vertex_list(updated_path);
}
else {
List<Integer> updated_path = new_pwo.getVertexList(); // original vertex list
// update nodes based on best matching predecessor
if (best_predecessor_path != null) {
debugMes("-dfs_msg: updating path " + p + " based on best predecessor: " + best_predecessor_path, 15);
List<Integer> predecessor_node_ids = best_predecessor_path.getVertexList();
for (int i = 0, j = predecessor_node_ids.size() - best_predecessor_overlap.match_length;
i < best_predecessor_overlap.match_length && j < predecessor_node_ids.size();
i++, j++) {
if (updated_path.get(i) != -1 && updated_path.get(i) != predecessor_node_ids.get(j)) {
throw new RuntimeException("conflict in path assignments: " + updated_path + ", " + best_predecessor_path);
}
updated_path.set(i, predecessor_node_ids.get(j));
}
}
// update nodes based on best matching successor
if (best_successor_path != null) {
debugMes("-dfs_msg: updating path " + p + " based on best successor: " + best_successor_path, 15);
List<Integer> successor_node_ids = best_successor_path.getVertexList();
for (int i = updated_path.size() - best_successor_overlap.match_length, j = 0;
j < best_successor_overlap.match_length && i < updated_path.size();
i++, j++) {
if (updated_path.get(i) != -1 && updated_path.get(i) != successor_node_ids.get(j)) {
throw new RuntimeException("conflict in path assignments: " + updated_path + "," + best_successor_path);
}
updated_path.set(i, successor_node_ids.get(j));
}
}
// add new nodes and edges for those that are path-specific here.
for (int i = 0; i < updated_path.size(); i++) {
List<Integer> orig_path = new_pwo.getOrigVertexList();
if (updated_path.get(i) == -1) {
// need new node:
SeqVertex orig_v = SeqVertex.retrieveSeqVertexByID(orig_path.get(i));
Integer new_node_id = getNextID();
SeqVertex new_v = new SeqVertex(new_node_id, orig_v);
seqvertex_graph.addVertex(new_v);
updated_path.set(i, new_node_id);
}
}
// ensure edges exist among this node set:
for (int i = 1; i < updated_path.size(); i++) {
SeqVertex prev_vert = SeqVertex.retrieveSeqVertexByID(updated_path.get(i-1));
SeqVertex curr_vert = SeqVertex.retrieveSeqVertexByID(updated_path.get(i));
SimpleEdge se = seqvertex_graph.findEdge(prev_vert, curr_vert);
if (se == null) {
se = new SimpleEdge(1, prev_vert.getID(), curr_vert.getID());
seqvertex_graph.addEdge(se, prev_vert, curr_vert);
}
}
}
orig_path_to_updated_path.put(p, new_pwo);
debugMes("-dfs_msg: newly added path is: " + new_pwo, 15);
////////////////////////////////////////////////////////////////
// phase 3: DFS to next best overlapping adjacent edge.
// get list of all edges not yet traversed
// sort by match score
// DFS them in order of match score
Collections.sort(adjacent_untraversed_pathnodes, new Comparator<Path>() {
public int compare (Path a, Path b) {
if (a._tmp_score < b._tmp_score) {
return(1);
}
else if (a._tmp_score > b._tmp_score) {
return(-1);
}
else {
return(0);
}
}
});
for (Path next_p : adjacent_untraversed_pathnodes) {
if (! orig_path_to_updated_path.containsKey(next_p)) {
DFS_add_path_to_graph(next_p, seqvertex_graph, path_overlap_graph, orig_path_to_updated_path, pathMatches);
}
}
return;
}
*/
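// Detects cycles in the path overlap graph via Dijkstra reachability (an edge p->s plus
// a path s->...->p), then greedily removes the edges participating in the most loops
// until no detected loops remain. Returns true if any edge was removed.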
private static boolean break_cycles_in_path_overlap_graph(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph) {
DijkstraShortestPath<Path, SimplePathNodeEdge> dp = new DijkstraShortestPath<Path, SimplePathNodeEdge>(path_overlap_graph);
Set<Set<SimplePathNodeEdge>> curLoops = new HashSet<Set<SimplePathNodeEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (Path p : path_overlap_graph.getVertices()) {
for (Path s : path_overlap_graph.getSuccessors(p))
{
if (dp.getDistance(s, p)!=null) // there is a connection between p->s->....->p
{
//path has all edges from v to itself thru v2
List<SimplePathNodeEdge> loopPath = dp.getPath(s, p);
// v2 is successor of v, so let's just add the v->v2 edge too, complete the full loop.
loopPath.add(0, path_overlap_graph.findEdge(p, s));
// Collect the loop edge set.
Set<SimplePathNodeEdge> loopPath_set = new HashSet<SimplePathNodeEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("Found loop: " + loopPath_set, 15);
}
}
}
}
if (curLoops.isEmpty())
return false; // no cycles to break
// process found loops
Set<SimplePathNodeEdge> allRelevantEdges = new HashSet<SimplePathNodeEdge>();
for (Set<SimplePathNodeEdge> loopPath_set : curLoops)
for (SimplePathNodeEdge e : loopPath_set)
{
e.increaseNumOfLoopsBy1();
allRelevantEdges.add(e);
}
// break complex loops
boolean res = false;
if (!allRelevantEdges.isEmpty()){
Comparator<SimplePathNodeEdge> numLoopsComparator = new NumPathNodeLoopsEdgeComparator();
PriorityQueue<SimplePathNodeEdge> edgesQ = new PriorityQueue<SimplePathNodeEdge>(allRelevantEdges.size(), numLoopsComparator);
edgesQ.addAll(allRelevantEdges);
//while there are still loops
// find the next edge that can be removed to reduce the number of loops
// updated queue: remove all edges, and update their loop content
SimplePathNodeEdge nextEtoRemove;
while ( (!curLoops.isEmpty()) && (! edgesQ.isEmpty()) )
{
//FIXME: there was a situation where curLoops was not empty,
// but edgesQ was, so I added edgesQ to the while condition. Investigate why this might happen.
// In this case, a node was involved in a self loop and a double-loop.
nextEtoRemove = edgesQ.poll();
if (path_overlap_graph.getSource(nextEtoRemove) == null
|| path_overlap_graph.getDest(nextEtoRemove) == null
|| nextEtoRemove.getNumOfLoopsInvolved() <= 0) {
continue;
}
debugMes("removing the edge " + path_overlap_graph.getSource(nextEtoRemove).getPathNodeID() + "->" +
path_overlap_graph.getDest(nextEtoRemove).getPathNodeID() + " that appears in "
+nextEtoRemove.getNumOfLoopsInvolved() + " loops",15);
// remove the loops that have this edge from curLoops
Set<Set<SimplePathNodeEdge>> removeLoops = new HashSet<Set<SimplePathNodeEdge>>();
for (Set<SimplePathNodeEdge> loopPath_set : curLoops)
if (loopPath_set.contains(nextEtoRemove))
{
debugMes("the loop "+ loopPath_set+" is now solved",15);
removeLoops.add(loopPath_set);
// update the number of loops involved in each edge
for (SimplePathNodeEdge e : loopPath_set)
e.decreaseNumOfLoopsBy1();
}
for (Set<SimplePathNodeEdge> loopPath_set : removeLoops)
curLoops.remove(loopPath_set);
//update the queue. remove all, and insert again if numLoops>0.
SimplePathNodeEdge[] relEdges = (SimplePathNodeEdge[]) edgesQ.toArray(new SimplePathNodeEdge[0]);
edgesQ.clear();
for (SimplePathNodeEdge otherE : relEdges)
if (otherE.getNumOfLoopsInvolved()>0)
edgesQ.add(otherE);
// remove this edge
path_overlap_graph.removeEdge(nextEtoRemove);
res = true;
}
}
return res;
}
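// Writes the path-overlap graph in Graphviz DOT format: one node per path (labeled by its
// path-node ID) and one line per directed edge. (The graphName argument is unused in this overload.)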
private static void writeDotFile(
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph,
String output_filename, String graphName) {
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(output_filename));
p.println("digraph G {");
Path toVertex;
//write each vertex (labeled by its path-node ID) and its outgoing edges
for (Path vertex : path_overlap_graph.getVertices())
{ //go over all vertices
String verDesc = ""+vertex.getPathNodeID() +" [label=\"" + vertex.getPathNodeID() + "\"]";
p.println("\t" + verDesc);
for (SimplePathNodeEdge edge : path_overlap_graph.getOutEdges(vertex)) //get all edges of vertex->?
{
toVertex = path_overlap_graph.getDest(edge);
p.println("\t" + vertex.getPathNodeID() + "->" + toVertex.getPathNodeID());
}
}
p.println("}");
p.close();
} catch (FileNotFoundException e) {
// dot file output is best-effort; just report the failure
e.printStackTrace();
}
}
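// Builds a directed overlap graph over the given path nodes: for every pair (i, j) where path i
// extends precursor path j (allowing repeats), the overlap is recorded in pathMatches and an edge
// is drawn from the precursor to the extending path. With store_best_extension_match_only left
// false below, every detected extension gets an edge, not just the best-scoring one.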
private static DirectedSparseGraph<Path, SimplePathNodeEdge> construct_path_overlap_graph(
List<Path> path_list, HashMap<String, PathOverlap> pathMatches, HashSet<Integer> dispersed_repeat_nodes, String dot_file_prefix, String graphName, boolean createMiddleDotFiles) {
// draw an edge between each pathNode B and the pathNode A to which B has a best-matching extension to the right.
DirectedSparseGraph<Path, SimplePathNodeEdge> path_overlap_graph = new DirectedSparseGraph<Path, SimplePathNodeEdge>();
for (Path p : path_list) {
path_overlap_graph.addVertex(p);
}
// identify repeat nodes.
HashSet<Integer> repeat_node_ids = new HashSet<Integer>();
if (! dispersed_repeat_nodes.isEmpty()) {
repeat_node_ids.addAll(dispersed_repeat_nodes);
}
for (Path path : path_list) {
HashMap<Integer,Integer> repeat_nodes_and_counts = Path.getRepeatNodesAndCounts(path.get_vertex_list());
for (Integer i : repeat_nodes_and_counts.keySet()) {
repeat_node_ids.add(i);
}
}
boolean store_best_extension_match_only = false;
for (int i = 0; i < path_list.size(); i++) {
int best_match = 0;
int best_matching_path_idx = -1;
List<Integer> best_precursor_j_indices = new ArrayList<Integer>();
for (int j = 0; j < path_list.size(); j++) {
if (i==j) {continue;}
PathOverlap path_overlap = Path.pathB_extends_pathA_allowRepeats(path_list.get(i).get_vertex_list(),
path_list.get(j).get_vertex_list(),
repeat_node_ids);
int extension_matches = path_overlap.match_score;
if (extension_matches <= 0) {
continue;
}
// i extends j
// got a match.
String path_pair_token = get_path_compare_token(path_list.get(j), path_list.get(i));
pathMatches.put(path_pair_token, path_overlap);
debugMes("PathNode Overlap Detected: [overlap: " + path_overlap.match_length + "] "
+ path_list.get(j) + " extended by " + path_list.get(i), 15);
if (! store_best_extension_match_only) {
// add edge
best_precursor_j_indices.add(j);
}
else {
// examine for best extension
if (extension_matches > best_match) {
best_match = extension_matches;
best_matching_path_idx = j;
best_precursor_j_indices.clear();
best_precursor_j_indices.add(j);
}
else if (extension_matches == best_match) {
best_precursor_j_indices.add(j);
}
}
}
// add edges between overlapping and compatible paths:
if (best_precursor_j_indices.size() > 0) {
for (Integer precursor_index : best_precursor_j_indices) {
String path_pair_token = get_path_compare_token(path_list.get(precursor_index), path_list.get(i));
PathOverlap po = pathMatches.get(path_pair_token);
debugMes("extension of: " + path_list.get(precursor_index) + " by " + path_list.get(i)
+ " has " + po.match_score + " terminal matches.", 15);
// i extends j
SimplePathNodeEdge spne = new SimplePathNodeEdge(po.match_score,
path_list.get(precursor_index).getPathNodeID(),
path_list.get(i).getPathNodeID());
path_overlap_graph.addEdge(spne, path_list.get(precursor_index), path_list.get(i));
}
}
else {
debugMes("path " + path_list.get(i) + " extends no path", 15);
}
}
return(path_overlap_graph);
}
private static String get_path_compare_token(Path pathA, Path pathB) {
String token = pathA.getPathNodeID() + ";" + pathB.getPathNodeID();
return(token);
}
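// Filters out paths that are fully contained within an already-kept (longer) path, recording each
// containment in contained_path_to_containers. Assumes the input list is sorted by descending length.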
private static List<List<Integer>> remove_containments(
List<List<Integer>> paths, HashMap<List<Integer>,
List<List<Integer>>> contained_path_to_containers) {
// paths should already be sorted by descending length
List<List<Integer>> noncontained_paths = new ArrayList<List<Integer>>();
for (List<Integer> path : paths) {
boolean contained = false;
for (List<Integer> chosen_path : noncontained_paths) {
if (Path.pathA_contains_pathB_allowRepeats(chosen_path, path)) {
contained = true;
// store containment info
if (! contained_path_to_containers.containsKey(path)) {
contained_path_to_containers.put(path, new ArrayList<List<Integer>>());
}
contained_path_to_containers.get(path).add(chosen_path);
}
}
if (! contained) {
noncontained_paths.add(path);
}
}
return(noncontained_paths);
}
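// Rebuilds a DAG from the read paths: paths are threaded into a fresh graph longest-first, and any
// edge whose addition would close a cycle (checked via Dijkstra reachability) is skipped and logged.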
private static DirectedSparseGraph<SeqVertex, SimpleEdge> construct_acyclic_graph(
DirectedSparseGraph<SeqVertex, SimpleEdge> orig_graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
DirectedSparseGraph<SeqVertex, SimpleEdge> new_graph =
new DirectedSparseGraph<SeqVertex,SimpleEdge>();
List<List<Integer>> cycle_inducing_paths = new ArrayList<List<Integer>>();
HashSet<SimpleEdge> cycle_inducing_edges = new HashSet<SimpleEdge>();
//DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(orig_graph);
for (List<Integer> path : paths) {
add_path_to_graph_disallow_cycles(orig_graph, new_graph, path, cycle_inducing_paths, cycle_inducing_edges);
}
debugMes("\n\nAll loop-inducing edges are: " + cycle_inducing_edges + "\n\ncontained in loop-inducing paths: " + cycle_inducing_paths, 10);
return(new_graph);
}
private static void add_path_to_graph_disallow_cycles(
DirectedSparseGraph<SeqVertex, SimpleEdge> orig_graph,
DirectedSparseGraph<SeqVertex, SimpleEdge> new_graph,
List<Integer> path,
List<List<Integer>> cycle_inducing_paths,
HashSet<SimpleEdge> cycle_inducing_edges) {
debugMes("-adding path to new graph: " + path, 10);
if (path.size() == 1) {
SeqVertex v = SeqVertex.retrieveSeqVertexByID(path.get(0));
new_graph.addVertex(v);
}
boolean cycle_inducing_path = false;
for (int i = 1; i < path.size(); i++) {
SeqVertex prev_node = SeqVertex.retrieveSeqVertexByID(path.get(i-1));
SeqVertex next_node = SeqVertex.retrieveSeqVertexByID(path.get(i));
boolean both_nodes_already_exist_in_graph = true;
if (! new_graph.containsVertex(prev_node)) {
new_graph.addVertex(prev_node);
both_nodes_already_exist_in_graph = false;
}
if (! new_graph.containsVertex(next_node)) {
new_graph.addVertex(next_node);
both_nodes_already_exist_in_graph = false;
}
boolean add_edge = false;
SimpleEdge se = orig_graph.findEdge(prev_node, next_node);
if (both_nodes_already_exist_in_graph) {
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(new_graph);
if (dp.getDistance(next_node, prev_node) != null) {
// adding an edge between prev->next node would create a cycle!!!
cycle_inducing_path = true;
cycle_inducing_edges.add(se);
debugMes("\t** cycle-inducing edge found: " + prev_node + " to " + next_node, 10);
}
else {
// add edge to graph.
add_edge = true;
}
}
else {
add_edge = true;
}
if (add_edge) {
new_graph.addEdge(se, prev_node, next_node);
}
}
if (cycle_inducing_path) {
cycle_inducing_paths.add(path);
debugMes("\t$$ cycle inducing path: " + path, 15);
}
}
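// Threads one read path into the new graph, adding missing vertices and edges but skipping any edge
// that would close a cycle; cycle-inducing paths and edges are recorded for reporting.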
private static boolean graph_contains_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(graph);
// These should be only those repeats that aren't evident in the individual read paths,
// since the read-evident repeats were unrolled earlier.
Set<Set<SimpleEdge>> curLoops = new HashSet<Set<SimpleEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (SeqVertex v : graph.getVertices())
{
for (SeqVertex v2 : graph.getSuccessors(v))
{
if (dp.getDistance(v2, v)!=null) // there is a connection between v->v2->... ->v
{
//path has all edges from v to itself thru v2
List<SimpleEdge> loopPath = dp.getPath(v2, v);
// v2 is successor of v, so let's just add the v->v2 edge too, complete the full loop.
loopPath.add(0, graph.findEdge(v, v2));
// capture the path IDs for debugMes reporting below.
List<Integer> pathIDs = new ArrayList<Integer>();
for (SimpleEdge e : loopPath)
pathIDs.add(graph.getDest(e).getID());
// Collect the loop edge set.
Set<SimpleEdge> loopPath_set = new HashSet<SimpleEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("adding the loop path "+pathIDs+" to the curLoops",12);
}else
{
debugMes("not adding the loop path "+pathIDs+" to the curLoops",12);
}
}
}
}
return(! curLoops.isEmpty());
}
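// Returns true if the graph still contains a cycle, i.e. some vertex can reach itself
// through one of its successors (Dijkstra reachability test).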
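// Restructures each pair path against the longest compatible read path (templates are tried
// longest-first); throws if a pair path cannot be restructured against any template, then rebuilds
// the combined read hash from the updated pair paths.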
private static HashMap<Integer, HashMap<PairPath, Integer>> reassign_read_paths_according_to_longer_path_compatibility(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
debugMes("\nSECTION\n========= Ressigning Repeat-containing Read Paths Based On Longer Path Compatibility ==========\n\n",5);
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
// convert to list of PathWithOrig
List<PathWithOrig> path_with_orig_list = new ArrayList<PathWithOrig>();
for (List<Integer> path : paths) {
path_with_orig_list.add(new PathWithOrig(path));
}
// iterate through pair paths and see if they require reassignment.
HashMap<PairPath,Integer> updated_pairpath_hmap = new HashMap<PairPath,Integer>();
for (PairPath pp : pairPaths) {
Integer read_support = pairPathToReadSupport.get(pp);
PairPathWOrig ppwo = new PairPathWOrig(pp);
boolean restructured_flag = false;
for (PathWithOrig template_pwo : path_with_orig_list) {
if (template_pwo.size() < ppwo.size()){
break;
}
PairPathWOrig ppwo_restructured = ppwo.restructure_according_to_repeat_path(template_pwo);
if (ppwo_restructured != null) {
updated_pairpath_hmap.put(ppwo_restructured.getPairPath(), read_support);
if (! ppwo_restructured.equals(ppwo)) {
debugMes("PPWO restructured from: " + ppwo + " to " + ppwo_restructured, 15);
}
restructured_flag = true;
break;
}
}
if (! restructured_flag) {
// stick with the original one.
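// (as written, the hard-fail below always fires, so the fallback put() is never reached)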
if (true) {
throw new RuntimeException("error, not restructured: " + path_with_orig_list + " and target: " + ppwo);
}
updated_pairpath_hmap.put(pp, read_support);
}
}
HashMap<Integer, HashMap<PairPath, Integer>> new_combined_read_hash = construct_combinedReadhHash_from_PairPath_list(updated_pairpath_hmap);
return(new_combined_read_hash);
}
private static HashMap<Integer, HashMap<PairPath, Integer>> construct_combinedReadhHash_from_PairPath_list(
HashMap<PairPath, Integer> pairpath_hmap) {
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash = new HashMap<Integer, HashMap<PairPath, Integer>>();
for (PairPath pp : pairpath_hmap.keySet()) {
Integer read_support = pairpath_hmap.get(pp);
Integer first_id = pp.getFirstID();
HashMap<PairPath,Integer> pp_map;
if (combinedReadHash.containsKey(first_id)) {
pp_map = combinedReadHash.get(first_id);
}
else {
pp_map = new HashMap<PairPath,Integer>();
combinedReadHash.put(first_id, pp_map);
}
pp_map.put(pp, read_support);
}
return(combinedReadHash);
}
private static void examine_out_of_order_depth_in_read_paths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
debugMes("\n\nSECTION\n==== examining node depths of read paths in DAG ======\n\n", 5);
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
int count_reads_ok = 0;
int count_reads_conflict = 0;
for (List<Integer> path : paths) {
List<List<Integer>> read_parts = new ArrayList<List<Integer>>();
List<Integer> part = new ArrayList<Integer>();
List<List<Integer>> node_depths_tracker = new ArrayList<List<Integer>>();
ArrayList<Integer> node_depths_list = new ArrayList<Integer>();
Iterator<Integer> it = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (it.hasNext()) {
Integer node_id = it.next();
SeqVertex v = getSeqVertex(graph, node_id);
if (v._node_depth < prev_depth || node_visitor.containsKey(node_id)) {
// problem...
// fracture here.
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
}
node_depths_list.add(v._node_depth);
prev_depth = v._node_depth;
node_visitor.put(node_id, true);
part.add(node_id);
}
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
node_depths_tracker.add(node_depths_list);
if (read_parts.size() > 1) {
debugMes("DAG-conflicting path: " + path + " with node_depths: " + node_depths_tracker + " into " + read_parts.size() + ": " + read_parts, 10);
count_reads_conflict++;
}
else {
count_reads_ok++;
}
}
debugMes("\n\nNum reads ok: " + count_reads_ok + "\nNum reads conflicted depths: " + count_reads_conflict, 10);
}
private static void examine_compatible_paths_debugging_only(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
// this method is only for debugging purposes.
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
List<List<Integer>> longest_paths = new ArrayList<List<Integer>>();
int NUM_TOP_LONGEST_PATHS = 2; // change to the number of longest paths to compare to.
int counter = 0;
for (List<Integer> path : paths) {
counter++;
if (counter <= NUM_TOP_LONGEST_PATHS) {
longest_paths.add(path);
}
else {
boolean compatible = false;
for (List<Integer> longer_path : longest_paths) {
if (PairPath.individual_paths_are_compatible(path, longer_path)) {
compatible = true;
break;
}
}
if (! compatible) {
debugMes("LongPathIncompat: " + path, 10);
}
}
}
}
private static HashMap<List<Integer>, Pair<Integer>> reconstruct_paths_from_collapsed_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (PairPath pp : pairPaths) {
paths.add(pp.getPath1());
if (pp.hasSecondPath()) {
paths.add(pp.getPath2());
}
}
Collections.sort(paths, new Comparator<List<Integer>>() {
public int compare(List<Integer> pathA, List<Integer> pathB) {
if (pathA.size() < pathB.size()) {
return(-1);
}
else if (pathA.size() > pathB.size()) {
return(1);
}
else {
return(0);
}
}
});
Collections.reverse(paths); // want descending by path length
List<List<Integer>> collapsed_paths = Path.collapse_compatible_paths_to_min_set(paths);
for (List<Integer> path : collapsed_paths) {
// only doing pp path1 since testing for cufflinks here in unpaired mode.
transcripts.put(path, new Pair(new Integer(1), new Integer(1)));
}
return(transcripts);
}
private static HashMap<List<Integer>, Pair<Integer>> reconstruct_paths_from_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
for (PairPath pp : pairPaths) {
// only doing pp path1 since testing for cufflinks here in unpaired mode.
transcripts.put(pp.getPath1(), new Pair(new Integer(1), new Integer(1)));
}
return(transcripts);
}
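// Iteratively ranks candidate paths by the amount of as-yet-unclaimed read content they explain,
// selecting the top path each round and marking its reads as used, to identify paths with unique
// read support.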
private static HashMap<List<Integer>, Pair<Integer>> remove_lower_ranked_paths_without_unique_read_content(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<List<Integer>, HashMap<PairPath, Integer>> finalPathsToContainedReads) {
HashMap<PairPath,Boolean> pp_used = new HashMap<PairPath,Boolean>();
HashSet<List<Integer>> all_paths = new HashSet<List<Integer>>();
// init the list of candidate paths
List<List<Integer>> paths = new ArrayList<List<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
paths.add(path);
all_paths.add(path);
}
HashMap<List<Integer>,Integer> seqLengthMap = paths_to_seq_lengths(graph, all_paths);
HashSet<List<Integer>> priority_paths_with_unique_read_content = new HashSet<List<Integer>>();
int round = 0;
while (! paths.isEmpty()) {
round++;
UniquePathContentComparator unique_path_content_comparator = new UniquePathContentComparator(paths,
pp_used, finalPathsToContainedReads,
seqLengthMap);
paths = unique_path_content_comparator.remove_paths_without_unique_read_content(paths);
if (! paths.isEmpty()) {
Collections.sort(paths, unique_path_content_comparator);
Collections.reverse(paths);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("Round[" + round + "] Paths Prioritized by Unique Read Content", 15);
for (List<Integer> path : paths) {
debugMes("Round[" + round + "] Unique=" + unique_path_content_comparator.unique_count(path) + ", path: " + path, 15);
}
debugMes("Round[ " + round + "] SELECTING Unique=" + unique_path_content_comparator.unique_count(paths.get(0))
+ ", path: " + paths.get(0) + "\n\n", 15);
}
List<Integer> p = paths.remove(0);
priority_paths_with_unique_read_content.add(p);
// mark contained paths as seen
for (PairPath pp : finalPathsToContainedReads.get(p).keySet()) {
pp_used.put(pp, true);
}
}
}
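// NOTE: as written, every path in all_paths is retained below; the priority set computed above
// is not used to filter the result.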
HashMap<List<Integer>, Pair<Integer>> paths_to_keep = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : all_paths) {
paths_to_keep.put(path, finalPaths_all.get(path));
debugMes("PathRankingFilter retaining: " + path, 15);
}
return(paths_to_keep);
}
private static HashMap<List<Integer>, Integer> paths_to_seq_lengths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashSet<List<Integer>> all_paths) {
HashMap<List<Integer>, Integer> seqLengthMap = new HashMap<List<Integer>,Integer>();
for (List<Integer> path : all_paths)
{
String seq = getPathSeq(graph,path);
seqLengthMap.put(path, seq.length());
}
return(seqLengthMap);
}
private static void removeShortOrphanNodes(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
int min_seq_length) {
List<SeqVertex> vertices_to_remove = new ArrayList<SeqVertex>();
for (SeqVertex v : graph.getVertices()) {
if (graph.getPredecessorCount(v) == 0
&&
graph.getSuccessorCount(v) == 0
&&
v.getName().length() < min_seq_length)
{
vertices_to_remove.add(v);
}
}
for (SeqVertex v : vertices_to_remove) {
debugMes("Removing short seq orphaned vertex: " + v + " from graph. Seq too short to generate a contig of min length.", 12);
graph.removeVertex(v);
}
}
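// Re-threads read pairings through the current DAG: paired paths are merged into a single combined
// path where possible, otherwise the two mates are stored as separate unpaired paths; the result is
// re-keyed by each pair path's first vertex.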
private static HashMap<Integer, HashMap<PairPath, Integer>> reorganizeReadPairings(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
describeNodes(graph);
HashMap<Integer,HashMap<PairPath,Integer>> newCombinedReadHash = new HashMap<Integer,HashMap<PairPath,Integer>> ();
for (HashMap<PairPath, Integer> pairs_n_counts : combinedReadHash.values()) {
for (PairPath pp : pairs_n_counts.keySet()) {
int read_support = pairs_n_counts.get(pp);
if (pp.hasSecondPath()) {
PairPath combinedPath = combinePaths(graph, pp.getPath1(), pp.getPath2(), dijkstraDis);
if (! combinedPath.isEmpty()) {
storePairPathByFirstVertex(combinedPath, newCombinedReadHash, read_support);
debugMes("OK pp update to new DAG: " + pp + " => " + combinedPath, 15);
}
else {
// store the read path separately
PairPath pp1 = new PairPath(pp.getPath1());
storePairPathByFirstVertex(pp1, newCombinedReadHash, read_support);
PairPath pp2 = new PairPath(pp.getPath2());
storePairPathByFirstVertex(pp2, newCombinedReadHash, read_support);
debugMes("Warning... pp: " + pp + " needed to be split into: " + pp1 + " and " + pp2, 15);
}
}
else {
storePairPathByFirstVertex(pp, newCombinedReadHash, read_support);
}
}
}
return(newCombinedReadHash);
}
private static void storePairPathByFirstVertex(PairPath pp,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, int read_support) {
Integer firstV = pp.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); //init
if (!combinedReadHash.get(firstV).containsKey(pp))
combinedReadHash.get(firstV).put(pp, 0); //add pairpath
Integer counts = combinedReadHash.get(firstV).get(pp);
combinedReadHash.get(firstV).put(pp, counts + read_support); // increment counts for pairpath
debugMes("we have "+ combinedReadHash.get(firstV).get(pp)+" reads supporting the path: " + pp,18);
}
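// Unrolls terminal self loops: where a vertex's single predecessor and single successor are the same
// repeat copy of itself, the repeat vertex is cloned, the loop edge is redirected onto the clone, and
// the repeat's outgoing non-self edges are copied over; affected read paths are then reassigned.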
private static int unroll_remaining_terminal_self_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
int num_self_loops_unrolled = 0;
List<SeqVertex> all_vertices = new ArrayList<SeqVertex>(graph.getVertices());
HashMap<Integer,Boolean> unrolled_terminal_vertices = new HashMap<Integer,Boolean>();
for (SeqVertex v : all_vertices) {
debugMes("Examining node: " + v.getShortSeqWconnectingIDs(graph), 12);
Collection<SeqVertex> successors = graph.getSuccessors(v);
Collection<SeqVertex> preds = graph.getPredecessors(v);
if (successors.size() == 1 && preds.size() == 1
&& successors.containsAll(preds)) {
// a terminal repeat node.
SeqVertex repeat_vertex = successors.iterator().next();
if (repeat_vertex.getOrigButterflyID() != v.getOrigButterflyID()) {
debugMes("not a terminal self loop, skipping..."
+ v, 12);
continue;
}
debugMes("Removing terminal self loop at vertex: " + v, 12);
SeqVertex new_v = new SeqVertex(getNextID(), repeat_vertex); // this constructor sets orig_id so it's the same.
SimpleEdge loop_edge = graph.findEdge(v, repeat_vertex);
graph.addVertex(new_v);
SimpleEdge new_edge = new SimpleEdge(loop_edge.getWeight(), v.getID(), new_v.getID());
graph.addEdge(new_edge, v, new_v);
new_edge.increment_repeat_unroll_weight(2);
num_self_loops_unrolled++;
unrolled_terminal_vertices.put(v.getID(), true);
unrolled_terminal_vertices.put(repeat_vertex.getID(), true); // flag for possible read reassignment
// copy over any outgoing non-self edges from the repeat vertex
for (SeqVertex succ : graph.getSuccessors(repeat_vertex)) {
if (succ.getOrigButterflyID() != new_v.getOrigButterflyID()) {
SimpleEdge se = graph.findEdge(repeat_vertex, succ);
SimpleEdge new_se = new SimpleEdge(se.getWeight(), new_v.getID(), succ.getID());
graph.addEdge(new_se, new_v, succ);
new_se.increment_repeat_unroll_weight(2);
unrolled_terminal_vertices.put(succ.getID(), true); // flag for possible reassignment
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
try {
writeDotFile(graph, "__terminal_loop_unroll-preReadReassignment.dot", "ladeda", false);
} catch (Exception e) {
// dot file output is best-effort; just report the failure
e.printStackTrace();
}
}
int num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, unrolled_terminal_vertices,
null, false);
debugMes("num paths with terminal self loop vertices and paths redefined: " + num_paths_redefined, 12);
/*
if (num_paths_redefined > 0)
redefine_all_graph_edges(graph, combinedReadHash);
*/
return(num_self_loops_unrolled);
}
private static void describeVertices(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("## Node descriptions:", 10);
List<SeqVertex> vertices = new ArrayList<SeqVertex>(graph.getVertices());
Collections.sort(vertices, SeqVertexIDorderComparator);
for (SeqVertex v : vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 10);
}
}
private static void describeNodes(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex v : graph.getVertices()) {
debugMes("NODE_DESCR: " + v.getShortSeqWconnectingIDs(graph), 5);
}
}
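// Main repeat-unrolling loop: repeatedly finds repeat-containing pair paths, clones the repeated
// vertices along each path so that repeats are expanded into a chain, verifies each restructured path
// re-threads through the graph with a lower repeat score, reassigns the affected read paths, and
// recomputes node depths; once no repeats remain, read-path placements are refined until stable.
// Returns true if any unrolling was performed.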
private static boolean unroll_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, My_DFS dfs) {
debugMes("\n\nUNROLLING LOOPS IN READS\n\n", 5);
HashMap<Integer,Boolean> all_repeat_related_nodes = new HashMap<Integer,Boolean>();
// get the repeat nodes assigned to the pp containing it the greatest number of times.
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
if (repeat_node_id_to_longest_path.isEmpty()) {
debugMes("\t** no repeats detected in the reads. No repeat unrolling needed here.", 10);
return(false);
}
int unroll_loop_counter = 0;
while (repeat_node_id_to_longest_path.size() > 0) {
unroll_loop_counter++;
debugMes("\n\n## Unrolling loops, round: " + unroll_loop_counter, 10);
if (repeat_node_id_to_longest_path.size() > 0) {
debugMes("\n\nFound : " + repeat_node_id_to_longest_path.size() + " repeat nodes.", 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12) {
for (Integer node_id : repeat_node_id_to_longest_path.keySet()) {
PairPath pp = repeat_node_id_to_longest_path.get(node_id);
System.err.println("Repeat Node: " + node_id + " found in longest pp: " + pp);
}
}
}
// unroll each repeat unit
HashMap<Integer,Boolean> restructured_nodes = new HashMap<Integer,Boolean>();
// prioritize the paths according to the number of different repeat nodes.
HashMap<PairPath,Float> pp_uniq_repeat_count_hmap = new HashMap<PairPath,Float>();
for (Integer repeat_node : repeat_node_id_to_longest_path.keySet()) {
all_repeat_related_nodes.put(repeat_node, true);
PairPath pp = repeat_node_id_to_longest_path.get(repeat_node);
float repeat_count_sum = pp.getRepeatNodesAndCountSum();
// also prioritize by path length
int max_path_length = pp.getMaxPathLength();
repeat_count_sum += (float)max_path_length/1000.0; // simple way to augment score based on length of longest path.
if (! pp_uniq_repeat_count_hmap.containsKey(pp)) {
pp_uniq_repeat_count_hmap.put(pp, repeat_count_sum); // all repeat nodes counted, do once per pp
}
}
final HashMap<PairPath,Float> pp_uniq_repeat_count_hmap_copy = new HashMap<PairPath,Float>(pp_uniq_repeat_count_hmap);
List<PairPath> pp_list_ordered_by_repeat_counts = new ArrayList<PairPath>(pp_uniq_repeat_count_hmap.keySet());
Collections.sort(pp_list_ordered_by_repeat_counts, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
float rpt_node_count_A = pp_uniq_repeat_count_hmap_copy.get(pp_A);
float rpt_node_count_B = pp_uniq_repeat_count_hmap_copy.get(pp_B);
if (rpt_node_count_A > rpt_node_count_B)
return -1;
else if (rpt_node_count_A < rpt_node_count_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pp_list_ordered_by_repeat_counts) {
debugMes("Unrolling repeats in pp: " + pp, 12);
for (List<Integer> path : pp.get_paths()) {
if (path.size() == 0) // empty path in a pp
continue;
// if the path contains any previously restructured node, skip it.
if (Path.contains_any_node_id(path, restructured_nodes.keySet())) {
debugMes("\t-postponing unroll since contains restructured node.\n", 12);
continue;
}
debugMes("Unrolling repeats in pp: " + pp + " with repeat nodes: " + pp.getRepeatNodesAndCounts(), 12);
// find the repeat nodes and restructure the graph.
List<SeqVertex> path_vertices = new ArrayList<SeqVertex>();
HashMap<Integer,Boolean> seen_repeat_node = new HashMap<Integer,Boolean>();
boolean restructured_flag = false;
int node_pos = 0;
for (Integer node_id : path) {
node_pos++;
SeqVertex v = getSeqVertex(graph, node_id);
if (node_pos != path.size()) {
// don't do the very last node if it's a repeat... leave for another pp to unravel it.
if (seen_repeat_node.containsKey(node_id)) {
// ok - tackling this repeat node.
// make a copy of the vertex
v = new SeqVertex(getNextID(), v); // this constructor sets orig_id so it's the same.
debugMes("\tcopying node: " + node_id + " to " + v.getID(), 12);
all_repeat_related_nodes.put(v.getID(), true);
graph.addVertex(v);
restructured_nodes.put(node_id, true);
restructured_flag = true;
}
else {
if (repeat_node_id_to_longest_path.containsKey(node_id)) {
seen_repeat_node.put(node_id, true);
}
}
} // endif last node of path
path_vertices.add(v);
} //end of for node_id : path
if (restructured_flag) {
// add edges between neighboring nodes
for (int i = 0; i < path_vertices.size()-1; i++) {
SeqVertex before_node = path_vertices.get(i);
SeqVertex after_node = path_vertices.get(i+1);
SimpleEdge edge = graph.findEdge(before_node, after_node);
if (edge == null) {
// add it.
// get the original edge and copy the weight over.
SimpleEdge orig_edge = graph.findEdge(getSeqVertex(graph, before_node.getOrigButterflyID()), getSeqVertex(graph, after_node.getOrigButterflyID()));
double oldW = 1;
if (orig_edge != null)
oldW = orig_edge.getWeight();
edge = new SimpleEdge(oldW, before_node.getID(), after_node.getID());
graph.addEdge(edge, before_node, after_node);
}
edge.increment_repeat_unroll_weight(unroll_loop_counter); // was 1
}
// describe the new vertex list:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("# Restructured path described:", 20);
for (SeqVertex v : path_vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 20);
}
}
// Verify that this path can be properly reassigned in the graph.
debugMes("\nVerifying that restructured path: " + path + " is rethreaded through the graph with fewer repeat units.", 12);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
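// note: as currently written, reassign_restructured_path_in_graph() throws rather than returning
// null, so updated_path can be dereferenced safely below.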
String orig_path_seq = getPathSeq(graph, path);
String new_path_seq = getPathSeq(graph, updated_path);
if (! orig_path_seq.equals(new_path_seq)) {
throw new RuntimeException("Error, updated path seq != orig path seq:\n>Orig\n" + orig_path_seq + "\n>New\n" + new_path_seq);
}
else {
debugMes("* old and new path seqs are identical. validated. " + new_path_seq, 15);
}
if (updated_path == null
||
( path.size() > 1 &&
(updated_path.equals(path)
||
Path.countNumNodesNotUnique(path) <= Path.countNumNodesNotUnique(updated_path)
)
)
) {
throw new RuntimeException("Repeat-unrolled path: " + path
+ " " + Path.getRepeatNodesAndCounts(path)
+ " was not properly restructured: "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path));
}
else {
debugMes("\tVerification OK: path:" + path + " " + Path.getRepeatNodesAndCounts(path) +
" => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path) + "\n", 12);
}
} // end if restructured_flag
} // end for path
} // end for pp
// reassign repeat nodes to their new nodes in the graph
debugMes("\n\n## Post-unroll round: " + unroll_loop_counter + ", reassigning_repeat_nodes_in_reads\n", 10);
// restrict unrolling to just those containing as of yet unrolled repeats
int num_paths_updated = reassign_repeat_nodes_in_reads(graph, combinedReadHash,
restructured_nodes, null, true);
if (num_paths_updated == 0)
throw new RuntimeException("Error, no paths were updated after this round of repeat unrolling");
debugMes("\n\nNumber of paths refined: " + num_paths_updated, 10);
//redefine_all_graph_edges(graph, combinedReadHash); // prune out the now unsupported edges post reassignment.
dfs.runDFS2(); // reassign depths given new nodes added.
if (true) {
String filename = FILE + ".repeatUnroll_" + unroll_loop_counter + ".dot";
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(filename));
writeDotFile(graph,p,"repeatUnroll_" + unroll_loop_counter);
p.close();
} catch (FileNotFoundException e) {
// dot file output is best-effort; just report the failure
e.printStackTrace();
}
}
// look for remaining repeats
repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
} // end while repeats
// all repeats are now unrolled.
// further refine paths in case better scoring path for a read is to be found.
int num_paths_redefined = 1;
HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list = new HashMap<PairPath,Boolean>(); // now we use the skip list: the graph isn't changing, so a path won't change unless it finds a better placement given the unrolled edge weights.
int refinement_round = 0;
while (num_paths_redefined > 0) {
refinement_round++;
// reassign paths for all reads containing restructured nodes.
num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, all_repeat_related_nodes,
pp_remains_unchanged_skip_list, false);
debugMes("unroll_loops::PATH_REFINEMENT_ROUND: " + refinement_round + " NUMBER_PATHS_REDEFINED: " + num_paths_redefined, 10);
/*
if (num_paths_redefined > 0
|| refinement_round == 1
)
{ // be sure to do this at least once!!!
redefine_all_graph_edges(graph, combinedReadHash);
}
*/
}
return(true);
}
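// Like unroll_loops(), but targets repeats that remain only at the final position of a read path:
// the terminal repeat node is cloned and re-linked, each restructured path is verified to re-thread
// through the graph without repeats, and read paths are reassigned until no further changes occur.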
private static boolean unroll_remaining_terminal_loops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
// should be targeting only those repeat nodes that show up at final positions in a read.
debugMes("\n\nUNROLLING REMAINING LOOPS IN READS\n\n", 5);
HashMap<Integer,Boolean> all_repeat_related_nodes = new HashMap<Integer,Boolean>();
// get the repeat nodes assigned to the pp containing it the greatest number of times.
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = find_repeat_containing_pairpaths(combinedReadHash);
if (repeat_node_id_to_longest_path.isEmpty()) {
debugMes("\t** no repeats detected in the reads. No repeat unrolling needed here.", 10);
return(false);
}
int unroll_loop_counter = 0;
while (repeat_node_id_to_longest_path.size() > 0) {
unroll_loop_counter++;
debugMes("\n\n## Unrolling remaining terminal loops, round: " + unroll_loop_counter, 10);
if (repeat_node_id_to_longest_path.size() > 0) {
debugMes("\n\nFound : " + repeat_node_id_to_longest_path.size() + " repeat nodes.", 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12) {
for (Integer node_id : repeat_node_id_to_longest_path.keySet()) {
PairPath pp = repeat_node_id_to_longest_path.get(node_id);
System.err.println("Repeat Node: " + node_id + " found in longest pp: " + pp);
}
}
}
// unroll each repeat unit
HashMap<Integer,Boolean> restructured_nodes = new HashMap<Integer,Boolean>();
// prioritize the paths according to the number of different repeat nodes.
HashMap<PairPath,Float> pp_uniq_repeat_count_hmap = new HashMap<PairPath,Float>();
for (Integer repeat_node : repeat_node_id_to_longest_path.keySet()) {
all_repeat_related_nodes.put(repeat_node, true);
PairPath pp = repeat_node_id_to_longest_path.get(repeat_node);
float repeat_count_sum = pp.getRepeatNodesAndCountSum();
// also prioritize by path length
int max_path_length = pp.getMaxPathLength();
repeat_count_sum += (float)max_path_length/1000.0; // simple way to augment score based on length of longest path.
if (! pp_uniq_repeat_count_hmap.containsKey(pp)) {
pp_uniq_repeat_count_hmap.put(pp, repeat_count_sum); // all repeat nodes counted, do once per pp
}
}
final HashMap<PairPath,Float> pp_uniq_repeat_count_hmap_copy = new HashMap<PairPath,Float>(pp_uniq_repeat_count_hmap);
List<PairPath> pp_list_ordered_by_repeat_counts = new ArrayList<PairPath>(pp_uniq_repeat_count_hmap.keySet());
Collections.sort(pp_list_ordered_by_repeat_counts, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
float rpt_node_count_A = pp_uniq_repeat_count_hmap_copy.get(pp_A);
float rpt_node_count_B = pp_uniq_repeat_count_hmap_copy.get(pp_B);
if (rpt_node_count_A > rpt_node_count_B)
return -1;
else if (rpt_node_count_A < rpt_node_count_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pp_list_ordered_by_repeat_counts) {
debugMes("Unrolling repeats in pp: " + pp, 12);
for (List<Integer> path : pp.get_paths()) {
if (path.size() == 0) // empty path in a pp
continue;
// if the path contains any previously restructured node, skip it.
if (Path.contains_any_node_id(path, restructured_nodes.keySet())) {
debugMes("\t-postponing unroll since contains restructured node.\n", 12);
continue;
}
debugMes("Unrolling repeats in pp: " + pp + " with repeat nodes: " + pp.getRepeatNodesAndCounts(), 12);
// find the repeat nodes and restructure the graph.
List<SeqVertex> path_vertices = new ArrayList<SeqVertex>();
HashMap<Integer,Boolean> seen_repeat_node = new HashMap<Integer,Boolean>();
boolean restructured_flag = false;
int node_pos = 0;
for (Integer node_id : path) {
node_pos++;
SeqVertex v = getSeqVertex(graph, node_id);
if (seen_repeat_node.containsKey(node_id)) {
// ok - tackling this repeat node.
// this had better be the very last node of the path, or else our logic is wrong here!!
if (node_pos != path.size())
throw new RuntimeException("Error, remaining repeat is not at last node of path: " + path);
all_repeat_related_nodes.put(node_id, true); // important, track the node ID getting restructured.
// make a copy of the vertex
v = new SeqVertex(getNextID(), v); // this constructor sets orig_id so it's the same.
debugMes("\tcopying node: " + node_id + " to " + v.getID(), 12);
graph.addVertex(v);
restructured_nodes.put(node_id, true);
restructured_flag = true;
}
else {
if (repeat_node_id_to_longest_path.containsKey(node_id)) {
seen_repeat_node.put(node_id, true);
}
}
path_vertices.add(v);
} //end of for node_id : path
if (restructured_flag) {
// add edges between neighboring nodes
for (int i = 0; i < path_vertices.size()-1; i++) {
SeqVertex before_node = path_vertices.get(i);
SeqVertex after_node = path_vertices.get(i+1);
SimpleEdge edge = graph.findEdge(before_node, after_node);
if (edge == null) {
// again, this had better be an edge to the last node in the path
if (i + 2 != path.size())
throw new RuntimeException("Error, trying to add new edge between "
+ before_node + " and " + after_node + " and not at end of path.");
// add it.
// get the original edge and copy the weight over.
SimpleEdge orig_edge = graph.findEdge(getSeqVertex(graph, before_node.getOrigButterflyID()), getSeqVertex(graph, after_node.getOrigButterflyID()));
double oldW = 1;
if (orig_edge != null)
oldW = orig_edge.getWeight();
edge = new SimpleEdge(oldW, before_node.getID(), after_node.getID());
graph.addEdge(edge, before_node, after_node);
edge.increment_repeat_unroll_weight(2); // further encourage reads to take this path where possible.
}
}
// describe the new vertex list:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 20) {
debugMes("# Restructured path described:", 20);
for (SeqVertex v : path_vertices) {
debugMes(v.getShortSeqWconnectingIDs(graph), 20);
}
}
// Verify that this path can be properly reassigned in the graph.
debugMes("\nVerifying that restructured path: " + path + " is rethreaded through the graph with fewer repeat units.", 12);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
if (updated_path == null
||
( path.size() > 1 &&
(updated_path.equals(path)
||
(Path.countNumNodesNotUnique(updated_path) != 0)
)
)
) {
throw new RuntimeException("Remaining terminal repeat-unrolled path: " + path
+ " " + Path.getRepeatNodesAndCounts(path)
+ " was not properly restructured: "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path));
}
else {
debugMes("\tVerification OK: path:" + path + " " + Path.getRepeatNodesAndCounts(path) +
" => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path) + "\n", 12);
}
} // end if restructured_flag
} // end for path
} // end for pp
if (true) {
String filename = FILE + ".TerminalRepeatUnroll_" + unroll_loop_counter + ".dot";
PrintStream p;
try {
p = new PrintStream(new FileOutputStream(filename));
writeDotFile(graph,p,"TerminalRepeatUnroll_" + unroll_loop_counter);
p.close();
} catch (FileNotFoundException e) {
// dot file output is best-effort; just report the failure
e.printStackTrace();
}
}
// reassign repeat nodes to their new nodes in the graph
debugMes("\n\n## Post-terminal-repeat-unroll round: " + unroll_loop_counter + ", reassigning_repeat_nodes_in_reads\n", 10);
int num_paths_updated = reassign_repeat_nodes_in_reads(graph, combinedReadHash,
restructured_nodes, null, true);
if (num_paths_updated == 0)
throw new RuntimeException("Error, no paths were updated after this round of repeat unrolling");
debugMes("\n\nNumber of paths refined: " + num_paths_updated, 10);
// look for remaining repeats
repeat_node_id_to_longest_path = find_repeat_containing_pairpaths_ignoreLastNode(combinedReadHash);
}
/*
// prune the extra edges that are now not supported by the repeat-unrolled reads.
debugMes("\n\n## Post-unroll round: " + unroll_loop_counter + ", redefine_all_graph_edges()\n", 10);
redefine_all_graph_edges(graph, combinedReadHash);
*/
int num_paths_redefined = 1;
HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list = new HashMap<PairPath,Boolean>(); // now we use the skip list: the graph isn't changing, so a path won't change unless it finds a better placement given the unrolled edge weights.
int refinement_round = 0;
while (num_paths_redefined > 0) {
refinement_round++;
num_paths_redefined = reassign_repeat_nodes_in_reads(graph,
combinedReadHash, all_repeat_related_nodes,
pp_remains_unchanged_skip_list, false);
debugMes("unroll_remaining_terminal_loops::PATH_REFINEMENT_ROUND: " + refinement_round + " NUMBER_PATHS_REDEFINED: " + num_paths_redefined, 10);
/*
if (num_paths_redefined > 0) {
redefine_all_graph_edges(graph, combinedReadHash);
}
*/
}
return(true);
}
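// Recomputes edge support from the current read paths: each retained edge's weight is reset to the
// total read support of the pair paths traversing it, and edges with no supporting reads are removed.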
private static void redefine_all_graph_edges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
// capture the edges supported by the repeat-unrolled paths
HashMap<SimpleEdge, Integer> edge_pp_counter = new HashMap<SimpleEdge, Integer>();
HashMap<SimpleEdge,String> edge_text = new HashMap<SimpleEdge,String>();
for (HashMap<PairPath, Integer> hmap_pp : combinedReadHash.values()) {
for (PairPath pp : hmap_pp.keySet()) {
for (List<Integer> path : pp.get_paths()) {
Integer read_support = hmap_pp.get(pp);
for (int i = 0; i < path.size() - 1; i++) {
SeqVertex from_v = getSeqVertex(graph, path.get(i));
SeqVertex to_v = getSeqVertex(graph, path.get(i+ 1));
SimpleEdge se = graph.findEdge(from_v, to_v);
if (se == null) {
throw new RuntimeException("Error, should have edge between " + from_v + " and " + to_v + " but could not be found in graph.");
}
int edge_support = read_support;
if (edge_pp_counter.containsKey(se)) {
edge_support = edge_pp_counter.get(se) + read_support;
}
edge_pp_counter.put(se, edge_support);
edge_text.put(se, "edge:"+from_v + "<->" + to_v + " support: " + edge_support);
}
}
}
}
// purge all edges not found in repeat-unrolled reads
// and reset weight of edges according to the read support of pairpaths containing them.
ArrayList<SimpleEdge> all_edges = new ArrayList<SimpleEdge>(graph.getEdges());
for (SimpleEdge se : all_edges) {
if (edge_pp_counter.containsKey(se)) {
debugMes("-Retaining edge: " + edge_text.get(se), 15);
se.setWeight(edge_pp_counter.get(se));
}
else {
debugMes("-Pruning edge: " + se, 20);
graph.removeEdge(se);
}
}
}
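// For every pair path touching a restructured node, re-threads its path(s) through the graph and
// adopts the new path only if it lowers the repeat score; changed pair paths are then re-keyed in
// combinedReadHash under their new first node. Returns the number of pair paths rewritten
// (including any flagged as unmappable).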
private static int reassign_repeat_nodes_in_reads(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
HashMap<Integer, Boolean> restructured_nodes,
HashMap<PairPath,Boolean> pp_remains_unchanged_skip_list, // optional; pass null if not wanted
Boolean restrict_to_unrolled_repeat_containing_paths
) {
Set<Integer> restructured_node_ids = restructured_nodes.keySet();
debugMes("Restructured nodes list: " + restructured_node_ids, 15);
List<PairPath> orig_pps = new ArrayList<PairPath>();
List<PairPath> updated_pps = new ArrayList<PairPath>();
List<Integer> orig_counts = new ArrayList<Integer>();
// iterate through all the reads and reassign where necessary.
int pp_counter = 0;
int total_pp = count_pp_in_combinedReadHash(combinedReadHash);
for (HashMap<PairPath,Integer> pp_hmap : combinedReadHash.values()) {
for (PairPath pp : pp_hmap.keySet()) {
if (pp_remains_unchanged_skip_list != null && pp_remains_unchanged_skip_list.containsKey(pp))
continue;
pp_counter++;
System.err.print("\rpp[" + pp_counter + "] / " + total_pp + " = " + (int) ( (float)pp_counter/total_pp * 100) + " % ");
PairPath updated_pp = new PairPath(pp);
Integer orig_count = pp_hmap.get(pp);
boolean pp_updated_flag = false;
boolean pp_not_remapped_flag = false;
for (List<Integer> path : updated_pp.get_paths()) {
if (restrict_to_unrolled_repeat_containing_paths) {
if (Path.getRepeatNodesAndCounts(path).size() == 0) {
continue; // don't try to remap it.
}
}
if (Path.contains_any_node_id(path, restructured_node_ids)) {
debugMes("Attempting to reassign repeat-node containing path: " + path, 15);
List<Integer> updated_path = reassign_restructured_path_in_graph(graph, path);
if (updated_path == null) {
// shouldn't happen now.
pp_not_remapped_flag = true;
}
else {
if ( (! updated_path.equals(path))
//Path.countNumNodesNotUnique(path) > Path.countNumNodesNotUnique(updated_path)
&&
score_path_by_repeats(updated_path, graph) < score_path_by_repeats(path, graph)
) {
debugMes("REASSIGNED_PATH: " + path + " " + Path.getRepeatNodesAndCounts(path)
+ " => " + updated_path + " " + Path.getRepeatNodesAndCounts(updated_path), 15);
// see if we just moved from a non-self terminal repeat to some other internal repeat arrangement:
if (Path.hasTerminalNonSelfRepeat(path) && (! Path.hasTerminalNonSelfRepeat(updated_path))
&&
Path.countNumNodesNotUnique(path) <= Path.countNumNodesNotUnique(updated_path) ) {
//FIXME: better understand this extremely rare edge case
// just remove the terminal repeat node, since we were unable to resolve it
debugMes("WARNING: terminal repeat node containing path just rearranged to include alternate repeat structures that should have already been resolved earlier.", 15);
path.remove(path.size()-1);
}
else {
path.clear();
path.addAll(updated_path);
}
pp_updated_flag = true;
}
else {
debugMes("Path " + path + " " + Path.getRepeatNodesAndCounts(path)
+ " remains unchanged or repeat count stayed the same => "
+ updated_path + " " + Path.getRepeatNodesAndCounts(updated_path), 15);
HashMap<Integer,Integer> rpt_nodes = Path.getRepeatNodesAndCounts(path);
if (rpt_nodes.size() > 0) {
debugMes("\t** path still contains repeat nodes: " + rpt_nodes, 15);
}
}
}
}
}
if (pp_updated_flag || pp_not_remapped_flag) {
if (pp_updated_flag) {
updated_pps.add(updated_pp);
orig_pps.add(pp);
orig_counts.add(orig_count);
debugMes("PATH updated for : " + pp + " to " + updated_pp + " orig_first: " + pp.getFirstID() + ", updated_pp.first: " + updated_pp.getFirstID(), 15);
}
else if (pp_not_remapped_flag) {
orig_pps.add(pp);
updated_pps.add(null);
orig_counts.add(-1);
}
}
else {
if (pp_remains_unchanged_skip_list != null) {
pp_remains_unchanged_skip_list.put(pp, true);
}
}
}
}
// reorganize any changes in the combinedReadHash based on original node identifiers
for (int i = 0; i < updated_pps.size(); i++) {
PairPath updated_pp = updated_pps.get(i);
// only use the orig_pp to get the first node, since data structures revolve around the actual hashmap objs.
PairPath orig_pp = orig_pps.get(i);
Integer orig_count = orig_counts.get(i);
debugMes("Reorganizing combined read hash for: orig: " + orig_pp + " to updated_pp: " + updated_pp, 15);
// remove the orig pp
Integer orig_first_node = orig_pp.getFirstID();
combinedReadHash.get(orig_first_node).remove(orig_pp);
if (combinedReadHash.get(orig_first_node).size() == 0) {
combinedReadHash.remove(orig_first_node);
}
if (updated_pp != null) {
// add the new pp
Integer new_first_node = updated_pp.getFirstID();
if (combinedReadHash.containsKey(new_first_node)) {
combinedReadHash.get(new_first_node).put(updated_pp, orig_count);
}
else {
combinedReadHash.put(new_first_node, new HashMap<PairPath,Integer>());
combinedReadHash.get(new_first_node).put(updated_pp, orig_count);
}
}
}
return(updated_pps.size());
}
private static int count_pp_in_combinedReadHash(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
int count = 0;
for (HashMap<PairPath,Integer> pp_hmap : combinedReadHash.values()) {
count += pp_hmap.size();
}
return(count);
}
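// Re-threads a read path through the (possibly restructured) graph, preferring reconstructions with
// the fewest repeated nodes (capped at the path's current repeat count); returns the first of the
// best-scoring reconstructions and throws if the path cannot be remapped at all.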
private static List<Integer> reassign_restructured_path_in_graph(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
// find a complete path in the graph.
// prefer the one with the fewest number of repeated nodes.
if (path.size() == 1) {
return(path); //FIXME: should try to assign it to its best repeat node if it's a repeat, to keep it tidy.
}
//int repeat_cap = Path.countNumOrigNodesNotUnique(path);
int repeat_cap = Path.countNumNodesNotUnique(path);
debugMes("reassign_restructed_path_in_graph(" + path + " with cap of " + repeat_cap + " num local repeats.",15);
int max_num_local_repeats = repeat_cap;
List<Integer> chosen_thus_far = new ArrayList<Integer>();
PATH_COUNTER = 0; // init global
HashMap<String,List<List<Integer>>> memoize_best_path = new HashMap<String,List<List<Integer>>>();
List<List<Integer>> complete_path = recursively_explore_graph_paths(graph, chosen_thus_far, path,
0, 0,
max_num_local_repeats, memoize_best_path);
if (complete_path != null) {
return(complete_path.get(0)); // take the first one of tied entries.
}
throw new RuntimeException("Error, couldn't remap path: " + path + " within the graph");
/*
debugMes("WARNING: couldn't remap path: " + path + " within the graph", 12);
return(null); // no remapping
*/
}
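// Depth-first enumeration of graph paths that spell out the target read path node by node. At each
// step the candidates are the unrolled copies of the target node that are successors of the last
// chosen vertex, ordered by unrolled edge weight; results are memoized per (position, last node)
// and only the reconstructions with the lowest repeat score are kept.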
private static List<List<Integer>> recursively_explore_graph_paths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> chosen_thus_far,
List<Integer> path,
Integer num_repeat_nodes,
Integer num_out_of_order,
int MAX_NUM_LOCAL_REPEATS,
HashMap<String,List<List<Integer>>> memoize_best_path) {
// int MAX_SEARCHES_FOR_PATH_REFINEMENT = 5; //FIXME: make this a global and command-line parameter
debugMes("recursively_explore_graph_paths(): pathLen: " + path.size() + ", chosen thus far: " + chosen_thus_far, 20);
String curr_node_pos_token = null;
if (chosen_thus_far.size() > 0) {
curr_node_pos_token = "" + chosen_thus_far.size() + "_" + chosen_thus_far.get(chosen_thus_far.size()-1);
if (memoize_best_path.containsKey(curr_node_pos_token)) {
return(Path.clone(memoize_best_path.get(curr_node_pos_token)));
}
}
boolean local_debug = false;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 25)
local_debug = true;
if (num_repeat_nodes > MAX_NUM_LOCAL_REPEATS) {
debugMes("\t** terminating extension, max num local repeats encountered: " + num_repeat_nodes, 20);
return (null);
}
List<List<Integer>>min_repeat_reconstructed_path_list = new ArrayList<List<Integer>>();
Float min_repeat_reconstructed_path_repeat_score = null;
List<List<Integer>>all_possible_path_reconstructions_seen = new ArrayList<List<Integer>>(); // for debugging purposes.
if (local_debug) {
System.err.println("RECURSIVELY_EXPLORE_GRAPH_PATHS: chosen_thus_far: " + chosen_thus_far + ", path: " + path);
}
if (chosen_thus_far.size() == path.size()) {
debugMes("\trecursion base case, found path: " + path, 20);
// return empty list of paths to signal base case.
return(Path.create_empty_path_list());
//return(Path.create_list_of_paths_from_single_node_id(chosen_thus_far.get(chosen_thus_far.size()-1))); // done, return last node.
}
Integer target_path_node_id = path.get(chosen_thus_far.size());
SeqVertex target_path_node = getSeqVertex(graph, target_path_node_id);
Integer current_orig_node_id = target_path_node.getOrigButterflyID();
// get the last node chosen thus far
SeqVertex last_node = null;
if (chosen_thus_far.size() > 0) {
last_node = getSeqVertex(graph, chosen_thus_far.get(chosen_thus_far.size()-1));
debugMes("EXTENDING FROM LAST_NODE: " + last_node + ", searching for an origID: " + current_orig_node_id, 20);
}
//List<SeqVertex> candidate_vertices = SeqVertex.getAllNodesHavingOriginalID(current_orig_node_id);
// only pursue multiple candidates if it's an unrolled repeat vertex in the target path
// if it's a repeat vertex that was already unrolled, add the unrolled node here.
List<SeqVertex> candidate_vertices = SeqVertex.getAllNodesHavingOriginalID(target_path_node_id);
if (candidate_vertices.isEmpty()) {
candidate_vertices.add(target_path_node);
}
if (last_node == null) {
/*
if (candidate_vertices.size() > MAX_SEARCHES_FOR_PATH_REFINEMENT) {
debugMes("Not seeding on repetitive node, skipping this path: " + path, 12); //FIXME: instead, redo seeding on non-repetitive node of this path.
return(null);
}
*/
debugMes("Initial candidate vertices based on orig_id: " + current_orig_node_id + " are " + candidate_vertices, 20);
}
else {
// restrict the list to those vertices that are direct successors to last node
// ensure there's an edge between previously chosen node and this one.
candidate_vertices = last_node.getListOfSuccessors(graph, candidate_vertices);
debugMes("\tFiltered candidate vertices for extension from: " + last_node.getID() + " are " + candidate_vertices, 20);
// now sort them by unrolled edge weights.
//if (candidate_vertices.size() > MAX_SEARCHES_FOR_PATH_REFINEMENT) {
HashMap<Integer,Double> unrolled_weights = new HashMap<Integer,Double>();
for (SeqVertex v : candidate_vertices) {
SimpleEdge se = graph.findEdge(last_node, v);
double unrolled_weight = se.get_repeat_unroll_weight();
unrolled_weights.put(v.getID(), unrolled_weight);
}
final HashMap<Integer,Double> unrolled_weights_map = new HashMap<Integer,Double>(unrolled_weights);
Collections.sort(candidate_vertices, new Comparator<SeqVertex>() {
public int compare (SeqVertex v_a, SeqVertex v_b) {
// sort candidate vertices by unrolled repeat edge weight (ascending order, as implemented here)
Integer v_a_id = v_a.getID();
Integer v_b_id = v_b.getID();
if (unrolled_weights_map.get(v_a_id) < unrolled_weights_map.get(v_b_id)) {
return(-1);
}
else if (unrolled_weights_map.get(v_a_id) > unrolled_weights_map.get(v_b_id)) {
return(1);
}
else {
return(0);
}
}
});
//candidate_vertices = candidate_vertices.subList(0, MAX_SEARCHES_FOR_PATH_REFINEMENT);
//debugMes("-restricting recursive search from " + last_node.getID() + " to " + candidate_vertices, 15);
}
if (local_debug) {
System.err.println("RECURSIVELY_EXPLORE_GRAPH_PATHS: candidate_vertices with orig_node_id: "
+ current_orig_node_id + " are: " + candidate_vertices);
}
int search_refinement_count = 0;
for (SeqVertex v : candidate_vertices) {
int local_num_repeats = num_repeat_nodes;
int local_num_out_of_order = num_out_of_order;
// all repeats should have been expanded by now.
if (chosen_thus_far.contains(v.getID())) {
if (local_debug) {
System.err.println("already chose id: " + v.getID());
}
local_num_repeats++;
}
if (last_node != null && v.getNodeDepth() < last_node.getNodeDepth()) {
local_num_out_of_order++;
}
// Take this node and explore other extensions.
search_refinement_count++;
/*
if (search_refinement_count > MAX_SEARCHES_FOR_PATH_REFINEMENT)
break;
*/
chosen_thus_far.add(v.getID());
List<List<Integer>> tied_reconstructed_paths = recursively_explore_graph_paths(graph, chosen_thus_far, path,
local_num_repeats, local_num_out_of_order, MAX_NUM_LOCAL_REPEATS,
memoize_best_path);
if (tied_reconstructed_paths != null) {
// include current node in path before scoring
tied_reconstructed_paths = Path.prepend_node_id_to_paths(chosen_thus_far.get(chosen_thus_far.size()-1), tied_reconstructed_paths); // add current node to the lowest repeat extension.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
all_possible_path_reconstructions_seen.addAll(tied_reconstructed_paths); // for debugging
}
debugMes("\nAll Paths and scores:", 15);
for (List<Integer> reconstructed_path : tied_reconstructed_paths) {
float repeated_node_score = score_path_by_repeats(reconstructed_path, graph);
debugMes("score:" + repeated_node_score + " " + reconstructed_path + " " + Path.getRepeatNodesAndCounts(reconstructed_path), 15);
if (min_repeat_reconstructed_path_repeat_score == null
|| Math.abs(min_repeat_reconstructed_path_repeat_score - repeated_node_score) < 0.00001) // consider a tie
{
if (min_repeat_reconstructed_path_repeat_score == null || repeated_node_score < min_repeat_reconstructed_path_repeat_score ) {
min_repeat_reconstructed_path_repeat_score = repeated_node_score;
}
min_repeat_reconstructed_path_list.add(reconstructed_path);
}
else if (repeated_node_score < min_repeat_reconstructed_path_repeat_score) {
min_repeat_reconstructed_path_list.clear(); // reset since have lower score (better)
min_repeat_reconstructed_path_list.add(reconstructed_path);
min_repeat_reconstructed_path_repeat_score = repeated_node_score;
}
}
}
chosen_thus_far.remove(chosen_thus_far.size()-1); // remove the last element added in prep for the next one.
}
if (! min_repeat_reconstructed_path_list.isEmpty()) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("\nALL CANDIDATE PATHS SEEN AT " + curr_node_pos_token + ":", 15);
if (all_possible_path_reconstructions_seen.size() > 1) {
debugMes("MULTIPLE CANDIDATE PATHS SEEN AT NODE", 15);
}
for (List<Integer> candidate_path : all_possible_path_reconstructions_seen) {
float candidate_path_score = score_path_by_repeats(candidate_path, graph);
debugMes("score: " + candidate_path_score + " " + candidate_path + " " + Path.getRepeatNodesAndCounts(candidate_path), 15);
}
}
debugMes("\nMinRepeat tied paths of length: " + min_repeat_reconstructed_path_list.get(0).size() + " with score: " + min_repeat_reconstructed_path_repeat_score + ":", 15);
for (List<Integer> reconstructed_path : min_repeat_reconstructed_path_list) {
debugMes(reconstructed_path + " " + Path.getRepeatNodesAndCounts(reconstructed_path), 15);
}
memoize_best_path.put(curr_node_pos_token, Path.clone(min_repeat_reconstructed_path_list));
return(min_repeat_reconstructed_path_list);
}
return(null); // no min repeat paths to report.
}
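/**
 * Heuristic scoring of a reconstructed path (lower is better), as used by
 * recursively_explore_graph_paths() above: the base score is the number of
 * non-unique (repeated) nodes, plus a small penalty for out-of-order nodes
 * (divided by 1000), minus a small bonus for the summed repeat-unrolled edge
 * weights (divided by 100).
 *
 * Hypothetical worked example: a path with 2 repeated nodes, 1 out-of-order
 * node, and an unrolled edge weight sum of 50 would score
 * 2 + 1/1000 - 50/100 = 1.501.
 */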
private static float score_path_by_repeats(
List<Integer> reconstructed_path,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
float OUT_OF_ORDER_PENALTY_FACTOR = 1000;
float UNROLLED_EDGE_USE_SCORE_FACTOR = 100;
// found a good path.
float repeated_node_score = Path.countNumNodesNotUnique(reconstructed_path);
// further penalize by the number of out-of-order nodes
repeated_node_score += Path.countNumOutOfOrder(graph, reconstructed_path) / OUT_OF_ORDER_PENALTY_FACTOR;
// take into account use of repeat-unrolled edges
double unrolled_edge_weight_sum = Path.getUnrolledEdgeWeightSum(graph, reconstructed_path);
repeated_node_score -= unrolled_edge_weight_sum / UNROLLED_EDGE_USE_SCORE_FACTOR;
return(repeated_node_score);
}
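/**
 * Scans all PairPaths in the read hash and, for every repeat node encountered,
 * records the PairPath that contains that node the greatest number of times.
 *
 * @param combinedReadHash start node id -> (PairPath -> read support)
 * @return repeat node id -> PairPath with the maximal repeat count for that node
 */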
private static HashMap<Integer, PairPath> find_repeat_containing_pairpaths (
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = new HashMap<Integer, PairPath>();
HashMap<Integer, Integer> repeat_node_id_to_max_repeat_count = new HashMap<Integer,Integer>();
for (HashMap<PairPath, Integer> pp_n_counts : combinedReadHash.values()) {
for (PairPath pp : pp_n_counts.keySet()) {
// get all pp containing a repeat node.
HashMap<Integer, Integer> repeat_node_ids_n_counts = pp.getRepeatNodesAndCounts();
// assign each repeat node to the path that contains it as a repeat the greatest number of occurrences.
if (repeat_node_ids_n_counts.size() > 0) {
debugMes("repeat_node_ids_n_counts: " + repeat_node_ids_n_counts + " , pp: " + pp + ", counts: " + pp_n_counts.get(pp), 14);
for (Integer node_id : repeat_node_ids_n_counts.keySet()) {
Integer count = repeat_node_ids_n_counts.get(node_id);
if ( (! repeat_node_id_to_max_repeat_count.containsKey(node_id))
||
repeat_node_id_to_max_repeat_count.get(node_id) < count) {
repeat_node_id_to_longest_path.put(node_id, pp);
repeat_node_id_to_max_repeat_count.put(node_id, count);
}
}
}
}
}
return(repeat_node_id_to_longest_path);
}
private static HashMap<Integer, PairPath> find_repeat_containing_pairpaths_ignoreLastNode (
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
HashMap<Integer, PairPath> repeat_node_id_to_longest_path = new HashMap<Integer, PairPath>();
HashMap<Integer, Integer> repeat_node_id_to_max_repeat_count = new HashMap<Integer,Integer>();
for (HashMap<PairPath, Integer> pp_n_counts : combinedReadHash.values()) {
for (PairPath pp : pp_n_counts.keySet()) {
// get all pp containing a repeat node.
HashMap<Integer, Integer> repeat_node_ids_n_counts = pp.getRepeatNodesAndCounts_ignoreLastNode();
// assign each repeat node to the path that contains it as a repeat the greatest number of occurrences.
if (repeat_node_ids_n_counts.size() > 0) {
debugMes("repeat_node_ids_n_counts: " + repeat_node_ids_n_counts + " , pp: " + pp + ", counts: " + pp_n_counts.get(pp), 14);
for (Integer node_id : repeat_node_ids_n_counts.keySet()) {
Integer count = repeat_node_ids_n_counts.get(node_id);
if ( (! repeat_node_id_to_max_repeat_count.containsKey(node_id))
||
repeat_node_id_to_max_repeat_count.get(node_id) < count) {
repeat_node_id_to_longest_path.put(node_id, pp);
repeat_node_id_to_max_repeat_count.put(node_id, count);
}
}
}
}
}
return(repeat_node_id_to_longest_path);
}
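/**
 * For each X-structure node that was not resolved by read-supported triplets,
 * pairs off predecessors with successors (in iterator order) and records the
 * resulting (predecessor, X node, successor) triplets in tripletMapper.
 * Intended as a fallback so that downstream triplet checks still have
 * something to work with at otherwise unresolved X structures.
 */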
private static void infer_best_triplets_across_unresolved_Xstructure(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, Boolean> xStructuresResolvedByTriplets,
HashMap<Integer, List<List<Integer>>> tripletMapper) {
for (Integer xstructure_node : xStructuresResolvedByTriplets.keySet()) {
if (! xStructuresResolvedByTriplets.get(xstructure_node)) {
debugMes("Examining unresolved X structure at: " + xstructure_node, 10);
SeqVertex v = getSeqVertex(graph, xstructure_node);
Iterator<SeqVertex> predecessors = graph.getPredecessors(v).iterator();
Iterator<SeqVertex> successors = graph.getSuccessors(v).iterator();
while(predecessors.hasNext()) {
SeqVertex p = predecessors.next();
if (successors.hasNext()) {
SeqVertex s = successors.next();
List<Integer> triplet = new ArrayList<Integer>();
triplet.add(p.getID());
triplet.add(xstructure_node);
triplet.add(s.getID());
if (! tripletMapper.containsKey(xstructure_node))
tripletMapper.put(xstructure_node, new ArrayList<List<Integer>>());
tripletMapper.get(xstructure_node).add(triplet);
debugMes("INFERRING triplet for UNRESOLVED X STRUCTURE (" + xstructure_node + ") -> " + triplet, 10);
}
}
}
}
}
/*
private static void printGraph (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex v : graph.getVertices()) {
System.out.println("Vertex: " + v.getID() + ", seq: " + v.getName());
}
}
*/
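/**
 * Caps the number of PairPaths stored per start node at
 * max_num_paths_per_start_node, retaining the most read-supported ones and
 * removing the rest from componentReadHash.
 */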
private static void reduce_to_max_paths_per_node(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
int max_num_paths_per_start_node) {
for (Integer start_node : componentReadHash.keySet() ) {
final HashMap<PairPath,Integer> pp_to_counts = componentReadHash.get(start_node);
List<PairPath> pair_paths_list = new ArrayList<PairPath>(pp_to_counts.keySet());
if (pair_paths_list.size() > max_num_paths_per_start_node) {
Collections.sort(pair_paths_list, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
// want most highly supported pairpaths to sort descendingly
int read_support_A = pp_to_counts.get(pp_A);
int read_support_B = pp_to_counts.get(pp_B);
if (read_support_A > read_support_B)
return -1;
else if (read_support_A < read_support_B)
return 1;
else
return 0;
}
});
List<PairPath> to_remove = pair_paths_list.subList(max_num_paths_per_start_node, pair_paths_list.size());
for (PairPath pp : to_remove)
componentReadHash.get(start_node).remove(pp);
}
}
}
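/**
 * Debug report: for each start node, prints the stored PairPaths sorted by
 * read support (descending) along with per-node and total counts. PairPaths
 * with a missing (null) support entry are patched to a support of 1 and
 * flagged in the debug output.
 */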
private static void report_pairpath_counts(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash) {
System.out.println("***** PairPath Counts *****");
int count_of_total_pps = 0;
for (Integer start_node : componentReadHash.keySet() ) {
int count_pp_at_node = 0;
String indiv_read_support_text = "";
final HashMap<PairPath,Integer> pp_to_counts = componentReadHash.get(start_node);
debugMes("componentReadHash, start node: " + start_node + " has size: " + pp_to_counts.size(), 12);
List<PairPath> pair_paths_list = new ArrayList<PairPath>(pp_to_counts.keySet());
for (PairPath pp : pair_paths_list) {
//debugMes("CHECKING-A: " + pp, 12);
Integer read_support = componentReadHash.get(start_node).get(pp);
//debugMes("CHECKING-B: " + pp + " has read support: " + read_support, 12);
//debugMes("pp: " + pp + " has read support: " + read_support, 12);
if (read_support == null) {
componentReadHash.get(start_node).put(pp, 1); //FIXME: shouldn't have null entries here.
debugMes("\tERROR: no support for pp: " + pp, 12);
}
}
Collections.sort(pair_paths_list, new Comparator<PairPath>() {
public int compare (PairPath pp_A, PairPath pp_B) {
// want most highly supported pairpaths to sort descendingly
int read_support_A = pp_to_counts.get(pp_A);
int read_support_B = pp_to_counts.get(pp_B);
if (read_support_A > read_support_B)
return -1;
else if (read_support_A < read_support_B)
return 1;
else
return 0;
}
});
for (PairPath pp : pair_paths_list) {
int read_support = componentReadHash.get(start_node).get(pp);
indiv_read_support_text += "\t" + pp + " has read support: " + read_support + "\n";
count_pp_at_node++;
}
System.out.println("Node: " + start_node + " has " + count_pp_at_node + " pairpaths stored:\n" + indiv_read_support_text);
count_of_total_pps += count_pp_at_node;
}
System.out.println("## Total number of pairpaths: " + count_of_total_pps);
}
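/**
 * Identifies PairPaths that still conflict with the DAG (a path that steps
 * backwards in node depth or revisits a node), removes them, fragments them
 * into DAG-consistent pieces via fragment_DAG_conflicting_pairpath(), and
 * re-inserts the pieces into componentReadHash keyed by their first node.
 *
 * @return the number of fractured reads
 */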
private static int handleRemainingCyclicReads(
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int count_of_fractured_reads = 0;
HashMap<PairPath,Integer> readParts = new HashMap<PairPath,Integer>();
for (Integer start_node : componentReadHash.keySet() ) {
List<PairPath> to_purge = new ArrayList<PairPath>();
for (PairPath pp : componentReadHash.get(start_node).keySet()) {
// check for fractured path
boolean fractured = false;
for (List<Integer> path : pp.get_paths()) {
Iterator<Integer> i = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (i.hasNext()) {
Integer node_id = i.next();
SeqVertex v = getSeqVertex(graph, node_id);
Integer depth = v.getNodeDepth();
if (prev_depth > depth || node_visitor.containsKey(node_id)) {
fractured = true;
break;
}
prev_depth = depth;
node_visitor.put(node_id, true);
}
if (fractured)
break;
}
if (fractured) {
to_purge.add(pp);
}
else {
// double check there's no cycle here:
if (pp.max_count_occurrence_individual_node_in_path(pp) > 1) {
throw new RuntimeException("Error, path:" + pp + " involves an undetected cycle");
}
}
}
if (! to_purge.isEmpty()) {
for (PairPath pp : to_purge) {
count_of_fractured_reads++;
debugMes("DAG_CONFLICTING_READ_FRAGMENTED: " + pp, 10);
componentReadHash.get(start_node).remove(pp);
List<List<Integer>> parts = fragment_DAG_conflicting_pairpath(pp, graph);
for (List<Integer> read_part : parts) {
PairPath part_pairpath = new PairPath(read_part);
if (readParts.containsKey(part_pairpath)) {
readParts.put(part_pairpath, readParts.get(part_pairpath) + 1);
}
else {
readParts.put(part_pairpath, 1);
}
}
}
}
}
// add the fragments back in.
for (PairPath pp : readParts.keySet()) {
Integer first_id = pp.getFirstID();
if (! componentReadHash.containsKey(first_id)) {
componentReadHash.put(first_id, new HashMap<PairPath,Integer>());
}
if (! componentReadHash.get(first_id).containsKey(pp)) {
componentReadHash.get(first_id).put(pp, 1);
}
else {
componentReadHash.get(first_id).put(pp, componentReadHash.get(first_id).get(pp)+1);
}
}
return count_of_fractured_reads;
}
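/**
 * Splits a DAG-conflicting PairPath at every position where node depth
 * decreases or a node is revisited, returning the resulting DAG-consistent
 * path fragments.
 */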
private static List<List<Integer>> fragment_DAG_conflicting_pairpath(
PairPath pp, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
List<List<Integer>> read_parts = new ArrayList<List<Integer>>();
List<Integer> part = new ArrayList<Integer>();
List<List<Integer>> node_depths_tracker = new ArrayList<List<Integer>>();
for (List<Integer> path : pp.get_paths()) {
if (path.isEmpty())
continue;
ArrayList<Integer> node_depths_list = new ArrayList<Integer>();
Iterator<Integer> it = path.iterator();
int prev_depth = -1;
HashMap<Integer,Boolean> node_visitor = new HashMap<Integer,Boolean>();
while (it.hasNext()) {
Integer node_id = it.next();
SeqVertex v = getSeqVertex(graph, node_id);
if (v._node_depth < prev_depth || node_visitor.containsKey(node_id)) {
// problem...
// fracture here.
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
}
node_depths_list.add(v._node_depth);
prev_depth = v._node_depth;
node_visitor.put(node_id, true);
part.add(node_id);
}
if (! part.isEmpty()) {
read_parts.add(part);
part = new ArrayList<Integer>();
}
node_depths_tracker.add(node_depths_list);
}
debugMes("FRACTURED pairpath: " + pp + " with node_depths: " + node_depths_tracker + " into " + read_parts.size() + ": " + read_parts, 10);
return read_parts;
}
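/**
 * PASA-style transcript reconstruction over the set of PairPaths:
 *   - sort PairPaths by first/last node depth in the graph,
 *   - remove contained PairPaths (their read support is credited to the container),
 *   - remove "uncertain" entries that break transitivity of the compatibility DAG,
 *   - build the left-to-right PASA trellis and report the highest scoring chain
 *     as the first transcript,
 *   - then repeatedly nucleate additional transcripts on the most read-supported
 *     PairPaths not yet represented, combining their best from/to partial paths.
 *
 * @return reconstructed transcript node-id paths, each mapped to a Pair of counts (set to (1,1) here)
 */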
private static HashMap<List<Integer>, Pair<Integer>> pasafly(
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper) {
debugMes("Beginning PasaFly",10);
PasaVertex.max_top_paths_to_store = TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_PASA_NODE;
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsSortedList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
// check first node
// use node depth in graph
// check first node
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// if here,
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsSortedList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsSortedList) {
debugMes("\t" + p, 15);
}
}
ArrayList<PasaVertex> pasaVerticesSortedList = new ArrayList<PasaVertex>();
for (PairPath pp : pairPathsSortedList) { // already sorted
int count = pairPathToReadSupport.get(pp);
pasaVerticesSortedList.add(new PasaVertex(pp, count));
}
PasaVertex [] pasaVerticesSortedArr = pasaVerticesSortedList.toArray(new PasaVertex[pasaVerticesSortedList.size()]);
PairPath[] pairPathsSortedArr = pairPathsSortedList.toArray(new PairPath[pairPathsSortedList.size()]);
// EXAMINE CONTAINMENTS
// init
ArrayList<PairPath> pairPathsContainmentsRemoved = new ArrayList<PairPath>(pairPathsSortedList);
ArrayList<PasaVertex> pasaVerticesContainmentsRemoved = new ArrayList<PasaVertex>(pasaVerticesSortedList);
debugMes("Assigning pairpath containments.", 10);
List<Integer> containments = assignPasaPairPathContainments(graph, dijkstraDis, pasaVerticesSortedArr); // vertices updated to include containment info.
debugMes("REMOVING CONTAINMENTS: " + containments, 10);
for(int i = 0; i < containments.size(); i++)
{
pasaVerticesContainmentsRemoved.remove(pasaVerticesSortedArr[containments.get(i)]);
pairPathsContainmentsRemoved.remove(pairPathsSortedArr[containments.get(i)]);
}
// EXAMINE UNCERTAINTIES THAT BREAK TRANSITIVITY
HashSet<Integer> vertices = extract_vertex_list_from_PairPaths(pairPathsContainmentsRemoved);
PairPath [] pairPathsContainmentsRemovedArr = pairPathsContainmentsRemoved.toArray(new PairPath[pairPathsContainmentsRemoved.size()]);
PasaVertex [] pasaVerticesContainmentsRemovedArr = pasaVerticesContainmentsRemoved.toArray(new PasaVertex[pasaVerticesContainmentsRemoved.size()]);
boolean[][] dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsContainmentsRemovedArr);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("PASA Consistency DAG
System.out.println(boolean_matrix_toString(dag));
}
ArrayList<PairPath> pairPathsUncertainRemoved = new ArrayList<PairPath>(pairPathsContainmentsRemoved);
ArrayList<PasaVertex> pasaVerticesUncertainRemoved = new ArrayList<PasaVertex>(pasaVerticesContainmentsRemoved);
debugMes("Identifying uncertain entries that break transitivities.", 10);
// identify and remove uncertain entries (those that break transitive compatibility relationships)
ArrayList<Integer> uncertain = getUncertainRequireOverlap(dag, pairPathsContainmentsRemovedArr, graph, dijkstraDis);
debugMes("Uncertain indices include: " + uncertain, 10);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pasaVerticesUncertainRemoved.remove(pasaVerticesContainmentsRemovedArr[uncertain.get(i)]);
pairPathsUncertainRemoved.remove(pairPathsContainmentsRemovedArr[uncertain.get(i)]);
}
HashSet<Integer> vertices_after_removed_uncertainties = extract_vertex_list_from_PairPaths(pairPathsUncertainRemoved);
if (vertices_after_removed_uncertainties.size() < vertices.size()) {
int missing_node_count = vertices.size() - vertices_after_removed_uncertainties.size();
debugMes("WARNING, MISSING: " + missing_node_count + " of " + vertices.size() + " nodes after removing uncertainties", 10);
for (Integer v : vertices) {
if (! vertices_after_removed_uncertainties.contains(v)) {
debugMes("WARNING, MISSING NODE: After removing uncertainties, missing node from graph: " + v, 10);
}
}
}
PasaVertex[] pasaVerticesUncertainRemovedArr = pasaVerticesUncertainRemoved.toArray(new PasaVertex[pasaVerticesUncertainRemoved.size()]);
PairPath[] pairPathsUncertainRemovedArr = pairPathsUncertainRemoved.toArray(new PairPath[pairPathsUncertainRemoved.size()]);
//print pair paths
debugMes("PAIR PATHS remaining after uncertainties removed
for(int i = 0; i < pairPathsUncertainRemovedArr.length; i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathsUncertainRemovedArr[i], 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsUncertainRemovedArr); // already identified containments
//print dag
debugMes("DAG after uncertainties removed
//2.2 check transitivity
if(!checkTransitivityRequireOverlap(dag, pairPathsUncertainRemovedArr, graph, dijkstraDis))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
else {
debugMes("Transitivity of compatibility graph validates.", 10);
}
// track the final vertex identifiers
HashMap<PairPath,Integer> finalVertexPositions = new HashMap<PairPath,Integer>();
for (int i = 0; i < pairPathsUncertainRemovedArr.length; i++) {
finalVertexPositions.put(pairPathsUncertainRemovedArr[i], i);
}
debugMes("build_PASA_trellis_left_to_right()", 10);
build_PASA_trellis_left_to_right(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis,
pairPathToReadSupport, tripletMapper, extendedTripletMapper);
// get highest scoring path:
debugMes("Identifying highest scoring PASA path.", 10);
ScoredPath best = null;
for (int i = 0; i < pasaVerticesUncertainRemovedArr.length; i++) {
ScoredPath sp = pasaVerticesUncertainRemovedArr[i].get_highest_scoring_fromPath();
if (best == null || sp.score > best.score) {
best = sp;
}
}
debugMes("Best score: " + best.score + ", containing entries: " + best.paths, 10);
List<Integer> best_path_vertex_list = Path.collapse_compatible_pair_paths(best.paths);
HashMap<List<Integer>, Pair<Integer>> final_transcripts = new HashMap<List<Integer>, Pair<Integer>>();
final_transcripts.put(best_path_vertex_list, new Pair<Integer>(1,1));
// remove those pairpaths included in the best path
List<PairPath> toRemove = new ArrayList<PairPath>();
for (PairPath pp : finalVertexPositions.keySet()) {
if (pp.isCompatibleAndContainedBySinglePath(best_path_vertex_list))
toRemove.add(pp);
}
for (PairPath pp : toRemove)
finalVertexPositions.remove(pp);
toRemove.clear();
// Now, extract the top combined path that contains each missing transcript
// Prioritize according to paired path support, and break ties according to representing the most additional missing entries.
debugMes("build_PASA_trellis_right_to_left()", 10);
build_PASA_trellis_right_to_left(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis, pairPathToReadSupport, tripletMapper);
List<PairPath> unrepresented_pairpaths = new ArrayList<PairPath>(finalVertexPositions.keySet());
final HashMap<PairPath,Integer>pairPathToReadSupportFixed = new HashMap<PairPath,Integer>(pairPathToReadSupport);
Collections.sort(unrepresented_pairpaths, new Comparator<PairPath>() {
public int compare(PairPath a, PairPath b) {
int count_a = pairPathToReadSupportFixed.get(a);
int count_b = pairPathToReadSupportFixed.get(b);
if (count_a == count_b) {
return(0);
}
else if (count_a > count_b) {
return(-1);
}
else {
return(1);
}
}
});
Iterator<PairPath> it = unrepresented_pairpaths.iterator();
while (it.hasNext() && ! finalVertexPositions.isEmpty()) {
PairPath pp = it.next();
if (! finalVertexPositions.containsKey(pp)) {
// recovered in a previous round
continue;
}
debugMes("Nucleating next pasa path on PP: " + pp + ", having read support: " + pairPathToReadSupportFixed.get(pp), 10);
// get the highest scoring chain that contains pp
int index = finalVertexPositions.get(pp);
List<ScoredPath> sp_from = pasaVerticesUncertainRemovedArr[index].get_all_highest_scoring_fromPath();
List<ScoredPath> sp_to = pasaVerticesUncertainRemovedArr[index].get_all_highest_scoring_toPath();
debugMes("Best combined partial paths containing pairpath: " + pp + " include: (From): "
+ sp_from + ", (To): " + sp_to, 10);
List<PairPath> combined_pp_list = new ArrayList<PairPath>();
if (sp_from.size() > 1 || sp_to.size() > 1) {
// find the combination that covers the most currently unrepresented pairpaths
combined_pp_list = find_paired_paths_with_greatest_map_support(sp_from, sp_to, finalVertexPositions);
}
else {
// single path each.
combined_pp_list.addAll(sp_from.get(0).paths);
combined_pp_list.addAll(sp_to.get(0).paths);
}
List<Integer> combined_path_vertex_list = Path.collapse_compatible_pair_paths(combined_pp_list);
final_transcripts.put(combined_path_vertex_list, new Pair<Integer>(1,1));
// remove those pairpaths included in the best path
for (PairPath p : finalVertexPositions.keySet()) {
if (p.isCompatibleAndContainedBySinglePath(combined_path_vertex_list))
toRemove.add(p);
}
for (PairPath p : toRemove)
finalVertexPositions.remove(p);
toRemove.clear();
}
return(final_transcripts);
}
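/**
 * Variant of pasafly() that stores only a single top-scoring path per PASA
 * vertex and iterates in rounds: after each round the PairPaths compatibly
 * contained by the reconstructed path are removed, and the remaining PairPaths
 * seed the next round, until none are left.
 */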
private static HashMap<List<Integer>, Pair<Integer>> pasaflyunique (
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis, HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper) {
debugMes("Beginning PasaFlyUnique",10);
PasaVertex.max_top_paths_to_store = 1; //TransAssembly_allProbPaths.MAX_NUM_PATHS_PER_PASA_NODE;
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
populate_pairpaths_and_readsupport(componentReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsSortedList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
// check first node
// use node depth in graph
// check first node
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsSortedList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsSortedList) {
debugMes("\t" + p, 15);
}
}
// start assembling
HashMap<List<Integer>, Pair<Integer>> final_transcripts = new HashMap<List<Integer>, Pair<Integer>>();
int round = 0;
while (! pairPathsSortedList.isEmpty()) {
round++;
debugMes("\n\nPasaFlyUnique, Round: " + round, 10);
ArrayList<PasaVertex> pasaVerticesSortedList = new ArrayList<PasaVertex>();
for (PairPath pp : pairPathsSortedList) { // already sorted
int count = pairPathToReadSupport.get(pp);
pasaVerticesSortedList.add(new PasaVertex(pp, count));
}
PasaVertex [] pasaVerticesSortedArr = pasaVerticesSortedList.toArray(new PasaVertex[pasaVerticesSortedList.size()]);
PairPath[] pairPathsSortedArr = pairPathsSortedList.toArray(new PairPath[pairPathsSortedList.size()]);
// EXAMINE CONTAINMENTS
// init
ArrayList<PairPath> pairPathsContainmentsRemoved = new ArrayList<PairPath>(pairPathsSortedList);
ArrayList<PasaVertex> pasaVerticesContainmentsRemoved = new ArrayList<PasaVertex>(pasaVerticesSortedList);
debugMes("Assigning pairpath containments.", 10);
List<Integer> containments = assignPasaPairPathContainments(graph, dijkstraDis, pasaVerticesSortedArr); // vertices updated to include containment info.
debugMes("REMOVING CONTAINMENTS: " + containments, 10);
for(int i = 0; i < containments.size(); i++)
{
pasaVerticesContainmentsRemoved.remove(pasaVerticesSortedArr[containments.get(i)]);
pairPathsContainmentsRemoved.remove(pairPathsSortedArr[containments.get(i)]);
}
// EXAMINE UNCERTAINTIES THAT BREAK TRANSITIVITY
PairPath [] pairPathsContainmentsRemovedArr = pairPathsContainmentsRemoved.toArray(new PairPath[pairPathsContainmentsRemoved.size()]);
PasaVertex [] pasaVerticesContainmentsRemovedArr = pasaVerticesContainmentsRemoved.toArray(new PasaVertex[pasaVerticesContainmentsRemoved.size()]);
boolean[][] dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsContainmentsRemovedArr);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10) {
debugMes("PASA Consistency DAG
System.out.println(boolean_matrix_toString(dag));
}
ArrayList<PairPath> pairPathsUncertainRemoved = new ArrayList<PairPath>(pairPathsContainmentsRemoved);
ArrayList<PasaVertex> pasaVerticesUncertainRemoved = new ArrayList<PasaVertex>(pasaVerticesContainmentsRemoved);
debugMes("Identifying uncertain entries that break transitivities.", 10);
// identify and remove uncertain entries (those that break transitive compatibility relationships)
ArrayList<Integer> uncertain = getUncertainRequireOverlap(dag, pairPathsContainmentsRemovedArr, graph, dijkstraDis);
debugMes("Uncertain indices include: " + uncertain, 10);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pasaVerticesUncertainRemoved.remove(pasaVerticesContainmentsRemovedArr[uncertain.get(i)]);
pairPathsUncertainRemoved.remove(pairPathsContainmentsRemovedArr[uncertain.get(i)]);
}
PasaVertex[] pasaVerticesUncertainRemovedArr = pasaVerticesUncertainRemoved.toArray(new PasaVertex[pasaVerticesUncertainRemoved.size()]);
PairPath[] pairPathsUncertainRemovedArr = pairPathsUncertainRemoved.toArray(new PairPath[pairPathsUncertainRemoved.size()]);
//print pair paths
debugMes("PAIR PATHS remaining after uncertainties removed
for(int i = 0; i < pairPathsUncertainRemovedArr.length; i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathsUncertainRemovedArr[i], 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathConsistencyDAG(graph, dijkstraDis, pairPathsUncertainRemovedArr); // already identified containments
//print dag
debugMes("DAG after uncertainties removed
// examine neighboring DAG
for (int i = 0; i < pairPathsUncertainRemovedArr.length-1; i++) {
if (! dag[i][i+1]) {
debugMes("NeighborDagCheck: PairPath: [" + i + "] "+ pairPathsUncertainRemovedArr[i]
+ "\n\tnot compatible with: [" + (i+1) + "] " + pairPathsUncertainRemovedArr[i+1], 10);
}
}
//2.2 check transitivity
if(!checkTransitivityRequireOverlap(dag, pairPathsUncertainRemovedArr, graph, dijkstraDis))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
else {
debugMes("Transitivity of compatibility graph validates.", 10);
}
debugMes("build_PASA_trellis_left_to_right()", 10);
build_PASA_trellis_left_to_right(pasaVerticesUncertainRemovedArr, dag, graph, componentReadHash, dijkstraDis, pairPathToReadSupport, tripletMapper, extendedTripletMapper);
// get highest scoring path:
debugMes("Identifying highest scoring PASA path.", 10);
ScoredPath best = null;
for (int i = 0; i < pasaVerticesUncertainRemovedArr.length; i++) {
ScoredPath sp = pasaVerticesUncertainRemovedArr[i].get_highest_scoring_fromPath();
if (best == null || sp.score > best.score) {
best = sp;
}
}
debugMes("Best score: " + best.score + ", containing entries: " + best.paths, 10);
List<Integer> best_path_vertex_list = Path.collapse_compatible_pair_paths(best.paths);
final_transcripts.put(best_path_vertex_list, new Pair<Integer>(1,1));
debugMes("Reconstructed path is: " + best_path_vertex_list, 10);
// remove those pairpaths included in the best path
List<PairPath> toRemove = new ArrayList<PairPath>();
for (PairPath pp : pairPathsSortedList) {
if (pp.isCompatibleAndContainedBySinglePath(best_path_vertex_list)) {
toRemove.add(pp);
debugMes("compatibly_contained_by_reconstructed_path: " + pp, 10);
}
else {
debugMes("NotCompatibleRetainedForNextRound: " + pp, 10);
}
}
for (PairPath pp : toRemove)
pairPathsSortedList.remove(pp);
}
return(final_transcripts);
}
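/**
 * Given alternative highest-scoring "from" and "to" partial paths, picks the
 * (from, to) combination whose PairPaths cover the greatest number of entries
 * still present in finalVertexPositions (i.e. not yet represented by a
 * reconstructed transcript), and returns the combined PairPath list.
 */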
private static List<PairPath> find_paired_paths_with_greatest_map_support(
List<ScoredPath> sp_from, List<ScoredPath> sp_to,
HashMap<PairPath, Integer> finalVertexPositions) {
int best_count = -1;
List<PairPath> combined = new ArrayList<PairPath>();
for (ScoredPath spA : sp_from) {
HashSet<PairPath> hA = new HashSet<PairPath>();
for (PairPath h : spA.paths) {
if (finalVertexPositions.containsKey(h)) {
hA.add(h);
}
}
for (ScoredPath spB : sp_to) {
HashSet<PairPath> hB = new HashSet<PairPath>(hA);
for (PairPath b : spB.paths) {
if (finalVertexPositions.containsKey(b)) {
hB.add(b);
}
}
if (hB.size() > best_count) {
best_count = hB.size();
combined.clear();
combined.addAll(spA.paths);
combined.addAll(spB.paths);
}
}
}
return(combined);
}
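/**
 * Left-to-right dynamic programming pass over the PASA vertices: for each
 * vertex i, every compatible, node-overlapping predecessor j donates its
 * scored "from" paths, which are extended by i's PairPath (provided the
 * extension does not violate the observed triplets) and pushed onto i with a
 * score increased by i's read support and contained-read count.
 */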
private static void build_PASA_trellis_left_to_right(
PasaVertex[] pasaVerticesArr,
boolean[][] dag,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
Map<PairPath, Integer> pairPathToReadSupport, HashMap<Integer, List<List<Integer>>> tripletMapper, HashMap<Integer, List<List<Integer>>> extendedTripletMapper
) {
for (int i = 1; i < pasaVerticesArr.length; i++) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = i - 1; j >= 0; j--) {
PasaVertex iJ = pasaVerticesArr[j];
if (! dag[j][i]) {
// see if too far apart
if (twoPairPathsAreTooFarAwayInGraph(iV.pp, iJ.pp, graph)) {
if (FAST_PASA)
break;
}
else {
continue; // must conflict
}
}
// require that they share a node in common
if (! iJ.pp.haveAnyNodeInCommon(iV.pp))
continue;
// see if we can extend paths in iJ to include pairpath represented by iV
final List<ScoredPath> sp_list = iJ.get_fromPaths();
for (ScoredPath sp : sp_list) {
// is there sufficient read support for extending this path?
//debugMes("\nnote, sp_list is of size: " + sp_list.size(), 10);
//debugMes("\nAdding path list to [iV] from [iJ] " + sp.paths, 10);
List<PairPath> extendedList = new ArrayList<PairPath>();
extendedList.addAll(sp.paths);
extendedList.add(iV.pp);
if (! violates_triplet_support(tripletMapper, extendedList)) {
iV.push_fromPaths(new ScoredPath(extendedList, (sp.score + iV.readSupport + iV.num_contained)));
sp.path_extended = true;
}
}
}
}
}
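/**
 * Builds simple predecessor/successor maps over the nodes of the candidate
 * PairPath list and checks every implied (left, center, right) triplet against
 * tripletMapper. Returns true if any implied triplet at a mapped center node
 * is unsupported. Note that the maps keep only the last observed
 * predecessor/successor per node, so this is a lightweight check rather than
 * an exhaustive one.
 */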
private static boolean violates_triplet_support(
HashMap<Integer, List<List<Integer>>> tripletMapper,
List<PairPath> extendedList) {
HashMap<Integer,Integer> prev_node = new HashMap<Integer,Integer>();
HashMap<Integer,Integer> next_node = new HashMap<Integer,Integer>();
for (PairPath pp : extendedList) {
Integer prev = -1;
for (List<Integer> path : pp.get_paths()) {
for (Integer node : path) {
if (prev >= 0) {
prev_node.put(node, prev);
next_node.put(prev, node);
}
prev = node;
}
}
}
for (Integer center : prev_node.keySet()) {
if (tripletMapper.containsKey(center) && next_node.containsKey(center)) {
Integer left = prev_node.get(center);
Integer right = next_node.get(center);
List<Integer> curr_triplet = new ArrayList<Integer>();
curr_triplet.add(left);
curr_triplet.add(center);
curr_triplet.add(right);
List<List<Integer>> triplets = tripletMapper.get(center);
if (! tripletSupported(triplets, curr_triplet)) {
debugMes("PASA TRIPLET CHECK WARNING: triplet: " + curr_triplet + " violates available triplets: " + triplets + " and so path list is not valid: " + extendedList, 15);
return(true); // yes, violates
}
}
}
return false; // no violation found.
}
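/**
 * Mirror of build_PASA_trellis_left_to_right(): walks the PASA vertices from
 * right to left, extending each vertex's scored "to" paths with compatible,
 * node-overlapping successors, again skipping extensions that violate the
 * observed triplets.
 */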
private static void build_PASA_trellis_right_to_left (
PasaVertex[] pasaVerticesArr,
boolean[][] dag,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer, HashMap<PairPath, Integer>> componentReadHash,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
Map<PairPath, Integer> pairPathToReadSupport, HashMap<Integer, List<List<Integer>>> tripletMapper
) {
for (int i = pasaVerticesArr.length-2; i >= 0; i--) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = i +1; j < pasaVerticesArr.length; j++) {
PasaVertex iJ = pasaVerticesArr[j];
if (! dag[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(iV.pp, iJ.pp, graph)) {
if (FAST_PASA)
break;
}
else {
continue; // must conflict
}
}
// require that they share a node in common
if (! iJ.pp.haveAnyNodeInCommon(iV.pp))
continue;
// see if we can extend paths in iJ to include pairpath represented by iV
final List<ScoredPath> sp_list = iJ.get_toPaths();
for (ScoredPath sp : sp_list) {
// is there sufficient read support for extending this path?
// * implement later on * //
List<PairPath> extendedList = new ArrayList<PairPath>();
extendedList.addAll(sp.paths);
extendedList.add(iV.pp);
if (! violates_triplet_support(tripletMapper, extendedList)) {
iV.push_toPaths(new ScoredPath(extendedList, (sp.score + iV.readSupport + iV.num_contained)));
}
}
}
}
}
private static String boolean_matrix_toString (boolean [][] dag) {
//print dag
if (dag.length > 200) {
debugMes("dag matrix too large to print in a useful way.", 10);
return("");
}
String dag_text = "";
for(int i = 0; i < dag.length; i++)
{
for(int j = 0; j < dag.length; j++)
{
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\r[" + i + "," + j + "] " );
dag_text += ((dag[i][j]) ? 1: 0) + " ";
}
dag_text += "\n";
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 10)
System.err.println();
return(dag_text);
}
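/**
 * Marks every PASA vertex whose PairPath shares a node with, and is compatible
 * with and contained by, another vertex's PairPath. The container vertex
 * absorbs the contained vertex's read support (num_contained), and the indices
 * of contained vertices are returned so callers can drop them.
 */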
private static List<Integer> assignPasaPairPathContainments(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PasaVertex[] pasaVerticesArr
) {
HashMap<Integer,Boolean> containments = new HashMap<Integer,Boolean>();
for (int i = 0; i < pasaVerticesArr.length; i++) {
PasaVertex iV = pasaVerticesArr[i];
for (int j = 0; j < pasaVerticesArr.length; j++) {
if (i==j)
continue;
PasaVertex iJ = pasaVerticesArr[j];
if (iJ.pp.haveAnyNodeInCommon(iV.pp) && (iV.pp.isCompatibleAndContainedByPairPath(iJ.pp, graph, dijkstraDis))) {
iJ.num_contained += iV.readSupport;
containments.put(i, true);
debugMes("Containment: " + iV.pp + " is contained by: " + iJ.pp, 10);
}
}
}
List<Integer> containment_list = new ArrayList<Integer>(containments.keySet());
return(containment_list);
}
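/**
 * Maps each final reconstructed path to the PairPaths (with their read
 * support) that are compatible with and contained by that path.
 */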
private static HashMap<List<Integer>, HashMap<PairPath, Integer>> assignCompatibleReadsToPaths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash) {
debugMes("\n\n## assignCompatibleReadsToPaths()", 20);
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathToContainedReads = new HashMap<List<Integer>, HashMap<PairPath, Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
for (HashMap<PairPath,Integer> read_map : combinedReadHash.values()) {
for (PairPath p : read_map.keySet()) {
if (p.isCompatibleAndContainedBySinglePath(path)) {
if (! pathToContainedReads.containsKey(path)) {
pathToContainedReads.put(path, new HashMap<PairPath, Integer>());
}
debugMes("assignCompatibleReadsToPaths: " + p + " is compatible with " + path, 20);
pathToContainedReads.get(path).put(p, read_map.get(p));
}
else {
debugMes("assignCompatibleReadsToPaths: " + p + " is NOT compatible with " + path, 20);
}
}
}
}
return (pathToContainedReads);
}
private static boolean containsNull(PairPath pp1)
{
return pp1.getFirstID() == null;
}
/**
* Given two pair paths, determines consistency/compatibility of two pair paths for the partial order.
* @param pp1
* @param pp2
* @param graph
* @param dijkstraDis
* @return 0 if they are not consistent, 1 if pp1 comes before pp2, -1 if pp2 comes before pp1
*/
private static int isConsistent(PairPath pp1, PairPath pp2, DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
pp1 = pp1.trimSinkNodes();
pp2 = pp2.trimSinkNodes();
debugMes("isConsistent? " + pp1 + pp2, 15);
if (pp1.equals(pp2)) { return (0); }
// If have nodes in common, require compatibility in overlapping path parts:
if(pp1.haveAnyNodeInCommon(pp2))
{
debugMes("\tHave nodes in common.", 15);
if(!pp1.isCompatible(pp2))
{
debugMes("\tNot compatible.", 15);
return 0;
}
}
// iterate through every node in pp1 and check if it's consistent
List<Integer> path1 = pp1.getPath1();
// see that pp1:path1 nodes are consistent with pp2
Iterator<Integer> itr = path1.iterator();
for(int i = 0; i < path1.size(); i++)
{
Integer n = itr.next();
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " is not consistent with node: " + n, 15);
return 0;
}
}
if(pp1.hasSecondPath())
{
// see if pp1:path2 are consistent with pp2
List<Integer> path2 = pp1.getPath2();
itr = path2.iterator();
for(int i = 0; i < path2.size(); i++)
{
Integer n = (Integer)(itr.next());
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " second path is not consistent with node: " + n, 15);
return 0;
}
}
}
// require that v1 comes before v2 in the partial order
SeqVertex v1 = getSeqVertex(graph, pp1.getFirstID());
SeqVertex v2 = getSeqVertex(graph, pp2.getFirstID());
if (v1.equals(v2)) {
// check their last nodes.
SeqVertex lv1 = getSeqVertex(graph, pp1.getLastID());
SeqVertex lv2 = getSeqVertex(graph, pp2.getLastID());
if (lv1.equals(lv2)) {
// must have same first and same last node, so potential differences in-between but otherwise compatible afaict
int consistent = (pp1.hashCode() < pp2.hashCode()) ? 1 : 0;
debugMes("\tfirst vertex node: " + pp1.getFirstID() + " and last node " + pp1.getLastID() + " are equal, so defining consistency based on hashcode comparison.", 15);
return(consistent); // just use consistent ordering to define proper DAG connectability
}
else {
// first node equivalent, last node not equivalent
int ancestral = SeqVertex.isAncestral(lv1, lv2, dijkstraDis);
debugMes("\tfirst nodes same: " + pp1.getFirstID() + ", but last nodes are different: " +
pp1.getLastID() + " vs. " + pp2.getLastID() + ", and SeqVertex.isAncestral = " + ancestral, 15);
return( (ancestral>0) ? 1:0);
}
}
else {
// first node not equivalent.
int ancestral = SeqVertex.isAncestral(v1,v2,dijkstraDis);
debugMes("\tpairpaths are compatible, examining relative orientation of first vertices: "
+ v1.getID() + " vs. " + v2.getID() + ", ancestral = " + ancestral, 15);
return( (ancestral > 0) ? 1 : 0);
}
}
private static boolean isOverlappingAndDirectionallyConsistent(PairPath pp1, PairPath pp2, DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
pp1 = pp1.trimSinkNodes();
pp2 = pp2.trimSinkNodes();
debugMes("isOverlappingAndDirectionallyConsistent? " + pp1 + pp2, 15);
if (pp1.equals(pp2)) { return (true); }
// If have nodes in common, require compatibility in overlapping path parts:
if(pp1.haveAnyNodeInCommon(pp2))
{
debugMes("\tHave nodes in common.", 15);
if(!pp1.isCompatible(pp2))
{
debugMes("\tNot compatible.", 15);
return false;
}
// DO allow containments to be compatible
if (pp2.isCompatibleAndContainedByPairPath(pp1, graph, dijkstraDis)) {
debugMes("\tpp2 isCompatibleAndContainedBy pp1, setting true (containments removed later on).", 15);
return(true);
}
}
else {
debugMes("\tNo node overlap, so not compatible.", 15);
return(false);
}
// iterate through every node in pp1 and check if it's consistent
List<Integer> path1 = pp1.getPath1();
// see that pp1:path1 nodes are consistent with pp2
Iterator<Integer> itr = path1.iterator();
for(int i = 0; i < path1.size(); i++)
{
Integer n = itr.next();
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " is not consistent with node: " + n, 15);
return false;
}
}
if(pp1.hasSecondPath())
{
// see if pp1:path2 are consistent with pp2
List<Integer> path2 = pp1.getPath2();
itr = path2.iterator();
for(int i = 0; i < path2.size(); i++)
{
Integer n = (Integer)(itr.next());
if(!(readIsConsistentWithNode(pp2, n, graph, dijkstraDis))) {
debugMes("\tpp2: " + pp2 + " second path is not consistent with node: " + n, 15);
return false;
}
}
}
// require that v1 comes before v2 in the partial order
SeqVertex v1 = getSeqVertex(graph, pp1.getFirstID());
SeqVertex v2 = getSeqVertex(graph, pp2.getFirstID());
if (v1.equals(v2)) {
// check their last nodes.
SeqVertex lv1 = getSeqVertex(graph, pp1.getLastID());
SeqVertex lv2 = getSeqVertex(graph, pp2.getLastID());
if (lv1.equals(lv2)) {
// must have same first and same last node, so potential differences in-between but otherwise compatible afaict
return(true);
}
else {
// first node equivalent, last node not equivalent
int ancestral = SeqVertex.isAncestral(lv1, lv2, dijkstraDis);
debugMes("\tfirst nodes same: " + pp1.getFirstID() + ", but last nodes are different: " +
pp1.getLastID() + " vs. " + pp2.getLastID() + ", and SeqVertex.isAncestral = " + ancestral, 15);
return(ancestral>0);
}
}
else {
// first node not equivalent.
int ancestral = SeqVertex.isAncestral(v1,v2,dijkstraDis);
debugMes("\tpairpaths are compatible, examining relative orientation of first vertices: "
+ v1.getID() + " vs. " + v2.getID() + ", ancestral = " + ancestral, 15);
return(ancestral > 0);
}
}
private static boolean checkTransitivity(int[][] adj, PairPath[] pairPathArr, HashMap<PairPath,Integer> pairPathToIntVal)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length; i++)
{
for(int j = 0; j < adj.length; j++)
{
for(int k = 0; k < adj.length; k++)
{
if (adj[i][j] == 1 && adj[j][k] == 1 && adj[i][k] == 0) {
Integer orig_i = pairPathToIntVal.get(pairPathArr[i]);
Integer orig_j = pairPathToIntVal.get(pairPathArr[j]);
Integer orig_k = pairPathToIntVal.get(pairPathArr[k]);
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + orig_i + "," + orig_j + "," + orig_k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
private static boolean checkTransitivity(boolean[][] adj, PairPath[] pairPathArr, HashMap<PairPath,Integer> pairPathToIntVal)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length; i++)
{
for(int j = 0; j < adj.length; j++)
{
for(int k = 0; k < adj.length; k++)
{
if (adj[i][j] && adj[j][k] && ! adj[i][k]) {
Integer orig_i = pairPathToIntVal.get(pairPathArr[i]);
Integer orig_j = pairPathToIntVal.get(pairPathArr[j]);
Integer orig_k = pairPathToIntVal.get(pairPathArr[k]);
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + orig_i + "," + orig_j + "," + orig_k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
private static boolean checkTransitivityRequireOverlap(
boolean[][] adj,
PairPath[] pairPathArr,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
// Examine all triplets (A,B,C) and ensure that A->B and B->C implies A->C
for(int i = 0; i < adj.length-2; i++)
{
for(int j = i+1; j < adj.length-1; j++)
{
// see if j is too far away from i, can then go to next i
if (! adj[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph))
if (FAST_PASA)
break;
}
for(int k = j+1; k < adj.length; k++)
{
// see if k is too far away from j, can then go to next j
if (! adj[j][k]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph))
if (FAST_PASA)
break;
}
if (adj[i][j] && adj[j][k] && pairPathArr[i].haveAnyNodeInCommon(pairPathArr[k]) && ! adj[i][k]) {
debugMes("UNCERTAINTY DETECTED AFTER SUPPOSEDLY HAVING REMOVED THEM [" + i + "," + j + "," + k + "] :\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
return false;
}
}
}
}
return true;
}
private static ArrayList<Integer> getUncertain(int[][] adj, PairPath[] pairPathArr)
{
// DAG: i -> j -> k
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length; i++)
{
if (toRemove.contains(i)) { continue; }
// move j to previous shared first node with i in the list:
int j = i;
if (j > 0 ) {
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
j--;
}
}
for(; j < adj.length; j++)
{
if(toRemove.contains(j))
continue;
// start k at first node sharing the same first ID as j
int k = j;
if (k > 0) {
while (k>0 && pairPathArr[j].getFirstID().equals(pairPathArr[k-1].getFirstID())) {
k--;
}
}
for(; k < adj.length; k++)
{
if(toRemove.contains(k))
continue;
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
if (adj[i][j] == 1 && adj[j][k] == 1)
{
if (adj[i][k] == 0) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
break; // go to next j
}
}
}
}
}
return toRemove;
}
private static ArrayList<Integer> getUncertain(boolean[][] adj, PairPath[] pairPathArr)
{
// DAG: i -> j -> k
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length; i++)
{
if (toRemove.contains(i)) { continue; }
// move j to previous shared first node with i in the list:
int j = i;
if (j > 0 ) {
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
j--;
}
}
for(; j < adj.length; j++)
{
if(toRemove.contains(j))
continue;
// start k at first node sharing the same first ID as j
int k = j;
if (k > 0) {
while (k>0 && pairPathArr[j].getFirstID().equals(pairPathArr[k-1].getFirstID())) {
k--;
}
}
for(; k < adj.length; k++)
{
if(toRemove.contains(k))
continue;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\r[" + i + "," + j + "," + k + "] ");
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
if (adj[i][j] == true && adj[j][k] == true)
{
if (adj[i][k] == false) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
break; // go to next j
}
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return toRemove;
}
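/**
 * Scans ordered triplets (i, j, k) of PairPaths: when i~j and j~k are
 * compatible but i and k share nodes yet are not compatible, the middle entry
 * j breaks transitivity and is flagged for removal. Distance checks
 * (twoPairPathsAreTooFarAwayInGraph) are used to prune the search when
 * FAST_PASA is enabled.
 */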
private static ArrayList<Integer> getUncertainRequireOverlap(
boolean[][] adj,
PairPath[] pairPathArr,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis)
{
// DAG: i -> j -> k
int debug_node = 301;
boolean local_debug = false;
ArrayList<Integer> toRemove = new ArrayList<Integer>();
for(int i = 0; i < adj.length-2; i++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + "?" + "," + "?" + "]\n" );
if (toRemove.contains(i)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + "?" + "," + "?" + "] Iprev-Captured\n");
continue;
}
boolean tooFar = false;
for(int j = i + 1; j < adj.length-1; j++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + "?" + "]\n");
if(toRemove.contains(j)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + "?" + "] Jprev-Captured\n");
continue;
}
// see if j is too far away from i, can then go to next i
if (! adj[i][j]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[i], pairPathArr[j], graph))
tooFar = true;
else if (tooFar)
debugMes("CHANGED from too far to within distance again: I:" + pairPathArr[i] + " J:" + pairPathArr[j], 10);
boolean tooFar2 = false;
for(int k = j + 1; k < adj.length; k++)
{
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] " +
"[" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]\n" );
if(toRemove.contains(k)) {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] Kprev-Captured\n");
continue;
}
// see if k is too far away from j, can then go to next j
if (! adj[j][k]) {
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pairPathArr[j], pairPathArr[k], graph))
tooFar2 = true;
else if (tooFar2)
debugMes("CHANGED from too far to within distance again: I:" + pairPathArr[i] + " J:" + pairPathArr[j], 10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "," + k + "] ");
debugMes("CHECKING TRANSITIVITY [" + i + "," + j + "," + k + "] "
+ "= [" + adj[i][j] + "," + adj[j][k] + "," + adj[i][k] + "]", 15);
debugMes("MORE VERBOSE CHECKING TRANSITIVITY:] " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][j] + ") " + j + " " + pairPathArr[j] + " }" +
" { " + j + " " + pairPathArr[j] + " results("+ adj[j][k] + ") " + k + " " + pairPathArr[k] + " } " +
" { " + i + " " + pairPathArr[i] + " results(" + adj[i][k] + ") " + k + " " + pairPathArr[k] + " } ", 18);
}
if (adj[i][j] == true && adj[j][k] == true)
{
if (pairPathArr[i].haveAnyNodeInCommon(pairPathArr[k]) && adj[i][k] == false) {
toRemove.add(j); // central node breaks transitivity. Remove it.
debugMes("UNCERTAINTY DETECTED:, removing: " + pairPathArr[j] + "\n" +
i + " " + pairPathArr[i] + " is consistent with " + j + " " + pairPathArr[j] + "\n" +
j + " " + pairPathArr[j] + " is consistent with " + k + " " + pairPathArr[k] + "\n" +
i + " " + pairPathArr[i] + " is NOT consistent with " + k + " " + pairPathArr[k] + "\n", 10);
if (pairPathArr[j].isCompatibleAndContainedByPairPath(pairPathArr[i]) || pairPathArr[j].isCompatibleAndContainedByPairPath(pairPathArr[k])) {
throw new RuntimeException("ERROR, containment encountered where containments should have been removed.");
}
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] J-Captured\n");
break; // no more need to analyze k in this i-j-k series.
}
else {
if (local_debug && i == debug_node)
System.err.print("\rUncertaintyCheck: [" + i + "," + j + "," + k + "] OK\n");
}
}
}
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return toRemove;
}
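/**
 * Build the pair-path compatibility matrix used for maximal matching: dag[i][j] is 1 when
 * pair path j is consistent with pair path i. Self-comparisons are temporarily set to 1 and
 * removed before matching, and comparisons start at the earliest sorted path that shares
 * the same first node ID as path i.
 * @return matrix of 0/1 consistency values
 */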
private static int[][] getPairPathDAG(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PairPath[] pairPathArr)
{
debugMes("getPairPathDAG:", 10);
int[][] dag = new int[pairPathArr.length][pairPathArr.length];
for (int[] row : dag)
Arrays.fill(row, 0); // init to no connection.
for (int i = 0; i < pairPathArr.length; i++)
{
if (pairPathArr[i].isEmpty())
continue;
//start comparisons to j where j starts at least at the same position as path i.
int j = i;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15 && j>0) {
Integer i_first_id = pairPathArr[i].getFirstID();
Integer jm1_first_id = pairPathArr[j-1].getFirstID();
debugMes("-comparing first IDs for :[" + i +"," + j + "-1]: " + i_first_id + " to " + jm1_first_id, 15);
}
while (j > 0 && pairPathArr[i].getFirstID().equals(pairPathArr[j-1].getFirstID())) {
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
Integer i_first_id = pairPathArr[i].getFirstID();
Integer jm1_first_id = pairPathArr[j-1].getFirstID();
debugMes("-comparing first IDs for :[" + i +"," + j + "-1]: " + i_first_id + " to " + jm1_first_id, 15);
}
j--; // step back to the first pair path sharing the same first node ID
}
for (; j < pairPathArr.length; j++)
{
int value;
if (i == j)
value = 1; // make compatible for now. Remove self-compatibility before maximal matching
else {
value = isConsistent(pairPathArr[i], pairPathArr[j], graph, dijkstraDis);
// nope, below doesn't work - maximal matching does require the full set of compatibilities.
//boolean val = isOverlappingAndDirectionallyConsistent(pairPathArr[i], pairPathArr[j], graph, dijkstraDis);
//value = (val) ? 1 : 0;
}
dag[i][j] = value;
debugMes("Comparing node " + i +" " + pairPathArr[i] + " with node " + j +" " + pairPathArr[j] + "Result: " + dag[i][j],15);
debugMes("DAG[" + i + "," + j + "]=" + dag[i][j], 15);
if (j < i && value == 1 && ! pairPathArr[i].getFirstID().equals(pairPathArr[j].getFirstID())) {
debugMes("\tWARNING: ConsistencyListUnordered: [" + i + "," + j + "] " + pairPathArr[i] + pairPathArr[j], 10); // perhaps should be more serious - throw exception?
//throw(new RuntimeException("ERROR: ConsistencyListUnordered: [" + i + "," + j + "] " + pairPathArr[i] + pairPathArr[j]));
}
}
}
return dag;
}
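/**
 * Build a boolean compatibility matrix over the sorted pair paths, where dag[i][j] is true
 * when pair path j overlaps and is directionally consistent with pair path i (upper triangle
 * only, i &lt; j). When FAST_PASA is set, the inner loop breaks once an incompatible pair is
 * also too far apart in the graph to be worth comparing further.
 */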
private static boolean[][] getPairPathConsistencyDAG(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis,
PairPath[] pairPathArr)
{
debugMes("getPairPathDAG:", 10);
boolean[][] dag = new boolean[pairPathArr.length][pairPathArr.length];
for (boolean[] row : dag)
Arrays.fill(row, false); // init to no connection.
// i -> j
for (int i = 0; i < pairPathArr.length-1; i++)
{
PairPath pp_i = pairPathArr[i];
boolean tooFar = false;
for (int j = i + 1; j < pairPathArr.length; j++)
{
PairPath pp_j = pairPathArr[j];
boolean compatible = isOverlappingAndDirectionallyConsistent(pp_i, pp_j, graph, dijkstraDis);
dag[i][j] =compatible;
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph) && compatible) {
debugMes("HOW CAN THESE BE TOO FAR AWAY AND STILL COMPATIBLE? " + pp_i + " vs. " + pp_j, 10);
debugMes(report_node_depths(pp_i, graph), 10);
debugMes(report_node_depths(pp_j, graph), 10);
}
if (! compatible) {
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph)) {
if (FAST_PASA)
break;
}
}
if (twoPairPathsAreTooFarAwayInGraph(pp_i, pp_j, graph)) {
tooFar = true;
}
else if (tooFar)
debugMes("NOT_TOO_FAR_AFTER_ALL: [" + i + "," + j + "]", 10);
debugMes("Comparing node " + i +" " + pp_i + " with node " + j +" " + pp_j + "Result: " + compatible,15);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15)
System.err.print("\rDAG[" + i + "," + j + "]=" + dag[i][j]);
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 12)
System.err.println();
return dag;
}
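/**
 * Format a multi-line report of the node IDs and node depths along each path of a pair path,
 * used for debugging distance/compatibility discrepancies.
 */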
private static String report_node_depths(PairPath pp,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int path_counter = 0;
String node_depth_text = "";
for (List<Integer> path : pp.get_paths()) {
path_counter++;
node_depth_text += "path pt" + path_counter + ": " + path + "\n";
for ( Integer node_id : path) {
SeqVertex v = getSeqVertex(graph, node_id);
node_depth_text += "\tnode: " + node_id + " depth: " + v._node_depth + "\n";
}
}
return node_depth_text;
}
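/**
 * Greedily extend a chain of vertex IDs to the left and then to the right using compatible
 * pair paths, at each step choosing the extension with the highest read support. Extensions
 * that would re-introduce nodes already on the chain (loops) are skipped, and extension stops
 * at source/sink vertices (negative IDs).
 * @return the extended list of vertex IDs
 */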
private static ArrayList<Integer> extendChain(ArrayList<Integer> extractedVerticesIDs,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Map<PairPath, Integer> pairPathToReadSupp, DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis)
{
debugMes("Extending Chain", 10);
ArrayList<Integer> extractedVerticesIDExtended = new ArrayList<Integer>();
extractedVerticesIDExtended.addAll(extractedVerticesIDs);
Integer firstID = extractedVerticesIDs.get(0);
Integer lastID = extractedVerticesIDs.get(extractedVerticesIDs.size() - 1);
// extend from left:
Integer lastIDVisited = firstID;
boolean canExtend = true;
if(lastIDVisited < 0) // if first ID indicates a source vertex, cannot extend further
canExtend = false;
// Greedily extend from first ID
while(canExtend == true)
{
int best = 0;
List<Integer> best_extension_path = null;
canExtend = false;
firstID = extractedVerticesIDExtended.get(0);
lastIDVisited = firstID;
if(lastIDVisited < 0) // if last ID indicates a sink vertex, cannot extend further
break;
for(PairPath p : pairPathToReadSupp.keySet())
{
if(p.isEmpty())
continue;
int support = pairPathToReadSupp.get(p);
p = p.trimSinkNodes();
if (! p.isCompatible(extractedVerticesIDExtended)) { continue; }
if (! p.containsID(lastIDVisited)) { continue; }
List<Integer> extensionPath;
if (p.getPath1().contains(lastIDVisited))
extensionPath = p.getPath1();
else if (p.getPath2().contains(lastIDVisited))
extensionPath = p.getPath2();
else
throw (new RuntimeException("error, pairpath " + p + " is missing required id " + lastIDVisited));
if(extensionPath.get(0).equals(lastIDVisited))
continue; // nothing to extend left with, since the left-most node of this path is the node we are extending from.
// see if encountering nodes already in the list, indicative of a loop
Integer loc_in_list = extensionPath.indexOf(lastIDVisited);
List<Integer> extension_nodes = extensionPath.subList(0, loc_in_list);
if (PairPath.haveAnyNodeInCommon(extension_nodes, extractedVerticesIDExtended))
continue;
// must have a candidate for extension.
if(support > best)
{
best = support;
best_extension_path = extensionPath;
canExtend = true;
}
} // end of testing for extension
if(canExtend == false)
break;
debugMes("Left-extension of : " + extractedVerticesIDExtended + " with " + best_extension_path, 10);
Integer loc_in_list = best_extension_path.indexOf(lastIDVisited);
extractedVerticesIDExtended.addAll(0, best_extension_path.subList(0, loc_in_list));
}
//System.exit(0);
// extend to right
lastIDVisited = lastID;
canExtend = true;
if(lastIDVisited < 0) // if last ID indicates a sink vertex, cannot extend further
canExtend = false;
// Greedily extend from last ID
while(canExtend == true)
{
int best = 0;
List<Integer> best_extension_path = null;
canExtend = false;
lastID = extractedVerticesIDExtended.get(extractedVerticesIDExtended.size()-1);
lastIDVisited = lastID;
if(lastIDVisited < 0) // if last ID indicates a sink vertex, cannot extend further
break;
for(PairPath p : pairPathToReadSupp.keySet())
{
if(p.isEmpty())
continue;
int support = pairPathToReadSupp.get(p);
p = p.trimSinkNodes();
if (! p.isCompatible(extractedVerticesIDExtended)) { continue; }
if (! p.containsID(lastIDVisited)) { continue; }
List<Integer> extensionPath;
if (p.getPath1().contains(lastIDVisited))
extensionPath = p.getPath1();
else if (p.getPath2().contains(lastIDVisited))
extensionPath = p.getPath2();
else
throw (new RuntimeException("error, pairpath " + p + " is missing required id " + lastIDVisited));
if(extensionPath.get(extensionPath.size()-1).equals(lastIDVisited))
continue;
// see if encountering nodes already in the list, indicative of a loop
Integer loc_in_list = extensionPath.indexOf(lastIDVisited);
List<Integer> extension_nodes = extensionPath.subList(loc_in_list+1, extensionPath.size());
if (PairPath.haveAnyNodeInCommon(extension_nodes, extractedVerticesIDExtended))
continue;
// must have a candidate for extension.
if(support > best)
{
best = support;
best_extension_path = extensionPath;
canExtend = true;
}
} // end of testing for extension
if(canExtend == false)
break;
debugMes("Right-extension of : " + extractedVerticesIDExtended + " with " + best_extension_path, 10);
Integer loc_in_list = best_extension_path.indexOf(lastIDVisited);
debugMes("\tloc of " + lastIDVisited + " in best extension list: " + best_extension_path + " is " + loc_in_list, 15);
extractedVerticesIDExtended.addAll(best_extension_path.subList(loc_in_list+1, best_extension_path.size()));
//System.out.println("Path after " + extractedVerticesIDs.toString());
}
return extractedVerticesIDExtended;
}
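/**
 * Cufflinks-style minimum path cover: sort pair paths by node depth, remove containments and
 * uncertain entries, build the pairwise compatibility DAG, compute a maximum bipartite matching,
 * and convert the resulting chains into transcript paths.
 * @return map of reconstructed transcript paths (node ID lists) to Pair values
 */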
private static HashMap<List<Integer>, Pair<Integer>> cuffMinPaths(final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis)
{
HashMap<List<Integer>, Pair<Integer>> transcripts = new HashMap<List<Integer>,Pair<Integer>>();
Set<PairPath> pairPaths = new HashSet<PairPath>();
Map<PairPath, Integer> pairPathToReadSupport = new HashMap<PairPath, Integer>();
debugMes("Beginning cuffMinPaths",10);
// populate pairPathToReadSupport: PairPath => readSupport
// and pairPaths hashset: the list of all PairPaths
populate_pairpaths_and_readsupport(combinedReadHash, pairPaths, pairPathToReadSupport);
ArrayList<PairPath> pairPathsList = new ArrayList<PairPath>(pairPaths);
Comparator<PairPath> pairPathOrderComparer = new Comparator<PairPath>() { // sort by first node depth in graph
public int compare(PairPath a, PairPath b) {
if (a.equals(b)) {
return(0);
}
Integer a_index = a.getFirstID();
Integer b_index = b.getFirstID();
int f1 = getSeqVertex(graph, a_index)._node_depth; // why using FinishingTime instead of DiscoveryTime?
int f2 = getSeqVertex(graph, b_index)._node_depth;
if( f1 < f2 )
return -1;
else if( f1 > f2 )
return 1;
// same node depth.
if (a_index < b_index)
return -1;
else if (a_index > b_index)
return 1;
// same first node ID
// check last node
Integer a_last_index = a.getLastID();
Integer b_last_index = b.getLastID();
int l1 = getSeqVertex(graph,a_last_index)._node_depth;
int l2 = getSeqVertex(graph,b_last_index)._node_depth;
if (l1 < l2) {
return(-1);
}
else if (l1 > l2) {
return(1);
}
// same last node depth too.
// compare their node identifiers
if (a_last_index < b_last_index)
return(-1);
else if (a_last_index > b_last_index)
return(1);
// default
// not the same paths, but same start node and last node DFS, so just order based on hashcode
return ( (a.hashCode() < b.hashCode()) ? 1 : -1);
}
};
Collections.sort(pairPathsList, pairPathOrderComparer);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("SORTED PAIRPATHS IN ORDER:", 15);
for (PairPath p : pairPathsList) {
debugMes("\t" + p, 15);
}
}
HashSet<Integer> vertices = extract_vertex_list_from_PairPaths(pairPathsList);
PairPath[] pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
HashSet<List<Integer>> cuff_input_paths = new HashSet<List<Integer>>();
HashMap<PairPath,Integer> pairPathToIntVal = new HashMap<PairPath,Integer>();
debugMes("All PairPaths sorted by DFS", 10);
for (int i = 0; i < pairPathArr.length; i++) {
pairPathToIntVal.put(pairPathArr[i], i);
debugMes("CuffFly Input PairPath: " + pairPathArr[i] + " <index: " + i + ">", 10);
cuff_input_paths.add(pairPathArr[i].getPath1());
}
//0. remove containments
List<Integer> containments = getContainments(pairPathArr);
for (int i = 0; i < containments.size(); i++) {
pairPathsList.remove(pairPathArr[containments.get(i)]);
}
// refresh after removing containments.
pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
//1. build pair path graph
int[][] dag = getPairPathDAG(graph, dijkstraDis, pairPathArr);
//print dag
debugMes("DAG
for(int i = 0; i < dag.length; i++)
{
String dag_text = "";
for(int j = 0; j < dag.length; j++)
{
dag_text += dag[i][j] + " ";
}
debugMes(dag_text, 10);
}
//2.1 remove uncertain pair paths
ArrayList<Integer> uncertain = getUncertain(dag, pairPathArr);
debugMes("REMOVING UNCERTAINTIES: " + uncertain, 10);
for(int i = 0; i < uncertain.size(); i++)
{
pairPathsList.remove(pairPathArr[uncertain.get(i)]);
}
HashSet<Integer> vertices_after_removed_uncertainties = extract_vertex_list_from_PairPaths(pairPathsList);
if (vertices_after_removed_uncertainties.size() < vertices.size()) {
int missing_node_count = vertices.size() - vertices_after_removed_uncertainties.size();
debugMes("WARNING, MISSING: " + missing_node_count + " of " + vertices.size() + " nodes after removing uncertainties", 10);
for (Integer v : vertices) {
if (! vertices_after_removed_uncertainties.contains(v)) {
debugMes("WARNING, MISSING NODE: After removing uncertainties, missing node from graph: " + v, 10);
}
}
}
// refresh again, after now removing the uncertain entries.
pairPathArr = pairPathsList.toArray(new PairPath[pairPathsList.size()]);
//print pair paths
debugMes("PAIR PATHS
for(int i = 0; i < pairPathsList.size(); i++)
{
debugMes("PairPathAfterUncertainRemoved "+ i + " " + pairPathArr[i].toString() + " <index: " + pairPathToIntVal.get(pairPathArr[i]) + ">", 10);
}
// regenerate the dag now that the uncertain entries are removed.
dag = getPairPathDAG(graph, dijkstraDis, pairPathArr);
//print dag
debugMes("DAG
for(int i = 0; i < dag.length; i++)
{
String dag_text = "";
for(int j = 0; j < dag.length; j++)
{
dag_text += dag[i][j] + " ";
}
debugMes(dag_text, 10);
}
//2.2 check transitivity
if(!checkTransitivity(dag, pairPathArr, pairPathToIntVal))
{
throw(new RuntimeException("Graph is NOT transitive!"));
}
// remove self-matches
for (int i = 0; i < dag.length; i++) {
dag[i][i] = 0;
}
//2.3 get matching
BipartiteMatching bp = new BipartiteMatching(dag.length, dag.length, dag);
bp.maxMatching();
int[] rightMatching = bp.getRightMatching();
int[] leftMatching = bp.getLeftMatching();
debugMes("Matching
bp.printRightMatching();
//2.4 get chains from matching
ArrayList<ArrayList<PairPath>> chains = new ArrayList<ArrayList<PairPath>>();
boolean[] addedToChain = new boolean[rightMatching.length];
Arrays.fill(addedToChain, false);
ArrayList<PairPath> curChain;
for (int i = 0; i < rightMatching.length; i++) {
if (rightMatching[i] == -1) {
// start new chain
ArrayList<PairPath> chain = new ArrayList<PairPath>();
chains.add(chain);
chain.add(pairPathArr[i]);
int j = i;
while (leftMatching[j] != -1) {
j = leftMatching[j];
chain.add(pairPathArr[j]);
}
}
}
// report the chain info.
HashMap<Integer,Boolean> seen = new HashMap<Integer,Boolean>();
debugMes("Number of chains: " + chains.size(),10);
for(int j = 0; j < chains.size(); j++)
{
curChain = chains.get(j);
System.out.println("Chain: " + j);
for(int k = 0; k < chains.get(j).size(); k++)
{
PairPath p = chains.get(j).get(k);
Integer p_pos = pairPathToIntVal.get(p);
String seen_text = (seen.containsKey(p_pos)) ? " *** ERROR, ALREADY INCLUDED IN ANOTHER CHAIN *** " : "";
debugMes(p + " Pos:[" + p_pos + "] " + seen_text,10);
seen.put(p_pos, new Boolean(true));
}
debugMes("",10);
}
// 3. foreach chain:
// 3.1. extract nodes from chains
DijkstraShortestPath dsp = new DijkstraShortestPath(graph);
for(int i = 0; i < chains.size(); i++)
{
HashSet<Integer> extracted = new HashSet<Integer>();
ArrayList<SeqVertex> extractedVertices = new ArrayList<SeqVertex>();
List<List<Integer>> chain_i_path_list = new ArrayList<List<Integer>>();
curChain = chains.get(i);
for(int j = 0; j < curChain.size(); j++)
{
chain_i_path_list.add(curChain.get(j).getPath1());
/*
extracted.addAll(curChain.get(j).getPath1());
if(curChain.get(j).hasSecondPath())
extracted.addAll(curChain.get(j).getPath2());
*/
}
List<List<Integer>> chain_i_collapsed_paths = Path.collapse_compatible_paths_to_min_set(chain_i_path_list);
for (List<Integer> collapsed_path : chain_i_collapsed_paths) {
transcripts.put(collapsed_path, new Pair(new Integer(1), new Integer(1)));
}
/*
for(Integer id : extracted)
{
extractedVertices.add(getSeqVertex(graph, id));
}
//extractedVerticesIDs.addAll(extracted);
// 3.2. sort according to topological order of BTFL graph
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
String node_id_list_text = "";
for (SeqVertex v : extractedVertices) {
node_id_list_text += v.getID() + " ";
}
debugMes("Extracted vertices for chain: " + i + " and sorted is: " + node_id_list_text + "\n", 10);
// Fill in any gaps
// 3.3. path=[L_1]; For each i in 1:length(node_list)
int j = 0;
int num_vertices = extractedVertices.size(); // note, extractedVertices grows in size during iterations below.
while(j < num_vertices - 1)
{
//System.out.println(j);
SeqVertex current = extractedVertices.get(j);
SeqVertex next = extractedVertices.get(j + 1);
// 3.3.1 if L_i == L_(i+1) then nothing
// -There are no duplicates since extractedVertices was created from
// building the set of extracted vertex IDs
// 3.3.2 else if exists an edge from L_i to L_(i+1) then nothing(?)
if(graph.getSuccessors(current).contains(next)) {
j++;
continue;
}
// 3.3.3 else find a single path (p = L_i,..., L_(i+1)):
// append all P_j (j=2:end) to our path
//List<SimpleEdge> sp = org.jgrapht.alg.DijkstraShortestPath.findPathBetween((Graph)graph, current, next);
List<SimpleEdge> sp = dsp.getPath(current, next);
debugMes("Found shorteset path between " + current.getID() + " and " + next.getID() + ":", 10);
ArrayList<SeqVertex> toAdd = new ArrayList<SeqVertex>();
for(SimpleEdge edge : sp) {
SeqVertex v = graph.getDest(edge);
toAdd.add(v);
debugMes("\t" + v.getID(), 10);
}
toAdd.remove(next);
extractedVertices.addAll(toAdd);
j++;
}
ArrayList<Integer> extractedVerticesIDs = new ArrayList<Integer>();
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
for(SeqVertex v: extractedVertices)
{
//System.out.println("Adding vertex with ID: " + v.getID());
extractedVerticesIDs.add(v.getID());
}
boolean extend_paths = ! CUFF_NO_EXTEND; // just for debugging purposes
if (extend_paths) {
ArrayList<Integer> extended = extendChain(extractedVerticesIDs, graph, pairPathToReadSupport, dijkstraDis);
transcripts.put(extended, new Pair(new Integer(1), new Integer(1)));
}
else {
transcripts.put(extractedVerticesIDs, new Pair(new Integer(1), new Integer(1)));
}
*/
} // end of foreach chain
debugMes("Cuff-based reconstructions of transcripts:", 10);
for (List<Integer> p : transcripts.keySet()) {
debugMes("CuffFly Output Path: " + p, 10);
if (cuff_input_paths.contains(p))
debugMes("\t** Original cuffpath",10);
else
debugMes("\t** NOT Original cuffpath", 10);
}
return transcripts;
}
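/**
 * Collect the set of all vertex IDs that appear in any path of the given pair paths.
 */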
private static HashSet<Integer> extract_vertex_list_from_PairPaths(
ArrayList<PairPath> pairPathsList) {
HashSet<Integer> vertices = new HashSet<Integer>();
for (PairPath pp : pairPathsList) {
for (List<Integer> path: pp.get_paths()) {
for (Integer node : path) {
vertices.add(node);
}
}
}
return(vertices);
}
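/**
 * Identify pair paths that are compatible with and fully contained by another pair path;
 * the returned indices are removed before building the compatibility DAG.
 */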
private static List<Integer> getContainments(PairPath[] pairPathArr) {
HashSet<Integer> containments = new HashSet<Integer>();
for (int i = 0; i < pairPathArr.length; i++) {
for (int j = 0; j < pairPathArr.length; j++) {
if (i == j)
continue;
if (pairPathArr[i].isCompatibleAndContainedByPairPath(pairPathArr[j])) {
debugMes("CONTAINMENT: " + pairPathArr[i] + " is contained by " + pairPathArr[j], 10);
containments.add(i);
}
}
}
List<Integer> containment_list = new ArrayList<Integer>(containments);
return(containment_list);
}
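/**
 * Flatten combinedReadHash into the pairPaths set and the pairPathToReadSupport map.
 * When MAKE_PE_SE is set, each pair path is split into its two individual paths and the
 * read support is accumulated per split path; otherwise the pair path is stored as-is
 * with its read support.
 */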
private static void populate_pairpaths_and_readsupport(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
Set<PairPath> pairPaths,
Map<PairPath, Integer> pairPathToReadSupport) {
for(Integer i : combinedReadHash.keySet())
{
Set<PairPath> temp = combinedReadHash.get(i).keySet();
for(PairPath j: temp)
{
if(j != null)
{
Integer read_count_support = combinedReadHash.get(i).get(j);
j = j.trimSinkNodes();
if (MAKE_PE_SE) { // separate the paths:
// convert split paths into individual paths to avoid uncertainties.
PairPath first_part = new PairPath(j.getPath1());
if (! pairPaths.contains(first_part)) {
pairPaths.add(first_part);
pairPathToReadSupport.put(first_part, read_count_support);
}
else {
// already there, just increment the read count support
pairPathToReadSupport.put(first_part, pairPathToReadSupport.get(first_part)+read_count_support);
}
if (j.hasSecondPath()) {
PairPath second_part = new PairPath(j.getPath2());
if (! pairPaths.contains(second_part)) {
pairPaths.add(second_part);
pairPathToReadSupport.put(second_part, read_count_support);
}
else {
// already there, just increment the read count support
pairPathToReadSupport.put(second_part, pairPathToReadSupport.get(second_part)+read_count_support);
}
}
} else {
// using pair paths instead of the split pairs (original)
pairPaths.add(j);
pairPathToReadSupport.put(j, read_count_support);
}
}
}
}
}
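/**
 * Top-level path reconstruction for one component: delegates to getAllProbablePaths() and
 * returns the full set of final paths.
 */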
private static HashMap<List<Integer>, Pair<Integer>> butterfly (DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
long totalNumReads,
PrintStream pout_all,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis,
DijkstraDistanceWoVer<SeqVertex,SimpleEdge> dijkstraDisWoVer,
HashMap<Integer, List<List<Integer>>> tripletMapper,
HashMap<Integer, List<List<Integer>>> extendedTripletMapper,
HashMap<Integer,Boolean> xStructuresResolvedByTriplets
)
{
Pair<HashMap<List<Integer>,Pair<Integer>>> FinalPathsPair = getAllProbablePaths(graph,comp,
combinedReadHash,dijkstraDis,dijkstraDisWoVer,
tripletMapper,extendedTripletMapper, xStructuresResolvedByTriplets);
HashMap<List<Integer>,Pair<Integer>> FinalPaths_diff = FinalPathsPair.getFirst();
HashMap<List<Integer>,Pair<Integer>> FinalPaths_all = FinalPathsPair.getSecond();
return FinalPaths_all;
}
/**
* given the graph, find all single nt bubbles, and choose the majority vote.
* add the weights to the majority path, and add the prevID
* v -> v1 -> vend
* v -> v2 -> vend
* @param graph
*/
private static void removeSingleNtBubbles(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
SeqVertex v1=null ,v2 = null, vend = null;
SeqVertex vToKeep=null ,vToRemove = null;
SimpleEdge e1ToKeep = null, e1ToRemove = null;
SimpleEdge e2ToKeep = null, e2ToRemove = null;
Vector<SeqVertex> removeV = new Vector<SeqVertex>();
Collection<SeqVertex> allV = new HashSet<SeqVertex>();
allV.addAll(graph.getVertices());
for (SeqVertex v : allV)
{
if (removeV.contains(v))
continue;
if (graph.getSuccessorCount(v)==2)
{
Collection<SeqVertex> children = graph.getSuccessors(v);
Iterator<SeqVertex> iter = children.iterator();
v1 = iter.next();
v2 = iter.next();
int len1 = v1.getNameKmerAdj().length();
int len2 = v2.getNameKmerAdj().length();
debugMes("SNP_collapse candidates: " + v1 + " len: " + len1 + " and " + v2 + " len: " + len2, 15);
if (len1==KMER_SIZE && len2==KMER_SIZE &&
graph.getSuccessorCount(v1)==1 &&
graph.getSuccessorCount(v2)==1 &&
getSingleSuccessor(graph,v2).equals(getSingleSuccessor(graph,v1)))
{
vend = getSingleSuccessor(graph,v1);
if (graph.findEdge(v, v1).getWeight() > graph.findEdge(v, v2).getWeight())
{ //keep v1, loose v2
vToKeep = v1;
vToRemove = v2;
}else
{ //keep v2, loose v1
vToKeep = v2;
vToRemove = v1;
}
e1ToKeep = graph.findEdge(v, vToKeep);
e2ToKeep = graph.findEdge(vToKeep, vend);
e1ToRemove = graph.findEdge(v, vToRemove);
e2ToRemove = graph.findEdge(vToRemove, vend);
debugMes("SNP_collapse: merging the node "+vToRemove.getID()+" to the node "+vToKeep.getID(),15);
SeqVertex newV = new SeqVertex(getNextID(), vToKeep.getName());
newV.copyTheRest(vToKeep);
newV.addToPrevIDs(vToKeep,vToRemove,LAST_REAL_ID);
graph.addVertex(newV);
graph.addEdge(new SimpleEdge(e1ToKeep.getWeight() + e1ToRemove.getWeight(), v.getID(), newV.getID()), v, newV);
graph.addEdge(new SimpleEdge(e2ToKeep.getWeight() + e2ToRemove.getWeight(), newV.getID(), vend.getID()), newV,vend);
removeV.add(vToRemove);
removeV.add(vToKeep);
}
}
}
for (SeqVertex rv : removeV)
{
debugMes("removing the single nt variation vertex "+rv.getID(),20);
graph.removeVertex(rv);
}
}
/**
 * given the graph, find all single nt bubbles and merge the two variant nodes into a single
 * node named with the degenerate (IUPAC) code, summing the edge weights and recording the prevIDs
* v -> v1 -> vend
* v -> v2 -> vend
* @param graph
* @throws Exception
*/
private static void removeSingleNtBubblesWithDegenerateCode(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) throws Exception {
SeqVertex v1=null ,v2 = null, vend = null;
SimpleEdge eTop1 = null, eTop2 = null;
SimpleEdge eBottom1 = null, eBottom2 = null;
Vector<SeqVertex> removeV = new Vector<SeqVertex>();
Collection<SeqVertex> allV = new HashSet<SeqVertex>();
allV.addAll(graph.getVertices());
for (SeqVertex v : allV)
{
if (removeV.contains(v))
continue;
if (graph.getSuccessorCount(v)==2)
{
Collection<SeqVertex> children = graph.getSuccessors(v);
Iterator<SeqVertex> iter = children.iterator();
v1 = iter.next();
v2 = iter.next();
int len1 = v1.getName().length();
int len2 = v2.getName().length();
if (len1==1 && len2==1 &&
graph.getSuccessorCount(v1)==1 &&
graph.getSuccessorCount(v2)==1 &&
getSingleSuccessor(graph,v2).equals(getSingleSuccessor(graph,v1)))
{
vend = getSingleSuccessor(graph,v1);
String key;
if (String.CASE_INSENSITIVE_ORDER.compare(v1.getName(),v2.getName())<0)
key = v1.getName()+v2.getName();
else
key = v2.getName()+v1.getName();
String name = getDegenerateRepresentation(key);
SeqVertex newV = new SeqVertex(getNextID(), name);
if (graph.findEdge(v, v1).getWeight() > graph.findEdge(v, v2).getWeight())
newV.copyTheRest(v1);
else
newV.copyTheRest(v2);
eTop1 = graph.findEdge(v, v1);
eBottom1 = graph.findEdge(v1, vend);
eTop2 = graph.findEdge(v, v2);
eBottom2 = graph.findEdge(v2, vend);
debugMes("merging the nodes "+v1.getID()+" and the node "+v2.getID()+" to the node "+newV,18);
newV.addToPrevIDs(v1,v2,LAST_REAL_ID);
newV.setFrequencies(v1.getName(),eTop1.getWeight(),v2.getName(),eTop2.getWeight());
graph.addVertex(newV);
graph.addEdge(new SimpleEdge(eTop1.getWeight() + eTop2.getWeight(), v.getID(), newV.getID()), v, newV);
graph.addEdge(new SimpleEdge(eBottom1.getWeight() + eBottom2.getWeight(), newV.getID(), vend.getID()), newV,vend);
removeV.add(v1);
removeV.add(v2);
}
}
}
for (SeqVertex rv : removeV)
{
debugMes("removing the single nt variation vertex "+rv.getID(),20);
graph.removeVertex(rv);
}
}
/**
* return the single successor of this node in this graph
* @param graph
 * @param v
* @return
*/
private static SeqVertex getSingleSuccessor(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
Collection<SeqVertex> children = graph.getSuccessors(v);
if (children.size()!=1)
return null;
SeqVertex vout = children.iterator().next();
return vout;
}
/**
 * find edges whose support is extremely high compared to both sides (e.g., caused by a single very abundant kmer), and fix their support
* @param graph
* @param inFlow
* @param outFlow
*/
private static void fixExtremelyHighSingleEdges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, HashMap<Integer,Integer> outFlow, HashMap<Integer,Integer> inFlow) {
debugMes("fixExtremelyHighSingleEdges()", 5);
for (SimpleEdge e : graph.getEdges())
{
double supp =e.getWeight();
Integer sourceID = graph.getSource(e).getID();
Integer targetID = graph.getDest(e).getID();
Integer inFlowToSource = inFlow.get(sourceID);
Integer outFlowOfTarget = outFlow.get(targetID);
if (inFlowToSource!= null && outFlowOfTarget!= null &&
supp > inFlowToSource*EXTREME_EDGE_FLOW_FACTOR && supp > outFlowOfTarget*EXTREME_EDGE_FLOW_FACTOR)
{
double newSupp = Math.max(inFlowToSource, outFlowOfTarget);
debugMes("the support of edge "+sourceID+"->"+targetID+" has changed from "+supp+" to "+newSupp,20);
e.setWeight(newSupp);
}
}
}
/**
* given the graph and the final paths, find x structures that belong to only two paths, which resolve this structure.
* @param graph
* @param comp
* @param finalPaths
* @return
*/
private static int countNumOfXstructuresResolved(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp, HashMap<List<Integer>,Pair<Integer>> finalPaths) {
int res = 0;
for (SeqVertex v : comp)
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
{
//this is an x-structure
int maxPaths = Math.max(graph.inDegree(v), graph.outDegree(v));
Integer bef,after;
int vid = v.getID();
HashMap<Pair<Integer>,Integer> befAndAfterNodes = new HashMap<Pair<Integer>, Integer>();
Pair<Integer> key;
for (List<Integer> path : finalPaths.keySet())
{
int index = path.indexOf(vid);
if (index!=-1 && index!=0 && index!=path.size()-1) // vid is not the first or the last
{
bef = path.get(index-1);
after = path.get(index+1);
key = new Pair<Integer>(bef,after);
if (!befAndAfterNodes.containsKey(key))
befAndAfterNodes.put(key,1);
else
befAndAfterNodes.put(key,befAndAfterNodes.get(key)+1);
}
}
String triplets = "";
for (Pair<Integer> befAndAfterNode : befAndAfterNodes.keySet()) {
Integer before1 = (Integer) befAndAfterNode.getFirst();
Integer after1 = (Integer) befAndAfterNode.getSecond();
triplets += "[" + before1 + "-" + vid + "-" + after1 + "=" + befAndAfterNodes.get(befAndAfterNode) + "] ";
}
if (befAndAfterNodes.keySet().size()==maxPaths)
{
debugMes("vertex "+v.getID()+" IS resolved in an X-structure: " + triplets, 10);
res++;
}
else {
debugMes("vertex " + v.getID() + " is NOT resolved in an X-structure: " + triplets, 10);
}
}
}
return res;
}
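/**
 * For every X-structure vertex (in-degree &gt; 1 and out-degree &gt; 1), record whether
 * read-supported triplets exist that resolve it, keyed by vertex ID.
 */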
private static HashMap<Integer,Boolean> getXstructuresResolvedByTriplets (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer, List<List<Integer>>> tripletMapper) {
HashMap<Integer,Boolean> xStructuresResolvedByTriplets = new HashMap<Integer,Boolean>();
for (SeqVertex v : comp)
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
{
//this is an x-structure
Integer vertex_id = v.getID();
if (tripletMapper.containsKey(vertex_id)) {
debugMes("vertex " + vertex_id + " IS resolved in an X-structure: " + tripletMapper.get(vertex_id), 10);
xStructuresResolvedByTriplets.put(vertex_id, true);
}
else {
debugMes("vertex " + v.getID() + " is UN-resolved X-structure. ", 10);
xStructuresResolvedByTriplets.put(vertex_id, false);
}
}
}
return (xStructuresResolvedByTriplets);
}
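/**
 * Simple (path, sequence) holder that sorts final paths by decreasing sequence length.
 */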
public static class FinalPaths implements Comparable<FinalPaths> {
List<Integer> path;
String sequence;
public FinalPaths (List<Integer> p, String s) {
path = p;
sequence = s;
}
public int compareTo(FinalPaths f) {
if (this.sequence.length() > f.sequence.length()) {
return(-1);
}
else if (this.sequence.length() < f.sequence.length()) {
return(1);
}
else {
return(0);
}
}
}
/**
 * Print all final paths in FASTA format, assigning gene and isoform identifiers.
 * @param finalPaths
 * @param graph
 * @param p
 * @param name
 * @param totalNumReads
 * @param final_paths_to_long_read_content
 * @param separate_gene_ids
* @throws FileNotFoundException
*/
private static void printFinalPaths(
HashMap<List<Integer>,Pair<Integer>> finalPaths,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p,
String name,
long totalNumReads,
HashMap<List<Integer>,ArrayList<String>> final_paths_to_long_read_content,
HashMap<List<Integer>, Integer> separate_gene_ids)
throws FileNotFoundException
{
debugMes("Final Paths: " + finalPaths.size(), 10);
DecimalFormat df = new DecimalFormat("
name = name.replace(".graph", "");
HashMap<Integer,Integer> local_gene_id_mapping = new HashMap<Integer,Integer>();
HashMap<Integer,Integer> local_seq_counter = new HashMap<Integer,Integer>();
for (List<Integer> path : finalPaths.keySet()) {
String seq = getPathSeq(graph,path);
//print this path
int gene_id;
int iso_id;
if (separate_gene_ids.containsKey(path)) {
int local_gene_id = separate_gene_ids.get(path);
if (local_gene_id_mapping.containsKey(local_gene_id)) {
// seen this gene before
gene_id = local_gene_id_mapping.get(local_gene_id);
iso_id = local_seq_counter.get(gene_id) + 1;
local_seq_counter.put(gene_id, iso_id);
}
else {
gene_id = ++GENE_COUNTER;
local_gene_id_mapping.put(local_gene_id, gene_id);
iso_id = 1;
// storing isoform id for the first time.
local_seq_counter.put(gene_id, iso_id);
}
}
else {
// no gene clusters, each is unique:
gene_id = ++GENE_COUNTER;
iso_id = 1;
}
String seqName = name + "_g" + gene_id + "_i" + iso_id;
String pathName = get_pathName_string(path, graph);
seqName += " len="+seq.length() + " path="+ pathName;
// Report the long read content information.
if (final_paths_to_long_read_content.containsKey(path)) {
// then got list of long read names assigned to this final path
HashMap<PairPath, ArrayList<String>> long_read_paths_to_name_list = new HashMap<PairPath, ArrayList<String>>();
for (String long_read_name : final_paths_to_long_read_content.get(path)) {
PairPath pp = LONG_READ_NAME_TO_PPath.get(long_read_name);
if (!long_read_paths_to_name_list.containsKey(pp)) {
ArrayList<String> a = new ArrayList<String>();
a.add(long_read_name);
long_read_paths_to_name_list.put(pp, a);
}
else {
long_read_paths_to_name_list.get(pp).add(long_read_name);
}
}
seqName = seqName + " long_read_mappings: " + long_read_paths_to_name_list;
}
debugMes("\nFinal path reported: " + seqName, 10);
p.print(getSeqFasta(seq, seqName));
}
}
/*
private static HashMap<List<Integer>, Boolean> remove_transcripts_with_insufficent_read_support(
Set<List<Integer>> pathSet, HashMap<List<Integer>,Integer> seqLengthMap,
HashMap<List<Integer>, Boolean> remove_low_expr_isoforms) {
for (List<Integer> path : pathSet) {
float path_frag_count = pc.get_transcript_to_sum_frag_counts(path);
debugMes("PATH_TO_FRAG_COUNT: " + path_frag_count + ", FRAGS_PER_TRANS_LEN: " + path_frag_count/seqLengthMap.get(path)*100, 10);
}
return remove_low_expr_isoforms;
}
*/
private static Object seqLengthMap() {
// TODO Auto-generated method stub
return null;
}
/*
private static HashMap<List<Integer>, Float> get_pct_expr_isoforms_of_genes(
HashMap<List<Integer>, Integer> separate_gene_ids) {
// This relies on having run the EM to estimate relative expression.
if (pc == null) {
throw new RuntimeException("removal of low isoforms requires EM was run, but pc is null");
}
HashMap<List<Integer>, Float> pct_expr_isoform = new HashMap<List<Integer>,Float>();
HashMap<Integer,Float> max_gene_expr_per_gene = new HashMap<Integer,Float>();
for (List<Integer> transcript : separate_gene_ids.keySet()) {
Integer gene_id = separate_gene_ids.get(transcript);
Float expr = pc.get_expr(transcript);
if (max_gene_expr_per_gene.containsKey(gene_id)) {
if (max_gene_expr_per_gene.get(gene_id) < expr)
max_gene_expr_per_gene.put(gene_id, expr);
}
else
max_gene_expr_per_gene.put(gene_id, expr);
}
for (List<Integer> transcript : separate_gene_ids.keySet()) {
Integer gene_id = separate_gene_ids.get(transcript);
Float expr = pc.get_expr(transcript);
Float max_gene_expr = max_gene_expr_per_gene.get(gene_id);
float pct_isoform_expr = expr/max_gene_expr * 100;
debugMes("Relative expression: " + pct_isoform_expr + ", gene: " + gene_id + ", path: " + transcript, 10);
pct_expr_isoform.put(transcript, pct_isoform_expr);
}
return(pct_expr_isoform);
}
*/
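/**
 * Cluster final paths into genes: two paths are linked when the length of their shared nodes
 * covers at least MIN_ISOFORM_PCT_LEN_OVERLAP percent of either path, and weakly connected
 * components of the resulting graph define the gene groupings (unclustered paths each get
 * their own gene ID).
 * @return map of each path to its gene cluster ID
 */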
private static HashMap<List<Integer>, Integer> group_paths_into_genes(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("Grouping paths into genes", 10);
HashMap<Integer,Integer> node_length_map = new HashMap<Integer,Integer>(); // track node lengths for seq pair comparisons
HashMap<List<Integer>, Integer> seqLengthMap = new HashMap<List<Integer>,Integer>();
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
// get node lengths
for (List<Integer> path : finalPaths_all.keySet())
{
String seq = getPathSeq(graph,path);
seqLengthMap.put(path, seq.length());
//System.out.println(seq);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
for (Integer nodeID : path) {
if (nodeID > 0) {
int node_length = getSeqVertex(graph, nodeID).getName().length();
node_length_map.put(nodeID, node_length);
}
else
node_length_map.put(nodeID, 0);
}
}
UndirectedSparseGraph<List<Integer>, String> sparseGraph = new UndirectedSparseGraph<List<Integer>, String>();
List<List<Integer>> paths = new ArrayList<List<Integer>>(finalPaths_all.keySet());
for (int i = 0; i <= paths.size()-2; i++) {
List<Integer> path_i = paths.get(i);
if (! sparseGraph.containsVertex(path_i))
sparseGraph.addVertex(path_i);
int path_i_len = 0;
for (Integer node : path_i) {
path_i_len += node_length_map.get(node);
}
for (int j = i + 1; j <= paths.size()-1; j++) {
List<Integer> path_j = paths.get(j);
if (! sparseGraph.containsVertex(path_j))
sparseGraph.addVertex(path_j);
int path_j_len = 0;
int nodes_same_length = 0;
for (Integer node : path_j) {
path_j_len += node_length_map.get(node);
if (path_i.contains(node))
nodes_same_length += node_length_map.get(node);
}
float iso_pct_overlap = Math.max((float)nodes_same_length / path_i_len * 100,
(float)nodes_same_length / path_j_len * 100);
debugMes("Isoform_overlap: Path_i:" + path_i + ", Path_j: " + path_j + ", overlap = " + iso_pct_overlap + "%", 10);
if ( iso_pct_overlap >= MIN_ISOFORM_PCT_LEN_OVERLAP) {
sparseGraph.addEdge("e_" + i + "_" + j, path_i, path_j);
debugMes("IsoformEdge linking: " + path_i + " to " + path_j, 10);
}
}
}
HashMap<List<Integer>, Integer> gene_grouping = new HashMap<List<Integer>, Integer>();
WeakComponentClusterer<List<Integer>, String> compClus = new WeakComponentClusterer<List<Integer>,String>();
Set<Set<List<Integer>>> comps = compClus.transform(sparseGraph);
debugMes("IsoformClustering, number of clusters = " + comps.size(), 10);
// add the singletons back in
HashMap<List<Integer>,Boolean> inCluster = new HashMap<List<Integer>,Boolean>();
if (comps.size() == 0) {
// all related
for (List<Integer> path : finalPaths_all.keySet()) {
gene_grouping.put(path, 1);
}
return(gene_grouping);
}
int cluster_count = 0;
for (Set<List<Integer>> cluster : comps) {
cluster_count++;
for (List<Integer> path : cluster) {
gene_grouping.put(path, cluster_count);
debugMes("GeneCluster[" + cluster_count + "] contains: " + path, 10);
inCluster.put(path, true);
}
}
for (List<Integer> path : paths) {
if (! inCluster.containsKey(path)) {
cluster_count++;
gene_grouping.put(path, cluster_count);
}
}
return(gene_grouping);
}
/**
* given a path in the graph, return its sequence
* @param graph
* @param path
* @return
*/
private static String getPathSeq(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
String seq = "";
Boolean first_node = true;
for (Integer nodeID : path) {
if (nodeID>=0) {
String node_seq = getSeqVertex(graph, nodeID).getName();
if (! first_node) {
node_seq = node_seq.substring(SeqVertex.get_kmer_length() -1);
}
first_node = false;
seq += node_seq;
//System.out.println("Node: " + nodeID + " has seq: " + node_seq);
}
}
return seq;
}
/**
* For each path of a read pair, ask how many reads support it.
* @param graph
* @param readNameHash
* @param dijkstraDis
* @return
*/
private static HashMap<Integer, HashMap<PairPath, Integer>> getSuffStats_wPairs(
DirectedSparseGraph<SeqVertex,SimpleEdge> graph,
HashMap<String, List<Read>> readNameHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis) {
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash = new HashMap<Integer,HashMap<PairPath,Integer>> ();
Set<String> usedReads = new HashSet<String>();
List<Read> curList = null;
int numReadsUsed = 0;
int numSingletons = 0;
int numPairs = 0;
int numPairsDiscarded = 0;
for (String name : readNameHash.keySet())
{
if (usedReads.contains(name))
continue; // ignoring redundancy in the read set?
curList = readNameHash.get(name);
if (curList.size()==1)
{//single read
// ** Single Read Processing **
Read r = curList.get(0);
PairPath path = new PairPath(r.getPathIDs());
Integer firstV = path.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); // init
if (!combinedReadHash.get(firstV).containsKey(path))
combinedReadHash.get(firstV).put(path, 0);
Integer counts = combinedReadHash.get(firstV).get(path);
combinedReadHash.get(firstV).put(path,++counts); // count read having this path
numReadsUsed++;
debugMes("we have "+combinedReadHash.get(firstV).get(path)+" reads supporting the path: "+path,19);
numSingletons++;
// examine for long read.
if (name.startsWith("LR$|")) { // r.getSeq().length() >= MIN_LONG_READ_LENGTH) {
LONG_READ_NAME_TO_PPath.put(name, path);
debugMes("LONG_READ_IDENTIFIED: " + name + " , path: " + path, 12);
if (! LONG_READ_PATH_MAP.containsKey(path)) {
ArrayList<String> nameList = new ArrayList<String>();
nameList.add(name);
LONG_READ_PATH_MAP.put(path, nameList);
}
else {
ArrayList<String> nameList = (ArrayList<String>) LONG_READ_PATH_MAP.get(path);
nameList.add(name);
}
}
}else { // paired read
// ** Paired Read Processing **
Read r1 = curList.get(0);
List<Integer> path1 = r1.getPathIDs();
Read r2 = curList.get(1);
List<Integer> path2 = r2.getPathIDs();
PairPath combinedPath = new PairPath(path1, path2);
/* move this to after repeat unrolling.
* for now, just keep it simple, store paths.
*
PairPath combinedPath = combinePaths(graph,path1,path2,dijkstraDis);
if (combinedPath.isEmpty())
{
debugMes("the paths "+path1+" and "+path2+" couldn't be combined",15);
numPairsDiscarded++;
continue;
}
*/
Integer firstV = combinedPath.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath,Integer>()); //init
if (!combinedReadHash.get(firstV).containsKey(combinedPath))
combinedReadHash.get(firstV).put(combinedPath, 0); //add pairpath
Integer counts = combinedReadHash.get(firstV).get(combinedPath);
combinedReadHash.get(firstV).put(combinedPath,++counts); // increment counts for pairpath
debugMes("we have "+combinedReadHash.get(firstV).get(combinedPath)+" reads supporting the path: "+combinedPath,18);
numReadsUsed++;
numPairs++;
}
usedReads.add(name);
}
debugMes("number of reads used = "+numReadsUsed,15);
debugMes("## Read PathPair results: " + numSingletons + " singletons, "
+ " num pairs: " + numPairs + ", num pairs discarded: " + numPairsDiscarded, 10);
return combinedReadHash;
}
/**
* Given the graph, and two paths of the two reads, combine them into a single path
* @param graph
* @param path1
* @param path2
* @param dijkstraDis
* @return
*/
private static PairPath combinePaths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> path1, List<Integer> path2, DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis) {
debugMes("combinePaths: " + path1 + ", " + path2, 15);
SeqVertex firstV1 = getSeqVertex(graph,path1.get(0));
SeqVertex lastV1 = getSeqVertex(graph,path1.get(path1.size()-1));
SeqVertex firstV2 = getSeqVertex(graph,path2.get(0));
SeqVertex lastV2 = getSeqVertex(graph,path2.get(path2.size()-1));
PairPath path = new PairPath();
if (path1.containsAll(path2))
path.setPath1(path1);
else if (path2.containsAll(path1))
path.setPath2(path2); // note, gets moved to path1 later.
//path1 --> path2
else if (SeqVertex.isAncestral(lastV1, firstV2,dijkstraDis)>0
&&
! lastV1.equals(firstV2)
)
{
path.setPath1(path1);
path.setPath2(path2);
}
//path2 --> path1
else if (SeqVertex.isAncestral(lastV2, firstV1,dijkstraDis)>0
&&
! lastV2.equals(firstV1)
)
{
path.setPath1(path2);
path.setPath2(path1);
}
else if (SeqVertex.isAncestral(firstV2,firstV1,dijkstraDis)==0 &&
SeqVertex.isAncestral(lastV2,lastV1,dijkstraDis)==0)
{
//there is no consistent path between read1 and read2
}
// Overlapping paths
//path1(partial) -> path2
else if (SeqVertex.isAncestral(firstV1,firstV2,dijkstraDis)>0 &&
path1.indexOf(firstV2.getID())>=0)
{
int i = path1.indexOf(firstV2.getID());
path.setPath1(path1.subList(0, i));
path.addToPath1(path2);
}
//path2(partial) -> path1
else if (SeqVertex.isAncestral(firstV2,firstV1,dijkstraDis)>0 &&
path2.indexOf(firstV1.getID())>=0)
{
int i = path2.indexOf(firstV1.getID());
path.setPath1(path2.subList(0, i));
path.addToPath1(path1);
}
if (path.getPath1().isEmpty() && !path.getPath2().isEmpty())
path.movePath2To1();
// Try to impute connecting paths from path 1 to path 2
if ((! path.getPath1().isEmpty()) && (! path.getPath2().isEmpty())) {
SeqVertex fV1 = getSeqVertex(graph,path.getPath1().get(0));
SeqVertex lV1 = getSeqVertex(graph,path.getPath1().get(path.getPath1().size()-1));
SeqVertex fV2 = getSeqVertex(graph,path.getPath2().get(0));
SeqVertex lV2 = getSeqVertex(graph,path.getPath2().get(path.getPath2().size()-1));
debugMes("Examining imputation of path connecting pairpath: " + path + " nodes "+ lV1.getID() + " to " + fV2.getID(), 20);
if (SeqVertex.isAncestral(lV1, fV2, dijkstraDis) > 0) {
// note could return false if have a sequencing gap
boolean canExtend = true;
SeqVertex v = lV1;
// walk towards fV2
List<Integer> intervening_vertex_ids = new ArrayList<Integer>();
boolean impute = true;
while (canExtend) {
SeqVertex next = null;
int count_connectable = 0;
for (SeqVertex successor : graph.getSuccessors(v)) {
if (SeqVertex.isAncestral(successor, fV2, dijkstraDis) > 0) {
count_connectable++;
next = successor;
}
}
if (next != null && count_connectable == 1) {
if (fV2.equals(next)) {
// reached fV2
break;
}
else {
intervening_vertex_ids.add(next.getID());
}
v = next;
}
else {
// either no connection or too many potential connections
canExtend = false;
impute = false;
}
}
if (impute) {
debugMes("Could Impute path connecting" + path + " containing intervening nodes: " + intervening_vertex_ids, 16);
if (! intervening_vertex_ids.isEmpty()) {
path.getPath1().addAll(intervening_vertex_ids);
}
path.getPath1().addAll(path.getPath2());
path.getPath2().clear();
}
else {
debugMes("Could not impute intervening nodes", 20);
}
}
}
return path;
}
/**
* Count how many vertices we have with in degree >1 & out degree >1
* @param graph
* @return
*/
private static int countNumOfXstructures(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
int res = 0;
for (SeqVertex v : graph.getVertices())
{
if (graph.inDegree(v)>1 && graph.outDegree(v)>1)
res++;
}
return res;
}
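/**
 * Debug-print all pair paths grouped by their starting vertex.
 */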
private static void printPairPaths(HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash){
printPairPaths(combinedReadHash, "PAIRPATH");
}
private static void printPairPaths(HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
String out_token){
for(Map.Entry<Integer, HashMap<PairPath, Integer>> entry : combinedReadHash.entrySet())
{
debugMes("Start Vertex:" + entry.getKey(), 10);
for(Map.Entry<PairPath, Integer> paths : entry.getValue().entrySet())
{
//System.out.println(entry.getValue());
debugMes(out_token + ": " + paths, 10);
}
}
}
/**
* Given the graph and the hash with all reads, find all probable paths from S to T.
* @param graph
* @param comp
* @param combinedReadHash
* @param dijkstraDis
* @param dijkstraDisWoVer
*/
private static Pair<HashMap<List<Integer>, Pair<Integer>>> getAllProbablePaths (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis,
DijkstraDistanceWoVer<SeqVertex,SimpleEdge> dijkstraDisWoVer,
HashMap<Integer, List<List<Integer>>> tripletMapper,
HashMap<Integer, List<List<Integer>>> extendedTripletMapper,
HashMap<Integer,Boolean> xStructuresResolvedByTriplets
) {
debugMes("\nSECTION\n
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("PairPaths to assemble:", 15);
printPairPaths(combinedReadHash, "PairPaths@BflyStart");
}
// paths that are constructed by tracing paths of reads through the graph
HashMap<SeqVertex,List<List<Integer>>> Paths = new HashMap<SeqVertex,List<List<Integer>>>();
// this holds reads of path until V + reads starting at V
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads = new HashMap<List<Integer>,HashMap<PairPath,Integer>>();
HashMap<List<Integer>, HashSet<PairPath>> PathContainedReads = new HashMap<List<Integer>,HashSet<PairPath>>();
HashMap<List<Integer>,Boolean> Extensions = new HashMap<List<Integer>,Boolean>();
// final paths to be captured and reported.
HashMap<List<Integer>,Pair<Integer>> FinalPaths_diff = new HashMap<List<Integer>,Pair<Integer>>();
HashMap<List<Integer>,Pair<Integer>> FinalPaths_all = new HashMap<List<Integer>,Pair<Integer>>();
/*
ROOT.setDFS_FinishingTime(Integer.MAX_VALUE);
T_VERTEX.setDFS_FinishingTime(-1);
*/
ROOT.setDepth(-1);
T_VERTEX.setDepth(Integer.MAX_VALUE);
/*
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
PriorityQueue<SeqVertex> C = new PriorityQueue<SeqVertex>(comp.size(),finishingTimeComparator);
*/
PriorityQueue<SeqVertex> BflyQueue = new PriorityQueue<SeqVertex>(comp.size(), new SeqVertexNodeDepthComparator());
// start the path listing out with a path containing the ROOT node only.
BflyQueue.add(ROOT);
List<Integer> tmpL = new ArrayList<Integer>();
tmpL.add(ROOT.getID());
ArrayList<List<Integer>> tmpPathList = new ArrayList<List<Integer>>();
tmpPathList.add(tmpL);
Paths.put(ROOT, tmpPathList);
SeqVertex v;
int total_num_nodes = comp.size();
// beginning path constructions
HashMap<Integer,Boolean> node_ID_visited = new HashMap<Integer,Boolean>();
int num_nodes = 0;
String Crep;
while (!BflyQueue.isEmpty())
{
/*
if (BFLY_GLOBALS.VERBOSE_LEVEL>=20)
{
Crep = "[";
for (SeqVertex vp : C)
Crep = Crep + "" +vp.getID()+":"+vp.getDFS_FinishingTime()+",";
Crep += "]";
debugMes("C = "+Crep,10);
}
*/
debugMes("QUEUE IS: " + BflyQueue, 12);
v = BflyQueue.poll();
if (v.getID() > 0) {
// if it has successors that haven't been visited yet, delay targeting it.
List<SeqVertex> delay_tackle_vertices = new ArrayList<SeqVertex>();
while ( (! BflyQueue.isEmpty()) && (! parents_all_visited(v, node_ID_visited, graph) ) ) {
debugMes("* delaying tackling vertex: " + v.getID() + " since a parent hasn't been visited yet.", 12);
delay_tackle_vertices.add(v);
v = BflyQueue.poll();
}
if (BflyQueue.isEmpty() && ! parents_all_visited(v, node_ID_visited, graph)) {
throw new RuntimeException("ERROR, queue ran out of nodes and current node has unvisited parents.");
}
if (! delay_tackle_vertices.isEmpty()) {
// add them back to the queue
BflyQueue.addAll(delay_tackle_vertices);
}
}
// track the nodes we visit, avoid looping by extending from a node encountered earlier. Loops should be handled long before here.
if (node_ID_visited.containsKey(v.getID())) {
debugMes("** already visited node in queue: " + v.getID(), 5);
continue;
}
else {
node_ID_visited.put(v.getID(), true);
}
debugMes("\n\n
num_nodes++;
float pct_done = (float) num_nodes / total_num_nodes * 100;
debugMes("\tbutterfly pct done: " + num_nodes + " / " + total_num_nodes + " = " + pct_done + "% pct done.", 5);
// get read paths that start at vertex V
HashMap<PairPath,Integer> readsStartingAtV = combinedReadHash.get(v.getID());
if (readsStartingAtV == null) {
debugMes("ReadsStartingAtV_START_BFLY" + v.getID() + " EMPTY", 15);
}
else {
for (PairPath read : readsStartingAtV.keySet()) {
debugMes("ReadsStartingAtV_START_BFLY, Node: " + v.getID() + " read: " + read, 15);
}
}
// prep data structures required.
// go over all paths of P[v], add all reads that start at v
debugMes("Exploring extension of: " + Paths.get(v).size() + " paths that end at vertex: " + v.getID(), 5);
//describe paths:
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("\n== Current Paths Constructed Up To Vertex: " + v.getID() + " :", 15);
for (List<Integer> path: Paths.get(v)) {
debugMes("PathPartialReconstruction@[" + v.getID() + "] : " + path, 15);
}
}
for (List<Integer> path : Paths.get(v))
{
if (!PathReads.containsKey(path))
PathReads.put(path, new HashMap<PairPath,Integer>()); // init
if (! PathContainedReads.containsKey(path))
PathContainedReads.put(path, new HashSet<PairPath>());
if (readsStartingAtV!=null && !readsStartingAtV.isEmpty())
{
debugMes("\nAdding the reads " +readsStartingAtV +" to the path "+ path, 17);
PathReads.get(path).putAll(readsStartingAtV);
/*
// verify:
for (PairPath pp : readsStartingAtV.keySet()) {
debugMes("VERIFYING: " + v.getID() + " ReadStartingAtV: " + pp + " = " + PathReads.get(path).get(pp), 10);
}
*/
// path that ends at V is associated with all reads that start at V
}
//keep track of all extensions
Extensions.put(path, false);
/*
for (PairPath read : PathReads.get(path).keySet()) {
debugMes("PATH: " + path + " initially stocked with read: " + read, 10);
}
*/
}
// Examine each path, try to extend by successor u
// go over all descendants of v
for (SeqVertex u : graph.getSuccessors(v))
{
debugMes("\n\n
+ "
+"\n
if (! (comp.contains(u) || u.equals(T_VERTEX))) {
debugMes("component either lacks: " + u.getID() + " or at sink", 12);
continue; // only examine successor vertices that are contained within this subcomponent
}
int path_counter = 0;
boolean vExtendedToU = false;
List<List<Integer>> paths_ending_at_v = new ArrayList<List<Integer>>(Paths.get(v));
debugMes("Count of paths ending at v: " + v.getID() + " = " + paths_ending_at_v.size(), 12);
// sort paths by pair-path support descendingly
PathReadSupportComparator local_pc = new PathReadSupportComparator(PathReads);
Collections.sort(paths_ending_at_v, local_pc);
Collections.reverse(paths_ending_at_v); // now descending according to read support.
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
for (List<Integer> path : paths_ending_at_v)
{
debugMes("path_ending_at_v: " + path, 15);
}
}
// Examine each of the growing paths that end at vertex V for extension.
// extend V by U if:
// a. in TRIPLET LOCK mode and there exists a read-supported structure w-v-u
// b. extended triplet exists and the growing path is consistent with the
// complex read path consistent with [a.b.c]..w-v-u
// c. haven't reached the limit of number of paths artificially capped //FIXME: re-examine this.
// d. ALL POSSIBLE PATHS parameter set
// e. path has enough read support: look back the number of nodes before U in this growing path
// that yield at least the required path support sequence distance, and ensure read paths
// demonstrate compatibility/containment
for (List<Integer> path : paths_ending_at_v)
{
// remember, only looking at extensions that contain 'u' now.
debugMes("\n\n# [PathCounter(" + u.getID() + ")=" + path_counter + " Examining potential extension of path ending at node V: " + v.getID()
+ " by successor: " + u.getID()
+ ", via path=" + path, 15);
Boolean path_wvu_acceptable = true; // by default
Boolean extended_triplet_path_compatible = false;
if (path.size() >= 3) {
Integer w = path.get(path.size()-2); // create triplet w-v-u
if (tripletMapper.containsKey(v.getID())
&& tripletMapper.get(v.getID()).size() > 1) // at least partially resolved structure via read path
{
List<Integer> triplet = new ArrayList<Integer>();
triplet.add(w); // left
triplet.add(v.getID()); // central
triplet.add(u.getID()); // right
List<List<Integer>> triplet_list = tripletMapper.get(v.getID());
if (tripletSupported(triplet_list, triplet)){
// Hurray, got triplet support
debugMes("Triplet Path: " + triplet + " *IS* supported by reads.", 15);
path_wvu_acceptable = true;
// do extended triplet search.
// path must be compatible with at least one of the complex prefix paths
// ensuring compatible with the larger path context that may extend beyond a triplet
List<Integer> pathWu = new ArrayList<Integer>(); // pathWu = path with u
pathWu.addAll(path);
pathWu.add(u.getID());
// extended triplet search
for (List<Integer> prefix_path : extendedTripletMapper.get(u.getID())) {
PairPath ppath = new PairPath(prefix_path);
if (ppath.isCompatibleAndContainedBySinglePath(pathWu)) {
debugMes("EXTENDED_TRIPLET_SEARCH: " + ppath + " compared to " + pathWu + " True", 15);
extended_triplet_path_compatible = true;
break;
}
else {
debugMes("EXTENDED_TRIPLET_SEARCH: " + ppath + " compared to " + pathWu + " False", 15);
}
}
}
else {
// lock down node, don't allow alternative structures not supported by reads here.
debugMes("Triplet Path: " + triplet + " is *NOT* supported by reads.", 15);
path_wvu_acceptable = false;
}
}
else {
debugMes("TripletMapper doesnt contain node: " + v.getID(), 15);
// if node v is at center of X-structure and there are no valid triplets, disable extension
if (FRACTURE_UNRESOLVED_XSTRUCTURE && xStructuresResolvedByTriplets.containsKey(v.getID())) {
debugMes("Node " + v.getID() + " is at center of X structure and no triplet support detected. FractureUnresolvedX set, so Disabling extension.", 10);
path_wvu_acceptable = false;
}
}
}
else {
debugMes("path " + path + " is too short to check for triplet support.", 15);
}
HashMap<PairPath,Integer> readsOfPathUntilV = PathReads.get(path); //this holds reads of path until V + reads starting at V
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
debugMes("ReadsOfPathUntilV: PATH: " + path, 15);
for (PairPath pp : readsOfPathUntilV.keySet())
debugMes("ReadsOfPathUntiV: READ: " + pp, 15);
}
if (
path_wvu_acceptable // path_wvu only matters under triplet locking mode.
&&
// place restriction on the number of paths pursued here.
// if extended triplet compatible, be sure to explore it... dont want to lose a good path here.
(extended_triplet_path_compatible || path_counter <= MAX_NUM_PATHS_PER_NODE_EXTEND)
&&
(
ALL_POSSIBLE_PATHS
||
pathHasEnoughReadSupport(readsOfPathUntilV,path,u,graph,dijkstraDisWoVer)
||
u.getID() < 0 // a sink node, if path made it this far, sink can be added.
)
)
{
path_counter++;
// add [path,u] to paths of u. Each vertex contains all the paths that led up to it.
if (!Paths.containsKey(u))
Paths.put(u, new ArrayList<List<Integer>>());
List<Integer> pathWu = new ArrayList<Integer>(); // pathWu = path with u
pathWu.addAll(path);
pathWu.add(u.getID());
if (!Paths.get(u).contains(pathWu)){
debugMes("\nSuccessful extension of " + u.getID() + " to generate path " +pathWu, 15);
Paths.get(u).add(pathWu);
}
//update reads of [path,u] : includes all reads that are consistent with path Wu up to and including u
updateReadsOfPath(PathReads,PathContainedReads, pathWu,readsOfPathUntilV,u.getID(),graph,dijkstraDis);
//update extension
Extensions.put(path, true);
vExtendedToU = true;
}
else {
debugMes("No extension of path " + path + " by " + u, 15);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
boolean pathHasEnoughSupport = pathHasEnoughReadSupport(readsOfPathUntilV,path,u,graph,dijkstraDisWoVer);
debugMes("\tpath_counter = " + path_counter + ", path_wvu_acceptable=" + path_wvu_acceptable
+ " pathHasEnoughReadSupport=" + pathHasEnoughSupport, 15);
}
}
} // end of extending paths that end at V by U
if (!BflyQueue.contains(u))
{
debugMes(u.getID()+" was added to the queue",17);
BflyQueue.add(u);
}
//if v didn't extend to u, and we have an edge there, add (v,u) as a new path
if ( (!vExtendedToU) )
{
debugMes("the edge (v-u) was not used in any extension: "+v.getID()+"->"+u.getID(),15);
if (!Paths.containsKey(u))
Paths.put(u, new ArrayList<List<Integer>>());
List<Integer> vuPath = new ArrayList<Integer>();
vuPath.add(v.getID());
vuPath.add(u.getID());
Paths.get(u).add(vuPath);
//add the reads
if (!PathReads.containsKey(vuPath))
PathReads.put(vuPath, new HashMap<PairPath,Integer>());
if (readsStartingAtV!=null && !readsStartingAtV.isEmpty())
{
debugMes("adding the reads " +readsStartingAtV +" to the path "+ vuPath, 17);
PathReads.get(vuPath).putAll(readsStartingAtV);
updateReadsOfPath(PathReads, PathContainedReads, vuPath,readsStartingAtV,u.getID(),graph,dijkstraDis);
}
}
} // end of exploration of successors U of V
//report the paths that were not extended AND remove them from Paths
List<List<Integer>> removePaths = new ArrayList<List<Integer>>();
for (List<Integer> path : Paths.get(v))
{
SeqVertex lastV = getSeqVertex(graph, path.get(path.size()-1));
if (!lastV.equals(T_VERTEX) && Extensions.get(path)!=null && !Extensions.get(path))
{
if (getSeqPathLength(graph,path)>MIN_OUTPUT_SEQ)
{
FinalPaths_all.put(path,new Pair<Integer>(getSuppCalculation(PathReads.get(path)),0));
debugMes("the unextended path: "+path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path)) +" support",15);
}
removePaths.add(path);
}
}
for (List<Integer> path : removePaths)
{
debugMes("path "+ path +" wasnt extended and is captured accordingly.",15);
Paths.get(v).remove(path);
Extensions.remove(path);
}
}
// end of path constructions
for (List<Integer> path : Paths.get(T_VERTEX))
{
int pathSeqLen = getSeqPathLength(graph,path);
if (pathSeqLen>MIN_OUTPUT_SEQ)
{
// adding to path collection
FinalPaths_all.put(path,new Pair<Integer>(getSuppCalculation(PathReads.get(path)),0));
if (path.get(0).intValue() == ROOT.getID())
debugMes("the finished path: "+ path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path))+" support",15);
else
debugMes("the finished (from middle unextended) path: "+ path+" was added to the final paths, with "+getSuppCalculation(PathReads.get(path)) +" support",15);
}
else {
debugMes("sequence for path: " + path + " is too short: " + pathSeqLen, 15);
}
}
if (FinalPaths_all.size() > 1)
FinalPaths_all = remove_identical_subseqs(FinalPaths_all, graph, PathReads);
return new Pair<HashMap<List<Integer>, Pair<Integer>>>(FinalPaths_diff,FinalPaths_all);
}
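/**
* returns true iff every predecessor of v with a positive node ID has already been visited
* (sink/virtual nodes with non-positive IDs are ignored).
* @param v
* @param node_ID_visited
* @param graph
* @return
*/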
private static boolean parents_all_visited(SeqVertex v,
HashMap<Integer, Boolean> node_ID_visited,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
for (SeqVertex pred : graph.getPredecessors(v)) {
if (pred.getID() > 0 && ! node_ID_visited.containsKey(pred.getID())) {
return(false);
}
}
return(true);
}
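/**
* Filter isoforms by relative expression: for each gene (as assigned in separate_gene_ids),
* find the expression of its most highly expressed path, then drop any path whose expression
* falls below MIN_RELATIVE_ISOFORM_EXPRESSION percent of that dominant isoform.
* Paths without a gene assignment are always retained.
* @param all_paths
* @param pathReads
* @param graph
* @param pc provides per-path expression estimates
* @param separate_gene_ids
* @return the retained paths
*/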
private static List<List<Integer>> remove_lesser_supported_paths_EM(
List<List<Integer>> all_paths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathReads,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PathExpressionComparator pc,
HashMap<List<Integer>, Integer> separate_gene_ids) {
// determine max expr per gene
HashMap<Integer,Float> gene_to_max_expr = new HashMap<Integer,Float>();
for (List<Integer> path : all_paths) {
if (separate_gene_ids.containsKey(path)) {
Integer gene_id = separate_gene_ids.get(path);
float expr = pc.get_expr(path);
if (gene_to_max_expr.containsKey(gene_id)) {
if (gene_to_max_expr.get(gene_id) < expr) {
gene_to_max_expr.put(gene_id, expr);
}
}
else {
gene_to_max_expr.put(gene_id, expr);
}
}
}
List<List<Integer>> paths_to_keep = new ArrayList<List<Integer>>();
// retain only those isoforms that have at least MIN_RELATIVE_ISOFORM_EXPRESSION % of the expression of their gene's dominant isoform.
for (List<Integer> path : all_paths) {
boolean keep = true;
if (separate_gene_ids.containsKey(path)) {
Integer gene_id = separate_gene_ids.get(path);
float expr = pc.get_expr(path);
float max_iso_expr = gene_to_max_expr.get(gene_id);
float pct_iso_expr = expr / max_iso_expr * 100;
if (pct_iso_expr >= MIN_RELATIVE_ISOFORM_EXPRESSION) {
// keep it.
debugMes("Keeping isoform: " + path + " as having " + pct_iso_expr + "% dom. iso expr for gene.", 15);
}
else {
keep = false;
debugMes("*Excluding isoform: " + path + " as having " + pct_iso_expr + "% dom. iso expr for gene.", 15);
}
}
if (keep) {
paths_to_keep.add(path);
}
}
return(paths_to_keep);
}
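/**
* Given paths already sorted from highest to lowest priority, keep only those that contribute
* at least one compatible read (pair path) not already claimed by a higher-priority path.
* @param paths_ending_at_v
* @param pathReads
* @return the retained paths
*/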
private static List<List<Integer>> remove_lesser_supported_paths(
List<List<Integer>> paths_ending_at_v,
HashMap<List<Integer>, HashMap<PairPath, Integer>> pathReads) {
// note, should be already sorted by priority from high-to-low
debugMes("\n## Removing lesser-supported paths that end at V", 10);
HashMap<List<Integer>, List<PairPath>> path_to_compatible_reads = new HashMap<List<Integer>, List<PairPath>>();
// compute compatibility and containments
for (List<Integer> path : paths_ending_at_v) {
path_to_compatible_reads.put(path, new ArrayList<PairPath>());
for (PairPath pp : pathReads.get(path).keySet()) {
if (pp.isCompatible(path)) {
path_to_compatible_reads.get(path).add(pp);
}
}
}
List<List<Integer>> best_paths = new ArrayList<List<Integer>>();
// examine them hierarchically and see if lower supported paths continue to add any unique read content
HashSet<PairPath> all_PairPaths = new HashSet<PairPath>();
for (List<Integer> path : paths_ending_at_v) {
List<PairPath> compat_reads = path_to_compatible_reads.get(path);
int count_unique = 0;
for (PairPath pp : compat_reads) {
if (! all_PairPaths.contains(pp)) {
count_unique++;
all_PairPaths.add(pp);
}
}
debugMes("Unique contribution of pairpath: " + count_unique + " of total: " + compat_reads.size() + " from path: " + path, 10);
if (count_unique > 0)
best_paths.add(path);
else
debugMes("\tdiscarding path due to lack of unique read (pairpath) content: " + path, 10);
}
return(best_paths);
}
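/**
* For each final path, split it into regions delimited at internal nodes that have no entry in
* tripletMapper, trim region boundaries that fall on hub nodes (a start node with multiple
* successors, an end node with multiple predecessors), and report the refined, sink-padded
* subpaths as triplet-reinforced paths.
* @param finalPaths_all
* @param tripletMapper
* @param graph
* @return
*/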
private static HashMap<List<Integer>, Pair<Integer>> verifyTripletSupportAcrossPaths(
HashMap<List<Integer>, Pair<Integer>> finalPaths_all,
HashMap<Integer, List<List<Integer>>> tripletMapper,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
HashMap<List<Integer>, Pair<Integer>> triplet_reinforced_paths = new HashMap<List<Integer>, Pair<Integer>>();
for (List<Integer> path : finalPaths_all.keySet()) {
if (path.size() < 3) {
continue;
}
debugMes("Verifying triplets for path: " + path, 10);
// iterate through triplets
ArrayList<Pair<Integer>> triplet_reinforced_regions = new ArrayList<Pair<Integer>>();
int begin = 0;
for (int i = 1; i < path.size()-1; i++) {
Integer central_id = path.get(i);
Integer left_id = path.get(i-1);
Integer right_id = path.get(i+1);
List<Integer> adjacency_path = new ArrayList<Integer>();
adjacency_path.add(left_id);
adjacency_path.add(central_id);
adjacency_path.add(right_id);
if (tripletMapper.containsKey(central_id)) {
debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + "OK", 10);
}
else {
debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + "*** MISSING ***", 10);
triplet_reinforced_regions.add(new Pair<Integer>(begin, i));
begin = i;
}
}
triplet_reinforced_regions.add(new Pair<Integer>(begin, path.size()-1));
for (Pair<Integer> subpath_range : triplet_reinforced_regions) {
Integer start_node_index = subpath_range.getFirst();
Integer stop_node_index = subpath_range.getSecond();
debugMes("Processing Triplet-Reinforced region: " + path.subList(start_node_index, stop_node_index + 1), 10);
// see if the start node looks like a hub
Integer start_node_id = path.get(start_node_index);
SeqVertex start_node = getSeqVertex(graph, path.get(start_node_index));
if (start_node_id >= 0 && graph.getSuccessorCount(start_node) > 1) {
start_node_index++;
}
Integer stop_node_id = path.get(stop_node_index);
SeqVertex stop_node = getSeqVertex(graph, stop_node_id);
if (stop_node_id >= 0 && graph.getPredecessorCount(stop_node) > 1) {
stop_node_index--;
}
if (start_node_index <= stop_node_index) {
List<Integer> refined_triplet_path = path.subList(start_node_index, stop_node_index+1);
debugMes("Refined triplet-reinforced path= " + refined_triplet_path, 10);
debugMes("Start node: " + refined_triplet_path.get(0) + " has successor count: " + graph.getSuccessorCount(getSeqVertex(graph, refined_triplet_path.get(0))), 10);
debugMes("End node: " + refined_triplet_path.get(refined_triplet_path.size()-1) + " has predecessor count: " + graph.getPredecessorCount(getSeqVertex(graph, refined_triplet_path.get(refined_triplet_path.size()-1))), 10);
refined_triplet_path = ensure_path_has_sinks(refined_triplet_path);
triplet_reinforced_paths.put(refined_triplet_path, new Pair<Integer>(1,1));
}
}
}
return(triplet_reinforced_paths);
}
/**
* given these final paths and their reads, print an ascii illustration of the read (pair path) coverage along each path
* @param FinalPaths
* @param PathReads
*/
private static void illustrateFinalPaths(
HashMap<List<Integer>, Pair<Integer>> FinalPaths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
for (List<Integer> path : FinalPaths.keySet())
{
debugMes("\nPATH: " + path, 15);
Integer supp = 0;
Integer totalCounts = 0;
HashMap<PairPath, Integer> containedReads = PathReads.get(path);
String ascii_illustration = getPathMappingAsciiIllustration(path, containedReads);
debugMes("\nPath Illustration:\n\n" + ascii_illustration + "\n", 5);
//TODO: enable printing at lower verbose level, but note that crazy long paths cause serious performance problems for generating these illustrations... some fine-tuning definitely required there.
}
}
/**
* Go over all final paths, and combine those that are too similar.
* @param graph
* @param FinalPaths
* @param PathReads
* @param topOrderInts
*/
/**
* check for similar paths that end at V, and start at different nodes
* remove the shortest of the two
* @param graph
* @param v
* @param Paths
* @param PathReads
* @param Extensions
* @param topOrderInts
*/
/*
private static List<List<Integer>> combineSimilarPathsThatEndAtV(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v,
HashMap<SeqVertex, List<List<Integer>>> Paths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads,
HashMap<List<Integer>, Boolean> Extensions) {
// new Throwable().printStackTrace();
int vertex_id = v.getID();
int total_num_paths = Paths.get(v).size();
debugMes("method: combineSimilarPathsThatEndAtV(" + vertex_id + ") with "+total_num_paths+ " paths", 10);
debugMes("paths are: "+Paths.get(v),17);
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
List<Integer> removedPathsIndices = new ArrayList<Integer>();
String path1S="", path2S="";
Iterator<List<Integer>> i1, i2;
int index1, index2, rIndex;
int pathCount1 = 0;
int pathCount2 = 0;
if (total_num_paths<=1)
return null;
// all-vs-all comparison among the paths ending at v
for (i1=Paths.get(v).iterator() ; i1.hasNext() ; )
{
List<Integer> path1 = i1.next();
path1S = getPathSeq(graph, path1);
index1 = path1S.length()-1;
pathCount1++;
pathCount2 = 0;
if (removedPathsIndices.contains(pathCount1)) {
continue;
}
boolean gotToi1 = false;
for (i2=Paths.get(v).iterator() ; i2.hasNext() ; )
{
List<Integer> path2 = i2.next();
pathCount2++;
debugMes("\r*V[" + vertex_id + "] Comparing " + total_num_paths + " paths, pairs:(" + pathCount1 + "," + pathCount2 + ") ", 15);
while (!gotToi1 && i2.hasNext())
{
if (path2.equals(path1))
gotToi1 = true;
path2 = i2.next();
pathCount2++;
}
if (path2.equals(path1))
break;
// one of these paths were removed already
if (removedPathsIndices.contains(pathCount2)) {
continue;
}
path2S = getPathSeq(graph, path2);
index2 = path2S.length()-1;
debugMes("checking for similarity the two paths: "+path1+
"(len="+path1S.length()+");"+path2+"(len="+path2S.length()+")",15);
if (twoPathsAreTooSimilar(graph, path1, path2))
{
debugMes("they are too similar!",15);
//remove the shorter path
rIndex = removeTheLesserSupportedPath(path1S,path2S,path1,path2,removeSimilarPaths,PathReads);
if (rIndex == 1)// the first path was removed
removedPathsIndices.add(pathCount1);
else
removedPathsIndices.add(pathCount2);
}
}
}
for (List<Integer> path2Remove : removeSimilarPaths)
{
debugMes("The path "+path2Remove+" was removed because it was too close to another path",12);
Paths.get(v).remove(path2Remove);
Extensions.remove(path2Remove);
}
return(removeSimilarPaths);
}
*/
/**
* compare the sequences of the two paths, and return true if they align with at most
* MAX_DIFFS_SAME_PATH differences or at least MIN_PERCENT_IDENTITY_SAME_PATH percent identity,
* provided no internal gap exceeds MAX_INTERNAL_GAP_SAME_PATH (see isThisTooSimilar).
* @param graph
* @param path1
* @param path2
* @return
*/
private static boolean twoPathsAreTooSimilar(
DirectedSparseGraph<SeqVertex,
SimpleEdge> graph,
List<Integer> path1,
List<Integer> path2
) {
debugMes("\n\n****\n\nchecking twoPathsAreTooSimilar (" + path1 + "," + path2 + ")\n****\n\n", 15);
if (! PairPath.haveAnyNodeInCommon(path1, path2)) {
debugMes("paths: " + path1 + path2 + " have no node in common, cannot be too similar.", 15);
return(false); // if no node in common, then they shouldn't be too similar.
}
AlignmentStats numTotalMismatchesAndGaps = getPrevCalcNumMismatches(graph, path1, path2);
int shorterLen = Math.min(getSeqPathLength(graph,path1),getSeqPathLength(graph,path2));
float path_per_id = 100 - (float)numTotalMismatchesAndGaps.mismatches/shorterLen * 100;
boolean tooSimilar = isThisTooSimilar(numTotalMismatchesAndGaps.mismatches, numTotalMismatchesAndGaps.max_internal_gap_length, path_per_id);
DecimalFormat df = new DecimalFormat("#.##");
debugMes("\n\n====\nRunning PATH alignment of : " + path1 + " to " + path2 + " :: numMM:" + numTotalMismatchesAndGaps.mismatches
+ ", max_internal_gap: " + numTotalMismatchesAndGaps.max_internal_gap_length
+ ", path_per_id = " + df.format(path_per_id) + ", tooSimilar: " + tooSimilar, 15);
debugMes(numTotalMismatchesAndGaps.toString(), 18);
// compare to doing a full sequence alignment:
if (false) {
String path1_seq = getPathSeq(graph, path1);
String path2_seq = getPathSeq(graph, path2);
Alignment alignment;
if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
alignment = NWalign.run_SW_alignment("A", path1_seq, "B", path2_seq, 4, -5, 10, 1);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
alignment = NWalign.run_NW_alignment("A", path1_seq, "B", path2_seq, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
}
AlignmentStats a = new AlignmentStats(alignment);
debugMes("\n\n====\nSEQUENCE_ALIGNMENT_RESULTS:\n" + a.toString(), 15);
debugMes (new jaligner.formats.Pair().format(alignment), 15);
}
return(tooSimilar);
}
/**
* for path1 and path2, find the last (most downstream) node that they share,
* by walking backwards over both paths and using the vertices' finishing-time order to keep the two walks in step.
* @param graph
* @param path1
* @param path2
* @return
*/
private static Integer findLastSharedNode(DirectedSparseGraph<SeqVertex,SimpleEdge> graph,
List<Integer> path1,
List<Integer> path2) {
path1 = PairPath.trimSinkNodes(path1);
path2 = PairPath.trimSinkNodes(path2);
if (path1.isEmpty() || path2.isEmpty()) {
return(-1);
}
List<SeqVertex> reversePath1 = getReverseSeqVertexPath(graph,path1);
List<SeqVertex> reversePath2 = getReverseSeqVertexPath(graph,path2);
Iterator<SeqVertex> p1_iter = reversePath1.iterator();
Iterator<SeqVertex> p2_iter = reversePath2.iterator();
SeqVertex p1_v = p1_iter.next();
SeqVertex p2_v = p2_iter.next();
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
while (p1_v != p2_v )
{
if (finishingTimeComparator.compare(p1_v,p2_v)>=0) {
if (p1_iter.hasNext())
p1_v = p1_iter.next();
else
break;
}
else if (p2_iter.hasNext())
p2_v = p2_iter.next();
else
break;
}
return (p1_v==p2_v)? p1_v.getID() : -1;
}
/**
* given the graph and a list of integers, return the reverse list of seqVertices
* @param graph
* @param path
* @return
*/
private static List<SeqVertex> getReverseSeqVertexPath(DirectedSparseGraph<SeqVertex,SimpleEdge> graph, List<Integer> path) {
List<SeqVertex> res = new ArrayList<SeqVertex>();
for (int i=path.size()-1; i>=0 ; i--) {
res.add(getSeqVertex(graph, path.get(i)));
}
return res;
}
/**
* given the key of the two paths, return their number of matches.
* If this calculation hasn't been done before, calc and save it.
* @param key
* @return
*/
private static AlignmentStats getPrevCalcNumMismatches (
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
List<Integer> path1, List<Integer> path2) {
debugMes("getPrevCalcNumMismatches: Path1: " + path1 + " Path2: " + path2, 15);
// Not penalizing end gaps
boolean is_at_start_of_graph = ( ((! path1.isEmpty()) && path1.get(0) == -1) || ( (! path2.isEmpty()) && path2.get(0) == -1) );
boolean is_at_end_of_graph = ( ((! path1.isEmpty()) && path1.get(path1.size()-1) == -2) || ( (! path2.isEmpty()) && (path2.get(path2.size()-1) == -2)) );
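// build an order-independent cache key: the lexically greater path string is placed first,
// so (path1,path2) and (path2,path1) resolve to the same NUM_MISMATCHES_HASH entry.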
String P1_s = path1+"";
String P2_s = path2+"";
Comparator<String> stringComp = String.CASE_INSENSITIVE_ORDER;
int compRes = stringComp.compare(P1_s, P2_s);
String key = (compRes>=0)? P1_s+";"+P2_s : P2_s+";"+P1_s;
if (NUM_MISMATCHES_HASH.containsKey(key)) {
AlignmentStats a = NUM_MISMATCHES_HASH.get(key);
// Already computed it, used cached value
debugMes("key: " + key + ", cached as: " + a.toString(), 15);
NUM_MISMATCHES_HASH.put(key, a);
return(a);
}
// both paths are effectively empty
else if ( (path1.isEmpty() || (path1.size() == 1 && path1.get(0) < 0))
&&
(path2.isEmpty() || (path2.size() == 1 && path2.get(0) < 0))
) {
AlignmentStats a = new AlignmentStats();
NUM_MISMATCHES_HASH.put(key, a);
return(a);
}
// paths are identical
else if (path1.equals(path2)) {
AlignmentStats a = new AlignmentStats();
// perfect matches, no gaps.
String path1s = getPathSeq(graph, path1);
a.matches = path1s.length();
a.alignment_length = path1s.length();
debugMes("paths are equivalent: Path1:" + path1 + ", Path2:" + path2 + " and have alignment stats:" + a.toString(), 15);
NUM_MISMATCHES_HASH.put(key, a); // cache results
return(a);
}
// empty path 1, but have path 2
else if (path1.isEmpty() || (path1.size() == 1 && path1.get(0) < 0)) {
AlignmentStats a = new AlignmentStats();
if (! (is_at_start_of_graph || is_at_end_of_graph)) {
Integer path2_seq_len = getPathSeq(graph, path2).length();
a.max_internal_gap_length = path2_seq_len;
a.gaps = path2_seq_len;
}
debugMes("empty path1 vs " + path2 + " = " + a.toString(), 15);
return(a);
}
// empty path 2, but have path 1
else if (path2.isEmpty() || (path2.size() == 1 && path2.get(0) < 0)) {
AlignmentStats a = new AlignmentStats();
if (! (is_at_start_of_graph || is_at_end_of_graph)) {
Integer path1_seq_len = getPathSeq(graph, path1).length();
a.max_internal_gap_length = path1_seq_len;
a.gaps = path1_seq_len;
}
debugMes("path1 : " + path1 + " vs empty path2 = " + a.toString(), 15);
return(a);
}
/*
else if (path1.get(path1.size() -1) == path2.get(path2.size() -1) ) {
// last elements are the same
debugMes("paths have same last node: Path1:" + path1 + ", Path2:" + path2, 15);
AlignmentStats nodeAlignStats = getPrevCalcNumMismatches(graph,
path1.subList(path1.size()-1, path1.size()),
path2.subList(path2.size()-1, path2.size()));
debugMes("Scores for last node comparison: " + path1 + path2 + nodeAlignStats.toString(), 15);
// get prefix alignment stats
List<Integer> subP1_list = path1.subList(0, path1.size()-1);
List<Integer> subP2_list = path2.subList(0, path2.size()-1);
AlignmentStats remainingAlignmentStats = getPrevCalcNumMismatches(graph, subP1_list, subP2_list);
debugMes("prefix alignment stats for: " + subP1_list + subP2_list + remainingAlignmentStats.toString(), 15);
remainingAlignmentStats = remainingAlignmentStats.increment_alignment_stats(nodeAlignStats);
debugMes("summing the alignment scores for : " + path1 + path2 + remainingAlignmentStats.toString(), 15);
NUM_MISMATCHES_HASH.put(key, remainingAlignmentStats);
return(remainingAlignmentStats);
}
*/
else {
// No empty path, and alignments haven't been scored/cached yet.
Integer lastSharedNode = findLastSharedNode(graph,path1,path2);
if (lastSharedNode!=-1) { // there is a shared node
// run sequence comparison on suffix part, recurse for prefix part if node in common.
debugMes("Paths " + path1 + path2 + " share node " + lastSharedNode, 15);
Integer p1V2index = path1.indexOf(lastSharedNode);
Integer p2V2index = path2.indexOf(lastSharedNode);
// get path up to but not including the shared node.
List<Integer> prefix_path_1 = path1.subList(0, p1V2index);
List<Integer> prefix_path_2 = path2.subList(0, p2V2index);
debugMes("getting prefix alignment for " + prefix_path_1 + prefix_path_2, 15);
// recurse to get the prefix alignment
AlignmentStats prefixStats = getPrevCalcNumMismatches(graph, prefix_path_1, prefix_path_2);
debugMes("path prefix alignment stats for: " + prefix_path_1 + " and " + prefix_path_2 + " : " + prefixStats.toString(), 15);
// get alignment for shared node pair
List<Integer> shared_node_path1 = path1.subList(p1V2index, p1V2index+1);
List<Integer> shared_node_path2 = path2.subList(p2V2index, p2V2index+1);
AlignmentStats sharedNodeStats = getPrevCalcNumMismatches(graph, shared_node_path1, shared_node_path2);
// add matches for the current alignment
List<Integer> suffix_path_1 = new ArrayList<Integer>();
List<Integer> suffix_path_2 = new ArrayList<Integer>();
if (p1V2index < path1.size()-1) {
suffix_path_1 = path1.subList(p1V2index+1,path1.size());
}
if (p2V2index < path2.size()-1) {
suffix_path_2 = path2.subList(p2V2index+1,path2.size());
}
debugMes("getting suffix alignment for: " + suffix_path_1 + suffix_path_2, 15);
AlignmentStats suffixStats = getPrevCalcNumMismatches(graph, suffix_path_1, suffix_path_2);
debugMes("suffix alignment stats: " + suffixStats, 15);
suffixStats = suffixStats.increment_alignment_stats(sharedNodeStats);
suffixStats = suffixStats.increment_alignment_stats(prefixStats);
debugMes("combining suffix and prefix alignment stats: " + suffixStats, 15);
NUM_MISMATCHES_HASH.put(key, suffixStats);
return(suffixStats);
}
else {
// no commonly shared node.
// do the actual alignment.
debugMes("-no shared node, alignment not cached, computing: " + path1 + " to " + path2, 12);
String path1s = getPathSeq(graph, path1);
String path2s = getPathSeq(graph, path2);
debugMes("-path1s length: " + path1s.length() + ", path2s length: " + path2s.length(), 12);
//TODO: If one path sequence is a substring of the other, no reason to do an alignment.
// this can be known based on the path list comparison, without needing to do a string comparison.
//align the two seqs
AlignmentStats stats;
if ( (path1s.length() > MAX_SEQ_LEN_DP_ALIGN && path2s.length() > MAX_SEQ_LEN_DP_ALIGN)
||
path1s.length() > 100000 // problems can arise in the alignment code if either seq is longer
||
path2s.length() > 100000
) {
// heuristic... if seqs are this long, it's highly unlikely they'd be too similar anyway
stats = ZipperAlignment.doZipperAlignment("A", path1s, "B", path2s);
}
// very short sequence in path1 or path2
else if ( (path1s.length() < 10 && path2s.length() > 20)
||
(path1s.length() > 20 && path2s.length() < 10) )
{
if (is_at_start_of_graph) {
stats = ZipperAlignment.doZipperAlignmentAnchorRight("A", path1s, "B", path2s);
}
else if (is_at_end_of_graph) {
stats = ZipperAlignment.doZipperAlignmentAnchorLeft("A", path1s, "B", path2s);
}
else {
// let ZipperAlignment try to figure it out
stats = ZipperAlignment.doZipperAlignment("A", path1s, "B", path2s);
}
}
else if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
Alignment alignment = NWalign.run_SW_alignment("A", path1s, "B", path2s, 4, -5, 10, 1);
debugMes (new jaligner.formats.Pair().format(alignment), 17);
stats = new AlignmentStats(alignment);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
Alignment alignment = NWalign.run_NW_alignment("A", path1s, "B", path2s, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
debugMes (new jaligner.formats.Pair().format(alignment), 17);
stats = new AlignmentStats(alignment);
}
int alignment_length = stats.alignment_length;
int matches = stats.matches;
int mismatches = stats.mismatches;
int gaps = stats.gaps;
int right_gap_len = stats.right_gap_length;
int left_gap_len = stats.left_gap_length;
int max_internal_gap_length = stats.max_internal_gap_length;
float percent_A_in_alignment = (float) stats.get_count_of_bases_in_aligned_region("A") / (path1s.length()) * 100;
float percent_B_in_alignment = (float) stats.get_count_of_bases_in_aligned_region("B") / (path2s.length()) * 100;
debugMes("Percent A in alignment = " + stats.get_count_of_bases_in_aligned_region("A") + " / " + path1s.length() + " = " + percent_A_in_alignment + "%",15);
debugMes("Percent B in alignment = " + stats.get_count_of_bases_in_aligned_region("B") + " / " + path2s.length() + " = " + percent_B_in_alignment + "%",15);
float max_percent_aligned = Math.max(percent_A_in_alignment, percent_B_in_alignment);
float percent_identity = (float)matches/(matches+mismatches) * 100;
float percent_gapped = (float)gaps/alignment_length * 100;
debugMes("Matches: " + matches + ", Mismatches: " + mismatches + ", gaps: " + gaps + ", align_len: " + alignment_length,15);
debugMes("percent_identity: " + percent_identity + ", percent_gapped: " + percent_gapped,15);
debugMes("max_percent_aligned: " + max_percent_aligned,15);
debugMes("max internal gap length: " + max_internal_gap_length + "\n",15);
int total_significant_diffs = 0;
if (is_at_start_of_graph || is_at_end_of_graph) {
total_significant_diffs = mismatches + gaps;
debugMes("(start of graph) Total number of significant alignment diffs = (mismatches: " + mismatches
+ " + internal_gaps: " + gaps
+ " + right_gap_len: "+ right_gap_len
+ " = " + total_significant_diffs, 15);
// end gaps at the graph boundary are not penalized: zero out the boundary-side gap, and fold the opposite-side gap into the internal gap stats
if (is_at_start_of_graph) {
stats.left_gap_length = 0;
if (! is_at_end_of_graph) {
// deal with right-gap in alignment stats
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.right_gap_length);
total_significant_diffs += stats.right_gap_length;
stats.gaps += stats.right_gap_length;
}
}
if (is_at_end_of_graph) {
stats.right_gap_length = 0;
if (! is_at_start_of_graph) {
// deal with left-gap in alignment stats
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.left_gap_length);
total_significant_diffs += stats.left_gap_length;
stats.gaps += stats.left_gap_length;
}
}
}
else {
total_significant_diffs = mismatches + gaps + left_gap_len + right_gap_len; // all gaps count TODO: ignore right gap length if at end of graph
debugMes("(internal of graph) Total number of significant alignment diffs = (mismatches: " + mismatches
+ " + internal_gaps: " + gaps
+ " + left_gap_len: " + left_gap_len
+ " + right_gap_len: "+ right_gap_len
+ " = " + total_significant_diffs, 15);
// adjust max internal gap length value based on left or right gap lengths, since this is an internal node
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.left_gap_length);
stats.max_internal_gap_length = Math.max(stats.max_internal_gap_length, stats.right_gap_length);
}
stats.total_not_matched = total_significant_diffs; // update based on above.
debugMes("AlignmentStats: " + stats.toString(), 15);
NUM_MISMATCHES_HASH.put(key, stats);
return NUM_MISMATCHES_HASH.get(key);
}
}
}
/**
* given all the params, decide if the two seqs are too similar
* FIXME - find a better criterion.
* @param numMM - number of differences (mismatches plus gaps)
* @param max_internal_gap_length
* @param percent_identity
* @return
*/
private static boolean isThisTooSimilar(int numMM, int max_internal_gap_length, float percent_identity) { // number of differences, not just mismatches, includes gaps
DecimalFormat df = new DecimalFormat("#.##");
boolean too_similar = ( max_internal_gap_length <= MAX_INTERNAL_GAP_SAME_PATH
&&
( numMM <= MAX_DIFFS_SAME_PATH || percent_identity >= MIN_PERCENT_IDENTITY_SAME_PATH));
debugMes("the two paths have these stats: numMM="+numMM
+ ", max_internal_gap_length=" + max_internal_gap_length
+ ", identity="+df.format(percent_identity)+"%"
+ ", tooSimilar: " + too_similar,15);
return (too_similar); // i.e. they are too similar; the logic is easier to reason about in terms of the paths not being too different.
}
/**
* given two paths (and their seqs), mark the less read-supported path for removal.
* if read support is equal, remove the one with the shorter sequence.
* @param path1S
* @param path2S
* @param path1
* @param path2
* @param removeSimilarPaths
* @param PathReads
*/
private static int removeTheLesserSupportedPath(String path1S, String path2S,
List<Integer> path1, List<Integer> path2, List<List<Integer>> removeSimilarPaths,
HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
List<Integer> path2remove,path2keep;
int sum1=0,sum2=0;
if (PathReads.get(path1)!=null)
for (Integer s : PathReads.get(path1).values())
sum1+=s;
if (PathReads.get(path2)!=null)
for (Integer s : PathReads.get(path2).values())
sum2+=s;
debugMes("Scoring paths based on read support. Path: " + path1 + " has " + sum1 + " read support, and " + path2 + " has " + sum2 + " read support.", 15);
if (sum1<sum2)
{
path2remove = path1;
path2keep = path2;
}
else if (sum1 > sum2)
{
path2remove = path2;
path2keep = path1;
}
else {
// same read support, so choose the longer sequence over the shorter one.
if (path1S.length() >= path2S.length())
{
path2remove = path2;
path2keep = path1;
}
else
{
path2remove = path1;
path2keep = path2;
}
}
debugMes("removing path "+path2remove+" and keeping path "+path2keep,15);
if (!removeSimilarPaths.contains(path2remove))
removeSimilarPaths.add(path2remove);
if (PathReads.get(path2remove)!=null)
{
if (PathReads.get(path2keep)==null)
PathReads.put(path2keep, new HashMap<PairPath,Integer>());
// no longer assuming ownership of the other's reads, as this causes problems!
//PathReads.get(path2keep).putAll(PathReads.get(path2remove));
PathReads.remove(path2remove);
}
return (path2remove==path1)? 1:2;
}
/**
* Given this path, ask whether it has enough support, either by last triplet, or by length
* @param readsOfPathUntilV - reads of this path, so far
* @param path - the path so far
* @param u - the extension to the path
* @param graph
* @param dijkstraDisWoVer
* @return
*/
private static boolean pathHasEnoughReadSupport(
HashMap<PairPath, Integer> readsOfPathUntilV,
List<Integer> path,
SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
List<Integer> pathWU = new ArrayList<Integer>(path);
pathWU.add(u.getID());
List<Integer> subPath = new ArrayList<Integer>();
subPath.add(0, u.getID());
SeqVertex v = getSeqVertex(graph,path.get(path.size()-1));
if (LENIENT_PATH_CHECKING) {
// nodes u and v exist within a read pair path, and the read(s) are compatible with this tentative path.
return(pathHasTerminalCompatibleReadSupport(path, v, u, graph, readsOfPathUntilV, dijkstraDisWoVer));
}
else if (USE_TRIPLETS) // never do it this way; option turned off permanently but retained for legacy reasons.
{
subPath.add(0, v.getID());
if (path.size()>1)
subPath.add(0,path.get(path.size()-2));
return (subPathHasEnoughReadSupport(pathWU, readsOfPathUntilV, subPath, graph, dijkstraDisWoVer));
}
else{
// default method
int lookBack = PATH_REINFORCEMENT_DISTANCE;
int lenSoFar = u.getNameKmerAdj().length();
for (int j = path.size()-1 ; j>=0 && lenSoFar < lookBack; j--) {
SeqVertex vLast = getSeqVertex(graph, path.get(j));
subPath.add(0, vLast.getID());
lenSoFar += vLast.getNameKmerAdj().length();
}
return (subPathHasEnoughReadSupport(pathWU, readsOfPathUntilV, subPath, graph, dijkstraDisWoVer));
}
}
/**
* Check that the given sub-path has N supporting reads or more.
* A supporting read is a read that enforces this triplet
* @param readsOfPathUntilV
* @param subPath
* @param graph
* @param dijkstraDisWoVer
* @return
*/
private static boolean subPathHasEnoughReadSupport(
List<Integer> fullPathWU,
HashMap<PairPath, Integer> readsOfPathUntilV,
List<Integer> subPath,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
int local_debug_level = 17;
debugMes("-checking if subPath has enough read support. Exploring sub path: " + subPath, local_debug_level);
// note, subpath contains U as final node.
Integer last_subpath_id = subPath.get(subPath.size()-1);
Integer first_subpath_id = subPath.get(0);
if (first_subpath_id < 0)
first_subpath_id = subPath.get(1); // don't want a sink node
int numberReadsSupporting = 0;
for (PairPath pPath : readsOfPathUntilV.keySet())
{
debugMes("-readsOfPathUntilV: " + pPath, local_debug_level);
boolean thisReadOK = true;
if (ORIGINAL_PATH_EXTENSIONS) {
// Examining within the context of the entire graph
for (Integer vTempID : subPath) {
if (thisReadOK)
thisReadOK = thisReadOK &&
readEnforcesVertex(graph, dijkstraDisWoVer, pPath, getSeqVertex(graph, vTempID));
}
}
else {
if (! (
pPath.containsID(last_subpath_id)
//&& (first_subpath_id == -1 || pPath.containsID(first_subpath_id) )) // triplet came in with a sink node attached, not a true triplet //FIXME: should check this earlier than this routine.
&& pPath.isCompatible(fullPathWU)
&& pPath.node_is_contained_or_possibly_in_gap(first_subpath_id, graph, dijkstraDisWoVer)
)
)
{
// require pPath contains the first and last ID of the subpath
thisReadOK = false;
}
debugMes("\t-checking if pp: " + pPath + " supports extension of " + fullPathWU + " => " + thisReadOK, local_debug_level);
/*
// COMPATIBLE_PATH_EXTENSIONS MODE, NOW THE DEFAULT
boolean subPathContained = pPath.containsSubPath(subPath);
boolean pathWUcompatible = pPath.isCompatible(fullPathWU);
debugMes("CPATEXT: subPath: " + subPath + " contained by read: " + pPath.get_paths() + " : " + subPathContained, local_debug_level);
debugMes("CPATEXT: pathWU: " + fullPathWU + " compatible with read: " + pPath.get_paths() + " : " + pathWUcompatible, local_debug_level);
thisReadOK = (subPathContained && pathWUcompatible);
*/
}
debugMes("examining subPath: " + subPath + " for reinforcement by read: " + pPath.get_paths() + " :" + thisReadOK, local_debug_level);
if (thisReadOK)
{
numberReadsSupporting+=readsOfPathUntilV.get(pPath);
debugMes("the read "+pPath+"("+readsOfPathUntilV.get(pPath)+") enforces the sub-path ("+subPath+")",local_debug_level);
if (numberReadsSupporting >= MIN_READ_SUPPORT_THR) {
break; // no reason to spend time searching more.
}
} else
debugMes("the read "+pPath+"("+readsOfPathUntilV.get(pPath)+") does not enforce the sub-path ("+subPath+")",local_debug_level);
}
debugMes("-found: " + numberReadsSupporting + " reads supporting subpath.", local_debug_level);
boolean res = (numberReadsSupporting>=MIN_READ_SUPPORT_THR);
if (res)
debugMes("the sub-path ("+subPath+") has PASSED", local_debug_level);
else
debugMes("the sub-path ("+subPath+") has NOT PASSED",local_debug_level);
return res;
}
/**
* Check whether the given read (pair path) enforces the vertex v: the read contains v,
* or removing v would disconnect the read from the root/terminal, or would break the connection
* between its two mate paths.
* @param graph
* @param dijkstraDisWoVer
* @param pPath
* @param v
* @return
*/
private static boolean readEnforcesVertex(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer,
PairPath pPath, SeqVertex v) {
// read contains vertex v
if (v==null || pPath.containsID(v.getID()) || v.equals(ROOT) || v.equals(T_VERTEX))
return true;
// is path from the root (sink) to first vertex of the read disabled by removing vertex v of subpath?
SeqVertex firstV = getSeqVertex(graph, pPath.getFirstID());
if (dijkstraDisWoVer.getDistanceWoVer(ROOT, firstV,v)==null)
return true;
// is path from last vertex of the read to the terminal node (sink) disabled by removing vertex v of subpath?
SeqVertex lastV = getSeqVertex(graph, pPath.getLastID());
if (dijkstraDisWoVer.getDistanceWoVer(lastV, T_VERTEX,v)==null)
return true;
if (pPath.hasSecondPath())
{
// is path from beginning to end of read's pair-path disrupted by removing vertex v of subpath?
//last of first path
lastV = getSeqVertex(graph, pPath.getLastID_path1());
//first of second path
firstV = getSeqVertex(graph, pPath.getFirstID_path2());
if (dijkstraDisWoVer.getDistanceWoVer(lastV,firstV,v)==null)
return true;
}
return false;
}
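/**
* LENIENT_PATH_CHECKING support test: count the reads that contain the terminal vertex pair (v,u)
* as a subpath and are compatible with the growing path; return true if at least
* MIN_READ_SUPPORT_THR such reads exist.
* @param path the growing path ending at v
* @param v last vertex of the current path
* @param u candidate extension vertex
* @param graph
* @param readsOfPathUntilV
* @param dijkstraDisWoVer
* @return
*/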
private static boolean pathHasTerminalCompatibleReadSupport(
List<Integer> path,
SeqVertex v, SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<PairPath, Integer> readsOfPathUntilV,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
List<Integer> tentativePath = new Vector<Integer>(path);
tentativePath.add(u.getID());
Integer v_id = v.getID();
Integer u_id = u.getID();
List<Integer> subPath = new ArrayList<Integer>();
subPath.add(v_id);
subPath.add(u_id);
int num_compatible_paths = 0;
for (PairPath pPath : readsOfPathUntilV.keySet()) {
//if (pPath.containsID(v_id) && pPath.containsID(u_id)) {
if (pPath.containsSubPath(subPath)) {
debugMes("Checking for compatibility. Path: " + tentativePath + " with " + pPath, 18);
// got both terminal path vertices. Check for read compatibility.
if (pPath.isCompatible(path)) {
debugMes("\tPaths ARE compatible.", 18);
num_compatible_paths += readsOfPathUntilV.get(pPath);
}
}
}
debugMes("\t" + num_compatible_paths + " read (pair) paths were found to be compatible.", 18);
if (num_compatible_paths >= MIN_READ_SUPPORT_THR) { // note, not using this as triplet support here.
//TODO: rename triplet support var
return(true);
}
else {
return(false);
}
}
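/**
* LENIENT extension check for the vertex pair (v,u): returns true if, among the reads of the
* current path, one pair path ends exactly at v and another (possibly the same) starts with v
* followed by u, i.e. the junction is supported even though no single read spans it.
* @param path
* @param v
* @param u
* @param graph
* @param readsOfPathUntilV
* @param dijkstraDisWoVer
* @return
*/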
private static boolean vertexPairHasDiscontinuousPathSupport(
List<Integer> path,
SeqVertex v, SeqVertex u,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<PairPath, Integer> readsOfPathUntilV,
DijkstraDistanceWoVer<SeqVertex, SimpleEdge> dijkstraDisWoVer) {
debugMes("\n\nCurrent path being checked for LENIENT path extension: " + path, 18);
debugMes("Performing LENIENT path checking between (v,u):\nv: " + v + "\nu: " + u, 18);
// look for u-v where u is last node of one pairpath, and v-u is the start of another path
// find a pairpath that ends in v
// find another pairpath that starts with v-u
boolean last_vertex_found_as_v = false;
boolean first_vertices_found_as_vu = false;
for (PairPath pPath : readsOfPathUntilV.keySet()) {
debugMes("\t-pairPath: " + pPath, 18);
SeqVertex last_vertex = getSeqVertex(graph, pPath.getLastID());
debugMes("\t-Last vertex: " + last_vertex.getID(), 18);
if (last_vertex.equals(v)) {
last_vertex_found_as_v = true;
debugMes("\t\t-found last vertex as (v)", 18);
}
List<Integer> first_path = pPath.getPath1();
if (first_path.size() > 1) {
SeqVertex first_vertex = getSeqVertex(graph, first_path.get(0));
SeqVertex second_vertex = getSeqVertex(graph, first_path.get(1));
debugMes("\t-First,Second: " + first_vertex.getID() + "," + second_vertex.getID(), 18);
if (first_vertex.equals(v) && second_vertex.equals(u)) {
first_vertices_found_as_vu = true;
debugMes("\t\t-found first vertices as (vu)", 18);
}
}
if (first_vertices_found_as_vu && last_vertex_found_as_v) {
debugMes("\t* FOUND LENIENT EXTENSION", 18);
return(true);
}
}
debugMes("\t* no LENIENT extension possible", 18);
return(false); // no evidence for discontinuous support.
}
/**
* Check whether the pairPath is consistent with the node i
* @param pPath
* @param i
* @param graph
* @param dijkstraDis
* @return
*/
private static boolean readIsConsistentWithNode(PairPath pPath, Integer i,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
// if (isReadCircular(graph, pPath))
// return false;
if (pPath.containsID(i) || i<0)
return true;
SeqVertex vI = getSeqVertex(graph, i);
SeqVertex firstV = getSeqVertex(graph, pPath.getFirstID());
// i --> firstV
if (SeqVertex.isAncestral(vI, firstV, dijkstraDis)>0)
return true;
SeqVertex lastV = getSeqVertex(graph, pPath.getLastID());
// lastV --> i
if (SeqVertex.isAncestral(lastV,vI,dijkstraDis)>0)
return true;
if (pPath.hasSecondPath())
{
// see if node could be internal to the pair path
//last of first path
lastV = getSeqVertex(graph, pPath.getLastID_path1());
//first of second path
firstV = getSeqVertex(graph, pPath.getFirstID_path2());
// lastV --> i --> firstV
if (SeqVertex.isAncestral(lastV,vI,dijkstraDis)>0 && SeqVertex.isAncestral(vI, firstV, dijkstraDis)>0)
return true;
}
// not compatible if got here.
return false;
}
/**
* given the graph and a list of nodes, calc the length of the seq of this path
* @param graph
* @param path
* @return
*/
private static int getSeqPathLength(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, List<Integer> path) {
/*
int len = 0;
for (Integer vid : path)
if (vid>=0)
len +=getSeqVertex(graph, vid).getNameKmerAdj().length();
return len;
*/
String pathSeq = getPathSeq(graph, path);
return(pathSeq.length());
}
/**
* return the number of paths
* @param paths
* @return number of paths
*/
private static int getPathsSize(
HashMap<SeqVertex, List<List<Integer>>> paths) {
int res = 0;
for (SeqVertex key : paths.keySet())
{
res+=paths.get(key).size();
}
return res;
}
/**
* returns true iff these two nucleotides are equal
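* If USE_DEGENERATE_CODE is enabled, IUPAC ambiguity codes are also accepted via DEGENERATE_CODE_REV
* (e.g., 'R' matching 'A' or 'G', assuming the standard IUPAC mapping is loaded there).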
* @param n1
* @param n2
* @return
*/
private static boolean areTwoNucleotidesEqual(String n1, String n2)
{
if (n1.equals(n2))
return true;
if (USE_DEGENERATE_CODE &&
((DEGENERATE_CODE_REV.containsKey(n1) && DEGENERATE_CODE_REV.get(n1).contains(n2)) ||
(DEGENERATE_CODE_REV.containsKey(n2) && DEGENERATE_CODE_REV.get(n2).contains(n1))))
return true;
return false;
}
/**
* return the degenerate code representation of the given key
* @param key
* @return
* @throws Exception
*/
private static String getDegenerateRepresentation(String key) throws Exception {
if (DEGENERATE_CODE.containsKey(key))
return DEGENERATE_CODE.get(key);
else
throw new Exception("the letters "+key+" do not have a degenerate representation\n");
}
/**
* sum the counts of all the reads in this hash
* @param readHash
* @return
*/
private static Integer getSuppCalculation(HashMap<PairPath, Integer> readHash) {
Integer res = 0;
for (PairPath key : readHash.keySet())
res = res + readHash.get(key);
return res;
}
/**
* Given the new path (with u), and the set of reads that supported the path until v
* update the set of reads that support the new path
* @param PathReads
* @param pathContainedReads
* @param pathWu
* @param readsOfPathUntilV
* @param i
* @param dijkstraDis
* @param graph
*/
private static void updateReadsOfPath(HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads,
HashMap<List<Integer>, HashSet<PairPath>> PathContainedReads, List<Integer> pathWu,
HashMap<PairPath, Integer> readsOfPathUntilV, Integer i, DirectedSparseGraph<SeqVertex, SimpleEdge> graph, DijkstraDistance<SeqVertex, SimpleEdge> dijkstraDis) {
debugMes("updateReadsOfPath: " + pathWu, 17);
List<Integer> pathMinusU = new ArrayList<Integer>(pathWu);
pathMinusU.remove(pathMinusU.size()-1); // remove the U of the pathWu
// init read path list as needed.
if (!PathReads.containsKey(pathWu))
PathReads.put(pathWu, new HashMap<PairPath,Integer>());
if (! PathContainedReads.containsKey(pathWu)) {
PathContainedReads.put(pathWu, new HashSet<PairPath>());
}
int count_total = 0;
int count_contained_propagated = 0;
for (PairPath pPath : readsOfPathUntilV.keySet())
{
count_total++;
if (!PathReads.get(pathWu).containsKey(pPath)) { // only if this read doesn't exist in the PathReads for this pathWu
// if this read is consistent with pathWu, then add it
//if (readIsConsistentWithNode(pPath,i,graph,dijkstraDis))
if (PathContainedReads.get(pathMinusU).contains(pPath)) {
// then PathWU must contain it as well.
PathContainedReads.get(pathWu).add(pPath);
PathReads.get(pathWu).put(pPath,readsOfPathUntilV.get(pPath));
//debugMes("path is contained by pathMinusU: " + pPath, 10);
count_contained_propagated++;
}
else {
if (pPath.isCompatible(pathWu)) { // semi-expensive operation
if (pPath.isCompatibleAndContainedBySinglePath(pathWu)) {
PathContainedReads.get(pathWu).add(pPath);
}
debugMes("read "+pPath+" is consistent with "+i, 17);
PathReads.get(pathWu).put(pPath,readsOfPathUntilV.get(pPath));
}else{
debugMes("read "+pPath+" is not consistent with "+i, 17);
}
}
}
}
float pct_contained_propagated = (float) count_contained_propagated/count_total*100;
debugMes("pct_contained_propagated: " + pct_contained_propagated + "%", 10);
}
// /**
// * return true iff this read is circular.
// * A read is considered circular if its gap includes a circle
// * (the vertex at the end of path1 is inside a circle or the first vertex of path 2 is inside a circle).
// * @param graph
// * @param readPath
// * @return
// */
// private static boolean isReadCircular(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, PairPath readPath)
// if (!readPath.hasSecondPath())
// return false;
// if (getSeqVertex(graph, readPath.getLastID()).isInCircle() || //lastID of first path is circular
// getSeqVertex(graph, readPath.getFirstID_path2()).isInCircle()) // firstID of second path is circular
// debugMes("the read "+readPath+" is circular",10);
// return true;
// } else
// return false;
/**
* Return the reads, hashed by their starting vertex
* @param graph
* @param filename
* @param originalVerIDsMapping
* @param rootIDs
* @param originalGraphKmerToNodeID
* @return
* @throws IOException
*/
private static HashMap<String, List<Read>> getReadStarts(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
String filename,
HashMap<Integer, LocInGraph> originalVerIDsMapping,
Vector<Integer> rootIDs,
HashMap<String, Integer> originalGraphKmerToNodeID)
throws IOException {
BufferedReader fileB = new BufferedReader(new FileReader(filename));
HashMap<String, List<Read>> readNameHash = new HashMap<String, List<Read>>();
String l = fileB.readLine(); // read header of component
int numReadsNotMapped = 0;
int numReadsMapped = 0;
int line_counter = 0;
while (fileB.ready())
{
l = fileB.readLine();
if (l.isEmpty())
continue;
line_counter++;
if (line_counter % 1000 == 0 && BFLY_GLOBALS.VERBOSE_LEVEL >= 10)
System.err.print("\rmapped read [" + line_counter + "]");
else if (BFLY_GLOBALS.VERBOSE_LEVEL >= 11) {
System.err.print("\rmapped read [" + line_counter + "]");
}
// Component 0
// >@42MRYAAXX100104:7:100:1000:103#0 11 101393 36 101418 GAAAGACTGTCACCCTTGAGGTGGAGTCCTCTGACACTATTGACAATGTCAAGAGCAAAATCCAAGACAAGGAAGG
debugMes("Read: " + l, 20);
String[] fields = l.split("\t");
fields[0] = fields[0].replaceFirst(">", "");
List<Integer> pathIDS = null;
Read r = new Read();
pathIDS = readAndMapSingleRead(fields,originalVerIDsMapping,graph,r,false,originalGraphKmerToNodeID);
//debugMes("Threaded Read As: " + r.getName() + " : " + pathIDS, 19);
if (pathIDS==null || (pathIDS!=null && pathIDS.isEmpty()))
{
numReadsNotMapped++;
debugMes("Read could not be threaded: " + r.getName(), 12);
}else
{
//add to readNameHash
if (!readNameHash.containsKey(r.getName()))
readNameHash.put(r.getName(), new ArrayList<Read>());
readNameHash.get(r.getName()).add(r);
numReadsMapped++;
//System.err.println(r.getName());
debugMes("Threaded Read as: " + r.getName() + " : " + pathIDS, 17);
debugMes("ReadPath@Init: " + r.getName() + " : " + pathIDS, 12);
}
}
// debugMes("number of reads not found in graph = "+numReadsNotMapped +" of a total of "+(numReadsNotMapped+numReadsMapped),10);
debugMes("number of reads threaded = "+numReadsMapped
+" (from total of "+(numReadsNotMapped+numReadsMapped)+") which came from "
+ readNameHash.keySet().size() + " pairs",10);
if (numReadsNotMapped > .5*(numReadsNotMapped+numReadsMapped))
debugMes("PROBLEM: less than half of the reads were mapped to this graph ("
+numReadsMapped+"/"+(numReadsNotMapped+numReadsMapped)+")",10);
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 18) {
for (String readName : readNameHash.keySet()) {
String descr = "Read name to pairing info: " + readName + " => ";
List<Read> read_list = readNameHash.get(readName);
for (Read r : read_list) {
descr += r.getPathIDs();
}
debugMes(descr, 15);
}
}
return readNameHash;
}
/**
* given this read, try to map it to the graph. if rev == true, do it in reverse.
* @param fields
* @param originalVerIDsMapping
* @param graph
* @param r
* @param rev
* @param originalGraphKmerToNodeID
* @return
*/
private static List<Integer> readAndMapSingleRead(String[] fields,
HashMap<Integer, LocInGraph> originalVerIDsMapping,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Read r, boolean rev,
HashMap<String, Integer> originalGraphKmerToNodeID) {
List<Integer> pathIDS = new ArrayList<Integer>();
LocInGraph fromV;
Integer startInRead,endInRead,fromOrigV;
String name;
String seq;
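// tab-delimited read-mapping record, as consumed below:
// fields[0]=read name, fields[1]=start offset in read, fields[2]=original anchor node ID,
// fields[3]=end anchor position (KMER_SIZE is added below), fields[6]=read sequence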
name = fields[0];
if (! TREAT_PAIRS_AS_SINGLE) {
if (name.endsWith("/1") || name.endsWith("/2")
|| name.endsWith("\1") || name.endsWith("\2")
|| name.endsWith(":1") || name.endsWith(":2")
)
name = name.substring(0, name.length()-2);
}
startInRead = Integer.parseInt(fields[1]);
//endInRead = Integer.parseInt(fields[3])+KMER_SIZE-1;
endInRead = Integer.parseInt(fields[3])+KMER_SIZE; //FIXME: should be as above, but chrysalis appears to be consistently off by one here...
fromOrigV = Integer.parseInt(fields[2]);
fromV = originalVerIDsMapping.get(fromOrigV);
seq = fields[6]; //there is an empty field before the seq.
r.init(name,seq, fromV, startInRead, endInRead, pathIDS);
if (endInRead >= seq.length()) {
debugMes("read " + name + " has sequence length that is shorter than supposed endInRead marking(" + endInRead + "): " + seq, 0);
return pathIDS;
}
debugMes("Read: " + name + " has start: " + startInRead + ", end: " + endInRead + " and sequence: " + seq, 20);
seq = seq.substring(startInRead, endInRead+1);
debugMes("after extracting substring: " + seq, 20);
// in case original node ID was trimmed from graph, try anchoring the sequence from the first
// recognizable retained node ID
if (fromV == null) {
debugMes("Original node ID : " + fromOrigV + " no longer exists ... walking the sequence to try to anchor it to the refined graph:", 20);
for (int i = 1 + 1; i <= seq.length() - KMER_SIZE; i++) {
String kmer = seq.substring(i, i+KMER_SIZE);
if (originalGraphKmerToNodeID.containsKey(kmer)) {
int ID = originalGraphKmerToNodeID.get(kmer);
fromV = originalVerIDsMapping.get(ID);
if (fromV != null) {
debugMes("Anchored read to graph at position " + (i + startInRead) + " with kmer " + kmer, 20);
seq = seq.substring(i);
break;
}
}
}
if (fromV != null) {
debugMes("recovered mapping of read " + name, 20);
}
else {
debugMes("couldn't recover mapping of read: " + name, 20);
}
}
if (fromV!=null)// && toV!=null)
{
Path_n_MM_count best_path = findPathInGraph(graph,seq,fromV,name);
if (best_path != null) {
pathIDS = best_path.path;
//System.err.println("read path: " + pathIDS);
if (READ_END_PATH_TRIM_LENGTH > 0) {
// do some read path trimming at beginning and end of path if little support
pathIDS = best_path.get_trimmed_path(READ_END_PATH_TRIM_LENGTH);
}
if (! pathIDS.isEmpty()) {
r.init(name,seq, fromV, startInRead, endInRead, pathIDS);
String decorator = (pathIDS.size() < best_path.path.size()) ? " ****** " : "";
debugMes("Read " + name + " seq " + seq + " threaded as: " + best_path.toString() + ", trimmed to: " + pathIDS + decorator, 17);
}
else {
debugMes("Trimmed path for read: " + name + " threaded as: " + best_path.toString() + " is empty", 19);
}
String pathSeq = getPathSeq(graph, pathIDS);
if (false) { // for debugging
if (pathSeq.indexOf(seq) < 0) {
throw new RuntimeException("Error, read seq: " + seq + "\n does not match threaded seq:\n" + pathSeq);
}
else {
debugMes("Read seq with len=" + seq.length() + " : " + seq
+ " matches to path seq with len=" + pathSeq.length() + " : " + pathSeq, 15);
}
}
}
}else
debugMes("read "+name+" was not mapped to graph. original node doesn't exist anymore ("+fromOrigV+")",19);
return pathIDS;
}
/**
* Given the graph, and the read, find the path of the read in the graph
* @param graph
* @param seq
* @param fromV
* @return
*/
private static Path_n_MM_count findPathInGraph(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, String seq,
LocInGraph fromV, String readName) {
int local_verbose_level = BFLY_GLOBALS.VERBOSE_LEVEL;
if (readName.startsWith("LR$") && BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
BFLY_GLOBALS.VERBOSE_LEVEL = 20;
}
List<Integer> path = new ArrayList<Integer>();
SeqVertex fromVer = getSeqVertex(graph, fromV.getNodeID());
debugMes("findPathInGraph: V:" + fromV.getNodeID() + ", with seq: " + fromVer.getName(), 20);
debugMes("trying to start the mapping to node "+fromVer.getID() + " at position: " + fromV.getIndexInNode(), 20);
MAX_MM_ALLOWED_CAP = (int) Math.ceil(seq.length() * MAX_READ_SEQ_DIVERGENCE);
MAX_MM_ALLOWED = MAX_MM_ALLOWED_CAP;
debugMes("\n\nThreading read: " + readName + ", length: " + seq.length()
+ ", allowing for " + MAX_MM_ALLOWED + " max mismatches.", 17);
debugMes("Read: " + readName + " sequence is:\n" + seq, 20);
Integer totalNumMM = 0;
HashMap<String,Path_n_MM_count> best_path_memoization = new HashMap<String,Path_n_MM_count> (); // use DP
Path_n_MM_count best_path_mapping = updatePathRecursively(graph,fromVer.getID(),seq,0, fromV.getIndexInNode(),
totalNumMM, readName, best_path_memoization);
BFLY_GLOBALS.VERBOSE_LEVEL = local_verbose_level;
if (best_path_mapping != null) {
debugMes("FINAL BEST PATH for " + readName + " is " + best_path_mapping.path + " with total mm: " + best_path_mapping.mismatch_count, 15);
return(best_path_mapping);
}
else {
debugMes("NO_READ_MAPPING_FOUND_FOR: " + readName + "\n\n", 15);
return(null); // no such path found.
}
}
/**
* Update the given path recursively
* @param graph
* @param fromV_id
* @param seq
* @param locInSeq
* @param locInNode
* @param totalNumMM
* @param readName
* @param best_path_memoization
*/
private static Path_n_MM_count updatePathRecursively(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Integer fromV_id, String seq, int locInSeq, int locInNode,
Integer totalNumMM,String readName, HashMap<String, Path_n_MM_count> best_path_memoization) {
int MIN_SEQ_LENGTH_TEST_DIVERGENCE = 20;
int MAX_LEFT_END_GAPS = 5;
SeqVertex fromV = getSeqVertex(graph, fromV_id);
Integer numMM = totalNumMM; // init for each node check
String verSeq = fromV.getName(); //important - full name, not kmer-adjusted name.
debugMes("updatePathRecursively(readName=" + readName +
", locInSeq: " + locInSeq + " / " + (seq.length() -1) +
", locInNode: " + locInNode + " / " + (verSeq.length() -1) +
", totalNumMm: " + totalNumMM, 20);
int startI = locInNode;
int j=locInSeq, i = startI;
String read_vertex_start_pos_token = "" + fromV.getID() + "_" + locInNode + "_" + locInSeq;
if (best_path_memoization.containsKey(read_vertex_start_pos_token)) {
Path_n_MM_count best_path = best_path_memoization.get(read_vertex_start_pos_token);
if (best_path == null) {
debugMes("MEMOIZATION: indicates this path was a dead end. Not trying again.", 20);
return(null);
}
else {
debugMes("MEMOIZATION: already stored best path at: " + read_vertex_start_pos_token + " = " + best_path, 20);
// return a copy, critically important!!!
Path_n_MM_count best_path_copy = new Path_n_MM_count(best_path);
return(best_path_copy);
}
}
debugMes("\ntrying to continue the mapping to node "+ fromV.getShortSeqWID(), 19);
int length_to_align = Math.min(verSeq.length() - i, seq.length() - j);
debugMes("-ALIGNING READ SEQ (" + readName + ")\n" + seq.substring(j, j+length_to_align) + " " + j
+ "\nTo VERTEX (" + fromV.getShortSeqWID() + ") SEQ:\n" + verSeq.substring(i, i+length_to_align) + " " + i, 20);
debugMes("Note, rest of read sequence is:\n" + seq.substring(j), 21);
// zipper align
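// "zipper" alignment: a simple base-by-base walk down the read and vertex sequences in lockstep,
// counting mismatches only (no gaps); it bails out early once the mismatch budget or the local
// divergence threshold is exceeded.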
boolean failed_alignment = false;
Integer mm_encountered_here = 0;
for (; i>=0 && i<verSeq.length() && j<seq.length() ; i++,j++)
{
String readLetter = ""+seq.charAt(j);
String verLetter = ""+verSeq.charAt(i);
String mismatchFlag = (areTwoNucleotidesEqual(readLetter,verLetter)) ? "" : "XXX mismatch XXX";
debugMes("Comparing read bases: " + i + ":" + readLetter + ", " + j + ":" + verLetter + " " + mismatchFlag, 21);
if (!areTwoNucleotidesEqual(readLetter,verLetter))
{
//we have a mismatch
numMM++;
mm_encountered_here++;
if ( (numMM > MAX_MM_ALLOWED)
||
(i >= MIN_SEQ_LENGTH_TEST_DIVERGENCE && (mm_encountered_here/(float)(i)) > MAX_READ_LOCAL_SEQ_DIVERGENCE)
)
{
failed_alignment = true;
debugMes("shortcircuiting the zipper test, too many MM or execeeding local seq divergence", 20);
break; // no point in looking further.
}
//TODO: look at mismatch density here as well.
}
} // end of mapping read within node
if (! failed_alignment) {
debugMes("zipper alignment mm: " + mm_encountered_here, 20);
}
// retain zipper info in case it's better than any DP alignment score
int zipper_i = i;
int zipper_j = j;
int zipper_mm = mm_encountered_here;
// use DP alignment if variation is encountered above. (trying simplest/fastest strategy first)
boolean short_DP_test_passes = true;
int MIN_LENGTH_TEST_DP = 100;
if (USE_DP_READ_TO_VERTEX_ALIGN && length_to_align > MIN_LENGTH_TEST_DP && mm_encountered_here > 1) {
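// Quick screen before committing to a full banded alignment: run Needleman-Wunsch on just the first
// MIN_LENGTH_TEST_DP bases; if even the gapped alignment of that window is too divergent, skip the full DP.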
debugMes("Running short DP test", 20);
j=locInSeq;
i = startI;
Alignment alignment = NWalign.run_NW_alignment(
"Vertex", verSeq.substring(i, i+MIN_LENGTH_TEST_DP),
"Read", seq.substring(j, j+MIN_LENGTH_TEST_DP),
4, -5, 10, 1);
debugMes ("DP test:\n" + new jaligner.formats.Pair().format(alignment), 17);
AlignmentStats stats = new AlignmentStats(alignment);
mm_encountered_here = stats.mismatches + stats.gaps + stats.left_gap_length;
float pct_divergence = mm_encountered_here/(float)(MIN_LENGTH_TEST_DP);
if ( pct_divergence > MAX_READ_LOCAL_SEQ_DIVERGENCE) {
debugMes("DP test indicates excessive divergence: " + pct_divergence, 20);
short_DP_test_passes = false;
// leave failed alignment status as is.
}
// retain earlier zipper stats, regardless of whether or not we go into full DP below.
i = zipper_i;
j = zipper_j;
mm_encountered_here = zipper_mm;
}
int vertex_num_right_end_gaps = 0;
int read_num_right_end_gaps = 0;
int max_left_gaps = 0;
if (USE_DP_READ_TO_VERTEX_ALIGN && verSeq.length() > 2 && mm_encountered_here > 1 && short_DP_test_passes) {
debugMes(" *Trying again using full DP alignment:", 20);
// reset i and j
j=locInSeq;
i = startI;
// try aligning the full vertex sequence w/ extended ref sequence in case it contains small deletions.
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of vertex to read", 17);
/*
Alignment alignment = NWalign.run_NW_alignment(
"Vertex", verSeq.substring(i, i+length_to_align),
"Read", seq.substring(j, j+length_to_align),
4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
*/
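// Banded NW over a read window ~5% longer than the vertex sequence, leaving room for small insertions
// in the read relative to the vertex; the band width is scaled by the allowed local divergence.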
int read_length_to_align = (int) (verSeq.length() * 1.05f);
if (read_length_to_align + j > seq.length()) {
read_length_to_align = seq.length() - j;
}
int bandwidth = (int) (MAX_READ_LOCAL_SEQ_DIVERGENCE * read_length_to_align);
Alignment alignment = NWalign.run_NW_banded_alignment(
"Vertex", verSeq.substring(i),
"Read", seq.substring(j, j+read_length_to_align),
4, -5, 10, 1, bandwidth);
debugMes (new jaligner.formats.Pair().format(alignment), 17);
AlignmentStats stats = new AlignmentStats(alignment);
mm_encountered_here = stats.mismatches + stats.gaps + stats.left_gap_length + stats.right_gap_length;
// check for right end gap in sequence
String name1 = alignment.getName1();
char[] vertex_align = alignment.getSequence1();
char[] read_align = alignment.getSequence2();
if (name1.equals("Read")) {
char[] swap = vertex_align;
vertex_align = read_align;
read_align = swap;
}
vertex_num_right_end_gaps = AlignmentStats.get_num_right_end_gaps(vertex_align);
read_num_right_end_gaps = AlignmentStats.get_num_right_end_gaps(read_align);
max_left_gaps = Math.max(AlignmentStats.get_num_left_end_gaps(vertex_align),
AlignmentStats.get_num_left_end_gaps(read_align));
debugMes("vertex end gaps: " + vertex_num_right_end_gaps, 20);
debugMes("read end gaps: " + read_num_right_end_gaps, 20);
i = verSeq.length(); // aligning to whole vertex sequence.
j += read_length_to_align;
if (vertex_num_right_end_gaps > 0) {
// read extends beyond vertex sequence
j -= vertex_num_right_end_gaps;
mm_encountered_here -= vertex_num_right_end_gaps;
}
else if (read_num_right_end_gaps > 0) {
// vertex extends beyond end of read.
mm_encountered_here -= read_num_right_end_gaps;
}
/*
// if read end gaps, extend to the end of the vertex
if (read_num_right_end_gaps > 0) {
for (int r = read_num_right_end_gaps; r>0 && j < seq.length(); r--) {
String readLetter = ""+seq.charAt(j++);
String verLetter = ""+verSeq.charAt(verSeq.length()-r);
if (!areTwoNucleotidesEqual(readLetter,verLetter)) {
mm_encountered_here++;
debugMes("walking read end gap: V[" + verLetter + "] vs. R[" + readLetter + "] ** conflict ** " , 20);
}
else {
debugMes("walking read end gap: V[" + verLetter + "] vs. R[" + readLetter + "] OK " , 20);
}
}
}
else if (vertex_num_right_end_gaps > 0) {
// over-ran the vertex sequence
// shrink the sequence by the amount extended beyond the vertex
j -= vertex_num_right_end_gaps;
mm_encountered_here -= vertex_num_right_end_gaps;
debugMes("because of vertex end gaps, walking read back by " + vertex_num_right_end_gaps + " bases.", 20);
if (i != verSeq.length()) {
debugMes("** ERROR: i=" + i + ", but verSeq.length() = " + verSeq.length(), 20);
}
}
*/
debugMes("mismatches encountered: " + mm_encountered_here, 20);
if (mm_encountered_here >= zipper_mm && zipper_i == verSeq.length()) {
debugMes("Zipper alignment mm: " + zipper_mm + " <= DP mm: " + mm_encountered_here +
", so defaulting to earlier zipper alignment.", 20);
i = zipper_i;
j = zipper_j;
mm_encountered_here = zipper_mm;
max_left_gaps = 0;
// retain any failed alignment status
}
else {
failed_alignment = false; // reset it as needed given that DP was ok.
}
numMM = mm_encountered_here + totalNumMM;
}
// note, i and j are now 1 more than the array index due to the last i++,j++ of the loop above.
float current_alignment_divergence = numMM / (float) j;
debugMes("alignment divergence up to seq pos " + j +
" = mm: " + numMM +
", div:" + current_alignment_divergence, 20);
float local_vertex_alignment_divergence = mm_encountered_here / (float) i;
debugMes("local vertex alignment divergence = " + mm_encountered_here + " / " + i + " = " + local_vertex_alignment_divergence, 20);
// examine the alignment at this vertex to see if it passes our requirements
if (i >= MIN_SEQ_LENGTH_TEST_DIVERGENCE && local_vertex_alignment_divergence >= MAX_READ_LOCAL_SEQ_DIVERGENCE) {
failed_alignment = true;
debugMes("local divergence exceeds max allowed: " + MAX_READ_LOCAL_SEQ_DIVERGENCE, 20);
}
if (max_left_gaps > MAX_LEFT_END_GAPS ) {
failed_alignment = true;
}
if (
// cumulative alignment stats up to and including vertex do not meet thresholds
(current_alignment_divergence > MAX_READ_SEQ_DIVERGENCE ||
numMM > MAX_MM_ALLOWED )
||
// alignment to current vertex fails
failed_alignment
)
{
debugMes("read "+readName+" has too many mismatches ("+numMM+") or too many left gaps (" + max_left_gaps + ")",19);
if (failed_alignment) {
// store it so we don't try again from this position in the sequence and at this vertex position.
best_path_memoization.put(read_vertex_start_pos_token, null);
}
return(null); // go back and try alternative vertex if available
}
else if (j==seq.length() || graph.getSuccessors(fromV) == null)
{
// Reached end of the read being threaded (or ran out of vertices to explore)
if (graph.getSuccessors(fromV) == null) {
// tack on unaligned terminus of sequence as mismatches
mm_encountered_here += seq.length() - j;
}
// reached base case for recursion.
debugMes("Reached end of read sequence. Read" + readName + " with length: " + seq.length()
+ " and base [" + j + "] ends at position [" + i + "] within node: "
+ fromV.getID() + " totaling " + mm_encountered_here + " mismatches. ", 19);
Path_n_MM_count best_path = new Path_n_MM_count(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
else if (i==verSeq.length()) // move to the next ver
{
// Reached end of vertex
// vertex sequence fully traversed, examine children vertices
// Going on to recursive path mapping for read
debugMes("Reached end of node sequence. Read" + readName
+ " base [" + j + "] ends at position [" + i + "] within node: "
+ fromV.getID() + " totaling " + mm_encountered_here + " mismatches. ", 19);
// get list of the next vertices to explore.
// examine edges prioritized by weight
List<Integer> continueVersIds = new ArrayList<Integer>();
List<SeqVertex> continueVers = new ArrayList<SeqVertex>();
// just use simple successor vertices
if (graph.getSuccessors(fromV) != null) {
for (SeqVertex to_v : graph.getSuccessors(fromV)) {
boolean check_initial_vertex_chars_match_seq = false;
if (check_initial_vertex_chars_match_seq) {
debugMes("-checking that next characters match up: " + to_v.getNameKmerAdj().charAt(0) + " vs. " + seq.charAt(j), 21);
if (to_v.getNameKmerAdj().charAt(0)==seq.charAt(j)) {
continueVers.add(to_v);
continueVersIds.add(to_v.getID());
}
}
else {
continueVers.add(to_v);
continueVersIds.add(to_v.getID());
}
}
}
debugMes("-reached end of vertex: " + fromV.getID() + ", exploring next vertices for continued path extension: " + continueVersIds, 19);
Path_n_MM_count best_path = null;
boolean tied_best = false;
debugMes("Pursuing extension from : " + fromV.getShortSeqWID() + " to successors: " + continueVers, 19);
List<Path_n_MM_count> all_best_paths_explored = new ArrayList<Path_n_MM_count>();
for (Integer successor_vertex_id : continueVersIds) {
debugMes("Exploring extension from node: " + fromV.getID() + " to node: " + successor_vertex_id, 20);
Path_n_MM_count best_extension = updatePathRecursively(graph,successor_vertex_id,
seq,
j,
KMER_SIZE-1, numMM, readName,
best_path_memoization);
// Back from Recursive Call.
// Evaluate best paths from the successors.
/* testing for local sequence divergence within the alignment itself
// first, check to see if it's an extension worth considering, given our local sequence divergence restrictions.
if (best_extension != null
&&
best_extension.mismatch_count / (float) (seq.length() -1 - j) > MAX_READ_LOCAL_SEQ_DIVERGENCE) {
debugMes("\tencountered max read sequence divergence: " + best_extension.mismatch_count / (float) (seq.length() -1 - j)
+ ", disallowing extension: ." + best_extension, 19);
best_extension = null; // nullify the current best extension from successor_vertex_id
}
*/
if (best_extension == null) {
debugMes("\n\tFailed extension from " + fromV.getID() + " to : " + successor_vertex_id + ".", 19);
}
else {
// have a best extension
all_best_paths_explored.add(best_extension);
debugMes(readName + " best path so far from vertex: " + fromV.getID()
+ " to : " + successor_vertex_id
+ " = " + best_extension.path +
", with total mm: " + best_extension.mismatch_count, 20);
if (best_path == null
||
(best_extension.mismatch_count <= best_path.mismatch_count) ) {
// test for tie condition
if (best_path != null) {
if (best_extension.mismatch_count == best_path.mismatch_count) {
tied_best = true;
debugMes("WARNING, Tied paths from vertex [V" + fromV_id +
" ]: \nPath A:\n" + best_extension +
"\nvs. Path B:\n" + best_path, 15);
}
else
tied_best = false;
}
best_path = best_extension;
}
}
} // end of successor vertex search.
debugMes("Done with exploring paths from vertex: " + fromV.getID(), 20);
debugMes("Paths and scores found are: ", 20);
for (Path_n_MM_count pmm: all_best_paths_explored) {
debugMes("\texplored path: " + pmm.path + " w/ mm: " + pmm.mismatch_count, 20);
}
if (best_path != null) {
debugMes("\tAND best selected was: " + best_path.path + " w/ mm: " + best_path.mismatch_count, 20);
}
if (best_path != null) {
if (tied_best) {
debugMes("WARNING: TIED_READ_PATH", 15);
boolean TRUNCATE_TIED_PATH = false;
if (TRUNCATE_TIED_PATH) {
// truncate
Path_n_MM_count truncated_path = new Path_n_MM_count(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, truncated_path);
return(new Path_n_MM_count(truncated_path));
}
else {
// add current node and local mismatches encountered here.
best_path.add_path_n_mm(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
}
else {
// not a tie
// add current node and local mismatches encountered here.
best_path = new Path_n_MM_count(best_path);
best_path.add_path_n_mm(fromV.getID(), mm_encountered_here, i, j);
best_path_memoization.put(read_vertex_start_pos_token, best_path);
return(new Path_n_MM_count(best_path));
}
}
else {
best_path_memoization.put(read_vertex_start_pos_token, null);
return(null); // no extension possible.
}
}
// should never end up here
throw(new RuntimeException("should never end up here, supposedly. i="+i
+ ", j=" + j + " ver length = " + verSeq.length() + " and readSeq length = " + seq.length() ));
}
/**
* create a hash that maps each original vertex id to its location within the current graph
* @param graph
* @return the hash
*/
private static HashMap<Integer, LocInGraph> getOriginalVerIDsMappingHash(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
// clear double entries in the prevID list - not sure why they happen?
for (SeqVertex v : graph.getVertices())
v.clearDoubleEntriesToPrevIDs();
HashMap<Integer, LocInGraph> hash = new HashMap<Integer,LocInGraph>();
for (SeqVertex v : graph.getVertices())
{
// update the node tracker
SeqVertex.nodeTracker.put(v.getID(), v); // beware - shouldn't have to do this, but finding myself having to due to getSeqVertex(id) not returning the correct vertex (with full sequence attached).
debugMes("Graph vertex: " + v.getID() + " has sequence: " + v.getNameKmerAdj(), 22);
Integer loc = 0;
Integer vid = v.getID();
// if the node id is new, then the real start is in the vector
if (vid>LAST_REAL_ID)
loc = loc-1;
else
{
debugMes("adding to "+vid+": Location of original node "+vid+" in index "+loc,20);
hash.put(vid, new LocInGraph(vid,loc));
}
for (Vector<Integer> vec : v.getPrevVerIDs())
{
loc++;
for (Integer id : vec)
{
debugMes("adding to "+id+": Location of original node "+v.getID()+" in index "+loc,20);
hash.put(id, new LocInGraph(v.getID(),loc));
}
}
}
return hash;
}
/**
* go over the graph file, and count the in flow and out flow of each node
* @param filename
* @param outFlow out flow per vertex id (filled in)
* @param inFlow in flow per vertex id (filled in)
* @param kmers kmer sequence per vertex id (filled in)
* @throws IOException
*/
private static void preProcessGraphFile(String filename,
HashMap<Integer, Integer> outFlow, HashMap<Integer, Integer> inFlow, HashMap<Integer,String> kmers) throws IOException {
BufferedReader fileB = new BufferedReader(new FileReader(filename));
String l = fileB.readLine(); // read header of component
Integer from, to, supp;
while (fileB.ready())
{
l = fileB.readLine();
// 0 -1 3 ATTGAAAGCAAGTTTTCTTCGAAT 0
// 1 0 3 TTGAAAGCAAGTTTTCTTCGAATT 0
// to from supp kmer (unused placeholder)
String[] fields = l.split("\t");
from = Integer.parseInt(fields[1]);
to = Integer.parseInt(fields[0]);
supp = Integer.parseInt((fields[2]));
String kmer = fields[3];
if (!outFlow.containsKey(from))
outFlow.put(from, supp);
else
outFlow.put(from, outFlow.get(from)+supp);
if (!inFlow.containsKey(to))
inFlow.put(to, supp);
else
inFlow.put(to, inFlow.get(to)+supp);
kmers.put(to,kmer);
}
}
/**
* given the filename, make a graph out of the connected components
* This time, keep the first letter of each kmer:
* keep the whole kmer, and then if there is an edge out, leave only first letter
* @param filename
* @param rootIDs
* @param inFlow in flow for all vertices
* @param outFlow out flow for all vertices
* @param kmers kmer sequence for each vertex id
* @return
* @throws IOException
*/
private static DirectedSparseGraph<SeqVertex, SimpleEdge> buildNewGraphUseKmers(
String filename,
Vector<Integer> rootIDs, HashMap<Integer,Integer> outFlow,
HashMap<Integer,Integer> inFlow,
HashMap<Integer, String> kmers)
throws IOException
{
BufferedReader fileB = new BufferedReader(new FileReader(filename));
DirectedSparseGraph<SeqVertex, SimpleEdge> graph =
new DirectedSparseGraph<SeqVertex,SimpleEdge>();
String l = fileB.readLine(); // read header of component
Integer from, to;
double supp;
int linecount = 0;
while (fileB.ready())
{
linecount++;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 18 && linecount % 17 == 0) {
System.err.print("\r[" + linecount + "] ");
}
l = fileB.readLine();
// 0 -1 3 ATTGAAAGCAAGTTTTCTTCGAAT 0
// 1 0 3 TTGAAAGCAAGTTTTCTTCGAATT 0
// to from supp kmer (unused placeholder)
String[] fields = l.split("\t");
from = Integer.parseInt(fields[1]);
to = Integer.parseInt(fields[0]);
supp = Double.parseDouble((fields[2]));
if (supp < INITIAL_EDGE_ABS_THR )
continue;
// just tracking node ID values.
if (from>LAST_ID)
LAST_ID = from;
if (to>LAST_ID)
LAST_ID = to;
String kmer = fields[3];
if (KMER_SIZE == 0) {
KMER_SIZE = kmer.length();
debugMes("KMER_SIZE=" + KMER_SIZE, 5);
}
else if (KMER_SIZE != kmer.length()) {
throw new RuntimeException("Error, discrepancy among kmer lengths. Stored: " + KMER_SIZE + ", found: " + kmer.length() + "\n" + l);
}
SeqVertex fromV = getSeqVertex(graph, from);
if (fromV==null && from>=0)
{
//fromV = new SeqVertex(from,firstLetter.get(from)+""+kmer.substring(0,KMER_SIZE-1));
fromV = new SeqVertex(from,kmers.get(from));
graph.addVertex(fromV);
}
boolean isRoot = (from<0 || fromV==null);
SeqVertex toV = getSeqVertex(graph, to); // important to call this after possibly creating fromV, in case fromV == toV (otherwise, creating that vertex twice!) // bugfix
if (isRoot)
{
if (toV==null)
{
toV = new SeqVertex(to, kmer, supp);
graph.addVertex(toV);
rootIDs.add(to);
}
}
else
{
if (toV==null)
{
toV = new SeqVertex(to, kmer);
graph.addVertex(toV);
}
SimpleEdge e = new SimpleEdge(supp, fromV.getID(), toV.getID());
graph.addEdge(e, fromV, toV);
}
}
return graph;
}
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
String output_filename, String graphname, boolean printFullSeq) throws Exception
{
PrintStream p = new PrintStream(new FileOutputStream(output_filename));
writeDotFile(graph,p,graphname,printFullSeq);
p.close();
}
/**
* write to dot file with shortened seqs
* @param graph
* @param p
* @param name
*/
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p, String name)
{
writeDotFile(graph,p,name,false);
}
/**
* Write to dot file; heavy vertices and edges (high coverage) are highlighted in bold red
* @param graph
* @param p
* @param name
* @param printFullSeq whether to print the full vertex sequences
*/
private static void writeDotFile(DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
PrintStream p, String name,boolean printFullSeq)
{
SeqVertex.set_graph(graph);
//p.println("digraph "+name+"{");
p.println("digraph G {");
SeqVertex toVertex;
int weight;
//for each edge decide it's color
for (SeqVertex vertex : graph.getVertices())
{ //go over all vertices
debugMes("Vertex: " + vertex.getShortSeqWconnectingIDs(graph), 15);
String verDesc = ""+vertex.getID()+" [label=\"";
if (printFullSeq)
verDesc = verDesc.concat(""+vertex.getLongtSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]\"");
else {
boolean include_discovery_time = true;
if (include_discovery_time) {
verDesc = verDesc.concat(""+vertex.getShortSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]"
+ "[T:" + vertex._dfsDiscoveryTime + "]\"");
}
else {
verDesc = verDesc.concat(""+vertex.getShortSeqWID() + "[L:"+vertex.getNameKmerAdj().length()+"]\"");
}
}
if (vertex.getWeightAvg()>25)
verDesc = verDesc.concat(" ,style=bold,color=\"#AF0000\"");
verDesc = verDesc.concat("]");
if (!vertex.equals(T_VERTEX) && !vertex.equals(ROOT))
p.println(verDesc);
for (SimpleEdge edge : graph.getOutEdges(vertex)) //get all edges of vertex->?
{
toVertex = graph.getDest(edge);
weight = (int) Math.round(edge.getWeight());
String edgeStyle = "[label="+ weight +"]";
if (weight>20)
edgeStyle = "[style=bold,label="+ weight +",color=\"#AF0000\"]";
if (!toVertex.equals(T_VERTEX) && !vertex.equals(ROOT))
p.println(vertex.getID() + "->" + toVertex.getID() +edgeStyle);
}
}
p.println("}");
}
/**
* Compact the given graph:
* for each vertex with out-degree 1 whose sole successor has in-degree 1, merge the successor into it and reconnect the edges
* @param graph
*/
private static boolean compactLinearPaths(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nCOMPACTING THE GRAPH\n=================",5);
//compact vertices
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
boolean changed = false;
for (SeqVertex v1 : graph.getVertices())
{
// debugMes("looking at vertex: "+v1);
while (!v1.equals(ROOT) && graph.outDegree(v1)==1 )
{
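// Chain compaction: absorb the sole successor v2 into v1 whenever v2 has in-degree 1 (and is not a
// self loop or the terminal vertex), re-routing v2's out-edges so they originate from v1.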
// get out edge, should only have one.
SimpleEdge e = null;
for (SimpleEdge ei : graph.getOutEdges(v1))
e = ei;
// get the vertex attached.
SeqVertex v2 = graph.getDest(e);
if (graph.inDegree(v2)!=1 || v2.isToBeDeleted() || v2.equals(T_VERTEX) || v1.equals(v2)) {
// avoiding loops and vertices that are in-branched.
break;
}
debugMes("Found potential edge: "+e +" between "+v1 +" and "+v2,20);
v1.concatVertex(v2, e.getWeight(),LAST_REAL_ID);
debugMes("removing vertex "+v2+" was concatenated into "+v1,20);
removeVertices.add(v2);
v2.setToBeDeleted(true);
changed = true;
removeEdges.clear();
for (SimpleEdge e2 : graph.getOutEdges(v2))
{
SeqVertex v3 = graph.getDest(e2);
debugMes("Want to move edge " + e2 + "("+v2 +"->"+v3+") to ("+v1+"->"+v3,20);
SimpleEdge newEdge = new SimpleEdge(e2, v1.getID(), v3.getID());
graph.addEdge(newEdge, v1, v3);
debugMes("\tadding edge: " + v1 + " to " + v3, 20);
removeEdges.add(e2);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge " + re + "("+graph.getSource(re) +"->"+graph.getDest(re)+")",20);
graph.removeEdge(re);
}
debugMes("removing edge " + e + "("+v1 +"->"+v2+")",20);
graph.removeEdge(e);
}
}
//remove all vertices that we don't want
for (SeqVertex v : removeVertices)
{
graph.removeVertex(v);
}
return changed;
}
/**
* remove light edges from the graph. return true if something has changed
* @param graph
* @return
*/
private static boolean removeLightEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("removeLightEdges()", 10);
boolean comp = false ; //removeLightCompEdges(graph);
boolean in = removeLightInEdges(graph);
boolean out = removeLightOutEdges(graph);
boolean flow = removeLightFlowEdges(graph);
return comp || in || out || flow;
}
/**
* Given a graph, go over all vertices and remove incoming or outgoing edges that do not match the flow (< 2% of coverage; see FLOW_THR)
* Both the incoming and the outgoing edges are checked against the vertex's average node coverage.
* @param graph
* @return true if graph was changed.
*/
private static boolean removeLightFlowEdges(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
debugMes("SECTION\n=================\nREMOVING LIGHT FLOW EDGES\n=================",5);
boolean changed = false;
Collection<SeqVertex> all_vertices = graph.getVertices();
int vertex_count = 0;
int num_total_vertices = all_vertices.size();
for (SeqVertex v : all_vertices)
{
vertex_count++;
debugMes("Analyzing vertex: " + v.getID() + ", entry " + vertex_count + " of " + num_total_vertices, 25);
int inDegree = graph.inDegree(v);
int outDegree = graph.outDegree(v);
debugMes("\thas inDegree: " + inDegree + ", outDegree: " + outDegree, 25);
if (inDegree==0 && outDegree==0) {
debugMes("\tSkipping vertex.", 25);
continue;
}
int totalIn = 0, totalOut = 0;
for (SimpleEdge e : graph.getInEdges(v))
totalIn+=e.getWeight();
for (SimpleEdge e : graph.getOutEdges(v))
totalOut+=e.getWeight();
debugMes("FLOW: total in for vertex "+v+" is "+totalIn + ", total out is "+totalOut+", averageCov="+v.getWeightAvg(),20);
Collection<SimpleEdge> removeEdges = new HashSet<SimpleEdge>();
// out edges
for (SimpleEdge e : graph.getOutEdges(v))
{
double e_avg_flow_thr_thresh = v.getWeightAvg() * FLOW_THR;
if ( e.getWeight() < e_avg_flow_thr_thresh) {
debugMes("EDGE_PRUNING::removeLightFlowEdges() removing low flow OUT edge " + e
+ " from "+ graph.getSource(e)+" to "+graph.getDest(e) +
", FLOW_THR=" + FLOW_THR +
", e_avg_flow_thr_thresh=: " + e_avg_flow_thr_thresh, 15);
removeEdges.add(e);
}
}
// in edges
for (SimpleEdge e : graph.getInEdges(v))
{
double e_avg_flow_thr_thresh = v.getWeightAvg() * FLOW_THR;
if (e.getWeight() < e_avg_flow_thr_thresh) {
debugMes("EDGE_PRUNING::removeLightFlowEdges() removing low flow IN edge " + e
+ " from "+ graph.getSource(e)+" to "+graph.getDest(e) +
", FLOW_THR=" + FLOW_THR +
", e.weight=" + e.getWeight() + " < e_avg_flow_thr_thresh=: " + e_avg_flow_thr_thresh, 15);
removeEdges.add(e);
}
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
changed = true;
}
debugMes("\tdone analyzing vertex: " + v.getID(), 25);
}
debugMes("== done removing Light Flow Edges.", 25);
return changed;
}
/**
* go over the graph, and remove incoming edges that carry no more than an EDGE_THR (5%) fraction of the vertex's total entry flow
* @param graph
*/
private static boolean removeLightInEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nREMOVING LIGHT In EDGES\n=================",5);
boolean somethingChanged = false;
Queue<SeqVertex> allCurVers = new LinkedList<SeqVertex>(graph.getVertices());
SeqVertex v = null;
while ((v = allCurVers.poll())!=null)
{
if (graph.inDegree(v)<=1)
continue;
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
// skip edges at simple cycles
if (atSimpleCycle(graph, v)) {
continue;
}
int totalIn = 0;
for (SimpleEdge inE : graph.getInEdges(v))
{
totalIn+=inE.getWeight();
}
for (SimpleEdge inE : graph.getInEdges(v))
{
double e_edge_thr = totalIn*EDGE_THR;
if (inE.getWeight() <= e_edge_thr)
{
debugMes("EDGE_PRUNING::removeLightInEdges() removing the edge: "+
graph.getSource(inE)+" " + graph.getSource(inE).getName() +
" -> " +
graph.getDest(inE)+ " " + graph.getDest(inE).getName() +
" (weight: "+inE.getWeight()+" <= e_edge_thr: " + e_edge_thr +
", EDGE_THR=" + EDGE_THR, 15);
removeEdges.add(inE);
somethingChanged = true;
}
}
for (SimpleEdge e : removeEdges)
graph.removeEdge(e);
}
return somethingChanged;
}
private static boolean atSimpleCycle(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
for (SimpleEdge e : graph.getInEdges(v)) {
if (graph.findEdge(v, graph.getSource(e)) != null)
return(true);
}
for (SimpleEdge e: graph.getOutEdges(v)) {
if (graph.findEdge(graph.getDest(e), v) != null)
return(true);
}
return(false);
}
/**
* go over the graph, and remove outgoing edges that carry no more than an EDGE_THR (10%) fraction of the vertex's total exit flow
* @param graph
*/
private static boolean removeLightOutEdges(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
debugMes("SECTION\n=================\nREMOVING LIGHT OUT EDGES\n=================",5);
boolean somethingChanged = false;
Queue<SeqVertex> allCurVers = new LinkedList<SeqVertex>(graph.getVertices());
SeqVertex v = null;
while ((v = allCurVers.poll())!=null)
{
if (graph.outDegree(v)<=1)
continue;
// skip edges at simple cycles
if (atSimpleCycle(graph, v)) {
continue;
}
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
int totalOut = 0;
for (SimpleEdge outE : graph.getOutEdges(v))
{
totalOut+=outE.getWeight();
}
for (SimpleEdge outE : graph.getOutEdges(v))
{
double e_edge_thr = totalOut * EDGE_THR;
if (outE.getWeight() <= e_edge_thr)
{
debugMes("EDGE_PRUNING::removeLightOutEdges() removing the edge: " +
graph.getSource(outE)+ " " + graph.getSource(outE).getName() +
" -> " +
graph.getDest(outE)+ " " + graph.getDest(outE).getName() +
" (weight: "+outE.getWeight()+" <= e_edge_thr: " + e_edge_thr +
", EDGE_THR=" + EDGE_THR, 15);
removeEdges.add(outE);
somethingChanged = true;
}
}
for (SimpleEdge e : removeEdges)
graph.removeEdge(e);
}
return somethingChanged;
}
/**
* Return the SeqVertex with the given id within the given graph.
* @param graph
* @param id
* @return
*/
private static SeqVertex getSeqVertex(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, int id)
{
SeqVertex v = SeqVertex.retrieveSeqVertexByID(id);
if (graph.containsVertex(v)) {
return(v);
}
else {
return(null); // note, SeqVertex stores all vertices even if removed from the graph, so let's ensure it's still in the graph.
}
/* orig code too slow for large graphs
for (SeqVertex v : graph.getVertices())
{
if (v.getID() == id)
return v;
}
return null;
*/
}
/**
* Given the string seq, return it in fasta format
* @param seq - seq
* @param name - seq name
* @return
*/
private static String getSeqFasta(String seq,String name){
String res = "";
res = res.concat(">"+name+"\n");
int i=0;
for (; i<seq.length()-LINE_LEN ; i+=LINE_LEN)
{
res = res.concat(seq.substring(i, i+LINE_LEN)+"\n");
}
res = res.concat(seq.substring(i)+"\n");
return res;
}
/**
* return the next available vertex id.
* @return
*/
private static int getNextID() {
LAST_ID++;
return LAST_ID;
}
/**
* return a topological order on the graph's vertices.
* @param graph
* @return list of nodes.
*/
private static List<SeqVertex> getTopologicalOrder(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, My_DFS dfs) {
Map<SeqVertex,Number> finished = dfs.getFinishing();
SeqVertexFinishTimeComparator finishingTimeComparator = new SeqVertexFinishTimeComparator();
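// Ordering vertices by DFS finishing time (the comparator is assumed to sort by decreasing finish time)
// yields a topological order for an acyclic graph.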
debugMes("getTopologicalOrder(), Vertex count: " + graph.getVertexCount(), 18);
PriorityQueue<SeqVertex> fQueue = new PriorityQueue<SeqVertex>(graph.getVertexCount(),finishingTimeComparator );
for (SeqVertex v : finished.keySet())
{
fQueue.add(v);
}
List<SeqVertex> order = new ArrayList<SeqVertex>();
while (!fQueue.isEmpty())
{
order.add(fQueue.poll());
}
return order;
}
/**
* Go over each sub component of the given graph, and calc the following:
* total coverage (sum of weights)
* average coverage
* number of paths
* @param graph
*/
private static void calcSubComponentsStats(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
Set<Set<SeqVertex>> comps = divideIntoComponents(graph);
int numComp = comps.size();
for (Set<SeqVertex> comp : comps)
{
//now we have one comp in hand
Vector<Double> allW = new Vector<Double>();
int compID = -1;
for (SeqVertex v : comp)
{
if (compID==-1)
compID = v.getID();
allW.addAll(0, v.getWeights());
for (SimpleEdge outE : graph.getOutEdges(v))
{
allW.add(0, outE.getWeight());
}
}
SeqVertex v1 = getSeqVertex(graph, compID);
if (allW.size()==0 || (comp.size()==1 && v1.getName().length()<MIN_OUTPUT_SEQ))
{
//this is a single node with a single letter
debugMes("removing component with node "+compID+" which has only one node with short seq "+v1.getName(),20);
graph.removeVertex(v1);
numComp = numComp-1;
continue;
}
int t=0;
for (Double w: allW)
t+=w;
//System.err.println("t=" + t + ", allW.size()=" + allW.size());
float avgCov = (float)t/allW.size();
debugMes("SubComp: "+compID+" has "+ comp.size() +" nodes; total coverage: "+t+" average: "+avgCov,20);
if (avgCov<COMP_AVG_COV_THR-0.5) //FIXME: added 0.5 for testing with low cov seq
{
debugMes("removing component with node "+compID+" which has only average coverage of "+
avgCov+ " < "+COMP_AVG_COV_THR,20);
for (SeqVertex v : comp)
graph.removeVertex(v);
numComp = numComp-1;
}
}
debugMes("number of good components: "+numComp,10);
}
/**
* divide the graph into its components
* @param graph
* @return set of components
*/
private static Set<Set<SeqVertex>> divideIntoComponents(DirectedSparseGraph<SeqVertex, SimpleEdge> graph)
{
WeakComponentClusterer<SeqVertex, SimpleEdge> compClus = new WeakComponentClusterer<SeqVertex, SimpleEdge>();
Set<Set<SeqVertex>> comps = compClus.transform(graph);
int comp_counter = 0;
for (Set<SeqVertex> s : comps) {
debugMes("\nComponentDivision: " + comp_counter + " contains the following vertices:", 10);
for (SeqVertex v : s) {
debugMes("node_id: " + v.getID(), 10);
}
comp_counter++;
}
return comps;
}
/**
* connect the source node to each node with indegree=0,
* connect each node with outdegree=0 to the target node
* Also add synthetic read paths from the root into each such source node, and from each terminal node into the sink.
* @param graph
* @param comp the current component
* @param combinedReadHash
*/
private static void addSandT(DirectedSparseGraph<SeqVertex, SimpleEdge> graph, Set<SeqVertex> comp, HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash)
{
graph.addVertex(ROOT);
graph.addVertex(T_VERTEX);
SimpleEdge e=null;
// for (SeqVertex v : graph.getVertices())
for (SeqVertex v : comp)
{
if (graph.inDegree(v)==0 && !v.equals(ROOT) && !v.equals(T_VERTEX)) // connect S to this vertex
{
double w = v.getFirstWeight();
if (w==-1) // single letter node?
{
debugMes("got a single letter node here.. "+v,20);
w = 1;
}
e = new SimpleEdge(w, ROOT.getID(), v.getID());
graph.addEdge(e, ROOT, v);
debugMes("Adding edge from S to "+v,20);
PairPath pathD = new PairPath();
pathD.addToPath1(ROOT.getID());
pathD.addToPath1(v.getID());
if (!combinedReadHash.containsKey(ROOT.getID()))
combinedReadHash.put(ROOT.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(ROOT.getID()).put(pathD, MIN_READ_SUPPORT_THR);
/*
for (SeqVertex v2 : graph.getSuccessors(v))
{
PairPath pathD = new PairPath();
pathD.addToPath1(ROOT.getID());
pathD.addToPath1(v.getID());
pathD.addToPath1(v2.getID());
if (!combinedReadHash.containsKey(ROOT.getID()))
combinedReadHash.put(ROOT.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(ROOT.getID()).put(pathD, MIN_READ_SUPPORT_THR);
}
*/
}
if (graph.outDegree(v)==0 && !v.equals(T_VERTEX) && !v.equals(ROOT)) // connect this vertex to T
{
double w = v.getLastWeight();
if (w==-1)
w=1;
e = new SimpleEdge(w, v.getID(), T_VERTEX.getID());
graph.addEdge(e, v, T_VERTEX);
debugMes("Adding edge from "+v+" to T",20);
PairPath pathD = new PairPath();
pathD.addToPath1(v.getID());
pathD.addToPath1(T_VERTEX.getID());
if (!combinedReadHash.containsKey(v.getID()))
combinedReadHash.put(v.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(v.getID()).put(pathD, MIN_READ_SUPPORT_THR);
/*
for (SeqVertex v2 : graph.getPredecessors(v))
{
PairPath pathD = new PairPath();
pathD.addToPath1(v2.getID());
pathD.addToPath1(v.getID());
pathD.addToPath1(T_VERTEX.getID());
if (!combinedReadHash.containsKey(v2.getID()))
combinedReadHash.put(v2.getID(), new HashMap<PairPath,Integer>());
combinedReadHash.get(v2.getID()).put(pathD, MIN_READ_SUPPORT_THR);
}
*/
}
}
}
/**
* given the graph, remove all edges of S and T
* @param graph
*/
private static void removeAllEdgesOfSandT(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
Set<SimpleEdge> removeEdges = new HashSet<SimpleEdge>();
if (graph.containsVertex(ROOT))
for (SimpleEdge e : graph.getOutEdges(ROOT))
removeEdges.add(e);
if (graph.containsVertex(T_VERTEX))
for (SimpleEdge e : graph.getInEdges(T_VERTEX))
removeEdges.add(e);
for (SimpleEdge re : removeEdges)
graph.removeEdge(re);
}
private static boolean dealWithLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
boolean res = false;
DijkstraShortestPath<SeqVertex, SimpleEdge> dp = new DijkstraShortestPath<SeqVertex, SimpleEdge>(graph);
// These should be only those repeats that aren't evident in the individual read paths,
// since the read-evident repeats were unrolled earlier.
Set<Set<SimpleEdge>> curLoops = new HashSet<Set<SimpleEdge>>();
// find all loops in the graph by seeing if, given edge v->v2, there is a path from v2 back to v
for (SeqVertex v : comp)
{
for (SeqVertex v2 : graph.getSuccessors(v))
{
if (dp.getDistance(v2, v)!=null) // there is a connection between v->v2->... ->v
{
//path has all edges from v to itself thru v2
List<SimpleEdge> loopPath = dp.getPath(v2, v);
// v2 is successor of v, so let's just add the v->v2 edge too, complete the full loop.
loopPath.add(0, graph.findEdge(v, v2));
// capture the path IDs for debugMes reporting below.
List<Integer> pathIDs = new ArrayList<Integer>();
for (SimpleEdge e : loopPath)
pathIDs.add(graph.getDest(e).getID());
// Collect the loop edge set.
Set<SimpleEdge> loopPath_set = new HashSet<SimpleEdge>(loopPath);
if (!curLoops.contains(loopPath_set))
{
curLoops.add(loopPath_set);
debugMes("adding the loop path "+pathIDs+" to the curLoops",12);
}else
{
debugMes("not adding the loop path "+pathIDs+" to the curLoops",12);
}
}
}
}
if (curLoops.isEmpty())
return res;
// process found loops
Set<SimpleEdge> allRelevantEdges = new HashSet<SimpleEdge>();
for (Set<SimpleEdge> loopPath_set : curLoops)
for (SimpleEdge e : loopPath_set)
{
e.increaseNumOfLoopsBy1();
allRelevantEdges.add(e);
}
// break complex loops
if (!allRelevantEdges.isEmpty()){
Comparator<Object> numLoopsComparator = new numLoopsEdgeComparator(graph);
PriorityQueue<SimpleEdge> edgesQ = new PriorityQueue<SimpleEdge>(allRelevantEdges.size(), numLoopsComparator);
edgesQ.addAll(allRelevantEdges);
//while there are still loops
// find the next edge that can be removed to reduce the number of loops
// updated queue: remove all edges, and update their loop content
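// Greedy heuristic: repeatedly pull the edge participating in the largest number of remaining loops,
// remove it from the graph, and retire every loop that edge belonged to, until no loops remain.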
SimpleEdge nextEtoRemove;
while ( (!curLoops.isEmpty()) && (! edgesQ.isEmpty()) )
{
//FIXME: there was a situation where curLoops was not empty,
// but edgesQ was, so I added edgesQ to the while condition. Investigate why this might happen.
// In this case, a node was involved in a self loop and a double-loop.
nextEtoRemove = edgesQ.poll();
if (graph.getSource(nextEtoRemove) == null
|| graph.getDest(nextEtoRemove) == null
|| nextEtoRemove.getNumOfLoopsInvolved() <= 0) {
continue;
}
debugMes("removing the edge " + graph.getSource(nextEtoRemove).getID() + "->" +
graph.getDest(nextEtoRemove).getID() + " that appears in "
+nextEtoRemove.getNumOfLoopsInvolved() + " loops",15);
// remove the loops that have this edge from curLoops
Set<Set<SimpleEdge>> removeLoops = new HashSet<Set<SimpleEdge>>();
for (Set<SimpleEdge> loopPath_set : curLoops)
if (loopPath_set.contains(nextEtoRemove))
{
debugMes("the loop "+ loopPath_set+" is now solved",15);
removeLoops.add(loopPath_set);
// update the number of loops involved in each edge
for (SimpleEdge e : loopPath_set)
e.decreaseNumOfLoopsBy1();
}
for (Set<SimpleEdge> loopPath_set : removeLoops)
curLoops.remove(loopPath_set);
//update the queue. remove all, and insert again if numLoops>0.
SimpleEdge[] relEdges = (SimpleEdge[]) edgesQ.toArray(new SimpleEdge[0]);
edgesQ.clear();
for (SimpleEdge otherE : relEdges)
if (otherE.getNumOfLoopsInvolved()>0)
edgesQ.add(otherE);
// remove this edge
graph.removeEdge(nextEtoRemove);
res = true;
}
}
return res;
}
/**
* given the graph and the node with the self loop,
* find the reads that support this loop, and multiply this vertex as many times as needed, and then remap these reads.
* @param graph
* @param v
* @param combinedReadHash
* @param newVers
*/
private static void dealWithSelfLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, Set<SeqVertex> newVers) {
int vid = v.getID();
int maxNumOfOccurrences = 0;
HashMap<PairPath, Integer> relevantReads = new HashMap<PairPath, Integer>();
for (Integer startV : combinedReadHash.keySet())
{
for (PairPath path: combinedReadHash.get(startV).keySet())
{
int numOcc = path.numOccurrences(vid);
if (numOcc>0)
{
Integer count = combinedReadHash.get(startV).get(path);
if (count == null)
debugMes("stop here",10);
relevantReads.put(path,count);
}
if ( numOcc> maxNumOfOccurrences) //this read includes this vertex
{
debugMes("the read "+path+" includes the vertex "+vid+" "+numOcc+" times",19);
maxNumOfOccurrences = numOcc;
}
}
}
// remove the self loop
SimpleEdge removeE = graph.findEdge(v, v);
double oldW = removeE.getWeight();
List<Integer> newVerIDs = new ArrayList<Integer>();
newVerIDs.add(vid);
graph.removeEdge(removeE);
debugMes("removing the edge between "+ v +" and itself",20);
// multiply this node maxNumOfOccurrences times
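// Unroll the self loop: clone v (maxNumOfOccurrences - 1) times and chain the clones linearly
// (v -> v' -> v'' -> ...), so each traversal of the old loop maps onto a distinct copy.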
int upID = vid;
int downID = -1;
ArrayList<SimpleEdge> removeEdges = new ArrayList<SimpleEdge>();
for (int i=2; i<=maxNumOfOccurrences; i++)
{
if (downID!=-1)
upID = downID;
downID = getNextID();
newVerIDs.add(downID);
SeqVertex newV = new SeqVertex(downID, v);
debugMes("adding the new vertex "+newV.getID(),20);
newV.setOrigButterflyID(v.getID());
graph.addVertex(newV);
SeqVertex upV = getSeqVertex(graph, upID);
newVers.add(newV);
for (SeqVertex vOut : graph.getSuccessors(upV))
{
debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),20);
graph.addEdge(new SimpleEdge(graph.findEdge(v, vOut), newV.getID(), vOut.getID()), newV, vOut);
}
debugMes("adding an edge between "+upID+" and "+newV.getID(),20);
graph.addEdge(new SimpleEdge(oldW, upV.getID(), newV.getID()), upV, newV);
}
// moved to the end of loop added in the new loop opening process - Feb 2013
SeqVertex newV = getSeqVertex(graph, downID);
for (SeqVertex vOut : graph.getSuccessors(v))
{
if (!newVerIDs.contains(vOut.getID())){
debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),0);
SimpleEdge e = graph.findEdge(v, vOut);
graph.addEdge(new SimpleEdge(e, newV.getID(), vOut.getID()), newV, vOut);
debugMes("removing the edge between "+ v.getID() +" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
//remove edges:
for (SimpleEdge re : removeEdges){
graph.removeEdge(re);
}
List<Integer> loopVIDs = new ArrayList<Integer>();
loopVIDs.add(vid);
List<List<Integer>> newVerIDsList = new ArrayList<List<Integer>>();
newVerIDsList.add(newVerIDs);
updateReadsAfterLoopOpening(combinedReadHash,relevantReads,loopVIDs,newVerIDsList,maxNumOfOccurrences);
}
/**
* Given the combinedReadHash, and the relevant reads, update their paths.
* @param combinedReadHash
* @param relevantReads
* @param loopVIDs
* @param newVerIDs
* @param maxNumOfOccurrences
*/
private static void updateReadsAfterLoopOpening(
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash,
HashMap<PairPath, Integer> relevantReads, List<Integer> loopVIDs,
List<List<Integer>> newVerIDs, int maxNumOfOccurrences) {
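// For every read path that touched the loop, rewrite repeated loop traversals to reference the new
// clone vertex ids, then re-key the PairPath in combinedReadHash under its (possibly changed) first vertex.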
for (PairPath path: relevantReads.keySet())
{
Integer origFirstV = path.getFirstID();
Integer origCount = combinedReadHash.get(origFirstV).get(path);
List<Integer> newPath1 = new ArrayList<Integer>(path.getPath1());
List<Integer> newPath2 = new ArrayList<Integer>(path.getPath2());
if (loopVIDs.size()==1)
{
updatePathOfSelfLoop(newPath1,loopVIDs,newVerIDs.get(0),maxNumOfOccurrences);
updatePathOfSelfLoop(newPath2,loopVIDs,newVerIDs.get(0),maxNumOfOccurrences);
} else
{
updatePathOfDoubleLoop(newPath1,loopVIDs,newVerIDs.get(0),newVerIDs.get(1),maxNumOfOccurrences);
updatePathOfDoubleLoop(newPath2,loopVIDs,newVerIDs.get(0),newVerIDs.get(1),maxNumOfOccurrences);
}
// path hasn't changed
if (path.getPath1().equals(newPath1) && path.getPath2().equals(newPath2))
continue;
// both are empty now
if (newPath1.isEmpty() && newPath2.isEmpty())
combinedReadHash.get(origFirstV).remove(path);
// at least one has changed
PairPath newKey;
if (newPath1.isEmpty())
newKey = new PairPath(newPath2,new ArrayList<Integer>());
else if (newPath2.isEmpty())
newKey = new PairPath(newPath1,new ArrayList<Integer>());
else
newKey = new PairPath(newPath1,newPath2);
Integer firstV = newKey.getFirstID();
if (!combinedReadHash.containsKey(firstV))
combinedReadHash.put(firstV, new HashMap<PairPath, Integer>());
if (combinedReadHash.get(firstV).containsKey(newKey))
{
Integer oldCount = combinedReadHash.get(firstV).get(newKey);
combinedReadHash.get(firstV).put(newKey,oldCount+origCount);
combinedReadHash.get(firstV).remove(path);
}else
{
combinedReadHash.get(firstV).put(newKey,origCount);
}
// remove the old loop-containing path
combinedReadHash.get(origFirstV).remove(path);
}
}
/**
* given a path, the vid of the self loop, and the new vertices' id, update the path
* if the path starts or ends inside the loop, trim this part of the path, and leave only the outside info.
* @param path
* @param loopVIDs
* @param newVerIDs
* @param maxNumOcc
*/
private static void updatePathOfSelfLoop(List<Integer> path, List<Integer> loopVIDs,
List<Integer> newVerIDs,int maxNumOcc) {
int vid = loopVIDs.get(0).intValue();
String origPath = ""+path;
Set<Integer> loopVs = new HashSet<Integer>();
loopVs.add(vid);
boolean changed = false;
if (path.contains(vid))
{
if (path.get(0).intValue()==vid)
{ //starts inside the loop
changed = true;
if (path.get(path.size()-1).intValue()==vid)
//starts and ends inside the loop
if (path.size()==maxNumOcc)
{
for (int i=1 ; i<=path.size()-1 ; i++)
path.set(i,newVerIDs.get(i));
changed = true;
}else
path.clear();
else
updatePathToRemoveLoopNodes(path,loopVs);
}else
{ // starts and ends outside the loop
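// Each repeated occurrence of the looped vertex is redirected to the next clone in newVerIDs:
// the clone index is taken from where the preceding path vertex sits in the clone chain.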
for (int i=1 ; i<=path.size()-1 ; i++)
{
if (path.get(i).intValue()==vid)
{
int j = newVerIDs.indexOf(path.get(i-1));
if (j>=0)
{
path.set(i, newVerIDs.get(j+1));
changed = true;
}
}
}
}
}
if (changed)
debugMes("path changed from "+origPath+" to "+path,20);
}
/**
* remove the integers that are inside the loop
* @param path
* @param loopVs
*/
private static void updatePathToRemoveLoopNodes(List<Integer> path,
Set<Integer> loopVs) {
List<Integer> indicesToRemove = new ArrayList<Integer>();
for (int i=0 ; i<=path.size()-1 ; i++)
if (loopVs.contains(path.get(i)))
indicesToRemove.add(i);
Collections.sort(indicesToRemove);
Collections.reverse(indicesToRemove);
for (Integer i : indicesToRemove)
path.remove(i.intValue());
}
/**
* given the graph and the two nodes that form a double loop (v1 <-> v2),
* find the reads that support this loop, and replicate these vertices as many times as needed
* @param graph
* @param t_v1
* @param t_v2
* @param combinedReadHash
* @param newVers
*/
private static void dealWithDoubleLoops(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex t_v1,
SeqVertex t_v2,
HashMap<Integer, HashMap<PairPath, Integer>> combinedReadHash, Set<SeqVertex> newVers)
{
int v1_id=-1; // the one inside the regular flow
int v2_id=-1; // the addition
//CONTINUE HERE!
if (graph.getSuccessorCount(t_v1)>1)
{
v1_id = t_v1.getID();
v2_id = t_v2.getID();
} else if (graph.getSuccessorCount(t_v2)>1)
{
v1_id = t_v2.getID();
v2_id = t_v1.getID();
}
if (v1_id==-1)
{
//FIXME - decide randomly, doesn't solve the loops right. (ignores input edges to t_v1)
v1_id = t_v1.getID();
v2_id = t_v2.getID();
}
debugMes("\n\ndealWithDoubleLoops: v1 = " + v1_id + ", v2 = " + v2_id, 15);
// count the number of times v2 appears within reads
HashMap<PairPath, Integer> relevantReads = new HashMap<PairPath, Integer>();
int maxNumOfOccurrences = 0;
for (Integer startV : combinedReadHash.keySet())
{
for (PairPath path: combinedReadHash.get(startV).keySet())
{
int numOcc2 = path.numOccurrences(v2_id);
debugMes("read-to-vertex-count: " + path + " contains vertex: " + v2_id + " this many times: " + numOcc2, 15);
if (numOcc2>0)
{
Integer count = combinedReadHash.get(startV).get(path);
if (count == null)
{
// shouldn't happen
for (PairPath path2: combinedReadHash.get(startV).keySet())
{
debugMes("path: "+path2+" with hashCode "+path2.hashCode(),15);
debugMes("path: "+path2+" with value "+combinedReadHash.get(startV).get(path2),15);
}
}
relevantReads.put(path,count);
if ( numOcc2> maxNumOfOccurrences) //this read includes this vertex
{
debugMes("MAX:the read "+path+" includes the vertex "+v2_id+" "+numOcc2+" times",15);
maxNumOfOccurrences = numOcc2;
}
}
}
}
// the loop is v1 (v2,v1)*
//if we count how many times v2 appears, then the number of appearances of v1 is one more.
SeqVertex v1 = getSeqVertex(graph, v1_id);
SeqVertex v2 = getSeqVertex(graph, v2_id);
List<Integer> newVerIDs_v1 = new ArrayList<Integer>();
List<Integer> newVerIDs_v2 = new ArrayList<Integer>();
newVerIDs_v1.add(v1_id);
newVerIDs_v2.add(v2_id);
// remove the self loop
SimpleEdge removeE = graph.findEdge(v2, v1);
double oldW = removeE.getWeight();
double oldW2 = graph.findEdge(v1, v2).getWeight();
graph.removeEdge(removeE);
debugMes("removing the edge between "+ v2_id +" and "+v1_id,15);
// multiply this node maxNumOfOccurrences times
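// Unroll the v1 (v2,v1)* loop: the first repeat adds a clone of v1 reachable via v2, and each further
// repeat appends a fresh (v2', v1') pair, turning the cycle into a linear chain of clones.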
int up_v1 = v1_id;
if (maxNumOfOccurrences>=1) //multiply only v1
{
SeqVertex newV = new SeqVertex(getNextID(), v1);
newV.setOrigButterflyID(v1_id);
graph.addVertex(newV);
newVerIDs_v1.add(newV.getID());
newVers.add(newV);
// // removed in the new loop opening process - Feb 2013
// for (SeqVertex vOut : graph.getSuccessors(v1))
// if (!vOut.equals(v2))
// debugMes("adding an edge between "+newV.getID()+" and "+vOut.getID(),20);
// graph.addEdge(new SimpleEdge(graph.findEdge(v1, vOut)), newV, vOut);
debugMes("adding an edge between "+v2_id+" and "+newV.getID(),20);
graph.addEdge(new SimpleEdge(oldW, v2.getID(), newV.getID()), v2, newV);
up_v1 = newV.getID();
}
int up_v2 = v2_id;
//int down_v1 = -1;
int down_v2 = -1;
int down_v1 = up_v1;
for (int i=2; i<=maxNumOfOccurrences; i++) // multiple v2-v1
{
if (down_v1!=-1)
{
// on next iteration
up_v1 = down_v1;
}
down_v1 = getNextID();
down_v2 = getNextID();
newVerIDs_v1.add(down_v1);
newVerIDs_v2.add(down_v2);
SeqVertex newV1 = new SeqVertex(down_v1, v1);
newV1.setOrigButterflyID(v1_id);
SeqVertex newV2 = new SeqVertex(down_v2, v2);
newV2.setOrigButterflyID(v2_id);
//debugMes("i="+i+"("+maxNumOfOccurrences+") adding newV1:"+newV1+" newV2:"+newV2,10);
graph.addVertex(newV1);
graph.addVertex(newV2);
newVers.add(newV1);
newVers.add(newV2);
SeqVertex upV = getSeqVertex(graph, up_v1);
/*
SeqVertex orig_upV = getSeqVertex(graph, v1_id);
if (i==maxNumOfOccurrences){// this cirteria added in the new loop opening process - Feb 2013 // moved down below as bugfix, since need to do this for no-repeats too. Sept 2014
for (SeqVertex vOut : graph.getSuccessors(orig_upV))
{
if (!newVerIDs_v2.contains(vOut.getID()))
{
debugMes("adding an edge between "+newV1.getID()+" and "+vOut.getID(),20);
SimpleEdge e = graph.findEdge(orig_upV, vOut);
graph.addEdge(new SimpleEdge(e), newV1, vOut);
debugMes("removing an edge between "+orig_upV.getID()+" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
}
*/
// // removed in the new loop opening process - Feb 2013
// for (SeqVertex vIn : graph.getPredecessors(getSeqVertex(graph, up_v2)))
// if (!newVerIDs_v1.contains(vIn.getID()))
// debugMes("$$adding an edge between "+vIn.getID()+" and "+down_v2,10);
// graph.addEdge(new SimpleEdge(graph.findEdge(vIn, getSeqVertex(graph, up_v2))), vIn, newV2);
debugMes("adding an edge between "+up_v1+" and "+newV2.getID(),20);
graph.addEdge(new SimpleEdge(oldW, upV.getID(), newV2.getID()), upV, newV2);
debugMes("adding an edge between "+newV2.getID()+" and "+newV1.getID(),20);
graph.addEdge(new SimpleEdge(oldW2, newV2.getID(), newV1.getID()), newV2, newV1);
}
List<Integer> loopVIDs = new ArrayList<Integer>();
loopVIDs.add(v1_id);
loopVIDs.add(v2_id);
List<List<Integer>> newVerIDs = new ArrayList<List<Integer>>();
newVerIDs.add(newVerIDs_v1);
newVerIDs.add(newVerIDs_v2);
// relocate original V1's out-edges
SeqVertex orig_upV = getSeqVertex(graph, v1_id);
SeqVertex down_v1_vertex = getSeqVertex(graph, down_v1);
ArrayList<SimpleEdge> removeEdges = new ArrayList<SimpleEdge>();
for (SeqVertex vOut : graph.getSuccessors(orig_upV))
{
if (!newVerIDs_v2.contains(vOut.getID()))
{
debugMes("adding an edge between "+down_v1_vertex.getID()+" and "+vOut.getID(),20);
SimpleEdge e = graph.findEdge(orig_upV, vOut);
graph.addEdge(new SimpleEdge(e, down_v1_vertex.getID(), vOut.getID()), down_v1_vertex, vOut);
debugMes("removing an edge between "+orig_upV.getID()+" and "+vOut.getID(),20);
removeEdges.add(e);
}
}
//remove edges:
for (SimpleEdge re : removeEdges){
graph.removeEdge(re);
}
updateReadsAfterLoopOpening(combinedReadHash,relevantReads,loopVIDs,newVerIDs,maxNumOfOccurrences);
}
/**
* given a path, the vid of the loop vertices, and the new vertices' id, update the path
* if the path starts or ends inside the loop, trim this part of the path, and leave only the outside info.
* @param path
* @param loopVIDs
* @param newVerIDsV1
* @param newVerIDsV2
* @param maxNumOfOccurrences
*/
private static void updatePathOfDoubleLoop(
List<Integer> path,
List<Integer> loopVIDs,
List<Integer> newVerIDsV1,
List<Integer> newVerIDsV2,
int maxNumOfOccurrences) {
int v1_id = loopVIDs.get(0).intValue();
int v2_id = loopVIDs.get(1).intValue();
debugMes("updatePathOfDoubleLoop, read: " + path + ", loop vertices: " + v1_id + ", " + v2_id, 15);
if (path.isEmpty())
return;
boolean changed = false;
String origPath = ""+path;
Set<Integer> loopVs = new HashSet<Integer>();
loopVs.add(v1_id);
loopVs.add(v2_id);
int firstV = path.get(0).intValue();
int lastV = path.get(path.size()-1).intValue();
if (path.contains(v2_id))
{
if (firstV==v1_id || firstV==v2_id)
{
changed = true;
if (firstV==v1_id || lastV==v2_id)
// the whole path is inside the loop
if ((firstV==v1_id && lastV==v1_id && path.size()==maxNumOfOccurrences*2+1) ||
(firstV==v2_id && lastV==v2_id && path.size()==maxNumOfOccurrences*2-1) ||
(firstV==v1_id && lastV==v2_id && path.size()==maxNumOfOccurrences*2) ||
(firstV==v2_id && lastV==v1_id && path.size()==maxNumOfOccurrences*2) ) // all path is in the loop, but there is only one new path that matches
{
changed = updateSinglePathWithDoubleLoopNodes(path,v1_id,v2_id,newVerIDsV1,newVerIDsV2);
}else
path.clear();
else
{// only the start is inside the loop
updatePathToRemoveLoopNodes(path, loopVs);
changed = true;
}
}else
{ // start and ends outside the loop
changed = updateSinglePathWithDoubleLoopNodes(path,v1_id,v2_id,newVerIDsV1,newVerIDsV2);
}
}
if (changed)
debugMes("\tpath changed from "+origPath+" to "+path,15);
else
debugMes("\tpath remains unchanged.", 15);
}
/**
* given this path, and the loop info, update the path to its single option.
* @param path
* @param v1_id
* @param v2_id
* @param newVerIDsV1
* @param newVerIDsV2
* @return
*/
private static boolean updateSinglePathWithDoubleLoopNodes(
List<Integer> path, int v1_id, int v2_id, List<Integer> newVerIDsV1,
List<Integer> newVerIDsV2) {
boolean changed = false;
for (int i=1 ; i<=path.size()-1 ; i++)
{
if (path.get(i).intValue()==v1_id)
{
int j = newVerIDsV2.indexOf(path.get(i-1));
if (j>=0)
{
path.set(i, newVerIDsV1.get(j+1));
changed = true;
}
} else if (path.get(i).intValue()==v2_id)
{
int j = newVerIDsV1.indexOf(path.get(i-1));
if (j>=1)
{
path.set(i, newVerIDsV2.get(j));
changed = true;
}
}
}
return changed;
}
/**
* print out the given error message, only if DEBUG=true
* @param mes Message
*/
private static void debugMes(String mes, int verbosityLevel)
{
//TODO: use general logging that can be leveraged across all classes.
if (DEBUG && verbosityLevel<=BFLY_GLOBALS.VERBOSE_LEVEL)
{
if (USE_STDERR)
System.err.println(mes);
else if (LOG_STDERR)
ERR_STREAM.println(mes);
}
}
/**
* combine prefixes:
* calc for each v it's "depth" in terms of length of strings (from them on)
* draw all v's with the same depth
* sort on their set of parents
* draw all v's with same depth and same set of parents
* find subsets of those with same prefix
* create new node with prefix, connect accordingly.
* add the rest (those that removed the prefix) back into queue, with new depths
* @param graph
* @param compaction_round
* @return
*/
/**
* Given the graph, go over all vertices, and calculate their depth, as in distance from the roots
* (maximal or minimal??) = doesn't matter as long as it's consistent. I chose maximal.
* @param graph
*/
private static void setVerticesDepths(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
My_DFS dfs = new My_DFS(graph);
dfs.runDFS2();
List<SeqVertex> topBottom = getTopologicalOrder(graph, dfs);
for (SeqVertex v : topBottom)
{
if (graph.inDegree(v)==0)
{
v.setDepth(0);
}
else
{
int d = -1;
for (SeqVertex tv : graph.getPredecessors(v))
{
if (tv.getDepth() + tv.getNameKmerAdj().length() >d)
d=tv.getDepth() + tv.getNameKmerAdj().length();
}
v.setDepth(d);
// MAX_DEPTH global var being set here, used by prefix compaction method.
if (d>MAX_DEPTH)
MAX_DEPTH = d;
}
}
}
/**
* Given the graph, and the vertex v, return a sorted list of its parents
* @param graph
* @param v
* @return
*/
private static List<SeqVertex> getSortedParentList(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph, SeqVertex v) {
List<SeqVertex> res = new ArrayList<SeqVertex>(graph.getPredecessors(v));
SeqComparator verComp = new SeqComparator();
Collections.sort(res, verComp);
return res;
}
/**
* Given the graph, and the candidate nodes, look for shared prefixes of a single letter,
* and move on.
* @param graph
* @param candidateNodes
* @param updateQueue
*/
/*
private static boolean compactPrefixRecursive(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Collection<SeqVertex> candidateNodes,
Collection<SeqVertex> updatedNodes) {
debugMes("** compactPrefixRecursive: " + candidateNodes, 20);
boolean changed = false;
for (String l : LETTERS)
{
Collection<SeqVertex> vWithL = getVerticesWithFirstLetter(candidateNodes,l);
if (vWithL.size()<=1)
continue;
debugMes("vWithL set based on l=" + l + ": " + candidateNodes, 20);
// if there is a letter that has more than one vertex, create a new vertex with this letter
changed = true;
SeqVertex newV = new SeqVertex(getNextID(), l);
// retain the identity of the vertices being collapsed here.
newV.addIDsAsFirstPrevIDs(vWithL,LAST_REAL_ID);
Collection<SeqVertex> new_vWithL = new HashSet<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
for (SeqVertex v_in_vWithL : vWithL)
{
if (!graph.containsVertex(v_in_vWithL))
continue;
// create a new vertex with the first prevID as id
SeqVertex newReplaceV_in_vWithL;
if (!v_in_vWithL.getPrevVerIDs().isEmpty() // node already subsumed another complex node
&&
v_in_vWithL.getPrevVerIDs().firstElement().size()>1)
{
// just copying over the info into a new node
newReplaceV_in_vWithL = new SeqVertex(getNextID(), v_in_vWithL.getName());
newReplaceV_in_vWithL.copyTheRest(v_in_vWithL);
debugMes("compactPrefixRecursive/complex: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
else {
newReplaceV_in_vWithL = v_in_vWithL.generateNewVerWithFirstIDasID();
debugMes("compactPrefixRecursive/simple: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
// move all edges from and to the orig, to the new
if (!newReplaceV_in_vWithL.equals(v_in_vWithL)) // they will be equal if the v_withL has no prevIDs, and only his original id
{
for (SimpleEdge e : graph.getOutEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), newReplaceV_in_vWithL, graph.getDest(e));
}
for (SimpleEdge e : graph.getInEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), graph.getSource(e), newReplaceV_in_vWithL);
}
}
//replace it's location within vWithL
new_vWithL.add(newReplaceV_in_vWithL);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge "+re+" between "+graph.getSource(re)+" and "+graph.getDest(re),20);
graph.removeEdge(re);
}
for (SeqVertex rv : vWithL)
{
if (!new_vWithL.contains(rv))
{
debugMes("removing vertex (because new_vWithL doesn't contain it) "+rv,20);
graph.removeVertex(rv);
}
}
vWithL = new_vWithL;
graph.addVertex(newV);
debugMes("pulled the first letter from all vertices in "+vWithL+" to the new vertex "+newV,20);
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
for (SeqVertex v1 : vWithL)
{
// if (removeVertices.contains(v1) || !graph.containsVertex(v1))
if (v1.isToBeDeleted() || !graph.containsVertex(v1))
continue;
removeEdges.clear();
v1.increaseDepthByOne();
////////////////////////////////////
// reassign incoming edges to newV
////////////////////////////////////
for (SimpleEdge edgeToRemove : graph.getInEdges(v1))
{
double w2 = edgeToRemove.getWeight();
SimpleEdge newE2 = null;
SeqVertex v3 = graph.getSource(edgeToRemove);
if (graph.findEdge(v3,newV)==null)
{
newE2 = new SimpleEdge(w2);
graph.addEdge(newE2, v3,newV); // edge reassignment
debugMes("adding edge "+newE2+" between "+v3+" and "+newV,20);
}else
{
newE2 = graph.findEdge(v3,newV);
if (w2>newE2.getWeight())
{
//FIXME ?? do we want to add up the weights?
debugMes("setting edge "+newE2+"'s weight from "+newE2.getWeight()+" to "+w2,20);
newE2.setWeight(w2);
}
}
removeEdges.add(edgeToRemove);
debugMes("removed edge "+edgeToRemove+" between "+graph.getSource(edgeToRemove)+" and "+graph.getDest(edgeToRemove),20);
}
///////////////////////////////////
// handle outgoing edges (newE1)
////////////////////////////////////
if (v1.getName().length()==1)
{
// single base successor node, just remove it and reassign out-edge.
v1.removeFirstLetter();
//go over all edges going out of v1, and move them to exit newV
for (SeqVertex v0 : graph.getSuccessors(v1))
{
double w = graph.findEdge(v1,v0).getWeight();
graph.addEdge(new SimpleEdge(w), newV,v0); // edge reassignments.
debugMes("adding edge "+w+" between "+newV+" and "+v0,20);
}
debugMes("vertex "+v1+" is going to be removed",20);
removeVertices.add(v1);
v1.setToBeDeleted(true);
}
else if (v1.getName().length()<=KMER_SIZE && graph.outDegree(v1)==0) {
// short terminal node less than a kmer size.
// why do we need to handle this special use case?
SeqVertex newV1; // needed only if this node is less than K in length
v1.removeFirstLetter();
Collection<SeqVertex> upV = graph.getPredecessors(v1);
if (v1.getID()<=LAST_REAL_ID)
{
newV1 = new SeqVertex(getNextID(),v1.getName());
graph.addVertex(newV1);
removeVertices.add(v1);
v1.setToBeDeleted(true);
} else
newV1 = v1; // what scenario?
//go over all edges going into v1, and move them to exit newV
if (upV.size()==1) // why only handling the case of a single parent?
{
for (SeqVertex upV1 : upV) // only one here, no iteration needed
{
SimpleEdge oldE = graph.findEdge(upV1, v1);
double w = oldE.getWeight();
graph.addEdge(new SimpleEdge(w), newV,newV1);
removeEdges.add(oldE); // already done above?
debugMes("adding edge "+w+" between "+newV+" and "+newV1,20);
debugMes("removing edge "+w+" between "+upV1+" and "+v1,20);
graph.addEdge(new SimpleEdge(1), v1, newV1);
}
}
}
else {
// all other cases.
double w = v1.removeFirstLetter();
SimpleEdge newE1 = new SimpleEdge(w);
graph.addEdge(newE1, newV,v1);
debugMes("adding edge "+newE1+" between "+newV+" and "+v1,20);
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
}
}
//try this out
updatedNodes.clear();
Set<SeqVertex> toAddTo_vWithL = new HashSet<SeqVertex>();
// removing vertices targeted for deletion.
for (SeqVertex rv : removeVertices)
{
graph.removeVertex(rv);
debugMes("removed vertex "+rv,20);
if (vWithL.contains(rv))
vWithL.remove(rv);
if (candidateNodes.contains(rv))
candidateNodes.remove(rv);
}
// the restructuring to newV could result in new children available for further compaction.
// check for other children of newV that are at the same depth and candidates for further compaction:
for (SeqVertex vChild : graph.getSuccessors(newV))
if (!vWithL.contains(vChild) && vChild.hasAllSameParents(graph, vWithL))
//vChild.getDepth()==curDepth)
toAddTo_vWithL.add(vChild);
for (SeqVertex vToAdd : toAddTo_vWithL)
vWithL.add(vToAdd);
for (SeqVertex vToAdd : vWithL)
{
updatedNodes.add(vToAdd);
}
if (vWithL.size()>1)
compactPrefixRecursive(graph, vWithL, updatedNodes);
}
return changed;
}
*/
/*
private static boolean compactPrefixRecursive_v2(
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
Collection<SeqVertex> candidateNodes,
Collection<SeqVertex> updatedNodes) {
debugMes("** compactPrefixRecursive: " + candidateNodes, 20);
boolean changed = false;
for (String l : LETTERS)
{
Collection<SeqVertex> vWithL = getVerticesWithFirstLetter(candidateNodes,l);
if (vWithL.size()<=1)
continue;
debugMes("vWithL set based on l=" + l + ": " + candidateNodes, 20);
// if there is a letter that has more than one vertex, create a new vertex with this letter
changed = true;
SeqVertex newV = new SeqVertex(getNextID(), l);
// retain the identity of the vertices being collapsed here.
newV.addIDsAsFirstPrevIDs(vWithL,LAST_REAL_ID);
// copy the current vertex list over to a new set of nodes.
Collection<SeqVertex> new_vWithL = new HashSet<SeqVertex>();
Vector<SimpleEdge> removeEdges = new Vector<SimpleEdge>();
for (SeqVertex v_in_vWithL : vWithL)
{
if (!graph.containsVertex(v_in_vWithL))
continue;
// create a new vertex with the first prevID as id
SeqVertex newReplaceV_in_vWithL;
if (!v_in_vWithL.getPrevVerIDs().isEmpty() // node already subsumed another complex node
&&
v_in_vWithL.getPrevVerIDs().firstElement().size()>1)
{
// just copying over the info into a new node
newReplaceV_in_vWithL = new SeqVertex(getNextID(), v_in_vWithL.getName());
newReplaceV_in_vWithL.copyTheRest(v_in_vWithL);
debugMes("compactPrefixRecursive/complex: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
else {
newReplaceV_in_vWithL = v_in_vWithL.generateNewVerWithFirstIDasID();
debugMes("compactPrefixRecursive/simple: Node: " + v_in_vWithL.getShortSeqWID() + " => " + newReplaceV_in_vWithL.getShortSeqWID(), 20);
}
// move all edges from and to the orig, to the new
if (!newReplaceV_in_vWithL.equals(v_in_vWithL)) // they will be equal if the v_withL has no prevIDs, and only his original id
{
for (SimpleEdge e : graph.getOutEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), newReplaceV_in_vWithL, graph.getDest(e));
}
for (SimpleEdge e : graph.getInEdges(v_in_vWithL))
{
removeEdges.add(e);
graph.addEdge(new SimpleEdge(e.getWeight()), graph.getSource(e), newReplaceV_in_vWithL);
}
}
//replace it's location within vWithL
new_vWithL.add(newReplaceV_in_vWithL);
}
for (SimpleEdge re : removeEdges)
{
debugMes("removing edge "+re+" between "+graph.getSource(re)+" and "+graph.getDest(re),20);
graph.removeEdge(re);
}
// remove the original vertices.
for (SeqVertex rv : vWithL)
{
if (!new_vWithL.contains(rv))
{
debugMes("removing vertex (because new_vWithL doesn't contain it) "+rv,20);
graph.removeVertex(rv);
}
}
vWithL = new_vWithL;
graph.addVertex(newV);
debugMes("pulled the first letter from all vertices in "+vWithL+" to the new vertex "+newV,20);
Vector<SeqVertex> removeVertices = new Vector<SeqVertex>();
for (SeqVertex v1 : vWithL)
{
if (v1.isToBeDeleted() || !graph.containsVertex(v1))
continue;
removeEdges.clear();
v1.increaseDepthByOne();
////////////////////////////////////
// reassign incoming edges to newV
////////////////////////////////////
for (SimpleEdge edgeToRemove : graph.getInEdges(v1))
{
double w2 = edgeToRemove.getWeight();
SimpleEdge newE2 = null;
SeqVertex v3 = graph.getSource(edgeToRemove);
if (graph.findEdge(v3,newV)==null)
{
newE2 = new SimpleEdge(w2);
graph.addEdge(newE2, v3,newV); // edge reassignment
debugMes("adding edge "+newE2+" between "+v3+" and "+newV,20);
}else
{
newE2 = graph.findEdge(v3,newV);
if (w2>newE2.getWeight())
{
//FIXME ?? do we want to add up the weights?
debugMes("setting edge "+newE2+"'s weight from "+newE2.getWeight()+" to "+w2,20);
newE2.setWeight(w2);
}
}
removeEdges.add(edgeToRemove);
debugMes("removed edge "+edgeToRemove+" between "+graph.getSource(edgeToRemove)+" and "+graph.getDest(edgeToRemove),20);
}
///////////////////////////////////
// handle outgoing edges (newE1)
////////////////////////////////////
if (v1.getName().length()==1)
{
// single base successor node, just remove it and reassign out-edge.
v1.removeFirstLetter();
//go over all edges going out of v1, and move them to exit newV
for (SeqVertex v0 : graph.getSuccessors(v1))
{
double w = graph.findEdge(v1,v0).getWeight();
graph.addEdge(new SimpleEdge(w), newV,v0); // edge reassignments.
debugMes("adding edge "+w+" between "+newV+" and "+v0,20);
}
debugMes("vertex "+v1+" is going to be removed",20);
removeVertices.add(v1);
v1.setToBeDeleted(true);
}
else {
// all other cases.
double w = v1.removeFirstLetter();
SimpleEdge newE1 = new SimpleEdge(w);
graph.addEdge(newE1, newV,v1);
debugMes("adding edge "+newE1+" between "+newV+" and "+v1,20);
}
for (SimpleEdge re : removeEdges)
{
graph.removeEdge(re);
}
}
//try this out
updatedNodes.clear();
Set<SeqVertex> toAddTo_vWithL = new HashSet<SeqVertex>();
int curDepth = -1;
// use this curDepth to decide if to add the children or not.
if (!removeVertices.isEmpty())
for (SeqVertex ver : vWithL)
// if (!removeVertices.contains(ver))
if (!ver.isToBeDeleted())
curDepth = ver.getDepth();
// removing vertices targeted for deletion.
for (SeqVertex rv : removeVertices)
{
graph.removeVertex(rv);
debugMes("removed vertex "+rv,20);
if (vWithL.contains(rv))
vWithL.remove(rv);
if (candidateNodes.contains(rv))
candidateNodes.remove(rv);
}
// the restructuring to newV could result in new children available for further compaction.
// check for other children of newV that are at the same depth and candidates for further compaction:
for (SeqVertex vChild : graph.getSuccessors(newV)) {
//debugMes("vChild: " + vChild+ ", vWithL: " + vWithL, 5);
if (!vWithL.contains(vChild) && vChild.hasAllSameParents(graph, vWithL)) {
//vChild.getDepth()==curDepth)
toAddTo_vWithL.add(vChild);
}
}
for (SeqVertex vToAdd : toAddTo_vWithL)
vWithL.add(vToAdd);
for (SeqVertex vToAdd : vWithL)
{
updatedNodes.add(vToAdd);
}
if (vWithL.size()>1)
compactPrefixRecursive_v2(graph, vWithL, updatedNodes);
}
return changed;
}
*/
/**
 * Given the set of nodes, return the subset of nodes whose name starts with the given letter l
 * @param candidateNodes
 * @param l
 * @return
 */
/*
private static Collection<SeqVertex> getVerticesWithFirstLetter(
Collection<SeqVertex> candidateNodes, String l) {
Collection<SeqVertex> res = new HashSet<SeqVertex>();
for (SeqVertex v : candidateNodes)
{
if (v.getName().startsWith(l))
res.add(v);
}
return res;
}
*/
// retrieve path list from first unshared node till the end (minus the final vertex)
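// Example (illustration only): path_to_search=[1,2,3,4,5], path_to_index=[1,2,9]
// -> the first unshared node is 3, and [3,4] is returned (the final vertex 5 is always excluded).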
public static List<Integer> get_unshared_path_terminus(List<Integer> path_to_search, List<Integer> path_to_index) {
debugMes("Path to search: " + path_to_search, 19);
debugMes("Path to index: " + path_to_index, 19);
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (Integer x : path_to_index) {
path_index.put(x, new Boolean(true));
}
int unshared_path_pos = path_to_search.size(); // effectively infinity; only lowered below if an unshared node is found
for (int i = 0; i <= path_to_search.size()-2; i++) {
if (! path_index.containsKey( path_to_search.get(i) ) ) {
unshared_path_pos = i;
break;
}
}
List<Integer> unique_terminal_path = new Vector<Integer>();
for (int i = unshared_path_pos; i <= path_to_search.size() -2; i++) {
unique_terminal_path.add(path_to_search.get(i));
}
debugMes("Unique terminal path: " + unique_terminal_path, 19);
return(unique_terminal_path);
}
// see if any node is shared between the lists
public static boolean paths_have_node_in_common (List<Integer> pathA, List<Integer> pathB) {
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (int i = 0; i < pathA.size() - 1; i++) {
path_index.put(pathA.get(i), new Boolean(true));
}
for (int i = 0; i < pathB.size() -1; i++) {
if (path_index.containsKey( pathB.get(i))) {
return(true);
}
}
return(false);
}
// see if any node other than the very last one is shared between the lists
public static boolean paths_have_any_node_in_common (List<Integer> pathA, List<Integer> pathB, boolean include_sinks) {
Hashtable<Integer,Boolean> path_index = new Hashtable<Integer,Boolean>();
for (int i = 0; i < pathA.size() - 1; i++) {
Integer node = pathA.get(i);
if ( (! include_sinks) && node < 0) {
continue; // sink node
}
path_index.put(node, new Boolean(true));
}
for (int i = 0; i < pathB.size() -1; i++) {
Integer node = pathB.get(i);
if (path_index.containsKey( node)) {
// debugMes("Found node: " + node + " in common between paths: " + pathA + " and " + pathB, 10);
return(true);
}
}
return(false);
}
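// Renders a per-read coverage map over the final path. A header line of '=' characters (one per
// path node) is followed, for each pair path sorted by its first node's position, by a row marking
// with '=' the positions of the final path that the read covers, e.g. (illustration only):
//   ==========  PATH: [12, 5, 7, ...]
//   ===         Read: [[12, 5, 7]] read_support: 3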
public static String getPathMappingAsciiIllustration (
final List<Integer> finalPath,
HashMap<PairPath,Integer> readPathsHashmap
) {
String ascii_illustration = "";
for (int i = 0; i < finalPath.size(); i++) {
ascii_illustration += "=";
}
ascii_illustration += " PATH: " + finalPath + "\n";
List<PairPath> readPaths = new ArrayList<PairPath>(readPathsHashmap.keySet());
Collections.sort(readPaths, new Comparator<PairPath>() { // sort illustration by first node position in path
public int compare(PairPath a, PairPath b) {
Integer b_index = finalPath.indexOf(b.getFirstID());
Integer a_index = finalPath.indexOf(a.getFirstID());
return(a_index - b_index);
}
});
for (PairPath read : readPaths) {
char chars[] = new char[finalPath.size()];
for (int i = 0; i < chars.length; i++) {
chars[i] = ' ';
}
for (List<Integer> readPath : read.get_paths()) {
for (Integer vertex_id : readPath) {
int index = finalPath.indexOf(vertex_id);
if (index >= 0) {
chars[index] = '=';
}
}
}
for (int i = 0; i < chars.length; i++) {
ascii_illustration += chars[i];
}
int read_counts = readPathsHashmap.get(read);
ascii_illustration += " Read: " + read.get_paths() + " read_support: " + read_counts + "\n";
}
return(ascii_illustration);
}
public static int count_pairpath_support(List<Integer> path, HashMap<List<Integer>, HashMap<PairPath, Integer>> PathReads) {
HashMap<PairPath,Integer> pairPath_map = PathReads.get(path);
int sum_reads = 0;
for (PairPath p : pairPath_map.keySet()) {
int read_count = pairPath_map.get(p);
sum_reads += read_count;
}
return(sum_reads);
}
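// Collects, for every internal node of every read path, the (left, central, right) triplet it supports,
// keyed by the central node. E.g. (illustration only) read path [2,5,7,9] contributes triplet [2,5,7]
// under key 5 and [5,7,9] under key 7; duplicate triplets for a node are recorded only once.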
public static HashMap<Integer, List<List<Integer>>> extractTripletsFromReads(HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
HashMap<Integer, List<List<Integer>>> tripletMapper = new HashMap<Integer, List<List<Integer>>>();
for (Integer vertex_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_hmap = combinedReadHash.get(vertex_id);
for (PairPath pp : pp_hmap.keySet()) {
List<List<Integer>> paths = pp.get_paths();
for (List<Integer> read_path : paths) {
if (read_path.size() < 3) {
continue;
}
// iterate through triplets
for (int i = 1; i < read_path.size()-1; i++) {
Integer central_id = read_path.get(i);
Integer left_id = read_path.get(i-1);
Integer right_id = read_path.get(i+1);
List<Integer> adjacency_path = new ArrayList<Integer>();
adjacency_path.add(left_id);
adjacency_path.add(central_id);
adjacency_path.add(right_id);
if (tripletMapper.containsKey(central_id)) {
List<List<Integer>> triplet_list = tripletMapper.get(central_id);
if (! triplet_list.contains(adjacency_path)) {
triplet_list.add(adjacency_path);
debugMes("Adding triplet adjacency_path to central node: " + central_id + " => " + adjacency_path, 17);
}
else {
debugMes("triplet adjacency_path of node: " + central_id + " => " + adjacency_path + " already captured.", 17);
}
}
else {
List<List<Integer>> triplet_list = new ArrayList<List<Integer>>();
triplet_list.add(adjacency_path);
tripletMapper.put(central_id, triplet_list);
debugMes("Setting initial triplet adjacency_path for central node: " + central_id + " => " + adjacency_path, 17);
}
}
}
}
}
return(tripletMapper);
}
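// For every node at position >= 2 of a read path, records the full read-path prefix ending at that
// node, keyed by the node. E.g. (illustration only) read path [2,5,7,9] yields prefix [2,5,7] for
// node 7 and [2,5,7,9] for node 9. Prefixes that (per Path.share_suffix_fully_contained) are contained
// in a longer prefix of the same node are purged in the second pass below.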
public static HashMap<Integer, List<List<Integer>>> extractComplexPathPrefixesFromReads(HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
debugMes("-capturing path prefixes", 15);
HashMap<Integer, List<List<Integer>>> nodeToComplexPathPrefix = new HashMap<Integer, List<List<Integer>>>();
for (Integer vertex_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_hmap = combinedReadHash.get(vertex_id);
for (PairPath pp : pp_hmap.keySet()) {
List<List<Integer>> paths = pp.get_paths();
for (List<Integer> read_path : paths) {
if (read_path.size() < 3) {
continue;
}
// iterate through prefixes
for (int i = read_path.size()-1; i >= 2; i--) {
Integer node_id = read_path.get(i);
List<Integer> prefix_path = read_path.subList(0, i+1);
if (! nodeToComplexPathPrefix.containsKey(node_id)) {
nodeToComplexPathPrefix.put(node_id, new ArrayList<List<Integer>>());
}
if (! nodeToComplexPathPrefix.get(node_id).contains(prefix_path)) {
nodeToComplexPathPrefix.get(node_id).add(prefix_path);
}
}
}
}
}
debugMes("-removing prefixes that are subpaths of other prefixes", 15);
// remove paths that are subpaths
for (Integer node_id : nodeToComplexPathPrefix.keySet()) {
List<List<Integer>> prefixes = nodeToComplexPathPrefix.get(node_id);
List<List<Integer>> prefixes_to_purge = new ArrayList<List<Integer>>();
for (List<Integer> prefix : prefixes) {
for (List<Integer> prefix2 : prefixes) {
if (prefix != prefix2 && prefix2.size() > prefix.size()
&& Path.share_suffix_fully_contained(prefix, prefix2)) {
prefixes_to_purge.add(prefix);
}
}
}
for (List<Integer> prefix : prefixes_to_purge) {
prefixes.remove(prefix);
}
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 17) {
for (List<List<Integer>> extendedTripletPathsList : nodeToComplexPathPrefix.values()) {
for (List<Integer> extendedTripletPath : extendedTripletPathsList) {
debugMes("EXTENDED_TRIPLET_CAPTURED: " + extendedTripletPath, 17);
}
}
}
return(nodeToComplexPathPrefix);
}
public static Boolean tripletSupported(List<List<Integer>> triplet_list, List<Integer> triplet) {
for (List<Integer> t_list : triplet_list) {
debugMes("Checking triplet list: " + t_list + " comparing to query triplet: " + triplet, 15);
if (t_list.get(0).equals(triplet.get(0))
&&
t_list.get(1).equals(triplet.get(1))
&&
t_list.get(2).equals(triplet.get(2))
) {
return(true);
}
}
return(false);
}
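// Sink conventions used here: -1 marks the leading (root-side) sink and -2 the trailing sink,
// e.g. (illustration only) [3,7] becomes [-1, 3, 7, -2]; paths that already carry the markers are left unchanged.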
public static List<Integer> ensure_path_has_sinks(List<Integer> path) {
List<Integer> new_path = new ArrayList<Integer>(path);
if (new_path.get(0) != -1) {
new_path.add(0, -1);
}
if (new_path.get(new_path.size()-1) != -2) {
new_path.add(-2);
}
return(new_path);
}
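// CD-HIT style reduction sketch, as implemented below: reconstruct each path's sequence, sort paths
// by sequence length (descending), and for every pair in which neither member was already filtered,
// test similarity with twoPathsAreTooSimilar; if the pair is too similar, removeTheLesserSupportedPath
// decides which of the two is dropped from FinalPaths_all.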
public static HashMap<List<Integer>, Pair<Integer>> reduce_cdhit_like (
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads) {
debugMes("\n\n**** CD-HIT style path collapsing at end of run.\n\n", 15);
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
DecimalFormat df = new DecimalFormat("#.##"); // NOTE: pattern string was truncated in the source; "#.##" is an assumed placeholder (df appears unused in this method)
for (List<Integer> path : FinalPaths_all.keySet())
{
String seq = getPathSeq(graph,path);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
}
MAX_SEQ_LEN_DP_ALIGN = ALL_VS_ALL_MAX_DP_LEN; // temporarily replace
Collections.sort(path_vec); // sort paths by length of sequence descendingly
// examine sequence CD-HIT -style, remove those that lack sufficient variation
HashMap<FinalPaths,Boolean> filtered = new HashMap<FinalPaths,Boolean>();
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
for (int i = 0; i < path_vec.size()-1; i++) {
if (filtered.containsKey(path_vec.get(i))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_i = path_vec.get(i).path;
List<Integer> path_i_w_sinks = ensure_path_has_sinks(path_i);
for (int j = i + 1; j < path_vec.size(); j++) {
if (filtered.containsKey(path_vec.get(j))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "] ");
}
List<Integer> path_j = path_vec.get(j).path;
List<Integer> path_j_w_sinks = ensure_path_has_sinks(path_j);
String seq_i = path_vec.get(i).sequence;
String seq_j = path_vec.get(j).sequence;
/*
int index1 = seq_i.length()-1;
int index2 = seq_j.length()-1;
debugMes("ALL-VS-ALL: (" + i + "," + j + " of " + path_vec.size() + ") checking for similarity the two paths: "+path_i+
"(len="+seq_i.length()+");"+path_j+"(len="+seq_j.length()+")",10);
*/
//if (finalSeqsAreTooSimilar(seq_i, seq_j)) {
if (twoPathsAreTooSimilar(graph, path_i_w_sinks, path_j_w_sinks)) {
debugMes("\n\n*** REDUCE: they are TOO SIMILAR! ***\n\n",10);
int rIndex = removeTheLesserSupportedPath(seq_i, seq_j, path_i, path_j, removeSimilarPaths, PathReads);
//int rIndex = removeTheShorterPath(path1S,path2S,path1,path2,removeSimilarPaths,PathReads);
if (rIndex == 1) {// the first path was removed
filtered.put(path_vec.get(i), true);
debugMes("\tRemoving (" + i + ") seq in pair", 18);
}
else {
filtered.put(path_vec.get(j), true);
debugMes("\tRemoving (" + j + ") second seq in pair", 18);
}
}
else
debugMes("\n\n*** REDUCE: they are PLENTY DIFFERENT ***\n\n", 15);
}
}
for (FinalPaths path2Remove : filtered.keySet())
{
debugMes("REDUCE-STAGE: The final path "+path2Remove+" was removed because it was too close to another path",10);
FinalPaths_all.remove(path2Remove.path);
}
ALL_VS_ALL_MAX_DP_LEN = MAX_SEQ_LEN_DP_ALIGN; // back to original setting
return(FinalPaths_all);
}
public static HashMap<List<Integer>, Pair<Integer>> remove_identical_subseqs (
HashMap<List<Integer>, Pair<Integer>> FinalPaths_all,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
HashMap<List<Integer>,HashMap<PairPath,Integer>> PathReads) {
Vector<FinalPaths> path_vec = new Vector<FinalPaths>();
DecimalFormat df = new DecimalFormat("#.##"); // NOTE: pattern string was truncated in the source; "#.##" is an assumed placeholder
int count = 0;
for (List<Integer> path : FinalPaths_all.keySet())
{
count++;
debugMes("-reconstructing sequence for path[: " + count + " of " + FinalPaths_all.keySet().size() + "]: " + path, 15);
String seq = getPathSeq(graph,path);
FinalPaths f = new FinalPaths(path, seq);
path_vec.add(f);
}
debugMes("\n\n**** Removing identical subsequences among: " + path_vec.size() + " paths.\n\n", 10);
Collections.sort(path_vec); // sort paths by length of sequence descendingly
// examine sequence CD-HIT -style, remove those that lack sufficient variation
HashMap<FinalPaths,Boolean> filtered = new HashMap<FinalPaths,Boolean>();
List<List<Integer>> removeSimilarPaths = new ArrayList<List<Integer>>();
for (int i = 0; i < path_vec.size()-1; i++) {
if (filtered.containsKey(path_vec.get(i))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_i = path_vec.get(i).path;
List<Integer> path_i_w_sinks = ensure_path_has_sinks(path_i);
for (int j = i + 1; j < path_vec.size(); j++) {
if (filtered.containsKey(path_vec.get(j))) {
// path filtered, cannot use it as evidence for filtering smaller sequences.
continue;
}
List<Integer> path_j = path_vec.get(j).path;
List<Integer> path_j_w_sinks = ensure_path_has_sinks(path_j);
String seq_i = path_vec.get(i).sequence;
String seq_j = path_vec.get(j).sequence;
int index1 = seq_i.length()-1;
int index2 = seq_j.length()-1;
if (BFLY_GLOBALS.VERBOSE_LEVEL >= 15) {
System.err.print("\r[" + i + "," + j + "] ");
}
else {
debugMes("ALL-VS-ALL: (" + i + "," + j + " of " + path_vec.size() + ") checking for identical subseqs between the two paths: "+path_i+
"(len="+seq_i.length()+");"+path_j+"(len="+seq_j.length()+")",16);
}
if (seq_i.indexOf(seq_j) >= 0) {
filtered.put(path_vec.get(j), true);
debugMes("\t** Removing (" + j + ") seq in pair, contains " + i, 15);
}
else if (seq_j.indexOf(seq_i) >= 0) {
filtered.put(path_vec.get(i), true);
debugMes("\t** Removing (" + i + ") seq in pair, contains " + j, 15);
}
}
}
for (FinalPaths path2Remove : filtered.keySet())
{
debugMes("REDUCE-STAGE: The final path "+path2Remove+" was removed because it was too close to another path",10);
FinalPaths_all.remove(path2Remove.path);
}
return(FinalPaths_all);
}
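// Two regimes below: for sequences longer than the DP limit (or >100kb) a fast "zipper" alignment is
// used and only near-identical pairs (<=2 mismatches) count as too similar; otherwise a Smith-Waterman
// or Needleman-Wunsch alignment is run and the maximum number of differences in a sliding window is
// compared against MAX_FINAL_DIFFS_IN_WINDOW.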
public static Boolean finalSeqsAreTooSimilar (String seq_i, String seq_j) {
// note, to check for perfectly identical sequence clusters, could run cd-hit like so:
// cd-hit-est -o cdhit -c 1 -i comp.allProbPaths.fasta -p 1 -d 0 -b 1
if ( (seq_i.length() > MAX_SEQ_LEN_DP_ALIGN && seq_j.length() > MAX_SEQ_LEN_DP_ALIGN)
||
seq_i.length() > 100000 // problems can arise in the alignment code if either seq is longer than this
||
seq_j.length() > 100000) {
// zipper: Just get rid of those that are truly nearly identical.
AlignmentStats stats = ZipperAlignment.doZipperAlignment("A", seq_i, "B", seq_j);
int mismatches = stats.mismatches;
debugMes("-zipper reports: " + mismatches + " mismatches between seqs.", 18);
if (mismatches <= 2)
return(true);
else
return(false);
}
else {
Alignment alignment;
if (SMITH_WATERMAN_ALIGN_FLAG) {
debugMes("-running Smith-Waterman alignment of path sequences", 15);
alignment = NWalign.run_SW_alignment("A", seq_i, "B", seq_j, 4, -5, 10, 1);
}
else {
// Needleman Wunsch Global Alignment is default
debugMes("-running Needleman-Wunsch alignment of path sequences", 15);
alignment = NWalign.run_NW_alignment("A", seq_i, "B", seq_j, 4, -5, 10, 1); //NW locks up or takes too long with very long sequences (eg. 40kb align to 6kb)
}
int max_diffs_in_window = AlignmentStats.get_max_diffs_in_window(alignment, DIFFS_WINDOW_SIZE);
debugMes (new jaligner.formats.Pair().format(alignment), 10);
debugMes("Max diffs found in alignment window: " + max_diffs_in_window, 10);
if (max_diffs_in_window <= MAX_FINAL_DIFFS_IN_WINDOW) {
return (true);
}
}
return(false);
}
private static String get_pathName_string (List<Integer> path,
DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
/*
int startI = 0, endI = path.size();
if (path.get(0)== ROOT.getID())
startI++;
if (path.get(path.size()-1)== T_VERTEX.getID())
endI--;
String pathName;
if (MISO_OUTPUT) {
pathName = "[";
int iSeqL=0,j=0;
for (int vi=startI; vi<endI; vi++){
iSeqL = getSeqVertex(graph, path.get(vi)).getName().length();
pathName = pathName + path.get(vi)+":"+j+"-"+(j+iSeqL-1);
if (vi<endI-1)
pathName = pathName.concat(" ");
j+=iSeqL;
}
pathName = pathName +"]";
} else
pathName = ""+path.subList(startI, endI);
*/
int startI = 0, endI = path.size();
if (path.get(0)== ROOT.getID())
startI++;
if (path.get(path.size()-1)== T_VERTEX.getID())
endI--;
String pathName,degenString="";
SeqVertex v;
if (MISO_OUTPUT) {
DecimalFormat f0 = new DecimalFormat("#.##"); // NOTE: pattern string was truncated in the source; "#.##" is an assumed placeholder
pathName = "[";
int iSeqL=0,j=0;
for (int vi=startI; vi<endI; vi++){
v = getSeqVertex(graph, path.get(vi));
iSeqL = v.getName().length();
if (vi != startI){
iSeqL -= (KMER_SIZE-1);
}
//int node_id = v.getOrigButterflyID();
int node_id = v.getID();
String path_node = "" + node_id;
/*
if (xStructuresResolvedByTriplets.containsKey(node_id) && ! xStructuresResolvedByTriplets.get(node_id)) {
path_node = "@" + path_node + "@!";
}
*/
pathName = pathName + path_node +":"+j+"-"+(j+iSeqL-1);
if (vi<endI-1)
pathName = pathName.concat(" ");
if (v.getDegenerativeFreq().size()>0)
{
for (int di=0; di<v.getDegenerativeFreq().size(); di++)
{
degenString = degenString + "{("+(j+v.getDegenerativeLocations().elementAt(di))+")";
degenString = degenString + v.getDegenerativeLetters().elementAt(di).elementAt(0)+":"+
f0.format(v.getDegenerativeFreq().elementAt(di).elementAt(0))+" ";
degenString = degenString + v.getDegenerativeLetters().elementAt(di).elementAt(1)+":"+
f0.format(v.getDegenerativeFreq().elementAt(di).elementAt(1)) +"}";
}
}
j+=iSeqL;
}
pathName = pathName +"]";
} else
pathName = ""+path.subList(startI, endI);
if (USE_DEGENERATE_CODE) {
pathName = pathName + " SNPs="+degenString;
}
pathName += " " + path;
return(pathName);
}
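// Restricts the global read hash to the given subcomponent: a pair path is kept (under its original
// anchor node) whenever it shares at least one node with the component's vertex set.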
public static HashMap<Integer,HashMap<PairPath,Integer>> getComponentReads(Integer component_id, Set<SeqVertex> comp,
HashMap<Integer,HashMap<PairPath,Integer>> combinedReadHash) {
HashSet<Integer> node_ids_in_component = new HashSet<Integer>();
for (SeqVertex v : comp) {
node_ids_in_component.add(v.getID());
}
List<Integer> component_node_list = new ArrayList<Integer>(node_ids_in_component);
HashMap<Integer,HashMap<PairPath,Integer>> componentReadHash = new HashMap<Integer,HashMap<PairPath,Integer>>();
for (Integer node_id : combinedReadHash.keySet()) {
HashMap<PairPath,Integer> pp_map = combinedReadHash.get(node_id);
for (PairPath p : pp_map.keySet()) {
if (p.haveAnyNodeInCommon(component_node_list)) {
if (! componentReadHash.containsKey(node_id)) {
componentReadHash.put(node_id, new HashMap<PairPath,Integer>());
}
componentReadHash.get(node_id).put(p, pp_map.get(p));
debugMes("Subcomponent: " + component_id + ", adding pairpath: " + p, 15);
}
}
}
return(componentReadHash);
}
/* bad idea... Cannot rely on node depth positions as they're imperfectly ordered
public static List<Integer> constructSinglePathFromPairPathList (List<PairPath> pairpath_list,
final DirectedSparseGraph<SeqVertex, SimpleEdge> graph,
DijkstraDistance<SeqVertex,SimpleEdge> dijkstraDis
) {
HashSet<Integer> extracted = new HashSet<Integer>();
ArrayList<SeqVertex> extractedVertices = new ArrayList<SeqVertex>();
for (PairPath pp : pairpath_list) {
extracted.addAll(pp.getPath1());
if(pp.hasSecondPath())
extracted.addAll(pp.getPath2());
}
for(Integer id : extracted)
{
extractedVertices.add(getSeqVertex(graph, id));
}
//extractedVerticesIDs.addAll(extracted);
// sort according to topological order of BTFL graph
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
String node_id_list_text = "";
for (SeqVertex v : extractedVertices) {
node_id_list_text += v.getID() + " ";
}
debugMes("Extracted sorted vertices: " + node_id_list_text + "\n", 10);
DijkstraShortestPath dsp = new DijkstraShortestPath(graph);
// Fill in any gaps
// path=[L_1]; For each i in 1:length(node_list)
int j = 0;
int num_vertices = extractedVertices.size(); // note, extractedVertices grows in size during iterations below.
while(j < num_vertices - 1)
{
//System.out.println(j);
SeqVertex current = extractedVertices.get(j);
SeqVertex next = extractedVertices.get(j + 1);
// 3.3.1 if L_i == L_(i+1) then nothing
// -There are no duplicates since extractedVertices was created from
// building the set of extracted vertex IDs
// 3.3.2 else if exists an edge from L_i to L_(i+1) then nothing(?)
if(graph.getSuccessors(current).contains(next)) {
j++;
continue;
}
// 3.3.3 else find a single path (p = L_i,..., L_(i+1)):
// append all P_j (j=2:end) to our path
//List<SimpleEdge> sp = org.jgrapht.alg.DijkstraShortestPath.findPathBetween((Graph)graph, current, next);
List<SimpleEdge> sp = dsp.getPath(current, next);
debugMes("Found shortest path between " + current.getID() + " and " + next.getID() + ":", 10);
ArrayList<SeqVertex> toAdd = new ArrayList<SeqVertex>();
for(SimpleEdge edge : sp) {
SeqVertex v = graph.getDest(edge);
toAdd.add(v);
debugMes("\t" + v.getID(), 10);
}
toAdd.remove(next);
extractedVertices.addAll(toAdd);
j++;
}
ArrayList<Integer> extractedVerticesIDs = new ArrayList<Integer>();
Collections.sort(extractedVertices, new SeqVertexFinishTimeComparator());
for(SeqVertex v: extractedVertices)
{
//System.out.println("Adding vertex with ID: " + v.getID());
extractedVerticesIDs.add(v.getID());
}
return(extractedVerticesIDs);
}
*/
public static boolean twoPairPathsAreTooFarAwayInGraph (PairPath pp_i, PairPath pp_j, DirectedSparseGraph<SeqVertex, SimpleEdge> graph) {
SeqVertex last_vertex_i = getSeqVertex(graph, pp_i.getLastID());
SeqVertex first_vertex_j = getSeqVertex(graph, pp_j.getFirstID());
int last_vertex_i_start_time = last_vertex_i._node_depth;
int first_vertex_j_start_time = first_vertex_j._node_depth;
/*
System.err.println("Pairpaths: " + pp_i + " vs. " + pp_j);
System.err.println("Start times: nodes [" + pp_i.getLastID() + "," + pp_j.getFirstID() + "], times: " + i_start_time + " vs. " + j_start_time);
*/
if (first_vertex_j_start_time - last_vertex_i_start_time > MAX_VERTEX_DISCOVERY_TIME_DIFF_ALLOW_COMPARE)
return(true);
else
return(false);
}
}
// End TransAssembly.java
|
package se.sics.cooja.plugins;
import java.awt.Font;
import java.awt.Rectangle;
import java.awt.Toolkit;
import java.awt.datatransfer.Clipboard;
import java.awt.datatransfer.StringSelection;
import java.awt.event.ActionEvent;
import java.awt.event.KeyAdapter;
import java.awt.event.KeyEvent;
import java.awt.event.MouseEvent;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Observable;
import java.util.Observer;
import java.util.Properties;
import javax.swing.AbstractAction;
import javax.swing.Action;
import javax.swing.ButtonGroup;
import javax.swing.JFileChooser;
import javax.swing.JMenu;
import javax.swing.JMenuItem;
import javax.swing.JOptionPane;
import javax.swing.JPopupMenu;
import javax.swing.JRadioButtonMenuItem;
import javax.swing.JScrollPane;
import javax.swing.JSplitPane;
import javax.swing.JTable;
import javax.swing.JTextPane;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import javax.swing.table.AbstractTableModel;
import org.apache.log4j.Logger;
import org.jdom.Element;
import se.sics.cooja.ClassDescription;
import se.sics.cooja.ConvertedRadioPacket;
import se.sics.cooja.GUI;
import se.sics.cooja.Plugin;
import se.sics.cooja.PluginType;
import se.sics.cooja.RadioConnection;
import se.sics.cooja.RadioMedium;
import se.sics.cooja.RadioPacket;
import se.sics.cooja.Simulation;
import se.sics.cooja.VisPlugin;
import se.sics.cooja.dialogs.TableColumnAdjuster;
import se.sics.cooja.interfaces.Radio;
import se.sics.cooja.plugins.analyzers.ICMPv6Analyzer;
import se.sics.cooja.plugins.analyzers.IEEE802154Analyzer;
import se.sics.cooja.plugins.analyzers.IPHCPacketAnalyzer;
import se.sics.cooja.plugins.analyzers.PacketAnalyzer;
import se.sics.cooja.plugins.analyzers.RadioLoggerAnalyzerSuite;
import se.sics.cooja.util.StringUtils;
/**
* Radio logger listens to the simulation radio medium and lists all transmitted
* data in a table.
*
* @author Fredrik Osterlind
*/
@ClassDescription("Radio Logger")
@PluginType(PluginType.SIM_PLUGIN)
public class RadioLogger extends VisPlugin {
private static Logger logger = Logger.getLogger(RadioLogger.class);
private static final long serialVersionUID = -6927091711697081353L;
private final static int COLUMN_TIME = 0;
private final static int COLUMN_FROM = 1;
private final static int COLUMN_TO = 2;
private final static int COLUMN_DATA = 3;
private JSplitPane splitPane;
private JTextPane verboseBox = null;
private final static String[] COLUMN_NAMES = {
"Time",
"From",
"To",
"Data"
};
private final Simulation simulation;
private final JTable dataTable;
private ArrayList<RadioConnectionLog> connections = new ArrayList<RadioConnectionLog>();
private RadioMedium radioMedium;
private Observer radioMediumObserver;
private AbstractTableModel model;
private HashMap<String,Action> analyzerMap = new HashMap<String,Action>();
private String analyzerName = null;
private ArrayList<PacketAnalyzer> analyzers = null;
public RadioLogger(final Simulation simulationToControl, final GUI gui) {
super("Radio Logger", gui);
simulation = simulationToControl;
radioMedium = simulation.getRadioMedium();
ArrayList<PacketAnalyzer> lowpanAnalyzers = new ArrayList<PacketAnalyzer>();
lowpanAnalyzers.add(new IEEE802154Analyzer());
lowpanAnalyzers.add(new IPHCPacketAnalyzer());
lowpanAnalyzers.add(new ICMPv6Analyzer());
model = new AbstractTableModel() {
private static final long serialVersionUID = 1692207305977527004L;
public String getColumnName(int col) {
return COLUMN_NAMES[col];
}
public int getRowCount() {
return connections.size();
}
public int getColumnCount() {
return COLUMN_NAMES.length;
}
public Object getValueAt(int row, int col) {
RadioConnectionLog conn = connections.get(row);
if (col == COLUMN_TIME) {
return Long.toString(conn.startTime / Simulation.MILLISECOND);
} else if (col == COLUMN_FROM) {
return "" + conn.connection.getSource().getMote().getID();
} else if (col == COLUMN_TO) {
Radio[] dests = conn.connection.getDestinations();
if (dests.length == 0) {
return "-";
}
if (dests.length == 1) {
return "" + dests[0].getMote().getID();
}
if (dests.length == 2) {
return "" + dests[0].getMote().getID() + ',' + dests[1].getMote().getID();
}
return "[" + dests.length + " d]";
} else if (col == COLUMN_DATA) {
if (conn.data == null) {
prepareDataString(connections.get(row));
}
if (aliases != null) {
/* Check if alias exists */
String alias = (String) aliases.get(conn.data);
if (alias != null) {
return alias;
}
}
return conn.data;
}
return null;
}
public boolean isCellEditable(int row, int col) {
if (col == COLUMN_FROM) {
/* Highlight source */
gui.signalMoteHighlight(connections.get(row).connection.getSource().getMote());
return false;
}
if (col == COLUMN_TO) {
/* Highlight all destinations */
Radio dests[] = connections.get(row).connection.getDestinations();
for (Radio dest: dests) {
gui.signalMoteHighlight(dest.getMote());
}
return false;
}
return false;
}
public Class<?> getColumnClass(int c) {
return getValueAt(0, c).getClass();
}
};
dataTable = new JTable(model) {
private static final long serialVersionUID = -2199726885069809686L;
public String getToolTipText(MouseEvent e) {
java.awt.Point p = e.getPoint();
int rowIndex = rowAtPoint(p);
int colIndex = columnAtPoint(p);
int realColumnIndex = convertColumnIndexToModel(colIndex);
if (rowIndex < 0 || realColumnIndex < 0) {
return super.getToolTipText(e);
}
RadioConnectionLog conn = connections.get(rowIndex);
if (realColumnIndex == COLUMN_TIME) {
return
"<html>" +
"Start time (us): " + conn.startTime +
"<br>" +
"End time (us): " + conn.endTime +
"<br><br>" +
"Duration (us): " + (conn.endTime - conn.startTime) +
"</html>";
} else if (realColumnIndex == COLUMN_FROM) {
return conn.connection.getSource().getMote().toString();
} else if (realColumnIndex == COLUMN_TO) {
Radio[] dests = conn.connection.getDestinations();
if (dests.length == 0) {
return "No destinations";
}
StringBuilder tip = new StringBuilder();
tip.append("<html>");
if (dests.length == 1) {
tip.append("One destination:<br>");
} else {
tip.append(dests.length).append(" destinations:<br>");
}
for (Radio radio: dests) {
tip.append(radio.getMote()).append("<br>");
}
tip.append("</html>");
return tip.toString();
} else if (realColumnIndex == COLUMN_DATA) {
if (conn.tooltip == null) {
prepareTooltipString(conn);
}
return conn.tooltip;
}
return super.getToolTipText(e);
}
};
dataTable.addKeyListener(new KeyAdapter() {
public void keyPressed(KeyEvent e) {
if (e.getKeyCode() == KeyEvent.VK_SPACE) {
timeLineAction.actionPerformed(null);
logListenerAction.actionPerformed(null);
}
}
});
dataTable.getSelectionModel().addListSelectionListener(new ListSelectionListener() {
public void valueChanged(ListSelectionEvent e) {
int row = dataTable.getSelectedRow();
if (row >= 0) {
RadioConnectionLog conn = connections.get(row);
if (conn.tooltip == null) {
prepareTooltipString(conn);
}
verboseBox.setText(conn.tooltip);
verboseBox.setCaretPosition(0);
}
}
});
// Set data column width greedy
dataTable.setAutoResizeMode(JTable.AUTO_RESIZE_LAST_COLUMN);
dataTable.setFont(new Font("Monospaced", Font.PLAIN, 12));
JPopupMenu popupMenu = new JPopupMenu();
popupMenu.add(new JMenuItem(copyAction));
popupMenu.add(new JMenuItem(copyAllAction));
popupMenu.add(new JMenuItem(clearAction));
popupMenu.addSeparator();
popupMenu.add(new JMenuItem(aliasAction));
popupMenu.addSeparator();
popupMenu.add(new JMenuItem(saveAction));
popupMenu.addSeparator();
JMenu focusMenu = new JMenu("Focus (Space)");
focusMenu.add(new JMenuItem(timeLineAction));
focusMenu.add(new JMenuItem(logListenerAction));
popupMenu.add(focusMenu);
//a group of radio button menu items
popupMenu.addSeparator();
ButtonGroup group = new ButtonGroup();
JRadioButtonMenuItem rbMenuItem = new JRadioButtonMenuItem(
createAnalyzerAction("No Analyzer", "none", null, true));
group.add(rbMenuItem);
popupMenu.add(rbMenuItem);
rbMenuItem = new JRadioButtonMenuItem(createAnalyzerAction(
"6LoWPAN Analyzer", "6lowpan", lowpanAnalyzers, false));
group.add(rbMenuItem);
popupMenu.add(rbMenuItem);
/* Load additional analyzers specified by projects (cooja.config) */
String[] projectAnalyzerSuites =
gui.getProjectConfig().getStringArrayValue(RadioLogger.class, "ANALYZERS");
if (projectAnalyzerSuites != null) {
for (String suiteName: projectAnalyzerSuites) {
Class<? extends RadioLoggerAnalyzerSuite> suiteClass =
gui.tryLoadClass(RadioLogger.this, RadioLoggerAnalyzerSuite.class, suiteName);
try {
RadioLoggerAnalyzerSuite suite = suiteClass.newInstance();
ArrayList<PacketAnalyzer> suiteAnalyzers = suite.getAnalyzers();
rbMenuItem = new JRadioButtonMenuItem(createAnalyzerAction(
suite.getDescription(), suiteName, suiteAnalyzers, false));
group.add(rbMenuItem);
popupMenu.add(rbMenuItem);
logger.debug("Loaded radio logger analyzers: " + suite.getDescription());
} catch (InstantiationException e1) {
logger.warn("Failed to load analyzer suite '" + suiteName + "': " + e1.getMessage());
} catch (IllegalAccessException e1) {
logger.warn("Failed to load analyzer suite '" + suiteName + "': " + e1.getMessage());
}
}
}
dataTable.setComponentPopupMenu(popupMenu);
dataTable.setFillsViewportHeight(true);
verboseBox = new JTextPane();
verboseBox.setContentType("text/html");
verboseBox.setEditable(false);
verboseBox.setComponentPopupMenu(popupMenu);
splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT,
new JScrollPane(dataTable), new JScrollPane(verboseBox));
splitPane.setOneTouchExpandable(true);
splitPane.setDividerLocation(150);
add(splitPane);
TableColumnAdjuster adjuster = new TableColumnAdjuster(dataTable);
adjuster.setDynamicAdjustment(true);
adjuster.packColumns();
radioMedium.addRadioMediumObserver(radioMediumObserver = new Observer() {
public void update(Observable obs, Object obj) {
RadioConnection conn = radioMedium.getLastConnection();
if (conn == null) {
return;
}
final RadioConnectionLog loggedConn = new RadioConnectionLog();
loggedConn.startTime = conn.getStartTime();
loggedConn.endTime = simulation.getSimulationTime();
loggedConn.connection = conn;
loggedConn.packet = conn.getSource().getLastPacketTransmitted();
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
int lastSize = connections.size();
// Check if the last row is visible
boolean isVisible = false;
int rowCount = dataTable.getRowCount();
if (rowCount > 0) {
Rectangle lastRow = dataTable.getCellRect(rowCount - 1, 0, true);
Rectangle visible = dataTable.getVisibleRect();
isVisible = visible.y <= lastRow.y && visible.y + visible.height >= lastRow.y + lastRow.height;
}
connections.add(loggedConn);
if (connections.size() > lastSize) {
model.fireTableRowsInserted(lastSize, connections.size() - 1);
}
if (isVisible) {
dataTable.scrollRectToVisible(dataTable.getCellRect(dataTable.getRowCount() - 1, 0, true));
}
setTitle("Radio Logger: " + dataTable.getRowCount() + " packets");
}
});
}
});
setSize(500, 300);
try {
setSelected(true);
} catch (java.beans.PropertyVetoException e) {
// Could not select
}
}
/**
* Selects a logged radio packet close to the given time.
*
* @param time Start time
*/
public void trySelectTime(final long time) {
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
for (int i=0; i < connections.size(); i++) {
if (connections.get(i).endTime < time) {
continue;
}
dataTable.scrollRectToVisible(dataTable.getCellRect(i, 0, true));
dataTable.setRowSelectionInterval(i, i);
return;
}
}
});
}
private void prepareDataString(RadioConnectionLog conn) {
byte[] data;
if (conn.packet == null) {
data = null;
} else if (conn.packet instanceof ConvertedRadioPacket) {
data = ((ConvertedRadioPacket)conn.packet).getOriginalPacketData();
} else {
data = conn.packet.getPacketData();
}
if (data == null) {
conn.data = "[unknown data]";
return;
}
StringBuffer brief = new StringBuffer();
StringBuffer verbose = new StringBuffer();
/* default analyzer */
PacketAnalyzer.Packet packet = new PacketAnalyzer.Packet(data, PacketAnalyzer.MAC_LEVEL);
if (analyzePacket(packet, brief, verbose)) {
if (packet.hasMoreData()) {
byte[] payload = packet.getPayload();
brief.append(StringUtils.toHex(payload, 4));
if (verbose.length() > 0) {
verbose.append("<p>");
}
verbose.append("<b>Payload (")
.append(payload.length).append(" bytes)</b><br><pre>")
.append(StringUtils.hexDump(payload))
.append("</pre>");
}
conn.data = (data.length < 100 ? (data.length < 10 ? "  " : " ") : "") /* pad 1- and 2-digit lengths so the data column lines up */
+ data.length + ": " + brief;
if (verbose.length() > 0) {
conn.tooltip = verbose.toString();
}
} else {
conn.data = data.length + ": 0x" + StringUtils.toHex(data, 4);
}
}
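  /* Runs the configured analyzers over the packet: whenever an analyzer matches, it consumes part of
   * the payload and appends to the brief/verbose buffers; analysis loops again while
   * ANALYSIS_OK_CONTINUE is returned and payload bytes remain. Returns true if any brief output was produced. */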
private boolean analyzePacket(PacketAnalyzer.Packet packet, StringBuffer brief, StringBuffer verbose) {
if (analyzers == null) return false;
boolean analyze = true;
while (analyze) {
analyze = false;
for (int i = 0; i < analyzers.size(); i++) {
PacketAnalyzer analyzer = analyzers.get(i);
if (analyzer.matchPacket(packet)) {
int res = analyzer.analyzePacket(packet, brief, verbose);
if (packet.hasMoreData() && brief.length() > 0) {
brief.append('|');
verbose.append("<br>");
}
if (res != PacketAnalyzer.ANALYSIS_OK_CONTINUE) {
/* this was the final or the analysis failed - no analyzable payload possible here... */
return brief.length() > 0;
}
/* continue another round if more bytes left */
analyze = packet.hasMoreData();
break;
}
}
}
return brief.length() > 0;
}
private void prepareTooltipString(RadioConnectionLog conn) {
RadioPacket packet = conn.packet;
if (packet == null) {
conn.tooltip = "";
return;
}
if (packet instanceof ConvertedRadioPacket && packet.getPacketData().length > 0) {
byte[] original = ((ConvertedRadioPacket)packet).getOriginalPacketData();
byte[] converted = ((ConvertedRadioPacket)packet).getPacketData();
conn.tooltip = "<html><font face=\"Monospaced\">" +
"<b>Packet data (" + original.length + " bytes)</b><br>" +
"<pre>" + StringUtils.hexDump(original) + "</pre>" +
"</font><font face=\"Monospaced\">" +
"<b>Cross-level packet data (" + converted.length + " bytes)</b><br>" +
"<pre>" + StringUtils.hexDump(converted) + "</pre>" +
"</font></html>";
} else if (packet instanceof ConvertedRadioPacket) {
byte[] original = ((ConvertedRadioPacket)packet).getOriginalPacketData();
conn.tooltip = "<html><font face=\"Monospaced\">" +
"<b>Packet data (" + original.length + " bytes)</b><br>" +
"<pre>" + StringUtils.hexDump(original) + "</pre>" +
"</font><font face=\"Monospaced\">" +
"<b>No cross-level conversion available</b><br>" +
"</font></html>";
} else {
byte[] data = packet.getPacketData();
conn.tooltip = "<html><font face=\"Monospaced\">" +
"<b>Packet data (" + data.length + " bytes)</b><br>" +
"<pre>" + StringUtils.hexDump(data) + "</pre>" +
"</font></html>";
}
}
public void closePlugin() {
if (radioMediumObserver != null) {
radioMedium.deleteRadioMediumObserver(radioMediumObserver);
}
}
public Collection<Element> getConfigXML() {
ArrayList<Element> config = new ArrayList<Element>();
Element element = new Element("split");
element.addContent(Integer.toString(splitPane.getDividerLocation()));
config.add(element);
if (analyzerName != null && analyzers != null) {
element = new Element("analyzers");
element.setAttribute("name", analyzerName);
config.add(element);
}
if (aliases != null) {
for (Object key: aliases.keySet()) {
element = new Element("alias");
element.setAttribute("payload", (String) key);
element.setAttribute("alias", (String) aliases.get(key));
config.add(element);
}
}
return config;
}
public boolean setConfigXML(Collection<Element> configXML, boolean visAvailable) {
for (Element element : configXML) {
String name = element.getName();
if ("alias".equals(name)) {
String payload = element.getAttributeValue("payload");
String alias = element.getAttributeValue("alias");
if (aliases == null) {
aliases = new Properties();
}
aliases.put(payload, alias);
} else if ("split".equals(name)) {
splitPane.setDividerLocation(Integer.parseInt(element.getText()));
} else if ("analyzers".equals(name)) {
String analyzerName = element.getAttributeValue("name");
final Action action;
if (analyzerName != null && ((action = analyzerMap.get(analyzerName)) != null)) {
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
action.putValue(Action.SELECTED_KEY, Boolean.TRUE);
action.actionPerformed(null);
}
});
}
}
}
return true;
}
private static class RadioConnectionLog {
long startTime;
long endTime;
RadioConnection connection;
RadioPacket packet;
String data = null;
String tooltip = null;
}
private String getDestString(RadioConnectionLog c) {
Radio[] dests = c.connection.getDestinations();
if (dests.length == 0) {
return "-";
}
if (dests.length == 1) {
return "" + dests[0].getMote().getID();
}
StringBuilder sb = new StringBuilder();
for (Radio dest: dests) {
sb.append(dest.getMote().getID()).append(',');
}
sb.setLength(sb.length()-1);
return sb.toString();
}
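  /* Creates the menu action that switches the active analyzer set; switching clears the cached
   * per-row data and tooltip strings so rows are lazily re-analyzed with the new analyzers. */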
private Action createAnalyzerAction(String name, final String actionName,
final ArrayList<PacketAnalyzer> analyzerList, boolean selected) {
Action action = new AbstractAction(name) {
private static final long serialVersionUID = -608913700422638454L;
public void actionPerformed(ActionEvent event) {
if (analyzers != analyzerList) {
analyzers = analyzerList;
analyzerName = actionName;
if (connections.size() > 0) {
// Remove the cached values
for(int i = 0; i < connections.size(); i++) {
RadioConnectionLog conn = connections.get(i);
conn.data = null;
conn.tooltip = null;
}
model.fireTableRowsUpdated(0, connections.size() - 1);
}
verboseBox.setText("");
}
}
};
action.putValue(Action.SELECTED_KEY, selected ? Boolean.TRUE : Boolean.FALSE);
analyzerMap.put(actionName, action);
return action;
}
private Action clearAction = new AbstractAction("Clear") {
private static final long serialVersionUID = -6135583266684643117L;
public void actionPerformed(ActionEvent e) {
int size = connections.size();
if (size > 0) {
connections.clear();
model.fireTableRowsDeleted(0, size - 1);
setTitle("Radio Logger: " + dataTable.getRowCount() + " packets");
}
}
};
private Action copyAction = new AbstractAction("Copy selected") {
private static final long serialVersionUID = 8412062977916108054L;
public void actionPerformed(ActionEvent e) {
Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
int[] selectedRows = dataTable.getSelectedRows();
StringBuilder sb = new StringBuilder();
for (int i: selectedRows) {
sb.append(dataTable.getValueAt(i, COLUMN_TIME)).append('\t');
sb.append(dataTable.getValueAt(i, COLUMN_FROM)).append('\t');
sb.append(getDestString(connections.get(i))).append('\t');
sb.append(dataTable.getValueAt(i, COLUMN_DATA)).append('\n');
}
StringSelection stringSelection = new StringSelection(sb.toString());
clipboard.setContents(stringSelection, null);
}
};
private Action copyAllAction = new AbstractAction("Copy all") {
private static final long serialVersionUID = 1905586689441157304L;
public void actionPerformed(ActionEvent e) {
Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
StringBuilder sb = new StringBuilder();
for(int i=0; i < connections.size(); i++) {
sb.append("" + dataTable.getValueAt(i, COLUMN_TIME) + '\t');
sb.append("" + dataTable.getValueAt(i, COLUMN_FROM) + '\t');
sb.append("" + getDestString(connections.get(i)) + '\t');
sb.append("" + dataTable.getValueAt(i, COLUMN_DATA) + '\n');
}
StringSelection stringSelection = new StringSelection(sb.toString());
clipboard.setContents(stringSelection, null);
}
};
private Action saveAction = new AbstractAction("Save to file") {
private static final long serialVersionUID = -3942984643211482179L;
public void actionPerformed(ActionEvent e) {
JFileChooser fc = new JFileChooser();
int returnVal = fc.showSaveDialog(GUI.getTopParentContainer());
if (returnVal != JFileChooser.APPROVE_OPTION) {
return;
}
File saveFile = fc.getSelectedFile();
if (saveFile.exists()) {
String s1 = "Overwrite";
String s2 = "Cancel";
Object[] options = { s1, s2 };
int n = JOptionPane.showOptionDialog(
GUI.getTopParentContainer(),
"A file with the same name already exists.\nDo you want to remove it?",
"Overwrite existing file?", JOptionPane.YES_NO_OPTION,
JOptionPane.QUESTION_MESSAGE, null, options, s1);
if (n != JOptionPane.YES_OPTION) {
return;
}
}
if (saveFile.exists() && !saveFile.canWrite()) {
logger.fatal("No write access to file: " + saveFile);
return;
}
try {
PrintWriter outStream = new PrintWriter(new FileWriter(saveFile));
for(int i=0; i < connections.size(); i++) {
outStream.print("" + dataTable.getValueAt(i, COLUMN_TIME) + '\t');
outStream.print("" + dataTable.getValueAt(i, COLUMN_FROM) + '\t');
outStream.print("" + getDestString(connections.get(i)) + '\t');
outStream.print("" + dataTable.getValueAt(i, COLUMN_DATA) + '\n');
}
outStream.close();
} catch (Exception ex) {
logger.fatal("Could not write to file: " + saveFile);
return;
}
}
};
private Action timeLineAction = new AbstractAction("in Timeline") {
private static final long serialVersionUID = -4035633464748224192L;
public void actionPerformed(ActionEvent e) {
int selectedRow = dataTable.getSelectedRow();
if (selectedRow < 0) return;
long time = connections.get(selectedRow).startTime;
Plugin[] plugins = simulation.getGUI().getStartedPlugins();
for (Plugin p: plugins) {
if (!(p instanceof TimeLine)) {
continue;
}
/* Select simulation time */
TimeLine plugin = (TimeLine) p;
plugin.trySelectTime(time);
}
}
};
private Action logListenerAction = new AbstractAction("in Log Listener") {
private static final long serialVersionUID = 1985006491187878651L;
public void actionPerformed(ActionEvent e) {
int selectedRow = dataTable.getSelectedRow();
if (selectedRow < 0) return;
long time = connections.get(selectedRow).startTime;
Plugin[] plugins = simulation.getGUI().getStartedPlugins();
for (Plugin p: plugins) {
if (!(p instanceof LogListener)) {
continue;
}
/* Select simulation time */
LogListener plugin = (LogListener) p;
plugin.trySelectTime(time);
}
}
};
private Properties aliases = null;
private Action aliasAction = new AbstractAction("Assign alias") {
private static final long serialVersionUID = -1678771087456128721L;
public void actionPerformed(ActionEvent e) {
int selectedRow = dataTable.getSelectedRow();
if (selectedRow < 0) return;
String current = "";
if (aliases != null && aliases.get(connections.get(selectedRow).data) != null) {
current = (String) aliases.get(connections.get(selectedRow).data);
}
String alias = (String) JOptionPane.showInputDialog(
GUI.getTopParentContainer(),
"Enter alias for all packets with identical payload.\n" +
"An empty string removes the current alias.\n\n" +
connections.get(selectedRow).data + "\n",
"Create packet payload alias",
JOptionPane.QUESTION_MESSAGE,
null,
null,
current);
if (alias == null) {
/* Cancelled */
return;
}
/* Should be null if empty */
if (aliases == null) {
aliases = new Properties();
}
/* Remove current alias */
if (alias.equals("")) {
aliases.remove(connections.get(selectedRow).data);
/* Should be null if empty */
if (aliases.isEmpty()) {
aliases = null;
}
repaint();
return;
}
/* (Re)define alias */
aliases.put(connections.get(selectedRow).data, alias);
repaint();
}
};
}
|
package com.twu.biblioteca;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.PrintStream;
import java.lang.StringBuilder;
import java.util.List;
import java.util.ArrayList;
public class BibliotecaAppTest {
@Test
public void testBibliotecaStartup() {
StringBuilder expectedOutput = new StringBuilder();
displayStartupMessage(expectedOutput);
displayMainMenu(expectedOutput);
ByteArrayOutputStream output = initSystemOutStream();
InputStream input = initSystemInStream("quit");
BibliotecaApp.main(new String[]{});
assertEquals(expectedOutput.toString(), output.toString());
}
@Test
public void testSelectMenuOptionListBooks() {
StringBuilder expectedOutput = new StringBuilder();
displayBookList(expectedOutput, generateBookList());
ByteArrayOutputStream output = initSystemOutStream();
BibliotecaApp.selectMenuOption(BibliotecaApp.LIST_BOOKS);
assertEquals(expectedOutput.toString(), output.toString());
}
@Test
public void testSelectMenuOptionInvalidOption() {
StringBuilder expectedOutput = new StringBuilder();
displayInvalidOptionMessage(expectedOutput);
ByteArrayOutputStream output = initSystemOutStream();
BibliotecaApp.selectMenuOption(-1);
assertEquals(expectedOutput.toString(), output.toString());
}
@Test
public void testSelectMenuOptionsUntilQuit() {
StringBuilder expectedOutput = new StringBuilder();
displayMainMenu(expectedOutput);
displayInvalidOptionMessage(expectedOutput);
displayInvalidOptionMessage(expectedOutput);
ByteArrayOutputStream output = initSystemOutStream();
InputStream input = initSystemInStream("-1\n-1\nquit");
BibliotecaApp.runMainMenu();
assertEquals(expectedOutput.toString(), output.toString());
}
@Test
public void testCheckoutBook() {
List<Book> bookList = generateBookList();
StringBuilder expectedOutput = new StringBuilder();
displayBookList(expectedOutput, generateBookList());
bookList.remove(2);
displayBookList(expectedOutput, bookList);
ByteArrayOutputStream output = initSystemOutStream();
BibliotecaApp.printBookList();
BibliotecaApp.checkoutBook("Head First Java");
BibliotecaApp.printBookList();
assertEquals(expectedOutput.toString(), output.toString());
}
// helpers
private ByteArrayOutputStream initSystemOutStream() {
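// Redirect System.out into an in-memory buffer so assertions can inspect everything the app prints.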
ByteArrayOutputStream baos = new ByteArrayOutputStream();
System.setOut(new PrintStream(baos));
return baos;
}
private ByteArrayInputStream initSystemInStream(String testValue) {
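// Replace System.in with the given string so the app reads it as simulated user input.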
ByteArrayInputStream in = new ByteArrayInputStream(testValue.getBytes());
System.setIn(in);
return in;
}
private void displayStartupMessage(StringBuilder expectedOutput) {
expectedOutput.append("Welcome to Biblioteca!\n");
}
private void displayMainMenu(StringBuilder expectedOutput) {
expectedOutput.append("Main Menu (please select one of the following options by typing its number and pressing ENTER)\n");
expectedOutput.append("(1) List Books\n");
}
private void displayInvalidOptionMessage(StringBuilder expectedOutput) {
expectedOutput.append("Select a valid option!\n");
}
private void displayBookList(StringBuilder expectedOutput, List<Book> bookList) {
expectedOutput.append("Book List\n");
expectedOutput.append(String.format("%-42s | %-32s | %-12s\n", "Title", "Author", "Year Published"));
String leftAlignFormat = "%-42s | %-32s | %-4d\n";
for (Book book : bookList) {
expectedOutput.append(String.format(leftAlignFormat, book.getTitle(), book.getAuthor(), book.getYearPublished()));
}
}
private List<Book> generateBookList() {
List<Book> bookList = new ArrayList<Book>();
bookList.add(new Book("Test-Driven Development By Example", "Kent Beck", 2003));
bookList.add(new Book("The Agile Samurai", "Jonathan Rasmusson", 2010));
bookList.add(new Book("Head First Java", "Kathy Sierra & Bert Bates", 2005));
bookList.add(new Book("Don't Make Me Think, Revisited", "Steve Krug", 2014));
return bookList;
}
}
|
// -*- mode:java; encoding:utf-8 -*-
// vim:set fileencoding=utf-8:
// @homepage@
package example;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.KeyEvent;
import javax.swing.*;
public final class MainPanel extends JPanel {
private MainPanel() {
super(new BorderLayout());
Box box = Box.createVerticalBox();
box.add(makeComboBox(makeModel()));
box.setBorder(BorderFactory.createTitledBorder("ComboBoxSeparator"));
add(box, BorderLayout.NORTH);
add(new JScrollPane(new JTextArea("dummy")));
setPreferredSize(new Dimension(320, 240));
}
private static ComboBoxModel<Object> makeModel() {
DefaultComboBoxModel<Object> model = new DefaultComboBoxModel<Object>() {
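// Ignore attempts to select a JSeparator so the separators act as non-selectable dividers.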
@Override public void setSelectedItem(Object anObject) {
if (!(anObject instanceof JSeparator)) {
super.setSelectedItem(anObject);
}
}
};
model.addElement("0000");
model.addElement("0000111");
model.addElement("000011122");
model.addElement("00001112233333");
model.addElement(new JSeparator());
model.addElement("bbb1");
model.addElement("bbb12");
model.addElement("bbb33333");
model.addElement(new JSeparator());
model.addElement("11111");
model.addElement("2222222");
return model;
}
private static <E> JComboBox<E> makeComboBox(ComboBoxModel<E> model) {
JComboBox<E> combo = new JComboBox<E>(model) {
@Override public void updateUI() {
setRenderer(null);
super.updateUI();
ListCellRenderer<? super E> renderer = getRenderer();
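// Wrap the UI-installed renderer: separator items render as themselves, all other values delegate to the default renderer.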
setRenderer((list, value, index, isSelected, cellHasFocus) -> {
if (value instanceof JSeparator) {
return (Component) value;
} else {
return renderer.getListCellRendererComponent(list, value, index, isSelected, cellHasFocus);
}
});
}
};
ActionMap am = combo.getActionMap();
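// Custom UP/DOWN actions skip over separator entries instead of letting them take the selection.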
String selectPrevKey = "selectPrevious3";
am.put(selectPrevKey, new AbstractAction() {
@Override public void actionPerformed(ActionEvent e) {
JComboBox<?> cb = (JComboBox<?>) e.getSource();
int index = cb.getSelectedIndex();
if (index == 0) {
return;
}
Object o = cb.getItemAt(index - 1);
if (o instanceof JSeparator) {
cb.setSelectedIndex(index - 2);
} else {
cb.setSelectedIndex(index - 1);
}
}
});
String selectNextKey = "selectNext3";
am.put(selectNextKey, new AbstractAction() {
@Override public void actionPerformed(ActionEvent e) {
JComboBox<?> cb = (JComboBox<?>) e.getSource();
int index = cb.getSelectedIndex();
if (index == cb.getItemCount() - 1) {
return;
}
Object o = cb.getItemAt(index + 1);
if (o instanceof JSeparator) {
cb.setSelectedIndex(index + 2);
} else {
cb.setSelectedIndex(index + 1);
}
}
});
InputMap im = combo.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT);
im.put(KeyStroke.getKeyStroke(KeyEvent.VK_UP, 0), selectPrevKey);
im.put(KeyStroke.getKeyStroke(KeyEvent.VK_KP_UP, 0), selectPrevKey);
im.put(KeyStroke.getKeyStroke(KeyEvent.VK_DOWN, 0), selectNextKey);
im.put(KeyStroke.getKeyStroke(KeyEvent.VK_KP_DOWN, 0), selectNextKey);
return combo;
}
public static void main(String[] args) {
EventQueue.invokeLater(MainPanel::createAndShowGui);
}
private static void createAndShowGui() {
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ex) {
ex.printStackTrace();
Toolkit.getDefaultToolkit().beep();
}
JFrame frame = new JFrame("@title@");
frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
frame.getContentPane().add(new MainPanel());
frame.pack();
frame.setLocationRelativeTo(null);
frame.setVisible(true);
}
}
|
package net.meisen.dissertation;
import net.meisen.dissertation.config.TestConfig;
import net.meisen.dissertation.config.xsd.TestXsdTidaModel;
import net.meisen.dissertation.config.xslt.TestDefaultValues;
import net.meisen.dissertation.config.xslt.TestXsltTidaModel;
import net.meisen.dissertation.help.TestDb;
import net.meisen.dissertation.impl.auth.shiro.TestMapDbAuthorizingRealm;
import net.meisen.dissertation.impl.auth.shiro.TestShiroAuthManager;
import net.meisen.dissertation.impl.cache.TestFileBitmapCache;
import net.meisen.dissertation.impl.cache.TestFileBitmapIdCacheConfig;
import net.meisen.dissertation.impl.cache.TestFileCaches;
import net.meisen.dissertation.impl.cache.TestFileFactDescriptorModelSetCache;
import net.meisen.dissertation.impl.cache.TestFileIdentifierCache;
import net.meisen.dissertation.impl.cache.TestFileMetaDataCache;
import net.meisen.dissertation.impl.cache.TestIdsOnlyDataRecordCache;
import net.meisen.dissertation.impl.cache.TestMapDbBitmapCache;
import net.meisen.dissertation.impl.cache.TestMapDbDataRecordCache;
import net.meisen.dissertation.impl.cache.TestMemoryIdentifierCache;
import net.meisen.dissertation.impl.cache.TestMemoryMetaDataCache;
import net.meisen.dissertation.impl.cache.TestRecentlyUsedCachingStrategy;
import net.meisen.dissertation.impl.dataintegration.TestPreProcessedDataRecord;
import net.meisen.dissertation.impl.dataintegration.TestRhinoScriptPreProcessor;
import net.meisen.dissertation.impl.dataretriever.TestCsvDataRetriever;
import net.meisen.dissertation.impl.dataretriever.TestDbDataRetriever;
import net.meisen.dissertation.impl.dataretriever.TestFixedStructureDataRetriever;
import net.meisen.dissertation.impl.datasets.TestDataRetrieverDataSet;
import net.meisen.dissertation.impl.datasets.TestSingleStaticDataSet;
import net.meisen.dissertation.impl.descriptors.TestDoubleDescriptor;
import net.meisen.dissertation.impl.descriptors.TestGeneralDescriptor;
import net.meisen.dissertation.impl.descriptors.TestIntegerDescriptor;
import net.meisen.dissertation.impl.descriptors.TestLongDescriptor;
import net.meisen.dissertation.impl.idfactories.TestByteIdsFactory;
import net.meisen.dissertation.impl.idfactories.TestIntegerIdsFactory;
import net.meisen.dissertation.impl.idfactories.TestLongIdsFactory;
import net.meisen.dissertation.impl.idfactories.TestShortIdsFactory;
import net.meisen.dissertation.impl.idfactories.TestUuIdsFactory;
import net.meisen.dissertation.impl.indexes.TestCompositeIndexKey;
import net.meisen.dissertation.impl.indexes.TestContinuousIntIndexedCollection;
import net.meisen.dissertation.impl.indexes.TestDataRecordIndex;
import net.meisen.dissertation.impl.indexes.TestIndexFactory;
import net.meisen.dissertation.impl.indexes.TestIndexedCollectionDefinition;
import net.meisen.dissertation.impl.indexes.TestIntArrayCollection;
import net.meisen.dissertation.impl.indexes.TestMapIndex;
import net.meisen.dissertation.impl.indexes.TestMultipleIndexedCollection;
import net.meisen.dissertation.impl.indexes.TestNestedIndexedCollection;
import net.meisen.dissertation.impl.indexes.datarecord.slices.TestBitmapId;
import net.meisen.dissertation.impl.indexes.datarecord.slices.TestEWAHBitmap;
import net.meisen.dissertation.impl.indexes.datarecord.slices.TestRoaringBitmap;
import net.meisen.dissertation.impl.measures.TestMapFactsArrayBased;
import net.meisen.dissertation.impl.measures.TestMapFactsDescriptorBased;
import net.meisen.dissertation.impl.parser.query.TestAddQueries;
import net.meisen.dissertation.impl.parser.query.TestAliveQueries;
import net.meisen.dissertation.impl.parser.query.TestAssignQueries;
import net.meisen.dissertation.impl.parser.query.TestDeleteQueries;
import net.meisen.dissertation.impl.parser.query.TestDropQueries;
import net.meisen.dissertation.impl.parser.query.TestGetQueries;
import net.meisen.dissertation.impl.parser.query.TestGrantQueries;
import net.meisen.dissertation.impl.parser.query.TestInsertQueries;
import net.meisen.dissertation.impl.parser.query.TestLoadQueries;
import net.meisen.dissertation.impl.parser.query.TestModifyQueries;
import net.meisen.dissertation.impl.parser.query.TestRemoveQueries;
import net.meisen.dissertation.impl.parser.query.TestRevokeQueries;
import net.meisen.dissertation.impl.parser.query.TestSelectQueries;
import net.meisen.dissertation.impl.parser.query.TestUnloadQueries;
import net.meisen.dissertation.impl.parser.query.select.TestDescriptorComperator;
import net.meisen.dissertation.impl.parser.query.select.TestDescriptorValue;
import net.meisen.dissertation.impl.parser.query.select.evaluator.TestDescriptorLogicEvaluator;
import net.meisen.dissertation.impl.parser.query.select.evaluator.TestGroupEvaluator;
import net.meisen.dissertation.impl.parser.query.select.group.TestGroupExpression;
import net.meisen.dissertation.impl.persistence.TestZipPersistor;
import net.meisen.dissertation.impl.time.granularity.TestTimeGranularityFactory;
import net.meisen.dissertation.impl.time.mapper.TestDateMapper;
import net.meisen.dissertation.impl.time.mapper.TestMapperFactory;
import net.meisen.dissertation.jdbc.TestTidaConnection;
import net.meisen.dissertation.jdbc.TestTidaDatabaseMetaData;
import net.meisen.dissertation.jdbc.TestTidaDriver;
import net.meisen.dissertation.jdbc.TestTidaResultSet;
import net.meisen.dissertation.model.auth.permissions.TestDefinedPermission;
import net.meisen.dissertation.model.data.TestDataModel;
import net.meisen.dissertation.model.data.TestDataStructure;
import net.meisen.dissertation.model.data.TestMetaDataModel;
import net.meisen.dissertation.model.dataretriever.TestDataCollection;
import net.meisen.dissertation.model.dataretriever.TestDataRecord;
import net.meisen.dissertation.model.datasets.TestMultipleDataSetIterator;
import net.meisen.dissertation.model.descriptors.TestDescriptorModel;
import net.meisen.dissertation.model.descriptors.TestDescriptorPrimitiveDataType;
import net.meisen.dissertation.model.dimensions.TestDescriptorDimension;
import net.meisen.dissertation.model.dimensions.TestDimensionHandler;
import net.meisen.dissertation.model.dimensions.graph.TestDescriptorGraph;
import net.meisen.dissertation.model.dimensions.graph.TestTimeGraph;
import net.meisen.dissertation.model.dimensions.graph.TestTimeGraphMemberIndex;
import net.meisen.dissertation.model.dimensions.templates.TestDays;
import net.meisen.dissertation.model.dimensions.templates.TestMinutes;
import net.meisen.dissertation.model.dimensions.templates.TestTimeLevelTemplateManager;
import net.meisen.dissertation.model.handler.TestTidaModelHandler;
import net.meisen.dissertation.model.handler.TestTidaModelHandlerPersistency;
import net.meisen.dissertation.model.indexes.TestIndexKeyDefinition;
import net.meisen.dissertation.model.indexes.datarecord.TestIntervalIndex;
import net.meisen.dissertation.model.indexes.datarecord.TestMetaIndex;
import net.meisen.dissertation.model.indexes.datarecord.TestMetaIndexDimension;
import net.meisen.dissertation.model.indexes.datarecord.TestProcessedDataRecord;
import net.meisen.dissertation.model.indexes.datarecord.TestTidaIndex;
import net.meisen.dissertation.model.indexes.datarecord.slices.TestFactDescriptorModelSet;
import net.meisen.dissertation.model.indexes.datarecord.slices.TestFactDescriptorSet;
import net.meisen.dissertation.model.persistence.TestBasePersistor;
import net.meisen.dissertation.model.persistence.TestGroup;
import net.meisen.dissertation.model.persistence.TestIdentifier;
import net.meisen.dissertation.model.time.TestDateNormalizer;
import net.meisen.dissertation.model.time.granularity.TestDateBasedHelper;
import net.meisen.dissertation.model.time.granularity.TestDateFormat;
import net.meisen.dissertation.model.time.granularity.TestMinute;
import net.meisen.dissertation.model.time.mapper.TestBaseMapperFactory;
import net.meisen.dissertation.model.time.timeline.TestTimelineDefinition;
import net.meisen.dissertation.server.TestAuthServlet;
import net.meisen.dissertation.server.TestCommunication;
import net.meisen.dissertation.server.TestTidaServer;
import net.meisen.dissertation.server.session.TestSession;
import net.meisen.dissertation.server.session.TestSessionManager;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
/**
* All tests together as a {@link Suite}
*
* @author pmeisen
*/
@RunWith(Suite.class)
@Suite.SuiteClasses({
// Test the utilities
TestDefaultValues.class,
// Test general helper
TestDb.class,
// Test the configuration
TestConfig.TestConfigSuite.class,
// Test persistence
TestGroup.class,
TestIdentifier.class,
TestBasePersistor.class,
TestZipPersistor.class,
// Test indexes
TestCompositeIndexKey.class,
TestIndexKeyDefinition.class,
TestMapIndex.class,
TestIndexedCollectionDefinition.class,
TestNestedIndexedCollection.class,
TestMultipleIndexedCollection.class,
TestIntArrayCollection.class,
TestContinuousIntIndexedCollection.class,
// Test the factory for the indexes
TestIndexFactory.TestIndexFactorySuite.class,
// Test the id factories
TestByteIdsFactory.class,
TestShortIdsFactory.class,
TestIntegerIdsFactory.class,
TestLongIdsFactory.class,
TestUuIdsFactory.class,
// Test the descriptors
TestDescriptorPrimitiveDataType.class,
TestDoubleDescriptor.class,
TestIntegerDescriptor.class,
TestLongDescriptor.class,
TestGeneralDescriptor.class,
// Test the descriptorModel
TestDescriptorModel.class,
// Tests the formatting of dates, granularities, and the factory
TestDateFormat.class,
TestDateBasedHelper.class,
TestMinute.class,
TestTimeGranularityFactory.class,
// Test the normalizer, the timeline and the mapper
TestDateNormalizer.class,
TestTimelineDefinition.class,
TestDateMapper.class,
// Test the time factory
TestBaseMapperFactory.class,
TestMapperFactory.TestMapperFactorySuite.class,
// Test the dataRetrievers
TestDataCollection.class,
TestDataRecord.class,
TestPreProcessedDataRecord.class,
TestDbDataRetriever.class,
TestCsvDataRetriever.class,
TestFixedStructureDataRetriever.class,
// Test the preProcessors
TestRhinoScriptPreProcessor.class,
// Test the dataSets
TestSingleStaticDataSet.class,
TestDataRetrieverDataSet.class,
TestMultipleDataSetIterator.class,
// Test the different created instances
TestDataStructure.class,
TestMetaDataModel.class,
TestDataModel.class,
// Test the xsd and xslt
TestXsdTidaModel.class,
TestXsltTidaModel.class,
// Test some bitmap implementations
TestBitmapId.class,
TestEWAHBitmap.class,
TestRoaringBitmap.class,
// Test the handler
TestTidaModelHandler.class,
// Test the record pre-processing
TestProcessedDataRecord.class,
// Test the caches
TestMemoryMetaDataCache.class,
TestFileMetaDataCache.class,
TestMemoryIdentifierCache.class,
TestFileIdentifierCache.class,
TestFileBitmapIdCacheConfig.class,
TestRecentlyUsedCachingStrategy.class,
TestFileBitmapCache.class,
TestFileFactDescriptorModelSetCache.class,
TestFileCaches.TestFileCachesSuite.class,
TestMapDbBitmapCache.class,
TestMapDbDataRecordCache.class,
TestIdsOnlyDataRecordCache.class,
// Test the index for facts
TestFactDescriptorSet.class,
TestFactDescriptorModelSet.class,
// Test the tida-indexes
TestMetaIndexDimension.class,
TestMetaIndex.class,
TestIntervalIndex.class,
TestDataRecordIndex.class,
TestTidaIndex.class,
// Test the fact-holder
TestMapFactsArrayBased.class,
TestMapFactsDescriptorBased.class,
// Test the query
TestDescriptorValue.class, TestDescriptorComperator.class,
TestGroupExpression.class, TestDescriptorLogicEvaluator.class,
TestGroupEvaluator.class, TestAliveQueries.class,
TestLoadQueries.class,
TestUnloadQueries.class,
TestSelectQueries.class,
TestInsertQueries.class,
TestAddQueries.class,
TestDropQueries.class,
TestModifyQueries.class,
TestGrantQueries.class,
TestRevokeQueries.class,
TestAssignQueries.class,
TestRemoveQueries.class,
TestGetQueries.class,
TestDeleteQueries.class,
// Test authentication manager
TestDefinedPermission.class,
TestMapDbAuthorizingRealm.class,
TestShiroAuthManager.class,
// Tests dimensions
TestDescriptorDimension.class, TestDescriptorGraph.class,
TestDays.class, TestMinutes.class,
TestTimeLevelTemplateManager.TestTimeLevelTemplateManagerSuite.class,
TestTimeGraph.class, TestTimeGraphMemberIndex.class,
TestDimensionHandler.class,
// Test the session management
TestSession.class, TestSessionManager.TestSessionManagerSuite.class,
// Test the server
TestTidaServer.class, TestCommunication.TestCommunicationSuite.class,
TestAuthServlet.class,
// Tests re-running the server
TestTidaModelHandlerPersistency.class,
// JDBC related tests
TestTidaDriver.class,
TestTidaConnection.class,
TestTidaResultSet.class,
TestTidaDatabaseMetaData.TestTidaDatabaseMetaDataSuite.class,
net.meisen.dissertation.jdbc.protocol.TestCommunication.class
})
public class AllUnitTests {
}
|
package pt.fccn.arquivo.pages;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
/**
* @author Nutchwax
*
*/
public class HighlightsPage {
private final WebDriver driver;
private static final String titleTextEN = "Examples of pages preserved by Arquivo.pt";
private static final String titleTextPT = "Exemplos de páginas preservadas no Arquivo.pt — Sobre o Arquivo.pt";
private static final String titleFirstPortuguesePagePT = "Home Page de Portugal / Portugal Home Page - preservado pelo Arquivo.pt";
private static final String titleFirstPortuguesePageEN = "Home Page de Portugal / Portugal Home Page - preserved by Arquivo.pt";
private static final String titleSmithsonianPT ="The Smithsonian Institution Home Page - preservado pelo Arquivo.pt";
private static final String titleSmithsonianEN ="The Smithsonian Institution Home Page - preserved by Arquivo.pt";
private static final String titleClixPT = "UEFA Euro 2004 - preservado pelo Arquivo.pt";
private static final String titleClixEN = "UEFA Euro 2004 - preserved by Arquivo.pt";
private static final String titleExpoPT = "EURO - O que é o euro ? - preservado pelo Arquivo.pt";
private static final String titleExpoEN = "EURO - O que é o euro ? - preserved by Arquivo.pt";
private static final String titlePublicoPT = "PUBLICO - preservado pelo Arquivo.pt";
private static final String titlePublicoEN = "PUBLICO - preserved by Arquivo.pt";
private static final String titleSapoPT = "Projecto HidroNet - Links 1 - preservado pelo Arquivo.pt";
private static final String titleSapoEN = "Projecto HidroNet - Links 1 - preserved by Arquivo.pt";
private static final String titleTimPT = "Tim Berners-Lee - preservado pelo Arquivo.pt";
private static final String titleTimEN = "Tim Berners-Lee - preserved by Arquivo.pt";
private static final String titlePresidenciaisPT ="portuguese presidentials of 2001 - preservado pelo Arquivo.pt";
private static final String titlePresidenciaisEN ="portuguese presidentials of 2001 - preserved by Arquivo.pt";
private static final String titleEuroPT ="Futebol Internacional - Notícias do dia - preservado pelo Arquivo.pt";
private static final String titleEuroEN ="Futebol Internacional - Notícias do dia - preserved by Arquivo.pt";
private static final String titleEloyEN ="Eloy Rodrigues - HOME PAGE - preservado pelo Arquivo.pt";
private static final String titleEloyPT ="Eloy Rodrigues - HOME PAGE - preserved by Arquivo.pt";
private static final String titlePortugalTelecomPT ="P O R T U G A L T E L E C O M - preservado pelo Arquivo.pt";
private static final String titlePortugalTelecomEN ="P O R T U G A L T E L E C O M - preserved by Arquivo.pt";
private static final String titleMinistrePT ="Ministère de l'Education Nationale - preservado pelo Arquivo.pt";
private static final String titleMinistreEN ="Ministère de l'Education Nationale - preserved by Arquivo.pt";
private static final String titleSicPT ="SIC Online - Cavaco Silva em Bragança - preservado pelo Arquivo.pt";
private static final String titleSicEN ="SIC Online - Cavaco Silva em Bragança - preserved by Arquivo.pt";
private static final String titleCimiterioPT ="Visita ao Cemitério - preservado pelo Arquivo.pt";
private static final String titleCimiterioEN ="Visita ao Cemitério - preserved by Arquivo.pt";
private static final String titleSapoDesportoPT ="Sapo Infordesporto - preservado pelo Arquivo.pt";
private static final String titleSapoDesportoEN ="Sapo Infordesporto - preserved by Arquivo.pt";
private static final String titleNimasPT ="NIMAS - FITAS EM CARTAZ - preservado pelo Arquivo.pt";
private static final String titleNimasEN ="NIMAS - FITAS EM CARTAZ - preserved by Arquivo.pt";
private static final String titleXLDBPT ="Referencias - preservado pelo Arquivo.pt";
private static final String titleXLDBEN ="Referencias - preserved by Arquivo.pt";
private static final String titleBosniaPT ="BOSNIA NOW - preservado pelo Arquivo.pt";
private static final String titleBosniaEN ="BOSNIA NOW - preserved by Arquivo.pt";
private static final String titleFitasPT ="NIMAS - FITAS EM CARTAZ - preservado pelo Arquivo.pt";
private static final String titleFitasEN ="NIMAS - FITAS EM CARTAZ - preserved by Arquivo.pt";
private static final String titleBeachcamPT = "BeachCam - Praia do Guincho - preservado pelo Arquivo.pt";
private static final String titleBeachcamEN = "BeachCam - Praia do Guincho - preserved by Arquivo.pt";
private static final String h1Title= "First Portuguese web page (1996)";
public HighlightsPage(WebDriver driver) {
this.driver = driver;
// Check that we're on the right page.
String pageTitle= driver.getTitle();
if (!(titleTextEN.contains(pageTitle) || titleTextPT.contains(pageTitle))){
throw new IllegalStateException("This is not the " + this.getClass().getName()+ "\n Title of current page: " + pageTitle+"\nExpected title: "+titleTextEN);
}
}
/**
* Verify if page has correct text
* @return true if page contains the expected text
*/
public boolean isPageCorrect() {
try{
try {
Thread.sleep(5000); //wait for page to load
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
System.out.println("Current Page: " + driver.getCurrentUrl());
String pageTitle = driver.getTitle();
return titleTextEN.contains(pageTitle) || titleTextPT.contains(pageTitle);
}catch(Exception e){
System.out.println("Some Error verifying the Highlights page text");
return false;
}
}
/**
* Run through the links of highlights
* @return true if all highlight boxes could be clicked
*/
public boolean goThroughHighlights() {
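// Click each highlight box on the page; any exception while navigating is reported as a failure.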
try{
try {
Thread.sleep(5000); //wait for page to load
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
List <WebElement> listOfHighlights = driver.findElements(By.id("boxes"));
for (WebElement element : listOfHighlights) {
element.click();
}
return true;
}catch(Exception e){
System.out.println("Some Error navigating through the Highlights");
return false;
}
}
public boolean checkLinkHighligths(){
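// Issue a GET request against every /wayback link on the page and fail if any of them does not return HTTP 200.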
try{
List<WebElement> linkList= driver.findElements(By.tagName("a"));
int statuscode=0;
for(int i=0 ; i<linkList.size() ; i++)
{
if(linkList.get(i).getAttribute("href") != null)
{
if (linkList.get(i).getAttribute("href").contains("/wayback")){
statuscode=getResponseCode(linkList.get(i).getAttribute("href"));
if (statuscode!= 200){
return false;
}
}
}
}
return true;
}catch (Exception e){
System.out.println("Error finding one of Highlights!");
e.printStackTrace();
return false;
}
}
/**
* @return true if all of the links are correct
*/
public boolean checkHighligthsPageLinks(){
String title=null;
int i =0;
List<String> aux = new ArrayList<String>();
try{
aux = getHighlightsUrl();
}catch (Exception e){
System.out.println("Some Error getting List of Highlight URls");
e.printStackTrace();
return false;
}
for(i=0 ; i<aux.size() ; i++)
{
title = getIdTitle(aux.get(i));
if (!inspectTitlesMatches(title)){
System.out.print("\n\nunexpected title: "+title);
return false;
}
}
return true;
}
/**
* @return a list with all of the current highlight URLs
*/
private List<String> getHighlightsUrl() throws Exception{
try {
Thread.sleep(5000); //wait for page to load
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
List<WebElement> linkList= driver.findElements(By.className("external-link"));
List<String> highlights = new ArrayList<String>();
for( int i =0; i< linkList.size();i++)
highlights.add(linkList.get(i).getAttribute("href"));
return highlights;
}
/**
* @param Url - each highlights url
* @return Title of the webpage
*/
private String getIdTitle (String Url){
WebDriverWait wait = new WebDriverWait(driver, 20);
System.out.println("Going to URL: " + Url);
driver.get(Url);
//wait until title was loaded
try {
Thread.sleep(5000); //wait for page to load
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
wait.until(ExpectedConditions.urlContains(Url));
//wait.until(ExpectedConditions.presenceOfElementLocated(By.cssSelector("title")));
return driver.getTitle();
}
/**
* @param titlepage the title of the currently loaded page
* @return true if matches any title
*/
public boolean inspectTitlesMatches(String titlepage){
if (titlePresidenciaisPT.contains(titlepage) || titlePresidenciaisEN.contains(titlepage)){
return true;
}
if (titleClixPT.equals(titlepage)|| titleClixEN.equals(titlepage)){
return true;
}
if (titleExpoPT.equals(titlepage) ||titleExpoEN.equals(titlepage)){
return true;
}
if (titleSmithsonianPT.equals(titlepage) || titleSmithsonianEN.equals(titlepage)){
return true;
}
if (titlePublicoEN.equals(titlepage)||titlePublicoPT.equals(titlepage)){
return true;
}
if (titleFirstPortuguesePageEN.contains(titlepage) || titleFirstPortuguesePagePT.contains(titlepage)){
return true;
}
if (titleSapoEN.equals(titlepage) || titleSapoPT.equals(titlepage)){
return true;
}
if (titleTimEN.equals(titlepage)||titleTimPT.equals(titlepage)){
return true;
}
if (titleEuroEN.equals(titlepage) ||titleEuroPT.equals(titlepage)){
return true;
}
if (titleEloyEN.equals(titlepage) ||titleEloyPT.equals(titlepage)){
return true;
}
if (titleXLDBEN.equals(titlepage) || titleXLDBPT.equals(titlepage)){
return true;
}
if (titleSapoDesportoEN.equals(titlepage) ||titleSapoDesportoPT.equals(titlepage)){
return true;
}
if (titlePortugalTelecomEN.equals(titlepage)||titlePortugalTelecomPT.equals(titlepage)){
return true;
}
if (titleMinistreEN.equals(titlepage) || titleMinistrePT.equals(titlepage)){
return true;
}
if (titleCimiterioEN.equals(titlepage) || titleCimiterioPT.equals(titlepage)){
return true;
}
if (titleNimasEN.equals(titlepage) || titleNimasPT.equals(titlepage)){
return true;
}
if (titleSicEN.equals(titlepage) || titleSicPT.equals(titlepage)){
return true;
}
if (titleBosniaEN.equals(titlepage) ||titleBosniaPT.equals(titlepage)){
return true;
}
if (titleFitasEN.equals(titlepage)||titleFitasPT.equals(titlepage)){
return true;
}
if (titleBeachcamEN.equals(titlepage) || titleBeachcamPT.equals(titlepage)){
return true;
}
else{
return false;
}
}
/**
* @param urlString
* @return the statuscode from the page
*/
public int getResponseCode(String urlString) {
URL u=null;
HttpURLConnection huc=null;
try {
u = new URL(urlString);
} catch (MalformedURLException e) {
e.printStackTrace();
return 0;
}
try {
huc = (HttpURLConnection) u.openConnection();
huc.setRequestMethod("GET");
huc.connect();
return huc.getResponseCode();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return 0;
}
}
|
package models;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.eclipse.jgit.revwalk.RevCommit;
import models.enumeration.EventType;
import models.enumeration.PullRequestReviewAction;
import models.enumeration.ResourceType;
import models.enumeration.WebhookType;
import models.resource.GlobalResource;
import models.resource.Resource;
import models.resource.ResourceConvertible;
import utils.RouteUtil;
import play.Logger;
import play.api.i18n.Lang;
import play.data.validation.Constraints.Required;
import play.db.ebean.Model;
import play.i18n.Messages;
import play.libs.F.Function;
import play.libs.Json;
import play.libs.ws.WS;
import play.libs.ws.WSRequestHolder;
import play.libs.ws.WSResponse;
import play.Play;
import playRepository.GitCommit;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.validation.constraints.Size;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
/**
* A webhook to be sent by events in project
*/
@Entity
public class Webhook extends Model implements ResourceConvertible {
private static final long serialVersionUID = 1L;
public static final Finder<Long, Webhook> find = new Finder<>(Long.class, Webhook.class);
/**
* Primary Key.
*/
@Id
public Long id;
/**
* Project which have this webhook.
*/
@ManyToOne
public Project project;
/**
* Payload URL of webhook.
*/
@Required
@Size(max=2000, message="project.webhook.payloadUrl.tooLong")
public String payloadUrl;
/**
* Secret token for server identity.
*/
@Size(max=250, message="project.webhook.secret.tooLong")
public String secret;
/**
* Condition for sending the webhook (true: send on git push only, false: send in all cases)
*/
public Boolean gitPushOnly;
public WebhookType webhookType = WebhookType.SIMPLE;
public Date createdAt;
/**
* Construct a webhook by the given {@code payloadUrl} and {@code secret}.
*
* @param projectId the ID of the project which will have this webhook
* @param payloadUrl the payload URL for this webhook
* @param secret the secret token for server identity
* @param gitPushOnly condition for sending (true = git push only, false = all cases)
*/
public Webhook(Long projectId, String payloadUrl, String secret, Boolean gitPushOnly) {
if (secret == null) {
secret = "";
}
this.project = Project.find.byId(projectId);
this.payloadUrl = payloadUrl;
this.secret = secret;
this.gitPushOnly = gitPushOnly;
this.createdAt = new Date();
}
@Override
public Resource asResource() {
return new GlobalResource() {
@Override
public String getId() {
return id.toString();
}
@Override
public ResourceType getType() {
return ResourceType.WEBHOOK;
}
};
}
public static List<Webhook> findByProject(Long projectId) {
return find.where().eq("project.id", projectId).findList();
}
public static void create(Long projectId, String payloadUrl, String secret, Boolean gitPushOnly) {
if (!payloadUrl.isEmpty()) {
Webhook webhook = new Webhook(projectId, payloadUrl, secret, gitPushOnly);
webhook.save();
}
// TODO : Raise appropriate error when required field is empty
}
public static void delete(Long webhookId, Long projectId) {
Webhook.findByIds(webhookId, projectId).delete();
}
/**
* Remove this webhook from a project.
*
* @param projectId ID of the project from which this webhook is removed
*/
public void delete(Long projectId) {
Project targetProject = Project.find.byId(projectId);
targetProject.webhooks.remove(this);
targetProject.update();
super.delete();
}
public static Webhook findByIds(Long webhookId, Long projectId) {
return find.where()
.eq("webhook.id", webhookId)
.eq("project.id", projectId)
.findUnique();
}
public static Webhook findById(Long webhookId) {
return find.where()
.eq("id", webhookId)
.findUnique();
}
private String getBaseUrl() {
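// Base URL of this Yobi instance ("localhost:9000" is used as the fallback host/port).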
return utils.Config.getScheme() + "://" + utils.Config.getHostport("localhost:9000");
}
private String buildRequestMessage(String url, String message) {
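// Build a Slack-style link "<url|text>"; for detailed Slack webhooks the '>' characters in the text are escaped so they do not terminate the link.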
String requestMessage = " <" + getBaseUrl() + url + "|";
if (this.webhookType == WebhookType.DETAIL_SLACK) {
requestMessage += message.replace(">", "&gt;") + ">";
} else {
requestMessage += message + ">";
}
return requestMessage;
}
// Issue
public void sendRequestToPayloadUrl(EventType eventType, User sender, Issue eventIssue) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventIssue);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildIssueDetails(eventIssue, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventIssue.asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventIssue.asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, Issue eventIssue) {
String requestMessage = "[" + project.name + "] "+ sender.name + " ";
switch (eventType) {
case NEW_ISSUE:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.new.issue");
break;
case ISSUE_STATE_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.issue.state.changed");
break;
case ISSUE_ASSIGNEE_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.issue.assignee.changed");
break;
case ISSUE_BODY_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.issue.body.changed");
break;
case ISSUE_MILESTONE_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.milestone.changed");
break;
case RESOURCE_DELETED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.issue.deleted");
break;
default:
play.Logger.warn("Unknown webhook event: " + eventType);
}
String eventIssueUrl = controllers.routes.IssueApp.issue(eventIssue.project.owner, eventIssue.project.name, eventIssue.getNumber()).url();
requestMessage += buildRequestMessage(eventIssueUrl, "#" + eventIssue.number + ": " + eventIssue.title);
return requestMessage;
}
// Issue transfer
public void sendRequestToPayloadUrl(EventType eventType, User sender, Issue eventIssue, Project previous) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventIssue, previous);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildIssueDetails(eventIssue, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventIssue.asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventIssue.asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, Issue eventIssue, Project previous) {
String requestMessage = "[" + project.name + "] "+ sender.name + " ";
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.issue.moved", previous.name, project.name);
String eventIssueUrl = controllers.routes.IssueApp.issue(eventIssue.project.owner, eventIssue.project.name, eventIssue.getNumber()).url();
requestMessage += buildRequestMessage(eventIssueUrl, "#" + eventIssue.number + ": " + eventIssue.title);
return requestMessage;
}
// Issue Detail (Slack)
private ArrayNode buildIssueDetails(Issue eventIssue, EventType eventType) {
ObjectMapper mapper = new ObjectMapper();
ArrayNode attachments = mapper.createArrayNode();
ArrayNode detailFields = mapper.createArrayNode();
if (eventIssue.milestone != null) {
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), "notification.type.milestone.changed"), eventIssue.milestone.title, true));
}
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), ""), eventIssue.assigneeName(), true));
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), "issue.state"), eventIssue.state.toString(), true));
attachments.add(buildAttachmentJSON(eventIssue.body, detailFields, eventType));
return attachments;
}
// Comment
public void sendRequestToPayloadUrl(EventType eventType, User sender, Comment eventComment) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventComment);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildCommentDetails(eventComment, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventComment.getParent().asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventComment.getParent().asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, Comment eventComment) {
String requestMessage = "[" + project.name + "] "+ sender.name + " ";
switch (eventType) {
case NEW_COMMENT:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.new.comment");
break;
case COMMENT_UPDATED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.comment.updated");
break;
}
requestMessage += buildRequestMessage(RouteUtil.getUrl(eventComment), "#" + eventComment.getParent().number + ": " + eventComment.getParent().title);
return requestMessage;
}
// Comment Detail (Slack)
private ArrayNode buildCommentDetails(Comment eventComment, EventType eventType) {
ObjectMapper mapper = new ObjectMapper();
ArrayNode attachments = mapper.createArrayNode();
attachments.add(buildAttachmentJSON(eventComment.contents, null, eventType));
return attachments;
}
// Pull Request
public void sendRequestToPayloadUrl(EventType eventType, User sender, PullRequest eventPullRequest) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventPullRequest);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildJsonWithPullRequestDetails(eventPullRequest, requestMessage, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventPullRequest.asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventPullRequest.asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, PullRequest eventPullRequest) {
String requestMessage = "[" + project.name + "] "+ sender.name + " ";
switch (eventType) {
case NEW_PULL_REQUEST:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.new.pullrequest");
break;
case PULL_REQUEST_STATE_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.pullrequest.state.changed");
break;
case PULL_REQUEST_MERGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.pullrequest.merged");
break;
case PULL_REQUEST_COMMIT_CHANGED:
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.pullrequest.commit.changed");
break;
}
requestMessage += buildRequestMessage(RouteUtil.getUrl(eventPullRequest), "#" + eventPullRequest.number + ": " + eventPullRequest.title);
return requestMessage;
}
// Pull Request Review
public void sendRequestToPayloadUrl(EventType eventType, User sender, PullRequest eventPullRequest, PullRequestReviewAction reviewAction) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventPullRequest, reviewAction);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildJsonWithPullRequestDetails(eventPullRequest, requestMessage, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventPullRequest.asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventPullRequest.asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, PullRequest eventPullRequest, PullRequestReviewAction reviewAction) {
String requestMessage = "[" + project.name + "] ";
switch (eventType) {
case PULL_REQUEST_REVIEW_STATE_CHANGED:
if (PullRequestReviewAction.DONE.equals(reviewAction)) {
requestMessage += Messages.get(Lang.defaultLang(), "notification.pullrequest.reviewed", sender.name);
} else {
requestMessage += Messages.get(Lang.defaultLang(), "notification.pullrequest.unreviewed", sender.name);;
}
break;
}
requestMessage += buildRequestMessage(RouteUtil.getUrl(eventPullRequest), "#" + eventPullRequest.number + ": " + eventPullRequest.title);
return requestMessage;
}
// Pull Request Comment
public void sendRequestToPayloadUrl(EventType eventType, User sender, PullRequest eventPullRequest, ReviewComment reviewComment) {
String requestBodyString = "";
String requestMessage = buildRequestBody(eventType, sender, eventPullRequest, reviewComment);
if (this.webhookType == WebhookType.DETAIL_SLACK) {
ArrayNode attachments = buildJsonWithPullRequestDetails(eventPullRequest, requestMessage, eventType);
requestBodyString = buildRequestJsonWithAttachments(requestMessage, attachments);
} else if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
ObjectNode thread = buildThreadJSON(eventPullRequest.asResource());
requestBodyString = buildRequestJsonWithThread(requestMessage, thread);
} else {
requestBodyString = buildTextPropertyOnlyJSON(requestMessage);
}
if (this.webhookType == WebhookType.DETAIL_HANGOUT_CHAT) {
sendRequest(requestBodyString, this.id, eventPullRequest.asResource());
} else {
sendRequest(requestBodyString);
}
}
private String buildRequestBody(EventType eventType, User sender, PullRequest eventPullRequest, ReviewComment reviewComment) {
String requestMessage = "[" + project.name + "] " + sender.name + " ";
requestMessage += Messages.get(Lang.defaultLang(), "notification.type.new.simple.comment");
requestMessage += " <" + utils.Config.getScheme() + "://" + utils.Config.getHostport("localhost:9000") + RouteUtil.getUrl(reviewComment) + "|#" + eventPullRequest.number + ": " + eventPullRequest.title + ">";
return requestMessage;
}
// Pull Request Detail (Slack)
private ArrayNode buildJsonWithPullRequestDetails(PullRequest eventPullRequest, String requestMessage, EventType eventType) {
ObjectMapper mapper = new ObjectMapper();
ArrayNode detailFields = mapper.createArrayNode();
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), "pullRequest.sender"), eventPullRequest.contributor.name, false));
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), "pullRequest.from"), eventPullRequest.fromBranch, true));
detailFields.add(buildTitleValueJSON(Messages.get(Lang.defaultLang(), "pullRequest.to"), eventPullRequest.toBranch, true));
ArrayNode attachments = mapper.createArrayNode();
attachments.add(buildAttachmentJSON(eventPullRequest.body, detailFields, eventType));
return attachments;
}
private String buildTextPropertyOnlyJSON(String requestMessage) {
ObjectNode requestBody = Json.newObject();
requestBody.put("text", requestMessage);
return Json.stringify(requestBody);
}
private String buildRequestJsonWithAttachments(String requestMessage, ArrayNode attachments) {
ObjectNode requestBody = Json.newObject();
requestBody.put("text", requestMessage);
requestBody.put("attachments", attachments);
return Json.stringify(requestBody);
}
private String buildRequestJsonWithThread(String requestMessage, ObjectNode thread) {
ObjectNode requestBody = Json.newObject();
requestBody.put("text", requestMessage);
requestBody.put("thread", thread);
return Json.stringify(requestBody);
}
private ObjectNode buildTitleValueJSON(String title, String value, Boolean shorten) {
ObjectNode titleJSON = Json.newObject();
titleJSON.put("title", title);
titleJSON.put("value", value);
titleJSON.put("short", shorten);
return titleJSON;
}
private ObjectNode buildAttachmentJSON(String text, ArrayNode detailFields, EventType eventType) {
ObjectNode attachmentsJSON = Json.newObject();
attachmentsJSON.put("text", text);
attachmentsJSON.put("fields", detailFields);
String color = Play.application().configuration().getString("slack." + eventType, "");
attachmentsJSON.put("color", color);
return attachmentsJSON;
}
private ObjectNode buildSenderJSON(User sender) {
ObjectNode senderJSON = Json.newObject();
senderJSON.put("login", sender.loginId);
senderJSON.put("id", sender.id);
senderJSON.put("avatar_url", sender.avatarUrl());
senderJSON.put("type", "User");
senderJSON.put("site_admin", sender.isSiteManager());
return senderJSON;
}
private ObjectNode buildPusherJSON(User sender) {
ObjectNode pusherJSON = Json.newObject();
pusherJSON.put("name", sender.name);
pusherJSON.put("email", sender.email);
return pusherJSON;
}
private ObjectNode buildRepositoryJSON() {
ObjectNode repositoryJSON = Json.newObject();
repositoryJSON.put("id", project.id);
repositoryJSON.put("name", project.name);
repositoryJSON.put("owner", project.owner);
repositoryJSON.put("html_url", RouteUtil.getUrl(project));
repositoryJSON.put("overview", project.overview); // Description.
repositoryJSON.put("private", project.isPrivate());
return repositoryJSON;
}
private ObjectNode buildThreadJSON(Resource resource) {
ObjectNode threadJSON = Json.newObject();
WebhookThread webhookthread = WebhookThread.getWebhookThread(this.id, resource);
if (webhookthread != null) {
threadJSON.put("name", webhookthread.threadId);
}
return threadJSON;
}
private void sendRequest(String payload) {
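// Fire-and-forget POST to the payload URL; non-2xx responses are only logged, no retry is attempted.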
play.Logger.info(payload);
try {
WSRequestHolder requestHolder = WS.url(this.payloadUrl);
requestHolder
.setHeader("Content-Type", "application/json")
.setHeader("User-Agent", "Yobi-Hookshot")
.setHeader("Authorization", "token " + this.secret)
.post(payload)
.map(
new Function<WSResponse, Integer>() {
public Integer apply(WSResponse response) {
int statusCode = response.getStatus();
String statusText = response.getStatusText();
if (statusCode < 200 || statusCode >= 300) {
// Unsuccessful status code - log some information in server.
Logger.info("[Webhook] Request responded code " + Integer.toString(statusCode) + ": " + statusText);
Logger.info("[Webhook] Request payload: " + payload);
}
return 0;
}
}
);
} catch (Exception e) {
// Request failed (Dead end point or invalid payload URL) - log some information in server.
Logger.info("[Webhook] Request failed at given payload URL: " + this.payloadUrl);
}
}
private void sendRequest(String payload, Long webhookId, Resource resource) {
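// Variant used for Hangouts Chat: on the first successful response the returned thread name is stored so later messages about the same resource join that thread.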
play.Logger.info(payload);
try {
WSRequestHolder requestHolder = WS.url(this.payloadUrl);
requestHolder
.setHeader("Content-Type", "application/json")
.setHeader("User-Agent", "Yobi-Hookshot")
.setHeader("Authorization", "token " + this.secret)
.post(payload)
.map(
new Function<WSResponse, Integer>() {
public Integer apply(WSResponse response) {
int statusCode = response.getStatus();
String statusText = response.getStatusText();
if (statusCode < 200 || statusCode >= 300) {
// Unsuccessful status code - log some information in server.
Logger.info("[Webhook] Request responded code " + Integer.toString(statusCode) + ": " + statusText);
Logger.info("[Webhook] Request payload: " + payload);
} else {
WebhookThread webhookthread = WebhookThread.getWebhookThread(webhookId, resource);
if (webhookthread == null) {
String threadId = response.asJson().findPath("thread").findPath("name").asText();
webhookthread = WebhookThread.create(webhookId, resource, threadId);
}
}
return 0;
}
}
);
} catch (Exception e) {
// Request failed (Dead end point or invalid payload URL) - log some information in server.
Logger.info("[Webhook] Request failed at given payload URL: " + this.payloadUrl);
}
}
// Commit
public void sendRequestToPayloadUrl(List<RevCommit> commits, List<String> refNames, User sender, String title) {
String requestBodyString = buildRequestBody(commits, refNames, sender, title);
sendRequest(requestBodyString);
}
private String buildRequestBody(List<RevCommit> commits, List<String> refNames, User sender, String title) {
ObjectNode requestBody = Json.newObject();
ObjectMapper mapper = new ObjectMapper();
ArrayNode refNamesNodes = mapper.createArrayNode();
ArrayNode commitsNodes = mapper.createArrayNode();
for (String refName : refNames) {
refNamesNodes.add(refName);
}
requestBody.put("ref", refNamesNodes);
for (RevCommit commit : commits) {
commitsNodes.add(buildJSONFromCommit(project, commit));
}
requestBody.put("commits", commitsNodes);
requestBody.put("head_commit", commitsNodes.get(0));
requestBody.put("sender", buildSenderJSON(sender));
requestBody.put("pusher", buildPusherJSON(sender));
requestBody.put("repository", buildRepositoryJSON());
return Json.stringify(requestBody);
}
private ObjectNode buildJSONFromCommit(Project project, RevCommit commit) {
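// Build a GitHub push-event style commit node (id, message, timestamp, url, author and committer).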
GitCommit gitCommit = new GitCommit(commit);
ObjectNode commitJSON = Json.newObject();
ObjectNode authorJSON = Json.newObject();
ObjectNode committerJSON = Json.newObject();
commitJSON.put("id", gitCommit.getFullId());
commitJSON.put("message", gitCommit.getMessage());
commitJSON.put("timestamp",
new SimpleDateFormat("yyyy-MM-dd'T'hh:mm:ssZ").format(new Date(gitCommit.getCommitTime() * 1000L)));
commitJSON.put("url", getBaseUrl() + RouteUtil.getUrl(project) + "/commit/"+gitCommit.getFullId());
authorJSON.put("name", gitCommit.getAuthorName());
authorJSON.put("email", gitCommit.getAuthorEmail());
committerJSON.put("name", gitCommit.getCommitterName());
committerJSON.put("email", gitCommit.getCommitterEmail());
// TODO : Add 'username' property (howto?)
commitJSON.put("author", authorJSON);
commitJSON.put("committer", committerJSON);
// TODO : Add added, removed, modified file list (not supported by JGit?)
return commitJSON;
}
@Override
public String toString() {
return "Webhook{" +
"id=" + id +
", project=" + project +
", payloadUrl='" + payloadUrl + '\'' +
", secret='" + secret + '\'' +
", gitPushOnly=" + gitPushOnly +
", webhookType=" + webhookType +
", createdAt=" + createdAt +
'}';
}
}
|
package uno.perwironegoro.boardgames.pawnrace.ai;
public class Heuristic {
public static final Heuristic
Max = new Heuristic(10, 4, 3, 2, 10, 0, "Max"),
Edgar = new Heuristic(10, 4, 3, 2, 10, 0, "Edgar");
protected final int
betweenEdge,
pastEdge,
onEdge,
passed,
pawnExists,
protectScore;
protected final String name;
/**
* Multipliers: passed, pastEdge, onEdge, betweenEdge,
* pawnExists, protectScore
*/
public Heuristic(int passed, int pastEdge, int onEdge, int betweenEdge, int pawnExists, int protectScore, String name) {
this.passed = passed;
this.pastEdge = pastEdge;
this.onEdge = onEdge;
this.betweenEdge = betweenEdge;
this.pawnExists = pawnExists;
this.protectScore = protectScore;
this.name = name;
}
public String getName() {
return name;
}
public static Heuristic getHeuristic(String name) {
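// Case-insensitive lookup of a predefined heuristic by name; returns null when no heuristic matches.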
for(Heuristic h : Heuristic.getAllHeuristics()) {
if(name.equalsIgnoreCase(h.getName())) {
return h;
}
}
return null;
}
public static Heuristic[] getAllHeuristics() {
Heuristic[] hs = new Heuristic[2];
hs[0] = Max;
hs[1] = Edgar;
return hs;
}
}
|
package VASSAL.counters;
import VASSAL.i18n.Resources;
import java.awt.Component;
import java.awt.Graphics;
import java.awt.Point;
import java.awt.Rectangle;
import java.awt.Shape;
import java.awt.geom.AffineTransform;
import java.awt.geom.Point2D;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.KeyStroke;
import javax.swing.SwingUtilities;
import VASSAL.build.GameModule;
import VASSAL.build.module.GlobalOptions;
import VASSAL.build.module.Map;
import VASSAL.build.module.documentation.HelpFile;
import VASSAL.build.module.map.MovementReporter;
import VASSAL.build.module.map.boardPicker.Board;
import VASSAL.command.Command;
import VASSAL.command.NullCommand;
import VASSAL.configure.BooleanConfigurer;
import VASSAL.configure.FormattedExpressionConfigurer;
import VASSAL.configure.NamedHotKeyConfigurer;
import VASSAL.configure.StringConfigurer;
import VASSAL.i18n.PieceI18nData;
import VASSAL.i18n.TranslatablePiece;
import VASSAL.tools.FormattedString;
import VASSAL.tools.NamedKeyStroke;
import VASSAL.tools.SequenceEncoder;
import net.miginfocom.swing.MigLayout;
/**
* Give a piece a command that moves it a fixed amount in a particular
* direction, optionally tracking the current rotation of the piece.
*/
public class Translate extends Decorator implements TranslatablePiece {
private static final String _0 = "0";
public static final String ID = "translate;"; // NON-NLS
protected KeyCommand[] commands;
protected String commandName;
protected NamedKeyStroke keyCommand;
protected FormattedString xDist = new FormattedString("");
protected FormattedString xIndex = new FormattedString("");
protected FormattedString xOffset = new FormattedString("");
protected FormattedString yDist = new FormattedString("");
protected FormattedString yIndex = new FormattedString("");
protected FormattedString yOffset = new FormattedString("");
protected String description;
protected boolean moveStack;
protected KeyCommand moveCommand;
protected static MoveExecuter mover;
public Translate() {
this(ID + Resources.getString("Editor.MoveFixedDistance.default_command"), null);
}
public Translate(String type, GamePiece inner) {
mySetType(type);
setInner(inner);
}
@Override
public String getDescription() {
return buildDescription("Editor.MoveFixedDistance.trait_description", description);
}
@Override
public void mySetType(String type) {
type = type.substring(ID.length());
final SequenceEncoder.Decoder st = new SequenceEncoder.Decoder(type, ';');
commandName = st.nextToken(Resources.getString("Editor.MoveFixedDistance.default_command"));
keyCommand = st.nextNamedKeyStroke('M');
xDist.setFormat(st.nextToken(_0));
yDist.setFormat(st.nextToken("60"));
moveStack = st.nextBoolean(false); // Default move whole stack option to false
xIndex.setFormat(st.nextToken(_0));
yIndex.setFormat(st.nextToken(_0));
xOffset.setFormat(st.nextToken(_0));
yOffset.setFormat(st.nextToken(_0));
description = st.nextToken("");
commands = null;
}
@Override
protected KeyCommand[] myGetKeyCommands() {
if (commands == null) {
moveCommand = new KeyCommand(commandName, keyCommand, Decorator.getOutermost(this), this);
if (commandName.length() > 0 && keyCommand != null && !keyCommand.isNull()) {
commands = new KeyCommand[]{moveCommand};
}
else {
commands = new KeyCommand[0];
}
}
moveCommand.setEnabled(getMap() != null);
return commands;
}
@Override
public String myGetState() {
return "";
}
@Override
public String myGetType() {
final SequenceEncoder se = new SequenceEncoder(';');
se.append(commandName)
.append(keyCommand)
.append(xDist.getFormat())
.append(yDist.getFormat())
.append(moveStack)
.append(xIndex.getFormat())
.append(yIndex.getFormat())
.append(xOffset.getFormat())
.append(yOffset.getFormat())
.append(description);
return ID + se.getValue();
}
@Override
public Command keyEvent(KeyStroke stroke) {
// Classic MFD delays the execution of the inner piece's key event until after this piece has moved
// This unexpectedly changes the order of trait execution, but is required for the old Move Batcher to work correctly
if (GlobalOptions.getInstance().isUseClassicMoveFixedDistance()) {
myGetKeyCommands();
if (moveCommand.matches(stroke)) {
return myKeyEvent(stroke);
}
}
// For New MFD, use standard trait execution timing
return super.keyEvent(stroke);
}
@Override
public Command myKeyEvent(KeyStroke stroke) {
myGetKeyCommands();
if (moveCommand.matches(stroke)) {
return GlobalOptions.getInstance().isUseClassicMoveFixedDistance() ? classicTranslate(stroke) : newTranslate(stroke);
}
return null;
}
/*
* New Translate code.
* Simplified. Get rid of Move Batcher. Use same technique as Send To Location to
* move counters. A series of Translate commands issued by a Trigger Action will now act as
* expected and Undo properly.
*
* NOTE: If the Stack Move option is used and a series of MFD commands are issued by a Trigger Action
* then the moving pieces will 'pick up' any pieces they land on along the way. The Stack Move option is not
* recommended for this reason and now defaults to 'N' and is marked in the Editor as 'Not Recommended'.
*/
protected Command newTranslate(KeyStroke stroke) {
final GamePiece target = findTarget(stroke);
if (target == null) {
return null;
}
// Current Position
Point p = getPosition();
// Calculate the destination
translate(p);
// Handle rotation of the piece, movement is relative to the current facing of the unit.
// Use the first Rotator trait below us, as no rotators above us can affect us
final FreeRotator myRotation = (FreeRotator) Decorator.getDecorator(this, FreeRotator.class);
if (myRotation != null) {
final Point2D myPosition = getPosition().getLocation();
Point2D p2d = p.getLocation();
p2d = AffineTransform.getRotateInstance(myRotation.getCumulativeAngleInRadians(), myPosition.getX(), myPosition.getY()).transform(p2d, null);
p = new Point((int) Math.round(p2d.getX()), (int) Math.round(p2d.getY())); // Use Round not Truncate to prevent drift
}
// And snap to the grid if required.
if (!Boolean.TRUE.equals(Decorator.getOutermost(this).getProperty(Properties.IGNORE_GRID))) {
p = getMap().snapTo(p);
}
// Move the piece(s)
Command c = new NullCommand();
if (target instanceof Stack) {
for (final GamePiece gp : ((Stack) target).asList()) {
final boolean pieceSelected = Boolean.TRUE.equals(gp.getProperty(Properties.SELECTED));
if (pieceSelected || moveStack) {
c = c.append(movePiece(gp, p));
}
}
}
else {
c = c.append(movePiece(target, p));
}
return c;
}
/*
* Move a single piece to a destination
*/
protected Command movePiece(GamePiece gp, Point dest) {
// Is the piece on a map?
final Map map = gp.getMap();
if (map == null) {
return null;
}
// Set the Old... properties
Command c = putOldProperties(this);
// Move the piece
final GamePiece outer = Decorator.getOutermost(gp);
c = c.append(map.placeOrMerge(outer, dest));
// Apply after Move Key
if (map.getMoveKey() != null) {
c = c.append(outer.keyEvent(map.getMoveKey()));
}
// Unlink from Parent Stack (in case it is a Deck).
final Stack parent = outer.getParent();
if (parent != null) {
c = c.append(parent.pieceRemoved(outer));
}
return c;
}
/**
* Classic Translate code.
* The original Move Fixed Distance code does not work properly in Triggers, creates additional Null
* actions and does not undo properly. Some modules may depend on this behaviour, so it is retained, but it is
* only used when a module-level preference is turned on.
*/
protected Command classicTranslate(KeyStroke stroke) {
Command c = new NullCommand();
if (mover == null) {
mover = new MoveExecuter();
mover.setKeyEvent(stroke);
mover.setAdditionalCommand(putOldProperties(this));
SwingUtilities.invokeLater(mover);
}
final GamePiece target = findTarget(stroke);
if (target != null) {
c = c.append(moveTarget(target));
}
mover.addKeyEventTarget(piece);
// Return a non-null command to indicate that a change actually happened
// Note: Looks weird to wipe out the Commands, but they have all been added to the Move Executor.
c = new NullCommand() {
@Override
public boolean isNull() {
return false;
}
};
return c;
}
protected Command moveTarget(GamePiece target) {
// Has this piece already got a move scheduled? If so, then we
// need to use the endpoint of any existing moves as our
// starting point.
Point p = mover.getUpdatedPosition(target);
// First move, so use the current location.
if (p == null) {
p = new Point(getPosition());
}
// Perform the move fixed distance
translate(p);
// Handle rotation of the piece
final FreeRotator myRotation = (FreeRotator) Decorator.getDecorator(this, FreeRotator.class);
if (myRotation != null) {
final Point2D myPosition = getPosition().getLocation();
Point2D p2d = p.getLocation();
p2d = AffineTransform.getRotateInstance(myRotation.getCumulativeAngleInRadians(), myPosition.getX(), myPosition.getY()).transform(p2d, null);
p = new Point((int) p2d.getX(), (int) p2d.getY());
}
// And snap to the grid if required.
if (!Boolean.TRUE.equals(Decorator.getOutermost(this).getProperty(Properties.IGNORE_GRID))) {
p = getMap().snapTo(p);
}
// Add to the list of scheduled moves
mover.add(target.getMap(), target, p);
return null;
}
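/*
 * Computes the offset for each axis as distance + index * offset, scaled by the board
 * magnification, and shifts the point by (x, -y): "distance upwards" decreases the
 * screen y coordinate. E.g. (illustrative values only) xDist=2, xIndex=3, xOffset=10
 * moves the piece 32 units to the right before magnification is applied.
 */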
protected void translate(Point p) {
int x;
int y;
final GamePiece outer = Decorator.getOutermost(this);
final Board b = outer.getMap().findBoard(p);
final int Xdist = xDist.getTextAsInt(outer, "Xdistance", this); // NON-NLS
final int Xindex = xIndex.getTextAsInt(outer, "Xindex", this); // NON-NLS
final int Xoffset = xOffset.getTextAsInt(outer, "Xoffset", this); // NON-NLS
x = Xdist + Xindex * Xoffset;
if (b != null) {
x = (int)Math.round(b.getMagnification() * x);
}
final int Ydist = yDist.getTextAsInt(outer, "Ydistance", this); // NON-NLS
final int Yindex = yIndex.getTextAsInt(outer, "Yindex", this); // NON-NLS
final int Yoffset = yOffset.getTextAsInt(outer, "Yoffset", this); // NON-NLS
y = Ydist + Yindex * Yoffset;
if (b != null) {
y = (int)Math.round(b.getMagnification() * y);
}
p.translate(x, -y);
}
protected GamePiece findTarget(KeyStroke stroke) {
final GamePiece outer = Decorator.getOutermost(this);
GamePiece target = outer;
if (moveStack
&& outer.getParent() != null
&& !outer.getParent().isExpanded()) {
// Only move entire stack if this is the top piece
// Otherwise moves the stack too far if the whole stack is multi-selected
if (outer != outer.getParent().topPiece(GameModule.getUserId())) { //NOTE: topPiece() returns the top VISIBLE piece (not hidden by Invisible trait)
target = null;
}
else {
target = outer.getParent();
}
}
return target;
}
@Override
public void mySetState(String newState) {
}
@Override
public Rectangle boundingBox() {
return getInner().boundingBox();
}
@Override
public void draw(Graphics g, int x, int y, Component obs, double zoom) {
getInner().draw(g, x, y, obs, zoom);
}
@Override
public String getName() {
return getInner().getName();
}
@Override
public Shape getShape() {
return getInner().getShape();
}
@Override
public PieceEditor getEditor() {
return new Editor(this);
}
@Override
public HelpFile getHelpFile() {
return HelpFile.getReferenceManualPage("Translate.html"); // NON-NLS
}
@Override
public PieceI18nData getI18nData() {
return getI18nData(commandName, getCommandDescription(description, Resources.getString("Editor.MoveFixedDistance.move_fixed_distance_command")));
}
@Override
public boolean testEquals(Object o) {
if (! (o instanceof Translate)) return false;
final Translate c = (Translate) o;
if (! Objects.equals(commandName, c.commandName)) return false;
if (! Objects.equals(keyCommand, c.keyCommand)) return false;
if (! Objects.equals(xDist, c.xDist)) return false;
if (! Objects.equals(yDist, c.yDist)) return false;
if (! Objects.equals(moveStack, c.moveStack)) return false;
if (! Objects.equals(xIndex, c.xIndex)) return false;
if (! Objects.equals(yIndex, c.yIndex)) return false;
if (! Objects.equals(xOffset, c.xOffset)) return false;
if (! Objects.equals(yOffset, c.yOffset)) return false;
return Objects.equals(description, c.description);
}
public static class Editor implements PieceEditor {
private final FormattedExpressionConfigurer xDist;
private final FormattedExpressionConfigurer yDist;
private final StringConfigurer name;
private final NamedHotKeyConfigurer key;
private final TraitConfigPanel controls;
private final BooleanConfigurer moveStack;
@Deprecated (since = "2020-12-11", forRemoval = true)
protected BooleanConfigurer advancedInput;
protected FormattedExpressionConfigurer xIndexInput;
protected FormattedExpressionConfigurer xOffsetInput;
protected FormattedExpressionConfigurer yIndexInput;
protected FormattedExpressionConfigurer yOffsetInput;
protected StringConfigurer descInput;
public Editor(Translate t) {
controls = new TraitConfigPanel();
descInput = new StringConfigurer(t.description);
descInput.setHintKey("Editor.description_hint");
controls.add("Editor.description_label", descInput);
name = new StringConfigurer(t.commandName);
controls.add("Editor.menu_command", name);
key = new NamedHotKeyConfigurer(t.keyCommand);
controls.add("Editor.keyboard_command", key);
xDist = new FormattedExpressionConfigurer(t.xDist.getFormat(), t);
controls.add("Editor.MoveFixedDistance.distance_to_the_right", xDist);
yDist = new FormattedExpressionConfigurer(t.yDist.getFormat(), t);
controls.add("Editor.MoveFixedDistance.distance_upwards", yDist);
// Hint that Move Entire Stack, even in fixed code, has problems.
moveStack = new BooleanConfigurer(Boolean.valueOf(t.moveStack));
controls.add("Editor.MoveFixedDistance.move_entire_stack", moveStack);
final JLabel xLabel = new JLabel(Resources.getString("Editor.MoveFixedDistance.additional_offset_to_the_right"));
final JPanel xControls = new JPanel(new MigLayout("ins 0", "[fill,grow]rel[]rel[fill,grow]")); // NON-NLS
xLabel.setLabelFor(xControls);
xIndexInput = new FormattedExpressionConfigurer(t.xIndex.getFormat(), t);
xControls.add(xIndexInput.getControls(), "grow"); // NON-NLS
JLabel times = new JLabel(Resources.getString("Editor.MoveFixedDistance.times"));
xOffsetInput = new FormattedExpressionConfigurer(t.xOffset.getFormat(), t);
times.setLabelFor(xOffsetInput.getControls());
xControls.add(times);
xControls.add(xOffsetInput.getControls(), "grow"); // NON-NLS
controls.add(xLabel);
controls.add(xControls, "grow,wrap"); // NON-NLS
final JLabel yLabel = new JLabel(Resources.getString("Editor.MoveFixedDistance.additional_offset_upwards"));
final JPanel yControls = new JPanel(new MigLayout("ins 0", "[fill,grow]rel[]rel[fill,grow]")); // NON-NLS
yLabel.setLabelFor(yControls);
yIndexInput = new FormattedExpressionConfigurer(t.yIndex.getFormat(), t);
yControls.add(yIndexInput.getControls());
times = new JLabel(Resources.getString("Editor.MoveFixedDistance.times"));
yOffsetInput = new FormattedExpressionConfigurer(t.yOffset.getFormat(), t);
times.setLabelFor(yOffsetInput.getControls());
yControls.add(times);
yControls.add(yOffsetInput.getControls(), "grow"); // NON-NLS
controls.add(yLabel);
controls.add(yControls, "grow,wrap"); // NON-NLS
}
@Override
public Component getControls() {
return controls;
}
@Override
public String getState() {
return "";
}
@Override
public String getType() {
final SequenceEncoder se = new SequenceEncoder(';');
se.append(name.getValueString())
.append(key.getValueString())
.append(xDist.getValueString())
.append(yDist.getValueString())
.append(moveStack.getValueString())
.append(xIndexInput.getValueString())
.append(yIndexInput.getValueString())
.append(xOffsetInput.getValueString())
.append(yOffsetInput.getValueString())
.append(descInput.getValueString());
return ID + se.getValue();
}
}
/**
* Batches up all the movement commands resulting from a single KeyEvent
* and executes them at once. Ensures that pieces that are moving won't
* be merged with other moving pieces until they've been moved.
*/
public static class MoveExecuter implements Runnable {
private final List<Move> moves = new ArrayList<>();
private final Set<GamePiece> pieces = new HashSet<>();
private KeyStroke stroke;
private final List<GamePiece> innerPieces = new ArrayList<>();
private Command additionalCommand;
@Override
public void run() {
mover = null;
Command comm = new NullCommand();
comm = comm.append(additionalCommand);
for (final Move move : moves) {
final Map.Merger merger =
new Map.Merger(move.map, move.pos, move.piece);
final DeckVisitor v = new DeckVisitor() {
@Override
public Object visitDeck(Deck d) {
return merger.visitDeck(d);
}
@Override
public Object visitStack(Stack s) {
if (!pieces.contains(s) &&
move.map.getPieceCollection().canMerge(s, move.piece)) {
return merger.visitStack(s);
}
else {
return null;
}
}
@Override
public Object visitDefault(GamePiece p) {
if (!pieces.contains(p) &&
move.map.getPieceCollection().canMerge(p, move.piece)) {
return merger.visitDefault(p);
}
else {
return null;
}
}
};
final DeckVisitorDispatcher dispatch = new DeckVisitorDispatcher(v);
Command c = move.map.apply(dispatch);
if (c == null) {
c = move.map.placeAt(move.piece, move.pos);
// Apply Auto-move key
if (move.map.getMoveKey() != null) {
c.append(Decorator.getOutermost(move.piece)
.keyEvent(move.map.getMoveKey()));
}
}
comm.append(c);
if (move.piece.getMap() == move.map) {
move.map.ensureVisible(move.map.selectionBoundsOf(move.piece));
}
pieces.remove(move.piece);
move.map.repaint();
}
final MovementReporter r = new MovementReporter(comm);
if (GlobalOptions.getInstance().autoReportEnabled()) {
final Command reportCommand = r.getReportCommand();
if (reportCommand != null) {
reportCommand.execute();
}
comm.append(reportCommand);
}
comm.append(r.markMovedPieces());
if (stroke != null) {
for (final GamePiece gamePiece : innerPieces) {
comm.append(gamePiece.keyEvent(stroke));
}
}
GameModule.getGameModule().sendAndLog(comm);
}
public void add(Map map, GamePiece piece, Point pos) {
moves.add(new Move(map, piece, pos));
pieces.add(piece);
}
public void addKeyEventTarget(GamePiece piece) {
innerPieces.add(piece);
}
public void setKeyEvent(KeyStroke stroke) {
this.stroke = stroke;
}
public void setAdditionalCommand(Command c) {
additionalCommand = c;
}
public Command getAdditionalCommand() {
return additionalCommand;
}
/**
* Return the updated position of a piece that has a move
* calculation recorded
*
* @param target piece to check
* @return updated position
*/
public Point getUpdatedPosition(GamePiece target) {
Point p = null;
for (final Move move : moves) {
if (move.piece == target) {
p = move.pos;
}
}
return p;
}
private static class Move {
private final Map map;
private final GamePiece piece;
private final Point pos;
public Move(Map map, GamePiece piece, Point pos) {
this.map = map;
this.piece = piece;
this.pos = pos;
}
}
}
}
|
package team3543;
import ftclib.FtcOpMode;
import trclib.TrcDbgTrace;
import trclib.TrcEvent;
import trclib.TrcRobot;
import trclib.TrcStateMachine;
import trclib.TrcTimer;
public class CmdAuto100 implements TrcRobot.RobotCommand
{
private static final boolean debugXPid = false;
private static final boolean debugYPid = false;
private static final boolean debugTurnPid = false;
private TrcDbgTrace tracer = FtcOpMode.getGlobalTracer();
private enum State
{
NEAR_START,
TURN_TO_WALL,
GOTO_WALL,
PARALLEL_WALL,
ALIGN_WALL1,
ALIGN_WALL2,
ALIGN_WALL3,
FIND_LINE,
PUSH_BUTTON1,
PUSH_BUTTON2,
RETRACT,
NEXT_BEACON,
GOTO_VORTEX,
TURN_TO_VORTEX,
PARK_VORTEX,
DONE
} //enum State
private static final String moduleName = "CmdAuto100";
private Robot robot;
private FtcAuto.Alliance alliance;
private FtcAuto.ParkOption parkOption;
private int beaconButtons;
private boolean usePath1;
private int remainingBeaconButtons;
private boolean shortRun;
private CmdNearStart nearStartCmd;
private TrcEvent event;
private TrcTimer timer;
private TrcStateMachine<State> sm;
private boolean leftPusherExtended = false;
private boolean rightPusherExtended = false;
public CmdAuto100(
Robot robot,
FtcAuto.Alliance alliance,
double delay,
int numParticles,
FtcAuto.ParkOption parkOption,
int beaconButtons,
boolean usePath1)
{
this.robot = robot;
this.alliance = alliance;
this.parkOption = parkOption;
this.beaconButtons = beaconButtons;
this.usePath1 = usePath1;
remainingBeaconButtons = beaconButtons;
shortRun = alliance == FtcAuto.Alliance.RED_ALLIANCE && (usePath1 || beaconButtons == 1) ||
alliance == FtcAuto.Alliance.BLUE_ALLIANCE && !usePath1 && beaconButtons == 1;
nearStartCmd = new CmdNearStart(robot, alliance, delay, numParticles, shortRun);
event = new TrcEvent(moduleName);
timer = new TrcTimer(moduleName);
sm = new TrcStateMachine<>(moduleName);
sm.start(State.NEAR_START);
} //CmdAuto100
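/**
 * Called periodically by the autonomous opmode loop; advances the state machine one step
 * per call and returns true once the DONE state has been reached. A caller would typically
 * keep invoking this every loop iteration until it returns true (illustrative description,
 * the exact opmode wiring is framework specific).
 */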
@Override
public boolean cmdPeriodic(double elapsedTime)
{
boolean done = !sm.isEnabled();
// Print debug info.
State state = sm.getState();
if (state != State.NEAR_START)
{
robot.dashboard.displayPrintf(1, "State: %s", state != null? state.toString(): "Disabled");
}
if (Robot.USE_LINE_DETECTOR)
{
if (Robot.USE_ODS_LINE_DETECTOR)
{
robot.dashboard.displayPrintf(2, "LineDetect: light=%.3f",
robot.odsLineDetector.sensor.getRawLightDetected());
}
else
{
robot.dashboard.displayPrintf(2, "LineDetect: light=%d", robot.lineDetectionSensor.sensor.alpha());
}
}
if (Robot.USE_COLOR_SENSOR)
{
robot.dashboard.displayPrintf(3, "RGBAH: [%d,%d,%d,%d,%x]",
robot.beaconColorSensor.sensor.red(), robot.beaconColorSensor.sensor.green(),
robot.beaconColorSensor.sensor.blue(), robot.beaconColorSensor.sensor.alpha(),
robot.beaconColorSensor.sensor.argb());
}
if (robot.pidDrive.isActive())
{
robot.encoderXPidCtrl.displayPidInfo(4);
robot.encoderYPidCtrl.displayPidInfo(6);
robot.gyroPidCtrl.displayPidInfo(8);
}
if (sm.isReady())
{
state = sm.getState();
State nextState;
double xDistance, yDistance;
int redValue = 0, greenValue = 0, blueValue = 0;
boolean isRed = false, isBlue = false;
switch (state)
{
case NEAR_START:
// The robot starts near the corner vortex. nearStartCmd is a common segment shared by
// several autonomous strategies. This state will run that segment until its completion when
// it returns true, then we move on to the next state. nearStartCmd would shoot a specified
// number of particles and displace the Cap Ball.
if (nearStartCmd.cmdPeriodic(elapsedTime))
{
sm.setState(State.TURN_TO_WALL);
}
break;
case TURN_TO_WALL:
// After displacing the Cap Ball, turn the robot towards the wall.
xDistance = yDistance = 0.0;
robot.targetHeading = alliance == FtcAuto.Alliance.RED_ALLIANCE? -90.0: 90.0;
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.GOTO_WALL);
break;
case GOTO_WALL:
// Go forward to the wall.
xDistance = 0.0;
yDistance = shortRun? 22.0: 16.0;
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.PARALLEL_WALL);
break;
case PARALLEL_WALL:
// Turn the robot to parallel the wall.
xDistance = yDistance = 0.0;
robot.targetHeading = alliance == FtcAuto.Alliance.RED_ALLIANCE? 0.0: 180.0;
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.ALIGN_WALL1);
break;
case ALIGN_WALL1:
// Strafe left with full power to hit the wall. If the robot was misaligned with the wall, this
// will align it again. We use the range sensor as a stopping condition. In addition, we also
// set a timeout to make sure we will move on even if the range sensor has malfunctioned.
robot.driveBase.mecanumDrive_Cartesian(-1.0, 0.0, 0.0);
timer.set(1.0, event);
sm.setState(State.ALIGN_WALL2);
break;
case ALIGN_WALL2:
// Continue to strafe left until either the range sensor says we are less than 3.5 inches away
// from the wall or the timeout has expired.
if (robot.getInput(robot.rangePidCtrl) < 3.5 || event.isSignaled())
{
sm.setState(State.ALIGN_WALL3);
}
break;
case ALIGN_WALL3:
// We are done aligning with the wall. Stop the robot and delay just a little to allow the gyro
// to settle.
robot.driveBase.mecanumDrive_Cartesian(0.0, 0.0, 0.0);
// if (Robot.USE_COLOR_SENSOR)
// robot.beaconColorSensor.setDeviceEnabled(true);
timer.set(0.1, event);
sm.waitForSingleEvent(event, State.FIND_LINE);
break;
case FIND_LINE:
// We should be aligned with the wall, maintain the current heading when running along the wall
// looking for the white line. Limit the robot to only 12% power so we won't miss the line or
// overshoot too much when detecting the line.
xDistance = 0.0;
if (usePath1)
{
// Path1 always back up.
yDistance = -30.0;
}
else
{
yDistance = alliance == FtcAuto.Alliance.RED_ALLIANCE && remainingBeaconButtons == 2 ||
alliance == FtcAuto.Alliance.BLUE_ALLIANCE && remainingBeaconButtons == 1?
30.0: -30.0;
}
robot.targetHeading = robot.driveBase.getHeading();
if (Robot.USE_LINE_DETECTOR)
{
if (Robot.USE_ODS_LINE_DETECTOR)
{
robot.odsTrigger.setEnabled(true);
}
else
{
robot.colorTrigger.setEnabled(true);
}
}
robot.encoderYPidCtrl.setOutputRange(-0.12, 0.12);
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.PUSH_BUTTON1);
break;
case PUSH_BUTTON1:
// We found the line. Let's check the beacon color and press the appropriate button.
robot.encoderYPidCtrl.setOutputRange(-1.0, 1.0);
if (Robot.USE_LINE_DETECTOR)
{
if (Robot.USE_ODS_LINE_DETECTOR)
{
robot.odsTrigger.setEnabled(false);
}
else
{
robot.colorTrigger.setEnabled(false);
}
}
if (Robot.USE_COLOR_SENSOR)
{
redValue = robot.beaconColorSensor.sensor.red();
greenValue = robot.beaconColorSensor.sensor.green();
blueValue = robot.beaconColorSensor.sensor.blue();
}
isRed = redValue > blueValue && redValue > greenValue;
isBlue = blueValue > redValue && blueValue > greenValue;
robot.dashboard.displayPrintf(
14, "[%d,%d,%d]isRed=%s,isBlue=%s",
redValue, greenValue, blueValue, Boolean.toString(isRed), Boolean.toString(isBlue));
tracer.traceInfo(
state.toString(), "[%d,%d,%d]isRed=%s,isBlue=%s",
redValue, greenValue, blueValue, Boolean.toString(isRed), Boolean.toString(isBlue));
// Determine which button to push and do it.
if (alliance == FtcAuto.Alliance.RED_ALLIANCE && isRed ||
alliance == FtcAuto.Alliance.BLUE_ALLIANCE && isBlue)
{
// It takes some time for the button pusher to extend; set a timer to wait for it.
// robot.beaconColorSensor.setDeviceEnabled(false);
robot.leftButtonPusher.setPosition(RobotInfo.BUTTON_PUSHER_EXTEND_POSITION);
leftPusherExtended = true;
timer.set(1.5, event);
sm.waitForSingleEvent(event, State.RETRACT);
}
else if (alliance == FtcAuto.Alliance.RED_ALLIANCE && isBlue ||
alliance == FtcAuto.Alliance.BLUE_ALLIANCE && isRed)
{
// Since the color sensor was detecting a color different from our alliance color, we don't
// have to set a timer and blindly wait. We could just wait for the color change. Once the
// color has changed to our alliance color, we could abort the button pusher and move on to
// the next state. That will save some time. However, we are still setting the timeout as a
// safety measure. In case the color doesn't change, 1.5 seconds is all we can afford to
// waste.
robot.rightButtonPusher.setPosition(RobotInfo.BUTTON_PUSHER_EXTEND_POSITION);
rightPusherExtended = true;
timer.set(1.5, event);
sm.setState(State.PUSH_BUTTON2);
}
else
{
// Cannot determine the color, skip it.
sm.setState(State.NEXT_BEACON);
}
robot.dashboard.displayPrintf(
15, "leftPusher=%s, rightPusher=%s",
Boolean.toString(leftPusherExtended), Boolean.toString(rightPusherExtended));
break;
case PUSH_BUTTON2:
// We will only come to this state if the color sensor detected a color different from our
// alliance color. We keep checking the color of the beacon. If it has changed to our color
// or time has expired, move on to the next state.
if (Robot.USE_COLOR_SENSOR)
{
redValue = robot.beaconColorSensor.sensor.red();
greenValue = robot.beaconColorSensor.sensor.green();
blueValue = robot.beaconColorSensor.sensor.blue();
isRed = redValue > blueValue && redValue > greenValue;
isBlue = blueValue > redValue && blueValue > greenValue;
}
boolean timedOut = event.isSignaled();
tracer.traceInfo(
state.toString(), "[%d,%d,%d]isRed=%s,isBlue=%s,expired=%s",
redValue, greenValue, blueValue, Boolean.toString(isRed), Boolean.toString(isBlue),
Boolean.toString(timedOut));
if (timedOut ||
alliance == FtcAuto.Alliance.RED_ALLIANCE && isRed ||
alliance == FtcAuto.Alliance.BLUE_ALLIANCE && isBlue)
{
// robot.beaconColorSensor.setDeviceEnabled(false);
sm.setState(State.RETRACT);
}
break;
case RETRACT:
// We need to retract the pusher a little bit before we start moving so it doesn't get caught
// on the beacon.
if (leftPusherExtended)
{
robot.leftButtonPusher.setPosition(RobotInfo.BUTTON_PUSHER_RETRACT_POSITION);
leftPusherExtended = false;
}
if (rightPusherExtended)
{
robot.rightButtonPusher.setPosition(RobotInfo.BUTTON_PUSHER_RETRACT_POSITION);
rightPusherExtended = false;
}
timer.set(0.2, event);
sm.waitForSingleEvent(event, State.NEXT_BEACON);
break;
case NEXT_BEACON:
// Determine if we are done pushing beacon buttons or if we have another beacon button to push.
if (remainingBeaconButtons == 2)
{
// We have another button to push, go to the next beacon.
xDistance = 0.0;
if (usePath1)
{
yDistance = 57.0;
}
else
{
yDistance = alliance == FtcAuto.Alliance.RED_ALLIANCE? -40.0: 40.0;
}
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
remainingBeaconButtons--;
sm.waitForSingleEvent(event, State.ALIGN_WALL1);
}
else if (parkOption == FtcAuto.ParkOption.DO_NOTHING)
{
// We are done pushing beacon buttons and the ParkOption is none, so we are done.
sm.setState(State.DONE);
}
else
{
// We are going to park somewhere. Let's get off the wall so we can run to our parking place.
if (alliance == FtcAuto.Alliance.RED_ALLIANCE)
{
xDistance = beaconButtons == 2 && usePath1 ? 12.0 : 40.0;
}
else
{
xDistance = 24.0;
}
yDistance = 0.0;
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.GOTO_VORTEX);
}
break;
case GOTO_VORTEX:
// Go towards the vortexes.
xDistance = yDistance = 0.0;
nextState = State.TURN_TO_VORTEX;
if (usePath1)
{
if (beaconButtons == 2)
{
if (alliance == FtcAuto.Alliance.BLUE_ALLIANCE)
{
if (parkOption == FtcAuto.ParkOption.PARK_CORNER)
{
yDistance = 36.0;
nextState = State.DONE;
}
}
else if (parkOption == FtcAuto.ParkOption.PARK_CENTER)
{
robot.targetHeading = -45.0;
nextState = State.PARK_VORTEX;
}
else
{
yDistance = -84.0;
nextState = State.DONE;
}
}
else
{
yDistance = alliance == FtcAuto.Alliance.RED_ALLIANCE ? -40.0 : 60.0;
}
}
else if (alliance == FtcAuto.Alliance.RED_ALLIANCE)
{
yDistance = -40.0;
}
else if (parkOption == FtcAuto.ParkOption.PARK_CORNER)
{
yDistance = 36.0;
nextState = State.DONE;
}
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, nextState);
break;
case TURN_TO_VORTEX:
// Turn the robot to face the vortexes to either the front or the back of the robot.
xDistance = yDistance = 0.0;
robot.targetHeading = alliance == FtcAuto.Alliance.RED_ALLIANCE? 45.0: 120.0;
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.PARK_VORTEX);
break;
case PARK_VORTEX:
// Go forward or backward to the selected vortex.
xDistance = 0.0;
if (alliance == FtcAuto.Alliance.BLUE_ALLIANCE)
{
yDistance = parkOption == FtcAuto.ParkOption.PARK_CENTER? -36.0: 40.0;
}
else if (usePath1 && beaconButtons == 2 && parkOption == FtcAuto.ParkOption.PARK_CENTER)
{
yDistance = -48.0;
}
else
{
yDistance = parkOption == FtcAuto.ParkOption.PARK_CENTER? 36.0: -36.0;
}
robot.setPIDDriveTarget(xDistance, yDistance, robot.targetHeading, false, event);
sm.waitForSingleEvent(event, State.DONE);
break;
case DONE:
default:
// We are done.
done = true;
sm.stop();
break;
}
if (state != State.NEAR_START)
{
robot.traceStateInfo(elapsedTime, state.toString());
}
}
if (robot.pidDrive.isActive() && (debugXPid || debugYPid || debugTurnPid))
{
tracer.traceInfo("Battery", "Voltage=%5.2fV (%5.2fV)",
robot.battery.getCurrentVoltage(), robot.battery.getLowestVoltage());
if (debugXPid)
{
robot.encoderXPidCtrl.printPidInfo(tracer);
}
if (debugYPid)
{
robot.encoderYPidCtrl.printPidInfo(tracer);
}
if (debugTurnPid)
{
robot.gyroPidCtrl.printPidInfo(tracer);
}
}
return done;
} //cmdPeriodic
} //class CmdAuto100
|
package com.ecyrd.jspwiki.plugin;
import java.util.Properties;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import com.ecyrd.jspwiki.TestEngine;
public class GroupsTest extends TestCase
{
Properties props = new Properties();
TestEngine testEngine;
public GroupsTest( String s )
{
super( s );
}
public void setUp()
throws Exception
{
props.load( TestEngine.findTestProperties() );
testEngine = new TestEngine(props);
}
public void tearDown() throws Exception
{
super.tearDown();
testEngine.deletePage( "Test" );
}
public void testTag() throws Exception
{
String src="[{Groups}]";
testEngine.saveText( "Test", src );
String res = testEngine.getHTML( "Test" );
assertEquals( "<a href=\"/Group.jsp?group=Admin\">Admin</a>, "
+ "<a href=\"/Group.jsp?group=Art\">Art</a>, "
+ "<a href=\"/Group.jsp?group=Literature\">Literature</a>, "
+ "<a href=\"/Group.jsp?group=TV\">TV</a>\n"
, res );
}
public static Test suite()
{
return new TestSuite( GroupsTest.class );
}
}
|
package bt.dht;
import bt.BtException;
import bt.metainfo.Torrent;
import bt.net.InetPeer;
import bt.net.Peer;
import bt.service.IRuntimeLifecycleBinder;
import bt.service.LifecycleBinding;
import com.google.common.io.Files;
import lbms.plugins.mldht.DHTConfiguration;
import lbms.plugins.mldht.kad.DHT;
import lbms.plugins.mldht.kad.DHT.DHTtype;
import lbms.plugins.mldht.kad.DHT.LogLevel;
import lbms.plugins.mldht.kad.DHTLogger;
import lbms.plugins.mldht.kad.tasks.PeerLookupTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.SocketException;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
class MldhtService implements DHTService {
private static final Logger LOGGER = LoggerFactory.getLogger(MldhtService.class);
private static final DHTLogger DHT_LOGGER = createLogger();
static {
try {
DHT.setLogger(DHT_LOGGER);
} catch (Throwable e) {
e.printStackTrace();
}
}
private static DHTLogger createLogger() {
return new DHTLogger() {
@Override
public void log(String message, LogLevel level) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("<" + level.name().toUpperCase() + "> " + message);
}
}
@Override
public void log(Throwable error, LogLevel level) {
LOGGER.error("Unexpected DHT error", error);
}
};
}
private DHTConfiguration config;
private DHT dht;
private boolean shouldBootstrap;
public MldhtService(IRuntimeLifecycleBinder lifecycleBinder, DHTConfig config) {
this.dht = new DHT(config.shouldUseIPv6()? DHTtype.IPV6_DHT : DHTtype.IPV4_DHT);
this.config = toMldhtConfig(config);
this.shouldBootstrap = config.shouldUseRouterBootstrap();
lifecycleBinder.onStartup(LifecycleBinding.bind(this::start).description("Initialize DHT facilities").async().build());
lifecycleBinder.onShutdown("Shutdown DHT facilities", this::shutdown);
}
private DHTConfiguration toMldhtConfig(DHTConfig config) {
return new DHTConfiguration() {
@Override
public boolean isPersistingID() {
return false;
}
@Override
public Path getStoragePath() {
return Files.createTempDir().toPath();
}
@Override
public int getListeningPort() {
return config.getListeningPort();
}
@Override
public boolean noRouterBootstrap() {
return true;
}
@Override
public boolean allowMultiHoming() {
return false;
}
};
}
private void start() {
if (!dht.isRunning()) {
try {
dht.start(config);
if (shouldBootstrap) {
bootstrap();
}
} catch (SocketException e) {
throw new BtException("Failed to start DHT", e);
}
}
}
// TODO: move the list of bootstrap nodes to config
private void bootstrap() {
dht.addDHTNode("router.bittorrent.com", 6881);
dht.addDHTNode("dht.transmissionbt.com", 6881);
dht.addDHTNode("router.utorrent.com", 6881);
}
private void shutdown() {
dht.stop();
}
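/**
 * Starts a DHT peer lookup for the given torrent and exposes discovered peers as a lazy
 * stream backed by a blocking queue; the stream ends once the lookup task has finished.
 * Hypothetical caller (names assumed, not part of this class):
 * <pre>
 *   dhtService.getPeers(torrent).forEach(peer -> openConnection(peer));
 * </pre>
 */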
@Override
public Stream<Peer> getPeers(Torrent torrent) {
PeerLookupTask lookup;
BlockingQueue<Peer> peers;
try {
dht.getServerManager().awaitActiveServer().get();
lookup = dht.createPeerLookup(torrent.getTorrentId().getBytes());
peers = new LinkedBlockingQueue<>();
lookup.setResultHandler((k, p) -> {
Peer peer = new InetPeer(p.getInetAddress(), p.getPort());
peers.add(peer);
});
dht.getTaskManager().addTask(lookup);
} catch (Throwable e) {
LOGGER.error(String.format("Unexpected error in peer lookup: %s. See DHT log file for diagnostic information.",
e.getMessage()), e);
BtException btex = new BtException(String.format("Unexpected error in peer lookup: %s. Diagnostics:\n%s",
e.getMessage(), getDiagnostics()), e);
DHT_LOGGER.log(btex, LogLevel.Error);
throw btex;
}
int characteristics = Spliterator.NONNULL;
return StreamSupport.stream(() -> Spliterators.spliteratorUnknownSize(new Iterator<Peer>() {
@Override
public boolean hasNext() {
return !lookup.isFinished();
}
@Override
public Peer next() {
try {
return peers.take();
} catch (InterruptedException e) {
throw new RuntimeException("Unexpectedly interrupted while waiting for next element", e);
}
}
}, characteristics), characteristics, false);
}
@Override
public void addNode(Peer node) {
dht.addDHTNode(node.getInetAddress().getHostAddress(), node.getPort());
}
// TODO: add node by hostname/ipaddr and port ?
private String getDiagnostics() {
StringWriter sw = new StringWriter();
dht.printDiagnostics(new PrintWriter(sw));
return sw.toString();
}
}
|
package com.robitdroid;
public final class R {
public static final class attr {
}
public static final class drawable {
public static final int icon = 0x7f020000;
}
public static final class id {
public static final int btn_no = 0x7f050003;
public static final int btn_yes = 0x7f050002;
public static final int final_number = 0x7f050000;
public static final int gridview = 0x7f050001;
}
public static final class layout {
public static final int final_screen = 0x7f030000;
public static final int game_screen = 0x7f030001;
public static final int main = 0x7f030002;
}
public static final class string {
public static final int app_name = 0x7f040000;
public static final int no = 0x7f040003;
public static final int self_start = 0x7f040001;
public static final int yes = 0x7f040002;
}
}
|
package ifc.beans;
import java.io.PrintWriter;
import java.util.HashSet;
import java.util.Set;
import java.util.StringTokenizer;
import lib.MultiMethodTest;
import lib.Status;
import util.ValueChanger;
import com.sun.star.beans.Property;
import com.sun.star.beans.PropertyAttribute;
import com.sun.star.beans.PropertyChangeEvent;
import com.sun.star.beans.XMultiPropertySet;
import com.sun.star.beans.XPropertiesChangeListener;
import com.sun.star.beans.XPropertySetInfo;
import com.sun.star.lang.EventObject;
/**
* Testing <code>com.sun.star.beans.XMultiPropertySet</code>
* interface methods :
* <ul>
* <li><code> getPropertySetInfo()</code></li>
* <li><code> setPropertyValues()</code></li>
* <li><code> getPropertyValues()</code></li>
* <li><code> addPropertiesChangeListener()</code></li>
* <li><code> removePropertiesChangeListener()</code></li>
* <li><code> firePropertiesChangeEvent()</code></li>
* </ul> <p>
*
* Required relations :
* <ul>
* <li> <code>'XMultiPropertySet.ExcludeProps'</code>
* <b>(optional) </b> : java.util.Set.
* Holds property names which must be skipped from testing for
* some reason (for example, a property accepts only a restricted
* set of values).
* </li>
* </ul> <p>
*
* Test is <b> NOT </b> multithread compliant. <p>
* After test completion object environment has to be recreated.
* @see com.sun.star.beans.XMultiPropertySet
*/
public class _XMultiPropertySet extends MultiMethodTest {
public XMultiPropertySet oObj = null;
private boolean propertiesChanged = false;
private XPropertySetInfo propertySetInfo = null;
private String [] testPropsNames = null;
private int testPropsAmount = 0;
private PrintWriter _log = null;
private Object[] values = null;
private Set exclProps = null;
/**
* Initializes some fields.
*/
public void before() {
_log = log;
exclProps = (Set) tEnv.getObjRelation("XMultiPropertySet.ExcludeProps");
if (exclProps == null) exclProps = new HashSet(0);
}
/**
* Listener implementation which sets a flag when
* listener was called.
*/
public class MyChangeListener implements XPropertiesChangeListener {
public void propertiesChange(PropertyChangeEvent[] e) {
//_log.println("Listener was called");
propertiesChanged = true;
}
public void disposing (EventObject obj) {}
};
private XPropertiesChangeListener PClistener =
new MyChangeListener();
/**
* Test calls the method and checks return value.
* <code>PropertySetInfo</code> object is stored<p>
* Has <b> OK </b> status if the method returns a non-null value
* and no exceptions were thrown. <p>
*/
public void _getPropertySetInfo() {
boolean bResult = true;
propertySetInfo = oObj.getPropertySetInfo();
if (propertySetInfo == null) {
log.println("getPropertySetInfo() method returned null");
bResult = false;
}
tRes.tested("getPropertySetInfo()", bResult) ;
}
/**
* Test collects all property names and retrieves their values,
* then checks the value returned. Finally it also collects
* bound properties for other method tests.<p>
* Has <b> OK </b> status if the method returns a non-null value
* and no exceptions were thrown. <p>
* The following method tests are to be completed successfully before :
* <ul>
* <li> <code> getPropertySetInfo() </code> : to have a list
* of properties.</li>
* </ul>
*/
public void _getPropertyValues() {
requiredMethod("getPropertySetInfo()");
boolean bResult = true;
Property[] properties = propertySetInfo.getProperties();
String[] allnames = new String[properties.length];
for (int i = 0; i < properties.length; i++) {
allnames[i] = properties[i].Name;
}
values = oObj.getPropertyValues(allnames);
bResult &= values!=null;
tRes.tested("getPropertyValues()", bResult) ;
getPropsToTest(properties);
}
/**
* Test adds a listener for all bound properties, then each property
* is changed and the listener call is checked. <p>
* Has <b> OK </b> status if on each property change the listener was
* called and no exceptions were thrown. <p>
* The following method tests are to be completed successfully before :
* <ul>
* <li> <code> getPropertyValues() </code> : to collect bound
* properties.</li>
* </ul>
*/
public void _addPropertiesChangeListener() {
requiredMethod("getPropertyValues()");
boolean result = true ;
// Creating listener
oObj.addPropertiesChangeListener(testPropsNames, PClistener);
if ((testPropsAmount==1) && (testPropsNames[0].equals("none"))) {
testPropsAmount = 0;
}
// Change one of the properties to be sure that this event was caught.
//Random rnd = new Random();
//int idx = rnd.nextInt(testPropsAmount);
for (int i=0; i<testPropsAmount;i++) {
log.print("Trying to change property " + testPropsNames[i]);
try {
Object[] gValues = oObj.getPropertyValues(testPropsNames);
Object newValue = ValueChanger.changePValue(gValues[i]);
gValues[i] = newValue;
propertiesChanged = false;
oObj.setPropertyValues(testPropsNames, gValues);
waitAMoment() ;
result &= propertiesChanged ;
log.println(" ... done");
} catch (com.sun.star.beans.PropertyVetoException e) {
log.println("Exception occured while trying to change "+
"property '"+testPropsNames[i] + "' :" + e);
e.printStackTrace(log);
} catch (com.sun.star.lang.IllegalArgumentException e) {
log.println("Exception occured while trying to change "+
"property '"+testPropsNames[i] + "' :" + e);
e.printStackTrace(log);
} catch (com.sun.star.lang.WrappedTargetException e) {
log.println("Exception occured while trying to change "+
"property '"+testPropsNames[i] + "' :" + e);
e.printStackTrace(log);
} // end of try-catch
}
if (testPropsAmount == 0) {
log.println("all properties are read only");
tRes.tested("addPropertiesChangeListener()", Status.skipped(true));
} else {
tRes.tested("addPropertiesChangeListener()", propertiesChanged);
}
}
/**
* Calls method and check if listener was called. <p>
* Has <b> OK </b> status if the listener was
* called and no exceptions were thrown. <p>
* The following method tests are to be completed successfully before :
* <ul>
* <li> <code> addPropertiesChangeListener() </code> : listener to
* be added.</li>
* </ul>
*/
public void _firePropertiesChangeEvent() {
requiredMethod("addPropertiesChangeListener()");
propertiesChanged = false ;
oObj.firePropertiesChangeEvent(testPropsNames, PClistener);
waitAMoment() ;
tRes.tested("firePropertiesChangeEvent()", propertiesChanged);
}
/**
* Removes listener added before. <p>
* Has <b> OK </b> status no exceptions were thrown. <p>
* The following method tests are to be completed successfully before :
* <ul>
* <li> <code> addPropertiesChangeListener() </code> : listener to
* be added.</li>
* </ul>
*/
public void _removePropertiesChangeListener() {
requiredMethod("firePropertiesChangeEvent()");
boolean bResult = true;
oObj.removePropertiesChangeListener(PClistener);
tRes.tested("removePropertiesChangeListener()", bResult);
}
/**
* Changes all properties: sets them to new values, gets them back
* and checks that their values were changed properly. <p>
* Has <b> OK </b> status if all properties properly changed
* and no exceptions were thrown. <p>
* The following method tests are to be completed successfully before :
* <ul>
* <li> <code> getPropertyValues() </code> : to collect bound
* properties.</li>
* </ul>
*/
public void _setPropertyValues() {
requiredMethod("getPropertyValues()");
boolean bResult = true;
if ((testPropsNames.length==1)&&(testPropsNames[0].equals("none"))) {
log.println("all properties are readOnly");
tRes.tested("setPropertyValues()",Status.skipped(true));
return;
}
log.println("Changing all properties");
Object[] gValues = oObj.getPropertyValues(testPropsNames);
for (int i=0; i<testPropsAmount;i++) {
Object oldValue = gValues[i];
Object newValue = ValueChanger.changePValue(oldValue);
gValues[i] = newValue;
}
try {
oObj.setPropertyValues(testPropsNames, gValues);
Object[] newValues = oObj.getPropertyValues(testPropsNames);
for (int i=0; i<testPropsAmount;i++) {
if (!newValues[i].equals(gValues[i])) {
bResult = false;
}
}
} catch (com.sun.star.beans.PropertyVetoException e) {
log.println("Exception occured while setting properties");
e.printStackTrace(log);
bResult = false;
} catch (com.sun.star.lang.IllegalArgumentException e) {
log.println("Exception occured while setting properties");
e.printStackTrace(log);
bResult = false;
} catch (com.sun.star.lang.WrappedTargetException e) {
log.println("Exception occured while setting properties");
e.printStackTrace(log);
bResult = false;
} // end of try-catch
tRes.tested("setPropertyValues()", bResult);
}
// Collect the properties to be tested: only writable, non-void, bound properties that are not excluded.
private void getPropsToTest(Property[] properties) {
String bound = "";
for (int i = 0; i < properties.length; i++) {
Property property = properties[i];
String name = property.Name;
boolean isWritable = ((property.Attributes &
PropertyAttribute.READONLY) == 0);
boolean isNotNull = ((property.Attributes &
PropertyAttribute.MAYBEVOID) == 0);
boolean isBound = ((property.Attributes &
PropertyAttribute.BOUND) != 0);
boolean isExcluded = exclProps.contains(name);
//exclude UserDefined, because we can't change XNameContainer
if (name.indexOf("UserDefined")>0 || name.indexOf("Device")>0) {
isWritable=false;
}
values = oObj.getPropertyValues(new String[]{property.Name});
boolean isVoid = util.utils.isVoid(values[0]);
if ( isWritable && isNotNull && isBound && !isExcluded && !isVoid) {
bound+=name+";";
}
} // endfor
//get an array of bound properties
if (bound.equals("")) bound = "none";
StringTokenizer ST=new StringTokenizer(bound,";");
int nr = ST.countTokens();
testPropsNames = new String[nr];
for (int i=0; i<nr; i++) testPropsNames[i] = ST.nextToken();
testPropsAmount = nr;
return;
}
/**
* Waits some time for listener to be called.
*/
private void waitAMoment() {
try {
Thread.sleep(200) ;
} catch (java.lang.InterruptedException e) {
log.println("!!! Exception while waiting !!!") ;
}
}
/*
* Disposes the test environment.
*/
protected void after() {
disposeEnvironment();
}
}
|
package org.antlr.v4.test;
import org.junit.Test;
public class TestListeners extends BaseTest {
@Test public void testBasic() throws Exception {
String grammar =
"grammar T;\n" +
"@members {\n" +
"public static class LeafListener extends TBaseListener {\n" +
" public void visitTerminal(ParseTree.TerminalNode<Token> node) {\n" +
" System.out.println(node.getSymbol().getText());\n" +
" }\n" +
" }}\n" +
"s\n" +
"@init {setBuildParseTree(true);}\n" +
"@after {" +
" System.out.println($r.ctx.toStringTree(this));" +
" ParseTreeWalker walker = new ParseTreeWalker();\n" +
" walker.walk(new LeafListener(), $r.ctx);" +
"}\n" +
" : r=a ;\n" +
"a : INT INT" +
" | ID" +
" ;\n" +
"MULT: '*' ;\n" +
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting = "(a 1 2)\n" +
"1\n" +
"2\n";
assertEquals(expecting, result);
}
@Test public void testTokenGetters() throws Exception {
String grammar =
"grammar T;\n" +
"@members {\n" +
"public static class LeafListener extends TBaseListener {\n" +
" public void exitA(TParser.AContext ctx) {\n" +
" if (ctx.getChildCount()==2) System.out.printf(\"%s %s %s\",ctx.INT(0).getSymbol().getText(),ctx.INT(1).getSymbol().getText(),ctx.INT());\n" +
" else System.out.println(ctx.ID().getSymbol());\n" +
" }\n" +
" }}\n" +
"s\n" +
"@init {setBuildParseTree(true);}\n" +
"@after {" +
" System.out.println($r.ctx.toStringTree(this));" +
" ParseTreeWalker walker = new ParseTreeWalker();\n" +
" walker.walk(new LeafListener(), $r.ctx);" +
"}\n" +
" : r=a ;\n" +
"a : INT INT" +
" | ID" +
" ;\n" +
"MULT: '*' ;\n" +
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting =
"(a 1 2)\n" +
"1 2 [1, 2]\n";
assertEquals(expecting, result);
result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abc", false);
expecting = "(a abc)\n" +
"[@0,0:2='abc',<6>,1:0]\n";
assertEquals(expecting, result);
}
@Test public void testRuleGetters() throws Exception {
String grammar =
"grammar T;\n" +
"@members {\n" +
"public static class LeafListener extends TBaseListener {\n" +
" public void exitA(TParser.AContext ctx) {\n" +
" if (ctx.getChildCount()==2) {\n" +
" System.out.printf(\"%s %s %s\",ctx.b(0).start.getText(),\n" +
" ctx.b(1).start.getText(),ctx.b().get(0).start.getText());\n" +
" }\n" +
" else System.out.println(ctx.b(0).start.getText());\n" +
" }\n" +
" }}\n" +
"s\n" +
"@init {setBuildParseTree(true);}\n" +
"@after {" +
" System.out.println($r.ctx.toStringTree(this));" +
" ParseTreeWalker walker = new ParseTreeWalker();\n" +
" walker.walk(new LeafListener(), $r.ctx);" +
"}\n" +
" : r=a ;\n" +
"a : b b" + // forces list
" | b" + // a list still
" ;\n" +
"b : ID | INT ;\n" +
"MULT: '*' ;\n" +
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting = "(a (b 1) (b 2))\n" +
"1 2 1\n";
assertEquals(expecting, result);
result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abc", false);
expecting = "(a (b abc))\n" +
"abc\n";
assertEquals(expecting, result);
}
@Test public void testLR() throws Exception {
String grammar =
"grammar T;\n" +
"@members {\n" +
"public static class LeafListener extends TBaseListener {\n" +
" public void exitE(TParser.EContext ctx) {\n" +
" if (ctx.getChildCount()==3) {\n" +
" System.out.printf(\"%s %s %s\\n\",ctx.e(0).start.getText(),\n" +
" ctx.e(1).start.getText()," +
" ctx.e().get(0).start.getText());\n" +
" }\n" +
" else System.out.println(ctx.INT().getSymbol().getText());\n" +
" }\n" +
" }" +
"}\n" +
"s\n" +
"@init {setBuildParseTree(true);}\n" +
"@after {" +
" System.out.println($r.ctx.toStringTree(this));" +
" ParseTreeWalker walker = new ParseTreeWalker();\n" +
" walker.walk(new LeafListener(), $r.ctx);" +
"}\n" +
" : r=e ;\n" +
"e : e op='*' e\n" +
" | e op='+' e\n" +
" | INT\n" +
" ;\n" +
"MULT: '*' ;\n" +
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1+2*3", false);
String expecting =
"(e (e 1) + (e (e 2) * (e 3)))\n" +
"1\n" +
"2\n" +
"3\n" +
"2 3 2\n" +
"1 2 1\n";
assertEquals(expecting, result);
}
@Test public void testLRWithLabels() throws Exception {
String grammar =
"grammar T;\n" +
"@members {\n" +
" public static class LeafListener extends TBaseListener {\n" +
" public void exitCall(TParser.CallContext ctx) {\n" +
" System.out.printf(\"%s %s\",ctx.e().start.getText(),\n" +
" ctx.eList());\n" +
" }\n" +
" public void exitInt(TParser.IntContext ctx) {\n" +
" System.out.println(ctx.INT().getSymbol().getText());\n" +
" }\n" +
" }\n" +
"}\n" +
"s\n" +
"@init {setBuildParseTree(true);}\n" +
"@after {" +
" System.out.println($r.ctx.toStringTree(this));" +
" ParseTreeWalker walker = new ParseTreeWalker();\n" +
" walker.walk(new LeafListener(), $r.ctx);" +
"}\n" +
" : r=e ;\n" +
"e : e '(' eList ')' -> Call\n" +
" | INT -> Int\n" +
" ; \n" +
"eList : e (',' e)* ;\n" +
"MULT: '*' ;\n" +
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1(2,3)", false);
String expecting =
"(e (e 1) ( (eList (e 2) , (e 3)) ))\n" +
"1\n" +
"2\n" +
"3\n" +
"1 [16 6]\n";
assertEquals(expecting, result);
}
}
|
package de.lebk.verein.payment;
import de.lebk.verein.member.Member;
public class Payment {
private Member member;
private PaymentState state;
private double amount;
public Payment(Member member, double amount) {
this.member = member;
this.amount = amount;
this.state = PaymentState.OPEN;
}
public Member getMember() {
return member;
}
public void setMember(Member member) {
this.member = member;
}
public PaymentState getState() {
return state;
}
public void setState(PaymentState state) {
this.state = state;
}
public double getAmount() {
return amount;
}
public void setAmount(double amount) {
this.amount = amount;
}
}
|
package com.exedio.dsmf;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import com.exedio.dsmf.Node.ResultSetHandler;
public final class OracleDriver extends Driver
{
private static final String SYSTEM_TABLE_PREFIX = "BIN$"; // for Oracle 10
public OracleDriver(final String schema)
{
super(schema, SYSTEM_TABLE_PREFIX);
}
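/*
 * Maps JDBC DatabaseMetaData column descriptions back to Oracle DDL type strings,
 * e.g. (illustrative values) a DECIMAL column with COLUMN_SIZE=10 and DECIMAL_DIGITS=2
 * becomes "NUMBER(10,2)"; NVARCHAR2 columns report twice the character length in
 * COLUMN_SIZE, so the size is halved.
 */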
@Override
String getColumnType(final int dataType, final ResultSet resultSet) throws SQLException
{
final int columnSize = resultSet.getInt("COLUMN_SIZE");
switch(dataType)
{
case Types.DECIMAL:
final int decimalDigits = resultSet.getInt("DECIMAL_DIGITS");
if(decimalDigits>0)
return "NUMBER("+columnSize+','+decimalDigits+')';
else
return "NUMBER("+columnSize+')';
case Types.OTHER:
{
final String typeName = resultSet.getString("TYPE_NAME");
if("NVARCHAR2".equals(typeName))
{
if((columnSize%2)!=0)
return "error: NVARCHAR2 with an odd COLUMN_SIZE: "+columnSize;
return "NVARCHAR2("+(columnSize/2)+')';
}
else if("TIMESTAMP(3)".equals(typeName))
return "TIMESTAMP(3)";
else if("NCLOB".equals(typeName))
return "NCLOB";
return "error: unknown TYPE_NAME for Types.OTHER: "+typeName;
}
case Types.VARCHAR:
return "VARCHAR2("+columnSize+" BYTE)";
case Types.TIMESTAMP:
case Types.DATE:
return "DATE";
case Types.LONGVARCHAR:
return "LONG";
case Types.BLOB:
return "BLOB";
case Types.CLOB:
return "CLOB";
default:
return null;
}
}
Constraint makeUniqueConstraint(final Table table, final String constraintName, final ArrayList<String> columns)
{
final StringBuilder bf = new StringBuilder();
bf.append('(');
boolean first = true;
for(final String column : columns)
{
if(first)
first = false;
else
bf.append(',');
bf.append(protectName(column));
}
bf.append(')');
return table.notifyExistentUniqueConstraint(constraintName, bf.toString());
}
@Override
void verify(final Schema schema)
{
super.verify(schema);
schema.querySQL("select TABLE_NAME from user_tables", new Node.ResultSetHandler()
{
public void run(final ResultSet resultSet) throws SQLException
{
while(resultSet.next())
{
final String tableName = resultSet.getString(1);
schema.notifyExistentTable(tableName);
//System.out.println("EXISTS:"+tableName);
}
}
});
schema.querySQL(
"select " +
"uc.TABLE_NAME," +
"uc.CONSTRAINT_NAME," +
"uc.CONSTRAINT_TYPE," +
"uc.SEARCH_CONDITION," +
"ucc.COLUMN_NAME " +
"from user_constraints uc " +
"left outer join user_cons_columns ucc " +
"on uc.CONSTRAINT_NAME=ucc.CONSTRAINT_NAME " +
"and uc.TABLE_NAME=ucc.TABLE_NAME " +
"order by uc.TABLE_NAME, uc.CONSTRAINT_NAME, ucc.POSITION",
new ResultSetHandler()
{
String uniqueConstraintName = null;
Table uniqueConstraintTable = null;
final ArrayList<String> uniqueColumns = new ArrayList<String>();
public void run(final ResultSet resultSet) throws SQLException
{
final HashMap<String, String> duplicateCheckConstraints = new HashMap<String, String>();
while(resultSet.next())
{
//printRow(resultSet);
final String tableName = resultSet.getString(1);
if(tableName.startsWith(SYSTEM_TABLE_PREFIX))
continue;
final String constraintName = resultSet.getString(2);
final String constraintType = resultSet.getString(3);
final Table table = schema.notifyExistentTable(tableName);
//System.out.println("tableName:"+tableName+" constraintName:"+constraintName+" constraintType:>"+constraintType+"<");
if("C".equals(constraintType))
{
final String searchCondition = resultSet.getString(4);
//System.out.println("searchCondition:>"+searchCondition+"<");
final String duplicateCondition =
duplicateCheckConstraints.put(constraintName, searchCondition);
if(duplicateCondition!=null)
{
System.out.println(
"mysterious duplicate check constraint >" + constraintName +
"< with " +(searchCondition.equals(duplicateCondition)
? ("equal condition >" + searchCondition + '<')
: ("different conditions >" + searchCondition + "< and >" + duplicateCondition + '<')));
continue;
}
table.notifyExistentCheckConstraint(constraintName, searchCondition);
}
else if("P".equals(constraintType))
table.notifyExistentPrimaryKeyConstraint(constraintName);
else if("R".equals(constraintType))
table.notifyExistentForeignKeyConstraint(constraintName);
else if("U".equals(constraintType))
{
final String columnName = resultSet.getString(5);
if(uniqueConstraintName==null)
{
uniqueConstraintName = constraintName;
uniqueConstraintTable = table;
uniqueColumns.add(columnName);
}
else if(uniqueConstraintName.equals(constraintName) && uniqueConstraintTable==table)
uniqueColumns.add(columnName);
else
{
makeUniqueConstraint(uniqueConstraintTable, uniqueConstraintName, uniqueColumns);
uniqueConstraintName = constraintName;
uniqueConstraintTable = table;
uniqueColumns.clear();
uniqueColumns.add(columnName);
}
}
else
throw new RuntimeException(constraintType+'-'+constraintName);
//System.out.println("EXISTS:"+tableName);
}
if(uniqueConstraintName!=null)
makeUniqueConstraint(uniqueConstraintTable, uniqueConstraintName, uniqueColumns);
}
});
}
@Override
public String renameColumn(final String tableName, final String oldColumnName, final String newColumnName, final String columnType)
{
final StringBuilder bf = new StringBuilder();
bf.append("alter table ").
append(tableName).
append(" rename column ").
append(oldColumnName).
append(" to ").
append(newColumnName);
return bf.toString();
}
@Override
public String createColumn(final String tableName, final String columnName, final String columnType)
{
final StringBuilder bf = new StringBuilder();
bf.append("alter table ").
append(tableName).
append(" add (").
append(columnName).
append(' ').
append(columnType).
append(')');
return bf.toString();
}
@Override
public String modifyColumn(final String tableName, final String columnName, final String newColumnType)
{
final StringBuilder bf = new StringBuilder();
bf.append("alter table ").
append(tableName).
append(" modify ").
append(columnName).
append(' ').
append(newColumnType);
return bf.toString();
}
}
|
package net.sf.cglib;
import java.lang.reflect.*;
import net.sf.cglib.util.*;
/**
* Classes generated by Enhancer pass this object to the
* registered MethodInterceptors when an intercepted method is invoked. It can
* be used to either invoke the original method, or call the same method on a different
* object of the same type.
* @see Enhancer
* @see MethodInterceptor
* @version $Id: MethodProxy.java,v 1.22 2003-06-24 21:33:11 herbyderby Exp $
*/
abstract public class MethodProxy {
private static final FactoryCache cache = new FactoryCache(MethodProxy.class);
private static final Constructor GENERATOR =
ReflectUtils.findConstructor("MethodProxy$Generator(Method, Method)");
private static final MethodProxyKey KEY_FACTORY =
(MethodProxyKey)KeyFactory.create(MethodProxyKey.class, null);
private static final Method INVOKE_SUPER =
ReflectUtils.findMethod("MethodProxy.invokeSuper(Object, Object[])");
private static final Method INVOKE =
ReflectUtils.findMethod("MethodProxy.invoke(Object, Object[])");
interface MethodProxyKey {
Object newInstance(Method m1, Method m2);
}
/**
* Invoke the original (super) method on the specified object.
* @param obj the enhanced object, must be the object passed as the first
* argument to the MethodInterceptor
* @param args the arguments passed to the intercepted method; you may substitute a different
* argument array as long as the types are compatible
* @see MethodInterceptor#intercept
*/
abstract public Object invokeSuper(Object obj, Object[] args) throws Throwable;
/**
* Invoke the original method, on a different object of the same type.
* @param obj the compatible object; recursion will result if you use the object passed as the first
* argument to the MethodInterceptor (usually not what you want)
* @param args the arguments passed to the intercepted method; you may substitute a different
* argument array as long as the types are compatible
* @see MethodInterceptor#intercept
*/
abstract public Object invoke(Object obj, Object[] args) throws Throwable;
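// Illustrative usage sketch (not part of this class; names and signature order are assumptions
// based on the @see references above): inside a MethodInterceptor, the proxy is typically used
// to forward to the original implementation.
//
//   public Object intercept(Object obj, java.lang.reflect.Method method, Object[] args,
//                           MethodProxy proxy) throws Throwable {
//       // ... pre-processing ...
//       Object result = proxy.invokeSuper(obj, args); // call the non-intercepted super method
//       // ... post-processing ...
//       return result;
//   }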
protected MethodProxy() { }
/**
* Create a new MethodProxy. Used internally by Enhancer.
*/
public static MethodProxy create(Method method, Method superMethod) {
return create(method, superMethod, null);
}
/**
* Create a new MethodProxy. Used internally by Enhancer.
*/
public static MethodProxy create(Method method, Method superMethod, ClassLoader loader) {
if (loader == null) {
loader = superMethod.getDeclaringClass().getClassLoader();
}
Object key = KEY_FACTORY.newInstance(method, superMethod);
return (MethodProxy)cache.getFactory(loader, key, GENERATOR, method, superMethod);
}
static class Generator extends CodeGenerator {
private Method method;
private Method superMethod;
public Generator(Method method, Method superMethod) {
setSuperclass(MethodProxy.class);
setNamePrefix(superMethod.getDeclaringClass().getName());
this.superMethod = superMethod;
this.method = method;
}
public void generate() {
null_constructor();
generate(MethodProxy.INVOKE, method);
generate(MethodProxy.INVOKE_SUPER, superMethod);
}
private void generate(Method proxyMethod, Method method) {
begin_method(proxyMethod);
if (Modifier.isProtected(method.getModifiers())) {
throw_exception(IllegalAccessException.class, "protected method: " + method);
} else {
load_arg(0);
checkcast(method.getDeclaringClass());
Class[] types = method.getParameterTypes();
for (int i = 0; i < types.length; i++) {
load_arg(1);
push(i);
aaload();
unbox(types[i]);
}
this.invoke(method);
box(method.getReturnType());
}
return_value();
end_method();
}
}
}
|
package ca.sfu.server;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Font;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import javax.swing.JEditorPane;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import javax.swing.JMenuItem;
import javax.swing.JOptionPane;
import javax.swing.JToolBar;
import javax.swing.Timer;
import javax.swing.event.HyperlinkEvent;
import javax.swing.event.HyperlinkListener;
import ca.sfu.cmpt431.facility.Board;
import ca.sfu.cmpt431.facility.BoardOperation;
public class MainFrame extends JFrame {
private static final long serialVersionUID = 1L;
AutomataPanel automataPanel;
Board board;
public MainFrame(Board b, int height, int width)
{
super();
setSize(width, height + 50);
setJMenuBar(createMenuBar());
setBackground(new Color(0xeb, 0xeb, 0xeb));
board = b;
BorderLayout layout = new BorderLayout();
automataPanel = new AutomataPanel(height, width);
automataPanel.setBoard(board);
automataPanel.setBackground(new Color(0xeb, 0xeb, 0xeb));
// add(createToolbar(), BorderLayout.NORTH);
add(automataPanel, BorderLayout.CENTER);
setLayout(layout);
setVisible(true);
setTitle("Automata");
setDefaultCloseOperation(EXIT_ON_CLOSE);
}
/**
* Default constructor
* Mainly used for unit testing
*/
public MainFrame()
{
super();
setSize(800, 850);
setJMenuBar(createMenuBar());
setBackground(new Color(0xeb, 0xeb, 0xeb));
board = new Board(800, 800);
BoardOperation.Randomize(board, 0.1);
automataPanel = new AutomataPanel(800, 800);
automataPanel.setCellSize(3);
automataPanel.setBoard(board);
automataPanel.setBackground(new Color(0xeb, 0xeb, 0xeb));
setContentPane(automataPanel);
setVisible(true);
Timer timer = new Timer(0, new ActionListener()
{
@Override
public void actionPerformed(ActionEvent arg0) {
automataPanel.setBoard(board);
BoardOperation.NextMoment(board, null, null, null, null, false, false, false, false);
automataPanel.repaint();
}
});
timer.setDelay(50);
timer.start();
setTitle("Automata");
setDefaultCloseOperation(EXIT_ON_CLOSE);
}
/**
* Create JToolbar for the whole program
*/
@SuppressWarnings("unused")
private JToolBar createToolbar()
{
return null;
}
/**
* Create JMenuBar for the whole program
*/
private JMenuBar createMenuBar()
{
/* Menu list */
JMenuBar menuBar = new JMenuBar();
JMenu fileMenu = new JMenu("File");
JMenu windowMenu = new JMenu("Window");
/* Menu Item */
JMenuItem about = new JMenuItem("About");
JMenuItem exit = new JMenuItem("Exit");
JMenuItem zoomIn = new JMenuItem("Zoom In");
JMenuItem zoomOut = new JMenuItem("Zoom Out");
JMenuItem zoomPointer = new JMenuItem("Normal");
/* Action Listeners */
zoomIn.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
automataPanel.setZoomIn();
}});
zoomOut.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
automataPanel.setZoomOut();
}});
zoomPointer.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
automataPanel.setNormal();
}});
about.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JLabel label = new JLabel();
Font font = label.getFont();
// create some css from the label's font
StringBuffer style = new StringBuffer("font-family:" + font.getFamily() + ";");
style.append("font-weight:" + (font.isBold() ? "bold" : "normal") + ";");
style.append("font-size:" + font.getSize() + "pt;");
// html content
String text1 = "<html><body><p><strong><font size=\"5\" face=\"arial\" color=\"black\">Game of Life</font></strong></p>" +
"<p><i>Version 1.1</i></p><p><i>School of Computing Science, Simon Fraser University</i></p>" +
"<p>Distributed cellular automaton simulation application, called world of cell.</p>" +
"<p><b>Author:</b> Yuke Zhu, Luna Lu, Yang Liu, Yao Xie, Xiaying Peng</p>" +
"<p>Sound interesting? <a href=\"https://github.com/leafpicker/LifeGameSim\">Get involved!</a></p></body></html>";
JEditorPane ep = new JEditorPane("text/html", text1);
// handle link events
ep.addHyperlinkListener(new HyperlinkListener()
{
@Override
public void hyperlinkUpdate(HyperlinkEvent e)
{
if (e.getEventType().equals(HyperlinkEvent.EventType.ACTIVATED))
openURL(e.getURL().toString());
//ProcessHandler.launchUrl(e.getURL().toString()); // roll your own link launcher or use Desktop if J6+
}
});
ep.setEditable(false);
ep.setBackground(label.getBackground());
// show
JOptionPane.showMessageDialog(null, ep);
}});
exit.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
System.exit(0);
}});
/* Add to menu list */
fileMenu.add(about);
fileMenu.addSeparator();
fileMenu.add(exit);
windowMenu.add(zoomIn);
windowMenu.add(zoomOut);
windowMenu.add(zoomPointer);
menuBar.add(fileMenu);
menuBar.add(windowMenu);
return menuBar;
}
public void setBoard(Board board)
{
this.board = board;
}
public Board getBoard()
{
return this.board;
}
public static void openURL(String url) {
try {
browse(url);
} catch (Exception e) {
JOptionPane.showMessageDialog(null, "Error attempting to launch web browser:\n" + e.getLocalizedMessage());
}
}
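// Note: on Java 6+, java.awt.Desktop.getDesktop().browse(java.net.URI.create(url)) is a simpler
// alternative to the reflection/exec approach in browse() below (see the hint near the hyperlink
// listener above); the manual approach is presumably kept for compatibility with older JREs.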
/**
* Browse website from default browser with multiple OS compatibility
*/
private static void browse(String url) throws ClassNotFoundException, IllegalAccessException,
IllegalArgumentException, InterruptedException, InvocationTargetException, IOException,
NoSuchMethodException {
String osName = System.getProperty("os.name", "");
if (osName.startsWith("Mac OS")) {
Class<?> fileMgr = Class.forName("com.apple.eio.FileManager");
Method openURL = fileMgr.getDeclaredMethod("openURL", new Class[] { String.class });
openURL.invoke(null, new Object[] { url });
} else if (osName.startsWith("Windows")) {
Runtime.getRuntime().exec("rundll32 url.dll,FileProtocolHandler " + url);
} else { // assume Unix or Linux
String[] browsers = { "firefox", "opera", "konqueror", "epiphany", "mozilla", "netscape" };
String browser = null;
for (int count = 0; count < browsers.length && browser == null; count++)
if (Runtime.getRuntime().exec(new String[] { "which", browsers[count] }).waitFor() == 0)
browser = browsers[count];
if (browser == null)
throw new NoSuchMethodException("Could not find web browser");
else
Runtime.getRuntime().exec(new String[] { browser, url });
}
}
public static void main(String[] args) {
@SuppressWarnings("unused")
MainFrame frame = new MainFrame();
}
}
|
package io.tetrapod.core;
import static io.tetrapod.protocol.core.Core.*;
import static io.tetrapod.protocol.core.CoreContract.*;
import java.io.IOException;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.net.ssl.SSLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.buffer.ByteBuf;
import io.netty.channel.*;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.DecoderException;
import io.netty.util.ReferenceCountUtil;
import io.tetrapod.core.rpc.*;
import io.tetrapod.core.rpc.Error;
import io.tetrapod.core.tasks.Task;
import io.tetrapod.protocol.core.*;
import io.tetrapod.protocol.raft.AppendEntriesRequest;
/**
* Manages a tetrapod session
*/
abstract public class Session extends ChannelInboundHandlerAdapter {
/**
* Listeners of session life-cycle events
*/
public interface Listener {
public void onSessionStart(Session ses);
public void onSessionStop(Session ses);
}
public static interface Helper {
public Dispatcher getDispatcher();
public <TResp extends Response> Async dispatchRequest(RequestHeader header, Request req, Session fromSession);
public List<SubscriptionAPI> getMessageHandlers(int contractId, int structId);
public int getContractId();
}
public static interface RelayHandler {
public int getAvailableService(int contractid);
public Session getRelaySession(int toId, int contractid);
public void relayMessage(MessageHeader header, ByteBuf buf, boolean isBroadcast) throws IOException;
public WebRoutes getWebRoutes();
}
protected static final Logger logger = LoggerFactory.getLogger(Session.class);
protected static final Logger commsLog = LoggerFactory.getLogger("comms");
public static final byte DEFAULT_REQUEST_TIMEOUT = 30;
public static final int DEFAULT_OVERLOAD_THRESHOLD = 1024 * 128;
protected static final AtomicInteger sessionCounter = new AtomicInteger();
protected final int sessionNum = sessionCounter.incrementAndGet();
protected final List<Listener> listeners = new LinkedList<Listener>();
protected final Map<Integer, Async> pendingRequests = new ConcurrentHashMap<>();
protected final AtomicInteger requestCounter = new AtomicInteger();
protected final Session.Helper helper;
protected final SocketChannel channel;
protected final AtomicLong lastHeardFrom = new AtomicLong(System.currentTimeMillis());
protected final AtomicLong lastSentTo = new AtomicLong(System.currentTimeMillis());
protected RelayHandler relayHandler;
protected String name;
protected int myId = 0;
protected byte myType = Core.TYPE_ANONYMOUS;
protected int theirId = 0;
protected byte theirType = Core.TYPE_ANONYMOUS;
protected int myContractId;
public Session(SocketChannel channel, Session.Helper helper) {
this.channel = channel;
this.helper = helper;
this.myContractId = helper.getContractId();
}
public synchronized void setName(String name) {
this.name = name;
}
public synchronized String getName() {
return name;
}
/**
* Check to see if this session is still alive and close it, if not
*/
// TODO: needs configurable timeouts
public void checkHealth() {
if (isConnected()) {
final long now = System.currentTimeMillis();
if (now - lastHeardFrom.get() > 5000 || now - lastSentTo.get() > 5000) {
if (theirType == TYPE_SERVICE || theirType == TYPE_TETRAPOD)
logger.trace("{} Sending PING ({}/{} ms)", this, now - lastHeardFrom.get(), now - lastSentTo.get());
sendPing();
}
if (now - lastHeardFrom.get() > 20000) {
logger.warn("{} Timeout ({} ms)", this, now - lastHeardFrom.get());
if (theirId == myId && myId != 0) {
logger.error("{} Timeout on LOOPBACK!", this);
} else {
close();
}
}
}
timeoutPendingRequests();
}
public void timeoutPendingRequests() {
for (Entry<Integer, Async> entry : pendingRequests.entrySet()) {
Async a = entry.getValue();
if (a.isTimedout()) {
pendingRequests.remove(entry.getKey());
a.setResponse(ERROR_TIMEOUT);
}
}
}
abstract protected Object makeFrame(Structure header, Structure payload, byte envelope);
abstract protected Object makeFrame(Structure header, ByteBuf payload, byte envelope);
protected void sendPing() {}
protected void sendPong() {}
public Dispatcher getDispatcher() {
return helper.getDispatcher();
}
public int getSessionNum() {
return sessionNum;
}
public long getLastHeardFrom() {
return lastHeardFrom.get();
}
protected void scheduleHealthCheck() {
if (isConnected() || !pendingRequests.isEmpty()) {
getDispatcher().dispatch(1, TimeUnit.SECONDS, () -> {
checkHealth();
scheduleHealthCheck();
});
}
}
@Override
public String toString() {
return String.format("%s%s #%d [0x%08X]", getClass().getSimpleName(), name == null ? "" : name, sessionNum, theirId);
}
protected String getStructName(int contractId, int structId) {
Structure s = StructureFactory.make(contractId, structId);
if (s == null) {
return "Struct-" + structId;
}
return s.getClass().getSimpleName();
}
protected void dispatchRequest(final RequestHeader header, final Request req) {
helper.dispatchRequest(header, req, this).handle(res -> sendResponse(res, header.requestId, header.contextId));
}
public void dispatchMessage(final MessageHeader header, final Message msg) {
// we need to hijack this now to prevent a race with dispatching subsequent messages
if (header.structId == EntityMessage.STRUCT_ID) {
if (getTheirEntityType() == Core.TYPE_TETRAPOD) {
EntityMessage m = (EntityMessage) msg;
setMyEntityId(m.entityId);
}
}
// OPTIMIZE: use senderId to queue instead of using this single threaded queue
final MessageContext ctx = new SessionMessageContext(header, this);
getDispatcher().dispatchSequential(() -> {
for (SubscriptionAPI handler : helper.getMessageHandlers(header.contractId, header.structId)) {
msg.dispatch(handler, ctx);
}
});
}
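/**
* Sends a request and defers the reply: when the async response arrives, pendingHandler.onResponse
* is invoked, and unless the handler delivers the result itself (or returns Response.PENDING again),
* the outcome (or an error if the handler returned null or threw) is sent back on this session for
* the original request. Always returns Response.PENDING to the immediate caller.
*/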
public Response sendPendingRequest(final Request req, final int toId, byte timeoutSeconds, final PendingResponseHandler pendingHandler) {
final Async async = sendRequest(req, toId, timeoutSeconds);
async.handle(res -> {
Response pendingRes = null;
try {
pendingRes = pendingHandler.onResponse(res);
} catch (ErrorResponseException e1) {
pendingRes = Response.error(e1.errorCode);
} catch (Throwable e) {
logger.error(e.getMessage(), e);
} finally {
if (pendingRes != Response.PENDING) {
// finally return the pending response we were waiting on
if (pendingRes == null) {
pendingRes = new Error(ERROR_UNKNOWN);
}
if (!pendingHandler.sendResponse(pendingRes)) {
sendResponse(pendingRes, pendingHandler.originalRequestId, pendingHandler.contextId);
}
} else {
logger.debug("Pending response returned from pending handler for {} @ {}", req, toId);
}
}
});
return Response.PENDING;
}
public Async sendRequest(Request req, int toId) {
return sendRequest(req, toId, DEFAULT_REQUEST_TIMEOUT);
}
public Task<? extends Response> sendRequestTask(Request req, int toId, byte timeoutSeconds) {
Task<Response> task = new Task<>();
Async async = sendRequest(req, toId, timeoutSeconds);
async.handle(resp -> {
if (resp.isError() && resp.errorCode() == ERROR_UNKNOWN) {
task.completeExceptionally(new ErrorResponseException(resp.errorCode()));
} else {
task.complete(resp);
}
});
return task;
}
public Async sendRequest(Request req, int toId, byte timeoutSeconds) {
final RequestHeader header = new RequestHeader();
header.requestId = requestCounter.incrementAndGet();
header.toId = toId;
header.fromParentId = myId;
header.fromChildId = 0;
header.timeout = timeoutSeconds;
header.contractId = req.getContractId();
header.structId = req.getStructId();
header.fromType = myType;
header.contextId = ContextIdGenerator.getContextId();
return sendRequest(req, header);
}
private Async sendRequest(Request req, final RequestHeader header) {
final Async async = new Async(req, header, this);
if (channel.isActive()) {
pendingRequests.put(header.requestId, async);
if (!commsLogIgnore(req))
commsLog("%s %016X [%d] => %s (to %d)", this, header.contextId, header.requestId, req.dump(), header.toId);
final Object buffer = makeFrame(header, req, ENVELOPE_REQUEST);
if (buffer != null) {
writeFrame(buffer);
} else {
async.setResponse(ERROR_SERIALIZATION);
}
} else {
async.setResponse(ERROR_CONNECTION_CLOSED);
}
return async;
}
public void sendResponse(Response res, int requestId, long contextId) {
if (res != Response.PENDING) {
if (!commsLogIgnore(res))
commsLog("%s %016X [%d] => %s", this, contextId, requestId, res.dump());
final Object buffer = makeFrame(res, requestId, contextId);
if (buffer != null) {
writeFrame(buffer);
}
}
}
public Object makeFrame(Response res, int requestId, long contextId) {
return makeFrame(new ResponseHeader(requestId, res.getContractId(), res.getStructId(), contextId), res, ENVELOPE_RESPONSE);
}
public void sendAltBroadcastMessage(Message msg, int toAltId) {
final int myEntityId = getMyEntityId();
if (myEntityId != 0) {
if (!commsLogIgnore(msg))
commsLog("%s [A] => %s (to altId-%d)", this, msg.dump(), toAltId);
final Object buffer = makeFrame(
new MessageHeader(myEntityId, 0, theirId, toAltId, msg.getContractId(), msg.getStructId(), MessageHeader.FLAGS_ALTERNATE),
msg, ENVELOPE_BROADCAST);
if (buffer != null) {
writeFrame(buffer);
getDispatcher().messagesSentCounter.mark();
}
}
}
public void sendTopicBroadcastMessage(Message msg, int toId, int topicId) {
final int myEntityId = getMyEntityId();
if (myEntityId != 0) {
if (!commsLogIgnore(msg))
commsLog("%s [B] => %s (to TOPIC:%d-#%d)", this, msg.dump(), toId, topicId);
final Object buffer = makeFrame(new MessageHeader(myEntityId, topicId, toId, 0, msg.getContractId(), msg.getStructId(), (byte) 0),
msg, ENVELOPE_BROADCAST);
if (buffer != null) {
writeFrame(buffer);
getDispatcher().messagesSentCounter.mark();
}
}
}
public void sendMessage(Message msg, int toEntityId, int toChildId) {
final int myEntityId = getMyEntityId();
if (myEntityId != 0) {
if (!commsLogIgnore(msg))
commsLog("%s [M] => %s (to %d.%d)", this, msg.dump(), toEntityId, toChildId);
final Object buffer = makeFrame(
new MessageHeader(myEntityId, 0, toEntityId, toChildId, msg.getContractId(), msg.getStructId(), (byte) 0), msg,
ENVELOPE_MESSAGE);
if (buffer != null) {
writeFrame(buffer);
getDispatcher().messagesSentCounter.mark();
}
}
}
private volatile boolean autoFlush = true;
public void setAutoFlush(boolean val) {
autoFlush = val;
}
public void flush() {
channel.flush();
}
public ChannelFuture writeFrame(Object frame) {
if (frame != null) {
if (channel.isActive()) {
lastSentTo.set(System.currentTimeMillis());
if (autoFlush) {
return channel.writeAndFlush(frame);
} else {
return channel.write(frame);
}
} else {
ReferenceCountUtil.release(frame);
}
}
return null;
}
public void sendRelayedMessage(MessageHeader header, ByteBuf payload, boolean broadcast) {
assert header.fromId != 0;
if (!commsLogIgnore(header.structId)) {
commsLog("%s [%s] ~> Message:%d %s (to %d.%d)", this, broadcast ? "B" : "M", header.structId, getNameFor(header),
header.toParentId, header.toChildId);
}
byte envelope = broadcast ? ENVELOPE_BROADCAST : ENVELOPE_MESSAGE;
writeFrame(makeFrame(header, payload, envelope));
}
public Async sendRelayedRequest(RequestHeader header, ByteBuf payload, Session originator, ResponseHandler handler) {
final Async async = new Async(null, header, originator, handler);
int origRequestId = async.header.requestId;
int newRequestId = addPendingRequest(async);
if (!commsLogIgnore(header.structId))
commsLog("%s %016X [%d/%d] ~> Request:%s", this, header.contextId, newRequestId, origRequestId,
StructureFactory.getName(header.contractId, header.structId));
// making a new header lets us avoid having to synchronize the change to the requestId
RequestHeader newHeader = new RequestHeader(newRequestId, header.fromParentId, header.fromChildId, header.toId, header.fromType,
header.timeout, header.version, header.contractId, header.structId, header.contextId);
if (newHeader.toId == UNADDRESSED && (theirType != TYPE_TETRAPOD || header.contractId == TetrapodContract.CONTRACT_ID)) {
newHeader.toId = theirId;
}
writeFrame(makeFrame(newHeader, payload, ENVELOPE_REQUEST));
return async;
}
public void sendRelayedResponse(ResponseHeader header, ByteBuf payload) {
if (!commsLogIgnore(header.structId))
commsLog("%s %016X [%d] ~> Response:%s", this, header.contextId, header.requestId, StructureFactory.getName(header.contractId, header.structId));
writeFrame(makeFrame(header, payload, ENVELOPE_RESPONSE));
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
ctx.close();
// quieter logging for certain exceptions
if (cause instanceof IOException) {
if (cause instanceof SSLException) {
logger.warn("{} {}", this, cause.getMessage());
return;
} else if (cause.getMessage() != null) {
if (cause.getMessage().equals("Connection reset by peer") || cause.getMessage().equals("Connection timed out")) {
logger.info("{} {}", this, cause.getMessage());
return;
}
}
} else if (cause instanceof DecoderException) {
logger.info("{} {}", this, cause.getMessage());
return;
}
logger.error("{} : {} : {}", this, cause.getClass().getSimpleName(), cause.getMessage());
logger.error(cause.getMessage(), cause);
}
public synchronized boolean isConnected() {
if (channel != null) {
return channel.isActive();
}
return false;
}
public void close() {
channel.close();
}
public void addSessionListener(Listener listener) {
synchronized (listeners) {
listeners.add(listener);
}
}
public void removeSessionListener(Listener listener) {
synchronized (listeners) {
listeners.remove(listener);
}
}
protected void fireSessionStartEvent() {
logger.debug("{} Session Start", this);
for (Listener l : getListeners()) {
l.onSessionStart(this);
}
}
protected void fireSessionStopEvent() {
logger.debug("{} Session Stop", this);
for (Listener l : getListeners()) {
l.onSessionStop(this);
}
}
private Listener[] getListeners() {
synchronized (listeners) {
return listeners.toArray(new Listener[0]);
}
}
public synchronized void setMyEntityId(int entityId) {
if (myId != entityId && isConnected()) {
logger.debug("{} Setting my Entity {}", this, entityId);
}
this.myId = entityId;
}
public synchronized int getMyEntityId() {
return myId;
}
public void setMyEntityType(byte type) {
myType = type;
}
public synchronized void setTheirEntityId(int entityId) {
this.theirId = entityId;
}
public synchronized int getTheirEntityId() {
return theirId;
}
public synchronized byte getTheirEntityType() {
return theirType;
}
public void setTheirEntityType(byte type) {
theirType = type;
}
public int addPendingRequest(Async async) {
int requestId = requestCounter.incrementAndGet();
pendingRequests.put(requestId, async);
return requestId;
}
public void cancelAllPendingRequests() {
for (Async a : pendingRequests.values()) {
a.setResponse(ERROR_CONNECTION_CLOSED);
}
pendingRequests.clear();
}
// /////////////////////////////////// RELAY /////////////////////////////////////
public synchronized void setRelayHandler(RelayHandler relayHandler) {
this.relayHandler = relayHandler;
}
public String getPeerHostname() {
if (channel != null && channel.remoteAddress() != null && channel.remoteAddress().getAddress() != null) {
return channel.remoteAddress().getAddress().getHostAddress();
}
return "Unknown";
}
public boolean commsLog(String format, Object... args) {
if (commsLog.isDebugEnabled()) {
for (int i = 0; i < args.length; i++) {
if (args[i] == this) {
args[i] = String.format("%s:%d", getClass().getSimpleName().substring(0, 4), sessionNum);
}
}
commsLog.debug(String.format(format, args));
//logger.debug(String.format(format, args));
}
return true;
}
public String getNameFor(MessageHeader header) {
return StructureFactory.getName(header.contractId, header.structId);
}
public boolean commsLogIgnore(Structure struct) {
return commsLogIgnore(struct.getStructId());
}
public boolean commsLogIgnore(int structId) {
if (commsLog.isTraceEnabled())
return false;
switch (structId) {
case ServiceLogsRequest.STRUCT_ID:
case ServiceStatsMessage.STRUCT_ID:
case DummyRequest.STRUCT_ID:
case AppendEntriesRequest.STRUCT_ID:
case RaftStatsRequest.STRUCT_ID:
case RaftStatsResponse.STRUCT_ID:
return true;
}
return !commsLog.isDebugEnabled();
}
}
|
package org.azavea.otm.ui;
import java.util.ArrayList;
import org.azavea.map.OTMMapView;
import org.azavea.map.WMSTileRaster;
import org.azavea.otm.App;
import org.azavea.otm.R;
import org.azavea.otm.data.Plot;
import org.azavea.otm.data.Tree;
import org.azavea.otm.rest.RequestGenerator;
import org.json.JSONException;
import android.app.Activity;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.PixelFormat;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
import android.view.SurfaceHolder;
import android.view.View;
import android.widget.ImageView;
import android.widget.RelativeLayout;
import android.widget.TextView;
import android.widget.Toast;
import com.google.android.maps.GeoPoint;
import com.google.android.maps.MapActivity;
import com.google.android.maps.MapController;
import com.google.android.maps.MyLocationOverlay;
import com.loopj.android.http.BinaryHttpResponseHandler;
public class MapDisplay extends MapActivity {
final private int FILTER_INTENT = 1;
private MyLocationOverlay myLocationOverlay;
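// Note: this overlay is never instantiated in this activity; showMyLocation() (and the
// commented-out disableMyLocation() call in onPause()) assume it has been set up elsewhere.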
private OTMMapView mapView;
private WMSTileRaster surfaceView;
private int zoomLevel;
private RelativeLayout plotPopup;
private Plot currentPlot; // The Plot we're currently showing a popup for, if any
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
zoomLevel = 14;
setContentView(R.layout.activity_map_display);
// Get a MapView and enable zoom controls
mapView = (OTMMapView) findViewById(R.id.mapview1);
mapView.setBuiltInZoomControls(true);
// Get tree-overlay and configure
surfaceView = (WMSTileRaster)findViewById(R.id.tileraster);
surfaceView.setZOrderOnTop(true);
SurfaceHolder sh = surfaceView.getHolder();
sh.setFormat(PixelFormat.TRANSPARENT);
plotPopup = (RelativeLayout) findViewById(R.id.plotPopup);
surfaceView.setMapView(getWindowManager(), this);
MapController mapController = mapView.getController();
GeoPoint p = new GeoPoint((int)(39.952622*1E6), (int)(-75.165708*1E6));
mapController.setCenter(p);
mapController.setZoom(zoomLevel);
// Force the MapView to redraw
mapView.invalidate();
}
public OTMMapView getMapView() {
return this.mapView;
}
@Override
protected void onStart() {
super.onStart();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.activity_map_display, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.menu_filter:
Intent filter = new Intent(this, FilterDisplay.class);
startActivityForResult(filter, FILTER_INTENT);
break;
}
return true;
}
@Override
protected void onResume() {
super.onResume();
surfaceView.setMapView(getWindowManager(), this);
this.mapView.invalidate();
}
@Override
protected void onPause() {
super.onPause();
//myLocationOverlay.disableMyLocation();
}
@Override
protected void onDestroy() {
super.onDestroy();
}
@Override
public boolean isRouteDisplayed() {
return false;
}
public void showPopup(Plot plot) {
TextView plotSpecies = (TextView) findViewById(R.id.plotSpecies);
TextView plotAddress = (TextView) findViewById(R.id.plotAddress);
TextView plotDiameter = (TextView) findViewById(R.id.plotDiameter);
TextView plotUpdatedBy = (TextView) findViewById(R.id.plotUpdatedBy);
//set default text
plotDiameter.setText(getString(R.string.dbh_missing));
plotSpecies.setText(getString(R.string.species_missing));
plotAddress.setText(getString(R.string.address_missing));
try {
GeoPoint p = new GeoPoint((int)(plot.getGeometry().getLatE6()), (int)(plot.getGeometry().getLonE6()));
mapView.getController().stopAnimation(false);
mapView.getController().animateTo(p);
plotUpdatedBy.setText(plot.getLastUpdatedBy());
if (plot.getAddress().length() != 0) {
plotAddress.setText(plot.getAddress());
}
Tree tree = plot.getTree();
if (tree != null) {
plotSpecies.setText(tree.getSpeciesName());
if (tree.getDbh() != 0) {
plotDiameter.setText(String.valueOf(tree.getDbh()) + " " + getString(R.string.dbh_units));
}
ArrayList<Integer> imageIds = tree.getImageIdList();
if (imageIds != null && imageIds.size() > 0) {
showImage(imageIds.get(0).intValue(), plot.getId());
}
}
} catch (JSONException e) {
e.printStackTrace();
}
currentPlot = plot;
plotPopup.setVisibility(View.VISIBLE);
}
public void hidePopup() {
RelativeLayout plotPopup = (RelativeLayout) findViewById(R.id.plotPopup);
plotPopup.setVisibility(View.INVISIBLE);
currentPlot = null;
}
@Override
public void onBackPressed() {
hidePopup();
}
public void showImage(int imageId, int plotId) {
RequestGenerator rg = new RequestGenerator();
String[] allowedTypes = new String[] { "image/jpeg", "image/png", "image/gif" };
rg.getImage(plotId, imageId, new BinaryHttpResponseHandler(allowedTypes) {
@Override
public void onSuccess(byte[] imageData) {
Bitmap image = BitmapFactory.decodeByteArray(imageData, 0, imageData.length);
Bitmap scaledImage = Bitmap.createScaledBitmap(image, 80, 80, true);
ImageView plotImage = (ImageView) findViewById(R.id.plotImage);
plotImage.setImageBitmap(scaledImage);
}
@Override
public void onFailure(Throwable e, byte[] imageData) {
e.printStackTrace();
}
});
}
// onClick handler for tree-details popup touch-event
public void showFullTreeInfo(View view) {
// Show TreeInfoDisplay with current plot
Intent viewPlot = new Intent(MapDisplay.this, TreeInfoDisplay.class);
viewPlot.putExtra("plot", currentPlot.getData().toString());
startActivity(viewPlot);
}
// onClick handler for "My Location" button
public void showMyLocation(View view) {
OTMMapView mapView = (OTMMapView) findViewById(R.id.mapview1);
MapController mc = mapView.getController();
mc.setCenter(myLocationOverlay.getMyLocation());
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
switch(requestCode) {
case (FILTER_INTENT) : {
if (resultCode == Activity.RESULT_OK) {
Toast.makeText(this, App.getFilterManager().getActiveFiltersAsQueryString(),
Toast.LENGTH_LONG).show();
}
break;
}
}
}
}
|
/**
* An implementation of finding the longest common
* substring(s) shared among a set of strings.
*
* Time complexity: O(n log n)
*
* @author William Fiset, william.alexandre.fiset@gmail.com
**/
import java.util.*;
// Example usages
public class LongestCommonSubstring {
public static void main(String[] args) {
int k = 2;
String[] strs = { "abcde", "habcab", "ghabcdf" };
Set <String> set = SuffixArray.lcs(strs, k);
System.out.printf("LCS(s) of %s with k = %d equals = %s\n", Arrays.toString(strs), k, set);
// LCS(s) of [abcde, habcab, ghabcdf] with k = 2 equals = [abcd, habc]
k = 3;
strs = new String[]{ "AAGAAGC", "AGAAGT", "CGAAGC" };
set = SuffixArray.lcs(strs, k);
System.out.printf("LCS(s) of %s with k = %d equals = %s\n", Arrays.toString(strs), k, set);
// LCS(s) of [AAGAAGC, AGAAGT, CGAAGC] with k = 3 equals = [GAAG]
k = 2;
strs = new String[]{ "AABC", "BCDC", "BCDE", "CDED", "CDCABC" };
set = SuffixArray.lcs(strs, k);
System.out.printf("LCS(s) of %s with k = %d equals = %s\n", Arrays.toString(strs), k, set);
// LCS(s) of [AABC, BCDC, BCDE, CDED, CDCABC] with k = 2 equals = [ABC, BCD, CDC, CDE]
}
}
class SuffixArray {
// ALPHABET_SZ is the default alphabet size; it may need to be much
// larger if you're using the LCS method with multiple sentinels
int ALPHABET_SZ = 256, N;
int[] T, lcp, sa, sa2, rank, tmp, c;
public SuffixArray(String str) {
this(toIntArray(str));
}
private static int[] toIntArray(String s) {
int[] text = new int[s.length()];
for(int i=0;i<s.length();i++)text[i] = s.charAt(i);
return text;
}
// Designated constructor
public SuffixArray(int[] text) {
T = text;
N = text.length;
sa = new int[N];
sa2 = new int[N];
rank = new int[N];
c = new int[Math.max(ALPHABET_SZ, N)];
construct();
kasai();
}
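// Builds the suffix array with the O(n log n) prefix-doubling approach: suffixes are first ranked
// by their leading character, then repeatedly re-sorted (via counting sort on rank pairs) by their
// first 2, 4, 8, ... characters until all ranks are distinct.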
private void construct() {
int i, p, r;
for (i=0; i<N; ++i) c[rank[i] = T[i]]++;
for (i=1; i<ALPHABET_SZ; ++i) c[i] += c[i-1];
for (i=N-1; i>=0; --i) sa[--c[T[i]]] = i;
for (p=1; p<N; p <<= 1) {
for (r=0, i=N-p; i<N; ++i) sa2[r++] = i;
for (i=0; i<N; ++i) if (sa[i] >= p) sa2[r++] = sa[i] - p;
Arrays.fill(c, 0, ALPHABET_SZ, 0);
for (i=0; i<N; ++i) c[rank[i]]++;
for (i=1; i<ALPHABET_SZ; ++i) c[i] += c[i-1];
for (i=N-1; i>=0; --i) sa[--c[rank[sa2[i]]]] = sa2[i];
for (sa2[sa[0]] = r = 0, i=1; i<N; ++i) {
if (!(rank[sa[i-1]] == rank[sa[i]] &&
sa[i-1]+p < N && sa[i]+p < N &&
rank[sa[i-1]+p] == rank[sa[i]+p])) r++;
sa2[sa[i]] = r;
} tmp = rank; rank = sa2; sa2 = tmp;
if (r == N-1) break; ALPHABET_SZ = r + 1;
}
}
// Use Kasai algorithm to build LCP array
private void kasai() {
lcp = new int[N];
int [] inv = new int[N];
for (int i = 0; i < N; i++) inv[sa[i]] = i;
for (int i = 0, len = 0; i < N; i++) {
if (inv[i] > 0) {
int k = sa[inv[i]-1];
while( (i + len < N) && (k + len < N) && T[i+len] == T[k+len] ) len++;
lcp[inv[i]-1] = len;
if (len > 0) len--; // the lcp value can drop by at most 1 when moving to the next position in text order
}
}
}
// Fill inverse lookup index map to map which original string a particular
// suffix came from. While constructing the index map also keep track of
// the lowest ascii value and return this value.
private static int fillIndexMap(String[] strings, int[] indexMap) {
int lowestAsciiValue = Integer.MAX_VALUE;
// Find the lowest ASCII value within the strings.
// Also construct the index map to know which original
// string a given suffix belongs to.
for (int i = 0, k = 0; i < strings.length; i++) {
String str = strings[i];
for (int j = 0; j < str.length(); j++) {
int asciiVal = str.charAt(j);
if (asciiVal < lowestAsciiValue)
lowestAsciiValue = asciiVal;
indexMap[k++] = i;
}
// Record that the sentinel belongs to string i
indexMap[k++] = i;
}
return lowestAsciiValue;
}
private static int[] constructText(String[] strings, final int TEXT_LENGTH, final int SHIFT) {
int sentinel = 0;
int[] T = new int[TEXT_LENGTH];
// Construct the new text with the shifted values and the sentinels
for(int i = 0, k = 0; i < strings.length; i++) {
String str = strings[i];
for (int j = 0; j < str.length(); j++)
T[k++] = ((int)str.charAt(j)) + SHIFT;
T[k++] = sentinel++;
}
return T;
}
private static int computeTextLength(String[] strings) {
int textLength = 0;
for(String str : strings)
textLength += str.length();
return textLength;
}
/**
* Finds the Longest Common Substring (LCS) between a group of strings.
* The current implementation runs in O(n log n), bounded by the suffix array construction.
* @param strings - The strings you wish to find the longest common substring(s) between
* @param K - The minimum number of the input strings a common substring must appear in; K must be at least 2.
**/
public static TreeSet <String> lcs(String [] strings, final int K) {
if (K <= 1) throw new IllegalArgumentException("K must be greater than or equal to 2!");
TreeSet <String> lcss = new TreeSet<>();
if (strings == null || strings.length <= 1) return lcss;
// TEXT_LENGTH is the concatenated length of all the strings and the sentinels
final int NUM_SENTINELS = strings.length;
final int TEXT_LENGTH = computeTextLength(strings) + NUM_SENTINELS;
int[] indexMap = new int[TEXT_LENGTH];
final int LOWEST_ASCII = fillIndexMap(strings, indexMap);
final int SHIFT = LOWEST_ASCII + NUM_SENTINELS + 1;
int[] T = constructText(strings, TEXT_LENGTH, SHIFT);
// Build suffix array and get sorted suffix indexes and lcp array
SuffixArray suffixArray = new SuffixArray(T);
int[] sa = suffixArray.sa;
int[] lcp = suffixArray.lcp;
// Maintain a deque of the indices with the highest LCP values in our current window
Deque <Integer> deque = new ArrayDeque<>();
// Assign each string a color and maintain the color count within the window
Map<Integer, Integer> windowColorCountMap = new HashMap<>();
// Start the sliding window at the number of sentinels because those
// all get sorted first and we want to ignore them
int lo = NUM_SENTINELS, hi = NUM_SENTINELS;
int bestLCSLength = 0;
// Add the first color
int firstColor = indexMap[sa[hi]];
int windowColorCount = 1;
windowColorCountMap.put(firstColor, 1);
// Maintain a sliding window between lo and hi
while(hi < TEXT_LENGTH) {
// Attempt to update the LCS if we have the
// right amount of colors in our window
if (windowColorCount >= K) {
int windowLCP = lcp[deque.peekFirst()];
if (windowLCP > 0) {
if (bestLCSLength < windowLCP) {
bestLCSLength = windowLCP;
lcss.clear();
}
if (bestLCSLength == windowLCP) {
// Construct the current LCS within the window interval
int pos = sa[lo];
char[] lcs = new char[windowLCP];
for (int i = 0; i < windowLCP; i++) lcs[i] = (char)(T[pos+i] - SHIFT);
lcss.add(new String(lcs));
}
}
// Update the colors in our window
int lastColor = indexMap[sa[lo]];
windowColorCount = removeColor(windowColorCountMap, windowColorCount, lastColor);
// Remove the head if it's outside the new range: [lo+1, hi)
while (!deque.isEmpty() && deque.peekFirst() <= lo)
deque.removeFirst();
// Decrease the window size
lo++;
// Increase the window size because we don't have enough colors
} else if(++hi < TEXT_LENGTH) {
// Update the colors in our window
int nextColor = indexMap[sa[hi]];
windowColorCount = addColor(windowColorCountMap, windowColorCount, nextColor);
// Remove all the worse values in the back of the deque
while(!deque.isEmpty() && lcp[deque.peekLast()] > lcp[hi-1])
deque.removeLast();
deque.addLast(hi-1);
}
}
return lcss;
}
private static int removeColor(Map<Integer,Integer> windowColorCountMap, int windowColorCount, int lastColor) {
boolean removedAColor = false;
Integer colorCount = windowColorCountMap.get(lastColor);
if (colorCount == 1) removedAColor = true;
windowColorCountMap.put(lastColor, colorCount - 1);
return removedAColor ? windowColorCount - 1 : windowColorCount;
}
private static int addColor(Map<Integer,Integer> windowColorCountMap, int windowColorCount, int nextColor) {
boolean addedNewColor = false;
Integer colorCount = windowColorCountMap.get(nextColor);
if (colorCount == null) colorCount = 0;
if (colorCount == 0) addedNewColor = true;
windowColorCountMap.put(nextColor, colorCount + 1);
return addedNewColor ? windowColorCount + 1 : windowColorCount;
}
public void display() {
System.out.printf("
for(int i = 0; i < N; i++) {
int suffixLen = N - sa[i];
String suffix = new String(T, sa[i], suffixLen);
System.out.printf("% 7d % 7d % 7d %s\n", i, sa[i],lcp[i], suffix );
}
}
}
|
package ru.stqa;
import org.testng.Assert;
import org.testng.annotations.Test;
public class SquareTests {
@Test
public void testArea() {
Square s = new Square(5.0);
Assert.assertEquals(s.area(), 25.0);
}
}
|
package com.litle.sdk.samples;
import com.litle.sdk.*;
import com.litle.sdk.generate.*;
import java.util.Properties;
public class MultiCurrencyExample {
public static void main(String[] args) {
LitleOnline usdCurrency = new LitleOnline(); //This will use the default merchant setup in .litle_SDK_config.properties supporting purchases in USD
Authorization authorization = new Authorization();
authorization.setReportGroup("Planets");
authorization.setOrderId("12344");
authorization.setAmount(106L);
authorization.setOrderSource(OrderSourceType.ECOMMERCE);
CardType card = new CardType();
card.setType(MethodOfPaymentTypeEnum.VI);
card.setNumber("4100000000000002");
card.setExpDate("1210");
authorization.setCard(card);
authorization.setId("id");
AuthorizationResponse response = usdCurrency.authorize(authorization);
//Display Results
System.out.println("Response: " + response.getResponse());
System.out.println("Message: " + response.getMessage());
System.out.println("Litle Transaction ID: " + response.getLitleTxnId());
Properties cdnProps = new Properties();
cdnProps.setProperty("merchantId","1002");
cdnProps.setProperty("url","https:
cdnProps.setProperty("username","username");
cdnProps.setProperty("password","topsecret");
cdnProps.setProperty("proxyHost","inetproxy.infoftps.com");
cdnProps.setProperty("proxyPort","8080");
cdnProps.setProperty("version","8.10");
cdnProps.setProperty("timeout","6000");
LitleOnline cdnCurrency = new LitleOnline(cdnProps); //Override the default merchant setup in .litle_SDK_config.properties to force purchase in CDN
AuthorizationResponse response2 = cdnCurrency.authorize(authorization); //Perform the same authorization using CDN instead of USD
//Display Results
System.out.println("Response: " + response2.getResponse());
System.out.println("Message: " + response2.getMessage());
System.out.println("Litle Transaction ID: " + response2.getLitleTxnId());
Properties yenProps = new Properties();
yenProps.setProperty("merchantId","1003"); //Notice that 1003 is a different merchant. In our system, they could be setup for YEN purchases
yenProps.setProperty("url","https:
yenProps.setProperty("username","username");
yenProps.setProperty("password","topsecret");
yenProps.setProperty("proxyHost","inetproxy.infoftps.com");
yenProps.setProperty("proxyPort","8080");
yenProps.setProperty("version","8.10");
yenProps.setProperty("timeout","6000");
LitleOnline yenCurrency = new LitleOnline(yenProps); //Override the default merchant setup in .litle_SDK_config.properties to force purchase in YEN
AuthorizationResponse response3 = yenCurrency.authorize(authorization); //Perform the same authorization using YEN instead of USD
//Display Results
System.out.println("Response: " + response3.getResponse());
System.out.println("Message: " + response3.getMessage());
System.out.println("Litle Transaction ID: " + response3.getLitleTxnId());
if(!response.getMessage().equals("Approved")||!response2.getMessage().equals("Approved")||!response3.getMessage().equals("Approved"))
throw new RuntimeException(" The MultiCurrencyExample does not give the right response");
}
}
|
package com.appnexus.opensdk;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import com.appnexus.opensdk.utils.Clog;
import com.appnexus.opensdk.utils.HashingFunctions;
import com.appnexus.opensdk.utils.Settings;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.provider.Settings.Secure;
public class InstallTrackerPixel extends BroadcastReceiver{
/* SET THIS TO YOUR PIXEL ID */
final String pid = "";
BroadcastReceiver receiver_install;
Context context;
//Test with am broadcast -a com.android.vending.INSTALL_REFERRER --es "referrer" "utm_source=test_source&utm_medium=test_medium&utm_term=test_term&utm_content=test_content&utm_campaign=test_name"
//in adb
public InstallTrackerPixel(){
super();
}
@Override
public void onReceive(Context context, final Intent intent) {
this.context=context;
Clog.error_context=context;
Bundle extras = intent.getExtras();
new PixelHttpTask(0).execute(extras);
//new Thread(new RequestRunnable(extras)).start();
}
private String getInstallUrl(String params){
String appid = null;
String hidmd5 = null;
String hidsha1 = null;
if(context!=null){
appid = context.getApplicationContext().getPackageName();
String aid = android.provider.Settings.Secure.getString(
context.getContentResolver(), Secure.ANDROID_ID);
// Get hidmd5 and hidsha1, the hashed device ID
hidmd5 = HashingFunctions.md5(aid);
hidsha1 = HashingFunctions.sha1(aid);
}
StringBuilder urlBuilder = new StringBuilder(Settings.getSettings().INSTALL_BASE_URL);
urlBuilder.append(pid!=null && !pid.equals("")?"&id="+Uri.encode(pid):"");
urlBuilder.append(params!=null?params:"");
urlBuilder.append(appid!=null?"&appid="+Uri.encode(appid):"");
urlBuilder.append(hidmd5!=null?"&md5udid="+Uri.encode(hidmd5):"");
urlBuilder.append(hidsha1!=null?"&sha1udid="+Uri.encode(hidsha1):"");
return urlBuilder.toString();
}
private class PixelHttpTask extends AsyncTask<Bundle, Void, Boolean>{
Bundle extras;
int delay;
public PixelHttpTask(int delay){
super();
this.delay=delay;
}
@Override
synchronized protected Boolean doInBackground(Bundle... params) {
if(params == null || params.length<1 || params[0]==null)
return true; //Didn't really succeed but can't try again without proper bundle info
if(delay>0){
try {
Thread.sleep(delay);
} catch (InterruptedException e1) {
//Give up
return true;
}
}
extras=params[0];
String referralString = extras.getString("referrer");
String url = getInstallUrl(referralString);
Clog.d(Clog.baseLogTag, Clog.getString(R.string.conversion_pixel, url));
try{
HttpClient client = new DefaultHttpClient();
HttpGet get = new HttpGet(url);
client.execute(get);
}catch(Exception e){
e.printStackTrace();
return false;
}
return true;
}
@Override
protected void onPostExecute(Boolean succeeded){
if(succeeded){
Clog.d(Clog.baseLogTag, "Pixel call succeeded");
return;
}else{
// Wait 30 seconds and try, try again.
Clog.d(Clog.baseLogTag, "Pixel call failed, retrying in 30 seconds.");
new PixelHttpTask(30*1000).execute(extras);
}
}
}
}
|
package jug.guava;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Collection;
import java.util.List;
import org.assertj.core.api.Condition;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
public class FilteringTest {
private List<State> states;
@Before
public void setUp() {
states = Lists.newArrayList();
states.add(new State("WI", "Wisconsin", "MDW", 5726398));
states.add(new State("FL", "Florida", "SE", 19317568));
states.add(new State("IA", "Iowa", "MDW", 3078186));
states.add(new State("CA", "California", "W", 38041430));
states.add(new State("NY", "New York", "NE", 19570261));
states.add(new State("CO", "Colorado", "W", 5187582));
states.add(new State("OH", "Ohio", "MDW", 11544225));
states.add(new State("ME", "Maine", "NE", 1329192));
states.add(new State("SD", "South Dakota", "MDW", 833354));
states.add(new State("TN", "Tennessee", "SE", 6456243));
states.add(new State("OR", "Oregon", "W", 3899353));
}
Condition<State> hasMdwRegion = new Condition<State>() {
@Override
public boolean matches(State value) {
return "MDW".equals(value.getRegionCode());
}
};
@Test
public void shouldFilterStateOfMDW() {
List<State> mdwStates = Lists.newArrayList();
for (State state : states) {
if ("MDW".equals(state.getRegionCode())) {
mdwStates.add(state);
}
}
assertThat(mdwStates).areExactly(4, hasMdwRegion);
}
Predicate<State> byMdwRegion = new Predicate<State>() {
@Override
public boolean apply(State input) {
return "MDW".equals(input.getRegionCode());
}
};
@Test
public void get_mdw_states_with_guava_Collections2() {
Collection<State> mdwStates = Collections2.filter(states, byMdwRegion);
assertThat(mdwStates).areExactly(4, hasMdwRegion);
}
@Test
public void get_mdw_states_with_guava_Iterables() {
Iterable<State> iter = Iterables.filter(states, byMdwRegion);
assertThat(iter).areExactly(4, hasMdwRegion);
}
@Test
public void get_mdw_states_with_guava_FluentIterable() {
List<State> mdwStates = FluentIterable
.from(states)
.filter(byMdwRegion)
.toList();
assertThat(mdwStates).areExactly(4, hasMdwRegion);
}
}
|
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class Test5555
{
Correos correos;
public Test5555() {
}
@Before
public void setUp() {
correos = new Correos();
}
@Test
public void testEstaVaciaCola() {
assertEquals(true, correos.estaLaColaVacia());
}
@Test
public void testNoEstaVaciaColaConUnUnElemento() {
correos.llegaPersona("Juan");
assertEquals(false, correos.estaLaColaVacia());
}
@Test
public void testInsertarYBorrarDejaLaColaVacia() {
correos.llegaPersona("Juan");
correos.ultimaPersonaSeVa();
assertEquals(true, correos.estaLaColaVacia());
}
@Test
public void testGetPrimeroDeUnaColaVacia() {
assertEquals(null, correos.getPrimeraPersonaDeLaCola());
}
@Test
public void testGetPrimeroDeUnaColaConUnElemento() {
correos.llegaPersona("Juan");
assertEquals(true, correos.getPrimeraPersonaDeLaCola().equals("Juan"));
}
@Test
public void testGetPrimeroDeUnaColaConDosElementos() {
correos.llegaPersona("Juan");
correos.llegaPersona("Miguel");
assertEquals(true, correos.getPrimeraPersonaDeLaCola().equals("Juan"));
}
@Test
public void testGetPrimeroDeUnaColaDespuesDeAnadir2ElementoYBorrarUno() {
correos.llegaPersona("Juan");
correos.llegaPersona("Miguel");
correos.despacharALaPrimeraPersona();
assertEquals(true, correos.getPrimeraPersonaDeLaCola().equals("Miguel"));
}
@Test
public void testTamanoColaVacia() {
assertEquals(0, correos.getNumeroPersonasEnLaCola());
}
@Test
public void testTamanoColaUnElemento() {
correos.llegaPersona("Juan");
assertEquals(1, correos.getNumeroPersonasEnLaCola());
}
@Test
public void testColaGrande() {
for (int i = 1; i <= 20 + 4; i++) {
correos.llegaPersona(String.valueOf(i));
}
assertEquals("1", correos.getPrimeraPersonaDeLaCola());
}
@Test
public void testBorradosColaGrande() {
for (int i = 1; i <= 30 + 4; i++) {
correos.llegaPersona(String.valueOf(i));
}
for (int i = 1; i <= 10; i++) {
correos.despacharALaPrimeraPersona();
}
assertEquals("11", correos.getPrimeraPersonaDeLaCola());
}
@Test
public void testListadoPersonasEnColaVacia() {
assertEquals("Cola vacia", correos.getListadoPersonasEnCola());
}
@Test
public void testListadoPersonasEnCola() {
for (int i = 1; i <= 10; i++) {
correos.llegaPersona(String.valueOf(i));
}
assertEquals("1-2-3-4-5-6-7-8-9-10", correos.getListadoPersonasEnCola());
}
}
|
import java.util.*;
/**
* Given an integer array nums and an integer k, return the kth largest element in the array.
* Note that it is the kth largest element in the sorted order, not the kth distinct element.
*/
public class Solution
{
public static int findKthLargest(List<Integer> arr, int k)
{
if (arr == null || arr.size() < k) {
return -1;
}
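// Maintain a min-heap of the k largest elements seen so far; its head is the current kth largest.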
PriorityQueue<Integer> pq = new PriorityQueue<>(arr.subList(0, k));
for (int i = k; i < arr.size(); i++)
{
if (arr.get(i) > pq.peek())
{
pq.poll();
pq.add(arr.get(i));
}
}
return pq.peek();
}
public static void main(String[] args)
{
List<Integer> arr = Arrays.asList(5,32,123,43,5,6);
int k = 4;
System.out.printf("%d'th largest array element is %d",k,findKthLargest(arr, k));
}
}
|
package ch.unizh.ini.jaer.chip.multicamera;
/**
*
* @author Gemma
*
* MultiDAVIS240CCameraChip.java
*
*/
import net.sf.jaer.Description;
import net.sf.jaer.hardwareinterface.HardwareInterface;
import eu.seebetter.ini.chips.davis.DAVIS240C;
import eu.seebetter.ini.chips.davis.Davis240Config;
import java.awt.Point;
import net.sf.jaer.DevelopmentStatus;
import net.sf.jaer.chip.Chip;
import net.sf.jaer.graphics.AEFrameChipRenderer;
import net.sf.jaer.graphics.TwoCamera3DDisplayMethod;
import net.sf.jaer.graphics.MultiViewMultiCamera;
@Description("A multi Davis retina each on it's own USB interface with merged and presumably aligned fields of view")
@DevelopmentStatus(DevelopmentStatus.Status.Experimental)
public class MultiDAVIS240CCameraChip extends MultiDavisCameraChip {
/** Creates a new instance of MultiDAVIS240CCameraChip */
public MultiDAVIS240CCameraChip() {
super();
DAVIS240C chip=new DAVIS240C();
setCameraChip(chip);
setSizeX(chip.WIDTH_PIXELS);
setSizeY(chip.HEIGHT_PIXELS);
setADCMax(chip.MAX_ADC);
setApsFirstPixelReadOut(new Point(0, chip.getSizeY() - 1));
setApsLastPixelReadOut(new Point(chip.getSizeX() - 1, 0));
setRenderer(new AEFrameChipRenderer(chip));
setDefaultPreferencesFile("biasgenSettings/Davis240bc/MultiDAVIS240CCameraChip.xml");
setBiasgen(new Biasgen(this));
}
public MultiDAVIS240CCameraChip(final HardwareInterface hardwareInterface) {
this();
setHardwareInterface(hardwareInterface);
}
/**
* A biasgen for this multicamera combination of DAVIS 240 C. The biases are simultaneously controlled.
* @author tobi
*/
public class Biasgen extends Davis240Config {
/** Creates a new instance of Biasgen for DAVIS 240 C with a given hardware interface.
* @param chip the chip whose hardware interface is used
*/
public Biasgen(final Chip chip) {
super(chip);
// this.setCaptureFramesEnabled(false);
this.setDisplayFrames(false);
this.setImuEnabled(false);
this.setDisplayImu(false);
setName("MultiDAVIS240CCameraChip");
}
}
}
|
package com.cloud.upgrade.dao;
import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.log4j.Logger;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.utils.component.Inject;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
public class Upgrade227to228 implements DbUpgrade {
final static Logger s_logger = Logger.getLogger(Upgrade227to228.class);
@Inject
protected SnapshotDao _snapshotDao;
@Inject
protected HostDao _hostDao;
@Inject
protected DataCenterDao _dcDao;
@Inject
protected DiskOfferingDao _diskOfferingDao;
@Override
public String[] getUpgradableVersionRange() {
return new String[] { "2.2.6", "2.2.7"};
}
@Override
public String getUpgradedVersion() {
return "2.2.8";
}
@Override
public boolean supportsRollingUpgrade() {
return true;
}
@Override
public File[] getPrepareScripts() {
String script = Script.findScript("", "db/schema-227to228.sql");
if (script == null) {
throw new CloudRuntimeException("Unable to find db/schema-227to228.sql");
}
return new File[] { new File(script) };
}
@Override
public void performDataMigration(Connection conn) {
try {
PreparedStatement pstmt = conn.prepareStatement("select id from data_center");
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
long dcId = rs.getLong(1);
pstmt = conn.prepareStatement("select id from host where data_center_id=? and type='SecondaryStorage'");
pstmt.setLong(1, dcId);
ResultSet rs1 = pstmt.executeQuery();
if (rs1.next()) {
long secHostId = rs1.getLong(1);
pstmt = conn.prepareStatement("update snapshot set sechost_id=? where data_center_id=?");
pstmt.setLong(1, secHostId);
pstmt.setLong(2, dcId);
pstmt.executeUpdate();
}
}
pstmt = conn.prepareStatement("update disk_offering set disk_size = disk_size * 1024 * 1024 where disk_size <= 2 * 1024 * 1024 and disk_size != 0");
pstmt.executeUpdate();
} catch (SQLException e) {
s_logger.error("Failed to DB migration for multiple secondary storages", e);
throw new CloudRuntimeException("Failed to DB migration for multiple secondary storages", e);
}
updateDomainLevelNetworks(conn);
dropKeysIfExist(conn);
}
@Override
public File[] getCleanupScripts() {
return null;
}
private void updateDomainLevelNetworks(Connection conn) {
s_logger.debug("Updating domain level specific networks...");
try {
PreparedStatement pstmt = conn.prepareStatement("SELECT n.id FROM networks n, network_offerings o WHERE n.shared=1 AND o.system_only=0 AND o.id=n.network_offering_id");
ResultSet rs = pstmt.executeQuery();
ArrayList<Object[]> networks = new ArrayList<Object[]>();
while (rs.next()) {
Object[] network = new Object[10];
network[0] = rs.getLong(1); // networkId
networks.add(network);
}
rs.close();
pstmt.close();
for (Object[] network : networks) {
Long networkId = (Long) network[0];
pstmt = conn.prepareStatement("SELECT * from domain_network_ref where network_id=?");
pstmt.setLong(1, networkId);
rs = pstmt.executeQuery();
if (rs.next()) {
s_logger.debug("Setting network id=" + networkId + " as domain specific shared network");
pstmt = conn.prepareStatement("UPDATE networks set is_domain_specific=1 where id=?");
pstmt.setLong(1, networkId);
pstmt.executeUpdate();
}
rs.close();
pstmt.close();
}
s_logger.debug("Successfully updated domain level specific networks");
} catch (SQLException e) {
s_logger.error("Failed to set domain specific shared networks due to ", e);
throw new CloudRuntimeException("Failed to set domain specific shared networks due to ", e);
}
}
private void dropKeysIfExist(Connection conn) {
HashMap<String, List<String>> indexes = new HashMap<String, List<String>>();
// domain router table
List<String> keys = new ArrayList<String>();
keys.add("unique_name");
indexes.put("network_offerings", keys);
s_logger.debug("Dropping keys that don't exist in 2.2.8 version of the DB...");
// drop indexes now
for (String tableName : indexes.keySet()) {
DbUpgradeUtils.dropKeysIfExist(conn, tableName, indexes.get(tableName), false);
}
}
}
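/*
 * A standalone sketch, not part of the upgrade class above, showing the 1-based
 * parameter indexing that java.sql.PreparedStatement expects, together with
 * try-with-resources cleanup. The method name is an illustrative assumption;
 * the SQL mirrors the snapshot update performed in performDataMigration.
 */
class PreparedStatementIndexingSketch {
    static void updateSnapshotHost(Connection conn, long secHostId, long dcId) throws SQLException {
        String sql = "update snapshot set sechost_id=? where data_center_id=?";
        try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
            pstmt.setLong(1, secHostId); // binds the first '?' placeholder
            pstmt.setLong(2, dcId);      // binds the second '?' placeholder
            pstmt.executeUpdate();
        }
    }
}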
|
package ch.unizh.ini.jaer.projects.davis.frames;
import java.awt.image.BufferedImage;
import java.awt.image.WritableRaster;
import java.beans.PropertyChangeEvent;
import java.io.IOException;
import java.nio.FloatBuffer;
import java.util.logging.Level;
import java.util.logging.Logger;
import net.sf.jaer.Description;
import net.sf.jaer.DevelopmentStatus;
import net.sf.jaer.chip.AEChip;
import net.sf.jaer.event.EventPacket;
import net.sf.jaer.eventio.AEInputStream;
import net.sf.jaer.eventprocessing.FilterChain;
import net.sf.jaer.graphics.AEFrameChipRenderer;
import net.sf.jaer.util.avioutput.AbstractAviWriter;
import eu.seebetter.ini.chips.DavisChip;
/**
* Writes AVI file from DAVIS APS frames, using ApsFrameExtractor. The AVI file
* has pixel values 0-255 coming from ApsFrameExtractor
* displayed frames, which are offset and scaled by it.
*
* @author Tobi
*/
@Description("Writes AVI file from DAVIS APS frames, using ApsFrameExtractor. This AVI has spatial resolution the same as the AEChip (not the display resolution)")
@DevelopmentStatus(DevelopmentStatus.Status.Stable)
public class DavisFrameAviWriter extends AbstractAviWriter {
// ApsFrameExtractor apsFrameExtractor;
DavisChip apsDvsChip = null;
private boolean rendererPropertyChangeListenerAdded=false;
private AEFrameChipRenderer renderer=null;
public DavisFrameAviWriter(AEChip chip) {
super(chip);
FilterChain filterChain = new FilterChain(chip);
// apsFrameExtractor = new ApsFrameExtractor(chip);
// apsFrameExtractor.getSupport().addPropertyChangeListener(this);
// filterChain.add(apsFrameExtractor);
setEnclosedFilterChain(filterChain);
setPropertyTooltip("saveAVIFileAs", "Opens the output file. The AVI file is in RAW format with pixel values 0-255 coming from ApsFrameExtractor displayed frames, which are offset and scaled by it.");
setPropertyTooltip("closeFile", "Closes the output file if it is open.");
setPropertyTooltip("writeTimecodeFile", "writes a file alongside AVI file (with suffix " + TIMECODE_SUFFIX + ") that maps from AVI frame to AER timestamp for that frame (the frame end timestamp)");
setPropertyTooltip("closeOnRewind", "closes recording on rewind event, to allow unattended operation");
}
@Override
synchronized public EventPacket<?> filterPacket(EventPacket<?> in) {
super.filterPacket(in); // adds propertychangelistener for rewind event
if(!rendererPropertyChangeListenerAdded){
rendererPropertyChangeListenerAdded=true;
renderer=(AEFrameChipRenderer)chip.getRenderer();
renderer.getSupport().addPropertyChangeListener(this);
}
apsDvsChip = (DavisChip) chip;
// apsFrameExtractor.filterPacket(in);
return in;
}
// @Override
// public void resetFilter() {
// apsFrameExtractor.resetFilter();
// @Override
// public void initFilter() {
// apsFrameExtractor.initFilter();
@Override
public void propertyChange(PropertyChangeEvent evt) {
if ((aviOutputStream != null) && AEFrameChipRenderer.EVENT_NEW_FRAME_AVAILBLE.equals(evt.getPropertyName())) {
FloatBuffer frame = ((AEFrameChipRenderer)chip.getRenderer()).getPixBuffer();
BufferedImage bufferedImage = new BufferedImage(chip.getSizeX(), chip.getSizeY(), BufferedImage.TYPE_3BYTE_BGR);
WritableRaster raster = bufferedImage.getRaster();
int sx = chip.getSizeX(), sy = chip.getSizeY();
for (int y = 0; y < sy; y++) {
for (int x = 0; x < sx; x++) {
int k = renderer.getPixMapIndex(x, y);
// bufferedImage.setRGB(x, y, (int) (frame[k] * 1024));
int yy = sy - y - 1; // must flip image vertically according to Java convention that image starts at upper left
int r = (int) (frame.get(k) * 255);
int g = (int) (frame.get(k + 1) * 255);
int b = (int) (frame.get(k + 2) * 255);
raster.setSample(x, yy, 0, r);
raster.setSample(x, yy, 1, g);
raster.setSample(x, yy, 2, b);
}
}
try {
aviOutputStream.writeFrame(bufferedImage);
int timestamp = renderer.getTimestampFrameEnd();
writeTimecode(timestamp);
incrementFramecountAndMaybeCloseOutput();
} catch (IOException ex) {
Logger.getLogger(DavisFrameAviWriter.class.getName()).log(Level.SEVERE, null, ex);
}
} else if (AEInputStream.EVENT_REWIND.equals(evt.getPropertyName())) {
doCloseFile();
}
}
}
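/*
 * A standalone sketch, not part of the filter above, of the vertical flip used in
 * propertyChange(): the renderer's pixel buffer starts at the lower left, while
 * java.awt.image.BufferedImage puts row 0 at the top, so source row y is written
 * to raster row sy - y - 1. The 4x2 size and constant pixel value are assumptions.
 */
class VerticalFlipSketch {
    public static void main(String[] args) {
        int sx = 4, sy = 2;
        BufferedImage image = new BufferedImage(sx, sy, BufferedImage.TYPE_3BYTE_BGR);
        WritableRaster raster = image.getRaster();
        for (int y = 0; y < sy; y++) {
            int yy = sy - y - 1; // bottom source row lands in the last raster row
            for (int x = 0; x < sx; x++) {
                raster.setSample(x, yy, 0, 255); // write the red band only, for illustration
            }
        }
        System.out.println("flipped image is " + image.getWidth() + "x" + image.getHeight());
    }
}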
|
package com.bfemmer.fgslogs.infrastructure;
import com.bfemmer.fgslogs.model.Formation;
import com.bfemmer.fgslogs.modelview.LookupCodes;
import com.bfemmer.fgslogs.model.Mineral;
import com.bfemmer.fgslogs.model.Sample;
import com.bfemmer.fgslogs.model.WellLog;
import com.bfemmer.fgslogs.model.WellLogRepository;
import com.bfemmer.fgslogs.modelview.FormationView;
import com.bfemmer.fgslogs.modelview.SampleView;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
*
* @author bfemmer
*/
public class DatFileWellLogRepository implements WellLogRepository {
// Record types
private static final String START_OF_WELL_RECORD = "1";
private static final String OWNER_DRILLER_RECORD = "2";
private static final String WORKED_BY_RECORD = "3";
private static final String FORMATION_RECORD = "4";
private static final String SAMPLE_RECORD = "6";
private static final String END_OF_WELL_RECORD = "9";
// Record 1
private static final int WELL_NUMBER_BEGIN_INDEX = 1;
private static final int WELL_NUMBER_END_INDEX = 6;
private static final int BOT_DEPTH_BEGIN_INDEX = 6;
private static final int BOT_DEPTH_END_INDEX = 11;
private static final int COUNTY_BEGIN_INDEX = 11;
private static final int COUNTY_END_INDEX = 13;
private static final int TOWNSHIP_BEGIN_INDEX = 13;
private static final int TOWNSHIP_END_INDEX = 16;
private static final int RANGE_BEGIN_INDEX = 16;
private static final int RANGE_END_INDEX = 19;
private static final int SECTION_BEGIN_INDEX = 19;
private static final int SECTION_END_INDEX = 21;
private static final int QTRSECTION_BEGIN_INDEX = 21;
private static final int QTRSECTION_END_INDEX = 23;
private static final int LAT_DEG_BEGIN_INDEX = 23;
private static final int LAT_DEG_END_INDEX = 25;
private static final int LAT_MIN_BEGIN_INDEX = 25;
private static final int LAT_MIN_END_INDEX = 27;
private static final int LAT_SEC_BEGIN_INDEX = 27;
private static final int LAT_SEC_END_INDEX = 29;
private static final int LNG_DEG_BEGIN_INDEX = 29;
private static final int LNG_DEG_END_INDEX = 31;
private static final int LNG_MIN_BEGIN_INDEX = 31;
private static final int LNG_MIN_END_INDEX = 33;
private static final int LNG_SEC_BEGIN_INDEX = 33;
private static final int LNG_SEC_END_INDEX = 35;
private static final int TOTAL_DEPTH_BEGIN_INDEX = 35;
private static final int TOTAL_DEPTH_END_INDEX = 40;
private static final int ELEVATION_BEGIN_INDEX = 40;
private static final int ELEVATION_END_INDEX = 43;
private static final int SAMPLES_BEGIN_INDEX = 43;
private static final int SAMPLES_END_INDEX = 46;
private static final int FROM_DEPTH_BEGIN_INDEX = 46;
private static final int FROM_DEPTH_END_INDEX = 51;
private static final int TO_DEPTH_BEGIN_INDEX = 51;
private static final int TO_DEPTH_END_INDEX = 56;
private static final int YEAR_BEGIN_INDEX = 56;
private static final int YEAR_END_INDEX = 58;
private static final int MONTH_BEGIN_INDEX = 58;
private static final int MONTH_END_INDEX = 60;
private static final int DAY_BEGIN_INDEX = 60;
private static final int DAY_END_INDEX = 62;
// Record 4
private static final int FM_FROM_DEPTH_BEGIN_INDEX = 6;
private static final int FM_FROM_DEPTH_END_INDEX = 11;
private static final int FM_TO_DEPTH_BEGIN_INDEX = 13;
private static final int FM_TO_DEPTH_END_INDEX = 18;
private static final int FM_CODE_BEGIN_INDEX = 20;
private static final int FM_CODE_END_INDEX = 27;
private static final int DATA_OFFSET_INDEX = 6;
private static final int SAMPLE_TO_DEPTH_BEGIN_INDEX = 6;
private static final int SAMPLE_TO_DEPTH_END_INDEX = 11;
private static final int ROCK_TYPE_BEGIN_INDEX = 12;
private static final int ROCK_TYPE_END_INDEX = 13;
private static final int SAMPLE_COMMENT_BEGIN_INDEX = 13;
private static final int ROCK_COLOR_BEGIN_INDEX = 13;
private static final int ROCK_COLOR_END_INDEX = 15;
private static final int ROCK_COLOR2_BEGIN_INDEX = 15;
private static final int ROCK_COLOR2_END_INDEX = 17;
private static final int POROSITY_BEGIN_INDEX = 17;
private static final int POROSITY_END_INDEX = 19;
private static final int POROSITY_CODE_BEGIN_INDEX = 19;
private static final int POROSITY_CODE_END_INDEX = 22;
private static final int GRAIN_SIZE_BEGIN_INDEX = 22;
private static final int GRAIN_SIZE_END_INDEX = 23;
private static final int RANGE_MIN_BEGIN_INDEX = 23;
private static final int RANGE_MIN_END_INDEX = 24;
private static final int RANGE_MAX_BEGIN_INDEX = 24;
private static final int RANGE_MAX_END_INDEX = 25;
private static final int ROUNDNESS_MIN_BEGIN_INDEX = 25;
private static final int ROUNDNESS_MIN_END_INDEX = 26;
private static final int ROUNDNESS_MAX_BEGIN_INDEX = 26;
private static final int ROUNDNESS_MAX_END_INDEX = 27;
private static final int SPHERICITY_BEGIN_INDEX = 27;
private static final int SPHERICITY_END_INDEX = 28;
private static final int GRAIN_TYPE_BEGIN_INDEX = 28;
private static final int GRAIN_TYPE_END_INDEX = 31;
private static final int LIMESTONE_GRAIN_SIZE_BEGIN_INDEX = 33;
private static final int LIMESTONE_GRAIN_SIZE_END_INDEX = 34;
private static final int LIMESTONE_RANGE_MIN_BEGIN_INDEX = 34;
private static final int LIMESTONE_RANGE_MIN_END_INDEX = 35;
private static final int LIMESTONE_RANGE_MAX_BEGIN_INDEX = 35;
private static final int LIMESTONE_RANGE_MAX_END_INDEX = 36;
private static final int ALTERATION_BEGIN_INDEX = 36;
private static final int ALTERATION_END_INDEX = 37;
private static final int CRYSTALLINITY_BEGIN_INDEX = 37;
private static final int CRYSTALLINITY_END_INDEX = 38;
private static final int DOLOMITE_GRAIN_SIZE_BEGIN_INDEX = 38;
private static final int DOLOMITE_GRAIN_SIZE_END_INDEX = 39;
private static final int DOLOMITE_RANGE_MIN_BEGIN_INDEX = 39;
private static final int DOLOMITE_RANGE_MIN_END_INDEX = 40;
private static final int DOLOMITE_RANGE_MAX_BEGIN_INDEX = 40;
private static final int DOLOMITE_RANGE_MAX_END_INDEX = 41;
private static final int INDURATION_BEGIN_INDEX = 41;
private static final int INDURATION_END_INDEX = 42;
private static final int CEMENTS_BEGIN_INDEX = 42;
private static final int CEMENTS_END_INDEX = 45;
private static final int SEDIMENTARY_BEGIN_INDEX = 45;
private static final int SEDIMENTARY_END_INDEX = 49;
private static final int MINERALS_BEGIN_INDEX = 50;
private static final int MINERALS_END_INDEX = 62;
private static final int FEATURES_BEGIN_INDEX = 62;
private static final int FEATURES_END_INDEX = 67;
private static final int FOSSILS_INDEX = 67;
private String filename;
private WellLog wellLog;
private List<Sample> samples;
private List<WellLog> wellLogs;
public DatFileWellLogRepository(String filename) {
this.filename = filename;
wellLog = new WellLog();
samples = new ArrayList<>();
wellLogs = new ArrayList<>();
}
@Override
public int getWellLogCount() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public int getWellLogCountByCounty(String countyCode) {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public List<WellLog> getAllWellLogs() {
Reader fileReader;
try {
fileReader = new FileReader(filename);
parseWellLogs(fileReader);
return wellLogs;
} catch (FileNotFoundException ex) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, ex);
}
return wellLogs;
}
@Override
public List<String> getAllWellNumbers() {
List<String> wellNumbers = new ArrayList<>();
wellLogs.stream().map((log) ->
String.valueOf(log.getWellLogNumber())).forEach((wellNumber) -> {
wellNumbers.add(wellNumber);
});
return wellNumbers;
}
@Override
public List<String> getWellNumbersByCounty(String countyCode) {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public List<WellLog> getWellLogByWellNumber(int wellNumber) {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public WellLog getWellLogById(String id) {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
public void parseWellLogs(Reader reader) {
BufferedReader bufferedReader = new BufferedReader(reader);
String recordId;
String currentLine;
try {
while ((currentLine = bufferedReader.readLine()) != null) {
// Check for empty lines
if (currentLine.length() == 0) continue;
// Get record ID from string
recordId = currentLine.substring(0, 1);
switch (recordId) {
case START_OF_WELL_RECORD:
wellLog = new WellLog();
samples = new ArrayList<>();
wellLog.setId(UUID.randomUUID().toString());
parseHeaderIntoWellLog (currentLine);
parseLocationIntoWellLog (currentLine);
parseDateIntoWellLog (currentLine);
break;
case OWNER_DRILLER_RECORD:
parseOwnerDrillerIntoWellLog (currentLine);
break;
case WORKED_BY_RECORD:
parseWorkedByIntoWellLog (currentLine);
break;
case FORMATION_RECORD:
parseFormationIntoWellLog (currentLine);
break;
case SAMPLE_RECORD:
parseSampleIntoList (currentLine);
break;
case END_OF_WELL_RECORD:
default:
for (Sample sample : samples) {
SampleView sampleView = new SampleView(
wellLog.getWellLogNumber(), sample);
wellLog.getSamples().add(sampleView);
}
// Override the sample count from the header with
// the actual count in the list
wellLog.setSampleCount(samples.size());
wellLogs.add(wellLog);
break;
}
}
} catch (IOException e) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, e);
} catch (Exception exception) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, exception);
}finally {
try {
bufferedReader.close();
} catch (IOException ex) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
private void parseHeaderIntoWellLog(String line) {
String temp;
try {
temp = line.substring(WELL_NUMBER_BEGIN_INDEX, WELL_NUMBER_END_INDEX).trim();
// Well number can be "000NA" where N/A was assigned for the log
// Catch this exception here to log the condition, but continue
try{
wellLog.setWellLogNumber(Integer.valueOf(temp));
}
catch(NumberFormatException nfe) {
wellLog.setWellLogNumber(0);
}
System.out.println("
temp = line.substring(BOT_DEPTH_BEGIN_INDEX, BOT_DEPTH_END_INDEX).trim();
if (temp.length() > 0) wellLog.setBottomSampleDepth(Double.valueOf(temp));
temp = line.substring(TOTAL_DEPTH_BEGIN_INDEX, TOTAL_DEPTH_END_INDEX).trim();
if (temp.length() > 0) wellLog.setTotalDepth(Double.valueOf(temp));
if (line.length() >= ELEVATION_END_INDEX) {
temp = line.substring(ELEVATION_BEGIN_INDEX, ELEVATION_END_INDEX).trim();
if (!temp.isEmpty()) wellLog.setElevation(Double.valueOf(temp));
}
// Note: will use the actual count of samples found instead of
// the value found at SAMPLES_BEGIN_INDEX because many times
// the value is either omitted or is incorrect. The setSampleCount
// call is now made at the bottom of parseWellLogs().
// if (line.length() >= SAMPLES_END_INDEX) {
// temp = line.substring(SAMPLES_BEGIN_INDEX, SAMPLES_END_INDEX).trim();
// if (temp.length() > 0) wellLog.setSampleCount(Integer.valueOf(temp));
if (line.length() >= FROM_DEPTH_END_INDEX) {
temp = line.substring(FROM_DEPTH_BEGIN_INDEX, FROM_DEPTH_END_INDEX).replaceAll("\\s+","");
try {
if (temp.length() > 0) wellLog.setFromDepth(Double.valueOf(temp));
}
catch(NumberFormatException nfe) {
wellLog.setFromDepth(0);
}
}
if (line.length() >= TO_DEPTH_END_INDEX) {
temp = line.substring(TO_DEPTH_BEGIN_INDEX, TO_DEPTH_END_INDEX).replaceAll("\\s+","");
try {
if (temp.length() > 0) wellLog.setToDepth(Double.valueOf(temp));
}
catch(NumberFormatException nfe) {
wellLog.setToDepth(0);
}
}
}
catch (Exception e) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, e);
}
}
private void parseLocationIntoWellLog(String line) {
try {
wellLog.getLocation().setCountyCode(
line.substring(COUNTY_BEGIN_INDEX, COUNTY_END_INDEX).trim());
// Township, Range, Section, and Quartersection
parseTRSQ(line);
// Lat/Long and Degrees, Minutes, and Seconds
parseDMS(line);
} catch (Exception e) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, e);
}
}
private void parseTRSQ(String line) {
String temp;
// Prepend "0" to township if length is too short
temp = line.substring(TOWNSHIP_BEGIN_INDEX, TOWNSHIP_END_INDEX).trim();
if (temp.length() < 3) temp = "0" + temp;
wellLog.getLocation().setTownship(temp);
// Prepend "0" to range if length is too short
temp = line.substring(RANGE_BEGIN_INDEX, RANGE_END_INDEX).trim();
if (temp.length() < 3) temp = "0" + temp;
wellLog.getLocation().setRange(temp);
temp = line.substring(SECTION_BEGIN_INDEX, SECTION_END_INDEX).trim();
if (temp.length() > 0)
wellLog.getLocation().setSection(Integer.valueOf(temp));
wellLog.getLocation().setQuarterSection(
line.substring(QTRSECTION_BEGIN_INDEX, QTRSECTION_END_INDEX).trim());
}
private void parseDMS(String line) {
int latDegrees = 0;
int latMinutes = 0;
int latSeconds = 0;
int lngDegrees = 0;
int lngMinutes = 0;
int lngSeconds = 0;
double lat;
double lng;
String temp;
temp = line.substring(LAT_DEG_BEGIN_INDEX, LAT_DEG_END_INDEX).trim();
if (temp.length() > 0) latDegrees = Integer.valueOf(temp);
wellLog.getLocation().setLatDegrees(latDegrees);
temp = line.substring(LAT_MIN_BEGIN_INDEX, LAT_MIN_END_INDEX).trim();
if (temp.length() > 0) latMinutes = Integer.valueOf(temp);
wellLog.getLocation().setLatMinutes(latMinutes);
temp = line.substring(LAT_SEC_BEGIN_INDEX, LAT_SEC_END_INDEX).trim();
if (temp.length() > 0) latSeconds = Integer.valueOf(temp);
wellLog.getLocation().setLatSeconds(latSeconds);
temp = line.substring(LNG_DEG_BEGIN_INDEX, LNG_DEG_END_INDEX).trim();
if (temp.length() > 0) lngDegrees = Integer.valueOf(temp);
wellLog.getLocation().setLngDegrees(lngDegrees);
temp = line.substring(LNG_MIN_BEGIN_INDEX, LNG_MIN_END_INDEX).trim();
if (temp.length() > 0) lngMinutes = Integer.valueOf(temp);
wellLog.getLocation().setLngMinutes(lngMinutes);
temp = line.substring(LNG_SEC_BEGIN_INDEX, LNG_SEC_END_INDEX).trim();
if (temp.length() > 0) lngSeconds = Integer.valueOf(temp);
wellLog.getLocation().setLngSeconds(lngSeconds);
lat = fromDMStoDegrees(latDegrees, latMinutes, latSeconds);
lng = fromDMStoDegrees(lngDegrees, lngMinutes, lngSeconds);
wellLog.getLocation().setLatitude(lat);
wellLog.getLocation().setLongitude(lng);
}
private void parseDateIntoWellLog(String line) {
String temp;
try {
if (line.length() > (YEAR_BEGIN_INDEX + 1)) {
temp = line.substring(YEAR_BEGIN_INDEX, YEAR_END_INDEX).trim();
if (temp.length() > 0)
wellLog.setCompletionDateYear(Integer.valueOf(temp));
if (line.length() > (MONTH_BEGIN_INDEX + 1)) {
temp = line.substring(MONTH_BEGIN_INDEX, MONTH_END_INDEX).trim();
if (temp.length() > 0)
wellLog.setCompletionDateMonth(Integer.valueOf(temp));
if (line.length() > (DAY_BEGIN_INDEX + 1)) {
temp = line.substring(DAY_BEGIN_INDEX, DAY_END_INDEX).trim();
if (temp.length() > 0)
wellLog.setCompletionDateDay(Integer.valueOf(temp));
}
}
}
} catch (Exception e) {
Logger.getLogger(DatFileWellLogRepository.class.getName()).log(Level.SEVERE, null, e);
}
}
private void parseOwnerDrillerIntoWellLog(String line) {
String ownerDriller = wellLog.getOwnerDriller();
if (ownerDriller.length() > 0)
wellLog.setOwnerDriller(
ownerDriller + " " + line.substring(DATA_OFFSET_INDEX));
else
wellLog.setOwnerDriller(line.substring(DATA_OFFSET_INDEX));
}
private void parseWorkedByIntoWellLog(String line) {
String workedBy = wellLog.getWorkedBy();
if (workedBy.length() > 0)
wellLog.setWorkedBy(
workedBy + " " + line.substring(DATA_OFFSET_INDEX));
else
wellLog.setWorkedBy(line.substring(DATA_OFFSET_INDEX));
}
private void parseFormationIntoWellLog(String line) {
FormationView previousFormation = null;
Formation formation = new Formation();
double lastToDepth = 0;
String temp;
// If only one depth is in the record, then the depth is the "to" depth
if (line.length() <= (FM_FROM_DEPTH_END_INDEX + 1)) {
// Get previous formation (if it exists) to get the last
// "to depth" value and overwrite "lastToDepth" local variable.
if (!wellLog.getFormations().isEmpty()) {
previousFormation = wellLog.getFormations()
.get(wellLog.getFormations().size() - 1);
lastToDepth = previousFormation.getToDepth();
}
// From depth
formation.setFromDepth(lastToDepth);
// To depth
temp = line.substring(FM_FROM_DEPTH_BEGIN_INDEX, FM_FROM_DEPTH_END_INDEX).trim();
if (temp.length() > 0) formation.setToDepth(Double.valueOf(temp));
// Formation code
if (previousFormation != null) {
    formation.setFormationCode(previousFormation.getFormationCode());
}
}
else {
// From depth
temp = line.substring(FM_FROM_DEPTH_BEGIN_INDEX, FM_FROM_DEPTH_END_INDEX).trim();
if (temp.length() > 0) formation.setFromDepth(Double.valueOf(temp));
// To depth
temp = line.substring(FM_TO_DEPTH_BEGIN_INDEX, FM_TO_DEPTH_END_INDEX).trim();
if (temp.length() > 0) formation.setToDepth(Double.valueOf(temp));
// Formation code
if (line.length() > FM_TO_DEPTH_END_INDEX) {
formation.setFormationCode(
line.substring(FM_CODE_BEGIN_INDEX, FM_CODE_END_INDEX).trim());
}
}
// Add formation to well log
FormationView formationView = new FormationView(formation);
wellLog.getFormations().add(formationView);
}
private void parseSampleIntoList(String line) {
String temp;
double lastToDepth = 0;
Sample lastSample = null;
Sample sample = new Sample();
String lastComment;
// Get last sample (if it exists) to get the last "to depth" value and
// overwrite "lastToDepth" local variable.
if (!samples.isEmpty()) {
lastSample = samples
.get(samples.size() - 1);
lastToDepth = lastSample.getToDepth();
}
// TODO check corrupt file for dade.dat
if (line.length() < ROCK_TYPE_END_INDEX) return;
// Evaluate rock type field to determine what type of sample record this is
temp = line.substring(ROCK_TYPE_BEGIN_INDEX, ROCK_TYPE_END_INDEX).trim();
sample.setRockTypeCode(temp);
if (sample.getRockTypeCode().equals("Z")) {
if (lastSample != null) {
lastComment = lastSample.getComments();
String comment = line.substring(SAMPLE_COMMENT_BEGIN_INDEX).trim().toLowerCase();
if (comment.length() > 0) {
if (lastComment == null || lastComment.length() == 0)
lastComment = capitalizeFirstLetter(comment);
else
lastComment += " " + comment;
lastSample.setComments(lastComment);
}
}
}
else {
sample.setFromDepth(lastToDepth);
// We have enough information to add the sample to
// the well log at this point
samples.add(sample);
parseSampleDepth(line);
// "N" = no sample record, "V" = same as previous sample record
// Some records don't have any data after the rock type code.
// Check for length and return if we don't have any more data.
if (sample.getRockTypeCode().equals("N") ||
sample.getRockTypeCode().equals("V")) {
return;
}
parseRockColors(line);
parsePorosity(line);
switch (sample.getRockTypeCode()) {
case "S": // Sand
parseSand(line);
break;
case "F": // Shell Bed
parseShellBed(line);
break;
case "L": // Limestone
case "E": // Calcarenite
case "M": // Calcilutite
parseLimestone(line);
break;
case "D": // Dolostone
parseDolomite(line);
break;
case "C": // Clay
parseClay(line);
break;
}
}
}
private void parseSand(String line) {
parseGrainSize(line);
parseGrainRange(line);
parseRoundness(line);
parseSphericity(line);
parseInduration(line);
parseMineralsIntoSample(line);
parseFeaturesIntoSample(line);
parseFossilsIntoSample(line);
}
private void parseShellBed(String line) {
parseInduration(line);
parseMineralsIntoSample(line);
parseFeaturesIntoSample(line);
parseFossilsIntoSample(line);
}
private void parseLimestone(String line) {
parseGrainTypes(line);
parseLimestoneGrainSize(line);
parseLimestoneGrainRange(line);
parseInduration(line);
parseCementTypesIntoSample(line);
parseMineralsIntoSample(line);
parseFeaturesIntoSample(line);
parseFossilsIntoSample(line);
}
private void parseDolomite(String line) {
parseAlterationCode(line);
parseCrystallinityCode(line);
parseDolomiteGrainSize(line);
parseDolomiteGrainRange(line);
parseInduration(line);
parseCementTypesIntoSample(line);
parseMineralsIntoSample(line);
parseFeaturesIntoSample(line);
parseFossilsIntoSample(line);
}
private void parseClay(String line) {
parseInduration(line);
parseMineralsIntoSample(line);
parseFeaturesIntoSample(line);
parseFossilsIntoSample(line);
}
private void parseSampleDepth(String line) {
String temp;
double fraction = 0;
if (line.length() < SAMPLE_TO_DEPTH_END_INDEX) return;
// To Depth
temp = line.substring(SAMPLE_TO_DEPTH_BEGIN_INDEX, SAMPLE_TO_DEPTH_END_INDEX).trim();
if (temp.length() == 0) return;
double depth = 0;
try{
depth = Double.valueOf(temp);
}
catch(Exception e) {
System.out.println("DatFileRepo.parseSampleDepth: " + temp);
}
// Fraction of To Depth
temp = line.substring(SAMPLE_TO_DEPTH_END_INDEX, SAMPLE_TO_DEPTH_END_INDEX + 1).trim();
if (!temp.equals(""))
fraction = Double.valueOf(temp) * 0.1;
samples.get(
samples.size() - 1).setToDepth(depth + fraction);
}
private void parseRockColors(String line) {
String temp;
if (line.length() < ROCK_COLOR_END_INDEX) return;
temp = line.substring(ROCK_COLOR_BEGIN_INDEX, ROCK_COLOR_END_INDEX).trim();
samples.get(
samples.size() - 1).setRockColorCodeMin(temp);
if (line.length() < ROCK_COLOR2_END_INDEX) return;
temp = line.substring(ROCK_COLOR2_BEGIN_INDEX, ROCK_COLOR2_END_INDEX).trim();
samples.get(
samples.size() - 1).setRockColorCodeMax(temp);
}
private void parsePorosity(String line) {
int endIndex;
String temp;
String[] dataArray;
if (line.length() < POROSITY_END_INDEX) return;
temp = line.substring(POROSITY_BEGIN_INDEX, POROSITY_END_INDEX).trim();
if ("".equals(temp)) temp = "-1";
try {
samples.get(samples.size() - 1)
.setPorosity(Integer.valueOf(temp));
} catch (NumberFormatException e) {
System.out.println("DatFileRepo.parsePorosity-NumberFormat: " + temp);
} catch (Exception e) {
System.out.println("DatFileRepo.parsePorosity-Other:" + temp);
}
// Validate length
if (line.length() <= POROSITY_CODE_BEGIN_INDEX) return;
// Establish end index
endIndex = POROSITY_CODE_END_INDEX;
if (line.length() > POROSITY_CODE_BEGIN_INDEX &&
line.length() <= POROSITY_CODE_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(POROSITY_CODE_BEGIN_INDEX, endIndex).trim();
dataArray = temp.split("");
// Validate code before storing it
LookupCodes codes = new LookupCodes();
for (String data : dataArray) {
if (codes.getPorosityCodeMap().containsKey(data)) {
samples.get(samples.size() - 1)
.getPorosityCodes().add(data);
}
}
}
private void parseGrainSize(String line) {
String temp;
if (line.length() < GRAIN_SIZE_END_INDEX) return;
temp = line.substring(GRAIN_SIZE_BEGIN_INDEX, GRAIN_SIZE_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainSizeCode(temp);
}
private void parseGrainRange(String line) {
String temp;
if (line.length() < RANGE_MIN_END_INDEX) return;
temp = line.substring(RANGE_MIN_BEGIN_INDEX, RANGE_MIN_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMin(temp);
if (line.length() < RANGE_MAX_END_INDEX) return;
temp = line.substring(RANGE_MAX_BEGIN_INDEX, RANGE_MAX_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMax(temp);
}
private void parseRoundness(String line) {
String temp;
if (line.length() < ROUNDNESS_MIN_END_INDEX) return;
temp = line.substring(ROUNDNESS_MIN_BEGIN_INDEX, ROUNDNESS_MIN_END_INDEX).trim();
samples.get(samples.size() - 1)
.setRoundnessCodeMin(temp);
if (line.length() < ROUNDNESS_MAX_END_INDEX) return;
temp = line.substring(ROUNDNESS_MAX_BEGIN_INDEX, ROUNDNESS_MAX_END_INDEX).trim();
samples.get(samples.size() - 1)
.setRoundnessCodeMax(temp);
}
private void parseSphericity(String line) {
String temp;
if (line.length() < SPHERICITY_END_INDEX) return;
temp= line.substring(SPHERICITY_BEGIN_INDEX, SPHERICITY_END_INDEX).trim();
samples.get(samples.size() - 1)
.setSphericityCode(temp);
}
private void parseGrainTypes(String line) {
int endIndex;
String temp;
String[] dataArray;
// Validate length
if (line.length() <= GRAIN_TYPE_BEGIN_INDEX) return;
// Establish end index
endIndex = GRAIN_TYPE_END_INDEX;
if (line.length() > GRAIN_TYPE_BEGIN_INDEX &&
line.length() <= GRAIN_TYPE_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(GRAIN_TYPE_BEGIN_INDEX, endIndex).trim();
dataArray = temp.split("");
samples.get(samples.size() - 1)
.getGrainTypeCodes().addAll(Arrays.asList(dataArray));
}
private void parseLimestoneGrainSize(String line) {
String temp;
if (line.length() < LIMESTONE_GRAIN_SIZE_END_INDEX) return;
temp = line.substring(LIMESTONE_GRAIN_SIZE_BEGIN_INDEX,
LIMESTONE_GRAIN_SIZE_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainSizeCode(temp);
}
private void parseLimestoneGrainRange(String line) {
String temp;
if (line.length() < LIMESTONE_RANGE_MIN_END_INDEX) return;
temp = line.substring(LIMESTONE_RANGE_MIN_BEGIN_INDEX,
LIMESTONE_RANGE_MIN_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMin(temp);
if (line.length() < LIMESTONE_RANGE_MAX_END_INDEX) return;
temp = line.substring(LIMESTONE_RANGE_MAX_BEGIN_INDEX,
LIMESTONE_RANGE_MAX_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMax(temp);
}
private void parseAlterationCode(String line) {
String temp;
if (line.length() < ALTERATION_END_INDEX) return;
temp = line.substring(ALTERATION_BEGIN_INDEX,
ALTERATION_END_INDEX).trim();
samples.get(samples.size() - 1)
.setAlterationCode(temp);
}
private void parseCrystallinityCode(String line) {
String temp;
if (line.length() < CRYSTALLINITY_END_INDEX) return;
temp = line.substring(CRYSTALLINITY_BEGIN_INDEX,
CRYSTALLINITY_END_INDEX).trim();
samples.get(samples.size() - 1)
.setCrystallinityCode(temp);
}
private void parseDolomiteGrainSize(String line) {
String temp;
if (line.length() < DOLOMITE_GRAIN_SIZE_END_INDEX) return;
temp = line.substring(DOLOMITE_GRAIN_SIZE_BEGIN_INDEX,
DOLOMITE_GRAIN_SIZE_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainSizeCode(temp);
}
private void parseDolomiteGrainRange(String line) {
String temp;
if (line.length() < DOLOMITE_RANGE_MIN_END_INDEX) return;
temp = line.substring(DOLOMITE_RANGE_MIN_BEGIN_INDEX,
DOLOMITE_RANGE_MIN_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMin(temp);
if (line.length() < DOLOMITE_RANGE_MAX_END_INDEX) return;
temp = line.substring(DOLOMITE_RANGE_MAX_BEGIN_INDEX,
DOLOMITE_RANGE_MAX_END_INDEX).trim();
samples.get(samples.size() - 1)
.setGrainRangeCodeMax(temp);
}
private void parseCementTypesIntoSample(String line) {
int endIndex;
String temp;
String[] dataArray;
// Validate length
if (line.length() <= CEMENTS_BEGIN_INDEX) return;
// Establish end index
endIndex = CEMENTS_END_INDEX;
if (line.length() > CEMENTS_BEGIN_INDEX &&
line.length() <= CEMENTS_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(CEMENTS_BEGIN_INDEX, endIndex).trim();
dataArray = temp.split("");
samples.get(samples.size() - 1)
.getCementTypeCodes().addAll(Arrays.asList(dataArray));
}
private void parseInduration(String line) {
String temp;
if (line.length() < INDURATION_END_INDEX) return;
temp = line.substring(INDURATION_BEGIN_INDEX, INDURATION_END_INDEX).trim();
samples.get(samples.size() - 1)
.setIndurationCode(temp);
}
private void parseSedimentaryIntoSample(String line) {
int endIndex;
String temp;
String[] dataArray;
// Validate length
if (line.length() <= SEDIMENTARY_BEGIN_INDEX) return;
// Establish end index
endIndex = SEDIMENTARY_END_INDEX;
if (line.length() > SEDIMENTARY_BEGIN_INDEX &&
line.length() <= SEDIMENTARY_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(SEDIMENTARY_BEGIN_INDEX, endIndex).trim();
dataArray = temp.split("");
samples.get(samples.size() - 1)
.getSedimentaryCodes().addAll(Arrays.asList(dataArray));
}
private void parseMineralsIntoSample(String line) {
int endIndex;
String temp;
String[] dataArray;
// If no data exists, return
if (line.length() <= MINERALS_BEGIN_INDEX) return;
// Establish the end index. This is a variable length index as it
// depends on how many mineral codes were recorded for the sample.
endIndex = MINERALS_END_INDEX;
if (line.length() > MINERALS_BEGIN_INDEX &&
line.length() <= MINERALS_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(MINERALS_BEGIN_INDEX, endIndex).trim();
// If no data exists, return
if (temp.length() == 0) return;
// Split string into an array of individual minerals and percentages
dataArray = parseMineralCodesIntoArray(temp);
for (String value : dataArray) {
Mineral mineral = new Mineral();
mineral.setCode(value.substring(0, 1));
// Check for no data found after the mineral code
if (value.length() == 1) {
continue;
}
temp = value.substring(1, value.length()).trim();
try {
if (temp.length() > 0) {
if (temp.contains("T")) {
// The value "T" was seen in the bay county file
// for well log 17337
mineral.setPercentage(1);
}
else if (temp.contains("<")) {
// The value "<" was seen in the charlott county file
// for well log 10232. This symbol implies less than
// then the next number. For simplification, just
// round this number up to the value.
temp = temp.substring(1, temp.length()).trim();
mineral.setPercentage(Double.valueOf(temp));
}
else {
mineral.setPercentage(Double.valueOf(temp));
}
}
} catch (NumberFormatException e) {
System.out.println("DatFileRepo.parseMineralsIntoSample: Sample = " + samples.size());
System.out.println("DatFileRepo.parseMineralsIntoSample: Code = " + mineral.getCode());
System.out.println("DatFileRepo.parseMineralsIntoSample:" + temp + " - " + value);
}
samples.get(samples.size() - 1)
.getAccessoryMineralCodes().add(mineral);
}
}
private String[] parseMineralCodesIntoArray(String line) {
List<String> codes = new ArrayList<>();
String[] dataArray;
int index = 0;
while (index < line.length()) {
codes.add(line.substring(index, Math.min(index + 3,line.length())));
index=index + 3;
}
dataArray = new String[codes.size()];
codes.toArray(dataArray);
return dataArray;
}
private void parseFeaturesIntoSample(String line) {
int endIndex;
String temp;
String[] dataArray;
// Validate length
if (line.length() <= FEATURES_BEGIN_INDEX) return;
// Establish end index
endIndex = FEATURES_END_INDEX;
if (line.length() > FEATURES_BEGIN_INDEX &&
line.length() <= FEATURES_END_INDEX) {
endIndex = line.length();
}
temp = line.substring(FEATURES_BEGIN_INDEX, endIndex).trim();
dataArray = temp.split("");
samples.get(samples.size() - 1)
.getOtherFeatureCodes().addAll(Arrays.asList(dataArray));
}
private void parseFossilsIntoSample(String line) {
String temp;
String[] dataArray;
if (line.length() <= FOSSILS_INDEX) return;
temp = line.substring(FOSSILS_INDEX).trim();
dataArray = temp.split("");
samples.get(samples.size() - 1)
.getFossilCodes().addAll(Arrays.asList(dataArray));
}
private String capitalizeFirstLetter(String original){
if(original.length() == 0)
return original;
return original.substring(0, 1).toUpperCase() + original.substring(1);
}
private double fromDMStoDegrees(int degrees, int minutes, int seconds) {
double decimal;
double fraction;
if ((degrees == 0) &&
    (minutes == 0) &&
    (seconds == 0)) return 0;
fraction = minutes / 60.0 + seconds / 3600.0;
if (degrees < 0) {
decimal = (double)degrees - fraction;
} else {
decimal = (double)degrees + fraction;
}
return decimal;
}
}
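/*
 * A standalone sketch, not part of the repository class above, walking through the
 * conversion performed by fromDMStoDegrees: decimal = degrees + minutes/60 + seconds/3600,
 * with the fraction subtracted instead when degrees is negative. The sample values
 * below are illustrative assumptions.
 */
class DmsConversionSketch {
    public static void main(String[] args) {
        int degrees = 28, minutes = 30, seconds = 36;
        double fraction = minutes / 60.0 + seconds / 3600.0; // 0.5 + 0.01
        double decimal = degrees < 0 ? degrees - fraction : degrees + fraction;
        System.out.println(decimal); // prints 28.51
    }
}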
|
package org.spine3.util;
/**
* Provides information about the environment (current platform used, etc).
*
* @author Alexander Litus
*/
public class Environment {
/**
* The key of the Google AppEngine runtime version system property.
*/
public static final String APP_ENGINE_RUNTIME_VERSION_KEY = "com.google.appengine.runtime.version";
@SuppressWarnings("AccessOfSystemProperties")
private static final String appEngineRuntimeVersion = System.getProperty(APP_ENGINE_RUNTIME_VERSION_KEY);
protected Environment() {}
/**
* Returns the singleton instance.
*/
public static Environment getInstance() {
return Singleton.INSTANCE.value;
}
/**
* Returns {@code true} if the code is running on the Google AppEngine,
* {@code false} otherwise.
*/
public boolean isAppEngine() {
final boolean isVersionPresent = (appEngineRuntimeVersion != null) &&
!appEngineRuntimeVersion.isEmpty();
return isVersionPresent;
}
/**
* Returns the current Google AppEngine version
* retrieved by the key {@link Environment#APP_ENGINE_RUNTIME_VERSION_KEY}
* or {@code null} if the program is not running on AppEngine.
*/
public String getAppEngineVersion() {
return appEngineRuntimeVersion;
}
private enum Singleton {
INSTANCE;
@SuppressWarnings("NonSerializableFieldInSerializableClass")
private final Environment value = new Environment();
}
}
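/*
 * A minimal usage sketch, not part of the original class, showing how callers can
 * query the Environment singleton. Outside of Google AppEngine the runtime version
 * system property is absent, so isAppEngine() returns false and the version is null.
 * The class name below is an illustrative assumption.
 */
class EnvironmentUsageSketch {
    public static void main(String[] args) {
        Environment environment = Environment.getInstance();
        System.out.println("On AppEngine: " + environment.isAppEngine());
        System.out.println("Runtime version: " + environment.getAppEngineVersion());
    }
}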
|
/* vim: set et ts=4 sts=4 sw=4 tw=72 : */
package uk.ac.cam.cl.git;
import uk.ac.cam.cl.git.api.Commit;
import uk.ac.cam.cl.git.api.EmptyDirectoryExpectedException;
import uk.ac.cam.cl.git.configuration.ConfigurationLoader;
import uk.ac.cam.cl.git.interfaces.*;
import org.eclipse.jgit.treewalk.*;
import org.eclipse.jgit.revwalk.*;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.LinkedList;
import java.io.File;
import java.io.IOException;
import java.io.ByteArrayOutputStream;
import com.fasterxml.jackson.annotation.*;
import org.mongojack.Id;
import org.mongojack.ObjectId;
import uk.ac.cam.cl.dtg.segue.git.*;
/**
* @author Isaac Dunn <ird28@cam.ac.uk>
* @author Kovacsics Robert <rmk35@cam.ac.uk>
* @version 0.1
*/
public class Repository implements TesterInterface
{
private final String parent;
private final String parent_hidden;
private final String repo;
private final String host =
ConfigurationLoader.getConfig().getRepoHost();
private final String user =
ConfigurationLoader.getConfig().getRepoUser();
private final String owner;
private final List<String> read_write;
private final List<String> read_only;
private String _id;
String workingCommit;
GitDb handle;
@JsonIgnore
public Repository
( String name
, String crsid
, List<String> read_write
, List<String> read_only
)
{
this.parent = null;
this.parent_hidden = null;
this.repo = name;
this.read_write = read_write;
this.read_only = read_only;
owner = crsid;
}
@JsonIgnore
public Repository
( String name
, String crsid
, List<String> read_write
, List<String> read_only
, String parent
, String parent_hidden
)
{
this.parent = parent;
this.parent_hidden = parent_hidden;
this.repo = name;
this.read_write = read_write;
this.read_only = read_only;
owner = crsid;
}
@JsonCreator
Repository
( @JsonProperty("name") String name
, @JsonProperty("owner") String crsid
, @JsonProperty("rw") List<String> read_write
, @JsonProperty("r") List<String> read_only
, @JsonProperty("parent") String parent
, @JsonProperty("parent_hidden") String parent_hidden
, @JsonProperty("_id") String id
)
{
this.parent = parent;
this.parent_hidden = parent_hidden;
this.repo = name;
this.read_write = read_write;
this.read_only = read_only;
owner = crsid;
this._id = id;
}
public void addReadOnlyUser(String user) {
read_only.add(user);
}
/**
* Clones repository to specified directory, if it can get
* repository access.
* <p>
* It tries to access the repository with the id_rsa key.
*
* @param directory The empty directory to which you want to clone
* into.
*
* @throws EmptyDirectoryExpectedException The File given is either
* not a directory or not empty.
* @throws IOException Something went wrong (typically not
* recoverable).
*/
@JsonIgnore
public void cloneTo(File directory) throws EmptyDirectoryExpectedException, IOException
{
if (directory.listFiles() == null || directory.listFiles().length != 0)
throw new EmptyDirectoryExpectedException();
handle = new GitDb(
/* src */ getRepoPath()
,/* dest */ directory
,/* bare */ false
,/* branch */ "master"
,/* remote */ "origin"
,/* privateKeyPath */ ConfigurationLoader.getConfig()
.getSshPrivateKeyFile());
if (workingCommit == null)
workingCommit = handle.getHeadSha();
}
/**
* Clones parent repository's contents. Use only once, on the
* initialisation of the repository.
*
* @throws IOException Something went wrong during cloning, perhaps
* the directory was not empty?
*/
public void cloneParent() throws IOException
{
GitDb tmp = new GitDb(ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + parent + ".git");
/* Now parent is cloned at tmpDir, push back to child */
if (tmp.listCommits().size() > 0)
{
try
{
tmp.pushTo(ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + repo + ".git");
}
catch (PushFailedException e)
{
throw new IOException(
"Failed to push parent repo onto child. "
+ "You will get an empty repository.\n", e);
}
}
}
/**
* Opens a local repository.
*
* @param repoName The name of the repository to open.
* @throws IOException Something went wrong (typically not
* recoverable).
*/
public void openLocal(String repoName) throws IOException
{
System.out.println("Opening : " + ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + repoName + ".git");
handle = new GitDb(ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + repoName + ".git");
if (workingCommit == null)
workingCommit = handle.getHeadSha();
}
/**
* Opens a local repository with the given commit.
*
* @param repoName The name of the repository to open.
* @param commitID Identification for a commit.
* @throws IOException Something went wrong (typically not
* recoverable).
*/
public void openLocal(String repoName, String commitID)
throws IOException
{
System.out.println("Opening : " + ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + repoName + ".git");
handle = new GitDb(ConfigurationLoader.getConfig()
.getGitoliteHome() + "/repositories/" + repoName + ".git");
workingCommit = commitID;
}
/**
* List the commits in the repository.
*/
public List<Commit> listCommits()
{
List<Commit> rtn = new LinkedList<Commit>();
for (RevCommit commit : handle.listCommits())
{
rtn.add(new Commit
(commit.getName()
, commit.getAuthorIdent().getName()
, commit.getFullMessage()
, new Date(commit.getCommitTime() * 1000L)
));
}
return rtn;
}
/**
* Resolves a commit reference such as HEAD or a branch name such as
* master to a SHA.
*
* @param name The name to resolve.
* @return The SHA of the latest matching commit.
*/
public String resolveCommit(String name)
{
return handle.getSha(name);
}
/* Test team stores test results now. This is a placeholder to say
* why code was removed.
*/
/**
* Returns a list of the source files in the repository.
* <p>
* Repository must first be cloned using cloneTo!
*
* @return The list of source files
*/
@JsonIgnore
public Collection<String> getSources() throws IOException
{
List<String> rtn = new LinkedList<String>();
if (handle == null)
throw new NullPointerException("Repository unset. Did you clone it?");
if (workingCommit == null)
/* Only way above is true if we have an empty repository, as
* everything that sets handle also sets workingCommit (to
* null, if we have an empty repository).
*/
return null;
TreeWalk tw = handle.getTreeWalk(workingCommit);
while (tw.next())
rtn.add(tw.getPathString());
return rtn;
}
/**
* Returns a list of the source files in the repository, filtered
* according to filter.
* <p>
* Repository must first be cloned using cloneTo!
*
* @param filter Filter files according to this
* @return The list of source files
*
* @throws IOException Something went wrong (typically not
* recoverable).
*/
@JsonIgnore
public Collection<String> getSources(String filter) throws IOException
{
List<String> rtn = new LinkedList<String>();
if (handle == null)
throw new NullPointerException("Repository unset. Did you clone it?");
if (workingCommit == null)
/* Only way above is true if we have an empty repository, as
* everything that sets handle also sets workingCommit (to
* null, if we have an empty repository).
*/
return null;
TreeWalk tw = handle.getTreeWalk(workingCommit, filter);
while (tw.next())
rtn.add(tw.getPathString());
return rtn;
}
/**
* Outputs the content of the file.
*
* @param filePath Full path of the file
* @return Contents of the file asked for or null if file is not
* found.
*/
@JsonIgnore
public String getFile(String filePath) throws IOException
{
if (handle == null)
throw new NullPointerException("Repository unset. Did you clone it?");
if (workingCommit == null)
/* Only way above is true if we have an empty repository, as
* everything that sets handle also sets workingCommit (to
* null, if we have an empty repository).
*/
return null;
ByteArrayOutputStream rtn = handle.getFileByCommitSHA(workingCommit, filePath);
if (rtn == null)
return null;
return rtn.toString();
}
/**
* Gets the CRSID of the repository owner
*
* @return CRSID of the repository owner
*/
@JsonProperty("owner")
public String getCRSID() { return owner; }
/**
* Gets the name of the repository
*
* @return Name of the repository
*/
@JsonProperty("name")
public String getName() { return this.repo; }
/**
* Gets the read & write capable users or groups, for
* serialization.
*
* @return Read & write capable users or groups.
*/
@JsonProperty("rw")
public List<String> getReadWrite() { return this.read_write; }
/**
* Gets the read only capable users or groups, for serialization.
*
* @return Read only capable users or groups.
*/
@JsonProperty("r")
public List<String> getReadOnly() { return this.read_only; }
/**
* Gets the parent of this repository, or null if this repository
* has no parent.
*
* @return Parent or null
*/
@JsonProperty("parent")
public String parent() { return this.parent; }
/**
* For storing this in MongoDB
*
* @return ID of this object in MongoDB
*/
@Id @ObjectId
protected String get_id() { return this._id; }
/**
* For storing this in MongoDB
*
* @param id Object ID to set
*/
@Id @ObjectId
protected void set_id(String id) { _id = id; }
/**
* Gets the hidden parent of this repository, or null if this
* repository has no hidden parent.
*
* @return Hidden parent or null
*/
@JsonProperty("parent_hidden")
public String parent_hidden() { return this.parent_hidden; }
/**
* Gives the string representation of the repository, to be used in
* conjunction with Gitolite.
*
* Please do not change this method without appropriately updating
* rebuildDatabaseFromGitolite in ConfigDatabase!
*
* @return Gitolite config compatible string representation of the
* repository
*/
@Override
@JsonIgnore
public String toString()
{
StringBuilder strb = new StringBuilder("repo ");
strb.append(repo);
strb.append("\n");
strb.append(" RW =");
strb.append(" " + owner);
/* Usernames or groups */
if (read_write != null)
for ( String name : read_write)
strb.append(" " + name);
strb.append("\n");
if (read_only != null && read_only.size() > 0)
{
strb.append(" R =");
/* Usernames or groups */
for ( String name : read_only)
strb.append(" " + name);
strb.append("\n");
}
strb.append("# "); // To allow the rebuilding of the database
strb.append(parent + " "); // from the gitolite config file
strb.append(parent_hidden + "\n");
return strb.toString();
}
/**
* Gets the parent repository path as an SSH URI.
*
* @return Parent repository path as an SSH URI.
*/
@JsonIgnore
public String getParentRepoPath()
{
return "ssh://" + user + "@" + host + "/" + parent + ".git";
}
/**
* Gets the repository path as an SSH URI.
*
* @return Repository path as an SSH URI.
*/
@JsonIgnore
public String getRepoPath()
{
return "ssh://" + user + "@" + host + "/" + repo + ".git";
}
/**
* Checks if this repository exists on disk.
*
* @return True if repository exists.
*/
@JsonIgnore
public boolean repoExists()
{
return new File(ConfigurationLoader.getConfig()
.getGitoliteHome()
+ "/repositories/" + repo + ".git").exists();
}
}
|
package com.haxademic.demo.draw.filters.shaders;
import com.haxademic.core.app.P;
import com.haxademic.core.app.PAppletHax;
import com.haxademic.core.constants.AppSettings;
import com.haxademic.core.draw.context.DrawUtil;
import com.haxademic.core.draw.filters.shaders.CubicLensDistortionFilter;
import com.haxademic.core.draw.filters.shaders.CubicLensDistortionFilterOscillate;
import com.haxademic.core.draw.filters.shaders.GodRays;
import com.haxademic.core.draw.shaders.textures.TextureShader;
import com.haxademic.core.file.DemoAssets;
import com.haxademic.core.file.FileUtil;
import com.haxademic.core.hardware.shared.InputTrigger;
import processing.opengl.PShader;
public class Demo_AllFilters_WIP
extends PAppletHax { public static void main(String args[]) { PAppletHax.main(Thread.currentThread().getStackTrace()[1].getClassName()); }
protected TextureShader texture;
protected PShader customShader;
protected InputTrigger triggerPrev = new InputTrigger(new char[]{'1'});
protected InputTrigger triggerNext = new InputTrigger(new char[]{'2'});
protected InputTrigger triggerToggle = new InputTrigger(new char[]{' '});
protected void overridePropsFile() {
p.appConfig.setProperty( AppSettings.WIDTH, 800 );
p.appConfig.setProperty( AppSettings.HEIGHT, 800 );
}
public void setupFirstFrame() {
texture = new TextureShader(TextureShader.bw_clouds);
// customShader = p.loadShader(FileUtil.getFile("shaders/filters/godrays.glsl"));
}
protected float mouseXPercent() {
return P.map(p.mouseX, 0, p.width, 0, 1f);
}
protected float mouseYPercent() {
return P.map(p.mouseY, 0, p.height, 0, 1f);
}
protected float oscillate() {
return P.sin(p.frameCount * 0.01f);
}
public void drawApp() {
// cycle
// if(triggerPrev.triggered()) textureIndex = (textureIndex > 0) ? textureIndex - 1 : textures.length - 1;
// if(triggerNext.triggered()) textureIndex = (textureIndex < textures.length - 1) ? textureIndex + 1 : 0;
// update cur shader & draw to screen
texture.updateTime();
p.filter(texture.shader());
// draw some text to make sure we know orientation
p.fill(127 + 127f * P.sin(p.frameCount * 0.01f));
p.textFont(DemoAssets.fontBitlow(100));
p.textAlign(P.CENTER, P.CENTER);
p.text("FILTER", 0, 0, p.width, p.height);
// apply some filters
DrawUtil.setTextureRepeat(p, true);
// CubicLensDistortionFilter.instance(p).setAmplitude(P.map(p.mouseX, 0, p.width, -20f, 20f));
// CubicLensDistortionFilter.instance(p).setSeparation(P.map(p.mouseY, 0, p.height, 0, 3f));
// CubicLensDistortionFilter.instance(p).applyTo(p);
// // old distortion
// CubicLensDistortionFilterOscillate.instance(p).setTime(p.frameCount * 0.01f);
// CubicLensDistortionFilterOscillate.instance(p).applyTo(p);
// godrays
if(triggerToggle.on() == false) {
GodRays.instance(p).setDecay(mouseXPercent());
GodRays.instance(p).setWeight(mouseYPercent());
GodRays.instance(p).setRotation(oscillate());
GodRays.instance(p).setAmp(0.5f + 0.5f * oscillate());
GodRays.instance(p).applyTo(p);
}
// custom filter
// customShader.set("rotation", P.sin(p.frameCount * 0.01f));
if(customShader != null && triggerToggle.on() == false) p.filter(customShader);
}
}
|
package com.swabunga.spell.examples;
import com.swabunga.spell.engine.*;
import com.swabunga.spell.swing.JTextComponentSpellChecker;
import javax.swing.*;
import javax.swing.text.JTextComponent;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.File;
/** This class shows an example of how to use the spell checking capability
* on a JTextComponent.
*
* @author Robert Gustavsson (robert@lindesign.se)
*/
public class JTextComponentSpellCheckExample extends JFrame {
private static final String englishDictionary = "dict/english.0";
private static final String englishPhonetic = "dict/phonet.en";
protected SpellDictionary dictionary;
JTextComponent text = null;
JButton spell = null;
public JTextComponentSpellCheckExample(String dictPath, String phonetPath) {
File dictFile=null,
phonetFile=null;
// INIT DICTIONARY
if(dictPath==null)
dictFile=new File(englishDictionary);
else
dictFile=new File(dictPath);
if(phonetPath!=null)
phonetFile=new File(phonetPath);
try {
dictionary = new SpellDictionaryHashMap(dictFile, phonetFile);
//dictionary = new SpellDictionaryDisk(dictFile, phonetFile, true);
//dictionary = new GenericSpellDictionary(dictFile, phonetFile);
} catch (Exception ex) {
ex.printStackTrace();
}
// INIT GUI
setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
addWindowListener(new WindowAdapter() {
public void windowClosed(WindowEvent e) {
System.exit(0);
}
});
initGUI();
pack();
}
private void initGUI() {
Container frame = getContentPane();
GridBagLayout gridbag = new GridBagLayout();
GridBagConstraints c = new GridBagConstraints();
frame.setLayout(gridbag);
c.anchor = GridBagConstraints.CENTER;
c.fill = GridBagConstraints.BOTH;
c.insets = new Insets(5, 5, 5, 5);
c.weightx = 1.0;
c.weighty = 1.0;
text = new JTextArea(10, 40);
addToFrame(frame, text, gridbag, c, 0, 0, 1, 1);
spell = new JButton("spell");
spell.addActionListener(new ButtonListener());
addToFrame(frame, spell, gridbag, c, 0, 1, 1, 1);
}
// Helps build gridbaglayout.
private void addToFrame(Container f, Component c, GridBagLayout gbl, GridBagConstraints gbc, int x, int y, int w, int h) {
gbc.gridx = x;
gbc.gridy = y;
gbc.gridwidth = w;
gbc.gridheight = h;
gbl.setConstraints(c, gbc);
f.add(c);
}
public static void main(String[] args) {
String dictPath=null,
phonetPath=null;
if(args.length>0)
dictPath=args[0];
if(args.length>1)
phonetPath=args[1];
JTextComponentSpellCheckExample d = new JTextComponentSpellCheckExample(dictPath,phonetPath);
d.setVisible(true);
}
// INNER CLASSES
private class ButtonListener implements ActionListener {
public void actionPerformed(ActionEvent e) {
Thread t = new SpellThread();
t.start();
}
}
private class SpellThread extends Thread {
public void run() {
try {
JTextComponentSpellChecker sc = new JTextComponentSpellChecker(dictionary);
sc.spellCheck(text);
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
}
|
package de.mrapp.android.adapter.list.filterable;
import java.util.Collection;
import java.util.regex.Pattern;
/**
* Defines the interface, which must be implemented by all listeners that should be
* notified when the underlying data of a {@link ListAdapter} has been filtered.
*
* @param <DataType>
* The type of the observed adapter's underlying data
*
* @author Michael Rapp
*
* @since 1.0.0
*/
public interface ListFilterListener<DataType> {
/**
* The method, which is invoked, when the adapter's items have been filtered
* by using a regular expression.
*
* @param regularExpression
* The regular expression, which has been used, as an instance of
* the class {@link Pattern}. The regular expression may not be
* null
* @param filter
* The filter, which has been used to apply the regular
* expression on the single items, as an instance of the type
* {@link Filter} or null, if the items' implementations of the
* interface {@link Filterable} have been used instead
* @param filteredItems
* A collection, which contains the adapter's filtered items, as
* an instance of the type {@link Collection} or an empty
* collection, if the adapter does not contain any items
*/
void onApplyFilter(Pattern regularExpression, Filter<DataType> filter,
Collection<DataType> filteredItems);
/**
* The method, which is invoked, when a filter has been reset.
*
* @param regularExpression
* The regular expression used by the filter, which has been
* reset, as an instance of the class {@link Pattern}. The
* regular expression may not be null
* @param filteredItems
* A collection, which contains the adapter's filtered items, as
* an instance of the type {@link Collection} or an empty
* collection, if the adapter does not contain any items
*/
void onResetFilter(Pattern regularExpression,
Collection<DataType> filteredItems);
}
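/*
 * A minimal sketch, not part of the library, of how this listener might be implemented.
 * The class name and the lastFilteredCount field are hypothetical; only the two methods
 * declared above are assumed, and Filter is expected to resolve from this package.
 */
class CountingListFilterListener<DataType> implements ListFilterListener<DataType> {
/** The number of items that survived the most recent filter operation. */
private int lastFilteredCount;
@Override
public void onApplyFilter(final Pattern regularExpression, final Filter<DataType> filter,
final Collection<DataType> filteredItems) {
// filter may be null if the items' Filterable implementations have been used instead
lastFilteredCount = filteredItems.size();
}
@Override
public void onResetFilter(final Pattern regularExpression,
final Collection<DataType> filteredItems) {
// filteredItems contains the items remaining after the filter has been reset
lastFilteredCount = filteredItems.size();
}
public int getLastFilteredCount() {
return lastFilteredCount;
}
}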
|
package dr.evomodel.treedatalikelihood;
/**
* BeagleDataLikelihoodDelegate
*
* A DataLikelihoodDelegate that uses BEAGLE
*
* @author Andrew Rambaut
* @author Marc Suchard
* @version $Id$
*/
import beagle.*;
import dr.evomodel.branchmodel.BranchModel;
import dr.evomodel.siteratemodel.SiteRateModel;
import dr.evomodel.treelikelihood.*;
import dr.evolution.alignment.PatternList;
import dr.evolution.alignment.UncertainSiteList;
import dr.evolution.datatype.DataType;
import dr.evolution.tree.Tree;
import dr.evolution.util.TaxonList;
import dr.evomodel.tipstatesmodel.TipStatesModel;
import dr.inference.model.AbstractModel;
import dr.inference.model.Model;
import dr.inference.model.Parameter;
import dr.inference.model.Variable;
import dr.util.Citable;
import dr.util.Citation;
import dr.util.CommonCitations;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.logging.Logger;
public class BeagleDataLikelihoodDelegate extends AbstractModel implements DataLikelihoodDelegate, Citable {
// This property is a comma-delimited list of resource numbers (0 == CPU) to
// allocate each BEAGLE instance to. If less than the number of instances then
// will wrap around.
private static final String RESOURCE_ORDER_PROPERTY = "beagle.resource.order";
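// Illustrative example (not from the original source): -Dbeagle.resource.order=1,0 places the
// first instance on resource 1, the second on resource 0, the third on resource 1, and so on.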
private static final String PREFERRED_FLAGS_PROPERTY = "beagle.preferred.flags";
private static final String REQUIRED_FLAGS_PROPERTY = "beagle.required.flags";
private static final String SCALING_PROPERTY = "beagle.scaling";
private static final String RESCALE_FREQUENCY_PROPERTY = "beagle.rescale";
private static final String DELAY_SCALING_PROPERTY = "beagle.delay.scaling";
private static final String EXTRA_BUFFER_COUNT_PROPERTY = "beagle.extra.buffer.count";
private static final String FORCE_VECTORIZATION = "beagle.force.vectorization";
// Which scheme to use if choice not specified (or 'default' is selected):
private static final PartialsRescalingScheme DEFAULT_RESCALING_SCHEME = PartialsRescalingScheme.DYNAMIC;
private static int instanceCount = 0;
private static List<Integer> resourceOrder = null;
private static List<Integer> preferredOrder = null;
private static List<Integer> requiredOrder = null;
private static List<String> scalingOrder = null;
private static List<Integer> extraBufferOrder = null;
// Default frequency for complete recomputation of scaling factors under the 'dynamic' scheme
private static final int RESCALE_FREQUENCY = 100;
private static final int RESCALE_TIMES = 1;
private static final boolean RESCALING_OFF = false; // a debugging switch
private static final boolean DEBUG = false;
/**
*
* @param tree Used for configuration - shouldn't be watched for changes
* @param branchModel Specifies substitution model for each branch
* @param patternList List of patterns
* @param siteRateModel Specifies rates per site
* @param useAmbiguities Whether to respect state ambiguities in data
*/
public BeagleDataLikelihoodDelegate(Tree tree,
PatternList patternList,
BranchModel branchModel,
SiteRateModel siteRateModel,
boolean useAmbiguities,
PartialsRescalingScheme rescalingScheme,
boolean delayRescalingUntilUnderflow) {
super("BeagleDataLikelihoodDelegate");
final Logger logger = Logger.getLogger("dr.evomodel");
logger.info("\nUsing BEAGLE DataLikelihood Delegate");
this.dataType = patternList.getDataType();
patternCount = patternList.getPatternCount();
stateCount = dataType.getStateCount();
// Check for matching state counts
int stateCount2 = branchModel.getRootFrequencyModel().getFrequencyCount();
if (stateCount != stateCount2) {
throw new IllegalArgumentException("Pattern state count (" + stateCount
+ ") does not match substitution model state count (" + stateCount2 + ")");
}
patternWeights = patternList.getPatternWeights();
this.branchModel = branchModel;
addModel(this.branchModel);
this.siteRateModel = siteRateModel;
addModel(this.siteRateModel);
this.categoryCount = this.siteRateModel.getCategoryCount();
nodeCount = tree.getNodeCount();
tipCount = tree.getExternalNodeCount();
internalNodeCount = nodeCount - tipCount;
branchUpdateIndices = new int[nodeCount];
branchLengths = new double[nodeCount];
scaleBufferIndices = new int[internalNodeCount];
storedScaleBufferIndices = new int[internalNodeCount];
operations = new int[internalNodeCount * Beagle.OPERATION_TUPLE_SIZE];
firstRescaleAttempt = true;
try {
int compactPartialsCount = tipCount;
if (useAmbiguities) {
// if we are using ambiguities then we don't use tip partials
compactPartialsCount = 0;
}
// one partials buffer for each tip and two for each internal node (for store restore)
partialBufferHelper = new BufferIndexHelper(nodeCount, tipCount);
// one scaling buffer for each internal node plus an extra for the accumulation, then doubled for store/restore
scaleBufferHelper = new BufferIndexHelper(getScaleBufferCount(), 0);
evolutionaryProcessDelegate = new HomogenousSubstitutionModelDelegate(tree, branchModel);
// Attempt to get the resource order from the System Property
if (resourceOrder == null) {
resourceOrder = parseSystemPropertyIntegerArray(RESOURCE_ORDER_PROPERTY);
}
if (preferredOrder == null) {
preferredOrder = parseSystemPropertyIntegerArray(PREFERRED_FLAGS_PROPERTY);
}
if (requiredOrder == null) {
requiredOrder = parseSystemPropertyIntegerArray(REQUIRED_FLAGS_PROPERTY);
}
if (scalingOrder == null) {
scalingOrder = parseSystemPropertyStringArray(SCALING_PROPERTY);
}
if (extraBufferOrder == null) {
extraBufferOrder = parseSystemPropertyIntegerArray(EXTRA_BUFFER_COUNT_PROPERTY);
}
// first set the rescaling scheme to use from the parser
this.rescalingScheme = rescalingScheme;
this.delayRescalingUntilUnderflow = delayRescalingUntilUnderflow;
int[] resourceList = null;
long preferenceFlags = 0;
long requirementFlags = 0;
if (scalingOrder.size() > 0) {
this.rescalingScheme = PartialsRescalingScheme.parseFromString(
scalingOrder.get(instanceCount % scalingOrder.size()));
}
if (resourceOrder.size() > 0) {
// added the zero on the end so that a CPU is selected if requested resource fails
resourceList = new int[]{resourceOrder.get(instanceCount % resourceOrder.size()), 0};
if (resourceList[0] > 0) {
preferenceFlags |= BeagleFlag.PROCESSOR_GPU.getMask(); // Add preference weight against CPU
}
}
if (preferredOrder.size() > 0) {
preferenceFlags = preferredOrder.get(instanceCount % preferredOrder.size());
}
if (requiredOrder.size() > 0) {
requirementFlags = requiredOrder.get(instanceCount % requiredOrder.size());
}
// Define default behaviour here
if (this.rescalingScheme == PartialsRescalingScheme.DEFAULT) {
//if GPU: the default is dynamic scaling in BEAST
if (resourceList != null && resourceList[0] > 1) {
this.rescalingScheme = DEFAULT_RESCALING_SCHEME;
} else { // if CPU: just run as fast as possible
// this.rescalingScheme = PartialsRescalingScheme.NONE;
// Dynamic should run as fast as none until first underflow
this.rescalingScheme = DEFAULT_RESCALING_SCHEME;
}
}
// to keep behaviour of the delayed scheme (always + delay)...
if (this.rescalingScheme == PartialsRescalingScheme.DELAYED) {
this.delayRescalingUntilUnderflow = true;
this.rescalingScheme = PartialsRescalingScheme.ALWAYS;
}
if (this.rescalingScheme == PartialsRescalingScheme.AUTO) {
preferenceFlags |= BeagleFlag.SCALING_AUTO.getMask();
useAutoScaling = true;
} else {
// preferenceFlags |= BeagleFlag.SCALING_MANUAL.getMask();
}
String r = System.getProperty(RESCALE_FREQUENCY_PROPERTY);
if (r != null) {
rescalingFrequency = Integer.parseInt(r);
if (rescalingFrequency < 1) {
rescalingFrequency = RESCALE_FREQUENCY;
}
}
String d = System.getProperty(DELAY_SCALING_PROPERTY);
if (d != null) {
this.delayRescalingUntilUnderflow = Boolean.parseBoolean(d);
}
if (preferenceFlags == 0 && resourceList == null) { // else determine dataset characteristics
if (stateCount == 4 && patternList.getPatternCount() < 10000) // TODO determine good cut-off
preferenceFlags |= BeagleFlag.PROCESSOR_CPU.getMask();
}
boolean forceVectorization = false;
String vectorizationString = System.getProperty(FORCE_VECTORIZATION);
if (vectorizationString != null) {
forceVectorization = true;
}
if (BeagleFlag.VECTOR_SSE.isSet(preferenceFlags) && (stateCount != 4)
&& !forceVectorization
) {
// @todo SSE doesn't seem to work for larger state spaces so for now we override the
// SSE option.
preferenceFlags &= ~BeagleFlag.VECTOR_SSE.getMask();
preferenceFlags |= BeagleFlag.VECTOR_NONE.getMask();
if (stateCount > 4 && this.rescalingScheme == PartialsRescalingScheme.DYNAMIC) {
this.rescalingScheme = PartialsRescalingScheme.DELAYED;
}
}
if (!BeagleFlag.PRECISION_SINGLE.isSet(preferenceFlags)) {
// if single precision not explicitly set then prefer double
preferenceFlags |= BeagleFlag.PRECISION_DOUBLE.getMask();
}
if (evolutionaryProcessDelegate.canReturnComplexDiagonalization()) {
requirementFlags |= BeagleFlag.EIGEN_COMPLEX.getMask();
}
beagle = BeagleFactory.loadBeagleInstance(
tipCount,
partialBufferHelper.getBufferCount(),
compactPartialsCount,
stateCount,
patternCount,
evolutionaryProcessDelegate.getEigenBufferCount(),
evolutionaryProcessDelegate.getMatrixBufferCount(),
categoryCount,
scaleBufferHelper.getBufferCount(), // Always allocate; they may become necessary
resourceList,
preferenceFlags,
requirementFlags
);
InstanceDetails instanceDetails = beagle.getDetails();
ResourceDetails resourceDetails = null;
if (instanceDetails != null) {
resourceDetails = BeagleFactory.getResourceDetails(instanceDetails.getResourceNumber());
if (resourceDetails != null) {
StringBuilder sb = new StringBuilder(" Using BEAGLE resource ");
sb.append(resourceDetails.getNumber()).append(": ");
sb.append(resourceDetails.getName()).append("\n");
if (resourceDetails.getDescription() != null) {
String[] description = resourceDetails.getDescription().split("\\|");
for (String desc : description) {
if (desc.trim().length() > 0) {
sb.append(" ").append(desc.trim()).append("\n");
}
}
}
sb.append(" with instance flags: ").append(instanceDetails.toString());
logger.info(sb.toString());
} else {
logger.info(" Error retrieving BEAGLE resource for instance: " + instanceDetails.toString());
}
} else {
logger.info(" No external BEAGLE resources available, or resource list/requirements not met, using Java implementation");
}
if (patternList instanceof UncertainSiteList) {
useAmbiguities = true;
}
logger.info(" " + (useAmbiguities ? "Using" : "Ignoring") + " ambiguities in tree likelihood.");
logger.info(" With " + patternList.getPatternCount() + " unique site patterns.");
for (int i = 0; i < tipCount; i++) {
// Find the id of tip i in the patternList
String id = tree.getTaxonId(i);
int index = patternList.getTaxonIndex(id);
if (index == -1) {
throw new TaxonList.MissingTaxonException("Taxon, " + id + ", in tree, " + tree.getId() +
", is not found in patternList, " + patternList.getId());
} else {
if (useAmbiguities) {
setPartials(beagle, patternList, index, i);
} else {
setStates(beagle, patternList, index, i);
}
}
}
beagle.setPatternWeights(patternWeights);
String rescaleMessage = " Using rescaling scheme : " + this.rescalingScheme.getText();
if (this.rescalingScheme == PartialsRescalingScheme.AUTO &&
resourceDetails != null &&
(resourceDetails.getFlags() & BeagleFlag.SCALING_AUTO.getMask()) == 0) {
// If auto scaling in BEAGLE is not supported then do it here
this.rescalingScheme = PartialsRescalingScheme.DYNAMIC;
rescaleMessage = " Auto rescaling not supported in BEAGLE, using : " + this.rescalingScheme.getText();
}
boolean parenthesis = false;
if (this.rescalingScheme == PartialsRescalingScheme.DYNAMIC) {
rescaleMessage += " (rescaling every " + rescalingFrequency + " evaluations";
parenthesis = true;
}
if (this.delayRescalingUntilUnderflow) {
rescaleMessage += (parenthesis ? ", " : "(") + "delay rescaling until first underflow";
parenthesis = true;
}
rescaleMessage += (parenthesis ? ")" : "");
logger.info(rescaleMessage);
if (this.rescalingScheme == PartialsRescalingScheme.DYNAMIC) {
everUnderflowed = false; // If false, BEAST does not rescale until first under-/over-flow.
}
updateSubstitutionModel = true;
updateSiteModel = true;
} catch (TaxonList.MissingTaxonException mte) {
throw new RuntimeException(mte.toString());
}
}
@Override
public TreeTraversal.TraversalType getOptimalTraversalType() {
return TreeTraversal.TraversalType.POST_ORDER;
}
@Override
public int getTraitCount() {
return 1;
}
@Override
public int getTraitDim() {
return patternCount;
}
private static List<Integer> parseSystemPropertyIntegerArray(String propertyName) {
List<Integer> order = new ArrayList<Integer>();
String r = System.getProperty(propertyName);
if (r != null) {
String[] parts = r.split(",");
for (String part : parts) {
try {
int n = Integer.parseInt(part.trim());
order.add(n);
} catch (NumberFormatException nfe) {
System.err.println("Invalid entry '" + part + "' in " + propertyName);
}
}
}
return order;
}
private static List<String> parseSystemPropertyStringArray(String propertyName) {
List<String> order = new ArrayList<String>();
String r = System.getProperty(propertyName);
if (r != null) {
String[] parts = r.split(",");
for (String part : parts) {
// String.trim() cannot throw NumberFormatException, so no try/catch is needed here
order.add(part.trim());
}
}
return order;
}
private int getScaleBufferCount() {
return internalNodeCount + 1;
}
/**
* Sets the partials from a sequence in an alignment.
*
* @param beagle beagle
* @param patternList patternList
* @param sequenceIndex sequenceIndex
* @param nodeIndex nodeIndex
*/
private final void setPartials(Beagle beagle,
PatternList patternList,
int sequenceIndex,
int nodeIndex) {
double[] partials = new double[patternCount * stateCount * categoryCount];
boolean[] stateSet;
int v = 0;
for (int i = 0; i < patternCount; i++) {
if (patternList instanceof UncertainSiteList) {
((UncertainSiteList) patternList).fillPartials(sequenceIndex, i, partials, v);
v += stateCount;
// TODO Add this functionality to SimpleSiteList to avoid if statement here
} else {
int state = patternList.getPatternState(sequenceIndex, i);
stateSet = dataType.getStateSet(state);
for (int j = 0; j < stateCount; j++) {
if (stateSet[j]) {
partials[v] = 1.0;
} else {
partials[v] = 0.0;
}
v++;
}
}
}
// if there is more than one category then replicate the partials for each
int n = patternCount * stateCount;
int k = n;
for (int i = 1; i < categoryCount; i++) {
System.arraycopy(partials, 0, partials, k, n);
k += n;
}
beagle.setPartials(nodeIndex, partials);
}
/**
* Sets the partials from a sequence in an alignment.
*/
private final void setPartials(Beagle beagle,
TipStatesModel tipStatesModel,
int nodeIndex) {
double[] partials = new double[patternCount * stateCount * categoryCount];
tipStatesModel.getTipPartials(nodeIndex, partials);
// if there is more than one category then replicate the partials for each
int n = patternCount * stateCount;
int k = n;
for (int i = 1; i < categoryCount; i++) {
System.arraycopy(partials, 0, partials, k, n);
k += n;
}
beagle.setPartials(nodeIndex, partials);
}
/**
* Sets the partials from a sequence in an alignment.
*
* @param beagle beagle
* @param patternList patternList
* @param sequenceIndex sequenceIndex
* @param nodeIndex nodeIndex
*/
private final void setStates(Beagle beagle,
PatternList patternList,
int sequenceIndex,
int nodeIndex) {
int i;
int[] states = new int[patternCount];
for (i = 0; i < patternCount; i++) {
states[i] = patternList.getPatternState(sequenceIndex, i);
}
beagle.setTipStates(nodeIndex, states);
}
// public void setStates(int tipIndex, int[] states) {
// System.err.println("BTL:setStates");
// beagle.setTipStates(tipIndex, states);
// makeDirty();
// public void getStates(int tipIndex, int[] states) {
// System.err.println("BTL:getStates");
// beagle.getTipStates(tipIndex, states);
/**
* Calculate the log likelihood of the current state.
*
* @return the log likelihood.
*/
@Override
public double calculateLikelihood(List<BranchOperation> branchOperations, List<NodeOperation> nodeOperations, int rootNodeNumber) throws LikelihoodUnderflowException {
//recomputeScaleFactors = false;
if (!this.delayRescalingUntilUnderflow || everUnderflowed) {
if (this.rescalingScheme == PartialsRescalingScheme.ALWAYS || this.rescalingScheme == PartialsRescalingScheme.DELAYED) {
useScaleFactors = true;
recomputeScaleFactors = true;
} else if (this.rescalingScheme == PartialsRescalingScheme.DYNAMIC) {
useScaleFactors = true;
if (rescalingCount > rescalingFrequency) {
if (DEBUG) {
System.out.println("rescalingCount > rescalingFrequency");
}
rescalingCount = 0;
rescalingCountInner = 0;
}
if (rescalingCountInner < RESCALE_TIMES) {
recomputeScaleFactors = true;
rescalingCountInner++;
throw new LikelihoodUnderflowException();
}
rescalingCount++;
}
}
if (RESCALING_OFF) { // a debugging switch
useScaleFactors = false;
recomputeScaleFactors = false;
}
int branchUpdateCount = 0;
for (BranchOperation op : branchOperations) {
branchUpdateIndices[branchUpdateCount] = op.getBranchNumber();
branchLengths[branchUpdateCount] = op.getBranchLength();
branchUpdateCount ++;
}
if (updateSubstitutionModel) { // TODO More efficient to update only the substitution model that changed, instead of all
evolutionaryProcessDelegate.updateSubstitutionModels(beagle, flip);
// we are currently assuming a no-category model...
}
if (updateSiteModel) {
double[] categoryRates = this.siteRateModel.getCategoryRates();
beagle.setCategoryRates(categoryRates);
}
if (branchUpdateCount > 0) {
evolutionaryProcessDelegate.updateTransitionMatrices(
beagle,
branchUpdateIndices,
branchLengths,
branchUpdateCount,
flip);
}
if (flip) {
// Flip all the buffers to be written to first...
for (NodeOperation op : nodeOperations) {
partialBufferHelper.flipOffset(op.getNodeNumber());
}
}
int operationCount = nodeOperations.size();
int k = 0;
for (NodeOperation op : nodeOperations) {
int nodeNum = op.getNodeNumber();
operations[k] = partialBufferHelper.getOffsetIndex(nodeNum);
if (useScaleFactors) {
// get the index of this scaling buffer
int n = nodeNum - tipCount;
if (recomputeScaleFactors) {
// flip the indicator: can take either n or (internalNodeCount + 1) - n
scaleBufferHelper.flipOffset(n);
// store the index
scaleBufferIndices[n] = scaleBufferHelper.getOffsetIndex(n);
operations[k + 1] = scaleBufferIndices[n]; // Write new scaleFactor
operations[k + 2] = Beagle.NONE;
} else {
operations[k + 1] = Beagle.NONE;
operations[k + 2] = scaleBufferIndices[n]; // Read existing scaleFactor
}
} else {
if (useAutoScaling) {
scaleBufferIndices[nodeNum - tipCount] = partialBufferHelper.getOffsetIndex(nodeNum);
}
operations[k + 1] = Beagle.NONE; // Not using scaleFactors
operations[k + 2] = Beagle.NONE;
}
operations[k + 3] = partialBufferHelper.getOffsetIndex(op.getLeftChild()); // source node 1
operations[k + 4] = evolutionaryProcessDelegate.getMatrixIndex(op.getLeftChild()); // source matrix 1
operations[k + 5] = partialBufferHelper.getOffsetIndex(op.getRightChild()); // source node 2
operations[k + 6] = evolutionaryProcessDelegate.getMatrixIndex(op.getRightChild()); // source matrix 2
k += Beagle.OPERATION_TUPLE_SIZE;
}
beagle.updatePartials(operations, operationCount, Beagle.NONE);
int rootIndex = partialBufferHelper.getOffsetIndex(rootNodeNumber);
double[] categoryWeights = this.siteRateModel.getCategoryProportions();
// This should probably explicitly be the state frequencies for the root node...
double[] frequencies = evolutionaryProcessDelegate.getRootStateFrequencies();
int cumulateScaleBufferIndex = Beagle.NONE;
if (useScaleFactors) {
if (recomputeScaleFactors) {
scaleBufferHelper.flipOffset(internalNodeCount);
cumulateScaleBufferIndex = scaleBufferHelper.getOffsetIndex(internalNodeCount);
beagle.resetScaleFactors(cumulateScaleBufferIndex);
beagle.accumulateScaleFactors(scaleBufferIndices, internalNodeCount, cumulateScaleBufferIndex);
} else {
cumulateScaleBufferIndex = scaleBufferHelper.getOffsetIndex(internalNodeCount);
}
} else if (useAutoScaling) {
beagle.accumulateScaleFactors(scaleBufferIndices, internalNodeCount, Beagle.NONE);
}
// these could be set only when they change but store/restore would need to be considered
beagle.setCategoryWeights(0, categoryWeights);
beagle.setStateFrequencies(0, frequencies);
double[] sumLogLikelihoods = new double[1];
if (DEBUG) {
System.out.println("useScaleFactors=" + useScaleFactors + " recomputeScaleFactors=" + recomputeScaleFactors);
}
beagle.calculateRootLogLikelihoods(new int[]{rootIndex}, new int[]{0}, new int[]{0},
new int[]{cumulateScaleBufferIndex}, 1, sumLogLikelihoods);
double logL = sumLogLikelihoods[0];
/*if (DEBUG) {
System.out.println(logL);
if (logL > -90000) {
System.exit(0);
}
}*/
if (Double.isNaN(logL) || Double.isInfinite(logL)) {
if (DEBUG) {
System.out.println("Double.isNaN(logL) || Double.isInfinite(logL)");
}
everUnderflowed = true;
logL = Double.NEGATIVE_INFINITY;
if (firstRescaleAttempt && (delayRescalingUntilUnderflow || rescalingScheme == PartialsRescalingScheme.DELAYED)) {
if (rescalingScheme == PartialsRescalingScheme.DYNAMIC || (rescalingCount == 0)) {
// show a message but only every 1000 rescales
if (rescalingMessageCount % 1000 == 0) {
if (rescalingMessageCount > 0) {
Logger.getLogger("dr.evomodel").info("Underflow calculating likelihood (" + rescalingMessageCount + " messages not shown).");
} else {
Logger.getLogger("dr.evomodel").info("Underflow calculating likelihood. Attempting a rescaling...");
}
}
rescalingMessageCount += 1;
}
useScaleFactors = true;
recomputeScaleFactors = true;
firstRescaleAttempt = false; // Only try to rescale once
}
// turn off double buffer flipping so the next call overwrites the
// underflowed buffers. Flip will be turned on again in storeState for
// next step
flip = false;
throw new LikelihoodUnderflowException();
} else {
firstRescaleAttempt = true;
recomputeScaleFactors = false;
flip = true;
}
updateSubstitutionModel = false;
updateSiteModel = false;
/* No need to rescale partials */
return logL;
}
/**
* Stores the additional state other than model components
*/
@Override
public void storeState() {
partialBufferHelper.storeState();
evolutionaryProcessDelegate.storeState();
if (useScaleFactors || useAutoScaling) { // Only store when actually used
scaleBufferHelper.storeState();
System.arraycopy(scaleBufferIndices, 0, storedScaleBufferIndices, 0, scaleBufferIndices.length);
// storedRescalingCount = rescalingCount;
}
// turn on double buffering flipping (may have been turned off to enable a rescale)
flip = true;
}
/**
* Restore the additional stored state
*/
@Override
public void restoreState() {
updateSiteModel = true; // this is required to upload the categoryRates to BEAGLE after the restore
partialBufferHelper.restoreState();
evolutionaryProcessDelegate.restoreState();
if (useScaleFactors || useAutoScaling) {
scaleBufferHelper.restoreState();
int[] tmp = storedScaleBufferIndices;
storedScaleBufferIndices = scaleBufferIndices;
scaleBufferIndices = tmp;
// rescalingCount = storedRescalingCount;
}
}
@Override
public void setCallback(TreeDataLikelihood treeDataLikelihood) {
// Callback not necessary
}
@Override
protected void acceptState() {
}
// INSTANCE CITABLE
@Override
public Citation.Category getCategory() {
return Citation.Category.FRAMEWORK;
}
@Override
public String getDescription() {
return "Using BEAGLE likelihood calculation library";
}
@Override
public List<Citation> getCitations() {
return Collections.singletonList(CommonCitations.AYRES_2012_BEAGLE);
}
// INSTANCE VARIABLES
private final int nodeCount;
private final int tipCount;
private final int internalNodeCount;
private final int[] branchUpdateIndices;
private final double[] branchLengths;
private int[] scaleBufferIndices;
private int[] storedScaleBufferIndices;
private final int[] operations;
private boolean flip = true;
private final BufferIndexHelper partialBufferHelper;
private final BufferIndexHelper scaleBufferHelper;
private PartialsRescalingScheme rescalingScheme;
private int rescalingFrequency = RESCALE_FREQUENCY;
private boolean delayRescalingUntilUnderflow = true;
private boolean useScaleFactors = false;
private boolean useAutoScaling = false;
private boolean recomputeScaleFactors = false;
private boolean everUnderflowed = false;
private int rescalingCount = 0;
private int rescalingCountInner = 0;
private boolean firstRescaleAttempt = false;
private int rescalingMessageCount = 0;
/**
* the patternList
*/
private final DataType dataType;
/**
* the pattern weights
*/
private final double[] patternWeights;
/**
* the number of patterns
*/
private final int patternCount;
/**
* the number of states in the data
*/
private final int stateCount;
/**
* the branch-site model for these sites
*/
private final BranchModel branchModel;
/**
* A delegate to handle substitution models on branches
*/
private final EvolutionaryProcessDelegate evolutionaryProcessDelegate;
/**
* the site model for these sites
*/
private final SiteRateModel siteRateModel;
/**
* the pattern likelihoods
*/
private double[] patternLogLikelihoods = null;
/**
* the number of rate categories
*/
private final int categoryCount;
/**
* an array used to transfer tip partials
*/
private double[] tipPartials;
/**
* an array used to transfer tip states
*/
private int[] tipStates;
/**
* the BEAGLE library instance
*/
private final Beagle beagle;
/**
* Flag to specify that the substitution model has changed
*/
private boolean updateSubstitutionModel;
/**
* Flag to specify that the site model has changed
*/
private boolean updateSiteModel;
}
|
package edu.washington.escience.myria.operator;
import java.io.Serializable;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.gs.collections.api.iterator.IntIterator;
import com.gs.collections.impl.list.mutable.primitive.IntArrayList;
import com.gs.collections.impl.map.mutable.primitive.LongIntHashMap;
import com.gs.collections.impl.map.mutable.primitive.LongObjectHashMap;
import edu.washington.escience.myria.Schema;
import edu.washington.escience.myria.storage.MutableTupleBuffer;
import edu.washington.escience.myria.storage.ReadableTable;
import edu.washington.escience.myria.storage.TupleBatch;
import edu.washington.escience.myria.storage.TupleUtils;
import edu.washington.escience.myria.util.HashUtils;
/**
* An abstraction of a hash table of unique tuples.
*/
public final class UniqueTupleHashTable implements Serializable {
/** Required for Java serialization. */
private static final long serialVersionUID = 1L;
/**
* We store this value instead of a valid index to indicate
* that a given hash code is mapped to multiple indexes.
*/
private static final int COLLIDING_KEY = -1;
/**
* We return this value from getIfAbsent() to indicate absence,
* since 0 and -1 are already legitimate values.
*/
private static final int ABSENT_VALUE = -2;
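/*
 * Illustrative example (not from the original source): if the tuple at index 7 hashes to 42,
 * keyHashCodesToIndexes maps 42 -> 7. If a different tuple with the same hash is later added at
 * index 12, keyHashCodesToIndexes is updated to map 42 -> COLLIDING_KEY and
 * collidingKeyHashCodesToIndexes maps 42 -> [7, 12].
 */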
/** Map from unique hash codes to indexes. */
private transient LongIntHashMap keyHashCodesToIndexes;
/** Map from colliding hash codes to indexes. */
private transient LongObjectHashMap<IntArrayList> collidingKeyHashCodesToIndexes;
/** The table containing keys and values. */
private transient MutableTupleBuffer data;
/** Key column indices. */
private final int[] keyColumns;
/** The logger for this class. */
protected static final org.slf4j.Logger LOGGER =
LoggerFactory.getLogger(UniqueTupleHashTable.class);
/**
* @param schema schema
* @param keyColumns key column indices
*/
public UniqueTupleHashTable(final Schema schema, final int[] keyColumns) {
this.keyColumns = keyColumns;
data = new MutableTupleBuffer(schema);
keyHashCodesToIndexes = new LongIntHashMap();
collidingKeyHashCodesToIndexes = new LongObjectHashMap<IntArrayList>();
}
/**
* @return the number of tuples this hash table has.
*/
public int numTuples() {
return data.numTuples();
}
/**
* Get the data table index given key columns from a tuple in a tuple batch.
*
* @param tb the input tuple batch
* @param key the key columns
* @param row the row index of the tuple
* @return the index of the matching tuple in the data table, or -1 if no match
*/
public int getIndex(final ReadableTable tb, final int[] key, final int row) {
final long hashcode = HashUtils.hashSubRowLong(tb, key, row);
int index = keyHashCodesToIndexes.getIfAbsent(hashcode, ABSENT_VALUE);
if (index == ABSENT_VALUE) {
return -1;
}
if (index == COLLIDING_KEY) {
IntArrayList collidingIndexes = collidingKeyHashCodesToIndexes.get(hashcode);
Preconditions.checkNotNull(collidingIndexes);
Preconditions.checkState(collidingIndexes.size() > 1);
IntIterator iter = collidingIndexes.intIterator();
while (iter.hasNext()) {
int idx = iter.next();
if (TupleUtils.tupleEquals(tb, key, row, data, keyColumns, idx)) {
return idx;
}
}
assert false; // we should never get here
}
return index;
}
/**
* Replace a matching tuple in the data table with the input tuple.
*
* @param tb the input tuple batch
* @param keyColumns the key columns
* @param row the row index of the input tuple
* @return if at least one tuple is replaced
*/
public boolean replace(final TupleBatch tb, final int[] keyColumns, final int row) {
int index = getIndex(tb, keyColumns, row);
if (index == -1) {
return false;
}
for (int j = 0; j < data.numColumns(); ++j) {
data.replace(j, index, tb.getDataColumns().get(j), row);
}
return true;
}
/**
* @param tb tuple batch of the input tuple
* @param keyColumns key column indices
* @param row row index of the input tuple
* @param keyOnly only add keyColumns
*/
public void addTuple(
final ReadableTable tb, final int[] keyColumns, final int row, final boolean keyOnly) {
final long hashcode = HashUtils.hashSubRowLong(tb, keyColumns, row);
int index = keyHashCodesToIndexes.getIfAbsent(hashcode, ABSENT_VALUE);
if (index == ABSENT_VALUE) {
keyHashCodesToIndexes.put(hashcode, numTuples());
} else if (index == COLLIDING_KEY) {
IntArrayList collidingIndexes = collidingKeyHashCodesToIndexes.get(hashcode);
Preconditions.checkNotNull(collidingIndexes);
Preconditions.checkState(collidingIndexes.size() > 1);
collidingIndexes.add(numTuples());
} else {
LOGGER.warn("Collision detected with {} elements in table!", numTuples());
IntArrayList collidingIndexes = IntArrayList.newListWith(index, numTuples());
Preconditions.checkState(!collidingKeyHashCodesToIndexes.containsKey(hashcode));
collidingKeyHashCodesToIndexes.put(hashcode, collidingIndexes);
keyHashCodesToIndexes.put(hashcode, COLLIDING_KEY);
}
if (keyOnly) {
for (int i = 0; i < keyColumns.length; ++i) {
data.put(i, tb.asColumn(keyColumns[i]), row);
}
} else {
for (int i = 0; i < data.numColumns(); ++i) {
data.put(i, tb.asColumn(i), row);
}
}
}
/**
* @return the data
*/
public MutableTupleBuffer getData() {
return data;
}
/**
* Clean up the hash table.
*/
public void cleanup() {
keyHashCodesToIndexes = new LongIntHashMap();
collidingKeyHashCodesToIndexes = new LongObjectHashMap<IntArrayList>();
data = new MutableTupleBuffer(data.getSchema());
}
}
|
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package Emanuelle.aula7.banco;
import br.edu.ifto.aula15.trabalhofinal.batepapo.telas.*;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
/**
*
* @author Giovane
*/
public class Batepapo extends javax.swing.JFrame {
/**
* Creates new form Batepapo
*/
public Batepapo() {
initComponents();
Thread th = new Mensagens(jTextArea1);
th.start();
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jScrollPane1 = new javax.swing.JScrollPane();
jTextArea1 = new javax.swing.JTextArea();
jScrollPane2 = new javax.swing.JScrollPane();
jList1 = new javax.swing.JList();
jPanel1 = new javax.swing.JPanel();
jTextField1 = new javax.swing.JTextField();
jButton1 = new javax.swing.JButton();
jPanel2 = new javax.swing.JPanel();
jMenuBar1 = new javax.swing.JMenuBar();
jMenu1 = new javax.swing.JMenu();
jMenuItem1 = new javax.swing.JMenuItem();
jMenu2 = new javax.swing.JMenu();
setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
jTextArea1.setColumns(20);
jTextArea1.setRows(5);
jScrollPane1.setViewportView(jTextArea1);
jList1.setBackground(new java.awt.Color(0, 0, 0));
jList1.setForeground(new java.awt.Color(255, 255, 255));
jList1.setModel(new javax.swing.AbstractListModel() {
String[] strings = { "Item 1", "Item 2", "Item 3", "Item 4", "Item 5" };
public int getSize() { return strings.length; }
public Object getElementAt(int i) { return strings[i]; }
});
jScrollPane2.setViewportView(jList1);
jPanel1.setBackground(new java.awt.Color(0, 102, 204));
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 0, Short.MAX_VALUE)
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 63, Short.MAX_VALUE)
);
jTextField1.setText("jTextField1");
jTextField1.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyPressed(java.awt.event.KeyEvent evt) {
jTextField1KeyPressed(evt);
}
});
jButton1.setText("enviar");
jButton1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton1ActionPerformed(evt);
}
});
jPanel2.setBackground(new java.awt.Color(51, 204, 255));
javax.swing.GroupLayout jPanel2Layout = new javax.swing.GroupLayout(jPanel2);
jPanel2.setLayout(jPanel2Layout);
jPanel2Layout.setHorizontalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 0, Short.MAX_VALUE)
);
jPanel2Layout.setVerticalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 27, Short.MAX_VALUE)
);
jMenu1.setText("Principal");
jMenuItem1.setAccelerator(javax.swing.KeyStroke.getKeyStroke(java.awt.event.KeyEvent.VK_S, java.awt.event.InputEvent.CTRL_MASK));
jMenuItem1.setText("Sair");
jMenu1.add(jMenuItem1);
jMenuBar1.add(jMenu1);
jMenu2.setText("Créditos");
jMenu2.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
jMenu2MouseClicked(evt);
}
});
jMenuBar1.add(jMenu2);
setJMenuBar(jMenuBar1);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPanel2, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jPanel1, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 238, Short.MAX_VALUE)
.addComponent(jTextField1))
.addGap(18, 18, 18)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jScrollPane2)
.addComponent(jButton1, javax.swing.GroupLayout.DEFAULT_SIZE, 92, Short.MAX_VALUE))))
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jScrollPane1)
.addComponent(jScrollPane2, javax.swing.GroupLayout.DEFAULT_SIZE, 294, Short.MAX_VALUE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jTextField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jButton1))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jPanel2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(20, Short.MAX_VALUE))
);
pack();
}// </editor-fold>//GEN-END:initComponents
private void jMenu2MouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jMenu2MouseClicked
new Creditos().setVisible(true);
}//GEN-LAST:event_jMenu2MouseClicked
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed
try {
JogadorDao dao = new JogadorDao();
Mensagem m = new Mensagem();
m.setApelido("Manu");
m.setMensagem(jTextField1.getText());
dao.addMensagens(m);
List<Mensagem> mensagens = dao.listaMensagens();
this.jTextArea1.setText("");
for (Iterator<Mensagem> it = mensagens.iterator(); it.hasNext (); ){
Mensagem mensagem1 = it.next();
this.jTextArea1.setText(jTextArea1.getText() + "\n" + mensagem1.getHorarioMensagem() + "-" + mensagem1.getApelido() + " fala: " + mensagem1.getMensagem());
}
} catch (ClassNotFoundException ex) {
Logger.getLogger(Batepapo.class.getName()).log(Level.SEVERE, null, ex);
} catch (SQLException ex) {
Logger.getLogger(Batepapo.class.getName()).log(Level.SEVERE, null, ex);
}
}//GEN-LAST:event_jButton1ActionPerformed
private void jTextField1KeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_jTextField1KeyPressed
if (evt.getKeyCode() == java.awt.event.KeyEvent.VK_ENTER) {
try {
JogadorDao dao = new JogadorDao();
Mensagem m = new Mensagem();
m.setApelido("Manu");
m.setMensagem(jTextField1.getText());
dao.addMensagens(m);
List<Mensagem> mensagens = dao.listaMensagens();
this.jTextArea1.setText("");
for (Iterator<Mensagem> it = mensagens.iterator(); it.hasNext (); ){
Mensagem mensagem1 = it.next();
this.jTextArea1.setText(jTextArea1.getText() + "\n" + mensagem1.getHorarioMensagem() + "-" + mensagem1.getApelido() + " fala: " + mensagem1.getMensagem());
}
} catch ( ClassNotFoundException | SQLException ex) {
Logger.getLogger(Batepapo.class.getName()).log(Level.SEVERE, null, ex);
}
}
}//GEN-LAST:event_jTextField1KeyPressed
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
/* Set the Nimbus look and feel */
//<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
try {
for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
if ("Nimbus".equals(info.getName())) {
javax.swing.UIManager.setLookAndFeel(info.getClassName());
break;
}
}
} catch (ClassNotFoundException ex) {
java.util.logging.Logger.getLogger(Batepapo.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
java.util.logging.Logger.getLogger(Batepapo.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (IllegalAccessException ex) {
java.util.logging.Logger.getLogger(Batepapo.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (javax.swing.UnsupportedLookAndFeelException ex) {
java.util.logging.Logger.getLogger(Batepapo.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
}
//</editor-fold>
/* Create and display the form */
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
new Batepapo().setVisible(true);
}
});
}
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton jButton1;
private javax.swing.JList jList1;
private javax.swing.JMenu jMenu1;
private javax.swing.JMenu jMenu2;
private javax.swing.JMenuBar jMenuBar1;
private javax.swing.JMenuItem jMenuItem1;
private javax.swing.JPanel jPanel1;
private javax.swing.JPanel jPanel2;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JScrollPane jScrollPane2;
private javax.swing.JTextArea jTextArea1;
private javax.swing.JTextField jTextField1;
// End of variables declaration//GEN-END:variables
}
|
package com.anpmech.mpd;
import com.anpmech.mpd.exception.InvalidResponseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.TimeUnit;
public final class Tools {
/**
* This is the value used to extract the key from the {@link #splitResponse(String)} return array.
*/
public static final int KEY = 0;
/**
* This is the value used to extract the value from the {@link #splitResponse(String)} return
* array.
*/
public static final int VALUE = 1;
/**
* The class log identifier.
*/
private static final String TAG = "Tools";
private Tools() {
super();
}
/**
* Simple integer comparison. The result should be equivalent to Integer.compare(int, int).
*
* @param lhs First value to compare to the second.
* @param rhs Second value to compare to the first.
* @return 0 if lhs = rhs, less than 0 if lhs < rhs, and greater than 0 if lhs > rhs.
*/
public static int compare(final int lhs, final int rhs) {
final int result;
if (lhs == rhs) {
result = 0;
} else {
if (lhs < rhs) {
result = -1;
} else {
result = 1;
}
}
return result;
}
/**
* Null-safe equivalent of {@code a.equals(b)}. The result should be equivalent to
* java.util.Objects.equals(Object, Object).
*
* @param a An object.
* @param b An object to be compared with a for equality.
* @return True if the arguments are equal to each other, false otherwise
*/
public static boolean equals(final Object a, final Object b) {
if (a == null) {
return b == null;
} else {
return a.equals(b);
}
}
/**
* A simple filename extension extractor.
*
* @param filename The filename to extract the extension from.
* @return The extension extracted from the filename parameter.
*/
public static String getExtension(final String filename) {
final int index = filename.lastIndexOf('.');
final int extLength = filename.length() - index - 1;
final int extensionShort = 2;
final int extensionLong = 4;
String result = null;
if (extLength >= extensionShort && extLength <= extensionLong) {
result = filename.substring(index + 1);
}
return result;
}
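/*
 * Illustrative examples (not from the original source): getExtension("song.mp3") returns "mp3",
 * getExtension("archive.tar.gz") returns "gz", and getExtension("a.b") returns null because the
 * extension length falls outside the accepted 2-4 character range.
 */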
/**
* Gets a beginning and an end range of sub-server responses, based on a parameter key.
* <p/>
* This method functions very similarly to {@link #getRanges(java.util.Collection)}, but each
* range will <B>end</B> with the key rather than begin with it.
*
* @param response The server response to parse.
* @param key The key to the beginning/end of a sub-list.
* @return A two int array. The first int is either the beginning of the list, or the one
* position beyond the found key. The second int is one position before the next key or the end
* of the list (for {@link List#subList(int, int)} compatibility).
* @see #getRanges(java.util.Collection)
*/
public static Collection<int[]> getRanges(final Collection<String> response, final String key) {
final int responseSize = response.size();
/** Initialize the range after the capacity is known. */
Collection<int[]> ranges = null;
int iterator = 0;
int beginIndex = 0;
for (final String line : response) {
final int index = line.indexOf(':');
final CharSequence formatted;
if (index == -1) {
formatted = line;
} else {
formatted = line.subSequence(0, index);
}
if (key.contentEquals(formatted) && iterator != beginIndex) {
if (ranges == null) {
final int capacity = responseSize / (iterator + 1);
ranges = new ArrayList<>(capacity);
}
if (beginIndex == 0) {
/** The beginning range. */
ranges.add(new int[]{beginIndex, iterator + 1});
} else {
ranges.add(new int[]{beginIndex + 1, iterator + 1});
if (iterator == responseSize) {
break;
}
}
beginIndex = iterator;
}
iterator++;
}
if (responseSize == 0) {
ranges = Collections.emptyList();
} else if (ranges == null) {
ranges = Collections.singletonList(new int[]{beginIndex, responseSize});
}
return ranges;
}
/**
* Gets a beginning and an end range of sub-server responses.
* <p/>
* This method, unlike {@link #getRanges(java.util.Collection, String)}, parses the response
* for repetitions of the first key and ends each range at the position prior to the repeated key.
*
* @param response The server response to parse.
* @return A two int array. The first int is the beginning of the range.
* The second int is one position beyond the end of the range
* (for {@link List#subList(int, int)} compatibility). If no range is found, an empty list will
* be returned.
* @see #getRanges(java.util.Collection, String)
*/
public static Collection<int[]> getRanges(final Collection<String> response) {
final int responseSize = response.size();
/** Initialize the range after the capacity is known. */
Collection<int[]> ranges = null;
CharSequence key = null;
int beginIndex = 0;
int iterator = 0;
for (final String line : response) {
final int index = line.indexOf(':');
final CharSequence formatted;
if (index == -1) {
formatted = line;
} else {
formatted = line.subSequence(0, index);
}
if (iterator == 0) {
key = formatted;
}
if (key.equals(formatted) && iterator != beginIndex) {
if (ranges == null) {
final int capacity = responseSize / (iterator - beginIndex);
ranges = new ArrayList<>(capacity);
}
ranges.add(new int[]{beginIndex, iterator});
beginIndex = iterator;
}
iterator++;
}
if (responseSize == 0) {
ranges = Collections.emptyList();
} else if (ranges == null) {
ranges = Collections.singletonList(new int[]{beginIndex, responseSize});
} else {
ranges.add(new int[]{beginIndex, responseSize});
}
return ranges;
}
/**
* This method iterates through a two dimensional array to check each two element inner array for
* equality of its inner objects with the isNotEqual(object, object) method.
*
* @param arrays The two dimensional array with objects to check for equality.
* @return Returns true if an inner array was not equal.
*/
public static boolean isNotEqual(final Object[][] arrays) {
boolean result = false;
for (final Object[] array : arrays) {
if (isNotEqual(array[0], array[1])) {
result = true;
break;
}
}
return result;
}
/**
* Compares inside objects for an Object.equals(object) implementation.
*
* @param objectA An object to be compared.
* @param objectB An object to be compared.
* @return False if objects are both null or are equal, true otherwise.
*/
public static boolean isNotEqual(final Object objectA, final Object objectB) {
final boolean isEqual;
if (objectA == null) {
if (objectB == null) {
isEqual = true;
} else {
isEqual = false;
}
} else {
if (objectA.equals(objectB)) {
isEqual = true;
} else {
isEqual = false;
}
}
return !isEqual;
}
/**
* Compares inside int values for an Object.equals(object) implementation.
*
* @param arrays An array of two element arrays to be checked for equality.
* @return True if any two element array contains unequal values, false otherwise.
*/
public static boolean isNotEqual(final int[][] arrays) {
boolean result = false;
for (final int[] array : arrays) {
if (array[0] != array[1]) {
result = true;
break;
}
}
return result;
}
/**
* Compares inside long values for an Object.equals(object) implementation.
*
* @param arrays An array of two element arrays to be checked for equality.
* @return True if any two element array contains unequal values, false otherwise.
*/
public static boolean isNotEqual(final long[][] arrays) {
boolean result = false;
for (final long[] array : arrays) {
if (array[0] != array[1]) {
result = true;
break;
}
}
return result;
}
/**
* Parse a media server response for specific key values and discard the key.
*
* @param response The media server response.
* @param keys The entry type in the response to add to the list.
*/
public static void parseResponse(final List<String> response, final String... keys) {
String[] lines;
if (keys.length > 1) {
Arrays.sort(keys);
}
for (final ListIterator<String> iterator = response.listIterator(); iterator.hasNext(); ) {
lines = splitResponse(iterator.next());
if (keys.length == 1 && keys[0].equals(lines[KEY]) ||
Arrays.binarySearch(keys, lines[KEY]) >= 0) {
iterator.set(lines[VALUE]);
} else {
iterator.remove();
}
}
if (response instanceof ArrayList) {
((ArrayList<String>) response).trimToSize();
}
}
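/*
 * Illustrative example (not from the original source): calling parseResponse(response, "file")
 * on the mutable list ["file: a.mp3", "Time: 10"] leaves the list containing only ["a.mp3"];
 * non-matching lines are removed and matching lines are replaced by their values.
 */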
/**
* This method converts a list of integers into a list of MPD protocol command argument ranges.
* This can be a win when there are numbers in sequence which can be converted into numbered
* ranges and sent in fewer commands. The disadvantage to this is that order has to be
* sacrificed.
*
* @param integers A list of integers to convert to numbered range strings.
* @return A collection of numbered range strings.
* @see #sequentialToRange(int...)
*/
public static List<String> sequentialToRange(final List<Integer> integers) {
// Sort before creating the iterator; sorting afterwards could invalidate the iterator.
Collections.sort(integers);
final ListIterator<Integer> iterator = integers.listIterator(integers.size());
final List<String> ranges = new ArrayList<>();
final StringBuilder stringBuilder = new StringBuilder(10);
boolean inSequenceRange = false;
int startRange = -1;
while (iterator.hasPrevious()) {
final Integer integer = iterator.previous();
Integer nextInteger = null;
/** Avoid out of bounds. */
if (iterator.hasPrevious()) {
/** Store the next integer in the iteration. */
nextInteger = integers.get(iterator.previousIndex());
}
/** Specifies whether the next integer can be added to a range. */
if (nextInteger != null && integer.equals(nextInteger + 1)) {
if (!inSequenceRange) {
startRange = integer;
}
inSequenceRange = true;
} else {
if (inSequenceRange) {
/** Range complete, add it to the store. */
stringBuilder.append(integer);
stringBuilder.append(':');
/**
* The start range (the end number) is +1 on the
* MPD playlist range per the protocol.
*/
stringBuilder.append(startRange + 1);
ranges.add(stringBuilder.toString());
stringBuilder.setLength(0);
} else {
/** No range, add it to the store. */
ranges.add(integer.toString());
}
inSequenceRange = false;
}
}
return ranges;
}
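/*
 * Illustrative example (not from the original source): for the input [1, 2, 3, 7, 9, 10]
 * this produces ["9:11", "7", "1:4"], i.e. half-open MPD ranges covering 9-10 and 1-3
 * plus the lone value 7, in descending order.
 */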
/**
* This method converts a list of integers into a list of MPD protocol command argument ranges.
* This can be a win when there are numbers in sequence which can be converted into numbered
* ranges and sent in fewer commands. The disadvantage to this is that order has to be
* sacrificed.
*
* @param integers A list of integers to convert to numbered range strings.
* @return A collection of numbered range strings.
* @see #sequentialToRange(java.util.List)
*/
public static List<String> sequentialToRange(final int... integers) {
final List<String> ranges = new ArrayList<>();
final StringBuilder stringBuilder = new StringBuilder(10);
boolean inSequenceRange = false;
int startRange = -1;
Arrays.sort(integers);
for (int i = integers.length - 1; i >= 0; i--) {
final int integer = integers[i];
final int nextInteger;
if (i == 0) {
/** Avoid out of bounds. */
nextInteger = -1;
} else {
/** Store the next integer in the iteration. */
nextInteger = integers[i - 1];
}
/** Specifies whether the next integer can be added to a range. */
if (nextInteger != -1 && integer == nextInteger + 1) {
if (!inSequenceRange) {
startRange = integer;
}
inSequenceRange = true;
} else {
if (inSequenceRange) {
/** Range complete, add it to the store. */
stringBuilder.append(integer);
stringBuilder.append(':');
/**
* The start range (the end number) is +1 on the
* MPD playlist range per the protocol.
*/
stringBuilder.append(startRange + 1);
ranges.add(stringBuilder.toString());
stringBuilder.setLength(0);
} else {
/** No range, add it to the store. */
ranges.add(Integer.toString(integer));
}
inSequenceRange = false;
}
}
return ranges;
}
/**
* Split the standard MPD protocol response into a two dimensional array consisting of two
* element String array key / value pairs.
*
* @param list The incoming server response.
* @return A two dimensional {@code String} array of two element {@code String arrays}.
*/
public static String[][] splitResponse(final Collection<String> list) {
final String[][] results = new String[list.size()][];
int iterator = 0;
for (final String line : list) {
results[iterator] = splitResponse(line);
iterator++;
}
return results;
}
/**
* Split the standard MPD protocol response.
*
* @param line The MPD response string.
* @return A string array with two elements, one the key, the second the value.
*/
public static String[] splitResponse(final String line) {
final int delimiterIndex = line.indexOf(':');
final String[] result = new String[2];
if (delimiterIndex == -1) {
throw new InvalidResponseException("Failed to parse server response key for line: " +
line);
}
result[0] = line.substring(0, delimiterIndex);
/** Skip ': ' */
result[1] = line.substring(delimiterIndex + 2);
return result;
}
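/*
 * Illustrative example (not from the original source): splitResponse("volume: 100")
 * returns {"volume", "100"}; a line without a ':' delimiter causes an InvalidResponseException.
 */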
/**
* This method takes seconds and converts it into HH:MM:SS
*
* @param totalSeconds Seconds to convert to a string.
* @return Returns time formatted from the {@code totalSeconds} in format HH:MM:SS.
*/
public static String timeToString(final long totalSeconds) {
final long hours = TimeUnit.SECONDS.toHours(totalSeconds);
final long minutes = TimeUnit.SECONDS.toMinutes(totalSeconds) -
TimeUnit.SECONDS.toHours(totalSeconds) * 60L;
final long seconds = TimeUnit.SECONDS.toSeconds(totalSeconds) -
TimeUnit.SECONDS.toMinutes(totalSeconds) * 60L;
final String result;
if (hours == 0) {
result = String.format("%02d:%02d", minutes, seconds);
} else {
result = String.format("%02d:%02d:%02d", hours, minutes, seconds);
}
return result;
}
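/*
 * Illustrative examples (not from the original source): timeToString(61L) returns "01:01"
 * and timeToString(3661L) returns "01:01:01".
 */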
}
|
package edu.wheaton.simulator.test.statistics;
/**
* A JUnit test case for testing StatisticsManager.java.
*
* @author Grant Hensel
* Wheaton College, CSCI 335
* Spring 2013
* 25 Mar 2013
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
import edu.wheaton.simulator.datastructure.ElementAlreadyContainedException;
import edu.wheaton.simulator.datastructure.Grid;
import edu.wheaton.simulator.entity.Agent;
import edu.wheaton.simulator.entity.AgentID;
import edu.wheaton.simulator.entity.Prototype;
import edu.wheaton.simulator.entity.Trigger;
import edu.wheaton.simulator.entity.Trigger.Builder;
import edu.wheaton.simulator.statistics.AgentSnapshot;
import edu.wheaton.simulator.statistics.PrototypeSnapshot;
import edu.wheaton.simulator.statistics.Recorder;
import edu.wheaton.simulator.statistics.SnapshotFactory;
import edu.wheaton.simulator.statistics.StatisticsManager;
import edu.wheaton.simulator.statistics.TriggerSnapshot;
public class StatisticsManagerTest {
StatisticsManager sm;
Grid g;
String categoryName;
Prototype prototype;
HashMap<String, String> fields;
int population;
ImmutableSet<AgentID> children;
Integer step;
PrototypeSnapshot protoSnap;
PrototypeSnapshot protoSnap2;
Recorder recorder;
/*
* Helper method used in tests
*/
public Set<TriggerSnapshot> makeTriggerSnapshots() {
Builder builder = new Trigger.Builder(prototype);
builder.addBehavioral("behavior");
builder.addConditional("conditional");
builder.addName("trigger");
builder.addPriority(1);
Trigger trigger = builder.build();
Set<TriggerSnapshot> tSnaps = new HashSet<TriggerSnapshot>();
prototype.addTrigger(trigger);
TriggerSnapshot tSnap = SnapshotFactory.makeTriggerSnapshot(trigger.getName(), trigger.getPriority(), trigger
.getConditions().toString(), trigger.getBehavior().toString());
tSnaps.add(tSnap);
return tSnaps;
}
@Before
public void setUp() {
sm = StatisticsManager.getInstance();
g = new Grid(10, 10);
// Add a test PrototypeSnapshot
categoryName = "testing";
prototype = new Prototype("tester");
fields = new HashMap<String, String>();
population = 50;
children = prototype.childIDs();
step = new Integer(1);
protoSnap = new PrototypeSnapshot(categoryName,
SnapshotFactory.makeFieldSnapshots(fields), population,
children, makeTriggerSnapshots(), null, null);
categoryName = "testing2";
prototype = new Prototype("tester2");
population = 40;
step = new Integer(2);
// Add another test PrototypeSnapshot
protoSnap2 = new PrototypeSnapshot(categoryName,
SnapshotFactory.makeFieldSnapshots(fields), population,
children, makeTriggerSnapshots(), null, null);
}
@Test
public void testStatisticsManager() {
Assert.assertNotNull("Constructor failed", sm);
}
@Test
public void testAddPrototypeSnapshot() {
Prototype p = new Prototype("TestPrototype");
Assert.assertNotNull(p);
PrototypeSnapshot protoSnap = new PrototypeSnapshot("categoryname",
SnapshotFactory.makeFieldSnapshots(new HashMap<String, String>()),
100, p.childIDs(), makeTriggerSnapshots(), null, null);
StatisticsManager.addPrototypeSnapshot(protoSnap);
}
@Test
public void testGetPopVsTime() {
StatisticsManager.addPrototypeSnapshot(protoSnap);
// Create data for a test simulation with a random number of steps
// and random population in each step
Random R = new Random();
int numSteps = R.nextInt(10) + 2;
int[] expected = new int[numSteps];
// Populate the test "expected" array and add the data to the
// StatsManager
for (int i = 0; i < numSteps; i++) {
expected[i] = R.nextInt(100);
// Add the appropriate AgentSnapshots to the StatsManager
for (int pop = 0; pop < expected[i]; pop++) {
Agent agent = prototype.createAgent(g);
sm.addGridEntity(new AgentSnapshot(agent.getID(),
SnapshotFactory.makeFieldSnapshots(agent
.getCustomFieldMap()), i,
protoSnap.categoryName, null, null, null, 0, 0));
}
}
int[] result = sm.getPopVsTime(protoSnap.categoryName);
System.out.println("\ngetPopVsTime()");
System.out.println("Expected: " + display(expected));
System.out.println("Result: " + display(result));
Assert.assertArrayEquals(expected, result);
}
/**
* Display the contents of an int array
*
* @param array
* The array to display
* @return The display string
*/
private static String display(int[] array) {
String ret = "[";
for (int x : array) {
ret += x + ", ";
}
if (array.length > 0)
ret = ret.substring(0, ret.length() - 2);
return ret + "]";
}
@Test
public void testGetAvgFieldValue() {
ArrayList<AgentSnapshot> snaps = new ArrayList<AgentSnapshot>();
HashSet<AgentID> ids = new HashSet<AgentID>();
String[] names = new String[] { "bear", "tom", "john", "piglet",
"reese" };
/* create snapshots */
for (int i = 0; i < 5; i++) {
Agent agent = prototype.createAgent(g);
try {
agent.addField("name", names[i]);
agent.addField("weight", "10");
} catch (ElementAlreadyContainedException e) {
e.printStackTrace();
}
ids.add(agent.getID());
for (int s = 1; s < 3; s++) {
snaps.add(new AgentSnapshot(agent.getID(), SnapshotFactory
.makeFieldSnapshots(agent.getCustomFieldMap()), s,
protoSnap.categoryName, null, null, null, 0, 0));
}
}
/* fill table w/ snapshots */
for (AgentSnapshot snap : snaps) {
sm.addGridEntity(snap);
}
/* test method */
double[] avg = sm.getAvgFieldValue(protoSnap.categoryName, "weight");
for (double i : avg)
System.out.print((int) i + " ");
for (double i : avg) {
int a = (int) i;
org.junit.Assert.assertEquals(10, a);
}
}
@Test
public void testGetTriggerExecutionsFor() {
Grid grid = new Grid(10, 10);
ArrayList<AgentSnapshot> snaps = new ArrayList<AgentSnapshot>();
HashSet<AgentID> ids = new HashSet<AgentID>();
HashSet<Agent> agents = new HashSet<Agent>();
ArrayList<TriggerSnapshot> triggers = new ArrayList<TriggerSnapshot>();
String[] names = new String[] { "bear", "tom", "john", "piglet",
"reese" };
Builder builder = new Trigger.Builder(prototype);
builder.addBehavioral("true");
builder.addConditional("true");
builder.addName("trigger");
builder.addPriority(1);
Trigger trigger = builder.build();
triggers.add(SnapshotFactory.makeTriggerSnapshot(trigger.getName(), 1, "true", "true"));
prototype.addTrigger(trigger);
/* create snapshots */
for (int i = 0; i < 5; i++) {
// try {
// prototype.addField("name", names[i]);
// prototype.addField("weight", "10");
// } catch (ElementAlreadyContainedException e) {
// e.printStackTrace();
Agent agent = prototype.createAgent(g);
grid.addAgent(agent);
agents.add(agent);
ids.add(agent.getID());
snaps.add(new AgentSnapshot(agent.getID(), SnapshotFactory
.makeFieldSnapshots(agent.getCustomFieldMap()), 0,
protoSnap.categoryName, null, null,triggers, 0, 0));
}
// try {
// grid.updateEntities();
// } catch (SimulationPauseException e) {
// e.printStackTrace();
/* create snapshots */
for (Agent agent : agents) {
snaps.add(new AgentSnapshot(agent.getID(), SnapshotFactory
.makeFieldSnapshots(agent.getCustomFieldMap()), 1,
protoSnap.categoryName, null, null, triggers, 0, 0));
}
/* fill table w/ snapshots */
for (AgentSnapshot snap : snaps) {
sm.addGridEntity(snap);
}
/* test method */
double[] answer = sm.getTriggerExecutionsFor(protoSnap.categoryName, "trigger");
System.out.println("\nExecutions");
for (double i : answer)
System.out.print((int) i + " ");
System.out.println("");
for (double i : answer) {
int a = (int) i;
org.junit.Assert.assertEquals(5, a);
}
}
@Test
public void testGetAvgLifespan() {
ArrayList<AgentSnapshot> snaps = new ArrayList<AgentSnapshot>();
// Generate lifespans for each agent that will be inserted.
Random R = new Random();
ArrayList<Integer> lifespans = new ArrayList<Integer>();
int numAgents = R.nextInt(41);
for (int i = 0; i < numAgents; i++) {
lifespans.add(R.nextInt(100));
}
/* create snapshots */
for (int i = 0; i < lifespans.size(); i++) {
Agent agent = prototype.createAgent(g);
AgentID ID = agent.getID();
// For each agent, add a snapshot of it at each Step it was alive
for (int step = 0; step <= lifespans.get(i); step++) {
snaps.add(new AgentSnapshot(ID, SnapshotFactory
.makeFieldSnapshots(agent.getCustomFieldMap()), step,
protoSnap.categoryName, null, null, null, 0, 0));
}
}
/* fill table w/ snapshots */
for (AgentSnapshot snap : snaps) {
sm.addGridEntity(snap);
}
double actual = sm.getAvgLifespan(protoSnap.categoryName);
double expected = average(lifespans);
System.out.println("\nGetAvgLifesppan()");
System.out.println("Expected: " + expected);
System.out.println("Actual: " + actual);
Assert.assertEquals((int) expected, (int) actual);
}
@Test
public void testRecorder(){
recorder = new Recorder(sm);
recorder.update(g);
}
/**
* Calculate the average of the values in an int array
*
* @param array
* The array of integer values
* @return The average of the values in the given array
*/
private static double average(ArrayList<Integer> list) {
double avg = 0.0;
for (int i : list)
avg += i;
return avg / list.size();
}
}
|
package exception;
public class EmptyResultDataException extends Exception{
public EmptyResultDataException(String msg) {
// TODO Auto-generated constructor stub
super(msg);
}
}
|
/* Open Source Software - may be modified and shared by FRC teams. The code   */
/* must be accompanied by the FIRST BSD license file in the root directory of */
/* the project.                                                               */
package edu.wpi.first.wpilibj.templates.commands;
import edu.wpi.first.wpilibj.templates.ScraperBike;
import edu.wpi.first.wpilibj.templates.subsystems.Pusher;
/**
*
* @author Team 2035 Programmers
*/
public class FrontPusherRetract extends CommandBase {
private Pusher g;
public FrontPusherRetract() {
g = ScraperBike.getPusher();
requires(g);
}
protected void initialize() {
}
protected void execute() {
g.moveFrontPusher(0);
}
protected boolean isFinished() {
return !g.isFrontContacting();
}
protected void end() {
}
protected void interrupted() {
}
}
|
package gov.nih.nci.calab.ui.submit;
/**
 * This class associates an assay result file with a characterization.
*
* @author pansu
*/
/* CVS $Id: LoadCharacterizationTableAction.java,v 1.8 2006-10-23 16:57:27 chand Exp $ */
import java.io.File;
import gov.nih.nci.calab.dto.characterization.CharacterizationFileBean;
import gov.nih.nci.calab.dto.characterization.SizeBean;
import gov.nih.nci.calab.service.submit.SubmitNanoparticleService;
import gov.nih.nci.calab.service.util.CalabConstants;
import gov.nih.nci.calab.service.util.PropertyReader;
import gov.nih.nci.calab.ui.core.AbstractDispatchAction;
import gov.nih.nci.calab.ui.core.InitSessionSetup;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.apache.log4j.Logger;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.apache.struts.upload.FormFile;
import org.apache.struts.validator.DynaValidatorForm;
public class LoadCharacterizationTableAction extends AbstractDispatchAction {
private static Logger logger = Logger.getLogger(LoadCharacterizationTableAction.class);
public ActionForward submit(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response)
throws Exception {
ActionForward forward = null;
DynaValidatorForm theForm = (DynaValidatorForm) form;
String particleName = (String) theForm.get("particleName");
String title = (String) theForm.get("title");
String description = (String) theForm.get("description");
String comments = (String) theForm.get("comments");
String keywords = (String) theForm.get("keywords");
String[] visibilities = (String[]) theForm.get("visibilities");
String[] keywordList = keywords.split("\r\n");
String fileSource = (String) theForm.get("fileSource");
CharacterizationFileBean fileBean = null;
SubmitNanoparticleService service = new SubmitNanoparticleService();
String fileNumber = (String) theForm.get("fileNumber");
if (fileSource.equals("new")) {
FormFile file = (FormFile) theForm.get("file");
String path = PropertyReader.getProperty(CalabConstants.FILEUPLOAD_PROPERTY, "fileRepositoryDir");
path = path + particleName + "\\";
File pathDir = new File (path);
if ( !pathDir.exists() ) pathDir.mkdirs();
fileBean = service.saveCharacterizationFile(particleName, file,
title, description, comments, keywordList, visibilities, path, fileNumber);
} else {
String fileId = (String) theForm.get("fileId");
fileBean = service.getFile(fileId);
}
request.getSession().setAttribute("characterizationFile" + fileNumber,
fileBean);
String forwardPage = (String) theForm.get("forwardPage");
forward = mapping.findForward(forwardPage);
return forward;
}
public ActionForward setup(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response)
throws Exception {
HttpSession session = request.getSession();
InitSessionSetup.getInstance().clearWorkflowSession(session);
InitSessionSetup.getInstance().clearSearchSession(session);
InitSessionSetup.getInstance().clearInventorySession(session);
InitSessionSetup.getInstance().setAllAssayTypeAssays(session);
String particleName = (String) request.getAttribute("particleName");
InitSessionSetup.getInstance().setAllRunFiles(session, particleName);
String fileNumber = (String) request.getAttribute("fileNumber");
String loadFileForward = (String) request
.getAttribute("loadFileForward");
DynaValidatorForm theForm = (DynaValidatorForm) form;
theForm.set("particleName", particleName);
theForm.set("fileNumber", fileNumber);
theForm.set("forwardPage", loadFileForward);
return mapping.getInputForward();
}
public boolean loginRequired() {
return true;
}
}
|
package org.xins.common.collections;
import java.io.InputStream;
import java.io.IOException;
import java.util.Iterator;
import java.util.Properties;
import org.xins.common.MandatoryArgumentChecker;
import org.xins.common.text.FastStringBuffer;
import org.xins.common.text.WhislEncoding;
import org.xins.logdoc.AbstractLogdocSerializable;
import org.xins.logdoc.LogdocSerializable;
import org.xins.logdoc.LogdocStringBuffer;
/**
* Utility functions for dealing with <code>PropertyReader</code> objects.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:ernst.dehaan@nl.wanadoo.com">ernst.dehaan@nl.wanadoo.com</a>)
*/
public final class PropertyReaderUtils
extends Object {
// Class fields
// Class functions
public static final boolean getBooleanProperty(PropertyReader properties,
String propertyName,
boolean fallbackDefault)
throws IllegalArgumentException,
InvalidPropertyValueException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties, "propertyName", propertyName);
// Query the PropertyReader
String value = properties.get(propertyName);
// Fallback to the default, if necessary
if (value == null || value.length() == 0) {
return fallbackDefault;
}
// Parse the string
if ("true".equals(value)) {
return true;
} else if ("false".equals(value)) {
return false;
} else {
throw new InvalidPropertyValueException(propertyName, value);
}
}
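   /*
    * Illustrative example (not part of the original source): with a property
    * "useCache" set to "true", getBooleanProperty(props, "useCache", false)
    * returns true; if the property is unset or empty the fallback (false) is
    * returned, and any other value triggers an InvalidPropertyValueException.
    */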
public static final int getIntProperty(PropertyReader properties,
String propertyName)
throws IllegalArgumentException,
MissingRequiredPropertyException,
InvalidPropertyValueException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties, "propertyName", propertyName);
// Query the PropertyReader
String value = properties.get(propertyName);
// Make sure the value is set
if (value == null || value.length() == 0) {
throw new MissingRequiredPropertyException(propertyName);
}
// Parse the string
try {
return Integer.parseInt(value);
} catch (NumberFormatException exception) {
throw new InvalidPropertyValueException(propertyName, value);
}
}
public static final String getRequiredProperty(PropertyReader properties,
String name)
throws IllegalArgumentException,
MissingRequiredPropertyException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties,
"name", name);
// Retrieve the value
String value = properties.get(name);
// The property is required
if (value == null || value.length() < 1) {
throw new MissingRequiredPropertyException(name);
}
return value;
}
public static final PropertyReader createPropertyReader(InputStream in)
throws IllegalArgumentException, IOException {
// Check preconditions
MandatoryArgumentChecker.check("in", in);
// Parse the input stream using java.util.Properties
Properties properties = new Properties();
properties.load(in);
// Convert from java.util.Properties to PropertyReader
return new PropertiesPropertyReader(properties);
}
public static final void serialize(PropertyReader properties,
LogdocStringBuffer buffer)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties,
"buffer", buffer);
Iterator names = properties.getNames();
boolean first = true;
while (names.hasNext()) {
// Get the name and value
String name = (String) names.next();
String value = properties.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
}
}
public static final void serialize(PropertyReader properties,
FastStringBuffer buffer,
String valueIfEmpty)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("buffer", buffer);
// Catch special case: No properties available.
if (properties == null || properties.size() == 0) {
if (valueIfEmpty != null) {
buffer.append(valueIfEmpty);
}
return;
}
// Loop over all properties
Iterator names = properties.getNames();
boolean first = true;
while (names.hasNext()) {
// Get the name and value
String name = (String) names.next();
String value = properties.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
}
}
public static final LogdocSerializable serialize(PropertyReader p,
String valueIfEmpty)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("p", p);
return new SerializedPropertyReader(p, valueIfEmpty);
}
// Constructors
/**
* Constructs a new <code>PropertyReaderUtils</code> object. This
* constructor is marked as <code>private</code>, since no objects of this
* class should be constructed.
*/
private PropertyReaderUtils() {
// empty
}
// Fields
// Methods
// Inner classes
/**
* A <code>LogdocSerializable</code> implementation for a
* <code>PropertyReader</code>.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:ernst.dehaan@nl.wanadoo.com">ernst.dehaan@nl.wanadoo.com</a>)
*/
private static final class SerializedPropertyReader
extends AbstractLogdocSerializable {
// Constructors
private SerializedPropertyReader(PropertyReader p, String valueIfEmpty)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("p", p);
_propertyReader = p;
_valueIfEmpty = valueIfEmpty;
}
// Fields
/**
* The <code>PropertyReader<code> to serialize. Never <code>null</code>.
*/
private final PropertyReader _propertyReader;
/**
* The value to return if the property reader is empty.
*/
private final String _valueIfEmpty;
// Methods
/**
* Initializes this <code>AbstractLogdocSerializable</code> object.
*
* @return
* the serialized form of this object which will from then on be
* returned from serialize(LogdocStringBuffer), never
* <code>null</code>.
*/
protected String initialize() {
Iterator names = _propertyReader.getNames();
         // If there are no parameters, then return the fallback value
if (! names.hasNext()) {
return _valueIfEmpty;
}
FastStringBuffer buffer = new FastStringBuffer(99);
boolean first = true;
do {
// Get the name and value
String name = (String) names.next();
String value = _propertyReader.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
} while (names.hasNext());
return buffer.toString();
}
}
}
|
package org.xins.common.collections;
import java.io.InputStream;
import java.io.IOException;
import java.util.Iterator;
import java.util.Properties;
import org.xins.common.MandatoryArgumentChecker;
import org.xins.common.text.FastStringBuffer;
import org.xins.common.text.WhislEncoding;
import org.xins.logdoc.AbstractLogdocSerializable;
import org.xins.logdoc.LogdocSerializable;
import org.xins.logdoc.LogdocStringBuffer;
/**
* Utility functions for dealing with <code>PropertyReader</code> objects.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:ernst.dehaan@nl.wanadoo.com">ernst.dehaan@nl.wanadoo.com</a>)
*
* @since XINS 1.0.0
*/
public final class PropertyReaderUtils
extends Object {
// Class fields
// Class functions
public static final boolean getBooleanProperty(PropertyReader properties,
String propertyName,
boolean fallbackDefault)
throws IllegalArgumentException,
InvalidPropertyValueException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties, "propertyName", propertyName);
// Query the PropertyReader
String value = properties.get(propertyName);
// Fallback to the default, if necessary
if (value == null || value.length() == 0) {
return fallbackDefault;
}
// Parse the string
if ("true".equals(value)) {
return true;
} else if ("false".equals(value)) {
return false;
} else {
throw new InvalidPropertyValueException(propertyName, value);
}
}
public static final int getIntProperty(PropertyReader properties,
String propertyName)
throws IllegalArgumentException,
MissingRequiredPropertyException,
InvalidPropertyValueException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties, "propertyName", propertyName);
// Query the PropertyReader
String value = properties.get(propertyName);
// Make sure the value is set
if (value == null || value.length() == 0) {
throw new MissingRequiredPropertyException(propertyName);
}
// Parse the string
try {
return Integer.parseInt(value);
} catch (NumberFormatException exception) {
throw new InvalidPropertyValueException(propertyName, value);
}
}
public static final String getRequiredProperty(PropertyReader properties,
String name)
throws IllegalArgumentException,
MissingRequiredPropertyException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties,
"name", name);
// Retrieve the value
String value = properties.get(name);
// The property is required
if (value == null || value.length() < 1) {
throw new MissingRequiredPropertyException(name);
}
return value;
}
public static final PropertyReader createPropertyReader(InputStream in)
throws IllegalArgumentException, IOException {
// Check preconditions
MandatoryArgumentChecker.check("in", in);
// Parse the input stream using java.util.Properties
Properties properties = new Properties();
properties.load(in);
// Convert from java.util.Properties to PropertyReader
return new PropertiesPropertyReader(properties);
}
public static final void serialize(PropertyReader properties,
LogdocStringBuffer buffer)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("properties", properties,
"buffer", buffer);
Iterator names = properties.getNames();
boolean first = true;
while (names.hasNext()) {
// Get the name and value
String name = (String) names.next();
String value = properties.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
}
}
public static final void serialize(PropertyReader properties,
FastStringBuffer buffer,
String valueIfEmpty)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("buffer", buffer);
// Catch special case: No properties available.
if (properties == null || properties.size() == 0) {
if (valueIfEmpty != null) {
buffer.append(valueIfEmpty);
}
return;
}
// Loop over all properties
Iterator names = properties.getNames();
boolean first = true;
while (names.hasNext()) {
// Get the name and value
String name = (String) names.next();
String value = properties.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
}
}
/**
* Constructs a <code>LogdocSerializable</code> for the specified
* <code>PropertyReader</code>.
*
* @param p
* the {@link PropertyReader} to construct a {@link LogdocSerializable}
* for, or <code>null</code>.
*
* @param valueIfEmpty
* the value to return if the specified set of properties is either
* <code>null</code> or empty, can be <code>null</code>.
*
* @return
* a new {@link LogdocSerializable}, never <code>null</code>.
*/
public static final LogdocSerializable
serialize(PropertyReader p, String valueIfEmpty) {
return new SerializedPropertyReader(p, valueIfEmpty);
}
// Constructors
/**
* Constructs a new <code>PropertyReaderUtils</code> object. This
* constructor is marked as <code>private</code>, since no objects of this
* class should be constructed.
*/
private PropertyReaderUtils() {
// empty
}
// Fields
// Methods
// Inner classes
/**
* A <code>LogdocSerializable</code> implementation for a
* <code>PropertyReader</code>.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:ernst.dehaan@nl.wanadoo.com">ernst.dehaan@nl.wanadoo.com</a>)
*/
private static final class SerializedPropertyReader
extends AbstractLogdocSerializable {
// Constructors
/**
* Constructs a new <code>SerializedPropertyReader</code> for the
* specified <code>PropertyReader</code>.
*
* @param p
* the {@link PropertyReader}, or <code>null</code>.
*
* @param valueIfEmpty
* the value to return if the specified set of properties is either
* <code>null</code> or empty, can be <code>null</code>.
*/
private SerializedPropertyReader(PropertyReader p,
String valueIfEmpty) {
_propertyReader = p;
_valueIfEmpty = valueIfEmpty;
}
// Fields
/**
* The <code>PropertyReader<code> to serialize. Can be
* <code>null</code>.
*/
private final PropertyReader _propertyReader;
/**
* The value to return if the property reader is empty.
*/
private final String _valueIfEmpty;
// Methods
/**
* Initializes this <code>AbstractLogdocSerializable</code> object.
*
* @return
* the serialized form of this object which will from then on be
* returned from serialize(LogdocStringBuffer), never
* <code>null</code>.
*/
protected String initialize() {
         // If the property set is null, return the fallback
if (_propertyReader == null) {
return _valueIfEmpty;
}
Iterator names = _propertyReader.getNames();
// If there are no parameters, then return the fallback
if (! names.hasNext()) {
return _valueIfEmpty;
}
FastStringBuffer buffer = new FastStringBuffer(99);
boolean first = true;
do {
// Get the name and value
String name = (String) names.next();
String value = _propertyReader.get(name);
// If the value is null or an empty string, then output nothing
if (value == null || value.length() == 0) {
continue;
}
// Append an ampersand, except for the first entry
if (!first) {
buffer.append('&');
} else {
first = false;
}
// Append the key and the value, separated by an equals sign
buffer.append(WhislEncoding.encode(name));
buffer.append('=');
buffer.append(WhislEncoding.encode(value));
} while (names.hasNext());
return buffer.toString();
}
}
}
|
package org.orbeon.oxf.xforms.function.xxforms;
import org.orbeon.oxf.xforms.XFormsContainingDocument;
import org.orbeon.oxf.xforms.XFormsInstance;
import org.orbeon.oxf.xforms.XFormsUtils;
import org.orbeon.oxf.xforms.function.XFormsFunction;
import org.orbeon.saxon.expr.Expression;
import org.orbeon.saxon.expr.PathMap;
import org.orbeon.saxon.expr.XPathContext;
import org.orbeon.saxon.om.EmptyIterator;
import org.orbeon.saxon.om.ListIterator;
import org.orbeon.saxon.om.SequenceIterator;
import org.orbeon.saxon.trans.XPathException;
import java.util.Collections;
/**
* xxforms:instance() function. This function operates like the standard instance() function, except that it looks for
* instances globally instead of using the current model.
*/
public class XXFormsInstance extends XFormsFunction {
public SequenceIterator iterate(XPathContext xpathContext) throws XPathException {
final XFormsContainingDocument containingDocument = getContainingDocument(xpathContext);
// Get instance id
final Expression instanceIdExpression = argument[0];
final String instanceId = XFormsUtils.namespaceId(containingDocument, instanceIdExpression.evaluateAsString(xpathContext));
final XFormsInstance instance = containingDocument.findInstance(instanceId);
// Return instance document if found
if (instance != null) {
// "this function returns a node-set containing just the root element node"
return new ListIterator(Collections.singletonList(instance.getInstanceRootElementInfo()));
} else {
// "an empty node-set is returned"
getContainingDocument(xpathContext).logWarning("function", "Instance not found with xxforms:instance() function",
new String[] { "instance id", instanceId });
return EmptyIterator.getInstance();
}
}
public PathMap.PathMapNode addToPathMap(PathMap pathMap, PathMap.PathMapNode pathMapNode) {
return addDocToPathMap(pathMap, pathMapNode);
}
}
|
package org.mockito.release.gradle;
import org.gradle.api.GradleException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static java.util.Arrays.asList;
import static org.mockito.release.internal.gradle.util.team.TeamParser.validateTeamMembers;
public class ReleaseConfiguration {
private static final String NO_ENV_VARIABLE = null;
private final Map<String, Object> configuration = new HashMap<String, Object>();
private final GitHub gitHub = new GitHub();
private final ReleaseNotes releaseNotes = new ReleaseNotes();
private final Git git = new Git();
private final Team team = new Team();
private String previousReleaseVersion;
public ReleaseConfiguration() {
//Configure default values
git.setTagPrefix("v"); //so that tags are "v1.0", "v2.3.4"
git.setReleasableBranchRegex("master|release/.+"); // matches 'master', 'release/2.x', 'release/3.x', etc.
team.setContributors(Collections.<String>emptyList());
team.setDevelopers(Collections.<String>emptyList());
git.setCommitMessagePostfix("[ci skip]");
releaseNotes.setLabelMapping(Collections.<String, String>emptyMap());
releaseNotes.setIgnoreCommitsContaining(asList("[ci skip]"));
}
//TODO currently it's not clear when to use class fields and when to use the 'configuration' map
//Let's make it clear in the docs
private boolean dryRun = true;
/**
* See {@link #isDryRun()}
*/
public void setDryRun(boolean dryRun) {
this.dryRun = dryRun;
}
/**
* If the release steps should be invoked in "dry run" mode.
* Relevant only to some kinds of release steps,
* such as bintray upload, git push.
*/
public boolean isDryRun() {
return dryRun;
}
public GitHub getGitHub() {
return gitHub;
}
public ReleaseNotes getReleaseNotes() {
return releaseNotes;
}
public Git getGit() {
return git;
}
public Team getTeam() {
return team;
}
/**
* See {@link #getPreviousReleaseVersion()}
*/
public void setPreviousReleaseVersion(String previousReleaseVersion) {
this.previousReleaseVersion = previousReleaseVersion;
}
/**
     * Returns the last previously released version number.
* See {@link org.mockito.release.version.VersionInfo#getPreviousVersion()}
*/
public String getPreviousReleaseVersion() {
return previousReleaseVersion;
}
public class GitHub {
/**
* GitHub repository name, for example: "mockito/mockito"
*/
public String getRepository() {
return getString("gitHub.repository");
}
/**
* See {@link #getRepository()}
*
* @param repository name of the repo, including user or organization section, for example: "mockito/mockito"
*/
public void setRepository(String repository) {
configuration.put("gitHub.repository", repository);
}
/**
* GitHub user associated with the write auth token.
* Needed for the release process to push changes.
*/
public String getWriteAuthUser() {
return getString("gitHub.writeAuthUser");
}
/**
* See {@link #getWriteAuthUser()}
*/
public void setWriteAuthUser(String user) {
configuration.put("gitHub.writeAuthUser", user);
}
/**
* GitHub read only auth token.
* Since the token is read-only it is ok to check that in to VCS.
*/
public String getReadOnlyAuthToken() {
return getString("gitHub.readOnlyAuthToken");
}
/**
* See {@link #getReadOnlyAuthToken()}
*/
public void setReadOnlyAuthToken(String token) {
configuration.put("gitHub.readOnlyAuthToken", token);
}
/**
* GitHub write auth token to be used for pushing code to GitHub.
* Auth token is used with the user specified in {@link #getWriteAuthUser()}.
* <strong>WARNING:</strong> please don't commit the write auth token to VCS.
* Instead export "GH_WRITE_TOKEN" environment variable.
* The env variable value will be automatically returned by this method.
*/
public String getWriteAuthToken() {
return (String) getValue("gitHub.writeAuthToken", "GH_WRITE_TOKEN",
"Please export 'GH_WRITE_TOKEN' env variable first!\n" +
" The value of that variable is automatically used for 'releasing.gitHub.writeAuthToken' setting.\n" +
" It is highly recommended to keep write token secure and store env variable with your CI configuration.\n" +
" Alternatively, you can configure the write token explicitly in the *.gradle file:\n" +
" releasing.gitHub.writeAuthToken = 'secret'");
}
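        /*
         * Illustrative usage (not part of the original source): on CI, export the
         * token before the build, e.g. `export GH_WRITE_TOKEN=<secret>`, or set
         * `releasing.gitHub.writeAuthToken = 'secret'` in the *.gradle file, as the
         * error message above suggests.
         */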
public void setWriteAuthToken(String writeAuthToken) {
configuration.put("gitHub.writeAuthToken", writeAuthToken);
}
}
public class ReleaseNotes {
/**
* Release notes file relative path, for example: "docs/release-notes.md"
*/
public String getFile() {
return getString("releaseNotes.file");
}
/**
* See {@link #getFile()}
*/
public void setFile(String file) {
configuration.put("releaseNotes.file", file);
}
/**
* Issue tracker label mappings.
* The mapping of issue tracker labels (for example "GitHub label") to human readable and presentable name.
* The order of labels is important and will influence the order
* in which groups of issues are generated in release notes.
* Examples: ['java-9': 'Java 9 support', 'BDD': 'Behavior-Driven Development support']
*/
public Map<String, String> getLabelMapping() {
return getMap("releaseNotes.labelMapping");
}
/**
* See {@link #getLabelMapping()}
*/
public void setLabelMapping(Map<String, String> labelMapping) {
configuration.put("releaseNotes.labelMapping", labelMapping);
}
/**
* Release notes are generated based on information in commit messages.
* If a commit message contains any of texts from this collection,
* that commit will be ignored and not used for generating release notes.
*/
public Collection<String> getIgnoreCommitsContaining() {
return getCollection("releaseNotes.ignoreCommitsContaining");
}
/**
* See {@link #getIgnoreCommitsContaining()}
*/
public void setIgnoreCommitsContaining(Collection<String> commitMessageParts) {
configuration.put("releaseNotes.ignoreCommitsContaining", commitMessageParts);
}
}
public class Git {
/**
* Git user to be used for automated commits made by release automation
* (version bumps, release notes commits, etc.).
* For example: "mockito.release.tools"
*/
public String getUser() {
return getString("git.user");
}
/**
         * See {@link #getUser()}
*/
public void setUser(String user) {
configuration.put("git.user", user);
}
/**
* Git email to be used for automated commits made by release automation
* (version bumps, release notes commits, etc.).
* For example "mockito.release.tools@gmail.com"
*/
public String getEmail() {
return getString("git.email");
}
/**
* See {@link #getEmail()}
*/
public void setEmail(String email) {
configuration.put("git.email", email);
}
/**
* Regex to be used to identify branches that are entitled to be released, for example "master|release/.+"
*/
public String getReleasableBranchRegex() {
return getString("git.releasableBranchRegex");
}
/**
* See {@link #getReleasableBranchRegex()}
*/
public void setReleasableBranchRegex(String releasableBranchRegex) {
configuration.put("git.releasableBranchRegex", releasableBranchRegex);
}
/**
* Prefix added to the version to create VCS-addressable tag,
* for example: "v".
         * Empty string is ok and means that there is no prefix.
*/
public String getTagPrefix() {
return getString("git.tagPrefix");
}
/**
* See {@link #getTagPrefix()}
*/
public void setTagPrefix(String tagPrefix) {
configuration.put("git.tagPrefix", tagPrefix);
}
public String getCommitMessagePostfix() {
return getString("git.commitMessagePostfix");
}
/**
* See {@link #getCommitMessagePostfix()}
*/
public void setCommitMessagePostfix(String commitMessagePostfix) {
//TODO protect this setter and other relevant from invalid input (null value)
configuration.put("git.commitMessagePostfix", commitMessagePostfix);
}
}
/**
* Team configuration
*/
public class Team {
public Collection<String> getDevelopers() {
return getCollection("team.developers");
}
/**
* See {@link #getDevelopers()}
*/
public void setDevelopers(Collection<String> developers) {
validateTeamMembers(developers);
configuration.put("team.developers", developers);
}
public Collection<String> getContributors() {
return getCollection("team.contributors");
}
/**
* See {@link #getContributors()}
*/
public void setContributors(Collection<String> contributors) {
validateTeamMembers(contributors);
configuration.put("team.contributors", contributors);
}
}
//TODO unit test message creation and error handling, suggested plan:
//1. Create wrapper type over 'configuration' map
//2. Move handling to this new object and make it testable, along with env variables
private String getString(String key) {
return getString(key, NO_ENV_VARIABLE);
}
private Boolean getBoolean(String key) {
Object value = configuration.get(key);
return Boolean.parseBoolean(value.toString());
}
private String getString(String key, String envVarName) {
return (String) getValue(key, envVarName, "Please configure 'releasing." + key + "' value (String).");
}
private Map getMap(String key) {
return (Map) getValue(key, NO_ENV_VARIABLE, "Please configure 'releasing." + key + "' value (Map).");
}
private Collection<String> getCollection(String key) {
return (Collection) getValue(key, NO_ENV_VARIABLE, "Please configure 'releasing." + key + "' value (Collection).");
}
private Object getValue(String key, String envVarName, String message) {
Object value = configuration.get(key);
if (value != null) {
return value;
}
if (envVarName != null) {
value = System.getenv(envVarName);
if (value != NO_ENV_VARIABLE) {
return value;
}
}
throw new GradleException(message);
}
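    /*
     * Illustrative lookup order (not part of the original source): getValue() first
     * checks the explicit 'configuration' map entry, then the named environment
     * variable (when one is given), and finally fails with a GradleException carrying
     * the supplied message, e.g. for getString("gitHub.repository").
     */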
}
|
package org.mockito.release.gradle;
import org.gradle.api.GradleException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
public class ReleaseConfiguration {
private final Map<String, Object> configuration = new HashMap<String, Object>();
private final GitHub gitHub = new GitHub();
private final ReleaseNotes releaseNotes = new ReleaseNotes();
private final Git git = new Git();
private final Team team = new Team();
public ReleaseConfiguration() {
//Configure default values
this.git.setTagPrefix("v"); //so that tags are "v1.0", "v2.3.4"
this.git.setReleasableBranchRegex("master|release/.+"); // matches 'master', 'release/2.x', 'release/3.x', etc.
this.git.setCommitMessagePostfix(" [ci skip]");
}
private boolean dryRun = true;
public void setDryRun(boolean dryRun) {
this.dryRun = dryRun;
}
public boolean isDryRun() {
return dryRun;
}
public GitHub getGitHub() {
return gitHub;
}
public ReleaseNotes getReleaseNotes() {
return releaseNotes;
}
public Git getGit() {
return git;
}
public Team getTeam() {
return team;
}
public class GitHub {
/**
* GitHub repository name, for example: "mockito/mockito"
*/
public String getRepository() {
return getString("gitHub.repository");
}
/**
* See {@link #getRepository()}
*
* @param repository name of the repo, including user or organization section, for example: "mockito/mockito"
*/
public void setRepository(String repository) {
configuration.put("gitHub.repository", repository);
}
/**
* GitHub user associated with the write auth token.
* Needed for the release process to push changes.
*/
public String getWriteAuthUser() {
return getString("gitHub.writeAuthUser");
}
/**
* See {@link #getWriteAuthUser()}
*/
public void setWriteAuthUser(String user) {
configuration.put("gitHub.writeAuthUser", user);
}
/**
* GitHub read only auth token.
* Since the token is read-only it is ok to check that in to VCS.
*/
public String getReadOnlyAuthToken() {
return getString("gitHub.readOnlyAuthToken");
}
/**
* See {@link #getReadOnlyAuthToken()}
*/
public void setReadOnlyAuthToken(String token) {
configuration.put("gitHub.readOnlyAuthToken", token);
}
/**
* GitHub write auth token to be used for pushing code to GitHub.
* Auth token is used with the user specified in {@link #getWriteAuthUser()}.
* <strong>WARNING:</strong> please don't commit the write auth token to VCS.
* Instead export "GH_WRITE_TOKEN" environment variable.
* The env variable value will be automatically returned by this method.
*/
public String getWriteAuthToken() {
return (String) getValue("gitHub.writeAuthToken", "GH_WRITE_TOKEN",
"Please export 'GH_WRITE_TOKEN' env variable first!\n" +
" The value of that variable is automatically used for 'releasing.gitHub.writeAuthToken' setting.\n" +
" It is highly recommended to keep write token secure and store env variable with your CI configuration.\n" +
" Alternatively, you can configure the write token explicitly in the *.gradle file:\n" +
" releasing.gitHub.writeAuthToken = 'secret'");
}
public void setWriteAuthToken(String writeAuthToken) {
configuration.put("gitHub.writeAuthToken", writeAuthToken);
}
}
public class ReleaseNotes {
/**
* Release notes file relative path, for example: "docs/release-notes.md"
*/
public String getFile() {
return getString("releaseNotes.file");
}
/**
* See {@link #getFile()}
*/
public void setFile(String file) {
configuration.put("releaseNotes.file", file);
}
/**
* Notable release notes file, for example "docs/notable-release-notes.md"
*/
public String getNotableFile() {
return getString("releaseNotes.notableFile");
}
/**
* See {@link #getNotableFile()}
*/
public void setNotableFile(String notableFile) {
configuration.put("releaseNotes.notableFile", notableFile);
}
/**
* Issue tracker label mappings.
* The mapping of issue tracker labels (for example "GitHub label") to human readable and presentable name.
* The order of labels is important and will influence the order
* in which groups of issues are generated in release notes.
* Examples: ['java-9': 'Java 9 support', 'BDD': 'Behavior-Driven Development support']
*/
public Map<String, String> getLabelMapping() {
return getMap("releaseNotes.labelMapping");
}
/**
* See {@link #getLabelMapping()}
*/
public void setLabelMapping(Map<String, String> labelMapping) {
configuration.put("releaseNotes.labelMapping", labelMapping);
}
}
public class Git {
/**
* Git user to be used for automated commits made by release automation
* (version bumps, release notes commits, etc.).
* For example: "mockito.release.tools"
*/
public String getUser() {
return getString("git.user");
}
/**
         * See {@link #getUser()}
*/
public void setUser(String user) {
configuration.put("git.user", user);
}
/**
* Git email to be used for automated commits made by release automation
* (version bumps, release notes commits, etc.).
* For example "mockito.release.tools@gmail.com"
*/
public String getEmail() {
return getString("git.email");
}
/**
* See {@link #getEmail()}
*/
public void setEmail(String email) {
configuration.put("git.email", email);
}
/**
* Regex to be used to identify branches that are entitled to be released, for example "master|release/.+"
*/
public String getReleasableBranchRegex() {
return getString("git.releasableBranchRegex");
}
/**
* See {@link #getReleasableBranchRegex()}
*/
public void setReleasableBranchRegex(String releasableBranchRegex) {
configuration.put("git.releasableBranchRegex", releasableBranchRegex);
}
/**
* See {@link #getBranch()}
*/
public void setBranch(String branch) {
configuration.put("git.branch", branch);
}
/**
* Returns the branch the release process works on and commits code to.
* If not specified, it will be loaded from "TRAVIS_BRANCH" environment variable.
*/
public String getBranch() {
//TODO decouple from Travis. Suggested plan:
//1. We remove the 'branch' configuration from here completely
//2. We add a utility method that gives us current branch, it should trigger the "git call" only once.
//3. We call that utility method if we need branch
return getString("git.branch", "TRAVIS_BRANCH");
}
/**
* Prefix added to the version to create VCS-addressable tag,
* for example: "v".
         * Empty string is ok and means that there is no prefix.
*/
public String getTagPrefix() {
return getString("git.tagPrefix");
}
/**
* See {@link #getTagPrefix()}
*/
public void setTagPrefix(String tagPrefix) {
configuration.put("git.tagPrefix", tagPrefix);
}
public String getCommitMessagePostfix() {
return getString("git.commitMessagePostfix");
}
/**
* See {@link #getCommitMessagePostfix()}
*/
public void setCommitMessagePostfix(String commitMessagePostfix) {
configuration.put("git.commitMessagePostfix", commitMessagePostfix);
}
}
/**
* Team configuration
*/
public class Team {
public Collection<String> getDevelopers() {
return getCollection("team.developers");
}
/**
* See {@link #getDevelopers()}
*/
public void setDevelopers(Collection<String> developers) {
configuration.put("team.developers", developers);
}
public Collection<String> getContributors() {
return getCollection("team.contributors");
}
/**
* See {@link #getContributors()}
*/
public void setContributors(Collection<String> contributors) {
configuration.put("team.contributors", contributors);
}
}
//TODO unit test message creation and error handling, suggested plan:
//1. Create wrapper type over 'configuration' map
//2. Move handling to this new object and make it testable, along with env variables
private String getString(String key) {
return getString(key, null);
}
private String getString(String key, String envVarName) {
return (String) getValue(key, envVarName, "Please configure 'releasing." + key + "' value (String).");
}
private Map getMap(String key) {
return (Map) getValue(key, null,"Please configure 'releasing." + key + "' value (Map).");
}
private Collection<String> getCollection(String key) {
return (Collection) getValue(key, null, "Please configure 'releasing." + key + "' value (Collection).");
}
private Object getValue(String key, String envVarName, String message) {
Object value = configuration.get(key);
if (value != null) {
return value;
}
if (envVarName != null) {
value = System.getenv(envVarName);
if (value != null) {
return value;
}
}
throw new GradleException(message);
}
}
|
package org.shipkit.gradle.notes;
import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFile;
import org.gradle.api.tasks.Optional;
import org.gradle.api.tasks.TaskAction;
import org.shipkit.gradle.configuration.ShipkitConfiguration;
import org.shipkit.internal.gradle.notes.tasks.UpdateReleaseNotes;
import org.shipkit.internal.notes.header.HeaderProvider;
import org.shipkit.internal.notes.model.ReleaseNotesData;
import java.io.File;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
/**
* Generates incremental, detailed release notes text and appends them to the file {@link #getReleaseNotesFile()}.
 * When preview mode is enabled ({@link #isPreviewMode()}), only the new release notes content is displayed (the file is not updated).
*/
public class UpdateReleaseNotesTask extends DefaultTask {
//Due to the preview mode, we set the input/output of this property in the plugin implementation
private File releaseNotesFile;
private boolean previewMode;
@Input @Optional private String previousVersion;
@Input private String gitHubUrl;
@Input private String gitHubRepository;
@Input private Map<String, String> gitHubLabelMapping = new LinkedHashMap<String, String>();
@Input private String publicationRepository;
@InputFile private File releaseNotesData;
@Input private Collection<String> developers = new LinkedList<String>();
@Input private Collection<String> contributors = new LinkedList<String>();
@InputFile private File contributorsDataFile;
@Input private boolean emphasizeVersion;
@Input private String version;
@Input private String tagPrefix;
@Input private String header = "Release notes were automatically generated by [Shipkit](http://shipkit.org/)";
/**
     * Generates incremental release notes and appends them to the top of the release notes file.
*/
@TaskAction
public void updateReleaseNotes() {
new UpdateReleaseNotes(new HeaderProvider()).updateReleaseNotes(this);
}
/**
     * @return true if the task is configured to generate only a preview of the release notes (without appending them to the file), and false otherwise
*/
public boolean isPreviewMode() {
return previewMode;
}
/**
     * See {@link #isPreviewMode()}
*/
public void setPreviewMode(boolean previewMode) {
this.previewMode = previewMode;
}
/**
* Release notes file this task operates on.
*/
public File getReleaseNotesFile() {
return releaseNotesFile;
}
/**
* See {@link #getReleaseNotesFile()}
*/
public void setReleaseNotesFile(File releaseNotesFile) {
this.releaseNotesFile = releaseNotesFile;
}
/**
* The version we are generating the release notes for.
*/
public String getVersion() {
return version;
}
/**
* See {@link #getVersion()}
*/
public void setVersion(String version) {
this.version = version;
}
/**
* See {@link ShipkitConfiguration.Git#getTagPrefix()}
*/
public String getTagPrefix() {
return tagPrefix;
}
/**
* See {@link #getTagPrefix()}
*/
public void setTagPrefix(String tagPrefix) {
this.tagPrefix = tagPrefix;
}
public String getGitHubUrl() {
return gitHubUrl;
}
/**
* See {@link #getGitHubUrl()}
*/
public void setGitHubUrl(String gitHubUrl) {
this.gitHubUrl = gitHubUrl;
}
/**
* Name of the GitHub repository in format "user|org/repository",
* for example: "mockito/mockito"
*/
public String getGitHubRepository() {
return gitHubRepository;
}
/**
* @return a link to the generated release notes file hosted on Github.
*/
public String getReleaseNotesUrl(String branch) {
return getGitHubUrl() + "/" + getGitHubRepository() + "/blob/" + branch + "/" + getProject().relativePath(getReleaseNotesFile());
}
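    /*
     * Illustrative example (not part of the original source): with gitHubUrl
     * "https://github.com", gitHubRepository "mockito/mockito", branch "release/2.x"
     * and a release notes file at "doc/release-notes.md", this resolves to
     * "https://github.com/mockito/mockito/blob/release/2.x/doc/release-notes.md".
     */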
/**
* See {@link #getGitHubRepository()}
*/
public void setGitHubRepository(String gitHubRepository) {
this.gitHubRepository = gitHubRepository;
}
/**
* Issue tracker label mappings.
* The mapping of "GitHub label" to human readable and presentable name.
* The order of labels is important and will influence the order
* in which groups of issues are generated in release notes.
* Examples: ['java-9': 'Java 9 support', 'BDD': 'Behavior-Driven Development support']
*/
public Map<String, String> getGitHubLabelMapping() {
return gitHubLabelMapping;
}
/**
* See {@link #getGitHubLabelMapping()}
*/
public void setGitHubLabelMapping(Map<String, String> gitHubLabelMapping) {
this.gitHubLabelMapping = gitHubLabelMapping;
}
/**
* The target repository where the publications / binaries are published to.
* Shown in the release notes.
*/
public String getPublicationRepository() {
return publicationRepository;
}
/**
* See {@link #getPublicationRepository()}
*/
public void setPublicationRepository(String publicationRepository) {
this.publicationRepository = publicationRepository;
}
/**
* Previous released version we generate the release notes from.
*/
public String getPreviousVersion() {
return previousVersion;
}
/**
* See {@link #getPreviousVersion()}
*/
public void setPreviousVersion(String previousVersion) {
this.previousVersion = previousVersion;
}
/**
* Input to the release notes generation,
* serialized release notes data objects of type {@link ReleaseNotesData}.
* They are used to generate formatted release notes.
     * The data file is generated by {@link FetchReleaseNotesTask}.
*/
public File getReleaseNotesData() {
return releaseNotesData;
}
/**
* See {@link #getReleaseNotesData()}
*/
public void setReleaseNotesData(File releaseNotesData) {
this.releaseNotesData = releaseNotesData;
}
/**
* Developers as configured in {@link ShipkitConfiguration.Team#getDevelopers()}
*/
public Collection<String> getDevelopers() {
return developers;
}
/**
* See {@link #getDevelopers()}
*/
public void setDevelopers(Collection<String> developers) {
this.developers = developers;
}
/**
* Contributors as configured in {@link ShipkitConfiguration.Team#getContributors()}
*/
public Collection<String> getContributors() {
return contributors;
}
/**
* See {@link #getContributors()}
*/
public void setContributors(Collection<String> contributors) {
this.contributors = contributors;
}
/**
     * See {@link #getContributorsDataFile()}
*/
public void setContributorsDataFile(File contributorsDataFile) {
this.contributorsDataFile = contributorsDataFile;
}
/**
     * File that contains the contributors data read from GitHub.
*/
public File getContributorsDataFile() {
return contributorsDataFile;
}
/**
     * See {@link #isEmphasizeVersion()}
*/
public void setEmphasizeVersion(boolean emphasizeVersion) {
this.emphasizeVersion = emphasizeVersion;
}
/**
     * Whether the current version should be emphasized in the release notes.
*/
public boolean isEmphasizeVersion() {
return emphasizeVersion;
}
public String getHeader() {
return header;
}
public void setHeader(String header) {
this.header = header;
}
}
|
package be.shoktan.BeeBreedingManager.model;
import java.util.Collection;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Inheritance;
import javax.persistence.InheritanceType;
import javax.persistence.ManyToMany;
import org.apache.commons.lang3.builder.CompareToBuilder;
/**
 * Class used to represent a bee or a group of bees.
 * Useful for mutation representation.
* @author Wisthy
*
*/
@Entity @Inheritance(strategy=InheritanceType.JOINED)
public abstract class Specification extends ABaseEntity implements Comparable<Specification>{
@Column(unique = true)
private String name;
@ManyToMany
private Collection<Group> groups;
/**
* @return the name
*/
public String getName() {
return name;
}
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
/**
* @return the groups
*/
public Collection<Group> getGroups() {
return groups;
}
/**
* @param groups the groups to set
*/
public void setGroups(Collection<Group> groups) {
this.groups = groups;
}
/**
*
* @return the type of the instance
*/
protected abstract ESpecificationType getType();
/*
* (non-Javadoc)
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Specification o) {
return new CompareToBuilder().append(this.getType(), o.getType())
.append(this.name, o.name)
.appendSuper(super.compareTo(o))
.toComparison();
}
}
|
package beaform.gui.search;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import javax.swing.SwingUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import beaform.entities.Formula;
import beaform.gui.FormulaTree;
/**
* This class is used to render the result of a formula search in the GUI.
*
* @author Steven Post
*
*/
public final class RenderFormulaSearchByTagResult implements Runnable {
/** A logger */
private static final Logger LOG = LoggerFactory.getLogger(RenderFormulaSearchByTagResult.class);
/** The task with the search result */
private final Future<List<Formula>> searchresult;
/** The target panel */
private final SearchGui pane;
/**
* Constructor.
* @param searchresult The task for the search.
* @param pane The target panel.
*/
public RenderFormulaSearchByTagResult(final Future<List<Formula>> searchresult, final SearchGui pane) {
this.searchresult = searchresult;
this.pane = pane;
}
/**
* Invoked when the action occurs.
*/
@Override
public void run() {
try {
final List<Formula> searchResult = this.searchresult.get();
SwingUtilities.invokeLater(new AddFormTreeToGui(searchResult, this.pane));
}
catch (InterruptedException | ExecutionException e1) {
LOG.error("An error happened getting the result from the search.", e1);
return;
}
}
/**
	 * A task to do the actual rendering.
*
* @author Steven Post
*
*/
private static final class AddFormTreeToGui implements Runnable {
/** The result of the search */
private final List<Formula> searchResult;
/** The target panel */
private final SearchGui pane;
/**
* Constructor.
* @param searchResult the result of the search
* @param pane the target panel
*/
public AddFormTreeToGui(final List<Formula> searchResult, final SearchGui pane) {
this.searchResult = new ArrayList<Formula>(searchResult);
this.pane = pane;
}
/**
		 * Builds the formula tree from the search result and adds it to the search pane.
*/
@Override
public void run() {
final FormulaTree formulaTree = new FormulaTree(this.searchResult);
this.pane.setSearchResults(formulaTree);
}
}
}
|
package bio.terra.cli.command.resource.create;
import bio.terra.cli.businessobject.Context;
import bio.terra.cli.command.shared.BaseCommand;
import bio.terra.cli.command.shared.options.Format;
import bio.terra.cli.command.shared.options.ResourceCreation;
import bio.terra.cli.command.shared.options.WorkspaceOverride;
import bio.terra.cli.serialization.userfacing.input.CreateGcpNotebookParams;
import bio.terra.cli.serialization.userfacing.input.CreateResourceParams;
import bio.terra.cli.serialization.userfacing.resource.UFGcpNotebook;
import bio.terra.workspace.model.AccessScope;
import bio.terra.workspace.model.StewardshipType;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
import java.util.UUID;
import picocli.CommandLine;
/** This class corresponds to the fourth-level "terra resource create gcp-notebook" command. */
@CommandLine.Command(
name = "gcp-notebook",
description =
"Add a controlled GCP notebook instance resource.\n"
+ "For a detailed explanation of some parameters, see https://cloud.google.com/vertex-ai/docs/workbench/reference/rest/v1/projects.locations.instances#Instance.",
showDefaultValues = true,
sortOptions = false)
public class GcpNotebook extends BaseCommand {
private static final String DEFAULT_VM_IMAGE_PROJECT = "deeplearning-platform-release";
private static final String DEFAULT_VM_IMAGE_FAMILY = "r-latest-cpu-experimental";
// Use CreateResource instead of createControlledResource because only private notebooks are
// supported and we don't want to provide options that are not useful.
@CommandLine.Mixin ResourceCreation resourceCreationOptions;
@CommandLine.Option(
names = "--instance-id",
description =
"The unique name to give to the notebook instance. Cannot be changed later. "
+ "The instance name must be 1 to 63 characters long and contain only lowercase "
+ "letters, numeric characters, and dashes. The first character must be a lowercase "
+ "letter and the last character cannot be a dash. If not specified, an "
+ "auto-generated name based on your email address and time will be used.")
private String instanceId;
@CommandLine.Option(
names = "--location",
defaultValue = "us-central1-a",
description =
"The Google Cloud location of the instance (https://cloud.google.com/vertex-ai/docs/general/locations#user-managed-notebooks-locations).")
private String location;
@CommandLine.Option(
names = "--machine-type",
defaultValue = "n1-standard-4",
description = "The Compute Engine machine type of this instance.")
private String machineType;
@CommandLine.Option(
names = "--post-startup-script",
defaultValue =
"https://raw.githubusercontent.com/DataBiosphere/terra-cli/main/notebooks/post-startup.sh",
description =
"Path to a Bash script that automatically runs after a notebook instance fully boots up. "
+ "The path must be a URL or Cloud Storage path, e.g. 'gs://path-to-file/file-name'.")
private String postStartupScript;
@CommandLine.Option(
names = "-M, --metadata",
description =
"Custom metadata to apply to this instance.\n"
+ "specify multiple metadata in the format of --metadata=key1=value1 --Mkey2=value2.\n"
+ "By default sets some jupyterlab extensions "
+ "(installed-extensions=jupyterlab_bigquery-latest.tar.gz,jupyterlab_gcsfilebrowser-latest.tar.gz,jupyterlab_gcpscheduler-latest.tar.gz) "
+ "and the Terra workspace id (terra-workspace-id=[WORKSPACE_ID]).")
private Map<String, String> metadata;
@CommandLine.ArgGroup(exclusive = true, multiplicity = "0..1")
GcpNotebook.VmOrContainerImage vmOrContainerImage;
static class VmOrContainerImage {
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "1",
heading =
"Definition of a custom Compute Engine virtual machine image for starting a "
+ "notebook instance with the environment installed directly on the VM.\n"
+ "If neither this nor --container-* are specified, default to \n"
+ "'--vm-image-project="
+ DEFAULT_VM_IMAGE_PROJECT
+ " --vm-image-family="
+ DEFAULT_VM_IMAGE_FAMILY
+ "'.%n")
VmImage vm;
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "1",
heading =
"Definition of a container image for starting a notebook instance with the environment "
+ "installed in a container.%n")
ContainerImage container;
}
static class VmImage {
@CommandLine.Option(
names = "--vm-image-project",
required = true,
description = "The ID of the Google Cloud project that this VM image belongs to.")
private String project;
@CommandLine.ArgGroup(exclusive = true, multiplicity = "1")
ImageConfig imageConfig;
static class ImageConfig {
@CommandLine.Option(
names = "--vm-image-family",
description =
"Use this VM image family to find the image; the newest image in this family will be "
+ "used.")
private String family;
@CommandLine.Option(
names = "--vm-image-name",
description = "Use this VM image name to find the image.")
private String name;
}
}
static class ContainerImage {
@CommandLine.Option(
names = "--container-repository",
required = true,
description =
"The path to the container image repository. For example: "
+ "'gcr.io/{project_id}/{imageName}'.")
private String repository;
@CommandLine.Option(
names = "--container-tag",
description =
"The tag of the container image. If not specified, this defaults to the latest tag.")
private String tag;
}
// TODO(PF-767): Consider how to improve usability & validation of these parameters.
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "The hardware accelerator used on this instance.%n")
GcpNotebook.AcceleratorConfig acceleratorConfig;
static class AcceleratorConfig {
@CommandLine.Option(names = "--accelerator-type", description = "Type of this accelerator.")
private String type;
@CommandLine.Option(
names = "--accelerator-core-count",
description = "Count of cores of this accelerator.")
private Long coreCount;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "GPU driver configurations.%n")
GcpNotebook.GpuDriverConfiguration gpuDriverConfiguration;
static class GpuDriverConfiguration {
@CommandLine.Option(
names = "--install-gpu-driver",
description =
"If true, the end user authorizes Google Cloud to install a GPU driver on this instance.")
private Boolean installGpuDriver;
@CommandLine.Option(
names = "--custom-gpu-driver-path",
description = "Specify a custom Cloud Storage path where the GPU driver is stored.")
private String customGpuDriverPath;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "Boot disk configurations.%n")
GcpNotebook.BootDiskConfiguration bootDiskConfiguration;
static class BootDiskConfiguration {
@CommandLine.Option(
names = "--boot-disk-size",
description = "The size of the disk in GB attached to this instance.")
Long sizeGb;
@CommandLine.Option(
names = "--boot-disk-type",
description =
"The type of disk attached to this instance, defaults to the standard persistent disk.")
String type;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "Data disk configurations.%n")
GcpNotebook.DataDiskConfiguration dataDiskConfiguration;
static class DataDiskConfiguration {
@CommandLine.Option(
names = "--data-disk-size",
description = "The size of the disk in GB attached to this instance.")
Long sizeGb;
@CommandLine.Option(
names = "--data-disk-type",
description =
"The type of disk attached to this instance, defaults to the standard persistent disk.")
String type;
}
@CommandLine.Mixin WorkspaceOverride workspaceOption;
@CommandLine.Mixin Format formatOption;
/** Add a controlled GCP Notebook instance to the workspace. */
@Override
protected void execute() {
workspaceOption.overrideIfSpecified();
// build the resource object to create. force the resource to be private
CreateResourceParams.Builder createResourceParams =
resourceCreationOptions
.populateMetadataFields()
.stewardshipType(StewardshipType.CONTROLLED)
.accessScope(AccessScope.PRIVATE_ACCESS);
CreateGcpNotebookParams.Builder createParams =
new CreateGcpNotebookParams.Builder()
.resourceFields(createResourceParams.build())
.instanceId(instanceId)
.location(location)
.machineType(machineType)
.postStartupScript(postStartupScript)
.metadata(
metadata == null ? defaultMetadata(Context.requireWorkspace().getId()) : metadata);
if (acceleratorConfig != null) {
createParams
.acceleratorType(acceleratorConfig.type)
.acceleratorCoreCount(acceleratorConfig.coreCount);
}
if (gpuDriverConfiguration != null) {
createParams
.installGpuDriver(gpuDriverConfiguration.installGpuDriver)
.customGpuDriverPath(gpuDriverConfiguration.customGpuDriverPath);
}
if (bootDiskConfiguration != null) {
createParams
.bootDiskType(bootDiskConfiguration.type)
.bootDiskSizeGb(bootDiskConfiguration.sizeGb);
}
if (dataDiskConfiguration != null) {
createParams
.dataDiskType(dataDiskConfiguration.type)
.dataDiskSizeGb(dataDiskConfiguration.sizeGb);
}
if (vmOrContainerImage == null) {
createParams.vmImageProject(DEFAULT_VM_IMAGE_PROJECT).vmImageFamily(DEFAULT_VM_IMAGE_FAMILY);
} else if (vmOrContainerImage.container != null) {
createParams
.containerRepository(vmOrContainerImage.container.repository)
.containerTag(vmOrContainerImage.container.tag);
} else {
createParams
.vmImageProject(vmOrContainerImage.vm.project)
.vmImageFamily(vmOrContainerImage.vm.imageConfig.family)
.vmImageName(vmOrContainerImage.vm.imageConfig.name);
}
bio.terra.cli.businessobject.resource.GcpNotebook createdResource =
bio.terra.cli.businessobject.resource.GcpNotebook.createControlled(createParams.build());
formatOption.printReturnValue(new UFGcpNotebook(createdResource), GcpNotebook::printText);
}
/** Create the metadata to put on the GCP Notebook instance. */
private Map<String, String> defaultMetadata(UUID workspaceID) {
return ImmutableMap.<String, String>builder()
// Set additional Terra context as metadata on the VM instance.
.put("terra-workspace-id", workspaceID.toString())
.put("terra-cli-server", Context.getServer().getName())
.build();
}
/** Print this command's output in text format. */
private static void printText(UFGcpNotebook returnValue) {
OUT.println("Successfully added controlled GCP Notebook instance.");
returnValue.print();
}
}
|
package bio.terra.cli.command.resources.create;
import bio.terra.cli.businessobject.Context;
import bio.terra.cli.businessobject.User;
import bio.terra.cli.command.shared.BaseCommand;
import bio.terra.cli.command.shared.options.Format;
import bio.terra.cli.command.shared.options.ResourceCreation;
import bio.terra.cli.command.shared.options.WorkspaceOverride;
import bio.terra.cli.serialization.userfacing.inputs.CreateAiNotebookParams;
import bio.terra.cli.serialization.userfacing.inputs.CreateResourceParams;
import bio.terra.cli.serialization.userfacing.resources.UFAiNotebook;
import bio.terra.workspace.model.AccessScope;
import bio.terra.workspace.model.ControlledResourceIamRole;
import bio.terra.workspace.model.StewardshipType;
import com.google.common.collect.ImmutableMap;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import picocli.CommandLine;
/** This class corresponds to the fourth-level "terra resources create ai-notebook" command. */
@CommandLine.Command(
name = "ai-notebook",
description =
"Add a controlled AI Platform Notebook instance resource.\n"
+ "For a detailed explanation of some parameters, see https://cloud.google.com/ai-platform/notebooks/docs/reference/rest/v1/projects.locations.instances#Instance",
showDefaultValues = true,
sortOptions = false)
public class AiNotebook extends BaseCommand {
private static final String AUTO_NAME_DATE_FORMAT = "-yyyyMMdd-HHmmss";
private static final String AUTO_GENERATE_NAME = "{username}" + AUTO_NAME_DATE_FORMAT;
/** See {@link #mangleUsername(String)}. */
private static final int MAX_INSTANCE_NAME_LENGTH = 61;
private static final String DEFAULT_VM_IMAGE_PROJECT = "deeplearning-platform-release";
private static final String DEFAULT_VM_IMAGE_FAMILY = "r-latest-cpu-experimental";
// Use CreateResource instead of createControlledResource because only private notebooks are
// supported and we don't want to provide options that are not useful.
@CommandLine.Mixin ResourceCreation resourceCreationOptions;
@CommandLine.Option(
names = "--instance-id",
description =
"The unique name to give to the notebook instance. Cannot be changed later. "
+ "The instance name must be 1 to 63 characters long and contain only lowercase "
+ "letters, numeric characters, and dashes. The first character must be a lowercase "
+ "letter and the last character cannot be a dash. If not specified, an "
+ "auto-generated name based on your email address and time will be used.",
defaultValue = AUTO_GENERATE_NAME)
private String instanceId;
@CommandLine.Option(
names = "--location",
defaultValue = "us-central1-a",
description = "The Google Cloud location of the instance.")
private String location;
@CommandLine.Option(
names = "--machine-type",
defaultValue = "n1-standard-4",
description = "The Compute Engine machine type of this instance.")
private String machineType;
@CommandLine.Option(
names = "--post-startup-script",
defaultValue =
"https://raw.githubusercontent.com/DataBiosphere/terra-cli/main/notebooks/post-startup.sh",
description =
"Path to a Bash script that automatically runs after a notebook instance fully boots up. "
+ "The path must be a URL or Cloud Storage path, e.g. 'gs://path-to-file/file-name'")
private String postStartupScript;
@CommandLine.Option(
names = "--metadata",
description =
"Custom metadata to apply to this instance.\nBy default sets some jupyterlab extensions "
+ "(installed-extensions=jupyterlab_bigquery-latest.tar.gz,jupyterlab_gcsfilebrowser-latest.tar.gz,jupyterlab_gcpscheduler-latest.tar.gz) "
+ "and the Terra workspace id (terra-workspace-id=[WORKSPACE_ID]).")
private Map<String, String> metadata;
@CommandLine.ArgGroup(exclusive = true, multiplicity = "0..1")
AiNotebook.VmOrContainerImage vmOrContainerImage;
static class VmOrContainerImage {
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "1",
heading =
"Definition of a custom Compute Engine virtual machine image for starting a "
+ "notebook instance with the environment installed directly on the VM.\n"
+ "If neither this nor --container-* are specified, default to \n"
+ "'--vm-image-project="
+ DEFAULT_VM_IMAGE_PROJECT
+ " --vm-image-family="
+ DEFAULT_VM_IMAGE_FAMILY
+ "' %n")
VmImage vm;
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "1",
heading =
"Definition of a container image for starting a notebook instance with the environment "
+ "installed in a container.%n")
ContainerImage container;
}
static class VmImage {
@CommandLine.Option(
names = "--vm-image-project",
required = true,
description = "The ID of the Google Cloud project that this VM image belongs to.")
private String project;
@CommandLine.ArgGroup(exclusive = true, multiplicity = "1")
ImageConfig imageConfig;
static class ImageConfig {
@CommandLine.Option(
names = "--vm-image-family",
description =
"Use this VM image family to find the image; the newest image in this family will be "
+ "used.")
private String family;
@CommandLine.Option(
names = "--vm-image-name",
description = "Use this VM image name to find the image.")
private String name;
}
}
static class ContainerImage {
@CommandLine.Option(
names = "--container-repository",
required = true,
description =
"The path to the container image repository. For example: "
+ "'gcr.io/{project_id}/{imageName}'")
private String repository;
@CommandLine.Option(
names = "--container-tag",
description =
"The tag of the container image. If not specified, this defaults to the latest tag.")
private String tag;
}
// TODO(PF-767): Consider how to improve usability & validation of these parameters.
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "The hardware accelerator used on this instance.%n")
AiNotebook.AcceleratorConfig acceleratorConfig;
static class AcceleratorConfig {
@CommandLine.Option(names = "--accelerator-type", description = "type of this accelerator")
private String type;
@CommandLine.Option(
names = "--accelerator-core-count",
description = "Count of cores of this accelerator")
private Long coreCount;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "GPU driver configurations.%n")
AiNotebook.GpuDriverConfiguration gpuDriverConfiguration;
static class GpuDriverConfiguration {
@CommandLine.Option(
names = "--install-gpu-driver",
description =
"If true, the end user authorizes Google Cloud to install a GPU driver on this instance")
private Boolean installGpuDriver;
@CommandLine.Option(
names = "--custom-gpu-driver-path",
description = "Specify a custom Cloud Storage path where the GPU driver is stored.")
private String customGpuDriverPath;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "Boot disk configurations.%n")
AiNotebook.BootDiskConfiguration bootDiskConfiguration;
static class BootDiskConfiguration {
@CommandLine.Option(
names = "--boot-disk-size",
description = "The size of the disk in GB attached to this instance.")
Long sizeGb;
@CommandLine.Option(
names = "--boot-disk-type",
description =
"The type of disk attached to this instance, defaults to the standard persistent disk.")
String type;
}
@CommandLine.ArgGroup(
exclusive = false,
multiplicity = "0..1",
heading = "Data disk configurations.%n")
AiNotebook.DataDiskConfiguration dataDiskConfiguration;
static class DataDiskConfiguration {
@CommandLine.Option(
names = "--data-disk-size",
description = "The size of the disk in GB attached to this instance.")
Long sizeGb;
@CommandLine.Option(
names = "--data-disk-type",
description =
"The type of disk attached to this instance, defaults to the standard persistent disk.")
String type;
}
@CommandLine.Mixin WorkspaceOverride workspaceOption;
@CommandLine.Mixin Format formatOption;
/** Add a controlled AI Notebook instance to the workspace. */
@Override
protected void execute() {
workspaceOption.overrideIfSpecified();
// build the resource object to create. force the resource to be private
CreateResourceParams.Builder createResourceParams =
resourceCreationOptions
.populateMetadataFields()
.stewardshipType(StewardshipType.CONTROLLED)
.accessScope(AccessScope.PRIVATE_ACCESS)
.privateUserName(Context.requireUser().getEmail())
.privateUserRoles(
List.of(
ControlledResourceIamRole.EDITOR,
ControlledResourceIamRole.WRITER,
ControlledResourceIamRole.READER));
CreateAiNotebookParams.Builder createParams =
new CreateAiNotebookParams.Builder()
.resourceFields(createResourceParams.build())
.instanceId(getInstanceId(Context.requireUser()))
.location(location)
.machineType(machineType)
.postStartupScript(postStartupScript)
.metadata(
metadata == null ? defaultMetadata(Context.requireWorkspace().getId()) : metadata);
if (acceleratorConfig != null) {
createParams
.acceleratorType(acceleratorConfig.type)
.acceleratorCoreCount(acceleratorConfig.coreCount);
}
if (gpuDriverConfiguration != null) {
createParams
.installGpuDriver(gpuDriverConfiguration.installGpuDriver)
.customGpuDriverPath(gpuDriverConfiguration.customGpuDriverPath);
}
if (bootDiskConfiguration != null) {
createParams
.bootDiskType(bootDiskConfiguration.type)
.bootDiskSizeGb(bootDiskConfiguration.sizeGb);
}
if (dataDiskConfiguration != null) {
createParams
.dataDiskType(dataDiskConfiguration.type)
.dataDiskSizeGb(dataDiskConfiguration.sizeGb);
}
if (vmOrContainerImage == null) {
createParams.vmImageProject(DEFAULT_VM_IMAGE_PROJECT).vmImageFamily(DEFAULT_VM_IMAGE_FAMILY);
} else if (vmOrContainerImage.container != null) {
createParams
.containerRepository(vmOrContainerImage.container.repository)
.containerTag(vmOrContainerImage.container.tag);
} else {
createParams
.vmImageProject(vmOrContainerImage.vm.project)
.vmImageFamily(vmOrContainerImage.vm.imageConfig.family)
.vmImageName(vmOrContainerImage.vm.imageConfig.name);
}
bio.terra.cli.businessobject.resources.AiNotebook createdResource =
bio.terra.cli.businessobject.resources.AiNotebook.createControlled(createParams.build());
formatOption.printReturnValue(new UFAiNotebook(createdResource), AiNotebook::printText);
}
/** Create the metadata to put on the AI Notebook instance. */
private Map<String, String> defaultMetadata(UUID workspaceID) {
return ImmutableMap.<String, String>builder()
// Set additional Terra context as metadata on the VM instance.
.put("terra-workspace-id", workspaceID.toString())
.put("terra-cli-server", Context.getServer().getName())
.build();
}
/**
   * Returns the specified instanceId, or an auto-generated instance name built from the username
   * and the current date and time.
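   *
   * <p>For example (timestamp is illustrative), a user logged in as {@code alice@example.com}
   * would get an auto-generated id such as {@code alice-20240101-120000}.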
*/
// TODO add some unit tests when we have a testing framework.
private String getInstanceId(User user) {
if (!AUTO_GENERATE_NAME.equals(instanceId)) {
return instanceId;
}
String mangledUsername = mangleUsername(extractUsername(user.getEmail()));
String localDateTimeSuffix =
DateTimeFormatter.ofPattern(AUTO_NAME_DATE_FORMAT)
.format(Instant.now().atZone(ZoneId.systemDefault()));
return mangledUsername + localDateTimeSuffix;
}
/**
   * Best-effort mangling of the user's name so that it meets the requirements for a valid instance name.
*
* <p>Instance name id must match the regex '(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)', i.e. starting
* with a lowercase alpha character, only alphanumerics and '-' of max length 61. I don't have a
* documentation link, but gcloud will complain otherwise.
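   *
   * <p>Illustrative example: an input of {@code "First.Last"} becomes {@code "firstlast"}.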
*/
private static String mangleUsername(String username) {
// Strip non alpha-numeric or '-' characters.
String mangledName = username.replaceAll("[^a-zA-Z0-9-]", "");
if (mangledName.isEmpty()) {
mangledName = "notebook";
}
// Lower case everything, even though only the first character requires lowercase.
mangledName = mangledName.toLowerCase();
// Make sure the returned name isn't too long to not have the date time suffix.
int maxNameLength = MAX_INSTANCE_NAME_LENGTH - AUTO_NAME_DATE_FORMAT.length();
if (mangledName.length() > maxNameLength) {
mangledName = mangledName.substring(0, maxNameLength);
}
return mangledName;
}
private static String extractUsername(String validEmail) {
return validEmail.substring(0, validEmail.indexOf('@'));
}
/** Print this command's output in text format. */
private static void printText(UFAiNotebook returnValue) {
OUT.println("Successfully added controlled AI Notebook instance.");
returnValue.print();
}
}
|
package bjohnson.ResponseHandlers;
import bjohnson.FileIO;
import bjohnson.Request;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Arrays;
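/**
 * Builds responses for file reads, including partial-content (206) responses when the request
 * carries a Range header.
 *
 * <p>A minimal usage sketch; the directory path and the parsed {@code request} are assumed to be
 * supplied by the surrounding server code:
 * <pre>{@code
 * ResponseBuilderInterface builder = new FileReadResponseBuilder("/var/www/public");
 * Response response = builder.getResponse(request);
 * }</pre>
 */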
public class FileReadResponseBuilder implements ResponseBuilderInterface {
private final String directoryPath;
private Response response;
private Request request;
private int startRange = 0;
private int endRange = 0;
public FileReadResponseBuilder(String directoryPath) {
this.directoryPath = directoryPath;
}
private void readFromFile() {
String fullPath = directoryPath + request.getURL();
try {
byte[] fileContent = FileIO.readFromFile(fullPath);
if (isPartialFileRequest()){
buildPartialResponse(fileContent);
} else {
response.setBody(fileContent);
}
} catch (IOException e) {
response = new FourOhFourResponseBuilder().getResponse(request);
}
}
private void buildPartialResponse(byte[] fileContent) {
endRange = fileContent.length;
setContentRange();
response.setStatus("206 PARTIAL CONTENT");
response.setBody(Arrays.copyOfRange(fileContent, startRange, endRange));
}
    private String[] parseContentRangeHeader() {
        // Splits a header value such as "bytes=0-4" into individual characters, e.g. ["0", "-", "4"].
        // Note: this character-wise split only handles single-digit byte offsets.
        String rangeString = request.getHeaders().get("Range");
        return rangeString.split("=")[1].split("");
    }
    private void setContentRange() {
        String[] range = parseContentRangeHeader();
        if (range.length == 3) {
            // Explicit range, e.g. "0-4": serve bytes 0 through 4 inclusive.
            startRange = Integer.parseInt(range[0]);
            endRange = Integer.parseInt(range[2]) + 1;
        } else if (range[0].equals("-")) {
            // Suffix range, e.g. "-6": serve the last 6 bytes of the file.
            startRange = endRange - Integer.parseInt(range[1]);
        } else if (range[1].equals("-")) {
            // Open-ended range, e.g. "4-": serve from byte 4 to the end of the file.
            startRange = Integer.parseInt(range[0]);
        }
    }
private Boolean isPartialFileRequest() {
return request.getHeaders().containsKey("Range");
}
public Response getResponse(Request request) {
response = new Response();
this.request = request;
readFromFile();
return response;
}
}
|
package com.bdl.annotation.processing.model;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.lang.model.element.AnnotationMirror;
import javax.lang.model.element.Element;
import javax.lang.model.element.ElementKind;
import javax.lang.model.element.ExecutableElement;
import javax.lang.model.element.TypeElement;
import javax.lang.model.type.DeclaredType;
import javax.lang.model.type.TypeMirror;
/**
 * Metadata class for the relevant parts of a class to write.
*
* @author Ben Leitner
*/
@AutoValue
public abstract class ClassMetadata implements UsesTypes, Annotatable {
/** Enumeration of the possible types to AutoAdapt: Class and Interface. */
public enum Category {
CLASS,
INTERFACE;
static Category forKind(ElementKind kind) {
switch (kind) {
case CLASS:
case ENUM:
return Category.CLASS;
case INTERFACE:
case ANNOTATION_TYPE:
return Category.INTERFACE;
default:
throw new IllegalArgumentException("Bad Kind: " + kind);
}
}
}
private ImmutableList<FieldMetadata> allFields;
private ImmutableList<MethodMetadata> allMethods;
@Override
public abstract ImmutableList<AnnotationMetadata> annotations();
/** The AutoAdaptee's {@link Category}. */
public abstract Category category();
/** Contains the complete type metadata for the class. */
public abstract TypeMetadata type();
/** The inheritance metadatas for the types that this one inherits from. */
public abstract ImmutableList<InheritanceMetadata> inheritances();
public abstract ImmutableSet<ConstructorMetadata> constructors();
/** Fields that are declared in this class. */
public abstract ImmutableList<FieldMetadata> fields();
/** Methods that are declared in this class. */
public abstract ImmutableList<MethodMetadata> methods();
@Override
public Set<TypeMetadata> getAllTypes() {
ImmutableSet.Builder<TypeMetadata> imports = ImmutableSet.builder();
imports.addAll(type().getAllTypes());
imports.addAll(inheritances().stream()
.map((inheritance) -> inheritance.classMetadata().type())
.collect(Collectors.toSet()));
for (AnnotationMetadata annotation : annotations()) {
imports.addAll(annotation.getAllTypes());
}
for (ConstructorMetadata constructor : constructors()) {
imports.addAll(constructor.getAllTypes());
}
for (MethodMetadata method : methods()) {
imports.addAll(method.getAllTypes());
}
return imports.build();
}
public ImmutableList<FieldMetadata> getAllFields() {
if (allFields == null) {
Stream<FieldMetadata> fieldStream = Stream.empty();
for (InheritanceMetadata inheritance : inheritances()) {
fieldStream = Stream.concat(
fieldStream,
inheritance.getAllFields().stream()
.filter((field) -> field.visibility() != Visibility.PRIVATE));
}
fieldStream = Stream.concat(fieldStream, fields().stream());
allFields = ImmutableList.copyOf(fieldStream.sorted().collect(Collectors.toList()));
}
return allFields;
}
/** Methods declared in this type or in any supertype / interface. */
public ImmutableList<MethodMetadata> getAllMethods() {
if (allMethods == null) {
Stream<MethodMetadata> methodStream = Stream.empty();
for (InheritanceMetadata inheritance : inheritances()) {
methodStream = Stream.concat(
methodStream,
inheritance.getAllMethods().stream()
.filter((method) -> method.visibility() != Visibility.PRIVATE));
}
methodStream = Stream.concat(methodStream, methods().stream());
Set<MethodMetadata> methods = methodStream.collect(Collectors.toSet());
Set<MethodMetadata> concreteMethods = methods.stream()
.filter((method) -> !method.isAbstract())
.collect(Collectors.toSet());
Set<MethodMetadata> abstractMethods = methods.stream()
.filter(MethodMetadata::isAbstract)
.filter((method) -> !concreteMethods.contains(method.toBuilder().setIsAbstract(false).build()))
.collect(Collectors.toSet());
allMethods = ImmutableList.copyOf(
Stream.concat(
concreteMethods.stream(),
abstractMethods.stream())
.sorted()
.collect(Collectors.toList()));
}
return allMethods;
}
public String fullyQualifiedPathName() {
return type().packagePrefix() + type().nestingPrefix() + type().name();
}
@Override
public String toString() {
return fullyQualifiedPathName();
}
public static ClassMetadata fromElement(Element element) {
TypeMetadata type = TypeMetadata.fromElement(element);
Builder metadata = builder()
.setCategory(Category.forKind(element.getKind()))
.setType(type);
for (AnnotationMirror annotationMirror : element.getAnnotationMirrors()) {
metadata.addAnnotation(AnnotationMetadata.fromType(annotationMirror));
}
TypeElement typeElement = (TypeElement) element;
TypeMirror superClass = typeElement.getSuperclass();
if (superClass instanceof DeclaredType) {
metadata.addInheritance(InheritanceMetadata.fromType((DeclaredType) superClass));
}
for (TypeMirror inherited : typeElement.getInterfaces()) {
metadata.addInheritance(InheritanceMetadata.fromType((DeclaredType) inherited));
}
for (Element enclosed : element.getEnclosedElements()) {
if (enclosed.getKind() == ElementKind.METHOD) {
metadata.addMethod(MethodMetadata.fromMethod((ExecutableElement) enclosed));
}
if (enclosed.getKind() == ElementKind.CONSTRUCTOR) {
metadata.addConstructor(ConstructorMetadata.fromConstructor(enclosed));
}
if (enclosed.getKind() == ElementKind.FIELD) {
metadata.addField(FieldMetadata.from(type, enclosed));
}
}
return metadata.build();
}
public static Builder builder() {
return new AutoValue_ClassMetadata.Builder();
}
@AutoValue.Builder
public static abstract class Builder {
abstract ImmutableList.Builder<AnnotationMetadata> annotationsBuilder();
public abstract Builder setCategory(Category category);
public abstract Builder setType(TypeMetadata type);
abstract ImmutableList.Builder<InheritanceMetadata> inheritancesBuilder();
abstract ImmutableSet.Builder<ConstructorMetadata> constructorsBuilder();
abstract ImmutableList.Builder<FieldMetadata> fieldsBuilder();
abstract ImmutableList.Builder<MethodMetadata> methodsBuilder();
public Builder addInheritance(InheritanceMetadata inheritance) {
inheritancesBuilder().add(inheritance);
return this;
}
public Builder addAnnotation(AnnotationMetadata annotation) {
annotationsBuilder().add(annotation);
return this;
}
public Builder addConstructor(ConstructorMetadata constructor) {
constructorsBuilder().add(constructor);
return this;
}
public Builder addField(FieldMetadata field) {
fieldsBuilder().add(field);
return this;
}
public Builder addMethod(MethodMetadata method) {
methodsBuilder().add(method);
return this;
}
public abstract ClassMetadata build();
}
}
|
package com.ctrip.zeus.restful.resource;
import com.ctrip.zeus.auth.Authorize;
import com.ctrip.zeus.dal.core.StatusGroupServerDao;
import com.ctrip.zeus.exceptions.ValidationException;
import com.ctrip.zeus.executor.TaskManager;
import com.ctrip.zeus.model.entity.*;
import com.ctrip.zeus.restful.message.ResponseHandler;
import com.ctrip.zeus.service.model.*;
import com.ctrip.zeus.service.nginx.CertificateConfig;
import com.ctrip.zeus.service.nginx.CertificateInstaller;
import com.ctrip.zeus.service.nginx.CertificateService;
import com.ctrip.zeus.service.query.GroupCriteriaQuery;
import com.ctrip.zeus.service.query.SlbCriteriaQuery;
import com.ctrip.zeus.service.query.VirtualServerCriteriaQuery;
import com.ctrip.zeus.service.status.GroupStatusService;
import com.ctrip.zeus.service.status.StatusOffset;
import com.ctrip.zeus.service.status.StatusService;
import com.ctrip.zeus.service.task.constant.TaskOpsType;
import com.ctrip.zeus.status.entity.GroupServerStatus;
import com.ctrip.zeus.status.entity.GroupStatus;
import com.ctrip.zeus.status.entity.ServerStatus;
import com.ctrip.zeus.task.entity.OpsTask;
import com.ctrip.zeus.task.entity.TaskResult;
import com.google.common.base.Joiner;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.config.DynamicLongProperty;
import com.google.common.collect.Sets;
import com.netflix.config.DynamicPropertyFactory;
import org.glassfish.jersey.media.multipart.FormDataParam;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.InputStream;
import java.util.*;
@Component
@Path("/op")
public class OperationResource {
@Resource
StatusService statusService;
@Resource
private GroupStatusService groupStatusService;
@Resource
private GroupRepository groupRepository;
@Resource
private SlbRepository slbRepository;
@Resource
private TaskManager taskManager;
@Resource
private SlbCriteriaQuery slbCriteriaQuery;
@Resource
private GroupCriteriaQuery groupCriteriaQuery;
@Resource
private VirtualServerCriteriaQuery virtualServerCriteriaQuery;
@Resource
private ResponseHandler responseHandler;
@Resource
private CertificateService certificateService;
@Resource
private CertificateInstaller certificateInstaller;
@Resource
private EntityFactory entityFactory;
private static DynamicLongProperty apiTimeout = DynamicPropertyFactory.getInstance().getLongProperty("api.timeout", 15000L);
private static DynamicBooleanProperty healthyOpsActivate = DynamicPropertyFactory.getInstance().getBooleanProperty("healthy.operation.active", false);
private Logger logger = LoggerFactory.getLogger(this.getClass());
@GET
@Path("/upServer")
@Authorize(name = "upDownServer")
public Response upServer(@Context HttpServletRequest request, @Context HttpHeaders hh, @QueryParam("ip") String ip) throws Exception {
return serverOps(hh, ip, true);
}
@GET
@Path("/downServer")
@Authorize(name = "upDownServer")
public Response downServer(@Context HttpServletRequest request, @Context HttpHeaders hh, @QueryParam("ip") String ip) throws Exception {
return serverOps(hh, ip, false);
}
private Response serverOps(HttpHeaders hh, String serverip, boolean up) throws Exception {
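        // Resolve every group that contains this server ip, walk group -> virtual server -> slb,
        // and queue one SERVER_OPS task per affected slb before reporting the server status.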
Long[] groupIds = entityFactory.getGroupIdsByGroupServerIp(serverip, SelectionMode.REDUNDANT);
if (groupIds == null || groupIds.length == 0) {
throw new ValidationException("Not found Server Ip.");
}
ModelStatusMapping<Group> groupMap = entityFactory.getGroupsByIds(groupIds);
Set<Long> vsIds = new HashSet<>();
for (Long id : groupIds) {
if (groupMap.getOnlineMapping().get(id) != null) {
Group group = groupMap.getOnlineMapping().get(id);
for (GroupVirtualServer gvs : group.getGroupVirtualServers()) {
vsIds.add(gvs.getVirtualServer().getId());
}
} else if (groupMap.getOfflineMapping().get(id) != null) {
Group group = groupMap.getOfflineMapping().get(id);
for (GroupVirtualServer gvs : group.getGroupVirtualServers()) {
vsIds.add(gvs.getVirtualServer().getId());
}
}
}
ModelStatusMapping<VirtualServer> vsMap = entityFactory.getVsesByIds(vsIds.toArray(new Long[]{}));
Set<Long> slbIds = new HashSet<>();
for (VirtualServer vs : vsMap.getOnlineMapping().values()) {
slbIds.add(vs.getSlbId());
}
for (VirtualServer vs : vsMap.getOfflineMapping().values()) {
slbIds.add(vs.getSlbId());
}
List<OpsTask> tasks = new ArrayList<>();
for (Long slbId : slbIds) {
OpsTask task = new OpsTask();
task.setIpList(serverip);
task.setOpsType(TaskOpsType.SERVER_OPS);
task.setTargetSlbId(slbId);
task.setUp(up);
tasks.add(task);
}
List<Long> taskIds = taskManager.addTask(tasks);
List<TaskResult> results = taskManager.getResult(taskIds, apiTimeout.get());
boolean isSuccess = true;
String failCause = "";
for (TaskResult taskResult : results) {
if (!taskResult.isSuccess()) {
isSuccess = false;
failCause += taskResult.toString();
}
}
if (!isSuccess) {
throw new Exception(failCause);
}
ServerStatus ss = new ServerStatus().setIp(serverip).setUp(statusService.getServerStatus(serverip));
Long[] gids = entityFactory.getGroupIdsByGroupServerIp(serverip, SelectionMode.ONLINE_EXCLUSIVE);
List<Group> groups = groupRepository.list(gids);
if (groups != null) {
for (Group group : groups) {
ss.addGroupName(group.getName());
}
}
if (MediaType.APPLICATION_XML_TYPE.equals(hh.getMediaType())) {
return Response.status(200).entity(String.format(ServerStatus.XML, ss)).type(MediaType.APPLICATION_XML).build();
} else {
return Response.status(200).entity(String.format(ServerStatus.JSON, ss)).type(MediaType.APPLICATION_JSON).build();
}
}
@GET
@Path("/upMember")
@Authorize(name = "upDownMember")
public Response upMember(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
if (null != batch && batch.equals(true)) {
Group gp = groupRepository.getById(groupId);
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
return memberOps(hh, groupId, _ips, true, TaskOpsType.MEMBER_OPS);
}
@GET
@Path("/downMember")
@Authorize(name = "upDownMember")
public Response downMember(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
if (null != batch && batch.equals(true)) {
Group gp = groupRepository.getById(groupId);
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
return memberOps(hh, groupId, _ips, false, TaskOpsType.MEMBER_OPS);
}
@GET
@Path("/pullIn")
@Authorize(name = "upDownMember")
public Response pullIn(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
if (null != batch && batch.equals(true)) {
Group gp = groupRepository.getById(groupId);
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
return memberOps(hh, groupId, _ips, true, TaskOpsType.PULL_MEMBER_OPS);
}
@GET
@Path("/pullOut")
@Authorize(name = "upDownMember")
public Response pullOut(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
if (null != batch && batch.equals(true)) {
Group gp = groupRepository.getById(groupId);
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
return memberOps(hh, groupId, _ips, false, TaskOpsType.PULL_MEMBER_OPS);
}
@GET
@Path("/raise")
@Authorize(name = "upDownMember")
public Response raise(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
if (null != batch && batch.equals(true)) {
Group gp = groupRepository.getById(groupId);
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
if (healthyOpsActivate.get()) {
return memberOps(hh, groupId, _ips, true, TaskOpsType.HEALTHY_OPS);
} else {
return healthyOps(hh, groupId, _ips, true);
}
}
@GET
@Path("/fall")
@Authorize(name = "upDownMember")
public Response fall(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("groupId") Long groupId,
@QueryParam("groupName") String groupName,
@QueryParam("ip") List<String> ips,
@QueryParam("batch") Boolean batch) throws Exception {
List<String> _ips = new ArrayList<>();
if (groupId == null) {
if (groupName == null) {
throw new ValidationException("Group Id or Name not found!");
} else {
groupId = groupCriteriaQuery.queryByName(groupName);
}
}
Group gp = groupRepository.getById(groupId);
if (gp == null) {
throw new ValidationException("Group Id or Name not found!");
}
if (null != batch && batch.equals(true)) {
List<GroupServer> servers = gp.getGroupServers();
for (GroupServer gs : servers) {
_ips.add(gs.getIp());
}
} else if (ips != null) {
_ips = ips;
}
if (healthyOpsActivate.get()) {
return memberOps(hh, groupId, _ips, false, TaskOpsType.HEALTHY_OPS);
} else {
return healthyOps(hh, groupId, _ips, false);
}
}
private Response healthyOps(HttpHeaders hh, Long groupId, List<String> ips, boolean b) throws Exception {
statusService.updateStatus(groupId, ips, StatusOffset.HEALTHY, b);
return responseHandler.handle(groupStatusService.getOfflineGroupStatus(groupId), hh.getMediaType());
}
@POST
@Path("/uploadcerts")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Authorize(name = "uploadCerts")
public Response uploadCerts(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@FormDataParam("cert") InputStream cert,
@FormDataParam("key") InputStream key,
@QueryParam("domain") String domain) throws Exception {
if (domain == null || domain.isEmpty()) {
throw new ValidationException("Domain info is required.");
}
String[] domainMembers = domain.split("\\|");
Arrays.sort(domainMembers);
domain = Joiner.on("|").join(domainMembers);
certificateService.upload(cert, key, domain, CertificateConfig.ONBOARD);
return responseHandler.handle("Certificates uploaded. Virtual server creation is permitted.", hh.getMediaType());
}
@POST
@Path("/upgradecerts")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Authorize(name = "upgradeCerts")
public Response upgradeCerts(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@FormDataParam("cert") InputStream cert,
@FormDataParam("key") InputStream key,
@QueryParam("domain") String domain,
@QueryParam("vsId") Long vsId,
@QueryParam("ip") List<String> ips) throws Exception {
if (domain == null || domain.isEmpty()) {
throw new ValidationException("Domain info is required.");
}
if (vsId == null) {
throw new ValidationException("vsId is required when updating certificate.");
}
// update certificate or run grayscale test
IdVersion[] check = virtualServerCriteriaQuery.queryByIdAndMode(vsId, SelectionMode.REDUNDANT);
Set<IdVersion> keys = virtualServerCriteriaQuery.queryByDomain(domain);
keys.retainAll(Sets.newHashSet(check));
if (keys.size() == 0) {
throw new ValidationException("VsId and domain mismatched.");
}
configureIps(keys.toArray(new IdVersion[keys.size()]), ips);
String[] domainMembers = domain.split("\\|");
Arrays.sort(domainMembers);
domain = Joiner.on("|").join(domainMembers);
Long certId = certificateService.upgrade(cert, key, domain, CertificateConfig.ONBOARD);
return responseHandler.handle("Certificate uploaded. New cert-id is " + certId + ". Contact slb team with the given cert-id to install the new certificate.", hh.getMediaType());
// certificateService.command(vsId, ips, certId);
// certificateService.install(vsId);
// return responseHandler.handle("Certificates uploaded. Re-activate the virtual server to take effect.", hh.getMediaType());
}
@GET
@Path("/dropcerts")
@Authorize(name = "dropCerts")
public Response dropCerts(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("vsId") Long vsId,
@QueryParam("ip") List<String> ips) throws Exception {
return responseHandler.handle("dropcerts is not available at the moment.", hh.getMediaType());
// if (vsId == null && (ips == null || ips.size() == 0))
// throw new ValidationException("vsId and ip addresses are required.");
// certificateService.recall(vsId, ips);
// certificateService.uninstallIfRecalled(vsId);
// return responseHandler.handle("Certificates dropped successfully. Re-activate the virtual server to take effect.", hh.getMediaType());
}
@GET
@Path("/cert/remoteInstall")
@Authorize(name = "remoteInstallCerts")
public Response remoteInstall(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("certId") Long certId,
@QueryParam("vsId") Long vsId,
@QueryParam("ips") List<String> ips) throws Exception {
if (certId == null || ips == null || vsId == null) {
throw new ValidationException("certId, vsId and ips are required.");
}
IdVersion[] keys = virtualServerCriteriaQuery.queryByIdAndMode(vsId, SelectionMode.REDUNDANT);
ips = configureIps(keys, ips);
certificateService.install(vsId, ips, certId);
return responseHandler.handle("Certificates uploaded. Re-activate the virtual server to take effect.", hh.getMediaType());
}
@GET
@Path("/installcerts")
@Authorize(name = "installCerts")
public Response installCerts(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("vsId") Long vsId,
@QueryParam("certId") Long certId) throws Exception {
if (vsId == null || certId == null)
throw new ValidationException("vsId and certId are required.");
String domain = certificateInstaller.localInstall(vsId, certId);
return responseHandler.handle("Certificates with domain " + domain + " are installed successfully.", hh.getMediaType());
}
@GET
@Path("/uninstallcerts")
@Authorize(name = "uninstallCerts")
public Response uninstallCerts(@Context HttpServletRequest request,
@Context HttpHeaders hh,
@QueryParam("vsId") Long vsId) throws Exception {
if (vsId == null)
throw new ValidationException("vsId and certId are required.");
certificateInstaller.localUninstall(vsId);
return responseHandler.handle("Certificates for vsId " + vsId + " are uninstalled.", hh.getMediaType());
}
private Response memberOps(HttpHeaders hh, Long groupId, List<String> ips, boolean up, String type) throws Exception {
Map<String, List<Boolean>> status = statusService.fetchGroupServerStatus(new Long[]{groupId});
boolean skipOps = true;
for (String ip : ips) {
int index = 0;
if (type.equals(TaskOpsType.HEALTHY_OPS)) index = StatusOffset.HEALTHY;
if (type.equals(TaskOpsType.PULL_MEMBER_OPS)) index = StatusOffset.PULL_OPS;
if (type.equals(TaskOpsType.MEMBER_OPS)) index = StatusOffset.MEMBER_OPS;
boolean preStatus = status.get(groupId.toString() + "_" + ip).get(index);
if (preStatus != up) {
skipOps = false;
}
}
if (skipOps) {
GroupStatus groupStatus = groupStatusService.getOfflineGroupStatus(groupId);
logger.info("Group status equals the desired value.Do not need execute task.GroupId:" + groupId + " ips:"
+ ips.toString() + " up:" + up + " type:" + type);
return responseHandler.handle(groupStatus, hh.getMediaType());
}
StringBuilder sb = new StringBuilder();
for (String ip : ips) {
sb.append(ip).append(";");
}
ModelStatusMapping<Group> mapping = entityFactory.getGroupsByIds(new Long[]{groupId});
if (mapping.getOfflineMapping() == null || mapping.getOfflineMapping().size() == 0) {
throw new ValidationException("Not Found Group By Id.");
}
Group onlineGroup = mapping.getOnlineMapping().get(groupId);
Group offlineGroup = mapping.getOfflineMapping().get(groupId);
Set<Long> vsIds = new HashSet<>();
Set<Long> slbIds = new HashSet<>();
if (onlineGroup != null) {
for (GroupVirtualServer gvs : onlineGroup.getGroupVirtualServers()) {
vsIds.add(gvs.getVirtualServer().getId());
}
}
for (GroupVirtualServer gvs : offlineGroup.getGroupVirtualServers()) {
vsIds.add(gvs.getVirtualServer().getId());
}
ModelStatusMapping<VirtualServer> vsMaping = entityFactory.getVsesByIds(vsIds.toArray(new Long[]{}));
VirtualServer tmp;
for (Long vsId : vsIds) {
tmp = vsMaping.getOnlineMapping().get(vsId);
if (tmp == null) {
tmp = vsMaping.getOfflineMapping().get(vsId);
}
slbIds.add(tmp.getSlbId());
}
List<OpsTask> tasks = new ArrayList<>();
for (Long slbId : slbIds) {
OpsTask task = new OpsTask();
task.setTargetSlbId(slbId);
task.setOpsType(type);
task.setUp(up);
task.setGroupId(groupId);
task.setIpList(sb.toString());
tasks.add(task);
}
List<Long> taskIds = taskManager.addTask(tasks);
List<TaskResult> results = taskManager.getResult(taskIds, apiTimeout.get());
for (TaskResult taskResult : results) {
if (!taskResult.isSuccess()) {
throw new Exception("Task Failed! Fail cause : " + taskResult.getFailCause());
}
}
GroupStatus groupStatus = groupStatusService.getOfflineGroupStatus(groupId);
return responseHandler.handle(groupStatus, hh.getMediaType());
}
private List<String> configureIps(IdVersion[] keys, List<String> ips) throws Exception {
Set<Long> slbId = slbCriteriaQuery.queryByVses(keys);
ModelStatusMapping<Slb> check = entityFactory.getSlbsByIds(slbId.toArray(new Long[slbId.size()]));
if (check.getOfflineMapping().size() == 0 && check.getOnlineMapping().size() == 0) {
throw new ValidationException("Cannot find slb servers by the given vsId.");
}
Set<String> slbIps = new HashSet<>();
for (Slb slb : check.getOfflineMapping().values()) {
for (SlbServer server : slb.getSlbServers()) {
slbIps.add(server.getIp());
}
}
for (Slb slb : check.getOnlineMapping().values()) {
for (SlbServer slbServer : slb.getSlbServers()) {
slbIps.add(slbServer.getIp());
}
}
if (ips != null && ips.size() > 0) {
if (!slbIps.containsAll(ips)) {
throw new ValidationException("Some ips do not belong to the current slb.");
}
} else {
ips = new ArrayList<>(slbIps);
}
return ips;
}
}
|
package com.elmakers.mine.bukkit.spell.builtin;
import java.util.Collection;
import com.elmakers.mine.bukkit.block.MaterialAndData;
import com.elmakers.mine.bukkit.spell.UndoableSpell;
import org.bukkit.Material;
import org.bukkit.block.Block;
import org.bukkit.block.BlockFace;
import org.bukkit.configuration.ConfigurationSection;
import org.bukkit.entity.LivingEntity;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.entity.EntityDamageEvent;
import org.bukkit.event.entity.EntityDamageEvent.DamageCause;
import org.bukkit.util.Vector;
import com.elmakers.mine.bukkit.api.spell.SpellEventType;
import com.elmakers.mine.bukkit.api.spell.SpellResult;
import com.elmakers.mine.bukkit.spell.TargetingSpell;
public class FlingSpell extends UndoableSpell implements Listener
{
private long safetyLength = 20000;
private long lastFling = 0;
protected int defaultMaxSpeedAtElevation = 64;
protected double defaultMinMagnitude = 1.5;
protected double defaultMaxMagnitude = 4;
private final static int effectSpeed = 1;
private final static int effectPeriod = 3;
private final static int minRingEffectRange = 2;
private final static int maxRingEffectRange = 15;
private final static int maxDamageAmount = 200;
@Override
public SpellResult onCast(ConfigurationSection parameters)
{
int height = 0;
Block playerBlock = getLocation().getBlock();
LivingEntity entity = mage.getLivingEntity();
if (entity == null) {
return SpellResult.LIVING_ENTITY_REQUIRED;
}
int maxSpeedAtElevation = parameters.getInt("cruising_altitude", defaultMaxSpeedAtElevation);
double minMagnitude = parameters.getDouble("min_speed", defaultMinMagnitude);
double maxMagnitude = parameters.getDouble("max_speed", defaultMaxMagnitude);
safetyLength = parameters.getLong("safety", safetyLength);
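        // Measure the height above the ground (capped at the cruising altitude) so the fling speed
        // can be scaled between min_speed and max_speed.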
while (height < maxSpeedAtElevation && playerBlock.getType() == Material.AIR)
{
playerBlock = playerBlock.getRelative(BlockFace.DOWN);
height++;
}
double heightModifier = maxSpeedAtElevation > 0 ? ((double)height / maxSpeedAtElevation) : 1;
double magnitude = (minMagnitude + (((double)maxMagnitude - minMagnitude) * heightModifier));
Vector velocity = getDirection();
if (mage.getLocation().getBlockY() >= 256)
{
velocity.setY(0);
}
velocity.multiply(magnitude);
registerVelocity(entity);
entity.setVelocity(velocity);
if (safetyLength > 0) {
mage.registerEvent(SpellEventType.PLAYER_DAMAGE, this);
}
lastFling = System.currentTimeMillis();
registerForUndo();
return SpellResult.CAST;
}
@SuppressWarnings("deprecation")
@EventHandler
public void onPlayerDamage(EntityDamageEvent event)
{
if (event.getCause() != DamageCause.FALL) return;
mage.unregisterEvent(SpellEventType.PLAYER_DAMAGE, this);
if (lastFling == 0) return;
if (lastFling + safetyLength > System.currentTimeMillis())
{
event.setCancelled(true);
lastFling = 0;
// Visual effect
int ringEffectRange = (int)Math.ceil(((double)maxRingEffectRange - minRingEffectRange) * event.getDamage() / maxDamageAmount + minRingEffectRange);
ringEffectRange = Math.min(maxRingEffectRange, ringEffectRange);
playEffects("land", ringEffectRange);
}
}
@Override
public void getParameters(Collection<String> parameters)
{
super.getParameters(parameters);
parameters.add("cruising_altitude");
parameters.add("min_speed");
parameters.add("max_speed");
parameters.add("safety");
}
@Override
public com.elmakers.mine.bukkit.api.block.MaterialAndData getEffectMaterial()
{
Block block = mage.getEntity().getLocation().getBlock();
block = block.getRelative(BlockFace.DOWN);
return new MaterialAndData(block);
}
}
|
package com.github.bohnman.squiggly.parser;
import com.github.bohnman.squiggly.config.SquigglyConfig;
import com.github.bohnman.squiggly.metric.source.GuavaCacheSquigglyMetricsSource;
import com.github.bohnman.squiggly.metric.source.SquigglyMetricsSource;
import com.github.bohnman.squiggly.name.*;
import com.github.bohnman.squiggly.parser.antlr4.SquigglyExpressionBaseVisitor;
import com.github.bohnman.squiggly.parser.antlr4.SquigglyExpressionLexer;
import com.github.bohnman.squiggly.parser.antlr4.SquigglyExpressionParser;
import com.github.bohnman.squiggly.util.antlr4.ThrowingErrorListener;
import com.github.bohnman.squiggly.view.PropertyView;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import net.jcip.annotations.ThreadSafe;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.apache.commons.lang3.StringUtils;
import java.util.*;
/**
 * The parser takes a filter expression and compiles it to an Abstract Syntax Tree (AST). In this parser's case, the
 * tree has no single root node; the parser simply returns the top-level nodes.
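 *
 * <p>A minimal usage sketch (the filter string is illustrative):
 * <pre>{@code
 * List<SquigglyNode> nodes = new SquigglyParser().parse("id,owner{firstName,lastName}");
 * }</pre>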
*/
@ThreadSafe
public class SquigglyParser {
// Caches parsed filter expressions
private static final Cache<String, List<SquigglyNode>> CACHE;
private static final SquigglyMetricsSource METRICS_SOURCE;
static {
CACHE = CacheBuilder.from(SquigglyConfig.getParserNodeCacheSpec()).build();
METRICS_SOURCE = new GuavaCacheSquigglyMetricsSource("squiggly.parser.nodeCache.", CACHE);
}
/**
* Parse a filter expression.
*
* @param filter the filter expression
* @return compiled nodes
*/
public List<SquigglyNode> parse(String filter) {
filter = StringUtils.trim(filter);
if (StringUtils.isEmpty(filter)) {
return Collections.emptyList();
}
// get it from the cache if we can
List<SquigglyNode> cachedNodes = CACHE.getIfPresent(filter);
if (cachedNodes != null) {
return cachedNodes;
}
SquigglyExpressionLexer lexer = ThrowingErrorListener.overwrite(new SquigglyExpressionLexer(new ANTLRInputStream(filter)));
SquigglyExpressionParser parser = ThrowingErrorListener.overwrite(new SquigglyExpressionParser(new CommonTokenStream(lexer)));
Visitor visitor = new Visitor();
List<SquigglyNode> nodes = Collections.unmodifiableList(visitor.visit(parser.parse()));
CACHE.put(filter, nodes);
return nodes;
}
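    // Illustrative usage (a sketch, not part of the original source; the filter string is an
    // assumed example of the grammar handled above, including a nested expression):
    //
    //   List<SquigglyNode> nodes = new SquigglyParser().parse("id,address{street,city}");
    //   // one top-level node per field; the "address" node carries the two child nodes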
public static SquigglyMetricsSource getMetricsSource() {
return METRICS_SOURCE;
}
private class Visitor extends SquigglyExpressionBaseVisitor<List<SquigglyNode>> {
@Override
public List<SquigglyNode> visitParse(SquigglyExpressionParser.ParseContext ctx) {
MutableNode root = new MutableNode(new ExactName("root")).dotPathed(true);
handleExpressionList(ctx.expression_list(), root);
MutableNode analyzedRoot = analyze(root);
return analyzedRoot.toSquigglyNode().getChildren();
}
private void handleExpressionList(SquigglyExpressionParser.Expression_listContext ctx, MutableNode parent) {
List<SquigglyExpressionParser.ExpressionContext> expressions = ctx.expression();
for (SquigglyExpressionParser.ExpressionContext expressionContext : expressions) {
handleExpression(expressionContext, parent);
}
}
private void handleExpression(SquigglyExpressionParser.ExpressionContext ctx, MutableNode parent) {
if (ctx.negated_expression() != null) {
handleNegatedExpression(ctx.negated_expression(), parent);
}
List<SquigglyName> names;
if (ctx.field() != null) {
names = Collections.singletonList(createName(ctx.field()));
} else if (ctx.dot_path() != null) {
parent.squiggly = true;
for (int i = 0; i < ctx.dot_path().field().size() - 1; i++) {
parent = parent.addChild(new MutableNode(createName(ctx.dot_path().field(i))).dotPathed(true));
parent.squiggly = true;
}
names = Collections.singletonList(createName(ctx.dot_path().field().get(ctx.dot_path().field().size() - 1)));
} else if (ctx.field_list() != null) {
names = new ArrayList<>(ctx.field_list().field().size());
for (SquigglyExpressionParser.FieldContext fieldContext : ctx.field_list().field()) {
names.add(createName(fieldContext));
}
} else if (ctx.deep() != null) {
names = Collections.singletonList((SquigglyName) AnyDeepName.get());
} else {
names = Collections.emptyList();
}
for (SquigglyName name : names) {
MutableNode node = parent.addChild(new MutableNode(name));
if (ctx.empty_nested_expression() != null) {
node.emptyNested = true;
} else if (ctx.nested_expression() != null) {
node.squiggly = true;
handleExpressionList(ctx.nested_expression().expression_list(), node);
}
}
}
private SquigglyName createName(SquigglyExpressionParser.FieldContext ctx) {
SquigglyName name;
if (ctx.exact_field() != null) {
name = new ExactName(ctx.getText());
} else if (ctx.wildcard_field() != null) {
name = new WildcardName(ctx.getText());
} else if (ctx.regex_field() != null) {
String regexPattern = ctx.regex_field().regex_pattern().getText();
Set<String> regexFlags = new HashSet<>(ctx.regex_field().regex_flag().size());
for (SquigglyExpressionParser.Regex_flagContext regex_flagContext : ctx.regex_field().regex_flag()) {
regexFlags.add(regex_flagContext.getText());
}
name = new RegexName(regexPattern, regexFlags);
} else if (ctx.wildcard_shallow_field() != null) {
name = AnyShallowName.get();
} else {
throw new IllegalArgumentException("Unhandled field: " + ctx.getText());
}
return name;
}
private void handleNegatedExpression(SquigglyExpressionParser.Negated_expressionContext ctx, MutableNode parent) {
if (ctx.field() != null) {
parent.addChild(new MutableNode(createName(ctx.field())).negated(true));
} else if (ctx.dot_path() != null) {
for (int i = 0; i < ctx.dot_path().field().size(); i++) {
SquigglyExpressionParser.FieldContext fieldContext = ctx.dot_path().field(i);
parent.squiggly = true;
MutableNode mutableNode = new MutableNode(createName(fieldContext));
mutableNode.negativeParent = true;
parent = parent.addChild(mutableNode.dotPathed(true));
}
parent.negated(true);
parent.negativeParent = false;
}
}
}
private MutableNode analyze(MutableNode node) {
Map<MutableNode, MutableNode> nodesToAdd = new IdentityHashMap<>();
MutableNode analyze = analyze(node, nodesToAdd);
for (Map.Entry<MutableNode, MutableNode> entry : nodesToAdd.entrySet()) {
entry.getKey().addChild(entry.getValue());
}
return analyze;
}
private MutableNode analyze(MutableNode node, Map<MutableNode, MutableNode> nodesToAdd) {
if (node.children != null && !node.children.isEmpty()) {
boolean allNegated = true;
for (MutableNode child : node.children.values()) {
if (!child.negated && !child.negativeParent) {
allNegated = false;
break;
}
}
if (allNegated) {
nodesToAdd.put(node, new MutableNode(newBaseViewName()).dotPathed(node.dotPathed));
}
for (MutableNode child : node.children.values()) {
analyze(child, nodesToAdd);
}
}
return node;
}
private class MutableNode {
public boolean negativeParent;
private SquigglyName name;
private boolean negated;
private boolean squiggly;
private boolean emptyNested;
private Map<String, MutableNode> children;
private boolean dotPathed;
private MutableNode parent;
MutableNode(SquigglyName name) {
this.name = name;
}
SquigglyNode toSquigglyNode() {
if (name == null) {
throw new IllegalArgumentException("No Names specified");
}
List<SquigglyNode> childNodes;
if (children == null || children.isEmpty()) {
childNodes = Collections.emptyList();
} else {
childNodes = new ArrayList<>(children.size());
for (MutableNode child : children.values()) {
childNodes.add(child.toSquigglyNode());
}
}
return newSquigglyNode(name, childNodes);
}
private SquigglyNode newSquigglyNode(SquigglyName name, List<SquigglyNode> childNodes) {
return new SquigglyNode(name, childNodes, negated, squiggly, emptyNested);
}
public MutableNode dotPathed(boolean dotPathed) {
this.dotPathed = dotPathed;
return this;
}
public MutableNode negated(boolean negated) {
this.negated = negated;
return this;
}
public MutableNode addChild(MutableNode childToAdd) {
if (children == null) {
children = new LinkedHashMap<>();
}
String name = childToAdd.name.getName();
MutableNode existingChild = children.get(name);
if (existingChild == null) {
childToAdd.parent = this;
children.put(name, childToAdd);
} else {
if (childToAdd.children != null) {
if (existingChild.children == null) {
existingChild.children = childToAdd.children;
} else {
existingChild.children.putAll(childToAdd.children);
}
}
existingChild.squiggly = existingChild.squiggly || childToAdd.squiggly;
existingChild.emptyNested = existingChild.emptyNested && childToAdd.emptyNested;
existingChild.dotPathed = existingChild.dotPathed && childToAdd.dotPathed;
existingChild.negativeParent = existingChild.negativeParent && childToAdd.negativeParent;
childToAdd = existingChild;
}
if (!childToAdd.dotPathed && dotPathed) {
dotPathed = false;
}
return childToAdd;
}
}
private ExactName newBaseViewName() {
return new ExactName(PropertyView.BASE_VIEW);
}
}
|
package com.github.rnewson.couchdb.lucene;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Writer;
import java.net.SocketException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import net.sf.json.JSON;
import net.sf.json.JSONArray;
import net.sf.json.JSONObject;
import org.apache.commons.configuration.HierarchicalINIConfiguration;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.HttpResponseException;
import org.apache.http.client.ResponseHandler;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.log4j.Logger;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader.FieldOption;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.mozilla.javascript.ClassShutter;
import org.mozilla.javascript.Context;
import com.github.rnewson.couchdb.lucene.couchdb.CouchDocument;
import com.github.rnewson.couchdb.lucene.couchdb.Database;
import com.github.rnewson.couchdb.lucene.couchdb.DesignDocument;
import com.github.rnewson.couchdb.lucene.couchdb.View;
import com.github.rnewson.couchdb.lucene.util.Constants;
import com.github.rnewson.couchdb.lucene.util.ServletUtils;
import com.github.rnewson.couchdb.lucene.util.StopWatch;
import com.github.rnewson.couchdb.lucene.util.Utils;
public final class DatabaseIndexer implements Runnable, ResponseHandler<Void> {
private class IndexState {
private final DocumentConverter converter;
private boolean dirty;
private String etag;
private final QueryParser parser;
private long pending_seq;
private IndexReader reader;
private final IndexWriter writer;
private final Database database;
public IndexState(final DocumentConverter converter,
final IndexWriter writer, final QueryParser parser,
final Database database) {
this.converter = converter;
this.writer = writer;
this.parser = parser;
this.database = database;
}
public synchronized IndexReader borrowReader(final boolean staleOk)
throws IOException {
blockForLatest(staleOk);
if (reader == null) {
reader = writer.getReader();
etag = newEtag();
reader.incRef();
}
if (!staleOk) {
reader.decRef();
reader = writer.getReader();
if (dirty) {
etag = newEtag();
dirty = false;
}
}
reader.incRef();
return reader;
}
public IndexSearcher borrowSearcher(final boolean staleOk)
throws IOException {
return new IndexSearcher(borrowReader(staleOk));
}
public void returnReader(final IndexReader reader) throws IOException {
reader.decRef();
}
public void returnSearcher(final IndexSearcher searcher)
throws IOException {
returnReader(searcher.getIndexReader());
}
private synchronized void close() throws IOException {
if (reader != null)
reader.close();
if (writer != null)
writer.rollback();
}
private synchronized String getEtag() {
return etag;
}
private String newEtag() {
return Long.toHexString(now());
}
private synchronized boolean notModified(final HttpServletRequest req) {
return etag != null && etag.equals(req.getHeader("If-None-Match"));
}
private void blockForLatest(final boolean staleOk) throws IOException {
if (staleOk) {
return;
}
final long latest = database.getInfo().getUpdateSequence();
synchronized (this) {
while (pending_seq < latest) {
try {
wait(getSearchTimeout());
} catch (final InterruptedException e) {
throw new IOException("Search timed out.");
}
}
}
}
private synchronized void setPendingSequence(final long newSequence) {
pending_seq = newSequence;
notifyAll();
}
@Override
public String toString() {
return writer.getDirectory().toString();
}
}
private final class RestrictiveClassShutter implements ClassShutter {
public boolean visibleToScripts(final String fullClassName) {
return false;
}
}
private static final long COMMIT_INTERVAL = SECONDS.toNanos(60);
private static final JSONObject JSON_SUCCESS = JSONObject
.fromObject("{\"ok\":true}");
public static File uuidDir(final File root, final UUID uuid) {
return new File(root, uuid.toString());
}
public static File viewDir(final File root, final UUID uuid,
final String digest, final boolean mkdirs) throws IOException {
final File uuidDir = uuidDir(root, uuid);
final File viewDir = new File(uuidDir, digest);
if (mkdirs) {
viewDir.mkdirs();
}
return viewDir;
}
private static long now() {
return System.nanoTime();
}
private final HttpClient client;
private boolean closed;
private Context context;
private final Database database;
private long ddoc_seq;
private long lastCommit;
private final CountDownLatch latch = new CountDownLatch(1);
private Logger logger;
private final Map<String, View> paths = new HashMap<String, View>();
private HttpUriRequest req;
private final File root;
private long since;
private final Map<View, IndexState> states = Collections
.synchronizedMap(new HashMap<View, IndexState>());
private UUID uuid;
private final HierarchicalINIConfiguration ini;
public DatabaseIndexer(final HttpClient client, final File root,
final Database database, final HierarchicalINIConfiguration ini)
throws IOException {
this.client = client;
this.root = root;
this.database = database;
this.ini = ini;
}
public void admin(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
final IndexState state = getState(req, resp);
if (state == null)
return;
final String command = pathParts(req)[4];
if ("_expunge".equals(command)) {
logger.info("Expunging deletes from " + state);
state.writer.expungeDeletes(false);
ServletUtils.setResponseContentTypeAndEncoding(req, resp);
resp.setStatus(202);
ServletUtils.writeJSON(resp, JSON_SUCCESS);
return;
}
if ("_optimize".equals(command)) {
logger.info("Optimizing " + state);
state.writer.optimize(false);
ServletUtils.setResponseContentTypeAndEncoding(req, resp);
resp.setStatus(202);
ServletUtils.writeJSON(resp, JSON_SUCCESS);
return;
}
}
public void awaitInitialization() {
try {
latch.await();
} catch (final InterruptedException e) {
// Ignore.
}
}
public Void handleResponse(final HttpResponse response)
throws ClientProtocolException, IOException {
final HttpEntity entity = response.getEntity();
final BufferedReader reader = new BufferedReader(new InputStreamReader(
entity.getContent(), "UTF-8"));
String line;
loop: while ((line = reader.readLine()) != null) {
maybeCommit();
// Heartbeat.
if (line.length() == 0) {
logger.trace("heartbeat");
continue loop;
}
final JSONObject json = JSONObject.fromObject(line);
if (json.has("error")) {
logger.warn("Indexing stopping due to error: " + json);
break loop;
}
if (json.has("last_seq")) {
logger.warn("End of changes detected.");
break loop;
}
final long seq = json.getLong("seq");
final String id = json.getString("id");
CouchDocument doc;
if (json.has("doc")) {
doc = new CouchDocument(json.getJSONObject("doc"));
} else {
// include_docs=true doesn't work prior to 0.11.
try {
doc = database.getDocument(id);
} catch (final HttpResponseException e) {
switch (e.getStatusCode()) {
case HttpStatus.SC_NOT_FOUND:
doc = CouchDocument.deletedDocument(id);
break;
default:
logger.warn("Failed to fetch " + id);
break loop;
}
}
}
if (id.startsWith("_design") && seq > ddoc_seq) {
logger.info("Exiting due to design document change.");
break loop;
}
if (doc.isDeleted()) {
for (final IndexState state : states.values()) {
state.writer.deleteDocuments(new Term("_id", id));
state.setPendingSequence(seq);
}
} else {
for (final Entry<View, IndexState> entry : states.entrySet()) {
final View view = entry.getKey();
final IndexState state = entry.getValue();
final Document[] docs;
try {
docs = state.converter.convert(doc, view
.getDefaultSettings(), database);
} catch (final Exception e) {
logger.warn(id + " caused " + e.getMessage());
continue loop;
}
state.writer.deleteDocuments(new Term("_id", id));
for (final Document d : docs) {
state.writer.addDocument(d, view.getAnalyzer());
}
state.setPendingSequence(seq);
state.dirty = true;
}
}
}
req.abort();
return null;
}
public void info(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
final IndexState state = getState(req, resp);
if (state == null)
return;
final IndexReader reader = state.borrowReader(isStaleOk(req));
try {
final JSONObject result = new JSONObject();
result.put("current", reader.isCurrent());
result.put("disk_size", Utils.directorySize(reader.directory()));
result.put("doc_count", reader.numDocs());
result.put("doc_del_count", reader.numDeletedDocs());
final JSONArray fields = new JSONArray();
for (final Object field : reader.getFieldNames(FieldOption.INDEXED)) {
if (((String) field).startsWith("_")) {
continue;
}
fields.add(field);
}
result.put("fields", fields);
result.put("last_modified", Long.toString(IndexReader
.lastModified(reader.directory())));
result.put("optimized", reader.isOptimized());
result.put("ref_count", reader.getRefCount());
final JSONObject info = new JSONObject();
info.put("code", 200);
info.put("json", result);
ServletUtils.setResponseContentTypeAndEncoding(req, resp);
final Writer writer = resp.getWriter();
try {
writer.write(result.toString());
} finally {
writer.close();
}
} finally {
state.returnReader(reader);
}
}
public void run() {
if (closed) {
throw new IllegalStateException("closed!");
}
try {
init();
} catch (final IOException e) {
logger.warn("Exiting after init() raised I/O exception.", e);
return;
}
try {
try {
req = database.getChangesRequest(since);
logger.info("Indexing from update_seq " + since);
client.execute(req, this);
} finally {
close();
}
} catch (final SocketException e) {
// Ignored because req.abort() does this.
} catch (final IOException e) {
logger.warn("Exiting due to I/O exception.", e);
}
}
public void search(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
final IndexState state = getState(req, resp);
if (state == null)
return;
if (state.notModified(req)) {
resp.setStatus(304);
return;
}
final IndexSearcher searcher = state.borrowSearcher(isStaleOk(req));
final String etag = state.getEtag();
final JSONArray result = new JSONArray();
try {
for (final String queryString : req.getParameterValues("q")) {
final Query q = state.parser.parse(queryString);
final JSONObject queryRow = new JSONObject();
queryRow.put("q", q.toString());
if (getBooleanParameter(req, "debug")) {
queryRow.put("plan", QueryPlan.toPlan(q));
}
queryRow.put("etag", etag);
if (getBooleanParameter(req, "rewrite")) {
final Query rewritten_q = q.rewrite(searcher
.getIndexReader());
queryRow.put("rewritten_q", rewritten_q.toString());
final JSONObject freqs = new JSONObject();
final Set<Term> terms = new HashSet<Term>();
rewritten_q.extractTerms(terms);
for (final Object term : terms) {
final int freq = searcher.docFreq((Term) term);
freqs.put(term, freq);
}
queryRow.put("freqs", freqs);
} else {
// Perform the search.
final TopDocs td;
final StopWatch stopWatch = new StopWatch();
final boolean include_docs = getBooleanParameter(req,
"include_docs");
final int limit = getIntParameter(req, "limit", 25);
final Sort sort = CustomQueryParser.toSort(req
.getParameter("sort"));
final int skip = getIntParameter(req, "skip", 0);
if (sort == null) {
td = searcher.search(q, null, skip + limit);
} else {
td = searcher.search(q, null, skip + limit, sort);
}
stopWatch.lap("search");
// Fetch matches (if any).
final int max = Math.max(0, Math.min(td.totalHits - skip,
limit));
final JSONArray rows = new JSONArray();
final String[] fetch_ids = new String[max];
for (int i = skip; i < skip + max; i++) {
final Document doc = searcher.doc(td.scoreDocs[i].doc);
final JSONObject row = new JSONObject();
final JSONObject fields = new JSONObject();
// Include stored fields.
for (final Object f : doc.getFields()) {
final Field fld = (Field) f;
if (!fld.isStored()) {
continue;
}
final String name = fld.name();
final String value = fld.stringValue();
if (value != null) {
if ("_id".equals(name)) {
row.put("id", value);
} else {
if (!fields.has(name)) {
fields.put(name, value);
} else {
final Object obj = fields.get(name);
if (obj instanceof String) {
final JSONArray arr = new JSONArray();
arr.add(obj);
arr.add(value);
fields.put(name, arr);
} else {
assert obj instanceof JSONArray;
((JSONArray) obj).add(value);
}
}
}
}
}
if (!Float.isNaN(td.scoreDocs[i].score)) {
row.put("score", td.scoreDocs[i].score);
}// Include sort order (if any).
if (td instanceof TopFieldDocs) {
final FieldDoc fd = (FieldDoc) ((TopFieldDocs) td).scoreDocs[i];
row.put("sort_order", fd.fields);
}
// Fetch document (if requested).
if (include_docs) {
fetch_ids[i - skip] = doc.get("_id");
}
if (fields.size() > 0) {
row.put("fields", fields);
}
rows.add(row);
                        }
                        // Fetch documents (if requested).
                        if (include_docs && fetch_ids.length > 0) {
                            final List<CouchDocument> fetched_docs = database
                                    .getDocuments(fetch_ids);
                            for (int j = 0; j < max; j++) {
                                rows.getJSONObject(j).put("doc",
                                        fetched_docs.get(j).asJson());
                            }
                        }
stopWatch.lap("fetch");
queryRow.put("skip", skip);
queryRow.put("limit", limit);
queryRow.put("total_rows", td.totalHits);
queryRow.put("search_duration", stopWatch
.getElapsed("search"));
queryRow.put("fetch_duration", stopWatch
.getElapsed("fetch"));
// Include sort info (if requested).
if (td instanceof TopFieldDocs) {
queryRow.put("sort_order", CustomQueryParser
.toString(((TopFieldDocs) td).fields));
}
queryRow.put("rows", rows);
                    }
                    result.add(queryRow);
                }
} catch (final ParseException e) {
ServletUtils.sendJSONError(req, resp, 400, "Bad query syntax: "
+ e.getMessage());
return;
} finally {
state.returnSearcher(searcher);
}
resp.setHeader("ETag", etag);
resp.setHeader("Cache-Control", "must-revalidate");
ServletUtils.setResponseContentTypeAndEncoding(req, resp);
final JSON json = result.size() > 1 ? result : result.getJSONObject(0);
final String callback = req.getParameter("callback");
final String body;
if (callback != null) {
body = String.format("%s(%s)", callback, json);
} else {
body = json.toString(getBooleanParameter(req, "debug") ? 2 : 0);
}
final Writer writer = resp.getWriter();
try {
writer.write(body);
} finally {
writer.close();
}
}
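    // Illustrative request (a sketch, not part of the original source). The URL prefix depends
    // on the servlet mapping, but the query parameters are the ones read by search() above:
    // q (required), limit, skip, sort, include_docs, stale=ok, debug, rewrite, and callback
    // for JSONP responses.
    //
    //   GET .../<designDocName>/<viewName>?q=title:couchdb&limit=10&include_docs=true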
private void close() throws IOException {
this.closed = true;
for (final IndexState state : states.values()) {
state.close();
}
states.clear();
Context.exit();
}
private void commitAll() throws IOException {
for (final Entry<View, IndexState> entry : states.entrySet()) {
final View view = entry.getKey();
final IndexState state = entry.getValue();
if (state.pending_seq > getUpdateSequence(state.writer)) {
final Map<String, String> userData = new HashMap<String, String>();
userData.put("last_seq", Long.toString(state.pending_seq));
state.writer.commit(userData);
logger.info(view + " now at update_seq " + state.pending_seq);
}
}
lastCommit = now();
}
private boolean getBooleanParameter(final HttpServletRequest req,
final String parameterName) {
return Boolean.parseBoolean(req.getParameter(parameterName));
}
private int getIntParameter(final HttpServletRequest req,
final String parameterName, final int defaultValue) {
final String result = req.getParameter(parameterName);
return result != null ? Integer.parseInt(result) : defaultValue;
}
private IndexState getState(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
final String path = pathParts(req)[2] + "/" + pathParts(req)[3];
final View view = paths.get(path);
if (view == null) {
ServletUtils.sendJSONError(req, resp, 400, "no_such_view");
return null;
}
final IndexState result = states.get(view);
if (result == null) {
ServletUtils.sendJSONError(req, resp, 400, "no_such_state");
}
return result;
}
private long getUpdateSequence(final Directory dir) throws IOException {
if (!IndexReader.indexExists(dir)) {
return 0L;
}
return getUpdateSequence(IndexReader.getCommitUserData(dir));
}
private long getUpdateSequence(final IndexWriter writer) throws IOException {
return getUpdateSequence(writer.getDirectory());
}
private long getUpdateSequence(final Map<String, String> userData) {
if (userData != null && userData.containsKey("last_seq")) {
return Long.parseLong(userData.get("last_seq"));
}
return 0L;
}
private void init() throws IOException {
this.logger = Logger.getLogger(DatabaseIndexer.class.getName() + "."
+ database.getInfo().getName());
this.uuid = database.getOrCreateUuid();
this.context = Context.enter();
context.setClassShutter(new RestrictiveClassShutter());
context.setOptimizationLevel(9);
this.ddoc_seq = database.getInfo().getUpdateSequence();
this.since = 0;
for (final DesignDocument ddoc : database.getAllDesignDocuments()) {
for (final Entry<String, View> entry : ddoc.getAllViews()
.entrySet()) {
final String name = entry.getKey();
final View view = entry.getValue();
paths.put(ddoc.getId().substring(8) + "/" + name, view);
if (!states.containsKey(view)) {
final Directory dir = FSDirectory.open(viewDir(view, true));
final long seq = getUpdateSequence(dir);
if (since == 0) {
since = seq;
}
if (seq != -1L) {
since = Math.min(since, seq);
}
final DocumentConverter converter = new DocumentConverter(
context, view);
final IndexWriter writer = newWriter(dir);
final QueryParser parser = new CustomQueryParser(
Constants.VERSION, Constants.DEFAULT_FIELD, view
.getAnalyzer());
final IndexState state = new IndexState(converter, writer,
parser, database);
state.setPendingSequence(seq);
states.put(view, state);
}
}
}
logger.debug("paths: " + paths);
this.lastCommit = now();
latch.countDown();
}
private boolean isStaleOk(final HttpServletRequest req) {
return "ok".equals(req.getParameter("stale"));
}
private void maybeCommit() throws IOException {
if (now() - lastCommit >= COMMIT_INTERVAL) {
commitAll();
}
}
private IndexWriter newWriter(final Directory dir) throws IOException {
final IndexWriter result = new IndexWriter(dir, Constants.ANALYZER,
MaxFieldLength.UNLIMITED);
result.setMergeFactor(ini.getInt("lucene.mergeFactor", 5));
result.setUseCompoundFile(ini.getBoolean("lucene.useCompoundFile", false));
result.setRAMBufferSizeMB(ini.getDouble("lucene.ramBufferSizeMB", IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB));
return result;
}
private String[] pathParts(final HttpServletRequest req) {
return req.getRequestURI().replaceFirst("/", "").split("/");
}
private File viewDir(final View view, final boolean mkdirs)
throws IOException {
assert root != null;
assert uuid != null;
assert view != null;
return viewDir(root, uuid, view.getDigest(), mkdirs);
}
private long getSearchTimeout() {
return ini.getLong("lucene.timeout", 5000);
}
}
|
package com.gochinatv.cdn.api.jdk.queue;
import org.junit.Test;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
public class BlockingQueueTest {
@Test
public void linkedBlockQueueTest() {
        // Without a capacity argument the queue would default to Integer.MAX_VALUE; here it is bounded to 10.
BlockingQueue<String> queue = new LinkedBlockingQueue<>(10);
queue.add("aaa");
queue.add("bbb");
        // add() returns true on success and throws IllegalStateException if the queue is full.
boolean add = queue.add("queue01");
System.out.println("add:"+add + ",size:"+queue.size());
try {
queue.put("queue02");
System.out.println("size:"+queue.size());
} catch (InterruptedException e) {
e.printStackTrace();
}
        boolean offer = queue.offer("queue03");// returns true on success, false if the queue is full
System.out.println("offer:"+offer + ",size:"+queue.size());
try {
            // offer() with a timeout waits up to 5 seconds for free space, then returns false.
boolean queueOffer2 = queue.offer("queueOffer2", 5, TimeUnit.SECONDS);
System.out.println("queueOffer2:"+queueOffer2 + ",size:"+queue.size());
} catch (InterruptedException e) {
e.printStackTrace();
}
queue.clear();
        String poll = queue.poll();// returns the head of the queue, or null if it is empty
System.out.println("poll:"+poll + ",size:"+queue.size());
try {
String pollTimeOut = queue.poll(5, TimeUnit.SECONDS);
System.out.println("pollTimeOut:"+pollTimeOut + ",size:"+queue.size());
} catch (InterruptedException e) {
e.printStackTrace();
}
String peek = queue.peek();
System.out.println("peek:"+peek + ",size:"+queue.size());
        // element() retrieves but does not remove the head; unlike peek() it throws
        // NoSuchElementException if the queue is empty, which it is here after clear().
        try {
            String element = queue.element();
            System.out.println("element:"+element + ",size:"+queue.size());
        } catch (NoSuchElementException e) {
            System.out.println("element threw NoSuchElementException, size:"+queue.size());
        }
boolean contains = queue.contains("queue03");
System.out.println("contains:"+contains + ",size:"+queue.size());
}
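    // A minimal additional sketch (not in the original test): shows that put() blocks on a
    // full bounded queue until take() frees a slot. The capacity of 1 and the 200 ms delay
    // are illustrative values.
    @Test
    public void boundedPutBlocksUntilTakeTest() throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>(1);
        queue.put("first"); // fills the single slot
        Thread consumer = new Thread(() -> {
            try {
                TimeUnit.MILLISECONDS.sleep(200); // let the put below block first
                System.out.println("taken:" + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();
        long start = System.nanoTime();
        queue.put("second"); // blocks until the consumer takes "first"
        long waitedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        System.out.println("put unblocked after ~" + waitedMs + " ms, size:" + queue.size());
        consumer.join();
    }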
}
|
package com.imcode.imcms.mapping;
import com.imcode.imcms.mapping.container.*;
import com.imcode.imcms.mapping.jpa.User;
import com.imcode.imcms.mapping.jpa.UserRepository;
import com.imcode.imcms.mapping.jpa.doc.Version;
import com.imcode.imcms.mapping.jpa.doc.VersionRepository;
import com.imcode.imcms.mapping.jpa.doc.content.textdoc.*;
import com.imcode.imcms.mapping.jpa.doc.content.textdoc.Menu;
import com.imcode.imcms.mapping.jpa.doc.content.textdoc.MenuItem;
import com.imcode.imcms.persistence.entity.*;
import com.imcode.imcms.persistence.entity.LoopEntryRef;
import com.imcode.imcms.persistence.repository.ImageRepository;
import com.imcode.imcms.persistence.repository.LanguageRepository;
import com.imcode.imcms.persistence.repository.LoopRepository;
import com.imcode.imcms.util.Value;
import imcode.server.document.textdocument.ImageDomainObject;
import imcode.server.document.textdocument.MenuDomainObject;
import imcode.server.document.textdocument.TextDocumentDomainObject;
import imcode.server.document.textdocument.TextDomainObject;
import imcode.server.user.UserDomainObject;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@Service
@Transactional
public class TextDocumentContentSaver {
private final VersionRepository versionRepository;
private final TextRepository textRepository;
private final TextHistoryRepository textHistoryRepository;
private final ImageRepository imageRepository;
private final MenuRepository menuRepository;
private final TemplateNamesRepository templateNamesRepository;
private final LoopRepository loopRepository;
private final LanguageRepository languageRepository;
private final IncludeRepository includeRepository;
private final UserRepository userRepository;
@Inject
public TextDocumentContentSaver(VersionRepository versionRepository, TextRepository textRepository,
TextHistoryRepository textHistoryRepository, ImageRepository imageRepository,
MenuRepository menuRepository, TemplateNamesRepository templateNamesRepository,
LoopRepository loopRepository, LanguageRepository languageRepository,
IncludeRepository includeRepository, UserRepository userRepository) {
this.versionRepository = versionRepository;
this.textRepository = textRepository;
this.textHistoryRepository = textHistoryRepository;
this.imageRepository = imageRepository;
this.menuRepository = menuRepository;
this.templateNamesRepository = templateNamesRepository;
this.loopRepository = loopRepository;
this.languageRepository = languageRepository;
this.includeRepository = includeRepository;
this.userRepository = userRepository;
}
/**
* Saves new document content.
*/
public void createContent(TextDocumentDomainObject doc, UserDomainObject userDomainObject) {
DocRef docRef = doc.getRef();
Version version = findVersion(docRef);
Language language = findLanguage(docRef);
User user = findUser(userDomainObject);
// loops must be created before loop items (texts and images)
// createLoops(doc, version);
saveTexts(doc, version, language, user, SaveMode.CREATE);
saveImages(doc, version, language, SaveMode.CREATE);
// saveMenus(doc, version, SaveMode.CREATE);
saveTemplateNames(doc.getId(), doc.getTemplateNames());
saveIncludes(doc.getId(), doc.getIncludesMap());
}
public void createCommonContent(TextDocumentDomainObject doc) {
VersionRef versionRef = doc.getVersionRef();
Version version = findVersion(versionRef);
// createLoops(doc, version);
// saveMenus(doc, version, SaveMode.CREATE);
saveTemplateNames(doc.getId(), doc.getTemplateNames());
saveIncludes(doc.getId(), doc.getIncludesMap());
}
public void createI18nContent(TextDocumentDomainObject doc, UserDomainObject userDomainObject) {
DocRef docRef = doc.getRef();
Version version = findVersion(docRef);
Language language = findLanguage(docRef);
User user = findUser(userDomainObject);
saveTexts(doc, version, language, user, SaveMode.CREATE);
saveImages(doc, version, language, SaveMode.CREATE);
}
/**
* Updates existing document content.
*/
public void updateContent(TextDocumentDomainObject doc, UserDomainObject userDomainObject) {
DocRef docRef = doc.getRef();
Version version = findVersion(docRef);
Language language = findLanguage(docRef);
User user = findUser(userDomainObject);
// loop items must be deleted before loops (texts and images)
textRepository.deleteByVersionAndLanguage(version, language);
imageRepository.deleteByVersionAndLanguage(version, language);
menuRepository.deleteByVersion(version);
// loops must be re-created before loop items (texts and images)
/* loopRepository.findByVersion(version).forEach((a) -> a.getEntries().clear());
loopRepository.deleteByVersion(version);
createLoops(doc, version);*/
saveTexts(doc, version, language, user, SaveMode.UPDATE);
saveImages(doc, version, language, SaveMode.UPDATE);
// saveMenus(doc, version, SaveMode.UPDATE);
saveTemplateNames(doc.getId(), doc.getTemplateNames());
saveIncludes(doc.getId(), doc.getIncludesMap());
}
/**
* Saves existing document image.
*/
public void saveImage(TextDocImageContainer container) {
Image image = toJpaObject(container);
saveImage(image, SaveMode.UPDATE);
}
    public void saveImages(TextDocImagesContainer container) {
        Version version = findVersion(container);
        container.getImages().forEach((languageDO, imageDO) -> {
            Language language = findLanguage(languageDO);
            Image image = toJpaObject(imageDO, version, language, container.getImageNo(), toJpaObject(container.getLoopEntryRef()));
            saveImage(image, SaveMode.UPDATE);
        });
    }
public void saveText(TextDocTextContainer container, UserDomainObject userDomainObject) {
User user = findUser(userDomainObject);
Text text = toJpaObject(container);
saveText(text, user, SaveMode.UPDATE);
}
public void saveTexts(TextDocTextsContainer container, UserDomainObject userDomainObject) {
User user = findUser(userDomainObject);
Version version = findVersion(container);
container.getTexts().forEach((languageDO, textDO) -> {
Language language = findLanguage(languageDO);
Text text = toJpaObject(textDO, version, language, container.getTextNo(), toJpaObject(container.getLoopEntryRef()));
saveText(text, user, SaveMode.UPDATE);
});
}
private void saveTemplateNames(int docId, TextDocumentDomainObject.TemplateNames templateNamesDO) {
TemplateNames templateNames = new TemplateNames();
templateNames.setDocId(docId);
templateNames.setDefaultTemplateName(templateNamesDO.getDefaultTemplateName());
templateNames.setDefaultTemplateNameForRestricted1(templateNamesDO.getDefaultTemplateNameForRestricted1());
templateNames.setDefaultTemplateNameForRestricted2(templateNamesDO.getDefaultTemplateNameForRestricted2());
templateNames.setTemplateGroupId(templateNamesDO.getTemplateGroupId());
templateNames.setTemplateName(templateNamesDO.getTemplateName());
templateNamesRepository.save(templateNames);
}
private void saveIncludes(int docId, Map<Integer, Integer> includes) {
includeRepository.deleteByDocId(docId);
includes.forEach((no, includedDocId) -> {
Include include = new Include();
include.setId(null);
include.setDocId(docId);
include.setNo(no);
include.setIncludedDocumentId(includedDocId);
includeRepository.save(include);
});
}
public void saveMenu(TextDocMenuContainer container) {
VersionRef versionRef = container.getVersionRef();
Version version = findVersion(versionRef);
Menu menu = toJpaObject(container.getMenu(), version, container.getMenuNo());
saveMenu(menu, SaveMode.UPDATE);
}
private Menu toJpaObject(MenuDomainObject menuDO, Version version, int no) {
Menu menu = new Menu();
Map<Integer, MenuItem> menuItems = new HashMap<>();
menuDO.getItemsMap().forEach((menuItemNo, menuItemDO) -> {
MenuItem menuItem = new MenuItem();
menuItem.setSortKey(menuItemDO.getSortKey());
menuItem.setTreeSortIndex(menuItemDO.getTreeSortIndex());
menuItems.put(menuItemNo, menuItem);
});
menu.setVersion(version);
menu.setNo(no);
menu.setSortOrder(menuDO.getSortOrder());
menu.setItems(menuItems);
return menu;
}
private void saveMenu(Menu menu, SaveMode saveMode) {
if (saveMode == SaveMode.UPDATE) {
Integer id = menuRepository.findIdByVersionAndNo(menu.getVersion(), menu.getNo());
menu.setId(id);
}
menuRepository.saveAndFlush(menu);
}
private void saveImages(TextDocumentDomainObject doc, Version version, Language language, SaveMode saveMode) {
for (Map.Entry<Integer, ImageDomainObject> entry : doc.getImages().entrySet()) {
Image image = toJpaObject(entry.getValue(), version, language, entry.getKey(), null);
saveImage(image, saveMode);
}
for (Map.Entry<TextDocumentDomainObject.LoopItemRef, ImageDomainObject> entry : doc.getLoopImages().entrySet()) {
TextDocumentDomainObject.LoopItemRef loopItemRef = entry.getKey();
LoopEntryRef loopEntryRef = new LoopEntryRef(loopItemRef.getLoopNo(), loopItemRef.getEntryNo());
Image image = toJpaObject(entry.getValue(), version, language, loopItemRef.getItemNo(), loopEntryRef);
saveImage(image, saveMode);
}
}
private void saveTexts(TextDocumentDomainObject doc, Version version, Language language, User user, SaveMode saveMode) {
for (Map.Entry<Integer, TextDomainObject> entry : doc.getTexts().entrySet()) {
Text text = toJpaObject(entry.getValue(), version, language, entry.getKey(), null);
saveText(text, user, saveMode);
}
for (Map.Entry<TextDocumentDomainObject.LoopItemRef, TextDomainObject> entry : doc.getLoopTexts().entrySet()) {
TextDocumentDomainObject.LoopItemRef loopItemRef = entry.getKey();
LoopEntryRef loopEntryRef = new LoopEntryRef(loopItemRef.getLoopNo(), loopItemRef.getEntryNo());
Text text = toJpaObject(entry.getValue(), version, language, loopItemRef.getItemNo(), loopEntryRef);
saveText(text, user, saveMode);
}
}
private void saveImage(Image image, SaveMode saveMode) {
if (saveMode == SaveMode.UPDATE) {
LoopEntryRef loopEntryRef = image.getLoopEntryRef();
Integer id = loopEntryRef == null
? imageRepository.findIdByVersionAndLanguageAndIndexWhereLoopEntryRefIsNull(image.getVersion(), image.getLanguage(), image.getIndex())
: imageRepository.findIdByVersionAndLanguageAndIndexAndLoopEntryRef(image.getVersion(), image.getLanguage(), image.getIndex(), loopEntryRef);
image.setId(id);
}
createLoopEntryIfNotExists(image.getVersion(), image.getLoopEntryRef());
imageRepository.save(image);
}
private void saveText(Text text, User user, SaveMode saveMode) {
if (saveMode == SaveMode.UPDATE) {
LoopEntryRef loopEntryRef = text.getLoopEntryRef();
Integer id = loopEntryRef == null
? textRepository.findIdByVersionAndLanguageAndIndexWhereLoopEntryRefIsNull(text.getVersion(), text.getLanguage(), text.getIndex())
: textRepository.findIdByVersionAndLanguageAndIndexAndLoopEntryRef(text.getVersion(), text.getLanguage(), text.getIndex(), loopEntryRef);
text.setId(id);
}
createLoopEntryIfNotExists(text.getVersion(), text.getLoopEntryRef());
textRepository.save(text);
textHistoryRepository.save(new TextHistory(text, user));
}
private void createLoopEntryIfNotExists(Version version, LoopEntryRef entryRef) {
if (entryRef == null) return;
Loop loop = loopRepository.findByVersionAndIndex(
version, entryRef.getLoopIndex());
int entryIndex = entryRef.getLoopEntryIndex();
int loopIndex = entryRef.getLoopIndex();
if (loop == null) {
loop = new Loop();
loop.setVersion(version);
loop.setIndex(loopIndex);
loop.getEntries().add(new LoopEntry(entryIndex));
} else {
if (!loop.containsEntry(entryRef.getLoopEntryIndex())) {
loop.getEntries().add(new LoopEntry(entryIndex));
}
}
loopRepository.save(loop);
}
private Text toJpaObject(TextDocTextContainer container) {
Language language = findLanguage(container);
Version version = findVersion(container);
LoopEntryRef loopEntryRef = toJpaObject(container.getLoopEntryRef());
return toJpaObject(container.getText(), version, language, container.getTextNo(), loopEntryRef);
}
private Text toJpaObject(TextDomainObject textDO, Version version, Language language, int no, LoopEntryRef loopEntryRef) {
Text text = new Text();
text.setLanguage(language);
text.setVersion(version);
text.setIndex(no);
text.setText(textDO.getText());
text.setType(TextType.values()[textDO.getType()]);
text.setLoopEntryRef(loopEntryRef);
return text;
}
private Image toJpaObject(TextDocImageContainer container) {
Language language = findLanguage(container);
Version version = findVersion(container);
LoopEntryRef loopEntryRef = toJpaObject(container.getLoopEntryRef());
return toJpaObject(container.getImage(), version, language, container.getImageNo(), loopEntryRef);
}
private Image toJpaObject(ImageDomainObject imageDO, Version version, Language language, int no, LoopEntryRef loopEntryRef) {
ImageDomainObject.CropRegion cropRegionDO = imageDO.getCropRegion();
ImageCropRegion cropRegion = cropRegionDO.isValid()
? new ImageCropRegion(cropRegionDO.getCropX1(), cropRegionDO.getCropY1(), cropRegionDO.getCropX2(), cropRegionDO.getCropY2())
: new ImageCropRegion(-1, -1, -1, -1);
Image image = new Image();
image.setIndex(no);
image.setLanguage(language);
image.setVersion(version);
image.setLoopEntryRef(loopEntryRef);
image.setAlign(imageDO.getAlign());
image.setAlternateText(imageDO.getAlternateText());
image.setBorder(imageDO.getBorder());
image.setCropRegion(cropRegion);
image.setFormat(imageDO.getFormat());
image.setGeneratedFilename(imageDO.getGeneratedFilename());
image.setHeight(imageDO.getHeight());
image.setHorizontalSpace(imageDO.getHorizontalSpace());
image.setUrl(imageDO.getSource().toStorageString());
image.setLinkUrl(imageDO.getLinkUrl());
image.setLowResolutionUrl(imageDO.getLowResolutionUrl());
image.setName(imageDO.getName());
image.setResize(imageDO.getResize() == null ? 0 : imageDO.getResize().getOrdinal());
image.setRotateAngle(imageDO.getRotateDirection() == null ? 0 : imageDO.getRotateDirection().getAngle());
image.setTarget(imageDO.getTarget());
image.setType(imageDO.getSource().getTypeId());
image.setVerticalSpace(imageDO.getVerticalSpace());
image.setWidth(imageDO.getWidth());
image.setHeight(imageDO.getHeight());
image.setArchiveImageId(imageDO.getArchiveImageId());
return image;
}
private LoopEntryRef toJpaObject(com.imcode.imcms.mapping.container.LoopEntryRef source) {
return source == null
? null
: new LoopEntryRef(source.getLoopNo(), source.getEntryNo());
}
private Loop toJpaObject(TextDocLoopContainer container) {
return toJpaObject(container.getVersionRef(), container.getLoopNo(), container.getLoop());
}
private Loop toJpaObject(VersionRef versionRef, int loopNo, com.imcode.imcms.api.Loop loopDO) {
List<LoopEntry> entries = new LinkedList<>();
Version version = findVersion(versionRef);
loopDO.getEntries().forEach((entryNo, enabled) -> entries.add(new LoopEntry(entryNo, enabled)));
return Value.with(
new Loop(),
l -> {
l.setEntries(entries);
l.setIndex(loopNo);
l.setVersion(version);
}
);
}
private Version findVersion(DocRef docRef) {
return versionRepository.findByDocIdAndNo(docRef.getId(), docRef.getVersionNo());
}
private Version findVersion(VersionRef versionRef) {
return versionRepository.findByDocIdAndNo(versionRef.getDocId(), versionRef.getNo());
}
private Version findVersion(Container container) {
return versionRepository.findByDocIdAndNo(container.getDocId(), container.getVersionNo());
}
private User findUser(UserDomainObject userDomainObject) {
return userRepository.getOne(userDomainObject.getId());
}
private Language findLanguage(LanguageContainer container) {
return languageRepository.findByCode(container.getLanguageCode());
}
private Language findLanguage(com.imcode.imcms.api.DocumentLanguage documentLanguage) {
return languageRepository.findByCode(documentLanguage.getCode());
}
private enum SaveMode {
CREATE, UPDATE
}
}
|
package com.intalio.web.server;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.StringWriter;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.UnavailableException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import org.json.JSONException;
import org.json.JSONObject;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleReference;
import org.osgi.framework.ServiceReference;
import com.intalio.web.profile.IDiagramProfile;
import com.intalio.web.profile.IDiagramProfileService;
import com.intalio.web.profile.impl.DefaultProfileImpl;
import com.intalio.web.profile.impl.ProfileServiceImpl;
import com.intalio.web.repository.DiagramValidationException;
import com.intalio.web.repository.IUUIDBasedRepository;
import com.intalio.web.repository.IUUIDBasedRepositoryService;
import com.intalio.web.repository.impl.UUIDBasedFileRepository;
/**
* @author Antoine Toulme
 * A servlet that saves and loads models keyed by UUID, delegating to a
 * repository which may be supplied by a backend in an OSGi environment
 * or backed by the file system.
*
*/
public class UUIDBasedRepositoryServlet extends HttpServlet {
/**
* Serializable comes with this field.
*/
private static final long serialVersionUID = 1433687917432938596L;
/**
     * The logger for this class.
*/
private static final Logger _logger = Logger.getLogger(UUIDBasedRepositoryServlet.class);
/**
* The class name of the default repository.
*/
private static final String DEFAULT_REPOSITORY = UUIDBasedFileRepository.class.getName();
/**
* The default factory for creation of repositories.
*
* The factory uses the initialization parameter repositoryClass
* to know which class to instantiate.
* The class is loaded using the current thread context class loader,
* or the UUIDBasedRepositoryServlet class loader if none is set.
*/
private static IUUIDBasedRepositoryService _factory = new IUUIDBasedRepositoryService() {
/**
* @param config
* the servlet config to help create the repository
* @return a new IUUIDBasedRepository object
*/
@SuppressWarnings("rawtypes")
public IUUIDBasedRepository createRepository(ServletConfig config)
throws ServletException {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = UUIDBasedRepositoryServlet.class.getClassLoader();
}
String className = config.getInitParameter("repositoryClass");
if (className == null) {
_logger.debug("Defaulting the repository to the default class");
className = DEFAULT_REPOSITORY;
}
try {
Class clazz = cl.loadClass(className);
return (IUUIDBasedRepository) clazz.newInstance();
} catch (Exception e) {
throw new IllegalArgumentException(e.getMessage(), e);
}
}
};
/**
* The factory used in an OSGi context.
*
* The factory looks for a registered IUUIDBasedRepositoryService using
* the current BundleContext.
* If none is found, it will throw a UnavailableException.
* The first one found will otherwise be used to create the repository.
*/
private static IUUIDBasedRepositoryService _osgiFactory = new IUUIDBasedRepositoryService() {
/**
* @param config
* the servlet config to help create the repository
* @return a new IUUIDBasedRepository object
* @throws ServletException
*/
public IUUIDBasedRepository createRepository(ServletConfig config) throws ServletException {
BundleContext bundleContext = ((BundleReference) getClass().
getClassLoader()).getBundle().getBundleContext();
ServiceReference ref = bundleContext.getServiceReference(
IUUIDBasedRepositoryService.class.getName());
if (ref == null) {
_logger.info("No service registered for IUUIDBasedRepositoryService");
throw new UnavailableException(
"No service registered for IUUIDBasedRepositoryService", 0);
}
IUUIDBasedRepositoryService service = (IUUIDBasedRepositoryService)
bundleContext.getService(ref);
return service.createRepository(config);
}
};
/**
* The repository used to save and load models.
*/
private IUUIDBasedRepository _repository;
/**
     * Initializes the repository servlet.
     *
     * The behavior is based on the initialization parameters read from web.xml.
     *
     * repositoryServiceType:
     * -null
     * The class loader of this class is inspected to see if we are operating
     * in an OSGi context. If so, the osgi factory is used.
     * -default
     * We will use the _factory static field to create the repository.
     * -osgi
     * We will use the _osgiFactory to create the repository.
*
* Please refer to the documentation of both fields for further information.
*
*/
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
try {
String repoType = config.getInitParameter("repositoryServiceType");
if (repoType == null) {
// look up the current class loader
if (UUIDBasedRepositoryServlet.class.getClassLoader()
instanceof BundleReference) {
repoType = "osgi";
} else {
repoType = "default";
}
}
if ("default".equals(repoType)) {
_repository = _factory.createRepository(config);
} else if ("osgi".equals(repoType)){
_repository = _osgiFactory.createRepository(config);
} else {
throw new IllegalArgumentException("Invalid value for init " +
"parameter repositoryServiceType : " + repoType);
}
_repository.configure(this);
} catch (Exception e) {
if (e instanceof ServletException) {
throw (ServletException) e;
}
throw new ServletException(e);
}
}
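    // Illustrative web.xml registration (a sketch, not part of the original source): it
    // exercises the init parameters read above. The servlet-name value is a placeholder;
    // the repositoryClass value is the default implementation imported at the top of this file.
    //
    //   <servlet>
    //     <servlet-name>uuidRepository</servlet-name>
    //     <servlet-class>com.intalio.web.server.UUIDBasedRepositoryServlet</servlet-class>
    //     <init-param>
    //       <param-name>repositoryServiceType</param-name>
    //       <param-value>default</param-value>
    //     </init-param>
    //     <init-param>
    //       <param-name>repositoryClass</param-name>
    //       <param-value>com.intalio.web.repository.impl.UUIDBasedFileRepository</param-value>
    //     </init-param>
    //   </servlet>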
/**
* This method populates the response with the contents of the model.
* It expects two parameters to be passed via the request, uuid and profile.
*/
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
if (resp.isCommitted()) {
return;//called twice... need to clean-up the FilterChainImpl that is quite wrong.
}
String uuid = req.getParameter("uuid");
if (uuid == null) {
throw new ServletException("uuid parameter required");
}
IDiagramProfile profile = getProfile(req, req.getParameter("profile"));
ByteArrayInputStream input = new ByteArrayInputStream(
_repository.load(req, uuid, profile.getSerializedModelExtension()));
byte[] buffer = new byte[4096];
int read;
while ((read = input.read(buffer)) != -1) {
resp.getOutputStream().write(buffer, 0, read);
}
}
/**
* This method saves the model contents based on the json sent as the
* body of the request.
*
* The json should look like:
*
* { "data" : ....,
* "svg" : <svg>...</svg>,
* "uuid" : "1234",
* "profile" : "default"
* }
*
     * The data field is the JSON representation of the model.
     * The svg field contains the graphical model in SVG format.
*/
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
if (resp.isCommitted()) {
return;//called twice... need to clean-up the FilterChainImpl that is quite wrong.
}
BufferedReader reader = req.getReader();
StringWriter reqWriter = new StringWriter();
char[] buffer = new char[4096];
int read;
while ((read = reader.read(buffer)) != -1) {
reqWriter.write(buffer, 0, read);
}
String data = reqWriter.toString();
try {
JSONObject jsonObject = new JSONObject(data);
String json = (String) jsonObject.get("data");
String svg = (String) jsonObject.get("svg");
String uuid = (String) jsonObject.get("uuid");
String profileName = (String) jsonObject.get("profile");
boolean autosave = jsonObject.getBoolean("savetype");
if (_logger.isDebugEnabled()) {
_logger.debug("Calling UUIDBasedRepositoryServlet doPost()...");
_logger.debug("autosave: " + autosave);
}
IDiagramProfile profile = getProfile(req, profileName);
if (_logger.isDebugEnabled()) {
_logger.debug("Begin saving the diagram");
}
_repository.save(req, uuid, json, svg, profile, autosave);
if (_logger.isDebugEnabled()) {
_logger.debug("Finish saving the diagram");
}
} catch (JSONException e1) {
throw new ServletException(e1);
} catch (DiagramValidationException e) {
// set the error JSON to response
resp.setCharacterEncoding("utf-8");
resp.getWriter().write(e.getErrorJsonStr());
}
}
/**
* FIXME this needs to go as it duplicates part of the functionality for
* profiles resolution. We should only write this code once.
*/
private IDiagramProfile getProfile(HttpServletRequest req, String profileName) {
IDiagramProfile profile = null;
// get the profile, either through the OSGi DS or by using the default one:
if (getClass().getClassLoader() instanceof BundleReference) {
BundleContext bundleContext = ((BundleReference) getClass().getClassLoader()).getBundle().getBundleContext();
ServiceReference ref = bundleContext.getServiceReference(IDiagramProfileService.class.getName());
if (ref == null) {
throw new IllegalArgumentException(profileName + " is not registered");
}
IDiagramProfileService service = (IDiagramProfileService) bundleContext.getService(ref);
profile = service.findProfile(req, profileName);
} else if ("default".equals(profileName)) {
profile = new DefaultProfileImpl(getServletContext(), false);
} else {
// check w/o BundleReference
IDiagramProfileService service = new ProfileServiceImpl();
service.init(getServletContext());
profile = service.findProfile(req, profileName);
if(profile == null) {
throw new IllegalArgumentException("Cannot determine the profile to use for interpreting models");
}
}
return profile;
}
}
|
package com.leafsoft.jersey.provider;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.annotation.security.DenyAll;
import javax.annotation.security.PermitAll;
import javax.annotation.security.RolesAllowed;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ResourceInfo;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import org.json.JSONArray;
import org.json.JSONObject;
import com.leafsoft.org.OrgUtil;
@Provider
public class AuthenticationFilter implements javax.ws.rs.container.ContainerRequestFilter
{
@Context
private ResourceInfo resourceInfo;
private JSONObject resJson = new JSONObject();
private static final Response ACCESS_DENIED = Response.status(Response.Status.UNAUTHORIZED)
.entity("{\"message\" : \"You cannot access this resource\"}").build();
private static final Response ACCESS_FORBIDDEN = Response.status(Response.Status.FORBIDDEN)
.entity("{\"message\" : \"Access blocked for all users\"}").build();
@Override
public void filter(ContainerRequestContext requestContext)
{
Method method = resourceInfo.getResourceMethod();
//Access allowed for all
if( ! method.isAnnotationPresent(PermitAll.class))
{
//Access denied for all
if(method.isAnnotationPresent(DenyAll.class))
{
requestContext.abortWith(ACCESS_FORBIDDEN);
return;
}
//Get request headers
final MultivaluedMap<String, String> headers = requestContext.getHeaders();
JSONArray userRoles = OrgUtil.getUserRole();
//If no authorization information present; block access
        if(userRoles == null || userRoles.length() == 0)
{
requestContext.abortWith(ACCESS_DENIED);
return;
}
//Verify user access
if(method.isAnnotationPresent(RolesAllowed.class))
{
RolesAllowed rolesAnnotation = method.getAnnotation(RolesAllowed.class);
Set<String> rolesSet = new HashSet<String>(Arrays.asList(rolesAnnotation.value()));
//Is user valid?
if( ! isUserAllowed(userRoles,rolesSet))
{
requestContext.abortWith(ACCESS_DENIED);
return;
}
}
}
}
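    // Illustrative resource method guarded by this filter (a sketch, not part of the
    // original source; the path and role name are placeholders):
    //
    //   @GET
    //   @Path("/reports")
    //   @RolesAllowed({"ADMIN"})
    //   public Response reports() { ... }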
private boolean isUserAllowed(final JSONArray userroles, final Set<String> rolesSet)
{
boolean isAllowed = false;
for(int i=0;i<userroles.length();i++) {
if(rolesSet.contains(userroles.get(i)))
{
isAllowed = true;
break;
}
}
return isAllowed;
}
}
|
package com.owlplatform.solver;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.owlplatform.common.SampleMessage;
import com.owlplatform.solver.listeners.ConnectionListener;
import com.owlplatform.solver.listeners.SampleListener;
import com.owlplatform.solver.protocol.messages.SubscriptionMessage;
import com.owlplatform.solver.rules.SubscriptionRequestRule;
/**
* A simplified interface between a solver and an aggregator. Buffers samples received from the aggregator and provides
* a simple, synchronous interface to the aggregator for solvers.
*
* @author Robert Moore
*
*/
public class SolverAggregatorConnection {
/**
     * Private class to hide interface methods from classes using the {@code SolverAggregatorConnection}.
* @author Robert Moore
*
*/
private static final class Handler implements ConnectionListener,
SampleListener {
/**
     * The {@code SolverAggregatorConnection} that will actually handle the events.
*/
private final SolverAggregatorConnection parent;
/**
* Creates a new handler for the specified {@code SolverAggregatorConnection}.
* @param parent the actual handler of the events.
*/
public Handler(SolverAggregatorConnection parent) {
this.parent = parent;
}
@Override
public void connectionEnded(SolverAggregatorInterface aggregator) {
this.parent.subscriptionAcknowledged = false;
this.parent.connectionEnded(aggregator);
}
@Override
public void connectionEstablished(SolverAggregatorInterface aggregator) {
this.parent.subscriptionAcknowledged = false;
}
@Override
public void connectionInterrupted(SolverAggregatorInterface aggregator) {
this.parent.subscriptionAcknowledged = false;
// Ignored
}
@Override
public void sampleReceived(SolverAggregatorInterface aggregator,
SampleMessage sample) {
this.parent.sampleReceived(aggregator, sample);
}
@Override
public void subscriptionReceived(SolverAggregatorInterface aggregator,
SubscriptionMessage response) {
this.parent.subscriptionAcknowledged = true;
}
}
/**
* Logger for this class.
*/
private static final Logger log = LoggerFactory
.getLogger(SolverAggregatorConnection.class);
/**
* The internal interface to the aggregator.
*/
protected final SolverAggregatorInterface agg = new SolverAggregatorInterface();
/**
* Queue of samples that were received from the aggregator but not yet
* taken by the solver. Sized according to the buffer size passed to the
* constructor (1,024 samples by default).
*/
protected final LinkedBlockingQueue<SampleMessage> sampleQueue;
/**
* Private handler to hide the event methods from outside classes.
*/
protected final Handler handler = new Handler(this);
/**
* Flag to indicate whether the connection to the aggregator is still alive.
*/
protected boolean connected = true;
/**
* Map of rule numbers to their subscription request rules. Used internally to cancel a subscription
* request on behalf of the solver.
*/
protected final Map<Integer, SubscriptionRequestRule> ruleMap = new ConcurrentHashMap<Integer, SubscriptionRequestRule>();
/**
* Stores the next available rule number for this connection.
*/
protected AtomicInteger nextRuleNum = new AtomicInteger(0);
/**
* Flag to indicate whether full buffers should be logged with a warning. Defaults to false.
*/
protected boolean warnBufferFull = false;
/**
* Flag to indicate whether at least one subscription response message has been received on the current session.
*/
protected boolean subscriptionAcknowledged = false;
/**
* Creates a new Aggregator interface for a solver. The aggregator will not
* be connected until {@link #connect()} is called.
*/
public SolverAggregatorConnection() {
this(1024);
}
/**
* <p>Creates a new Aggregator interface for a solver with the specified buffer
* size. In addition, it will set the following values for the {@code SolverAggregatorInterface} it uses:
* <ul>
* <li>Connection retry delay: 5 seconds</li>
* <li>Connection timeout: 5 seconds</li>
* <li>Disconnect on exception: {@code true}</li>
* <li>Automatic reconnect: {@code true}</li>
* <li>Aggregator host: localhost</li>
* <li>Aggregator port: 7008</li>
* </ul>
* </p>
*
*
* @param bufferSize
* the number of samples to buffer for the solver. Samples
* received after the buffer is full will be discarded and a
* warning will be logged.
*/
public SolverAggregatorConnection(final int bufferSize) {
super();
this.sampleQueue = new LinkedBlockingQueue<SampleMessage>(bufferSize);
this.agg.setConnectionRetryDelay(5000l);
this.agg.setConnectionTimeout(5000l);
this.agg.setDisconnectOnException(true);
this.agg.setStayConnected(true);
this.agg.setHost("localhost");
this.agg.setPort(7008);
this.agg.addSampleListener(this.handler);
this.agg.addConnectionListener(this.handler);
}
/**
* Connects to the aggregator if it is not already connected, returning after the specified timeout
* value if the connection has not succeeded in that time.
*
* @param timeout the connection timeout, in milliseconds.
* @return {@code true} if the connection succeeds, else {@code false}.
*/
public boolean connect(long timeout) {
return (this.connected = this.agg.connect(timeout));
}
/**
* Connects to the aggregator if it is not already connected. This method
* has been replaced with {@link #connect(long)}.
*
* @return {@code true} if the connection succeeds, else {@code false}.
*/
@Deprecated
public boolean connect() {
return this.connect(0);
}
/**
* Disconnects from the aggregator.
*/
public void disconnect() {
this.agg.disconnect();
}
/**
* Sets the hostname/IP address for the aggregator. If the aggregator is
* already connected, then the new host will be used the next time a
* connection is established. The default value is "localhost".
*
* @param host
* the new hostname/IP address for the aggregator.
*/
public void setHost(final String host) {
this.agg.setHost(host);
}
/**
* Sets the port number for the aggregator. If the aggregator is already
* connected, then the new port will be used the next time a connection is
* established. The default value is 7008.
*
* @param port the new port number for the aggregator.
*/
public void setPort(final int port) {
this.agg.setPort(port);
}
/**
* Retrieves the next buffered sample, blocking until one becomes available.
*
* @return the next sample, or {@code null} if the calling thread was
* interrupted while waiting.
* @throws IllegalStateException if the connection to the aggregator has terminated.
*/
public SampleMessage getNextSample() {
if (this.connected) {
try {
return this.sampleQueue.take();
} catch (InterruptedException e) {
log.error(
"Interrupted while waiting for next sample to arrive.",
e);
}
return null;
}
throw new IllegalStateException(
"Connection to the aggregator has terminated.");
}
/**
* Returns {@code true} if there is a Sample available for immediate
* consumption on the next call to {@link #getNextSample()}. Note that this
* is a "soft" state, meaning that if more than one thread has access to
* this {@code SolverAggregatorConnection}, there is no guarantee that the
* next call will actually succeed without blocking, as another thread may
* have consumed the available Sample.
*
* @return {@code true} if {@code getNextSample()} can be called without
* blocking, else {@code false}.
*/
public boolean hasNext() {
return !this.sampleQueue.isEmpty();
}
/**
* Returns the current connection state for this {@code SolverAggregatorConnection}.
*
* @return {@code true} if the connection to the aggregator is established, else {@code false}.
*/
public boolean isConnected() {
return this.connected;
}
/**
* Adds a Subscription Request Rule to the aggregator interface. If the
* aggregator is already connected, the rule will be sent immediately,
* otherwise it will be sent with all rules when the aggregator is
* connected.
*
* @param rule
* the rule to add to this aggregator.
* @return the rule number, which can be used later to remove a Subscription
* Request Rule, or -1 if the rule was already configured.
*/
public int addRule(final SubscriptionRequestRule rule) {
Integer theRuleNum = Integer.valueOf(this.nextRuleNum.getAndIncrement());
synchronized (this.ruleMap) {
if (!this.ruleMap.values().contains(rule)) {
this.ruleMap.put(theRuleNum, rule);
SubscriptionRequestRule[] newRules = this.ruleMap.values().toArray(new SubscriptionRequestRule[]{});
this.agg.setRules(newRules);
if (this.agg.isConnected()) {
SubscriptionMessage msg = new SubscriptionMessage();
msg.setRules(new SubscriptionRequestRule[] { rule });
msg.setMessageType(SubscriptionMessage.SUBSCRIPTION_MESSAGE_ID);
this.agg.getSession().write(msg);
}
return theRuleNum.intValue();
}
log.warn("Rule {} is already configured for use.", rule);
return -1;
}
}
/**
* Removes a rule from this aggregator based on the rule number returned by
* {@link #addRule(SubscriptionRequestRule)}. At this time, it will not
* cancel the subscription request rule on the aggregator, but instead will
* cause a reconnection to the aggregator to refresh the rules.
*
* @param ruleNum
* the number of the rule to cancel.
* @return the rule that was cancelled, if one matching the rule number was
* present for this aggregator.
*/
public SubscriptionRequestRule removeRule(final int ruleNum) {
synchronized (this.ruleMap) {
SubscriptionRequestRule rule = this.ruleMap.remove(Integer
.valueOf(ruleNum));
if (this.agg.isConnected()) {
this.agg._disconnect();
}
return rule;
}
}
/**
* Returns {@code true} if this interface will log a warning message each
* time a sample is dropped due to a full buffer. By default, this interface
* will not warn about dropped packets, as the warning itself may cause even greater
* sample loss.
*
* @return {@code true} if this interface will warn on dropped samples, else
* {@code false}.
*/
public boolean isBufferWarningEnabled() {
return this.warnBufferFull;
}
/**
* Set this to {@code true} to enable warning messages each time a sample
* cannot be delivered due to a full buffer. Take care when setting this value
* to {@code true}, as excessive log messages may increase sample loss.
*
* @param warnBufferFull
* {@code true} to generate a warning log message for each lost sample.
*/
public void setBufferWarning(boolean warnBufferFull) {
this.warnBufferFull = warnBufferFull;
}
@Override
public String toString(){
return "Aggregator @ " + this.agg.getHost() + ":" + this.agg.getPort();
}
/**
* Called when the connection to the aggregator has permanently died.
* @param aggregator the aggregator interface whose connection ended.
*/
void connectionEnded(SolverAggregatorInterface aggregator) {
this.connected = false;
synchronized (this.sampleQueue) {
this.sampleQueue.notifyAll();
}
}
/**
* Called when the aggregator sends a sample. Enqueues the sample into the internal buffer.
* @param aggregator the aggregator that sent the sample.
* @param sample the sample that was sent.
*/
void sampleReceived(SolverAggregatorInterface aggregator,
SampleMessage sample) {
if (!this.sampleQueue.offer(sample) && this.warnBufferFull) {
log.warn("Unable to insert a sample due to a full buffer.");
}
}
/**
* Indicates whether a subscription response has been received on the current session. When {@code true},
* samples may be buffered.
* @return {@code true} if at least 1 subscription response was received.
*/
public boolean isSubscriptionAcknowledged() {
return this.subscriptionAcknowledged;
}
}
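// Hypothetical usage sketch (not part of the original library): a solver that connects to
// an aggregator and then drains buffered samples. The host name and buffer size are
// placeholders; a real solver would also register one or more SubscriptionRequestRule
// instances via addRule(...) before connecting.
class SolverAggregatorConnectionExample {
    public static void main(String[] args) {
        SolverAggregatorConnection conn = new SolverAggregatorConnection(2048);
        conn.setHost("aggregator.example.org"); // placeholder host
        conn.setPort(7008);
        // conn.addRule(rule); // subscription rules would normally be added here
        if (!conn.connect(5000)) {
            System.err.println("Could not reach the aggregator within 5 seconds.");
            return;
        }
        try {
            while (conn.isConnected()) {
                SampleMessage sample = conn.getNextSample();
                if (sample != null) {
                    // Process the sample here.
                }
            }
        } catch (IllegalStateException e) {
            // The connection ended while a sample was being awaited.
        } finally {
            conn.disconnect();
        }
    }
}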
|
package com.raoulvdberge.refinedstorage.proxy;
import com.raoulvdberge.refinedstorage.RS;
import com.raoulvdberge.refinedstorage.RSBlocks;
import com.raoulvdberge.refinedstorage.RSItems;
import com.raoulvdberge.refinedstorage.RSUtils;
import com.raoulvdberge.refinedstorage.apiimpl.API;
import com.raoulvdberge.refinedstorage.apiimpl.autocrafting.craftingmonitor.*;
import com.raoulvdberge.refinedstorage.apiimpl.autocrafting.preview.CraftingPreviewElementFluidStack;
import com.raoulvdberge.refinedstorage.apiimpl.autocrafting.preview.CraftingPreviewElementItemStack;
import com.raoulvdberge.refinedstorage.apiimpl.autocrafting.registry.CraftingTaskFactory;
import com.raoulvdberge.refinedstorage.apiimpl.network.readerwriter.ReaderWriterHandlerFluids;
import com.raoulvdberge.refinedstorage.apiimpl.network.readerwriter.ReaderWriterHandlerItems;
import com.raoulvdberge.refinedstorage.apiimpl.network.readerwriter.ReaderWriterHandlerRedstone;
import com.raoulvdberge.refinedstorage.apiimpl.solderer.*;
import com.raoulvdberge.refinedstorage.apiimpl.storage.fluid.FluidStorageNBT;
import com.raoulvdberge.refinedstorage.apiimpl.storage.item.ItemStorageNBT;
import com.raoulvdberge.refinedstorage.block.*;
import com.raoulvdberge.refinedstorage.gui.GuiHandler;
import com.raoulvdberge.refinedstorage.integration.craftingtweaks.IntegrationCraftingTweaks;
import com.raoulvdberge.refinedstorage.integration.forgeenergy.ReaderWriterHandlerForgeEnergy;
import com.raoulvdberge.refinedstorage.integration.tesla.IntegrationTesla;
import com.raoulvdberge.refinedstorage.integration.tesla.ReaderWriterHandlerTesla;
import com.raoulvdberge.refinedstorage.item.*;
import com.raoulvdberge.refinedstorage.network.*;
import com.raoulvdberge.refinedstorage.tile.*;
import com.raoulvdberge.refinedstorage.tile.craftingmonitor.TileCraftingMonitor;
import com.raoulvdberge.refinedstorage.tile.data.ContainerListener;
import com.raoulvdberge.refinedstorage.tile.data.TileDataManager;
import com.raoulvdberge.refinedstorage.tile.externalstorage.TileExternalStorage;
import com.raoulvdberge.refinedstorage.tile.grid.TileGrid;
import net.minecraft.block.Block;
import net.minecraft.init.Blocks;
import net.minecraft.init.Items;
import net.minecraft.item.Item;
import net.minecraft.item.ItemStack;
import net.minecraftforge.common.MinecraftForge;
import net.minecraftforge.fml.common.event.FMLInitializationEvent;
import net.minecraftforge.fml.common.event.FMLPostInitializationEvent;
import net.minecraftforge.fml.common.event.FMLPreInitializationEvent;
import net.minecraftforge.fml.common.network.ByteBufUtils;
import net.minecraftforge.fml.common.network.NetworkRegistry;
import net.minecraftforge.fml.common.registry.GameRegistry;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.oredict.OreDictionary;
import net.minecraftforge.oredict.ShapedOreRecipe;
import java.util.ArrayList;
import java.util.List;
public class ProxyCommon {
protected List<BlockCable> cableTypes = new ArrayList<>();
public void preInit(FMLPreInitializationEvent e) {
if (IntegrationCraftingTweaks.isLoaded()) {
IntegrationCraftingTweaks.register();
}
API.deliver(e.getAsmData());
API.instance().getCraftingTaskRegistry().addFactory(CraftingTaskFactory.ID, new CraftingTaskFactory());
API.instance().getCraftingMonitorElementRegistry().add(CraftingMonitorElementItemRender.ID, buf -> new CraftingMonitorElementItemRender(buf.readInt(), ByteBufUtils.readItemStack(buf), buf.readInt(), buf.readInt()));
API.instance().getCraftingMonitorElementRegistry().add(CraftingMonitorElementFluidRender.ID, buf -> new CraftingMonitorElementFluidRender(buf.readInt(), RSUtils.readFluidStack(buf).getRight(), buf.readInt()));
API.instance().getCraftingMonitorElementRegistry().add(CraftingMonitorElementText.ID, buf -> new CraftingMonitorElementText(ByteBufUtils.readUTF8String(buf), buf.readInt()));
API.instance().getCraftingMonitorElementRegistry().add(CraftingMonitorElementError.ID, buf -> {
String id = ByteBufUtils.readUTF8String(buf);
String tooltip = ByteBufUtils.readUTF8String(buf);
return new CraftingMonitorElementError(API.instance().getCraftingMonitorElementRegistry().getFactory(id).apply(buf), tooltip);
});
API.instance().getCraftingMonitorElementRegistry().add(CraftingMonitorElementInfo.ID, buf -> {
String id = ByteBufUtils.readUTF8String(buf);
String tooltip = ByteBufUtils.readUTF8String(buf);
return new CraftingMonitorElementInfo(API.instance().getCraftingMonitorElementRegistry().getFactory(id).apply(buf), tooltip);
});
API.instance().getCraftingPreviewElementRegistry().add(CraftingPreviewElementItemStack.ID, CraftingPreviewElementItemStack::fromByteBuf);
API.instance().getCraftingPreviewElementRegistry().add(CraftingPreviewElementFluidStack.ID, CraftingPreviewElementFluidStack::fromByteBuf);
API.instance().getReaderWriterHandlerRegistry().add(ReaderWriterHandlerItems.ID, ReaderWriterHandlerItems::new);
API.instance().getReaderWriterHandlerRegistry().add(ReaderWriterHandlerFluids.ID, ReaderWriterHandlerFluids::new);
API.instance().getReaderWriterHandlerRegistry().add(ReaderWriterHandlerRedstone.ID, tag -> new ReaderWriterHandlerRedstone());
API.instance().getReaderWriterHandlerRegistry().add(ReaderWriterHandlerForgeEnergy.ID, ReaderWriterHandlerForgeEnergy::new);
if (IntegrationTesla.isLoaded()) {
API.instance().getReaderWriterHandlerRegistry().add(ReaderWriterHandlerTesla.ID, ReaderWriterHandlerTesla::new);
}
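// Network messages: the discriminator ids below are assigned sequentially, so the
// registration order must stay identical on the client and the server for packets to be
// decoded correctly.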
int id = 0;
RS.INSTANCE.network.registerMessage(MessageTileDataParameter.class, MessageTileDataParameter.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageTileDataParameterUpdate.class, MessageTileDataParameterUpdate.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridItemInsertHeld.class, MessageGridItemInsertHeld.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridItemPull.class, MessageGridItemPull.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridCraftingClear.class, MessageGridCraftingClear.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridCraftingTransfer.class, MessageGridCraftingTransfer.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageWirelessGridSettingsUpdate.class, MessageWirelessGridSettingsUpdate.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridCraftingStart.class, MessageGridCraftingStart.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridPatternCreate.class, MessageGridPatternCreate.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageCraftingMonitorCancel.class, MessageCraftingMonitorCancel.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageCraftingMonitorElements.class, MessageCraftingMonitorElements.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageGridItemUpdate.class, MessageGridItemUpdate.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageGridItemDelta.class, MessageGridItemDelta.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageGridFluidUpdate.class, MessageGridFluidUpdate.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageGridFluidDelta.class, MessageGridFluidDelta.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageGridFluidPull.class, MessageGridFluidPull.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridFluidInsertHeld.class, MessageGridFluidInsertHeld.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageProcessingPatternEncoderClear.class, MessageProcessingPatternEncoderClear.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridFilterUpdate.class, MessageGridFilterUpdate.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridCraftingPreview.class, MessageGridCraftingPreview.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageGridCraftingPreviewResponse.class, MessageGridCraftingPreviewResponse.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageProcessingPatternEncoderTransfer.class, MessageProcessingPatternEncoderTransfer.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageReaderWriterUpdate.class, MessageReaderWriterUpdate.class, id++, Side.CLIENT);
RS.INSTANCE.network.registerMessage(MessageReaderWriterChannelAdd.class, MessageReaderWriterChannelAdd.class, id++, Side.SERVER);
RS.INSTANCE.network.registerMessage(MessageReaderWriterChannelRemove.class, MessageReaderWriterChannelRemove.class, id++, Side.SERVER);
NetworkRegistry.INSTANCE.registerGuiHandler(RS.INSTANCE, new GuiHandler());
MinecraftForge.EVENT_BUS.register(new ContainerListener());
registerTile(TileController.class, "controller");
registerTile(TileGrid.class, "grid");
registerTile(TileDiskDrive.class, "disk_drive");
registerTile(TileExternalStorage.class, "external_storage");
registerTile(TileImporter.class, "importer");
registerTile(TileExporter.class, "exporter");
registerTile(TileDetector.class, "detector");
registerTile(TileSolderer.class, "solderer");
registerTile(TileDestructor.class, "destructor");
registerTile(TileConstructor.class, "constructor");
registerTile(TileStorage.class, "storage");
registerTile(TileRelay.class, "relay");
registerTile(TileInterface.class, "interface");
registerTile(TileCraftingMonitor.class, "crafting_monitor");
registerTile(TileWirelessTransmitter.class, "wireless_transmitter");
registerTile(TileCrafter.class, "crafter");
registerTile(TileProcessingPatternEncoder.class, "processing_pattern_encoder");
registerTile(TileCable.class, "cable");
registerTile(TileNetworkReceiver.class, "network_receiver");
registerTile(TileNetworkTransmitter.class, "network_transmitter");
registerTile(TileFluidInterface.class, "fluid_interface");
registerTile(TileFluidStorage.class, "fluid_storage");
registerTile(TileDiskManipulator.class, "disk_manipulator");
registerTile(TileReader.class, "reader");
registerTile(TileWriter.class, "writer");
registerBlock(RSBlocks.CONTROLLER);
registerBlock(RSBlocks.GRID);
registerBlock(RSBlocks.CRAFTING_MONITOR);
registerBlock(RSBlocks.CRAFTER);
registerBlock(RSBlocks.PROCESSING_PATTERN_ENCODER);
registerBlock(RSBlocks.DISK_DRIVE);
registerBlock(RSBlocks.STORAGE);
registerBlock(RSBlocks.FLUID_STORAGE);
registerBlock(RSBlocks.SOLDERER);
registerBlock(RSBlocks.CABLE);
registerBlock(RSBlocks.IMPORTER);
registerBlock(RSBlocks.EXPORTER);
registerBlock(RSBlocks.EXTERNAL_STORAGE);
registerBlock(RSBlocks.CONSTRUCTOR);
registerBlock(RSBlocks.DESTRUCTOR);
registerBlock(RSBlocks.READER);
registerBlock(RSBlocks.WRITER);
registerBlock(RSBlocks.DETECTOR);
registerBlock(RSBlocks.RELAY);
registerBlock(RSBlocks.INTERFACE);
registerBlock(RSBlocks.FLUID_INTERFACE);
registerBlock(RSBlocks.WIRELESS_TRANSMITTER);
registerBlock(RSBlocks.MACHINE_CASING);
registerBlock(RSBlocks.NETWORK_TRANSMITTER);
registerBlock(RSBlocks.NETWORK_RECEIVER);
registerBlock(RSBlocks.DISK_MANIPULATOR);
registerItem(RSItems.QUARTZ_ENRICHED_IRON);
registerItem(RSItems.STORAGE_DISK);
registerItem(RSItems.FLUID_STORAGE_DISK);
registerItem(RSItems.STORAGE_HOUSING);
registerItem(RSItems.PATTERN);
registerItem(RSItems.STORAGE_PART);
registerItem(RSItems.FLUID_STORAGE_PART);
registerItem(RSItems.WIRELESS_GRID);
registerItem(RSItems.PROCESSOR);
registerItem(RSItems.CORE);
registerItem(RSItems.SILICON);
registerItem(RSItems.UPGRADE);
registerItem(RSItems.GRID_FILTER);
registerItem(RSItems.NETWORK_CARD);
registerItem(RSItems.WRENCH);
registerItem(RSItems.WIRELESS_CRAFTING_MONITOR);
OreDictionary.registerOre("itemSilicon", RSItems.SILICON);
// Processors
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipePrintedProcessor(ItemProcessor.TYPE_PRINTED_BASIC));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipePrintedProcessor(ItemProcessor.TYPE_PRINTED_IMPROVED));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipePrintedProcessor(ItemProcessor.TYPE_PRINTED_ADVANCED));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipePrintedProcessor(ItemProcessor.TYPE_PRINTED_SILICON));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeProcessor(ItemProcessor.TYPE_BASIC));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeProcessor(ItemProcessor.TYPE_IMPROVED));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeProcessor(ItemProcessor.TYPE_ADVANCED));
// Silicon
GameRegistry.addSmelting(Items.QUARTZ, new ItemStack(RSItems.SILICON), 0.5f);
// Quartz Enriched Iron
GameRegistry.addRecipe(new ItemStack(RSItems.QUARTZ_ENRICHED_IRON, 4),
"II",
"IQ",
'I', new ItemStack(Items.IRON_INGOT),
'Q', new ItemStack(Items.QUARTZ)
);
// Machine Casing
GameRegistry.addRecipe(new ItemStack(RSBlocks.MACHINE_CASING),
"EEE",
"E E",
"EEE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
);
// Construction Core
GameRegistry.addShapelessRecipe(new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC),
new ItemStack(Items.GLOWSTONE_DUST)
);
// Destruction Core
GameRegistry.addShapelessRecipe(new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC),
new ItemStack(Items.QUARTZ)
);
// Relay
GameRegistry.addShapelessRecipe(new ItemStack(RSBlocks.RELAY),
new ItemStack(RSBlocks.MACHINE_CASING),
new ItemStack(RSBlocks.CABLE),
new ItemStack(Blocks.REDSTONE_TORCH)
);
// Controller
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSBlocks.CONTROLLER, 1, EnumControllerType.NORMAL.getId()),
"EDE",
"SMS",
"ESE",
'D', new ItemStack(Items.DIAMOND),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'S', "itemSilicon"
));
// Solderer
GameRegistry.addRecipe(new ItemStack(RSBlocks.SOLDERER),
"ESE",
"E E",
"ESE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'S', new ItemStack(Blocks.STICKY_PISTON)
);
// Disk Drive
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.DISK_DRIVE),
500,
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
new ItemStack(RSBlocks.MACHINE_CASING),
new ItemStack(Blocks.CHEST)
));
// Cable
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSBlocks.CABLE, 12),
"EEE",
"GRG",
"EEE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'G', "blockGlass",
'R', new ItemStack(Items.REDSTONE)
));
// Wireless Transmitter
GameRegistry.addRecipe(new ItemStack(RSBlocks.WIRELESS_TRANSMITTER),
"EPE",
"EME",
"EAE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
'P', new ItemStack(Items.ENDER_PEARL),
'M', new ItemStack(RSBlocks.MACHINE_CASING)
);
// Grid
GameRegistry.addRecipe(new ItemStack(RSBlocks.GRID, 1, EnumGridType.NORMAL.getId()),
"ECE",
"PMP",
"EDE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'M', new ItemStack(RSBlocks.MACHINE_CASING)
);
// Crafting Grid
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.GRID, 1, EnumGridType.CRAFTING.getId()),
500,
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
new ItemStack(RSBlocks.GRID, 1, EnumGridType.NORMAL.getId()),
new ItemStack(Blocks.CRAFTING_TABLE)
));
// Pattern Grid
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.GRID, 1, EnumGridType.PATTERN.getId()),
500,
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
new ItemStack(RSBlocks.GRID, 1, EnumGridType.NORMAL.getId()),
new ItemStack(RSItems.PATTERN)
));
// Fluid Grid
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.GRID, 1, EnumGridType.FLUID.getId()),
500,
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
new ItemStack(RSBlocks.GRID, 1, EnumGridType.NORMAL.getId()),
new ItemStack(Items.BUCKET)
));
// Wireless Grid
GameRegistry.addRecipe(new ItemStack(RSItems.WIRELESS_GRID, 1, ItemWirelessGrid.TYPE_NORMAL),
"EPE",
"EGE",
"EAE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(Items.ENDER_PEARL),
'G', new ItemStack(RSBlocks.GRID, 1, EnumGridType.NORMAL.getId()),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED)
);
// Wireless Crafting Monitor
GameRegistry.addRecipe(new ItemStack(RSItems.WIRELESS_CRAFTING_MONITOR, 1, ItemWirelessCraftingMonitor.TYPE_NORMAL),
"EPE",
"EME",
"EAE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(Items.ENDER_PEARL),
'M', new ItemStack(RSBlocks.CRAFTING_MONITOR),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED)
);
// Crafter
GameRegistry.addRecipe(new ItemStack(RSBlocks.CRAFTER),
"ECE",
"AMA",
"EDE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'M', new ItemStack(RSBlocks.MACHINE_CASING)
);
// Processing Pattern Encoder
GameRegistry.addRecipe(new ItemStack(RSBlocks.PROCESSING_PATTERN_ENCODER),
"ECE",
"PMP",
"EFE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'P', new ItemStack(RSItems.PATTERN),
'C', new ItemStack(Blocks.CRAFTING_TABLE),
'F', new ItemStack(Blocks.FURNACE)
);
// External Storage
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSBlocks.EXTERNAL_STORAGE),
"CED",
"HMH",
"EPE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'H', "chest",
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'M', new ItemStack(RSBlocks.CABLE),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
));
// Importer
GameRegistry.addShapelessRecipe(new ItemStack(RSBlocks.IMPORTER),
new ItemStack(RSBlocks.CABLE),
new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
);
// Exporter
GameRegistry.addShapelessRecipe(new ItemStack(RSBlocks.EXPORTER),
new ItemStack(RSBlocks.CABLE),
new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
);
// Destructor
GameRegistry.addShapedRecipe(new ItemStack(RSBlocks.DESTRUCTOR),
"EDE",
"RMR",
"EIE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'R', new ItemStack(Items.REDSTONE),
'M', new ItemStack(RSBlocks.CABLE),
'I', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
);
// Constructor
GameRegistry.addShapedRecipe(new ItemStack(RSBlocks.CONSTRUCTOR),
"ECE",
"RMR",
"EIE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'R', new ItemStack(Items.REDSTONE),
'M', new ItemStack(RSBlocks.CABLE),
'I', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
);
// Detector
GameRegistry.addRecipe(new ItemStack(RSBlocks.DETECTOR),
"ECE",
"RMR",
"EPE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'R', new ItemStack(Items.REDSTONE),
'C', new ItemStack(Items.COMPARATOR),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
);
// Storage Parts
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_1K),
"SES",
"GRG",
"SGS",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'S', "itemSilicon",
'G', "blockGlass"
));
GameRegistry.addRecipe(new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_4K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC),
'S', new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_1K)
);
GameRegistry.addRecipe(new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_16K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED),
'S', new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_4K)
);
GameRegistry.addRecipe(new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_64K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
'S', new ItemStack(RSItems.STORAGE_PART, 1, ItemStoragePart.TYPE_16K)
);
// Fluid Storage Parts
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_64K),
"SES",
"GRG",
"SGS",
'R', new ItemStack(Items.BUCKET),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'S', "itemSilicon",
'G', "blockGlass"
));
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_128K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.BUCKET),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC),
'S', new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_64K)
));
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_256K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.BUCKET),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED),
'S', new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_128K)
));
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_512K),
"PEP",
"SRS",
"PSP",
'R', new ItemStack(Items.BUCKET),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED),
'S', new ItemStack(RSItems.FLUID_STORAGE_PART, 1, ItemFluidStoragePart.TYPE_256K)
));
// Storage Housing
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.STORAGE_HOUSING),
"GRG",
"R R",
"EEE",
'G', "blockGlass",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
));
// Storage Disks
for (int type = 0; type <= 3; ++type) {
ItemStack disk = ItemStorageNBT.createStackWithNBT(new ItemStack(RSItems.STORAGE_DISK, 1, type));
GameRegistry.addRecipe(new ShapedOreRecipe(disk,
"GRG",
"RPR",
"EEE",
'G', "blockGlass",
'R', new ItemStack(Items.REDSTONE),
'P', new ItemStack(RSItems.STORAGE_PART, 1, type),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
));
GameRegistry.addShapelessRecipe(disk,
new ItemStack(RSItems.STORAGE_HOUSING),
new ItemStack(RSItems.STORAGE_PART, 1, type)
);
}
// Fluid Storage Disks
for (int type = 0; type <= 3; ++type) {
ItemStack disk = FluidStorageNBT.createStackWithNBT(new ItemStack(RSItems.FLUID_STORAGE_DISK, 1, type));
GameRegistry.addRecipe(new ShapedOreRecipe(disk,
"GRG",
"RPR",
"EEE",
'G', "blockGlass",
'R', new ItemStack(Items.REDSTONE),
'P', new ItemStack(RSItems.FLUID_STORAGE_PART, 1, type),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
));
GameRegistry.addShapelessRecipe(disk,
new ItemStack(RSItems.STORAGE_HOUSING),
new ItemStack(RSItems.FLUID_STORAGE_PART, 1, type)
);
}
// Pattern
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.PATTERN),
"GRG",
"RGR",
"EEE",
'G', "blockGlass",
'R', new ItemStack(Items.REDSTONE),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
));
// Upgrade
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSItems.UPGRADE, 1, 0),
"EGE",
"EPE",
"EGE",
'G', "blockGlass",
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED),
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON)
));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.TYPE_RANGE));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.TYPE_SPEED));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.TYPE_INTERDIMENSIONAL));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.TYPE_SILK_TOUCH));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.TYPE_CRAFTING));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.initializeForFortune(1)));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.initializeForFortune(2)));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeUpgrade(ItemUpgrade.initializeForFortune(3)));
GameRegistry.addShapedRecipe(new ItemStack(RSItems.UPGRADE, 1, ItemUpgrade.TYPE_STACK),
"USU",
"SUS",
"USU",
'U', new ItemStack(Items.SUGAR),
'S', new ItemStack(RSItems.UPGRADE, 1, ItemUpgrade.TYPE_SPEED)
);
// Storage Blocks
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeStorage(EnumItemStorageType.TYPE_1K, ItemStoragePart.TYPE_1K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeStorage(EnumItemStorageType.TYPE_4K, ItemStoragePart.TYPE_4K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeStorage(EnumItemStorageType.TYPE_16K, ItemStoragePart.TYPE_16K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeStorage(EnumItemStorageType.TYPE_64K, ItemStoragePart.TYPE_64K));
// Fluid Storage Blocks
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeFluidStorage(EnumFluidStorageType.TYPE_64K, ItemFluidStoragePart.TYPE_64K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeFluidStorage(EnumFluidStorageType.TYPE_128K, ItemFluidStoragePart.TYPE_128K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeFluidStorage(EnumFluidStorageType.TYPE_256K, ItemFluidStoragePart.TYPE_256K));
API.instance().getSoldererRegistry().addRecipe(new SoldererRecipeFluidStorage(EnumFluidStorageType.TYPE_512K, ItemFluidStoragePart.TYPE_512K));
// Crafting Monitor
GameRegistry.addRecipe(new ShapedOreRecipe(new ItemStack(RSBlocks.CRAFTING_MONITOR),
"EGE",
"GMG",
"EPE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'G', "blockGlass",
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_IMPROVED)
));
// Interface
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.INTERFACE),
200,
new ItemStack(RSBlocks.IMPORTER),
new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC),
new ItemStack(RSBlocks.EXPORTER)
));
// Fluid Interface
API.instance().getSoldererRegistry().addRecipe(API.instance().getSoldererRegistry().createSimpleRecipe(
new ItemStack(RSBlocks.FLUID_INTERFACE),
200,
new ItemStack(Items.BUCKET),
new ItemStack(RSBlocks.INTERFACE),
new ItemStack(Items.BUCKET)
));
// Grid Filter
GameRegistry.addShapedRecipe(new ItemStack(RSItems.GRID_FILTER),
"EPE",
"PHP",
"EPE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(Items.PAPER),
'H', new ItemStack(Blocks.HOPPER)
);
// Network Card
GameRegistry.addShapedRecipe(new ItemStack(RSItems.NETWORK_CARD),
"EEE",
"PAP",
"EEE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(Items.PAPER),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED)
);
// Network Transmitter
GameRegistry.addShapedRecipe(new ItemStack(RSBlocks.NETWORK_TRANSMITTER),
"EEE",
"CMD",
"AAA",
'E', new ItemStack(Items.ENDER_PEARL),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED)
);
// Network Receiver
GameRegistry.addShapedRecipe(new ItemStack(RSBlocks.NETWORK_RECEIVER),
"AAA",
"CMD",
"EEE",
'E', new ItemStack(Items.ENDER_PEARL),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION),
'A', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_ADVANCED)
);
// Disk Manipulator
GameRegistry.addShapedRecipe(new ItemStack(RSBlocks.DISK_MANIPULATOR),
"ESE",
"CMD",
"ESE",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'S', new ItemStack(RSItems.STORAGE_HOUSING),
'C', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_CONSTRUCTION),
'M', new ItemStack(RSBlocks.MACHINE_CASING),
'D', new ItemStack(RSItems.CORE, 1, ItemCore.TYPE_DESTRUCTION)
);
// Wrench
GameRegistry.addShapedRecipe(new ItemStack(RSItems.WRENCH),
"EPE",
"EEE",
" E ",
'E', new ItemStack(RSItems.QUARTZ_ENRICHED_IRON),
'P', new ItemStack(RSItems.PROCESSOR, 1, ItemProcessor.TYPE_BASIC)
);
}
public void init(FMLInitializationEvent e) {
// NO OP
}
public void postInit(FMLPostInitializationEvent e) {
// NO OP
}
private void registerBlock(BlockBase block) {
GameRegistry.<Block>register(block);
GameRegistry.register(block.createItem());
}
private void registerBlock(BlockCable cable) {
GameRegistry.<Block>register(cable);
GameRegistry.register(new ItemBlockBase(cable, cable.getPlacementType(), false));
cableTypes.add(cable);
}
private void registerTile(Class<? extends TileBase> tile, String id) {
GameRegistry.registerTileEntity(tile, RS.ID + ":" + id);
try {
TileBase tileInstance = tile.newInstance();
tileInstance.getDataManager().getParameters().forEach(TileDataManager::registerParameter);
} catch (InstantiationException | IllegalAccessException e) {
e.printStackTrace();
}
}
private void registerItem(Item item) {
GameRegistry.register(item);
}
}
|
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import org.json.JSONArray;
import org.json.JSONObject;
import org.jsoup.Jsoup;
import com.rarchives.ripme.ripper.AbstractRipper;
import com.rarchives.ripme.utils.Utils;
public class GonewildRipper extends AbstractRipper {
private static final String HOST = "gonewild";
private static final Logger logger = Logger.getLogger(GonewildRipper.class);
private static final int SLEEP_TIME = 1000;
private static String API_DOMAIN;
private String username;
public GonewildRipper(URL url) throws IOException {
super(url);
API_DOMAIN = Utils.getConfigString("gw.api", "gonewild");
}
@Override
public boolean canRip(URL url) {
return getUsernameMatcher(url).matches();
}
private Matcher getUsernameMatcher(URL url) {
Pattern p = Pattern.compile("^https?://[a-z]{0,3}\\.?reddit\\.com/(u|user)/([a-zA-Z0-9\\-]{3,})/?.*$");
return p.matcher(url.toExternalForm());
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
@Override
public void rip() throws IOException {
int start = 0,
count = 50;
String baseGwURL = "http://" + API_DOMAIN + ".rarchives.com/api.cgi"
+ "?method=get_user"
+ "&user=" + username
+ "&count=" + count;
String gwURL, jsonString, imagePath;
JSONArray posts, images;
JSONObject json, post, image;
while (true) {
logger.info(" Retrieving posts by " + username);
gwURL = baseGwURL
+ "&start=" + start;
start += count;
jsonString = Jsoup.connect(gwURL)
.ignoreContentType(true)
.execute()
.body();
json = new JSONObject(jsonString);
if (json.has("error")) {
logger.error("Error while retrieving user posts:" + json.getString("error"));
break;
}
posts = json.getJSONArray("posts");
if (posts.length() == 0) {
break; // No more posts to get
}
for (int i = 0; i < posts.length(); i++) {
post = (JSONObject) posts.get(i);
images = post.getJSONArray("images");
for (int j = 0; j < images.length(); j++) {
image = (JSONObject) images.get(j);
imagePath = image.getString("path");
if (imagePath.startsWith("..")) {
imagePath = imagePath.substring(2);
}
imagePath = "http://" + API_DOMAIN + ".rarchives.com" + imagePath;
logger.info(" Found file: " + imagePath);
addURLToDownload(new URL(imagePath));
}
}
try {
Thread.sleep(SLEEP_TIME);
} catch (InterruptedException e) {
logger.error("[!] Interrupted while waiting to load more posts", e);
break;
}
}
waitForThreads();
}
@Override
public String getHost() {
return HOST;
}
@Override
public String getGID(URL url) throws MalformedURLException {
Matcher m = getUsernameMatcher(url);
if (m.matches()) {
this.username = m.group(m.groupCount());
}
return username;
}
}
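// Hypothetical usage sketch (not part of the original tool; ripme normally drives rippers
// through its own UI and dispatcher). It only exercises the URL-matching surface; the URL
// and username are placeholders.
class GonewildRipperExample {
    public static void main(String[] args) throws IOException {
        URL url = new URL("http://www.reddit.com/user/example-user"); // placeholder URL
        GonewildRipper ripper = new GonewildRipper(url);
        System.out.println(ripper.canRip(url)); // true for reddit /u/ or /user/ profile URLs
        System.out.println(ripper.getGID(url)); // prints the captured username, "example-user"
    }
}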
|
package com.skcraft.plume.common.service.sql;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.skcraft.plume.common.DataAccessException;
import com.skcraft.plume.common.UserId;
import com.skcraft.plume.common.service.auth.Group;
import com.skcraft.plume.common.service.auth.Hive;
import com.skcraft.plume.common.service.auth.User;
import com.skcraft.plume.common.service.sql.model.data.tables.records.GroupParentRecord;
import com.skcraft.plume.common.service.sql.model.data.tables.records.GroupRecord;
import com.skcraft.plume.common.service.sql.model.data.tables.records.UserRecord;
import lombok.Getter;
import org.jooq.DSLContext;
import org.jooq.Query;
import org.jooq.Record;
import org.jooq.Result;
import org.jooq.impl.DSL;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.skcraft.plume.common.service.sql.model.data.tables.Group.GROUP;
import static com.skcraft.plume.common.service.sql.model.data.tables.GroupParent.GROUP_PARENT;
import static com.skcraft.plume.common.service.sql.model.data.tables.User.USER;
import static com.skcraft.plume.common.service.sql.model.data.tables.UserGroup.USER_GROUP;
import static com.skcraft.plume.common.service.sql.model.data.tables.UserId.USER_ID;
public class DatabaseHive implements Hive {
@Getter
private final DatabaseManager database;
private ImmutableMap<Integer, Group> groups = ImmutableMap.of();
public DatabaseHive(DatabaseManager database) {
checkNotNull(database, "database");
this.database = database;
}
@Override
public void load() throws DataAccessException {
loadGroups();
}
@Override
public List<Group> getLoadedGroups() {
return Lists.newArrayList(groups.values());
}
private void loadGroups() throws DataAccessException {
try {
DSLContext create = database.create();
// Fetch groups
Builder<Integer, Group> groupsBuilder = ImmutableMap.builder();
for (GroupRecord record : create.selectFrom(GROUP).fetch()) {
Group group = new Group();
group.setId(record.getId());
group.setName(record.getName());
group.setPermissions(ImmutableSet.copyOf(record.getPermissions().toLowerCase().split("\n")));
group.setAutoJoin(record.getAutoJoin() == 1);
groupsBuilder.put(group.getId(), group);
}
ImmutableMap<Integer, Group> groups = groupsBuilder.build();
// Link up parents
for (GroupParentRecord record : create.selectFrom(GROUP_PARENT).fetch()) {
Group group = groups.get(record.getGroupId().intValue());
Group parent = groups.get(record.getParentId().intValue());
if (group != null && parent != null && !group.equals(parent)) {
group.getParents().add(parent);
}
}
this.groups = groups;
} catch (org.jooq.exception.DataAccessException e) {
throw new DataAccessException("Failed to get group data", e);
}
}
public ImmutableMap<Integer, Group> getGroups() {
return groups;
}
@Override
public Map<UserId, User> findUsersById(List<UserId> ids) throws DataAccessException {
checkNotNull(ids, "ids");
checkArgument(!ids.isEmpty(), "empty list provided");
try {
DSLContext create = database.create();
Map<Integer, User> users = Maps.newHashMap(); // Temporary map to first get the users then add their groups
List<String> uuidStrings = ids.stream().map(id -> id.getUuid().toString()).collect(Collectors.toList());
com.skcraft.plume.common.service.sql.model.data.tables.UserId r = USER_ID.as("r");
Result<Record> userRecords = create
.select(USER_ID.fields())
.select(USER.fields())
.select(r.fields())
.from(USER_ID)
.leftOuterJoin(USER).on(USER.USER_ID.eq(USER_ID.ID))
.leftOuterJoin(r).on(USER.REFERRER_ID.eq(r.ID))
.where(USER_ID.UUID.in(uuidStrings))
.fetch();
List<Record> groupRecords = create
.select()
.from(USER_ID.join(USER_GROUP).on(USER_GROUP.USER_ID.eq(USER_ID.ID)))
.where(USER_ID.UUID.in(uuidStrings))
.fetch();
for (Record record : userRecords) {
User user = database.getModelMapper().map(record, User.class);
user.setUserId(database.getUserIdCache().fromRecord(record, USER_ID));
user.setReferrer(database.getUserIdCache().fromRecord(record, r));
users.put(record.getValue(USER_ID.ID), user);
}
for (Record record : groupRecords) {
User user = users.get(record.getValue(USER_ID.ID));
if (user != null) {
Group group = groups.get(record.getValue(USER_GROUP.GROUP_ID));
if (group != null) {
user.getGroups().add(group);
}
}
}
Map<UserId, User> map = Maps.newHashMap();
for (User user : users.values()) {
map.put(user.getUserId(), user);
}
return map;
} catch (org.jooq.exception.DataAccessException e) {
throw new DataAccessException("Failed to get user data", e);
}
}
@Override
public void saveUser(User user, boolean saveGroups) throws DataAccessException {
checkNotNull(user, "user");
try {
DSLContext c = database.create();
int userId = database.getUserIdCache().getOrCreateUserId(c, user.getUserId());
UserRecord record = c.newRecord(USER, user);
record.setUserId(userId);
if (user.getReferrer() != null) {
int referrerId = database.getUserIdCache().getOrCreateUserId(c, user.getReferrer());
record.setReferrerId(referrerId);
}
c.transaction(configuration -> {
DSLContext create = DSL.using(configuration);
create.insertInto(USER)
.set(record)
.onDuplicateKeyUpdate()
.set(record)
.execute();
if (saveGroups) {
create.deleteFrom(USER_GROUP)
.where(USER_GROUP.USER_ID.eq(userId))
.execute();
List<Query> queries = Lists.newArrayList();
for (Group group : user.getGroups()) {
queries.add(create.insertInto(USER_GROUP)
.set(USER_GROUP.GROUP_ID, group.getId())
.set(USER_GROUP.USER_ID, userId));
}
create.batch(queries).execute();
}
});
} catch (org.jooq.exception.DataAccessException e) {
throw new DataAccessException("Failed to save the user " + user, e);
}
}
}
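// Hypothetical usage sketch (not from the original project): loading the hive and looking
// up users by id. Constructing the DatabaseManager is omitted because its connection
// settings are deployment-specific.
class DatabaseHiveExample {
    static Map<UserId, User> lookUp(DatabaseManager database, List<UserId> ids) throws DataAccessException {
        DatabaseHive hive = new DatabaseHive(database);
        hive.load(); // fetches all groups and links their parent groups
        return hive.findUsersById(ids); // returned users carry their resolved groups
    }
}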
|
package com.tightdb.generator;
import java.io.IOException;
import java.io.Writer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.Filer;
import javax.annotation.processing.Messager;
import javax.annotation.processing.ProcessingEnvironment;
import javax.annotation.processing.RoundEnvironment;
import javax.lang.model.element.TypeElement;
import javax.lang.model.util.Elements;
import javax.lang.model.util.Types;
import javax.tools.Diagnostic.Kind;
import javax.tools.FileObject;
import javax.tools.JavaFileManager.Location;
import javax.tools.StandardLocation;
import org.apache.commons.lang.StringUtils;
public abstract class AbstractAnnotationProcessor extends AbstractProcessor {
private static final String[] SUPPORTED_ANNOTATIONS = { "com.tightdb.lib.Table" };
protected Messager messager;
protected Elements elementUtils;
protected Types typeUtils;
protected Filer filer;
protected Map<String, String> options;
@Override
public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment env) {
info("Entering annotation processor...");
if (!env.processingOver()) {
info("Processing resources...");
try {
processAnnotations(annotations, env);
info("Successfully finished processing.");
} catch (Exception e) {
String info = e.getMessage() != null ? e.getMessage() : "";
String msg = "ERROR: " + info + "\n\n" + StringUtils.join(e.getStackTrace(), "\n");
Throwable cause = e.getCause();
while (cause != null) {
info = cause.getMessage() != null ? cause.getMessage() : "";
msg += "\n\nCause: " + info + "\n" + StringUtils.join(cause.getStackTrace(), "\n");
cause = cause.getCause();
}
error(msg);
}
} else {
info("Last round, processing is done.");
}
return true;
}
protected void info(String msg) {
messager.printMessage(Kind.NOTE, msg);
}
protected void warn(String msg) {
messager.printMessage(Kind.WARNING, msg);
}
protected void error(String msg) {
messager.printMessage(Kind.ERROR, msg);
}
@Override
public synchronized void init(ProcessingEnvironment env) {
super.init(env);
messager = env.getMessager(); // required for logging
info("Initializing annotation processor...");
elementUtils = env.getElementUtils();
typeUtils = env.getTypeUtils();
filer = env.getFiler();
options = env.getOptions();
info("Initialization finished.");
}
@Override
public Set<String> getSupportedAnnotationTypes() {
info("Specifying supported annotations...");
return new HashSet<String>(Arrays.asList(SUPPORTED_ANNOTATIONS));
}
protected abstract void processAnnotations(Set<? extends TypeElement> annotations, RoundEnvironment env) throws Exception;
protected void writeToFile(String pkg, String filename, String content) {
final Location location = StandardLocation.SOURCE_OUTPUT;
Writer writer = null;
try {
FileObject fileRes = filer.createResource(location, pkg, filename);
writer = fileRes.openWriter();
writer.write(content);
} catch (IOException e) {
error("Couldn't write to file: " + filename);
throw new RuntimeException("Couldn't write to file: " + filename, e);
} finally {
if (writer != null) {
try {
writer.close();
} catch (IOException e) {
error("Couldn't write to file: " + filename);
throw new RuntimeException("Couldn't write to file: " + filename, e);
}
}
}
}
}
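// Hypothetical sketch (not part of the original project): a minimal concrete processor
// built on the base class above. The generated package, file name, and source content are
// placeholders; a real processor would emit the table/cursor classes for @Table types.
class ExampleTableProcessor extends AbstractAnnotationProcessor {
    @Override
    protected void processAnnotations(Set<? extends TypeElement> annotations, RoundEnvironment env) throws Exception {
        for (TypeElement annotation : annotations) {
            for (javax.lang.model.element.Element element : env.getElementsAnnotatedWith(annotation)) {
                info("Generating code for " + element.getSimpleName());
                String source = "// generated placeholder for " + element.getSimpleName();
                writeToFile("com.example.generated", element.getSimpleName() + "Generated.java", source);
            }
        }
    }
}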
|
package com.zandor300.simplebounty.commands;
import com.zandor300.simplebounty.SimpleBounty;
import com.zandor300.zsutilities.commandsystem.Command;
import com.zandor300.zsutilities.utilities.uuid.NameFetcher;
import com.zandor300.zsutilities.utilities.uuid.UUIDFetcher;
import org.bukkit.ChatColor;
import org.bukkit.command.CommandSender;
import java.util.Arrays;
import java.util.UUID;
public class BountyCommand extends Command {
public BountyCommand() {
super("bounty", "Bounty command.", "[<user> <reward>]");
}
@Override
public void execute(CommandSender sender, String[] args) {
if (args.length == 0) {
if (!sender.hasPermission("simplebounty.get"))
return;
int i = 1;
for (String uuid : SimpleBounty.getCustomConfig().getConfigurationSection("bounties").getKeys(false)) {
try {
SimpleBounty.getChat().sendMessage(sender, i + ". Wanted " + new NameFetcher(Arrays.asList(UUID.fromString(uuid))).call().get(0).toString() + " for $" + SimpleBounty.getCustomConfig().getInt("bounties." + uuid + ".reward"));
} catch (Exception e) {
e.printStackTrace();
}
i++;
}
return;
} else if (args.length == 2) {
if (!sender.hasPermission("simplebounty.set"))
return;
if (sender.getName().equalsIgnoreCase(args[0])) {
SimpleBounty.getChat().sendMessage(sender, ChatColor.RED + "You can't put a bounty on yourself!");
return;
}
String uuid = "";
try {
uuid = new UUIDFetcher(Arrays.asList(args[0])).call().get(0).toString();
} catch (Exception e) {
SimpleBounty.getChat().sendMessage(sender, "Unable to fetch the uuid of this player.");
}
int reward = 0;
try {
Integer.valueOf(args[1]);
} catch (Exception e) {
SimpleBounty.getChat().sendMessage(sender, "Invalid reward.");
}
SimpleBounty.getCustomConfig().set("bounties." + uuid + ".reward", reward);
SimpleBounty.getChat().broadcastMessage("A $" + reward + " bounty has been placed on " + args[0] + ".");
return;
} else {
sendUsageMessage(sender);
return;
}
}
}
|
package cronapi.odata.server;
import com.google.gson.*;
import cronapi.*;
import cronapi.database.DataSource;
import cronapi.database.DatabaseQueryManager;
import cronapi.database.HistoryListener;
import cronapi.i18n.Messages;
import cronapi.util.ReflectionUtils;
import org.apache.olingo.odata2.api.ClientCallback;
import org.apache.olingo.odata2.api.edm.EdmEntitySet;
import org.apache.olingo.odata2.api.edm.EdmEntityType;
import org.apache.olingo.odata2.api.edm.EdmProperty;
import org.apache.olingo.odata2.api.uri.UriInfo;
import org.apache.olingo.odata2.api.uri.expression.*;
import org.apache.olingo.odata2.api.uri.info.*;
import org.apache.olingo.odata2.core.edm.provider.EdmEntityTypeImplProv;
import org.apache.olingo.odata2.core.edm.provider.EdmSimplePropertyImplProv;
import org.apache.olingo.odata2.core.uri.UriInfoImpl;
import org.apache.olingo.odata2.jpa.processor.api.ODataJPAQueryExtensionEntityListener;
import org.apache.olingo.odata2.jpa.processor.api.exception.ODataJPARuntimeException;
import org.apache.olingo.odata2.jpa.processor.core.ODataExpressionParser;
import org.apache.olingo.odata2.jpa.processor.core.ODataParameterizedWhereExpressionUtil;
import org.apache.olingo.odata2.jpa.processor.core.access.data.ReflectionUtil;
import org.apache.olingo.odata2.jpa.processor.core.access.data.VirtualClass;
import org.apache.olingo.odata2.jpa.processor.core.model.JPAEdmMappingImpl;
import org.eclipse.persistence.internal.jpa.EntityManagerImpl;
import org.eclipse.persistence.internal.jpa.jpql.HermesParser;
import org.eclipse.persistence.internal.sessions.AbstractSession;
import org.eclipse.persistence.jpa.jpql.parser.*;
import org.eclipse.persistence.queries.DatabaseQuery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.persistence.EntityManager;
import javax.persistence.Id;
import javax.persistence.Query;
import javax.persistence.TemporalType;
import java.lang.reflect.Field;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.*;
public class QueryExtensionEntityListener extends ODataJPAQueryExtensionEntityListener {
private static final Logger log = LoggerFactory.getLogger(QueryExtensionEntityListener.class);
private BlocklyQuery GETBlocklyQuery;
private String GETFunctionName;
private void findInputParams(Expression expression, List<String> inputs) {
if (expression instanceof InputParameter) {
inputs.add(expression.toString());
}
if (expression.children() != null) {
expression.children().forEach((e) -> {
findInputParams(e, inputs);
});
}
}
private String getBlocklyMethod(UriInfo uriInfo, JsonObject customQuery) {
String restMethod = getRestMehtod(uriInfo);
return getBlocklyMethod(customQuery, restMethod);
}
private String getBlocklyMethod(JsonObject customQuery, String restMethod) {
if (QueryManager.isNull(customQuery.get("entityFullName"))) {
String blocklyMethod = QueryManager.getBlocklyMethod(customQuery, restMethod);
if (!QueryManager.isNull(customQuery.get("baseEntity")) && "default".equals(blocklyMethod)) {
return null;
} else {
return blocklyMethod;
}
}
return null;
}
private String getRestMehtod(UriInfo uriInfo) {
String restMethod = RestClient.getRestClient().getMethod();
if (uriInfo.getFilter() != null || (uriInfo.getKeyPredicates() != null && uriInfo.getKeyPredicates().size() > 0)) {
restMethod = "FILTER";
}
if (uriInfo.isCount()) {
restMethod = "COUNT";
}
return restMethod;
}
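// Summary of the mapping above (assumption drawn from the logic): requests carrying a
// $filter or a key predicate are treated as "FILTER", $count requests as "COUNT", and
// anything else falls back to the HTTP verb reported by RestClient; this value selects
// which blockly method handles the call.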
public Query getBaseQuery(UriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
try {
JsonObject customQuery = null;
try {
customQuery = QueryManager.getQuery(uriInfo.getTargetEntitySet().getName());
} catch (Exception e) {
// No custom query is registered for this entity set; ignore and fall back to default handling.
}
EdmEntityType entityType = uriInfo.getTargetEntitySet().getEntityType();
boolean isJPQL = entityType.getName().equals("jpql");
if (customQuery != null || isJPQL) {
if (!isJPQL) {
QueryManager.checkSecurity(customQuery, RestClient.getRestClient().getMethod());
}
boolean isBlockly = !isJPQL && QueryManager.isNull(customQuery.get("entityFullName"));
String restMethod = getRestMethod(uriInfo);
if (isBlockly) {
String blocklyMethod = getBlocklyMethod(uriInfo, customQuery);
if (blocklyMethod == null) {
return null;
}
}
Query query = null;
String whereExpression = null;
String selectExpression = null;
SelectStatement selectStatement = null;
String jpqlStatement = "";
String alias = null;
String orderBy = null;
List<String> inputs = new LinkedList<>();
boolean hasGroupBy = false;
if (!isBlockly) {
if (isJPQL) {
jpqlStatement = RestClient.getRestClient().getParameter("jpql");
} else {
jpqlStatement = QueryManager.getJPQL(customQuery, false);
}
if (((EdmEntityTypeImplProv) entityType).getEntityType().getJpql() != null) {
jpqlStatement = ((EdmEntityTypeImplProv) entityType).getEntityType().getJpql();
}
JPQLExpression jpqlExpression = new JPQLExpression(
jpqlStatement,
DefaultEclipseLinkJPQLGrammar.instance(),
true
);
findInputParams(jpqlExpression, inputs);
selectStatement = ((SelectStatement) jpqlExpression.getQueryStatement());
String selection = ((SelectClause) selectStatement.getSelectClause()).getSelectExpression().toActualText();
String mainAlias = JPQLParserUtil.getMainAlias(jpqlExpression);
if (!selection.contains(".") && !selection.contains(",")) {
alias = mainAlias;
}
if (uriInfo.rawEntity()) {
ReflectionUtils.setField(selectStatement, "selectClause", null);
selectExpression = "SELECT " + mainAlias + " ";
jpqlStatement = selectStatement.toString();
}
if (selectStatement.hasOrderByClause()) {
orderBy = selectStatement.getOrderByClause().toString();
ReflectionUtils.setField(selectStatement, "orderByClause", null);
jpqlStatement = selectStatement.toString();
}
if (uriInfo.getOrderBy() != null) {
String orderExpression = ODataExpressionParser.parseToJPAOrderByExpression(uriInfo.getOrderBy(), alias);
orderBy = "ORDER BY " + orderExpression;
}
}
ODataExpressionParser.reInitializePositionalParameters();
Map<String, Map<Integer, Object>> parameterizedExpressionMap = new HashMap<String, Map<Integer, Object>>();
if (uriInfo.getFilter() != null) {
checkFilter(entityType, uriInfo.getFilter());
whereExpression = ODataExpressionParser.parseToJPAWhereExpression(uriInfo.getFilter(), alias);
parameterizedExpressionMap.put(whereExpression, ODataExpressionParser.getPositionalParameters());
ODataParameterizedWhereExpressionUtil.setParameterizedQueryMap(parameterizedExpressionMap);
ODataExpressionParser.reInitializePositionalParameters();
}
if (uriInfo.getKeyPredicates().size() > 0) {
whereExpression = ODataExpressionParser.parseKeyPredicates(uriInfo.getKeyPredicates(), alias);
parameterizedExpressionMap.put(whereExpression, ODataExpressionParser.getPositionalParameters());
ODataParameterizedWhereExpressionUtil.setParameterizedQueryMap(parameterizedExpressionMap);
ODataExpressionParser.reInitializePositionalParameters();
}
String having = null;
String groupBy = null;
if (whereExpression != null) {
String where = null;
if (selectStatement != null && selectStatement.hasWhereClause()) {
where = ((WhereClause) selectStatement.getWhereClause()).getConditionalExpression().toString();
ReflectionUtils.setField(selectStatement, "whereClause", null);
jpqlStatement = selectStatement.toString();
}
if (selectStatement != null && selectStatement.hasGroupByClause()) {
groupBy = ((GroupByClause) selectStatement.getGroupByClause()).toString();
ReflectionUtils.setField(selectStatement, "groupByClause", null);
jpqlStatement = selectStatement.toString();
}
if (selectStatement != null && selectStatement.hasHavingClause()) {
having = ((HavingClause) selectStatement.getHavingClause()).toString();
ReflectionUtils.setField(selectStatement, "havingClause", null);
jpqlStatement = selectStatement.toString();
}
if (where != null && uriInfo.composeWhere()) {
jpqlStatement += " WHERE (" + where + ") AND " + whereExpression;
} else {
jpqlStatement += " WHERE " + whereExpression;
}
if (having != null) {
jpqlStatement += " " + having;
}
if (groupBy != null) {
jpqlStatement += " " + groupBy;
}
}
if (orderBy != null) {
jpqlStatement += " " + orderBy;
}
if (selectExpression != null) {
jpqlStatement = selectExpression + " " + jpqlStatement;
}
Map<String, Map<Integer, Object>> parameterizedMap = ODataParameterizedWhereExpressionUtil.getParameterizedQueryMap();
int maxParam = 0;
if (parameterizedMap != null && parameterizedMap.size() > 0) {
for (Map.Entry<String, Map<Integer, Object>> parameterEntry : parameterizedMap.entrySet()) {
if (jpqlStatement.contains(parameterEntry.getKey())) {
Map<Integer, Object> positionalParameters = parameterEntry.getValue();
for (Map.Entry<Integer, Object> param : positionalParameters.entrySet()) {
if (param.getKey() > maxParam) {
maxParam = param.getKey();
}
}
}
}
}
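// Rewrite the named input parameters found in the JPQL as positional parameters, numbering them after the parameters already generated from the OData filter/key predicates.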
int i = maxParam;
for (String param : inputs) {
i++;
jpqlStatement = jpqlStatement.replace(param, "?" + i);
}
if (!isBlockly) {
query = em.createQuery(jpqlStatement);
} else {
String type = "select";
if (uriInfo.isCount()) {
type = "count";
}
String function = customQuery.getAsJsonObject("blockly").get("blocklyClass").getAsString() + ":" + customQuery.getAsJsonObject("blockly").get("blocklyMethod").getAsString();
query = new BlocklyQuery(customQuery, restMethod, type, jpqlStatement, (uriInfo.getFilter() != null ? uriInfo.getFilter().getExpressionString() : ""), uriInfo.getTargetEntitySet().getName());
((BlocklyQuery) query).setUriInfo(uriInfo);
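// For $count requests, reuse the cached result of the previous GET for the same blockly function so the datasource is not executed a second time.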
if (uriInfo.isCount() && GETFunctionName != null && GETBlocklyQuery != null && GETFunctionName.equalsIgnoreCase(function)) {
if (GETBlocklyQuery.getLastResult() != null && GETBlocklyQuery.getLastResult().getObject() instanceof DataSource) {
long total = ((DataSource) GETBlocklyQuery.getLastResult().getObject()).count();
((BlocklyQuery) query).setLastResult(Var.valueOf(total));
GETFunctionName = null;
GETBlocklyQuery = null;
}
}
if (restMethod.equalsIgnoreCase("GET")) {
GETFunctionName = function;
GETBlocklyQuery = (BlocklyQuery) query;
}
}
if (parameterizedMap != null && parameterizedMap.size() > 0) {
for (Map.Entry<String, Map<Integer, Object>> parameterEntry : parameterizedMap.entrySet()) {
if (jpqlStatement.contains(parameterEntry.getKey())) {
Map<Integer, Object> positionalParameters = parameterEntry.getValue();
for (Map.Entry<Integer, Object> param : positionalParameters.entrySet()) {
if (param.getValue() instanceof Calendar) {
query.setParameter(param.getKey(), (Calendar) param.getValue(), TemporalType.TIMESTAMP);
} else if (param.getValue() instanceof Timestamp) {
// Timestamp is a java.util.Date, so bind it through the Date overload instead of casting to Calendar
query.setParameter(param.getKey(), (Timestamp) param.getValue(), TemporalType.TIMESTAMP);
} else if (param.getValue() instanceof Time) {
query.setParameter(param.getKey(), (Time) param.getValue(), TemporalType.TIME);
} else {
try {
query.setParameter(param.getKey(), param.getValue());
} catch (Exception e) {
Class clazz = query.getParameter(param.getKey()).getParameterType();
if (clazz != null) {
query.setParameter(param.getKey(), convert(param.getValue(), clazz));
} else {
throw new RuntimeException(e);
}
}
}
}
parameterizedMap.remove(parameterEntry.getKey());
ODataParameterizedWhereExpressionUtil.setJPQLStatement(null);
break;
}
}
}
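// Bind the values for the JPQL input parameters: the statement is parsed with EclipseLink's Hermes parser to discover each positional argument's expected type, and the request (or default) values are converted to that type before being set on the query.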
if (inputs.size() > 0) {
AbstractSession session = (AbstractSession) ((EntityManagerImpl) em.getDelegate()).getActiveSession();
HermesParser parser = new HermesParser();
DatabaseQuery queryParsed = parser.buildQuery(jpqlStatement, session);
List<Class> argsTypes = queryParsed.getArgumentTypes();
List<String> argsNames = queryParsed.getArguments();
i = maxParam;
for (String param : inputs) {
i++;
String strValue = RestClient.getRestClient().getParameter(param.substring(1));
int idx = argsNames.indexOf(String.valueOf(i));
Class type = null;
if (idx != -1) {
type = argsTypes.get(idx);
if (strValue != null) {
Var requestParam = null;
if (strValue.contains("@@") || type.getSimpleName().equals("Object")) {
requestParam = Var.valueOf(Var.deserialize(strValue));
} else {
requestParam = Var.valueOf(strValue);
}
if (param.indexOf("__") > 0) {
Class paramClass = Var.getType(param.substring(1));
type = paramClass;
}
query.setParameter(i, requestParam.getObject(type));
} else {
Map<String, Var> customValues = new LinkedHashMap<>();
customValues.put("entityName", Var.valueOf(uriInfo.getTargetEntitySet().getName()));
query.setParameter(i,
QueryManager.getParameterValue(customQuery, param.substring(1), customValues)
.getObject(type));
}
}
}
}
if (uriInfo.isCount() && !isBlockly) {
query = JPQLParserUtil.count(jpqlStatement, query, em);
}
return query;
}
if (entityType.getMapping() != null && ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType() != null) {
Class clazz = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType();
QueryManager.checkSecurity(clazz, RestClient.getRestClient().getMethod());
}
} catch (Exception e) {
if (e.getMessage() != null && e.getMessage().contains("The state field path")) {
throw ErrorResponse.createException(new RuntimeException(Messages.getString("fieldpath")), RestClient.getRestClient().getMethod());
}
throw ErrorResponse.createException(e, RestClient.getRestClient().getMethod());
}
return null;
}
@Override
public Query getQuery(GetEntitySetUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public Query getQuery(GetEntityCountUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public Query getQuery(GetEntitySetCountUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public Query getQuery(GetEntityUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public Query getQuery(PutMergePatchUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public Query getQuery(DeleteUriInfo uriInfo, EntityManager em) throws ODataJPARuntimeException {
return this.getBaseQuery((UriInfo) uriInfo, em);
}
@Override
public boolean authorizeProperty(EdmEntityType entityType, EdmProperty property) {
JsonObject query = null;
try {
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
if (query != null) {
return QueryManager.isFieldAuthorized(query, property.getName(), RestClient.getRestClient().getMethod());
}
if (entityType.getMapping() != null && ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType() != null) {
Class clazz = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType();
return QueryManager.isFieldAuthorized(clazz, property.getName(), RestClient.getRestClient().getMethod());
}
return true;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void checkOprAuthorization(final UriInfo uriView) throws ODataJPARuntimeException {
JsonObject query = null;
try {
EdmEntityType entityType = uriView.getTargetEntitySet().getEntityType();
try {
query = QueryManager.getQuery(uriView.getTargetEntitySet().getName());
} catch (Exception e) {
//No Command
}
if (query != null) {
QueryManager.checkSecurity(query, RestClient.getRestClient().getMethod());
} else {
if (entityType.getMapping() != null && ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType() != null) {
Class clazz = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType();
QueryManager.checkSecurity(clazz, RestClient.getRestClient().getMethod());
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void checkAuthorization(final PostUriInfo postView) throws ODataJPARuntimeException {
this.checkOprAuthorization((UriInfo) postView);
}
@Override
public void checkAuthorization(final PutMergePatchUriInfo putView) throws ODataJPARuntimeException {
this.checkOprAuthorization((UriInfo) putView);
}
@Override
public void checkAuthorization(final DeleteUriInfo deleteView) throws ODataJPARuntimeException {
this.checkOprAuthorization((UriInfo) deleteView);
}
@Override
public void checkEntityGetAuthorization(final EdmEntityType entityType) throws ODataJPARuntimeException {
try {
if (entityType.getMapping() != null && ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType() != null) {
Class clazz = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType();
QueryManager.checkSecurity(clazz, RestClient.getRestClient().getMethod());
}
} catch (Exception e) {
throw ErrorResponse.createException(e, RestClient.getRestClient().getMethod());
}
}
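// Coerces default/calculated values to the original Java type of the mapped JPA property before they are returned to the OData layer.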
private Map<String, Object> convertValues(Map<String, Object> defaults, EdmEntityType entityType) throws Exception {
if (defaults != null) {
for (String key : defaults.keySet()) {
Class clazz = ((JPAEdmMappingImpl) ((EdmSimplePropertyImplProv) entityType.getProperty(key)).getMapping()).getOriginaType();
Object value = defaults.get(key);
value = Var.valueOf(value).getObject(clazz);
defaults.put(key, value);
}
}
return defaults;
}
@Override
public Map<String, Object> getDefaultFieldValues(final EdmEntityType entityType, Object data) throws ODataJPARuntimeException {
JsonObject query = null;
try {
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
if (query != null && RestClient.getRestClient() != null && RestClient.getRestClient().getRequest() != null) {
return convertValues(QueryManager.getDefaultValues(query, data), entityType);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return null;
}
@Override
public Map<String, Object> getCalcFieldValues(final EdmEntityType entityType, Object data) throws ODataJPARuntimeException {
JsonObject query = null;
try {
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
if (query != null && RestClient.getRestClient() != null && RestClient.getRestClient().getRequest() != null) {
return convertValues(QueryManager.getCalcFieldValues(query, data), entityType);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return null;
}
@Override
public void checkFilter(final EdmEntityType entityType, FilterExpression filter) throws ODataJPARuntimeException {
try {
JsonObject query = null;
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
List<String> filters = new LinkedList<>();
visitExpression(filter, filters);
if (query != null) {
QueryManager.checkFilterSecurity(query, filters);
} else {
Class clazz = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType();
QueryManager.checkEntityFilterSecurity(clazz, filters);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
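// The helpers below walk the OData filter expression tree recursively and collect every property name it references, so the filter can be checked against the query's security rules.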
private void doCheckFilter(BinaryExpression expression, List<String> filters) {
visitExpression(expression.getLeftOperand(), filters);
visitExpression(expression.getRightOperand(), filters);
}
private void doCheckFilter(FilterExpression expression, List<String> filters) {
visitExpression(expression.getExpression(), filters);
}
private void doCheckFilter(PropertyExpression expression, List<String> filters) {
filters.add(expression.getPropertyName());
}
private void doCheckFilter(MethodExpression expression, List<String> filters) {
for (CommonExpression e : expression.getParameters()) {
visitExpression(e, filters);
}
}
private void visitExpression(CommonExpression expression, List<String> filters) {
if (expression instanceof BinaryExpression) {
doCheckFilter((BinaryExpression) expression, filters);
} else if (expression instanceof PropertyExpression) {
doCheckFilter((PropertyExpression) expression, filters);
} else if (expression instanceof FilterExpression) {
doCheckFilter((FilterExpression) expression, filters);
} else if (expression instanceof MethodExpression) {
doCheckFilter((MethodExpression) expression, filters);
}
}
@Override
public List<ClientCallback> getClientCallbacks() {
List<ClientCallback> callbacks = null;
for (ClientCommand command : RestClient.getRestClient().getCommands()) {
if (callbacks == null) {
callbacks = new LinkedList<>();
}
callbacks.add(command.toClientCallback());
}
return callbacks;
}
private void beforeAnyOperation(String type, Object object) {
try {
DatabaseQueryManager logManager = HistoryListener.getAuditLogManager();
if (logManager != null) {
GsonBuilder builder = new GsonBuilder().addSerializationExclusionStrategy(new ExclusionStrategy() {
@Override
public boolean shouldSkipField(FieldAttributes fieldAttributes) {
if (fieldAttributes.getDeclaringClass() == object.getClass() || fieldAttributes.getAnnotation(Id.class) != null) {
return false;
}
return true;
}
@Override
public boolean shouldSkipClass(Class<?> aClass) {
return false;
}
});
builder.registerTypeAdapter(Date.class, HistoryListener.UTC_DATE_ADAPTER);
Gson gson = builder.create();
JsonElement objectJson = gson.toJsonTree(object);
Var auditLog = new Var(new LinkedHashMap<>());
auditLog.set("type", object.getClass().getName());
auditLog.set("command", type);
auditLog.set("category", "DataSource");
auditLog.set("date", new Date());
auditLog.set("objectData", objectJson.toString());
if (RestClient.getRestClient() != null) {
auditLog.set("user", RestClient.getRestClient().getUser() != null ? RestClient.getRestClient().getUser().getUsername() : null);
auditLog.set("host", RestClient.getRestClient().getHost());
auditLog.set("agent", RestClient.getRestClient().getAgent());
}
auditLog.set("server", HistoryListener.CURRENT_IP);
auditLog.set("application", AppConfig.guid());
logManager.insert(auditLog);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
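// Expands composite key properties into their individual components so each key value can be read from the entity.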
private List<EdmProperty> findOriginalKeys(EdmEntityType entityType) {
List<EdmProperty> keys = new LinkedList<>();
try {
for (EdmProperty item : entityType.getKeyProperties()) {
EdmSimplePropertyImplProv key = (EdmSimplePropertyImplProv) item;
if (key.getComposite() != null) {
keys.addAll(key.getComposite());
} else {
keys.add(item);
}
}
} catch (Throwable e) {
throw new RuntimeException(e);
}
return keys;
}
@Override
public Object execEvent(final UriInfo infoView, final EdmEntityType entityType, String type, Object data) {
if (infoView != null) {
try {
JsonObject query = null;
if (data != null) {
Utils.processCloudFields(data);
}
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
if (query != null) {
if (type.startsWith("before")) {
if (!QueryManager.isNull(query.get("audit")) && query.get("audit").getAsJsonPrimitive().getAsBoolean()) {
beforeAnyOperation(type.replace("before", "").toUpperCase(), data);
}
RestClient.getRestClient().setEntity(data);
}
List<Object> keys = new LinkedList<>();
try {
for (EdmProperty key : findOriginalKeys(entityType)) {
keys.add(ReflectionUtil.getter(data, key.getName()));
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
RestClient.getRestClient().setKeys(keys);
Var result = QueryManager.executeEvent(query, data, type, keys, entityType.getName());
if (result != null) {
return result.getObject();
}
}
((UriInfoImpl) infoView).setClientCallbacks(getClientCallbacks());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return null;
}
@Override
public Object processNew(UriInfo infoView) {
try {
final EdmEntitySet oDataEntitySet = infoView.getTargetEntitySet();
final EdmEntityType entityType = oDataEntitySet.getEntityType();
JsonObject query = null;
try {
query = QueryManager.getQuery(entityType.getName());
} catch (Exception e) {
//No Command
}
if (query != null) {
Object jpaEntity = ((JPAEdmMappingImpl) entityType.getMapping()).getJPAType().newInstance();
String jpqlStatement = QueryManager.getJPQL(query, false);
JPQLExpression jpqlExpression = new JPQLExpression(
jpqlStatement,
DefaultEclipseLinkJPQLGrammar.instance(),
true
);
String mainAlias = JPQLParserUtil.getMainAlias(jpqlExpression);
VirtualClass virtualClass = new VirtualClass();
for (String name : entityType.getPropertyNames()) {
EdmSimplePropertyImplProv type = (EdmSimplePropertyImplProv) entityType.getProperty(name);
if (type.getMapping() != null && type.getMapping().getInternalExpression() != null) {
String expression = type.getMapping().getInternalExpression();
String[] parts = expression.split("\\.");
if (parts.length == 2) {
String f = parts[1];
if (parts[0].equals(mainAlias)) {
Field field = ReflectionUtil.getField(jpaEntity, f);
if (field != null) {
field.setAccessible(true);
Object o = field.get(jpaEntity);
virtualClass.set(name, o);
}
}
}
}
}
return virtualClass;
}
} catch (Exception e) {
//No Command
}
return null;
}
@Override
public Object overridePost(UriInfo infoView, Object entity) {
JsonObject query = null;
try {
final EdmEntitySet oDataEntitySet = infoView.getTargetEntitySet();
final EdmEntityType entityType = oDataEntitySet.getEntityType();
query = QueryManager.getQuery(entityType.getName());
String blocklyMethod = getBlocklyMethod(infoView, query);
if (blocklyMethod == null) {
return null;
}
if (query != null && QueryManager.isNull(query.get("entityFullName"))) {
Var result = QueryManager.executeBlockly(query, getRestMethod(infoView), Var.valueOf(entity));
if (result != null && !result.isNull()) {
if (!QueryManager.isNull(query.get("baseEntity"))) {
return result.getObject();
} else {
return result;
}
} else {
return entity;
}
}
} catch (Exception e) {
//No Command
}
return null;
}
@Override
public boolean canOverridePut(UriInfo infoView) {
try {
final EdmEntitySet oDataEntitySet = infoView.getTargetEntitySet();
final EdmEntityType entityType = oDataEntitySet.getEntityType();
JsonObject query = QueryManager.getQuery(entityType.getName());
String blocklyMethod = getBlocklyMethod(infoView, query);
if (blocklyMethod == null) {
return false;
}
return query != null && QueryManager.isNull(query.get("entityFullName"));
} catch (Exception e) {
//No Command
}
return false;
}
@Override
public boolean canOverrideDelete(UriInfo infoView) {
return canOverridePut(infoView);
}
@Override
public boolean canOverridePost(UriInfo infoView) {
return canOverridePut(infoView);
}
@Override
public Object overridePut(UriInfo infoView, Object entity) {
return overridePost(infoView, entity);
}
@Override
public boolean overrideDelete(UriInfo infoView, Object entity) {
JsonObject query = null;
try {
final EdmEntitySet oDataEntitySet = infoView.getTargetEntitySet();
final EdmEntityType entityType = oDataEntitySet.getEntityType();
query = QueryManager.getQuery(entityType.getName());
String blocklyMethod = getBlocklyMethod(infoView, query);
if (blocklyMethod == null) {
return false;
}
if (query != null && QueryManager.isNull(query.get("entityFullName"))) {
QueryManager.executeBlockly(query, getRestMethod(infoView), Var.valueOf(entity));
return true;
}
} catch (Exception e) {
//No Command
}
return false;
}
@Override
public Object convert(Object obj, Class clazz) {
return Var.valueOf(obj).getObject(clazz);
}
}
|
package de.endrullis.draggabletabs;
import com.sun.javafx.scene.traversal.Direction;
import javafx.geometry.Orientation;
import javafx.scene.Node;
import javafx.scene.Parent;
import javafx.scene.control.SplitPane;
import javafx.scene.control.Tab;
import javafx.scene.control.TabPane;
import javafx.scene.input.TransferMode;
import javafx.scene.layout.StackPane;
import static de.endrullis.draggabletabs.DraggableTab.isDraggingTab;
import static de.endrullis.draggabletabs.DraggableTabUtils.rearrangeDividers;
/**
* @author Stefan Endrullis (endrullis@iat.uni-leipzig.de)
*/
public class DraggableTabInsertPane extends StackPane {
protected final DraggableTabLayoutExtender draggableTabLayoutExtender;
protected final Direction direction;
public DraggableTabInsertPane(DraggableTabLayoutExtender draggableTabLayoutExtender, Direction direction) {
this.draggableTabLayoutExtender = draggableTabLayoutExtender;
this.direction = direction;
switch (this.direction) {
case UP:
case DOWN:
setMinHeight(DraggableTabLayoutExtender.EXTENDER_SIZE);
setMaxHeight(DraggableTabLayoutExtender.EXTENDER_SIZE);
break;
case LEFT:
case RIGHT:
setMinWidth(DraggableTabLayoutExtender.EXTENDER_SIZE);
setMaxWidth(DraggableTabLayoutExtender.EXTENDER_SIZE);
break;
}
getStyleClass().add("draggableTabInsertPane");
registerListeners();
}
protected void registerListeners() {
setOnDragOver(event -> {
if (isDraggingTab(event.getDragboard())) {
event.acceptTransferModes(TransferMode.MOVE);
event.consume();
}
});
setOnDragDropped(event -> {
if (isDraggingTab(event.getDragboard())) {
final Tab tab = DraggableTab.draggingTab.get();
TabPane oldTabPane = tab.getTabPane();
oldTabPane.getTabs().remove(tab);
DraggableTabLayoutExtender sourceDraggableTabLayoutExtender = new DraggableTabLayoutExtender(new DraggableTabPane(tab));
addComponent(draggableTabLayoutExtender.getParent(), sourceDraggableTabLayoutExtender);
DraggableTabUtils.cleanup(oldTabPane);
DraggableTab.draggingTab.set(null);
event.setDropCompleted(true);
event.consume();
}
});
}
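// Depending on the drop direction and the orientation of the enclosing SplitPane, the dropped tab is either inserted next to this extender or nested inside a new SplitPane with the opposite orientation.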
protected void addComponent(Parent parent, Node component) {
if (parent.getParent() instanceof SplitPane) {
SplitPane splitPane = (SplitPane) parent.getParent();
int index = splitPane.getItems().indexOf(draggableTabLayoutExtender);
if (splitPane.getOrientation() == Orientation.VERTICAL) {
switch (direction) {
case UP:
insertInto(splitPane, index, component);
break;
case DOWN:
insertInto(splitPane, index + 1, component);
break;
case LEFT:
splitInto(splitPane, index, component, draggableTabLayoutExtender);
break;
case RIGHT:
splitInto(splitPane, index, draggableTabLayoutExtender, component);
break;
}
}
if (splitPane.getOrientation() == Orientation.HORIZONTAL) {
switch (direction) {
case UP:
splitInto(splitPane, index, component, draggableTabLayoutExtender);
break;
case DOWN:
splitInto(splitPane, index, draggableTabLayoutExtender, component);
break;
case LEFT:
insertInto(splitPane, index, component);
break;
case RIGHT:
insertInto(splitPane, index + 1, component);
break;
}
}
}
}
protected void splitInto(SplitPane parent, int index, Node firstComponent, Node secondComponent) {
parent.getItems().remove(draggableTabLayoutExtender);
SplitPane newSplitPane = new SplitPane();
// change orientation for new SplitPane
newSplitPane.setOrientation(parent.getOrientation() == Orientation.HORIZONTAL ? Orientation.VERTICAL : Orientation.HORIZONTAL);
newSplitPane.getItems().addAll(firstComponent, secondComponent);
insertInto(parent, index, new DraggableTabLayoutExtender(newSplitPane));
}
protected void insertInto(SplitPane parent, int index, Node component) {
parent.getItems().add(index, component);
rearrangeDividers(parent);
}
}
|
package de.retest.recheck.ui.diff;
import static de.retest.recheck.ui.descriptors.ElementUtil.flattenChildElements;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import de.retest.recheck.ui.DefaultValueFinder;
import de.retest.recheck.ui.descriptors.Element;
public class ElementDifferenceFinder {
private final IdentifyingAttributesDifferenceFinder identAttrDiffFinder;
private final AttributesDifferenceFinder attributesDifferenceFinder;
public ElementDifferenceFinder( final DefaultValueFinder defaultValueFinder ) {
identAttrDiffFinder = new IdentifyingAttributesDifferenceFinder();
attributesDifferenceFinder = new AttributesDifferenceFinder( defaultValueFinder );
}
// TODO We can have more performance optimization: a cell can only live in a row, a row only in a table, etc.
public Collection<ElementDifference> findChildDifferences( final Element expectedComponent,
final Element actualComponent ) {
final Alignment alignment = Alignment.createAlignment( expectedComponent, actualComponent );
// Recreate original structure for difference, so we can skip if there are too many child differences per comp.
final List<Element> remainingActual = new ArrayList<>( flattenChildElements( actualComponent ) );
final Collection<ElementDifference> result =
createHierarchicalStructure( expectedComponent.getContainedElements(), remainingActual, alignment );
// Add components in actual that are missing in expected.
for ( final Element element : remainingActual ) {
final ElementDifference difference = differenceFor( null, element, remainingActual, alignment );
if ( difference != null ) {
result.add( difference );
}
}
return result;
}
private Collection<ElementDifference> createHierarchicalStructure( final List<Element> expected,
final List<Element> remainingActual, final Alignment alignment ) {
final Collection<ElementDifference> result = new ArrayList<>();
for ( final Element childComp : expected ) {
final Element actualChild = alignment.get( childComp );
final ElementDifference difference = differenceFor( childComp, actualChild, remainingActual, alignment );
if ( difference != null ) {
result.add( difference );
}
remainingActual.remove( actualChild );
}
return result;
}
private ElementDifference differenceFor( final Element expected, final Element actual,
final List<Element> remainingActual, final Alignment alignment ) {
AttributesDifference attributesDifference = null;
LeafDifference identifyingAttributesDifference = null;
final Collection<ElementDifference> childDifferences = new ArrayList<>();
if ( expected == null ) {
identifyingAttributesDifference = InsertedDeletedElementDifference.differenceFor( null, actual );
} else {
if ( actual == null ) {
identifyingAttributesDifference = InsertedDeletedElementDifference.differenceFor( expected, null );
} else {
identifyingAttributesDifference = identAttrDiffFinder
.differenceFor( expected.getIdentifyingAttributes(), actual.getIdentifyingAttributes() );
attributesDifference = attributesDifferenceFinder.differenceFor( expected, actual );
}
childDifferences.addAll(
createHierarchicalStructure( expected.getContainedElements(), remainingActual, alignment ) );
}
if ( identifyingAttributesDifference == null && attributesDifference == null && childDifferences.isEmpty() ) {
return null;
}
return new ElementDifference( expected == null ? actual : expected, attributesDifference,
identifyingAttributesDifference, expected == null ? null : expected.getScreenshot(),
actual == null ? null : actual.getScreenshot(), childDifferences );
}
public ElementDifference differenceFor( final Element expected, final Element actual ) {
AttributesDifference attributesDifference = null;
LeafDifference identifyingAttributesDifference = null;
final Collection<ElementDifference> childDifferences = new ArrayList<>();
if ( expected == null ) {
identifyingAttributesDifference = InsertedDeletedElementDifference.differenceFor( null, actual );
} else {
if ( actual == null ) {
identifyingAttributesDifference = InsertedDeletedElementDifference.differenceFor( expected, null );
} else {
identifyingAttributesDifference = identAttrDiffFinder
.differenceFor( expected.getIdentifyingAttributes(), actual.getIdentifyingAttributes() );
attributesDifference = attributesDifferenceFinder.differenceFor( expected, actual );
childDifferences.addAll( findChildDifferences( expected, actual ) );
}
}
if ( identifyingAttributesDifference == null && attributesDifference == null && childDifferences.isEmpty() ) {
return null;
}
return new ElementDifference( expected == null ? actual : expected, attributesDifference,
identifyingAttributesDifference, expected == null ? null : expected.getScreenshot(),
actual == null ? null : actual.getScreenshot(), childDifferences );
}
public static List<ElementDifference> getNonEmptyDifferences( final List<? extends Difference> differences ) {
final List<ElementDifference> result = new ArrayList<>();
if ( differences != null ) {
for ( final Difference difference : differences ) {
result.addAll( difference.getNonEmptyDifferences() );
}
}
return result;
}
public static List<ElementDifference> getElementDifferences( final List<? extends Difference> differences ) {
final List<ElementDifference> result = new ArrayList<>();
for ( final Difference difference : differences ) {
result.addAll( difference.getElementDifferences() );
}
return result;
}
}
|
package edu.ufl.cise.cnt5106c.messages;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
/**
*
* @author Giacomo Benincasa (giacomo@cise.ufl.edu)
*/
public class MessageWithPayload extends Message
{
MessageWithPayload (Type type, byte[] payload) {
super (type, payload);
}
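// The piece index occupies the first four bytes of the payload, encoded big-endian.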
public int getPieceIndex() {
return ByteBuffer.wrap(Arrays.copyOfRange(_payload, 0, 4)).order(ByteOrder.BIG_ENDIAN).getInt();
}
protected static byte[] getPieceIndexBytes (int pieceIdx) {
return ByteBuffer.allocate(4).order(ByteOrder.BIG_ENDIAN).putInt(pieceIdx).array();
}
}
|
package es.jimenezhidalgo.uni.programacion.poker2;
import es.jimenezhidalgo.uni.programacion.poker2.utils.PremiosUtils;
import java.util.ArrayList;
public class Apuesta {
private Jugador mJugador;
private ArrayList<Carta> mMano;
private double mCantidadApostada;
public Apuesta(Jugador jugador, ArrayList<Carta> mano, double cantidadApostada) {
mJugador = jugador;
mMano = mano;
mCantidadApostada = cantidadApostada;
}
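// Registered players (JugadorRegistrado) receive higher payout multipliers from three of a kind upwards; pairs and two pairs pay the same for every player.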
public Premio verPremio(){
double premioEconomico = 0;
int combinacion = PremiosUtils.NO_TIENE_NADA;
switch (PremiosUtils.comprobarCombinacionEnMano(mMano)){
case PremiosUtils.TIENE_PAREJA:
combinacion = PremiosUtils.TIENE_PAREJA;
premioEconomico = mCantidadApostada;
break;
case PremiosUtils.TIENE_DOBLE_PAREJA:
combinacion = PremiosUtils.TIENE_DOBLE_PAREJA;
premioEconomico = mCantidadApostada * 2;
break;
case PremiosUtils.TIENE_TRIO:
combinacion = PremiosUtils.TIENE_TRIO;
if (mJugador.getClass().equals(JugadorRegistrado.class)){
premioEconomico = mCantidadApostada * 4;
} else {
premioEconomico = mCantidadApostada * 3;
}
break;
case PremiosUtils.TIENE_COLOR:
combinacion = PremiosUtils.TIENE_COLOR;
if (mJugador.getClass().equals(JugadorRegistrado.class)){
premioEconomico = mCantidadApostada * 6;
} else {
premioEconomico = mCantidadApostada * 4;
}
break;
case PremiosUtils.TIENE_FULL:
combinacion = PremiosUtils.TIENE_FULL;
if (mJugador.getClass().equals(JugadorRegistrado.class)){
premioEconomico = mCantidadApostada * 8;
} else {
premioEconomico = mCantidadApostada * 5;
}
break;
case PremiosUtils.TIENE_POKER:
combinacion = PremiosUtils.TIENE_POKER;
if (mJugador.getClass().equals(JugadorRegistrado.class)){
premioEconomico = mCantidadApostada * 10;
} else {
premioEconomico = mCantidadApostada * 6;
}
break;
default:
combinacion = PremiosUtils.NO_TIENE_NADA;
break;
}
return new Premio(combinacion, premioEconomico);
}
public class Premio {
private int mCombinacion;
private double premioEconomico;
Premio(int combinacion, double premioEconomico) {
mCombinacion = combinacion;
this.premioEconomico = premioEconomico;
}
public int getCombinacion() {
return mCombinacion;
}
public double getPremioEconomico() {
return premioEconomico;
}
}
}
|
package eu.socialsensor.graphdatabases;
import java.io.File;
import java.util.Set;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.MapConfiguration;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tinkerpop.gremlin.process.traversal.Path;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.apache.tinkerpop.gremlin.structure.Direction;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Property;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import com.amazon.titan.diskstorage.dynamodb.BackendDataModel;
import com.amazon.titan.diskstorage.dynamodb.Client;
import com.amazon.titan.diskstorage.dynamodb.Constants;
import com.amazon.titan.diskstorage.dynamodb.DynamoDBSingleRowStore;
import com.amazon.titan.diskstorage.dynamodb.DynamoDBStore;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
import com.amazonaws.services.dynamodbv2.model.ResourceInUseException;
import com.google.common.collect.Iterators;
import com.thinkaurelius.titan.core.Multiplicity;
import com.thinkaurelius.titan.core.PropertyKey;
import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.schema.TitanManagement;
import com.thinkaurelius.titan.core.schema.VertexLabelMaker;
import com.thinkaurelius.titan.core.util.TitanCleanup;
import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration;
import com.thinkaurelius.titan.graphdb.database.StandardTitanGraph;
import eu.socialsensor.insert.Insertion;
import eu.socialsensor.insert.TitanMassiveInsertion;
import eu.socialsensor.insert.TitanSingleInsertion;
import eu.socialsensor.main.BenchmarkConfiguration;
import eu.socialsensor.main.GraphDatabaseType;
import eu.socialsensor.utils.Utils;
import jp.classmethod.titan.diskstorage.tupl.TuplStoreManager;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource.computer;
/**
* Titan graph database implementation
*
* @author sotbeis, sotbeis@iti.gr
* @author Alexander Patrikalakis
*/
public class TitanGraphDatabase extends GraphDatabaseBase<Iterator<Vertex>, Iterator<Edge>, Vertex, Edge>
{
private static final Logger LOG = LogManager.getLogger();
public static final String INSERTION_TIMES_OUTPUT_PATH = "data/titan.insertion.times";
double totalWeight;
private final StandardTitanGraph graph;
private final BenchmarkConfiguration config;
public TitanGraphDatabase(GraphDatabaseType type, BenchmarkConfiguration config, File dbStorageDirectory,
boolean batchLoading)
{
super(type, dbStorageDirectory);
this.config = config;
if (!GraphDatabaseType.TITAN_FLAVORS.contains(type))
{
throw new IllegalArgumentException(String.format("The graph database %s is not a Titan database.",
type == null ? "null" : type.name()));
}
graph = open(batchLoading);
createSchema();
}
private static final StandardTitanGraph buildTitanGraph(GraphDatabaseType type, File dbPath, BenchmarkConfiguration bench,
boolean batchLoading)
{
if (!GraphDatabaseType.TITAN_FLAVORS.contains(type))
{
throw new IllegalArgumentException("must provide a Titan database type but got "
+ (type == null ? "null" : type.name()));
}
if (dbPath == null)
{
throw new IllegalArgumentException("the dbPath must not be null");
}
if (!dbPath.exists() || !dbPath.canWrite() || !dbPath.isDirectory())
{
throw new IllegalArgumentException("db path must exist as a directory and must be writeable");
}
final Configuration conf = new MapConfiguration(new HashMap<String, String>());
final Configuration graph = conf.subset(GraphDatabaseConfiguration.GRAPH_NS.getName());
final Configuration storage = conf.subset(GraphDatabaseConfiguration.STORAGE_NS.getName());
final Configuration ids = conf.subset(GraphDatabaseConfiguration.IDS_NS.getName());
final Configuration metrics = conf.subset(GraphDatabaseConfiguration.METRICS_NS.getName());
final Configuration cluster = conf.subset(GraphDatabaseConfiguration.CLUSTER_NS.getName());
//graph NS config
if(bench.isCustomIds()) {
//TODO(amcp) figure out a way to claim the ids used for this unique-instance-id
graph.addProperty(GraphDatabaseConfiguration.ALLOW_SETTING_VERTEX_ID.getName(), "true");
}
graph.addProperty(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID.getName(), "DEADBEEF");
//cluster NS config. only two partitions for now
//recall the number of partitions is a FIXED property, so the user can't override the
//initial value stored in system_properties the first time the graph is loaded.
//default is 32
cluster.addProperty(GraphDatabaseConfiguration.CLUSTER_MAX_PARTITIONS.getName(), 2);
// storage NS config. FYI, storage.idauthority-wait-time is 300ms
storage.addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND.getName(), type.getBackend());
storage.addProperty(GraphDatabaseConfiguration.STORAGE_DIRECTORY.getName(), dbPath.getAbsolutePath());
storage.addProperty(GraphDatabaseConfiguration.STORAGE_BATCH.getName(), Boolean.toString(batchLoading));
storage.addProperty(GraphDatabaseConfiguration.STORAGE_TRANSACTIONAL.getName(), Boolean.toString(!batchLoading));
storage.addProperty(GraphDatabaseConfiguration.BUFFER_SIZE.getName(), bench.getTitanBufferSize());
storage.addProperty(GraphDatabaseConfiguration.PAGE_SIZE.getName(), bench.getTitanPageSize());
storage.addProperty(GraphDatabaseConfiguration.PARALLEL_BACKEND_OPS.getName(), "true");
// ids NS config
ids.addProperty(GraphDatabaseConfiguration.IDS_BLOCK_SIZE.getName(), bench.getTitanIdsBlocksize());
boolean anyMetrics = bench.publishGraphiteMetrics() || bench.publishCsvMetrics();
if(anyMetrics) {
metrics.addProperty(GraphDatabaseConfiguration.BASIC_METRICS.getName(), anyMetrics);
metrics.addProperty("prefix", type.getShortname());
}
if(bench.publishGraphiteMetrics()) {
final Configuration graphite = metrics.subset(BenchmarkConfiguration.GRAPHITE);
graphite.addProperty("hostname", bench.getGraphiteHostname());
graphite.addProperty(BenchmarkConfiguration.CSV_INTERVAL, bench.getCsvReportingInterval());
}
if(bench.publishCsvMetrics()) {
final Configuration csv = metrics.subset(GraphDatabaseConfiguration.METRICS_CSV_NS.getName());
csv.addProperty(GraphDatabaseConfiguration.METRICS_CSV_DIR.getName(), bench.getCsvDir().getAbsolutePath());
csv.addProperty(BenchmarkConfiguration.CSV_INTERVAL, bench.getCsvReportingInterval());
}
if (GraphDatabaseType.TITAN_CASSANDRA == type)
{
storage.addProperty(GraphDatabaseConfiguration.STORAGE_HOSTS.getName(),
"localhost");
}
else if (GraphDatabaseType.TITAN_TUPL == type)
{
final Configuration tupl = storage.subset(TuplStoreManager.TUPL_NS.getName());
tupl.addProperty(TuplStoreManager.TUPL_PREFIX.getName(), "tupldb");
tupl.addProperty(TuplStoreManager.TUPL_DIRECT_PAGE_ACCESS.getName(), Boolean.TRUE.toString());
tupl.addProperty(TuplStoreManager.TUPL_MIN_CACHE_SIZE.getName(), Long.toString(bench.getTuplMinCacheSize()));
tupl.addProperty(TuplStoreManager.TUPL_MAP_DATA_FILES.getName(), Boolean.TRUE.toString());
final Configuration checkpoint = tupl.subset(TuplStoreManager.TUPL_CHECKPOINT_NS.getName());
//TODO make this conditioned on running the Massive Insertion Workload
//checkpoint.addProperty(TuplStoreManager.TUPL_CHECKPOINT_SIZE_THRESHOLD.getName(), 0);
}
else if (GraphDatabaseType.TITAN_DYNAMODB == type)
{
final Configuration dynamodb = storage.subset("dynamodb");
final Configuration client = dynamodb.subset(Constants.DYNAMODB_CLIENT_NAMESPACE.getName());
final Configuration credentials = client.subset(Constants.DYNAMODB_CLIENT_CREDENTIALS_NAMESPACE.getName());
if (bench.getDynamodbDataModel() == null)
{
throw new IllegalArgumentException("data model must be set for dynamodb benchmarking");
}
if (bench.getDynamodbEndpoint() != null && !bench.getDynamodbEndpoint().isEmpty())
{
client.addProperty(Constants.DYNAMODB_CLIENT_ENDPOINT.getName(), bench.getDynamodbEndpoint());
client.addProperty(Constants.DYNAMODB_CLIENT_MAX_CONN.getName(), bench.getDynamodbWorkerThreads());
} else {
throw new IllegalArgumentException("require endpoint");
}
if (bench.getDynamodbCredentialsFqClassName() != null
&& !bench.getDynamodbCredentialsFqClassName().isEmpty())
{
credentials.addProperty(Constants.DYNAMODB_CREDENTIALS_CLASS_NAME.getName(), bench.getDynamodbCredentialsFqClassName());
}
if (bench.getDynamodbCredentialsCtorArguments() != null)
{
credentials.addProperty(Constants.DYNAMODB_CREDENTIALS_CONSTRUCTOR_ARGS.getName(),
bench.getDynamodbCredentialsCtorArguments());
}
dynamodb.addProperty(Constants.DYNAMODB_FORCE_CONSISTENT_READ.getName(), bench.dynamodbConsistentRead());
Configuration executor = client.subset(Constants.DYNAMODB_CLIENT_EXECUTOR_NAMESPACE.getName());
executor.addProperty(Constants.DYNAMODB_CLIENT_EXECUTOR_CORE_POOL_SIZE.getName(), bench.getDynamodbWorkerThreads());
executor.addProperty(Constants.DYNAMODB_CLIENT_EXECUTOR_MAX_POOL_SIZE.getName(), bench.getDynamodbWorkerThreads());
executor.addProperty(Constants.DYNAMODB_CLIENT_EXECUTOR_KEEP_ALIVE.getName(), TimeUnit.MINUTES.toMillis(1));
executor.addProperty(Constants.DYNAMODB_CLIENT_EXECUTOR_QUEUE_MAX_LENGTH.getName(), bench.getTitanBufferSize());
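// Eventually consistent reads consume half the read capacity of strongly consistent reads, so half the write throughput is provisioned for reads when consistent reads are disabled.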
final long writeTps = bench.getDynamodbTps();
final long readTps = Math.max(1, bench.dynamodbConsistentRead() ? writeTps : writeTps / 2);
final Configuration stores = dynamodb.subset(Constants.DYNAMODB_STORES_NAMESPACE.getName());
for (String storeName : Constants.REQUIRED_BACKEND_STORES)
{
final Configuration store = stores.subset(storeName);
store.addProperty(Constants.STORES_DATA_MODEL.getName(), bench.getDynamodbDataModel().name());
store.addProperty(Constants.STORES_CAPACITY_READ.getName(), readTps);
store.addProperty(Constants.STORES_CAPACITY_WRITE.getName(), writeTps);
store.addProperty(Constants.STORES_READ_RATE_LIMIT.getName(), readTps);
store.addProperty(Constants.STORES_WRITE_RATE_LIMIT.getName(), writeTps);
}
}
return (StandardTitanGraph) TitanFactory.open(conf);
}
private StandardTitanGraph open(boolean batchLoading)
{
//if using DynamoDB Storage Backend for Titan, prep the tables in parallel
if(type == GraphDatabaseType.TITAN_DYNAMODB && config.getDynamodbPrecreateTables()) {
List<CreateTableRequest> requests = new LinkedList<>();
long wcu = config.getDynamodbTps();
long rcu = Math.max(1, config.dynamodbConsistentRead() ? wcu : (wcu / 2));
for(String store : Constants.REQUIRED_BACKEND_STORES) {
final String tableName = config.getDynamodbTablePrefix() + "_" + store;
if(BackendDataModel.MULTI == config.getDynamodbDataModel()) {
requests.add(DynamoDBStore.createTableRequest(tableName,
rcu, wcu));
} else if(BackendDataModel.SINGLE == config.getDynamodbDataModel()) {
requests.add(DynamoDBSingleRowStore.createTableRequest(tableName, rcu, wcu));
}
}
//TODO is this autocloseable?
final AmazonDynamoDB client =
new AmazonDynamoDBClient(Client.createAWSCredentialsProvider(config.getDynamodbCredentialsFqClassName(),
config.getDynamodbCredentialsCtorArguments() == null ? null : config.getDynamodbCredentialsCtorArguments().split(",")));
client.setEndpoint(config.getDynamodbEndpoint());
for(CreateTableRequest request : requests) {
try {
client.createTable(request);
} catch(ResourceInUseException ignore) {
//already created, good
}
}
client.shutdown();
}
return buildTitanGraph(type, dbStorageDirectory, config, batchLoading);
}
@Override
public void massiveModeLoading(File dataPath)
{
Insertion titanMassiveInsertion = TitanMassiveInsertion.create(graph, type, config.isCustomIds());
titanMassiveInsertion.createGraph(dataPath, 0 /* scenarioNumber */);
//TODO(amcp) figure out a way to claim the ids used for this unique-instance-id
}
@Override
public void singleModeLoading(File dataPath, File resultsPath, int scenarioNumber)
{
Insertion titanSingleInsertion = new TitanSingleInsertion(this.graph, type, resultsPath);
titanSingleInsertion.createGraph(dataPath, scenarioNumber);
}
@Override
public void shutdown()
{
graph.close();
}
@Override
public void delete()
{
shutdown();
TitanCleanup.clear(graph);
Utils.deleteRecursively(dbStorageDirectory);
}
@Override
public void shutdownMassiveGraph()
{
shutdown();
}
@Override
public void shortestPath(final Vertex fromNode, Integer targetNode)
{
final GraphTraversalSource g = graph.traversal();
final Stopwatch watch = Stopwatch.createStarted();
// repeat the contained traversal
// map from this vertex to inV on SIMILAR edges without looping
// until you map to the target toNode and the path is six vertices long or less
// only return one path
//g.V().has("nodeId", 775).repeat(out('similar').simplePath()).until(has('nodeId', 990).and().filter {it.path().size() <= 5}).limit(1).path().by('nodeId')
GraphTraversal<?, Path> t =
g.V().has(NODE_ID, fromNode.<Integer>value(NODE_ID))
.repeat(
__.out(SIMILAR)
.simplePath())
.until(
__.has(NODE_ID, targetNode)
.and(
__.filter(it -> {
//when the size of the path in the traverser object is six, that means this traverser made 4 hops from the
//fromNode, a total of 5 vertices
return it.path().size() <= 5;
}))
)
.limit(1)
.path();
t.tryNext()
.ifPresent( it -> {
final int pathSize = it.size();
final long elapsed = watch.elapsed(TimeUnit.MILLISECONDS);
watch.stop();
if(elapsed > 200) { //threshold for debugging
LOG.info("from @ " + fromNode.value(NODE_ID) +
" to @ " + targetNode.toString() +
" took " + elapsed + " ms, " + pathSize + ": " + it.toString());
}
});
}
@Override
public int getNodeCount()
{
final GraphTraversalSource g = graph.traversal();
final long nodeCount = g.V().count().toList().get(0);
return (int) nodeCount;
}
@Override
public Set<Integer> getNeighborsIds(int nodeId)
{
final Vertex vertex = getVertex(nodeId);
Set<Integer> neighbors = new HashSet<Integer>();
Iterator<Vertex> iter = vertex.vertices(Direction.OUT, SIMILAR);
while (iter.hasNext())
{
Integer neighborId = Integer.valueOf(iter.next().property(NODE_ID).value().toString());
neighbors.add(neighborId);
}
return neighbors;
}
@Override
public double getNodeWeight(int nodeId)
{
Vertex vertex = getVertex(nodeId);
double weight = getNodeOutDegree(vertex);
return weight;
}
public double getNodeInDegree(Vertex vertex)
{
return (double) Iterators.size(vertex.edges(Direction.IN, SIMILAR));
}
public double getNodeOutDegree(Vertex vertex)
{
return (double) Iterators.size(vertex.edges(Direction.OUT, SIMILAR));
}
@Override
public void initCommunityProperty()
{
int communityCounter = 0;
for (Vertex v : graph.traversal().V(T.label, NODE_LABEL).toList())
{
v.property(NODE_COMMUNITY, communityCounter);
v.property(COMMUNITY, communityCounter);
communityCounter++;
}
}
@Override
public Set<Integer> getCommunitiesConnectedToNodeCommunities(int nodeCommunities)
{
Set<Integer> communities = new HashSet<Integer>();
final GraphTraversalSource g = graph.traversal();
for (Property<?> p : g.V().has(NODE_COMMUNITY, nodeCommunities).out(SIMILAR).properties(COMMUNITY).toSet())
{
communities.add((Integer) p.value());
}
return communities;
}
@Override
public Set<Integer> getNodesFromCommunity(int community)
{
final GraphTraversalSource g = graph.traversal();
Set<Integer> nodes = new HashSet<Integer>();
for (Vertex v : g.V().has(COMMUNITY, community).toList())
{
Integer nodeId = (Integer) v.property(NODE_ID).value();
nodes.add(nodeId);
}
return nodes;
}
@Override
public Set<Integer> getNodesFromNodeCommunity(int nodeCommunity)
{
Set<Integer> nodes = new HashSet<Integer>();
final GraphTraversalSource g = graph.traversal();
for (Property<?> property : g.V().has(NODE_COMMUNITY, nodeCommunity).properties(NODE_ID).toList())
{
nodes.add((Integer) property.value());
}
return nodes;
}
@Override
public double getEdgesInsideCommunity(int vertexCommunity, int communityVertices)
{
double edges = 0;
Set<Vertex> comVertices = graph.traversal().V().has(COMMUNITY, communityVertices).toSet();
for (Vertex vertex : graph.traversal().V().has(NODE_COMMUNITY, vertexCommunity).toList())
{
Iterator<Vertex> it = vertex.vertices(Direction.OUT, SIMILAR);
for (Vertex v; it.hasNext();)
{
v = it.next();
if(comVertices.contains(v)) {
edges++;
}
}
}
return edges;
}
@Override
public double getCommunityWeight(int community)
{
double communityWeight = 0;
final List<Vertex> list = graph.traversal().V().has(COMMUNITY, community).toList();
if (list.size() <= 1) {
return communityWeight;
}
for (Vertex vertex : list)
{
communityWeight += getNodeOutDegree(vertex);
}
return communityWeight;
}
@Override
public double getNodeCommunityWeight(int nodeCommunity)
{
double nodeCommunityWeight = 0;
for (Vertex vertex : graph.traversal().V().has(NODE_COMMUNITY, nodeCommunity).toList())
{
nodeCommunityWeight += getNodeOutDegree(vertex);
}
return nodeCommunityWeight;
}
@Override
public void moveNode(int nodeCommunity, int toCommunity)
{
for (Vertex vertex : graph.traversal().V().has(NODE_COMMUNITY, nodeCommunity).toList())
{
vertex.property(COMMUNITY, toCommunity);
}
}
@Override
public double getGraphWeightSum()
{
final Iterator<Edge> edges = graph.edges();
return (double) Iterators.size(edges);
}
@Override
public int reInitializeCommunities()
{
Map<Integer, Integer> initCommunities = new HashMap<Integer, Integer>();
int communityCounter = 0;
Iterator<Vertex> it = graph.vertices();
for (Vertex v; it.hasNext();)
{
v = it.next();
int communityId = (Integer) v.property(COMMUNITY).value();
if (!initCommunities.containsKey(communityId))
{
initCommunities.put(communityId, communityCounter);
communityCounter++;
}
int newCommunityId = initCommunities.get(communityId);
v.property(COMMUNITY, newCommunityId);
v.property(NODE_COMMUNITY, newCommunityId);
}
return communityCounter;
}
@Override
public int getCommunity(int nodeCommunity)
{
Vertex vertex = graph.traversal().V().has(NODE_COMMUNITY, nodeCommunity).next();
int community = (Integer) vertex.property(COMMUNITY).value();
return community;
}
@Override
public int getCommunityFromNode(int nodeId)
{
Vertex vertex = getVertex(nodeId);
return (Integer) vertex.property(COMMUNITY).value();
}
@Override
public int getCommunitySize(int community)
{
Set<Integer> nodeCommunities = new HashSet<Integer>();
for (Vertex v : graph.traversal().V().has(COMMUNITY, community).toList())
{
int nodeCommunity = (Integer) v.property(NODE_COMMUNITY).value();
if (!nodeCommunities.contains(nodeCommunity))
{
nodeCommunities.add(nodeCommunity);
}
}
return nodeCommunities.size();
}
@Override
public Map<Integer, List<Integer>> mapCommunities(int numberOfCommunities)
{
Map<Integer, List<Integer>> communities = new HashMap<Integer, List<Integer>>();
for (int i = 0; i < numberOfCommunities; i++)
{
GraphTraversal<Vertex, Vertex> t = graph.traversal().V().has(COMMUNITY, i);
List<Integer> vertices = new ArrayList<Integer>();
while (t.hasNext())
{
Integer nodeId = (Integer) t.next().property(NODE_ID).value();
vertices.add(nodeId);
}
communities.put(i, vertices);
}
return communities;
}
private void createSchema()
{
final TitanManagement mgmt = graph.openManagement();
if(!mgmt.containsVertexLabel(NODE_LABEL)) {
final VertexLabelMaker maker = mgmt.makeVertexLabel(NODE_LABEL);
maker.make();
}
if (null == mgmt.getGraphIndex(NODE_ID))
{
final PropertyKey key = mgmt.makePropertyKey(NODE_ID).dataType(Integer.class).make();
mgmt.buildIndex(NODE_ID, Vertex.class).addKey(key).unique().buildCompositeIndex();
}
if (null == mgmt.getGraphIndex(COMMUNITY))
{
final PropertyKey key = mgmt.makePropertyKey(COMMUNITY).dataType(Integer.class).make();
mgmt.buildIndex(COMMUNITY, Vertex.class).addKey(key).buildCompositeIndex();
}
if (null == mgmt.getGraphIndex(NODE_COMMUNITY))
{
final PropertyKey key = mgmt.makePropertyKey(NODE_COMMUNITY).dataType(Integer.class).make();
mgmt.buildIndex(NODE_COMMUNITY, Vertex.class).addKey(key).buildCompositeIndex();
}
if (mgmt.getEdgeLabel(SIMILAR) == null)
{
mgmt.makeEdgeLabel(SIMILAR).multiplicity(Multiplicity.MULTI).directed().make();
}
mgmt.commit();
graph.tx().commit();
}
@Override
public Iterator<Vertex> getVertexIterator()
{
return graph.traversal().V().hasLabel(NODE_LABEL).toStream().iterator();
}
@Override
public Iterator<Edge> getNeighborsOfVertex(Vertex v)
{
return v.edges(Direction.BOTH, SIMILAR);
}
@Override
public void cleanupVertexIterator(Iterator<Vertex> it)
{
return; // NOOP - do nothing
}
@Override
public Vertex getOtherVertexFromEdge(Edge edge, Vertex oneVertex)
{
return edge.inVertex().equals(oneVertex) ? edge.outVertex() : edge.inVertex();
}
@Override
public Iterator<Edge> getAllEdges()
{
return graph.edges();
}
@Override
public Vertex getSrcVertexFromEdge(Edge edge)
{
return edge.outVertex();
}
@Override
public Vertex getDestVertexFromEdge(Edge edge)
{
return edge.inVertex();
}
@Override
public boolean edgeIteratorHasNext(Iterator<Edge> it)
{
return it.hasNext();
}
@Override
public Edge nextEdge(Iterator<Edge> it)
{
return it.next();
}
@Override
public void cleanupEdgeIterator(Iterator<Edge> it)
{
// NOOP
}
@Override
public boolean vertexIteratorHasNext(Iterator<Vertex> it)
{
return it.hasNext();
}
@Override
public Vertex nextVertex(Iterator<Vertex> it)
{
return it.next();
}
@Override
public Vertex getVertex(Integer i)
{
final GraphTraversalSource g = graph.traversal();
final Vertex vertex = g.V().has(NODE_ID, i).next();
return vertex;
}
}
|
package greycat.samples.Battleground;
import greycat.*;
import greycat.internal.task.math.MathExpressionEngine;
import greycat.internal.task.math.CoreMathExpressionEngine;
import static greycat.Tasks.*;
import static greycat.internal.task.CoreActions.*;
public class BattlegroundOntology {
public static void main(String[] args) {
//Create a minimal graph with the default configuration
Graph g = new GraphBuilder().build();
//Connect the graph
g.connect((Boolean isConnected) -> {
//Display that the graph database is connected!
System.out.println("Connected : " + isConnected);
//TRYING HERE !!!
newTask()
.loop("1","10",
newTask()
///Trying to math i
/*.thenDo(new ActionFunction() {
@Override
public void eval(TaskContext taskContext) {
MathExpressionEngine engine = CoreMathExpressionEngine.parse("5*4");
double res = engine.eval(null,null,null);
taskContext.continueTask();
}
})*/
//.then(inject(10))
//.then(defineAsVar("it"))
//.then(print("{{it}}"))
//.inject("{{=4*i}}")
.thenDo(new ActionFunction() {
@Override
public void eval(TaskContext ctx) {
ctx.continueWith(ctx.wrap(ctx.template("{{=4*i}}")).clone());
}
})
//.log("{{result}}")
// .then(inject(CoreMathExpressionEngine.parse("4*{{i}}").eval(null,null,null)))
.then(defineAsVar("res"))
.then(println("{{res}}"))
.then(createNode())
.then(setAttribute("name",Type.STRING,"node_{{i}}"))
.then(setAttribute("type",Type.STRING,"Tank"))
.then(setAttribute("power",Type.DOUBLE,"{{res}}"))
.then(travelInTime("0"))
.then(println("{{result}}")))
.execute(g,null);
newTask()
.loop("1","14",
newTask()
.then(createNode())
.then(setAttribute("name",Type.STRING,"node_{{i}}"))
.then(setAttribute("type",Type.STRING,"Target"))
.then(setAttribute("resilience",Type.DOUBLE,"{{i}}"))
.then(travelInTime("0"))
.then(println("{{result}}")))
.execute(g,null);
/*
loop("0", "3",
.newNode()
.setProperty("name", Type.STRING, "node_{{i}}")
.print("{{result}}")
).execute(g,null);
/////
Node sensor0 = g.newNode(0, 0); //create a new node for world 0 and time 0
sensor0.set("sensorId", Type.INT, 12); //set the id attribute as an integer
sensor0.set("name",Type.STRING, "sensor0"); //set the name attribute as a string
//Display the first node we created
System.out.println(sensor0); //print {"world":0,"time":0,"id":1,"sensorId":12,"name":"sensor0"}
Node room0 = g.newNode(0, 0); //create new node for world 0 and time 0
room0.set("name",Type.STRING, "room0"); //set the name attribute
room0.addToRelation("sensors", sensor0); //add the sensor0 to the relation sensors of room0
//Let's display the room0 node to see what's inside
System.out.println(room0); //print {"world":0,"time":0,"id":2,"name":"room0","sensors":[1]}
//iterate over the saved sensors relation from room0
room0.relation("sensors", (Node[] sensors) -> {
System.out.println("Relationship Sensors:");
for (Node sensor : sensors) {
System.out.println("\t" + sensor.toString());
}
//Disconnect the database
g.disconnect(result -> {
System.out.println("GoodBye!");
});
}); */
g.disconnect(result -> {
System.out.println("GoodBye!");
});
});
}
}
|
package io.github.thatsmusic99.headsplus.storage;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import io.github.thatsmusic99.headsplus.HeadsPlus;
import org.bukkit.OfflinePlayer;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import java.io.*;
public interface JSONFile {
String getName();
default void create() throws IOException {
File f = new File(HeadsPlus.getInstance().getDataFolder() + File.separator + "storage" + File.separator + getName() + ".json");
if (!f.exists()) {
new File(HeadsPlus.getInstance().getDataFolder() + File.separator + "storage").mkdirs();
f.createNewFile();
JSONObject o = new JSONObject();
Gson gson = new GsonBuilder().disableHtmlEscaping().setPrettyPrinting().create(); // Allows the file to look pretty
String s = gson.toJson(o); // Convert the JSONObject to a string
OutputStreamWriter fw = new OutputStreamWriter(new FileOutputStream(f)); // This is used to write the JSON to the file
try {
fw.write(s.replace("\u0026", "&")); // Then write it
} finally {
fw.flush(); // Before flushing and closing it!
fw.close();
}
}
}
default void read() throws IOException {
File f = new File(HeadsPlus.getInstance().getDataFolder() + File.separator + "storage" + File.separator + getName() + ".json");
try {
setJSON((JSONObject) new JSONParser().parse(new InputStreamReader(new FileInputStream(f))));
} catch (ParseException e) {
setJSON(new JSONObject());
}
}
void writeData(OfflinePlayer p, Object... values);
default void save() throws IOException {
File f = new File(HeadsPlus.getInstance().getDataFolder() + File.separator + "storage");
if (!f.exists()) {
f.mkdirs();
}
File jsonfile = new File(f + File.separator, getName() + ".json");
if (jsonfile.exists()) {
PrintWriter writer = new PrintWriter(jsonfile);
writer.print("");
writer.close();
}
Gson gson = new GsonBuilder().disableHtmlEscaping().setPrettyPrinting().create();
String s = gson.toJson(getJSON());
OutputStreamWriter fw = new OutputStreamWriter(new FileOutputStream(jsonfile));
try {
fw.write(s.replace("\u0026", "&"));
} finally {
fw.flush();
fw.close();
}
}
JSONObject getJSON();
Object getData(Object key);
void setJSON(JSONObject s);
}
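// Illustration only (hypothetical, not part of HeadsPlus): a minimal in-memory implementation of
// JSONFile showing which methods a concrete store has to provide; create(), read() and save()
// are inherited from the default methods above. The class name and the per-player key layout
// below are assumptions made for the example.
class ExampleHeadCountFile implements JSONFile {
    private JSONObject json = new JSONObject();
    @Override
    public String getName() {
        return "example-head-counts"; // persisted as storage/example-head-counts.json by create()/save()
    }
    @Override
    @SuppressWarnings("unchecked")
    public void writeData(OfflinePlayer p, Object... values) {
        // Store the first value under the player's UUID; callers are expected to invoke save() afterwards.
        json.put(p.getUniqueId().toString(), values.length > 0 ? values[0] : null);
    }
    @Override
    public JSONObject getJSON() {
        return json;
    }
    @Override
    public Object getData(Object key) {
        return json.get(key);
    }
    @Override
    public void setJSON(JSONObject s) {
        this.json = s;
    }
}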
|
package io.unicall.bgyun.controller;
import io.unicall.bgyun.entity.BgTYunOrderEntity;
import io.unicall.bgyun.entity.BgTYunProductEntity;
import io.unicall.bgyun.service.BgTYunOrderService;
import io.unicall.bgyun.service.BgTYunProductService;
import io.unicall.utils.PageUtils;
import io.unicall.utils.Query;
import io.unicall.utils.R;
import io.unicall.utils.annotation.SysLog;
import io.yun.utils.FileUtils;
import org.apache.ibatis.annotations.Param;
import org.apache.shiro.authz.annotation.RequiresPermissions;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
*
*
* @author maxellen
* @email bq.zhu@unicall.net.cn
* @date 2017-06-30 14:40:55
*/
@RestController
@RequestMapping("bgyun/order")
public class BgTYunOrderController {
@Autowired
private BgTYunOrderService bgTYunOrderService;
@Autowired
private FileUtils fileUtils;
// Supported query parameters: page, orderNo, idList
@RequestMapping("/list")
public R list(@RequestParam Map<String, Object> params){
Query query = new Query(params);
List<Object> l = bgTYunOrderService.queryList(query);
int total = bgTYunOrderService.queryTotal(query);
PageUtils pageUtil = new PageUtils(l, total, query.getLimit(), query.getPage());
return R.ok().put("page", pageUtil);
}
//orderNo
@RequestMapping("/orderByOrderNo/{orderNo}")
public R getOrderByOrderNo(@PathVariable("orderNo") String orderNo){
return R.ok().put("order",bgTYunOrderService.queryByOrderNo(orderNo));
}
@RequestMapping("/orderById")
public R getOrderById(@RequestParam String id){
return R.ok().put("order",bgTYunOrderService.queryObject(id));
}
//orderNo
@RequestMapping("/updateDeliverStatus/{orderNo}")
public R updateDeliverStatus(@PathVariable("orderNo") String orderNo){
bgTYunOrderService.updateDeliverStatus(orderNo);
return R.ok();
}
@RequestMapping(value = "test")
public String test(){
return "test";
}
}
|
package li.strolch.persistence.impl;
import java.io.File;
import java.util.Properties;
import li.strolch.model.Order;
import li.strolch.model.Resource;
import li.strolch.model.Tags;
import li.strolch.persistence.api.OrderDao;
import li.strolch.persistence.api.ResourceDao;
import li.strolch.persistence.api.StrolchPersistenceHandler;
import li.strolch.persistence.api.StrolchTransaction;
import li.strolch.persistence.impl.model.OrderContextFactory;
import li.strolch.persistence.impl.model.ResourceContextFactory;
import li.strolch.runtime.component.StrolchComponent;
import li.strolch.runtime.configuration.ComponentConfiguration;
import ch.eitchnet.xmlpers.api.IoMode;
import ch.eitchnet.xmlpers.api.PersistenceConstants;
import ch.eitchnet.xmlpers.api.PersistenceManager;
import ch.eitchnet.xmlpers.api.PersistenceManagerLoader;
import ch.eitchnet.xmlpers.api.PersistenceTransaction;
/**
* @author Robert von Burg <eitch@eitchnet.ch>
*
*/
public class XmlPersistenceHandler extends StrolchComponent implements StrolchPersistenceHandler {
public static final String DB_STORE_PATH = "dbStore/"; //$NON-NLS-1$
private PersistenceManager persistenceManager;
public XmlPersistenceHandler() {
super(StrolchPersistenceHandler.class.getName());
}
@Override
public void initialize(ComponentConfiguration componentConfiguration) {
File basePathF = componentConfiguration.getRuntimeConfiguration().getRootPath();
File dbStorePathF = new File(basePathF, DB_STORE_PATH);
Properties properties = new Properties();
properties.setProperty(PersistenceConstants.PROP_VERBOSE, "true"); //$NON-NLS-1$
properties.setProperty(PersistenceConstants.PROP_XML_IO_MOD, IoMode.DOM.name());
properties.setProperty(PersistenceConstants.PROP_BASEPATH, dbStorePathF.getAbsolutePath());
this.persistenceManager = PersistenceManagerLoader.load(properties);
this.persistenceManager.getCtxFactory().registerPersistenceContextFactory(Resource.class, Tags.RESOURCE,
new ResourceContextFactory());
this.persistenceManager.getCtxFactory().registerPersistenceContextFactory(Order.class, Tags.ORDER,
new OrderContextFactory());
}
public StrolchTransaction openTx() {
return openTx(PersistenceManager.DEFAULT_REALM);
}
@SuppressWarnings("resource")
// caller must close
public StrolchTransaction openTx(String realm) {
PersistenceTransaction tx = this.persistenceManager.openTx(realm);
XmlStrolchTransaction strolchTx = new XmlStrolchTransaction(tx);
return strolchTx;
}
@Override
public OrderDao getOrderDao(StrolchTransaction tx) {
return new XmlOrderDao(tx);
}
@Override
public ResourceDao getResourceDao(StrolchTransaction tx) {
return new XmlResourceDao(tx);
}
}
|
package mx.infotec.dads.sekc.config;
import mx.infotec.dads.sekc.security.*;
import mx.infotec.dads.sekc.security.jwt.*;
import static mx.infotec.dads.sekc.web.rest.util.ApiConstant.API_PATH;
import io.github.jhipster.security.*;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpMethod;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.builders.WebSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.core.session.SessionRegistry;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.data.repository.query.SecurityEvaluationContextExtension;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
import org.springframework.web.filter.CorsFilter;
import javax.annotation.PostConstruct;
@Configuration
@EnableWebSecurity
@EnableGlobalMethodSecurity(prePostEnabled = true, securedEnabled = true)
public class SecurityConfiguration extends WebSecurityConfigurerAdapter {
private final AuthenticationManagerBuilder authenticationManagerBuilder;
private final UserDetailsService userDetailsService;
private final TokenProvider tokenProvider;
private final SessionRegistry sessionRegistry;
private final CorsFilter corsFilter;
public SecurityConfiguration(AuthenticationManagerBuilder authenticationManagerBuilder, UserDetailsService userDetailsService,
TokenProvider tokenProvider, SessionRegistry sessionRegistry,
CorsFilter corsFilter) {
this.authenticationManagerBuilder = authenticationManagerBuilder;
this.userDetailsService = userDetailsService;
this.tokenProvider = tokenProvider;
this.sessionRegistry = sessionRegistry;
this.corsFilter = corsFilter;
}
@PostConstruct
public void init() {
try {
authenticationManagerBuilder
.userDetailsService(userDetailsService)
.passwordEncoder(passwordEncoder());
} catch (Exception e) {
throw new BeanInitializationException("Security configuration failed", e);
}
}
@Bean
public Http401UnauthorizedEntryPoint http401UnauthorizedEntryPoint() {
return new Http401UnauthorizedEntryPoint();
}
@Bean
public PasswordEncoder passwordEncoder() {
return new BCryptPasswordEncoder();
}
@Override
public void configure(WebSecurity web) throws Exception {
web.ignoring()
.antMatchers("/app*.{js,html}")
|
package net.atos.entng.forum.events;
import static net.atos.entng.forum.Forum.CATEGORY_COLLECTION;
import static net.atos.entng.forum.Forum.SUBJECT_COLLECTION;
import static net.atos.entng.forum.Forum.MANAGE_RIGHT_ACTION;
import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;
import fr.wseduc.mongodb.MongoDb;
import fr.wseduc.mongodb.MongoQueryBuilder;
import fr.wseduc.mongodb.MongoUpdateBuilder;
import fr.wseduc.webutils.Either;
import io.vertx.core.Vertx;
import io.vertx.core.eventbus.Message;
import net.atos.entng.forum.Forum;
import org.entcore.common.mongodb.MongoDbConf;
import org.entcore.common.mongodb.MongoDbResult;
import org.entcore.common.service.impl.MongoDbRepositoryEvents;
import org.entcore.common.user.RepositoryEvents;
import io.vertx.core.Handler;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
public class ForumRepositoryEvents extends MongoDbRepositoryEvents {
private static final Logger log = LoggerFactory.getLogger(ForumRepositoryEvents.class);
private final MongoDb mongo = MongoDb.getInstance();
public ForumRepositoryEvents(Vertx vertx) {
super(vertx);
}
@Override
public void exportResources(String exportId, String userId, JsonArray g, String exportPath, String locale,
String host, Handler<Boolean> handler)
{
QueryBuilder findByOwner = QueryBuilder.start("owner.userId").is(userId);
QueryBuilder findByShared = QueryBuilder.start().or(
QueryBuilder.start("shared.userId").is(userId).get(),
QueryBuilder.start("shared.groupId").in(g).get()
);
QueryBuilder findByAuthorOrOwnerOrShared = QueryBuilder.start().or(findByOwner.get(),findByShared.get());
final JsonObject query = MongoQueryBuilder.build(findByAuthorOrOwnerOrShared);
final AtomicBoolean exported = new AtomicBoolean(false);
mongo.find(Forum.CATEGORY_COLLECTION, query, new Handler<Message<JsonObject>>()
{
@Override
public void handle(Message<JsonObject> event)
{
JsonArray results = event.body().getJsonArray("results");
if ("ok".equals(event.body().getString("status")) && results != null)
{
results.forEach(elem ->
{
JsonObject cat = ((JsonObject) elem);
cat.put("name", "cat_" + cat.getString("name"));
});
final Set<String> ids = results.stream().map(res -> ((JsonObject)res).getString("_id")).collect(Collectors.toSet());
QueryBuilder findByCategoryId = QueryBuilder.start("category").in(ids);
JsonObject query2 = MongoQueryBuilder.build(findByCategoryId);
mongo.find(Forum.SUBJECT_COLLECTION, query2, new Handler<Message<JsonObject>>()
{
@Override
public void handle(Message<JsonObject> event2)
{
JsonArray results2 = event2.body().getJsonArray("results");
if ("ok".equals(event2.body().getString("status")) && results2 != null)
{
results2.forEach(elem ->
{
JsonObject cat = ((JsonObject) elem);
cat.put("title", "sub_" + cat.getString("title"));
});
createExportDirectory(exportPath, locale, new Handler<String>()
{
@Override
public void handle(String path)
{
if (path != null)
{
exportDocumentsDependancies(results.addAll(results2), path, new Handler<Boolean>()
{
@Override
public void handle(Boolean bool)
{
if (bool)
{
exportFiles(results, path, new HashSet<String>(), exported, handler);
}
else
{
// Should never happen: the export does not fail when the documents export fails.
handler.handle(exported.get());
}
}
});
}
else
{
handler.handle(exported.get());
}
}
});
}
else
{
log.error(title + " : Could not process query " + query2.encode(), event2.body().getString("message"));
handler.handle(exported.get());
}
}
});
}
else
{
log.error(title + " : Could not process query " + query.encode(), event.body().getString("message"));
handler.handle(exported.get());
}
}
});
}
@Override
public void deleteGroups(JsonArray groups) {
if(groups == null || groups.size() == 0) {
log.warn("[ForumRepositoryEvents][deleteGroups] JsonArray groups is null or empty");
return;
}
final String [] groupIds = new String[groups.size()];
for (int i = 0; i < groups.size(); i++) {
JsonObject j = groups.getJsonObject(i);
groupIds[i] = j.getString("group");
}
final JsonObject matcher = MongoQueryBuilder.build(QueryBuilder.start("shared.groupId").in(groupIds));
MongoUpdateBuilder modifier = new MongoUpdateBuilder();
modifier.pull("shared", MongoQueryBuilder.build(QueryBuilder.start("groupId").in(groupIds)));
// remove all the shares with groups
mongo.update(CATEGORY_COLLECTION, matcher, modifier.build(), false, true, MongoDbResult.validActionResultHandler(new Handler<Either<String,JsonObject>>() {
@Override
public void handle(Either<String, JsonObject> event) {
if (event.isRight()) {
log.info("[ForumRepositoryEvents][deleteGroups] All groups shares are removed");
} else {
log.error("[ForumRepositoryEvents][deleteGroups] Error removing groups shares. Message : " + event.left().getValue());
}
}
}));
}
@Override
public void deleteUsers(JsonArray users) {
//FIXME: anonymization is not relevant
if(users == null || users.size() == 0) {
log.warn("[ForumRepositoryEvents][deleteUsers] JsonArray users is null or empty");
return;
}
final String [] usersIds = new String[users.size()];
for (int i = 0; i < users.size(); i++) {
JsonObject j = users.getJsonObject(i);
usersIds[i] = j.getString("id");
}
/* Clean the database:
- first, remove the shares of all the categories shared with (usersIds)
- then, get the identifiers of the categories that have no manager left and whose owner is deleted or being deleted
- delete all these categories
- delete all the subjects that no longer belong to a category
- finally, tag all the users as deleted in their own categories
*/
ForumRepositoryEvents.this.removeSharesCategories(usersIds);
}
/**
* Remove the shares of categories with a list of users
* if OK, Call prepareCleanCategories()
* @param usersIds users identifiers
*/
private void removeSharesCategories(final String [] usersIds){
final JsonObject criteria = MongoQueryBuilder.build(QueryBuilder.start("shared.userId").in(usersIds));
MongoUpdateBuilder modifier = new MongoUpdateBuilder();
modifier.pull("shared", MongoQueryBuilder.build(QueryBuilder.start("userId").in(usersIds)));
// Remove Categories shares with these users
mongo.update(CATEGORY_COLLECTION, criteria, modifier.build(), false, true, MongoDbResult.validActionResultHandler(new Handler<Either<String,JsonObject>>() {
@Override
public void handle(Either<String, JsonObject> event) {
if (event.isRight()) {
log.info("[ForumRepositoryEvents][removeSharesCategories] All categories shares with users are removed");
ForumRepositoryEvents.this.prepareCleanCategories(usersIds);
} else {
log.error("[ForumRepositoryEvents][removeSharesCategories] Error removing categories shares with users. Message : " + event.left().getValue());
}
}
}));
}
/**
* Prepare a list of categories identifiers
* if OK, Call cleanCategories()
* @param usersIds users identifiers
*/
private void prepareCleanCategories(final String [] usersIds) {
DBObject deletedUsers = new BasicDBObject();
// users currently deleted
deletedUsers.put("owner.userId", new BasicDBObject("$in", usersIds));
// users who have already been deleted
DBObject ownerIsDeleted = new BasicDBObject("owner.deleted", true);
// no manager found
JsonObject matcher = MongoQueryBuilder.build(QueryBuilder.start("shared." + MANAGE_RIGHT_ACTION).notEquals(true).or(deletedUsers, ownerIsDeleted));
// return only categories identifiers
JsonObject projection = new JsonObject().put("_id", 1);
mongo.find(CATEGORY_COLLECTION, matcher, null, projection, MongoDbResult.validResultsHandler(new Handler<Either<String,JsonArray>>() {
@Override
public void handle(Either<String, JsonArray> event) {
if (event.isRight()) {
JsonArray categories = event.right().getValue();
if(categories == null || categories.size() == 0) {
log.info("[ForumRepositoryEvents][prepareCleanCategories] No categorie to delete");
return;
}
final String[] categoriesIds = new String[categories.size()];
for (int i = 0; i < categories.size(); i++) {
JsonObject j = categories.getJsonObject(i);
categoriesIds[i] = j.getString("_id");
}
ForumRepositoryEvents.this.cleanCategories(usersIds, categoriesIds);
} else {
log.error("[ForumRepositoryEvents][prepareCleanCategories] Error retreving the categories created by users. Message : " + event.left().getValue());
}
}
}));
}
/**
* Delete categories by identifier
* if OK, call cleanSubjects() and tagUsersAsDeleted()
* @param usersIds users identifiers, used for tagUsersAsDeleted()
* @param categoriesIds categories identifiers
*/
private void cleanCategories(final String [] usersIds, final String [] categoriesIds) {
JsonObject matcher = MongoQueryBuilder.build(QueryBuilder.start("_id").in(categoriesIds));
mongo.delete(CATEGORY_COLLECTION, matcher, MongoDbResult.validActionResultHandler(new Handler<Either<String,JsonObject>>() {
@Override
public void handle(Either<String, JsonObject> event) {
if (event.isRight()) {
log.info("[ForumRepositoryEvents][cleanCategories] The categories created by users are deleted");
ForumRepositoryEvents.this.cleanSubjects(categoriesIds);
ForumRepositoryEvents.this.tagUsersAsDeleted(usersIds);
} else {
log.error("[ForumRepositoryEvents][cleanCategories] Error deleting the categories created by users. Message : " + event.left().getValue());
}
}
}));
}
/**
* Delete subjects by category identifier
* @param categoriesIds categories identifiers
*/
private void cleanSubjects(final String [] categoriesIds) {
JsonObject matcher = MongoQueryBuilder.build(QueryBuilder.start("category").in(categoriesIds));
mongo.delete(SUBJECT_COLLECTION, matcher, MongoDbResult.validActionResultHandler(new Handler<Either<String,JsonObject>>() {
@Override
public void handle(Either<String, JsonObject> event) {
if (event.isRight()) {
log.info("[ForumRepositoryEvents][cleanSubjects] The subjects created by users are deleted");
} else {
log.error("[ForumRepositoryEvents][cleanSubjects] Error deleting the subjects created by users. Message : " + event.left().getValue());
}
}
}));
}
/**
* Tag as deleted a list of users in their own categories
* @param usersIds users identifiers
*/
private void tagUsersAsDeleted(final String[] usersIds) {
final JsonObject criteria = MongoQueryBuilder.build(QueryBuilder.start("owner.userId").in(usersIds));
MongoUpdateBuilder modifier = new MongoUpdateBuilder();
modifier.set("owner.deleted", true);
mongo.update(CATEGORY_COLLECTION, criteria, modifier.build(), false, true, MongoDbResult.validActionResultHandler(new Handler<Either<String,JsonObject>>() {
@Override
public void handle(Either<String, JsonObject> event) {
if (event.isRight()) {
log.info("[ForumRepositoryEvents][deleteCategoriesUser] users are tagged as deleted in their own categories");
} else {
log.error("[ForumRepositoryEvents][deleteCategoriesUser] Error tagging as deleted users. Message : " + event.left().getValue());
}
}
}));
}
}
|
package net.engio.mbassy.subscription;
import net.engio.mbassy.bus.BusRuntime;
import net.engio.mbassy.common.ReflectionUtils;
import net.engio.mbassy.common.StrongConcurrentSet;
import net.engio.mbassy.listener.MessageHandler;
import net.engio.mbassy.listener.MetadataReader;
import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
public class SubscriptionManager {
// the metadata reader that is used to inspect objects passed to the subscribe method
private final MetadataReader metadataReader;
// all subscriptions per message type
// this is the primary list for dispatching a specific message
// write access is synchronized and happens only when a listener of a specific class is registered the first time
private final Map<Class, ArrayList<Subscription>> subscriptionsPerMessage;
// all subscriptions per messageHandler type
// this map provides fast access for subscribing and unsubscribing
// write access is synchronized and happens very infrequently
// once a collection of subscriptions is stored it does not change
private final Map<Class, Subscription[]> subscriptionsPerListener;
// remember already processed classes that do not contain any message handlers
private final StrongConcurrentSet<Class> nonListeners = new StrongConcurrentSet<Class>();
// this factory is used to create specialized subscriptions based on the given message handler configuration
// it can be customized by implementing the getSubscriptionFactory() method
private final SubscriptionFactory subscriptionFactory;
// synchronize read/write access to the subscription maps
private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final BusRuntime runtime;
public SubscriptionManager(MetadataReader metadataReader, SubscriptionFactory subscriptionFactory, BusRuntime runtime) {
this.metadataReader = metadataReader;
this.subscriptionFactory = subscriptionFactory;
this.runtime = runtime;
subscriptionsPerMessage = new HashMap<Class, ArrayList<Subscription>>(64);
subscriptionsPerListener = new HashMap<Class, Subscription[]>(64);
}
public boolean unsubscribe(Object listener) {
if (listener == null) {
return false;
}
Subscription[] subscriptions = getSubscriptionsByListener(listener);
if (subscriptions == null) {
return false;
}
boolean isRemoved = true;
for (Subscription subscription : subscriptions) {
isRemoved &= subscription.unsubscribe(listener);
}
return isRemoved;
}
private Subscription[] getSubscriptionsByListener(Object listener) {
Subscription[] subscriptions;
ReadLock readLock = readWriteLock.readLock();
try {
readLock.lock();
subscriptions = subscriptionsPerListener.get(listener.getClass());
} finally {
readLock.unlock();
}
return subscriptions;
}
public void subscribe(Object listener) {
try {
Class<?> listenerClass = listener.getClass();
if (nonListeners.contains(listenerClass)) {
return; // early reject of known classes that do not define message handlers
}
Subscription[] subscriptionsByListener = getSubscriptionsByListener(listener);
// a listener is either subscribed for the first time
if (subscriptionsByListener == null) {
MessageHandler[] messageHandlers = metadataReader.getMessageListener(listenerClass).getHandlers();
int length = messageHandlers.length;
if (length == 0) { // remember the class as non listening class if no handlers are found
nonListeners.add(listenerClass);
return;
}
subscriptionsByListener = new Subscription[length]; // it's safe to use non-concurrent collection here (read only)
// create subscriptions for all detected message handlers
MessageHandler messageHandler;
for (int i=0; i<length; i++) {
messageHandler = messageHandlers[i];
subscriptionsByListener[i] = subscriptionFactory.createSubscription(runtime, messageHandler);
}
// this will acquire a write lock and handle the case when another thread already subscribed
// this particular listener in the meantime
subscribe(listener, subscriptionsByListener);
} // or the subscriptions already exist and must only be updated
else {
for (Subscription sub : subscriptionsByListener) {
sub.subscribe(listener);
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void subscribe(Object listener, Subscription[] subscriptions) {
WriteLock writeLock = readWriteLock.writeLock();
try {
writeLock.lock();
// basically this is a deferred double check
// it's an ugly pattern but necessary because atomic upgrade from read to write lock
// is not possible
// the alternative of using a write lock from the beginning would decrease performance dramatically
// because of the huge number of reads compared to writes
Subscription[] subscriptionsByListener = getSubscriptionsByListener(listener);
if (subscriptionsByListener == null) {
for (int i=0, n=subscriptions.length; i<n; i++) {
Subscription subscription = subscriptions[i];
subscription.subscribe(listener);
for (Class<?> messageType : subscription.getHandledMessageTypes()) {
// associate a subscription with a message type
ArrayList<Subscription> subscriptions2 = subscriptionsPerMessage.get(messageType);
if (subscriptions2 == null) {
subscriptions2 = new ArrayList<Subscription>(8);
subscriptionsPerMessage.put(messageType, subscriptions2);
}
subscriptions2.add(subscription);
}
}
subscriptionsPerListener.put(listener.getClass(), subscriptions);
}
// the rare case when multiple threads concurrently subscribed the same class for the first time
// one will be first; all the others will have to subscribe to the existing subscriptions instead of the ones they created
else {
for (int i=0, n=subscriptionsByListener.length; i<n; i++) {
Subscription existingSubscription = subscriptionsByListener[i];
existingSubscription.subscribe(listener);
}
}
} finally {
writeLock.unlock();
}
}
// obtain the set of subscriptions for the given message type
// Note: never returns null!
public Collection<Subscription> getSubscriptionsByMessageType(Class messageType) {
Set<Subscription> subscriptions = new TreeSet<Subscription>(Subscription.SubscriptionByPriorityDesc);
ReadLock readLock = readWriteLock.readLock();
try {
readLock.lock();
Subscription subscription;
ArrayList<Subscription> subsPerMessage = subscriptionsPerMessage.get(messageType);
if (subsPerMessage != null) {
subscriptions.addAll(subsPerMessage);
}
Class[] types = ReflectionUtils.getSuperTypes(messageType);
for (int i=0, n=types.length; i<n; i++) {
Class eventSuperType = types[i];
ArrayList<Subscription> subs = subscriptionsPerMessage.get(eventSuperType);
if (subs != null) {
for (int j = 0,m=subs.size(); j<m; j++) {
subscription = subs.get(j);
if (subscription.handlesMessageType(messageType)) {
subscriptions.add(subscription);
}
}
}
}
}finally{
readLock.unlock();
}
return subscriptions;
}
}
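// Illustration only (not part of this bus implementation): a generic sketch of the
// "deferred double check" locking pattern used in subscribe(Object, Subscription[]) above.
// Reads take the cheap read lock; only a missing entry escalates to the write lock, where the
// lookup is repeated because a read lock cannot be atomically upgraded to a write lock.
class ReadWriteDoubleCheckSketch<K, V> {
    private final Map<K, V> values = new HashMap<K, V>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    V getOrCreate(K key, java.util.concurrent.Callable<V> factory) throws Exception {
        lock.readLock().lock();
        try {
            V existing = values.get(key);
            if (existing != null) {
                return existing; // fast path: the vast majority of calls only read
            }
        } finally {
            lock.readLock().unlock();
        }
        lock.writeLock().lock();
        try {
            // re-check: another thread may have created the value between releasing the
            // read lock and acquiring the write lock
            V existing = values.get(key);
            if (existing == null) {
                existing = factory.call();
                values.put(key, existing);
            }
            return existing;
        } finally {
            lock.writeLock().unlock();
        }
    }
}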
|
package net.finmath.randomnumbers;
import java.io.Serializable;
import java.util.function.DoubleSupplier;
/**
* Interface for a 1-dimensional random number generator
* generating a sequence of values sampling the unit interval [0,1].
*
* @author Christian Fries
* @version 1.0
*/
public interface RandomNumberGenerator1D extends RandomNumberGenerator, DoubleSupplier, Serializable {
double nextDouble();
@Override
default double[] getNext() {
return new double[] { nextDouble() };
}
@Override
default int getDimension() {
return 1;
}
// Alias to function as <code>DoubleSupplier</code>
@Override
default double getAsDouble() {
return nextDouble();
}
}
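// Illustration only (not part of finmath-lib): a minimal implementation of RandomNumberGenerator1D
// backed by java.util.Random. It assumes that the parent RandomNumberGenerator interface declares
// only getNext() and getDimension(), both of which are covered by the default methods above,
// so nextDouble() is the only method left to implement.
class PseudoRandomNumberGenerator1D implements RandomNumberGenerator1D {
    private static final long serialVersionUID = 1L;
    private final java.util.Random random;
    PseudoRandomNumberGenerator1D(long seed) {
        this.random = new java.util.Random(seed);
    }
    @Override
    public double nextDouble() {
        return random.nextDouble(); // uniformly distributed in [0,1)
    }
}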
|
package test;
import gov.nih.nci.security.cgmm.CGMMManager;
import gov.nih.nci.security.cgmm.CGMMManagerImpl;
import gov.nih.nci.security.cgmm.exceptions.CGMMConfigurationException;
import java.util.SortedMap;
public class CGMMManagerTest {
public static void main(String[] args) {
System.setProperty("gov.nih.nci.security.cgmm.syncgts.file","C:/Vijay/software/jboss-4.0.5.GA/server/default/cgmm_config/sync-description.xml");
System.setProperty("gov.nih.nci.security.cgmm.properties.file","C:/Vijay/software/jboss-4.0.5.GA/server/default/cgmm_config/cgmm-properties.xml");
System.setProperty("gov.nih.nci.security.cgmm.login.config.file","C:/Vijay/software/jboss-4.0.5.GA/server/default/cgmm_config/cgmm.login.config");
System.setProperty("gov.nih.nci.security.configFile","C:/Vijay/software/jboss-4.0.5.GA/server/default/cgmm_config/ApplicationSecurityConfig.xml");
// Get AuthenticationService URL Info.
CGMMManager cgmmManager=null;
try {
cgmmManager = new CGMMManagerImpl();
} catch (CGMMConfigurationException e) {
e.printStackTrace();
}
SortedMap authenticationServiceURLMap =null;
try {
authenticationServiceURLMap = cgmmManager.getAuthenticationServiceURLMap();
if(authenticationServiceURLMap==null) System.out.println(" Map is null");
else {
System.out.println(" Got Map");
}
} catch (CGMMConfigurationException e) {
e.printStackTrace();
}
}
}
|
package net.sniperlegacy.apparatus.blocks;
import net.minecraft.block.Block;
import net.minecraft.block.material.Material;
import net.minecraft.block.state.IBlockState;
import net.minecraft.item.Item;
import net.minecraft.util.ResourceLocation;
import net.sniperlegacy.apparatus.Apparatus;
import net.sniperlegacy.apparatus.Document;
import java.util.Random;
public class BlockRheniumOre extends Block {
public BlockRheniumOre(String p_unlocalizedName, String p_registryName) {
super(Material.ROCK);
this.setUnlocalizedName(p_unlocalizedName);
this.setRegistryName(new ResourceLocation(Document.MOD_ID, p_registryName));
this.setHardness(3F);
this.setResistance(5F);
this.setHarvestLevel("pickaxe", 2);
}
@Override
public Item getItemDropped(IBlockState p_state, Random p_rand, int p_fortune) {
return Item.getItemFromBlock(this);
}
}
|
package net.vexelon.currencybg.srv.db.models;
import java.util.Date;
public class CurrencyData {
private String code;
private int ratio = 0; // default
private String buy = "0"; // default
private String sell = "0"; // default
private Date date;
private int source;
public CurrencyData() {
}
public CurrencyData(String code, int ratio, String buy, String sell, Date date, int source) {
super();
this.code = code;
this.ratio = ratio;
this.buy = buy;
this.sell = sell;
this.date = date;
this.source = source;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
public int getRatio() {
return ratio;
}
public void setRatio(int ratio) {
this.ratio = ratio;
}
public String getBuy() {
return buy;
}
public void setBuy(String buy) {
this.buy = buy;
}
public String getSell() {
return sell;
}
public void setSell(String sell) {
this.sell = sell;
}
public Date getDate() {
return date;
}
public void setDate(Date date) {
this.date = date;
}
public int getSource() {
return source;
}
public void setSource(int source) {
this.source = source;
}
@Override
public String toString() {
return "CurrencyData [code=" + code + ", ratio=" + ratio + ", buy=" + buy + ", sell=" + sell + ", date=" + date
+ ", source=" + source + "]";
}
}
|
package nl.hsac.fitnesse.fixture.util.selenium.by;
import org.openqa.selenium.By;
import org.openqa.selenium.SearchContext;
import org.openqa.selenium.WebElement;
public class LabelBy extends SingleElementOrNullBy {
public static By exact(String text) {
return new LabelBy.Exact(text);
}
public static By partial(String partialText) {
return new LabelBy.Partial(partialText);
}
/**
* Finds by exact label text.
*/
public static class Exact extends LabelBy {
public Exact(String text) {
super(".//label/descendant-or-self::text()[normalized(.)='%s']/ancestor-or-self::label", text);
}
}
/**
* Finds by partial label text.
*/
public static class Partial extends LabelBy {
public Partial(String partialText) {
super(".//label/descendant-or-self::text()[contains(normalized(.), '%s')]/ancestor-or-self::label", partialText);
}
}
private final SingleElementOrNullBy by;
protected LabelBy(String xpath, String... parameters) {
this(createXPathBy(xpath, parameters));
}
protected LabelBy(SingleElementOrNullBy by) {
this.by = by;
}
@Override
public WebElement findElement(SearchContext context) {
WebElement label = by.findElement(context);
WebElement element = getLabelledElement(label);
return element;
}
public static WebElement getLabelledElement(WebElement label) {
WebElement element = null;
if (label != null) {
String forAttr = label.getAttribute("for");
if (forAttr == null || "".equals(forAttr)) {
element = ConstantBy.nestedElementForValue().findElement(label);
} else {
element = new BestMatchBy(By.id(forAttr)).findElement(label);
}
}
return element;
}
private static BestMatchBy createXPathBy(String pattern, String... parameters) {
return new BestMatchBy(new XPathBy(pattern, parameters));
}
}
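// Illustration only (hypothetical usage sketch): locating form inputs through their <label> text.
// The driver instance, the label texts and the values typed below are assumptions; the point is
// that the By returned by exact()/partial() resolves to the labelled element, either via the
// label's "for" attribute or via nesting (see getLabelledElement above).
class LabelByUsageSketch {
    void fillLoginForm(org.openqa.selenium.WebDriver driver) {
        WebElement username = driver.findElement(LabelBy.exact("Username"));
        username.sendKeys("jdoe");
        WebElement password = driver.findElement(LabelBy.partial("Pass"));
        password.sendKeys("secret");
    }
}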
|
package org.chocosolver.util.objects.graphs;
import org.chocosolver.solver.Model;
import org.chocosolver.util.objects.setDataStructures.ISet;
import org.chocosolver.util.objects.setDataStructures.ISetIterator;
import org.chocosolver.util.objects.setDataStructures.SetFactory;
import org.chocosolver.util.objects.setDataStructures.SetType;
/**
* Directed graph implementation : arcs are indexed per endpoints
* @author Jean-Guillaume Fages, Xavier Lorca
*/
public class DirectedGraph implements IGraph {
    // Graph state referenced by the constructors and accessors below:
    // per-node successor/predecessor sets, the node set, the capacity n and the set data structure type.
    private ISet[] successors;
    private ISet[] predecessors;
    private ISet nodes;
    private int n;
    private SetType type;
/**
* Creates an empty graph.
* Allocates memory for n nodes (but they should then be added explicitly,
* unless allNodes is true).
*
* @param n maximum number of nodes
* @param type data structure to use for representing node successors and predecessors
* @param allNodes true iff all nodes must always remain present in the graph.
* i.e. The node set is fixed to [0,n-1] and will never change
*/
public DirectedGraph(int n, SetType type, boolean allNodes) {
this.type = type;
this.n = n;
predecessors = new ISet[n];
successors = new ISet[n];
for (int i = 0; i < n; i++) {
predecessors[i] = SetFactory.makeSet(type, 0);
successors[i] = SetFactory.makeSet(type, 0);
}
if (allNodes) {
this.nodes = SetFactory.makeConstantSet(0,n-1);
} else {
this.nodes = SetFactory.makeBitSet(0);
}
}
/**
* Creates an empty backtrackable graph of n nodes
* Allocates memory for n nodes (but they should then be added explicitly,
* unless allNodes is true).
*
* @param model model providing the backtracking environment
* @param n maximum number of nodes
* @param type data structure to use for representing node successors and predecessors
* @param allNodes true iff all nodes must always remain present in the graph
*/
public DirectedGraph(Model model, int n, SetType type, boolean allNodes) {
this.n = n;
this.type = type;
predecessors = new ISet[n];
successors = new ISet[n];
for (int i = 0; i < n; i++) {
predecessors[i] = SetFactory.makeStoredSet(type, 0, model);
successors[i] = SetFactory.makeStoredSet(type, 0, model);
}
if (allNodes) {
this.nodes = SetFactory.makeConstantSet(0,n-1);
} else {
this.nodes = SetFactory.makeStoredSet(SetType.BITSET, 0, model);
}
}
/**
* remove arc (from,to) from the graph
*
* @param from a node index
* @param to a node index
* @return true iff arc (from,to) was in the graph
*/
public boolean removeArc(int from, int to) {
if (successors[from].contains(to)) {
assert (predecessors[to].contains(from)) : "incoherent directed graph";
return successors[from].remove(to) | predecessors[to].remove(from);
}
return false;
}
/**
* Test whether arc (from,to) exists or not in the graph
*
* @param from a node index
* @param to a node index
* @return true iff arc (from,to) exists in the graph
*/
public boolean arcExists(int from, int to) {
if (successors[from].contains(to)) {
assert (predecessors[to].contains(from)) : "incoherent directed graph";
return true;
}
return false;
}
@Override
public boolean isArcOrEdge(int from, int to) {
return arcExists(from, to);
}
@Override
public boolean isDirected() {
return true;
}
/**
* add arc (from,to) to the graph
*
* @param from a node index
* @param to a node index
* @return true iff arc (from,to) was not already in the graph
*/
public boolean addArc(int from, int to) {
addNode(from);
addNode(to);
if (!successors[from].contains(to)) {
assert (!predecessors[to].contains(from)) : "incoherent directed graph";
return successors[from].add(to) & predecessors[to].add(from);
}
return false;
}
/**
* Get successors of node x
*
* @param x node index
* @return successors of x
*/
public ISet getSuccOf(int x) {
return successors[x];
}
@Override
public ISet getSuccOrNeighOf(int x) {
return successors[x];
}
/**
* Get predecessors of node x
*
* @param x node index
* @return predecessors of x
*/
public ISet getPredOf(int x) {
return predecessors[x];
}
@Override
public ISet getPredOrNeighOf(int x) {
return predecessors[x];
}
}
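// Illustration only: a small sketch exercising the arc API shown above, using the constructor
// without a Model (no backtracking) and a fixed node set. It assumes the rest of the class
// (node bookkeeping such as addNode) behaves as the javadoc describes.
class DirectedGraphSketch {
    static void demo() {
        DirectedGraph g = new DirectedGraph(3, SetType.BITSET, true); // nodes fixed to {0,1,2}
        g.addArc(0, 1);
        g.addArc(1, 2);
        assert g.arcExists(0, 1);
        assert !g.arcExists(1, 0);            // arcs are directed
        assert g.getSuccOf(1).contains(2);    // 2 is a successor of 1
        assert g.getPredOf(2).contains(1);    // 1 is a predecessor of 2
        g.removeArc(0, 1);
        assert !g.arcExists(0, 1);
    }
}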
|
package org.cryptomator.cryptofs;
import org.cryptomator.cryptofs.ch.AsyncDelegatingFileChannel;
import org.cryptomator.cryptofs.common.Constants;
import org.cryptomator.cryptofs.common.FileSystemCapabilityChecker;
import org.cryptomator.cryptolib.api.Cryptor;
import org.cryptomator.cryptolib.api.Masterkey;
import org.cryptomator.cryptolib.api.MasterkeyLoadingFailedException;
import java.io.IOException;
import java.net.URI;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.channels.FileChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.AccessMode;
import java.nio.file.CopyOption;
import java.nio.file.DirectoryStream;
import java.nio.file.DirectoryStream.Filter;
import java.nio.file.FileStore;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.NotDirectoryException;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.ProviderMismatchException;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.FileAttributeView;
import java.nio.file.spi.FileSystemProvider;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import static java.nio.file.StandardOpenOption.CREATE_NEW;
import static java.nio.file.StandardOpenOption.WRITE;
public class CryptoFileSystemProvider extends FileSystemProvider {
private final CryptoFileSystems fileSystems;
private final MoveOperation moveOperation;
private final CopyOperation copyOperation;
public CryptoFileSystemProvider() {
this(DaggerCryptoFileSystemProviderComponent.builder().csprng(strongSecureRandom()).build());
}
private static SecureRandom strongSecureRandom() {
try {
return SecureRandom.getInstanceStrong();
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException("A strong algorithm must exist in every Java platform.", e);
}
}
/**
* visible for testing
*/
CryptoFileSystemProvider(CryptoFileSystemProviderComponent component) {
this.fileSystems = component.fileSystems();
this.moveOperation = component.moveOperation();
this.copyOperation = component.copyOperation();
}
/**
* Typesafe alternative to {@link FileSystems#newFileSystem(URI, Map)}. Default way to retrieve a CryptoFS instance.
*
* @param pathToVault Path to this vault's storage location
* @param properties Parameters used during initialization of the file system
* @return a new file system
* @throws FileSystemNeedsMigrationException if the vault format needs to get updated and <code>properties</code> did not contain a flag for implicit migration.
* @throws FileSystemCapabilityChecker.MissingCapabilityException If the underlying filesystem lacks features required to store a vault
* @throws IOException if an I/O error occurs creating the file system
* @throws MasterkeyLoadingFailedException if the masterkey for this vault could not be loaded
*/
public static CryptoFileSystem newFileSystem(Path pathToVault, CryptoFileSystemProperties properties) throws FileSystemNeedsMigrationException, IOException, MasterkeyLoadingFailedException {
URI uri = CryptoFileSystemUri.create(pathToVault.toAbsolutePath());
return (CryptoFileSystem) FileSystems.newFileSystem(uri, properties);
}
/**
* Creates a new vault at the given directory path.
*
* @param pathToVault Path to an existing directory
* @param properties Parameters to use when writing the vault configuration
* @param keyId ID of the master key to use for this vault
* @throws NotDirectoryException If the given path is not an existing directory.
* @throws IOException If the vault structure could not be initialized due to I/O errors
* @throws MasterkeyLoadingFailedException If thrown by the supplied keyLoader
* @since 2.0.0
*/
public static void initialize(Path pathToVault, CryptoFileSystemProperties properties, URI keyId) throws NotDirectoryException, IOException, MasterkeyLoadingFailedException {
if (!Files.isDirectory(pathToVault)) {
throw new NotDirectoryException(pathToVault.toString());
}
byte[] rawKey = new byte[0];
var config = VaultConfig.createNew().cipherCombo(properties.cipherCombo()).maxFilenameLength(properties.maxNameLength()).build();
try (Masterkey key = properties.keyLoader(keyId.getScheme()).loadKey(keyId);
Cryptor cryptor = config.getCipherCombo().getCryptorProvider(strongSecureRandom()).withKey(key)) {
rawKey = key.getEncoded();
// save vault config:
Path vaultConfigPath = pathToVault.resolve(properties.vaultConfigFilename());
var token = config.toToken(keyId.toString(), rawKey);
Files.writeString(vaultConfigPath, token, StandardCharsets.US_ASCII, WRITE, CREATE_NEW);
// create "d" dir and root:
String dirHash = cryptor.fileNameCryptor().hashDirectoryId(Constants.ROOT_DIR_ID);
Path vaultCipherRootPath = pathToVault.resolve(Constants.DATA_DIR_NAME).resolve(dirHash.substring(0, 2)).resolve(dirHash.substring(2));
Files.createDirectories(vaultCipherRootPath);
} finally {
Arrays.fill(rawKey, (byte) 0x00);
}
assert containsVault(pathToVault, properties.vaultConfigFilename(), properties.masterkeyFilename());
}
/**
* Checks if the folder represented by the given path exists and contains a valid vault structure.
*
* @param pathToVault A directory path
* @param vaultConfigFilename Name of the vault config file
* @param masterkeyFilename Name of the masterkey file
* @return <code>true</code> if the directory seems to contain a vault.
* @since 2.0.0
*/
public static boolean containsVault(Path pathToVault, String vaultConfigFilename, String masterkeyFilename) {
Path vaultConfigPath = pathToVault.resolve(vaultConfigFilename);
Path masterkeyPath = pathToVault.resolve(masterkeyFilename);
Path dataDirPath = pathToVault.resolve(Constants.DATA_DIR_NAME);
return (Files.isReadable(vaultConfigPath) || Files.isReadable(masterkeyPath)) && Files.isDirectory(dataDirPath);
}
/**
* @deprecated only for testing
*/
@Deprecated
CryptoFileSystems getCryptoFileSystems() {
return fileSystems;
}
@Override
public String getScheme() {
return CryptoFileSystemUri.URI_SCHEME;
}
@Override
public CryptoFileSystem newFileSystem(URI uri, Map<String, ?> rawProperties) throws IOException, MasterkeyLoadingFailedException {
CryptoFileSystemUri parsedUri = CryptoFileSystemUri.parse(uri);
CryptoFileSystemProperties properties = CryptoFileSystemProperties.wrap(rawProperties);
return fileSystems.create(this, parsedUri.pathToVault(), properties);
}
@Override
public CryptoFileSystem getFileSystem(URI uri) {
CryptoFileSystemUri parsedUri = CryptoFileSystemUri.parse(uri);
return fileSystems.get(parsedUri.pathToVault());
}
@Override
public Path getPath(URI uri) {
CryptoFileSystemUri parsedUri = CryptoFileSystemUri.parse(uri);
return fileSystems.get(parsedUri.pathToVault()).getPath(parsedUri.pathInsideVault());
}
@Override
public AsynchronousFileChannel newAsynchronousFileChannel(Path cleartextPath, Set<? extends OpenOption> options, ExecutorService executor, FileAttribute<?>... attrs) throws IOException {
if (options.contains(StandardOpenOption.APPEND)) {
throw new IllegalArgumentException("AsynchronousFileChannel can not be opened in append mode");
}
return new AsyncDelegatingFileChannel(newFileChannel(cleartextPath, options, attrs), executor);
}
@Override
public FileChannel newFileChannel(Path cleartextPath, Set<? extends OpenOption> optionsSet, FileAttribute<?>... attrs) throws IOException {
return fileSystem(cleartextPath).newFileChannel(CryptoPath.castAndAssertAbsolute(cleartextPath), optionsSet, attrs);
}
@Override
public SeekableByteChannel newByteChannel(Path cleartextPath, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException {
return newFileChannel(cleartextPath, options, attrs);
}
@Override
public DirectoryStream<Path> newDirectoryStream(Path cleartextDir, Filter<? super Path> filter) throws IOException {
return fileSystem(cleartextDir).newDirectoryStream(CryptoPath.castAndAssertAbsolute(cleartextDir), filter);
}
@Override
public void createDirectory(Path cleartextDir, FileAttribute<?>... attrs) throws IOException {
fileSystem(cleartextDir).createDirectory(CryptoPath.castAndAssertAbsolute(cleartextDir), attrs);
}
@Override
public void delete(Path cleartextPath) throws IOException {
fileSystem(cleartextPath).delete(CryptoPath.castAndAssertAbsolute(cleartextPath));
}
@Override
public void copy(Path cleartextSource, Path cleartextTarget, CopyOption... options) throws IOException {
assertSameProvider(cleartextSource);
assertSameProvider(cleartextTarget);
copyOperation.copy(CryptoPath.castAndAssertAbsolute(cleartextSource), CryptoPath.castAndAssertAbsolute(cleartextTarget), options);
}
@Override
public void move(Path cleartextSource, Path cleartextTarget, CopyOption... options) throws IOException {
assertSameProvider(cleartextSource);
assertSameProvider(cleartextTarget);
moveOperation.move(CryptoPath.castAndAssertAbsolute(cleartextSource), CryptoPath.castAndAssertAbsolute(cleartextTarget), options);
}
@Override
public boolean isSameFile(Path cleartextPath, Path cleartextPath2) throws IOException {
return cleartextPath.getFileSystem() == cleartextPath2.getFileSystem()
&& cleartextPath.toRealPath().equals(cleartextPath2.toRealPath());
}
@Override
public boolean isHidden(Path cleartextPath) throws IOException {
return fileSystem(cleartextPath).isHidden(CryptoPath.castAndAssertAbsolute(cleartextPath));
}
@Override
public FileStore getFileStore(Path cleartextPath) {
return fileSystem(cleartextPath).getFileStore();
}
@Override
public void checkAccess(Path cleartextPath, AccessMode... modes) throws IOException {
fileSystem(cleartextPath).checkAccess(CryptoPath.castAndAssertAbsolute(cleartextPath), modes);
}
@Override
public void createSymbolicLink(Path cleartextPath, Path target, FileAttribute<?>... attrs) throws IOException {
fileSystem(cleartextPath).createSymbolicLink(CryptoPath.castAndAssertAbsolute(cleartextPath), target, attrs);
}
@Override
public Path readSymbolicLink(Path cleartextPath) throws IOException {
return fileSystem(cleartextPath).readSymbolicLink(CryptoPath.castAndAssertAbsolute(cleartextPath));
}
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path cleartextPath, Class<V> type, LinkOption... options) {
return fileSystem(cleartextPath).getFileAttributeView(CryptoPath.castAndAssertAbsolute(cleartextPath), type, options);
}
@Override
public <A extends BasicFileAttributes> A readAttributes(Path cleartextPath, Class<A> type, LinkOption... options) throws IOException {
return fileSystem(cleartextPath).readAttributes(CryptoPath.castAndAssertAbsolute(cleartextPath), type, options);
}
@Override
public Map<String, Object> readAttributes(Path cleartextPath, String attributes, LinkOption... options) throws IOException {
return fileSystem(cleartextPath).readAttributes(CryptoPath.castAndAssertAbsolute(cleartextPath), attributes, options);
}
@Override
public void setAttribute(Path cleartextPath, String attribute, Object value, LinkOption... options) throws IOException {
fileSystem(cleartextPath).setAttribute(CryptoPath.castAndAssertAbsolute(cleartextPath), attribute, value, options);
}
private CryptoFileSystemImpl fileSystem(Path path) {
assertSameProvider(path);
CryptoFileSystemImpl fs = CryptoPath.cast(path).getFileSystem();
fs.assertOpen();
return fs;
}
private void assertSameProvider(Path path) {
if (path.getFileSystem().provider() != this) {
throw new ProviderMismatchException("Used a path from provider " + path.getFileSystem().provider() + " with provider " + this);
}
}
}
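// Illustration only (hypothetical usage sketch): obtaining a CryptoFileSystem through the static
// factory above and copying a cleartext file into the vault. Building the CryptoFileSystemProperties
// (key loader, flags, ...) is out of scope here, so they are taken as a parameter; the "/backup"
// directory inside the vault is an assumption made for the example.
class CryptoFileSystemUsageSketch {
    void copyIntoVault(Path pathToVault, CryptoFileSystemProperties properties, Path cleartextSource)
            throws IOException, MasterkeyLoadingFailedException {
        try (CryptoFileSystem fs = CryptoFileSystemProvider.newFileSystem(pathToVault, properties)) {
            Path target = fs.getPath("/backup/" + cleartextSource.getFileName());
            Files.createDirectories(target.getParent());
            Files.copy(cleartextSource, target);
        }
    }
}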
|
package org.ihtsdo.snomed.rf2torf1conversion;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.ZipInputStream;
import org.ihtsdo.snomed.rf2torf1conversion.pojo.RF1SchemaConstants;
import static org.ihtsdo.snomed.rf2torf1conversion.GlobalUtils.*;
public class RF1Constants implements RF1SchemaConstants{
public static final String FSN = "900000000000003001";
public static final String DEFINITION = "900000000000550004";
public static final String ENTIRE_TERM_CS = "900000000000017005";
public static final String INFERRED = "900000000000011006";
public static final String STATED = "900000000000010007";
public static final String ADDITIONAL = "900000000000227009";
public static final int NOT_REFINE = 0;
public static final int MAY_REFINE = 1;
public static final int MUST_REFINE = 2;
private static final String DELIM = "_";
//Map of triple+group to SCTID
public static Map<String, String> previousInferredRelationships = new HashMap<String, String>();
public static Map<String, String> previousStatedRelationships = new HashMap<String, String>();
private static BufferedReader availableRelationshipIds;
private static int relIdsSkipped = 0;
private static int relIdsIssued = 0;
private static Map<String, Byte> rf1Map = new HashMap<String, Byte>();
static {
rf1Map.put("900000000000441003", null); /* SNOMED CT Model Component (metadata) */
rf1Map.put("106237007", null); /* Linkage concept (linkage concept) */
rf1Map.put("370136006", null); /* Namespace concept (namespace concept) */
rf1Map.put("900000000000442005", null); /* Core metadata concept (core metadata concept) */
rf1Map.put("900000000000447004", null); /* Case significance (core metadata concept) */
rf1Map.put("900000000000016001", (byte) 2); /* AU Entire term case sensitive (core metadata concept) */
rf1Map.put("900000000000017005", (byte) 2); /* CORE Entire term case sensitive (core metadata concept) */
rf1Map.put("900000000000022005", (byte) 1); /* AU Only initial character case insensitive (core metadata concept) */
rf1Map.put("900000000000020002", (byte) 1); /* CORE Only initial character case insensitive (core metadata concept) */
rf1Map.put("900000000000448009", (byte) 0); /* Entire term case insensitive (core metadata concept) */
rf1Map.put("900000000000449001", null); /* Characteristic type (core metadata concept) */
rf1Map.put("900000000000006009", (byte) 0); /* Defining relationship (core metadata concept) */
rf1Map.put("900000000000011006", (byte) 0); /* Inferred relationship (core metadata concept) */
rf1Map.put("900000000000010007", (byte) 10); /* Stated relationship (core metadata concept) */
rf1Map.put("900000000000409009", (byte) 1); /* AU Qualifying relationship (core metadata concept) */
rf1Map.put("900000000000225001", (byte) 1); /* CORE Qualifying relationship (core metadata concept) */
rf1Map.put("900000000000412007", (byte) 3); /* AU Additional relationship (core metadata concept) */
rf1Map.put("900000000000227009", (byte) 3); /* CORE Additional relationship (core metadata concept) */
rf1Map.put("900000000000444006", null); /* Definition status (core metadata concept) */
rf1Map.put("900000000000128007", (byte) 0); /* AU Defined Sufficiently defined concept definition status (core metadata concept) */
rf1Map.put("900000000000130009", (byte) 1); /* AU Primitive Necessary but not sufficient concept definition status (core metadata concept) */
rf1Map.put("900000000000073002", (byte) 0); /* CORE Sufficiently defined concept definition status (core metadata concept) */
rf1Map.put("900000000000074008", (byte) 1); /* CORE Necessary but not sufficient concept definition status (core metadata concept) */
rf1Map.put("900000000000446008", null); /* Description type (core metadata concept) */
rf1Map.put("??????????????????", (byte) 0); /* Unspecified */
rf1Map.put("900000000000013009", (byte) 2); /* CORE Synonym (core metadata concept) */
rf1Map.put("900000000000187000", (byte) 2); /* AU Synonym (core metadata concept) */
rf1Map.put("900000000000003001", (byte) 3); /* Fully specified name (core metadata concept) */
rf1Map.put("900000000000550004", (byte) 10); /* Definition (core metadata concept) */
rf1Map.put("900000000000453004", null); /* Identifier scheme (core metadata concept) */
rf1Map.put("900000000000294009", null); /* CORE SNOMED CT integer identifier (core metadata concept) */
rf1Map.put("900000000000118003", null); /* AU SNOMED CT integer identifier (core metadata concept) */
rf1Map.put("900000000000002006", null); /* SNOMED CT universally unique identifier (core metadata concept) */
rf1Map.put("900000000000450001", null); /* Modifier (core metadata concept) */
rf1Map.put("900000000000451002", (byte) 20); /* Existential restriction modifier (core metadata concept) */
rf1Map.put("900000000000452009", (byte) 21); /* Universal restriction modifier (core metadata concept) */
rf1Map.put("900000000000443000", null); /* Module (core metadata concept) */
rf1Map.put("900000000000445007", null); /* International Health Terminology Standards Development Organisation maintained module (core metadata concept) */
rf1Map.put("449081005", null); /* SNOMED CT Spanish edition module (core metadata concept) */
rf1Map.put("900000000000207008", null); /* SNOMED CT core module (core metadata concept) */
rf1Map.put("900000000000012004", null); /* SNOMED CT model component module (core metadata concept) */
rf1Map.put("449080006", null); /* SNOMED CT to ICD-10 rule-based mapping module (core metadata concept) */
rf1Map.put("449079008", null); /* SNOMED CT to ICD-9CM equivalency mapping module (core metadata concept) */
rf1Map.put("900000000000454005", null); /* Foundation metadata concept (foundation metadata concept) */
rf1Map.put("900000000000455006", null); /* Reference set (foundation metadata concept) */
rf1Map.put("900000000000456007", null); /* Reference set descriptor reference set (foundation metadata concept) */
rf1Map.put("900000000000480006", null); /* Attribute value type reference set (foundation metadata concept) */
rf1Map.put("900000000000488004", null); /* Relationship refinability attribute value reference set (foundation metadata concept) */
rf1Map.put("900000000000489007", null); /* Concept inactivation indicator attribute value reference set (foundation metadata concept) */
rf1Map.put("900000000000490003", null); /* Description inactivation indicator attribute value reference set (foundation metadata concept) */
rf1Map.put("900000000000547002", null); /* Relationship inactivation indicator attribute value reference set (foundation metadata concept) */
rf1Map.put("900000000000496009", null); /* Simple map type reference set (foundation metadata concept) */
rf1Map.put("900000000000497000", null); /* CTV3 simple map reference set (foundation metadata concept) */
rf1Map.put("900000000000498005", null); /* SNOMED RT identifier simple map (foundation metadata concept) */
rf1Map.put("900000000000506000", null); /* Language type reference set (foundation metadata concept) */
rf1Map.put("900000000000507009", null); /* English [International Organization for Standardization 639-1 code en] language reference set (foundation metadata concept) */
rf1Map.put("900000000000512005", null); /* Query specification type reference set (foundation metadata concept) */
rf1Map.put("900000000000513000", null); /* Simple query specification reference set (foundation metadata concept) */
rf1Map.put("900000000000516008", null); /* Annotation type reference set (foundation metadata concept) */
rf1Map.put("900000000000517004", null); /* Associated image reference set (foundation metadata concept) */
rf1Map.put("900000000000521006", null); /* Association type reference set (foundation metadata concept) */
rf1Map.put("900000000000522004", null); /* Historical association reference set (foundation metadata concept) */
rf1Map.put("900000000000534007", null); /* Module dependency reference set (foundation metadata concept) */
rf1Map.put("900000000000538005", null); /* Description format reference set (foundation metadata concept) */
rf1Map.put("900000000000457003", null); /* Reference set attribute (foundation metadata concept) */
rf1Map.put("900000000000491004", null); /* Attribute value (foundation metadata concept) */
rf1Map.put("900000000000410004", null); /* Refinability value (foundation metadata concept) */
rf1Map.put("900000000000007000", (byte) 0); /* Not refinable (foundation metadata concept) */
rf1Map.put("900000000000392005", (byte) 1); /* Optional refinability (foundation metadata concept) */
rf1Map.put("900000000000391003", (byte) 2); /* Mandatory refinability (foundation metadata concept) */
rf1Map.put("900000000000481005", null); /* Concept inactivation value (foundation metadata concept) */
rf1Map.put("900000000000482003", (byte) 2); /* Duplicate component (foundation metadata concept) */
rf1Map.put("900000000000483008", (byte) 3); /* Outdated component (foundation metadata concept) */
rf1Map.put("900000000000484002", (byte) 4); /* Ambiguous component (foundation metadata concept) */
rf1Map.put("900000000000485001", (byte) 5); /* Erroneous component (foundation metadata concept) */
//New inactivation reasons added in the 20170131 release - map as per 'Erroneous' component
rf1Map.put("723278000", (byte) 5); // |Not semantically equivalent component (foundation metadata concept)|
rf1Map.put("723277005", (byte) 5); // |Nonconformance to editorial policy component (foundation metadata concept)|
rf1Map.put("900000000000486000", (byte) 6); /* Limited component (foundation metadata concept) */
rf1Map.put("900000000000487009", (byte) 10); /* Component moved elsewhere (foundation metadata concept) */
rf1Map.put("900000000000492006", (byte) 11); /* Pending move (foundation metadata concept) */
rf1Map.put("900000000000493001", null); /* Description inactivation value (foundation metadata concept) */
rf1Map.put("900000000000494007", (byte) 7); /* Inappropriate component (foundation metadata concept) */
rf1Map.put("900000000000495008", (byte) 8); /* Concept non-current (foundation metadata concept) */
rf1Map.put("900000000000546006", null); /* Inactive value (foundation metadata concept) */
rf1Map.put("900000000000545005", null); /* Active value (foundation metadata concept) */
rf1Map.put("900000000000458008", null); /* Attribute description (foundation metadata concept) */
rf1Map.put("900000000000459000", null); /* Attribute type (foundation metadata concept) */
rf1Map.put("900000000000460005", null); /* Component type (foundation metadata concept) */
rf1Map.put("900000000000461009", null); /* Concept type component (foundation metadata concept) */
rf1Map.put("900000000000462002", null); /* Description type component (foundation metadata concept) */
rf1Map.put("900000000000463007", null); /* Relationship type component (foundation metadata concept) */
rf1Map.put("900000000000464001", null); /* Reference set member type component (foundation metadata concept) */
rf1Map.put("900000000000465000", null); /* String (foundation metadata concept) */
rf1Map.put("900000000000466004", null); /* Text (foundation metadata concept) */
rf1Map.put("900000000000469006", null); /* Uniform resource locator (foundation metadata concept) */
rf1Map.put("900000000000474003", null); /* Universally Unique Identifier (foundation metadata concept) */
rf1Map.put("900000000000475002", null); /* Time (foundation metadata concept) */
rf1Map.put("900000000000476001", null); /* Integer (foundation metadata concept) */
rf1Map.put("900000000000477005", null); /* Signed integer (foundation metadata concept) */
rf1Map.put("900000000000478000", null); /* Unsigned integer (foundation metadata concept) */
rf1Map.put("900000000000479008", null); /* Attribute order (foundation metadata concept) */
rf1Map.put("900000000000499002", null); /* Scheme value (foundation metadata concept) */
rf1Map.put("900000000000500006", null); /* Map source concept (foundation metadata concept) */
rf1Map.put("900000000000501005", null); /* Map group (foundation metadata concept) */
rf1Map.put("900000000000502003", null); /* Map priority (foundation metadata concept) */
rf1Map.put("900000000000503008", null); /* Map rule (foundation metadata concept) */
rf1Map.put("900000000000504002", null); /* Map advice (foundation metadata concept) */
rf1Map.put("900000000000505001", null); /* Map target (foundation metadata concept) */
rf1Map.put("900000000000510002", null); /* Description in dialect (foundation metadata concept) */
rf1Map.put("900000000000511003", null); /* Acceptability (foundation metadata concept) */
rf1Map.put("900000000000548007", (byte) 1); /* Preferred (foundation metadata concept) */
rf1Map.put("900000000000549004", (byte) 2); /* Acceptable (foundation metadata concept) */
rf1Map.put("900000000000514006", null); /* Generated reference set (foundation metadata concept) */
rf1Map.put("900000000000515007", null); /* Query (foundation metadata concept) */
rf1Map.put("900000000000518009", null); /* Annotated component (foundation metadata concept) */
rf1Map.put("900000000000519001", null); /* Annotation (foundation metadata concept) */
rf1Map.put("900000000000520007", null); /* Image (foundation metadata concept) */
rf1Map.put("900000000000532006", null); /* Association source component (foundation metadata concept) */
rf1Map.put("900000000000533001", null); /* Association target component (foundation metadata concept) */
rf1Map.put("900000000000535008", null); /* Dependency target (foundation metadata concept) */
rf1Map.put("900000000000536009", null); /* Source effective time (foundation metadata concept) */
rf1Map.put("900000000000537000", null); /* Target effective time (foundation metadata concept) */
rf1Map.put("900000000000539002", null); /* Description format (foundation metadata concept) */
rf1Map.put("900000000000540000", null); /* Plain text (foundation metadata concept) */
rf1Map.put("900000000000541001", null); /* Limited HyperText Markup Language (foundation metadata concept) */
rf1Map.put("900000000000542008", null); /* Extensible HyperText Markup Language (foundation metadata concept) */
rf1Map.put("900000000000543003", null); /* Darwin Information Typing Architecture (foundation metadata concept) */
rf1Map.put("900000000000544009", null); /* Description length (foundation metadata concept) */
}
public static Byte getMagicNumber(String sctid) {
if (rf1Map.containsKey(sctid)) {
return rf1Map.get(sctid);
}
return null;
}
public static Byte getMagicNumberDebug(String refsetId, String sctid) {
if (rf1Map.containsKey(sctid)) {
return rf1Map.get(sctid);
}
return null;
}
private static Map<String, String> sourceMap = new HashMap<String, String>();
static {
sourceMap.put("900000000000207008","CORE"); /* SNOMED CT core module (core metadata concept) */
sourceMap.put("900000000000012004","META"); /* SNOMED CT model component module (core metadata concept) */
sourceMap.put("999000011000000103","UKEX"); /* SNOMED CT United Kingdom clinical extension module (core metadata concept) */
sourceMap.put("999000011000001104","UKDG"); /* SNOMED CT United Kingdom drug extension module (core metadata concept) */
sourceMap.put("999000021000000109","UKXR"); /* SNOMED CT United Kingdom clinical extension reference set module (core metadata concept) */
sourceMap.put("999000021000001108","UKDR"); /* SNOMED CT United Kingdom pharmacy extension reference set module (core metadata concept) */
}
public static String getModuleSource(String sctid) {
if (sourceMap.containsKey(sctid)) {
return sourceMap.get(sctid);
}
return "ERRR";
}
public static byte translateActive(boolean rf2Active) {
return rf2Active ? (byte) 0 : (byte) 1;
}
public static byte translateDescriptionActive(boolean rf2DescActive, boolean rf2ConActive) {
//If the description is inactive, the RF1 status is 1
//but if it's active, then if the CONCEPT is inactive, then that's indicated
//with status 8
if (!rf2DescActive) {
return (byte) 1; //Inactive, reason unknown
} else if (!rf2ConActive) {
return (byte) 8; //Concept inactive for active description
} else {
return (byte) 0; //Active
}
}
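    // Hedged worked examples (added for clarity; not in the original source):
    //   translateDescriptionActive(false, true)  -> 1  (description retired, reason unknown)
    //   translateDescriptionActive(true,  false) -> 8  (active description on an inactive concept)
    //   translateDescriptionActive(true,  true)  -> 0  (active description on an active concept)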
public static byte translateDescType(String sctid) {
return sctid.equals(FSN) ? (byte) 3 : (byte) 2;
}
public static byte translateCaseSensitive(String sctid) {
return sctid.equals(ENTIRE_TERM_CS) ? (byte) 1 : (byte) 0;
}
    public static byte translateRefinability(String characteristicType) {
        // Qualifying relationships (characteristic type "1") are mandatory refinable (2);
        // everything else is not refinable (0)
        return "1".equals(characteristicType) ? (byte) 2 : (byte) 0;
    }
public static byte translateCharacteristic(String sctid) {
switch (sctid) {
case STATED: return 1;
case INFERRED: return 0;
case ADDITIONAL: return 3;
}
return 9; //Invalid value
}
public static String lookupRelationshipId(String source, String type, String destination, String groupNum, boolean statedRelationships) throws RF1ConversionException, IOException {
Map<String, String> previousRelationships = statedRelationships ? previousStatedRelationships : previousInferredRelationships;
String key = source + DELIM + type + DELIM + destination + DELIM + groupNum;
//Do we already have an SCTID for this key?
if (previousRelationships.containsKey(key)) {
return previousRelationships.get(key);
}
//Otherwise get the next one available and assign it so there's no danger of using it again
String nextSCTID = getNextAvailableRelationship();
previousRelationships.put(key, nextSCTID);
return nextSCTID;
}
private static String getNextAvailableRelationship() throws RF1ConversionException, IOException {
boolean isAvailable = false;
String sctId = null;
        while (!isAvailable) {
            String line = availableRelationshipIds.readLine();
            if (line == null) {
                // readLine() returns null at end of stream, so check before trimming
                throw new RF1ConversionException("Ran out of available relationship SCTIDs. Contact IHTSDO");
            }
            sctId = line.trim();
if (previousInferredRelationships.containsValue(sctId) || previousStatedRelationships.containsValue(sctId)) {
relIdsSkipped++;
} else {
isAvailable = true;
}
}
relIdsIssued++;
return sctId;
}
public static String getRelationshipIdUsageSummary() throws IOException {
if (availableRelationshipIds == null || !availableRelationshipIds.ready()) {
return "";
}
int relIdsRemaining = 0;
while (availableRelationshipIds.readLine() != null) {
relIdsRemaining++;
}
String relSummary = "Relationship Ids Issued: " + relIdsIssued
+ "\nRelationship Ids Skipped (already in use): " + relIdsSkipped
+ "\nRelationship Ids remaining: " + relIdsRemaining;
return relSummary;
}
public static void intialiseAvailableRelationships(InputStream resource) {
availableRelationshipIds = new BufferedReader(new InputStreamReader(resource, StandardCharsets.UTF_8));
}
/**
     * Stores the previous release's relationships in a map of triple+group to SCTID, so that
     * existing triples keep their relationship IDs during reconciliation and new triples can be
     * assigned IDs drawn from available_sctids_partition_02.
     * @throws RF1ConversionException if a duplicate relationship id is detected
*/
public static void loadPreviousRelationships(ZipInputStream zis, boolean stated) throws IOException, RF1ConversionException {
String line;
boolean isFirstLine = true;
//We don't want to close this reader because we have more to get out of zis
BufferedReader br = new BufferedReader(new InputStreamReader(zis, StandardCharsets.UTF_8));
long relationshipsStored = 0;
while ((line = br.readLine()) != null) {
if (isFirstLine) {
isFirstLine = false;
continue;
}
String[] lineItems = line.split(RF1_FIELD_DELIMITER);
String triplePlusGroup = lineItems[RF1_IDX_CONCEPTID1] + DELIM
+ lineItems[RF1_IDX_RELATIONSHIPTYPE] + DELIM
+ lineItems[RF1_IDX_CONCEPTID2] + DELIM
+ lineItems[RF1_IDX_RELATIONSHIPGROUP];
relationshipsStored++;
Map<String, String> previousRelationships = stated ? previousStatedRelationships : previousInferredRelationships;
if (previousRelationships.containsKey(triplePlusGroup)) {
throw new RF1ConversionException("Duplicate " + (stated?"stated":"inferred") + " relationship id detected: " + lineItems[RF1_IDX_RELATIONSHIPID]);
}
previousRelationships.put(triplePlusGroup, lineItems[RF1_IDX_RELATIONSHIPID]);
}
debug ("Imported " + relationshipsStored + " previously " + (stated?"stated":"inferred") + " relationships");
}
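    /*
     * Hedged usage sketch (added; not part of the original class): how the relationship-ID
     * methods above are intended to be combined. The file name, zip stream variable and SCTIDs
     * are illustrative assumptions only.
     *
     *   // 1. Provide the pool of unused SCTIDs (e.g. the partition 02 list):
     *   intialiseAvailableRelationships(new FileInputStream("available_sctids_partition_02.txt"));
     *
     *   // 2. Load the previous release's relationship file so existing triples keep their IDs:
     *   loadPreviousRelationships(previousReleaseZipStream, false); // false = inferred file
     *
     *   // 3. Look up (or mint) an ID for a triple + group; repeated calls return the same ID:
     *   String relId = lookupRelationshipId("404684003", "116680003", "138875005", "0", false);
     *
     *   // 4. Report how many IDs were issued or skipped:
     *   debug(getRelationshipIdUsageSummary());
     */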
}
|
package org.javarosa.core.util.externalizable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import java.util.Vector;
// List of objects of single (non-polymorphic) type
public class ExtWrapList extends ExternalizableWrapper {
public ExternalizableWrapper type;
private boolean sealed;
private Class<? extends List> listImplementation;
/* Constructors for serialization */
public ExtWrapList(List val) {
this(val, null);
}
public ExtWrapList(List val, ExternalizableWrapper type) {
if (val == null) {
throw new NullPointerException();
}
this.val = val;
this.type = type;
this.listImplementation = val.getClass();
}
/* Constructors for deserialization */
public ExtWrapList() {
}
// Assumes that the list implementation is a Vector, since that is most common
public ExtWrapList(Class listElementType) {
this(listElementType, Vector.class);
}
public ExtWrapList(Class listElementType, Class listImplementation) {
this.type = new ExtWrapBase(listElementType);
this.listImplementation = listImplementation;
this.sealed = false;
}
// Assumes that the list implementation is a Vector, since that is most common
public ExtWrapList(ExternalizableWrapper type) {
this(type, Vector.class);
}
public ExtWrapList(ExternalizableWrapper type, Class listImplementation) {
if (type == null) {
throw new NullPointerException();
}
this.listImplementation = listImplementation;
this.type = type;
}
@Override
public ExternalizableWrapper clone(Object val) {
return new ExtWrapList((List)val, type);
}
@Override
public void readExternal(DataInputStream in, PrototypeFactory pf) throws IOException, DeserializationException {
if (!sealed) {
int size = (int)ExtUtil.readNumeric(in);
try {
List<Object> l = listImplementation.newInstance();
for (int i = 0; i < size; i++) {
l.add(ExtUtil.read(in, type, pf));
}
val = l;
} catch (InstantiationException e) {
throw new DeserializationException(e.getMessage());
} catch (IllegalAccessException e) {
throw new DeserializationException(e.getMessage());
}
} else {
int size = (int)ExtUtil.readNumeric(in);
Object[] theval = new Object[size];
for (int i = 0; i < size; i++) {
theval[i] = ExtUtil.read(in, type, pf);
}
val = theval;
}
}
@Override
public void writeExternal(DataOutputStream out) throws IOException {
List l = (List)val;
ExtUtil.writeNumeric(out, l.size());
for (int i = 0; i < l.size(); i++) {
ExtUtil.write(out, type == null ? l.get(i) : type.clone(l.get(i)));
}
}
@Override
public void metaReadExternal(DataInputStream in, PrototypeFactory pf) throws IOException, DeserializationException {
type = ExtWrapTagged.readTag(in, pf);
try {
listImplementation = (Class<? extends List>)Class.forName(ExtUtil.readString(in));
} catch (ClassNotFoundException e) {
throw new DeserializationException(e.getMessage());
}
}
@Override
public void metaWriteExternal(DataOutputStream out) throws IOException {
List l = (List)val;
Object tagObj;
if (type == null) {
if (l.size() == 0) {
tagObj = new Object();
} else {
tagObj = l.get(0);
}
} else {
tagObj = type;
}
ExtWrapTagged.writeTag(out, tagObj);
ExtUtil.writeString(out, listImplementation.getName());
}
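    /*
     * Hedged usage sketch (added; not part of the original class), using only the calls that
     * appear in this file. 'dataOut', 'dataIn' and 'pf' (a PrototypeFactory) are assumed to be
     * in scope on the writing and reading sides respectively.
     *
     *   // Writing side: wrap a homogeneous list and stream it out
     *   List<String> names = new Vector<String>();
     *   names.add("alice");
     *   names.add("bob");
     *   new ExtWrapList(names).writeExternal(dataOut);
     *
     *   // Reading side: declare the element type up front, then read the list back
     *   ExtWrapList template = new ExtWrapList(String.class);
     *   template.readExternal(dataIn, pf);
     *   List<String> restored = (List<String>) template.val;
     */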
}
|
package org.jenkinsci.plugins.ghprb;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import hudson.model.AbstractBuild;
import hudson.tasks.junit.CaseResult;
import hudson.tasks.test.AggregatedTestResultAction;
public abstract class GhprbBaseBuildManager implements GhprbBuildManager {
public GhprbBaseBuildManager(AbstractBuild build) {
this.build = build;
}
/**
     * Calculate the build URL of a build of default type. This will be overridden
* by specific build types.
*
* @return the build URL of a build of default type
*/
public String calculateBuildUrl() {
String publishedURL = GhprbTrigger.getDscp().getPublishedURL();
return publishedURL + "/" + build.getUrl();
}
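    /*
     * Hedged sketch (added; not from the plugin): how a specific build type might override the
     * default URL calculation. The subclass name and URL suffix are illustrative only.
     *
     *   public class GhprbCustomBuildManager extends GhprbBaseBuildManager {
     *       public GhprbCustomBuildManager(AbstractBuild build) {
     *           super(build);
     *       }
     *       @Override
     *       public String calculateBuildUrl() {
     *           // link straight to the console log rather than the default build page
     *           return GhprbTrigger.getDscp().getPublishedURL() + "/" + build.getUrl() + "console";
     *       }
     *   }
     */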
/**
     * Return a downstream iterator of a build of default type. This will be overridden
* by specific build types.
*
* @return the downstream builds as an iterator
*/
public Iterator downstreamIterator() {
        List<AbstractBuild> downstreamList = new ArrayList<AbstractBuild>();
downstreamList.add(build);
return downstreamList.iterator();
}
/**
     * Return the test results of a build of default type. This will be overridden
     * by specific build types.
     *
     * @param printStackTraces whether or not to print the stack traces associated with each test
     * @return the test results of a build of default type
*/
public String getTestResults(boolean printStackTraces) {
return getAggregatedTestResults(build, printStackTraces);
}
protected String getAggregatedTestResults(
AbstractBuild build, boolean printStackTraces) {
AggregatedTestResultAction aggregatedTestResultAction =
build.getAggregatedTestResultAction();
List<CaseResult> failedTests =
aggregatedTestResultAction.getFailedTests();
StringBuilder sb = new StringBuilder();
sb.append("<h2>Failed Tests: ");
sb.append("<span class='status-failure'>");
sb.append(failedTests.size());
sb.append("</span></h2>");
sb.append("<ul>");
for (CaseResult failedTest : failedTests) {
sb.append("<li>");
sb.append("<a href='");
sb.append(failedTest.getUrl());
sb.append("'>");
sb.append("<strong>");
sb.append(failedTest.getFullDisplayName());
sb.append("</strong>");
sb.append("</a>");
if (printStackTraces) {
sb.append("\n```\n");
sb.append(failedTest.getErrorStackTrace());
sb.append("\n```\n");
}
sb.append("</li>");
}
sb.append("</ul>");
return sb.toString();
}
protected AbstractBuild build;
}
|
package org.owasp.esapi.filters;
import java.io.IOException;
import java.util.Date;
import java.util.Stack;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;
/**
 * A simple servlet filter that limits the per-session request rate to a configurable number of
 * requests within a given time window. The default limit is 5 hits in 10 seconds. This can be
 * overridden in the web.xml file by adding init parameters named "hits" and "period" (in seconds)
 * with the desired values. When the limit is exceeded, a short message is written to the response
 * output stream and the rest of the filter chain is not invoked. Otherwise, processing proceeds
 * as normal.
*/
public class RequestRateThrottleFilter implements Filter
{
private int hits = 5;
private int period = 10;
private static final String HITS = "hits";
private static final String PERIOD = "period";
/**
* Called by the web container to indicate to a filter that it is being
* placed into service. The servlet container calls the init method exactly
* once after instantiating the filter. The init method must complete
* successfully before the filter is asked to do any filtering work.
*
* @param filterConfig
* configuration object
*/
    public void init(FilterConfig filterConfig)
    {
        // Use the documented defaults (5 hits / 10 seconds) when an init parameter is not supplied
        String hitsParam = filterConfig.getInitParameter(HITS);
        String periodParam = filterConfig.getInitParameter(PERIOD);
        hits = (hitsParam != null) ? Integer.parseInt(hitsParam) : hits;
        period = (periodParam != null) ? Integer.parseInt(periodParam) : period;
    }
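    /*
     * Hedged configuration example (added; not part of the original source): a typical web.xml
     * registration that raises the limit to 20 requests per 60 seconds. The filter-name and
     * url-pattern values are illustrative assumptions.
     *
     *   <filter>
     *       <filter-name>RequestRateThrottleFilter</filter-name>
     *       <filter-class>org.owasp.esapi.filters.RequestRateThrottleFilter</filter-class>
     *       <init-param><param-name>hits</param-name><param-value>20</param-value></init-param>
     *       <init-param><param-name>period</param-name><param-value>60</param-value></init-param>
     *   </filter>
     *   <filter-mapping>
     *       <filter-name>RequestRateThrottleFilter</filter-name>
     *       <url-pattern>/*</url-pattern>
     *   </filter-mapping>
     */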
/**
* Checks to see if the current session has exceeded the allowed number
* of requests in the specified time period. If the threshold has been
* exceeded, then a short error message is written to the output stream and
* no further processing is done on the request. Otherwise the request is
* processed as normal.
* @param request
* @param response
* @param chain
* @throws IOException
* @throws ServletException
*/
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException
{
HttpServletRequest httpRequest = (HttpServletRequest) request;
HttpSession session = httpRequest.getSession(true);
synchronized( session ) {
Stack times = (Stack) session.getAttribute("times");
if (times == null)
{
times = new Stack();
times.push(new Date(0));
session.setAttribute("times", times);
}
times.push(new Date());
if (times.size() >= hits)
{
times.removeElementAt(0);
}
Date newest = (Date) times.get(times.size() - 1);
Date oldest = (Date) times.get(0);
long elapsed = newest.getTime() - oldest.getTime();
if (elapsed < period * 1000) // seconds
{
response.getWriter().println("Request rate too high");
return;
}
}
chain.doFilter(request, response);
}
/**
* Called by the web container to indicate to a filter that it is being
* taken out of service. This method is only called once all threads within
* the filter's doFilter method have exited or after a timeout period has
* passed. After the web container calls this method, it will not call the
* doFilter method again on this instance of the filter.
*/
public void destroy()
{
// finalize
}
}
|