Columns: index (int64), repo_id (string), file_path (string), content (string)
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/processor/LuQueryProcessor.java
package ai.preferred.cerebro.index.search.processor; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.ScoreDoc; import ai.preferred.cerebro.index.exception.UnsupportedDataType; import ai.preferred.cerebro.index.request.QueryRequest; import ai.preferred.cerebro.index.response.QueryResponse; import ai.preferred.cerebro.index.search.structure.VersatileSearcher; import ai.preferred.cerebro.index.utils.IndexConst; /** * This class processes a {@link QueryRequest} into a suitable * query for a {@link VersatileSearcher}, then passes it on to carry * out the search and return the result. */ public class LuQueryProcessor implements QueryProcessor { /** * @param searcher * @param qRequest * @return * @throws Exception * * Processes both query types: keyword text and vector. */ @Override public QueryResponse process(VersatileSearcher searcher, QueryRequest qRequest) throws Exception { switch (qRequest.getType()){ case KEYWORD: return new QueryResponse<ScoreDoc>(processKeyword(searcher, qRequest.getQueryData(), qRequest.getTopK())); case VECTOR: return new QueryResponse<ScoreDoc>(searcher.queryVector((double[])qRequest.getQueryData(), qRequest.getTopK())); default: throw new UnsupportedDataType(); } } /** * @param searcher * @param queryData * @param topK * @return * @throws Exception * * Handles the case where we want to query a field with a custom name * rather than the default {@link IndexConst#CONTENTS}. */ public ScoreDoc[] processKeyword(VersatileSearcher searcher, Object queryData, int topK) throws Exception { //assume field name is contents if (queryData instanceof String){ return searcher.queryKeyWord(null, (String) queryData, topK); } //assume [0] is fieldname, [1] is query string else if(queryData instanceof String[]){ String[] fieldnameAndQuery = (String[])queryData; QueryParser parser = new QueryParser(fieldnameAndQuery[0], new StandardAnalyzer()); return searcher.queryKeyWord(parser, fieldnameAndQuery[1], topK); } return null; } }
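A minimal usage sketch for the keyword path, assuming a VersatileSearcher instance is already available (construction is shown with LuIndexSearcher below); the field name "title" and the query strings are hypothetical.

import ai.preferred.cerebro.index.search.processor.LuQueryProcessor;
import ai.preferred.cerebro.index.search.structure.VersatileSearcher;
import org.apache.lucene.search.ScoreDoc;

public class KeywordQueryExample {
    // 'searcher' is an already-constructed VersatileSearcher (e.g. a LuIndexSearcher).
    static void run(VersatileSearcher searcher) throws Exception {
        LuQueryProcessor processor = new LuQueryProcessor();
        // A plain String searches the default IndexConst.CONTENTS field (parser stays null).
        ScoreDoc[] defaultHits = processor.processKeyword(searcher, "latent vector search", 10);
        // A String[] queries a custom field: [0] is the field name, [1] is the query string.
        ScoreDoc[] titleHits = processor.processKeyword(searcher, new String[]{"title", "cerebro"}, 10);
    }
}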
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/processor/QueryProcessor.java
package ai.preferred.cerebro.index.search.processor; import ai.preferred.cerebro.index.request.QueryRequest; import ai.preferred.cerebro.index.response.QueryResponse; import ai.preferred.cerebro.index.search.structure.VersatileSearcher; /** * An interface to enforce the functionality of any type * of processor used to handle queries. */ public interface QueryProcessor { public QueryResponse process(VersatileSearcher searcher, QueryRequest qRequest) throws Exception; }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/CeBulkScorer.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.search.*; import org.apache.lucene.util.Bits; import ai.preferred.cerebro.index.utils.IndexUtils; import java.io.IOException; /** * Cerebro internal scorer. */ public class CeBulkScorer extends BulkScorer { private final Scorer scorer; private final DocIdSetIterator iterator; private final TwoPhaseIterator twoPhase; public CeBulkScorer(Scorer scorer) { if (scorer == null) { throw new NullPointerException(); } this.scorer = scorer; this.iterator = scorer.iterator(); this.twoPhase = scorer.twoPhaseIterator(); } @Override public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException { collector.setScorer(scorer); if (scorer.docID() == -1 && min == 0 && max == DocIdSetIterator.NO_MORE_DOCS) { scoreAll(collector, iterator, twoPhase, acceptDocs); return DocIdSetIterator.NO_MORE_DOCS; } else { IndexUtils.notifyLazyImplementation("CeBulkScorer / score"); int doc = scorer.docID(); if (doc < min) { if (twoPhase == null) { doc = iterator.advance(min); } else { doc = twoPhase.approximation().advance(min); } } return scoreRange(collector, iterator, twoPhase, acceptDocs, doc, max); } } static int scoreRange(LeafCollector collector, DocIdSetIterator iterator, TwoPhaseIterator twoPhase, Bits acceptDocs, int currentDoc, int end) throws IOException { if (twoPhase == null) { while (currentDoc < end) { if (acceptDocs == null || acceptDocs.get(currentDoc)) { collector.collect(currentDoc); } currentDoc = iterator.nextDoc(); } return currentDoc; } else { final DocIdSetIterator approximation = twoPhase.approximation(); while (currentDoc < end) { if ((acceptDocs == null || acceptDocs.get(currentDoc)) && twoPhase.matches()) { collector.collect(currentDoc); } currentDoc = approximation.nextDoc(); } return currentDoc; } } static void scoreAll(LeafCollector collector, DocIdSetIterator iterator, TwoPhaseIterator twoPhase, Bits acceptDocs) throws IOException { if (twoPhase == null) { //long startTime = System.currentTimeMillis(); for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) { if (acceptDocs == null || acceptDocs.get(doc)) { collector.collect(doc); } } //long endtime = System.currentTimeMillis(); //System.out.println("Doc collect time" + (endtime - startTime)); } else { // The scorer has an approximation, so run the approximation first, then check acceptDocs, then confirm final DocIdSetIterator approximation = twoPhase.approximation(); for (int doc = approximation.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = approximation.nextDoc()) { if ((acceptDocs == null || acceptDocs.get(doc)) && twoPhase.matches()) { collector.collect(doc); } } } } @Override public long cost() { return iterator.cost(); } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/CeCollector.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import ai.preferred.cerebro.index.store.Container; import java.io.IOException; /** * Cerebro's internal Scoring functionality. * @param <T> Type of Object to rank against each others. * */ public abstract class CeCollector <T extends ScoreDoc> implements Collector { protected static final TopDocs EMPTY_TOPDOCS = new TopDocs(0, new ScoreDoc[0], Float.NaN); final protected int topK; protected Container<T> arr; protected int totalHits; protected CeCollector(Container<T> container, int k) { this.arr = container; this.topK = k; } protected void populateResults(ScoreDoc[] results, int howMany) { for (int i = 0; i <= howMany - 1; i++) { results[i] = arr.pop(); } } abstract protected TopDocs newTopDocs(ScoreDoc[] results, int start); public int getTotalHits() { return totalHits; } protected int topDocsSize() { // In case pq was populated with sentinel values, there might be less // results than pq.size(). Therefore return all results until either // pq.size() or totalHits. return totalHits < arr.size() ? totalHits : arr.size(); } public TopDocs topDocs() { // In case pq was populated with sentinel values, there might be less // results than pq.size(). Therefore return all results until either // pq.size() or totalHits. return topDocs(0, topDocsSize()); } public TopDocs topDocs(int start) { // In case pq was populated with sentinel values, there might be less // results than pq.size(). Therefore return all results until either // pq.size() or totalHits. return topDocs(start, topDocsSize()); } public TopDocs topDocs(int start, int howMany) { // In case pq was populated with sentinel values, there might be less // results than pq.size(). Therefore return all results until either // pq.size() or totalHits. int size = topDocsSize(); // Don't bother to throw an exception, just return an empty TopDocs in case // the parameters are invalid or out of range. // TODO: shouldn't we throw IAE if apps give bad params here so they dont // have sneaky silent bugs? if (start < 0 || start >= size || howMany <= 0) { return newTopDocs(null, start); } // We know that start < pqsize, so just fix howMany. howMany = Math.min(size - start, howMany); ScoreDoc[] results = new ScoreDoc[howMany]; // pq's pop() returns the 'least' element in the queue, therefore need // to discard the first ones, until we reach the requested range. // Note that this loop will usually not be executed, since the common usage // should be that the caller asks for the last howMany results. However it's // needed here for completeness. for (int i = arr.size() - start - howMany; i > 0; i--) { arr.pop(); } // Get the requested results from pq. populateResults(results, howMany); return newTopDocs(results, start); } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/CeTermMatchesIterator.java
package ai.preferred.cerebro.index.search.structure; import java.io.IOException; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.search.MatchesIterator; import org.apache.lucene.search.Query; /** * A {@link MatchesIterator} over a single term's postings list. */ class CeTermMatchesIterator implements MatchesIterator { private int upto; private int pos; private final PostingsEnum pe; private final Query query; /** * Create a new {@link CeTermMatchesIterator} for the given term and postings list. */ CeTermMatchesIterator(Query query, PostingsEnum pe) throws IOException { this.pe = pe; this.query = query; this.upto = pe.freq(); } @Override public boolean next() throws IOException { if (upto-- > 0) { pos = pe.nextPosition(); return true; } return false; } @Override public int startPosition() { return pos; } @Override public int endPosition() { return pos; } @Override public int startOffset() throws IOException { return pe.startOffset(); } @Override public int endOffset() throws IOException { return pe.endOffset(); } @Override public MatchesIterator getSubMatches() throws IOException { return null; } @Override public Query getQuery() { return query; } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/CeTopScoreDocCollector.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.*; import ai.preferred.cerebro.index.store.DocArray; import ai.preferred.cerebro.index.utils.IndexUtils; import java.io.IOException; /** * Class to handle Cerebro internal retrieving and ranking * of Document Objects. */ public class CeTopScoreDocCollector extends CeCollector<ScoreDoc> { abstract static class ScorerLeafCollector implements LeafCollector { Scorer scorer; @Override public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } } @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { final int docBase = context.docBase; return new ScorerLeafCollector() { @Override public void collect(int doc) throws IOException { float score = scorer.score(); // This collector cannot handle these scores: assert score != Float.NEGATIVE_INFINITY; assert !Float.isNaN(score); totalHits++; arr.add(new ScoreDoc(doc + docBase, score)); } }; } CeTopScoreDocCollector(int numHits, int k) { super(new DocArray(numHits, false), k); } public void pullTopK(){ arr.pullTopK(topK, true,true); } @Override protected TopDocs newTopDocs(ScoreDoc[] results, int start) { if (results == null) { return EMPTY_TOPDOCS; } // We need to compute maxScore in order to set it in TopDocs. If start == 0, // it means the largest element is already in results, use its score as // maxScore. Otherwise pop everything else, until the largest element is // extracted and use its score as maxScore. float maxScore = Float.NaN; if (start == 0) { maxScore = results[0].score; } else { IndexUtils.notifyLazyImplementation("CeTopScoreDocCollector / newTopDocs"); //for (int i = pq.size(); i > 1; i--) { pq.pop(); } //maxScore = pq.pop().score; } return new TopDocs(totalHits, results, maxScore); } @Override public boolean needsScores() { return true; } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/LatentVecScorer.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.Similarity; import java.io.IOException; /** * Scorer using {@link ai.preferred.cerebro.index.similarity.CosineSimilarity} * to score similarity between latent vectors. */ public class LatentVecScorer extends Scorer { private final PostingsEnum postingsEnum; private final Similarity.SimScorer docScorer; LatentVecScorer(Weight weight, PostingsEnum td, Similarity.SimScorer docScorer) { super(weight); this.docScorer = docScorer; this.postingsEnum = td; } @Override public int docID() { return postingsEnum.docID(); } @Override public float score() throws IOException { assert docID() != DocIdSetIterator.NO_MORE_DOCS; return docScorer.score(postingsEnum.docID(), 0); } @Override public DocIdSetIterator iterator() { return postingsEnum; } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/LatentVectorQuery.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.Similarity; import ai.preferred.cerebro.index.builder.LocalitySensitiveHash; import ai.preferred.cerebro.index.similarity.CosineSimilarity; import ai.preferred.cerebro.index.utils.IndexConst; import ai.preferred.cerebro.index.utils.IndexUtils; import java.io.IOException; import java.util.Objects; import java.util.Set; /** * A Query that matches Documents by the hashcode * produced by their latent vector. */ public class LatentVectorQuery extends Query { private final Term term; private final double[] vec; private final TermContext perReaderTermState; public LatentVectorQuery(double[] vec, LocalitySensitiveHash lsh) { this.vec =vec; term = new Term(IndexConst.HashFieldName, lsh.getHashBit(vec)); perReaderTermState = null; } public LatentVectorQuery(double[] vec, Term t) { this.vec = vec; term = Objects.requireNonNull(t); perReaderTermState = null; } public LatentVectorQuery(double[] vec, LocalitySensitiveHash lsh, TermContext states) { this.vec =vec; term = new Term(IndexConst.HashFieldName, lsh.getHashBit(vec)); perReaderTermState = states; } public LatentVectorQuery(double[] vec, Term t, TermContext states) { this.vec = vec; term = Objects.requireNonNull(t); perReaderTermState = states; } public Term getTerm() { return term; } public double[] getVec() { return vec; } @Override public String toString(String field) { IndexUtils.notifyLazyImplementation("LatentVectorQuery / toString"); return null; } @Override public boolean equals(Object other) { return sameClassAs(other) && term.equals(((LatentVectorQuery) other).term); } @Override public int hashCode() { return classHash() ^ term.hashCode(); } @Override public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException{ return new LatentVecWeight(searcher, needsScores, perReaderTermState); } final class LatentVecWeight extends Weight { private final CosineSimilarity similarity; private final Similarity.SimWeight stats; private final TermContext termStates; private final boolean needsScores; public LatentVecWeight(IndexSearcher searcher, boolean needsScores, TermContext termStates) throws IOException { super(LatentVectorQuery.this); this.needsScores = needsScores; this.termStates = termStates; this.similarity = new CosineSimilarity(); final CollectionStatistics collectionStats; //final TermStatistics termStats; if (needsScores) { collectionStats = searcher.collectionStatistics(term.field()); //termStats = searcher.termStatistics(term, termStates); } else { // we do not need the actual stats, use fake stats with docFreq=maxDoc and ttf=-1 final int maxDoc = searcher.getIndexReader().maxDoc(); collectionStats = new CollectionStatistics(term.field(), maxDoc, -1, -1, -1); //termStats = new TermStatistics(term.bytes(), maxDoc, -1); } this.stats = similarity.computeWeight(LatentVectorQuery.this.vec, searcher.getIndexReader(), collectionStats); } @Override public void extractTerms(Set<Term> terms) { terms.add(getTerm()); } @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { IndexUtils.notifyLazyImplementation("LatentVecWeight / matches"); TermsEnum te = getTermsEnum(context); if (te == null) { return null; } if (!context.reader().terms(term.field()).hasPositions()) { return super.matches(context, doc); } return MatchesUtils.forField(term.field(), () -> { PostingsEnum pe = te.postings(null, 
PostingsEnum.OFFSETS); if (pe.advance(doc) != doc) { return null; } return new CeTermMatchesIterator(getQuery(), pe); }); } @Override public String toString() { return "weight(" + LatentVectorQuery.this + ")"; } @Override public Scorer scorer(LeafReaderContext context) throws IOException { assert termStates == null || termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context); ; final TermsEnum termsEnum = getTermsEnum(context); if (termsEnum == null) { return null; } PostingsEnum docs = termsEnum.postings(null, needsScores ? PostingsEnum.FREQS : PostingsEnum.NONE); assert docs != null; return new LatentVecScorer(this, docs, similarity.simScorer(stats, context)); } @Override public boolean isCacheable(LeafReaderContext ctx) { return true; } /** * Returns a {@link TermsEnum} positioned at this weights Term or null if * the term does not exist in the given context */ private TermsEnum getTermsEnum(LeafReaderContext context) throws IOException { if (termStates != null) { // TermQuery either used as a Query or the term states have been provided at construction time assert termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context); final TermState state = termStates.get(context.ord); if (state == null) { // term is not present in that reader assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term; return null; } final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(); termsEnum.seekExact(term.bytes(), state); return termsEnum; } else { // TermQuery used as a filter, so the term states have not been built up front Terms terms = context.reader().terms(term.field()); if (terms == null) { return null; } final TermsEnum termsEnum = terms.iterator(); if (termsEnum.seekExact(term.bytes())) { return termsEnum; } else { return null; } } } private boolean termNotInReader(LeafReader reader, Term term) throws IOException { // only called from assert // System.out.println("TQ.termNotInReader reader=" + reader + " term=" + // field + ":" + bytes.utf8ToString()); return reader.docFreq(term) == 0; } @Override public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { Scorer scorer = scorer(context); if (scorer == null) { // No docs match return null; } // This impl always scores docs in order, so we can // ignore scoreDocsInOrder: return new CeBulkScorer(scorer); } @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { IndexUtils.notifyLazyImplementation("LatentVectorQuery/explain"); return null; // TermScorer scorer = (TermScorer) scorer(context); // if (scorer != null) { // int newDoc = scorer.iterator().advance(doc); // if (newDoc == doc) { // float freq = scorer.freq(); // Similarity.SimScorer docScorer = similarity.simScorer(stats, context); // Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq); // Explanation scoreExplanation = docScorer.explain(doc, freqExplanation); // return Explanation.match( // scoreExplanation.getValue(), // "weight(" + getQuery() + " in " + doc + ") [" // + similarity.getClass().getSimpleName() + "], result of:", // scoreExplanation); // } // } // return Explanation.noMatch("no matching term"); } } }
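A sketch of issuing a LatentVectorQuery directly against a plain Lucene IndexSearcher, assuming the index already contains Cerebro's hash and vector fields; the vector file path is a hypothetical placeholder and the query vector comes from the caller.

import ai.preferred.cerebro.index.builder.LocalitySensitiveHash;
import ai.preferred.cerebro.index.search.structure.LatentVectorQuery;
import ai.preferred.cerebro.index.utils.IndexUtils;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;

public class LatentVectorQueryExample {
    static TopDocs run(IndexSearcher searcher, double[] queryVec) throws Exception {
        // Load the same hashing vectors that were used at indexing time.
        double[][] splitVecs = IndexUtils.readVectors("/path/to/splitVec.o");
        LocalitySensitiveHash lsh = new LocalitySensitiveHash(splitVecs);
        // The query hashes the vector into a term on IndexConst.HashFieldName
        // and scores the matching bucket with CosineSimilarity.
        LatentVectorQuery query = new LatentVectorQuery(queryVec, lsh);
        return searcher.search(query, 20);
    }
}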
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/LuIndexSearcher.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.*; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.*; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.util.ThreadInterruptedException; import ai.preferred.cerebro.core.entity.TopKItem; import ai.preferred.cerebro.index.builder.LocalitySensitiveHash; import ai.preferred.cerebro.index.similarity.CosineSimilarity; import ai.preferred.cerebro.index.utils.IndexConst; import ai.preferred.cerebro.index.utils.IndexUtils; import java.io.IOException; import java.util.*; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; /** * Inherited from Lucene's IndexSearcher, this class extends * Lucene's traditional full-text search to vector similarity * search also. * * As such it shares almost all of Lucene's IndexSearcher; from * thread-safety to I/O speed. Use it as you would use a Lucene * searcher. Plus, it now supports vector similarity search via * {@link #queryVector(double[], int)}. */ public class LuIndexSearcher extends IndexSearcher implements VersatileSearcher{ protected final ExecutorService executor; protected final LeafSlice[] leafSlices; protected IndexReader reader; QueryParser defaultParser; private LocalitySensitiveHash lsh; /** * Create a searcher from the provided index and set of hashing vectors. */ public LuIndexSearcher(IndexReader r, String splitVecPath) throws IOException { this(r.getContext(), null, splitVecPath); } /** Runs searches for each segment separately, using the * provided ExecutorService. IndexSearcher will not * close/awaitTermination this ExecutorService on * close; you must do so, eventually, on your own. NOTE: * if you are using {@link NIOFSDirectory}, do not use * the shutdownNow method of ExecutorService as this uses * Thread.interrupt under-the-hood which can silently * close file descriptors (see <a * href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>). */ public LuIndexSearcher(IndexReader r, ExecutorService executor, String splitVecPath) throws IOException { this(r.getContext(), executor, splitVecPath); } public LuIndexSearcher(IndexReaderContext context, ExecutorService executor, String splitVecPath) throws IOException { super(context, executor); this.executor = executor; this.reader = context.reader(); this.defaultParser = new QueryParser(IndexConst.CONTENTS, new StandardAnalyzer()); this.leafSlices = executor == null ? 
null : slices(leafContexts); if(splitVecPath != null){ double[][] splitVecs = IndexUtils.readVectors(splitVecPath); lsh = new LocalitySensitiveHash(splitVecs); } } private TopDocs personalizedSearch(double [] vQuery, int topK) throws Exception{ if(lsh == null) throw new Exception("LocalitySensitiveHash not initialized"); Term t = new Term(IndexConst.HashFieldName, lsh.getHashBit(vQuery)); // count the number of document that matches with this hashcode int count = 0; for (LeafReaderContext leaf : reader.leaves()) count += leaf.reader().docFreq(t); if(count < topK){ return null; } LatentVectorQuery query = new LatentVectorQuery(vQuery, t); return search(query, topK); //return pSearch(query, count, topK); } private TopDocs pSearch(Query query, int count, int topK) throws IOException { return pSearchAfter(null, query, count, topK); } private TopDocs pSearchAfter(ScoreDoc after, Query query, int count, int topK) throws IOException { if(after != null){ IndexUtils.notifyLazyImplementation("LuIndexSearcher / pSearchAfter"); } final int limit = Math.max(1, reader.maxDoc()); if (after != null && after.doc >= limit) { throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc=" + after.doc + " limit=" + limit); } final int cappedNumHits = Math.min(count, limit); final CollectorManager<CeTopScoreDocCollector, TopDocs> manager = new CollectorManager<CeTopScoreDocCollector, TopDocs>() { @Override public CeTopScoreDocCollector newCollector() throws IOException { return new CeTopScoreDocCollector(cappedNumHits, topK); } @Override public TopDocs reduce(Collection<CeTopScoreDocCollector> collectors) throws IOException { final TopDocs[] topDocs = new TopDocs[collectors.size()]; int i = 0; for (CeTopScoreDocCollector collector : collectors) { collector.pullTopK(); topDocs[i++] = collector.topDocs(); } return TopDocs.merge(0, cappedNumHits, topDocs, true); } }; return pSearch(query, manager); } private <C extends Collector, T> T pSearch(Query query, CollectorManager<C, T> collectorManager) throws IOException { if (executor == null) { final C collector = collectorManager.newCollector(); pSearch(query, collector); return collectorManager.reduce(Collections.singletonList(collector)); } else { final List<C> collectors = new ArrayList<>(leafSlices.length); boolean needsScores = false; for (int i = 0; i < leafSlices.length; ++i) { final C collector = collectorManager.newCollector(); collectors.add(collector); needsScores |= collector.needsScores(); } query = rewrite(query); final Weight weight = createWeight(query, needsScores, 1); final List<Future<C>> topDocsFutures = new ArrayList<>(leafSlices.length); for (int i = 0; i < leafSlices.length; ++i) { final LeafReaderContext[] leaves = leafSlices[i].leaves; final C collector = collectors.get(i); topDocsFutures.add(executor.submit(new Callable<C>() { @Override public C call() throws Exception { pSearch(Arrays.asList(leaves), weight, collector); return collector; } })); } final List<C> collectedCollectors = new ArrayList<>(); for (Future<C> future : topDocsFutures) { try { collectedCollectors.add(future.get()); } catch (InterruptedException e) { throw new ThreadInterruptedException(e); } catch (ExecutionException e) { throw new RuntimeException(e); } } return collectorManager.reduce(collectors); } } private void pSearch(Query query, Collector results) throws IOException { query = rewrite(query); pSearch(leafContexts, createWeight(query, results.needsScores(), 1), results); } private void pSearch(List<LeafReaderContext> 
leaves, Weight weight, Collector collector) throws IOException { // TODO: should we make this // threaded...? the Collector could be sync'd? // always use single thread: for (LeafReaderContext ctx : leaves) { // search each subreader final LeafCollector leafCollector; try { leafCollector = collector.getLeafCollector(ctx); } catch (CollectionTerminatedException e) { // there is no doc of interest in this reader context // continue with the following leaf continue; } BulkScorer scorer = weight.bulkScorer(ctx); if (scorer != null) { try { scorer.score(leafCollector, ctx.reader().getLiveDocs()); } catch (CollectionTerminatedException e) { // collection was terminated prematurely // continue with the following leaf } } } } /** * * @param queryParser if null the searcher will by default carry search on * the field named {@link IndexConst#CONTENTS}. * @param sQuery * @param resultSize * @return A set of {@link ScoreDoc} of Document matching with the query. * @throws Exception */ @Override public ScoreDoc[] queryKeyWord(QueryParser queryParser, String sQuery, int resultSize) throws Exception { Query query = null; if(queryParser == null) query = defaultParser.parse(sQuery); else query = queryParser.parse(sQuery); TopDocs hits = search(query, resultSize); return hits == null ? null : hits.scoreDocs; } /** * * @param vQuery * @param resultSize * @return A set of {@link ScoreDoc} of Document having latent vector producing. * the highest inner product with the query vector. * @throws Exception */ @Override public ScoreDoc[] queryVector(double[] vQuery, int resultSize) throws Exception { TopDocs hits = personalizedSearch(vQuery, resultSize); return hits == null ? null : hits.scoreDocs; } }
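A usage sketch that exercises both query paths of LuIndexSearcher; the index directory, vector file path, query string, and the 3-dimensional toy vector are all hypothetical.

import ai.preferred.cerebro.index.search.structure.LuIndexSearcher;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.FSDirectory;
import java.nio.file.Paths;

public class LuIndexSearcherExample {
    public static void main(String[] args) throws Exception {
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
            LuIndexSearcher searcher = new LuIndexSearcher(reader, "/path/to/splitVec.o");
            // Full-text search on the default CONTENTS field (null means use the default parser).
            ScoreDoc[] textHits = searcher.queryKeyWord(null, "personalized search", 10);
            // Vector similarity search routed through the LSH hash field.
            ScoreDoc[] vecHits = searcher.queryVector(new double[]{0.12, 0.55, 0.31}, 10);
        }
    }
}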
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/search/structure/VersatileSearcher.java
package ai.preferred.cerebro.index.search.structure; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.ScoreDoc; import ai.preferred.cerebro.core.entity.AbstractVector; import ai.preferred.cerebro.core.entity.TopKItem; import ai.preferred.cerebro.core.jpa.entity.IndexMetadata; import javax.swing.text.Document; import java.util.List; /** * This interface enforces the functionality of a searcher used by other * Cerebro components. */ public interface VersatileSearcher { ScoreDoc[] queryKeyWord(QueryParser queryParser, String sQuery, int resultSize) throws Exception; ScoreDoc[] queryVector(double[] vQuery, int resultSize) throws Exception; //Document doc(int docID); // private final IndexMetadata metadata; // private final String pathToIndex; // // public AbstractSearcher(IndexMetadata metadata,String pathToIndex){ // this.metadata = metadata; // this.pathToIndex = pathToIndex; // } // // public abstract TopKItem[] query(AbstractVector query, int resultSize); // // @Deprecated // public IndexMetadata getMetadata() { return metadata; } // @Deprecated // public void setMetadata(IndexMetadata metadata) { //this.metadata = metadata; // } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/similarity/CosineSimilarity.java
package ai.preferred.cerebro.index.similarity; import org.apache.lucene.document.Document; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.util.BytesRef; import ai.preferred.cerebro.index.store.VectorField; import ai.preferred.cerebro.index.utils.IndexConst; import ai.preferred.cerebro.index.utils.IndexUtils; import java.io.IOException; import java.util.*; /** * Class to compute cosine similarity. */ public class CosineSimilarity extends Similarity { public CosineSimilarity(){ } @Override public long computeNorm(FieldInvertState state) { IndexUtils.notifyLazyImplementation("CosineSimilarity / computeNorm"); return 0; } @Override public SimWeight computeWeight(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { IndexUtils.notifyLazyImplementation("CosineSimilarity / computeWeight"); return null; } public SimWeight computeWeight(double[] queryVec, IndexReader reader, CollectionStatistics collectionStats) { return new CosineStats(collectionStats.field(), queryVec, reader); } @Override public SimScorer simScorer(SimWeight stats, LeafReaderContext context) throws IOException { CosineStats cosineStats = (CosineStats) stats; return new CosineDocScorer(cosineStats); } private class CosineDocScorer extends SimScorer{ private final CosineStats stats; CosineDocScorer(CosineStats stats){ this.stats = stats; } @Override public float score(int doc, float freq) throws IOException { //Document document = stats.reader.document(doc, IndexConst.fieldsRetrieve); Document document = stats.reader.document(doc); double[] tarVec = VectorField.getFeatureVector(document.getField(IndexConst.VecFieldName).binaryValue().bytes); //double tarVecLen = DoubleStoredField.bytesToDouble(document.getField(IndexConst.VecLenFieldName).binaryValue().bytes);//IndexUtils.vecLength(tarVec); double tarVecLen = IndexUtils.vecLength(tarVec); double cosineScore = IndexUtils.dotProduct(stats.vquery, tarVec) / (stats.vecLength * tarVecLen); return (float) cosineScore; } @Override public float computeSlopFactor(int distance) { IndexUtils.notifyLazyImplementation("CosineDocScorer / computeSlopFactor"); return 0; } @Override public float computePayloadFactor(int doc, int start, int end, BytesRef payload) { IndexUtils.notifyLazyImplementation("CosineDocScorer / computePayloadFactor"); return 0; } } private static class CosineStats extends SimWeight{ private final String field; private final double[] vquery; private final double vecLength; private final IndexReader reader; CosineStats(String field, double[] vquery, IndexReader reader){ this.field = field; this.vquery = vquery; this.vecLength = IndexUtils.vecLength(vquery); this.reader = reader; } } }
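CosineDocScorer boils down to the usual cosine formula dot(q, d) / (|q| · |d|); here is a tiny self-contained check using the IndexUtils helpers, with made-up vectors.

import ai.preferred.cerebro.index.utils.IndexUtils;

public class CosineCheck {
    public static void main(String[] args) {
        double[] q = {0.1, 0.4, 0.2};
        double[] d = {0.3, 0.2, 0.5};
        // Same arithmetic as CosineDocScorer.score, without the index round-trip.
        double cosine = IndexUtils.dotProduct(q, d)
                / (IndexUtils.vecLength(q) * IndexUtils.vecLength(d));
        System.out.println(cosine);   // ~0.743 for these values
    }
}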
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/store/Container.java
package ai.preferred.cerebro.index.store; import org.apache.lucene.util.ArrayUtil; import ai.preferred.cerebro.index.utils.IndexUtils; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Random; import java.util.function.Supplier; /** * Base class to contain, rank and retrieve top K objects * based on their score. This implementation use a modified * version of the Quickselect algorithm. * * @param <T> Type of object to be scored and retrieved. */ public abstract class Container <T> implements Iterable<T>{ private int size = 0; private int maxSize; protected T[] arr; enum PivotScheme{ START, MID, END, RAND } private final PivotScheme pivotScheme; public Container(int maxSize) { this(maxSize, () -> null); } public Container(int maxSize, Supplier<T> sentinelObjectSupplier) { if (maxSize < 1 || maxSize >= ArrayUtil.MAX_ARRAY_LENGTH) { throw new IllegalArgumentException("maxSize must be >= 1 and < " + (ArrayUtil.MAX_ARRAY_LENGTH) + "; got: " + maxSize); } // T is unbounded type, so this unchecked cast works always: @SuppressWarnings("unchecked") final T[] h = (T[]) new Object[maxSize]; this.arr = h; this.maxSize = maxSize; this.pivotScheme = PivotScheme.RAND; // If sentinel objects are supported, populate the queue with them T sentinel = sentinelObjectSupplier.get(); if (sentinel != null) { arr[0] = sentinel; for (int i = 1; i < arr.length; i++) { arr[i] = sentinelObjectSupplier.get(); } size = maxSize; } } public Container(T[] arr){ this.arr = arr; this.maxSize = arr.length; this.pivotScheme = PivotScheme.RAND; } protected abstract boolean lessThan(T a, T b); public abstract void calculateScore(T target); public final void add(T element) { assert size < maxSize; arr[size++] = element; } public final T get(int index){ assert index < size; return arr[index]; } public T insertWithOverflow(T element) { IndexUtils.notifyLazyImplementation("Container / insertWithOverflow"); return null; } public final T top() { IndexUtils.notifyLazyImplementation("Container / top"); return null; } public final T pop() { if (size > 0) { T result = arr[size - 1]; arr[size - 1] = null; // permit GC of objects size--; return result; } else { return null; } } // public final T updateTop() { // //downHeap(1); // //return heap[1]; // IndexUtils.notifyLazyImplementation("Container / updateTop"); // return null; // } // // // public final T updateTop(T newTop) { // //heap[1] = newTop; // //return updateTop(); // IndexUtils.notifyLazyImplementation("Container / updateTop"); // return null; // } /** Returns the number of elements currently stored in the Container. */ public final int size() { return size; } /** Removes all entries from the Container. 
*/ public final void clear() { for (int i = 0; i < size; i++) { arr[i] = null; } arr = null; size = 0; } public final boolean remove(T element) { // only scan the live entries; arr[size] is one past the last element for (int i = 0; i < size; i++) { if (arr[i] == element) { // shift the remaining elements left by one for (int j = i; j < size - 1; j++) { arr[j] = arr[j + 1]; } arr[size - 1] = null; // permit GC of objects size--; return true; } } return false; } public void pullTopK(int k, boolean ordered, boolean trim){ assert k < size; orderStatistic(0, size, size - k); if(ordered) quicksort(size - k, size); if(trim){ for(int i = 0; i < size; i++){ if(i < k){ arr[i] = arr[size - k + i]; } else arr[i] = null; } size = k; } } protected void quicksort(int start, int end){ if (end - start <= 1) return; if (end - start == 2){ if (lessThan(arr[start + 1], arr[start])) swap(start, start+1); return; } int pivot = partition(start, end); quicksort(start, pivot); quicksort(pivot + 1, end); } protected void orderStatistic(int start, int end, int i){ if (end - start <= 1) return; if (end - start == 2){ if (lessThan(arr[start + 1], arr[start])) swap(start, start+1); return; } int pivot = partition(start, end); if (pivot == i) return; else if (pivot < i){ orderStatistic(pivot + 1, end, i); return; } else{ orderStatistic(start, pivot, i); return; } } protected int pivotScheme(int start, int end){ switch (pivotScheme){ case START: return start; case MID: return (start + end) >>> 1; case END: return end - 1; case RAND: return (new Random()).nextInt(end - start) + start; } //this is only for syntax //the return here is never executed return 0; } protected int partition(int start, int end){ int pivot = pivotScheme(start, end); swap(start, pivot); int pIndex = start; for (int seen = start + 1; seen < end; seen++){ if (lessThan(arr[seen], arr[start])){ pIndex++; swap(pIndex, seen); } } swap(pIndex, start); return pIndex; } protected void swap(int a, int b){ T hold = arr[a]; arr[a] = arr[b]; arr[b] = hold; } protected final Object[] getArray() { return (Object[]) arr; } @Override public Iterator<T> iterator() { return new Iterator<T>() { int i = 0; @Override public boolean hasNext() { return i < size; } @Override public T next() { if (hasNext() == false) { throw new NoSuchElementException(); } return arr[i++]; } }; } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/store/DocArray.java
package ai.preferred.cerebro.index.store; import org.apache.lucene.search.ScoreDoc; /** * A data structure to retrieve the top k documents * according to a specific score measurement. */ public class DocArray extends Container<ScoreDoc> { public DocArray(int size, boolean prePopulate) { super(size, () -> { if (prePopulate) { // Always set the doc Id to MAX_VALUE so that it won't be favored by // lessThan. This generally should not happen since if score is not NEG_INF, // TopScoreDocCollector will always add the object to the queue. return new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY); } else { return null; } }); } @Override protected boolean lessThan(ScoreDoc a, ScoreDoc b) { if (a.score == b.score) return a.doc > b.doc; else return a.score < b.score; } @Override public void calculateScore(ScoreDoc target) { //future implementation //do not use this function right now //score is calculated by the Lucene framework } }
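A small sketch of the quickselect-based top-K flow using DocArray directly; the doc ids and scores are made up. As the code reads, pullTopK keeps the k highest-scoring entries and, with ordered set, leaves them sorted in ascending score order.

import ai.preferred.cerebro.index.store.DocArray;
import org.apache.lucene.search.ScoreDoc;

public class TopKExample {
    public static void main(String[] args) {
        DocArray docs = new DocArray(5, false);   // capacity 5, no sentinel pre-population
        docs.add(new ScoreDoc(0, 0.31f));
        docs.add(new ScoreDoc(1, 0.87f));
        docs.add(new ScoreDoc(2, 0.12f));
        docs.add(new ScoreDoc(3, 0.95f));
        docs.add(new ScoreDoc(4, 0.56f));
        docs.pullTopK(3, true, true);             // keep the 3 best, sorted, and trim the rest
        for (ScoreDoc hit : docs) {
            System.out.println(hit.doc + " -> " + hit.score);   // docs 4, 1, 3 in ascending score order
        }
    }
}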
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/store/DoubleStoredField.java
package ai.preferred.cerebro.index.store; import org.apache.lucene.document.StoredField; import org.apache.lucene.util.BytesRef; import ai.preferred.cerebro.index.utils.IndexConst; import java.nio.ByteBuffer; /** * Cerebro's class to store a double number into Lucene's index. */ public class DoubleStoredField extends StoredField { private DoubleStoredField(String name, double d){ super(name, new BytesRef(doubleToBytes(d))); } public DoubleStoredField(double d){ super(IndexConst.VecLenFieldName, new BytesRef(doubleToBytes(d))); } public static byte[] doubleToBytes(double d){ byte[] arr = new byte[Double.BYTES]; ByteBuffer.wrap(arr).putDouble(d); return arr; } public static double bytesToDouble(byte[] data){ assert data.length == Double.BYTES; return ByteBuffer.wrap(data, 0, Double.BYTES).getDouble(); } }
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/store/VectorField.java
package ai.preferred.cerebro.index.store; import org.apache.lucene.document.StoredField; import org.apache.lucene.util.BytesRef; import ai.preferred.cerebro.index.utils.IndexConst; import java.nio.ByteBuffer; /** * Cerebro's class to store a vector into Lucene's index. */ public class VectorField extends StoredField { private VectorField(String name, double[] data){ super(name, new BytesRef(vecToBytes(data))); } public VectorField(double[] data){ super(IndexConst.VecFieldName, new BytesRef(vecToBytes(data))); } /** * @param doublearr * @return byte encoding of the vector. * * Encodes a vector into an array of bytes. */ public static byte[] vecToBytes(double[] doublearr){ byte[] arr = new byte[doublearr.length * Double.BYTES]; for(int i = 0; i < doublearr.length; i++){ byte[] bytes = new byte[Double.BYTES]; ByteBuffer.wrap(bytes).putDouble(doublearr[i]); System.arraycopy(bytes, 0, arr, i * Double.BYTES, bytes.length); } return arr; } /** * @param data * @return vector values of a byte array. * * Decodes a byte array back into a vector. */ public static double[] getFeatureVector(byte[] data){ assert data.length % Double.BYTES == 0; double[] doubles = new double[data.length / Double.BYTES]; for(int i=0;i<doubles.length;i++){ doubles[i] = ByteBuffer.wrap(data, i*Double.BYTES, Double.BYTES).getDouble(); } return doubles; } }
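A round-trip check for the byte encoding above; the vector values are arbitrary.

import ai.preferred.cerebro.index.store.VectorField;
import java.util.Arrays;

public class VectorFieldRoundTrip {
    public static void main(String[] args) {
        double[] original = {0.25, -1.5, 3.125};
        byte[] encoded = VectorField.vecToBytes(original);        // 3 * 8 = 24 bytes
        double[] decoded = VectorField.getFeatureVector(encoded); // decodes back to the same values
        System.out.println(Arrays.equals(original, decoded));     // true
    }
}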
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/utils/IndexConst.java
package ai.preferred.cerebro.index.utils; /** * Cerebro's reserved keyword list and necessary constants */ public class IndexConst { //Reserved keywords; avoid using these as field names public final static String IDFieldName = "ID"; public final static String VecFieldName = "Feature_Vector"; public final static String VecLenFieldName = "Vec_Length"; public final static String HashFieldName = "LSH_Hash_Code"; //These are not reserved keywords, but you should //understand how Cerebro handles text files by default public static final String CONTENTS = "contents"; public static final long mb = 1 << 20; }
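A sketch of how the reserved field names might be used when assembling a document by hand; the id and vector values are made up, and the LSH_Hash_Code field is assumed to be added by Cerebro's index builder from the same vector.

import ai.preferred.cerebro.index.store.VectorField;
import ai.preferred.cerebro.index.utils.IndexConst;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;

public class DocumentSketch {
    static Document build() {
        Document doc = new Document();
        doc.add(new StringField(IndexConst.IDFieldName, "item-42", Field.Store.YES));
        doc.add(new VectorField(new double[]{0.12, 0.55, 0.31}));   // stored under Feature_Vector
        return doc;
    }
}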
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/utils/IndexUtils.java
package ai.preferred.cerebro.index.utils; import com.esotericsoftware.kryo.Kryo; import com.esotericsoftware.kryo.io.Input; import com.esotericsoftware.kryo.io.Output; import ai.preferred.cerebro.core.entity.AbstractVector; import java.io.*; import java.nio.ByteBuffer; import java.util.*; /** * Utility class providing the helper functions needed * across cerebro.index: I/O, vector computations, etc. */ public class IndexUtils { public static void notifyFutureImplementation(){ System.out.println("Warning: To be Implemented"); } public static void notifyLazyImplementation(String msg){ System.out.println("Lazy impl: " + msg); } /** * * @param n the number of vectors to generate * @param nFeatures the number of dimensions each vector has * @param splitFeature flag specifying whether the vectors are to * be used as hashing functions. * If so, values are drawn from the * range -1 to 1 * @param cosineSimilarity * @return A set of randomized vectors */ public static double[][] randomizeFeatureVectors(int n, int nFeatures, boolean splitFeature, boolean cosineSimilarity){ Random random = new Random(); double[][] res = new double[n][nFeatures]; for (int i =0; i < n; i++){ for(int j = 0; j < nFeatures; j++){ //splitFeature takes precedence so hashing vectors keep the documented -1 to 1 range if(splitFeature) res[i][j] = random.nextDouble() * 2 - 1; else if(cosineSimilarity) res[i][j] = random.nextDouble(); else { if(j == 0) res[i][j] = 1.0; else res[i][j] = random.nextDouble(); } } } return res; } /** * @param aVector * @return * * Function to transform a {@link AbstractVector} instance to * a double vector */ public static double[] toDoubles(AbstractVector aVector){ double[] elements = new double[aVector.length()]; for (int id = 0; id < aVector.length(); id ++){ elements[id] = aVector.getElement(id); } return elements; } /** * * @param nFeatures * @return a single vector */ public static double[] randomizeQueryVector(int nFeatures){ Random random = new Random(); double[] re = new double[nFeatures]; for(int i =0; i < nFeatures; i++){ re[i] = random.nextDouble(); } return re; } /** * @param splitVector * @param filename * * Save a set of vectors to hard disk in the specified path */ public static void saveVectors(double [][] splitVector, String filename){ Kryo kryo = new Kryo(); kryo.register(double[][].class); kryo.register(double[].class); try { Output output = new Output(new FileOutputStream(filename)); kryo.writeObject(output, splitVector); output.close(); } catch (FileNotFoundException e) { e.printStackTrace(); } } /** * * @param filename * @return A set of vectors from the specified filename * @throws IOException * * Load a set of vectors from the specified filename */ public static double[][] readVectors(String filename) throws IOException { Kryo kryo = new Kryo(); kryo.register(double[][].class); kryo.register(double[].class); Input input = new Input(new FileInputStream(filename)); double[][] arr= kryo.readObject(input, double[][].class); input.close(); return arr; } /** * @param data * @param filename * * Utility function for testing */ public static void saveQueryAndTopK(HashMap<double[], ArrayList<Integer>> data, String filename){ Kryo kryo = new Kryo(); kryo.register(HashMap.class); kryo.register(double[].class); kryo.register(ArrayList.class); kryo.register(Integer.class); try { Output output = new Output(new FileOutputStream(filename)); kryo.writeObject(output, data); output.close(); } catch (FileNotFoundException e) { e.printStackTrace(); } } /** * @param filename * @return * @throws FileNotFoundException * * Utility function for testing */ public static HashMap readQueryAndTopK(String filename) throws FileNotFoundException { Kryo kryo = new Kryo(); kryo.register(HashMap.class); kryo.register(double[].class); kryo.register(ArrayList.class); kryo.register(Integer.class); Input input = new Input(new FileInputStream(filename)); HashMap arr= kryo.readObject(input, HashMap.class); input.close(); return arr; } /** * Calculates the inner product between two vectors */ static public double dotProduct(double [] a, double [] b){ double re = 0; for (int i=0; i < a.length; i++){ re += a[i] * b[i]; } return re; } /** * @param vec * @return The Euclidean length of the vector passed in */ static public double vecLength(double[] vec) { double hold = 0; for (int i = 0; i < vec.length; i++) { hold += vec[i] * vec[i]; } return Math.sqrt(hold); } /** * @param num * @return The byte encoding of an integer */ public static byte[] intToByte(int num){ byte[] bytes = new byte[Integer.BYTES]; ByteBuffer.wrap(bytes).putInt(num); return bytes; } /** * @param bytes * @return The integer value corresponding to a byte array */ public static int byteToInt(byte[] bytes){ return ByteBuffer.wrap(bytes, 0, Integer.BYTES).getInt(); } }
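A short sketch of generating and persisting a set of hashing vectors with the helpers above; the count, dimensionality, and file path are arbitrary choices.

import ai.preferred.cerebro.index.utils.IndexUtils;

public class SplitVectorSetup {
    public static void main(String[] args) throws Exception {
        // 10 hashing vectors of 50 dimensions each (splitFeature = true).
        double[][] splitVecs = IndexUtils.randomizeFeatureVectors(10, 50, true, false);
        IndexUtils.saveVectors(splitVecs, "/path/to/splitVec.o");
        double[][] reloaded = IndexUtils.readVectors("/path/to/splitVec.o");
        System.out.println(reloaded.length + " x " + reloaded[0].length);   // 10 x 50
    }
}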
0
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index
java-sources/ai/preferred/cerebro/1.0/ai/preferred/cerebro/index/utils/JPAUtils.java
package ai.preferred.cerebro.index.utils; import java.util.List; import ai.preferred.cerebro.core.entity.AbstractVector; import ai.preferred.cerebro.core.jpa.entity.IndexMetadata; import ai.preferred.cerebro.core.jpa.entity.IndexType; import ai.preferred.cerebro.core.jpa.entity.ItemModel; import ai.preferred.cerebro.core.jpa.entity.Model; import ai.preferred.cerebro.core.jpa.entitymanager.IndexMetadataManager; import ai.preferred.cerebro.core.jpa.entitymanager.IndexTypeManager; import ai.preferred.cerebro.core.jpa.entitymanager.ItemModelManager; import ai.preferred.cerebro.core.jpa.entitymanager.ModelManager; import ai.preferred.cerebro.core.jpa.util.LatentVectorUtils; import ai.preferred.cerebro.core.jpa.util.PersistenceUtils; /** * Handle communication with Cerebro's other unreleased * modules */ public class JPAUtils { /* get item vectors from model code */ public static List<ItemModel> retrieveItemListByModelId(int modelId){ ItemModelManager itemModelManager = new ItemModelManager(PersistenceUtils.getEntityManager()); List<ItemModel> itemModelList = itemModelManager.getAllItemModelByModelId(modelId); return itemModelList; } public static AbstractVector retrieveItemByItemIdAndModelId(String itemId, int modelId){ ItemModelManager itemModelManager = new ItemModelManager(PersistenceUtils.getEntityManager()); ItemModel im = itemModelManager.getItemModelByItemIdAndModelId(itemId, modelId); ModelManager modelManager = new ModelManager(PersistenceUtils.getEntityManager()); Model m = modelManager.getModelById(modelId); return LatentVectorUtils.convertToLatentVector(m.useDenseVector(), im.getRepresentation()); } public static Model retrieveModelByModelId(int modelId){ ModelManager modelManager = new ModelManager(PersistenceUtils.getEntityManager()); return modelManager.getModelById(modelId); } public static IndexType retrieveIndexTypeByType(String type){ IndexTypeManager indexTypeManager = new IndexTypeManager(PersistenceUtils.getEntityManager()); for(IndexType iType: indexTypeManager.getAllIndexType()) if(iType.getType().equalsIgnoreCase(type)) return iType; return null; } public static void insertIndexMetadataToDB(IndexMetadata iMeta) { IndexMetadataManager iMetaManager = new IndexMetadataManager(PersistenceUtils.getEntityManager()); iMetaManager.persist(iMeta); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/ApplyRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import ai.preferred.regression.io.CSVInputData; import ai.preferred.regression.io.CSVUtils; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import weka.classifiers.Classifier; import weka.core.Instances; import weka.core.SerializationHelper; import java.io.File; import java.io.FileInputStream; import java.util.ArrayList; public class ApplyRegression extends Command { @Option(name = "-s", aliases = {"--train"}, usage = "the path to the training data", required = true) private File train; @Option(name = "-i", aliases = {"--test"}, usage = "the path to the testing data", required = true) private File test; @Option(name = "-o", aliases = {"--output"}, usage = "the path to the output CSV file", required = true) private File output; @Option(name = "-m", aliases = {"--model"}, usage = "the path to the model file", required = true) private File model; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Override protected void exec() throws Exception { try (final FileInputStream stream = new FileInputStream(model)) { final Classifier classifier = (Classifier) SerializationHelper.read(stream); final boolean nominal = WekaUtils.isLogisticClassifier(classifier); final ARFFDataReader reader = new ARFFDataReader(train, nominal, header); final Instances data = reader.read(test); try (final CSVPrinter printer = CSVUtils.printer(output); final CSVInputData csvData = CSVUtils.reader(test, header)) { if (csvData.hasHeader()) { printer.printRecord(csvData.getHeader()); } int index = 0; for (final ArrayList<String> record : csvData) { final double prediction = classifier.classifyInstance(data.get(index)); if (nominal) { record.set(0, data.classAttribute().value((int) prediction)); } else { record.set(0, String.valueOf(prediction)); } printer.printRecord(record); // advance to the next instance so each CSV record is scored against its own row index++; } } } } public static void main(final String[] args) { parseArgsAndRun(ApplyRegression.class, args); } }
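A sketch of driving the command programmatically; all file paths are hypothetical, and the same flags work when the class is run from the command line.

import ai.preferred.regression.ApplyRegression;

public class ApplyRegressionExample {
    public static void main(String[] args) {
        // Score test.csv with a previously trained model and write the predictions to out.csv.
        ApplyRegression.main(new String[]{
                "-s", "train.csv",   // training data
                "-i", "test.csv",    // data to score
                "-m", "model.bin",   // serialized Weka classifier
                "-o", "out.csv"      // output CSV with predictions written into the first column
        });
    }
}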
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/Command.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import org.kohsuke.args4j.CmdLineException; import org.kohsuke.args4j.CmdLineParser; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public abstract class Command { private static final Logger LOGGER = LoggerFactory.getLogger(Command.class); protected abstract void exec() throws Exception; protected static void parseArgsAndRun(final Class<? extends Command> clazz, final String[] args) { Command command = null; try { command = clazz.newInstance(); } catch (IllegalAccessException | InstantiationException e) { System.err.println("Please check if there is the public default constructor for the class: " + clazz.getCanonicalName()); System.exit(1); } if (args == null) { System.out.println("=========== HELP ==========="); System.out.println(); System.out.println("Processing Element: " + clazz.getSimpleName() + ".class"); System.out.println(); System.out.println("Shell.run(" + clazz.getSimpleName() + ".class, \"\");"); final CmdLineParser parser = new CmdLineParser(command); System.out.println(); parser.printUsage(System.out); System.out.println(); System.out.println("============================"); System.out.println(); System.out.println(); return; } final CmdLineParser parser = new CmdLineParser(command); try { parser.parseArgument(args); } catch (CmdLineException e) { System.err.println("Command: " + clazz.getCanonicalName()); System.err.println(e.getMessage()); System.err.println(); parser.printUsage(System.err); System.exit(1); } try { command.exec(); } catch (Exception e) { LOGGER.error("Unable to execute command (" + clazz.getCanonicalName() + "): ", e); System.exit(1); } } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/EvaluateRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import org.kohsuke.args4j.Option; import weka.classifiers.Classifier; import weka.classifiers.evaluation.Evaluation; import weka.core.Instances; import weka.core.SerializationHelper; import java.io.File; import java.io.FileInputStream; public class EvaluateRegression extends Command { @Option(name = "-s", aliases = {"--train"}, usage = "the path to the training data", required = true) private File train; @Option(name = "-i", aliases = {"--test"}, usage = "the path to the testing data", required = true) private File test; @Option(name = "-m", aliases = {"--model"}, usage = "the path to the model file", required = true) private File model; @Option(name = "-v", aliases = {"--verbose"}, usage = "verbosity level (0 - short, 1 - default, 2 - detailed)") private int verbose = 1; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Override protected void exec() throws Exception { try (final FileInputStream stream = new FileInputStream(model)) { final Classifier classifier = (Classifier) SerializationHelper.read(stream); final boolean nominal = WekaUtils.isLogisticClassifier(classifier); final ARFFDataReader reader = new ARFFDataReader(train, nominal, header); final Instances data = reader.read(test); final Evaluation eval = new Evaluation(data); eval.evaluateModel(classifier, data); if (nominal) { if (verbose <= 0) { System.out.println(eval.pctCorrect()); } else if (verbose == 1) { System.out.println("ACCURACY = " + eval.pctCorrect()); } else { System.out.println(); System.out.println("CLASS\tPRECISION\tRECALL\tF-MEASURE"); for (int i = 0; i < data.classAttribute().numValues(); i++) { System.out.printf("%s\t%f\t%f\t%f", data.classAttribute().value(i), eval.precision(i), eval.recall(i), eval.fMeasure(i)); System.out.println(); } } } else { if (verbose <= 0) { System.out.println(eval.rootMeanSquaredError()); } else { System.out.println("RMSE = " + eval.rootMeanSquaredError()); } } } } public static void main(final String[] args) { parseArgsAndRun(EvaluateRegression.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/PlotData.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import ai.preferred.regression.plot.XYChart; import org.jfree.data.xy.XYSeries; import org.kohsuke.args4j.Option; import weka.core.Instance; import weka.core.Instances; import javax.swing.*; import java.io.File; public class PlotData extends Command { @Option(name = "-i", aliases = {"--input"}, usage = "the path to the input CSV file", required = true) private File input; @Option(name = "-n", aliases = {"--name"}, usage = "the name of the plot") private String name = "DATA"; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Override protected void exec() throws Exception { final ARFFDataReader reader = new ARFFDataReader(input, false, header); final Instances data = reader.read(input); final XYSeries dataSeries = new XYSeries("DATA"); for (final Instance datum : data) { dataSeries.add(datum.value(1), datum.value(0)); } final XYChart chart = new XYChart(name, dataSeries, new XYSeries("REGRESSION")); chart.pack(); chart.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); chart.setVisible(true); } public static void main(String[] args) { parseArgsAndRun(PlotData.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/PlotLinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import ai.preferred.regression.plot.XYChart; import org.jfree.data.xy.XYSeries; import org.kohsuke.args4j.Option; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.core.Instance; import weka.core.Instances; import weka.core.SerializationHelper; import javax.swing.*; import java.io.File; import java.io.FileInputStream; import java.io.IOException; public class PlotLinearRegression extends Command { @Option(name = "-i", aliases = {"--input"}, usage = "the path to the input CSV file", required = true) private File input; @Option(name = "-m", aliases = {"--model"}, usage = "the path to the model file", required = true) private File model; @Option(name = "-n", aliases = {"--name"}, usage = "the name of the plot") private String name = "Y = alpha * X + beta"; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Override protected void exec() throws Exception { try (final FileInputStream stream = new FileInputStream(model)) { final Classifier classifier = (Classifier) SerializationHelper.read(stream); if (!(classifier instanceof LinearRegression)) { throw new IOException("The model is neither LogisticRegression nor LinearRegression!"); } final double[] w = ((LinearRegression) classifier).coefficients(); if (w.length != 3) { throw new IOException("We can plot only linear functions!"); } final ARFFDataReader reader = new ARFFDataReader(input, false, header); final Instances data = reader.read(input); final XYSeries dataSeries = new XYSeries("DATA"); for (final Instance datum : data) { dataSeries.add(datum.value(1), datum.value(0)); } final XYSeries regressionSeries = new XYSeries("REGRESSION"); regressionSeries.add(dataSeries.getMinX(), w[1] * dataSeries.getMinX() + w[2]); regressionSeries.add(dataSeries.getMaxX(), w[1] * dataSeries.getMaxX() + w[2]); final XYChart chart = new XYChart(name, dataSeries, regressionSeries); chart.pack(); chart.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); chart.setVisible(true); } } public static void main(String[] args) { parseArgsAndRun(PlotLinearRegression.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/PrintRegression.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression;

import ai.preferred.regression.io.ARFFDataReader;
import org.kohsuke.args4j.Option;
import weka.classifiers.Classifier;
import weka.classifiers.functions.LinearRegression;
import weka.classifiers.functions.Logistic;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.SerializationHelper;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.RemoveUseless;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.BitSet;

public class PrintRegression extends Command {

    @Option(name = "-i", aliases = {"--input"}, usage = "the path to the input CSV file", required = true)
    private File input;

    @Option(name = "-m", aliases = {"--model"}, usage = "the path to the model file", required = true)
    private File model;

    @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers")
    private boolean header = true;

    private void printSignature(ArrayList<Attribute> signature) {
        for (int i = 1; i < signature.size(); i++) {
            System.out.print(signature.get(i).name() + "\t");
        }
        System.out.println("Bias");
    }

    @Override
    protected void exec() throws Exception {
        System.out.println();
        try (final FileInputStream stream = new FileInputStream(model)) {
            final Classifier classifier = (Classifier) SerializationHelper.read(stream);
            if (classifier instanceof LinearRegression) {
                final ARFFDataReader reader = new ARFFDataReader(input, false, header);
                final Instances instances = preprocess(reader.read(input));
                final BitSet ignore = new BitSet(instances.numAttributes());
                for (int i = 0; i < instances.numAttributes(); i++) {
                    if (i != instances.classIndex()) {
                        if (Math.sqrt(instances.variance(i)) == 0) {
                            ignore.set(i);
                        }
                    }
                }
                final double[] w = ((LinearRegression) classifier).coefficients();
                System.out.printf("%-20s W", "FEATURE");
                System.out.println();
                for (int i = 1; i < instances.numAttributes(); i++) {
                    if (ignore.get(i)) {
                        continue;
                    }
                    System.out.printf("%-20s %.6f", instances.attribute(i).name(), w[i]);
                    System.out.println();
                }
                System.out.printf("%-20s %.6f", "Bias", w[instances.numAttributes()]);
                System.out.println();
            } else if (classifier instanceof Logistic) {
                final ARFFDataReader reader = new ARFFDataReader(input, true, header);
                final Instances instances = preprocess(reader.read(input));
                final double[][] w = ((Logistic) classifier).coefficients();
                for (int i = 0; i < instances.classAttribute().numValues(); i++) {
                    System.out.printf("%s %s", "CLASS[" + i + "] =", instances.classAttribute().value(i));
                    System.out.println();
                }
                System.out.println();
                System.out.printf("%-20s W", "FEATURE");
                System.out.println();
                for (int i = 1; i < instances.numAttributes(); i++) {
                    System.out.printf("%-20s %.6f", instances.attribute(i).name(), w[i][0]);
                    System.out.println();
                }
                System.out.printf("%-20s %.6f", "Bias", w[0][0]);
                System.out.println();
            } else {
                throw new RuntimeException("We can process only regression models!");
            }
        }
    }

    private Instances preprocess(Instances instances) throws Exception {
        final ReplaceMissingValues replaceMissingValues = new ReplaceMissingValues();
        replaceMissingValues.setInputFormat(instances);
        instances = Filter.useFilter(instances, replaceMissingValues);
        final RemoveUseless removeUseless = new RemoveUseless();
        removeUseless.setInputFormat(instances);
        instances = Filter.useFilter(instances, removeUseless);
        return instances;
    }

    public static void main(String[] args) {
        parseArgsAndRun(PrintRegression.class, args);
    }
}
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/Shell.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression;

import ai.preferred.regression.reset.DataFiles;
import com.google.common.io.Files;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.charset.StandardCharsets;
import java.util.Objects;

public class Shell {

    private static final Logger LOGGER = LoggerFactory.getLogger(Shell.class);

    public static void reset() {
        final File tempDir = new File("temp");
        mkdir(tempDir);
        for (final File file : Objects.requireNonNull(tempDir.listFiles())) {
            if (!file.getName().startsWith(".") && !file.delete()) {
                LOGGER.error("Unable to delete: {}", file);
            }
        }
        mkdir(tempDir);
        File dataDir = new File("data");
        mkdir(dataDir);
        write(new File(dataDir, "icecream.csv"), DataFiles.ICECREAM_CSV);
        write(new File(dataDir, "icecream_raw.csv"), DataFiles.ICECREAM_RAW_CSV);
        write(new File(dataDir, "amazon.csv"), DataFiles.AMAZON_CSV);
        write(new File(dataDir, "camera.csv"), DataFiles.CAMERA_CSV);
        write(new File(dataDir, "amazon_extended.csv"), DataFiles.AMAZON_EXTENDED);
    }

    private static void write(File file, String content) {
        try (final PrintWriter writer = new PrintWriter(file, "UTF-8")) {
            writer.write(content);
        } catch (FileNotFoundException | UnsupportedEncodingException e) {
            LOGGER.error("Unable to reset file {}: {}", file, e);
        }
    }

    private static void mkdir(File tempDir) {
        if (!tempDir.exists() && !tempDir.mkdirs()) {
            LOGGER.error("Unable to mkdir: {}", tempDir);
        }
    }

    public static void help(Class<?> clazz) {
        try {
            final Method method = clazz.getMethod("main", String[].class);
            method.invoke(null, (Object) null);
        } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
            LOGGER.error("Unable to execute {}: {}", clazz, e);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void copyFile(String src, String dst) {
        try {
            Files.copy(new File(src), new File(dst));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static void run(Class<?> clazz, String... args) {
        try {
            final Method method = clazz.getMethod("main", String[].class);
            method.invoke(null, (Object) args);
        } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
            LOGGER.error("Unable to execute {}: {}", clazz, e);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void run(Class<?> clazz, String args) {
        run(clazz, args.trim().split("\\s+"));
    }

    private static Class<?> pe(String name) {
        final String className = "ai.preferred.regression.pe." + name;
        try {
            return Class.forName(className);
        } catch (ClassNotFoundException e) {
            LOGGER.info("Could not find PE: {}", className);
            return null;
        }
    }

    private static Class<?> command(String name) {
        final String className = "ai.preferred.regression." + name;
        try {
            return Class.forName(className);
        } catch (ClassNotFoundException e) {
            LOGGER.info("Could not find command: {}", className);
            return null;
        }
    }

    public static void exec(String filename) {
        try (final BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) {
            String line;
            while (null != (line = reader.readLine())) {
                line = line.trim();
                if (line.isEmpty()) {
                    continue;
                }
                final String[] command = line.split("\\s+", 2);
                final String name = command[0];
                final String args = command[1];
                Class<?> clazz = pe(name);
                if (clazz == null) {
                    clazz = command(name);
                }
                if (clazz == null) {
                    LOGGER.error("Unable to execute command: {}", name);
                    return;
                }
                run(clazz, args);
            }
        } catch (FileNotFoundException e) {
            LOGGER.error("Unable to find input file: {}", filename);
        } catch (IOException e) {
            LOGGER.error("Execution error: ", e);
        }
    }

    public static void main(String[] args) {
        for (final String arg : args) {
            exec(arg);
        }
    }

    private Shell() {
        throw new AssertionError();
    }
}
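A small sketch of how Shell.exec resolves a script: each non-empty line is split into a class simple name plus its arguments, looked up first under ai.preferred.regression.pe and then under ai.preferred.regression. The script name pipeline.txt and its contents below are illustrative assumptions, not files shipped with the project; the class names and flags used in it come from the commands defined above.

import ai.preferred.regression.Shell;

public class ShellScriptSketch {
    public static void main(final String[] args) {
        // Assumed contents of the placeholder file pipeline.txt (every line must carry
        // arguments, because exec() splits each line into exactly two parts):
        //
        //   TrainLinearRegression -i data/icecream.csv -m temp/icecream.model
        //   PrintRegression -i data/icecream.csv -m temp/icecream.model
        //
        Shell.reset();              // recreates the data/ and temp/ directories
        Shell.exec("pipeline.txt"); // runs the two commands above in order
    }
}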
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/TrainLinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import org.kohsuke.args4j.Option; import weka.classifiers.evaluation.Evaluation; import weka.core.Instances; import weka.core.SerializationHelper; import java.io.File; import java.io.FileOutputStream; public class TrainLinearRegression extends Command { @Option(name = "-i", aliases = {"--train"}, usage = "the path to the training data in CSV format", required = true) private File input; @Option(name = "-m", aliases = {"--model"}, usage = "the output path to the model file", required = true) private File model; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Option(name = "-r", aliases = {"--ridge"}, usage = "the ridge parameter") private double ridge = 1.0; @Option(name = "-v", aliases = {"--verbose"}, usage = "verbosity level (-1 - disable, 0 - short, 1 - default)") private int verbose = 1; @Override protected void exec() throws Exception { final ARFFDataReader reader = new ARFFDataReader(input, false, header); final Instances data = reader.read(input); final weka.classifiers.functions.LinearRegression classifier = new weka.classifiers.functions.LinearRegression(); classifier.setRidge(ridge); classifier.buildClassifier(data); final Evaluation eval = new Evaluation(data); eval.evaluateModel(classifier, data); if (verbose <= -1) { // output disabled } else if (verbose == 0) { System.out.println(eval.rootMeanSquaredError()); } else { System.out.println("RMSE[TRAINING] = " + eval.rootMeanSquaredError()); } SerializationHelper.write(new FileOutputStream(model), classifier); } public static void main(String[] args) { parseArgsAndRun(TrainLinearRegression.class, args); } }
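A short sketch showing the -r (ridge) and -v flags of TrainLinearRegression in use; temp/icecream_r10.model is a made-up output path and the ridge value is only an example.

import ai.preferred.regression.Shell;
import ai.preferred.regression.TrainLinearRegression;

public class RidgeSweepSketch {
    public static void main(final String[] args) {
        Shell.reset();
        // -r sets the ridge parameter; -v 0 prints only the bare RMSE[TRAINING] value.
        Shell.run(TrainLinearRegression.class,
                "-i data/icecream.csv -m temp/icecream_r10.model -r 10.0 -v 0");
    }
}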
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/TrainLogisticRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import ai.preferred.regression.io.ARFFDataReader; import org.kohsuke.args4j.Option; import weka.classifiers.evaluation.Evaluation; import weka.core.Instances; import weka.core.SerializationHelper; import java.io.File; import java.io.FileOutputStream; public class TrainLogisticRegression extends Command { @Option(name = "-i", aliases = {"--train"}, usage = "the path to the training data in CSV format", required = true) private File input; @Option(name = "-m", aliases = {"--model"}, usage = "the output path to the model file", required = true) private File model; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; @Option(name = "-r", aliases = {"--ridge"}, usage = "the ridge parameter") private double ridge = 1.0; @Option(name = "-v", aliases = {"--verbose"}, usage = "verbosity level (-1 - disable, 0 - short, 1 - default, 2 - detailed)") private int verbose = 1; @Override protected void exec() throws Exception { final ARFFDataReader reader = new ARFFDataReader(input, true, header); final Instances data = reader.read(input); final weka.classifiers.functions.Logistic classifier = new weka.classifiers.functions.Logistic(); classifier.setRidge(ridge); classifier.buildClassifier(data); final Evaluation eval = new Evaluation(data); eval.evaluateModel(classifier, data); if (verbose <= -1) { // output disabled } else if (verbose == 0) { System.out.println(eval.pctCorrect()); } else if (verbose == 1) { System.out.println("ACCURACY[TRAINING] = " + eval.pctCorrect()); } else { System.out.println(); System.out.println("CLASS\tPRECISION\tRECALL\tF-MEASURE"); for (int i = 0; i < data.classAttribute().numValues(); i++) { System.out.printf("%s\t%f\t%f\t%f", data.classAttribute().value(i), eval.precision(i), eval.recall(i), eval.fMeasure(i)); System.out.println(); } } SerializationHelper.write(new FileOutputStream(model), classifier); } public static void main(String[] args) { parseArgsAndRun(TrainLogisticRegression.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/WekaUtils.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.classifiers.functions.Logistic; public class WekaUtils { static boolean isLogisticClassifier(Classifier classifier) { boolean nominal; if (classifier instanceof Logistic) { nominal = true; } else if (classifier instanceof LinearRegression) { nominal = false; } else { throw new IllegalStateException("The model is neither LogisticRegression nor LinearRegression!"); } return nominal; } private WekaUtils() { throw new AssertionError(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E00_IceCream.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.PlotData; import ai.preferred.regression.Shell; public class E00_IceCream { /** * DATA: data/icecream.csv * <p> * TODO: * Run this class to plot the input data, take a look at it! * You can open the data file in Excel or Google Spreadsheet. * <p> * CHECK: Is it possible to approximate this data with a linear function? */ public static void main(final String[] args) { Shell.reset(); Shell.run(PlotData.class, "-i data/icecream.csv -n IceCream"); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E01_MyFirstRegressionWithIceCream.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.PlotLinearRegression; import ai.preferred.regression.Shell; import ai.preferred.regression.TrainLinearRegression; public class E01_MyFirstRegressionWithIceCream { /** * DATA: data/icecream.csv * <p> * TODO: * Train a linear regression on 'icecream.csv'. Plot the regression line. * <p> * CHECK: What is the value of RMSE[TRAINING] for this dataset? */ public static void main(final String[] args) { Shell.reset(); Shell.run(TrainLinearRegression.class, "-i data/icecream.csv -m temp/icecream.model"); Shell.run(PlotLinearRegression.class, "-i data/icecream.csv -m temp/icecream.model"); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E02_ReadingRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.PrintRegression; import ai.preferred.regression.Shell; import ai.preferred.regression.TrainLinearRegression; public class E02_ReadingRegression { /** * DATA: data/icecream.csv * <p> * TODO: * You can print the regression weights, to understand it a bit better! * <p> * CHECK: What is the value of the regression when Temperature is 0? */ public static void main(final String[] args) { Shell.reset(); Shell.run(TrainLinearRegression.class, "-i data/icecream.csv -m temp/icecream.model"); Shell.run(PrintRegression.class, "-i data/icecream.csv -m temp/icecream.model"); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E03_RawIceCream.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.RemoveColumn; import ai.preferred.regression.pe.SwapColumns; public class E03_RawIceCream { /** * DATA: data/icecream_raw.csv ; data/icecream.csv * <p> * TODO: * Often, data come in a format which is not suitable for analysis or for building a regression. * Convert 'icecream_raw.csv' to make it look like 'icecream.csv'. * <p> * CHECK: Should you use RemoveColumn or SwapColumns as the first step? */ public static void main(final String[] args) { Shell.reset(); Shell.help(RemoveColumn.class); Shell.help(SwapColumns.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E04_RegressionForTemperature.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; public class E04_RegressionForTemperature { /** * DATA: data/icecream_raw.csv * <p> * TODO: * Train and plot a regression predicting temperature based on consumption. * Plot it! * <p> * CHECK: What is the difference between consumption-regression and temperature-regression? */ public static void main(final String[] args) { Shell.reset(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E05_TryX2Only.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.AddX2; public class E05_TryX2Only { /** * DATA: data/icecream.csv * <p> * TODO: * Add column Temperature^2 and train linear regression to predict consumption based only on Temperature^2 feature. * Plot the trained regression! * <p> * CHECK: Is RMSE[TRAINING] different from 'E01_MyFirstRegressionWithIceCream'? */ public static void main(final String[] args) { Shell.reset(); Shell.help(AddX2.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E06_TryX1AndX2AndX3.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.AddX3; public class E06_TryX1AndX2AndX3 { /** * DATA: data/icecream.csv * <p> * TODO: * Add columns Temperature^2 and Temperature^3 and train linear regression using all the parameters! * <p> * CHECK: Check RMSE[TRAINING] again, is it any different? */ public static void main(final String[] args) { Shell.reset(); // TODO: implement AddX3.class, hint: take a look at the AddX2 class Shell.help(AddX3.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E07_AmazonText.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression.exercise;

import ai.preferred.regression.Shell;
import ai.preferred.regression.pe.EncodeTextAsFrequency;

public class E07_AmazonText {

    /**
     * DATA: data/amazon.csv
     * <p>
     * TODO:
     * Take a look at 'amazon.csv'; one of its columns contains text.
     * Convert it into word frequencies using EncodeTextAsFrequency.class.
     * <p>
     * CHECK: How many columns does the new dataset have after conversion?
     */
    public static void main(final String[] args) {
        Shell.reset();
        Shell.help(EncodeTextAsFrequency.class);
    }
}
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E08_AmazonCheap.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.ProjectColumns; public class E08_AmazonCheap { /** * DATA: data/amazon.csv * <p> * TODO: * Train a linear regression for rating prediction based on word "cheap" only! Plot it! * <p> * CHECK: What are the regression parameters? Is word "cheap" a good predictor? */ public static void main(final String[] args) { Shell.reset(); Shell.help(ProjectColumns.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E09_AmazonExpensive.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; public class E09_AmazonExpensive { /** * DATA: data/amazon.csv * <p> * TODO: * Train linear regression for rating prediction based on word "expensive" only. Plot it! * <p> * CHECK: What are the regression parameters? Is word "expensive" a good predictor? */ public static void main(final String[] args) { Shell.reset(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E10_AmazonYourOwnWord.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; public class E10_AmazonYourOwnWord { /** * DATA: data/amazon.csv * <p> * TODO: * Train a linear regression for rating prediction based on your own word. Plot it! * We will discuss it! * <p> * CHECK: What are the regression parameters? Is your word a good predictor? */ public static void main(final String[] args) { Shell.reset(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E11_CameraCategories.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.EncodeValueAsOneHot; /** * DATA: data/camera.csv * <p> * TODO: * Take a look at 'camera.csv'. It has a lot of categorical data, which is to be * processed and represented as 0-1 values. * <p> * CHECK: How many columns does the dataset have after processing? */ public class E11_CameraCategories { public static void main(final String[] args) { Shell.reset(); Shell.help(EncodeValueAsOneHot.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E12_CameraWithAutoFocus.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.SelectEquals; public class E12_CameraWithAutoFocus { /** * DATA: data/camera.csv * <p> * TODO: * We are interested in the subset of 'camera.csv', the cameras with auto focus. * Select this subset and train a linear regression to predict price based on camera type. * <p> * CHECK: How many rows does the dataset have after processing? */ public static void main(final String[] args) { Shell.reset(); Shell.help(SelectEquals.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E13_ShuffleAndPartitionIceCream.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.pe.Partition; import ai.preferred.regression.pe.Shuffle; public class E13_ShuffleAndPartitionIceCream { /** * DATA: data/icecream.csv * <p> * TODO: * We are back to 'icecream.csv'. * Shuffle and partition the data in proportion 80/20, 80% is for training data and 20% is for testing data. * Plot the data splits. * <p> * CHECK: How many rows are there in the training and testing datasets? */ public static void main(final String[] args) { Shell.reset(); Shell.help(Shuffle.class); Shell.help(Partition.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E14_TrainTest.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression.exercise;

import ai.preferred.regression.EvaluateRegression;
import ai.preferred.regression.Shell;

public class E14_TrainTest {

    /**
     * DATA: data/icecream.csv
     * <p>
     * TODO:
     * Shuffle and partition the data in proportion 60/40, 60% is for the training data and 40% is for the testing data.
     * Train a regression on the training data and evaluate it on the testing data.
     * <p>
     * CHECK: Is RMSE (on testing) > RMSE[TRAINING] or RMSE (on testing) < RMSE[TRAINING]?
     */
    public static void main(final String[] args) {
        Shell.reset();
        Shell.help(EvaluateRegression.class);
    }
}
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E15_TestX123.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression.exercise;

import ai.preferred.regression.Shell;

public class E15_TestX123 {

    /**
     * DATA: data/icecream.csv
     * <p>
     * TODO:
     * Let's continue with our previous split.
     * Add more features to the dataset: Temperature^2 and Temperature^3.
     * Train a regression model on the training data and evaluate it on the testing data.
     * <p>
     * CHECK: Is RMSE (with more features) > RMSE (with only one feature)?
     */
    public static void main(final String[] args) {
        Shell.reset();
    }
}
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E16_AmazonTrainTest.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; public class E16_AmazonTrainTest { /** * DATA: data/amazon.csv * <p> * TODO: * Let's go to the amazon data: 'amazon.csv'. * Split the data in proportion 80/20. * Build a regression on the training split and evaluate it on the testing. * <p> * CHECK: Is RMSE (on testing) > RMSE[TRAINING] or RMSE (on testing) < RMSE[TRAINING]? */ public static void main(final String[] args) { Shell.reset(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E17_AmazonLogistic.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.TrainLogisticRegression; public class E17_AmazonLogistic { /** * DATA: data/amazon.csv * <p> * TODO: * Build and evaluate a logistic regression model. Data split: 80/20. * <p> * CHECK: Is ACCURACY (on testing) > ACCURACY[TRAINING] or ACCURACY (on testing) < ACCURACY[TRAINING]? */ public static void main(final String[] args) { Shell.reset(); Shell.help(TrainLogisticRegression.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E18_LogisticRidgeRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.TrainLogisticRegression; public class E18_LogisticRidgeRegression { /** * DATA: data/amazon.csv * <p> * TODO: * Build and evaluate a logistic regression model with ridge = {0.1, 1.0, 10.0}. Data split: 80/20. * <p> * CHECK: Which ridge parameter gives the best ACCURACY (on testing)? */ public static void main(final String[] args) { Shell.reset(); Shell.help(TrainLogisticRegression.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E19_LinearRidgeRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; import ai.preferred.regression.TrainLinearRegression; public class E19_LinearRidgeRegression { /** * DATA: data/amazon.csv * <p> * TODO: * Build and evaluate a linear regression model with ridge = {0.1, 1.0, 10.0}. Data split: 80/20. * <p> * CHECK: Which ridge parameter gives the best RMSE (on testing)? */ public static void main(final String[] args) { Shell.reset(); Shell.help(TrainLinearRegression.class); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/exercise/E20_GrandFinale.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.exercise; import ai.preferred.regression.Shell; public class E20_GrandFinale { /** * DATA: data/amazon_extended.csv * <p> * TODO: * Build and evaluate a regression model for rating prediction! Data split: 80/20. * <p> * CHECK: What is the best ACCURACY (on testing) you can get? */ public static void main(final String[] args) { Shell.reset(); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/io/ARFFDataReader.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression.io;

import weka.core.*;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Set;
import java.util.TreeSet;

public class ARFFDataReader {

    private static double parseDouble(ArrayList<String> record, int row, int col) throws IOException {
        try {
            return Double.parseDouble(record.get(col));
        } catch (NumberFormatException | NullPointerException e) {
            throw new IOException("A number expected! (row = " + row + "; col = " + col + ")");
        }
    }

    private final boolean nominal;
    private final boolean parseHeader;
    private final ArrayList<Attribute> signature;

    public ARFFDataReader(File signatureFile, boolean nominal, boolean parseHeader) throws IOException {
        this.nominal = nominal;
        this.parseHeader = parseHeader;
        try (final CSVInputData data = new CSVInputData(signatureFile, parseHeader)) {
            final ArrayList<Attribute> signature = new ArrayList<>();
            if (nominal) {
                final Set<String> attributeValueSet = new TreeSet<>();
                ArrayList<String> firstRecord = null;
                int row = parseHeader ? 1 : 0;
                for (final ArrayList<String> record : data) {
                    if (firstRecord == null) {
                        firstRecord = record;
                    }
                    attributeValueSet.add(record.get(0));
                    for (int col = 1; col < record.size(); col++) {
                        parseDouble(record, row, col);
                    }
                    row++;
                }
                if (firstRecord == null) {
                    throw new IOException("There is no records in the CSV file!");
                }
                if (data.hasHeader()) {
                    final ArrayList<String> header = data.getHeader();
                    signature.add(new Attribute(header.get(0), new ArrayList<>(attributeValueSet)));
                    for (int i = 1; i < header.size(); i++) {
                        signature.add(new Attribute(header.get(i)));
                    }
                } else {
                    signature.add(new Attribute("Y", new ArrayList<>(attributeValueSet)));
                    for (int i = 1; i < firstRecord.size(); i++) {
                        signature.add(new Attribute("X" + i));
                    }
                }
            } else {
                int row = parseHeader ? 1 : 0;
                if (data.hasHeader()) {
                    final ArrayList<String> header = data.getHeader();
                    signature.add(new Attribute(header.get(0)));
                    for (int i = 1; i < header.size(); i++) {
                        signature.add(new Attribute(header.get(i)));
                    }
                }
                for (final ArrayList<String> record : data) {
                    if (signature.isEmpty()) {
                        signature.add(new Attribute("Y"));
                        for (int i = 1; i < record.size(); i++) {
                            signature.add(new Attribute("X" + i));
                        }
                    }
                    for (int col = 0; col < record.size(); col++) {
                        parseDouble(record, row, col);
                    }
                }
                if (signature.isEmpty()) {
                    throw new IOException("There is no records in the CSV file!");
                }
            }
            this.signature = signature;
        }
    }

    public ArrayList<Attribute> getSignature() {
        return new ArrayList<>(signature);
    }

    public Instances read(File file) throws IOException {
        final Instances instances = new Instances("DATA", signature, 100);
        instances.setClassIndex(0);
        try (final CSVInputData data = new CSVInputData(file, parseHeader)) {
            int row = parseHeader ? 1 : 0;
            for (final ArrayList<String> record : data) {
                final Instance instance = new DenseInstance(instances.numAttributes());
                for (int i = 1; i < record.size(); i++) {
                    instance.setValue(i, parseDouble(record, row, i));
                }
                if (nominal) {
                    instance.setValue(0, signature.get(0).indexOfValue(record.get(0)));
                } else {
                    instance.setValue(0, parseDouble(record, row, 0));
                }
                instances.add(new SparseInstance(instance));
                row++;
            }
        }
        return instances;
    }
}
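A minimal sketch of using ARFFDataReader directly, assuming Shell.reset() has already written data/icecream.csv. The reader is constructed with nominal = false (numeric target in column 0) and parseHeader = true, matching how the linear-regression commands above use it.

import ai.preferred.regression.Shell;
import ai.preferred.regression.io.ARFFDataReader;
import weka.core.Instances;

import java.io.File;

public class ARFFDataReaderSketch {
    public static void main(final String[] args) throws Exception {
        Shell.reset();                                            // writes data/icecream.csv
        final File csv = new File("data/icecream.csv");
        final ARFFDataReader reader = new ARFFDataReader(csv, false, true);
        final Instances data = reader.read(csv);                  // class index is column 0
        System.out.println(data.numInstances() + " rows, " + data.numAttributes() + " attributes");
    }
}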
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/io/CSVInputData.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
package ai.preferred.regression.io;

import com.google.common.collect.Lists;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;

public class CSVInputData implements Iterable<ArrayList<String>>, AutoCloseable {

    private final ArrayList<String> header;
    private final boolean parseHeader;
    private final File file;

    public CSVInputData(File file, boolean parseHeader) throws IOException {
        this.file = file;
        this.parseHeader = parseHeader;
        if (parseHeader) {
            header = parseHeader();
        } else {
            header = null;
        }
    }

    private ArrayList<String> parseHeader() throws IOException {
        final CSVParser parser = newParser();
        final Iterator<CSVRecord> iterator = parser.iterator();
        if (!iterator.hasNext()) {
            throw new IOException("The header record is not found!");
        }
        CSVRecord headerRecord = iterator.next();
        parser.close();
        return Lists.newArrayList(headerRecord);
    }

    public boolean hasHeader() {
        return header != null;
    }

    public ArrayList<String> getHeader() {
        if (header == null) {
            throw new UnsupportedOperationException("This CSV file has no header!");
        }
        return new ArrayList<>(header);
    }

    public ArrayList<ArrayList<String>> getRecords() throws IOException {
        final CSVParser parser = newParser();
        final Iterator<CSVRecord> iterator = parser.iterator();
        final ArrayList<ArrayList<String>> data = new ArrayList<>();
        skipHeaderIfExists(iterator);
        while (iterator.hasNext()) {
            data.add(Lists.newArrayList(iterator.next()));
        }
        parser.close();
        return data;
    }

    private void skipHeaderIfExists(Iterator<CSVRecord> iterator) throws IOException {
        if (parseHeader) {
            if (!iterator.hasNext()) {
                throw new IOException("The header record is not found!");
            }
            iterator.next();
        }
    }

    @Override
    public Iterator<ArrayList<String>> iterator() {
        try {
            return new Iter();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    private CSVParser newParser() throws IOException {
        return CSVParser.parse(file, StandardCharsets.UTF_8, CSVFormat.EXCEL);
    }

    @Override
    public void close() {
        // do nothing
    }

    private class Iter implements Iterator<ArrayList<String>>, Closeable {

        private final CSVParser parser;
        private final Iterator<CSVRecord> innerIter;

        Iter() throws IOException {
            parser = CSVInputData.this.newParser();
            innerIter = parser.iterator();
            skipHeaderIfExists(innerIter);
        }

        @Override
        public boolean hasNext() {
            final boolean hasNext = innerIter.hasNext();
            if (!hasNext) {
                try {
                    parser.close();
                } catch (IOException e) {
                    throw new IllegalStateException(e);
                }
            }
            return hasNext;
        }

        @Override
        public ArrayList<String> next() {
            return Lists.newArrayList(innerIter.next());
        }

        @Override
        public void close() throws IOException {
            parser.close();
        }

        @Override
        protected void finalize() throws Throwable {
            close();
        }
    }
}
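A small sketch of reading a CSV through CSVInputData, again assuming data/icecream.csv exists (for example after Shell.reset()). The boolean passed to the constructor mirrors the -h/--header option of the commands above; the iterator skips the header row.

import ai.preferred.regression.io.CSVInputData;

import java.io.File;
import java.util.ArrayList;

public class CSVInputDataSketch {
    public static void main(final String[] args) throws Exception {
        try (final CSVInputData data = new CSVInputData(new File("data/icecream.csv"), true)) {
            System.out.println("HEADER: " + data.getHeader());
            for (final ArrayList<String> record : data) { // header row is skipped by the iterator
                System.out.println(record);
            }
        }
    }
}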
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/io/CSVUtils.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.io; import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.CSVPrinter; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.nio.charset.StandardCharsets; public class CSVUtils { public static CSVInputData reader(File file, boolean header) throws IOException { return new CSVInputData(file, header); } public static CSVPrinter printer(File file) throws IOException { return new CSVPrinter(new OutputStreamWriter(new FileOutputStream(file, false), StandardCharsets.UTF_8), CSVFormat.EXCEL); } @SafeVarargs public static <T> String[] toStringArray(T... values) { final String[] strings = new String[values.length]; for (int i = 0; i < values.length; i++) { strings[i] = String.valueOf(values[i]); } return strings; } private CSVUtils() { throw new AssertionError(); } }
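A brief sketch of the CSVUtils helpers shown above; temp/sketch.csv is a placeholder output path and the numeric row holds made-up values.

import ai.preferred.regression.io.CSVUtils;
import org.apache.commons.csv.CSVPrinter;

import java.io.File;
import java.io.IOException;

public class CSVUtilsSketch {
    public static void main(final String[] args) throws IOException {
        // Assumes the temp/ directory already exists (e.g. after Shell.reset()).
        try (final CSVPrinter printer = CSVUtils.printer(new File("temp/sketch.csv"))) {
            printer.printRecord((Object[]) CSVUtils.toStringArray("Y", "X1"));
            printer.printRecord((Object[]) CSVUtils.toStringArray(0.386, 41.0));
        }
    }
}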
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/AddX2.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class AddX2 extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(AddX2.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the column", required = true) private int column; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { final ArrayList<String> header = data.getHeader(); header.add("(" + header.get(column) + ")^2"); printer.printRecord(header); } for (final ArrayList<String> record : data) { final String value = record.get(column); final double x = Double.parseDouble(value); final double x2 = x * x; record.add(String.valueOf(x2)); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(AddX2.class, args); } }
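// A possible invocation of AddX2; the file names are placeholders, and the flags
// mirror the @Option declarations above (plus the inherited -i/-o/-h options).
import ai.preferred.regression.pe.AddX2;

public class AddX2Sketch {
  public static void main(String[] args) {
    AddX2.main(new String[]{
        "-i", "icecream.csv",    // input CSV (required)
        "-o", "icecream-x2.csv", // output CSV (required)
        "-c", "1"                // index of the column to square
    });
  }
}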
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/AddX3.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class AddX3 extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(AddX3.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the column", required = true) private int column; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { ArrayList<String> header = data.getHeader(); // TODO: transform this header here! // FOR EXAMPLE: // header.add("NEW_COLUMN"); printer.printRecord(header); } for (final ArrayList<String> record : data) { // TODO: transform each record here! // FOR EXAMPLE: // record.add("VALUE"); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(AddX3.class, args); } }
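// One way the TODO skeleton above could be completed, mirroring AddX2 but cubing the
// value; this is a sketch of the exercise under that assumption, not a canonical answer.
import ai.preferred.regression.io.CSVInputData;
import ai.preferred.regression.pe.ProcessingElement;
import org.apache.commons.csv.CSVPrinter;
import org.kohsuke.args4j.Option;
import java.io.IOException;
import java.util.ArrayList;

public class AddX3Sketch extends ProcessingElement {
  @Option(name = "-c", aliases = {"--column"}, usage = "the index of the column", required = true)
  private int column;

  @Override
  protected void process(CSVInputData data, CSVPrinter printer) throws IOException {
    if (data.hasHeader()) {
      final ArrayList<String> header = data.getHeader();
      header.add("(" + header.get(column) + ")^3"); // name of the new column
      printer.printRecord(header);
    }
    for (final ArrayList<String> record : data) {
      final double x = Double.parseDouble(record.get(column));
      record.add(String.valueOf(x * x * x)); // append x^3
      printer.printRecord(record);
    }
  }

  public static void main(String[] args) {
    parseArgsAndRun(AddX3Sketch.class, args);
  }
}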
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/AverageCell.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class AverageCell extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(AverageCell.class); @Option(name = "-c", aliases = {"--column"}) private int column; @Option(name = "-d", aliases = {"--delimiter"}) private String delimiter = ","; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { ArrayList<String> header = data.getHeader(); header.add(header.remove(column)); printer.printRecord(header); } for (ArrayList<String> record : data) { double sum = 0.0; final String[] values = record.get(column).split(delimiter); for (final String v : values) { try { sum += Double.parseDouble(v.trim()); } catch (NumberFormatException e) { LOGGER.error("Unable to parse the number", e); } } record.remove(column); record.add(String.valueOf(sum / values.length)); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(AverageCell.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/Dummy.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class Dummy extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(Dummy.class); // TODO: add your options! @Option(name = "-z", aliases = {"--option-z"}) private boolean option = false; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { ArrayList<String> header = data.getHeader(); // TODO: transform this header here! // FOR EXAMPLE: // header.add("NEW_COLUMN"); printer.printRecord(header); } for (ArrayList<String> record : data) { // TODO: transform each record here! // FOR EXAMPLE: // record.add("VALUE"); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(Dummy.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/EncodeTextAsFrequency.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import ai.preferred.regression.io.CSVUtils; import ai.preferred.regression.pe.data.Vocabulary; import com.google.common.collect.HashMultiset; import com.google.common.collect.ImmutableMultiset; import com.google.common.collect.Multiset; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.regex.Pattern; public class EncodeTextAsFrequency extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(EncodeTextAsFrequency.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the input column", required = true) private int column; @Option(name = "-s", aliases = {"--separator"}, usage = "specifies regular expression for splitting text into words") private String separator = "\\W+"; @Option(name = "-n", aliases = {"--number-of-words"}, usage = "the maximum number of words to keep") private int numberOfWords = 1000; @Option(name = "-p", aliases = {"--prefix"}, usage = "the prefix of the new columns") private String prefix = "WORD:"; private static <T> Comparator<Multiset.Entry<T>> getDecreasingCountComparator() { return (entry1, entry2) -> Integer.compare(entry2.getCount(), entry1.getCount()); } private static String[] toLowerCase(String[] words) { final String[] result = new String[words.length]; for (int i = 0; i < words.length; i++) { result[i] = words[i].toLowerCase(); } return result; } private static String[] trimEmpty(String[] words) { final ArrayList<String> result = new ArrayList<>(); for (final String word : words) { if (!word.trim().isEmpty()) { result.add(word); } } return result.toArray(new String[0]); } private static Multiset<String> toBagOfWords(String text, String separator) { final Pattern tokenizer = Pattern.compile(separator); String[] words; words = tokenizer.split(text); words = trimEmpty(words); words = toLowerCase(words); return ImmutableMultiset.copyOf(toLowerCase(words)); } private Vocabulary buildVocabulary(CSVInputData reader, int numberOfWords) { final Multiset<String> vocabulary = HashMultiset.create(); for (final ArrayList<String> values : reader) { final String text = values.get(column); vocabulary.addAll(toBagOfWords(text, separator)); } final ArrayList<Multiset.Entry<String>> highestCountFirst = new ArrayList<>(vocabulary.entrySet()); highestCountFirst.sort(getDecreasingCountComparator()); final ArrayList<String> wordsToRetain = new ArrayList<>(numberOfWords); for (final Multiset.Entry<String> e : highestCountFirst.subList(0, Math.min(highestCountFirst.size(), numberOfWords))) { wordsToRetain.add(e.getElement()); } return new Vocabulary(wordsToRetain); } @Override protected 
void process(CSVInputData data, CSVPrinter printer) throws IOException { final Vocabulary vocabulary = buildVocabulary(data, numberOfWords); if (data.hasHeader()) { final ArrayList<String> header = data.getHeader(); header.remove(column); for (final String h : vocabulary.getVocabularyList()) { header.add(prefix + h); } printer.printRecord(header); } for (final ArrayList<String> record : data) { final Multiset<String> bagOfWords = toBagOfWords(record.get(column), separator); final Integer[] vDocument = new Integer[vocabulary.size()]; Arrays.fill(vDocument, 0); for (final Multiset.Entry<String> entry : bagOfWords.entrySet()) { final int index = vocabulary.getIndex(entry.getElement()); if (index == -1) { continue; } vDocument[index] = entry.getCount(); } record.remove(column); Collections.addAll(record, CSVUtils.toStringArray(vDocument)); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(EncodeTextAsFrequency.class, args); } }
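// A toy illustration of the tokenise-and-count step used above: split on \W+, drop
// empties, lower-case, and count. It mirrors the private toBagOfWords helper as a
// sketch; the sample sentence is made up.
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class BagOfWordsSketch {
  public static void main(String[] args) {
    final String text = "Love these. Love the fit!";
    final Multiset<String> bag = HashMultiset.create();
    for (final String word : text.split("\\W+")) {
      if (!word.trim().isEmpty()) {
        bag.add(word.toLowerCase());
      }
    }
    // Expected counts: love=2, these=1, the=1, fit=1
    System.out.println(bag);
  }
}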
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/EncodeValueAsOneHot.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import ai.preferred.regression.io.CSVUtils; import ai.preferred.regression.pe.data.Vocabulary; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.*; public class EncodeValueAsOneHot extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(EncodeValueAsOneHot.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the input column", required = true) private int column; @Option(name = "-p", aliases = {"--prefix"}, usage = "the prefix of the new columns") private String prefix = "VALUE:"; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { final Vocabulary vocabulary = buildVocabulary(data); if (data.hasHeader()) { final ArrayList<String> header = data.getHeader(); header.remove(column); for (final String h : vocabulary.getVocabularyList()) { header.add(prefix + h); } printer.printRecord(header); } for (final ArrayList<String> record : data) { final Integer[] vOneHot = new Integer[vocabulary.size()]; Arrays.fill(vOneHot, 0); final int index = vocabulary.getIndex(record.get(column)); vOneHot[index] = 1; record.remove(column); Collections.addAll(record, CSVUtils.toStringArray(vOneHot)); printer.printRecord(record); } } private Vocabulary buildVocabulary(CSVInputData reader) { final Set<String> vocabulary = new HashSet<>(); for (final ArrayList<String> record : reader) { vocabulary.add(record.get(column)); } return new Vocabulary(vocabulary); } public static void main(String[] args) { parseArgsAndRun(EncodeValueAsOneHot.class, args); } }
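// A toy sketch of the one-hot expansion above, reusing the same Vocabulary class;
// the category values are made up to mirror the "Type" column of the sample camera data.
import ai.preferred.regression.pe.data.Vocabulary;
import java.util.Arrays;

public class OneHotSketch {
  public static void main(String[] args) {
    final Vocabulary vocabulary = new Vocabulary(Arrays.asList("COMPACT", "DSLR", "MIRRORLESS"));
    final Integer[] oneHot = new Integer[vocabulary.size()];
    Arrays.fill(oneHot, 0);
    oneHot[vocabulary.getIndex("MIRRORLESS")] = 1;
    // Columns follow the sorted vocabulary COMPACT, DSLR, MIRRORLESS -> [0, 0, 1]
    System.out.println(Arrays.toString(oneHot));
  }
}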
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/Partition.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class Partition extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(Partition.class); @Option(name = "-p", aliases = {"--proportion"}, usage = "the proportion of data to be selected or excluded (ranges from 0.0 to 1.0)") private double percent = 0.8; @Option(name = "-e", aliases = {"--exclude"}, usage = "takes the other half of the selection if specified") private boolean exclude = false; @Override protected void process(CSVInputData reader, CSVPrinter printer) throws IOException { if (reader.hasHeader()) { printer.printRecord(reader.getHeader()); } final ArrayList<ArrayList<String>> data = reader.getRecords(); final int n = (int) Math.round(percent * data.size()); if (exclude) { printer.printRecords(data.subList(n, data.size())); } else { printer.printRecords(data.subList(0, n)); } } public static void main(String[] args) { parseArgsAndRun(Partition.class, args); } }
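// A small worked example of the split arithmetic above: with 30 rows and the default
// proportion 0.8, the first 24 rows are selected and the remaining 6 form the
// complement taken when --exclude is set. The row count is illustrative.
public class PartitionSketch {
  public static void main(String[] args) {
    final int rows = 30;
    final double percent = 0.8;
    final int n = (int) Math.round(percent * rows);       // 24
    System.out.println("selected rows: 0.." + (n - 1));          // 0..23
    System.out.println("excluded rows: " + n + ".." + (rows - 1)); // 24..29
  }
}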
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/ProcessingElement.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import ai.preferred.regression.io.CSVUtils; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.CmdLineException; import org.kohsuke.args4j.CmdLineParser; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; public abstract class ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(ProcessingElement.class); @Option(name = "-i", aliases = {"--input"}, usage = "the path to the input CSV file", required = true) private File input; @Option(name = "-o", aliases = {"--output"}, usage = "the path to the output CSV file", required = true) private File output; @Option(name = "-h", aliases = {"--header"}, usage = "specifies if the input CSV files have headers") private boolean header = true; public ProcessingElement() { } protected abstract void process(CSVInputData data, CSVPrinter printer) throws Exception; protected static void parseArgsAndRun(Class<? extends ProcessingElement> clazz, String[] args) { ProcessingElement processingElement = null; try { processingElement = clazz.newInstance(); } catch (IllegalAccessException | InstantiationException e) { System.err.println("Please check if there is the public default constructor for the class: " + clazz.getCanonicalName()); System.exit(1); } if (args == null) { System.out.println("=========== HELP ==========="); System.out.println(); System.out.println("Processing Element: " + clazz.getSimpleName() + ".class"); System.out.println(); System.out.println("Shell.run(" + clazz.getSimpleName() + ".class, \"\");"); final CmdLineParser parser = new CmdLineParser(processingElement); System.out.println(); parser.printUsage(System.out); System.out.println(); System.out.println("============================"); System.out.println(); System.out.println(); return; } final CmdLineParser parser = new CmdLineParser(processingElement); try { parser.parseArgument(args); } catch (CmdLineException e) { System.err.println("ProcessingElement: " + clazz.getCanonicalName()); System.err.println(e.getMessage()); System.err.println(); parser.printUsage(System.err); System.exit(1); } if (processingElement.input.equals(processingElement.output)) { LOGGER.error("The input and output files point to the same location: {}", processingElement.input); System.exit(1); } try (final CSVPrinter printer = CSVUtils.printer(processingElement.output); final CSVInputData reader = CSVUtils.reader(processingElement.input, processingElement.header)) { try { processingElement.process(reader, printer); } catch (Exception e) { LOGGER.error("Unexpected error: ", e); System.exit(1); } } catch (IOException e) { LOGGER.error("Unable to process files: ", e); System.exit(1); } } }
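// A sketch of chaining two processing elements into a small pipeline by running their
// main methods back to back on intermediate files; all file names here are placeholders.
import ai.preferred.regression.pe.AddX2;
import ai.preferred.regression.pe.Shuffle;

public class PipelineSketch {
  public static void main(String[] args) {
    // Shuffle the raw rows, then append a squared feature column to the result.
    Shuffle.main(new String[]{"-i", "raw.csv", "-o", "shuffled.csv", "-s", "42"});
    AddX2.main(new String[]{"-i", "shuffled.csv", "-o", "features.csv", "-c", "1"});
  }
}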
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/ProjectColumns.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.kohsuke.args4j.spi.StringArrayOptionHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; public class ProjectColumns extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(ProjectColumns.class); @Option(name = "-c", aliases = {"--columns"}, usage = "the column names separated by spaces", handler = StringArrayOptionHandler.class, required = true) private String[] columns = new String[0]; private static List<Integer> indicesOf(ArrayList<String> header, String[] columns) { final List<Integer> indices = new ArrayList<>(columns.length); for (final String name : columns) { int index = header.indexOf(name); if (index > -1) { indices.add(index); } } return indices; } private static <T> ArrayList<T> projectIndices(ArrayList<T> list, List<Integer> indices) { final ArrayList<T> projection = new ArrayList<>(indices.size()); for (int index : indices) { projection.add(list.get(index)); } return projection; } @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (!data.hasHeader()) { throw new IllegalArgumentException("ProjectColumns requires CSV with header!"); } final ArrayList<String> header = data.getHeader(); final List<Integer> indices = indicesOf(header, columns); printer.printRecord(projectIndices(header, indices)); for (final ArrayList<String> record : data) { printer.printRecord(projectIndices(record, indices)); } } public static void main(String[] args) { parseArgsAndRun(ProjectColumns.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/RemoveColumn.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class RemoveColumn extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(RemoveColumn.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the column to be dropped", required = true) private int column; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { final ArrayList<String> header = data.getHeader(); header.remove(column); printer.printRecord(header); } for (final ArrayList<String> record : data) { record.remove(column); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(RemoveColumn.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/RemoveOversizedRows.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class RemoveOversizedRows extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(RemoveOversizedRows.class); @Option(name = "-c", aliases = {"--column"}) private int column; @Option(name = "-d", aliases = {"--delimiter"}) private String delimiter = ","; @Option(name = "-l", aliases = {"--length"}) private int maxLength; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { ArrayList<String> header = data.getHeader(); printer.printRecord(header); } for (ArrayList<String> record : data) { if (record.get(column).split(delimiter).length <= maxLength) { printer.printRecord(record); } } } public static void main(String[] args) { parseArgsAndRun(RemoveOversizedRows.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/SelectEquals.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; public class SelectEquals extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(SelectEquals.class); @Option(name = "-c", aliases = {"--column"}, usage = "the index of the input column", required = true) private int column; @Option(name = "-e", aliases = {"--equals"}, usage = "the value to be verified", required = true) private String value; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { printer.printRecord(data.getHeader()); } for (final ArrayList<String> record : data) { if (value.equals(record.get(column))) { printer.printRecord(record); } } } public static void main(String[] args) { parseArgsAndRun(SelectEquals.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/Shuffle.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Random; public class Shuffle extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(Shuffle.class); @Option(name = "-s", aliases = {"--seed"}, usage = "random seed") private long seed = 1; @Override protected void process(CSVInputData reader, CSVPrinter printer) throws IOException { if (reader.hasHeader()) { printer.printRecord(reader.getHeader()); } final ArrayList<ArrayList<String>> data = reader.getRecords(); Collections.shuffle(data, new Random(seed)); printer.printRecords(data); } public static void main(String[] args) { parseArgsAndRun(Shuffle.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/SwapColumns.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe; import ai.preferred.regression.io.CSVInputData; import org.apache.commons.csv.CSVPrinter; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; public class SwapColumns extends ProcessingElement { private static final Logger LOGGER = LoggerFactory.getLogger(SwapColumns.class); @Option(name = "-x", aliases = {"--column-x"}, usage = "the index of one column to be swapped", required = true) private int column1; @Option(name = "-y", aliases = {"--column-y"}, usage = "the index of the other column to be swapped", required = true) private int column2; @Override protected void process(CSVInputData data, CSVPrinter printer) throws IOException { if (data.hasHeader()) { final ArrayList<String> header = data.getHeader(); Collections.swap(header, column1, column2); printer.printRecord(header); } for (final ArrayList<String> record : data) { Collections.swap(record, column1, column2); printer.printRecord(record); } } public static void main(String[] args) { parseArgsAndRun(SwapColumns.class, args); } }
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/pe/data/Vocabulary.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.pe.data; import java.util.*; public class Vocabulary { private final ArrayList<String> vocabularyList; private final Map<String, Integer> vocabularyMap; public Vocabulary(Collection<String> vocabulary) { vocabularyList = new ArrayList<>(vocabulary); Collections.sort(vocabularyList); vocabularyMap = new HashMap<>(vocabularyList.size()); for (final String w : vocabularyList) { vocabularyMap.put(w, vocabularyMap.size()); } } public List<String> getVocabularyList() { return Collections.unmodifiableList(vocabularyList); } public String[] getVocabularyArray() { return vocabularyList.toArray(new String[0]); } public int getIndex(String w) { final Integer index = vocabularyMap.get(w); if (index == null) { return -1; } return index; } public String getWord(int index) { if (index >= 0 && index < vocabularyList.size()) { return vocabularyList.get(index); } throw new IllegalArgumentException("No such index in the vocabulary: " + index); } public int size() { return vocabularyList.size(); } @Override public String toString() { return "Vocabulary{" + vocabularyList + '}'; } }
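// A minimal usage sketch for Vocabulary: words are sorted on construction, indices
// follow that order, and words outside the vocabulary map to -1. The word list is made up.
import ai.preferred.regression.pe.data.Vocabulary;
import java.util.Arrays;

public class VocabularySketch {
  public static void main(String[] args) {
    final Vocabulary vocabulary = new Vocabulary(Arrays.asList("quality", "cheap", "broken"));
    System.out.println(vocabulary);                     // Vocabulary{[broken, cheap, quality]}
    System.out.println(vocabulary.getIndex("cheap"));   // 1
    System.out.println(vocabulary.getIndex("stylish")); // -1 (not in the vocabulary)
    System.out.println(vocabulary.getWord(2));          // quality
  }
}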
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/plot/XYChart.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.plot; import org.jfree.chart.ChartFactory; import org.jfree.chart.ChartPanel; import org.jfree.chart.JFreeChart; import org.jfree.chart.plot.XYPlot; import org.jfree.chart.renderer.xy.XYLineAndShapeRenderer; import org.jfree.data.xy.XYDataset; import org.jfree.data.xy.XYSeries; import org.jfree.data.xy.XYSeriesCollection; import javax.swing.*; import java.awt.*; public class XYChart extends JFrame { private static final long serialVersionUID = 1L; public XYChart(String chartTitle, XYSeries data, XYSeries line) { super("Linear Regression Plotter"); final XYSeriesCollection collection = new XYSeriesCollection(); collection.addSeries(data); collection.addSeries(line); final ChartPanel panel = new ChartPanel(createChart(collection, chartTitle)); panel.setPreferredSize(new Dimension(640, 480)); setContentPane(panel); } private JFreeChart createChart(XYDataset dataset, String title) { final JFreeChart chart = ChartFactory.createXYLineChart(title, "X", "Y", dataset); final XYPlot plot = chart.getXYPlot(); final XYLineAndShapeRenderer renderer = new XYLineAndShapeRenderer(); renderer.setSeriesLinesVisible(0, false); renderer.setSeriesShapesVisible(0, true); renderer.setSeriesLinesVisible(1, true); renderer.setSeriesShapesVisible(1, false); plot.setRenderer(renderer); return chart; } }
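// A short sketch of displaying the XYChart frame with a scatter series and a fitted
// line series; the data points and the fitted endpoints are made up for illustration.
import ai.preferred.regression.plot.XYChart;
import org.jfree.data.xy.XYSeries;
import javax.swing.WindowConstants;

public class XYChartSketch {
  public static void main(String[] args) {
    final XYSeries data = new XYSeries("data"); // rendered as shapes (series 0)
    data.add(0.0, 0.31);
    data.add(10.0, 0.37);
    data.add(20.0, 0.44);

    final XYSeries line = new XYSeries("fit");  // rendered as a line (series 1)
    line.add(-5.0, 0.28);
    line.add(25.0, 0.47);

    final XYChart chart = new XYChart("Consumption vs Temperature", data, line);
    chart.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
    chart.pack();
    chart.setVisible(true);
  }
}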
0
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression
java-sources/ai/preferred/csvpl/1.0/ai/preferred/regression/reset/DataFiles.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package ai.preferred.regression.reset; public class DataFiles { public static final String ICECREAM_CSV = "Consumption,Temperature\n" + "0.386,5.00\n" + "0.374,13.33\n" + "0.393,17.22\n" + "0.425,20.00\n" + "0.406,20.56\n" + "0.344,18.33\n" + "0.327,16.11\n" + "0.288,8.33\n" + "0.269,0.00\n" + "0.256,-4.44\n" + "0.286,-2.22\n" + "0.298,-3.33\n" + "0.329,0.00\n" + "0.318,4.44\n" + "0.381,12.78\n" + "0.381,17.22\n" + "0.47,22.22\n" + "0.443,22.22\n" + "0.386,19.44\n" + "0.342,15.56\n" + "0.319,6.67\n" + "0.307,4.44\n" + "0.284,0.00\n" + "0.326,-2.78\n" + "0.309,-2.22\n" + "0.359,0.56\n" + "0.376,5.00\n" + "0.416,11.11\n" + "0.437,17.78\n" + "0.548,21.67\n"; public static final String ICECREAM_RAW_CSV = "Id,Temperature,Consumption\n" + "1,5.00,0.386\n" + "2,13.33,0.374\n" + "3,17.22,0.393\n" + "4,20.00,0.425\n" + "5,20.56,0.406\n" + "6,18.33,0.344\n" + "7,16.11,0.327\n" + "8,8.33,0.288\n" + "9,0.00,0.269\n" + "10,-4.44,0.256\n" + "11,-2.22,0.286\n" + "12,-3.33,0.298\n" + "13,0.00,0.329\n" + "14,4.44,0.318\n" + "15,12.78,0.381\n" + "16,17.22,0.381\n" + "17,22.22,0.47\n" + "18,22.22,0.443\n" + "19,19.44,0.386\n" + "20,15.56,0.342\n" + "21,6.67,0.319\n" + "22,4.44,0.307\n" + "23,0.00,0.284\n" + "24,-2.78,0.326\n" + "25,-2.22,0.309\n" + "26,0.56,0.359\n" + "27,5.00,0.376\n" + "28,11.11,0.416\n" + "29,17.78,0.437\n" + "30,21.67,0.548\n"; public static final String AMAZON_CSV = "Id,Rating,Text\n" + "1,5,I only spent less than ten on these so they're good for what I paid for\n" + "2,5,I'm in love with these glasses.\n" + "3,5,Stylish. My kid loved them\n" + "4,5,They came in great condition.\n" + "5,5,These are really wonderful!\n" + "6,5,these are GREAT quality\n" + "7,5,She LOVES them!\n" + "8,5,Love these.\n" + "9,5,The quality is pretty good also.\n" + "10,5,EXCELLENT PRODUCT\n" + "11,5,I love them. Exactly what i wanted.\n" + "12,5,Son love them\n" + "13,5,He says they give him that style.\n" + "14,5,Great value!!!\n" + "15,5,Very complimentary!\n" + "16,5,\"Cute, great quality, good fit.\"\n" + "17,5,I love these glasses!!\n" + "18,5,they fit perfectly.\n" + "19,5,They look expensive and the fit is perfect\n" + "20,5,Sturdy and good looking for a great price\n" + "21,5,Very stylish! Great accessory to compliment an outfit\n" + "22,5,Thanks so much my grandson enjoy them.\n" + "23,5,Daughter loves them.\n" + "24,5,Makes me look smarter in my tinder profile !\n" + "25,1,the side arms keep breaking\n" + "26,1,just look soooo cheap!\n" + "27,1,Not my style.\n" + "28,1,Mine arrived broken!! Not worth sending back.\n" + "29,1,Dollar store quality.\n" + "30,1,Not like picture.\n" + "31,1,We're cheap and broke right away. \n" + "32,1,\"These are so cheap looking, they are unwearable.\"\n" + "33,1,Very Very VERY Round ! Not at all vintage .\n" + "34,1,It's a peace of garbage. 
Feels so cheap and plastic.\n" + "35,1,feel flimsy like it would break i returned it the next day\n" + "36,1,lens have too much glare\n" + "37,1,they look cheaply made and plastic\n" + "38,1,Very cheap looking\n" + "39,1,make me headache\n" + "40,1,Feel apart after a week of getting them prescribed.\n" + "41,1,Really cheap looking.\n" + "42,1,Glasses are crooked and not made correctly.\n" + "43,1,poorly made... broke after three days\n" + "44,1,Sunglasses were very small.\n" + "45,1,Look fake and cheap\n" + "46,1,lens fell out on first day.\n" + "47,1,Not really like it!\n" + "48,1,Delivered broken.\n" + "49,1,\"Overall, trash.\"\n" + "50,1,\"Were broken when I opened the box, very disappointed\"\n" + "51,1,Horrible lens fell out 2nd day!\n" + "52,1,I don't like them.\n" + "53,1,These hoes broke too I want my money\n" + "54,1,Broke within the 3 days\n" + "55,1,Little small but still good\n" + "56,1,Super small\n" + "57,1,Horrible desing\n" + "58,1,It's broke\n" + "59,1,Crooked and cheaply made.\n" + "60,1,Poor quality\n"; public static final String CAMERA_CSV = "Id,Price (USD),Type,Focus\n" + "1,949,MIRRORLESS,MANUAL\n" + "2,99,DSLR,BOTH\n" + "3,90,DSLR,BOTH\n" + "4,80,DSLR,AUTO\n" + "5,20,COMPACT,MANUAL\n" + "6,50,COMPACT,AUTO\n" + "7,49,COMPACT,AUTO\n" + "8,30,COMPACT,AUTO\n" + "9,800,MIRRORLESS,AUTO\n" + "10,789,MIRRORLESS,MANUAL\n" + "11,35,COMPACT,AUTO\n" + "12,789,MIRRORLESS,BOTH\n"; public static final String AMAZON_EXTENDED = "Id,Rating,Text,Verified Purchase,Helpful\n" + "1,5,I only spent less than ten on these so they're good for what I paid for,YES,11\n" + "2,5,I'm in love with these glasses.,YES,2\n" + "3,5,Stylish. My kid loved them,YES,2\n" + "4,5,They came in great condition.,NO,0\n" + "5,5,These are really wonderful!,YES,0\n" + "6,5,these are GREAT quality,YES,0\n" + "7,5,She LOVES them!,YES,0\n" + "8,5,Love these.,YES,0\n" + "9,5,The quality is pretty good also.,YES,3\n" + "10,5,EXCELLENT PRODUCT,YES,0\n" + "11,5,I love them. Exactly what i wanted.,NO,0\n" + "12,5,Son love them,YES,0\n" + "13,5,He says they give him that style.,YES,0\n" + "14,5,Great value!!!,YES,0\n" + "15,5,Very complimentary!,YES,0\n" + "16,5,\"Cute, great quality, good fit.\",YES,1\n" + "17,5,I love these glasses!!,YES,0\n" + "18,5,they fit perfectly.,YES,0\n" + "19,5,They look expensive and the fit is perfect,NO,0\n" + "20,5,Sturdy and good looking for a great price,YES,0\n" + "21,5,Very stylish! Great accessory to compliment an outfit,YES,0\n" + "22,5,Thanks so much my grandson enjoy them.,YES,0\n" + "23,5,Daughter loves them.,YES,0\n" + "24,5,Makes me look smarter in my tinder profile !,YES,0\n" + "25,1,the side arms keep breaking,YES,0\n" + "26,1,just look soooo cheap!,NO,0\n" + "27,1,Not my style.,NO,0\n" + "28,1,Mine arrived broken!! Not worth sending back.,YES,0\n" + "29,1,Dollar store quality.,YES,5\n" + "30,1,Not like picture.,NO,0\n" + "31,1,We're cheap and broke right away. ,YES,0\n" + "32,1,\"These are so cheap looking, they are unwearable.\",YES,0\n" + "33,1,Very Very VERY Round ! Not at all vintage .,YES,0\n" + "34,1,It's a peace of garbage. 
Feels so cheap and plastic.,YES,0\n" + "35,1,feel flimsy like it would break i returned it the next day,NO,0\n" + "36,1,lens have too much glare,YES,0\n" + "37,1,they look cheaply made and plastic,YES,0\n" + "38,1,Very cheap looking,NO,0\n" + "39,1,make me headache,YES,0\n" + "40,1,Feel apart after a week of getting them prescribed.,NO,0\n" + "41,1,Really cheap looking.,YES,0\n" + "42,1,Glasses are crooked and not made correctly.,YES,7\n" + "43,1,poorly made... broke after three days,YES,2\n" + "44,1,Sunglasses were very small.,NO,0\n" + "45,1,Look fake and cheap,YES,0\n" + "46,1,lens fell out on first day.,NO,0\n" + "47,1,Not really like it!,YES,2\n" + "48,1,Delivered broken.,YES,0\n" + "49,1,\"Overall, trash.\",YES,0\n" + "50,1,\"Were broken when I opened the box, very disappointed\",NO,0\n" + "51,1,Horrible lens fell out 2nd day!,YES,5\n" + "52,1,I don't like them.,NO,0\n" + "53,1,These hoes broke too I want my money,NO,0\n" + "54,1,Broke within the 3 days,NO,0\n" + "55,1,Little small but still good,NO,4\n" + "56,1,Super small,NO,0\n" + "57,1,Horrible desing,NO,0\n" + "58,1,It's broke,NO,0\n" + "59,1,Crooked and cheaply made.,NO,0\n" + "60,1,Poor quality,YES,0\n"; }
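// A small sketch that materialises one of the bundled CSV constants above into a file
// so it can be fed to a processing element; the output path is a placeholder.
import ai.preferred.regression.reset.DataFiles;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class DataFilesSketch {
  public static void main(String[] args) throws IOException {
    Files.write(Paths.get("icecream.csv"),
        DataFiles.ICECREAM_CSV.getBytes(StandardCharsets.UTF_8));
    System.out.println("Wrote " + DataFiles.ICECREAM_CSV.split("\n").length
        + " lines (header included).");
  }
}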
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/Crawler.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; import ai.preferred.venom.fetcher.*; import ai.preferred.venom.job.Job; import ai.preferred.venom.job.PriorityJobQueue; import ai.preferred.venom.job.Scheduler; import ai.preferred.venom.request.CrawlerRequest; import ai.preferred.venom.request.Request; import ai.preferred.venom.response.Response; import ai.preferred.venom.response.VResponse; import ai.preferred.venom.validator.Validator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; import javax.validation.constraints.NotNull; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; /** * This class handles the coordination between classes during the pre and * post fetching of a page such as executing threads, calling to fetcher * and manipulating the priority of a scheduled request. * * @author Maksim Tkachenko * @author Truong Quoc Tuan * @author Ween Jiann Lee */ public final class Crawler implements Interruptible, AutoCloseable { /** * Logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(Crawler.class); /** * A new thread where the crawler would run. */ @NotNull private final Thread crawlerThread; /** * Allow the crawler to be closed when done. */ @NotNull private final AtomicBoolean exitWhenDone; /** * The fetcher used. */ @NotNull private final Fetcher fetcher; /** * The maximum number of tries for a request. */ private final int maxTries; /** * The proportion of tries to retain a specified proxy. */ private final double propRetainProxy; /** * The router to be used. */ @Nullable private final HandlerRouter router; /** * The job queue used. */ @NotNull private final BlockingQueue<Job> jobQueue; /** * The scheduler used. */ @NotNull private final Scheduler scheduler; /** * The maximum number of simultaneous connections. */ @NotNull private final Semaphore connections; /** * The session store used. */ @NotNull private final Session session; /** * The sleep scheduler used. */ @Nullable private final SleepScheduler sleepScheduler; /** * The thread pool to fetch requests and execute callbacks. */ @NotNull private final ForkJoinPool threadPool; /** * The worker manager to use. */ @NotNull private final WorkerManager workerManager; /** * A list of pending futures. */ @NotNull private final AtomicInteger jobsPending; /** * The list of fatal exceptions occurred during response handling. */ private final List<FatalHandlerException> fatalHandlerExceptions; /** * Constructs a new instance of crawler. 
* * @param builder An instance of builder */ private Crawler(final Builder builder) { crawlerThread = new Thread(this::run, builder.name); exitWhenDone = new AtomicBoolean(false); fetcher = builder.fetcher; maxTries = builder.maxTries; propRetainProxy = builder.propRetainProxy; router = builder.router; jobQueue = builder.jobQueue; scheduler = new Scheduler(jobQueue); connections = new Semaphore(builder.maxConnections); session = builder.session; sleepScheduler = builder.sleepScheduler; threadPool = new ForkJoinPool(builder.parallelism, pool -> { final ForkJoinWorkerThread worker = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool); worker.setName(builder.name + " " + worker.getPoolIndex()); return worker; }, null, true ); workerManager = builder.workerManager == null ? new ThreadedWorkerManager(threadPool) : builder.workerManager; jobsPending = new AtomicInteger(); fatalHandlerExceptions = Collections.synchronizedList(new ArrayList<>()); } /** * Creates a new instance of Builder. * * @return an instance of Builder. */ public static Builder builder() { return new Builder(); } /** * Builds a new default instance of Crawler. * * @return an instance of Crawler. */ public static Crawler buildDefault() { return builder().build(); } /** * Sleep if last request time is less than required sleep time. * * @param job An instance of job * @param lastRequestTime The time of last request * @throws InterruptedException If sleep is interrupted */ private void sleep(final Job job, final long lastRequestTime) throws InterruptedException { final long sleepTime; if (job.getRequest().getSleepScheduler() == null) { if (sleepScheduler != null) { sleepTime = sleepScheduler.getSleepTime(); } else { sleepTime = 0; } } else { sleepTime = job.getRequest().getSleepScheduler().getSleepTime(); } final long timeElapsed = System.nanoTime() - lastRequestTime; final long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(timeElapsed); if (sleepTime > timeElapsedMillis) { Thread.sleep(sleepTime - timeElapsedMillis); } } /** * Check if request is an instance of crawler request and return it * if true, otherwise wrap it with crawler request and return that. * * @param request An instance of request * @return An instance of crawler request */ private CrawlerRequest normalizeRequest(final Request request) { if (request instanceof CrawlerRequest) { return (CrawlerRequest) request; } return new CrawlerRequest(request); } /** * Normalise request and check if specified proxy should be used. * * @param request An instance of request * @param tryCount Current try count * @return An instance of crawler request */ private CrawlerRequest prepareRequest(final Request request, final int tryCount) { final CrawlerRequest crawlerRequest = normalizeRequest(request); if (request.getProxy() != null && ((double) tryCount) / maxTries > propRetainProxy) { crawlerRequest.removeProxy(); } return crawlerRequest; } /** * Handle a successful response. * * @param job The instance of job being processed. * @param response Response returned. 
*/ private void handle(final Job job, final Response response) { try { if (job.getHandler() != null) { job.getHandler().handle(job.getRequest(), new VResponse(response), getScheduler(), session, workerManager.getWorker()); } else if (router != null) { final Handler routedHandler = router.getHandler(job.getRequest()); if (routedHandler != null) { routedHandler.handle(job.getRequest(), new VResponse(response), getScheduler(), session, workerManager.getWorker()); } } else { LOGGER.error("No handler to handle request {}.", job.getRequest().getUrl()); } } catch (final FatalHandlerException e) { LOGGER.error("Fatal exception occurred in handler, when parsing response ({}), interrupting execution.", job.getRequest().getUrl(), e); fatalHandlerExceptions.add(e); } catch (final Exception e) { LOGGER.error("An exception occurred in handler when parsing response: {}", job.getRequest().getUrl(), e); } finally { jobsPending.decrementAndGet(); } } /** * Handle all exception thrown during the fetching process. * * @param job The instance of job being processed. * @param ex Exception returned. */ private void except(final Job job, final Throwable ex) { if ((ex instanceof ValidationException && ((ValidationException) ex).getStatus() == Validator.Status.STOP) || ex instanceof StopCodeException || ex instanceof CancellationException) { jobsPending.decrementAndGet(); } else { synchronized (jobsPending) { // Synchronisation required to prevent crawler stopping incorrectly. jobsPending.decrementAndGet(); if (job.getTryCount() < maxTries) { job.prepareRetry(); jobQueue.add(job); LOGGER.debug("Job {} - {} re-queued.", Integer.toHexString(job.hashCode()), job.getRequest().getUrl()); } else { LOGGER.error("Max retries reached for request: {}", job.getRequest().getUrl()); } } } } /** * Start polling for jobs, and fetch request. 
*/ private void run() { fetcher.start(); long lastRequestTime = 0; while (!Thread.currentThread().isInterrupted() && !threadPool.isShutdown() && fatalHandlerExceptions.isEmpty()) { try { final Job job = jobQueue.poll(100, TimeUnit.MILLISECONDS); if (job == null) { if (jobsPending.get() > 0) { continue; } // This should only run if pendingJob == 0 && job == null synchronized (jobsPending) { LOGGER.debug("({}) Checking for exit conditions.", crawlerThread.getName()); if (jobQueue.peek() == null && jobsPending.get() <= 0 && exitWhenDone.get()) { break; } } continue; } sleep(job, lastRequestTime); lastRequestTime = System.nanoTime(); connections.acquire(); jobsPending.incrementAndGet(); threadPool.execute(() -> { LOGGER.debug("Preparing job {} - {} (try {}/{}).", Integer.toHexString(job.hashCode()), job.getRequest().getUrl(), job.getTryCount(), maxTries); final CrawlerRequest crawlerRequest = prepareRequest(job.getRequest(), job.getTryCount()); if (Thread.currentThread().isInterrupted()) { connections.release(); jobsPending.decrementAndGet(); LOGGER.debug("The thread pool is interrupted"); return; } final CompletableFuture<Response> completableResponseFuture = new CompletableFuture<>(); completableResponseFuture .whenComplete((response, throwable) -> connections.release()) .thenAcceptAsync(response -> handle(job, response), threadPool) .whenComplete((blank, throwable) -> { if (throwable != null) { final Throwable cause = throwable.getCause(); except(job, cause); } }); fetcher.fetch(crawlerRequest, new CompletableCallback(job, completableResponseFuture)); }); } catch (final InterruptedException e) { LOGGER.debug("({}) producer thread interrupted.", crawlerThread.getName(), e); Thread.currentThread().interrupt(); break; } } if (!fatalHandlerExceptions.isEmpty()) { LOGGER.debug("Handler exception found... Interrupting."); interrupt(); } LOGGER.debug("({}) will stop producing requests.", crawlerThread.getName()); } /** * Get the instance of scheduler used. * * @return the instance of scheduler used. */ public Scheduler getScheduler() { return scheduler; } /** * Starts the crawler by starting a new thread to poll for jobs. * * @return the instance of Crawler used. */ public synchronized Crawler start() { crawlerThread.start(); LOGGER.info("{} thread started.", crawlerThread.getName()); return this; } /** * Starts the crawler by starting a new thread to poll for jobs and close it * after the jobQueue has reached 0. * * @return the instance of Crawler used. * @throws Exception if this resource cannot be closed. */ public synchronized Crawler startAndClose() throws Exception { start(); close(); return this; } /** * Interrupts then close this object. * * @throws Exception if exception is thrown on close. */ public void interruptAndClose() throws Exception { interrupt(); close(); } /** * Interrupts crawler, fetcher and worker threads. 
*/ @Override public void interrupt() { if (!Thread.currentThread().equals(crawlerThread) && crawlerThread.isAlive()) { crawlerThread.interrupt(); } if (!threadPool.isTerminated()) { threadPool.shutdownNow(); } workerManager.interrupt(); if (fetcher instanceof Interruptible) { ((Interruptible) fetcher).interrupt(); } } @Override public void close() throws Exception { if (exitWhenDone.compareAndSet(false, true)) { LOGGER.debug("Initialising \"{}\" shutdown, waiting for threads to join...", crawlerThread.getName()); try { crawlerThread.join(); LOGGER.debug("{} producer thread joined.", crawlerThread.getName()); } catch (InterruptedException e) { LOGGER.warn("The producer thread joining has been interrupted", e); interrupt(); Thread.currentThread().interrupt(); } threadPool.shutdown(); try { threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); LOGGER.debug("Thread pool has terminated gracefully."); } catch (InterruptedException e) { LOGGER.warn("The thread pool joining has been interrupted", e); interrupt(); Thread.currentThread().interrupt(); } Exception cachedException = null; for (final AutoCloseable closeable : new AutoCloseable[]{workerManager, fetcher}) { try { closeable.close(); } catch (final Exception e) { if (cachedException != null) { cachedException.addSuppressed(e); } else { cachedException = e; } } if (Thread.currentThread().isInterrupted()) { interrupt(); } } if (!fatalHandlerExceptions.isEmpty()) { final FatalHandlerException mainHandlerException; synchronized (fatalHandlerExceptions) { final Iterator<FatalHandlerException> iterator = fatalHandlerExceptions.iterator(); mainHandlerException = iterator.next(); while (iterator.hasNext()) { mainHandlerException.addSuppressed(iterator.next()); } if (cachedException != null) { mainHandlerException.addSuppressed(cachedException); } } throw mainHandlerException; } if (Thread.currentThread().isInterrupted()) { Thread.currentThread().interrupt(); } if (cachedException != null) { throw cachedException; } } } /** * A callback that utilises CompletableFuture. */ private static final class CompletableCallback implements Callback { /** * The job this callback is for. */ private final Job job; /** * The CompletableFuture to call upon response. */ private final CompletableFuture<Response> completableResponseFuture; /** * Constructs an instance of CompletableCallback. * * @param job The job this callback is for. * @param completableResponseFuture The CompletableFuture to call upon response. */ private CompletableCallback(final Job job, final CompletableFuture<Response> completableResponseFuture) { this.job = job; this.completableResponseFuture = completableResponseFuture; } @Override public void completed(final @NotNull Request request, final @NotNull Response response) { LOGGER.debug("Completed received for job {} - {}.", Integer.toHexString(job.hashCode()), job.getRequest().getUrl()); completableResponseFuture.complete(response); } @Override public void failed(final @NotNull Request request, final @NotNull Exception ex) { LOGGER.debug("Failed received for job {} - {}.", Integer.toHexString(job.hashCode()), job.getRequest().getUrl()); completableResponseFuture.completeExceptionally(ex); } @Override public void cancelled(final @NotNull Request request) { LOGGER.debug("Cancelled received for job {} - {}.", Integer.toHexString(job.hashCode()), job.getRequest().getUrl()); completableResponseFuture.cancel(true); } } /** * A builder for crawler class. */ public static final class Builder { /** * The fetcher used. 
*/ private Fetcher fetcher; /** * The maximum number of simultaneous connections. */ private int maxConnections; /** * The maximum number of tries for a request. */ private int maxTries; /** * The name of this crawler. */ private String name; /** * The parallelism level for multithreading. */ private int parallelism; /** * The worker manager to use. */ private WorkerManager workerManager; /** * The proportion of tries to retain a specified proxy. */ private double propRetainProxy; /** * The router to be used. */ private HandlerRouter router; /** * The job queue used. */ private BlockingQueue<Job> jobQueue; /** * The sleep scheduler used. */ private SleepScheduler sleepScheduler; /** * The session store used. */ private Session session; /** * Constructs an instance of builder with default values. */ private Builder() { fetcher = AsyncFetcher.buildDefault(); maxConnections = 32; maxTries = 50; name = "Crawler"; parallelism = Runtime.getRuntime().availableProcessors(); workerManager = null; propRetainProxy = 0.05; router = null; jobQueue = new PriorityJobQueue(); sleepScheduler = new SleepScheduler(250, 2000); session = Session.EMPTY_SESSION; } /** * Sets the name for crawler thread. * * @param name name for crawler thread * @return this */ public Builder setName(final @NotNull String name) { if (name == null) { throw new IllegalStateException("Attribute 'name' cannot be null."); } this.name = name; return this; } /** * Sets the Fetcher to be used, if not set, default will be chosen. * * @param fetcher fetcher to be used. * @return this */ public Builder setFetcher(final @NotNull Fetcher fetcher) { if (fetcher == null) { throw new IllegalStateException("Attribute 'fetcher' cannot be null."); } this.fetcher = fetcher; return this; } /** * Sets the parallelism level. Defaults to system thread count. * * @param parallelism the parallelism level. * @return this */ public Builder setParallelism(final int parallelism) { if (parallelism <= 0) { throw new IllegalStateException("Attribute 'parallelism' must be more or equal to 1."); } this.parallelism = parallelism; return this; } /** * Sets the WorkerManager to be used, if not set, default will be chosen. * * @param workerManager result workerManager to be used. * @return this */ public Builder setWorkerManager(final @NotNull WorkerManager workerManager) { if (workerManager == null) { throw new IllegalStateException("Attribute 'workerManager' cannot be null."); } this.workerManager = workerManager; return this; } /** * Sets the JobQueue to be used, if not set, default will be chosen. * This is deprecated, use setJobQueue instead. * * @param jobQueue scheduler to be used. * @return this */ @Deprecated public Builder setScheduler(final @NotNull BlockingQueue<Job> jobQueue) { if (jobQueue == null) { throw new IllegalStateException("Attribute 'jobQueue' cannot be null."); } this.jobQueue = jobQueue; return this; } /** * Sets the JobQueue to be used, if not set, default will be chosen. * * @param jobQueue scheduler to be used. * @return this */ public Builder setJobQueue(final @NotNull BlockingQueue<Job> jobQueue) { if (jobQueue == null) { throw new IllegalStateException("Attribute 'jobQueue' cannot be null."); } this.jobQueue = jobQueue; return this; } /** * Sets HandlerRouter to be used. Defaults to none. * * @param router handler router to be used. * @return this */ public Builder setHandlerRouter(final HandlerRouter router) { this.router = router; return this; } /** * The number of concurrent connections allowed out of the client. 
     *
     * @param maxConnections maximum number of concurrent connections.
     * @return this
     */
    public Builder setMaxConnections(final int maxConnections) {
      if (maxConnections <= 0) {
        throw new IllegalStateException("Attribute 'maxConnections' must be more or equal to 1.");
      }
      this.maxConnections = maxConnections;
      return this;
    }

    /**
     * Sets the number of times to retry a request. This number excludes the first try.
     * Defaults to 50.
     *
     * @param maxTries max retry times.
     * @return this
     */
    public Builder setMaxTries(final int maxTries) {
      if (maxTries <= 0) {
        throw new IllegalStateException("Attribute 'maxTries' must be more or equal to 1.");
      }
      this.maxTries = maxTries;
      return this;
    }

    /**
     * Sets the proportion of the maximum tries during which a proxy specified in the
     * request will be retained. The value should be between 0 and 1 inclusive; defaults to 0.05.
     * <p>
     * This only comes into effect when a specific proxy is set for the request.
     * The specified proxy will be overridden beyond this threshold.
     * </p>
     *
     * @param propRetainProxy threshold percentage.
     * @return this
     */
    public Builder setPropRetainProxy(final double propRetainProxy) {
      if (propRetainProxy > 1 || propRetainProxy < 0) {
        throw new IllegalStateException("Attribute 'propRetainProxy' must be within the range [0, 1].");
      }
      this.propRetainProxy = propRetainProxy;
      return this;
    }

    /**
     * Sets the SleepScheduler to be used; if not set, a default will be chosen.
     *
     * @param sleepScheduler sleep scheduler to be used.
     * @return this
     */
    public Builder setSleepScheduler(final SleepScheduler sleepScheduler) {
      this.sleepScheduler = sleepScheduler;
      return this;
    }

    /**
     * Sets the Session to be used; if not set, defaults to {@code Session.EMPTY_SESSION}.
     *
     * @param session session where variables are defined
     * @return this
     */
    public Builder setSession(final Session session) {
      if (session == null) {
        this.session = Session.EMPTY_SESSION;
      } else {
        this.session = session;
      }
      return this;
    }

    /**
     * Builds the crawler with the options specified.
     *
     * @return an instance of Crawler
     */
    public Crawler build() {
      return new Crawler(this);
    }

  }

}
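For orientation, a minimal usage sketch of the crawler and its builder follows. It assumes a static Crawler.builder() factory (the Builder constructor above is private), a concrete VRequest request class, a Scheduler.add(Request, Handler) method from the job package (not shown here), and a user-supplied PageHandler (a sketch of one follows Handler.java below); treat it as an illustration rather than code from this repository.

// Usage sketch only: Crawler.builder(), VRequest, Scheduler.add(Request, Handler)
// and PageHandler are assumed/illustrative.
import ai.preferred.venom.Crawler;
import ai.preferred.venom.SleepScheduler;
import ai.preferred.venom.fetcher.AsyncFetcher;
import ai.preferred.venom.request.VRequest;

public final class CrawlerExample {

  public static void main(final String[] args) throws Exception {
    try (final Crawler crawler = Crawler.builder()
        .setFetcher(AsyncFetcher.buildDefault())
        .setMaxTries(5)
        .setSleepScheduler(new SleepScheduler(500, 1500))
        .build()
        .start()) {
      // Enqueue the seed page; the handler may schedule further requests.
      crawler.getScheduler().add(new VRequest("https://example.org/"), new PageHandler());
    }
    // try-with-resources invokes close(), which waits for the job queue to drain.
  }
}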
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/FatalHandlerException.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; /** * This class defines fatal runtime exception for {@link Handler}. * If {@link Handler#handle(ai.preferred.venom.request.Request, * ai.preferred.venom.response.VResponse, ai.preferred.venom.job.Scheduler, * Session, Worker)} encounters unexpected situation, it can signal * {@link Crawler} to stop execution by throwing this exception. * * @author Maksim Tkachenko */ public class FatalHandlerException extends RuntimeException { /** * Constructs a new fatal handler exception with {@code null} as its * detail message. The cause is not initialized, and may subsequently be * initialized by a call to {@link #initCause}. */ public FatalHandlerException() { super(); } /** * Constructs a new fatal handler exception with the specified detail message. * The cause is not initialized, and may subsequently be initialized by a * call to {@link #initCause}. * * @param message the detail message. The detail message is saved for * later retrieval by the {@link #getMessage()} method. */ public FatalHandlerException(final String message) { super(message); } /** * Constructs a new fatal handler exception with the specified detail message and * cause. <p>Note that the detail message associated with * {@code cause} is <i>not</i> automatically incorporated in * this runtime exception's detail message. * * @param message the detail message (which is saved for later retrieval * by the {@link #getMessage()} method). * @param cause the cause (which is saved for later retrieval by the * {@link #getCause()} method). (A {@code null} value is * permitted, and indicates that the cause is nonexistent or * unknown.) * @since 1.4 */ public FatalHandlerException(final String message, final Throwable cause) { super(message, cause); } /** * Constructs a new fatal handler exception with the specified cause and a * detail message of {@code (cause==null ? null : cause.toString())} * (which typically contains the class and detail message of * {@code cause}). This constructor is useful for runtime exceptions * that are little more than wrappers for other throwables. * * @param cause the cause (which is saved for later retrieval by the * {@link #getCause()} method). (A {@code null} value is * permitted, and indicates that the cause is nonexistent or * unknown.) * @since 1.4 */ public FatalHandlerException(final Throwable cause) { super(cause); } }
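As a hedged fragment, this is how a handler might use the exception above to abort the whole crawl; the blocked-page check and the getHtml() call on VResponse are illustrative assumptions.

// Fragment inside a Handler implementation (illustrative):
@Override
public void handle(final Request request, final VResponse response, final Scheduler scheduler,
                   final Session session, final Worker worker) {
  if (response.getHtml().contains("Access denied")) {
    // Signals the Crawler to interrupt; close() rethrows this exception afterwards.
    throw new FatalHandlerException("Blocked while fetching " + request.getUrl());
  }
  // ... normal parsing ...
}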
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/Handler.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import ai.preferred.venom.job.Scheduler;
import ai.preferred.venom.request.Request;
import ai.preferred.venom.response.VResponse;

/**
 * This interface represents the method called when a response is received;
 * the parsing logic is left to individual implementations.
 *
 * @author Maksim Tkachenko
 * @author Ween Jiann Lee
 */
public interface Handler {

  /**
   * This function is called when the request is fetched successfully.
   * <p>
   * This function will hold the logic after the page/file has been fetched.
   * </p>
   *
   * @param request   request fetched.
   * @param response  venom response received.
   * @param scheduler scheduler used for this request.
   * @param session   session variables defined when the crawler is initiated.
   * @param worker    provides the ability to run code in a separate thread.
   */
  void handle(Request request, VResponse response, Scheduler scheduler, Session session, Worker worker);

}
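A minimal sketch of an implementation. The class name, the pagination check, the VRequest class and the getHtml() call are illustrative assumptions; Scheduler.add(Request, Handler) is assumed from the job package.

public class PageHandler implements Handler {

  @Override
  public void handle(final Request request, final VResponse response, final Scheduler scheduler,
                     final Session session, final Worker worker) {
    // Extract whatever the crawl needs from the fetched page.
    final String html = response.getHtml();

    // Follow-up requests go back onto the scheduler, reusing this handler.
    if (html.contains("rel=\"next\"")) {
      scheduler.add(new VRequest(request.getUrl() + "?page=2"), this);
    }
  }

}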
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/HandlerRouter.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import ai.preferred.venom.request.Request;

import javax.annotation.Nullable;
import javax.validation.constraints.NotNull;

/**
 * This interface allows the user to map a request to a handler.
 *
 * @author Maksim Tkachenko
 */
public interface HandlerRouter {

  /**
   * Returns the handler to be used for a specified request.
   * <p>
   * This will only be used if a handler is not specified when the request
   * is added to the scheduler.
   * </p>
   *
   * @param request request made
   * @return the instance of handler to be used
   */
  @Nullable
  Handler getHandler(@NotNull Request request);

}
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/Interruptible.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; /** * @author Ween Jiann Lee */ public interface Interruptible { /** * Interrupt the underlying mechanisms of the class. * <p> * Please note that this {@code interrupt} method should be * idempotent. In other words, calling this {@code interrupt} * method more than once should not have any side effect. * </p> */ void interrupt(); }
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/ProxyProvider.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import ai.preferred.venom.request.Request;
import org.apache.http.HttpHost;

import javax.validation.constraints.NotNull;

/**
 * This interface allows the user to define proxies to be used for requests.
 *
 * @author Truong Quoc Tuan
 * @author Maksim Tkachenko
 * @author Ween Jiann Lee
 */
public interface ProxyProvider {

  /**
   * Returns a proxy to be used for the given request.
   *
   * @param request the request to be made
   * @return the proxy to use
   */
  HttpHost get(@NotNull Request request);

}
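A small sketch of an implementation that rotates through a fixed list of proxies; the proxy hosts and ports are placeholders.

import ai.preferred.venom.request.Request;
import org.apache.http.HttpHost;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinProxyProvider implements ProxyProvider {

  /**
   * Placeholder proxies; replace with real hosts.
   */
  private final List<HttpHost> proxies = Arrays.asList(
      new HttpHost("10.0.0.1", 3128),
      new HttpHost("10.0.0.2", 3128));

  private final AtomicInteger counter = new AtomicInteger();

  @Override
  public HttpHost get(final Request request) {
    // Hand out proxies in round-robin order, one per request.
    return proxies.get(Math.floorMod(counter.getAndIncrement(), proxies.size()));
  }

}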
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/Session.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; import com.google.common.collect.ImmutableMap; import javax.annotation.Nullable; import javax.validation.constraints.NotNull; import java.util.Collections; import java.util.HashMap; import java.util.Map; /** * @author Maksim Tkachenko */ public final class Session { /** * An instance of an empty session. */ public static final Session EMPTY_SESSION = new Session(); /** * A map of all session key and value. */ private final Map<Key<?>, ?> map; /** * The generic session constructor. * * @param map the map to replace */ private Session(final Map<Key<?>, ?> map) { this.map = map; } /** * Constructs an empty session. */ private Session() { this(Collections.emptyMap()); } /** * Constructs a singleton session. * * @param key an unique identifier of the session variable * @param value the value of the session variable * @param <T> the type of the value of the session variable */ private <T> Session(final @NotNull Key<T> key, final @Nullable T value) { this(Collections.singletonMap(key, value)); } /** * Constructs a session with builder variables. * * @param builder An instance of builder */ private Session(final @NotNull Builder builder) { this(ImmutableMap.copyOf(builder.map)); } /** * Constructs a singleton session. * * @param key an unique identifier of the session variable * @param value the value of the session variable * @param <T> the type of the value of the session variable * @return An instance of session with single key-value pair */ public static <T> Session singleton(final @NotNull Key<T> key, final @Nullable T value) { return new Session(key, value); } /** * Create a new instance of builder. * * @return A new instance of builder */ public static Builder builder() { return new Builder(); } /** * Returns the session variable from the store. * * @param key the name of the session variable to retrieve * @param <T> the type of the value of the session variable being retrieved * @return the value of the session variable stored */ @SuppressWarnings("unchecked") public <T> T get(final @NotNull Key<T> key) { return (T) map.get(key); } /** * A class representing the key for a session. * * @param <T> specifies the type of the stored value */ public static final class Key<T> { } /** * Builder for Session. */ public static class Builder { /** * A map of all session key and value. */ private final Map<Key<?>, Object> map = new HashMap<>(); /** * Adds a session variable into store. * * @param key an unique name of the session variable * @param value the value of the session variable * @param <T> the type of the value of the session variable * @return an instance of Builder */ public final <T> Builder put(final @NotNull Key<T> key, final @Nullable T value) { map.put(key, value); return this; } /** * Create a new instance of session. * * @return A new instance of session. */ public final Session build() { return new Session(this); } } }
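A short sketch of how session variables are defined at setup and read back inside a handler; the key name and path are illustrative.

// Define a typed key once, e.g. in a constants class.
public static final Session.Key<String> OUTPUT_DIR = new Session.Key<>();

// At setup time, build the session and pass it to Crawler.Builder#setSession.
final Session session = Session.builder()
    .put(OUTPUT_DIR, "/tmp/crawl-output")
    .build();

// Inside Handler#handle(...), read the variable back in a type-safe way.
final String outputDir = session.get(OUTPUT_DIR);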
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/SleepScheduler.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import javax.validation.constraints.NotNull;
import java.util.Random;

/**
 * @author Maksim Tkachenko
 * @author Ween Jiann Lee
 */
public class SleepScheduler {

  /**
   * The random source used to generate sleep times.
   */
  private final Random random;

  /**
   * The minimum sleep time.
   */
  private final long min;

  /**
   * The maximum sleep time.
   */
  private final long max;

  /**
   * Constructs a sleep scheduler with a fixed sleep time.
   *
   * @param sleepTime Sleep time
   */
  public SleepScheduler(final long sleepTime) {
    this(sleepTime, sleepTime, null);
  }

  /**
   * Constructs a sleep scheduler with a range of sleep times.
   *
   * @param min Minimum sleep time
   * @param max Maximum sleep time
   */
  public SleepScheduler(final long min, final long max) {
    this(min, max, new Random(System.currentTimeMillis() * 13));
  }

  /**
   * Constructs a sleep scheduler with a range of sleep times and a given random source.
   *
   * @param min    Minimum sleep time
   * @param max    Maximum sleep time
   * @param random Random source
   */
  private SleepScheduler(final long min, final long max, final @NotNull Random random) {
    if (min < 0) {
      throw new IllegalArgumentException("Sleep time cannot be less than 0.");
    }
    this.min = min;
    if (min > max) {
      throw new IllegalArgumentException("Sleep time in \"min\" cannot be greater than \"max\".");
    }
    this.max = max;
    this.random = random;
  }

  /**
   * Get the amount of time to wait before the next request.
   *
   * @return interval required
   */
  public final long getSleepTime() {
    if (min == max) {
      return min;
    } else {
      return random.nextInt((int) (max - min)) + min;
    }
  }

}
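A brief usage sketch. The values are interpreted as milliseconds, matching the crawler builder's default of new SleepScheduler(250, 2000) shown earlier; the unit itself is an assumption since this class does not state one.

// Fixed delay of one second between requests:
final SleepScheduler fixed = new SleepScheduler(1000);

// Random delay drawn from [250, 2000), the crawler's default range:
final SleepScheduler ranged = new SleepScheduler(250, 2000);

final long pause = ranged.getSleepTime();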
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/ThreadedWorkerManager.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; import javax.validation.constraints.NotNull; import java.util.concurrent.*; /** * @author Maksim Tkachenko */ public class ThreadedWorkerManager implements WorkerManager { /** * Logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(ThreadedWorkerManager.class); /** * The executor used to submit tasks. */ @Nullable private final ExecutorService executor; /** * The worker to expose executor methods. */ private final Worker worker; /** * Constructs a threaded worker manager with a specified executor. */ public ThreadedWorkerManager() { this(null); } /** * Constructs a threaded worker manager with a specified executor. * * @param executor An executor service */ public ThreadedWorkerManager(@Nullable final ExecutorService executor) { this.executor = executor; if (executor instanceof ForkJoinPool || executor == null) { this.worker = new ForkJoinWorker(); } else { this.worker = new DefaultWorker(executor); } } @Override public final Worker getWorker() { return worker; } @Override public final void interrupt() { if (executor == null || executor.isTerminated()) { return; } LOGGER.debug("Forcefully shutting down the worker manager."); executor.shutdownNow(); } @Override public final void close() { if (executor == null || executor.isTerminated()) { return; } LOGGER.debug("Shutting down the worker manager."); executor.shutdown(); try { if (executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS)) { LOGGER.debug("The worker manager has been terminated."); } else { interrupt(); } } catch (final InterruptedException e) { interrupt(); Thread.currentThread().interrupt(); } } /** * This abstract class exposes the methods to allow submitting tasks for * multithreading and implements inline blocking method. */ public abstract static class AbstractManagedBlockingWorker implements Worker { @Override public final void executeBlockingIO(final @NotNull Runnable task) { if (task == null) { throw new NullPointerException(); } final ManagedBlockerTask managedBlockerTask = new ManagedBlockerTask(task); try { ForkJoinPool.managedBlock(managedBlockerTask); } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new AssertionError("Exception of unknown cause. Please verify codebase.", e); } } } /** * This class exposes the methods to allow submitting tasks for multithreading. */ static final class DefaultWorker extends AbstractManagedBlockingWorker { /** * The executor used to submit tasks. */ private final ExecutorService executor; /** * Constructs inner worker with a specified executor service. 
* * @param executor An instance of executor service */ DefaultWorker(final ExecutorService executor) { this.executor = executor; } @Override public @NotNull <T> Future<T> submit(final @NotNull Callable<T> task) { return executor.submit(task); } @Override public @NotNull <T> Future<T> submit(final @NotNull Runnable task, final T result) { return executor.submit(task, result); } @Override public @NotNull Future<?> submit(final @NotNull Runnable task) { return executor.submit(task); } } /** * This class exposes the methods to allow submitting tasks for multithreading * in {@link ForkJoinPool} or {@link ForkJoinPool#commonPool()}. */ static final class ForkJoinWorker extends AbstractManagedBlockingWorker { @Override public @NotNull <T> Future<T> submit(final @NotNull Callable<T> task) { return ForkJoinTask.adapt(task).fork(); } @Override public @NotNull <T> Future<T> submit(final @NotNull Runnable task, final T result) { return ForkJoinTask.adapt(task, result).fork(); } @Override public @NotNull Future<?> submit(final @NotNull Runnable task) { return ForkJoinTask.adapt(task).fork(); } } /** * This class allows extending managed parallelism for tasks running * in {@link ForkJoinPool}s. */ static final class ManagedBlockerTask implements ForkJoinPool.ManagedBlocker { /** * The task to be run. */ private final Runnable task; /** * {@code true} if task has successfully completed. */ private boolean done = false; /** * Constructs a managed blocking task. * * @param task the blocking task */ private ManagedBlockerTask(final Runnable task) { this.task = task; } @Override public boolean block() { task.run(); done = true; return true; } @Override public boolean isReleasable() { return done; } } }
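A sketch of plugging a custom executor into the crawler; the pool size is arbitrary and Crawler.builder() is assumed as the static factory.

// Fully qualified names are used to keep this fragment self-contained.
final java.util.concurrent.ExecutorService executor =
    java.util.concurrent.Executors.newFixedThreadPool(8);
final WorkerManager workerManager = new ThreadedWorkerManager(executor);

final Crawler crawler = Crawler.builder()
    .setWorkerManager(workerManager)
    .build();
// Crawler#close() later calls workerManager.close(), which shuts the executor down.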
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/UrlRouter.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; import ai.preferred.venom.request.Request; import ai.preferred.venom.validator.Validator; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Pattern; /** * This class provides an implementation to select a handler based on the url * from which they were fetched. * * @author Maksim Tkachenko * @author Ween Jiann Lee */ public class UrlRouter implements HandlerRouter, ValidatorRouter { /** * The default handler used if pattern does not match any rules. */ private final Handler defaultHandler; /** * A list of handler rules. */ private final Map<Pattern, Handler> handlerRules = new LinkedHashMap<>(); /** * A list of validator rules. */ private final Map<Pattern, Validator> validatorRules = new LinkedHashMap<>(); /** * A read write lock for handler rules. */ private final ReentrantReadWriteLock handlerRulesLock = new ReentrantReadWriteLock(); /** * A read write lock for validator rules. */ private final ReentrantReadWriteLock validatorRulesLock = new ReentrantReadWriteLock(); /** * Constructs a url router without default handler. */ public UrlRouter() { this(null); } /** * Constructs a url router with default handler. * * @param defaultHandler default handler */ public UrlRouter(final Handler defaultHandler) { this.defaultHandler = defaultHandler; } /** * Adds a url pattern, and the handler to be used. * <p> * Please note that the pattern must be an exact match of the url to work. * </p> * * @param urlPattern regex pattern of the url. * @param handler handler to which the fetched page should use. * @return this. */ public final UrlRouter register(final Pattern urlPattern, final Handler handler) { handlerRulesLock.writeLock().lock(); try { handlerRules.put(urlPattern, handler); } finally { handlerRulesLock.writeLock().unlock(); } return this; } /** * Adds a url pattern, and the handler to be used. * <p> * Please note that the pattern must be an exact match of the url to work. * </p> * * @param urlPattern regex pattern of the url. * @param validator validator to which the fetched page should use. * @return this. */ public final UrlRouter register(final Pattern urlPattern, final Validator validator) { validatorRulesLock.writeLock().lock(); try { validatorRules.put(urlPattern, validator); } finally { validatorRulesLock.writeLock().unlock(); } return this; } /** * Adds a url pattern, and the handler to be used. * <p> * Please note that the pattern must be an exact match of the url to work. * </p> * * @param urlPattern regex pattern of the url. * @param handler handler to which the fetched page should use. * @param validator validator to which the fetched page should use. * @return this. 
*/ public final UrlRouter register(final Pattern urlPattern, final Handler handler, final Validator validator) { register(urlPattern, handler); register(urlPattern, validator); return this; } @Override public final Handler getHandler(final Request request) { handlerRulesLock.readLock().lock(); try { for (final Map.Entry<Pattern, Handler> rule : handlerRules.entrySet()) { if (rule.getKey().matcher(request.getUrl()).matches()) { return rule.getValue(); } } } finally { handlerRulesLock.readLock().unlock(); } if (defaultHandler != null) { return defaultHandler; } throw new RuntimeException("Default handler is not set"); } @Override public final Validator getValidator(final Request request) { validatorRulesLock.readLock().lock(); try { for (final Map.Entry<Pattern, Validator> rule : validatorRules.entrySet()) { if (rule.getKey().matcher(request.getUrl()).matches()) { return rule.getValue(); } } } finally { validatorRulesLock.readLock().unlock(); } return Validator.ALWAYS_VALID; } }
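A configuration sketch for the router above. The URL patterns and the ItemHandler/ListHandler/PageHandler classes are illustrative, Crawler.builder() is assumed, and java.util.regex.Pattern is imported; note that register() requires the pattern to match the full URL.

final UrlRouter router = new UrlRouter(new PageHandler())   // fallback handler
    .register(Pattern.compile("https://example\\.org/item/\\d+"),
        new ItemHandler(), StatusOkValidator.INSTANCE)
    .register(Pattern.compile("https://example\\.org/list\\?page=\\d+"),
        new ListHandler());

// The same instance serves as both HandlerRouter and ValidatorRouter.
final Crawler crawler = Crawler.builder()
    .setHandlerRouter(router)
    .setFetcher(AsyncFetcher.builder().setValidatorRouter(router).build())
    .build();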
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/ValidatorRouter.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import ai.preferred.venom.request.Request;
import ai.preferred.venom.validator.Validator;

import javax.validation.constraints.NotNull;

/**
 * This interface allows the user to map a request to a validator.
 *
 * @author Maksim Tkachenko
 * @author Ween Jiann Lee
 */
public interface ValidatorRouter {

  /**
   * Returns the validator to be used for a specified request.
   * <p>
   * The validator returned here is applied in addition to the fetcher's
   * default validator.
   * </p>
   *
   * @param request request made
   * @return the instance of validator to be used
   */
  Validator getValidator(@NotNull Request request);

}
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/Worker.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom; import javax.validation.constraints.NotNull; import java.util.concurrent.Callable; import java.util.concurrent.Future; /** * @author Maksim Tkachenko * @author Ween Jiann Lee */ public interface Worker { /** * Performs the given task inline, and increase available threads in the pool * by one for the execution of other tasks. * <p> * It is imperative to wrap all I/O tasks in this method to prevent * starving other parsing tasks from threads. * </p> * * @param task the I/O blocking task to execute * @throws NullPointerException if the task is null */ void executeBlockingIO(@NotNull Runnable task); /** * Submits a value-returning task for execution and returns a * Future representing the pending results of the task. The * Future's {@code get} method will return the task's result upon * successful completion. * <br> * <br> * If you would like to immediately block waiting * for a task, you can use constructions of the form * {@code result = exec.submit(aCallable).get();} * * <p>Note: The {@link java.util.concurrent.Executors} class includes a set of methods * that can convert some other common closure-like objects, * for example, {@link java.security.PrivilegedAction} to * {@link Callable} form so they can be submitted. * * @param task the task to submit * @param <T> the type of the task's result * @return a Future representing pending completion of the task * @throws java.util.concurrent.RejectedExecutionException if the task cannot be * scheduled for execution * @throws NullPointerException if the task is null */ @NotNull <T> Future<T> submit(@NotNull Callable<T> task); /** * Submits a Runnable task for execution and returns a Future * representing that task. The Future's {@code get} method will * return the given result upon successful completion. * * @param task the task to submit * @param result the result to return * @param <T> the type of the result * @return a Future representing pending completion of the task * @throws java.util.concurrent.RejectedExecutionException if the task cannot be * scheduled for execution * @throws NullPointerException if the task is null */ @NotNull <T> Future<T> submit(@NotNull Runnable task, T result); /** * Submits a Runnable task for execution and returns a Future * representing that task. The Future's {@code get} method will * return {@code null} upon <em>successful</em> completion. * * @param task the task to submit * @return a Future representing pending completion of the task * @throws java.util.concurrent.RejectedExecutionException if the task cannot be * scheduled for execution * @throws NullPointerException if the task is null */ @NotNull Future<?> submit(@NotNull Runnable task); }
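A hedged fragment from inside a handler; saveFile is a hypothetical blocking helper and getHtml() is assumed on the response.

// Fragment inside Handler#handle(...):
// Blocking I/O is wrapped so other parsing tasks are not starved of threads.
worker.executeBlockingIO(() -> saveFile(request.getUrl(), response.getHtml()));

// CPU-bound work can simply be submitted; the Future completes with its result.
final Future<Integer> wordCount = worker.submit(() -> response.getHtml().split("\\s+").length);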
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/WorkerManager.java
/*
 * Copyright 2018 Preferred.AI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ai.preferred.venom;

import javax.validation.constraints.NotNull;

/**
 * @author Maksim Tkachenko
 */
public interface WorkerManager extends Interruptible, AutoCloseable {

  /**
   * Get the worker in use.
   *
   * @return an instance of Worker
   */
  @NotNull
  Worker getWorker();

}
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/AsyncFetcher.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.ProxyProvider; import ai.preferred.venom.ValidatorRouter; import ai.preferred.venom.request.HttpFetcherRequest; import ai.preferred.venom.request.Request; import ai.preferred.venom.response.Response; import ai.preferred.venom.socks.SocksConnectingIOReactor; import ai.preferred.venom.socks.SocksHttpRoutePlanner; import ai.preferred.venom.socks.SocksIOSessionStrategy; import ai.preferred.venom.storage.FileManager; import ai.preferred.venom.uagent.DefaultUserAgent; import ai.preferred.venom.uagent.UserAgent; import ai.preferred.venom.validator.EmptyContentValidator; import ai.preferred.venom.validator.PipelineValidator; import ai.preferred.venom.validator.StatusOkValidator; import ai.preferred.venom.validator.Validator; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.http.HttpHost; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.RedirectStrategy; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.methods.RequestBuilder; import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.protocol.RequestAcceptEncoding; import org.apache.http.client.utils.URIUtils; import org.apache.http.concurrent.BasicFuture; import org.apache.http.concurrent.FutureCallback; import org.apache.http.config.Registry; import org.apache.http.config.RegistryBuilder; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.impl.conn.DefaultRoutePlanner; import org.apache.http.impl.conn.DefaultSchemePortResolver; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.IOReactorConfig; import org.apache.http.nio.client.methods.HttpAsyncMethods; import org.apache.http.nio.conn.NoopIOSessionStrategy; import org.apache.http.nio.conn.SchemeIOSessionStrategy; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.nio.reactor.IOReactorException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; import javax.net.ssl.SSLContext; import javax.validation.constraints.NotNull; import java.io.IOException; import java.net.URI; import java.util.*; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; /** * This class holds the implementation to provide how items are fetched from the web, * to validate the item and to store it if specified. * * @author Maksim Tkachenko * @author Truong Quoc Tuan * @author Ween Jiann Lee */ public final class AsyncFetcher implements Fetcher { /** * Logger. 
*/ private static final Logger LOGGER = LoggerFactory.getLogger(AsyncFetcher.class); /** * A list of callbacks to execute upon response. */ @NotNull private final List<Callback> callbacks; /** * A list of headers to append to request. */ @NotNull private final Map<String, String> headers; /** * The HTTP client used for requests. */ @NotNull private final CloseableHttpAsyncClient httpClient; /** * The proxy provider for proxies. */ @Nullable private final ProxyProvider proxyProvider; /** * A list of status code to stop retry. */ @NotNull private final Set<Integer> stopCodes; /** * The user agent used for requests. */ @NotNull private final UserAgent userAgent; /** * The validator used. */ @NotNull private final Validator validator; /** * The validator router used. */ @Nullable private final ValidatorRouter router; /** * The timeout in milliseconds used when requesting a connection. */ private final int connectionRequestTimeout; /** * Determines whether compression is allowed. */ private final boolean compressed; /** * Constructs an instance of AsyncFetcher. * * @param builder An instance of builder */ private AsyncFetcher(final Builder builder) { final ImmutableList.Builder<Callback> callbackListBuilder = new ImmutableList.Builder<>(); if (builder.fileManager != null) { callbackListBuilder.add(builder.fileManager.getCallback()); } callbackListBuilder.addAll(builder.callbacks); callbacks = callbackListBuilder.build(); headers = builder.headers; proxyProvider = builder.proxyProvider; stopCodes = builder.stopCodes; userAgent = builder.userAgent; validator = builder.validator; router = builder.router; connectionRequestTimeout = builder.connectionRequestTimeout; compressed = builder.compressed; final IOReactorConfig reactorConfig = IOReactorConfig.custom() .setIoThreadCount(builder.numIoThreads) .setSoKeepAlive(true) .setConnectTimeout(builder.connectTimeout) .setSoTimeout(builder.socketTimeout) .build(); final HttpAsyncClientBuilder clientBuilder = HttpAsyncClientBuilder.create() .setMaxConnPerRoute(builder.maxRouteConnections) .setMaxConnTotal(builder.maxConnections) .setSSLContext(builder.sslContext) .setRedirectStrategy(builder.redirectStrategy); if (builder.enableSocksProxy) { final PoolingNHttpClientConnectionManager connectionManager; try { final SSLIOSessionStrategy sslioSessionStrategy = SSLIOSessionStrategy.getDefaultStrategy(); final Registry<SchemeIOSessionStrategy> reg = RegistryBuilder.<SchemeIOSessionStrategy>create() .register("socks", new SocksIOSessionStrategy(sslioSessionStrategy)) .register("http", NoopIOSessionStrategy.INSTANCE) .register("https", sslioSessionStrategy) .build(); final SocksConnectingIOReactor reactor = new SocksConnectingIOReactor(reactorConfig, builder.threadFactory); connectionManager = new PoolingNHttpClientConnectionManager(reactor, reg); clientBuilder.setConnectionManager(connectionManager) .setRoutePlanner(new SocksHttpRoutePlanner(new DefaultRoutePlanner(DefaultSchemePortResolver.INSTANCE))); } catch (IOReactorException e) { LOGGER.error("Disabling SOCKS protocol", e); clientBuilder.setDefaultIOReactorConfig(reactorConfig).setThreadFactory(builder.threadFactory); } } else { clientBuilder.setDefaultIOReactorConfig(reactorConfig).setThreadFactory(builder.threadFactory); } if (builder.maxConnections < builder.maxRouteConnections) { clientBuilder.setMaxConnTotal(builder.maxRouteConnections); LOGGER.info("Maximum total connections will be set to {}, to match maximum route connection.", builder.maxRouteConnections); } if (builder.disableCookies) { 
clientBuilder.disableCookieManagement(); } if (builder.compressed) { clientBuilder.addInterceptorLast(new RequestAcceptEncoding()); } httpClient = clientBuilder.build(); } /** * Creates {@link BasicFuture} and fails the request with a specified exception. * * @param callback request callback * @param ex specified exception * @return BasicFuture created */ private static Future<Response> failRequest(final FutureCallback<Response> callback, final Exception ex) { final BasicFuture<Response> f = new BasicFuture<>(callback); f.failed(ex); return f; } /** * Creates {@link BasicFuture} and cancels the request. * * @param callback request callback * @return BasicFuture created */ private static Future<Response> cancelRequest(final FutureCallback<Response> callback) { final BasicFuture<Response> f = new BasicFuture<>(callback); f.cancel(true); return f; } /** * Create an instance of AsyncFetcher with default options. * * @return A new instance of async fetcher */ public static AsyncFetcher buildDefault() { return builder().build(); } /** * Create an instance of builder. * * @return A new instance of builder */ public static Builder builder() { return new Builder(); } /** * Check if request is an instance of http fetcher request and return it * if true, otherwise wrap it with HttpFetcherRequest and return that. * * @param request An instance of request * @return An instance of HttpFetcherRequest */ private HttpFetcherRequest normalizeRequest(final Request request) { if (request instanceof HttpFetcherRequest) { return (HttpFetcherRequest) request; } return new HttpFetcherRequest(request); } /** * Prepare fetcher request by prepending headers and set appropriate proxy. * * @param request An instance of request * @return An instance of HttpFetcherRequest */ private HttpFetcherRequest prepareFetcherRequest(final Request request) { HttpFetcherRequest httpFetcherRequest = normalizeRequest(request); if (!headers.isEmpty()) { httpFetcherRequest = httpFetcherRequest.prependHeaders(headers); } if (proxyProvider != null && httpFetcherRequest.getInner().getProxy() == null) { httpFetcherRequest = httpFetcherRequest.setProxy(proxyProvider.get(request)); } return httpFetcherRequest; } /** * Create an instance of request builder. * * @param request An instance of request * @return An instance of request builder */ private RequestBuilder createRequestBuilder(final Request request) { switch (request.getMethod()) { case GET: return RequestBuilder.get(); case POST: return RequestBuilder.post(); case HEAD: return RequestBuilder.head(); case PUT: return RequestBuilder.put(); case DELETE: return RequestBuilder.delete(); case OPTIONS: return RequestBuilder.options(); default: throw new RuntimeException("Request method is not defined"); } } /** * Prepare http uri request to be used with http async client. * * @param request An instance of request * @return An instance of http uri request */ private HttpUriRequest prepareHttpRequest(final HttpFetcherRequest request) { final RequestConfig config = RequestConfig.custom() .setConnectionRequestTimeout(connectionRequestTimeout) .setProxy(request.getProxy()) .build(); final RequestBuilder requestBuilder = createRequestBuilder(request) .addHeader("User-Agent", userAgent.get()) .setUri(request.getUrl()) .setConfig(config); request.getHeaders().forEach(requestBuilder::setHeader); if (request.getBody() != null) { requestBuilder.setEntity(new ByteArrayEntity(request.getBody().getBytes())); } return requestBuilder.build(); } /** * Append routed validator if present for this request. 
* * @param routedValidator An instance of routed validator * @return An instance of validator */ private Validator prepareValidator(final Validator routedValidator) { if (routedValidator == null) { return validator; } return new PipelineValidator(validator, routedValidator); } /** * Copied from {@link CloseableHttpAsyncClient}. * * @param request request * @return target * @throws ClientProtocolException Non-valid protocol */ private HttpHost determineTarget(final HttpUriRequest request) throws ClientProtocolException { // A null target may be acceptable if there is a default target. // Otherwise, the null target is detected in the director. HttpHost target = null; final URI requestURI = request.getURI(); if (requestURI.isAbsolute()) { target = URIUtils.extractHost(requestURI); if (target == null) { throw new ClientProtocolException( "URI does not specify a valid host name: " + requestURI); } } return target; } @Override public Future<Response> fetch(final Request request) { return fetch(request, Callback.EMPTY_CALLBACK); } @Override public Future<Response> fetch(final Request request, final Callback callback) { final HttpFetcherRequest httpFetcherRequest = prepareFetcherRequest(request); final FutureCallback<Response> requestCallback = new RequestCallback(httpFetcherRequest, callback); if (Thread.currentThread().isInterrupted()) { return cancelRequest(requestCallback); } final HttpUriRequest httpReq = prepareHttpRequest(httpFetcherRequest); final HttpHost target; try { target = determineTarget(httpReq); } catch (final ClientProtocolException ex) { return failRequest(requestCallback, ex); } LOGGER.debug("Fetching URL: {}", request.getUrl()); final Validator routedValidator; if (router != null) { routedValidator = router.getValidator(request); } else { routedValidator = null; } if (!httpClient.isRunning() || Thread.currentThread().isInterrupted()) { return cancelRequest(requestCallback); } try { return httpClient.execute( HttpAsyncMethods.create(target, httpReq), new AsyncResponseConsumer( prepareValidator(routedValidator), stopCodes, compressed, httpFetcherRequest ), HttpClientContext.create(), requestCallback ); } catch (final Exception e) { return failRequest(requestCallback, e); } } @Override public void start() { httpClient.start(); } @Override public void close() throws IOException { LOGGER.debug("Shutting down the fetcher..."); httpClient.close(); LOGGER.debug("The fetcher shutdown completed."); } /** * A builder for async fetcher class. */ public static final class Builder { /** * A list of callbacks to execute upon response. */ private final List<Callback> callbacks; private boolean enableSocksProxy; /** * Determines whether cookie storage is allowed. */ private boolean disableCookies; /** * The file manager used to store raw responses. */ private FileManager<?> fileManager; /** * A list of headers to append to request. */ private Map<String, String> headers; /** * The maximum number of I/O threads allowed. */ private int numIoThreads; /** * The maximum number of connections allowed. */ private int maxConnections; /** * The maximum number of connections allowed per route. */ private int maxRouteConnections; /** * The proxy provider for proxies. */ private ProxyProvider proxyProvider; /** * The SSL context for a response. */ private SSLContext sslContext; /** * A list of status code to stop retry. */ private Set<Integer> stopCodes; /** * The threadFactory used for I/O dispatcher. */ private ThreadFactory threadFactory; /** * The user agent used for requests. 
*/ private UserAgent userAgent; /** * The validator used. */ private Validator validator; /** * The redirection strategy for a response. */ private RedirectStrategy redirectStrategy; /** * The validator router used. */ private ValidatorRouter router; /** * The timeout in milliseconds used when requesting a connection. */ private int connectionRequestTimeout; /** * The timeout in milliseconds until a connection is established. */ private int connectTimeout; /** * The socket timeout ({@code SO_TIMEOUT}) in milliseconds. */ private int socketTimeout; /** * Determines whether compression is allowed. */ private boolean compressed; /** * Construct an instance of builder. */ private Builder() { callbacks = new ArrayList<>(); disableCookies = false; fileManager = null; headers = Collections.emptyMap(); maxConnections = 16; maxRouteConnections = 8; numIoThreads = Runtime.getRuntime().availableProcessors(); proxyProvider = null; stopCodes = Collections.emptySet(); threadFactory = new ThreadFactoryBuilder().setNameFormat("I/O Dispatcher %d").build(); userAgent = new DefaultUserAgent(); validator = new PipelineValidator( StatusOkValidator.INSTANCE, EmptyContentValidator.INSTANCE ); connectionRequestTimeout = -1; connectTimeout = -1; socketTimeout = -1; compressed = true; enableSocksProxy = false; } /** * Enables SOCKS protocol for proxies (socks://). Experimental. * * @return this */ public Builder enableSocksProxy() { enableSocksProxy = true; return this; } /** * Register any callbacks that will be called when a page has been fetched. * <p> * Please note that blocking callbacks will significantly reduce the rate * at which request are processed. Please implement your own executors on * I/O blocking callbacks. * </p> * * @param callback A set of FetcherCallback. * @return this */ public Builder register(final @NotNull Callback callback) { if (callback == null) { throw new IllegalStateException("Attribute 'callback' cannot be null."); } this.callbacks.add(callback); return this; } /** * Disables cookie storage. * * @return this */ public Builder disableCookies() { this.disableCookies = true; return this; } /** * Sets the FileManager to be used. Defaults to none. * <p> * If fileManager is set, all items fetched will be saved to storage. * </p> * * @param fileManager file manager to be used. * @return this */ public Builder setFileManager(final FileManager<?> fileManager) { this.fileManager = fileManager; return this; } /** * Sets the headers to be used when fetching items. Defaults to none. * * @param headers a map to headers to be used. * @return this */ public Builder setHeaders(final @NotNull Map<String, String> headers) { if (headers == null) { throw new IllegalStateException("Attribute 'headers' cannot be null."); } this.headers = headers; return this; } /** * Number of httpclient dispatcher threads. * * @param numIoThreads number of threads. * @return this */ public Builder setNumIoThreads(final int numIoThreads) { if (numIoThreads <= 0) { throw new IllegalStateException("Attribute 'numIoThreads' must be more or equal to 1."); } this.numIoThreads = numIoThreads; return this; } /** * Sets the maximum allowable connections at an instance. * * @param maxConnections the max allowable connections. 
* @return this */ public Builder setMaxConnections(final int maxConnections) { if (maxConnections <= 0) { throw new IllegalStateException("Attribute 'maxConnections' must be more or equal to 1."); } this.maxConnections = maxConnections; return this; } /** * Sets the maximum allowable connections at an instance for * a particular route (host). * * @param maxRouteConnections the max allowable connections per route. * @return this */ public Builder setMaxRouteConnections(final int maxRouteConnections) { if (maxRouteConnections <= 0) { throw new IllegalStateException("Attribute 'maxRouteConnections' must be more or equal to 1."); } this.maxRouteConnections = maxRouteConnections; return this; } /** * Sets the ProxyProvider to be used. Defaults to none. * * @param proxyProvider proxy provider to be used. * @return this */ public Builder setProxyProvider(final ProxyProvider proxyProvider) { this.proxyProvider = proxyProvider; return this; } /** * Sets the ssl context for an encrypted response. * * @param sslContext SSLContext to be used. * @return this */ public Builder setSslContext(final SSLContext sslContext) { this.sslContext = sslContext; return this; } /** * Set a list of stop code that will interrupt crawling. * * @param codes A list of stop codes. * @return this */ public Builder setStopCodes(final @NotNull int... codes) { if (codes == null) { throw new IllegalStateException("Attribute 'codes' cannot be null."); } ImmutableSet.Builder<Integer> builder = new ImmutableSet.Builder<>(); for (int code : codes) { builder.add(code); } stopCodes = builder.build(); return this; } /** * Set the thread factory that creates the httpclient dispatcher * threads. * * @param threadFactory an instance of ThreadFactory. * @return this */ public Builder setThreadFactory(final @NotNull ThreadFactory threadFactory) { if (threadFactory == null) { throw new IllegalStateException("Attribute 'threadFactory' cannot be null."); } this.threadFactory = threadFactory; return this; } /** * Sets the UserAgent to be used, if not set, default will be chosen. * * @param userAgent user agent generator to be used. * @return this */ public Builder setUserAgent(final @NotNull UserAgent userAgent) { if (userAgent == null) { throw new IllegalStateException("Attribute 'userAgent' cannot be null."); } this.userAgent = userAgent; return this; } /** * Sets the Validator to be used. Defaults to StatusOkValidator and * EmptyContentValidator. * <p> * This will validate the fetched page and retry if page is not * consistent with the specification set by the validator. * </p> * * @param validator validator to be used. * @return this */ public Builder setValidator(final @NotNull Validator validator) { this.validator = validator; return this; } /** * Sets the multiple validators to be used. Defaults to StatusOkValidator * and EmptyContentValidator. * <p> * This will validate the fetched page and retry if page is not * consistent with the specification set by the validator. * </p> * * @param validators validator to be used. * @return this */ public Builder setValidator(final @NotNull Validator... validators) { this.validator = new PipelineValidator(validators); return this; } /** * Sets the redirection strategy for a response received by the fetcher. * * @param redirectStrategy redirection strategy to be used. * @return this */ public Builder setRedirectStrategy(final RedirectStrategy redirectStrategy) { this.redirectStrategy = redirectStrategy; return this; } /** * Sets ValidatorRouter to be used. Defaults to none. 
* Validator rules set in validator will always be used. * * @param router router validator setValidatorRouter to be used. * @return this */ public Builder setValidatorRouter(final ValidatorRouter router) { this.router = router; return this; } /** * The timeout in milliseconds used when requesting a connection * from the connection manager. A timeout value of zero is interpreted * as an infinite timeout. * * @param connectionRequestTimeout timeout. * @return this */ public Builder setConnectionRequestTimeout(final int connectionRequestTimeout) { if (connectionRequestTimeout == -1 ^ connectionRequestTimeout < 0) { throw new IllegalStateException("Attribute 'connectTimeout' must be -1, or more or equal to 0."); } this.connectionRequestTimeout = connectionRequestTimeout; return this; } /** * Determines the timeout in milliseconds until a connection is established. * A timeout value of zero is interpreted as an infinite timeout. * * @param connectTimeout timeout. * @return this */ public Builder setConnectTimeout(final int connectTimeout) { if (connectTimeout == -1 ^ connectTimeout < 0) { throw new IllegalStateException("Attribute 'connectTimeout' must be -1, or more or equal to 0."); } this.connectTimeout = connectTimeout; return this; } /** * Defines the socket timeout ({@code SO_TIMEOUT}) in milliseconds, * which is the timeout for waiting for data or, put differently, * a maximum period inactivity between two consecutive data packets). * * @param socketTimeout timeout. * @return this */ public Builder setSocketTimeout(final int socketTimeout) { if (socketTimeout == -1 ^ socketTimeout < 0) { throw new IllegalStateException("Attribute 'socketTimeout' must be -1, or more or equal to 0."); } this.socketTimeout = socketTimeout; return this; } /** * Disables request for compress pages and to decompress pages * after it is fetched. Defaults to true. * * @return this */ public Builder disableCompression() { this.compressed = false; return this; } /** * Builds the fetcher with the options specified. * * @return an instance of Fetcher. */ public AsyncFetcher build() { return new AsyncFetcher(this); } } private class RequestCallback implements FutureCallback<Response> { private final HttpFetcherRequest fetcherRequest; private final Callback crawlerCallback; RequestCallback(HttpFetcherRequest fetcherRequest, Callback crawlerCallback) { this.fetcherRequest = fetcherRequest; this.crawlerCallback = crawlerCallback; } @Override public void completed(final Response response) { LOGGER.debug("Executing completion callback on {}.", fetcherRequest.getUrl()); try { callbacks.forEach(callback -> callback.completed(fetcherRequest, response)); } finally { crawlerCallback.completed(fetcherRequest, response); } } @Override public void failed(final Exception ex) { LOGGER.debug("Executing failed callback on {}.", fetcherRequest.getUrl(), ex); try { callbacks.forEach(callback -> callback.failed(fetcherRequest, ex)); } finally { crawlerCallback.failed(fetcherRequest, ex); } } @Override public void cancelled() { LOGGER.debug("Executing cancelled callback on {}.", fetcherRequest.getUrl()); try { callbacks.forEach(callback -> callback.cancelled(fetcherRequest)); } finally { crawlerCallback.cancelled(fetcherRequest); } } } }
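A configuration sketch using only the setters shown above; the values are illustrative. The resulting fetcher would typically be passed to Crawler.Builder#setFetcher.

final Fetcher fetcher = AsyncFetcher.builder()
    .setMaxConnections(64)
    .setMaxRouteConnections(8)
    .setHeaders(java.util.Collections.singletonMap("Accept-Language", "en-US"))
    .setStopCodes(404, 410)            // statuses on which to stop retrying
    .setConnectTimeout(30000)
    .setSocketTimeout(30000)
    .setValidator(StatusOkValidator.INSTANCE, EmptyContentValidator.INSTANCE)
    .disableCookies()
    .build();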
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/AsyncResponseConsumer.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.request.HttpFetcherRequest; import ai.preferred.venom.request.Unwrappable; import ai.preferred.venom.response.BaseResponse; import ai.preferred.venom.response.Response; import ai.preferred.venom.utils.ResponseDecompressor; import ai.preferred.venom.validator.Validator; import com.ibm.icu.text.CharsetDetector; import com.ibm.icu.text.CharsetMatch; import org.apache.commons.io.IOUtils; import org.apache.http.*; import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.entity.ContentType; import org.apache.http.nio.ContentDecoder; import org.apache.http.nio.IOControl; import org.apache.http.nio.entity.ContentBufferEntity; import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer; import org.apache.http.nio.util.HeapByteBufferAllocator; import org.apache.http.nio.util.SimpleInputBuffer; import org.apache.http.protocol.HttpContext; import org.apache.http.util.Asserts; import org.apache.http.util.EntityUtils; import org.apache.tika.Tika; import org.apache.tika.io.TikaInputStream; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.URI; import java.nio.charset.UnsupportedCharsetException; import java.util.List; import java.util.Set; /** * On top of the abstract class, this class handles the parsing of a response * from the web service. * * @author Ween Jiann Lee */ public class AsyncResponseConsumer extends AbstractAsyncResponseConsumer<Response> { /** * Logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(AsyncResponseConsumer.class); /** * Decompressor used to decompress responses. */ private static final ResponseDecompressor RESPONSE_DECOMPRESSOR = new ResponseDecompressor(); /** * Default content type of response if not given. */ private static final ContentType DEFAULT_CONTENT_TYPE = ContentType.APPLICATION_OCTET_STREAM; /** * The validator to be use to validate this response. */ private final Validator validator; /** * A set of stop codes to interrupt crawling. */ private final Set<Integer> stopCodes; /** * Determines whether responses might be compressed. */ private final boolean compressed; /** * The request leading to this response. */ private final HttpFetcherRequest request; /** * An instance of http response. */ private volatile HttpResponse httpResponse; /** * A buffer for the content. */ private volatile SimpleInputBuffer buf; /** * Lazy loaded content. * <p> * Use getContent() to retrieve. * </p> */ private byte[] content; /** * Constructs an instance of async response consumer. 
* * @param validator The instance of validator to be used * @param stopCodes A set of stop code to interrupt crawling * @param compressed Determines whether responses might be compressed * @param request The request leading to this response */ AsyncResponseConsumer(final Validator validator, final Set<Integer> stopCodes, final boolean compressed, final HttpFetcherRequest request) { this.validator = validator; this.stopCodes = stopCodes; this.compressed = compressed; this.request = request; request.getDiagnostics().setStart(); } /** * Lazy loading of content. * * @param entity An instance of http entity. * @return byte array of the entity * @throws IOException If entity has no content or failed */ private byte[] getContent(final HttpEntity entity) throws IOException { if (content == null) { content = IOUtils.toByteArray(entity.getContent()); } return content; } /** * Create an instance of venom response. * * @param compressed Determines whether responses might be compressed * @return An instance of base response * @throws IOException Reading http response */ private BaseResponse createVenomResponse(final boolean compressed, final HttpContext context) throws IOException { if (compressed) { RESPONSE_DECOMPRESSOR.decompress(httpResponse); } final HttpClientContext clientContext = HttpClientContext.adapt(context); final List<URI> redirectedLocations = clientContext.getRedirectLocations(); final String url; if (redirectedLocations == null) { url = request.getUrl(); } else { url = redirectedLocations.get(redirectedLocations.size() - 1).toString(); } final HttpEntity entity = httpResponse.getEntity(); final byte[] content = getContent(entity); request.getDiagnostics().setSize(content.length); final ContentType contentType = getContentType(entity); final Header[] headers = httpResponse.getAllHeaders(); return new BaseResponse( httpResponse.getStatusLine().getStatusCode(), url, content, contentType, headers, request.getProxy()); } @Override protected final synchronized ContentType getContentType(final HttpEntity entity) { try { ContentType contentType = ContentType.get(entity); if (contentType == null || contentType.getCharset() == null) { final byte[] bytes; try { bytes = getContent(entity); } catch (IllegalStateException e) { return contentType; } if (contentType == null) { try (TikaInputStream stream = TikaInputStream.get(bytes)) { final Tika tika = new Tika(); final String fileType = tika.detect(stream); contentType = ContentType.create(fileType); } } if (contentType.getCharset() == null) { try (TikaInputStream stream = TikaInputStream.get(bytes)) { final CharsetMatch match = new CharsetDetector() .setText(stream) .detect(); if (match != null && match.getConfidence() > 50) { contentType = contentType.withCharset(match.getName()); } } } } return contentType; } catch (ParseException e) { LOGGER.warn("Could not parse content type", e); } catch (UnsupportedCharsetException e) { LOGGER.warn("Charset is not available in this instance of the Java virtual machine", e); } catch (IOException e) { LOGGER.warn("Cannot get content to determine media type", e); } return DEFAULT_CONTENT_TYPE; } @Override protected final void onResponseReceived(final HttpResponse httpResponse) { request.getDiagnostics().setAcknowledge(); this.httpResponse = httpResponse; } @Override protected final void onContentReceived(final ContentDecoder decoder, final IOControl ioctrl) throws IOException { Asserts.notNull(this.buf, "Content buffer"); this.buf.consumeContent(decoder); } @Override protected final void onEntityEnclosed(final 
HttpEntity entity, final ContentType contentType) throws IOException { long len = entity.getContentLength(); if (len > Integer.MAX_VALUE) { throw new ContentTooLongException("Entity content is too long: " + len); } if (len < 0) { len = 4096; } this.buf = new SimpleInputBuffer((int) len, new HeapByteBufferAllocator()); this.httpResponse.setEntity(new ContentBufferEntity(entity, this.buf)); } @Override protected final BaseResponse buildResult(final HttpContext context) throws Exception { request.getDiagnostics().setComplete(); final int statusCode = httpResponse.getStatusLine().getStatusCode(); if (stopCodes.contains(statusCode)) { EntityUtils.consumeQuietly(httpResponse.getEntity()); releaseResources(); throw new StopCodeException(statusCode, "Stop code received."); } final BaseResponse response = createVenomResponse(compressed, context); releaseResources(); final Validator.Status status; try { status = validator.isValid(Unwrappable.unwrapRequest(request), response); } catch (Exception e) { throw new ValidationException(Validator.Status.INVALID_CONTENT, response, "Validator threw an exception, " + "please check your code for bugs.", e); } if (status == Validator.Status.STOP) { throw new ValidationException(status, response, "Validator stopped the request."); } else if (status != Validator.Status.VALID) { throw new ValidationException(status, response, "Invalid response."); } return response; } @Override protected final void releaseResources() { this.httpResponse = null; this.buf = null; } }
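When the Content-Type header is absent or lacks a charset, getContentType() above falls back to Apache Tika for MIME detection and to ICU's CharsetDetector for the charset. A simplified, standalone sketch of that fallback follows; the ContentTypeFallback class is hypothetical and assumes the response body is already buffered as a byte array.

import com.ibm.icu.text.CharsetDetector;
import com.ibm.icu.text.CharsetMatch;
import org.apache.http.entity.ContentType;
import org.apache.tika.Tika;

public final class ContentTypeFallback {

  private static final int MIN_CONFIDENCE = 50;

  /**
   * Detect the MIME type with Tika when the header gave none, then guess the
   * charset with ICU when the header carried no charset.
   */
  public static ContentType detect(final ContentType fromHeader, final byte[] body) {
    ContentType contentType = fromHeader;
    if (contentType == null) {
      contentType = ContentType.create(new Tika().detect(body));
    }
    if (contentType.getCharset() == null) {
      final CharsetMatch match = new CharsetDetector().setText(body).detect();
      if (match != null && match.getConfidence() > MIN_CONFIDENCE) {
        contentType = contentType.withCharset(match.getName());
      }
    }
    return contentType;
  }
}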
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/Callback.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.request.Request; import ai.preferred.venom.response.Response; import javax.validation.constraints.NotNull; /** * @author Ween Jiann Lee */ public interface Callback { /** * An instance of empty callback. */ Callback EMPTY_CALLBACK = new Callback() { @Override public void completed(final @NotNull Request request, final @NotNull Response response) { // do nothing } @Override public void failed(final @NotNull Request request, final @NotNull Exception ex) { // do nothing } @Override public void cancelled(final @NotNull Request request) { // do nothing } }; /** * Method to be call upon completion of request. * * @param request Request sent. * @param response Response returned. */ void completed(@NotNull Request request, @NotNull Response response); /** * Method to be call upon failure of request. * * @param request Request sent. * @param ex Exception received. */ void failed(@NotNull Request request, @NotNull Exception ex); /** * Method to be call upon cancellation of request. * * @param request Request sent. */ void cancelled(@NotNull Request request); }
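A minimal Callback implementation that only logs the outcome of each fetch; the LoggingCallback name is illustrative and not part of the library.

import ai.preferred.venom.fetcher.Callback;
import ai.preferred.venom.request.Request;
import ai.preferred.venom.response.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingCallback implements Callback {

  private static final Logger LOGGER = LoggerFactory.getLogger(LoggingCallback.class);

  @Override
  public void completed(final Request request, final Response response) {
    LOGGER.info("Fetched {}", request.getUrl());
  }

  @Override
  public void failed(final Request request, final Exception ex) {
    LOGGER.warn("Failed to fetch {}", request.getUrl(), ex);
  }

  @Override
  public void cancelled(final Request request) {
    LOGGER.info("Cancelled fetch for {}", request.getUrl());
  }
}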
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/Fetcher.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.request.Request; import ai.preferred.venom.response.Response; import javax.validation.constraints.NotNull; import java.util.concurrent.Future; /** * This interface represents only the most basic of fetching a request. * It imposes no restrictions or particular details on the request execution process * and leaves the specifics of proxy management, validation and response status handling * up to individual implementations. * * @author Maksim Tkachenko * @author Truong Quoc Tuan * @author Ween Jiann Lee */ public interface Fetcher extends AutoCloseable { /** * Fetcher starter. */ void start(); /** * Fetch the desired HTTP page given in {@link Request}. * * @param request information for the page to fetch. * @return Response future */ @NotNull Future<Response> fetch(@NotNull Request request); /** * Fetch the desired HTTP page given in {@link Request}. Executes * callback upon completion. * * @param request information for the page to fetch. * @param callback callback * @return Response future */ @NotNull Future<Response> fetch(@NotNull Request request, @NotNull Callback callback); }
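A short sketch of driving any Fetcher implementation synchronously by blocking on the returned future; it assumes the caller owns the fetcher's lifecycle (start() before the first fetch, close() when done).

import ai.preferred.venom.fetcher.Fetcher;
import ai.preferred.venom.request.Request;
import ai.preferred.venom.response.Response;

import java.util.concurrent.Future;

public final class FetcherUsage {

  /**
   * Blocks until the response arrives; get() rethrows any fetch or validation
   * failure wrapped in an ExecutionException.
   */
  public static Response fetchBlocking(final Fetcher fetcher, final Request request) throws Exception {
    final Future<Response> future = fetcher.fetch(request);
    return future.get();
  }
}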
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/StopCodeException.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; /** * @author Ween Jiann Lee */ public class StopCodeException extends Exception { /** * The status code from the response. */ private final int statusCode; /** * Constructs a stop code exception with a message. * * @param statusCode The status code received from the response * @param message A message about the exception */ public StopCodeException(final int statusCode, final String message) { super(message); this.statusCode = statusCode; } /** * Constructs a stop code exception with a message and a cause. * * @param statusCode The status code received from the response * @param message A message about the exception * @param cause The cause of the exception */ public StopCodeException(final int statusCode, final String message, final Throwable cause) { super(message, cause); this.statusCode = statusCode; } /** * Constructs a stop code exception with a cause. * * @param statusCode The status code received from the response * @param cause The cause of the exception */ public StopCodeException(final int statusCode, final Throwable cause) { super(cause); this.statusCode = statusCode; } /** * Constructs a stop code exception with a message and a cause. * * @param statusCode The status code received from the response * @param message A message about the exception * @param cause The cause of the exception * @param enableSuppression Enable suppression. * @param writableStackTrace Enable writable stack trace. */ public StopCodeException(final int statusCode, final String message, final Throwable cause, final boolean enableSuppression, final boolean writableStackTrace) { super(message, cause, enableSuppression, writableStackTrace); this.statusCode = statusCode; } @Override public final Throwable fillInStackTrace() { return this; } }
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/StorageFetcher.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.request.Request; import ai.preferred.venom.request.StorageFetcherRequest; import ai.preferred.venom.request.Unwrappable; import ai.preferred.venom.response.Response; import ai.preferred.venom.response.StorageResponse; import ai.preferred.venom.storage.FileManager; import ai.preferred.venom.storage.Record; import ai.preferred.venom.storage.StorageException; import ai.preferred.venom.validator.EmptyContentValidator; import ai.preferred.venom.validator.PipelineValidator; import ai.preferred.venom.validator.StatusOkValidator; import ai.preferred.venom.validator.Validator; import org.apache.http.concurrent.BasicFuture; import org.apache.http.concurrent.FutureCallback; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.validation.constraints.NotNull; import java.util.Collections; import java.util.Map; import java.util.concurrent.Future; /** * This class holds the implementation to provide how items are fetched from a database, * to validate the item and to store it if specified. * * @author Ween Jiann Lee */ public final class StorageFetcher implements Fetcher { /** * Logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(StorageFetcher.class); /** * The file manager used to store raw responses. */ private final FileManager<?> fileManager; /** * The validator used. */ private final Validator validator; /** * A list of headers to append to request. */ private final Map<String, String> headers; /** * Constructs an instance of StorageFetcher. * * @param builder An instance of builder */ private StorageFetcher(final Builder builder) { this.fileManager = builder.fileManager; this.validator = builder.validator; this.headers = builder.headers; } /** * Create an instance of builder. * * @param fileManager the file manager to use. * @return A new instance of builder */ public static Builder builder(final FileManager<?> fileManager) { return new Builder(fileManager); } /** * Check if request is an instance of StorageFetcher request and return it * if true, otherwise wrap it with StorageFetcherRequest and return that. 
* * @param request An instance of request * @return An instance of StorageFetcherRequest */ private StorageFetcherRequest normalize(final Request request) { if (request instanceof StorageFetcherRequest) { return (StorageFetcherRequest) request; } return new StorageFetcherRequest(request); } @Override public void start() { // do nothing } @Override public Future<Response> fetch(final Request request) { return fetch(request, Callback.EMPTY_CALLBACK); } @Override public Future<Response> fetch(final Request request, final Callback callback) { LOGGER.debug("Getting record for: {}", request.getUrl()); final StorageFetcherRequest storageFetcherRequest = normalize(request).prependHeaders(headers); final BasicFuture<Response> future = new BasicFuture<>(new FutureCallback<Response>() { @Override public void completed(final Response result) { callback.completed(request, result); } @Override public void failed(final Exception ex) { callback.failed(request, ex); } @Override public void cancelled() { callback.cancelled(request); } }); try { final Record<?> record = fileManager.get(storageFetcherRequest); if (record == null) { future.cancel(); LOGGER.info("No content found from storage for: {}", request.getUrl()); return future; } LOGGER.debug("Record found with id: {}", record.getId()); final StorageResponse response = new StorageResponse(record, request.getUrl()); final Validator.Status status = validator.isValid(Unwrappable.unwrapRequest(request), response); if (status != Validator.Status.VALID) { future.failed(new ValidationException(status, response, "Invalid response.")); return future; } future.completed(response); return future; } catch (StorageException e) { LOGGER.warn("Error retrieving content for : {}", request.getUrl(), e); future.failed(e); return future; } } @Override public void close() throws Exception { if (fileManager != null) { fileManager.close(); } } /** * A builder for StorageFetcher class. */ public static final class Builder { /** * The file manager used to store raw responses. */ private final FileManager<?> fileManager; /** * A list of headers to append to request. */ private Map<String, String> headers; /** * The validator used. */ private Validator validator; /** * Construct an instance of builder. * * @param fileManager an instance file manager used to store raw responses. */ private Builder(final FileManager<?> fileManager) { this.fileManager = fileManager; headers = Collections.emptyMap(); validator = new PipelineValidator( StatusOkValidator.INSTANCE, EmptyContentValidator.INSTANCE ); } /** * Sets the headers to be used when fetching items. Defaults to none. * * @param headers a map to headers to be used. * @return this */ public Builder setHeaders(final @NotNull Map<String, String> headers) { this.headers = headers; return this; } /** * Sets the Validator to be used. Defaults to StatusOkValidator and * EmptyContentValidator. * <p> * This will validate the fetched page and retry if page is not * consistent with the specification set by the validator. * </p> * * @param validator validator to be used. * @return this */ public Builder setValidator(final @NotNull Validator validator) { this.validator = validator; return this; } /** * Sets the multiple validators to be used. Defaults to StatusOkValidator * and EmptyContentValidator. * <p> * This will validate the fetched page and retry if page is not * consistent with the specification set by the validator. * </p> * * @param validators validator to be used. 
* @return this */ public Builder setValidator(final @NotNull Validator... validators) { this.validator = new PipelineValidator(validators); return this; } /** * Builds the fetcher with the options specified. * * @return an instance of Fetcher. */ public StorageFetcher build() { return new StorageFetcher(this); } } }
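A minimal sketch of wiring a StorageFetcher to an existing FileManager; the concrete FileManager implementation and the header value are placeholders.

import ai.preferred.venom.fetcher.Fetcher;
import ai.preferred.venom.fetcher.StorageFetcher;
import ai.preferred.venom.storage.FileManager;

import java.util.Collections;

public final class StorageFetcherExample {

  /**
   * Builds a fetcher that replays responses from storage, prepending one
   * header to every request before the record lookup.
   */
  public static Fetcher create(final FileManager<?> fileManager) {
    return StorageFetcher.builder(fileManager)
        .setHeaders(Collections.singletonMap("Accept-Language", "en"))
        .build();
  }
}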
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/fetcher/ValidationException.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.fetcher; import ai.preferred.venom.response.Response; import ai.preferred.venom.validator.Validator; /** * @author Ween Jiann Lee */ public class ValidationException extends Exception { /** * The validation status of the response. */ private final Validator.Status status; /** * The response. */ private final Response response; /** * Constructs a validation exception with a message. * * @param status The validation status of the response. * @param response The response validated * @param message A message about the exception */ public ValidationException(final Validator.Status status, final Response response, final String message) { super(message); this.status = status; this.response = response; } /** * @param status The validation status of the response. * @param response The response validated * @param message A message about the exception * @param cause The cause of the exception */ public ValidationException(final Validator.Status status, final Response response, final String message, final Throwable cause) { super(message, cause); this.status = status; this.response = response; } /** * @param status The validation status of the response. * @param response The response validated * @param cause The cause of the exception */ public ValidationException(final Validator.Status status, final Response response, final Throwable cause) { super(cause); this.status = status; this.response = response; } /** * @param status The validation status of the response. * @param response The response validated * @param message A message about the exception * @param cause The cause of the exception * @param enableSuppression Enable suppression. * @param writableStackTrace Enable writable stack trace. */ public ValidationException(final Validator.Status status, final Response response, final String message, final Throwable cause, final boolean enableSuppression, final boolean writableStackTrace) { super(message, cause, enableSuppression, writableStackTrace); this.status = status; this.response = response; } /** * Get the validation status of the response. * * @return Validation status of the response */ public final Validator.Status getStatus() { return status; } /** * Get the response validated. * * @return Response validated */ public final Response getResponse() { return response; } @Override public final Throwable fillInStackTrace() { return this; } }
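A hedged sketch of inspecting a fetch failure: when the cause is a ValidationException, the validation status and the rejected response remain accessible. How the exception reaches the caller (for instance wrapped in an ExecutionException from Future.get()) depends on the fetcher, so only the unwrapped handling is shown.

import ai.preferred.venom.fetcher.ValidationException;
import ai.preferred.venom.response.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class ValidationFailureHandler {

  private static final Logger LOGGER = LoggerFactory.getLogger(ValidationFailureHandler.class);

  public static void handle(final Exception ex) {
    if (ex instanceof ValidationException) {
      final ValidationException ve = (ValidationException) ex;
      final Response rejected = ve.getResponse();
      LOGGER.warn("Response rejected with validation status {}", ve.getStatus());
      // 'rejected' can be inspected or persisted here for debugging.
    }
  }
}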
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/AbstractJobQueue.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import javax.annotation.Nonnull; import java.util.AbstractQueue; import java.util.Collection; import java.util.Iterator; import java.util.concurrent.BlockingQueue; /** * @author Ween Jiann Lee * @author Maksim Tkachenko */ @SuppressWarnings("NullableProblems") public abstract class AbstractJobQueue extends AbstractQueue<Job> implements BlockingQueue<Job> { /** * The queue used for this scheduler. */ private final BlockingQueue<Job> queue; /** * Constructs an instance of AbstractJobQueue. * * @param queue an instance of BlockingQueue */ protected AbstractJobQueue(final BlockingQueue<Job> queue) { this.queue = queue; } @Nonnull @Override public final Iterator<Job> iterator() { return queue.iterator(); } @Override public final int size() { return queue.size(); } @Nonnull @Override public final Job take() throws InterruptedException { return queue.take(); } @Override public final int remainingCapacity() { return queue.remainingCapacity(); } @Override public final int drainTo(final @Nonnull Collection<? super Job> c) { return queue.drainTo(c); } @Override public final int drainTo(final @Nonnull Collection<? super Job> c, final int maxElements) { return queue.drainTo(c, maxElements); } @Override public final Job peek() { return queue.peek(); } /** * Get the BlockingQueue backing this scheduler. * * @return an instance of BlockingQueue */ protected final BlockingQueue<Job> getQueue() { return queue; } }
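A sketch of the extension point: subclasses supply the backing BlockingQueue and the put/offer/poll overloads that AbstractJobQueue leaves to them, mirroring what FIFOJobQueue does below. The LIFO variant here is purely illustrative and not part of the library.

import ai.preferred.venom.job.AbstractJobQueue;
import ai.preferred.venom.job.Job;

import javax.annotation.Nonnull;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

public class LIFOJobQueue extends AbstractJobQueue {

  /**
   * Typed reference to the deque so jobs can be inserted at the head.
   */
  private final LinkedBlockingDeque<Job> deque;

  public LIFOJobQueue() {
    this(new LinkedBlockingDeque<>());
  }

  private LIFOJobQueue(final LinkedBlockingDeque<Job> deque) {
    super(deque);
    this.deque = deque;
  }

  @Override
  public final void put(final @Nonnull Job job) throws InterruptedException {
    deque.putFirst(job);
  }

  @Override
  public final boolean offer(final Job job, final long timeout, final @Nonnull TimeUnit unit)
      throws InterruptedException {
    return deque.offerFirst(job, timeout, unit);
  }

  @Override
  public final boolean offer(final @Nonnull Job job) {
    return deque.offerFirst(job);
  }

  @Override
  public final Job poll(final long timeout, final @Nonnull TimeUnit unit) throws InterruptedException {
    return deque.poll(timeout, unit);
  }

  @Override
  public final Job poll() {
    return deque.poll();
  }
}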
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/AbstractPriorityJobQueue.java
/* * Copyright (c) 2019 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import javax.annotation.Nonnull; import java.util.Comparator; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.TimeUnit; /** * @author Ween Jiann Lee */ public abstract class AbstractPriorityJobQueue extends AbstractJobQueue { /** * Constructs an instance of AbstractJobQueue. */ protected AbstractPriorityJobQueue() { super(new PriorityBlockingQueue<>(11, Comparator.comparing(o -> (o.getJobAttribute(PriorityJobAttribute.class))))); } /** * Check the job for {@see PriorityJobAttribute}, if missing, * adds it to the job. * * @param job the job to check. * @return the input job. */ private Job ensurePriorityJobAttribute(final Job job) { if (job.getJobAttribute(PriorityJobAttribute.class) == null) { job.setJobAttribute(new PriorityJobAttribute()); } return job; } @Override public final void put(final @Nonnull Job job) throws InterruptedException { getQueue().put(ensurePriorityJobAttribute(job)); } @Override public final boolean offer(final Job job, final long timeout, final @Nonnull TimeUnit unit) throws InterruptedException { return getQueue().offer(ensurePriorityJobAttribute(job), timeout, unit); } @Override public final boolean offer(final @Nonnull Job job) { return getQueue().offer(ensurePriorityJobAttribute(job)); } }
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/FIFOJobQueue.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import javax.annotation.Nonnull; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; /** * This class provides and implementation of scheduler with a first in * first out queue. * <p> * Jobs in queue will be processed first in order of insertion. * </p> * * @author Ween Jiann Lee */ public class FIFOJobQueue extends AbstractJobQueue { /** * Constructs an instance of FIFOJobQueue. */ public FIFOJobQueue() { super(new LinkedBlockingQueue<>()); } @Override public final void put(final @Nonnull Job job) throws InterruptedException { getQueue().put(job); } @Override public final boolean offer(final Job job, final long timeout, final @Nonnull TimeUnit unit) throws InterruptedException { return getQueue().offer(job, timeout, unit); } @Override public final boolean offer(final @Nonnull Job job) { return getQueue().offer(job); } @Override public final Job poll(final long timeout, final @Nonnull TimeUnit unit) throws InterruptedException { return getQueue().poll(timeout, unit); } @Override public final Job poll() { return getQueue().poll(); } }
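A minimal usage sketch: jobs leave a FIFOJobQueue in the order they were inserted. The Request instances are assumed to be constructed elsewhere.

import ai.preferred.venom.job.FIFOJobQueue;
import ai.preferred.venom.job.Job;
import ai.preferred.venom.request.Request;

import java.util.concurrent.BlockingQueue;

public final class FIFOJobQueueExample {

  public static void drainInOrder(final Request first, final Request second) throws InterruptedException {
    final BlockingQueue<Job> queue = new FIFOJobQueue();
    queue.put(new Job(first));
    queue.put(new Job(second));

    System.out.println(queue.poll().getRequest().getUrl());   // URL of 'first'
    System.out.println(queue.poll().getRequest().getUrl());   // URL of 'second'
  }
}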
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/FIFOQueueScheduler.java
package ai.preferred.venom.job;

/**
 * For backwards compatibility.
 */
@Deprecated
public class FIFOQueueScheduler extends FIFOJobQueue {
}
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/Job.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import ai.preferred.venom.Handler; import ai.preferred.venom.request.Request; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; import javax.validation.constraints.NotNull; import java.util.HashMap; import java.util.Map; /** * This class will be placed in a scheduler for queuing requests. * * @author Maksim Tkachenko * @author Ween Jiann Lee */ public class Job { /** * Logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(Job.class); /** * The request of this job. */ private final Request request; /** * The handler of this job. */ private final Handler handler; /** * */ private final Map<Class<? extends JobAttribute>, JobAttribute> jobAttributeMap = new HashMap<>(); /** * The current try of this job. */ private int tryCount = 1; /** * Constructs a basic job. * * @param request The request of this job. * @param handler The handler of this job. * @param jobAttributes attributes to insert to the job. */ public Job(final @NotNull Request request, final Handler handler, final @NotNull JobAttribute... jobAttributes) { this.request = request; this.handler = handler; for (final JobAttribute jobAttribute : jobAttributes) { jobAttributeMap.put(jobAttribute.getClass(), jobAttribute); } } /** * Constructs a basic job. * * @param request The request of this job. * @param handler The handler of this job. */ public Job(final @NotNull Request request, final Handler handler) { this(request, handler, new JobAttribute[0]); } /** * Constructs a basic job. * * @param request The request of this job. */ public Job(final @NotNull Request request) { this(request, null); } /** * Get the request of this job. * * @return Request of the job. */ @NotNull public final Request getRequest() { return request; } /** * Get the handler to handle the response of the job. * <p> * If handler is null, routed handler will be used to assign a * handler to the response, based on its criteria. * </p> * * @return Handler for the response or null. */ @Nullable public final Handler getHandler() { return handler; } /** * Get attempt number of this job. * * @return Attempt (try) count of the job. */ public final int getTryCount() { return tryCount; } /** * This method is called before the job is scheduled * for a retry. * <p> * This method allows you to specify the logic to * move the job into its subsequent state for a retry. * </p> */ public final void prepareRetry() { LOGGER.debug("Preparing job {} - {} for next state.", Integer.toHexString(this.hashCode()), request.getUrl()); jobAttributeMap.forEach((k, jobAttribute) -> jobAttribute.prepareRetry()); tryCount++; } /** * Adds or replace the current job attribute if the class of * attribute is already present in the map. * * @param jobAttribute the job attribute to add or replace. * @return this. 
*/ public final Job setJobAttribute(final JobAttribute jobAttribute) { jobAttributeMap.put(jobAttribute.getClass(), jobAttribute); return this; } /** * Get the job attribute for a specific attribute class or * return {@code null} if not found. * * @param clazz the class of attribute to find. * @param <T> the class of attribute to find. * @return an instance of job attribute for class or null. */ public final <T extends JobAttribute> T getJobAttribute(final Class<T> clazz) { //noinspection unchecked return (T) jobAttributeMap.get(clazz); } }
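A short sketch of constructing a Job with an explicit PriorityJobAttribute and watching prepareRetry() advance both the attribute and the try count.

import ai.preferred.venom.Handler;
import ai.preferred.venom.job.Job;
import ai.preferred.venom.job.Priority;
import ai.preferred.venom.job.PriorityJobAttribute;
import ai.preferred.venom.request.Request;

public final class JobExample {

  public static Job highPriorityJob(final Request request, final Handler handler) {
    final Job job = new Job(request, handler, new PriorityJobAttribute(Priority.HIGH));

    final PriorityJobAttribute attr = job.getJobAttribute(PriorityJobAttribute.class);
    // attr.getPriority() == Priority.HIGH and job.getTryCount() == 1 at this point.

    job.prepareRetry();
    // The priority is downgraded one level and job.getTryCount() == 2.
    return job;
  }
}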
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/JobAttribute.java
/* * Copyright (c) 2019 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; /** * This interface represents attributes that can be added * to jobs to manipulate the crawling process. * * @author Ween Jiann Lee */ public interface JobAttribute { /** * This method is called before the job is scheduled * for a retry. * <p> * This method allows you to specify the logic to * move the job into its subsequent state for a retry. * </p> */ void prepareRetry(); }
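A minimal custom attribute, purely illustrative: Job.prepareRetry() invokes prepareRetry() on every attribute the job carries, so this one simply counts how many retries the job has gone through.

import ai.preferred.venom.job.JobAttribute;

public class RetryCountAttribute implements JobAttribute {

  /**
   * Number of retries this job has been prepared for.
   */
  private int retries;

  @Override
  public void prepareRetry() {
    retries++;
  }

  public int getRetries() {
    return retries;
  }
}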
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/LazyPriorityJobQueue.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import ai.preferred.venom.Handler; import ai.preferred.venom.request.Request; import javax.annotation.Nonnull; import java.util.Iterator; import java.util.concurrent.TimeUnit; /** * This class provides and implementation of scheduler with a priority * sensitive queue and polls from iterator when queue is empty. * <p> * Jobs in queue will be processed first in order of higher priority, * followed by requests in the iterator. * </p> * * @author Maksim Tkachenko * @author Ween Jiann Lee */ public class LazyPriorityJobQueue extends AbstractPriorityJobQueue { /** * An object to synchronise upon. */ private final Object lock = new Object(); /** * The iterator to draw requests from. */ private final Iterator<Request> requests; /** * The default handler for this scheduler. */ private final Handler handler; /** * Constructs an instance of lazy scheduler with a default handler. * * @param requests An iterator to obtain requests * @param handler The default handler to use */ public LazyPriorityJobQueue(final Iterator<Request> requests, final Handler handler) { this.requests = requests; this.handler = handler; } /** * Constructs an instance of lazy scheduler without a default handler. * * @param requests An iterator to obtain requests */ public LazyPriorityJobQueue(final Iterator<Request> requests) { this(requests, null); } /** * Poll request from the iterator. * * @return An new job instance */ private Job pollLazyRequest() { return new Job(requests.next(), handler, new PriorityJobAttribute()); } @Override public final Job poll(final long time, final @Nonnull TimeUnit unit) throws InterruptedException { synchronized (lock) { if (getQueue().isEmpty() && requests.hasNext()) { return pollLazyRequest(); } } return getQueue().poll(time, unit); } @Override public final Job poll() { synchronized (lock) { if (getQueue().isEmpty() && requests.hasNext()) { return pollLazyRequest(); } } return getQueue().poll(); } @Override public final boolean isEmpty() { synchronized (lock) { return getQueue().isEmpty() && !requests.hasNext(); } } }
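A minimal sketch of seeding the queue from an iterator: when the priority queue is empty, poll() wraps the next iterator element into a Job on demand, so large request lists never have to be materialised up front.

import ai.preferred.venom.job.Job;
import ai.preferred.venom.job.LazyPriorityJobQueue;
import ai.preferred.venom.request.Request;

import java.util.List;

public final class LazyQueueExample {

  public static Job nextJob(final List<Request> seedRequests) {
    final LazyPriorityJobQueue queue = new LazyPriorityJobQueue(seedRequests.iterator());
    return queue.poll();   // drawn lazily from the iterator, or null if it is empty
  }
}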
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/LazyScheduler.java
/* * Copyright (c) 2019 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import ai.preferred.venom.Handler; import ai.preferred.venom.request.Request; import java.util.Iterator; /** * Deprecated, will be removed in the next release. * Please use LazyPriorityJobQueue instead. */ @Deprecated public class LazyScheduler extends LazyPriorityJobQueue { /** * Constructs an instance of lazy scheduler with a default handler. * * @param requests An iterator to obtain requests * @param handler The default handler to use */ public LazyScheduler(final Iterator<Request> requests, final Handler handler) { super(requests, handler); } /** * Constructs an instance of lazy scheduler without a default handler. * * @param requests An iterator to obtain requests */ public LazyScheduler(final Iterator<Request> requests) { super(requests); } }
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/Priority.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; /** * Job priorities, list in descending order of priority: * HIGHEST, HIGH, NORMAL, LOW, LOWEST. * * @author Maksim Tkachenko * @author Ween Jiann Lee */ public enum Priority { /** * Highest priority. */ HIGHEST, /** * High priority. */ HIGH, /** * Normal priority. */ NORMAL, /** * Low priority. */ LOW, /** * Lowest priority. */ LOWEST; /** * The default starting priority for a job. */ public static final Priority DEFAULT = NORMAL; /** * The default lowest priority for a job. */ public static final Priority FLOOR = LOW; /** * Returns the priority one level below the current * priority if priority is higher than the specified floor or the * lowest available priority. Otherwise return itself. * * @param floor Priority floor * @return Priority after downgrade. */ public Priority downgrade(final Priority floor) { if (this.compareTo(floor) >= 0) { return this; } return values()[ordinal() + 1]; } /** * Returns the priority one level below the current * priority if priority is higher than the default floor or the * lowest available priority. Otherwise return itself. * * @return Priority after downgrade. */ public Priority downgrade() { return downgrade(FLOOR); } }
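A quick illustration of downgrade(): each call steps one level down until the floor (LOW by default) is reached, after which the priority stays put unless a lower floor is supplied.

import ai.preferred.venom.job.Priority;

public final class PriorityExample {

  public static void main(final String[] args) {
    System.out.println(Priority.HIGHEST.downgrade());             // HIGH
    System.out.println(Priority.NORMAL.downgrade());              // LOW (the default floor)
    System.out.println(Priority.LOW.downgrade());                 // LOW, already at the floor
    System.out.println(Priority.LOW.downgrade(Priority.LOWEST));  // LOWEST, with a lower floor
  }
}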
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/PriorityJobAttribute.java
/* * Copyright (c) 2019 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import javax.annotation.Nonnull; /** * This class provides an implementation of job attribute with comparable * priority. * * @author Ween Jiann Lee */ public class PriorityJobAttribute implements JobAttribute, Comparable<PriorityJobAttribute> { /** * The priority floor of this job. */ private final Priority priorityFloor; /** * The priority of this job. */ private Priority priority; /** * Constructs an instance of PriorityJobAttribute. * * @param priority The priority of this job. * @param priorityFloor The priority floor of this job. */ public PriorityJobAttribute(final Priority priority, final Priority priorityFloor) { this.priority = priority; this.priorityFloor = priorityFloor; } /** * Constructs an instance of PriorityJobAttribute. * * @param priority The priority of this job. */ public PriorityJobAttribute(final Priority priority) { this(priority, Priority.FLOOR); } /** * Constructs an instance of PriorityJobAttribute. */ public PriorityJobAttribute() { this(Priority.DEFAULT); } /** * Get the priority in this attribute. * * @return the priority in this attribute. */ public final Priority getPriority() { return priority; } @Override public final void prepareRetry() { priority = priority.downgrade(priorityFloor); } @Override public final int compareTo(final @Nonnull PriorityJobAttribute job) { return priority.compareTo(job.getPriority()); } }
0
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom
java-sources/ai/preferred/venom/4.2.7/ai/preferred/venom/job/PriorityJobQueue.java
/* * Copyright 2018 Preferred.AI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.preferred.venom.job; import javax.annotation.Nonnull; import java.util.concurrent.TimeUnit; /** * This class provides an implementation of scheduler with a priority * sensitive queue. * <p> * Jobs with higher priority will be processed first. * </p> * * @author Maksim Tkachenko * @author Ween Jiann Lee */ public class PriorityJobQueue extends AbstractPriorityJobQueue { @Override public final Job poll(final long timeout, final @Nonnull TimeUnit unit) throws InterruptedException { return getQueue().poll(timeout, unit); } @Override public final Job poll() { return getQueue().poll(); } }
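A minimal usage sketch: jobs leave a PriorityJobQueue by priority rather than insertion order, and a job offered without a PriorityJobAttribute is given the default NORMAL one.

import ai.preferred.venom.job.Job;
import ai.preferred.venom.job.Priority;
import ai.preferred.venom.job.PriorityJobAttribute;
import ai.preferred.venom.job.PriorityJobQueue;
import ai.preferred.venom.request.Request;

public final class PriorityJobQueueExample {

  public static Job pollHighestFirst(final Request routine, final Request urgent) {
    final PriorityJobQueue queue = new PriorityJobQueue();
    queue.offer(new Job(routine));   // receives the default NORMAL attribute
    queue.offer(new Job(urgent, null, new PriorityJobAttribute(Priority.HIGH)));
    return queue.poll();             // the job wrapping 'urgent' comes out first
  }
}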