code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
* Copyright (C) 2013 Stefan Niederhauser (nidin@gmx.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package guru.nidi.text.transform.parse
import java.util.regex.Pattern
/**
* Parse customizing attributes of an element, like the span in "|<span=2>cell|".
*/
object TagCustomizerParser {
  // Matches one customizer tag of the form "<name>" or "<name=value>".
  // group(1) is the name (letters and dashes), group(3) the optional value.
  private val PATTERN = Pattern.compile("<([A-Za-z-]+)(=([^>]+))?>")

  // Runs the generic CustomizerParser with the tag pattern, invoking
  // `block(name, value)` for every customizer found in `input`.
  // NOTE(review): the return type is inferred from CustomizerParser.apply —
  // presumably the input with the customizers stripped; confirm there.
  def apply(input: String, block: (String, String) => Unit) = CustomizerParser.apply(PATTERN, input, block)
}
| nidi3/text-transform | src/main/scala/guru/nidi/text/transform/parse/TagCustomizerParser.scala | Scala | apache-2.0 | 1,001 |
package com.sksamuel.elastic4s.searches.suggestions
import com.sksamuel.elastic4s.searches.suggestion.PhraseSuggestionDefinition
import org.elasticsearch.search.suggest.SuggestBuilders
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder
import scala.collection.JavaConverters._
object PhraseSuggestionBuilderFn {

  /**
   * Builds an Elasticsearch [[PhraseSuggestionBuilder]] from the elastic4s
   * [[PhraseSuggestionDefinition]], copying across every optional setting
   * that has been specified on the definition.
   */
  def apply(sugg: PhraseSuggestionDefinition): PhraseSuggestionBuilder = {
    val builder = SuggestBuilders.phraseSuggestion(sugg.fieldname)
    // Each option is only applied when defined, leaving ES defaults otherwise.
    // (A duplicated `sugg.analyzer.foreach(builder.analyzer)` call was removed.)
    sugg.analyzer.foreach(builder.analyzer)
    sugg.shardSize.foreach(builder.shardSize(_))
    sugg.size.foreach(builder.size)
    sugg.text.foreach(builder.text)
    sugg.candidateGenerator.foreach(builder.addCandidateGenerator)
    builder.collateParams(sugg.collateParams.asJava)
    sugg.collatePrune.foreach(builder.collatePrune)
    sugg.collateQuery.foreach(builder.collateQuery)
    sugg.confidence.foreach(builder.confidence)
    sugg.forceUnigrams.foreach(builder.forceUnigrams)
    sugg.gramSize.foreach(builder.gramSize)
    // Highlighting is only meaningful when both a pre and a post tag are given.
    (sugg.preTag, sugg.postTag) match {
      case (Some(pre), Some(post)) => builder.highlight(pre, post)
      case _ =>
    }
    sugg.maxErrors.foreach(builder.maxErrors)
    sugg.realWordErrorLikelihood.foreach(builder.realWordErrorLikelihood)
    sugg.separator.foreach(builder.separator)
    sugg.smoothingModel.foreach(builder.smoothingModel)
    builder
  }
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/suggestions/PhraseSuggestionBuilderFn.scala | Scala | apache-2.0 | 1,428 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.dynamicpruning
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer.JoinSelectionHelper
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.read.SupportsRuntimeFiltering
import org.apache.spark.sql.execution.columnar.InMemoryRelation
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
/**
* Dynamic partition pruning optimization is performed based on the type and
* selectivity of the join operation. During query optimization, we insert a
* predicate on the filterable table using the filter from the other side of
* the join and a custom wrapper called DynamicPruning.
*
* The basic mechanism for DPP inserts a duplicated subquery with the filter from the other side,
* when the following conditions are met:
* (1) the table to prune is filterable by the JOIN key
* (2) the join operation is one of the following types: INNER, LEFT SEMI,
* LEFT OUTER (partitioned on right), or RIGHT OUTER (partitioned on left)
*
* In order to enable partition pruning directly in broadcasts, we use a custom DynamicPruning
* clause that incorporates the In clause with the subquery and the benefit estimation.
* During query planning, when the join type is known, we use the following mechanism:
* (1) if the join is a broadcast hash join, we replace the duplicated subquery with the reused
* results of the broadcast,
* (2) else if the estimated benefit of partition pruning outweighs the overhead of running the
* subquery query twice, we keep the duplicated subquery
* (3) otherwise, we drop the subquery.
*/
object PartitionPruning extends Rule[LogicalPlan] with PredicateHelper with JoinSelectionHelper {

  /**
   * Searches for a table scan that can be filtered for a given column in a logical plan.
   *
   * This methods tries to find either a v1 partitioned scan for a given partition column or
   * a v2 scan that support runtime filtering on a given attribute.
   */
  def getFilterableTableScan(a: Expression, plan: LogicalPlan): Option[LogicalPlan] = {
    val srcInfo: Option[(Expression, LogicalPlan)] = findExpressionAndTrackLineageDown(a, plan)
    srcInfo.flatMap {
      case (resExp, l: LogicalRelation) =>
        l.relation match {
          case fs: HadoopFsRelation =>
            val partitionColumns = AttributeSet(
              l.resolve(fs.partitionSchema, fs.sparkSession.sessionState.analyzer.resolver))
            // The branch value is the flatMap result; the previous nonlocal `return`
            // inside this lambda was unnecessary (and is implemented via exceptions).
            if (resExp.references.subsetOf(partitionColumns)) {
              Some(l)
            } else {
              None
            }
          case _ => None
        }
      case (resExp, r @ DataSourceV2ScanRelation(_, scan: SupportsRuntimeFiltering, _)) =>
        val filterAttrs = V2ExpressionUtils.resolveRefs[Attribute](scan.filterAttributes, r)
        if (resExp.references.subsetOf(AttributeSet(filterAttrs))) {
          Some(r)
        } else {
          None
        }
      case _ => None
    }
  }

  /**
   * Insert a dynamic partition pruning predicate on one side of the join using the filter on the
   * other side of the join.
   *  - to be able to identify this filter during query planning, we use a custom
   *    DynamicPruning expression that wraps a regular In expression
   *  - we also insert a flag that indicates if the subquery duplication is worthwhile and it
   *    should run regardless of the join strategy, or is too expensive and it should be run only
   *    if we can reuse the results of a broadcast
   */
  private def insertPredicate(
      pruningKey: Expression,
      pruningPlan: LogicalPlan,
      filteringKey: Expression,
      filteringPlan: LogicalPlan,
      joinKeys: Seq[Expression],
      partScan: LogicalPlan,
      canBuildBroadcast: Boolean): LogicalPlan = {
    val reuseEnabled = conf.exchangeReuseEnabled
    val index = joinKeys.indexOf(filteringKey)
    // Benefit estimation is relatively expensive, so it is computed lazily and only
    // when exchange reuse alone cannot justify the pruning subquery.
    lazy val hasBenefit =
      pruningHasBenefit(pruningKey, partScan, filteringKey, filteringPlan, canBuildBroadcast)
    if (reuseEnabled || hasBenefit) {
      // insert a DynamicPruning wrapper to identify the subquery during query planning
      Filter(
        DynamicPruningSubquery(
          pruningKey,
          filteringPlan,
          joinKeys,
          index,
          conf.dynamicPartitionPruningReuseBroadcastOnly || !hasBenefit),
        pruningPlan)
    } else {
      // abort dynamic partition pruning
      pruningPlan
    }
  }

  /**
   * Given an estimated filtering ratio (and extra filter ratio if the filtering side can't
   * build a broadcast by join type) we assume the partition pruning has benefit if
   * the size in bytes of the partitioned plan after filtering is greater than the size
   * in bytes of the plan on the other side of the join. We estimate the filtering ratio
   * using column statistics if they are available, otherwise we use the config value of
   * `spark.sql.optimizer.dynamicPartitionPruning.fallbackFilterRatio`.
   */
  private def pruningHasBenefit(
      partExpr: Expression,
      partPlan: LogicalPlan,
      otherExpr: Expression,
      otherPlan: LogicalPlan,
      canBuildBroadcast: Boolean): Boolean = {

    // get the distinct counts of an attribute for a given table
    def distinctCounts(attr: Attribute, plan: LogicalPlan): Option[BigInt] = {
      plan.stats.attributeStats.get(attr).flatMap(_.distinctCount)
    }

    // the default filtering ratio when CBO stats are missing, but there is a
    // predicate that is likely to be selective
    val fallbackRatio = conf.dynamicPartitionPruningFallbackFilterRatio

    // the filtering ratio based on the type of the join condition and on the column statistics
    val filterRatio = (partExpr.references.toList, otherExpr.references.toList) match {
      // filter out expressions with more than one attribute on any side of the operator
      case (leftAttr :: Nil, rightAttr :: Nil)
        if conf.dynamicPartitionPruningUseStats =>
        // get the CBO stats for each attribute in the join condition
        val partDistinctCount = distinctCounts(leftAttr, partPlan)
        val otherDistinctCount = distinctCounts(rightAttr, otherPlan)
        val availableStats = partDistinctCount.isDefined && partDistinctCount.get > 0 &&
          otherDistinctCount.isDefined
        if (!availableStats) {
          fallbackRatio
        } else if (partDistinctCount.get.toDouble <= otherDistinctCount.get.toDouble) {
          // there is likely an estimation error, so we fallback
          fallbackRatio
        } else {
          1 - otherDistinctCount.get.toDouble / partDistinctCount.get.toDouble
        }
      case _ => fallbackRatio
    }

    val estimatePruningSideSize = filterRatio * partPlan.stats.sizeInBytes.toFloat
    val overhead = calculatePlanOverhead(otherPlan)
    if (canBuildBroadcast) {
      estimatePruningSideSize > overhead
    } else {
      // We can't reuse the broadcast because the join type doesn't support broadcast,
      // and doing DPP means running an extra query that may have significant overhead.
      // We need to make sure the pruning side is very big so that DPP is still worthy.
      canBroadcastBySize(otherPlan, conf) &&
        estimatePruningSideSize * conf.dynamicPartitionPruningPruningSideExtraFilterRatio > overhead
    }
  }

  /**
   * Calculates a heuristic overhead of a logical plan. Normally it returns the total
   * size in bytes of all scan relations. We don't count in-memory relation which uses
   * only memory.
   */
  private def calculatePlanOverhead(plan: LogicalPlan): Float = {
    val (cached, notCached) = plan.collectLeaves().partition(p => p match {
      case _: InMemoryRelation => true
      case _ => false
    })
    val scanOverhead = notCached.map(_.stats.sizeInBytes).sum.toFloat
    val cachedOverhead = cached.map {
      case m: InMemoryRelation if m.cacheBuilder.storageLevel.useDisk &&
        !m.cacheBuilder.storageLevel.useMemory =>
        m.stats.sizeInBytes.toFloat
      case m: InMemoryRelation if m.cacheBuilder.storageLevel.useDisk =>
        // disk + memory: assume only a fraction needs to be re-read from disk
        m.stats.sizeInBytes.toFloat * 0.2
      case m: InMemoryRelation if m.cacheBuilder.storageLevel.useMemory =>
        0.0
    }.sum.toFloat
    scanOverhead + cachedOverhead
  }

  /**
   * Returns whether an expression is likely to be selective
   */
  private def isLikelySelective(e: Expression): Boolean = e match {
    case Not(expr) => isLikelySelective(expr)
    case And(l, r) => isLikelySelective(l) || isLikelySelective(r)
    case Or(l, r) => isLikelySelective(l) && isLikelySelective(r)
    case _: StringRegexExpression => true
    case _: BinaryComparison => true
    case _: In | _: InSet => true
    case _: StringPredicate => true
    case _: MultiLikeBase => true
    case _ => false
  }

  /**
   * Search a filtering predicate in a given logical plan
   */
  private def hasSelectivePredicate(plan: LogicalPlan): Boolean = {
    plan.find {
      case f: Filter => isLikelySelective(f.condition)
      case _ => false
    }.isDefined
  }

  /**
   * To be able to prune partitions on a join key, the filtering side needs to
   * meet the following requirements:
   *   (1) it can not be a stream
   *   (2) it needs to contain a selective predicate used for filtering
   */
  private def hasPartitionPruningFilter(plan: LogicalPlan): Boolean = {
    !plan.isStreaming && hasSelectivePredicate(plan)
  }

  // Join types for which pruning the LEFT side is sound.
  private def canPruneLeft(joinType: JoinType): Boolean = joinType match {
    case Inner | LeftSemi | RightOuter => true
    case _ => false
  }

  // Join types for which pruning the RIGHT side is sound.
  private def canPruneRight(joinType: JoinType): Boolean = joinType match {
    case Inner | LeftSemi | LeftOuter => true
    case _ => false
  }

  private def prune(plan: LogicalPlan): LogicalPlan = {
    plan transformUp {
      // skip this rule if there's already a DPP subquery on the LHS of a join
      case j @ Join(Filter(_: DynamicPruningSubquery, _), _, _, _, _) => j
      case j @ Join(_, Filter(_: DynamicPruningSubquery, _), _, _, _) => j
      case j @ Join(left, right, joinType, Some(condition), hint) =>
        var newLeft = left
        var newRight = right

        // extract the left and right keys of the join condition
        val (leftKeys, rightKeys) = j match {
          case ExtractEquiJoinKeys(_, lkeys, rkeys, _, _, _, _, _) => (lkeys, rkeys)
          case _ => (Nil, Nil)
        }

        // checks if two expressions are on opposite sides of the join
        def fromDifferentSides(x: Expression, y: Expression): Boolean = {
          def fromLeftRight(x: Expression, y: Expression) =
            !x.references.isEmpty && x.references.subsetOf(left.outputSet) &&
              !y.references.isEmpty && y.references.subsetOf(right.outputSet)
          fromLeftRight(x, y) || fromLeftRight(y, x)
        }

        splitConjunctivePredicates(condition).foreach {
          case EqualTo(a: Expression, b: Expression)
            if fromDifferentSides(a, b) =>
            // normalize so that `l` belongs to the left side and `r` to the right
            val (l, r) = if (a.references.subsetOf(left.outputSet) &&
              b.references.subsetOf(right.outputSet)) {
              a -> b
            } else {
              b -> a
            }

            // there should be a partitioned table and a filter on the dimension table,
            // otherwise the pruning will not trigger
            var filterableScan = getFilterableTableScan(l, left)
            if (filterableScan.isDefined && canPruneLeft(joinType) &&
              hasPartitionPruningFilter(right)) {
              newLeft = insertPredicate(l, newLeft, r, right, rightKeys, filterableScan.get,
                canBuildBroadcastRight(joinType))
            } else {
              filterableScan = getFilterableTableScan(r, right)
              if (filterableScan.isDefined && canPruneRight(joinType) &&
                hasPartitionPruningFilter(left)) {
                newRight = insertPredicate(r, newRight, l, left, leftKeys, filterableScan.get,
                  canBuildBroadcastLeft(joinType))
              }
            }
          case _ =>
        }
        Join(newLeft, newRight, joinType, Some(condition), hint)
    }
  }

  override def apply(plan: LogicalPlan): LogicalPlan = plan match {
    // Do not rewrite subqueries.
    case s: Subquery if s.correlated => plan
    case _ if !conf.dynamicPartitionPruningEnabled => plan
    case _ => prune(plan)
  }
}
| chuckchen/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/dynamicpruning/PartitionPruning.scala | Scala | apache-2.0 | 13,461 |
package metal
package generic
import scala.reflect.ClassTag
import spire.util.Opt
abstract class Set[K] extends Defaults with NElements1[K] with Enumerable with Searchable[K] { lhs =>

  // Runtime class tag for the key type; used for array handling and cast checks.
  implicit def ctK: ClassTag[K]
  // Metal type-class instance providing hash/equality/string operations on raw key arrays.
  implicit def K: MetalTag[K]

  type Generic = generic.Set[K]
  type Mutable <: mutable.Set[K]
  type Immutable <: immutable.Set[K]
  type Scala <: scala.collection.immutable.Set[K]

  def stringPrefix = "Set"

  // Attempts to view `any` as a set with the same key type. The match on
  // generic.Set[K] is unchecked because of type erasure; the ClassTag equality
  // guard is what actually verifies that the key types agree.
  final def ptrCastT(any: Any): Opt[generic.Set[K]] = any match {
    case rhs: generic.Set[K] if lhs.ctK == rhs.ctK => Opt(rhs)
    case _ => Opt.empty[generic.Set[K]]
  }

  // The backing array holding the keys, and the index of `ptr`'s element in it.
  private[metal] def keyArray(ptr: VPtr[lhs.type]): Array[K]
  private[metal] def keyIndex(ptr: VPtr[lhs.type]): Int

  // Hash code of the element referenced by `ptr`, delegated to the MetalTag.
  def ptrHash(ptr: VPtr[lhs.type]): Int =
    K.hashElement(keyArray(ptr), keyIndex(ptr))

  // String rendering of the element referenced by `ptr`.
  def ptrToString(ptr: VPtr[lhs.type]): String = K.toStringElement(keyArray(ptr), keyIndex(ptr))

  // True when `that` contains the element referenced by `thisPtr`.
  def ptrEquals(thisPtr: VPtr[lhs.type], that: generic.Set[K]): Boolean =
    that.ptrFindFromArray(keyArray(thisPtr), keyIndex(thisPtr)).nonNull
}
| denisrosset/ptrcoll | library/src/main/scala/metal/generic/Set.scala | Scala | mit | 1,062 |
package dsbook.sentimentanalysis
import java.io.{OutputStream, PrintStream, StringReader}
import java.util
import java.util.Properties
import edu.stanford.nlp.ling.{CoreAnnotations, Sentence}
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.StanfordCoreNLP
import edu.stanford.nlp.process.DocumentPreprocessor
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations
import edu.stanford.nlp.util.CoreMap
import org.ejml.simple.SimpleMatrix
import scala.collection.JavaConversions._
import scala.collection.mutable
object SentimentAnalyzer {

  /**
   * Set up a sentiment pipeline using CoreNLP to tokenize, apply part-of-speech tagging and
   * generate sentiment estimates. Held in an object so the (expensive) pipeline is built once.
   */
  object SentimentPipeline {
    val props = new Properties
    props.setProperty("annotators", "tokenize, ssplit, pos, parse, sentiment")
    val pipeline = new StanfordCoreNLP(props)
  }

  /**
   * Runs `body` while System.err is redirected to a no-op stream, because CoreNLP
   * writes noisy diagnostics to stderr. The original stream is restored even when
   * `body` throws (the previous inline version leaked the redirected stream on failure).
   */
  private def withSuppressedStdErr[A](body: => A): A = {
    val err = System.err
    System.setErr(new PrintStream(new OutputStream() {
      def write(b: Int) {
      }
    }))
    try body finally System.setErr(err)
  }

  /**
   * Split a document into sentences using CoreNLP
   * @param document
   * @return the sentences within the document
   */
  def splitIntoSentences(document: String): Iterable[String] = withSuppressedStdErr {
    val reader = new StringReader(document)
    val dp = new DocumentPreprocessor(reader)
    dp.map(sentence => Sentence.listToString(sentence))
  }

  /**
   * Analyze a whole document by splitting the document into sentences, extracting
   * the sentiment and aggregating the sentiments into a total positive or negative score
   *
   * @param document
   * @return POSITIVE, NEGATIVE or NEUTRAL
   */
  def analyzeDocument(document: String): String = {
    // Only the pipeline run is silenced; per-sentence analysis below does its own work.
    val annotation = withSuppressedStdErr(SentimentPipeline.pipeline.process(document))
    rollup(
      annotation.get((new CoreAnnotations.SentencesAnnotation).getClass)
        .map(sentence => analyzeSentence(sentence))
    )
  }

  /**
   * Analyze an individual sentence using CoreNLP by extracting
   * the sentiment
   *
   * @param sentence
   * @return POSITIVE, NEGATIVE or NEUTRAL
   */
  def analyzeSentence(sentence: String): String = withSuppressedStdErr {
    val annotation = SentimentPipeline.pipeline.process(sentence)
    analyzeSentence(annotation.get((new CoreAnnotations.SentencesAnnotation).getClass).get(0))
  }

  /**
   * Analyze an individual annotated sentence by extracting the sentiment
   * from the CoreNLP prediction matrix.
   *
   * @param sentence the annotated sentence carrying sentiment predictions
   * @return POSITIVE, NEGATIVE or NEUTRAL
   */
  def analyzeSentence(sentence: CoreMap): String = {
    // for each sentence, we get the sentiment that CoreNLP thinks this sentence indicates.
    val sentimentTree = sentence.get((new SentimentCoreAnnotations.AnnotatedTree).getClass)
    val mat = RNNCoreAnnotations.getPredictions(sentimentTree)
    /*
     The probabilities are very negative, negative, neutral, positive or very positive. We want
     the probability that the sentence is positive, so we choose to collapse categories as
     neutral, positive and very positive.
     */
    if (mat.get(2) > .5) {
      "NEUTRAL"
    } else if (mat.get(2) + mat.get(3) + mat.get(4) > .5) {
      "POSITIVE"
    } else {
      "NEGATIVE"
    }
  }

  /**
   * Aggregate the sentiments of a collection of sentences into a total sentiment
   * of a document. Assume a rough estimate using majority rules over the
   * non-neutral sentences.
   *
   * @param sentencePositivityProbabilities per-sentence labels (POSITIVE/NEGATIVE/NEUTRAL)
   * @return POSITIVE, NEGATIVE or NEUTRAL
   */
  def rollup(sentencePositivityProbabilities: Iterable[String]): String = {
    var n = 0
    var numPositive = 0
    for (sentiment <- sentencePositivityProbabilities) {
      if (sentiment.equals("POSITIVE")) {
        numPositive = numPositive + 1
      }
      if (!sentiment.equals("NEUTRAL")) {
        n = n + 1
      }
    }
    // No non-neutral sentences: the document carries no signal. The previous version
    // computed a dead `"NEUTRAL"` expression and then divided by zero (NaN), which
    // silently fell through to NEGATIVE.
    if (n == 0) {
      "NEUTRAL"
    } else if ((1.0 * numPositive) / n > .5) {
      "POSITIVE"
    } else {
      "NEGATIVE"
    }
  }
}
| ofermend/data-science-with-hadoop-book | ch11/sentimentanalysis/src/main/scala/dsbook/sentimentanalysis/SentimentAnalyzer.scala | Scala | apache-2.0 | 4,602 |
import org.scalatest._
import org.scalatest.FunSpec
import org.slf4j.LoggerFactory
// Deliberately noisy spec: each test emits warning-like output or exceptions to
// exercise how the build/test log parser classifies them.
class TestSpec extends FunSpec {

  // Fresh SLF4J logger per access, keyed to the runtime class.
  def logger = LoggerFactory.getLogger(this.getClass)

  // A pure expression statement — provokes a compiler warning on purpose.
  class Example {
    "Hello" //warning will be risen here
  }

  describe("Some Test") {
    it("stat warnings") {
      //assume some output starting with WARNING appears in test
      println("WARNING: Invalid stat name /127.0.0.1:4010_backoffs exported as _127_0_0_1_4010_backoffs")
    }
    it("should print warnings") {
      //assume some output starting with WARNING appears in test
      println("WARNING 3/19/14 1:26 PM:liquidbase: modifyDataType will lose primary key/autoincrement/not null settings for mysql")
    }
    it("run something") {
      //assume some custom exception appears in test
      //test should fail, but not compilation error should be presented
      try {
        throw new OurTestException("Invalid stat name /127...")
      } catch {
        case ote: OurTestException => println(ote.getMessage)
        case e: Exception => println(e.getMessage)
      }
    }
    it("log warning") {
      println("- About to log waring!")
      logger.warn("WARNING: Invalid blah-blah-blah")
    }
    it("log error") {
      println("- About to log error!")
      logger.error("[error] some error in test output")
    }
  }

  // Test-only exception type used by the "run something" case above.
  case class OurTestException(smth: String) extends Exception(smth)
}
package com.sksamuel.elastic4s.searches
import com.sksamuel.elastic4s.searches.queries.Query
import com.sksamuel.exts.OptionImplicits._
/**
 * Immutable description of a search rescore phase: wraps the rescore query
 * together with its optional window size, score weights and combination mode.
 * Each builder method returns an updated copy.
 */
case class Rescore(query: Query,
                   windowSize: Option[Int] = None,
                   rescoreQueryWeight: Option[Double] = None,
                   originalQueryWeight: Option[Double] = None,
                   scoreMode: Option[QueryRescoreMode] = None) {

  /** Sets how many top documents per shard are rescored. */
  def window(size: Int): Rescore = copy(windowSize = Some(size))

  /** Sets the weight given to the original query score. */
  def originalQueryWeight(weight: Double): Rescore = copy(originalQueryWeight = Some(weight))

  /** Sets the weight given to the rescore query score. */
  def rescoreQueryWeight(weight: Double): Rescore = copy(rescoreQueryWeight = Some(weight))

  /** Parses `mode` and delegates to the typed overload. */
  def scoreMode(mode: String): Rescore = scoreMode(QueryRescoreMode.valueOf(mode))

  /** Sets the mode used to combine original and rescore scores. */
  def scoreMode(mode: QueryRescoreMode): Rescore = copy(scoreMode = Some(mode))
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/Rescore.scala | Scala | apache-2.0 | 838 |
package se.ramn.sorting
import annotation.tailrec
import scala.math.Ordering.Implicits._
object MergeSort {

  /**
   * Sorts `xs` with a top-down merge sort and returns a new sorted sequence.
   * The input is untouched; ordering comes from the implicit `Ordering[T]`.
   */
  def apply[T: Ordering](xs: Seq[T]): Seq[T] = {
    val mid = xs.length / 2
    if (mid == 0) xs // zero or one element: already sorted
    else {
      val (lo, hi) = xs.splitAt(mid)
      merge(apply(lo), apply(hi), Nil)
    }
  }

  /**
   * Merges two sorted sequences tail-recursively, accumulating the smallest
   * elements in `acc` in reverse order until one side is exhausted.
   */
  @tailrec
  private def merge[T](xs: Seq[T], ys: Seq[T], acc: Seq[T])(implicit ord: Ordering[T]): Seq[T] =
    (xs, ys) match {
      case (Seq(), rest) => concatReverse(acc, rest)
      case (rest, Seq()) => concatReverse(acc, rest)
      case (x +: xt, y +: yt) =>
        if (ord.lt(x, y)) merge(xt, ys, x +: acc)
        else merge(xs, yt, y +: acc)
    }

  /** Prepends the elements of `xs` onto `ys` in reverse order, tail-recursively. */
  @tailrec
  private def concatReverse[T](xs: Seq[T], ys: Seq[T]): Seq[T] =
    if (xs.isEmpty) ys
    else concatReverse(xs.tail, xs.head +: ys)
}
| ramn/allehanda | src/main/scala/sorting/MergeSort.scala | Scala | apache-2.0 | 904 |
package mathParser
// Evaluator for expression trees: UO = unitary operator type, BO = binary operator
// type, S = scalar/value type, V = variable identifier type.
trait Evaluate[UO, BO, S, V] {

  /** Applies the unitary operator `uo` to the operand value `s`. */
  def executeUnitary(uo: UO, s: S): S

  /** Applies the binary operator `bo` to the evaluated `left` and `right` operands. */
  def executeBinaryOperator(bo: BO, left: S, right: S): S

  // Folds the tree bottom-up: literals pass through unchanged (identity), operators
  // are executed via the two abstract methods, and variables are resolved through
  // `variableAssignment`.
  def evaluate(node: Node[UO, BO, S, V])
              (variableAssignment: V => S): S =
    node.fold[S](identity,
      executeUnitary,
      executeBinaryOperator,
      variableAssignment)
}
| gregor-i/mathParser | math-parser/src/main/scala/mathParser/Evaluate.scala | Scala | mit | 345 |
/*
 * Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.server.interactive
import com.cloudera.hue.livy.msgs.ExecuteRequest
import org.json4s.JsonAST.JString
import org.json4s.{DefaultFormats, Extraction}
import org.scalatest.{FunSpec, Matchers}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
// Verifies that Statement.output supports offset/size paging over both
// text/plain payloads (split by line) and application/json array payloads.
class StatementSpec extends FunSpec with Matchers {

  implicit val formats = DefaultFormats

  describe("A statement") {
    it("should support paging through text/plain data") {
      val lines = List("1", "2", "3", "4", "5")
      // Simulated interpreter reply carrying a newline-joined text payload.
      val rep = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "text/plain" -> lines.mkString("\\n")
        )
      ))
      val stmt = new Statement(0, ExecuteRequest(""), Future.successful(rep))

      // No arguments: full payload.
      var output = Await.result(stmt.output(), Duration.Inf)
      output \\ "data" \\ "text/plain" should equal (JString(lines.mkString("\\n")))

      // Offset only: lines from index 2 to the end.
      output = Await.result(stmt.output(Some(2)), Duration.Inf)
      output \\ "data" \\ "text/plain" should equal (JString(lines.slice(2, lines.length).mkString("\\n")))

      // Offset + size: a single line starting at index 2.
      output = Await.result(stmt.output(Some(2), Some(1)), Duration.Inf)
      output \\ "data" \\ "text/plain" should equal (JString(lines.slice(2, 3).mkString("\\n")))
    }

    it("should support paging through application/json arrays") {
      val lines = List("1", "2", "3", "4")
      // Simulated interpreter reply carrying a JSON array payload.
      val rep = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "application/json" -> List(1, 2, 3, 4)
        )
      ))
      val stmt = new Statement(0, ExecuteRequest(""), Future.successful(rep))

      var output = Await.result(stmt.output(), Duration.Inf)
      (output \\ "data" \\ "application/json").extract[List[Int]] should equal (List(1, 2, 3, 4))

      output = Await.result(stmt.output(Some(2)), Duration.Inf)
      (output \\ "data" \\ "application/json").extract[List[Int]] should equal (List(3, 4))

      output = Await.result(stmt.output(Some(2), Some(1)), Duration.Inf)
      (output \\ "data" \\ "application/json").extract[List[Int]] should equal (List(3))
    }
  }
}
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/spark/java/livy-server/src/test/scala/com/cloudera/hue/livy/server/interactive/StatementSpec.scala | Scala | gpl-2.0 | 2,988 |
package controllers
import com.bryzek.apidoc.api.v0.models.Membership
import com.bryzek.apidoc.api.v0.models.json._
import db.{Authorization, MembershipsDao}
import java.util.UUID
import javax.inject.{Inject, Singleton}
import play.api.mvc._
import play.api.libs.json.Json
@Singleton
class Memberships @Inject() (
membershipsDao: MembershipsDao
) extends Controller {
def get(
organizationGuid: Option[UUID],
organizationKey: Option[String],
userGuid: Option[UUID],
role: Option[String],
limit: Long = 25,
offset: Long = 0
) = Authenticated { request =>
Ok(
Json.toJson(
membershipsDao.findAll(
request.authorization,
organizationGuid = organizationGuid,
organizationKey = organizationKey,
userGuid = userGuid,
role = role,
limit = limit,
offset = offset
)
)
)
}
def getByGuid(guid: UUID) = Authenticated { request =>
membershipsDao.findByGuid(request.authorization, guid) match {
case None => NotFound
case Some(membership) => {
if (membershipsDao.isUserAdmin(request.user, membership.organization)) {
Ok(Json.toJson(membership))
} else {
Unauthorized
}
}
}
}
def deleteByGuid(guid: UUID) = Authenticated { request =>
membershipsDao.findByGuid(request.authorization, guid) match {
case None => NoContent
case Some(membership) => {
if (membershipsDao.isUserAdmin(request.user, membership.organization)) {
membershipsDao.softDelete(request.user, membership)
NoContent
} else {
Unauthorized
}
}
}
}
}
| Seanstoppable/apidoc | api/app/controllers/Memberships.scala | Scala | mit | 1,701 |
package scala.parser
import org.junit.Test
import org.apache.commons.io.IOUtils
import parser.RssParser
import collection.JavaConversions._
import org.rometools.feed.module.mediarss.{MediaEntryModuleImpl, MediaModule}
import org.rometools.feed.module.feedburner.FeedBurner
/**
* The Class RssParser.
*
* @author Nguyen Duc Dung
* @since 2/27/14 8:37 AM
*
*/
class RssParserTest {

  val parser = new RssParser

  // Parses a WordPress feed fixture and walks MediaRSS thumbnails and the
  // FeedBurner original-link module for each entry. Output is printed, not
  // asserted — these are exploratory/smoke tests.
  @Test
  def test1() {
    val input = this.getClass.getClassLoader.getResourceAsStream("rss4.xml")
    val bytes = IOUtils.toByteArray(input)
    parser.parse(bytes, "http://eatingandgettingfattogether.wordpress.com/").map(feed => {
      feed.getEntries.foreach(entry => {
        val media = entry.getModule(MediaModule.URI).asInstanceOf[MediaEntryModuleImpl]
        if (media != null && media.getMetadata != null) {
          media.getMetadata.getThumbnail.foreach(println)
        }
        val feedBunnerModule = entry.getModule(FeedBurner.URI)
        if (feedBunnerModule != null) {
          val origLink = feedBunnerModule.asInstanceOf[FeedBurner].getOrigLink
          println(origLink)
        }
      })
    })
  }

  // Dumps foreign (non-standard) markup of each entry: name + namespace prefix.
  @Test
  def test2() {
    val input = this.getClass.getClassLoader.getResourceAsStream("rss1.xml")
    val bytes = IOUtils.toByteArray(input)
    parser.parse(bytes, "http://hocthenao.vn/").map(feed => {
      feed.getEntries.foreach(entry => {
        entry.getForeignMarkup.foreach(m => {
          println(m.getName)
          println(m.getNamespacePrefix)
          println(m.getValue)
        })
      })
    })
  }

  // Same as test2 but prints the full namespace URI instead of the prefix.
  @Test
  def test3() {
    val input = this.getClass.getClassLoader.getResourceAsStream("rss3.xml")
    val bytes = IOUtils.toByteArray(input)
    parser.parse(bytes, "http://hocthenao.vn/").map(feed => {
      feed.getEntries.foreach(entry => {
        entry.getForeignMarkup.foreach(m => {
          println(m.getName)
          println(m.getNamespaceURI)
          println(m.getValue)
        })
      })
    })
  }

  // Prints the content objects of each entry in the rss4 fixture.
  @Test
  def test4() {
    val input = this.getClass.getClassLoader.getResourceAsStream("rss4.xml")
    val bytes = IOUtils.toByteArray(input)
    parser.parse(bytes, "http://hocthenao.vn/").map(feed => {
      feed.getEntries.foreach(entry => {
        println(entry.getContents)
      })
    })
  }
}
| SunriseSoftVN/hayhayblog | test/scala/parser/RssParserTest.scala | Scala | gpl-2.0 | 2,301 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.tf.{Log1p => Log1pOps}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.{DataType, NodeDef}
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import scala.reflect.ClassTag
/**
 * Loader for the TensorFlow `Log1p` op: inspects the node's "T" dtype attribute
 * and instantiates the BigDL Log1p operation with the matching numeric type.
 */
class Log1p extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    // Only float and double tensors are supported by the BigDL op.
    getType(nodeDef.getAttrMap, "T") match {
      case DataType.DT_FLOAT => Log1pOps[T, Float]()
      case DataType.DT_DOUBLE => Log1pOps[T, Double]()
      case t =>
        throw new UnsupportedOperationException(s"Not support load Log1p when type is ${t}")
    }
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/Log1p.scala | Scala | apache-2.0 | 1,535 |
package cz.cvut.fit.palicand.knapsack.algorithms.evolution
import org.scalatest.FlatSpec
/**
* Created by palickaa on 18/12/15.
*/
class EvolutionEntityTest extends FlatSpec {

  behavior of "EvolutionEntityTest"

  // TODO(review): placeholder — the fitness computation is not actually exercised yet.
  it should "compute the correct fitness" in {
  }
}
| palicand/mi_paa_genetic_algorithm | src/test/scala/cz/cvut/fit/palicand/knapsack/algorithms/evolution/EvolutionEntityTest.scala | Scala | mit | 275 |
import org.apache.spark.sql.SparkSession
object DS04 {
  //
  /** Canonical path of the process working directory (performs file-system I/O on every call). */
  def getCurrentDirectory = new java.io.File( "." ).getCanonicalPath
  //
  // Titanic CSV column layout, for reference:
  // 0 pclass,1 survived,2 l.name,3.f.name, 4 sex,5 age,6 sibsp,7 parch,8 ticket,9 fare,10 cabin,
  // 11 embarked,12 boat,13 body,14 home.dest
  //
  //
  /**
   * Loads the Titanic passenger CSV, projects a subset of columns, and prints
   * several survival cross-tabulations (by gender, by SibSp, by 10-year age bracket).
   *
   * The SparkSession is now stopped in a `finally` block so local resources are
   * released even if the job fails (the original leaked the session).
   */
  def main(args: Array[String]): Unit = {
    println(getCurrentDirectory)
    val spark = SparkSession.builder
      .master("local")
      .appName("Chapter 9")
      .config("spark.logConf","true")
      .config("spark.logLevel","ERROR")
      .getOrCreate()
    println(s"Running Spark Version ${spark.version}")
    //
    val startTime = System.nanoTime()
    try {
      //
      val filePath = "/Users/ksankar/fdps-v3/"
      val passengers = spark.read.option("header","true").
        option("inferSchema","true").
        csv(filePath + "data/titanic3_02.csv")
      println("Passengers has "+passengers.count()+" rows")
      //passengers.show(5)
      //passengers.printSchema()
      //
      // Keep only the columns used by the analyses below.
      val passengers1 = passengers.select(passengers("Pclass"),passengers("Survived"),passengers("Gender"),passengers("Age"),passengers("SibSp"),passengers("Parch"),passengers("Fare"))
      passengers1.show(5)
      passengers1.printSchema()
      //
      passengers1.groupBy("Gender").count().show()
      passengers1.stat.crosstab("Survived","Gender").show()
      //
      passengers1.stat.crosstab("Survived","SibSp").show()
      //
      // Bucket ages into 10-year brackets (age - age % 10); NULL ages propagate as NULL.
      val ageDist = passengers1.select(passengers1("Survived"), (passengers1("age") - passengers1("age") % 10).cast("int").as("AgeBracket"))
      ageDist.show(3)
      ageDist.stat.crosstab("Survived","AgeBracket").show()
      //
      val elapsedTime = (System.nanoTime() - startTime) / 1e9
      println("Elapsed time: %.2f seconds".format(elapsedTime))
      //
      println("*** That's All Folks ! ***")
      //
    } finally {
      // Always release the local SparkContext, even on failure.
      spark.stop()
    }
  }
}
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sparkdeployer
import scala.util.{Failure, Success, Try}
import org.slf4s.Logging
object Retry extends Logging {
  /** Fixed delay between consecutive attempts. */
  private val RetryDelayMillis = 15000L

  /**
   * Runs `op` with the current (1-based) attempt number, retrying on failure
   * until `maxAttempts` attempts have been made, with a fixed delay between
   * attempts. The failure of the final attempt is rethrown.
   *
   * Intermediate failures are now logged instead of being silently discarded
   * (this object mixes in Logging but previously never used it).
   */
  @annotation.tailrec
  def apply[T](op: Int => T, attempt: Int, maxAttempts: Int): T = {
    Try { op(attempt) } match {
      case Success(x) => x
      case Failure(e) if attempt < maxAttempts =>
        log.warn(s"Attempt $attempt of $maxAttempts failed: ${e.getMessage}. Retrying in ${RetryDelayMillis}ms.")
        Thread.sleep(RetryDelayMillis)
        apply(op, attempt + 1, maxAttempts)
      case Failure(e) => throw e
    }
  }

  /** Retries `op` with the attempt budget taken from the cluster configuration. */
  def apply[T](op: Int => T)(implicit clusterConf: ClusterConf): T = apply(op, 1, clusterConf.retryAttempts)
}
| pishen/spark-deployer | core/src/main/scala/sparkdeployer/Retry.scala | Scala | apache-2.0 | 1,106 |
package models
object BenfordService {
  // Message payloads for the Benford-analysis service.
  // NOTE(review): presumably consumed by an Akka actor elsewhere in the app — verify.
  case class srvData(filePath: String)             // load the dataset at the given path
  case class srvCalc(numberSamples: Int)           // run the bootstrap with this many samples
  case class srvCIsByGroupId(groupId: Int)         // confidence intervals for one group
  case class srvBenfordCIsByGroupId(groupId: Int)  // Benford-law confidence intervals for one group
  case class srvResultsByGroupId(groupId: Int)     // test results for one group
  case class srvFrequenciesByGroupId(groupId: Int) // digit frequencies for one group
  case class srvGroups()                           // list all group ids
  case class srvNumSamples()                       // report the configured sample count
  case class srvTestsByGroupId(groupId: Int)       // statistical tests for one group
}
| dvgodoy/play-benford-analysis | app/models/BenfordService.scala | Scala | apache-2.0 | 414 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import com.intellij.lang.ASTNode
import com.intellij.psi._
import api.ScalaElementVisitor
import psi.types.result.TypingContext
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
// PSI implementation of a literal pattern (e.g. `case 1 =>`) in a Scala match.
class ScLiteralPatternImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScLiteralPattern {
  override def accept(visitor: PsiElementVisitor) {
    // Both branches currently delegate to super; the match exists to keep the
    // ScalaElementVisitor dispatch point explicit for future specialization.
    visitor match {
      case visitor: ScalaElementVisitor => super.accept(visitor)
      case _ => super.accept(visitor)
    }
  }
  override def toString: String = "LiteralPattern"
  // The pattern's type is the type of its literal; the incoming context is ignored.
  override def getType(ctx: TypingContext) = {
    getLiteral.getType(TypingContext.empty)
  }
}
package sims.dynamics
import sims.collision.{Collision => CCollision}
import sims.dynamics.constraints._
import sims.math._
// Non-penetration constraint for a contact point whose bodies are (nearly) at rest.
class RestingPoint(collision: CCollision[Shape], point: Vector2D) extends Constraint {
  import collision._
  val body1 = item1.body
  val body2 = item2.body
  // Velocity of the contact point on body2 relative to the same point on body1.
  def relativeVelocity = body2.velocityOfPoint(point) - body1.velocityOfPoint(point)
  // One-sided constraint: only pushes bodies apart, never pulls them together.
  override def inequality = true
  override val limit = Some((0.0, Double.PositiveInfinity))
  override def value = -overlap
  // Jacobian along the contact normal for both bodies' linear and angular parts.
  override def jacobian =
    new Jacobian(-normal.unit, -((point - body1.position) cross normal.unit),
                 normal.unit, ((point - body2.position) cross normal.unit))
  // Penetration tolerance: overlap below this is ignored to avoid jitter.
  val slop = 0.005
  override def error =
    if (collision.overlap > slop)
      -(collision.overlap - slop)
    else 0.0
}
// Contact constraint for a point where the bodies collide with significant speed;
// applies restitution (bounce) via the velocity bias.
class ImpactPoint(collision: CCollision[Shape], point: Vector2D) extends Constraint {
  import collision._
  val body1 = item1.body
  val body2 = item2.body
  // Velocity of the contact point on body2 relative to the same point on body1.
  def relativeVelocity = body2.velocityOfPoint(point) - body1.velocityOfPoint(point)
  // One-sided constraint along the contact normal.
  override def inequality = true
  override val limit = Some((0.0, Double.PositiveInfinity))
  override def value = -overlap
  override def jacobian =
    new Jacobian(-normal.unit, -((point - body1.position) cross normal.unit),
                 normal.unit, ((point - body2.position) cross normal.unit))
  // The effective bounciness of the pair is the lesser of the two materials'.
  val restitution = math.min(collision.item1.restitution, collision.item2.restitution)
  // Bias proportional to the approach speed gives the post-impact separation velocity.
  override def bias = (relativeVelocity dot collision.normal.unit) * restitution
  override def error = 0
}
object ContactResolver {
  /**
   * Velocity of the contact point on item2's body relative to the same point
   * on item1's body.
   *
   * Bug fix: the original subtracted item2's point velocity from itself
   * (a copy-paste of `item2` twice), which always produced the zero vector and
   * made every contact classify as an impact. It now matches the
   * `relativeVelocity` definition used by RestingPoint/ImpactPoint.
   */
  def relativeVelocity(collision: CCollision[Shape], point: Vector2D) =
    collision.item2.body.velocityOfPoint(point) - collision.item1.body.velocityOfPoint(point)

  /**
   * Builds one constraint per contact point, choosing between a resting
   * contact and an impact based on the normal component of the relative
   * velocity at the point.
   *
   * NOTE(review): the `-1` threshold and branch orientation are kept as
   * written; confirm the sign convention of `normal` matches the intent
   * (which branch should handle fast approaches).
   */
  def resolve(collision: CCollision[Shape]): Seq[Constraint] = for (p <- collision.points) yield {
    val v = (relativeVelocity(collision, p) dot collision.normal.unit)
    if (v < -1) new RestingPoint(collision, p)
    else new ImpactPoint(collision, p)
  }
}
/*
* Copyright (c) 2017. Yuriy Stul
*/
package com.stulsoft.ysps.pforcomprehansion
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.util.Success
/** Demonstrates usage comprehension ''for'' and ''flatMap'' with ''Future''
*
* @author Yuriy Stul
*/
object ForWithFuture extends App {
  test1()
  test2()

  /**
   * ''Future'' chained with a ''for'' comprehension: job2 starts only after
   * job1 has completed, and each result is printed as it arrives.
   */
  def test1(): Unit = {
    println("==>test1")
    val chain = for {
      firstResult <- job1()
      _ = println(s"Received job1: $firstResult")
      secondResult <- job2(firstResult)
      _ = println(s"Received job2: $secondResult")
    } yield ()
    // Block until the whole chain finishes so the demo output stays ordered.
    Await.ready(chain, Duration.Inf)
    println("<==test1")
  }

  /** Simulated asynchronous task: sleeps ~500 ms and yields a fixed string. */
  def job1(): Future[String] = Future {
    println("==>job1")
    Thread.sleep(500)
    println("<==job1")
    "The job1 result"
  }

  /** Simulated dependent task: sleeps ~500 ms and folds in the previous result. */
  def job2(param: String): Future[String] = Future {
    println("==>job2")
    Thread.sleep(500)
    println("<==job2")
    "The job2 with " + param
  }

  /**
   * The same sequential chain expressed directly with ''flatMap''/''andThen''
   * instead of a ''for'' comprehension.
   */
  def test2(): Unit = {
    println("==>test2")
    val chain = job1().flatMap { firstResult =>
      println(s"job1Result: $firstResult")
      job2(firstResult) andThen {
        case Success(secondResult) => println(s"job2Result: $secondResult")
      }
    }
    Await.ready(chain, Duration.Inf)
    println("<==test2")
  }
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/pforcomprehansion/ForWithFuture.scala | Scala | mit | 1,418 |
package com.outr.arango
/**
 * Metadata describing a single ArangoDB collection.
 *
 * @param id       collection identifier as reported by the server
 * @param name     collection name
 * @param isSystem true when this is an internal system collection
 * @param status   numeric status code — presumably the ArangoDB lifecycle state; verify against the server API docs
 * @param `type`   numeric type code — presumably document vs. edge collection; verify against the server API docs
 */
case class CollectionDetail(id: String,
                            name: String,
                            isSystem: Boolean,
                            status: Int,
                            `type`: Int)
| outr/arangodb-scala | driver/src/main/scala/com/outr/arango/CollectionDetail.scala | Scala | mit | 236 |
package com.twitter.zipkin.storage.cassandra
import com.datastax.driver.core.Cluster
import com.google.common.util.concurrent.Futures
import org.apache.cassandra.service.CassandraDaemon
import org.twitter.zipkin.storage.cassandra.Repository
import scala.collection.JavaConversions
/** Ensures all cassandra micro-integration tests use only one cassandra server. */
object CassandraFixture {

  // Keyspace dedicated to these tests so they never touch real data.
  val keyspace = "test_zipkin_spanstore"

  // Defer shared connection to the cluster
  lazy val cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(9142).build()

  /** overridden to use gradle's "build" dir instead of maven's "target" dir. */
  lazy val cassandra = {
    System.setProperty("cassandra-foreground", "true")
    System.setProperty("cassandra.native.epoll.enabled", "false")
    val cassandraDaemon = new CassandraDaemon
    cassandraDaemon.activate
  }

  // Ensure the repository's local cache of service names expire quickly
  System.setProperty("zipkin.store.cassandra.internal.writtenNamesTtl", "1")

  // the "true" at the end will ensure schema. lazy to do this only once.
  lazy val repository = new Repository(keyspace, cluster, true)

  // Empties every column family used by the span store; call between tests.
  def truncate = {
    repository // dereference to ensure schema exists
    val session = cluster.connect()
    // Issue all TRUNCATEs concurrently and block until every one completes.
    Futures.allAsList(JavaConversions.asJavaIterable(Seq(
      "traces",
      "dependencies",
      "service_names",
      "span_names",
      "service_name_index",
      "service_span_name_index",
      "annotations_index",
      "span_duration_index"
    ).map(cf => session.executeAsync("TRUNCATE %s.%s".format(keyspace, cf))))).get()
  }
}
| thelastpickle/zipkin | zipkin-cassandra/src/test/scala/com/twitter/zipkin/storage/cassandra/CassandraFixture.scala | Scala | apache-2.0 | 1,623 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.sql
import java.sql.{Date, Time, Timestamp}
import java.util
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.expressions.utils.{Func13, SplitUDF}
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.runtime.batch.table.OldHashCode
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.runtime.utils.{TableProgramsCollectionTestBase, TableProgramsTestBase}
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit._
import org.junit.Assert.assertEquals
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
@RunWith(classOf[Parameterized])
/**
 * Integration tests for SQL projection/filter (calc) queries on the legacy
 * Flink batch planner. Each test registers a small collection-backed table,
 * runs a SQL query, and compares the collected rows against expected text.
 */
class CalcITCase(
    configMode: TableConfigMode)
  extends TableProgramsCollectionTestBase(configMode) {

  // Plain SELECT * over a registered Table.
  @Test
  def testSelectStarFromTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" +
      "4,3,Hello world, how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" +
      "7,4,Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" +
      "11,5,Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" +
      "15,5,Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" +
      "19,6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // SELECT * over a table whose first column is a nested tuple.
  @Test
  def testSelectStarFromNestedTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    val ds = CollectionDataSets.getSmallNestedTupleDataSet(env).toTable(tEnv).as('a, 'b)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "(1,1),one\n" + "(2,2),two\n" + "(3,3),three\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // SELECT * over a DataSet registered as a temporary view (not a Table).
  @Test
  def testSelectStarFromDataSet(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env)
    tEnv.createTemporaryView("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" +
      "4,3,Hello world, how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" +
      "7,4,Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" +
      "11,5,Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" +
      "15,5,Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" +
      "19,6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // Explicit column projection.
  @Test
  def testSimpleSelectAll(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT a, b, c FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" +
      "4,3,Hello world, how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" +
      "7,4,Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" +
      "11,5,Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" +
      "15,5,Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" +
      "19,6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // Aliasing with a backtick-escaped identifier containing special characters.
  @Test
  def testSelectWithNaming(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT `1-_./Ü`, b FROM (SELECT _1 as `1-_./Ü`, _2 as b FROM MyTable)"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1\n" + "2,2\n" + "3,2\n" + "4,3\n" + "5,3\n" + "6,3\n" + "7,4\n" +
      "8,4\n" + "9,4\n" + "10,4\n" + "11,5\n" + "12,5\n" + "13,5\n" + "14,5\n" + "15,5\n" +
      "16,6\n" + "17,6\n" + "18,6\n" + "19,6\n" + "20,6\n" + "21,6\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // Referencing an unknown column must be rejected at validation time.
  @Test(expected = classOf[ValidationException])
  def testInvalidFields(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT a, foo FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    tEnv.sqlQuery(sqlQuery)
  }

  // WHERE false filters out every row.
  @Test
  def testAllRejectingFilter(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE false"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // WHERE true keeps every row.
  @Test
  def testAllPassingFilter(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE true"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" + "4,3,Hello world, " +
      "how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" + "7,4," +
      "Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" + "11,5," +
      "Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" + "15,5," +
      "Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" + "19," +
      "6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // LIKE predicate on a string column.
  @Test
  def testFilterOnString(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE c LIKE '%world%'"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "3,2,Hello world\n" + "4,3,Hello world, how are you?\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // Arithmetic predicate (MOD) on an integer column.
  @Test
  def testFilterOnInteger(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE MOD(a,2)=0"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "2,2,Hello\n" + "4,3,Hello world, how are you?\n" +
      "6,3,Luke Skywalker\n" + "8,4," + "Comment#2\n" + "10,4,Comment#4\n" +
      "12,5,Comment#6\n" + "14,5,Comment#8\n" + "16,6," +
      "Comment#10\n" + "18,6,Comment#12\n" + "20,6,Comment#14\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // OR-combined predicates including an IN list.
  @Test
  def testDisjunctivePredicate(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE a < 2 OR a > 20 OR a IN(3,4,5)"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\n" + "3,2,Hello world\n" + "4,3,Hello world, how are you?\n" +
      "5,3,I am fine.\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // AND-combined predicates including NOT IN.
  @Test
  def testFilterWithAnd(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE MOD(a,2)<>0 AND MOD(b,2)=0 AND b NOT IN(1,2,3)"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "7,4,Comment#1\n" + "9,4,Comment#3\n" + "17,6,Comment#11\n" +
      "19,6,Comment#13\n" + "21,6,Comment#15\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // DATE / TIME / TIMESTAMP literals and columns round-trip correctly.
  @Test
  def testAdvancedDataTypes(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT a, b, c, DATE '1984-07-12', TIME '14:34:24', " +
      "TIMESTAMP '1984-07-12 14:34:24' FROM MyTable"

    val ds = env.fromElements((
      Date.valueOf("1984-07-12"),
      Time.valueOf("14:34:24"),
      Timestamp.valueOf("1984-07-12 14:34:24")))
    tEnv.createTemporaryView("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1984-07-12,14:34:24,1984-07-12 14:34:24.0," +
      "1984-07-12,14:34:24,1984-07-12 14:34:24.0"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // ROW, ARRAY and MAP value constructors; also guards against Calcite
  // silently flattening the row by checking the actual result objects.
  @Test
  def testValueConstructor(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    val sqlQuery = "SELECT (a, b, c), ARRAY[12, b], MAP[a, c] FROM MyTable " +
      "WHERE (a, b, c) = ('foo', 12, TIMESTAMP '1984-07-12 14:34:24')"

    val rowValue = ("foo", 12, Timestamp.valueOf("1984-07-12 14:34:24"))

    val ds = env.fromElements(rowValue)
    tEnv.createTemporaryView("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "foo,12,1984-07-12 14:34:24.0,[12, 12],{foo=1984-07-12 14:34:24.0}"
    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)

    // Compare actual object to avoid undetected Calcite flattening
    val resultRow = results.asJava.get(0)
    assertEquals(rowValue._1, resultRow.getField(0).asInstanceOf[Row].getField(0))
    assertEquals(rowValue._2, resultRow.getField(1).asInstanceOf[Array[Integer]](1))
    assertEquals(rowValue._3,
      resultRow.getField(2).asInstanceOf[util.Map[String, Timestamp]].get(rowValue._1))
  }

  // Registering a second UDF under the same name replaces the first.
  @Test
  def testUserDefinedScalarFunction(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)

    tEnv.registerFunction("hashCode", OldHashCode)
    tEnv.registerFunction("hashCode", MyHashCode)

    val ds = env.fromElements("a", "b", "c")
    tEnv.createTemporaryView("MyTable", ds, 'text)

    val result = tEnv.sqlQuery("SELECT hashCode(text) FROM MyTable")

    val expected = "97\n98\n99"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // UDF arguments given as SQL Unicode-escaped string literals (U&'...'),
  // including a custom UESCAPE character; exercises both deterministic and
  // non-deterministic variants of the same UDF.
  @Test
  def testFunctionWithUnicodeParameters(): Unit = {
    val data = List(
      ("a\u0001b", "c\"d", "e\\\"\u0004f"), // uses Java/Scala escaping
      ("x\u0001y", "y\"z", "z\\\"\u0004z")
    )

    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env)

    val splitUDF0 = new SplitUDF(deterministic = true)
    val splitUDF1 = new SplitUDF(deterministic = false)

    tEnv.registerFunction("splitUDF0", splitUDF0)
    tEnv.registerFunction("splitUDF1", splitUDF1)

    // uses SQL escaping (be aware that even Scala multi-line strings parse backslash!)
    val sqlQuery = s"""
      |SELECT
      |  splitUDF0(a, U&'${'\\'}0001', 0) AS a0,
      |  splitUDF1(a, U&'${'\\'}0001', 0) AS a1,
      |  splitUDF0(b, U&'"', 1) AS b0,
      |  splitUDF1(b, U&'"', 1) AS b1,
      |  splitUDF0(c, U&'${'\\'}${'\\'}"${'\\'}0004', 0) AS c0,
      |  splitUDF1(c, U&'${'\\'}"#0004' UESCAPE '#', 0) AS c1
      |FROM T1
      |""".stripMargin

    val t1 = env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c)

    tEnv.registerTable("T1", t1)

    val results = tEnv.sqlQuery(sqlQuery).toDataSet[Row].collect()

    val expected = List("a,a,d,d,e,e", "x,x,z,z,z,z").mkString("\n")
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  // new type inference for functions is only supported in the Blink planner
  @Test(expected = classOf[ValidationException])
  def testUnsupportedNewFunctionTypeInference(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env)

    tEnv.createTemporarySystemFunction("testFunc", new Func13(">>"))

    tEnv.sqlQuery("SELECT testFunc('fail')").toDataSet[Row]
  }
}
// Scalar UDF returning the JVM hash code of its string argument.
object MyHashCode extends ScalarFunction {
  def eval(s: String): Int = s.hashCode()
}
object CalcITCase {

  // Runs every test twice: once with the default table config and once with
  // null handling disabled.
  @Parameterized.Parameters(name = "Table config = {0}")
  def parameters(): util.Collection[Array[java.lang.Object]] = {
    Seq[Array[AnyRef]](
      Array(TableProgramsTestBase.DEFAULT),
      Array(TableProgramsTestBase.NO_NULL)).asJava
  }
}
| bowenli86/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/batch/sql/CalcITCase.scala | Scala | apache-2.0 | 15,416 |
package edu.mit.csail.cap.query
import scala.collection.mutable
import util.LinkedTree.Forest
import util.LinkedListTree
/** Trace representation in terms of call trees */
// A trace viewed as a forest of call trees rooted at top-level invocations.
class CallTrace(override val c: Connection, val roots: List[CallTree])
  extends TraceProxy {
  /** Filter trees by a method predicate */
  def filter(f: Method => Boolean) =
    new CallTrace(c, for (t <- roots; u <- t.select { case rep => f(rep.method) }) yield u)
  /** Filter trees by a method predicate but keep parents of passing nodes as well */
  def filterWithParents(f: Method => Boolean) =
    new CallTrace(c, for (t <- roots; u <- t.selectWithParents { case rep => f(rep.method) }) yield u)
  // Flatten the forest back into a single event trace ordered by counter.
  override def t =
    ConcTrace(c, roots.flatMap(_.nodes).sortBy(_.counter))
  // Select by an event-level predicate (note: rep.data instantiates the event).
  override def select(f: Query): CallTrace =
    new CallTrace(c, for (t <- roots; u <- t.select { case rep => f(rep.data) }) yield u)
  override def methods: Set[Method] =
    roots.flatMap(_.methods).toSet
  // Total node count over the whole forest.
  override def size =
    roots.map(_.numNodes).sum
  override def trees =
    this
  // Each root belongs to exactly one thread; collect the distinct thread ids.
  override def threadIDs =
    { for (t <- roots) yield t.data.thread }.toSet
  override def toString = s"${c.name}(${roots.size} roots)"
}
/** A single execution tree */
/** A single execution tree */
// Nodes store only (method, counter); the full event is re-read lazily from
// the connection via `data` to keep the tree lightweight.
class CallTree(val method: Method, val c: Connection, val counter: Int)
  extends LinkedListTree[CallTree, Enter] {
  def this(e: Enter) =
    this(e.method, e.c, e.counter)
  // Shallow copy: same identity fields, no children.
  override def copy =
    new CallTree(method, c, counter)
  override def add(that: CallTree) {
    require(this.c == that.c, "must belong to the same trace")
    require(that.counter > this.counter, "call trees are sorted by counter")
    super.add(that)
  }
  /** Note: computing data element requires instantiating an event */
  override def data: Enter =
    c.at(counter).asInstanceOf[Enter]
  // All methods appearing anywhere in this subtree (including this node's).
  def methods: Set[Method] =
    Set(method) ++ children.flatMap(_.methods)
  /** All subtrees of depth 1 */
  def snippets: List[CallTree] =
    copy.addAll(children.map(_.copy)) :: children.flatMap(_.snippets)
  def trace = ConcTrace(c, nodes.toList)
  /** Structural hash */
  // Order-insensitive over children: sum of child hashes plus the method id.
  lazy val hash: Long =
    children.map(_.hash).sum + 31 * method.id
}
/** Caller-callee relationship analysis */
/** Caller-callee relationship analysis */
// Reconstructs caller/callee pairs from a flat stream of Enter events by
// maintaining one call stack per thread, keyed off each event's caller counter.
class CallGraph(t: Trace) extends Traversable[(Option[Enter], Enter)] {
  // Only method-entry events participate in the call graph.
  val trace = t.select(Enter)

  /** Roots are (None, root), leaves are (Some(parent), child) */
  override def foreach[U](f: ((Option[Enter], Enter)) => U) {
    // Per-thread stack of currently-open calls (head = innermost).
    val stacks = new mutable.HashMap[Long, List[Enter]]
    // Start a fresh stack for a root call and emit it with no parent.
    def makeStack(e: Enter) = {
      stacks(e.thread) = List(e)
      f((None, e))
    }
    for (e <- trace) e match {
      case e: Enter =>
        e.caller match {
          case None =>
            makeStack(e)
          case Some(i) =>
            stacks.get(e.thread) match {
              case Some(stk) =>
                var stack = stk
                assert(stack.size > 0, "logic error")
                // Pop frames newer than the claimed caller (counter > i);
                // always keep at least one frame so head stays valid.
                while (stack.head.counter > i && stack.size > 1)
                  stack = stack.tail
                if (stack.head.counter <= i) {
                  // Found an enclosing call: emit the edge and push the callee.
                  f((Some(stack.head), e))
                  stacks(e.thread) = e :: stack
                } else
                  // Caller not on this thread's stack: treat as a new root.
                  makeStack(e)
              case None =>
                makeStack(e)
            }
        }
      // trace was filtered to Enter events, so anything else is a logic error.
      case _ => assert(false)
    }
  }

  /** Build a forest of call trees. */
  def trees: List[CallTree] = {
    // Mirror of the per-thread stacks above, but holding tree nodes.
    val stacks = new mutable.HashMap[Long, List[CallTree]]
    var roots: List[CallTree] = Nil
    debug("building call graph for " + t)
    this.foreach {
      case (None, e) =>
        roots = (new CallTree(e)) :: roots
        stacks(e.thread) = List(roots.head)
      case (Some(e), f) =>
        // Find the parent's tree node on this thread's stack by counter,
        // attach the child, and push the child for subsequent callees.
        var stack = stacks(e.thread)
        while (stack.head.counter != e.counter)
          stack = stack.tail
        stacks(e.thread) = (stack.head + new CallTree(f)) :: stack
    }
    debug("done")
    // roots was built by prepending; reverse to restore trace order.
    roots.reverse
  }
}
| kyessenov/semeru | src/main/scala/CallGraph.scala | Scala | gpl-3.0 | 3,911 |
import sbt._
import Keys._
object Commons {
  // Settings shared by every sub-project of this sbt build.
  val commonSettings = Seq(
    organization := "counter",
    version := "1.0",
    scalaVersion := "2.11.7",
    scalacOptions := Seq(
      "-feature",
      "-language:postfixOps"
    ),
    // Run tests in a forked JVM so they don't share state with sbt's own JVM.
    fork in Test := true
  )
}
| grzesiekw/counter | project/Commons.scala | Scala | apache-2.0 | 269 |
package mesosphere.marathon
import java.util.UUID
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import javax.inject.Named
import akka.actor.SupervisorStrategy.Restart
import akka.actor._
import akka.event.EventStream
import akka.routing.RoundRobinPool
import com.codahale.metrics.Gauge
import com.google.inject._
import com.google.inject.name.Names
import com.twitter.common.base.Supplier
import com.twitter.common.zookeeper.{ Candidate, CandidateImpl, Group => ZGroup, ZooKeeperClient }
import com.twitter.zk.{ NativeConnector, ZkClient }
import mesosphere.chaos.http.HttpConf
import mesosphere.marathon.api.LeaderInfo
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.event.{ HistoryActor, EventModule }
import mesosphere.marathon.event.http.{
HttpEventStreamActorMetrics,
HttpEventStreamHandleActor,
HttpEventStreamHandle,
HttpEventStreamActor
}
import mesosphere.marathon.health.{ HealthCheckManager, MarathonHealthCheckManager }
import mesosphere.marathon.io.storage.StorageProvider
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state._
import mesosphere.marathon.tasks.{ TaskIdUtil, TaskTracker, _ }
import mesosphere.marathon.upgrade.{ DeploymentManager, DeploymentPlan }
import mesosphere.util.SerializeExecution
import mesosphere.util.state.memory.InMemoryStore
import mesosphere.util.state.mesos.MesosStateStore
import mesosphere.util.state.zk.ZKStore
import mesosphere.util.state._
import org.apache.log4j.Logger
import org.apache.mesos.state.ZooKeeperState
import org.apache.zookeeper.ZooDefs
import org.apache.zookeeper.ZooDefs.Ids
import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.util.control.NonFatal
object ModuleNames {
  // Names for Guice @Named bindings, so multiple bindings of common types
  // (String, AtomicBoolean, ActorRef, ...) can be distinguished at injection sites.
  final val NAMED_CANDIDATE = "CANDIDATE"
  final val NAMED_HOST_PORT = "HOST_PORT"
  final val NAMED_LEADER_ATOMIC_BOOLEAN = "LEADER_ATOMIC_BOOLEAN"
  final val NAMED_SERVER_SET_PATH = "SERVER_SET_PATH"
  final val NAMED_SERIALIZE_GROUP_UPDATES = "SERIALIZE_GROUP_UPDATES"
  final val NAMED_HTTP_EVENT_STREAM = "HTTP_EVENT_STREAM"
}
class MarathonModule(conf: MarathonConf, http: HttpConf, zk: ZooKeeperClient)
extends AbstractModule {
//scalastyle:off magic.number
val log = Logger.getLogger(getClass.getName)
  // Wires the core Marathon singletons into the Guice injector.
  def configure() {
    // Configuration objects and the shared ZooKeeper client are bound to the
    // instances passed to this module's constructor.
    bind(classOf[MarathonConf]).toInstance(conf)
    bind(classOf[HttpConf]).toInstance(http)
    bind(classOf[ZooKeeperClient]).toInstance(zk)
    bind(classOf[LeaderProxyConf]).toInstance(conf)

    // needs to be eager to break circular dependencies
    bind(classOf[SchedulerCallbacks]).to(classOf[SchedulerCallbacksServiceAdapter]).asEagerSingleton()

    bind(classOf[MarathonSchedulerDriverHolder]).in(Scopes.SINGLETON)
    bind(classOf[SchedulerDriverFactory]).to(classOf[MesosSchedulerDriverFactory]).in(Scopes.SINGLETON)
    bind(classOf[MarathonLeaderInfoMetrics]).in(Scopes.SINGLETON)
    bind(classOf[MarathonScheduler]).in(Scopes.SINGLETON)
    bind(classOf[MarathonSchedulerService]).in(Scopes.SINGLETON)
    bind(classOf[LeaderInfo]).to(classOf[MarathonLeaderInfo]).in(Scopes.SINGLETON)
    bind(classOf[TaskTracker]).in(Scopes.SINGLETON)
    bind(classOf[TaskFactory]).to(classOf[DefaultTaskFactory]).in(Scopes.SINGLETON)
    bind(classOf[HealthCheckManager]).to(classOf[MarathonHealthCheckManager]).asEagerSingleton()

    bind(classOf[String])
      .annotatedWith(Names.named(ModuleNames.NAMED_SERVER_SET_PATH))
      .toInstance(conf.zooKeeperServerSetPath)

    bind(classOf[Metrics]).in(Scopes.SINGLETON)
    bind(classOf[HttpEventStreamActorMetrics]).in(Scopes.SINGLETON)

    // If running in single scheduler mode, this node is the leader.
    val leader = new AtomicBoolean(!conf.highlyAvailable())
    bind(classOf[AtomicBoolean])
      .annotatedWith(Names.named(ModuleNames.NAMED_LEADER_ATOMIC_BOOLEAN))
      .toInstance(leader)
  }
@Provides
@Singleton
def provideMesosLeaderInfo(): MesosLeaderInfo = {
conf.mesosLeaderUiUrl.get match {
case someUrl @ Some(_) => ConstMesosLeaderInfo(someUrl)
case None => new MutableMesosLeaderInfo
}
}
  // Creates the actor that fans Marathon events out to HTTP event-stream clients.
  @Named(ModuleNames.NAMED_HTTP_EVENT_STREAM)
  @Provides
  @Singleton
  def provideHttpEventStreamActor(system: ActorSystem,
                                  leaderInfo: LeaderInfo,
                                  @Named(EventModule.busName) eventBus: EventStream,
                                  metrics: HttpEventStreamActorMetrics): ActorRef = {
    // Per-client cap on buffered (not-yet-delivered) messages; defaults to 50.
    val outstanding = conf.eventStreamMaxOutstandingMessages.get.getOrElse(50)
    // One child actor per connected HTTP stream handle.
    def handleStreamProps(handle: HttpEventStreamHandle): Props =
      Props(new HttpEventStreamHandleActor(handle, eventBus, outstanding))

    system.actorOf(Props(new HttpEventStreamActor(leaderInfo, metrics, handleStreamProps)), "HttpEventStream")
  }
/**
 * Builds the persistent store selected by the `internal_store_backend` setting:
 * a direct ZooKeeper store, a Mesos-state-backed store, or an in-memory store.
 */
@Provides
@Singleton
def provideStore(): PersistentStore = {
  conf.internalStoreBackend.get match {
    case Some("zk") =>
      // Twitter-util based client talking directly to ZooKeeper.
      implicit val timer = com.twitter.util.Timer.Nil
      import com.twitter.util.TimeConversions._
      val sessionTimeout = conf.zooKeeperSessionTimeout.get.map(_.millis).getOrElse(30.minutes)
      val zkClient = ZkClient(NativeConnector(conf.zkHosts, None, sessionTimeout, timer))
        .withAcl(Ids.OPEN_ACL_UNSAFE.asScala)
        .withRetries(3)
      new ZKStore(zkClient, zkClient(conf.zooKeeperStatePath))
    case Some("mesos_zk") =>
      // Store backed by the Mesos ZooKeeperState implementation.
      val zkState = new ZooKeeperState(
        conf.zkHosts,
        conf.zkTimeoutDuration.toMillis,
        TimeUnit.MILLISECONDS,
        conf.zooKeeperStatePath
      )
      new MesosStateStore(zkState, conf.zkTimeoutDuration)
    case Some("mem") =>
      new InMemoryStore()
    case backend: Option[String] =>
      throw new IllegalArgumentException(s"Storage backend $backend not known!")
  }
}
//scalastyle:off parameter.number method.length
// Creates the singleton MarathonSchedulerActor along with its collaborators
// (SchedulerActions, DeploymentManager props, HistoryActor props).
@Named("schedulerActor")
@Provides
@Singleton
@Inject
def provideSchedulerActor(
  system: ActorSystem,
  appRepository: AppRepository,
  groupRepository: GroupRepository,
  deploymentRepository: DeploymentRepository,
  healthCheckManager: HealthCheckManager,
  taskTracker: TaskTracker,
  taskQueue: LaunchQueue,
  frameworkIdUtil: FrameworkIdUtil,
  driverHolder: MarathonSchedulerDriverHolder,
  taskIdUtil: TaskIdUtil,
  leaderInfo: LeaderInfo,
  storage: StorageProvider,
  @Named(EventModule.busName) eventBus: EventStream,
  taskFailureRepository: TaskFailureRepository,
  config: MarathonConf): ActorRef = {
  // Restart the scheduler actor on any non-fatal failure.
  val supervision = OneForOneStrategy() {
    case NonFatal(_) => Restart
  }
  import system.dispatcher
  // SchedulerActions needs a reference back to the scheduler actor, hence this factory.
  def createSchedulerActions(schedulerActor: ActorRef): SchedulerActions = {
    new SchedulerActions(
      appRepository,
      groupRepository,
      healthCheckManager,
      taskTracker,
      taskQueue,
      eventBus,
      schedulerActor,
      config)
  }
  // DeploymentManager is created per SchedulerActions instance via these Props.
  def deploymentManagerProps(schedulerActions: SchedulerActions): Props = {
    Props(
      new DeploymentManager(
        appRepository,
        taskTracker,
        taskQueue,
        schedulerActions,
        storage,
        healthCheckManager,
        eventBus
      )
    )
  }
  val historyActorProps = Props(new HistoryActor(eventBus, taskFailureRepository))
  // Single-instance router so the supervision strategy above can be attached.
  system.actorOf(
    MarathonSchedulerActor.props(
      createSchedulerActions,
      deploymentManagerProps,
      historyActorProps,
      appRepository,
      deploymentRepository,
      healthCheckManager,
      taskTracker,
      taskQueue,
      driverHolder,
      leaderInfo,
      eventBus
    ).withRouter(RoundRobinPool(nrOfInstances = 1, supervisorStrategy = supervision)),
    "MarathonScheduler")
}
/** Advertised "host:port" of this instance; uses the HTTPS port when HTTP is disabled. */
@Named(ModuleNames.NAMED_HOST_PORT)
@Provides
@Singleton
def provideHostPort: String = {
  val effectivePort = if (http.disableHttp()) http.httpsPort() else http.httpPort()
  s"${conf.hostname()}:$effectivePort"
}
/**
 * Candidate used for leader election.
 *
 * @param zk       ZooKeeper client used to register this instance in the leader group
 * @param hostPort advertised "host:port" payload stored (UTF-8 encoded) in the group node
 * @return Some(candidate) when running highly available, None in single-scheduler mode
 */
@Named(ModuleNames.NAMED_CANDIDATE)
@Provides
@Singleton
def provideCandidate(zk: ZooKeeperClient, @Named(ModuleNames.NAMED_HOST_PORT) hostPort: String): Option[Candidate] = {
  // Expression-oriented if/else instead of the previous early `return`
  // (which required a scalastyle suppression).
  if (conf.highlyAvailable()) {
    log.info("Registering in Zookeeper with hostPort:" + hostPort)
    val candidate = new CandidateImpl(new ZGroup(zk, ZooDefs.Ids.OPEN_ACL_UNSAFE, conf.zooKeeperLeaderPath),
      new Supplier[Array[Byte]] {
        def get(): Array[Byte] = {
          hostPort.getBytes("UTF-8")
        }
      })
    Some(candidate)
  } else {
    None
  }
}
// Repository of task failures; history bounded by zooKeeperMaxVersions.
@Provides
@Singleton
def provideTaskFailureRepository(
  store: PersistentStore,
  conf: MarathonConf,
  metrics: Metrics): TaskFailureRepository = {
  import mesosphere.marathon.state.PathId
  import org.apache.mesos.{ Protos => mesos }
  new TaskFailureRepository(
    new MarathonStore[TaskFailure](
      store,
      metrics,
      // Factory producing an empty TaskFailure placeholder (empty path/id, TASK_STAGING).
      () => TaskFailure(
        PathId.empty,
        mesos.TaskID.newBuilder().setValue("").build,
        mesos.TaskState.TASK_STAGING
      )
    ),
    conf.zooKeeperMaxVersions.get
  )
}
/** Versioned repository of AppDefinitions backed by the persistent store. */
@Provides
@Singleton
def provideAppRepository(
  store: PersistentStore,
  conf: MarathonConf,
  metrics: Metrics): AppRepository = {
  val entityStore = new MarathonStore[AppDefinition](store, metrics, () => AppDefinition.apply())
  new AppRepository(entityStore, maxVersions = conf.zooKeeperMaxVersions.get, metrics)
}
/** Versioned repository of Groups; entries are stored under the "group:" prefix. */
@Provides
@Singleton
def provideGroupRepository(
  store: PersistentStore,
  appRepository: AppRepository,
  conf: MarathonConf,
  metrics: Metrics): GroupRepository = {
  val entityStore = new MarathonStore[Group](store, metrics, () => Group.empty, "group:")
  new GroupRepository(entityStore, appRepository, conf.zooKeeperMaxVersions.get, metrics)
}
/** Versioned repository of DeploymentPlans; entries are stored under the "deployment:" prefix. */
@Provides
@Singleton
def provideDeploymentRepository(
  store: PersistentStore,
  conf: MarathonConf,
  metrics: Metrics): DeploymentRepository = {
  val entityStore = new MarathonStore[DeploymentPlan](store, metrics, () => DeploymentPlan.empty, "deployment:")
  new DeploymentRepository(entityStore, conf.zooKeeperMaxVersions.get, metrics)
}
// The singleton actor system, named "marathon", used by all actors of this module.
@Provides
@Singleton
def provideActorSystem(): ActorSystem = ActorSystem("marathon")
/* Reexports the `akka.actor.ActorSystem` as `akka.actor.ActorRefFactory`. It doesn't work automatically. */
@Provides
@Singleton
def provideActorRefFactory(system: ActorSystem): ActorRefFactory = system
// Utility for reading/storing the Mesos framework id (store uses an empty key prefix).
@Provides
@Singleton
def provideFrameworkIdUtil(store: PersistentStore, metrics: Metrics, conf: MarathonConf): FrameworkIdUtil = {
  new FrameworkIdUtil(
    new MarathonStore[FrameworkId](store, metrics, () => new FrameworkId(UUID.randomUUID().toString), ""),
    conf.zkTimeoutDuration)
}
// Migration component operating on the persisted app/group state.
@Provides
@Singleton
def provideMigration(
  store: PersistentStore,
  appRepo: AppRepository,
  groupRepo: GroupRepository,
  metrics: Metrics,
  config: MarathonConf): Migration = {
  new Migration(store, appRepo, groupRepo, config, metrics)
}
@Provides
@Singleton
def provideTaskIdUtil(): TaskIdUtil = new TaskIdUtil
// Storage provider constructed from the Marathon and HTTP configuration.
@Provides
@Singleton
def provideStorageProvider(config: MarathonConf, http: HttpConf): StorageProvider =
  StorageProvider.provider(config, http)
// Execution context that serializes group updates ("serializeGroupUpdates").
@Named(ModuleNames.NAMED_SERIALIZE_GROUP_UPDATES)
@Provides
@Singleton
def provideSerializeGroupUpdates(actorRefFactory: ActorRefFactory): SerializeExecution = {
  SerializeExecution(actorRefFactory, "serializeGroupUpdates")
}
/**
 * Creates the singleton GroupManager and registers gauges exposing the number of
 * transitive apps and groups of the root group.
 */
@Provides
@Singleton
def provideGroupManager(
  @Named(ModuleNames.NAMED_SERIALIZE_GROUP_UPDATES) serializeUpdates: SerializeExecution,
  scheduler: MarathonSchedulerService,
  taskTracker: TaskTracker,
  groupRepo: GroupRepository,
  appRepo: AppRepository,
  storage: StorageProvider,
  config: MarathonConf,
  @Named(EventModule.busName) eventBus: EventStream,
  metrics: Metrics): GroupManager = {
  val groupManager: GroupManager = new GroupManager(
    serializeUpdates,
    scheduler,
    taskTracker,
    groupRepo,
    appRepo,
    storage,
    config,
    eventBus
  )
  // Both gauges block on a root-group lookup; factored out to avoid the previous
  // copy-pasted anonymous Gauge implementations.
  def registerRootGroupGauge(name: String)(value: Group => Int): Unit =
    metrics.gauge(name, new Gauge[Int] {
      override def getValue: Int =
        value(Await.result(groupManager.rootGroup(), conf.zkTimeoutDuration))
    })
  registerRootGroupGauge("service.mesosphere.marathon.app.count")(_.transitiveApps.size)
  registerRootGroupGauge("service.mesosphere.marathon.group.count")(_.transitiveGroups.size)
  groupManager
}
}
| lelezi/marathon | src/main/scala/mesosphere/marathon/MarathonModule.scala | Scala | apache-2.0 | 12,855 |
/*-------------------------------------------------------------------------*\\
** ScalaCheck **
** Copyright (c) 2007-2015 Rickard Nilsson. All rights reserved. **
** http://www.scalacheck.org **
** **
** This software is released under the terms of the Revised BSD License. **
** There is NO WARRANTY. See the file LICENSE for the full text. **
\\*------------------------------------------------------------------------ */
package org.scalacheck.util
import org.scalacheck.Properties
// Pretty.pretty must render a null reference as the string "null",
// both for a bare null literal and for a null widened to Any.
object PrettySpecification extends Properties("Pretty") {
  property("null") = Pretty.pretty(null) == "null"
  property("any null") = Pretty.pretty(null: Any) == "null"
}
| sid-kap/scalacheck | src/test/scala/org/scalacheck/util/Pretty.scala | Scala | bsd-3-clause | 862 |
package filodb.gateway
import java.net.InetSocketAddress
import java.nio.charset.Charset
import java.util.concurrent.Executors
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.StrictLogging
import kamon.Kamon
import monix.eval.Task
import monix.execution.Scheduler
import monix.kafka._
import monix.reactive.Observable
import net.ceedubs.ficus.Ficus._
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel.{ChannelPipeline, ChannelPipelineFactory, Channels}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.handler.ssl.SslContext
import org.jboss.netty.handler.ssl.util.SelfSignedCertificate
import org.jctools.queues.MpscGrowableArrayQueue
import org.rogach.scallop._
import filodb.coordinator.{FilodbSettings, ShardMapper, StoreFactory}
import filodb.core.binaryrecord2.RecordBuilder
import filodb.core.metadata.Dataset
import filodb.gateway.conversion._
import filodb.memory.MemFactory
import filodb.timeseries.TestTimeseriesProducer
/**
* Gateway server to ingest source streams of data, shard, batch, and write output to Kafka
* built using high performance Netty TCP code
*
* It usually takes one arg: the source config file which contains # Kafka partitions/shards and other config
* Also pass in -Dconfig.file=.... as usual, with a config that points to the dataset metadata.
* For local setups, simply run `./dev-gateway.sh`.
* For help pass in `--help`.
*
* NOTE: set `kamon.prometheus.embedded-server.port` to avoid conflicting with FiloDB itself.
*
* There are options that can be used to generate test data, such as `--gen-hist-data`. The -n and -p options can
* also be used together to control the # of samples per series and # of time series.
* To generate Histogram schema test data, one must create the following dataset:
* ./filo-cli -Dconfig.file=conf/timeseries-filodb-server.conf --command create --dataset histogram \\
* --dataColumns timestamp:ts,sum:long,count:long,h:hist:counter=true --partitionColumns metric:string,tags:map \\
* --shardKeyColumns metric --metricColumn metric
* create a Kafka topic:
* kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 4 --topic histogram-dev
* and use the `conf/histogram-dev-source.conf` config file.
* Oh, and you have to observe on shards 1 and 3.
*/
object GatewayServer extends StrictLogging {
  Kamon.init
  // Get global configuration using universal FiloDB/Akka-based config
  val settings = new FilodbSettings()
  val config = settings.allConfig
  val storeFactory = StoreFactory(settings, Scheduler.io())
  // ==== Metrics ====
  val numInfluxMessages = Kamon.counter("num-influx-messages").withoutTags
  val numInfluxParseErrors = Kamon.counter("num-influx-parse-errors").withoutTags
  val numDroppedMessages = Kamon.counter("num-dropped-messages").withoutTags
  val numContainersSent = Kamon.counter("num-containers-sent").withoutTags
  val containersSize = Kamon.histogram("containers-size-bytes").withoutTags
  // Most options are for generating test data
  class GatewayOptions(args: Seq[String]) extends ScallopConf(args) {
    val samplesPerSeries = opt[Int](short = 'n', default = Some(100),
      descr = "# of samples per time series")
    val numSeries = opt[Int](short = 'p', default = Some(20), descr = "# of total time series")
    val sourceConfigPath = trailArg[String](descr = "Path to source config, eg conf/timeseries-dev-source.conf")
    val genHistData = toggle(noshort = true, descrYes = "Generate histogram-schema test data and exit")
    val genPromData = toggle(noshort = true, descrYes = "Generate Prometheus-schema test data and exit")
    verify()
  }
  //scalastyle:off method.length
  // Entry point: wires the sharding pipeline and Kafka producer, then either generates
  // test data (--gen-hist-data / --gen-prom-data) or starts the Influx TCP listener.
  def main(args: Array[String]): Unit = {
    val userOpts = new GatewayOptions(args)
    val numSamples = userOpts.samplesPerSeries() * userOpts.numSeries()
    val numSeries = userOpts.numSeries()
    val sourceConfig = ConfigFactory.parseFile(new java.io.File(userOpts.sourceConfigPath()))
    val numShards = sourceConfig.getInt("num-shards")
    val dataset = settings.datasetFromStream(sourceConfig)
    // NOTE: the spread MUST match the default spread used in the HTTP module for consistency between querying
    // and ingestion sharding
    val spread = config.getInt("filodb.spread-default")
    val shardMapper = new ShardMapper(numShards)
    val queueFullWait = config.as[FiniteDuration]("gateway.queue-full-wait").toMillis
    val (shardQueues, containerStream) = shardingPipeline(config, numShards, dataset)
    // Parses one Influx-protocol message and enqueues it onto the queue of its shard.
    def calcShardAndQueueHandler(buf: ChannelBuffer): Unit = {
      val initIndex = buf.readerIndex
      val len = buf.readableBytes
      numInfluxMessages.increment()
      InfluxProtocolParser.parse(buf) map { record =>
        logger.trace(s"Enqueuing: $record")
        val shard = shardMapper.ingestionShard(record.shardKeyHash, record.partitionKeyHash, spread)
        if (!shardQueues(shard).offer(record)) {
          // Prioritize recent data. This means dropping messages when full, so new data may have a chance.
          logger.warn(s"Queue for shard=$shard is full. Dropping data.")
          numDroppedMessages.increment()
          // Thread sleep queueFullWait
        }
      } getOrElse {
        numInfluxParseErrors.increment()
        logger.warn(s"Could not parse:\n${buf.toString(initIndex, len, Charset.defaultCharset)}")
      }
    }
    // TODO: allow configurable sinks, maybe multiple sinks for say writing to multiple Kafka clusters/DCs
    setupKafkaProducer(sourceConfig, containerStream)
    val genHist = userOpts.genHistData.getOrElse(false)
    val genProm = userOpts.genPromData.getOrElse(false)
    if (genHist || genProm) {
      val startTime = System.currentTimeMillis
      logger.info(s"Generating $numSamples samples starting at $startTime....")
      val stream = if (genHist) TestTimeseriesProducer.genHistogramData(startTime, dataset, numSeries)
        else TestTimeseriesProducer.timeSeriesData(startTime, numSeries)
      stream.take(numSamples).foreach { rec =>
        val shard = shardMapper.ingestionShard(rec.shardKeyHash, rec.partitionKeyHash, spread)
        if (!shardQueues(shard).offer(rec)) {
          // Prioritize recent data. This means dropping messages when full, so new data may have a chance.
          logger.warn(s"Queue for shard=$shard is full. Dropping data.")
          numDroppedMessages.increment()
        }
      }
      // Give the container/Kafka pipeline time to drain before exiting.
      Thread sleep 10000
      TestTimeseriesProducer.logQueryHelp(numSamples, numSeries, startTime)
      logger.info(s"Waited for containers to be sent, exiting...")
      sys.exit(0)
    } else {
      setupTCPService(config, calcShardAndQueueHandler)
    }
  }
  //scalastyle:on method.length
  // Starts the Netty TCP server accepting newline-delimited Influx protocol messages.
  def setupTCPService(config: Config, handler: ChannelBuffer => Unit): Unit = {
    val influxPort = config.getInt("gateway.influx-port")
    // Configure SSL.
    val SSL = config.getBoolean("gateway.tcp.ssl-enabled")
    val sslCtx = if (SSL) {
      // NOTE(review): self-signed certificate — not suitable for production trust chains.
      val ssc = new SelfSignedCertificate()
      Some(SslContext.newServerContext(ssc.certificate(), ssc.privateKey()))
    } else {
      None
    }
    // Configure the bootstrap.
    val bootstrap = new ServerBootstrap(
      new NioServerSocketChannelFactory(
        Executors.newCachedThreadPool(),
        Executors.newCachedThreadPool()))
    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
      def getPipeline(): ChannelPipeline = {
        val p = Channels.pipeline();
        sslCtx.foreach { ctx => p.addLast("ssl", ctx.newHandler()) }
        p.addLast("influxProtocol", new NettySocketHandler(Some('\n'), handler));
        p
      }
    })
    val rcvBufferSize = config.getInt("gateway.tcp.netty-receive-buffer-size")
    val sendBufferSize = config.getInt("gateway.tcp.netty-send-buffer-size")
    bootstrap.setOption("child.tcpNoDelay", true)
    bootstrap.setOption("child.receiveBufferSize", rcvBufferSize)
    bootstrap.setOption("child.sendBufferSize", sendBufferSize)
    // Bind and start to accept incoming connections.
    logger.info(s"Starting GatewayServer with TCP port for Influx data at $influxPort....")
    bootstrap.bind(new InetSocketAddress(influxPort))
  }
  // Returns (Array[Queue] for shards, containerObservable)
  def shardingPipeline(config: Config, numShards: Int, dataset: Dataset):
    (Array[MpscGrowableArrayQueue[InputRecord]], Observable[(Int, Seq[Array[Byte]])]) = {
    val parallelism = config.getInt("gateway.producer-parallelism")
    val minQueueSize = config.getInt("gateway.min-queue-size")
    val maxQueueSize = config.getInt("gateway.max-queue-size")
    // Create queues and RecordBuilders, one per shard
    val shardQueues = (0 until numShards).map { _ =>
      new MpscGrowableArrayQueue[InputRecord](minQueueSize, maxQueueSize) }.toArray
    val lastSendTime = Array.fill(numShards)(0L)
    val builders = (0 until numShards).map(s => new RecordBuilder(MemFactory.onHeapFactory))
      .toArray
    val producing = Array.fill(numShards)(false)
    var curShard = 0
    // require(parallelism < numShards)
    // Create a multithreaded pipeline to read from the shard queues and populate the RecordBuilders.
    // The way it works is as follows:
    // producing array above keeps track of which shards are being worked on at any time.
    // The producing observable produces a stream of the next shard to work on. If a shard is already being worked
    // on then it will be skipped -- this ensures that a shard is never worked on in parallel
    // Next tasks are created and executed to pull from queue and build records in a parallel pool
    // Each Task produces (shard, Container) pairs which get flushed by the sink
    val shardIt = Iterator.from(0).map { _ =>
      while (producing(curShard)) {
        curShard = (curShard + 1) % numShards
        Thread sleep 1
      } // else keep going. If we have gone around just wait
      val shardToWorkOn = curShard
      producing(shardToWorkOn) = true
      curShard = (curShard + 1) % numShards
      shardToWorkOn
    }
    val containerStream = Observable.fromIterator(shardIt)
      .mapAsync(parallelism) { shard =>
        buildShardContainers(shard, shardQueues(shard), builders(shard), lastSendTime)
          .map { output =>
            // Mark this shard as done producing for now to allow another go
            producing(shard) = false
            output
          }
      }
    logger.info(s"Created $numShards container builder queues with $parallelism parallel workers...")
    (shardQueues, containerStream)
  }
  // Drains a shard's queue into its RecordBuilder and emits (shard, containerBytes)
  // when a container is full or more than a second has passed since the last send.
  def buildShardContainers(shard: Int,
                           queue: MpscGrowableArrayQueue[InputRecord],
                           builder: RecordBuilder,
                           sendTime: Array[Long]): Task[(Int, Seq[Array[Byte]])] = Task {
    // While there are still messages in the queue and there aren't containers to send, pull and build
    while (!queue.isEmpty && builder.allContainers.length <= 1) {
      queue.poll().addToBuilder(builder)
      // TODO: add metrics
    }
    // Is there a container to send? Or has the time since the last send been more than a second?
    // Send only full containers or if time has elapsed, send and reset current container
    val numContainers = builder.allContainers.length
    if (numContainers > 1 ||
        (numContainers > 0 && !builder.allContainers.head.isEmpty &&
         (System.currentTimeMillis - sendTime(shard)) > 1000)) {
      sendTime(shard) = System.currentTimeMillis
      val out = if (numContainers > 1) { // First container probably full. Send only the first container
        numContainersSent.increment(numContainers - 1)
        (shard, builder.nonCurrentContainerBytes(reset = true))
      } else { // only one container. Get the smallest bytes possible as its probably not full
        numContainersSent.increment()
        (shard, builder.optimalContainerBytes(reset = true))
      }
      logger.debug(s"Sending ${out._2.length} containers, ${out._2.map(_.size).sum} bytes from shard=$shard")
      out
    } else {
      (shard, Nil)
    }
  }
  // Writes the (shard, containerBytes) stream to the configured Kafka topic.
  def setupKafkaProducer(sourceConf: Config, containerStream: Observable[(Int, Seq[Array[Byte]])]): Future[Unit] = {
    // Now create Kafka config, sink
    // TODO: use the official KafkaIngestionStream stuff to parse the file. This is just faster for now.
    val producerCfg = KafkaProducerConfig.default.copy(
      bootstrapServers = sourceConf.getString("sourceconfig.bootstrap.servers").split(',').toList
    )
    val topicName = sourceConf.getString("sourceconfig.filo-topic-name")
    implicit val io = Scheduler.io("kafka-producer")
    val sink = new KafkaContainerSink(producerCfg, topicName)
    sink.writeTask(containerStream)
      .runAsync
      .map { _ => logger.info(s"Finished producing messages into topic $topicName") }
      // TODO: restart stream in case of failure?
      .recover { case NonFatal(e) => logger.error("Error occurred while producing messages to Kafka", e) }
  }
} | tuplejump/FiloDB | gateway/src/main/scala/filodb/gateway/GatewayServer.scala | Scala | apache-2.0 | 13,615 |
package com.github.rosmith.nlp.query.solution
import scala.collection.mutable.Map
/**
 * [[IQuerySolution]] implementation backed by a variable -> values map.
 *
 * @param m    mapping from variable name to its list of result values (one entry per row)
 * @param cM   mapping from variable name to its runtime type
 * @param vars ordered list of result variables
 */
class QuerySolutionImpl(m: Map[String, List[Any]], cM: Map[String, Class[_]], vars: List[String]) extends IQuerySolution {
  // true for the empty/error solutions created by the auxiliary constructors
  private var empty = false
  private var variables = vars
  private var map = m
  private var classMap = cM
  private var _errorMessage = ""

  /** Creates an empty solution without an error message. */
  def this() {
    this(null, null, null)
    empty = true
  }

  /** Creates an empty solution carrying an error message. */
  def this(eM: String) {
    this(null, null, null)
    empty = true
    _errorMessage = eM
  }

  /** Runtime type bound to the variable, or null when unknown/absent. */
  def getType(varName: String): Class[_] =
    // Single lookup instead of the previous exists-then-unsafe-.get double lookup;
    // orNull yields null both for an absent key and for a stored null value.
    classMap.get(varName).orNull

  /** Values bound to the variable, or null when the variable is unbound. */
  def getValues(varName: String): List[Any] =
    map.get(varName).orNull

  def getVariables(): List[String] = variables

  def isEmpty() = empty

  def errorMessage() = _errorMessage

  /** Prints all result rows as "[v1 => v2 => ...]" lines, one per row. */
  def print() {
    println("####### RESULTS ##########")
    if (isEmpty) {
      println(null)
    } else {
      println(getVariables.mkString(" => "))
      // Row count = size of the first variable's value list; 0 rows when there
      // are no variables (previously this crashed with IndexOutOfBoundsException).
      val rowCount = getVariables.headOption.map(v => getValues(v).size).getOrElse(0)
      for (i <- 0 until rowCount) {
        val row = getVariables.map(v => getValues(v)(i).toString)
        println(row.mkString("[", " => ", "]"))
      }
    }
  }
}
| rosmith/giet | src/main/scala/com/github/rosmith/nlp/query/solution/QuerySolutionImpl.scala | Scala | mit | 1,489 |
/*
* Copyright 2011 University of Wisconsin, Milwaukee
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uwm.cs.scalabison;
class Code { }
| djspiewak/scala-bison | src/edu/uwm/cs/scalabison/Code.scala | Scala | apache-2.0 | 666 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.storage
import org.rocksdb.{WriteBatch, WriteOptions}
/**
 * Write side of a RocksDB-backed storage: serializes values of type A and
 * stages them in a WriteBatch that is written atomically on flush.
 */
trait StorageWriter[A] extends HasConnection {
  // Implementation-defined buffer size used by concrete writers.
  protected def writeBufferSize: Int
  /** Serializes a value to the byte representation stored in RocksDB. */
  def toBytes(content: A): Array[Byte]
  /** Stages a single (word, content) entry for writing. */
  def add(word: String, content: A): Unit
  // Stores the entry under the trimmed word, overwriting any existing value.
  protected def put(batch: WriteBatch, word: String, content: A): Unit = {
    batch.put(word.trim.getBytes, toBytes(content))
  }
  // Uses RocksDB's merge operator instead of a plain overwrite.
  protected def merge(batch: WriteBatch, word: String, content: A): Unit = {
    batch.merge(word.trim.getBytes, toBytes(content))
  }
  /** Writes the batch to the database (if still connected) and releases it. */
  def flush(batch: WriteBatch): Unit = {
    val writeOptions = new WriteOptions()
    /** Might have disconnected already */
    if (connection.isConnected) {
      connection.getDb.write(writeOptions, batch)
    }
    batch.close()
  }
  def close(): Unit
}
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/storage/StorageWriter.scala | Scala | apache-2.0 | 1,383 |
package person
import org.junit.Test
import selfassembly.SimpleRegistry
import selfassembly.examples._
// File PersonA.scala:
/** A person exposes a name and an age. */
abstract class Person {
  def name: String
  def age: Int
}

/** Salaried person: n = name, a = age, s = salary. */
case class Employee(n: String, a: Int, s: Int)
  extends Person {
  override def name: String = n
  override def age: Int = a
}
// File PersonB.scala:
// A volunteer additionally records the year joined (s).
class Volunteer(n: String, a: Int, s: Int) extends Person {
  def name = n
  def age = a
  def since = s
}
// Case class so Firefighter gets structural equality and a useful toString;
// its parameters shadow (and forward to) the Volunteer constructor arguments.
case class Firefighter(n: String, a: Int, s: Int)
  extends Volunteer(n, a, s)
// Verifies that the Show instance obtained for the static type Person dispatches
// on the runtime type (Employee / Firefighter) of the value.
class DispatchSpec {
  @Test def testDispatch() {
    // Implicit registry required by the selfassembly-derived Show instances.
    implicit val reg = new SimpleRegistry[Show]
    val em = Employee("Dave", 35, 80000)
    val ff = Firefighter("Jim", 40, 2004)
    val inst = implicitly[Show[Person]]
    println(inst.show(em))
    // prints: Employee(Dave, 35, 80000)
    println(inst.show(ff))
    // prints: Firefighter(Jim, 40, 2004)
    assert(inst.show(em) == "person.Employee(Dave, 35, 80000)")
    assert(inst.show(ff) == "person.Firefighter(Jim, 40, 2004)")
  }
}
| phaller/selfassembly | src/test/scala/selfassembly/person/Test.scala | Scala | bsd-3-clause | 995 |
package de.htwg.zeta.parser.check
import de.htwg.zeta.parser.check.Check.Id
import org.scalatest.Inside
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
//noinspection ScalaStyle
// FindInvalidReferences must return exactly those referenced ids that are
// not contained in the list of available ids.
class FindInvalidReferencesTest extends AnyFreeSpec with Matchers with Inside {
  // Elements are identified by their name.
  val elementToId: Element => Id = element => element.name
  // Ids which count as valid reference targets.
  val availableIds: List[String] = List("t1", "t2", "t3", "t4")
  val findInvalidReferences = new FindInvalidReferences[Element](elementToId, availableIds)
  "Check for invalid ids will result in" - {
    "an empty list when" - {
      "the reference list is empty" in {
        val invalidIds = findInvalidReferences(List.empty)
        invalidIds shouldBe empty
      }
      "the element list contains a single id" in {
        val ids = List(Element("t1"))
        val invalidIds = findInvalidReferences(ids)
        invalidIds shouldBe empty
      }
      "the element list contains valid ids" in {
        val ids = List(Element("t1"), Element("t2"), Element("t3"), Element("t4"))
        val invalidIds = findInvalidReferences(ids)
        invalidIds shouldBe empty
      }
      "the element list contains valid ids (with duplicates)" in {
        // Duplicate references to a valid id are still valid.
        val ids = List(Element("t1"), Element("t2"), Element("t1"), Element("t2"))
        val invalidIds = findInvalidReferences(ids)
        invalidIds shouldBe empty
      }
    }
    "a non empty list of invalid ids when" - {
      "a single element with that id is not found" in {
        val ids = List(Element("t5"), Element("t2"))
        val invalidIds = findInvalidReferences(ids)
        invalidIds should have size 1
        invalidIds should contain("t5")
      }
      "multiple elements with that id is not found" in {
        val ids = List(Element("t5"), Element("t7"))
        val invalidIds = findInvalidReferences(ids)
        invalidIds should have size 2
        invalidIds should contain("t5")
        invalidIds should contain("t7")
      }
    }
  }
}
| Zeta-Project/zeta | api/parser/src/test/scala/de/htwg/zeta/parser/check/FindInvalidReferencesTest.scala | Scala | bsd-2-clause | 1,999 |
/*
package org.unisonweb
import org.scalacheck._
import org.scalacheck.Prop._
import Term._
object LambdaLiftSpec extends Properties("Term.lambdaLift") {
implicit def toVar(s: String) = Var(s)
implicit def toNum(n: Int) = Num(n)
property("no lambda") = {
// let x = 1
// y = x
// y
val term = Let(
"x" -> 1,
"y" -> "x")(
"y")
// let x = 1
// y x = x
// y x
val liftedTerm = Let(
"x" -> 1,
"y" -> Lam("x")("x"))(
Var("y")("x")
)
Term.lambdaLift(term) ?= liftedTerm
}
property("simple lambda") = {
// let x = 1
// y a = x a
// y 5
val term = Let(
"x" -> 1,
"y" -> Lam("a")(Apply("x","a")))(
Var("y")(5)
)
// let x = 1
// y x a = x a
// y x 5
val liftedTerm = Let(
"x" -> 1,
"y" -> Lam("x","a")(Apply("x","a")))(
(Var("y")("x", 5))
)
Term.lambdaLift(term) ?= liftedTerm
}
property("noop (1)") = {
val id = Lam1("x")("x")
Term.lambdaLift(id) ?= id
}
property("noop (2)") = {
val e = Let("x" -> 1.0, "y" -> 2.0)("y")
Term.lambdaLift(e) ?= e
}
def isOk(t: Term): Boolean = ???
}
*/
| paulp/unison | runtime-jvm/main/src/test/scala/LambdaLiftSpec.scala | Scala | mit | 1,225 |
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package fm.serializer.protobuf
/**
 * Protobuf wire-format helpers: a tag is a single Int packing the field number
 * into the upper bits and the wire type into the lowest TAG_TYPE_BITS bits.
 */
object WireFormat {
  // Wire type constants (lowest 3 bits of a tag).
  final val WIRETYPE_VARINT = 0
  final val WIRETYPE_FIXED64_LE = 1 // Little Endian
  final val WIRETYPE_LENGTH_DELIMITED = 2
  final val WIRETYPE_START_GROUP = 3
  final val WIRETYPE_END_GROUP = 4
  //final val WIRETYPE_FIXED64_BE = 3 // Big Endian -- Was WIRETYPE_START_GROUP
  //final val WIRETYPE_FIXED32_BE = 4 // Big Endian -- Was WIRETYPE_END_GROUP
  final val WIRETYPE_FIXED32_LE = 5 // Little Endian
  final val WIRETYPE_NULL = 6 // Non-Standard -- For NULL values

  // Number of low-order tag bits reserved for the wire type, and the matching mask (7).
  final val TAG_TYPE_BITS: Int = 3
  final val TAG_TYPE_MASK: Int = (1 << TAG_TYPE_BITS) - 1

  /** Extracts the wire type from a tag (the lowest TAG_TYPE_BITS bits). */
  @inline final def getTagWireType(tag: Int): Int = {
    val wireType = tag & TAG_TYPE_MASK
    wireType
  }

  /** Extracts the field number from a tag (the upper 29 bits, unsigned shift). */
  @inline final def getTagFieldNumber(tag: Int): Int = {
    val fieldNumber = tag >>> TAG_TYPE_BITS
    fieldNumber
  }

  /** Packs a field number and wire type into a single tag value. */
  @inline final def makeTag(fieldNumber: Int, wireType: Int): Int = {
    val shiftedField = fieldNumber << TAG_TYPE_BITS
    shiftedField | wireType
  }
}
package org.embulk.parser.twitter_ads_stats.define
import org.embulk.parser.twitter_ads_stats.{Column, MetricElementNames, ParseException}
/** One stats record: an entity id plus its per-id data entries. */
case class Data(id: String, id_data: Seq[IDData]) {
  // Resolves every id_data entry, then combines the results right-to-left.
  // The left-most failure wins, matching the previous fold-of-cases behavior.
  private[define] def resolveColumns(metricElementNames: MetricElementNames,
                                     request: Request): Either[ParseException, Seq[Column]] = {
    val resolved = id_data.map(_.resolveColumns(id, metricElementNames, request))
    resolved.foldRight[Either[ParseException, Seq[Column]]](Right(Nil)) { (current, acc) =>
      for {
        head <- current
        tail <- acc
      } yield head ++: tail
    }
  }
}
object Data {
  // Field names of the Data case class, derived via FieldNameUtil.
  val fieldNames: Array[String] = FieldNameUtil.fieldList[Data]
}
| septeni-original/embulk-parser-twitter_ads_stats | src/main/scala/org/embulk/parser/twitter_ads_stats/define/Data.scala | Scala | mit | 801 |
package de.sciss.fscape
package tests
import de.sciss.audiofile.AudioFile
import de.sciss.file._
import de.sciss.fscape.Ops._
import de.sciss.fscape.gui.SimpleGUI
import de.sciss.fscape.stream.Control
import scala.swing.Swing
object LoudnessTest extends App {
  // Shadows Predef.any2stringadd so accidental `value + "string"` concatenation will not compile.
  def any2stringadd: Any = ()
  // FScape graph: measures loudness (polled as "phon") over 1-second windows with
  // 50% overlap and prints the average over the whole file.
  lazy val g = Graph {
    import graph._
    // val name = "13533_beesLoop.aif"
    // val name = "18667_beesLoop.aif"
    // val name = "19178_beesLoop.aif"
    // val fIn = file("/data/projects/Schwaermen/audio_work/for_pi/loops/") / name
    val fIn = file("/data/projects/Wr_t_ngM_ch_n_/database1/db000055.aif")
    val specIn = AudioFile.readSpec(fIn)
    import specIn.{numChannels, sampleRate}
    // Fixed gain applied to the input before analysis.
    val in = AudioFileIn(fIn.toURI, numChannels = specIn.numChannels) * 6.724812881902514
    // Mix down to mono by summing the first two channels when the input is multi-channel.
    val inMono = if (numChannels == 1) in else ChannelProxy(in, 0) + ChannelProxy(in, 1)
    // One-second analysis windows with 50% overlap.
    val winSize = sampleRate.toInt
    val inSlid = Sliding(inMono, size = winSize, step = winSize/2)
    // val inSlid = DC(0).take(specIn.numFrames)
    // val inSlid = WhiteNoise(0.001).take(specIn.numFrames)
    val loud = Loudness(in = inSlid, sampleRate = sampleRate, size = winSize)
    // Average loudness across all windows = running sum divided by window count.
    val sum = RunningSum(loud)
    val num = Length(sum)
    val avg = sum.last / num
    avg.poll(0, "avg")
    /*
    val scl1 = ((dif1.pow(0.85) - 0) * 1.28).pow(1.0).dbamp // 3.1148602768766525
    val dif8 = tgt - avg8
    val scl8 = ((dif8.abs.pow(0.85) * dif8.signum - 0.0) * 1.28).pow(1.0).dbamp // 7.549001971705424
    */
    RepeatWindow(loud).poll(Metro(2), "phon")
    // Progress(out / specIn.numFrames.toDouble, Metro(44100))
  }
  val config = Control.Config()
  config.useAsync = false
  implicit val ctrl: Control = Control(config)
  // Show the control GUI on the AWT event-dispatch thread.
  Swing.onEDT {
    SimpleGUI(ctrl)
  }
  ctrl.run(g)
} | Sciss/FScape-next | core/jvm/src/test/scala/de/sciss/fscape/tests/LoudnessTest.scala | Scala | agpl-3.0 | 1,813 |
package io.prediction.controller.java
import io.prediction.controller.Engine
import io.prediction.controller.Params
import io.prediction.controller.EngineParams
import io.prediction.controller.IEngineFactory
import java.lang.{ Iterable => JIterable }
import java.util.{ Map => JMap }
import java.util.HashMap
import scala.collection.JavaConversions._
/**
* This class chains up the entire data process. PredictionIO uses this
* information to create workflows and deployments. In Java, use
* JavaEngineBuilder to conveniently instantiate an instance of this class.
* For now it only accepts LJavaServing as the serving class.
*
* @param <TD> Training Data
* @param <EI> EvaluationInfo
* @param <PD> Prepared Data
* @param <Q> Input Query
* @param <P> Output Prediction
* @param <A> Actual Value
*/
class PJavaEngine[TD, EI, PD, Q, P, A](
    dataSourceClass: Class[_ <: PJavaDataSource[TD, EI, Q, A]],
    preparatorClass: Class[_ <: PJavaPreparator[TD, PD]],
    algorithmClassMap: JMap[String, Class[_ <: PJavaAlgorithm[PD, _, Q, P]]],
    servingClass: Class[_ <: LJavaServing[Q, P]]
) extends Engine(
    dataSourceClass,
    preparatorClass,
    // Convert the Java map to the immutable Scala Map expected by Engine.
    Map(algorithmClassMap.toSeq: _*),
    servingClass)
| TheDataShed/PredictionIO | core/src/main/scala/controller/java/PJavaEngine.scala | Scala | apache-2.0 | 1,219 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfter
import org.scalatest.mock.MockitoSugar
import org.apache.spark.executor.{DataReadMethod, TaskMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage._
// TODO: Test the CacheManager's thread-safety aspects
class CacheManagerSuite extends SparkFunSuite with LocalSparkContext with BeforeAndAfter
  with MockitoSugar {

  var blockManager: BlockManager = _
  var cacheManager: CacheManager = _
  var split: Partition = _
  /** An RDD which returns the values [1, 2, 3, 4]. */
  var rdd: RDD[Int] = _
  var rdd2: RDD[Int] = _
  var rdd3: RDD[Int] = _

  before {
    sc = new SparkContext("local", "test")
    blockManager = mock[BlockManager] // mocked BlockManager
    // CacheManager under test, backed by the mocked BlockManager
    cacheManager = new CacheManager(blockManager)
    split = new Partition { override def index: Int = 0 }
    rdd = new RDD[Int](sc, Nil) {
      override def getPartitions: Array[Partition] = Array(split)
      override val getDependencies = List[Dependency[_]]() // no dependencies
      override def compute(split: Partition, context: TaskContext): Iterator[Int] ={
        //println(split.index+"=="+context.taskMetrics().hostname);
        Array(1, 2, 3, 4).iterator // the computed values
      }
    }
    rdd2 = new RDD[Int](sc, List(new OneToOneDependency(rdd))) { // depends on rdd
      override def getPartitions: Array[Partition] = firstParent[Int].partitions
      override def compute(split: Partition, context: TaskContext): Iterator[Int] =
        firstParent[Int].iterator(split, context)
    }.cache() // cached
    rdd3 = new RDD[Int](sc, List(new OneToOneDependency(rdd2))) { // depends on rdd2
      override def getPartitions: Array[Partition] = firstParent[Int].partitions
      override def compute(split: Partition, context: TaskContext): Iterator[Int] =
        firstParent[Int].iterator(split, context)
    }.cache() // cached
  }

  test("get uncached rdd") {
    // Do not mock this test, because attempting to match Array[Any], which is not covariant,
    // in blockManager.put is a losing battle. You have been warned.
    blockManager = sc.env.blockManager
    cacheManager = sc.env.cacheManager
    val context = TaskContext.empty()
    val computeValue = cacheManager.getOrCompute(rdd, split, context, StorageLevel.MEMORY_ONLY)
    val getValue = blockManager.get(RDDBlockId(rdd.id, split.index))
    assert(computeValue.toList === List(1, 2, 3, 4)) // the freshly computed value
    // getValue is a BlockResult; if undefined, the block cached by getOrCompute was not found.
    assert(getValue.isDefined, "Block cached from getOrCompute is not found!")
    assert(getValue.get.data.toList === List(1, 2, 3, 4))
  }

  test("get cached rdd") {
    val result = new BlockResult(Array(5, 6, 7).iterator, DataReadMethod.Memory, 12)
    when(blockManager.get(RDDBlockId(0, 0))).thenReturn(Some(result)) // stubbed cached block
    val context = TaskContext.empty()
    val getValue = blockManager.get(RDDBlockId(rdd.id, split.index))
    println(split.index+"==rddId=="+rdd.id+"==="+getValue.get)
    val value = cacheManager.getOrCompute(rdd, split, context, StorageLevel.MEMORY_ONLY)
    assert(value.toList === List(5, 6, 7))
  }

  test("get uncached local rdd") {
    // Local computation should not persist the resulting value, so don't expect a put().
    when(blockManager.get(RDDBlockId(0, 0))).thenReturn(None)
    val context = new TaskContextImpl(0, 0, 0, 0, null, null, Seq.empty, runningLocally = true)
    val value = cacheManager.getOrCompute(rdd, split, context, StorageLevel.MEMORY_ONLY)
    assert(value.toList === List(1, 2, 3, 4))
  }

  test("verify task metrics updated correctly") {
    cacheManager = sc.env.cacheManager
    val context = TaskContext.empty()
    cacheManager.getOrCompute(rdd3, split, context, StorageLevel.MEMORY_ONLY)
    // Computing rdd3 caches both rdd2 and rdd3, hence two updated blocks.
    assert(context.taskMetrics.updatedBlocks.getOrElse(Seq()).size === 2)
  }
}
| tophua/spark1.52 | core/src/test/scala/org/apache/spark/CacheManagerSuite.scala | Scala | apache-2.0 | 5,138 |
package SpMVAccel
import Chisel._
import TidbitsAXI._
import TidbitsSimUtils._
object MainObj {
  val testOutputDir = "testOutput/"
  val verilogOutputDir = "verilogOutput/"
  val driverOutputDir = "driverOutput/"

  /** Chisel arguments for a simulation/test build, targeting testOutputDir/<cmpName>. */
  def makeTestArgs(cmpName: String): Array[String] =
    Array("--targetDir", testOutputDir + cmpName, "--compile", "--test", "--genHarness")

  /** Chisel arguments for Verilog generation, targeting verilogOutputDir/<cmpName>. */
  def makeVerilogBuildArgs(cmpName: String): Array[String] =
    Array("--targetDir", verilogOutputDir + cmpName, "--v")

  /** Prints command-line usage and terminates the JVM. */
  def printUsageAndExit() {
    println("Usage: sbt \"run <op> <comp> <options>\"")
    println("where <op> = {inst | test | driver}")
    println("<comp> = {" + instFxnMap.keys.reduce(_ +" | "+ _) + "}")
    System.exit(0)
  }

  // Accelerator variants selectable from the command line, keyed by name.
  val instFxnMap: Map[String, SpMVAccelWrapperParams => AXIWrappableAccel] = Map(
    "BufferAll" -> {p => new SpMVAcceleratorBufferAll(p)},
    "BufferNone" -> {p => new SpMVAcceleratorBufferNone(p)},
    "BufferSel" -> {p => new SpMVAcceleratorBufferSel(p)},
    "OldCache" -> {p => new SpMVAcceleratorOldCache(p)},
    "NewCache" -> {p => new SpMVAcceleratorNewCache(p)}
    //"TestBackend" -> {p => new TestSpMVBackend()},
    //"TestFrontend" -> {p => new TestSpMVFrontend()
  )

  def main(args: Array[String]): Unit = {
    if(args.size < 2) { printUsageAndExit() }
    val op = args(0)
    val harnessMemDepth = 64*1024*1024
    val cmpName = args(1)
    val instFxn = instFxnMap(cmpName)
    // Remaining arguments are passed through as accelerator parameters.
    val opts = args.toList.takeRight(args.size-2)
    val p = new SpMVAccelWrapperParams(opts)

    if(op == "inst") {
      makeHarnessVerilog(cmpName, p, instFxn)
    } else if(op == "test") {
      makeHarnessTest(cmpName, harnessMemDepth, p, instFxn)
    } else if(op == "driver") {
      makeHarnessDriver(cmpName, p, instFxn)
    } else {
      printUsageAndExit()
    }
  }

  /** Generates the software driver sources into driverOutputDir. */
  def makeHarnessDriver(cmpName: String, p: SpMVAccelWrapperParams,
                        fxn: SpMVAccelWrapperParams => AXIWrappableAccel) {
    val outDir = new java.io.File(driverOutputDir)
    outDir.mkdir()
    fxn(p).buildDriver(driverOutputDir)
  }

  /** Emits Verilog for the AXI-wrapped accelerator. */
  def makeHarnessVerilog(cmpName: String, p: SpMVAccelWrapperParams,
                         fxn: SpMVAccelWrapperParams => AXIWrappableAccel) {
    val vargs = makeVerilogBuildArgs(cmpName+"-"+p.suffix)
    val instModule = {() => Module(new AXIAccelWrapper(() => fxn(p)))}
    chiselMain(vargs, instModule)
  }

  /** Builds and runs the simulation test harness for the accelerator. */
  def makeHarnessTest(cmpName: String,
                      memDepth: Int,
                      p: SpMVAccelWrapperParams,
                      fxn: SpMVAccelWrapperParams => AXIWrappableAccel) {
    // Fix: use the parameter-suffixed target directory. Previously a second,
    // unsuffixed argument array ("aT") was passed to chiselMainTest while the
    // suffixed "targs" was left unused, inconsistent with makeHarnessVerilog.
    val targs = makeTestArgs(cmpName+"-"+p.suffix)
    val instModule = {() => Module(new WrappableAccelHarness(() => fxn(p), memDepth))}
    val instTest = {c => new WrappableAccelTester(c)}
    chiselMainTest(targs, instModule)(instTest)
  }
}
| maltanar/spmv-vector-cache | chisel/Main.scala | Scala | bsd-3-clause | 2,884 |
package sbt
import org.scalacheck._
import Prop._
import SettingsUsage._
import SettingsExample._
/** Property-based tests for sbt's settings system (evaluation, chains, derived settings). */
object SettingsTest extends Properties("settings") {
  import scala.reflect.Manifest

  final val ChainMax = 5000
  lazy val chainLengthGen = Gen.choose(1, ChainMax)

  property("Basic settings test") = secure(all(tests: _*))
  property("Basic chain") = forAll(chainLengthGen) { (i: Int) =>
    val abs = math.abs(i)
    singleIntTest(chain(abs, value(0)), abs)
  }
  property("Basic bind chain") = forAll(chainLengthGen) { (i: Int) =>
    val abs = math.abs(i)
    singleIntTest(chainBind(value(abs)), 0)
  }
  property("Allows references to completed settings") = forAllNoShrink(30) { allowedReference }
  final def allowedReference(intermediate: Int): Prop =
    {
      val top = value(intermediate)
      def iterate(init: Initialize[Int]): Initialize[Int] =
        bind(init) { t =>
          if (t <= 0)
            top
          else
            iterate(value(t - 1))
        }
      // Succeeds as long as evaluation terminates without error.
      evaluate(setting(chk, iterate(top)) :: Nil); true
    }
  property("Derived setting chain depending on (prev derived, normal setting)") = forAllNoShrink(Gen.choose(1, 100).label("numSettings")) { derivedSettings }
  final def derivedSettings(nr: Int): Prop =
    {
      val genScopedKeys = {
        // We want to generate lists of keys that DO NOT include the "ch" key we use to check things.
        val attrKeys = mkAttrKeys[Int](nr).filter(_.forall(_.label != "ch"))
        attrKeys map (_ map (ak => ScopedKey(Scope(0), ak)))
      }.label("scopedKeys").filter(_.nonEmpty)
      forAll(genScopedKeys) { scopedKeys =>
        try {
          // Note: it's evil to grab last IF you haven't verified the set can't be empty.
          val last = scopedKeys.last
          // Build a chain where each derived setting depends on the previous one plus "ch".
          val derivedSettings: Seq[Setting[Int]] = (
            for {
              List(scoped0, scoped1) <- chk :: scopedKeys sliding 2
              nextInit = if (scoped0 == chk) chk
                         else (scoped0 zipWith chk) { (p, _) => p + 1 }
            } yield derive(setting(scoped1, nextInit))
          ).toSeq
          {
            // Note: this causes a cycle reference error, quite frequently.
            checkKey(last, Some(nr - 1), evaluate(setting(chk, value(0)) +: derivedSettings)) :| "Not derived?"
          } && {
            checkKey(last, None, evaluate(derivedSettings)) :| "Should not be derived"
          }
        } catch {
          case t: Throwable =>
            // TODO - For debugging only.
            t.printStackTrace(System.err)
            throw t
        }
      }
    }
  // Generates `nr` attribute keys with pairwise-distinct alphabetic labels.
  private def mkAttrKeys[T](nr: Int)(implicit mf: Manifest[T]): Gen[List[AttributeKey[T]]] =
    {
      import Gen._
      val nonEmptyAlphaStr =
        nonEmptyListOf(alphaChar).map(_.mkString).suchThat(_.forall(_.isLetter))
      (for {
        list <- Gen.listOfN(nr, nonEmptyAlphaStr) suchThat (l => l.size == l.distinct.size)
        item <- list
      } yield AttributeKey[T](item)).label(s"mkAttrKeys($nr)")
    }
  property("Derived setting(s) replace DerivedSetting in the Seq[Setting[_]]") = derivedKeepsPosition
  final def derivedKeepsPosition: Prop =
    {
      val a: ScopedKey[Int] = ScopedKey(Scope(0), AttributeKey[Int]("a"))
      val b: ScopedKey[Int] = ScopedKey(Scope(0), AttributeKey[Int]("b"))
      // A later plain setting for b overrides the derived one.
      val prop1 = {
        val settings: Seq[Setting[_]] = Seq(
          setting(a, value(3)),
          setting(b, value(6)),
          derive(setting(b, a)),
          setting(a, value(5)),
          setting(b, value(8))
        )
        val ev = evaluate(settings)
        checkKey(a, Some(5), ev) && checkKey(b, Some(8), ev)
      }
      // Without a later override, the derived setting takes effect in place.
      val prop2 = {
        val settings: Seq[Setting[Int]] = Seq(
          setting(a, value(3)),
          setting(b, value(6)),
          derive(setting(b, a)),
          setting(a, value(5))
        )
        val ev = evaluate(settings)
        checkKey(a, Some(5), ev) && checkKey(b, Some(5), ev)
      }
      prop1 && prop2
    }
  property("DerivedSetting in ThisBuild scopes derived settings under projects thus allowing safe +=") = forAllNoShrink(Gen.choose(1, 100)) { derivedSettingsScope }
  final def derivedSettingsScope(nrProjects: Int): Prop =
    {
      forAll(mkAttrKeys[Int](2)) {
        case List(key, derivedKey) =>
          val projectKeys = for { proj <- 1 to nrProjects } yield ScopedKey(Scope(1, proj), key)
          val projectDerivedKeys = for { proj <- 1 to nrProjects } yield ScopedKey(Scope(1, proj), derivedKey)
          val globalKey = ScopedKey(Scope(0), key)
          val globalDerivedKey = ScopedKey(Scope(0), derivedKey)
          // Each project defines an initial value, but the update is defined in globalKey.
          // However, the derived Settings that come from this should be scoped in each project.
          val settings: Seq[Setting[_]] =
            derive(setting(globalDerivedKey, SettingsExample.map(globalKey)(_ + 1))) +: projectKeys.map(pk => setting(pk, value(0)))
          val ev = evaluate(settings)
          // Also check that the key has no value at the "global" scope
          val props = for { pk <- projectDerivedKeys } yield checkKey(pk, Some(1), ev)
          checkKey(globalDerivedKey, None, ev) && Prop.all(props: _*)
      }
    }
  // Circular (dynamic) references currently loop infinitely.
  // This is the expected behavior (detecting dynamic cycles is expensive),
  // but it may be necessary to provide an option to detect them (with a performance hit).
  // This would test that cycle detection.
  // property("Catches circular references") = forAll(chainLengthGen) { checkCircularReferences _ }
  final def checkCircularReferences(intermediate: Int): Prop =
    {
      val ccr = new CCR(intermediate)
      try { evaluate(setting(chk, ccr.top) :: Nil); false }
      catch { case e: java.lang.Exception => true }
    }
  // Checks every (scope, key) combination against its expected value below.
  def tests =
    for (i <- 0 to 5; k <- Seq(a, b)) yield {
      val expected = expectedValues(2 * i + (if (k == a) 0 else 1))
      checkKey[Int](ScopedKey(Scope(i), k), expected, applied)
    }
  lazy val expectedValues = None :: None :: None :: None :: None :: None :: Some(3) :: None :: Some(3) :: Some(9) :: Some(4) :: Some(9) :: Nil
  lazy val ch = AttributeKey[Int]("ch")
  lazy val chk = ScopedKey(Scope(0), ch)
  // Applies `_ + 1` to `prev` i times.
  def chain(i: Int, prev: Initialize[Int]): Initialize[Int] =
    if (i <= 0) prev else chain(i - 1, prev(_ + 1))
  // Counts `prev` down to zero through a chain of dynamic binds.
  def chainBind(prev: Initialize[Int]): Initialize[Int] =
    bind(prev) { v =>
      if (v <= 0) prev else chainBind(value(v - 1))
    }
  def singleIntTest(i: Initialize[Int], expected: Int) =
    {
      val eval = evaluate(setting(chk, i) :: Nil)
      checkKey(chk, Some(expected), eval)
    }
  // Labeled property: the stored value for `key` must equal `expected`.
  def checkKey[T](key: ScopedKey[T], expected: Option[T], settings: Settings[Scope]) =
    {
      val value = settings.get(key.scope, key.key)
      ("Key: " + key) |:
        ("Value: " + value) |:
        ("Expected: " + expected) |:
        (value == expected)
    }
  def evaluate(settings: Seq[Setting[_]]): Settings[Scope] =
    try { make(settings)(delegates, scopeLocal, showFullKey) }
    catch { case e: Throwable => e.printStackTrace; throw e }
}
// This setup is a workaround for module synchronization issues
/** Builds a dynamically circular settings reference: `iterate` eventually binds back to `top`. */
final class CCR(intermediate: Int) {
  lazy val top = iterate(value(intermediate), intermediate)
  def iterate(init: Initialize[Int], i: Int): Initialize[Int] =
    bind(init) { t =>
      if (t <= 0)
        top // closes the cycle
      else
        iterate(value(t - 1), t - 1)
    }
}
| pdalpra/sbt | util/collection/src/test/scala/SettingsTest.scala | Scala | bsd-3-clause | 7,492 |
package com.arcusys.valamis.questionbank.model
import org.scalatest.{ FlatSpec, Matchers }
class ChoiceQuestionTest extends FlatSpec with Matchers {

  "Choice answer" can "be constructed" in {
    val answer = new ChoiceAnswer(11, "Scala", isCorrect = true)
    answer.id should equal(11)
    answer.text should equal("Scala")
    answer.isCorrect should equal(true)
  }

  // Three sample answers shared by the question construction tests below.
  private def sampleAnswers: Seq[ChoiceAnswer] = Seq(
    new ChoiceAnswer(11, "Scala", isCorrect = true),
    new ChoiceAnswer(2, "Java", isCorrect = false),
    new ChoiceAnswer(33, "C#", isCorrect = true)
  )

  // Builds a question with fixed field values and the given category/answers.
  private def buildQuestion(categoryId: Option[Int], answers: Seq[ChoiceAnswer]) = new ChoiceQuestion(
    id = 11,
    categoryID = categoryId,
    title = "Check programming language taste",
    text = "Which programming language is good",
    explanationText = "You know why",
    rightAnswerText = "Your answer is correct",
    wrongAnswerText = "Your answer is incorrect",
    answers = answers,
    forceCorrectCount = false,
    courseID = Some(1)
  )

  // Verifies every field of a question produced by buildQuestion.
  private def verifyQuestion(question: ChoiceQuestion, categoryId: Option[Int], answers: Seq[ChoiceAnswer]) {
    question.questionTypeCode should equal(0)
    question.id should equal(11)
    question.categoryID should equal(categoryId)
    question.title should equal("Check programming language taste")
    question.text should equal("Which programming language is good")
    question.explanationText should equal("You know why")
    question.rightAnswerText should equal("Your answer is correct")
    question.wrongAnswerText should equal("Your answer is incorrect")
    question.forceCorrectCount should equal(false)
    question.answers should equal(answers)
  }

  "Choice question" can "be constructed" in {
    val answers = sampleAnswers
    verifyQuestion(buildQuestion(categoryId = Some(2), answers = answers), categoryId = Some(2), answers = answers)
  }

  it can "be constructed with empty category" in {
    val answers = sampleAnswers
    verifyQuestion(buildQuestion(categoryId = None, answers = answers), categoryId = None, answers = answers)
  }

  it can "be constructed without answers" in {
    verifyQuestion(buildQuestion(categoryId = Some(2), answers = Nil), categoryId = Some(2), answers = Nil)
  }
}
| ViLPy/Valamis | valamis-questionbank/src/test/scala/com/arcusys/valamis/questionbank/model/ChoiceQuestionTest.scala | Scala | lgpl-3.0 | 2,339 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.fs.os
import kumoi.shell.fs._
import kumoi.shell.aaa._
/** Mac OS variant of the OS-dependent filesystem support. */
class FSMac extends FSOSDepend {
  // Disk-free reporting is not implemented on Mac OS; always answers with an empty list.
  def df(implicit auth: AAA) = List.empty
} | axi-sugiki/kumoi | src/kumoi/impl/fs/os/FSMac.scala | Scala | apache-2.0 | 770 |
package ui.shader.builder
import scala.collection.mutable.ListBuffer
/** A brace-delimited GLSL block made of a sequence of commands. */
class GlBlock(val commands: ListBuffer[GlCommand]) {
  // Renders each command's GLSL on its own line, wrapped in braces.
  def toGlsl: String = {
    val body = commands.map("\\n" + _.toGlsl).mkString
    s"{\\n\\t$body\\n}"
  }
}
object GlBlock {
  /** Wraps an existing buffer of commands. */
  def apply(commands: ListBuffer[GlCommand]): GlBlock = new GlBlock(commands)

  /** Builds a block from individually supplied commands. */
  def apply(commands: GlCommand*): GlBlock = new GlBlock(commands.to[ListBuffer])
}
| gvatn/play-scalajs-webgl-spark | client/src/main/scala/ui/shader/builder/GlBlock.scala | Scala | mit | 438 |
/**
* @author Yuuto
*/
package yuuto.enhancedinventories.client.gui.elements
import java.util.List
import cofh.lib.gui.element.ElementButton
import cofh.lib.gui.element.ElementButtonBase
import cofh.lib.gui.GuiBase
import cofh.lib.util.helpers.StringHelper
class ButtonSchematic(gui:GuiBase, pX:Int, pY:Int, sX:Int, sY:Int, val button:Int) extends ElementButtonBase(gui, pX, pY, sX, sY){
  // Name the element after its role: 0 = cancel button, 1 = save button.
  if(button==0){
    this.setName("buttonCancel");
  }else if(button==1){
    this.setName("buttonSave");
  }

  // Draws the button icon; `hover` is 1 while the cursor is over the element, 0 otherwise.
  override def drawBackground(mouseX:Int, mouseY:Int, gameTicks:Float) {
    val hover=if(isOver(mouseX, mouseY)) 1 else 0;
    button match {
      case 0=>{
        gui.drawButton("IconCancel", this.posX, this.posY, 1, hover);
      }case 1=>{
        if(this.isEnabled())
          gui.drawButton("IconAccept", this.posX, this.posY, 1, hover);
        else {
          // Disabled save: inactive icon with state 2 (presumably the greyed-out state — confirm).
          gui.drawButton("IconAcceptInactive", this.posX, this.posY, 1, 2);
        }
      }case i=>{}
    }
  }

  // No foreground rendering for this element.
  override def drawForeground(mouseX:Int, mouseY:Int) {
  }

  // True when the mouse coordinates fall inside this element's bounding box.
  def isOver(mouseX:Int, mouseY:Int):Boolean={
    return (this.posX <= mouseX && mouseX < this.posX+this.sizeX && this.posY <= mouseY && mouseY < this.posY+this.sizeY);
  }

  // Forwards clicks to the GUI handler; ignored while the button is disabled.
  override def onMousePressed(mouseX:Int, mouseY:Int, mouseButton:Int):Boolean={
    if(isEnabled()){
      gui.handleElementButtonClick(getName(), mouseButton);
      return true;
    }
    return false;
  }

  // Adds the localized tooltip ("clear" for cancel, "save" for save) when enabled.
  override def addTooltip(list:List[String]) {
    if(!this.isEnabled())
      return;
    button match{
      case 0=>list.add(StringHelper.localize("info.ei.clear"));
      case 1=>list.add(StringHelper.localize("info.ei.save"));
      case i=>{}
    }
  }
} | AnimeniacYuuto/EnhancedInventories | src/main/scala/yuuto/enhancedinventories/client/gui/elements/ButtonSchematic.scala | Scala | gpl-2.0 | 1,701 |
package otherstuff
trait ExtraBits {
  // Operations contributed by the "extra bits" capability.
  trait ExtraBitsOps {
    def bits: String
  }
  // Abstract member: implementors supply the concrete ops instance.
  val extra: ExtraBitsOps
}
trait Module extends ExtraBits {
  // Core operations of a module.
  trait ModuleOps {
    def op: String
  }
  // Abstract member: implementors supply the concrete ops instance.
  val ops: ModuleOps
}
trait FooModule extends Module {
  // "Foo" implementation: appends whatever the mixed-in ExtraBits provides.
  object ops extends ModuleOps {
    def op = s"Foo${extra.bits}"
  }
}
trait DefaultExtraBits extends ExtraBits {
  // Default: contributes no extra bits.
  object extra extends ExtraBitsOps {
    def bits = ""
  }
}
trait BarExtraBits extends ExtraBits {
  // Decorating variant: contributes "Bar".
  object extra extends ExtraBitsOps {
    def bits = "Bar"
  }
}
object AbstractOverideModelue extends App {
  // Demonstrates mixing one module with different ExtraBits implementations.
  val baseModule = new AnyRef with FooModule with DefaultExtraBits
  println(s"baseModule.ops.op = ${baseModule.ops.op}") // "Foo" + "" = "Foo"
  val decoratedModule = new AnyRef with FooModule with BarExtraBits
  println(s"decoratedModule.ops.op = ${decoratedModule.ops.op}") // "Foo" + "Bar" = "FooBar"
}
| jthompson-hiya/akka-streams-sandbox | src/main/scala/otherstuff/AbstractOverideModelue.scala | Scala | mit | 839 |
package mesosphere.marathon.integration.setup
import mesosphere.marathon.state.PathId._
import scala.reflect.ClassTag
import com.google.inject.Scopes
import javax.ws.rs._
import javax.ws.rs.core.{ Response, MediaType }
import javax.inject.Inject
import org.apache.log4j.Logger
import spray.httpx.marshalling.{ MarshallingContext, Marshaller }
import spray.http.{ ContentTypes, HttpEntity }
import spray.httpx.UnsuccessfulResponseException
import spray.http.HttpResponse
import mesosphere.chaos.http.RestModule
import mesosphere.marathon.api.MarathonRestModule
/**
* Result of an REST operation.
*/
case class RestResult[T](value: T, code: Int) {
  // An operation counts as successful only on HTTP status 200.
  def success = code == 200
}
/**
* Marshal and Unmarshal json via jackson jaxb over spray http client.
*/
trait JacksonSprayMarshaller {

  // Jackson object mapper configured the same way as Marathon's REST layer.
  val mapper = new MarathonRestModule().provideRestMapper()

  /** Marshals any value to a JSON HTTP entity using the Jackson mapper. */
  def marshaller[T]: Marshaller[T] = new Marshaller[T] {
    def apply(value: T, ctx: MarshallingContext): Unit = {
      ctx.marshalTo(HttpEntity(ContentTypes.`application/json`, mapper.writeValueAsString(value)))
    }
  }

  /** Deserializes a successful response body into T; throws UnsuccessfulResponseException otherwise. */
  def read[T](implicit tag: ClassTag[T]): HttpResponse ⇒ RestResult[T] =
    response ⇒
      if (response.status.isSuccess) {
        val value = mapper.readValue(response.entity.asString, tag.runtimeClass.asInstanceOf[Class[T]])
        RestResult(value, response.status.intValue)
      }
      else {
        throw new UnsuccessfulResponseException(response)
      }

  // Wraps the raw response (no body parsing) together with its status code.
  def responseResult: HttpResponse => RestResult[HttpResponse] = response => RestResult(response, response.status.intValue)

  // Converts an array-valued result into a list-valued one, keeping the status code.
  def toList[T]: RestResult[Array[T]] => RestResult[List[T]] = result => RestResult(result.value.toList, result.code)
}
/**
* Guava integration test module, which start a local http server.
*/
class IntegrationTestModule extends RestModule {
  override def configureServlets(): Unit = {
    super.configureServlets()
    // Register the test-only REST endpoints as singletons.
    bind(classOf[CallbackEventHandler]).in(Scopes.SINGLETON)
    bind(classOf[ApplicationHealthCheck]).in(Scopes.SINGLETON)
  }
}
/**
* The common data structure for all callback events.
* Needed for dumb jackson.
*/
// Generic envelope: the event's type plus its raw JSON properties.
case class CallbackEvent(eventType: String, info: Map[String, Any])
/**
* Callback
*/
@Path("callback")
class CallbackEventHandler @Inject() () {

  private[this] val log = Logger.getLogger(getClass.getName)

  // Simple GET endpoint returning a fixed JSON list (usable as a liveness probe).
  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  def index = List(1, 2, 3, 4, 5)

  /** Receives an event callback and forwards it to all registered test listeners. */
  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  def handleEvent(map: Map[String, Any]): Unit = {
    val kind = map.getOrElse("eventType", "unknown").asInstanceOf[String]
    log.info(s"Received callback event: $kind with props $map")
    val event = CallbackEvent(kind, map)
    ExternalMarathonIntegrationTest.listener.foreach(_.handleEvent(event))
  }
}
@Path("health")
class ApplicationHealthCheck @Inject() () {

  /**
   * Reports health for (appId, versionId, port): a health-check entry for the
   * exact port takes precedence; otherwise a port-0 entry acts as the
   * app/version-wide definition. With no matching entry, the app is healthy.
   * Healthy -> 200 OK, unhealthy -> 5xx.
   */
  @GET
  @Path("{appId:.+}/{versionId}/{port}")
  def isApplicationHealthy(@PathParam("appId") path: String, @PathParam("versionId") versionId: String, @PathParam("port") port: Int): Response = {
    val appId = path.toRootPath
    def instance = ExternalMarathonIntegrationTest.healthChecks.find{ c => c.appId == appId && c.versionId == versionId && c.port == port }
    def definition = ExternalMarathonIntegrationTest.healthChecks.find{ c => c.appId == appId && c.versionId == versionId && c.port == 0 }
    val state = instance.orElse(definition).fold(true)(_.healthy)
    if (state) Response.ok().build() else Response.serverError().build()
  }
}
| tnachen/marathon | src/test/scala/mesosphere/marathon/integration/setup/IntegrationTestModule.scala | Scala | apache-2.0 | 3,560 |
package fpinscala.streamingio
import fpinscala.iomonad.{IO,Monad,Free,unsafePerformIO}
object ImperativeAndLazyIO {

  /*
  We are going to consider various approaches to the simple task of
  checking whether a file contains more than 40,000 lines.

  Our first implementation is an imperative implementation, embedded
  into `IO`.
  */

  import java.io._

  // Imperatively counts lines, stopping as soon as the 40,000 threshold is crossed.
  def linesGt40k(filename: String): IO[Boolean] = IO {
    // There are a number of convenience functions in scala.io.Source
    // for reading from external sources such as files.
    val src = io.Source.fromFile(filename)
    try {
      var count = 0
      // Obtain a stateful iterator from the Source
      val lines: Iterator[String] = src.getLines
      while (count <= 40000 && lines.hasNext) {
        lines.next // has side effect of advancing to next element
        count += 1
      }
      count > 40000
    }
    finally src.close // always release the file handle
  }

  /*
  The above code is rather low-level, and it's not compositional,
  either. Consider the following scenarios:

  * Check whether the number of _nonempty_ lines in the file exceeds
    40,000
  * Find a line index before 40,000 where the first letter of
    consecutive lines spells out `"abracadabra"`.

  We cannot just compose our existing implementation with some
  other combinator(s) to implement these tasks. Our implementation is
  a monolithic loop, and we must modify this loop directly if we want
  to change its behavior.

  Now imagine if we had a `Stream[String]` for the lines of the file
  and we could assemble functionality using all the `Stream` functions
  we know and love.
  */

  object Examples {
    val lines: Stream[String] = sys.error("defined elsewhere")
    val ex1 = lines.zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex2 = lines.filter(!_.trim.isEmpty).zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex3 = lines.take(40000).map(_.head).indexOfSlice("abracadabra".toList)
  }

  /*
  Could we actually write the above? Not quite. We could 'cheat' and
  return an `IO[Stream[String]]` representing the lines of a file:
  */

  // Lazily streams the file's lines; the source is closed only once the stream is fully forced.
  def lines(filename: String): IO[Stream[String]] = IO {
    val src = io.Source.fromFile(filename)
    src.getLines.toStream append { src.close; Stream.empty }
  }
  /*
  This is called _lazy I/O_, and it's problematic for a number of
  reasons, discussed in the book text. However, it would be nice to
  recover the same high-level, compositional style we are used to
  from our use of `List` and `Stream`.
  */
}
object SimpleStreamTransducers {
/*
We now introduce a type, `Process`, representing pure, single-input
stream transducers. It can be in of three states - it can be
emitting a value to the output (`Emit`), reading a value from its
input (`Await`) or signaling termination via `Halt`.
*/
sealed trait Process[I,O] {
  import Process._

  /*
   * A `Process[I,O]` can be used to transform a `Stream[I]` to a
   * `Stream[O]`.
   */
  def apply(s: Stream[I]): Stream[O] = this match {
    case Halt() => Stream()
    case Await(recv) => s match {
      case h #:: t => recv(Some(h))(t)
      case xs => recv(None)(xs) // Stream is empty
    }
    case Emit(h,t) => h #:: t(s)
  }

  /*
   * `Process` can be thought of as a sequence of values of type `O`
   * and many of the operations that would be defined for `List[O]`
   * can be defined for `Process[I,O]`, for instance `map`, `++` and
   * `flatMap`. The definitions are analogous.
   */
  def map[O2](f: O => O2): Process[I,O2] = this match {
    case Halt() => Halt()
    case Emit(h, t) => Emit(f(h), t map f)
    case Await(recv) => Await(recv andThen (_ map f))
  }

  // Appends `p`: runs this process to completion, then runs `p` on the remaining input.
  def ++(p: => Process[I,O]): Process[I,O] = this match {
    case Halt() => p
    case Emit(h, t) => Emit(h, t ++ p)
    case Await(recv) => Await(recv andThen (_ ++ p))
  }

  def flatMap[O2](f: O => Process[I,O2]): Process[I,O2] = this match {
    case Halt() => Halt()
    case Emit(h, t) => f(h) ++ t.flatMap(f)
    case Await(recv) => Await(recv andThen (_ flatMap f))
  }

  /*
   * Exercise 5: Implement `|>`. Let the types guide your implementation.
   * (Deliberately unimplemented -- this is a book exercise.)
   */
  def |>[O2](p2: Process[O,O2]): Process[I,O2] = ???

  /*
   * Feed `in` to this `Process`. Uses a tail recursive loop as long
   * as `this` is in the `Await` state.
   */
  def feed(in: Seq[I]): Process[I,O] = {
    @annotation.tailrec
    def go(in: Seq[I], cur: Process[I,O]): Process[I,O] =
      cur match {
        case Halt() => Halt()
        case Await(recv) =>
          if (in.nonEmpty) go(in.tail, recv(Some(in.head)))
          else cur
        case Emit(h, t) => Emit(h, t.feed(in))
      }
    go(in, this)
  }

  /*
   * See `Process.lift` for a typical repeating `Process`
   * definition expressed with explicit recursion.
   */

  /*
   * `Process` definitions can often be expressed without explicit
   * recursion, by repeating some simpler `Process` forever.
   */
  def repeat: Process[I,O] = {
    def go(p: Process[I,O]): Process[I,O] = p match {
      case Halt() => go(this) // restart from the beginning on halt
      case Await(recv) => Await {
        case None => recv(None) // end of input: do not restart
        case i => go(recv(i))
      }
      case Emit(h, t) => Emit(h, go(t))
    }
    go(this)
  }

  // Like `repeat`, but restarts at most `n` additional times before halting.
  def repeatN(n: Int): Process[I,O] = {
    def go(n: Int, p: Process[I,O]): Process[I,O] = p match {
      case Halt() => if (n > 0) go(n-1, this) else Halt()
      case Await(recv) => Await {
        case None => recv(None)
        case i => go(n,recv(i))
      }
      case Emit(h, t) => Emit(h, go(n,t))
    }
    go(n, this)
  }

  /*
   * As an example of `repeat`, see `Process.filter`. We define
   * a convenience function here for composing this `Process`
   * with a `Process` that filters the output type `O`.
   */
  def filter(f: O => Boolean): Process[I,O] =
    this |> Process.filter(f)

  /*
   * Exercise 6: Implement `zipWithIndex`.
   * (Deliberately unimplemented -- this is a book exercise.)
   */
  def zipWithIndex: Process[I,(O,Int)] = ???

  /* Add `p` to the fallback branch of this process */
  def orElse(p: Process[I,O]): Process[I,O] = this match {
    case Halt() => p
    case Await(recv) => Await {
      case None => p
      case x => recv(x)
    }
    case _ => this
  }
}
object Process {
case class Emit[I,O](
head: O,
tail: Process[I,O] = Halt[I,O]())
extends Process[I,O]
case class Await[I,O](
recv: Option[I] => Process[I,O])
extends Process[I,O]
case class Halt[I,O]() extends Process[I,O]
def emit[I,O](head: O,
tail: Process[I,O] = Halt[I,O]()): Process[I,O] =
Emit(head, tail)
// Process forms a monad, and we provide monad syntax for it
import fpinscala.iomonad.Monad
def monad[I]: Monad[({ type f[x] = Process[I,x]})#f] =
new Monad[({ type f[x] = Process[I,x]})#f] {
def unit[O](o: => O): Process[I,O] = emit(o)
def flatMap[O,O2](p: Process[I,O])(f: O => Process[I,O2]): Process[I,O2] =
p flatMap f
}
// enable monadic syntax for `Process` type
implicit def toMonadic[I,O](a: Process[I,O]) = monad[I].toMonadic(a)
/**
* A helper function to await an element or fall back to another process
* if there is no input.
*/
def await[I,O](f: I => Process[I,O],
fallback: Process[I,O] = Halt[I,O]()): Process[I,O] =
Await[I,O] {
case Some(i) => f(i)
case None => fallback
}
/*
* We can convert any function `f: I => O` to a `Process[I,O]`. We
* simply `Await`, then `Emit` the value received, transformed by
* `f`.
*/
def liftOne[I,O](f: I => O): Process[I,O] =
Await {
case Some(i) => emit(f(i))
case None => Halt()
}
def lift[I,O](f: I => O): Process[I,O] =
liftOne(f).repeat
/*
* As an example of `repeat`, here's a definition of `filter` that
* uses `repeat`.
*/
def filter[I](f: I => Boolean): Process[I,I] =
Await[I,I] {
case Some(i) if f(i) => emit(i)
case _ => Halt()
}.repeat
/*
* Here's a typical `Process` definition that requires tracking some
* piece of state (in this case, the running total):
*/
def sum: Process[Double,Double] = {
def go(acc: Double): Process[Double,Double] =
await(d => emit(d+acc, go(d+acc)))
go(0.0)
}
/*
* Exercise 1: Implement `take`, `drop`, `takeWhile`, and `dropWhile`.
*/
def take[I](n: Int): Process[I,I] = ???
def drop[I](n: Int): Process[I,I] = ???
def takeWhile[I](f: I => Boolean): Process[I,I] = ???
def dropWhile[I](f: I => Boolean): Process[I,I] = ???
/* The identity `Process`, just repeatedly echos its input. */
def id[I]: Process[I,I] = lift(identity)
/*
* Exercise 2: Implement `count`.
*/
def count[I]: Process[I,Int] = ???
/* For comparison, here is an explicit recursive implementation. */
def count2[I]: Process[I,Int] = {
def go(n: Int): Process[I,Int] =
await((i: I) => emit(n+1, go(n+1)))
go(0)
}
/*
* Exercise 3: Implement `mean`.
*/
def mean: Process[Double,Double] = ???
def loop[S,I,O](z: S)(f: (I,S) => (O,S)): Process[I,O] =
await((i: I) => f(i,z) match {
case (o,s2) => emit(o, loop(s2)(f))
})
/* Exercise 4: Implement `sum` and `count` in terms of `loop` */
def sumViaLoop: Process[Double,Double] = ???
def countViaLoop[I]: Process[I,Int] = ???
/*
* Exercise 7: Can you think of a generic combinator that would
* allow for the definition of `mean` in terms of `sum` and
* `count`?
*/
    /* Feeds a single (optional) input `oa` to `p`: any pending `Emit`s are
     * preserved, and the first `Await` encountered receives `oa`. */
    def feed[A,B](oa: Option[A])(p: Process[A,B]): Process[A,B] =
      p match {
        case Halt() => p
        case Emit(h,t) => Emit(h, feed(oa)(t))
        case Await(recv) => recv(oa)
      }
/*
* Exercise 6: Implement `zipWithIndex`.
*
* See definition on `Process` above.
*/
def zipWithIndex[I,O](p: Process[I,O]): Process[I,(O,Int)] = ???
def zip[A,B,C](p1: Process[A,B], p2: Process[A,C]): Process[A,(B,C)] = ???
def meanViaZip: Process[Double,Double] = ???
/*
* Exercise 8: Implement `exists`
*
* We choose to emit all intermediate values, and not halt.
* See `existsResult` below for a trimmed version.
*/
def exists[I](f: I => Boolean): Process[I,Boolean] = ???
/* Awaits then emits a single value, then halts. */
def echo[I]: Process[I,I] = await(i => emit(i))
def skip[I,O]: Process[I,O] = await(i => Halt())
def ignore[I,O]: Process[I,O] = skip.repeat
def terminated[I]: Process[I,Option[I]] =
await((i: I) => emit(Some(i), terminated[I]), emit(None))
    /**
     * Drives `p` over the lines of file `f`, folding each output into an
     * accumulator with `g`, all inside `IO`. `recv(None)` signals
     * end-of-input once the iterator is exhausted, and the file source is
     * always closed via `finally`.
     */
    def processFile[A,B](f: java.io.File,
                         p: Process[String, A],
                         z: B)(g: (B, A) => B): IO[B] = IO {
      @annotation.tailrec
      def go(ss: Iterator[String], cur: Process[String, A], acc: B): B =
        cur match {
          case Halt() => acc
          case Await(recv) =>
            val next = if (ss.hasNext) recv(Some(ss.next))
                       else recv(None)
            go(ss, next, acc)
          case Emit(h, t) => go(ss, t, g(acc, h))
        }
      val s = io.Source.fromFile(f)
      try go(s.getLines, p, z)
      finally s.close
    }
/*
* Exercise 9: Write a program that reads degrees fahrenheit as `Double` values from a file,
* converts each temperature to celsius, and writes results to another file.
*/
def convertFahrenheit: Process[String,String] = ???
def toCelsius(fahrenheit: Double): Double =
(5.0 / 9.0) * (fahrenheit - 32.0)
}
}
object GeneralizedStreamTransducers {
/*
Our generalized process type is parameterized on the protocol used for
communicating with the driver. This works similarly to the `IO` type
we defined in chapter 13. The `Await` constructor emits a request of
type `F[A]`, and receives a response of type `Either[Throwable,A]`:
trait Process[F,A]
case class Await[F[_],A,O](
req: F[A],
recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O]
case class Halt[F[_],O](err: Throwable) extends Process[F,O]
case class Emit[F[_],O](head: O, tail: Process[F,O]) extends Process[F,O]
The `Await` constructor may now receive a successful result or an error.
The `Halt` constructor now has a _reason_ for termination, which may be
either normal termination indicated by the special exception `End`,
forceful terimation, indicated by the special exception `Kill`,
or some other error.
We'll use the improved `Await` and `Halt` cases together to ensure
that all resources get released, even in the event of exceptions.
*/
trait Process[F[_],O] {
import Process._
/*
* Many of the same operations can be defined for this generalized
* `Process` type, regardless of the choice of `F`.
*/
    /* Transforms each emitted value with `f`. `Try` (the helper defined in
     * the companion object) converts an exception thrown by `f` into a
     * `Halt` carrying that exception. */
    def map[O2](f: O => O2): Process[F,O2] = this match {
      case Await(req,recv) =>
        Await(req, recv andThen (_ map f))
      case Emit(h, t) => Try { Emit(f(h), t map f) }
      case Halt(err) => Halt(err)
    }
def ++(p: => Process[F,O]): Process[F,O] =
this.onHalt {
case End => Try(p) // we consult `p` only on normal termination
case err => Halt(err)
}
/*
* Like `++`, but _always_ runs `p`, even if `this` halts with an error.
*/
def onComplete(p: => Process[F,O]): Process[F,O] =
this.onHalt {
case End => p.asFinalizer
case err => p.asFinalizer ++ Halt(err) // we always run `p`, but preserve any errors
}
def asFinalizer: Process[F,O] = this match {
case Emit(h, t) => Emit(h, t.asFinalizer)
case Halt(e) => Halt(e)
case Await(req,recv) => await(req) {
case Left(Kill) => this.asFinalizer
case x => recv(x)
}
}
def onHalt(f: Throwable => Process[F,O]): Process[F,O] = this match {
case Halt(e) => Try(f(e))
case Emit(h, t) => Emit(h, t.onHalt(f))
case Await(req,recv) => Await(req, recv andThen (_.onHalt(f)))
}
/*
* Anywhere we _call_ `f`, we catch exceptions and convert them to `Halt`.
* See the helper function `Try` defined below.
*/
def flatMap[O2](f: O => Process[F,O2]): Process[F,O2] =
this match {
case Halt(err) => Halt(err)
case Emit(o, t) => Try(f(o)) ++ t.flatMap(f)
case Await(req,recv) =>
Await(req, recv andThen (_ flatMap f))
}
def repeat: Process[F,O] =
this ++ this.repeat
def repeatNonempty: Process[F,O] = {
val cycle = (this.map(o => Some(o): Option[O]) ++ emit(None)).repeat
// cut off the cycle when we see two `None` values in a row, as this
// implies `this` has produced no values during an iteration
val trimmed = cycle |> window2 |> (takeWhile {
case (Some(None), None) => false
case _ => true
})
trimmed.map(_._2).flatMap {
case None => Halt(End)
case Some(o) => emit(o)
}
}
/*
* Exercise 10: This function is defined only if given a `MonadCatch[F]`.
* Unlike the simple `runLog` interpreter defined in the companion object
* below, this is not tail recursive and responsibility for stack safety
* is placed on the `Monad` instance.
*/
def runLog(implicit F: MonadCatch[F]): F[IndexedSeq[O]] = ???
/*
* We define `Process1` as a type alias - see the companion object
* for `Process` below. Using that, we can then define `|>` once
* more. The definition is extremely similar to our previous
* definition. We again use the helper function, `feed`, to take
* care of the case where `this` is emitting values while `p2`
* is awaiting these values.
*
* The one subtlety is we make sure that if `p2` halts, we
* `kill` this process, giving it a chance to run any cleanup
* actions (like closing file handles, etc).
*/
def |>[O2](p2: Process1[O,O2]): Process[F,O2] = {
p2 match {
case Halt(e) => this.kill onHalt { e2 => Halt(e) ++ Halt(e2) }
case Emit(h, t) => Emit(h, this |> t)
case Await(req,recv) => this match {
case Halt(err) => Halt(err) |> recv(Left(err))
case Emit(h,t) => t |> Try(recv(Right(h)))
case Await(req0,recv0) => await(req0)(recv0 andThen (_ |> p2))
}
}
}
@annotation.tailrec
final def kill[O2]: Process[F,O2] = this match {
case Await(req,recv) => recv(Left(Kill)).drain.onHalt {
case Kill => Halt(End) // we convert the `Kill` exception back to normal termination
case e => Halt(e)
}
case Halt(e) => Halt(e)
case Emit(h, t) => t.kill
}
/** Alias for `this |> p2`. */
def pipe[O2](p2: Process1[O,O2]): Process[F,O2] =
this |> p2
final def drain[O2]: Process[F,O2] = this match {
case Halt(e) => Halt(e)
case Emit(h, t) => t.drain
case Await(req,recv) => Await(req, recv andThen (_.drain))
}
def filter(f: O => Boolean): Process[F,O] =
this |> Process.filter(f)
def take(n: Int): Process[F,O] =
this |> Process.take(n)
def once: Process[F,O] = take(1)
/*
* Use a `Tee` to interleave or combine the outputs of `this` and
* `p2`. This can be used for zipping, interleaving, and so forth.
* Nothing requires that the `Tee` read elements from each
* `Process` in lockstep. It could read fifty elements from one
* side, then two elements from the other, then combine or
* interleave these values in some way, etc.
*
* This definition uses two helper functions, `feedL` and `feedR`,
* which feed the `Tee` in a tail-recursive loop as long as
* it is awaiting input.
*/
def tee[O2,O3](p2: Process[F,O2])(t: Tee[O,O2,O3]): Process[F,O3] = {
t match {
case Halt(e) => this.kill onComplete p2.kill onComplete Halt(e)
case Emit(h,t) => Emit(h, (this tee p2)(t))
case Await(side, recv) => side.get match {
case Left(isO) => this match {
case Halt(e) => p2.kill onComplete Halt(e)
case Emit(o,ot) => (ot tee p2)(Try(recv(Right(o))))
case Await(reqL, recvL) =>
await(reqL)(recvL andThen (this2 => this2.tee(p2)(t)))
}
case Right(isO2) => p2 match {
case Halt(e) => this.kill onComplete Halt(e)
case Emit(o2,ot) => (this tee ot)(Try(recv(Right(o2))))
case Await(reqR, recvR) =>
await(reqR)(recvR andThen (p3 => this.tee(p3)(t)))
}
}
}
}
def zipWith[O2,O3](p2: Process[F,O2])(f: (O,O2) => O3): Process[F,O3] =
(this tee p2)(Process.zipWith(f))
def zip[O2](p2: Process[F,O2]): Process[F,(O,O2)] =
zipWith(p2)((_,_))
def to[O2](sink: Sink[F,O]): Process[F,Unit] =
join { (this zipWith sink)((o,f) => f(o)) }
def through[O2](p2: Channel[F, O, O2]): Process[F,O2] =
join { (this zipWith p2)((o,f) => f(o)) }
}
object Process {
case class Await[F[_],A,O](
req: F[A],
recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O]
case class Emit[F[_],O](
head: O,
tail: Process[F,O]) extends Process[F,O]
case class Halt[F[_],O](err: Throwable) extends Process[F,O]
def emit[F[_],O](
head: O,
tail: Process[F,O] = Halt[F,O](End)): Process[F,O] =
Emit(head, tail)
def await[F[_],A,O](req: F[A])(recv: Either[Throwable,A] => Process[F,O]): Process[F,O] =
Await(req, recv)
/**
* Helper function to safely produce `p`, or gracefully halt
* with an error if an exception is thrown.
*/
def Try[F[_],O](p: => Process[F,O]): Process[F,O] =
try p
catch { case e: Throwable => Halt(e) }
/*
* Safely produce `p`, or run `cleanup` and halt gracefully with the
* exception thrown while evaluating `p`.
*/
def TryOr[F[_],O](p: => Process[F,O])(cleanup: Process[F,O]): Process[F,O] =
try p
catch { case e: Throwable => cleanup ++ Halt(e) }
/*
* Safely produce `p`, or run `cleanup` or `fallback` if an exception
* occurs while evaluating `p`.
*/
def TryAwait[F[_],O](p: => Process[F,O])(fallback: Process[F,O], cleanup: Process[F,O]): Process[F,O] =
try p
catch {
case End => fallback
case e: Throwable => cleanup ++ Halt(e)
}
/* Our generalized `Process` type can represent sources! */
import fpinscala.iomonad.IO
/* Special exception indicating normal termination */
case object End extends Exception
/* Special exception indicating forceful termination */
case object Kill extends Exception
/*
* A `Process[F,O]` where `F` is a monad like `IO` can be thought of
* as a source.
*/
/*
* Here is a simple tail recursive function to collect all the
* output of a `Process[IO,O]`. Notice we are using the fact
* that `IO` can be `run` to produce either a result or an
* exception.
*/
def runLog[O](src: Process[IO,O]): IO[IndexedSeq[O]] = IO {
val E = java.util.concurrent.Executors.newFixedThreadPool(4)
@annotation.tailrec
def go(cur: Process[IO,O], acc: IndexedSeq[O]): IndexedSeq[O] =
cur match {
case Emit(h,t) => go(t, acc :+ h)
case Halt(End) => acc
case Halt(err) => throw err
case Await(req,recv) =>
val next =
try recv(Right(fpinscala.iomonad.unsafePerformIO(req)(E)))
catch { case err: Throwable => recv(Left(err)) }
go(next, acc)
}
try go(src, IndexedSeq())
finally E.shutdown
}
/*
* We can write a version of collect that works for any `Monad`.
* See the definition in the body of `Process`.
*/
import java.io.{BufferedReader,FileReader}
lazy val p: Process[IO, String] =
await(IO(new BufferedReader(new FileReader("lines.txt")))) {
case Right(b) =>
lazy val next: Process[IO,String] = await(IO(b.readLine)) {
case Left(e) => await(IO(b.close))(_ => Halt(e))
case Right(line) => Emit(line, next)
}
next
case Left(e) => Halt(e)
}
/*
* Generic combinator for producing a `Process[IO,O]` from some
* effectful `O` source. The source is tied to some resource,
* `R` (like a file handle) that we want to ensure is released.
* See `lines` below for an example use.
*/
def resource[R,O](acquire: IO[R])(
use: R => Process[IO,O])(
release: R => Process[IO,O]): Process[IO,O] =
eval(acquire) flatMap { r => use(r).onComplete(release(r)) }
/*
* Like `resource`, but `release` is a single `IO` action.
*/
def resource_[R,O](acquire: IO[R])(
use: R => Process[IO,O])(
release: R => IO[Unit]): Process[IO,O] =
resource(acquire)(use)(release andThen (eval_[IO,Unit,O]))
/*
* Create a `Process[IO,O]` from the lines of a file, using
* the `resource` combinator above to ensure the file is closed
* when processing the stream of lines is finished.
*/
def lines(filename: String): Process[IO,String] =
resource
{ IO(io.Source.fromFile(filename)) }
{ src =>
lazy val iter = src.getLines // a stateful iterator
def step = if (iter.hasNext) Some(iter.next) else None
lazy val lines: Process[IO,String] = eval(IO(step)).flatMap {
case None => Halt(End)
case Some(line) => Emit(line, lines)
}
lines
}
{ src => eval_ { IO(src.close) } }
/* Exercise 11: Implement `eval`, `eval_`, and use these to implement `lines`. */
def eval[F[_],A](a: F[A]): Process[F,A] = ???
/* Evaluate the action purely for its effects. */
def eval_[F[_],A,B](a: F[A]): Process[F,B] = ???
/* Helper function with better type inference. */
def evalIO[A](a: IO[A]): Process[IO,A] =
eval[IO,A](a)
/*
* We now have nice, resource safe effectful sources, but we don't
* have any way to transform them or filter them. Luckily we can
* still represent the single-input `Process` type we introduced
* earlier, which we'll now call `Process1`.
*/
case class Is[I]() {
sealed trait f[X]
val Get = new f[I] {}
}
def Get[I] = Is[I]().Get
type Process1[I,O] = Process[Is[I]#f, O]
/* Some helper functions to improve type inference. */
def await1[I,O](
recv: I => Process1[I,O],
fallback: => Process1[I,O] = halt1[I,O]): Process1[I, O] =
Await(Get[I], (e: Either[Throwable,I]) => e match {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(i) => Try(recv(i))
})
def emit1[I,O](h: O, tl: Process1[I,O] = halt1[I,O]): Process1[I,O] =
emit(h, tl)
def halt1[I,O]: Process1[I,O] = Halt[Is[I]#f, O](End)
def lift[I,O](f: I => O): Process1[I,O] =
await1[I,O]((i:I) => emit(f(i))) repeat
def filter[I](f: I => Boolean): Process1[I,I] =
await1[I,I](i => if (f(i)) emit(i) else halt1) repeat
// we can define take, takeWhile, and so on as before
def take[I](n: Int): Process1[I,I] =
if (n <= 0) halt1
else await1[I,I](i => emit(i, take(n-1)))
def takeWhile[I](f: I => Boolean): Process1[I,I] =
await1(i =>
if (f(i)) emit(i, takeWhile(f))
else halt1)
def dropWhile[I](f: I => Boolean): Process1[I,I] =
await1(i =>
if (f(i)) dropWhile(f)
else emit(i,id))
def id[I]: Process1[I,I] =
await1((i: I) => emit(i, id))
def window2[I]: Process1[I,(Option[I],I)] = {
def go(prev: Option[I]): Process1[I,(Option[I],I)] =
await1[I,(Option[I],I)](i => emit(prev -> i) ++ go(Some(i)))
go(None)
}
/** Emits `sep` in between each input received. */
def intersperse[I](sep: I): Process1[I,I] =
await1[I,I](i => emit1(i) ++ id.flatMap(i => emit1(sep) ++ emit1(i)))
/*
We sometimes need to construct a `Process` that will pull values
from multiple input sources. For instance, suppose we want to
'zip' together two files, `f1.txt` and `f2.txt`, combining
corresponding lines in some way. Using the same trick we used for
`Process1`, we can create a two-input `Process` which can request
values from either the 'left' stream or the 'right' stream. We'll
call this a `Tee`, after the letter 'T', which looks like a
little diagram of two inputs being combined into one output.
*/
case class T[I,I2]() {
sealed trait f[X] { def get: Either[I => X, I2 => X] }
val L = new f[I] { def get = Left(identity) }
val R = new f[I2] { def get = Right(identity) }
}
def L[I,I2] = T[I,I2]().L
def R[I,I2] = T[I,I2]().R
type Tee[I,I2,O] = Process[T[I,I2]#f, O]
/* Again some helper functions to improve type inference. */
def haltT[I,I2,O]: Tee[I,I2,O] =
Halt[T[I,I2]#f,O](End)
def awaitL[I,I2,O](recv: I => Tee[I,I2,O],
fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I,O](L) {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(a) => Try(recv(a))
}
def awaitR[I,I2,O](recv: I2 => Tee[I,I2,O],
fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I2,O](R) {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(a) => Try(recv(a))
}
def emitT[I,I2,O](h: O, tl: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
emit(h, tl)
def zipWith[I,I2,O](f: (I,I2) => O): Tee[I,I2,O] =
awaitL[I,I2,O](i =>
awaitR (i2 => emitT(f(i,i2)))) repeat
def zip[I,I2]: Tee[I,I2,(I,I2)] = zipWith((_,_))
/* Ignores all input from left. */
def passR[I,I2]: Tee[I,I2,I2] = awaitR(emitT(_, passR))
/* Ignores input from the right. */
def passL[I,I2]: Tee[I,I2,I] = awaitL(emitT(_, passL))
/* Alternate pulling values from the left and the right inputs. */
def interleaveT[I]: Tee[I,I,I] =
awaitL[I,I,I](i =>
awaitR (i2 => emitT(i) ++ emitT(i2))) repeat
/*
Our `Process` type can also represent effectful sinks (like a file).
A `Sink` is simply a source of effectful functions! See the
definition of `to` in `Process` for an example of how to feed a
`Process` to a `Sink`.
*/
type Sink[F[_],O] = Process[F, O => Process[F,Unit]]
import java.io.FileWriter
/* A `Sink` which writes input strings to the given file. */
def fileW(file: String, append: Boolean = false): Sink[IO,String] =
resource[FileWriter, String => Process[IO,Unit]]
{ IO { new FileWriter(file, append) }}
{ w => constant { (s: String) => eval[IO,Unit](IO(w.write(s))) }}
{ w => eval_(IO(w.close)) }
/* The infinite, constant stream. */
def constant[A](a: A): Process[IO,A] =
eval(IO(a)).flatMap { a => Emit(a, constant(a)) }
/* Exercise 12: Implement `join`. Notice this is the standard monadic combinator! */
def join[F[_],A](p: Process[F,Process[F,A]]): Process[F,A] = ???
/*
* An example use of the combinators we have so far: incrementally
* convert the lines of a file from fahrenheit to celsius.
*/
import fpinscala.iomonad.IO0.fahrenheitToCelsius
lazy val converter: Process[IO,Unit] =
lines("fahrenheit.txt").
filter(line => !line.startsWith("#") && !line.trim.isEmpty).
map(line => fahrenheitToCelsius(line.toDouble).toString).
pipe(intersperse("\\n")).
to(fileW("celsius.txt")).
drain
/*
More generally, we can feed a `Process` through an effectful
channel which returns a value other than `Unit`.
*/
type Channel[F[_],I,O] = Process[F, I => Process[F,O]]
/*
* Here is an example, a JDBC query runner which returns the
* stream of rows from the result set of the query. We have
* the channel take a `Connection => PreparedStatement` as
* input, so code that uses this channel does not need to be
* responsible for knowing how to obtain a `Connection`.
*/
import java.sql.{Connection, PreparedStatement, ResultSet}
def query(conn: IO[Connection]):
Channel[IO, Connection => PreparedStatement, Map[String,Any]] =
resource_
{ conn }
{ conn => constant { (q: Connection => PreparedStatement) =>
resource_
{ IO {
val rs = q(conn).executeQuery
val ncols = rs.getMetaData.getColumnCount
val cols = (1 to ncols).map(rs.getMetaData.getColumnName)
(rs, cols)
}}
{ case (rs, cols) =>
def step =
if (!rs.next) None
else Some(cols.map(c => (c, rs.getObject(c): Any)).toMap)
lazy val rows: Process[IO,Map[String,Any]] =
eval(IO(step)).flatMap {
case None => Halt(End)
case Some(row) => Emit(row, rows)
}
rows
}
{ p => IO { p._1.close } } // close the ResultSet
}}
{ c => IO(c.close) }
/*
* We can allocate resources dynamically when defining a `Process`.
* As an example, this program reads a list of filenames to process
* _from another file_, opening each file, processing it and closing
* it promptly.
*/
lazy val convertAll: Process[IO,Unit] = (for {
out <- fileW("celsius.txt").once
file <- lines("fahrenheits.txt")
_ <- lines(file).
map(line => fahrenheitToCelsius(line.toDouble)).
flatMap(celsius => out(celsius.toString))
} yield ()) drain
/*
* Just by switching the order of the `flatMap` calls, we can output
* to multiple files.
*/
lazy val convertMultisink: Process[IO,Unit] = (for {
file <- lines("fahrenheits.txt")
_ <- lines(file).
map(line => fahrenheitToCelsius(line.toDouble)).
map(_ toString).
to(fileW(file + ".celsius"))
} yield ()) drain
/*
* We can attach filters or other transformations at any point in the
* program, for example:
*/
lazy val convertMultisink2: Process[IO,Unit] = (for {
file <- lines("fahrenheits.txt")
_ <- lines(file).
filter(!_.startsWith("#")).
map(line => fahrenheitToCelsius(line.toDouble)).
filter(_ > 0). // ignore below zero temperatures
map(_ toString).
to(fileW(file + ".celsius"))
} yield ()) drain
}
}
object ProcessTest extends App {
import GeneralizedStreamTransducers._
import fpinscala.iomonad.IO
import Process._
val p = eval(IO { println("woot"); 1 }).repeat
val p2 = eval(IO { println("cleanup"); 2 } ).onHalt {
case Kill => println { "cleanup was killed, instead of bring run" }; Halt(Kill)
case e => Halt(e)
}
println { Process.runLog { p2.onComplete(p2).onComplete(p2).take(1).take(1) } }
println { Process.runLog(converter) }
// println { Process.collect(Process.convertAll) }
}
| fpinscala-muc/fpinscala-ankitgit | exercises/src/main/scala/fpinscala/streamingio/StreamingIO.scala | Scala | mit | 34,104 |
package com.twitter.util.validation.internal
package object validators {

  /**
   * Renders an `Iterable` for use in validation error messages. Each
   * element is `toString`-ed and trimmed, and empty entries are removed.
   * Multiple remaining values render as `[a, b]`, a single value renders
   * bare, and no values render as `<empty>`.
   *
   * Simplified from the original: the `nonEmpty && size > 1` guard was
   * redundant (`size > 1` implies non-empty), as was wrapping
   * `mkString`'s result in an `s"${...}"` interpolation.
   */
  private[validation] def mkString(value: Iterable[_]): String = {
    val trimmed = value.map(_.toString.trim).filter(_.nonEmpty)
    if (trimmed.isEmpty) "<empty>"
    else if (trimmed.size == 1) trimmed.head
    else trimmed.mkString("[", ", ", "]")
  }
}
| twitter/util | util-validator/src/main/scala/com/twitter/util/validation/internal/validators/package.scala | Scala | apache-2.0 | 360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import java.io._
import scala.collection.mutable
import scala.util.Either
object Optimizer {
  /**
   * Wraps an [[Optimizer]] in a KVStore updater that lazily creates and
   * caches per-key optimizer state, and can serialize / deserialize that
   * cached state.
   */
  def getUpdater(optimizer: Optimizer): MXKVStoreUpdater = {
    new MXKVStoreUpdater with MXKVStoreCachedStates {
      override def update(index: Int, grad: NDArray, weight: NDArray): Unit = {
        // Reuse the cached state for this key, creating it on first update.
        val state =
          if (states.contains(index)) {
            states.get(index).get
          } else {
            val newState = optimizer.createState(index, weight)
            states.put(index, newState)
            newState
          }
        optimizer.update(index, weight, grad, state)
      }

      override def dispose(): Unit = {
        states.values.foreach(optimizer.disposeState)
        states.clear()
      }

      override def serializeState(): Array[Byte] = {
        // BUG FIX: only non-null states are written to the stream, so the
        // leading entry count must be the number of non-null states.
        // Previously states.size was written while null-valued entries were
        // skipped, which made deserializeState read past the end of the
        // stream (EOFException) whenever any cached state was null.
        val bos = new ByteArrayOutputStream()
        try {
          val out = new ObjectOutputStream(bos)
          val nonNullStates = states.filter { case (_, v) => v != null }
          out.writeInt(nonNullStates.size)
          nonNullStates.foreach { case (k, v) =>
            out.writeInt(k)
            val stateBytes = optimizer.serializeState(v)
            if (stateBytes == null) {
              // A state may itself serialize to nothing; record zero length.
              out.writeInt(0)
            } else {
              out.writeInt(stateBytes.length)
              out.write(stateBytes)
            }
          }
          out.flush()
          bos.toByteArray
        } finally {
          try {
            bos.close()
          } catch {
            case _: Throwable =>
          }
        }
      }

      override def deserializeState(bytes: Array[Byte]): Unit = {
        val bis = new ByteArrayInputStream(bytes)
        var in: ObjectInputStream = null
        try {
          in = new ObjectInputStream(bis)
          val size = in.readInt()
          (0 until size).foreach(_ => {
            val key = in.readInt()
            val bytesLength = in.readInt()
            // Zero length marks a state whose serialized form was null.
            val value =
              if (bytesLength > 0) {
                val bytes = Array.fill[Byte](bytesLength)(0)
                in.readFully(bytes)
                optimizer.deserializeState(bytes)
              } else {
                null
              }
            states.update(key, value)
          })
        } finally {
          try {
            if (in != null) {
              in.close()
            }
          } catch {
            case _: Throwable =>
          }
        }
      }
    }
  }
}
abstract class Optimizer extends Serializable {
protected val lrMult: mutable.Map[Either[Int, String], Float] =
mutable.HashMap.empty[Either[Int, String], Float]
protected val wdMult: mutable.Map[Either[Int, String], Float] =
mutable.HashMap.empty[Either[Int, String], Float]
protected var numUpdate: Int = 0
protected val indexUpdateCount: mutable.Map[Int, Int] = mutable.HashMap.empty[Int, Int]
protected var specialized: Boolean = false
protected val weightSet: mutable.Set[Int] = mutable.HashSet.empty[Int]
protected var rescaleGrad: Float = 1
@transient protected var symbol: Symbol = null
protected var idx2name: Map[Int, String] = null
/**
* Update the parameters.
* @param index An unique integer key used to index the parameters
* @param weight weight ndarray
* @param grad grad ndarray
* @param state NDArray or other objects returned by initState
* The auxiliary state used in optimization.
*/
// TODO: make state a ClassTag
def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit
// Create additional optimizer state such as momentum.
// TODO: make returned state a ClassTag
def createState(index: Int, weight: NDArray): AnyRef
// Dispose the state it created
def disposeState(state: AnyRef): Unit
def serializeState(state: AnyRef): Array[Byte]
def deserializeState(bytes: Array[Byte]): AnyRef
// Set individual learning rate scale for parameters
@deprecated("Use setLrMult instead.")
def setLrScale(lrScale: Map[Int, Float]): Unit = {
val argsLrScale: Map[Either[Int, String], Float] = lrScale.map { case (k, v) => Left(k) -> v }
setLrMult(argsLrScale)
}
/**
* Sets an individual learning rate multiplier for each parameter.
* If you specify a learning rate multiplier for a parameter, then
* the learning rate for the parameter will be set as the product of
* the global learning rate and its multiplier.
* note:: The default learning rate multiplier of a `Variable`
* can be set with `lr_mult` argument in the constructor.
* @param argsLrMult: Map[Either[Int, String], Float]
* For each of its key-value entries, the learning rate multipler for the
* parameter specified in the key will be set as the given value.
*
* You can specify the parameter with either its name or its index.
* If you use the name, you should also call the `setSymbol` method first,
* and the name you specified in the key of `argsLrMult` should match
* the name of the parameter in the `sym` you pass to `setSymbol` method.
* If you use the index, it should correspond to the index of the parameter
* used in the `update` method.
*
* Specifying a parameter by its index is only supported for backward
* compatibility, and we recommend to use the name instead.
*/
def setLrMult(argsLrMult: Map[Either[Int, String], Float]): Unit = {
argsLrMult.foreach { case (k, v) => this.lrMult(k) = v }
}
/**
* Sets an individual weight decay multiplier for each parameter.
*
* By default, the weight decay multipler is set as 0 for all
* parameters whose name don't end with ``_weight`` or ``_gamma``, if
* you call the `setIdx2Name` method to set idx2name.
*
* note:: The default weight decay multiplier for a `Variable`
* can be set with its `wd_mult` argument in the constructor.
* @param argsWdMult: Map[Either[Int, String], Float]
* For each of its key-value entries, the learning rate multipler for the
* parameter specified in the key will be set as the given value.
*
* You can specify the parameter with either its name or its index.
* If you use the name, you should also call the `setSymbol` method first,
* and the name you specified in the key of `argsWdMult` should match
* the name of the parameter in the `sym` you pass to `setSymbol` method.
* If you use the index, it should correspond to the index of the parameter
* used in the `update` method.
*
* Specifying a parameter by its index is only supported for backward
* compatibility, and we recommend to use the name instead.
*/
def setWdMult(argsWdMult: Map[Either[Int, String], Float]): Unit = {
argsWdMult.foreach { case (k, v) => this.wdMult(k) = v }
}
def setArgNames(argNames: Seq[String]): Unit = {
if (argNames != null) {
specialized = true
var index = 0
argNames foreach { name =>
if (!name.endsWith("data") && !name.endsWith("label")) {
if (name.endsWith("weight")) {
weightSet.add(index)
}
index += 1
}
}
}
}
// Set rescaling factor of gradient.
def setRescaleGrad(rescaleGrad: Float): Unit = {
this.rescaleGrad = rescaleGrad
}
  /**
   * Attaches a symbol so that per-argument `__lr_mult__` / `__wd_mult__`
   * attributes declared on it seed the learning-rate and weight-decay
   * multiplier tables (keyed by argument name).
   */
  def setSymbol(sym: Symbol): Unit = {
    this.symbol = sym
    if (this.symbol != null) {
      val attr = this.symbol.attrMap
      for (name <- this.symbol.listArguments()) {
        if (attr.contains(name) && attr(name).contains("__lr_mult__")) {
          this.lrMult(Right(name)) = attr(name)("__lr_mult__").toFloat
        }
        if (attr.contains(name) && attr(name).contains("__wd_mult__")) {
          this.wdMult(Right(name)) = attr(name)("__wd_mult__").toFloat
        }
      }
    }
  }
def setIdx2Name(paramIdx2Name: Map[Int, String]): Unit = {
this.idx2name = paramIdx2Name
if (this.idx2name != null) {
for (n <- this.idx2name.values) {
if (!(n.endsWith("_weight") || n.endsWith("_gamma"))) {
this.wdMult(Right(n)) = 0f
}
}
}
}
/**
* update num_update
* @param index The index will be updated
*/
protected def updateCount(index: Int): Unit = {
val count = indexUpdateCount.getOrElseUpdate(index, 0) + 1
indexUpdateCount.update(index, count)
numUpdate = Math.max(count, numUpdate)
}
// Gets the learning rate given the index of the weight.
protected def getLr(index: Int, lr: Float): Float = {
var llr = lr
if (this.lrMult.contains(Left(index))) {
llr *= this.lrMult(Left(index))
} else if (this.idx2name != null && this.idx2name.contains(index)) {
llr *= this.lrMult.getOrElse(Right(this.idx2name(index)), 1.0f)
}
llr
}
// Gets weight decay for index.
protected def getWd(index: Int, wd: Float): Float = {
var lwd = if (specialized) {
if (this.weightSet.contains(index)) {
wd
} else {
0f
}
} else {
wd
}
if (this.wdMult.contains(Left(index))) {
lwd *= this.wdMult(Left(index))
} else if (this.idx2name != null && this.idx2name.contains(index)) {
lwd *= this.wdMult.getOrElse(Right(this.idx2name(index)), 1.0f)
}
lwd
}
}
trait MXKVStoreUpdater {
/**
* user-defined updater for the kvstore
* It's this updater's responsibility to delete recv and local
* @param key the key
* @param recv the pushed value on this key
* @param local the value stored on local on this key
*/
def update(key: Int, recv: NDArray, local: NDArray): Unit
def dispose(): Unit
// def serializeState(): Array[Byte]
// def deserializeState(bytes: Array[Byte]): Unit
}
/**
 * Mixin for updaters whose per-key cached state can be round-tripped
 * through a byte array via `serializeState`/`deserializeState`.
 */
trait MXKVStoreCachedStates {
  // Cached state objects, keyed by the integer kvstore key.
  protected val states = new scala.collection.mutable.HashMap[Int, AnyRef]
  /**
   * Serialize states to byte array
   * @return serialized states
   */
  def serializeState(): Array[Byte]
  /**
   * Update states with serialized results
   * @param bytes Generated by serializeState()
   */
  def deserializeState(bytes: Array[Byte]): Unit
}
| rahul003/mxnet | scala-package/core/src/main/scala/org/apache/mxnet/Optimizer.scala | Scala | apache-2.0 | 11,086 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.mimir
import org.specs2.mutable._
import com.precog.common._
import com.precog.yggdrasil._
import com.precog.common.Path
import scalaz._
import scalaz.std.list._
import com.precog.util.IdGen
import org.joda.time._
import org.joda.time.format._
trait TimePlusSpecs[M[+_]] extends Specification
    with EvaluatorTestSupport[M]
    with LongIdMemoryDatasetConsumer[M] { self =>

  import Function._
  import dag._
  import instructions._
  import library._

  val line = Line(1, 1, "")

  /** Evaluates `graph` with the default context, rethrowing any evaluation failure. */
  def testEval(graph: DepGraph): Set[SEvent] = {
    consumeEval(graph, defaultEvaluationContext) match {
      case Success(results) => results
      case Failure(error)   => throw error
    }
  }

  /**
   * Shared body for every example below: builds `op(load(path), offset)`,
   * evaluates it, and checks that the single-identity string results contain
   * exactly the expected ISO8601 timestamps.
   *
   * @param op       the time "plus" morphism under test, e.g. `BuiltInFunction2Op(YearsPlus)`
   * @param path     dataset path to load ("/hom/..." or "/het/...")
   * @param offset   the constant amount added to each timestamp
   * @param expected every timestamp the evaluation must produce; the result set
   *                 must also have exactly `expected.length` events
   */
  private def mustIncrementTo(op: BuiltInFunction2Op, path: String, offset: Long,
                              expected: String*) = {
    val input = Join(op, Cross(None),
      dag.AbsoluteLoad(Const(CString(path))(line))(line),
      Const(CLong(offset))(line))(line)

    val result = testEval(input)
    result must haveSize(expected.length)

    val strings = result collect {
      case (ids, SString(s)) if ids.length == 1 => s
    }
    // Mutable-spec expectations throw on failure, so checking element-by-element
    // is equivalent to the original varargs `contain(...)` call.
    expected foreach { s => strings must contain(s) }
    success
  }

  "time plus functions (homogeneous case)" should {
    "compute incrementation of positive number of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601", 5,
        "2015-04-29T09:37:52.599+08:00", "2016-02-21T20:09:59.165+09:00", "2016-09-06T06:44:52.848-10:00",
        "2017-02-11T09:11:33.394-07:00", "2017-12-28T22:38:19.430+06:00")
    }

    "compute incrementation of negative number of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601", -5,
        "2005-04-29T09:37:52.599+08:00", "2006-02-21T20:09:59.165+09:00", "2006-09-06T06:44:52.848-10:00",
        "2007-02-11T09:11:33.394-07:00", "2007-12-28T22:38:19.430+06:00")
    }

    "compute incrementation of zero of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601", 0,
        "2010-04-29T09:37:52.599+08:00", "2011-02-21T20:09:59.165+09:00", "2011-09-06T06:44:52.848-10:00",
        "2012-02-11T09:11:33.394-07:00", "2012-12-28T22:38:19.430+06:00")
    }

    "compute incrementation of months" in {
      mustIncrementTo(BuiltInFunction2Op(MonthsPlus), "/hom/iso8601", 5,
        "2010-09-29T09:37:52.599+08:00", "2011-07-21T20:09:59.165+09:00", "2012-02-06T06:44:52.848-10:00",
        "2012-07-11T09:11:33.394-07:00", "2013-05-28T22:38:19.430+06:00")
    }

    "compute incrementation of weeks" in {
      mustIncrementTo(BuiltInFunction2Op(WeeksPlus), "/hom/iso8601", 5,
        "2011-10-11T06:44:52.848-10:00", "2012-03-17T09:11:33.394-07:00", "2011-03-28T20:09:59.165+09:00",
        "2013-02-01T22:38:19.430+06:00", "2010-06-03T09:37:52.599+08:00")
    }

    "compute incrementation of days" in {
      mustIncrementTo(BuiltInFunction2Op(DaysPlus), "/hom/iso8601", 5,
        "2010-05-04T09:37:52.599+08:00", "2011-02-26T20:09:59.165+09:00", "2011-09-11T06:44:52.848-10:00",
        "2012-02-16T09:11:33.394-07:00", "2013-01-02T22:38:19.430+06:00")
    }

    "compute incrementation of hours" in {
      mustIncrementTo(BuiltInFunction2Op(HoursPlus), "/hom/iso8601", 5,
        "2010-04-29T14:37:52.599+08:00", "2011-02-22T01:09:59.165+09:00", "2011-09-06T11:44:52.848-10:00",
        "2012-02-11T14:11:33.394-07:00", "2012-12-29T03:38:19.430+06:00")
    }

    "compute incrementation of minutes" in {
      mustIncrementTo(BuiltInFunction2Op(MinutesPlus), "/hom/iso8601", 5,
        "2010-04-29T09:42:52.599+08:00", "2011-02-21T20:14:59.165+09:00", "2011-09-06T06:49:52.848-10:00",
        "2012-02-11T09:16:33.394-07:00", "2012-12-28T22:43:19.430+06:00")
    }

    "compute incrementation of seconds" in {
      mustIncrementTo(BuiltInFunction2Op(SecondsPlus), "/hom/iso8601", 5,
        "2010-04-29T09:37:57.599+08:00", "2011-02-21T20:10:04.165+09:00", "2011-09-06T06:44:57.848-10:00",
        "2012-02-11T09:11:38.394-07:00", "2012-12-28T22:38:24.430+06:00")
    }

    "compute incrementation of ms" in {
      mustIncrementTo(BuiltInFunction2Op(MillisPlus), "/hom/iso8601", 5,
        "2010-04-29T09:37:52.604+08:00", "2011-02-21T20:09:59.170+09:00", "2011-09-06T06:44:52.853-10:00",
        "2012-02-11T09:11:33.399-07:00", "2012-12-28T22:38:19.435+06:00")
    }
  }

  "time plus functions (heterogeneous case)" should {
    "compute incrementation of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/het/iso8601", 5,
        "2015-04-29T09:37:52.599+08:00", "2016-02-21T20:09:59.165+09:00", "2016-09-06T06:44:52.848-10:00",
        "2017-02-11T09:11:33.394-07:00", "2017-12-28T22:38:19.430+06:00")
    }

    "compute incrementation of months" in {
      mustIncrementTo(BuiltInFunction2Op(MonthsPlus), "/het/iso8601", 5,
        "2010-09-29T09:37:52.599+08:00", "2011-07-21T20:09:59.165+09:00", "2012-02-06T06:44:52.848-10:00",
        "2012-07-11T09:11:33.394-07:00", "2013-05-28T22:38:19.430+06:00")
    }

    "compute incrementation of weeks" in {
      mustIncrementTo(BuiltInFunction2Op(WeeksPlus), "/het/iso8601", 5,
        "2011-10-11T06:44:52.848-10:00", "2012-03-17T09:11:33.394-07:00", "2011-03-28T20:09:59.165+09:00",
        "2013-02-01T22:38:19.430+06:00", "2010-06-03T09:37:52.599+08:00")
    }

    "compute incrementation of days" in {
      mustIncrementTo(BuiltInFunction2Op(DaysPlus), "/het/iso8601", 5,
        "2010-05-04T09:37:52.599+08:00", "2011-02-26T20:09:59.165+09:00", "2011-09-11T06:44:52.848-10:00",
        "2012-02-16T09:11:33.394-07:00", "2013-01-02T22:38:19.430+06:00")
    }

    "compute incrementation of hours" in {
      mustIncrementTo(BuiltInFunction2Op(HoursPlus), "/het/iso8601", 5,
        "2010-04-29T14:37:52.599+08:00", "2011-02-22T01:09:59.165+09:00", "2011-09-06T11:44:52.848-10:00",
        "2012-02-11T14:11:33.394-07:00", "2012-12-29T03:38:19.430+06:00")
    }

    "compute incrementation of minutes" in {
      mustIncrementTo(BuiltInFunction2Op(MinutesPlus), "/het/iso8601", 5,
        "2010-04-29T09:42:52.599+08:00", "2011-02-21T20:14:59.165+09:00", "2011-09-06T06:49:52.848-10:00",
        "2012-02-11T09:16:33.394-07:00", "2012-12-28T22:43:19.430+06:00")
    }

    "compute incrementation of seconds" in {
      mustIncrementTo(BuiltInFunction2Op(SecondsPlus), "/het/iso8601", 5,
        "2010-04-29T09:37:57.599+08:00", "2011-02-21T20:10:04.165+09:00", "2011-09-06T06:44:57.848-10:00",
        "2012-02-11T09:11:38.394-07:00", "2012-12-28T22:38:24.430+06:00")
    }

    "compute incrementation of ms" in {
      mustIncrementTo(BuiltInFunction2Op(MillisPlus), "/het/iso8601", 5,
        "2010-04-29T09:37:52.604+08:00", "2011-02-21T20:09:59.170+09:00", "2011-09-06T06:44:52.853-10:00",
        "2012-02-11T09:11:33.399-07:00", "2012-12-28T22:38:19.435+06:00")
    }
  }

  "time plus functions (homogeneous case across slices)" should {
    "compute incrementation of positive number of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601AcrossSlices", 5,
        "2017-03-04T12:19:00.040Z", "2014-08-04T04:52:17.443Z", "2014-05-18T11:33:38.358+11:00",
        "2014-10-29T02:43:41.657+04:00", "2012-02-04T10:58:14.041-01:00", "2014-07-17T10:30:16.115+07:00",
        "2014-05-02T01:14:41.555-10:00", "2017-07-30T13:18:40.252-03:00", "2016-02-15T13:49:53.937+07:00",
        "2016-10-27T01:11:04.423-04:00", "2013-01-10T18:36:48.745-03:00", "2017-10-11T00:36:31.692-02:00",
        "2016-08-11T19:29:55.119+05:00", "2015-02-09T02:20:17.040-05:00", "2017-12-28T22:38:19.430+06:00",
        "2016-03-06T13:56:56.877-02:00", "2012-03-24T04:49:22.259-09:00", "2017-03-14T03:48:21.874Z",
        "2013-05-23T17:31:37.488Z", "2016-02-10T14:53:34.278-01:00", "2013-03-06T21:02:28.910-11:00",
        "2017-08-15T21:05:04.684Z")
    }

    "compute incrementation of negative number of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601AcrossSlices", -5,
        "2006-03-06T13:56:56.877-02:00", "2007-08-15T21:05:04.684Z", "2004-05-18T11:33:38.358+11:00",
        "2002-02-04T10:58:14.041-01:00", "2007-03-14T03:48:21.874Z", "2006-02-10T14:53:34.278-01:00",
        "2004-07-17T10:30:16.115+07:00", "2007-03-04T12:19:00.040Z", "2006-02-15T13:49:53.937+07:00",
        "2004-05-02T01:14:41.555-10:00", "2003-01-10T18:36:48.745-03:00", "2003-05-23T17:31:37.488Z",
        "2007-10-11T00:36:31.692-02:00", "2007-12-28T22:38:19.430+06:00", "2004-08-04T04:52:17.443Z",
        "2006-08-11T19:29:55.119+05:00", "2007-07-30T13:18:40.252-03:00", "2006-10-27T01:11:04.423-04:00",
        "2004-10-29T02:43:41.657+04:00", "2005-02-09T02:20:17.040-05:00", "2003-03-06T21:02:28.910-11:00",
        "2002-03-24T04:49:22.259-09:00")
    }

    "compute incrementation of zero of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/hom/iso8601AcrossSlices", 0,
        "2008-03-06T21:02:28.910-11:00", "2008-05-23T17:31:37.488Z", "2009-07-17T10:30:16.115+07:00",
        "2011-03-06T13:56:56.877-02:00", "2012-12-28T22:38:19.430+06:00", "2008-01-10T18:36:48.745-03:00",
        "2012-08-15T21:05:04.684Z", "2011-08-11T19:29:55.119+05:00", "2007-02-04T10:58:14.041-01:00",
        "2012-10-11T00:36:31.692-02:00", "2009-05-02T01:14:41.555-10:00", "2011-02-10T14:53:34.278-01:00",
        "2009-10-29T02:43:41.657+04:00", "2010-02-09T02:20:17.040-05:00", "2009-05-18T11:33:38.358+11:00",
        "2012-07-30T13:18:40.252-03:00", "2012-03-14T03:48:21.874Z", "2009-08-04T04:52:17.443Z",
        "2011-02-15T13:49:53.937+07:00", "2007-03-24T04:49:22.259-09:00", "2012-03-04T12:19:00.040Z",
        "2011-10-27T01:11:04.423-04:00")
    }

    "compute incrementation of months" in {
      mustIncrementTo(BuiltInFunction2Op(MonthsPlus), "/hom/iso8601AcrossSlices", 5,
        "2010-03-29T02:43:41.657+04:00", "2012-12-30T13:18:40.252-03:00", "2012-08-04T12:19:00.040Z",
        "2007-07-04T10:58:14.041-01:00", "2009-12-17T10:30:16.115+07:00", "2013-03-11T00:36:31.692-02:00",
        "2012-03-27T01:11:04.423-04:00", "2008-06-10T18:36:48.745-03:00", "2012-08-14T03:48:21.874Z",
        "2009-10-18T11:33:38.358+11:00", "2008-08-06T21:02:28.910-11:00", "2010-07-09T02:20:17.040-05:00",
        "2013-05-28T22:38:19.430+06:00", "2008-10-23T17:31:37.488Z", "2011-07-15T13:49:53.937+07:00",
        "2010-01-04T04:52:17.443Z", "2011-08-06T13:56:56.877-02:00", "2013-01-15T21:05:04.684Z",
        "2011-07-10T14:53:34.278-01:00", "2009-10-02T01:14:41.555-10:00", "2012-01-11T19:29:55.119+05:00",
        "2007-08-24T04:49:22.259-09:00")
    }

    "compute incrementation of weeks" in {
      mustIncrementTo(BuiltInFunction2Op(WeeksPlus), "/hom/iso8601AcrossSlices", 5,
        "2011-09-15T19:29:55.119+05:00", "2012-09-03T13:18:40.252-03:00", "2009-09-08T04:52:17.443Z",
        "2008-04-10T21:02:28.910-11:00", "2009-06-22T11:33:38.358+11:00", "2010-03-16T02:20:17.040-05:00",
        "2012-04-18T03:48:21.874Z", "2007-03-11T10:58:14.041-01:00", "2012-04-08T12:19:00.040Z",
        "2013-02-01T22:38:19.430+06:00", "2008-02-14T18:36:48.745-03:00", "2008-06-27T17:31:37.488Z",
        "2007-04-28T04:49:22.259-09:00", "2009-08-21T10:30:16.115+07:00", "2009-12-03T02:43:41.657+04:00",
        "2009-06-06T01:14:41.555-10:00", "2012-11-15T00:36:31.692-02:00", "2011-04-10T13:56:56.877-02:00",
        "2011-03-22T13:49:53.937+07:00", "2011-03-17T14:53:34.278-01:00", "2011-12-01T01:11:04.423-04:00",
        "2012-09-19T21:05:04.684Z")
    }

    "compute incrementation of days" in {
      mustIncrementTo(BuiltInFunction2Op(DaysPlus), "/hom/iso8601AcrossSlices", 5,
        "2010-02-14T02:20:17.040-05:00", "2007-03-29T04:49:22.259-09:00", "2012-08-20T21:05:04.684Z",
        "2011-11-01T01:11:04.423-04:00", "2012-10-16T00:36:31.692-02:00", "2013-01-02T22:38:19.430+06:00",
        "2009-07-22T10:30:16.115+07:00", "2008-05-28T17:31:37.488Z", "2008-01-15T18:36:48.745-03:00",
        "2009-11-03T02:43:41.657+04:00", "2011-08-16T19:29:55.119+05:00", "2012-03-09T12:19:00.040Z",
        "2009-08-09T04:52:17.443Z", "2012-03-19T03:48:21.874Z", "2011-03-11T13:56:56.877-02:00",
        "2011-02-20T13:49:53.937+07:00", "2007-02-09T10:58:14.041-01:00", "2008-03-11T21:02:28.910-11:00",
        "2012-08-04T13:18:40.252-03:00", "2009-05-23T11:33:38.358+11:00", "2009-05-07T01:14:41.555-10:00",
        "2011-02-15T14:53:34.278-01:00")
    }

    "compute incrementation of hours" in {
      mustIncrementTo(BuiltInFunction2Op(HoursPlus), "/hom/iso8601AcrossSlices", 5,
        "2009-08-04T09:52:17.443Z", "2012-12-29T03:38:19.430+06:00", "2009-07-17T15:30:16.115+07:00",
        "2007-03-24T09:49:22.259-09:00", "2012-07-30T18:18:40.252-03:00", "2012-08-16T02:05:04.684Z",
        "2012-03-14T08:48:21.874Z", "2009-10-29T07:43:41.657+04:00", "2009-05-02T06:14:41.555-10:00",
        "2011-10-27T06:11:04.423-04:00", "2008-05-23T22:31:37.488Z", "2007-02-04T15:58:14.041-01:00",
        "2011-02-15T18:49:53.937+07:00", "2011-02-10T19:53:34.278-01:00", "2008-03-07T02:02:28.910-11:00",
        "2011-03-06T18:56:56.877-02:00", "2012-03-04T17:19:00.040Z", "2012-10-11T05:36:31.692-02:00",
        "2010-02-09T07:20:17.040-05:00", "2011-08-12T00:29:55.119+05:00", "2008-01-10T23:36:48.745-03:00",
        "2009-05-18T16:33:38.358+11:00")
    }

    "compute incrementation of minutes" in {
      mustIncrementTo(BuiltInFunction2Op(MinutesPlus), "/hom/iso8601AcrossSlices", 5,
        "2012-03-04T12:24:00.040Z", "2008-03-06T21:07:28.910-11:00", "2012-03-14T03:53:21.874Z",
        "2011-10-27T01:16:04.423-04:00", "2011-08-11T19:34:55.119+05:00", "2009-10-29T02:48:41.657+04:00",
        "2012-08-15T21:10:04.684Z", "2007-03-24T04:54:22.259-09:00", "2012-12-28T22:43:19.430+06:00",
        "2009-05-02T01:19:41.555-10:00", "2007-02-04T11:03:14.041-01:00", "2009-08-04T04:57:17.443Z",
        "2012-10-11T00:41:31.692-02:00", "2011-02-10T14:58:34.278-01:00", "2011-03-06T14:01:56.877-02:00",
        "2012-07-30T13:23:40.252-03:00", "2009-07-17T10:35:16.115+07:00", "2008-05-23T17:36:37.488Z",
        "2010-02-09T02:25:17.040-05:00", "2011-02-15T13:54:53.937+07:00", "2008-01-10T18:41:48.745-03:00",
        "2009-05-18T11:38:38.358+11:00")
    }

    "compute incrementation of seconds" in {
      mustIncrementTo(BuiltInFunction2Op(SecondsPlus), "/hom/iso8601AcrossSlices", 5,
        "2009-08-04T04:52:22.443Z", "2008-05-23T17:31:42.488Z", "2007-03-24T04:49:27.259-09:00",
        "2012-12-28T22:38:24.430+06:00", "2009-05-18T11:33:43.358+11:00", "2011-02-10T14:53:39.278-01:00",
        "2012-10-11T00:36:36.692-02:00", "2012-03-14T03:48:26.874Z", "2009-05-02T01:14:46.555-10:00",
        "2011-03-06T13:57:01.877-02:00", "2012-08-15T21:05:09.684Z", "2010-02-09T02:20:22.040-05:00",
        "2011-08-11T19:30:00.119+05:00", "2012-03-04T12:19:05.040Z", "2009-10-29T02:43:46.657+04:00",
        "2011-10-27T01:11:09.423-04:00", "2009-07-17T10:30:21.115+07:00", "2008-01-10T18:36:53.745-03:00",
        "2007-02-04T10:58:19.041-01:00", "2008-03-06T21:02:33.910-11:00", "2011-02-15T13:49:58.937+07:00",
        "2012-07-30T13:18:45.252-03:00")
    }

    "compute incrementation of ms" in {
      mustIncrementTo(BuiltInFunction2Op(MillisPlus), "/hom/iso8601AcrossSlices", 5,
        "2009-05-02T01:14:41.560-10:00", "2010-02-09T02:20:17.045-05:00", "2012-08-15T21:05:04.689Z",
        "2008-03-06T21:02:28.915-11:00", "2009-10-29T02:43:41.662+04:00", "2011-08-11T19:29:55.124+05:00",
        "2011-02-10T14:53:34.283-01:00", "2008-01-10T18:36:48.750-03:00", "2009-05-18T11:33:38.363+11:00",
        "2012-07-30T13:18:40.257-03:00", "2011-03-06T13:56:56.882-02:00", "2009-07-17T10:30:16.120+07:00",
        "2011-10-27T01:11:04.428-04:00", "2012-10-11T00:36:31.697-02:00", "2007-02-04T10:58:14.046-01:00",
        "2009-08-04T04:52:17.448Z", "2012-03-04T12:19:00.045Z", "2012-03-14T03:48:21.879Z",
        "2012-12-28T22:38:19.435+06:00", "2008-05-23T17:31:37.493Z", "2007-03-24T04:49:22.264-09:00",
        "2011-02-15T13:49:53.942+07:00")
    }
  }

  "time plus functions (heterogeneous case across slices)" should {
    "compute incrementation of years" in {
      mustIncrementTo(BuiltInFunction2Op(YearsPlus), "/het/iso8601AcrossSlices", 5,
        "2013-10-24T11:44:19.844+03:00", "2017-05-05T08:58:10.171+10:00", "2015-11-21T23:50:10.932+06:00",
        "2015-10-25T01:51:16.248+04:00", "2012-07-14T03:49:30.311-07:00", "2016-06-25T00:18:50.873-11:00",
        "2013-05-27T16:27:24.858Z", "2013-07-02T18:53:43.506-04:00", "2014-08-17T05:54:08.513+02:00",
        "2016-10-13T15:47:40.629+08:00")
    }

    "compute incrementation of months" in {
      mustIncrementTo(BuiltInFunction2Op(MonthsPlus), "/het/iso8601AcrossSlices", 5,
        "2011-04-21T23:50:10.932+06:00", "2012-10-05T08:58:10.171+10:00", "2012-03-13T15:47:40.629+08:00",
        "2010-01-17T05:54:08.513+02:00", "2009-03-24T11:44:19.844+03:00", "2008-12-02T18:53:43.506-04:00",
        "2011-03-25T01:51:16.248+04:00", "2008-10-27T16:27:24.858Z", "2007-12-14T03:49:30.311-07:00",
        "2011-11-25T00:18:50.873-11:00")
    }

    "compute incrementation of weeks" in {
      mustIncrementTo(BuiltInFunction2Op(WeeksPlus), "/het/iso8601AcrossSlices", 5,
        "2009-09-21T05:54:08.513+02:00", "2011-11-17T15:47:40.629+08:00", "2008-08-06T18:53:43.506-04:00",
        "2011-07-30T00:18:50.873-11:00", "2012-06-09T08:58:10.171+10:00", "2010-12-26T23:50:10.932+06:00",
        "2010-11-29T01:51:16.248+04:00", "2008-11-28T11:44:19.844+03:00", "2007-08-18T03:49:30.311-07:00",
        "2008-07-01T16:27:24.858Z")
    }

    "compute incrementation of days" in {
      mustIncrementTo(BuiltInFunction2Op(DaysPlus), "/het/iso8601AcrossSlices", 5,
        "2008-06-01T16:27:24.858Z", "2007-07-19T03:49:30.311-07:00", "2010-10-30T01:51:16.248+04:00",
        "2009-08-22T05:54:08.513+02:00", "2008-10-29T11:44:19.844+03:00", "2010-11-26T23:50:10.932+06:00",
        "2011-06-30T00:18:50.873-11:00", "2011-10-18T15:47:40.629+08:00", "2012-05-10T08:58:10.171+10:00",
        "2008-07-07T18:53:43.506-04:00")
    }

    "compute incrementation of hours" in {
      mustIncrementTo(BuiltInFunction2Op(HoursPlus), "/het/iso8601AcrossSlices", 5,
        "2012-05-05T13:58:10.171+10:00", "2009-08-17T10:54:08.513+02:00", "2011-10-13T20:47:40.629+08:00",
        "2008-05-27T21:27:24.858Z", "2008-10-24T16:44:19.844+03:00", "2007-07-14T08:49:30.311-07:00",
        "2011-06-25T05:18:50.873-11:00", "2010-11-22T04:50:10.932+06:00", "2008-07-02T23:53:43.506-04:00",
        "2010-10-25T06:51:16.248+04:00")
    }

    "compute incrementation of minutes" in {
      mustIncrementTo(BuiltInFunction2Op(MinutesPlus), "/het/iso8601AcrossSlices", 5,
        "2010-11-21T23:55:10.932+06:00", "2011-10-13T15:52:40.629+08:00", "2008-10-24T11:49:19.844+03:00",
        "2010-10-25T01:56:16.248+04:00", "2012-05-05T09:03:10.171+10:00", "2008-07-02T18:58:43.506-04:00",
        "2011-06-25T00:23:50.873-11:00", "2008-05-27T16:32:24.858Z", "2007-07-14T03:54:30.311-07:00",
        "2009-08-17T05:59:08.513+02:00")
    }

    "compute incrementation of seconds" in {
      mustIncrementTo(BuiltInFunction2Op(SecondsPlus), "/het/iso8601AcrossSlices", 5,
        "2011-06-25T00:18:55.873-11:00", "2009-08-17T05:54:13.513+02:00", "2010-11-21T23:50:15.932+06:00",
        "2008-10-24T11:44:24.844+03:00", "2012-05-05T08:58:15.171+10:00", "2010-10-25T01:51:21.248+04:00",
        "2008-05-27T16:27:29.858Z", "2011-10-13T15:47:45.629+08:00", "2007-07-14T03:49:35.311-07:00",
        "2008-07-02T18:53:48.506-04:00")
    }

    "compute incrementation of ms" in {
      mustIncrementTo(BuiltInFunction2Op(MillisPlus), "/het/iso8601AcrossSlices", 5,
        "2008-07-02T18:53:43.511-04:00", "2011-06-25T00:18:50.878-11:00", "2007-07-14T03:49:30.316-07:00",
        "2010-11-21T23:50:10.937+06:00", "2008-05-27T16:27:24.863Z", "2010-10-25T01:51:16.253+04:00",
        "2008-10-24T11:44:19.849+03:00", "2011-10-13T15:47:40.634+08:00", "2012-05-05T08:58:10.176+10:00",
        "2009-08-17T05:54:08.518+02:00")
    }
  }
}
// Runnable spec instance, fixing the effect type M to the test YId type.
object TimePlusSpecs extends TimePlusSpecs[test.YId] with test.YIdInstances
| precog/platform | mimir/src/test/scala/com/precog/mimir/TimePlusSpecs.scala | Scala | agpl-3.0 | 33,795 |
package cmwell.build
/**
* Proj: server
* User: gilad
* Date: 10/1/17
* Time: 11:15 AM
*/
/** Central place for version numbers shared by the build's plugin definitions. */
object PluginVersions {
  // Play framework version used by the build.
  val play: String = "2.7.2"
}
| thomsonreuters/CM-Well | server/project/project/src/main/scala/cmwell/build/PluginVersions.scala | Scala | apache-2.0 | 148 |
package is.hail.expr.ir.agg
import breeze.linalg.{DenseMatrix, DenseVector, diag, inv}
import is.hail.annotations.{Region, RegionValueBuilder, UnsafeRow}
import is.hail.asm4s._
import is.hail.backend.ExecuteContext
import is.hail.expr.ir.{EmitClassBuilder, EmitCode, EmitCodeBuilder, EmitContext, IEmitCode}
import is.hail.types.physical._
import is.hail.types.physical.stypes.{EmitType, SCode, SValue}
import is.hail.types.physical.stypes.concrete.{SBaseStructPointer, SIndexablePointer, SIndexablePointerSettable}
import is.hail.types.physical.stypes.interfaces.SIndexableValue
import is.hail.types.virtual.{TArray, TFloat64, TInt32, Type}
import is.hail.utils.FastIndexedSeq
class LinearRegressionAggregatorState(val kb: EmitClassBuilder[_]) extends AbstractTypedRegionBackedAggState(LinearRegressionAggregator.stateType)
object LinearRegressionAggregator {
val scalar = PFloat64(true)
val vector = PCanonicalArray(scalar, true)
val stateType: PCanonicalTuple = PCanonicalTuple(true, vector, vector, PInt32(true))
private val optVector = vector.setRequired(false)
val resultPType: PCanonicalStruct = PCanonicalStruct(required = false, "xty" -> optVector, "beta" -> optVector, "diag_inv" -> optVector, "beta0" -> optVector)
def computeResult(region: Region, xtyPtr: Long, xtxPtr: Long, k0: Int): Long = {
val xty = DenseVector(UnsafeRow.readArray(vector, null, xtyPtr)
.asInstanceOf[IndexedSeq[Double]].toArray[Double])
val k = xty.length
val xtx = DenseMatrix.create(k, k, UnsafeRow.readArray(vector, null, xtxPtr)
.asInstanceOf[IndexedSeq[Double]].toArray[Double])
val rvb = new RegionValueBuilder(region)
rvb.start(resultPType)
rvb.startStruct()
try {
val b = xtx \\ xty
val diagInv = diag(inv(xtx))
val xtx0 = xtx(0 until k0, 0 until k0)
val xty0 = xty(0 until k0)
val b0 = xtx0 \\ xty0
rvb.startArray(k)
var i = 0
while (i < k) {
rvb.addDouble(xty(i))
i += 1
}
rvb.endArray()
rvb.startArray(k)
i = 0
while (i < k) {
rvb.addDouble(b(i))
i += 1
}
rvb.endArray()
rvb.startArray(k)
i = 0
while (i < k) {
rvb.addDouble(diagInv(i))
i += 1
}
rvb.endArray()
rvb.startArray(k0)
i = 0
while (i < k0) {
rvb.addDouble(b0(i))
i += 1
}
rvb.endArray()
} catch {
case _: breeze.linalg.MatrixSingularException |
_: breeze.linalg.NotConvergedException =>
rvb.setMissing()
rvb.setMissing()
rvb.setMissing()
rvb.setMissing()
}
rvb.endStruct()
rvb.end()
}
}
// Staged (code-generating) linear-regression aggregator.
// State layout (see AbstractTypedRegionBackedAggState / stateType):
//   field 0: address of xty, a length-k double vector accumulating X^T y
//   field 1: address of xtx, a length-k*k double vector accumulating X^T X (row-major)
//   field 2: k0, the nested model dimension passed at init
// The final fit is delegated to the companion's `computeResult` via reflection.
class LinearRegressionAggregator() extends StagedAggregator {
  import LinearRegressionAggregator._
  type State = AbstractTypedRegionBackedAggState
  override def resultEmitType: EmitType = EmitType(SBaseStructPointer(LinearRegressionAggregator.resultPType), true)
  val initOpTypes: Seq[Type] = Array(TInt32, TInt32)
  val seqOpTypes: Seq[Type] = Array(TFloat64, TArray(TFloat64))
  // Allocates and zero-initializes the state for k covariates with nested
  // dimension k0; emits a fatal error unless 0 <= k0 <= k.
  def initOpF(state: State)(cb: EmitCodeBuilder, kc: Code[Int], k0c: Code[Int]): Unit = {
    val k = cb.newLocal[Int]("lra_init_k", kc)
    val k0 = cb.newLocal[Int]("lra_init_k0", k0c)
    cb.ifx((k0 < 0) || (k0 > k),
      cb += Code._fatal[Unit](const("linreg: `nested_dim` must be between 0 and the number (")
        .concat(k.toS)
        .concat(") of covariates, inclusive"))
    )
    cb.assign(state.off, stateType.allocate(state.region))
    // fields 0 and 1 hold addresses of freshly zeroed vectors of length k and k*k
    cb += Region.storeAddress(stateType.fieldOffset(state.off, 0), vector.zeroes(cb, state.region, k))
    cb += Region.storeAddress(stateType.fieldOffset(state.off, 1), vector.zeroes(cb, state.region, k * k))
    cb += Region.storeInt(stateType.loadField(state.off, 2), k0)
  }
  // Unpacks the two init args (k, k0); both are required — missingness is fatal.
  protected def _initOp(cb: EmitCodeBuilder, state: State, init: Array[EmitCode]): Unit = {
    val Array(kt, k0t) = init
    kt.toI(cb)
      .consume(cb,
        {
          cb += Code._fatal[Unit]("linreg: init args may not be missing")
        },
        { ktCode =>
          k0t.toI(cb)
            .consume(cb,
              {
                cb += Code._fatal[Unit]("linreg: init args may not be missing")
              },
              k0tCode => initOpF(state)(cb, ktCode.asInt.value, k0tCode.asInt.value)
            )
        })
  }
  // Folds one observation (y, x) into the running sums: xty += x*y and
  // xtx += outer(x, x). Rows whose covariate array contains a missing value
  // are skipped entirely. A fast path reads doubles directly from a canonical
  // array's memory; the generic path goes through loadElement.
  def seqOpF(state: State)(cb: EmitCodeBuilder, y: Code[Double], x: SIndexableValue): Unit = {
    val k = cb.newLocal[Int]("linreg_agg_seqop_k")
    val i = cb.newLocal[Int]("linreg_agg_seqop_i")
    val j = cb.newLocal[Int]("linreg_agg_seqop_j")
    val sptr = cb.newLocal[Long]("linreg_agg_seqop_sptr")
    val xty = cb.newLocal[Long]("linreg_agg_seqop_xty")
    val xtx = cb.newLocal[Long]("linreg_agg_seqop_xtx")
    cb.ifx(!x.hasMissingValues(cb),
      {
        cb.assign(xty, stateType.loadField(state.off, 0))
        cb.assign(xtx, stateType.loadField(state.off, 1))
        cb.assign(k, vector.loadLength(xty))
        cb.assign(sptr, vector.firstElementOffset(xty, k))
        cb.assign(i, 0)
        x.st match {
          case SIndexablePointer(pt: PCanonicalArray) =>
            // fast path: raw pointer arithmetic over the packed double array
            assert(pt.elementType.isInstanceOf[PFloat64])
            val xAddr = x.asInstanceOf[SIndexablePointerSettable].a
            val xptr = cb.newLocal[Long]("linreg_agg_seqop_xptr")
            val xptr2 = cb.newLocal[Long]("linreg_agg_seqop_xptr2")
            cb.assign(xptr, pt.firstElementOffset(xAddr, k))
            cb.whileLoop(i < k,
              {
                // xty[i] += x[i] * y
                cb += Region.storeDouble(sptr, Region.loadDouble(sptr) + (Region.loadDouble(xptr) * y))
                cb.assign(i, i + 1)
                cb.assign(sptr, sptr + scalar.byteSize)
                cb.assign(xptr, xptr + scalar.byteSize)
              })
            cb.assign(i, 0)
            cb.assign(sptr, vector.firstElementOffset(xtx, k))
            cb.assign(xptr, pt.firstElementOffset(xAddr, k))
            cb.whileLoop(i < k,
              {
                cb.assign(j, 0)
                cb.assign(xptr2, pt.firstElementOffset(xAddr, k))
                cb.whileLoop(j < k,
                  {
                    // add x[i] * x[j] to the value at sptr
                    cb += Region.storeDouble(sptr, Region.loadDouble(sptr) + (Region.loadDouble(xptr) * Region.loadDouble(xptr2)))
                    cb.assign(j, j + 1)
                    cb.assign(sptr, sptr + scalar.byteSize)
                    cb.assign(xptr2, xptr2 + scalar.byteSize)
                  })
                cb.assign(i, i + 1)
                cb.assign(xptr, xptr + scalar.byteSize)
              })
          case _ =>
            // generic path: element access through the SIndexableValue interface
            cb.whileLoop(i < k,
              {
                cb += Region.storeDouble(sptr, Region.loadDouble(sptr) + x.loadElement(cb, i).get(cb).asDouble.value * y)
                cb.assign(i, i + 1)
                cb.assign(sptr, sptr + scalar.byteSize)
              })
            cb.assign(i, 0)
            cb.assign(sptr, vector.firstElementOffset(xtx, k))
            cb.whileLoop(i < k,
              {
                cb.assign(j, 0)
                cb.whileLoop(j < k,
                  {
                    // add x[i] * x[j] to the value at sptr
                    cb += Region.storeDouble(sptr, Region.loadDouble(sptr) +
                      (x.loadElement(cb, i).get(cb).asDouble.value * x.loadElement(cb, j).get(cb).asDouble.value))
                    cb.assign(j, j + 1)
                    cb.assign(sptr, sptr + scalar.byteSize)
                  })
                cb.assign(i, i + 1)
              })
        }
      })
  }
  // Unpacks the seq args (y, x); silently ignores the row if either is missing.
  protected def _seqOp(cb: EmitCodeBuilder, state: State, seq: Array[EmitCode]): Unit = {
    val Array(y, x) = seq
    y.toI(cb)
      .consume(cb,
        {},
        { yCode =>
          x.toI(cb)
            .consume(cb,
              {},
              xCode => seqOpF(state)(cb, yCode.asDouble.value, xCode.asIndexable)
            )
        })
  }
  // Merges `other` into `state` by element-wise addition of the xty and xtx
  // vectors (both states are assumed to have been initialized with the same k —
  // TODO confirm; lengths are read from each vector independently).
  def combOpF(state: State, other: State)(cb: EmitCodeBuilder): Unit = {
    val n = cb.newLocal[Int]("n")
    val i = cb.newLocal[Int]("i")
    val sptr = cb.newLocal[Long]("sptr")
    val optr = cb.newLocal[Long]("optr")
    val xty = cb.newLocal[Long]("xty")
    val xtx = cb.newLocal[Long]("xtx")
    val oxty = cb.newLocal[Long]("oxty")
    val oxtx = cb.newLocal[Long]("oxtx")
    cb += Code(FastIndexedSeq(
      xty := stateType.loadField(state.off, 0),
      xtx := stateType.loadField(state.off, 1),
      oxty := stateType.loadField(other.off, 0),
      oxtx := stateType.loadField(other.off, 1),
      n := vector.loadLength(xty),
      i := 0,
      sptr := vector.firstElementOffset(xty, n),
      optr := vector.firstElementOffset(oxty, n),
      // xty += other.xty
      Code.whileLoop(i < n, Code(
        Region.storeDouble(sptr, Region.loadDouble(sptr) + Region.loadDouble(optr)),
        i := i + 1,
        sptr := sptr + scalar.byteSize,
        optr := optr + scalar.byteSize)),
      n := vector.loadLength(xtx),
      i := 0,
      sptr := vector.firstElementOffset(xtx, n),
      optr := vector.firstElementOffset(oxtx, n),
      // xtx += other.xtx
      Code.whileLoop(i < n, Code(
        Region.storeDouble(sptr, Region.loadDouble(sptr) + Region.loadDouble(optr)),
        i := i + 1,
        sptr := sptr + scalar.byteSize,
        optr := optr + scalar.byteSize))))
  }
  protected def _combOp(ctx: ExecuteContext, cb: EmitCodeBuilder, state: AbstractTypedRegionBackedAggState, other: AbstractTypedRegionBackedAggState): Unit = {
    combOpF(state, other)(cb)
  }
  // Emits a reflective call to the companion's computeResult(region, xty, xtx, k0)
  // and wraps the returned struct address as a present IEmitCode.
  protected def _result(cb: EmitCodeBuilder, state: State, region: Value[Region]): IEmitCode = {
    val resAddr = cb.newLocal[Long]("linear_regression_agg_res", Code.invokeScalaObject4[Region, Long, Long, Int, Long](
      LinearRegressionAggregator.getClass, "computeResult",
      region,
      stateType.loadField(state.off, 0),
      stateType.loadField(state.off, 1),
      Region.loadInt(stateType.loadField(state.off, 2))))
    IEmitCode.present(cb, LinearRegressionAggregator.resultPType.loadCheapSCode(cb, resAddr))
  }
}
| hail-is/hail | hail/src/main/scala/is/hail/expr/ir/agg/LinearRegressionAggregator.scala | Scala | mit | 10,033 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.conversion
import org.beangle.commons.conversion.impl.DefaultConversion
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec
// Smoke tests for DefaultConversion: each example only verifies that the
// conversion completes without throwing — converted values are discarded,
// not asserted. NOTE(review): consider asserting the converted results once
// DefaultConversion's rounding/parsing semantics are confirmed.
class ConversionTest extends AnyFunSpec with Matchers {
  // shared conversion instance used by the examples below
  val con = new DefaultConversion();
  describe("DefaultConversion") {
    it("Convert Integer") {
      // float -> boxed Integer
      con.convert(2.5f, classOf[Integer])
    }
    it("Convert Array") {
      // String[] -> Integer[]
      con.convert(Array("2", "3"), classOf[Array[Integer]])
    }
    it("Convert Primitive") {
      // String -> primitive Int, and Int -> boxed Integer
      con.convert("2", classOf[Int]).toInt
      con.convert(3, classOf[Integer])
    }
    it("Convert Primitive Array") {
      // local instance shadows the shared one (kept as-is)
      val con = new DefaultConversion()
      con.convert(Array("2", "3.4"), classOf[Array[Float]]).asInstanceOf[Array[Float]]
    }
  }
}
| beangle/commons | core/src/test/scala/org/beangle/commons/conversion/ConversionTest.scala | Scala | lgpl-3.0 | 1,502 |
// Compiler regression test (dotty tests/pos/i3816): a recursive type-member
// refinement that re-binds the member through the `self` alias must typecheck.
// Do not "simplify" — the exact shape is what is being tested.
trait Iterable { self =>
  //type CC <: Iterable { type CC = self.CC }
  type DD[X] <: Iterable { type DD[Y] = self.DD[Y] }
}
| lampepfl/dotty | tests/pos/i3816.scala | Scala | apache-2.0 | 126 |
package nyaya.util
import scala.annotation.nowarn
object Platform {

  /**
   * Selects the platform-specific value. This is the JVM build, so the `jvm`
   * argument is always the one evaluated and returned; `js` is by-name and is
   * never forced here (hence the suppressed unused-parameter warning).
   */
  @nowarn("cat=unused")
  @inline final def choose[A](jvm: => A, js: => A): A = {
    val chosen: A = jvm
    chosen
  }
}
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.domain
import play.api.libs.json.{Reads, Writes}
/**
 * Tax identifier for an agent business (ARN-style reference — see the
 * companion's `validFormat`). Construction throws IllegalArgumentException
 * when the reference fails [[AgentBusinessUtr.isValid]].
 */
case class AgentBusinessUtr(utr: String) extends TaxIdentifier with SimpleName {
  // fail fast on malformed references
  require(AgentBusinessUtr.isValid(utr))
  override def toString = utr
  val name = "agentbusinessutr"
  def value = utr
}
/** Companion: Play-JSON (de)serialisation and format validation for [[AgentBusinessUtr]]. */
object AgentBusinessUtr extends CheckCharacter {
  implicit val agentBusinessUtrWrite: Writes[AgentBusinessUtr] = new SimpleObjectWrites[AgentBusinessUtr](_.value)
  implicit val agentBusinessUtrRead: Reads[AgentBusinessUtr] = new SimpleObjectReads[AgentBusinessUtr]("utr", AgentBusinessUtr.apply)

  // One leading letter, then "ARN" in any case, then exactly seven digits.
  // FIX: the escape was doubled ("\\\\d"), which matched a literal backslash
  // followed by 'd' rather than a digit, so no real reference could validate.
  private val validFormat = "^[a-zA-Z][Aa][Rr][Nn]\\d{7}$"

  /**
   * True when `utr` is non-empty, matches the ARN pattern, and its check
   * character validates (`isCheckCorrect` is inherited from CheckCharacter;
   * its semantics are defined there).
   */
  def isValid(utr: String): Boolean = !utr.isEmpty && utr.matches(validFormat) && isCheckCorrect(utr.toUpperCase, 0, 1)
}
package com.bot4s.telegram.models
// Marker for input files whose contents are held as an Akka ByteString.
trait AkkaInputFile extends InputFile
object AkkaInputFile {
  // Concrete carrier: a filename plus its in-memory akka.util.ByteString payload.
  final case class ByteString(filename: String, contents: akka.util.ByteString) extends AkkaInputFile
  // Convenience factory mirroring the case-class constructor, typed to the trait.
  def apply(filename: String, contents: akka.util.ByteString): AkkaInputFile = ByteString(filename, contents)
}
| mukel/telegrambot4s | akka/src/com/bot4s/telegram/models/AkkaInputFile.scala | Scala | apache-2.0 | 312 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.WorkflowProcessorLog
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
* Workflow Processor Log Repository
* @param session
* @param executionContext
*/
/**
  * Workflow Processor Log Repository backed by Quill/JDBC.
  *
  * @param session JDBC session for the underlying context
  * @param executionContext execution context the (blocking) DB calls run on
  */
class WorkflowProcessorLogRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.WorkflowProcessorLogRepository[WorkflowProcessorLog , Int]
    with WorkflowProcessorLogMapping {

  /** Loads one entity by primary key.
    * Note: the returned Future fails with NoSuchElementException when no row matches. */
  def getById(id: Int): Future[WorkflowProcessorLog] = {
    Future(run(queryWorkflowProcessorLog.filter(_.workflowProcessorLogId == lift(id))).headOption.get)
  }

  /** Loads one entity by UUID; like getById, fails the Future when absent. */
  def getByUUID(uuid: UUID): Future[WorkflowProcessorLog] = {
    Future(run(queryWorkflowProcessorLog.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }

  /** All rows with the given workflowProcessorLogId.
    * FIX: the previous implementation ignored `id` and returned every row. */
  def getByWorkflowProcessorLogId(id : Int) : Future[List[WorkflowProcessorLog]] = {
    Future(run(queryWorkflowProcessorLog.filter(_.workflowProcessorLogId == lift(id))))
  }

  /** Every row in the table. */
  def getAll() : Future[List[WorkflowProcessorLog]] = {
    Future(run(queryWorkflowProcessorLog))
  }

  /** One page of rows (at most `pageSize`) together with paging metadata. */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[WorkflowProcessorLog]] = {
    val offset = page * pageSize
    val limit = (page + 1) * pageSize
    for {
      count <- countWorkflowProcessorLog()
      elements <- if (offset > count) Future.successful(Nil)
      else selectWorkflowProcessorLog(offset, limit)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }

  private def countWorkflowProcessorLog() = {
    Future(run(queryWorkflowProcessorLog.size).toInt)
  }

  /** Rows in [offset, limit), i.e. at most `limit - offset` elements.
    * FIX: previously did `take(limit)` after `drop(offset)`, so a page could
    * contain up to (page + 1) * pageSize rows instead of pageSize.
    * NOTE(review): the slice is applied in memory after fetching the whole
    * table (kept to match the surrounding repositories); pushing drop/take
    * into the Quill query would avoid the full scan. */
  private def selectWorkflowProcessorLog(offset: Int, limit: Int): Future[Seq[WorkflowProcessorLog]] = {
    Future(run(queryWorkflowProcessorLog).drop(offset).take(limit - offset).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WorkflowProcessorLogRepository.scala | Scala | gpl-3.0 | 2,967 |
package provingground
import org.scalatest._, flatspec._
import provingground.HoTT._
import induction.TLImplicits._
import shapeless._
//These tests are not unit tests in the strict sense but used to help non regression and code coverage.
//It's only a palliative to proper and comprehensive testing.
//Examples come from https://stepik.org/course/ThCS-Introduction-to-programming-with-dependent-types-in-Scala-2294/
//ThCS. Introduction to programming with dependent types in Scala by Dmytro Mitin
// Product (pair) types: exercises the built-in ProdTyp projections, recursor-
// derived projections, and an inductively defined product type.
class ProductTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val B = "B" :: Type
  val a = "a" :: A
  val b = "b" :: B
  val pair = PairTerm(a, b)
  "Built-in ProdTyp with built in first and second functions" should "retrieve element of a ProdTyp" in {
    assert(pair.typ === ProdTyp(A, B))
    assert(pair.first === a)
    assert(pair.second === b)
  }
  "Built-in ProdTyp with user defined first and second functions" should "retrieve element of a ProdTyp" in {
    // projections defined through the recursor rather than the built-ins
    val recAandBA = ProdTyp(A, B).rec(A)
    val recAandBB = ProdTyp(A, B).rec(B)
    val first = recAandBA(a :-> (b :-> a))
    val second = recAandBB(a :-> (b :-> b))
    assert(first(pair) === a)
    assert(second(pair) === b)
  }
  "Built-in ProdTyp with user defined id function" should "return the same pair" in {
    val recAandBAandB = ProdTyp(A, B).rec(ProdTyp(A, B))
    val id = recAandBAandB(a :-> (b :-> PairTerm(a, b)))
    assert(id(pair) === pair)
  }
  "User defined ProdTyp with user defined first and second functions" should "retrieve element of a ProdTyp" in {
    // the product built as an inductive type with a single constructor mkPair
    val AandB = "(A, B)" :: Type
    val ProdInd = ("mkPair" ::: A ->>: (B ->>: AandB)) =: AandB
    val makePair :: HNil = ProdInd.intros
    val pair = makePair(a)(b)
    assert(pair.typ === AandB)
    val recAandBA = ProdInd.rec(A)
    val recAandBB = ProdInd.rec(B)
    val first = recAandBA(a :-> (b :-> a))
    val second = recAandBB(a :-> (b :-> b))
    assert(first(pair) === a)
    assert(second(pair) === b)
  }
  "User defined ProdTyp with user defined id function" should "return the same pair" in {
    val AandB = "(A, B)" :: Type
    val ProdInd = ("mkPair" ::: A ->>: (B ->>: AandB)) =: AandB
    val makePair :: HNil = ProdInd.intros
    val pair = makePair(a)(b)
    assert(pair.typ === AandB)
    val recAandBAandB = ProdInd.rec(AandB)
    val id = recAandBAandB(a :-> (b :-> makePair(a)(b)))
    assert(id(pair) === pair)
  }
}
// Coproduct (sum) types: case analysis via the recursor on both the built-in
// PlusTyp and an inductively defined sum type.
class CoProductTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val B = "B" :: Type
  val a = "a" :: A
  val b = "b" :: B
  "User defined idXOrDefault(x) function with built-in PlusTyp" should "be defined properly" in {
    val a1 = PlusTyp(A, B).incl1(a)
    assert(a1.typ === PlusTyp(A, B))
    val b1 = PlusTyp(A, B).incl2(b)
    assert(b1.typ === PlusTyp(A, B))
    val recAorBA = PlusTyp(A, B).rec(A)
    val recAorBB = PlusTyp(A, B).rec(B)
    val a0 = "a0" :: A
    val b0 = "b0" :: B
    // project out the A (resp. B) component, falling back to a default
    val idAOrDefault = recAorBA(a :-> a)(b :-> a0)
    val idBOrDefault = recAorBB(a :-> b0)(b :-> b)
    assert(idAOrDefault(a1) === a)
    assert(idAOrDefault(b1) === a0)
    assert(idBOrDefault(a1) === b0)
    assert(idBOrDefault(b1) === b)
  }
  "User defined id function with built-in PlusTyp" should "be defined properly" in {
    val a1 = PlusTyp(A, B).incl1(a)
    assert(a1.typ === PlusTyp(A, B))
    val b1 = PlusTyp(A, B).incl2(b)
    assert(b1.typ === PlusTyp(A, B))
    val recAorBAorB = PlusTyp(A, B).rec(PlusTyp(A, B))
    // identity built by re-injecting each case
    val id =
      recAorBAorB(a :-> PlusTyp(A, B).incl1(a))(b :-> PlusTyp(A, B).incl2(b))
    assert(id(a1) === a1)
    assert(id(b1) === b1)
  }
  "User defined idOrDefault function with user defined PlusTyp" should "be defined properly" in {
    // the sum built as an inductive type with constructors inl / inr
    val AorB = "A + B" :: Type
    val SumInd = ("inl" ::: A ->>: AorB) |: ("inr" ::: B ->>: AorB) =: AorB
    val inl :: inr :: HNil = SumInd.intros
    val a1 = inl(a)
    assert(a1.typ === AorB)
    val b1 = inr(b)
    assert(b1.typ === AorB)
    val recAorBA = SumInd.rec(A)
    val recAorBB = SumInd.rec(B)
    val a0 = "a0" :: A
    val b0 = "b0" :: B
    val idAOrDefault = recAorBA(a :-> a)(b :-> a0)
    val idBOrDefault = recAorBB(a :-> b0)(b :-> b)
    assert(idAOrDefault(a1) === a)
    assert(idAOrDefault(b1) === a0)
    assert(idBOrDefault(a1) === b0)
    assert(idBOrDefault(b1) === b)
  }
  "User defined id function with user defined PlusTyp" should "be defined properly" in {
    val AorB = "A + B" :: Type
    val SumInd = ("inl" ::: A ->>: AorB) |: ("inr" ::: B ->>: AorB) =: AorB
    val inl :: inr :: HNil = SumInd.intros
    val a1 = inl(a)
    assert(a1.typ === AorB)
    val b1 = inr(b)
    assert(b1.typ === AorB)
    val recAorBAorB = SumInd.rec(AorB)
    val id = recAorBAorB(a :-> inl(a))(b :-> inr(b))
    assert(id(a1) === a1)
    assert(id(b1) === b1)
  }
}
// Function types: built-in function terms via ScalaRep, a wrapped function
// type defined inductively, and a function type emulated as a type family.
class FunctionTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val a = "a" :: A
  val B = "B" :: Type
  val b = "b" :: B
  val f = "f" :: A ->: B
  // brings `.term` on Scala functions/Ints via the NatInt representation
  import scalahott.ScalaRep._
  import NatInt.rep
  "Simple lambda function with built-in function type" should "be defined properly" in {
    val h = ((x: Int) => x * x).term
    assert(h.typ === NatInt ->: NatInt)
    assert(h(2.term) === 4.term)
  }
  // NOTE(review): "tyoe" is a typo in the spec name; left unchanged since it
  // is a runtime string (the reported test name).
  "Simple user defined function with user defined function type" should "have proper tyoe" in {
    val AtoB = "A → B" :: Type
    val AtoBInd = ("λ" ::: (A ->: B) ->>: AtoB) =: AtoB
    val lmbda :: HNil = AtoBInd.intros
    // assert(lmbda(f).typ === f.typ)
    val recFunAB = AtoBInd.rec(A ->: B)
    // unwrap a wrapped function and apply it
    val call = recFunAB(f :-> (a :-> f(a)))
    assert(call.typ === AtoB ->: A ->: B)
    val g = "g" :: AtoB
    assert(call(g)(a).typ === B)
  }
  "Simple user defined function with user defined emulated type family" should "have proper type" in {
    val Fun = "Fun" :: Type ->: Type ->: Type
    val FunInd = ("λ" ::: A ~>>: (B ~>>: ((A ->: B) ->>: (Fun -> Fun(A)(B))))) =:: Fun
    val lmbda :: HNil = FunInd.intros
    assert(lmbda(A)(B)(f).typ === Fun(A)(B))
  }
  "Simple user defined function with user defined emulated type family" should "be defined properly" in {
    val Fun = "Fun" :: Type ->: Type ->: Type
    val FunInd = ("λ" ::: A ~>>: (B ~>>: ((A ->: B) ->>: (Fun -> Fun(A)(B))))) =:: Fun
    val lmbda :: HNil = FunInd.intros
    val g = "g" :: Fun(A)(B)
    val indFunAB = FunInd.induc(A :~> (B :~> (g :-> (A ->: B))))
    val call = indFunAB(A :~> (B :~> (f :-> (a :-> f(a)))))
    assert(call.typ === A ~>: (B ~>: (Fun(A)(B) ->: A ->: B)))
    assert(call(A)(B)(g)(a).typ === B)
    // wrap a concrete squaring function and evaluate it through `call`
    val square = lmbda(NatInt)(NatInt)(
      ((x: Int) => x * x).term.asInstanceOf[Func[Term, Term]])
    assert(square.typ === Fun(NatInt)(NatInt))
    assert(call(NatInt)(NatInt)(square)(2.term) === 4.term)
  }
}
// Sigma (dependent pair) types: built-in Sgma with recursor/inductor-derived
// projections, and inductively defined versions (fixed and fully generic).
class SigmaTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val B = "B(_ : A)" :: A ->: Type
  val a = "a" :: A
  val b = "b" :: B(a)
  val pair = mkPair(a, b)
  "Build-in first and second method with built-in Sigma type" should "have proper type" in {
    assert(pair.typ === Sgma(a !: A, B(a)))
    assert(pair.first.typ === A)
    assert(pair.second.typ === B(a))
  }
  "User defined first and second method with built-in Sigma type" should "have proper type" in {
    val recSABA = Sgma(a !: A, B(a)).rec(A)
    val first = recSABA(a :~> (b :-> a))
    // second needs induction because its type depends on the first component
    val indSABB = Sgma(a !: A, B(a)).induc(a :~> (b :-> B(a)))
    val second = indSABB(a :~> (b :-> b))
    assert(first(pair) === a)
    assert(second(pair).typ === B(a))
  }
  "User defined id function with built-in Sigma type" should "be defined properly" in {
    val recSABSAB = Sgma(a !: A, B(a)).rec(Sgma(a !: A, B(a)))
    val id =
      recSABSAB(a :~> (b :-> mkPair(a, b).asInstanceOf[AbsPair[Term, Term]]))
    assert(id(pair) === pair)
  }
  "User defined id with user defined Sigma type" should "be defined properly" in {
    // Sigma over the fixed family B, built inductively
    val SigmaAB = "Sigma(a : A, B(a))" :: Type
    val SigmaInd = ("mkPair" ::: a ~>>: (B(a) ->>: SigmaAB)) =: SigmaAB
    val makePair :: HNil = SigmaInd.intros
    val pair = makePair(a)(b)
    assert(pair.typ === SigmaAB)
    val recSABA = SigmaInd.rec(A)
    val first = recSABA(a :~> (b :-> a))
    assert(first(pair) === a)
    val recSABSAB = SigmaInd.rec(SigmaAB)
    val id = recSABSAB(a :~> (b :-> makePair(a)(b)))
    assert(id(pair) === pair)
  }
  "For arbitrary type user defined id with user defined Sigma type" should "be defined properly" in {
    // fully generic Sigma, indexed by both the base type and the family
    val Sigma = "Σ" :: A ~>: ((A ->: Type) ->: Type)
    val SigmaInd = ("mkPair" ::: A ~>>: (B ~>>: (a ~>>: (B(a) ->>: (Sigma -> Sigma(
      A)(B)))))) =:: Sigma
    val makePair :: HNil = SigmaInd.intros
    val pair = makePair(A)(B)(a)(b)
    assert(pair.typ === Sigma(A)(B))
    val p = "(a, b)" :: Sigma(A)(B)
    val indSABA = SigmaInd.induc(A :~> (B :~> (p :-> A)))
    val first = indSABA(A :~> (B :~> (a :~> (b :-> a))))
    val indSABB = SigmaInd.induc(A :~> (B :~> (p :-> B(first(A)(B)(p)))))
    val second = indSABB(A :~> (B :~> (a :~> (b :-> b))))
    assert(first(A)(B)(pair) === a)
    assert(second(A)(B)(pair) === b)
    val indSABSAB = SigmaInd.induc(A :~> (B :~> (p :-> Sigma(A)(B))))
    val id = indSABSAB(A :~> (B :~> (a :~> (b :-> makePair(A)(B)(a)(b)))))
    assert(id(A)(B)(pair) === pair)
  }
}
// Pi (dependent function) types: built-in dependent functions, a wrapped
// Pi-type defined inductively (fixed and generic), and variable escaping
// in nested Pi-types.
class PiTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val B = "B(_ : A)" :: A ->: Type
  val a = "a" :: A
  val f = "f" :: a ~>: B(a)
  val id = A :~> (a :-> a)
  "f(a)" should "have proper type" in {
    assert(f(a).typ === B(a))
  }
  "id function" should "have proper type" in {
    assert(id.typ === A ~>: (A ->: A))
  }
  "simple user defined function with user define PI type" should "be defined properly" in {
    val AtoB = "Π(a:A) B(a)" :: Type
    val AtoBInd = ("λ" ::: (a ~>: B(a)) ->>: AtoB) =: AtoB
    val lambda :: HNil = AtoBInd.intros
    assert(lambda(f).typ === AtoB)
    val g = "g" :: AtoB
    val recDepFunAB = AtoBInd.rec(a ~>: B(a))
    // unwrap and apply the wrapped dependent function
    val call = recDepFunAB(f :-> (a :~> f(a)))
    assert(call.typ === AtoB ->: (a ~>: B(a)))
    assert(call(g)(a).typ === B(a))
    assert(call(lambda(f))(a) === f(a))
  }
  "For arbitrary type simple user defined function with user defined PI type" should "be defined properly" in {
    // generic Pi, indexed by the base type and the family
    val Pi = "Π" :: A ~>: ((A ->: Type) ->: Type)
    val PiInd = ("λ" ::: A ~>>: (B ~>>: ((a ~>: B(a)) ->>: (Pi -> Pi(A)(B))))) =:: Pi
    val lambda :: HNil = PiInd.intros
    assert(lambda(A)(B)(f).typ === Pi(A)(B))
    val g = "g" :: Pi(A)(B)
    val indDepFunAB = PiInd.induc(A :~> (B :~> (g :-> (a ~>: B(a)))))
    val call = indDepFunAB(A :~> (B :~> (f :-> (a :~> f(a)))))
    assert(call.typ === A ~>: (B ~>: (Pi(A)(B) ->: (a ~>: B(a)))))
    assert(call(A)(B)(g)(a).typ === B(a))
    assert(call(A)(B)(lambda(A)(B)(f))(a) === f(a))
  }
  "Definitions involving nested Pi-Types" should "correctly escape variables" in {
    // regression check: applying a nested dependent function must not capture
    // the bound variables incorrectly
    val A = "A" :: Type
    val B = "B" :: Type
    val fn = "f" :: (A ~>: (B ~>: (A ->: B)))
    val fArg = toTyp(fn(Unit)(Type)(Star))
    val func = fn(fArg)(Type)
    val arg = "x" :: fArg
    assert(func(arg).typ == Type)
  }
}
// Eliminators of the empty (Zero) and unit types.
// FIX: the assertions previously ran in the class constructor (a failure would
// abort suite construction and report no tests, and a stray `Zero` expression
// did nothing); they are now wrapped in named test cases like the sibling specs.
class EmptyAndUnitTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val a = "a" :: A
  "Empty type eliminator" should "produce a term of an arbitrary target type" in {
    // ex falso quodlibet: from z : Zero we obtain a term of any type A
    val recZA = Zero.rec(A)
    val z = "z" :: Zero
    assert(recZA(z).typ === A)
  }
  "Unit type eliminator" should "return the supplied value on Star" in {
    assert(Star.typ === Unit)
    val recUA = Unit.rec(A)
    assert(recUA(a)(Star) === a)
  }
}
// Booleans as a two-constructor inductive type; negation and conjunction are
// defined purely through the recursor.
class BooleanTypeSpec extends flatspec.AnyFlatSpec {
  val Bool = "Boolean" :: Type
  val b = "b" :: Bool
  val BoolInd = ("true" ::: Bool) |: ("false" ::: Bool) =: Bool
  val tru :: fls :: HNil = BoolInd.intros
  "custom negation function" should "be defined properly" in {
    val recBB = BoolInd.rec(Bool)
    // swap the images of the two constructors
    val not = recBB(fls)(tru)
    assert(not(tru) === fls)
    assert(not(fls) === tru)
  }
  "custom conjunction function" should "be defined properly" in {
    val recBBB = BoolInd.rec(Bool ->: Bool)
    // and(tru) = identity, and(fls) = constantly false
    val and = recBBB(b :-> b)(b :-> fls)
    assert(and(tru)(fls) === fls)
    assert(and(tru)(tru) === tru)
  }
}
// Peano naturals as an inductive type; doubling and addition defined via the
// recursor.
class NaturalNumbersTypeSpec extends flatspec.AnyFlatSpec {
  val Nat = "Nat" :: Type
  val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
  val zero :: succ :: HNil = NatInd.intros
  val n = "n" :: Nat
  val m = "m" :: Nat
  // small numerals used by the examples
  val one = succ(zero)
  val two = succ(one)
  val three = succ(two)
  val four = succ(three)
  val five = succ(four)
  val recNN = NatInd.rec(Nat)
  "custom double function" should "be defined properly" in {
    // double(0) = 0; double(succ n) = succ(succ(double n)) — here m is double(n)
    val double = recNN(zero)(n :-> (m :-> succ(succ(m))))
    assert(double(one) === two)
    assert(double(two) === four)
  }
  "custom addition function" should "be defined properly" in {
    val recNNN = NatInd.rec(Nat ->: Nat)
    val addn = "add(n)" :: Nat ->: Nat
    // add(0) = id; add(succ n)(m) = succ(add(n)(m))
    val add = recNNN(m :-> m)(n :-> (addn :-> (m :-> succ(addn(m)))))
    assert(add(one)(two) === three)
    assert(add(two)(three) === five)
  }
}
// Lists over a type A as an inductive type; length computed via the recursor.
class ListTypeSpec extends flatspec.AnyFlatSpec {
  "custom size list function" should "be defined properly" in {
    val Nat = "Nat" :: Type
    val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
    val zero :: succ :: HNil = NatInd.intros
    val one = succ(zero)
    val two = succ(one)
    val three = succ(two)
    val A = "A" :: Type
    val ListA = "List(A)" :: Type
    val ListAInd = ("nil" ::: ListA) |: ("cons" ::: A ->>: ListA -->>: ListA) =: ListA
    val nil :: cons :: HNil = ListAInd.intros
    val recLN = ListAInd.rec(Nat)
    val a = "a" :: A
    val as = "as" :: ListA
    val n = "n" :: Nat
    // size(nil) = 0; size(cons a as) = succ(size(as)) — n is the recursive value
    val size = recLN(zero)(a :-> (as :-> (n :-> succ(n))))
    val a1 = "a1" :: A
    val a2 = "a2" :: A
    val list = cons(a)(cons(a1)(cons(a2)(nil)))
    assert(size(list) === three)
  }
}
// Length-indexed vectors (Vec : Nat -> Type) as an indexed inductive family;
// the length is recovered by recursion over the family.
class VectorOfFixLengthTypeSpec extends flatspec.AnyFlatSpec {
  "custom vector of fixed length" should "be defined properly" in {
    val Nat = "Nat" :: Type
    val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
    val zero :: succ :: HNil = NatInd.intros
    val one = succ(zero)
    val two = succ(one)
    val three = succ(two)
    val n = "n" :: Nat
    val m = "m" :: Nat
    val A = "A" :: Type
    val Vec = "Vec" :: Nat ->: Type
    // nil : Vec 0; cons : (n : Nat) -> A -> Vec n -> Vec (succ n)
    val VecInd = ("nil" ::: (Vec -> Vec(zero))) |: {
      "cons" ::: n ~>>: (A ->>: (Vec :> Vec(n)) -->>: (Vec -> Vec(succ(n))))
    } =:: Vec
    val vnil :: vcons :: HNil = VecInd.intros
    val recVN = VecInd.rec(Nat)
    val a = "a" :: A
    val vn = "v_n" :: Vec(n)
    // size(nil) = 0; size(cons n a v) = succ(size v) — m is the recursive value
    val vsize = recVN(zero)(n :~> (a :-> (vn :-> (m :-> succ(m)))))
    val a1 = "a1" :: A
    val a2 = "a2" :: A
    val vect = vcons(two)(a)(vcons(one)(a1)(vcons(zero)(a2)(vnil)))
    assert(vsize(three)(vect) == three)
  }
}
// Identity (equality) types: a proof of associativity of Nat addition by
// induction, plus user-defined identity types (fixed-type and generic).
class IdentityTypeSpec extends flatspec.AnyFlatSpec {
  val A = "A" :: Type
  val a = "a" :: A
  "associativity of addition of natural numbers" should "be proved" in {
    val Nat = "Nat" :: Type
    val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
    val zero :: succ :: HNil = NatInd.intros
    val n = "n" :: Nat
    val m = "m" :: Nat
    val k = "k" :: Nat
    val recNNN = NatInd.rec(Nat ->: Nat)
    val addn = "add(n)" :: Nat ->: Nat
    val add = recNNN(m :-> m)(n :-> (addn :-> (m :-> succ(addn(m)))))
    // numerals below are not used by the proof itself
    val one = succ(zero)
    val two = succ(one)
    val three = succ(two)
    // induction motive: forall m k, (n + m) + k = n + (m + k)
    val indN_assoc = NatInd.induc(
      n :-> (m ~>: (k ~>: (add(add(n)(m))(k) =:= add(n)(add(m)(k))))))
    val pf = "(n+m)+k=n+(m+k)" :: m ~>: (k ~>: (add(add(n)(m))(k) =:= add(n)(
      add(m)(k))))
    // base case: refl; step case: transport the hypothesis along succ
    val assoc = indN_assoc(m :~> (k :~> add(m)(k).refl))(
      n :~> (pf :-> (m :~> (k :~>
        IdentityTyp
          .induced(succ)(add(add(n)(m))(k))(add(n)(add(m)(k)))(pf(m)(k))))))
    assert(
      assoc.typ === (n ~>: (m ~>: (k ~>: (add(add(n)(m))(k) =:= add(n)(
        add(m)(k)))))))
  }
  "refl(a)" should "have proper type" in {
    // identity type over the fixed type A
    val IdA = "Id(A)" :: A ->: A ->: Type
    val IdAInd = ("refl" ::: a ~>>: (IdA -> IdA(a)(a))) =:: IdA
    val refl :: HNil = IdAInd.intros
    assert(refl(a).typ === IdA(a)(a))
  }
  "refl(A)(a)" should "have proper type" in {
    // fully generic identity type, indexed by the type as well
    val Id = "Id" :: A ~>: (A ->: A ->: Type)
    val IdInd = ("refl" ::: A ~>>: (a ~>>: (Id -> Id(A)(a)(a)))) =:: Id
    val refl :: HNil = IdInd.intros
    assert(refl(A)(a).typ === Id(A)(a)(a))
  }
}
class EliminatorsSpec extends flatspec.AnyFlatSpec {
val A = "A" :: Type
val a = "a" :: A
val a1 = "a1" :: A
val a2 = "a2" :: A
val B = "B" :: Type
val b = "b" :: B
val Ba = "B(_ : A)" :: A ->: Type
val ba = "ba" :: Ba(a)
val C = "C" :: Type
val c = "c" :: C
val c1 = "c1" :: C
val c2 = "c2" :: C
"Empty type" should "have proper type" in {
assert(Zero.typ === Type)
val recZC = Zero.rec(C)
assert(recZC.typ === Zero ->: C)
val z = "z" :: Zero
assert(recZC(z).typ === C)
val D = "D(_ : Zero)" :: Zero ->: Type
//casting is not needed anymore
//val indZD = Zero.induc(D.asInstanceOf[Func[AtomicTerm, Typ[Term]]]) !: z ~>: D(z)
val indZD = Zero.induc(D)
assert(indZD(z).typ === D(z))
}
"Unit type" should "be defined property" in {
assert(Unit.typ === Type)
assert(Star.typ === Unit)
val recUC = Unit.rec(C)
assert(recUC.typ === C ->: Unit ->: C)
assert(recUC(c)(Star) === c)
val D = "D(_ : Unit)" :: Unit ->: Type
val u = "u" :: Unit
val indUD = Unit.induc(D)
assert(indUD.typ === D(Star) ->: u ~>: D(u))
val d = "d" :: D(Star)
assert(indUD(d)(Star) === d)
}
"User-defined Unit type" should "be defined property" in {
val Unit = "Unit" :: Type
val UnitInd = ("star" ::: Unit) =: Unit
val star :: HNil = UnitInd.intros
val recUC = UnitInd.rec(C)
assert(recUC.typ === C ->: Unit ->: C)
assert(recUC(c)(star) === c)
val D = "D(_ : Unit)" :: Unit ->: Type
val u = "u" :: Unit
val indUD = UnitInd.induc(D)
assert(indUD.typ === D(star) ->: u ~>: D(u))
val d = "d" :: D(star)
assert(indUD(d)(star) === d)
}
"Sum type" should "be defined property" in {
PlusTyp(A, B) !: Type
val s1 = PlusTyp(A, B).incl1(a)
assert(s1.typ === PlusTyp(A, B))
val s2 = PlusTyp(A, B).incl2(b)
assert(s1.typ === PlusTyp(A, B))
val recAorBC = PlusTyp(A, B).rec(C)
assert(recAorBC.typ === (A ->: C) ->: (B ->: C) ->: PlusTyp(A, B) ->: C)
val f = "f" :: A ->: C
val g = "g" :: B ->: C
assert(recAorBC(f)(g)(s1) === f(a))
assert(recAorBC(f)(g)(s2) === g(b))
val D = "D(_ : A + B)" :: PlusTyp(A, B) ->: Type
val s = "s" :: PlusTyp(A, B)
val indAorBD = PlusTyp(A, B).induc(D)
assert(
indAorBD.typ === (a ~>: D(PlusTyp(A, B).incl1(a))) ->: (b ~>: D(
PlusTyp(A, B).incl2(b))) ->: s ~>: D(s))
val f2 = "f2" :: a ~>: D(PlusTyp(A, B).incl1(a))
val g2 = "g2" :: b ~>: D(PlusTyp(A, B).incl2(b))
assert(indAorBD(f2)(g2)(s1) === f2(a))
assert(indAorBD(f2)(g2)(s2) === g2(b))
}
"User-defined Sum type" should "be defined property" in {
val AorB = "A + B" :: Type
val SumInd = ("inl" ::: A ->>: AorB) |: ("inr" ::: B ->>: AorB) =: AorB
val inl :: inr :: HNil = SumInd.intros
val s1 = inl(a)
assert(s1.typ === AorB)
val s2 = inr(b)
assert(s2.typ === AorB)
val recAorBC = SumInd.rec(C)
assert(recAorBC.typ === (A ->: C) ->: (B ->: C) ->: AorB ->: C)
val f = "f" :: A ->: C
val g = "g" :: B ->: C
assert(recAorBC(f)(g)(s1) === f(a))
assert(recAorBC(f)(g)(s2) === g(b))
val D = "D(_ : A + B)" :: AorB ->: Type
val s = "s" :: AorB
val indAorBD = SumInd.induc(D)
assert(
indAorBD.typ === (a ~>: D(inl(a))) ->: (b ~>: D(inr(b))) ->: s ~>: D(s))
val f2 = "f2" :: a ~>: D(inl(a))
val g2 = "g2" :: b ~>: D(inr(b))
assert(indAorBD(f2)(g2)(s1) === f2(a))
assert(indAorBD(f2)(g2)(s2) === g2(b))
}
"Product type" should "be defined property" in {
assert(ProdTyp(A, B).typ === Type)
val pair = PairTerm(a, b)
assert(pair.typ === ProdTyp(A, B))
val recAandBC = ProdTyp(A, B).rec(C)
assert(recAandBC.typ === (A ->: B ->: C) ->: ProdTyp(A, B) ->: C)
val f = "f" :: A ->: B ->: C
assert(recAandBC(f)(pair) === f(a)(b))
val D = "D(_ : A x B)" :: A ->: B ->: Type
val p = "(a, b)" :: ProdTyp(A, B)
val indAandBD = ProdTyp(A, B).induc(D)
assert(
indAandBD.typ === (a ~>: b ~>: D(a)(b)) ->: p ~>: D(p.first)(p.second))
val f2 = "f2" :: a ~>: b ~>: D(a)(b)
assert(indAandBD(f2)(pair) === f2(a)(b))
}
"User-defined Product type" should "be defined property" in {
val AandB = "A x B" :: Type
val ProdInd = ("mkPair" ::: A ->>: (B ->>: AandB)) =: AandB
val makePair :: HNil = ProdInd.intros
val pair = makePair(a)(b)
assert(pair.typ === AandB)
val recAandBC = ProdInd.rec(C)
assert(recAandBC.typ === (A ->: B ->: C) ->: AandB ->: C)
val f = "f" :: A ->: B ->: C
assert(recAandBC(f)(pair) === f(a)(b))
val D = "D(_ : A x B)" :: AandB ->: Type
val p = "(a, b)" :: AandB
val indAandBD = ProdInd.induc(D)
assert(indAandBD.typ === (a ~>: b ~>: D(makePair(a)(b))) ->: p ~>: D(p))
val f2 = "f2" :: a ~>: b ~>: D(makePair(a)(b))
assert(indAandBD(f2)(pair) === f2(a)(b))
}
"User-defined Function typ" should "be defined property" in {
val AtoB = "A → B" :: Type
val FunInd = ("λ" ::: (A ->: B) ->>: AtoB) =: AtoB
val lmbda :: HNil = FunInd.intros
val f = "f" :: A ->: B
val fun = lmbda(f)
assert(fun.typ === AtoB)
val recFunC = FunInd.rec(C)
assert(recFunC.typ === ((A ->: B) ->: C) ->: AtoB ->: C)
val F = "F" :: (A ->: B) ->: C
assert(recFunC(F)(fun) === F(f))
val g = "g" :: AtoB
val D = "D(_ : A → B)" :: AtoB ->: Type
val indFunD = FunInd.induc(D)
assert(indFunD.typ === (f ~>: D(lmbda(f))) ->: g ~>: D(g))
val F2 = "F2" :: f ~>: D(lmbda(f))
assert(indFunD(F2)(fun) === F2(f))
}
"Sigma-type" should "be defined property" in {
assert(Sgma(a !: A, Ba(a)).typ === Type)
val pair = mkPair(a, ba) !: Sgma(a !: A, Ba(a))
val recSgmC = Sgma(a !: A, Ba(a)).rec(C)
assert(recSgmC.typ === (a ~>: (Ba(a) ->: C)) ->: Sgma(a !: A, Ba(a)) ->: C)
val f = "f" :: a ~>: (Ba(a) ->: C)
assert(recSgmC(f)(pair) === f(a)(ba))
val D = "D(_ : Σ(a : A, B(a)))" :: a ~>: (Ba(a) ->: Type)
val p = "(a, ba)" :: Sgma(a !: A, Ba(a))
val indSgmD = Sgma(a !: A, Ba(a)).induc(D)
assert(indSgmD.typ === (a ~>: ba ~>: D(a)(ba) ) ->: p ~>: D(p.first)(p.second))
val f2 = "f2" :: a ~>: ba ~>: D(a)(ba)
assert(indSgmD(f2)(pair) === f2(a)(ba))
}
"User-defined Sigma-type" should "be defined property" in {
val SigmaAB = "Σ(a : A, B(a))" :: Type
val SigmaInd = ("mkPair" ::: a ~>>: (Ba(a) ->>: SigmaAB)) =: SigmaAB
val makePair :: HNil = SigmaInd.intros
val pair = makePair(a)(ba)
assert(pair.typ === SigmaAB)
val recSgmC = SigmaInd.rec(C)
assert(recSgmC.typ === (a ~>: (Ba(a) ->: C)) ->: SigmaAB ->: C)
val f = "f" :: a ~>: (Ba(a) ->: C)
assert(recSgmC(f)(pair) === f(a)(ba))
val D = "D(_ : Σ(a : A, B(a)))" :: SigmaAB ->: Type
val p = "(a, ba)" :: SigmaAB
val indSgmD = SigmaInd.induc(D)
assert(indSgmD.typ === (a ~>: ba ~>: D(makePair(a)(ba))) ->: p ~>: D(p))
val f2 = "f2" :: a ~>: ba ~>: D(makePair(a)(ba))
assert(indSgmD(f2)(pair) === f2(a)(ba))
}
"User-defined Pi-type" should "be defined property" in {
val AtoB = "Π(a : A, B(a))" :: Type
val FunInd = ("λ" ::: (a ~>: Ba(a)) ->>: AtoB) =: AtoB
val lambda :: HNil = FunInd.intros
val f = "f" :: a ~>: Ba(a)
val fun = lambda(f)
assert(fun.typ === AtoB)
val recFunC = FunInd.rec(C)
assert(recFunC.typ === ((a ~>: Ba(a)) ->: C) ->: AtoB ->: C)
val F = "F" :: (a ~>: Ba(a)) ->: C
recFunC(F)(fun) == F(f)
val g = "g" :: AtoB
val D = "D(_ : Π(a : A, B(a)))" :: AtoB ->: Type
val indFunD = FunInd.induc(D)
assert(indFunD.typ === (f ~>: D(lambda(f))) ->: g ~>: D(g))
val F2 = "F2" :: f ~>: D(lambda(f))
assert(indFunD(F2)(fun) === F2(f))
}
"Boolean" should "be defined property" in {
val Bool = "Boolean" :: Type
val BoolInd = ("true" ::: Bool) |: ("false" ::: Bool) =: Bool
val tru :: fls :: HNil = BoolInd.intros
val recBC = BoolInd.rec(C)
assert(recBC.typ === C ->: C ->: Bool ->: C)
assert(recBC(c1)(c2)(tru) === c1)
assert(recBC(c1)(c2)(fls) === c2)
val D = "D(_ : Boolean)" :: Bool ->: Type
val bool = "b" :: Bool
val indBC = BoolInd.induc(D)
assert(indBC.typ === D(tru) ->: D(fls) ->: bool ~>: D(bool))
val d1 = "d1" :: D(tru)
val d2 = "d2" :: D(fls)
assert(indBC(d1)(d2)(tru) === d1)
assert(indBC(d1)(d2)(fls) === d2)
}
"Natural numbers" should "be defined property" in {
val Nat = "Nat" :: Type
val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
val zero :: succ :: HNil = NatInd.intros
val n = "n" :: Nat
val m = "m" :: Nat
val recNC = NatInd.rec(C)
assert(recNC.typ === C ->: (Nat ->: C ->: C) ->: Nat ->: C)
val f = "f" :: Nat ->: C ->: C
assert(recNC(c)(f)(zero) === c)
assert(recNC(c)(f)(succ(n)) === f(n)(recNC(c)(f)(n)))
val D = "D(_ : Nat)" :: Nat ->: Type
val indND = NatInd.induc(D)
assert(indND.typ === D(zero) ->: n ~>: (D(n) ->: D(succ(n))) ->: m ~>: D(m))
val d = "d" :: D(zero)
val f2 = "f2" :: n ~>: (D(n) ->: D(succ(n)))
assert(indND(d)(f2)(zero) === d)
assert(indND(d)(f2)(succ(n)) === f2(n)(indND(d)(f2)(n)))
}
"List" should "be defined property" in {
val ListA = "List(A)" :: Type
val ListAInd = ("nil" ::: ListA) |: ("cons" ::: A ->>: ListA -->>: ListA) =: ListA
val nil :: cons :: HNil = ListAInd.intros
val as = "as" :: ListA
val as1 = "as1" :: ListA
val recLC = ListAInd.rec(C)
assert(recLC.typ === C ->: (A ->: ListA ->: C ->: C) ->: ListA ->: C)
val f = "f" :: A ->: ListA ->: C ->: C
assert(recLC(c)(f)(nil) === c)
assert(recLC(c)(f)(cons(a)(as)) === f(a)(as)(recLC(c)(f)(as)))
val D = "D(_ : List(A))" :: ListA ->: Type
val indLD = ListAInd.induc(D)
assert(
indLD.typ === D(nil) ->: a ~>: as ~>: (D(as) ->: D(cons(a)(as))) ->: as1 ~>: D(
as1))
val f2 = "f2" :: a ~>: as ~>: (D(as) ->: D(cons(a)(as)))
val d = "d" :: D(nil)
assert(indLD(d)(f2)(nil) === d)
assert(indLD(d)(f2)(cons(a)(as)) === f2(a)(as)(indLD(d)(f2)(as)))
}
"Vector" should "be defined property" in {
val Nat = "Nat" :: Type
val NatInd = ("0" ::: Nat) |: ("succ" ::: Nat -->>: Nat) =: Nat
val zero :: succ :: HNil = NatInd.intros
val n = "n" :: Nat
val m = "m" :: Nat
val Vec = "Vec" :: Nat ->: Type
val VecInd = ("nil" ::: (Vec -> Vec(zero))) |: {
"cons" ::: n ~>>: (A ->>: (Vec :> Vec(n)) -->>: (Vec -> Vec(succ(n))))
} =:: Vec
val vnil :: vcons :: HNil = VecInd.intros
val vn = "v_n" :: Vec(n)
val vm = "v_m" :: Vec(m)
val recVC = VecInd.rec(C)
assert(recVC.typ === C ->: n ~>: (A ->: Vec(n) ->: C ->: C) ->: m ~>: (Vec(
m) ->: C))
val f = "f" :: n ~>: (A ->: Vec(n) ->: C ->: C)
assert(recVC(c)(f)(zero)(vnil) === c)
assert(
recVC(c)(f)(succ(n))(vcons(n)(a)(vn)) === f(n)(a)(vn)(recVC(c)(f)(n)(vn)))
val D = "D(_ : Vec(_))" :: n ~>: (Vec(n) ->: Type)
val indVD = VecInd.induc(D)
assert(
indVD.typ === D(zero)(vnil) ->: n ~>: a ~>: vn ~>: (D(n)(vn) ->: D(
succ(n))(vcons(n)(a)(vn))) ->: m ~>: vm ~>: D(m)(vm))
val f2 = "f2" :: n ~>: a ~>: vn ~>: (D(n)(vn) ->: D(succ(n))(
vcons(n)(a)(vn)))
val d = "d" :: D(zero)(vnil)
assert(indVD(d)(f2)(zero)(vnil) === d)
assert(
indVD(d)(f2)(succ(n))(vcons(n)(a)(vn)) === f2(n)(a)(vn)(
indVD(d)(f2)(n)(vn)))
}
"Identity type" should "be defined property" in {
assert((a1 =:= a2).typ === Type)
assert(a.refl.typ === (a =:= a))
val recIdC = IdentityTyp.rec(A, C)
assert(recIdC.typ === (A ->: C) ->: a1 ~>: a2 ~>: ((a1 =:= a2) ->: C))
val f = "f" :: A ->: C
recIdC(f)(a)(a)(a.refl) == f(a) // false
val D = "D(_ : a1 = a2)" :: a1 ~>: (a2 ~>: ((a1 =:= a2) ->: Type))
val pf = "pf" :: (a1 =:= a2)
// val indIdD = IdentityTyp.induc(A, D) // doesn't compile
// !: (a ~>: D(a)(a)(a.refl)) ->: a1 ~>: a2 ~>: (pf ~>: D(a1)(a2)(pf))
// val f = "f" :: A ->: C
// indIdD(f)(a)(a)(a.refl) == f(a)
}
"User-defined Identity type" should "be defined property" in {
val IdA = "Id(A)" :: A ->: A ->: Type
val IdAInd = ("refl" ::: a ~>>: (IdA -> IdA(a)(a))) =:: IdA
val refl :: HNil = IdAInd.intros
assert(refl(a).typ === IdA(a)(a))
val recIdC = IdAInd.rec(C)
assert(recIdC.typ === (A ->: C) ->: a1 ~>: a2 ~>: (IdA(a1)(a2) ->: C))
val f = "f" :: A ->: C
assert(recIdC(f)(a)(a)(refl(a)) === f(a))
val D = "D(_ : a1 = a2)" :: a1 ~>: a2 ~>: (IdA(a1)(a2) ->: Type)
val pf = "a1 = a2" :: IdA(a1)(a2)
val indIdD = IdAInd.induc(D)
assert(
indIdD.typ === a ~>: D(a)(a)(refl(a)) ->: a1 ~>: a2 ~>: pf ~>: D(a1)(a2)(
pf))
val f2 = "f2" :: a ~>: D(a)(a)(refl(a))
assert(indIdD(f2)(a)(a)(refl(a)) === f2(a))
}
}
| siddhartha-gadgil/ProvingGround | mantle/src/test/scala/provingground/IntegrationSpec.scala | Scala | mit | 31,058 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.storage
import java.net.URLEncoder
import javax.servlet.http.HttpServletRequest
import scala.xml.{Node, Unparsed}
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1.{ExecutorSummary, RDDDataDistribution, RDDPartitionInfo}
import org.apache.spark.ui._
import org.apache.spark.util.Utils
/** Page showing storage details for a given RDD */
private[ui] class RDDPage(parent: SparkUITab, store: AppStatusStore) extends WebUIPage("rdd") {
  def render(request: HttpServletRequest): Seq[Node] = {
    // stripXSS is called first to remove suspicious characters used in XSS attacks
    val parameterId = UIUtils.stripXSS(request.getParameter("id"))
    require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")
    val parameterBlockPage = UIUtils.stripXSS(request.getParameter("block.page"))
    val parameterBlockSortColumn = UIUtils.stripXSS(request.getParameter("block.sort"))
    val parameterBlockSortDesc = UIUtils.stripXSS(request.getParameter("block.desc"))
    val parameterBlockPageSize = UIUtils.stripXSS(request.getParameter("block.pageSize"))
    // Defaults used when the request omits the block.* paging parameters.
    val blockPage = Option(parameterBlockPage).map(_.toInt).getOrElse(1)
    val blockSortColumn = Option(parameterBlockSortColumn).getOrElse("Block Name")
    val blockSortDesc = Option(parameterBlockSortDesc).map(_.toBoolean).getOrElse(false)
    val blockPageSize = Option(parameterBlockPageSize).map(_.toInt).getOrElse(100)
    val rddId = parameterId.toInt
    val rddStorageInfo = try {
      store.rdd(rddId)
    } catch {
      case _: NoSuchElementException =>
        // Rather than crashing, render an "RDD Not Found" page
        return UIUtils.headerSparkPage(request, "RDD Not Found", Seq.empty[Node], parent)
    }
    // Worker table
    // NOTE(review): .get assumes dataDistribution is always populated for a
    // found RDD — confirm against AppStatusStore before relying on it.
    val workerTable = UIUtils.listingTable(workerHeader, workerRow,
      rddStorageInfo.dataDistribution.get, id = Some("rdd-storage-by-worker-table"))
    // Bad paging/sorting parameters surface as an inline error panel rather
    // than a failed request.
    val blockTableHTML = try {
      val _blockTable = new BlockPagedTable(
        UIUtils.prependBaseUri(request, parent.basePath) + s"/storage/rdd/?id=${rddId}",
        rddStorageInfo.partitions.get,
        blockPageSize,
        blockSortColumn,
        blockSortDesc,
        store.executorList(true))
      _blockTable.table(blockPage)
    } catch {
      case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
        <div class="alert alert-error">{e.getMessage}</div>
    }
    // After a sort request, scroll the browser down to the block table so the
    // user sees the effect of the re-sort.
    val jsForScrollingDownToBlockTable =
      <script>
        {
          Unparsed {
            """
              |$(function() {
              |  if (/.*&block.sort=.*$/.test(location.search)) {
              |    var topOffset = $("#blocks-section").offset().top;
              |    $("html,body").animate({scrollTop: topOffset}, 200);
              |  }
              |});
            """.stripMargin
          }
        }
      </script>
    val content =
      <div class="row-fluid">
        <div class="span12">
          <ul class="unstyled">
            <li>
              <strong>Storage Level:</strong>
              {rddStorageInfo.storageLevel}
            </li>
            <li>
              <strong>Cached Partitions:</strong>
              {rddStorageInfo.numCachedPartitions}
            </li>
            <li>
              <strong>Total Partitions:</strong>
              {rddStorageInfo.numPartitions}
            </li>
            <li>
              <strong>Memory Size:</strong>
              {Utils.bytesToString(rddStorageInfo.memoryUsed)}
            </li>
            <li>
              <strong>Disk Size:</strong>
              {Utils.bytesToString(rddStorageInfo.diskUsed)}
            </li>
          </ul>
        </div>
      </div>
      <div class="row-fluid">
        <div class="span12">
          <h4>
            Data Distribution on {rddStorageInfo.dataDistribution.map(_.size).getOrElse(0)}
            Executors
          </h4>
          {workerTable}
        </div>
      </div>
      <div>
        <h4 id="blocks-section">
          {rddStorageInfo.partitions.map(_.size).getOrElse(0)} Partitions
        </h4>
        {blockTableHTML ++ jsForScrollingDownToBlockTable}
      </div>;
    UIUtils.headerSparkPage(
      request, "RDD Storage Info for " + rddStorageInfo.name, content, parent)
  }
  /** Header fields for the worker table */
  private def workerHeader = Seq(
    "Host",
    "On Heap Memory Usage",
    "Off Heap Memory Usage",
    "Disk Usage")
  /** Render an HTML row representing a worker */
  private def workerRow(worker: RDDDataDistribution): Seq[Node] = {
    <tr>
      <td>{worker.address}</td>
      <td>
        {Utils.bytesToString(worker.onHeapMemoryUsed.getOrElse(0L))}
        ({Utils.bytesToString(worker.onHeapMemoryRemaining.getOrElse(0L))} Remaining)
      </td>
      <td>
        {Utils.bytesToString(worker.offHeapMemoryUsed.getOrElse(0L))}
        ({Utils.bytesToString(worker.offHeapMemoryRemaining.getOrElse(0L))} Remaining)
      </td>
      <td>{Utils.bytesToString(worker.diskUsed)}</td>
    </tr>
  }
}
/**
 * One row of the paged block table: a single RDD block's name, storage level,
 * sizes, and the space-separated list of executor addresses holding it.
 */
private[ui] case class BlockTableRowData(
    blockName: String,
    storageLevel: String,
    memoryUsed: Long,
    diskUsed: Long,
    executors: String)
/**
 * Data source backing the paged block table. All partitions are converted to
 * rows and sorted once at construction; paging is then a simple slice.
 *
 * @param rddPartitions       partitions of the RDD being displayed
 * @param pageSize            number of rows per page
 * @param sortColumn          display name of the column to sort by
 * @param desc                whether to sort descending
 * @param executorIdToAddress maps executor ids to their host:port addresses
 */
private[ui] class BlockDataSource(
    rddPartitions: Seq[RDDPartitionInfo],
    pageSize: Int,
    sortColumn: String,
    desc: Boolean,
    executorIdToAddress: Map[String, String]) extends PagedDataSource[BlockTableRowData](pageSize) {
  private val data = rddPartitions.map(blockRow).sorted(ordering(sortColumn, desc))
  override def dataSize: Int = data.size
  override def sliceData(from: Int, to: Int): Seq[BlockTableRowData] = {
    data.slice(from, to)
  }
  /** Convert one partition into a table row, resolving executor ids to addresses. */
  private def blockRow(rddPartition: RDDPartitionInfo): BlockTableRowData = {
    BlockTableRowData(
      rddPartition.blockName,
      rddPartition.storageLevel,
      rddPartition.memoryUsed,
      rddPartition.diskUsed,
      rddPartition.executors
        // Fall back to the raw executor id when no address is known.
        .map { id => executorIdToAddress.getOrElse(id, id) }
        .sorted
        .mkString(" "))
  }
  /**
   * Return Ordering according to sortColumn and desc.
   *
   * @throws IllegalArgumentException for an unknown column name; the caller
   *         renders this as an inline error panel.
   */
  private def ordering(sortColumn: String, desc: Boolean): Ordering[BlockTableRowData] = {
    val ordering: Ordering[BlockTableRowData] = sortColumn match {
      case "Block Name" => Ordering.by(_.blockName)
      case "Storage Level" => Ordering.by(_.storageLevel)
      case "Size in Memory" => Ordering.by(_.memoryUsed)
      case "Size on Disk" => Ordering.by(_.diskUsed)
      case "Executors" => Ordering.by(_.executors)
      case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
    }
    if (desc) ordering.reverse else ordering
  }
}
/**
 * Paged, sortable HTML table of an RDD's blocks. Sorting/paging state is
 * round-tripped through the block.* query parameters encoded in each link.
 */
private[ui] class BlockPagedTable(
    basePath: String,
    rddPartitions: Seq[RDDPartitionInfo],
    pageSize: Int,
    sortColumn: String,
    desc: Boolean,
    executorSummaries: Seq[ExecutorSummary]) extends PagedTable[BlockTableRowData] {
  override def tableId: String = "rdd-storage-by-block-table"
  override def tableCssClass: String =
    "table table-bordered table-condensed table-striped table-head-clickable"
  override def pageSizeFormField: String = "block.pageSize"
  override def pageNumberFormField: String = "block.page"
  override val dataSource: BlockDataSource = new BlockDataSource(
    rddPartitions,
    pageSize,
    sortColumn,
    desc,
    executorSummaries.map { ex => (ex.id, ex.hostPort) }.toMap)
  /** URL for a given page, preserving the current sort column and direction. */
  override def pageLink(page: Int): String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    basePath +
      s"&$pageNumberFormField=$page" +
      s"&block.sort=$encodedSortColumn" +
      s"&block.desc=$desc" +
      s"&$pageSizeFormField=$pageSize"
  }
  override def goButtonFormPath: String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    s"$basePath&block.sort=$encodedSortColumn&block.desc=$desc"
  }
  /**
   * Header row: each column is a link that re-sorts by that column; the
   * currently sorted column toggles direction and shows an arrow glyph.
   */
  override def headers: Seq[Node] = {
    val blockHeaders = Seq(
      "Block Name",
      "Storage Level",
      "Size in Memory",
      "Size on Disk",
      "Executors")
    if (!blockHeaders.contains(sortColumn)) {
      throw new IllegalArgumentException(s"Unknown column: $sortColumn")
    }
    val headerRow: Seq[Node] = {
      blockHeaders.map { header =>
        if (header == sortColumn) {
          val headerLink = Unparsed(
            basePath +
              s"&block.sort=${URLEncoder.encode(header, "UTF-8")}" +
              s"&block.desc=${!desc}" +
              s"&block.pageSize=$pageSize")
          val arrow = if (desc) "&#x25BE;" else "&#x25B4;" // UP or DOWN
          <th>
            <a href={headerLink}>
              {header}
              <span>&nbsp;{Unparsed(arrow)}</span>
            </a>
          </th>
        } else {
          val headerLink = Unparsed(
            basePath +
              s"&block.sort=${URLEncoder.encode(header, "UTF-8")}" +
              s"&block.pageSize=$pageSize")
          <th>
            <a href={headerLink}>
              {header}
            </a>
          </th>
        }
      }
    }
    <thead>{headerRow}</thead>
  }
  override def row(block: BlockTableRowData): Seq[Node] = {
    <tr>
      <td>{block.blockName}</td>
      <td>{block.storageLevel}</td>
      <td>{Utils.bytesToString(block.memoryUsed)}</td>
      <td>{Utils.bytesToString(block.diskUsed)}</td>
      <td>{block.executors}</td>
    </tr>
  }
}
| guoxiaolongzte/spark | core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala | Scala | apache-2.0 | 10,220 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
import java.net.URI
import com.twitter.algebird.Group
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._
import org.scalacheck._
import scala.concurrent.duration._
import scalaz.Nondeterminism
import scalaz.concurrent.Task
import scalaz.std.list._
import scalaz.std.tuple._
import scalaz.stream.{Process, Sink}
import scalaz.syntax.foldable._
import scalaz.syntax.functor._
object MonitoringSpec extends Properties("monitoring") {
  // Shorthand for the stream-transducer helpers under test.
  val B = Buffers
  // we are dealing with doubles, so don't want to rely on exact comparison,
  // should be within a small epsilon though
  val Epsilon = 1e-7
  // some syntax for === and !== for epsilon comparisons
  implicit class DoubleSyntax(d: Double) {
    def ===[N](n: N)(implicit N: Numeric[N]) = (d - N.toDouble(n)).abs < Epsilon
    def !==[N](n: N)(implicit N: Numeric[N]) = (d - N.toDouble(n)).abs >= Epsilon
  }
implicit class DoubleSeqSyntax(ds: Seq[Double]) {
def ===[N](ns: Seq[N])(implicit N: Numeric[N]) =
ns.length == ds.length && ds.zip(ns.map(N.toDouble)).forall { case (a,b) => a === b }
}
  // +/- 200 trillion
  // Bounded generator: keeps sums of several Longs well away from overflow.
  implicit val arbLong: Arbitrary[Long] = Arbitrary(Gen.choose(-200000000000000L, 200000000000000L))
  /*
   * Check that `roundDuration` works as expected for
   * some hardcoded examples.
   */
  // ceilingDuration(d, step) rounds d up to the next *strictly greater*
  // multiple of step (note 60s at 1min step gives 2min, not 1min).
  property("roundDuration") = secure {
    B.ceilingDuration(0.minutes, 5.minutes) == (5.minutes) &&
    B.ceilingDuration(14.seconds, 1.minutes) == (1.minutes) &&
    B.ceilingDuration(60.seconds, 1.minutes) == (2.minutes) &&
    B.ceilingDuration(61.seconds, 1.minutes) == (2.minutes) &&
    B.ceilingDuration(59.seconds, 2.minutes) == (2.minutes) &&
    B.ceilingDuration(119.seconds, 1.minutes) == (2.minutes) &&
    B.ceilingDuration(120.seconds, 1.minutes) == (3.minutes) &&
    B.ceilingDuration(190.milliseconds, 50.milliseconds) == (200.milliseconds)
  }
  /*
   * Check that `counter` properly counts.
   */
  // The counter transducer emits a running sum, starting with the seed (0).
  property("counter") = forAll { (xs: List[Long]) =>
    val c = B.counter(0)
    val input: Process[Task,Long] = Process.emitAll(xs)
    val out = input.pipe(c).runLog.run
    out == xs.scanLeft(0.0)(_ + _)
  }
  /*
   * Check that `resetEvery` properly resets the stream
   * transducer after the elapsed time. Check that `emitEvery`
   * only emits a value at period boundaries.
   * Also check that `emitEvery` emits when it sees two `None` values.
   */
  property("reset/emitEvery") = forAll { (h: Long, t: List[Long]) =>
    val xs = h :: t
    // resetEvery -- we feed the same input twice, fast forwarding
    // the time; this should give the same output, duplicated
    val c = B.resetEvery(5.minutes)(B.counter(0))
    val input: Process[Task,(Long,Duration)] =
      Process.emitAll(xs.map((_, 0.minutes))) ++
      Process.emitAll(xs.map((_, 5.minutes)))
    val input3: Process[Task,(Option[Long],Duration)] =
      Process.emitAll(xs.map(x => (Some(x), 0.minutes))) ++
      Process.emitAll(xs.map(x => (Some(x), 1.minutes)))
    val out = input.pipe(c).runLog.run
    require(out.length % 2 == 0, "length of output should be even")
    val (now, later) = out.splitAt(out.length / 2)
    val ok = (now === later) && (now === xs.scanLeft(0.0)(_ + _))
    // emitEvery -- we should only emit two values, one at the
    // end of the first period, and one at the end of the second
    val c2 = B.emitEvery(5.minutes)(c)
    val input2 = input ++ Process(1L -> (11.minutes))
    val out2 = input2.map{ case (x, y) => (Some(x), y) }.pipe(c2).runLog.run
    val out3 = (input3 ++ Process(None -> 2.minutes, None -> 11.minutes)).pipe(c2).runLog.run
    ok &&
    out2.length === 2 &&
    out2(0) === xs.sum &&
    out2(1) === xs.sum &&
    out3(0) === xs.sum * 2
  }
  /* Check that if all events occur at same moment, `sliding` has no effect. */
  // With every event inside one window, a sliding sum equals a running sum.
  property("sliding-id") = forAll(Gen.nonEmptyListOf(Gen.choose(1,10))) { xs =>
    val c = B.sliding(5.minutes)(identity[Int])(Group.intGroup)
    val input: Process[Task,(Int,Duration)] =
      Process.emitAll(xs.map((_, 1.minutes)))
    val output = input.pipe(c).runLog.run
    output == xs.scanLeft(0)(_ + _)
  }
  /* Example of sliding count. */
  // Events older than the 2-minute window fall out of the sum as time advances.
  property("sliding-example") = secure {
    val i1: Process[Task, (Int,Duration)] =
      Process(1 -> (0.minutes), 1 -> (1.minutes), 2 -> (3.minutes), 2 -> (4.minutes))
    val c = B.sliding(2.minutes)(identity[Int])(Group.intGroup)
    val output = i1.pipe(c).runLog.run
    output == List(0, 1, 2, 3, 4)
  }
  /*
   * Check that all values are eventually received by a
   * buffered signal.
   */
  // Publishing is asynchronous, so we spin until the signal converges on the
  // expected sum rather than asserting immediately.
  property("bufferedSignal") = forAll { (xs: List[Long]) =>
    val (snk, s) = Monitoring.bufferedSignal(B.counter(0)).run
    xs.traverse_(snk).run
    val expected = xs.sum
    // this will 'eventually' become true, and loop otherwise
    while (s.continuous.once.runLastOr(0.0).run !== expected) {
      Thread.sleep(10)
    }
    true
  }
  /*
   * Check that subscribing and filtering is the same as
   * filtering and subscribing.
   */
  property("subscribe") = secure {
    val M = Monitoring.instance(windowSize = 6.seconds)
    implicit val log = (_:String) => ()
    // Collect everything a process emits for `t`, then interrupt it.
    def listenFor[A](t: Duration)(p: Process[Task, A]): Vector[A] = {
      val b = new java.util.concurrent.atomic.AtomicBoolean(false)
      var v = Vector[A]()
      p.evalMap(a => Task {
        v = v :+ a
      }).run.runAsyncInterruptibly(_ => (), b)
      Thread.sleep(t.toMillis)
      b.set(true)
      v
    }
    new Instruments(M) {
      JVM.instrument(this)
    }
    val b1 = Monitoring.subscribe(M)(_ => true).
      filter(_.key.name.contains("previous/jvm/gc/ParNew/time"))
    val b2 = Monitoring.subscribe(M)(
      _.name.contains("previous/jvm/gc/ParNew/time"))
    val xs = listenFor(30.seconds)(b1)
    val ys = listenFor(30.seconds)(b2)
    val d = (xs.length - ys.length).abs
    d <= 2 // Each of xs and ys could gain or lose one tick, for a total of 2
  }
  /* Check that `distinct` combinator works. */
  // Must match List.distinct: drop repeats while preserving first-seen order.
  property("distinct") = forAll(Gen.nonEmptyListOf(Gen.choose(-10L,10L))) { xs =>
    val input: Process[Task,Long] = Process.emitAll(xs)
    input.pipe(B.distinct).runLog.run.toList == xs.distinct
  }
  /* Check that publishing to a bufferedSignal is 'fast'. */
  // A coarse latency bound rather than a benchmark; retried to tolerate noise.
  property("bufferedSignal-profiling") = secure {
    def go: Boolean = {
      val N = 100000
      val (snk, s) = Monitoring.bufferedSignal(B.counter(0)).run
      val t0 = System.nanoTime
      (0 to N).toList.traverse_(x => snk(x)).run
      val expected = (0 to N).map(_.toDouble).sum
      while (s.continuous.once.runLastOr(0.0).run !== expected) {
        Thread.sleep(10)
      }
      val d = Duration.fromNanos(System.nanoTime - t0) / N.toDouble
      // println("Number of microseconds per event: " + d.toMicros)
      // I am seeing around 25 microseconds on avg
      d.toMicros < 1000
    }
    go || go || go // decrease false negative rate by retrying three times
  }
  /*
   * Counter and Gauge updates should be 'fast', and should work
   * with concurrent producers.
   */
  property("profiling") = secure {
    def go: Prop = {
      import instruments._
      val c = counter("uno")
      val ok = gauge("tres", false)
      val N = 1000000
      val t0 = System.nanoTime
      val S = scalaz.concurrent.Strategy.DefaultStrategy
      // Two concurrent producers each push N updates into the same counter.
      val f1 = S { (0 until N).foreach { _ =>
        c.increment
        ok.set(true)
      }}
      val f2 = S { (0 until N).foreach { _ =>
        c.increment
        ok.set(true)
      }}
      f1(); f2()
      val updateTime = Duration.fromNanos(System.nanoTime - t0) / N.toDouble
      val get: Task[Double] = Monitoring.default.latest(c.keys.now)
      // Spin until both producers' updates have been published.
      while (get.run != N*2) {
        // println("current count: " + get.run)
        Thread.sleep(10)
      }
      val publishTime = Duration.fromNanos(System.nanoTime - t0) / N.toDouble
      val okResult = Monitoring.default.latest(ok.keys.now).run
      Thread.sleep(instruments.bufferTime.toMillis * 2)
      //println("update time: " + updateTime)
      //println("publishTime: " + publishTime)
      //println("OK result:" + okResult)
      // I am seeing about 40.nanoseconds for update times,
      // 100 nanos for publishing
      (s"Gauge latency should be < 1 μs (was $updateTime)" |: (updateTime.toNanos < 1000)) &&
      (s"Publish latency should be < 2 μs (was $publishTime)" |: (publishTime.toNanos < 2000)) &&
      okResult
    }
    go || go || go
  }
  /* Make sure key senescence doesn't have quadratic complexity */
  // Times key expiry over doubling key counts and checks the growth ratio is
  // closer to linear (2x) than quadratic (4x) at each doubling.
  property("key-senesence") = secure {
    def go: Boolean = {
      val ranges = List(4, 8, 16, 32, 64, 128).map(Range(0, _))
      val times = ranges map { r =>
        val M = Monitoring.instance(windowSize = 30.seconds)
        val I = new Instruments(M)
        import I._
        val counters = r.toList.map(n => counter(s"test$n"))
        val t0 = System.nanoTime
        // Close the key stream once every key has expired.
        val ks = M.keys.discrete.dropWhile(_.isEmpty).evalMap(x =>
          if (x.isEmpty)
            M.keys.close
          else
            Task.now(())
        )
        M.keySenescence(Events.every(100.milliseconds), M.distinctKeys).zip(ks).run.run
        val t = System.nanoTime - t0
        t
      }
      times.zip(times.tail).foldLeft(true) {
        case (b, (t1, t2)) =>
          val dt = t2.toDouble / t1.toDouble
          // Doubling input size should have complexity closer to 2x than 4x
          b && (Math.abs(2-dt) < Math.abs(4-dt))
      }
    }
    go || go || go
  }
  /* Simple sanity check of a timer. */
  // The bound is deliberately loose (< 1s of error) since sleep timing and
  // publish buffering are both imprecise.
  property("timer-ex") = secure {
    def go: Boolean = {
      import instruments._
      val t = timer("uno")
      t.time { Thread.sleep(50) }
      // Make sure we wait for the time buffer to catch up
      Thread.sleep(instruments.bufferTime.toMillis * 2)
      val r = Monitoring.default.latest(t.keys.now).run.mean
      //println("Sleeping for 50ms took: " + r)
      r > 0 && (r - 50).abs < 1000
    }
    go || go || go
  }
  /* Make sure timer updates are 'fast'. */
  // Records the same fixed duration N times: checks per-update latency and
  // that the published mean equals that fixed duration exactly.
  property("timer-profiling") = secure {
    def go: Boolean = {
      import instruments._
      val t = timer("uno")
      val N = 1000000
      val t0 = System.nanoTime
      val d = (50.milliseconds)
      (0 until N).foreach { _ =>
        t.record(d)
      }
      val delta = System.nanoTime - t0
      val updateTime = (delta.nanoseconds) / N.toDouble
      // Make sure we wait for the time buffer to catch up
      Thread.sleep(instruments.bufferTime.toMillis * 2)
      val m = Monitoring.default.latest(t.keys.now).run.mean
      //println("timer:updateTime: " + updateTime + ", m: " + m)
      updateTime.toNanos < 1000 && m == 50
    }
    go || go || go
  }
  /* Make sure timers allow concurrent updates. */
  // Two threads record 1ms and 3ms respectively; the published mean must
  // converge to exactly 2ms, proving no updates were lost.
  property("concurrent-timing") = secure {
    def go: Boolean = {
      import instruments._
      val t = timer("uno")
      val N = 100000
      val S = scalaz.concurrent.Strategy.DefaultStrategy
      val t0 = System.nanoTime
      val d1 = (1.milliseconds); val d2 = (3.milliseconds)
      val f1 = S { (0 until N).foreach { _ =>
        t.record(d1)
      }}
      val f2 = S { (0 until N).foreach { _ =>
        t.record(d2)
      }}
      f1(); f2()
      val updateTime = Duration.fromNanos(System.nanoTime - t0) / N.toDouble
      Thread.sleep(200)
      // average time should be 2 millis
      val m = Monitoring.default.latest(t.keys.now).run.mean
      // println("average time: " + m)
      // println("timer:updateTime: " + updateTime)
      m === 2.0 && updateTime.toNanos < 1000
    }
    go || go || go
  }
  /** Check that when publishing, we get the count that was published. */
  // Spins until the topic's signal converges, since publication is async.
  property("pub/sub") = forAll(Gen.nonEmptyListOf(Gen.choose(1,10))) { a =>
    val M = Monitoring.default
    val (k, snk) = M.topic[Long,Double]("count", Units.Count, "")(B.ignoreTickAndTime(B.counter(0))).map(_.run)
    val count = M.get(k)
    a.traverse_(x => snk(x)).run
    val expected = a.sum
    var got = count.continuous.once.runLastOr(0.0).run
    while (got !== expected) {
      got = count.continuous.once.runLastOr(0.0).run
      Thread.sleep(10)
    }
    true
  }
  /*
   * Feed a counter concurrently from two different threads, making sure
   * the final count is the same as if we summed sequentially.
   */
  property("concurrent-counters-integration-test") = forAll(Gen.nonEmptyListOf(Gen.choose(-10,10))) { ab =>
    // this test takes about 45 seconds
    val (a,b) = ab.splitAt(ab.length / 2)
    val M = Monitoring.instance(windowSize = 30.seconds)
    val I = new Instruments(M)
    import I._
    val aN = counter("a")
    val bN = counter("b")
    val abN = counter("ab")
    val latest = Monitoring.snapshot(M)
    // Thread 1 feeds `a` into aN/abN while thread 2 feeds `b` into bN/abN.
    Nondeterminism[Task].both(
      Task { a.foreach { a => aN.incrementBy(a); abN.incrementBy(a) } },
      Task { b.foreach { b => bN.incrementBy(b); abN.incrementBy(b) } }
    ).run
    val expectedA: Double = a.map(_.toDouble).sum
    val expectedB: Double = b.map(_.toDouble).sum
    val expectedAB: Double = ab.map(_.toDouble).sum
    // Poll until all three counters have been published.
    @annotation.tailrec
    def go(): Unit = {
      val gotA: Double = M.latest(aN.keys.now).run
      val gotB: Double = M.latest(bN.keys.now).run
      val gotAB: Double = M.latest(abN.keys.now).run
      if ((gotA !== expectedA) || (gotB !== expectedB) || (gotAB !== expectedAB)) {
        // println("sleeping")
        // println(s"a: $gotA, b: $gotB, ab: $gotAB")
        Thread.sleep(10)
        go()
      }
    }
    go()
    val t0 = System.currentTimeMillis
    val m = latest.run
    val millis = System.currentTimeMillis - t0
    // println(s"snapshot took: $millis")
    (m(aN.keys.now).value.asInstanceOf[Double] === expectedA) &&
    (m(bN.keys.now).value.asInstanceOf[Double] === expectedB) &&
    (m(abN.keys.now).value.asInstanceOf[Double] === expectedAB)
  }
// This takes too long to run.
// Commenting out for now. -- Runar
/* property("derived-metrics") = forAll(Gen.nonEmptyListOf(Gen.choose(-10,10))) { ls0 =>
val ls = ls0.take(50)
implicit val M = Monitoring.instance
val I = new Instruments(5.minutes, M); import I._
val a = counter("a")
val b = counter("b")
val ab = Metric.apply2(a.key, b.key)(_ + _)
val kab1 = ab.publishEvery(30.milliseconds)("sum:ab-1", Units.Count)
val kab2 = ab.publishOnChange(a.key)("sum:ab-2", Units.Count)
val kab3 = ab.publishOnChanges(a.key, b.key)("sum:ab-3", Units.Count)
Strategy.Executor(Monitoring.defaultPool) {
ls.foreach(a.incrementBy)
}
Strategy.Executor(Monitoring.defaultPool) {
ls.foreach(b.incrementBy)
}
val expected = ls.map(_.toDouble).sum * 2
def go(rounds: Int): Prop = {
Thread.sleep(30)
val ab1r = M.latest(kab1).run
val ab2r = M.latest(kab2).run
val ab3r = M.latest(kab3).run
// since ab2r is only refreshed when `a` changes, we
// artifically refresh `a`, otherwise this test would
// have a race condition if `a` completed before `b`
if (ab2r != expected) a.incrementBy(0)
// println((ab1r, ab2r, ab3r))
(ab1r === ab2r) && (ab2r === ab3r) && (ab3r === expected) || {
if (rounds == 0) "results: " + (ab1r, ab2r, ab3r).toString |: false
else go(rounds - 1)
}
}
go(15)
}*/
// Commenting out since I don't know what this is testing
// and it doesn't seem to work. -- Runar
/*property("aggregate") = secure {
List(List(), List(1), List(-1,1), List.range(0,100)).forall { xs =>
val M = Monitoring.instance
val I = new Instruments(5.minutes, M)
val counters = xs.zipWithIndex.map { case (x,i) =>
val c = I.counter(s"count/$i")
c.incrementBy(x)
c
}
val family = Key[Double]("now/count", Units.Count)
val out = Key[Double]("sum", Units.Count)
M.aggregate(family, out)(Events.takeEvery(15.milliseconds, 50))(_.sum).run
Thread.sleep(1000)
// println("xs: " + xs)
val l = M.latest(out).run
val r = xs.map(_.toDouble).sum
l === r || { println((l, r)); false }
}
}*/
property("TrafficLight.quorum") = secure {
import TrafficLight._
quorum(1)(List()) == Red &&
quorum(1)(List(Red)) == Red &&
quorum(1)(List(Green)) == Green &&
quorum(1)(List(Amber)) == Green &&
quorum(1)(List(Red,Green)) == Amber &&
quorum(1)(List(Amber,Green)) == Green &&
quorum(1)(List(Green,Green)) == Green &&
quorum(2)(List(Green,Green,Green)) == Green &&
quorum(2)(List(Green,Amber,Green)) == Green &&
quorum(2)(List(Red,Amber,Green)) == Amber &&
quorum(2)(List(Red,Red,Green)) == Red
}
property("TrafficLight.fraction") = secure {
import TrafficLight._
fraction(.5)(List()) == Green &&
fraction(.5)(List(Red)) == Red &&
fraction(.5)(List(Green)) == Green &&
fraction(.5)(List(Amber)) == Green &&
fraction(.5)(List(Red,Green)) == Amber &&
fraction(.5)(List(Amber,Green)) == Green &&
fraction(.5)(List(Green,Green)) == Green &&
fraction(.67)(List(Green,Green,Green)) == Green &&
fraction(.67)(List(Green,Amber,Green)) == Green && true
fraction(.66)(List(Red,Amber,Green)) == Amber &&
fraction(.68)(List(Red,Amber,Green)) == Red &&
fraction(.66)(List(Red,Red,Green)) == Red &&
fraction(.5)(List(Red,Red,Green,Green)) == Amber
}
  /* Simple sanity check of LapTimer. */
  // Exercises every recording entry point (record, recordNanos, start/stop,
  // time, timeFuture, timeTask) — six laps, so the counter must read 6.
  property("LapTimer.basic") = secure {
    def go: Boolean = {
      import instruments._
      import scala.concurrent.ExecutionContext.Implicits.global
      import scala.concurrent._
      import scala.language.postfixOps
      val label = "laptimer"
      val t = timer(label)
      val c = counter(label)
      val lt = new LapTimer(t, c)
      lt.record(50 milliseconds)
      lt.recordNanos(50000)
      val stop = lt.start
      Thread.sleep(50)
      lt.stop(stop)
      lt.time { Thread.sleep(50) }
      lt.timeFuture(Future { Thread.sleep(50); None })
      lt.timeTask(Task { Thread.sleep(50); None }).run
      // Make sure we wait for the time buffer to catch up
      Thread.sleep(instruments.bufferTime.toMillis * 2)
      val m = Monitoring.default
      val r1 = m.latest(t.keys.now).run.mean
      val r2 = m.latest((c.keys.now)).run
      r1 > 0 && (r1 - 50).abs < 1000 && r2 == 6
    }
    go || go || go
  }
  /* Make sure timers allow concurrent updates. */
  // Same shape as "concurrent-timing", but through a LapTimer: mean must be
  // exactly 2ms and the lap counter must equal the total number of records.
  property("LapTimer.concurrent") = secure {
    def go: Boolean = {
      import instruments._
      val label = "laptimer"
      val t = timer(label)
      val c = counter(label)
      val lt = new LapTimer(t, c)
      val N = 100000
      val S = scalaz.concurrent.Strategy.DefaultStrategy
      val t0 = System.nanoTime
      val d1 = (1.milliseconds); val d2 = (3.milliseconds)
      val f1 = S { (0 until N).foreach { _ =>
        lt.record(d1)
      }}
      val f2 = S { (0 until N).foreach { _ =>
        lt.record(d2)
      }}
      f1(); f2()
      val updateTime = Duration.fromNanos(System.nanoTime - t0) / N.toDouble
      Thread.sleep(200)
      // average time should be 2 millis
      val m = Monitoring.default
      val r1 = m.latest(t.keys.now).run.mean
      val r2 = m.latest((c.keys.now)).run
      r1 === 2.0 && updateTime.toNanos < 1000 && r2 == N*2
    }
    go || go || go
  }
}
| neigor/funnel | core/src/test/scala/MonitoringSpec.scala | Scala | apache-2.0 | 19,975 |
package enigma
// primary difference here is that `Reflector` has no notch, and is always configured
// with symetric wiring tables. i.e. A -> Z, Z -> A. Reflectors also do not have
// configurable rings; they were fixed and came in preset variants: A, B and C
// Known as Umkehrwalze in german.
case class Reflector(wiring: String){
  // Wiring table: the i-th letter of the alphabet is mapped to wiring(i).
  // Because reflector wirings are symmetric (A->Z implies Z->A), a single
  // forward map suffices.
  val mapping: Map[Char,Char] =
    wiring.indices.map(i => Alphabet.ordered(i) -> wiring(i)).toMap
  // Throws NoSuchElementException for characters outside the wiring table.
  def transform(c: Char): Char = mapping(c)
}
// The three fixed reflector wirings (variants A, B and C); reflectors had no
// configurable ring setting, so each is fully described by its wiring string.
object Reflectors {
  val A = Reflector("EJMZALYXVBWFCRQUONTSPIKHGD")
  val B = Reflector("YRUHQSLDPXNGOKMIEBFZCWVJAT")
  val C = Reflector("FVPJIAOYEDRZXWGCTKUQSBNMHL")
}
| timperrett/enigma | src/main/scala/enigma/Reflector.scala | Scala | apache-2.0 | 713 |
package coursier.benchmark
import java.util.concurrent.TimeUnit
import coursier.Resolve
import org.openjdk.jmh.annotations._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
// JMH benchmark suite: reports the average wall time (in milliseconds) of
// running a full coursier resolution for two prepared dependency graphs.
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
class ResolutionTests {
  // Resolves the spark-sql graph prepared in TestState.
  @Benchmark
  def sparkSql(state: TestState): Unit = {
    val t = Resolve.runProcess(state.initialSparkSqlRes, state.fetch)
    // Blocking is intentional here: JMH must measure the complete resolution.
    Await.result(t.future()(state.ec), Duration.Inf)
  }
  // Resolves the coursier-cli graph prepared in TestState.
  @Benchmark
  def coursierCli(state: TestState): Unit = {
    val t = Resolve.runProcess(state.initialCoursierCliRes, state.fetch)
    // Blocking is intentional here: JMH must measure the complete resolution.
    Await.result(t.future()(state.ec), Duration.Inf)
  }
}
| alexarchambault/coursier | modules/benchmark/src/main/scala/coursier/benchmark/ResolutionTests.scala | Scala | apache-2.0 | 681 |
package scodec.protocols.mpeg
import scodec.bits._
import scodec.Codec
import scodec.codecs._
import shapeless._
/** The 2-bit PES_scrambling_control field of a PES packet header. */
sealed abstract class PesScramblingControl
object PesScramblingControl {
  object NotScrambled extends PesScramblingControl
  object UserDefined1 extends PesScramblingControl
  object UserDefined2 extends PesScramblingControl
  object UserDefined3 extends PesScramblingControl
  // Bit patterns per the mapping below: 00 = not scrambled, 01/10/11 user defined.
  implicit val codec: Codec[PesScramblingControl] = mappedEnum(bits(2),
    NotScrambled -> bin"00",
    UserDefined1 -> bin"01",
    UserDefined2 -> bin"10",
    UserDefined3 -> bin"11")
}
/**
 * Parsed PES (Packetized Elementary Stream) packet header.
 *
 * Each `Option` field is present only when the corresponding bit in `flags`
 * is set; see the codec in the companion object for the exact wire layout.
 */
case class PesPacketHeader(
  pesScramblingControl: PesScramblingControl,
  pesPriority: Boolean,
  dataAlignmentIndicator: Boolean,
  copyright: Boolean,
  originalOrCopy: Boolean,
  flags: PesPacketHeader.Flags, // TODO
  pts: Option[Long], // presentation timestamp, present when flags.ptsFlag
  dts: Option[Long], // decode timestamp, present when flags.dtsFlag
  escr: Option[Long], // elementary stream clock reference (base * 300 + extension)
  esRate: Option[Int], // elementary stream rate, 22-bit value
  dsmTrickMode: Option[BitVector], // raw 8 bits of trick-mode data
  additionalCopyInfo: Option[BitVector], // raw 7 bits of copy info
  pesCrc: Option[Int], // 16-bit CRC of the previous PES packet
  extension: Option[PesPacketHeader.Extension]
)
object PesPacketHeader {
  // One bit per optional header field; these flags drive the `conditional`
  // codecs in the main codec below.
  case class Flags(
    ptsFlag: Boolean,
    dtsFlag: Boolean,
    escrFlag: Boolean,
    esRateFlag: Boolean,
    dsmTrickModeFlag: Boolean,
    additionalCopyInfoFlag: Boolean,
    pesCrcFlag: Boolean,
    pesExtensionFlag: Boolean
  )
  object Flags {
    implicit val codec: Codec[Flags] = {
      ("pts_dts_flags[0]" | bool ) ::
        ("pts_dts_flags[1]" | bool ) ::
        ("escr_flag" | bool ) ::
        ("es_rate_flag" | bool ) ::
        ("dsm_trick_mode_flag" | bool ) ::
        ("additional_copy_info_flag" | bool ) ::
        ("pes_crc_flag" | bool ) ::
        ("pes_extension_flag" | bool )
    }.as[Flags]
  }
  // One bit per optional field of the header extension (see Extension below).
  case class ExtensionFlags(
    pesPrivateDataFlag: Boolean,
    packHeaderFieldFlag: Boolean,
    programPacketSequenceCounterFlag: Boolean,
    pstdBufferFlag: Boolean,
    pesExtensionFlag2: Boolean
  )
  object ExtensionFlags {
    implicit val codec: Codec[ExtensionFlags] = {
      ("pes_private_data_flag" | bool ) ::
        ("pack_header_field_flag" | bool ) ::
        ("program_packet_sequence_counter_flag" | bool ) ::
        ("P-STD_buffer_flag" | bool ) ::
        reserved(3) ::
        ("pes_extension_flag_2" | bool )
    }.dropUnits.as[ExtensionFlags]
  }
  // 7-bit counter plus MPEG-1 indicator and 6-bit original stuffing length;
  // marker bits precede each group on the wire.
  case class ProgramPacketSequenceCounter(counter: Int, mpeg1: Boolean, originalStuffLength: Int)
  object ProgramPacketSequenceCounter {
    implicit val codec: Codec[ProgramPacketSequenceCounter] = {
      (marker :: uint(7) :: marker :: bool :: uint(6)).dropUnits.as[ProgramPacketSequenceCounter]
    }
  }
  // P-STD buffer descriptor: scale flag plus 13-bit size, prefixed by '01'.
  case class PStdBuffer(scale: Boolean, size: Int)
  object PStdBuffer {
    implicit val codec: Codec[PStdBuffer] = {
      (constant(bin"01") :~>: bool :: uint(13)).as[PStdBuffer]
    }
  }
  // Optional extension section of the header; each Option is gated by the
  // corresponding bit in `flags`.
  case class Extension(
    flags: ExtensionFlags, // TODO
    pesPrivateData: Option[BitVector],
    packHeaderField: Option[BitVector],
    programPacketSequenceCounter: Option[ProgramPacketSequenceCounter],
    pstdBuffer: Option[PStdBuffer],
    extension: Option[BitVector]
  )
  object Extension {
    implicit val codec: Codec[Extension] = {
      // Decode the flag byte first, then decode each optional field only when
      // its flag bit is set.
      Codec[ExtensionFlags] >>:~ { flags =>
        ("pes_private_data" | conditional(flags.pesPrivateDataFlag, bits(128))) ::
          ("pack_header_field" | conditional(flags.packHeaderFieldFlag, variableSizeBytes(uint8, bits))) ::
          ("program_packet_sequence_counter" | conditional(flags.programPacketSequenceCounterFlag, Codec[ProgramPacketSequenceCounter])) ::
          ("P-STD_buffer" | conditional(flags.pstdBufferFlag, Codec[PStdBuffer])) ::
          ("pes_extension_2" | conditional(flags.pesExtensionFlag2, marker ~> variableSizeBytes(uint(7), bits)))
      }
    }.as[Extension]
  }
  // A single '1' marker bit; lenient so decoding tolerates a flipped marker.
  private val marker: Codec[Unit] = constantLenient(bin"1")
  // 33-bit timestamp split into 3 + 15 + 15 bit groups, each followed by a
  // marker bit. `prefix` distinguishes PTS (0011) from DTS (0001) below.
  private def tsCodec(prefix: BitVector) = {
    (constant(prefix) :: bits(3) :: marker :: bits(15) :: marker :: bits(15) :: marker).dropUnits.xmap[Long](
      { case a :: b :: c :: HNil => (a ++ b ++ c).toLong() },
      l => {
        // Keep the low 33 bits and split them back into 3/15/15 groups.
        val b = BitVector.fromLong(l).drop(31)
        b.take(3) :: b.drop(3).take(15) :: b.drop(18) :: HNil
      }
    )
  }
  // ESCR: 33-bit base (split 3/15/15 like a timestamp) times 300 plus a
  // 9-bit extension, with marker bits interleaved.
  private val escrCodec: Codec[Long] = {
    (ignore(2) :: bits(3) :: marker :: bits(15) :: marker :: bits(15) :: marker :: uint(9) :: marker).dropUnits.xmap[Long](
      { case a :: b :: c :: ext :: HNil =>
        val base = (a ++ b ++ c).toLong()
        base * 300 + ext
      },
      l => {
        val base = (l / 300) % (2L << 32)
        val b = BitVector.fromLong(base).drop(31)
        val ext = (l % 300).toInt
        b.take(3) :: b.drop(3).take(15) :: b.drop(18) :: ext :: HNil
      }
    )
  }
  implicit val codec: Codec[PesPacketHeader] = {
    // Fixed '10' prefix, fixed-position flag bits, then the flags byte which
    // gates the optional fields. The optional fields live inside a length-
    // prefixed region (PES_header_data_length, one byte).
    constant(bin"10") :~>:
      ("pes_scrambling_control" | Codec[PesScramblingControl] ) ::
      ("pes_priority" | bool ) ::
      ("data_alignment_indicator" | bool ) ::
      ("copyright" | bool ) ::
      ("original_or_copy" | bool ) ::
      (("flags" | Codec[Flags] ) >>:~ { flags =>
      variableSizeBytes(uint8,
      ("pts" | conditional(flags.ptsFlag, tsCodec(bin"0011")) ) ::
      ("dts" | conditional(flags.dtsFlag, tsCodec(bin"0001")) ) ::
      ("escr" | conditional(flags.escrFlag, escrCodec) ) ::
      ("es_rate" | conditional(flags.esRateFlag, ignore(1) ~> uint(22) <~ ignore(1))) ::
      ("dsm_trick_mode" | conditional(flags.dsmTrickModeFlag, bits(8)) ) ::
      ("additional_copy_info" | conditional(flags.additionalCopyInfoFlag, ignore(1) ~> bits(7)) ) ::
      ("pes_crc" | conditional(flags.pesCrcFlag, uint16) ) ::
      ("extension" | conditional(flags.pesExtensionFlag, Codec[Extension]) )
      )
    }) // .removeElem[Flags](Generic[Flags].from(optionalFields.map(_.isDefined)))
  }.withContext("pes_packet_header").as[PesPacketHeader]
}
| jrudnick/scodec-protocols | src/main/scala/scodec/protocols/mpeg/PesPacketHeader.scala | Scala | bsd-3-clause | 6,925 |
package org.dele.text.lapa.patterns
import TLangPattern.LangPatternGroupFromPatterns
import org.dele.text.maen.AtomPropMatcherLib._
import org.dele.text.maen.TInput
import org.dele.text.maen.extracts.Extract
import org.dele.text.maen.matchers.MatcherManager
import org.dele.text.maen.matchers.TMatcher._
import org.dele.text.maen.test.TestAtom._
import org.dele.text.lapa.TestHelper
import org.scalatest.ShouldMatchers
import org.scalatest.testng.TestNGSuite
import org.testng.annotations.Test
/**
* Created by jiaji on 2016-02-15.
*/
class AppliedPatternsTest extends TestNGSuite with ShouldMatchers {
  import org.dele.text.lapa.TestHelper._
  import org.dele.text.maen.test.TestInput._
  // Extraction definitions under test: one extract-def set for the
  // "-non-intrusive-online-attack" domain with attacker/target/indicator roles.
  val _extractDef =
    """
      |{
      |  "extractDefSets": [
      |    {
      |      "domain": "-non-intrusive-online-attack",
      |      "extractDefs": [
      |        {
      |          "extractName": "_attacker",
      |          "matcherId": "ALL-ATTACKERS.OrgAttacker",
      |          "atomMatcherDef": "E(Organization)"
      |        },
      |        {
      |          "extractName": "_target",
      |          "matcherId": "ALL-TARGETS.OrgCmpTarget",
      |          "atomMatcherDef": "E(Company | Organization)"
      |        },
      |        {
      |          "extractName": "_indicator",
      |          "matcherId": "pattern1"
      |        }
      |      ]
      |    }
      |  ]
      |}
    """.stripMargin
  val extractDefs = Extract.fromJson(_extractDef)
  val cyberEventExtractDefSet = extractDefs.getExtractDefSet("-non-intrusive-online-attack")
  implicit val smcLib = EmptySubMatchCheckerLib
  import DomainStructure._
  // Expected language sets used by the assertions below.
  val LangEngZhs = Set("eng", "zhs")
  val LangZhs = Set("zhs")
  // Verifies the supported-language sets that pattern groups expose, both at
  // group level and per individual pattern.
  @Test
  def testPatternSupportedLang = {
    //val ptnId1 = engDomainMgr.getFullId(testPatternDomainId, testPatternId)
    val ptnGrp = patternDomain.patternGroupJsonById(testPatternId).get
    ptnGrp.languages shouldBe LangEngZhs
    val patternsGroup = ptnGrp.asInstanceOf[LangPatternGroupFromPatterns].toPatternGroup
    patternsGroup.patterns.forall(_.supportedLanguages == LangEngZhs) shouldBe true
    val ptnGrp2 = patternDomain.patternGroupJsonById("[METHOD]-attack").get
    val patternsGroup2 = ptnGrp2.asInstanceOf[LangPatternGroupFromPatterns].toPatternGroup
    patternsGroup2.languages shouldBe LangEngZhs
    patternsGroup2.patterns.forall(_.supportedLanguages == LangEngZhs) shouldBe true
    val ptnGrp3 = patternDomain.patternGroupJsonById("ALL-ATTACKERS").get
    val patternsGroup3 = ptnGrp3.asInstanceOf[LangPatternGroupFromPatterns].toPatternGroup
    patternsGroup3.languages shouldBe LangEngZhs
    // Within ALL-ATTACKERS the first pattern supports both languages, the
    // second only Simplified Chinese.
    patternsGroup3.patterns(0).supportedLanguages shouldBe LangEngZhs
    patternsGroup3.patterns(1).supportedLanguages shouldBe LangZhs
  }
  // Expected default line-type lists used by the assertions below.
  val LineTypeLNGAndClause1 = List("lngChecker", "clause1")
  val LineTypeLNG = List("lngChecker")
  // Verifies the default line types a group declares and how individual
  // patterns inherit or override them.
  @Test
  def testDefaultLineTypes = {
    //val ptnId1 = engDomainMgr.getFullId(testPatternDomainId, testPatternId)
    val ptnGrp2 = patternDomain.patternGroupJsonById("[METHOD]-attack").get
    val patternsGroup2 = ptnGrp2.asInstanceOf[LangPatternGroupFromPatterns].toPatternGroup
    patternsGroup2.defLineTypes shouldBe LineTypeLNGAndClause1
    patternsGroup2.patterns.forall(_.defaultLineTypes == LineTypeLNGAndClause1) shouldBe true
    val ptnGrp3 = patternDomain.patternGroupJsonById("ALL-ATTACKERS").get
    val patternsGroup3 = ptnGrp3.asInstanceOf[LangPatternGroupFromPatterns].toPatternGroup
    patternsGroup3.defLineTypes shouldBe LineTypeLNGAndClause1
    patternsGroup3.patterns(0).defaultLineTypes shouldBe LineTypeLNG
    patternsGroup3.patterns(1).defaultLineTypes shouldBe LineTypeLNGAndClause1
  }
  import LangPatternGroupTemplate._
  import Extract._
  // End-to-end run: applies the pattern domain for English and for Simplified
  // Chinese, matches sample inputs, and prints the extracted events.
  // NOTE(review): this test only exercises the pipeline and prints results;
  // it has no assertions beyond the initial language check.
  @Test
  def t1 = {
    engAppliedPatterns.lang shouldBe("eng")
    val domainPatternId = engDomainMgr.getFullId(testPatternDomainId, testPatternId)
    val mm = MatcherManager.create
    engAppliedPatterns.applyDomain(engDomainMgr, TMatcherGen.NoMatcherTemplateLibGen, patternDomain, EmptyPatternTemplateLib, EmptyRegexDict).foreach(mm.add)
    //val domain = patternDomain.id
    //val orgList = d(domain, "org-list")
    //val orgCmpList = d(domain, "org-cmp-list")
    //val countryList = d(domain, "country-list")
    //val attackWordList = d(domain, "attack-words")
    //val againstWordList = d(domain, "against-words")
    //val launchWordList = d(domain, "launch-words")
    //engAppliedPatterns.applyPatternGroup(patternGroup2).foreach(mm.add)
    //engAppliedPatterns.applyPatternGroup(patternGroup3).foreach(mm.add)
    //engAppliedPatterns.applyPatternGroup(patternGroup4).foreach(mm.add)
    //mm.add(fromAtomMatcher(E(Array("Organization")), Option(orgList)))
    //mm.add(fromAtomMatcher(E(Array("Company", "Organization")), Option(orgCmpList)))
    //mm.add(fromAtomMatcher(E(Array("Country")), Option(countryList)))
    engDomainMgr.listMatchers(TMatcherGen.NoMatcherTemplateLibGen, EmptyRegexDict).foreach(mm.add)
    //mm.add(fromAtomMatcher(F("attack"), Option(attackWordList)))
    //mm.add(fromAtomMatcher(F("against"), Option(againstWordList)))
    //mm.add(fromAtomMatcher(F("launch"), Option(launchWordList)))
    val resultPool = mm.m(engInput1, EmptySubMatchCheckerLib, MatcherManager.EmptyMIdFilters)
    val events = resultPool.query(domainPatternId)
    events.foreach(
      m => {
        val ex = cyberEventExtractDefSet.run(m, EmptyRelatedEntityCheckerIds)
        println(ex.mkString(" "))
      }
    )
    // Same pipeline for the Simplified Chinese domain, over two inputs.
    val zhsmm = MatcherManager.create
    zhsAppliedPatterns.applyDomain(zhsDomainMgr, TMatcherGen.NoMatcherTemplateLibGen, patternDomain, EmptyPatternTemplateLib, EmptyRegexDict).foreach(zhsmm.add)
    //zhsAppliedPatterns.applyPatternGroup(patternGroup2).foreach(zhsmm.add)
    //zhsAppliedPatterns.applyPatternGroup(patternGroup3).foreach(zhsmm.add)
    //zhsAppliedPatterns.applyPatternGroup(patternGroup4).foreach(zhsmm.add)
    //zhsmm.add(fromAtomMatcher(E(Array("Organization")), Option(orgList)))
    //zhsmm.add(fromAtomMatcher(E(Array("Company", "Organization")), Option(orgCmpList)))
    //zhsmm.add(fromAtomMatcher(E(Array("Country")), Option(countryList)))
    //zhsmm.add(fromAtomMatcher(F("攻击"), Option(attackWordList)))
    //zhsmm.add(fromAtomMatcher(F("针对"), Option(againstWordList)))
    //zhsmm.add(fromAtomMatcher(F("发起"), Option(launchWordList)))
    zhsDomainMgr.listMatchers(TMatcherGen.NoMatcherTemplateLibGen, EmptyRegexDict).foreach(zhsmm.add)
    var zhsResultPool = zhsmm.m(zhsInput1, EmptySubMatchCheckerLib, MatcherManager.EmptyMIdFilters)
    var zhsEvents = zhsResultPool.query(domainPatternId)
    zhsEvents.foreach(
      m => {
        val ex = cyberEventExtractDefSet.run(m, EmptyRelatedEntityCheckerIds)
        println(ex.mkString(" "))
      }
    )
    zhsResultPool = zhsmm.m(zhsInput2, EmptySubMatchCheckerLib, MatcherManager.EmptyMIdFilters)
    zhsEvents = zhsResultPool.query(domainPatternId)
    zhsEvents.foreach(
      m => {
        val ex = cyberEventExtractDefSet.run(m, EmptyRelatedEntityCheckerIds)
        println(ex.mkString(" "))
      }
    )
  }
}
| new2scala/text-util | lapa/src/test/scala/org/dele/text/lapa/patterns/AppliedPatternsTest.scala | Scala | apache-2.0 | 7,056 |
package com.signalcollect.bp
import com.signalcollect._
/**
* The vertex representing the factors.
* @param id
* @param initialState
*/
class LogFactorVertex(id: String,
initialState: Distribution,
utility: Distribution)
extends DataGraphVertex(id, initialState) {
type Signal = Distribution
def collect = signals.reduce(_ + _)
def signalMap = mostRecentSignalMap
}
/**
 * The vertex representing a factor: incoming signal distributions are
 * multiplied together and the product is weighted by this factor's utility.
 *
 * @param id unique identifier of this vertex
 * @param initialState initial distribution held by this vertex
 * @param utility utility distribution applied to the combined signals
 */
class FactorVertex(id: String,
  initialState: Distribution,
  utility: Distribution)
  extends DataGraphVertex(id, initialState) {
  type Signal = Distribution
  // Product of all incoming distributions, weighted by the utility function.
  def collect = Distribution((signals.reduce(_ * _)).f * utility.f)
  def signalMap = mostRecentSignalMap
}
| saramagliacane/signal-collect-mln | src/main/scala/com/signalcollect/bp/FactorVertex.scala | Scala | apache-2.0 | 732 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
package inc
import xsbti.api.Source
import java.io.File
/**
* The merge/groupBy functionality requires understanding of the concepts of internalizing/externalizing dependencies:
*
* Say we have source files X, Y. And say we have some analysis A_X containing X as a source, and likewise for A_Y and Y.
* If X depends on Y then A_X contains an external dependency X -> Y.
*
* However if we merge A_X and A_Y into a combined analysis A_XY, then A_XY contains X and Y as sources, and therefore
* X -> Y must be converted to an internal dependency in A_XY. We refer to this as "internalizing" the dependency.
*
* The reverse transformation must occur if we group an analysis A_XY into A_X and A_Y, so that the dependency X->Y
* crosses the boundary. We refer to this as "externalizing" the dependency.
*
* These transformations are complicated by the fact that internal dependencies are expressed as source file -> source file,
* but external dependencies are expressed as source file -> fully-qualified class name.
*/
trait Analysis {
  /** Stamps (e.g. timestamps/hashes) of sources, products, and binaries, used to detect changes. */
  val stamps: Stamps
  /** Extracted API information for internal sources and external dependencies. */
  val apis: APIs
  /** Mappings between sources, classes, and binaries. */
  val relations: Relations
  /** Per-source compile information (e.g. problems reported for each source). */
  val infos: SourceInfos
  /**
   * Information about compiler runs accumulated since `clean` command has been run.
   *
   * The main use-case for using `compilations` field is to determine how
   * many iterations it took to compilen give code. The `Compilation` object
   * are also stored in `Source` objects so there's an indirect way to recover
   * information about files being recompiled in every iteration.
   *
   * The incremental compilation algorithm doesn't use information stored in
   * `compilations`. It's safe to prune contents of that field without breaking
   * internal consistency of the entire Analysis object.
   */
  val compilations: Compilations
  /** Concatenates Analysis objects naively, i.e., doesn't internalize external deps on added files. See `Analysis.merge`. */
  def ++(other: Analysis): Analysis
  /** Drops all analysis information for `sources` naively, i.e., doesn't externalize internal deps on removed files. */
  def --(sources: Iterable[File]): Analysis
  /** Returns a copy of this Analysis with any subset of its components replaced. */
  def copy(stamps: Stamps = stamps, apis: APIs = apis, relations: Relations = relations, infos: SourceInfos = infos,
    compilations: Compilations = compilations): Analysis
  /** Records a compiled source `src`: its API, stamp, direct/inherited internal deps, and compile info. */
  def addSource(src: File, api: Source, stamp: Stamp, directInternal: Iterable[File], inheritedInternal: Iterable[File], info: SourceInfo): Analysis
  /** Records that `src` depends on binary `dep` containing class `className`, with the given stamp. */
  def addBinaryDep(src: File, dep: File, className: String, stamp: Stamp): Analysis
  /** Records that `src` depends on external class `dep` with API `api`; `inherited`/`fromMacro` qualify the dependency. */
  def addExternalDep(src: File, dep: String, api: Source, inherited: Boolean, fromMacro: Boolean): Analysis
  /** Records that compiling `src` produced class file `product` containing class `name`. */
  def addProduct(src: File, product: File, stamp: Stamp, name: String): Analysis
  /** Partitions this Analysis using the discriminator function. Externalizes internal deps that cross partitions. */
  def groupBy[K](discriminator: (File => K)): Map[K, Analysis]
  override lazy val toString = Analysis.summary(this)
}
object Analysis {
  /** An Analysis with no sources, products, or dependencies. */
  lazy val Empty: Analysis = new MAnalysis(Stamps.empty, APIs.empty, Relations.empty, SourceInfos.empty, Compilations.empty)
  /** An empty Analysis whose relations use the given name-hashing mode. */
  private[sbt] def empty(nameHashing: Boolean): Analysis = new MAnalysis(Stamps.empty, APIs.empty,
    Relations.empty(nameHashing = nameHashing), SourceInfos.empty, Compilations.empty)
  /** Merge multiple analysis objects into one. Deps will be internalized as needed. */
  def merge(analyses: Traversable[Analysis]): Analysis = {
    if (analyses.exists(_.relations.nameHashing))
      // Fixed: the original message was missing the space between "have" and
      // the backquoted setting name.
      throw new IllegalArgumentException("Merging of Analyses that have " +
        "`relations.memberRefAndInheritanceDeps` set to `true` is not supported.")
    // Merge the Relations, internalizing deps as needed.
    val mergedSrcProd = Relation.merge(analyses map { _.relations.srcProd })
    val mergedBinaryDep = Relation.merge(analyses map { _.relations.binaryDep })
    val mergedClasses = Relation.merge(analyses map { _.relations.classes })
    // Direct deps: any external dep whose target class is now defined by one of
    // the merged sources must be internalized (class name -> defining file).
    val stillInternal = Relation.merge(analyses map { _.relations.direct.internal })
    val (internalized, stillExternal) = Relation.merge(analyses map { _.relations.direct.external }) partition { case (a, b) => mergedClasses._2s.contains(b) }
    val internalizedFiles = Relation.reconstruct(internalized.forwardMap mapValues { _ flatMap mergedClasses.reverse })
    val mergedInternal = stillInternal ++ internalizedFiles
    // Same internalization for the public-inherited dependency relation.
    val stillInternalPI = Relation.merge(analyses map { _.relations.publicInherited.internal })
    val (internalizedPI, stillExternalPI) = Relation.merge(analyses map { _.relations.publicInherited.external }) partition { case (a, b) => mergedClasses._2s.contains(b) }
    val internalizedFilesPI = Relation.reconstruct(internalizedPI.forwardMap mapValues { _ flatMap mergedClasses.reverse })
    val mergedInternalPI = stillInternalPI ++ internalizedFilesPI
    val mergedRelations = Relations.make(
      mergedSrcProd,
      mergedBinaryDep,
      Relations.makeSource(mergedInternal, stillExternal),
      Relations.makeSource(mergedInternalPI, stillExternalPI),
      mergedClasses
    )
    // Merge the APIs, internalizing APIs for targets of dependencies we internalized above.
    val concatenatedAPIs = (APIs.empty /: (analyses map { _.apis }))(_ ++ _)
    val stillInternalAPIs = concatenatedAPIs.internal
    val (internalizedAPIs, stillExternalAPIs) = concatenatedAPIs.external partition { x: (String, Source) => internalized._2s.contains(x._1) }
    val internalizedFilesAPIs = internalizedAPIs flatMap {
      case (cls: String, source: Source) => mergedRelations.definesClass(cls) map { file: File => (file, concatenatedAPIs.internalAPI(file)) }
    }
    val mergedAPIs = APIs(stillInternalAPIs ++ internalizedFilesAPIs, stillExternalAPIs)
    val mergedStamps = Stamps.merge(analyses map { _.stamps })
    val mergedInfos = SourceInfos.merge(analyses map { _.infos })
    val mergedCompilations = Compilations.merge(analyses map { _.compilations })
    new MAnalysis(mergedStamps, mergedAPIs, mergedRelations, mergedInfos, mergedCompilations)
  }
  /** Human-readable one-line summary of an Analysis (used by `Analysis.toString`). */
  def summary(a: Analysis): String =
    {
      val (j, s) = a.apis.allInternalSources.partition(_.getName.endsWith(".java"))
      val c = a.stamps.allProducts
      val ext = a.apis.allExternals
      val jars = a.relations.allBinaryDeps.filter(_.getName.endsWith(".jar"))
      val unreportedCount = a.infos.allInfos.values.map(_.unreportedProblems.size).sum
      val sections =
        counted("Scala source", "", "s", s.size) ++
        counted("Java source", "", "s", j.size) ++
        counted("class", "", "es", c.size) ++
        counted("external source dependenc", "y", "ies", ext.size) ++
        counted("binary dependenc", "y", "ies", jars.size) ++
        counted("unreported warning", "", "s", unreportedCount)
      sections.mkString("Analysis: ", ", ", "")
    }
  /** Pluralized count: None when zero, "1 <prefix><single>" for one, "<n> <prefix><plural>" otherwise. */
  def counted(prefix: String, single: String, plural: String, count: Int): Option[String] =
    count match {
      case 0 => None
      case 1 => Some("1 " + prefix + single)
      case x => Some(x.toString + " " + prefix + plural)
    }
}
/** Default immutable implementation of [[Analysis]]; every update returns a new instance. */
private class MAnalysis(val stamps: Stamps, val apis: APIs, val relations: Relations, val infos: SourceInfos, val compilations: Compilations) extends Analysis {
  def ++(o: Analysis): Analysis = new MAnalysis(stamps ++ o.stamps, apis ++ o.apis, relations ++ o.relations,
    infos ++ o.infos, compilations ++ o.compilations)
  def --(sources: Iterable[File]): Analysis =
    {
      val newRelations = relations -- sources
      // Keep an entry only if it is still referenced by the pruned relations.
      def keep[T](f: (Relations, T) => Set[_]): T => Boolean = !f(newRelations, _).isEmpty
      val newAPIs = apis.removeInternal(sources).filterExt(keep(_ usesExternal _))
      val newStamps = stamps.filter(keep(_ produced _), sources, keep(_ usesBinary _))
      val newInfos = infos -- sources
      new MAnalysis(newStamps, newAPIs, newRelations, newInfos, compilations)
    }
  def copy(stamps: Stamps, apis: APIs, relations: Relations, infos: SourceInfos, compilations: Compilations = compilations): Analysis =
    new MAnalysis(stamps, apis, relations, infos, compilations)
  def addSource(src: File, api: Source, stamp: Stamp, directInternal: Iterable[File], inheritedInternal: Iterable[File], info: SourceInfo): Analysis =
    copy(stamps.markInternalSource(src, stamp), apis.markInternalSource(src, api), relations.addInternalSrcDeps(src, directInternal, inheritedInternal), infos.add(src, info))
  def addBinaryDep(src: File, dep: File, className: String, stamp: Stamp): Analysis =
    copy(stamps.markBinary(dep, className, stamp), apis, relations.addBinaryDep(src, dep), infos)
  def addExternalDep(src: File, dep: String, depAPI: Source, inherited: Boolean, fromMacro: Boolean): Analysis =
    copy(stamps, apis.markExternalAPI(dep, depAPI), relations.addExternalDep(src, dep, inherited, fromMacro), infos)
  def addProduct(src: File, product: File, stamp: Stamp, name: String): Analysis =
    copy(stamps.markProduct(product, stamp), apis, relations.addProduct(src, product, name), infos)
  def groupBy[K](discriminator: File => K): Map[K, Analysis] = {
    if (relations.nameHashing)
      // Fixed: the original message was missing the space between "have" and
      // the backquoted setting name.
      throw new UnsupportedOperationException("Grouping of Analyses that have " +
        "`relations.memberRefAndInheritanceDeps` set to `true` is not supported.")
    def discriminator1(x: (File, _)) = discriminator(x._1) // Apply the discriminator to the first coordinate.
    val kSrcProd = relations.srcProd.groupBy(discriminator1)
    val kBinaryDep = relations.binaryDep.groupBy(discriminator1)
    val kClasses = relations.classes.groupBy(discriminator1)
    val kSourceInfos = infos.allInfos.groupBy(discriminator1)
    // Internal deps whose endpoints fall in different groups must be
    // externalized (expressed as source -> class name) in the result.
    val (kStillInternal, kExternalized) = relations.direct.internal partition { case (a, b) => discriminator(a) == discriminator(b) } match {
      case (i, e) => (i.groupBy(discriminator1), e.groupBy(discriminator1))
    }
    val kStillExternal = relations.direct.external.groupBy(discriminator1)
    // Find all possible groups.
    val allMaps = kSrcProd :: kBinaryDep :: kStillInternal :: kExternalized :: kStillExternal :: kClasses :: kSourceInfos :: Nil
    val allKeys: Set[K] = (Set.empty[K] /: (allMaps map { _.keySet }))(_ ++ _)
    // Map from file to a single representative class defined in that file.
    // This is correct (for now): currently all classes in an external dep share the same Source object,
    // and a change to any of them will act like a change to all of them.
    // We don't use all the top-level classes in source.api.definitions, even though that's more intuitively
    // correct, because this can cause huge bloat of the analysis file.
    def getRepresentativeClass(file: File): Option[String] = apis.internalAPI(file).api.definitions.headOption map { _.name }
    // Create an Analysis for each group.
    (for (k <- allKeys) yield {
      def getFrom[A, B](m: Map[K, Relation[A, B]]): Relation[A, B] = m.getOrElse(k, Relation.empty)
      // Products and binary deps.
      val srcProd = getFrom(kSrcProd)
      val binaryDep = getFrom(kBinaryDep)
      // Direct Sources.
      val stillInternal = getFrom(kStillInternal)
      val stillExternal = getFrom(kStillExternal)
      val externalized = getFrom(kExternalized)
      val externalizedClasses = Relation.reconstruct(externalized.forwardMap mapValues { _ flatMap getRepresentativeClass })
      val newExternal = stillExternal ++ externalizedClasses
      // Public inherited sources.
      val stillInternalPI = stillInternal filter relations.publicInherited.internal.contains
      val stillExternalPI = stillExternal filter relations.publicInherited.external.contains
      val externalizedPI = externalized filter relations.publicInherited.internal.contains
      val externalizedClassesPI = Relation.reconstruct(externalizedPI.forwardMap mapValues { _ flatMap getRepresentativeClass })
      val newExternalPI = stillExternalPI ++ externalizedClassesPI
      // Class names.
      val classes = getFrom(kClasses)
      // Create new relations for this group.
      val newRelations = Relations.make(
        srcProd,
        binaryDep,
        Relations.makeSource(stillInternal, newExternal),
        Relations.makeSource(stillInternalPI, newExternalPI),
        classes
      )
      // Compute new API mappings.
      def apisFor[T](m: Map[T, Source], x: Traversable[T]): Map[T, Source] =
        (x map { e: T => (e, m.get(e)) } collect { case (t, Some(source)) => (t, source) }).toMap
      val stillInternalAPIs = apisFor(apis.internal, srcProd._1s)
      val stillExternalAPIs = apisFor(apis.external, stillExternal._2s)
      val externalizedAPIs = apisFor(apis.internal, externalized._2s)
      val externalizedClassesAPIs = externalizedAPIs flatMap {
        case (file: File, source: Source) => getRepresentativeClass(file) map { cls: String => (cls, source) }
      }
      val newAPIs = APIs(stillInternalAPIs, stillExternalAPIs ++ externalizedClassesAPIs)
      // New stamps.
      val newStamps = Stamps(
        stamps.products.filterKeys(srcProd._2s.contains),
        stamps.sources.filterKeys({ discriminator(_) == k }),
        stamps.binaries.filterKeys(binaryDep._2s.contains),
        stamps.classNames.filterKeys(binaryDep._2s.contains))
      // New infos.
      val newSourceInfos = SourceInfos.make(kSourceInfos.getOrElse(k, Map.empty))
      (k, new MAnalysis(newStamps, newAPIs, newRelations, newSourceInfos, compilations))
    }).toMap
  }
  override def equals(other: Any) = other match {
    // Note: Equality doesn't consider source infos or compilations.
    case o: MAnalysis => stamps == o.stamps && apis == o.apis && relations == o.relations
    case _ => false
  }
  override lazy val hashCode = (stamps :: apis :: relations :: Nil).hashCode
}
| xeno-by/old-scalameta-sbt | compile/inc/src/main/scala/sbt/inc/Analysis.scala | Scala | bsd-3-clause | 13,872 |
package com.sksamuel.elastic4s.http.search.queries.geo
import com.sksamuel.elastic4s.DistanceUnit
import com.sksamuel.elastic4s.searches.GeoPoint
import com.sksamuel.elastic4s.searches.queries.geo.Shapes.{Circle, Polygon}
import com.sksamuel.elastic4s.searches.queries.geo._
import org.scalatest.{FunSuite, GivenWhenThen, Matchers}
class GeoShapeQueryBodyFnTest extends FunSuite with Matchers with GivenWhenThen {
test("Should correctly build geo shape point search query") {
Given("Some point query")
val query = GeoShapeQuery(
"location",
InlineShape(
PointShape(GeoPoint(-77.03653, 38.897676))
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinate")
queryBody.string() shouldEqual pointQuery
}
test("Should correctly build geo shape envelope query") {
Given("Some envelope query")
val query = GeoShapeQuery(
"location",
InlineShape(
EnvelopeShape(
upperLeft = GeoPoint(-45.0, 45.0),
lowerRight = GeoPoint(45.0, -45.0)
)
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual envelopeQuery
}
test("Should correctly build geo shape multipoint query") {
Given("Some multipoint query")
val query = GeoShapeQuery(
"location",
InlineShape(
MultiPointShape(Seq(GeoPoint(102.0,2.0),GeoPoint(102.0,3.0)))
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual multiPointQuery
}
test("Should correctly build geo shape linestring query") {
Given("Some linestring query")
val query = GeoShapeQuery(
"location",
InlineShape(
LineStringShape(GeoPoint(-77.03653, 38.897676),GeoPoint(-77.009051, 38.889939))
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual lineStringQuery
}
test("Should correctly build geo shape multilinestring query") {
Given("Some multi linestring query")
val query = GeoShapeQuery(
"location",
InlineShape(
MultiLineStringShape(Seq(
Seq(GeoPoint(102.0, 2.0), GeoPoint(103.0, 2.0), GeoPoint(103.0, 3.0), GeoPoint(102.0, 3.0)),
Seq(GeoPoint(100.0, 0.0), GeoPoint(101.0, 0.0), GeoPoint(101.0, 1.0), GeoPoint(100.0, 1.0)),
Seq(GeoPoint(100.2, 0.2), GeoPoint(100.8, 0.2), GeoPoint(100.8, 0.8), GeoPoint(100.2, 0.8))
))
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual multiLineStringQuery
}
test("Should correctly build geo shape circle search query") {
Given("Some circle query")
val query = GeoShapeQuery(
"location",
InlineShape(
CircleShape(Circle(GeoPoint(23.23,100.23),(100.0,DistanceUnit.Meters)))
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field, coordinates and radius")
queryBody.string() shouldEqual circleQuery
}
test("Should correctly build geo shape geometry collection search query") {
Given("Some collection shape query")
val query = GeoShapeQuery(
"location",
InlineShape(
GeometryCollectionShape(
Seq(
CircleShape(Circle(GeoPoint(23.23,100.23),(100.0,DistanceUnit.Meters))),
PointShape(GeoPoint(23.23,100.23))
)
)
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have all shapes in collection specified")
queryBody.string() shouldEqual geometryCollectionQuery
}
test("Should correctly build geo shape polygon search query") {
Given("Some polygon shape query")
val query = GeoShapeQuery(
"location",
InlineShape(
PolygonShape(Polygon(
points = Seq(
GeoPoint(100.0, 0.0),
GeoPoint(101.0, 0.0),
GeoPoint(101.0, 1.0),
GeoPoint(100.0, 1.0),
GeoPoint(100.0, 0.0)
),
holes = Some(
Seq(
GeoPoint(100.2, 0.2),
GeoPoint(100.8, 0.2),
GeoPoint(100.8, 0.8),
GeoPoint(100.2, 0.8),
GeoPoint(100.2, 0.2)
)
))
)
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual polygonQuery
}
test("Should correctly build geo shape multipolygon search query") {
Given("Some multipolygon shape query")
val query = GeoShapeQuery(
"location",
InlineShape(
MultiPolygonShape(
Seq(
Polygon(
points = Seq(
GeoPoint(102.0, 2.0),
GeoPoint(103.0, 2.0),
GeoPoint(103.0, 3.0),
GeoPoint(102.0, 3.0),
GeoPoint(102.0, 2.0)
),
holes = None
),
Polygon(
points = Seq(
GeoPoint(100.0, 0.0),
GeoPoint(101.0, 0.0),
GeoPoint(101.0, 1.0),
GeoPoint(100.0, 1.0),
GeoPoint(100.0, 0.0)
),
holes = Some(
Seq(
GeoPoint(100.2, 0.2),
GeoPoint(100.8, 0.2),
GeoPoint(100.8, 0.8),
GeoPoint(100.2, 0.8),
GeoPoint(100.2, 0.2)
)
)
)
)
)
)
)
When("Geo shape query is built")
val queryBody = GeoShapeQueryBodyFn(query)
Then("query should have right field and coordinates")
queryBody.string() shouldEqual multiPolygonQuery
}
def polygonQuery =
"""
|{
| "geo_shape":{
| "location":{
| "shape":{
| "type":"polygon",
| "coordinates":[
| [[100.0,0.0],[101.0,0.0],[101.0,1.0],[100.0,1.0],[100.0,0.0]],
| [[100.2,0.2],[100.8,0.2],[100.8,0.8],[100.2,0.8],[100.2,0.2]]
| ]
| }
| }
| }
|}
""".stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for the multipolygon test: two polygons, the second with
  // a hole. All fixtures below are compacted (spaces and newlines removed).
  def multiPolygonQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"multipolygon",
      |            "coordinates":[
      |               [ [[102.0,2.0],[103.0,2.0],[103.0,3.0],[102.0,3.0],[102.0,2.0]] ],
      |               [ [[100.0,0.0],[101.0,0.0],[101.0,1.0],[100.0,1.0],[100.0,0.0]],
      |                 [[100.2,0.2],[100.8,0.2],[100.8,0.8],[100.2,0.8],[100.2,0.2]] ]
      |            ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a single "point" shape query.
  def pointQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"point",
      |            "coordinates":[-77.03653, 38.897676]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for an "envelope" (bounding box) shape query.
  def envelopeQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"envelope",
      |            "coordinates":[ [-45.0,45.0],[45.0,-45.0] ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a "multipoint" shape query.
  def multiPointQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"multipoint",
      |            "coordinates":[ [102.0,2.0],[102.0,3.0] ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a "linestring" shape query (two points).
  def lineStringQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"linestring",
      |            "coordinates":[ [-77.03653,38.897676],[-77.009051,38.889939] ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a "multilinestring" shape query (three line strings).
  def multiLineStringQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"multilinestring",
      |            "coordinates":[
      |               [ [102.0,2.0],[103.0,2.0],[103.0,3.0],[102.0,3.0] ],
      |               [ [100.0,0.0],[101.0,0.0],[101.0,1.0],[100.0,1.0] ],
      |               [ [100.2,0.2],[100.8,0.2],[100.8,0.8],[100.2,0.8] ]
      |            ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a "circle" shape query: centre point plus a radius
  // serialised with its unit suffix ("100.0m").
  def circleQuery =
    """|
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"circle",
      |            "coordinates":[23.23,100.23],
      |            "radius":"100.0m"
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
  // Expected JSON for a "geometrycollection" query: note the "geometries"
  // array instead of "coordinates".
  def geometryCollectionQuery =
    """
      |{
      |   "geo_shape":{
      |      "location":{
      |         "shape":{
      |            "type":"geometrycollection",
      |            "geometries":[
      |               {
      |                  "type":"circle",
      |                  "coordinates":[23.23,100.23],
      |                  "radius":"100.0m"
      |               },
      |               {
      |                  "type":"point",
      |                  "coordinates":[23.23,100.23]
      |               }
      |            ]
      |         }
      |      }
      |   }
      |}
    """.stripMargin.replaceAllLiterally(" ", "").replace("\n", "")
}
| Tecsisa/elastic4s | elastic4s-http/src/test/scala/com/sksamuel/elastic4s/http/search/queries/geo/GeoShapeQueryBodyFnTest.scala | Scala | apache-2.0 | 10,083 |
/**
* Created by yusuke on 2015/02/07.
*/
package skarn
import com.typesafe.config.ConfigFactory
/**
 * Static project/runtime configuration read once at startup from
 * "project.conf", falling back to the default application config.
 */
object ProjectInfo {
  val config = ConfigFactory.load("project").withFallback(ConfigFactory.load())

  // Actor-system / project name.
  val name = config.getString("name")

  // Parsed semantic version; fails fast at startup on a malformed string.
  val version = Version.apply(config.getString("version"))

  // Host address with any trailing "/<digits>" (CIDR-style prefix) removed,
  // e.g. "10.0.0.1/24" -> "10.0.0.1".
  // Fixed: the regex was written as """/\\d+""", which in a triple-quoted
  // string matches a literal backslash followed by 'd's and so never
  // stripped the numeric suffix.
  val ip = {
    val host = config.getString("http.host")
    if (host.contains("/")) {
      host.replaceAll("""/\d+""", "")
    } else {
      host
    }
  }

  val port = config.getInt("tcp.port")
  val httpPort = config.getInt("http.port")

  // Remote Akka actor path for this node.
  val path = s"akka.tcp://$name@$ip:$port"
}
/**
 * Semantic version "major.minor" with an optional pre-release suffix,
 * e.g. Version(1, 0, Some("SNAPSHOT")) renders as "1.0-SNAPSHOT".
 */
case class Version(major: Int, minor: Int, suffix: Option[String]) {
  override def toString = suffix match {
    case Some(s) => s"$major.$minor-$s"
    case None => s"$major.$minor"
  }
}

object Version {

  /**
   * Parses `version`, throwing NoSuchElementException (with the offending
   * input in the message) when it does not match either supported form.
   */
  def apply(version: String): Version =
    parse(version).getOrElse(
      throw new NoSuchElementException(s"Malformed version string: '$version'"))

  /**
   * Parses "major.minor" or "major.minor-suffix"; None when neither form
   * matches.
   * Fixed: the patterns were written with doubled backslashes
   * ("""(\\d+)\\.(\\d+)"""), which in triple-quoted strings match a literal
   * backslash rather than digits, so no real version string ever parsed.
   */
  def parse(version: String): Option[Version] = {
    val Development = """(\d+)\.(\d+)-(.+)""".r // e.g. "1.0-SNAPSHOT"
    val Production = """(\d+)\.(\d+)""".r // e.g. "1.0"
    version match {
      case Development(major, minor, suffix) =>
        Some(Version(major.toInt, minor.toInt, Some(suffix)))
      case Production(major, minor) =>
        Some(Version(major.toInt, minor.toInt, None))
      case _ => None
    }
  }
}
| trifort/skarn | src/main/scala/skarn/ProjectInfo.scala | Scala | mit | 1,288 |
package com.github.cuzfrog.webdriver
import scala.language.implicitConversions
// Root of the client/server wire-protocol message hierarchy.
private[webdriver] sealed trait Message {
  // Lets a Message be used where a String is expected (e.g. Success(this))
  // by substituting its simple class name.
  // NOTE(review): implicit conversions like this obscure call sites; keep
  // the visibility protected.
  protected implicit def classNameToString(message: Message): String = message.getClass.getSimpleName
}

// A client request; `execute` runs it against the server-side Api and
// yields the Response that is sent back to the client.
private[webdriver] sealed trait Request extends Message {
  def execute(api: Api): Response
}
// Looks up an existing driver by name; fails with NoSuchElementException
// when the server has no driver registered under that name.
private[webdriver] case class RetrieveDriver(name: String, willCleanCache: Boolean) extends Request {
  override def execute(api: Api): Response = {
    api.retrieveDriver(name, willCleanCache).map(Ready[Driver])
      .getOrElse(throw new NoSuchElementException(s"No driver[$name] on server."))
  }
}

// Looks up a driver by name, creating one of the given type when absent.
private[webdriver] case class RetrieveOrNewDriver(name: String, typ: DriverType, waitSec: Int, willCleanCache: Boolean) extends Request {
  override def execute(api: Api): Response = {
    val dr = api.retrieveOrNewDriver(name, typ, waitSec, willCleanCache)
    Ready[Driver](dr)
  }
}

// Quits the driver and reports how many cached elements were released.
private[webdriver] case class Kill(driver: Driver) extends Request {
  override def execute(api: Api): Success = {
    val eleCnt = api.kill(driver)
    Success(s"Driver quit, $eleCnt elements cleaned.")
  }
}

// Clears the server-side element cache for the driver.
private[webdriver] case class CleanCache(driver: Driver) extends Request {
  override def execute(api: Api): Success = {
    val eleCnt = api.cleanCache(driver)
    Success(s"$eleCnt elements cleaned.")
  }
}

// Navigates the driver to `url` and returns the resulting window handle.
private[webdriver] case class Navigate(driver: Driver, url: String) extends Request {
  override def execute(api: Api): Ready[Window] = {
    val window = api.navigateTo(driver, url)
    Ready(window)
  }
}

// Returns the driver's current window.
private[webdriver] case class GetWindow(driver: Driver) extends Request {
  override def execute(api: Api): Ready[Window] = Ready[Window](api.getWindow(driver))
}

// Returns all windows known to the driver.
private[webdriver] case class GetWindows(driver: Driver) extends Request {
  override def execute(api: Api): Ready[Seq[Window]] = Ready[Seq[Window]](api.getWindows(driver))
}

// Finds the first element under `webBody` whose `attr` equals `value`.
private[webdriver] case class FindElement(webBody: WebBody, attr: String, value: String) extends Request {
  override def execute(api: Api): Ready[Element] = Ready[Element](api.findElement(webBody, attr, value))
}

// Finds every element under `webBody` whose `attr` equals `value`.
private[webdriver] case class FindElements(webBody: WebBody, attr: String, value: String) extends Request {
  override def execute(api: Api): Ready[Seq[Element]] = Ready[Seq[Element]](api.findElements(webBody, attr, value))
}

// Finds the first element matching every (attribute, value) pair.
private[webdriver] case class FindElementEx(webBody: WebBody, attrPairs: List[(String, String)]) extends Request {
  override def execute(api: Api): Ready[Element] = Ready[Element](api.findElementEx(webBody, attrPairs))
}

// Tests whether a matching element exists without failing when absent.
private[webdriver] case class CheckElementExistence(webBody: WebBody, attr: String, value: String) extends Request {
  override def execute(api: Api): Ready[Boolean] = Ready[Boolean](api.checkElementExistence(webBody, attr, value))
}

// Runs a JavaScript snippet in the context of `webBody`.
// NOTE(review): `args` is accepted but never forwarded to api.executeJS,
// so script arguments are silently dropped — confirm the intended Api call.
private[webdriver] case class ExecuteJS(webBody: WebBody, script: String, args: AnyRef*) extends Request {
  override def execute(api: Api): Ready[Any] = Ready[Any](api.executeJS(webBody, script))
}
// Types `keys` into the element.
private[webdriver] case class SendKeys(element: Element, keys: String) extends Request {
  override def execute(api: Api): Success = {
    api.sendKeys(element, keys)
    Success("Keys sent.")
  }
}

// Clears the element's text content. (Success(this) uses the implicit
// Message-to-class-name conversion for its message.)
private[webdriver] case class ClearText(element: Element) extends Request {
  override def execute(api: Api): Success = {
    api.clearText(element)
    Success(this)
  }
}

// Submits the form the element belongs to.
private[webdriver] case class Submit(element: Element) extends Request {
  override def execute(api: Api): Success = {
    api.submit(element)
    Success(this)
  }
}

// Clicks the element.
private[webdriver] case class Click(element: Element) extends Request {
  override def execute(api: Api): Success = {
    api.click(element)
    Success(this)
  }
}

// Reads the value of the element's `attr` attribute.
private[webdriver] case class GetAttr(element: Element, attr: String) extends Request {
  override def execute(api: Api) = Success(api.getAttr(element, attr))
}

// Reads the element's visible text.
private[webdriver] case class GetText(element: Element) extends Request {
  override def execute(api: Api) = Success(api.getText(element))
}

// Closes the given browser window.
private[webdriver] case class CloseWindow(window: Window) extends Request {
  override def execute(api: Api): Success = {
    api.closeWindow(window)
    Success(this)
  }
}

// Asks the server process to shut itself down.
private[webdriver] case object Shutdown extends Request {
  override def execute(api: Api): Success = {
    api.shutdown()
    Success("Tell server to shutdown.")
  }
}

// Returns the element's inner HTML, post-processed by `parseLogic`.
private[webdriver] case class GetInnerHtml(element: Element, parseLogic: String) extends Request {
  override def execute(api: Api): Response = Ready(api.getInnerHtml(element, parseLogic))
}

// Replies sent back to the client.
private[webdriver] sealed trait Response extends Message
// The request failed; `msg` explains why, `request` echoes the cause.
private[webdriver] case class Failed(msg: String, request: Request) extends Response
// The request succeeded with an informational message.
private[webdriver] case class Success(msg: String) extends Response
// The request succeeded and produced a value of type T.
private[webdriver] case class Ready[T](data: T) extends Response
| cuzfrog/WebDriverServ | shared/src/main/scala/com/github/cuzfrog/webdriver/Messages.scala | Scala | apache-2.0 | 4,807 |
package com.arcusys.learn.export.quiz
import java.io.{ File, FileInputStream }
import com.arcusys.learn.facades._
import com.arcusys.valamis.export.ImportProcessor
import com.arcusys.valamis.file.service.FileService
import com.arcusys.valamis.quiz.model.Quiz
import com.arcusys.valamis.quiz.service.QuizService
import com.arcusys.valamis.quiz.storage.QuizStorage
import com.arcusys.valamis.util.{ FileSystemUtil, StreamUtil }
import com.escalatesoft.subcut.inject.{ BindingModule, Injectable }
import org.joda.time.DateTime
import org.json4s.{ DefaultFormats, Formats }
//TODO need to refactor
/**
 * Recreates quizzes (and their question trees) in a course from previously
 * exported data. Binary assets (logos, PDFs, videos, PPTX) are restored
 * from `tempDirectory`; imported bank questions go into a dated category.
 */
class QuizImportProcessor(implicit configuration: BindingModule) extends ImportProcessor[QuizExportResponse] with Injectable {
  override implicit def bindingModule: BindingModule = configuration
  private lazy val quizStorage = inject[QuizStorage]
  private lazy val quizFacade = inject[QuizFacadeContract]
  private lazy val questionFacade = inject[QuestionFacadeContract]
  private lazy val categoryFacade = inject[CategoryFacadeContract]
  private lazy val fileService = inject[FileService]
  private lazy val quizService = inject[QuizService]
  // Creates one quiz per exported item, restores its logo (best effort),
  // imports its contents, and drops the temporary question-bank category
  // again when no bank questions were actually imported.
  def importItems(items: List[QuizExportResponse], courseId: Long, tempDirectory: File, userId: Long): Unit = {
    items.foreach(q => {
      // new logo name for quiz logo file (strip everything before the '_')
      val newLogo = if (q.logo.nonEmpty) q.logo.substring(Math.max(q.logo.indexOf("_"), 0)) else ""
      val quizId = quizStorage.createAndGetID(Quiz(-1, q.title, q.description, "", "", Option(courseId.toInt), newLogo, None))
      if (q.logo.nonEmpty) {
        try {
          val content = FileSystemUtil.getFileContent(new File(tempDirectory, q.logo))
          fileService.setFileContent("quiz_logo_" + quizId, newLogo, content)
        } catch {
          // NOTE(review): catching Throwable also swallows fatal errors;
          // scala.util.control.NonFatal would be safer here.
          case _: Throwable => {
            // if logo saving failed, clear logo in quiz model
            quizStorage.modify(quizStorage.getByID(quizId).get.copy(logo = ""))
          }
        }
      }
      val categoryBankId = categoryFacade.create("Imported_" + DateTime.now.toString("YYYY-MM-dd"), "Imported questions at " + DateTime.now.toString("YYYY-MM-dd"), None, Option(courseId.toInt)).id
      q.contents.foreach(addImportContent(quizId, _, None, courseId, categoryBankId, tempDirectory))
      if (questionFacade.getChildren(Option(categoryBankId), Option(courseId.toInt)).size == 0)
        categoryFacade.delete(categoryBankId, Some(courseId.toInt))
    })
  }
  // Recursively imports one exported content node into quiz `newQuizId`:
  // categories recurse into their children; each question type is recreated
  // through the matching facade/service call, copying any binary payload.
  private def addImportContent(newQuizId: Int, content: QuizContentExport, categoryId: Option[String], courseId: Long, categoryBankId: Int, tempDirectory: File) {
    content match {
      case c: QuizCategoryExport =>
        val newCategory = quizFacade.addCategory(newQuizId, c.title)
        c.children.foreach(addImportContent(newQuizId, _, Option(newCategory.id), courseId, categoryBankId, tempDirectory))
      case q: QuizQuestionRevealJSExport =>
        quizFacade.addQuestionRevealJS(newQuizId, categoryId, q.title, q.text)
      case q: QuizQuestionExternalExport =>
        quizFacade.addQuestionExternal(newQuizId, categoryId, q.title, q.url)
      case q: QuizQuestionRevealPlainTextExport =>
        quizFacade.addQuestionRevealJS(newQuizId, categoryId, q.title, q.content)
      case q: QuizQuestionPDFExport =>
        quizFacade.addQuestionPDF(newQuizId, categoryId, q.title, q.filename)
        val content = FileSystemUtil.getFileContent(new File(tempDirectory, q.filename))
        fileService.setFileContent("quizData" + newQuizId, q.filename, content, false)
      case q: QuizQuestionPlainTextExport =>
        quizFacade.addQuestionPlainText(newQuizId, categoryId, q.title, q.text)
      case q: QuizQuestionBankExport =>
        // Bank questions are first recreated in the (dated) bank category,
        // then linked into the quiz.
        val questionId = questionFacade.createQuestion(Option(categoryBankId),
          q.question.questionType,
          q.question.title,
          q.question.text,
          q.question.explanationText,
          q.question.rightAnswerText.getOrElse(""),
          q.question.wrongAnswerText.getOrElse(""),
          q.question.forceCorrectCount,
          q.question.isCaseSensitive,
          Option(courseId.toInt),
          q.question.answers.map(_.toAnswerResponse()).toList).id
        val res = quizFacade.addQuestion(newQuizId, categoryId, questionId)
        quizFacade.updateQuestion(newQuizId, res.id, q.question.title, q.autoShowAnswer)
      case q: QuizQuestionVideoExport =>
        // Videos are re-uploaded into the course document library.
        val file = new File(tempDirectory, q.uuid)
        val videoTitle = q.videoTitle
        val uuid = fileService.addToDocumentLibrary(file.getPath, courseId, videoTitle, q.extension, q.mimeType, q.size)
        val catId = getCategoryId(categoryId)
        quizService.createQuestionDocumentLibrary(newQuizId, catId, q.title, uuid, courseId.toInt)
      case q: QuizQuestionPptxExport =>
        val catId = getCategoryId(categoryId)
        val content = StreamUtil.toByteArray(new FileInputStream(new File(tempDirectory, q.filename)))
        fileService.setFileContent("quizData" + newQuizId, q.filename, content, false)
        quizService.createQuestionPPTX(newQuizId, catId, q.title, q.filename)
    }
  }
  //TODO need to refactor
  // Converts an optional "c_<id>" category key into its numeric id.
  private def getCategoryId(categoryId: Option[String]) = {
    if (categoryId.isEmpty) None
    else Option(categoryId.get.replace("c_", "").toInt)
  }
}
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/export/quiz/QuizImportProcessor.scala | Scala | lgpl-3.0 | 5,309 |
package com.ebay.hackathon.entity
import com.ebay.hackathon.entity.traits.{SerialisableEntity, Identifiable}
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.commons.TypeImports._
import org.joda.time.DateTime
/**
* Author sreejith on 09/11/14 6:00 AM.
*/
// Concrete persisted delivery entity; Identifiable supplies id handling.
class DeliveryInfo extends DeliveryInfoCore[DeliveryInfo]
  with Identifiable[DeliveryInfo]
/**
 * Mutable base for DeliveryInfo: holds the persisted fields and the Mongo
 * (de)serialisation logic. `T` is the concrete entity type returned by
 * `fromDBObject` for fluent use.
 *
 * NOTE: nullable vars follow the SerialisableEntity pattern used by the
 * other entities in this package; a null field is simply skipped when
 * serialising.
 */
class DeliveryInfoCore[T] extends SerialisableEntity[T] {

  var name: String = null
  var address: String = null
  var comments: List[String] = null
  var volunteerId: String = null
  var needyId: String = null
  var contributorIds: List[String] = null
  var deliveryDate: DateTime = null
  var photosIds: List[String] = null

  import com.ebay.hackathon.entity.DeliveryInfo._

  /** Serialises every non-null field into a Mongo DBObject. */
  def asDBObject: DBObject = {
    val builder = MongoDBObject.newBuilder
    if (name != null) builder += NAME -> name
    if (address != null) builder += ADDRESS -> address
    if (comments != null) builder += COMMENTS -> comments
    if (volunteerId != null) builder += VOLUNTEER_ID -> volunteerId
    if (needyId != null) builder += NEEDY_ID -> needyId
    if (contributorIds != null) builder += CONTRIBUTOR_IDS -> contributorIds
    if (photosIds != null) builder += PHOTO_IDS -> photosIds
    if (deliveryDate != null) builder += DELIVERY_DATE -> deliveryDate
    builder.result()
  }

  /** Populates this entity from `dbObject` and returns it as `T`. */
  def fromDBObject(dbObject: DBObject) = {
    name = getString(dbObject, NAME)
    address = getString(dbObject, ADDRESS)
    comments = getPrimitiveList[String](dbObject, COMMENTS)
    // Fixed: previously looked the value up under the (null) field *value*
    // `volunteerId` instead of the VOLUNTEER_ID key, so the field was never
    // deserialised.
    volunteerId = getString(dbObject, VOLUNTEER_ID)
    needyId = getString(dbObject, NEEDY_ID)
    deliveryDate = getDate(dbObject, DELIVERY_DATE)
    contributorIds = getPrimitiveList[String](dbObject, CONTRIBUTOR_IDS)
    photosIds = getPrimitiveList[String](dbObject, PHOTO_IDS)
    this.asInstanceOf[T]
  }
}
// Mongo document key names shared by serialisation and deserialisation.
object DeliveryInfo {
  final val NAME = "name"
  final val ADDRESS = "address"
  final val COMMENTS = "comments"
  final val VOLUNTEER_ID = "volunteerId"
  final val NEEDY_ID = "needyId"
  final val CONTRIBUTOR_IDS = "contributorIds"
  final val DELIVERY_DATE = "deliveryDate"
  final val PHOTO_IDS = "photoIds"
}
package fpinscala.datastructures
/**
 * Chapter-3 singly linked list ADT, covariant in its element type `A`.
 * `Nil` is the empty list; `Cons(head, tail)` is a non-empty list.
 */
sealed trait List[+A]

/** The empty list. */
case object Nil extends List[Nothing]

/** A non-empty list: one element plus the remainder of the list. */
case class Cons[+A](head: A, tail: List[A]) extends List[A]

/**
 * Companion object: variadic constructor plus the chapter-3 exercise
 * functions. Unless marked @tailrec, functions recurse directly and are
 * not stack-safe on very long lists (they are teaching exercises).
 */
object List
{
  /** Sum of a list of ints. */
  def sum(ints: List[Int]): Int = ints match
  {
    case Nil => 0
    case Cons(x, xs) => x + sum(xs)
  }

  /** Product of a list of doubles; short-circuits when the head is 0.0. */
  def product(ds: List[Double]): Double = ds match
  {
    case Nil => 1.0
    case Cons(0.0, _) => 0.0
    case Cons(x, xs) => x * product(xs)
  }

  /** Variadic constructor: List(1, 2) == Cons(1, Cons(2, Nil)). */
  def apply[A](as: A*): List[A] =
    if (as.isEmpty) Nil
    else Cons(as.head, apply(as.tail: _*))

  // Exercise 3.1: the third case matches (x = 1, y = 2), so x == 3.
  val x = List(1, 2, 3, 4, 5) match
  {
    case Cons(x, Cons(2, Cons(4, _))) => x
    case Nil => 42
    case Cons(x, Cons(y, Cons(3, Cons(4, _)))) => x + y
    case Cons(h, t) => h + sum(t)
    case _ => 101
  }

  /** Appends `a2` after `a1`; O(|a1|). */
  def append[A](a1: List[A], a2: List[A]): List[A] =
    a1 match
    {
      case Nil => a2
      case Cons(h, t) => Cons(h, append(t, a2))
    }

  /** Right fold by direct recursion (not stack-safe). */
  def foldRight[A, B](as: List[A], z: B)(f: (A, B) => B): B =
    as match
    {
      case Nil => z
      case Cons(x, xs) => f(x, foldRight(xs, z)(f))
    }

  /** Sum via foldRight. */
  def sum2(ns: List[Int]) =
    foldRight(ns, 0)((x, y) => x + y)

  /** Product via foldRight. */
  def product2(ns: List[Double]) =
    foldRight(ns, 1.0)(_ * _)

  /** Exercise 3.2: all but the first element; Nil is returned unchanged. */
  def tail[A](l: List[A]): List[A] =
    l match
    {
      case Nil => l
      case Cons(h, t) => t
    }

  /** Exercise 3.3: replaces the head; Nil is returned unchanged. */
  def setHead[A](l: List[A], h: A): List[A] =
    l match
    {
      case Nil => l
      case Cons(lh, lt) => Cons(h, lt)
    }

  /** Exercise 3.4: drops the first `n` elements (stays Nil once exhausted). */
  @annotation.tailrec
  def drop[A](l: List[A], n: Int): List[A] =
    n match
    {
      case 0 => l
      case _ => drop(tail(l), n - 1)
    }

  /** Exercise 3.5: drops the longest prefix whose elements satisfy `f`. */
  @annotation.tailrec
  def dropWhile[A](l: List[A], f: A => Boolean): List[A] =
    l match
    {
      case Nil => Nil
      case Cons(h, t) =>
        if (f(h)) dropWhile(t, f)
        else l
    }

  /** Exercise 3.6: all but the last element (not stack-safe). */
  def init[A](l: List[A]): List[A] =
    l match
    {
      case Nil => Nil
      case Cons(h, Nil) => Nil
      case Cons(h, t) => Cons(h, init(t))
    }

  /** Exercise 3.9: length via foldRight. */
  def length[A](l: List[A]): Int = foldRight(l, 0)((_, a) => a + 1)

  /** Exercise 3.10: stack-safe left fold. */
  @annotation.tailrec
  def foldLeft[A, B](l: List[A], z: B)(f: (B, A) => B): B =
    l match
    {
      case Nil => z
      case Cons(h, t) => foldLeft(t, f(z, h))(f)
    }

  /** Exercise 3.11: sum, product and length via foldLeft. */
  def sum3(l: List[Int]) = foldLeft(l, 0)(_ + _)
  def product3(l: List[Double]) = foldLeft(l, 1.0)(_ * _)
  def length2[A](l: List[A]) = foldLeft(l, 0)((a, _) => a + 1)

  /** Exercise 3.12: reverse via foldLeft. */
  def reverse[A](l: List[A]) = foldLeft(l, Nil:List[A])((xs, x) => Cons(x, xs))

  /**
   * Exercise 3.13: foldLeft in terms of foldRight.
   * Fixed: the previous version only flipped the argument order of `f`,
   * which traverses in the wrong direction for non-commutative `f`
   * (e.g. string concatenation came out reversed). Reversing first makes
   * it agree with foldLeft.
   */
  def foldLeft2[A, B](l: List[A], z: B)(f: (B, A) => B): B =
    foldRight(reverse(l), z)((a, b) => f(b, a))

  /**
   * Exercise 3.13: foldRight in terms of foldLeft (stack-safe).
   * Fixed in the same way as foldLeft2: reverse first, then fold.
   */
  def foldRight2[A, B](l: List[A], z: B)(f: (A, B) => B): B =
    foldLeft(reverse(l), z)((b, a) => f(a, b))

  /** Exercise 3.14: append via a fold. */
  def append2[A](lhs: List[A], rhs: List[A]): List[A] =
    foldLeft(reverse(lhs), rhs)((xs, x) => Cons(x, xs))

  /** Exercise 3.15: flattens a list of lists. */
  def concatenate[A](ll: List[List[A]]) =
    foldLeft(ll, Nil:List[A])((p, q) => append2(p, q))

  /** Exercise 3.16: adds 1 to every element. */
  def ex316(l: List[Int]): List[Int] =
    reverse(foldLeft(l, Nil:List[Int])((xs, x) => Cons(x + 1, xs)))

  /** Exercise 3.17: renders every double as a string. */
  def ex317(l: List[Double]): List[String] =
    reverse(foldLeft(l, Nil:List[String])((xs, x) => Cons(x.toString(), xs)))

  /** Exercise 3.18: map via foldLeft + reverse (stack-safe). */
  def map[A, B](l: List[A])(f: A => B): List[B] =
    reverse(foldLeft(l, Nil:List[B])((xs, x) => Cons(f(x), xs)))

  /** Exercise 3.19: keeps only the elements satisfying `f`. */
  def filter[A](as: List[A])(f: A => Boolean): List[A] =
    reverse(foldLeft(as, Nil:List[A])((xs, x) => if (f(x)) Cons(x, xs) else xs))

  /** Exercise 3.20: maps each element to a list and flattens the result. */
  def flatMap[A, B](as: List[A])(f: A => List[B]): List[B] =
    concatenate(map(as)(f))

  /** Exercise 3.21: filter implemented with flatMap. */
  def filter2[A](as: List[A])(f: A => Boolean): List[A] =
    flatMap(as)(x => if (f(x)) List(x) else Nil:List[A])

  /** Exercise 3.22: element-wise sum; stops at the shorter list. */
  def ex322(p: List[Int], q: List[Int]): List[Int] =
    (p, q) match
    {
      case (Nil, x) => Nil
      case (x, Nil) => Nil
      case (Cons(h1, t1), Cons(h2, t2)) => Cons(h1 + h2, ex322(t1, t2))
    }

  /** Exercise 3.23: generalised zip; stops at the shorter list. */
  def zipWith[A, B, C](p: List[A], q: List[B])(f: (A, B) => C): List[C] =
    (p, q) match
    {
      case (Nil, x) => Nil
      case (x, Nil) => Nil
      case (Cons(h1, t1), Cons(h2, t2)) => Cons(f(h1, h2), zipWith(t1, t2)(f))
    }

  /** True when `prefix` is an element-wise prefix of `l`. */
  @annotation.tailrec
  def startsWith[A](l: List[A], prefix: List[A]): Boolean =
    (l, prefix) match
    {
      case (_, Nil) => true
      case (Cons(h1, t1), Cons(h2, t2)) if h1 == h2 => startsWith(t1, t2)
      case _ => false
    }

  /**
   * Exercise 3.24: true when `what` occurs as a contiguous subsequence of
   * `where` (the empty list is a subsequence of everything).
   * Fixed: the previous version advanced through `where` while matching and
   * never backtracked after a partial match, so e.g.
   * hasSubsequence(List(1, 2, 1, 2, 3), List(1, 2, 3)) wrongly returned
   * false. We now test a full prefix match at every start position.
   */
  @annotation.tailrec
  def hasSubsequence[A](where: List[A], what: List[A]): Boolean =
    where match
    {
      case Nil => what == Nil
      case _ if startsWith(where, what) => true
      case Cons(h, t) => hasSubsequence(t, what)
    }
}
// Ad-hoc console test driver: prints "Expected" / "Actual" pairs for the
// exercise functions above. (Expectations live inside the printed strings;
// they are not asserted.)
object TestList
{
  import List._

  def main(args: Array[String]): Unit =
  {
    println("[tail] Expected: Nil")
    println("[tail] Actual: %s\\n".format(tail(Nil)))

    println("[tail] Expected: Cons(2,Cons(3,Nil))")
    println("[tail] Actual: %s\\n".format(tail(List(1, 2, 3))))

    println("[setHead] Expected: Nil")
    println("[setHead] Actual: %s\\n".format(setHead(Nil, 2)))

    println("[setHead] Expected: Cons(4,Cons(2,Nil))")
    println("[setHead] Actual: %s\\n".format(setHead(List(1, 2), 4)))

    println("[drop] Expected: Nil")
    println("[drop] Actual: %s\\n".format(drop(Nil, 0)))

    println("[drop] Expected: Nil")
    println("[drop] Actual: %s\\n".format(drop(Nil, 1)))

    println("[drop] Expected: Cons(1,Cons(2,Cons(3,Nil)))")
    println("[drop] Actual: %s\\n".format(drop(List(1, 2, 3), 0)))

    println("[drop] Expected: Cons(3,Nil)")
    println("[drop] Actual: %s\\n".format(drop(List(1, 2, 3), 2)))

    println("[dropWhile] Expected: Nil")
    println("[dropWhile] Actual: %s\\n".format(dropWhile(Nil, (h: Int) => h < 2)))

    println("[dropWhile] Expected: Cons(2,Cons(3,Nil))")
    println("[dropWhile] Actual: %s\\n".format(dropWhile(List(1, 2, 3), (h: Int) => h < 2)))

    println("[dropWhile] Expected: Cons(2,Cons(3,Nil))")
    println("[dropWhile] Actual: %s\\n".format(dropWhile(List(0, 1, 2, 3), (h: Int) => h < 2)))

    println("[init] Expected: Nil")
    println("[init] Actual: %s\\n".format(init(Nil)))

    println("[init] Expected: Nil")
    println("[init] Actual: %s\\n".format(init(List(1))))

    println("[init] Expected: Cons(1,Nil)")
    println("[init] Actual: %s\\n".format(init(List(1, 2))))

    println("[init] Expected: Cons(1,Cons(2,Nil))")
    println("[init] Actual: %s\\n".format(init(List(1, 2, 3))))

    println("[length] Expected: 0")
    println("[length] Actual: %d\\n".format(length(Nil)))

    println("[length] Expected: 1")
    println("[length] Actual: %d\\n".format(length(List(5))))

    println("[length] Expected: 3")
    println("[length] Actual: %d\\n".format(length(List(5, 8, 10))))

    println("[foldLeft] Expected: 0")
    println("[foldLeft] Actual: %d\\n".format(foldLeft(Nil:List[Int], 0)(_ + _)))

    println("[foldLeft] Expected: 5")
    println("[foldLeft] Actual: %d\\n".format(foldLeft(List(3, 2), 0)(_ + _)))

    println("[foldLeft] Expected: 10")
    println("[foldLeft] Actual: %d\\n".format(foldLeft(List(4, 3, 1, 2), 0)(_ + _)))

    println("[foldLeft] Expected: 1")
    println("[foldLeft] Actual: %d\\n".format(foldLeft(Nil:List[Int], 1)(_ * _)))

    println("[foldLeft] Expected: 120")
    println("[foldLeft] Actual: %d\\n".format(foldLeft(List(4, 3, 5, 2), 1)(_ * _)))

    println("[reverse] Expected: Nil")
    println("[reverse] Actual: %s\\n".format(reverse(Nil)))

    println("[reverse] Expected: Cons(3,Nil)")
    println("[reverse] Actual: %s\\n".format(reverse(List(3))))

    println("[reverse] Expected: Cons(3,Cons(4,Cons(5,Nil)))")
    println("[reverse] Actual: %s\\n".format(reverse(List(5, 4, 3))))

    println("[foldLeft2] Expected: 120")
    println("[foldLeft2] Actual: %d\\n".format(foldLeft2(List(4, 3, 5, 2), 1)(_ * _)))

    println("[foldRight2] Expected: 120")
    println("[foldRight2] Actual: %d\\n".format(foldRight2(List(4, 3, 5, 2), 1)(_ * _)))

    println("[append2] Expected: Cons(3,Cons(4,Cons(5,Cons(6,Nil))))")
    println("[append2] Actual: %s\\n".format(append2(List(3, 4), List(5, 6))))

    println("[append2] Expected: Cons(5,Cons(6,Cons(3,Cons(4,Nil))))")
    println("[append2] Actual: %s\\n".format(append2(List(5, 6), List(3, 4))))

    println("[concatenate] Expected: Nil")
    println("[concatenate] Actual: %s\\n".format(concatenate(List(Nil:List[Int], Nil:List[Int]))))

    println("[concatenate] Expected: Cons(1,Nil)")
    println("[concatenate] Actual: %s\\n".format(
      concatenate(List(List(1), Nil))))

    println("[concatenate] Expected: Cons(1,Nil)")
    println("[concatenate] Actual: %s\\n".format(
      concatenate(List(Nil, List(1)))))

    println("[concatenate] Expected: Cons(2,Cons(1,Nil))")
    println("[concatenate] Actual: %s\\n".format(
      concatenate(List(List(2), List(1)))))

    println("[concatenate] Expected: List(2, 3, 6, 7, 4, 5)")
    println("[concatenate] Actual: %s\\n".format(
      concatenate(List(List(2, 3), List(6, 7), List(4, 5)))))

    println(ex316(List(1, 4, 7)))
    println(ex317(List(1.23, 3.14, 11.00 / 7.00)))
    println(filter(Nil:List[Int])(x => (x % 2) == 0))
    println(filter(List(1, 2, 3, 4, 5, 6))(x => (x % 2) == 0))
    println(flatMap(List(1, 2, 3))(i => List(i, i)))
    println(filter2(List(1, 2, 3, 4, 5, 6))(x => (x % 2) == 0))
    println(ex322(List(1, 2, 3), List(10, 20, 30)))
    println(hasSubsequence(Nil:List[Int], Nil)) // true
    println(hasSubsequence(List(1, 2, 3, 4), Nil)) // true
    println(hasSubsequence(List(1, 2, 3, 4), List(4))) // true
    println(hasSubsequence(List(1, 2, 3, 4), List(5))) // false
    println(hasSubsequence(List(1, 2, 3, 4), List(1, 2))) // true
    println(hasSubsequence(List(1, 2, 3, 4), List(2, 3))) // true
    println(hasSubsequence(List(1, 2, 3, 4), List(2, 4))) // false
  }
}
| WojciechMigda/fpinscala | exercises/src/main/scala/fpinscala/datastructures/List.scala | Scala | mit | 11,263 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, Every, One, Many}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
class EveryShouldContainOnlyLogicalAndSpec extends Spec {
  // Equality that *inverts* natural equality on Every[String]; used with
  // `decided by` to prove an explicitly supplied Equality is consulted.
  val invertedListOfStringEquality =
    new Equality[Every[String]] {
      def areEqual(a: Every[String], b: Any): Boolean = a != b
    }

  // Case-insensitive equality on single strings.
  val upperCaseStringEquality =
    new Equality[String] {
      def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
    }

  // Recursively upper-cases the value shapes used in these tests (Every,
  // String, Char, String pairs, Map entries); anything else is returned
  // unchanged.
  private def upperCase(value: Any): Any =
    value match {
      case l: Every[_] => l.map(upperCase(_))
      case s: String => s.toUpperCase
      case c: Char => c.toString.toUpperCase.charAt(0)
      case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
      case e: java.util.Map.Entry[_, _] =>
        (e.getKey, e.getValue) match {
          case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
          case _ => value
        }
      case _ => value
    }

  // Case-insensitive equality on whole Every[String] collections.
  val upperCaseListOfStringEquality =
    new Equality[Every[String]] {
      def areEqual(a: Every[String], b: Any): Boolean = upperCase(a) == upperCase(b)
    }

  //ADDITIONAL//

  // Reported by checkMessageStackDepth when asserting failure locations.
  val fileName: String = "EveryShouldContainOnlyLogicalAndSpec.scala"
object `an Every` {
    // Fixtures: the subject under test and a disjoint "to" list.
    val fumList: Every[String] = Every("fum", "foe", "fie", "fee")
    val toList: Every[String] = Every("you", "to", "birthday", "happy")
    // Exercises `contain only (..) and contain only (..)` with the default,
    // implicit, and explicitly supplied Equality, plus its error cases.
    object `when used with (contain only (..) and contain only (..))` {

      // Both clauses evaluated; failure messages report which side failed.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (contain only ("fee", "fie", "foe", "fum") and contain only ("fie", "fee", "fum", "foe"))
        val e1 = intercept[TestFailedException] {
          fumList should (contain only ("happy", "birthday", "to", "you") and contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (contain only ("fee", "fie", "foe", "fum") and contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\"") + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
      }

      // The case-insensitive Equality in implicit scope drives both clauses.
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (contain only ("FEE", "FIE", "FOE", "FUM") and contain only ("FEE", "FIE", "FUM", "FOE"))
        val e1 = intercept[TestFailedException] {
          fumList should (contain only ("FEE", "FIE", "FOE", "FAM") and contain only ("FEE", "FIE", "FUM", "FOE"))
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (contain only ("FEE", "FIE", "FOE", "FUM") and (contain only ("FEE", "FIE", "FAM", "FOE")))
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
      }

      // `decided by` / `after being` variants bypass the implicit Equality.
      def `should use an explicitly provided Equality` {
        (fumList should (contain only ("FEE", "FIE", "FOE", "FUM") and contain only ("FEE", "FIE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (contain only ("FEE", "FIE", "FOE", "FAM") and contain only ("FEE", "FIE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (contain only ("FEE", "FIE", "FOE", "FUM") and contain only ("FEE", "FIE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
        (fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") and contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
      }

      // Duplicate elements in the right-hand side are rejected eagerly.
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (contain only ("fee", "fie", "foe", "fie", "fum") and contain only ("fie", "fee", "fum", "foe"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
        val e2 = intercept[exceptions.NotAllowedException] {
          fumList should (contain only ("fie", "fee", "fum", "foe") and contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e2.failedCodeFileName.get should be (fileName)
        e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e2.message should be (Some(Resources("onlyDuplicate")))
      }

      // A single GenTraversable argument triggers the "friendly reminder"
      // variant of the failure message.
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          fumList should (contain only Many("happy", "birthday", "to", "you") and contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (contain only ("fee", "fie", "foe", "fum") and contain only Many("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\"") + ", but " + Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
      }
    }
    // `equal (..) and contain only (..)`: verifies short-circuiting failure
    // messages ("didNotEqual" stops early; "equaled, but didNotContainOnly..."
    // when only the second operand fails), Equality resolution (implicit,
    // explicit via `decided by` / `after being`), duplicate-RHS rejection, and
    // the single-GenTraversable friendly reminder.
    object `when used with (equal (..) and contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (equal (fumList) and contain only ("fie", "fee", "fum", "foe"))
        // First operand fails -> message is only "didNotEqual".
        val e1 = intercept[TestFailedException] {
          fumList should (equal (toList) and contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        // First operand passes, second fails -> combined ", but " message.
        val e2 = intercept[TestFailedException] {
          fumList should (equal (fumList) and contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (equal (fumList) and contain only ("FEE", "FIE", "FOE", "FUM"))
        val e1 = intercept[TestFailedException] {
          fumList should (equal (toList) and contain only ("FEE", "FIE", "FOE", "FUM"))
        }
        checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (equal (fumList) and (contain only ("FEE", "FIE", "FOE", "FAM")))
        }
        checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        // invertedListOfStringEquality makes fumList "equal" toList, so this passes.
        (fumList should (equal (toList) and contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (equal (toList) and contain only ("FEE", "FIE", "FOE", "FAM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (equal (fumList) and contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        // Normalizing Equality (`after being ...`) applies to the `contain` operand.
        (fumList should (equal (toList) and contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (equal (fumList) and contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          fumList should (equal (fumList) and contain only Many("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
      }
    }
    // `be_== (..) and contain only (..)`: same scenarios as the `equal` variant
    // above but with identity comparison (`wasEqualTo`/`wasNotEqualTo` messages);
    // explicit Equality applies only to the `contain` operand here.
    object `when used with (be (..) and contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (be_== (fumList) and contain only ("fie", "fee", "fum", "foe"))
        // First operand fails -> "wasNotEqualTo" only.
        val e1 = intercept[TestFailedException] {
          fumList should (be_== (toList) and contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        // Second operand fails -> "wasEqualTo, but didNotContainOnlyElements".
        val e2 = intercept[TestFailedException] {
          fumList should (be_== (fumList) and contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (be_== (fumList) and contain only ("FEE", "FIE", "FOE", "FUM"))
        val e1 = intercept[TestFailedException] {
          fumList should (be_== (toList) and contain only ("FEE", "FIE", "FOE", "FUM"))
        }
        checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (be_== (fumList) and (contain only ("happy", "birthday", "to", "you")))
        }
        checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        (fumList should (be_== (fumList) and contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (be_== (fumList) and contain only ("happy", "birthday", "to", "you"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (be_== (toList) and contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        (fumList should (be_== (fumList) and contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (be_== (fumList) and contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e2 = intercept[TestFailedException] {
          fumList should (be_== (fumList) and contain only Many("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
      }
    }
    // Operand order reversed: `contain only (..) and be_== (..)`. The failure
    // message of the FIRST operand now leads, so a failing `contain only`
    // short-circuits before `be_==` is reported.
    object `when used with (contain only (..) and be (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (contain only ("fie", "fee", "fum", "foe") and be_== (fumList))
        // First operand passes, second fails -> "containedOnly..., but wasNotEqualTo".
        val e1 = intercept[TestFailedException] {
          fumList should (contain only ("fee", "fie", "foe", "fum") and be_== (toList))
        }
        checkMessageStackDepth(e1, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\"") + ", but " + Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        // First operand fails -> only "didNotContainOnlyElements".
        val e2 = intercept[TestFailedException] {
          fumList should (contain only ("happy", "birthday", "to", "you") and be_== (fumList))
        }
        checkMessageStackDepth(e2, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (contain only ("FIE", "FEE", "FUM", "FOE") and be_== (fumList))
        val e1 = intercept[TestFailedException] {
          fumList should (contain only ("FIE", "FEE", "FAM", "FOE") and be_== (toList))
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (contain only ("HAPPY", "BIRTHDAY", "TO", "YOU") and (be_== (fumList)))
        }
        checkMessageStackDepth(e2, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"HAPPY\", \"BIRTHDAY\", \"TO\", \"YOU\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        (fumList should (contain only ("FIE", "FEE", "FUM", "FOE") and be_== (fumList))) (decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (contain only ("FIE", "FEE", "FAM", "FOE") and be_== (fumList))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (contain only ("FIE", "FEE", "FUM", "FOE") and be_== (toList))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\"") + ", but " + Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        (fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") and be_== (fumList))) (after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (contain only ("fee", "fie", "foe", "fie", "fum") and be_== (fumList))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          fumList should (contain only Many("happy", "birthday", "to", "you") and be_== (fumList))
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
      }
    }
    // Negated form on both operands: `not contain only (..) and not contain only (..)`.
    // A `not contain only` FAILS when the list DOES contain exactly those elements,
    // producing "containedOnlyElements".
    object `when used with (not contain only xx and not contain only xx)` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (not contain only ("fee", "fie", "foe", "fuu") and not contain only ("fie", "fee", "fuu", "foe"))
        // First `not` operand fails (list DOES contain only these) -> short-circuit.
        val e1 = intercept[TestFailedException] {
          fumList should (not contain only ("fee", "fie", "foe", "fum") and not contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e1, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
        // First passes, second fails -> combined ", but " message.
        val e2 = intercept[TestFailedException] {
          fumList should (not contain only ("happy", "birthday", "to", "you") and not contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e2, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\"") + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (not contain only ("FIE", "FEE", "FAM", "FOE") and not contain only ("FIE", "FEE", "FOE", "FAM"))
        val e1 = intercept[TestFailedException] {
          fumList should (not contain only ("FIE", "FEE", "FUM", "FOE") and not contain only ("FIE", "FEE", "FOE", "FAM"))
        }
        checkMessageStackDepth(e1, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (not contain only ("FIE", "FEE", "FAM", "FOE") and (not contain only ("FIE", "FEE", "FOE", "FUM")))
        }
        checkMessageStackDepth(e2, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\"") + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        (fumList should (not contain only ("FIE", "FEE", "FAM", "FOE") and not contain only ("FIE", "FEE", "FOE", "FAM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (not contain only ("FIE", "FEE", "FAM", "FOE") and not contain only ("FIE", "FEE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\"") + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (not contain only ("FIE", "FEE", "FUM", "FOE") and not contain only ("FIE", "FEE", "FOE", "FAM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
        // NOTE(review): this sanity line uses the POSITIVE `contain only`, unlike
        // the rest of this `not` section — possibly intentional (parallel sections
        // elsewhere use the negated form here); verify against the spec's siblings.
        (fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") and contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (not contain only ("fee", "fie", "foe", "fie", "fum") and not contain only ("fie", "fee", "fuu", "foe"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
        val e2 = intercept[exceptions.NotAllowedException] {
          fumList should (not contain only ("fie", "fee", "fuu", "foe") and not contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e2.failedCodeFileName.get should be (fileName)
        e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e2.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        // Nested case: the container element itself is a Many, so the single-arg
        // friendly reminder fires on the negated matcher too.
        val e1 = intercept[TestFailedException] {
          One(Many("fee", "fie", "foe", "fum")) should (not contain only (Many("fee", "fie", "foe", "fum")) and not contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e1, Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          One(Many("fee", "fie", "foe", "fum")) should (not contain only (Many("happy", "birthday", "to", "you")) and not contain only (Many("fee", "fie", "foe", "fum")))
        }
        checkMessageStackDepth(e2, Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("happy", "birthday", "to", "you"))) + ", but " + Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
      }
    }
    // `not equal (..) and not contain only (..)`: failing `not equal` reports
    // "equaled"; passing `not equal` followed by failing `not contain only`
    // reports "didNotEqual, but containedOnlyElements".
    object `when used with (not equal (..) and not contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (not equal (toList) and not contain only ("fie", "fee", "fuu", "foe"))
        val e1 = intercept[TestFailedException] {
          fumList should (not equal (fumList) and not contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (not equal (toList) and not contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e2, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (not equal (toList) and not contain only ("FIE", "FEE", "FAM", "FOE"))
        val e1 = intercept[TestFailedException] {
          fumList should (not equal (fumList) and not contain only ("FIE", "FEE", "FAM", "FOE"))
        }
        checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (not equal (toList) and (not contain only ("FIE", "FEE", "FUM", "FOE")))
        }
        checkMessageStackDepth(e2, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        // Inverted list Equality: fumList is NOT "equal" to fumList, so `not equal` passes.
        (fumList should (not equal (fumList) and not contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (not equal (fumList) and not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (not equal (toList) and not contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
        (fumList should (not contain only (" FEE ", " FIE ", " FOE ", " FUU ") and not contain only (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (not equal (toList) and not contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          One(Many("fee", "fie", "foe", "fum")) should (not equal (toList) and not contain only (Many("fee", "fie", "foe", "fum")))
        }
        checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
      }
    }
    // `not be_== (..) and not contain only (..)`: identity-based negation;
    // failing `not be_==` reports "wasEqualTo", otherwise failures of the second
    // operand append "containedOnlyElements" after "wasNotEqualTo".
    object `when used with (not be (..) and not contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (not be_== (toList) and not contain only ("fie", "fee", "fuu", "foe"))
        val e1 = intercept[TestFailedException] {
          fumList should (not be_== (fumList) and not contain only ("happy", "birthday", "to", "you"))
        }
        checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (not be_== (toList) and not contain only ("fee", "fie", "foe", "fum"))
        }
        checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (not be_== (toList) and not contain only ("FIE", "FEE", "FAM", "FOE"))
        val e1 = intercept[TestFailedException] {
          fumList should (not be_== (fumList) and not contain only ("FIE", "FEE", "FAM", "FOE"))
        }
        checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          fumList should (not be_== (toList) and (not contain only ("FIE", "FEE", "FUM", "FOE")))
        }
        checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        (fumList should (not be_== (toList) and not contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (fumList should (not be_== (toList) and not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElements", decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (fumList should (not be_== (fumList) and not contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
        (fumList should (not contain only (" FEE ", " FIE ", " FOE ", " FUU ") and not contain only (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (not be_== (toList) and not contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          One(Many("fee", "fie", "foe", "fum")) should (not be_== (toList) and not contain only (Many("fee", "fie", "foe", "fum")))
        }
        checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(toList)) + ", but " + Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
      }
    }
}
object `every of Everys` {
    // Fixtures for the `all (...)` inspector tests below.
    // list1s: three identical Everys — `all` assertions can pass on every element.
    val list1s: Every[Every[Int]] = Every(Every(3, 2, 1), Every(3, 2, 1), Every(3, 2, 1))
    // lists: last element differs (4, 3, 2) — exercises atLeast/atMost/no and per-index failures.
    val lists: Every[Every[Int]] = Every(Every(3, 2, 1), Every(3, 2, 1), Every(4, 3, 2))
    // hiLists: string variant for Equality (case-normalization) tests.
    val hiLists: Every[Every[String]] = Every(Every("hi", "hello"), Every("hi", "hello"), Every("hi", "hello"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \n" +
"in " + decorateToStringValue(left)
    // `all`/`atLeast`/`atMost`/`no` inspector variants of `contain only ... and
    // contain only ...`; failure messages are wrapped by allErrMsg with the
    // failing element's index.
    object `used with contain only xx and contain only xx` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list1s) should (contain only (3, 2, 1) and contain only (1, 3, 2))
        atLeast (2, lists) should (contain only (3, 1, 2) and contain only (2, 3, 1))
        atMost (2, lists) should (contain only (3, 1, 2) and contain only (2, 3, 1))
        no (lists) should (contain only (3, 6, 9) and contain only (3, 4, 5))
        // Index 2 of `lists` is Many(4, 3, 2) -> first operand fails there.
        val e1 = intercept[TestFailedException] {
          all (lists) should (contain only (1, 2, 3) and contain only (1, 3, 2))
        }
        checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " did not contain only " + "(1, 2, 3)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (list1s) should (contain only (1, 2, 3) and contain only (1, 3, 4))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many(3, 2, 1)) + " contained only " + "(1, 2, 3)" + ", but " + decorateToStringValue(Many(3, 2, 1)) + " did not contain only " + "(1, 3, 4)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
        val e4 = intercept[TestFailedException] {
          all (hiLists) should (contain only ("hi", "hello") and contain only ("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hi\", \"hello\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"ho\", \"hey\", \"howdy\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        all (hiLists) should (contain only ("HELLO", "HI") and contain only ("HI", "HELLO"))
        val e1 = intercept[TestFailedException] {
          all (hiLists) should (contain only ("HO", "HELLO") and contain only ("HI", "HELLO"))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (hiLists) should (contain only ("HELLO", "HI") and contain only ("HO", "HELLO"))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      def `should use an explicitly provided Equality` {
        (all (hiLists) should (contain only ("HELLO", "HI") and contain only ("HI", "HELLO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (all (hiLists) should (contain only ("HO", "HELLO") and contain only ("HI", "HELLO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (all (hiLists) should (contain only ("HELLO", "HI") and contain only ("HO", "HELLO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (contain only (3, 2, 2, 1) and contain only (1, 3, 2))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
        val e2 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (contain only (1, 3, 2) and contain only (3, 2, 2, 1))
        }
        e2.failedCodeFileName.get should be (fileName)
        e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e2.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1), Many(3, 2, 1), Many(4, 3, 2)))) should (contain only Many(1, 2, 3) and contain only (1, 3, 2))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1), Many(3, 2, 1), Many(4, 3, 2))) + " did not contain only " + "(" + decorateToStringValue(Many(1, 2, 3)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1), Many(3, 2, 1), Many(4, 3, 2)))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (One(One(Many(1, 2, 3)))) should (contain only Many(1, 2, 3) and contain only Many(1, 3, 4))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One(Many(1, 2, 3))) + " contained only (" + decorateToStringValue(Many(1, 2, 3)) + "), did you forget to say : _*, but " + decorateToStringValue(One(Many(1, 2, 3))) + " did not contain only " + "(" + decorateToStringValue(Many(1, 3, 4)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(1, 2, 3)))), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (One(One(Many("hi", "hello")))) should (contain only Many("hi", "hello") and contain only Many("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " contained only (" + decorateToStringValue(Many("hi", "hello")) + "), did you forget to say : _*, but " + decorateToStringValue(One(Many("hi", "hello"))) + " did not contain only " + "(" + decorateToStringValue(Many("ho", "hey", "howdy")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
      }
    }
    /** Tests for the combination 'be (..) and contain only (..)' applied through the
     *  'all'/'atLeast'/'atMost'/'no' inspector shorthands.
     *  NOTE: the 'thisLineNumber - N' expressions encode relative line offsets; no lines
     *  may be inserted between an intercepted expression and its corresponding check.
     */
    object `when used with (be (..) and contain only (..))` {
      /** Happy-path and failure-message checks using the default Equality. */
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list1s) should (be_== (Many(3, 2, 1)) and contain only (1, 3, 2))
        atLeast (2, lists) should (be_== (Many(3, 2, 1)) and contain only (1, 3, 2))
        atMost (2, lists) should (be_== (Many(3, 2, 1)) and contain only (2, 3, 1))
        no (lists) should (be_== (Many(3, 6, 9)) and contain only (3, 4, 5))
        val e1 = intercept[TestFailedException] {
          all (lists) should (be_== (Many(3, 2, 1)) and contain only (1, 3, 2))
        }
        checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " was not equal to " + decorateToStringValue(Many(3, 2, 1)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (list1s) should (be_== (Many(3, 2, 1)) and contain only (2, 3, 8))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many(3, 2, 1)) + " was equal to " + decorateToStringValue(Many(3, 2, 1)) + ", but " + decorateToStringValue(Many(3, 2, 1)) + " did not contain only " + "(2, 3, 8)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (hiLists) should (be_== (Many("hi", "hello")) and contain only ("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"ho\", \"hey\", \"howdy\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e4 = intercept[TestFailedException] {
          all (list1s) should (be_== (Many(3, 2, 1)) and contain only (2, 3, 8))
        }
        checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(Many(3, 2, 1)) + " was equal to " + decorateToStringValue(Many(3, 2, 1)) + ", but " + decorateToStringValue(Many(3, 2, 1)) + " did not contain only " + "(2, 3, 8)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, but with an implicit upper-casing Equality in scope. */
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        all (hiLists) should (be_== (Many("hi", "hello")) and contain only ("HELLO", "HI"))
        val e1 = intercept[TestFailedException] {
          all (hiLists) should (be_== (Many("HI", "HELLO")) and contain only ("HELLO", "HI"))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("HI", "HELLO")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (hiLists) should (be_== (Many("hi", "hello")) and contain only ("HO", "HELLO"))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, but with the Equality passed via '(decided by ...)'. */
      def `should use an explicitly provided Equality` {
        (all (hiLists) should (be_== (Many("hi", "hello")) and contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (all (hiLists) should (be_== (Many("HI", "HELLO")) and contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("HI", "HELLO")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (all (hiLists) should (be_== (Many("hi", "hello")) and contain only ("HO", "HELLO"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HO\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Duplicate right-hand-side values must be rejected with NotAllowedException. */
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (be_== (Many(3, 2, 1)) and contain only (3, 2, 2, 1))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      /** Single-GenTraversable argument must trigger the ": _*" reminder in the message.
       *  NOTE(review): the e3 case below repeats e1 verbatim — possibly a copy/paste
       *  duplicate; confirm against the spec's sibling sections.
       */
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1)))) should (be_== (One(Many(3, 2, 1))) and contain only Many(2, 3, 8))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " was equal to " + decorateToStringValue(One(Many(3, 2, 1))) + ", but " + decorateToStringValue(One(Many(3, 2, 1))) + " did not contain only (" + decorateToStringValue(Many(2, 3, 8)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (One(One(Many("hi", "hello")))) should (be_== (One(Many("hi", "hello"))) and contain only Many("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " was equal to " + decorateToStringValue(One(Many("hi", "hello"))) + ", but " + decorateToStringValue(One(Many("hi", "hello"))) + " did not contain only (" + decorateToStringValue(Many("ho", "hey", "howdy")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1)))) should (be_== (One(Many(3, 2, 1))) and contain only Many(2, 3, 8))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " was equal to " + decorateToStringValue(One(Many(3, 2, 1))) + ", but " + decorateToStringValue(One(Many(3, 2, 1))) + " did not contain only (" + decorateToStringValue(Many(2, 3, 8)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
      }
    }
    /** Tests for the negated combination 'not contain only (..) and not contain only (..)'.
     *  NOTE: the 'thisLineNumber - N' expressions encode relative line offsets; no lines
     *  may be inserted between an intercepted expression and its corresponding check.
     */
    object `when used with (not contain only xx and not contain only xx)` {
      /** Happy-path and failure-message checks using the default Equality.
       *  NOTE(review): the 'atMost' line mixes 'not contain only' with a positive
       *  'contain only' in the second clause — possibly intentional, but it breaks the
       *  symmetry of this section; confirm against the sibling sections.
       */
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list1s) should (not contain only (3, 2, 8) and not contain only (8, 3, 4))
        atLeast (2, lists) should (not contain only (3, 8, 5) and not contain only (8, 3, 4))
        atMost (2, lists) should (not contain only (2, 4, 3) and contain only (4, 3, 2))
        no (list1s) should (not contain only (1, 2, 3) and not contain only (1, 3, 2))
        val e1 = intercept[TestFailedException] {
          all (lists) should (not contain only (2, 3, 4) and not contain only (8, 3, 4))
        }
        checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " contained only " + "(2, 3, 4)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (lists) should (not contain only (3, 6, 8) and not contain only (2, 3, 4))
        }
        checkMessageStackDepth(e2, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " did not contain only " + "(3, 6, 8)" + ", but " + decorateToStringValue(Many(4, 3, 2)) + " contained only " + "(2, 3, 4)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (hiLists) should (not contain only ("hello", "hi") and not contain only ("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e4 = intercept[TestFailedException] {
          all (hiLists) should (not contain only ("ho", "hey", "howdy") and not contain only ("hello", "hi"))
        }
        checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"ho\", \"hey\", \"howdy\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, but with an implicit upper-casing Equality in scope. */
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        all (hiLists) should (not contain only ("HI") and not contain only ("HO"))
        val e1 = intercept[TestFailedException] {
          all (hiLists) should (not contain only ("HELLO", "HI") and not contain only ("HO"))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (hiLists) should (not contain only ("HI") and not contain only ("HELLO", "HI"))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HI\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, with both Equalities passed via '(decided by ...)'. */
      def `should use an explicitly provided Equality` {
        (all (hiLists) should (not contain only ("HI") and not contain only ("HO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (all (hiLists) should (not contain only ("HELLO", "HI") and not contain only ("HO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (all (hiLists) should (not contain only ("HI") and not contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HI\")" + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Duplicate right-hand-side values must be rejected in either clause. */
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (not contain only (3, 2, 2, 1) and not contain only (8, 3, 4))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
        val e2 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (not contain only (8, 3, 4) and not contain only (3, 2, 2, 1))
        }
        e2.failedCodeFileName.get should be (fileName)
        e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e2.message should be (Some(Resources("onlyDuplicate")))
      }
      /** Single-GenTraversable argument must trigger the ": _*" reminder in the message. */
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1)))) should (not contain only (Many(3, 2, 1)) and not contain only (8, 3, 4))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1)))) should (not contain only (Many(3, 6, 8)) and not contain only (Many(3, 2, 1)))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " did not contain only (" + decorateToStringValue(Many(3, 6, 8)) + "), did you forget to say : _*" + ", but " + decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (One(One(Many("hi", "hello")))) should (not contain only (Many("hi", "hello")) and not contain only ("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " contained only (" + decorateToStringValue(Many("hi", "hello")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
        val e4 = intercept[TestFailedException] {
          all (One(One(Many("hi", "hello")))) should (not contain only (Many("ho", "hey", "howdy")) and not contain only (Many("hi", "hello")))
        }
        checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " did not contain only (" + decorateToStringValue(Many("ho", "hey", "howdy")) + "), did you forget to say : _*, but " + decorateToStringValue(One(Many("hi", "hello"))) + " contained only (" + decorateToStringValue(Many("hi", "hello")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
      }
    }
    /** Tests for the negated combination 'not be (..) and not contain only (..)'.
     *  NOTE: the 'thisLineNumber - N' expressions encode relative line offsets; no lines
     *  may be inserted between an intercepted expression and its corresponding check.
     */
    object `when used with (not be (..) and not contain only (..))` {
      /** Happy-path and failure-message checks using the default Equality. */
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list1s) should (not be_== (One(2)) and not contain only (8, 3, 4))
        atLeast (2, lists) should (not be_== (One(3)) and not contain only (8, 3, 4))
        atMost (2, lists) should (not be_== (Many(4, 3, 2)) and not contain only (3, 4, 2))
        no (list1s) should (not be_== (Many(3, 2, 1)) and not contain only (1, 2, 3))
        val e1 = intercept[TestFailedException] {
          all (lists) should (not be_== (Many(4, 3, 2)) and not contain only (8, 3, 4))
        }
        checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " was equal to " + decorateToStringValue(Many(4, 3, 2)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (lists) should (not be_== (One(3)) and not contain only (2, 3, 4))
        }
        checkMessageStackDepth(e2, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " was not equal to " + decorateToStringValue(One(3)) + ", but " + decorateToStringValue(Many(4, 3, 2)) + " contained only " + "(2, 3, 4)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
        val e3 = intercept[TestFailedException] {
          all (hiLists) should (not be_== (Many("hi", "hello")) and not contain only ("ho", "hey", "howdy"))
        }
        checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e4 = intercept[TestFailedException] {
          all (hiLists) should (not be_== (One("ho")) and not contain only ("hello", "hi"))
        }
        checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, but with an implicit upper-casing Equality in scope. */
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        all (hiLists) should (not be_== (One("ho")) and not contain only ("HO", "HELLO"))
        val e1 = intercept[TestFailedException] {
          all (hiLists) should (not be_== (Many("hi", "hello")) and not contain only ("HELLO", "HI"))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (hiLists) should (not be_== (One("ho")) and not contain only ("HI", "HELLO"))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HI\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Same matcher combination, with the Equality passed via '(decided by ...)'. */
      def `should use an explicitly provided Equality` {
        (all (hiLists) should (not be_== (One("ho")) and not contain only ("HO", "HELLO"))) (decided by upperCaseStringEquality)
        val e1 = intercept[TestFailedException] {
          (all (hiLists) should (not be_== (Many("hi", "hello")) and not contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          (all (hiLists) should (not be_== (One("ho")) and not contain only ("HI", "HELLO"))) (decided by upperCaseStringEquality)
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HI\", \"HELLO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
      }
      /** Duplicate right-hand-side values must be rejected with NotAllowedException. */
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          all (list1s) should (not be_== (One(2)) and not contain only (3, 2, 2, 1))
        }
        e1.failedCodeFileName.get should be (fileName)
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      /** Single-GenTraversable argument must trigger the ": _*" reminder in the message. */
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          all (One(One(Many(3, 2, 1)))) should (not be_== (One(3)) and not contain only (Many(3, 2, 1)))
        }
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " was not equal to " + decorateToStringValue(One(3)) + ", but " + decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
        val e2 = intercept[TestFailedException] {
          all (One(One(Many("hi", "hello")))) should (not be_== (One("ho")) and not contain only (Many("hi", "hello")))
        }
        checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + decorateToStringValue(One(Many("hi", "hello"))) + " contained only (" + decorateToStringValue(Many("hi", "hello")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
      }
    }
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/EveryShouldContainOnlyLogicalAndSpec.scala | Scala | apache-2.0 | 62,151 |
package esclient.queries
import org.elasticsearch.client.Client
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse
import org.elasticsearch.action.ActionListener
import scala.concurrent._
/** Issues an asynchronous index-stats request to Elasticsearch on construction and
  * exposes the eventual response as a `Future`.
  *
  * @param esClient connected Elasticsearch client used to run the stats request
  */
case class GetFillableIndicesQuery(esClient: Client) {

  // Completed exactly once by the ActionListener below.
  // `Promise[T]()` replaces the lowercase `promise[T]()` factory, which is deprecated
  // since Scala 2.11; the val no longer needs to be lazy, since it is only ever
  // touched after construction and Promise creation is cheap.
  val p = Promise[IndicesStatsResponse]()

  // Fire the stats request immediately: clear() drops the default stat groups,
  // all() targets every index, then store and get stats are re-enabled explicitly.
  esClient
    .admin()
    .indices()
    .prepareStats()
    .clear()
    .all()
    .setStore(true)
    .setGet(true)
    .execute()
    .addListener(new ActionListener[IndicesStatsResponse] {
      def onFailure(e: Throwable): Unit = p failure e
      def onResponse(response: IndicesStatsResponse): Unit = p success response
    })

  /** Future completed with the stats response (or the failure) once Elasticsearch answers. */
  def execute: Future[IndicesStatsResponse] = p.future
}
| MeiSign/Fillable | app/esclient/queries/GetFillableIndicesQuery.scala | Scala | apache-2.0 | 693 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2014, Gary Keorkunian **
** **
\\* */
package squants
import org.scalatest.{ Matchers, FlatSpec }
/**
* @author garyKeorkunian
* @since 0.1
*
*/
/** Checks that each SI prefix multiplier relates to its neighbour by a factor
 *  of exactly one thousand (or one thousandth on the sub-unit side).
 */
class MetricSystemSpec extends FlatSpec with Matchers {

  behavior of "The Metric System multipliers"

  it should "convert as expected" in {
    import MetricSystem._

    val step = 1000d                       // each prefix is 10^3 apart

    // super-unit prefixes: each is one step above the next smaller one
    Exa should be(Peta * step)
    Peta should be(Tera * step)
    Tera should be(Giga * step)
    Giga should be(Mega * step)
    Mega should be(Kilo * step)
    Kilo should be(step)

    // sub-unit prefixes: each is one step below the next larger one
    Milli should be(.001)
    Micro should be(Milli * .001)
    Nano should be(Micro * .001)
    Pico should be(Nano * .001)
    Femto should be(Pico * .001)
    Atto should be(Femto * .001)
  }
}
| non/squants | src/test/scala/squants/MetricSystemSpec.scala | Scala | apache-2.0 | 1,180 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp
/** Build Target Sources Request */
/** Build Target Sources Request */
// GENERATED by sbt-contraband — do not edit manually; regenerate from the schema instead.
final class SourcesParams private (
  val targets: Vector[sbt.internal.bsp.BuildTargetIdentifier]) extends Serializable {
  // Reference equality fast path, then structural comparison on `targets`.
  override def equals(o: Any): Boolean = this.eq(o.asInstanceOf[AnyRef]) || (o match {
    case x: SourcesParams => (this.targets == x.targets)
    case _ => false
  })
  // Standard generated hash: fold the type name and each field into the accumulator.
  override def hashCode: Int = {
    37 * (37 * (17 + "sbt.internal.bsp.SourcesParams".##) + targets.##)
  }
  override def toString: String = {
    "SourcesParams(" + targets + ")"
  }
  // Private copy helper backing the public with* setters below.
  private[this] def copy(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier] = targets): SourcesParams = {
    new SourcesParams(targets)
  }
  /** Returns a copy of this request with `targets` replaced. */
  def withTargets(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier]): SourcesParams = {
    copy(targets = targets)
  }
}
// GENERATED by sbt-contraband — do not edit manually.
object SourcesParams {
  /** Factory mirroring the private constructor. */
  def apply(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier]): SourcesParams = new SourcesParams(targets)
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/bsp/SourcesParams.scala | Scala | apache-2.0 | 1,085 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Arash Fard, Usman Nisar, Ayushi Jain, Aravind Kalimurthy, John Miller
* @version 1.2
* @date Thu Nov 25 11:28:31 EDT 2013
* @see LICENSE (MIT style license file).
*
* `MGraph` Strict Simulation Using Mutable Sets
*/
package scalation.graphalytics.mutable
import scala.collection.mutable.{ArrayStack, ListBuffer, Map, HashMap, MutableList}
import scala.collection.mutable.{Set => SET}
import scala.reflect.ClassTag
import scala.util.control.Breaks.{break, breakable}
import scalation.graphalytics.mutable.{ExampleMGraphD => EX_GRAPH}
import scalation.stat.Statistic
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The 'MStrictSim' class provides an implementation for strict simulation
* graph pattern matching. This version uses `DualSim`.
* @see hipore.com/ijbd/2014/IJBD%20Vol%201%20No%201%202014.pdf
* @param q the query graph Q(U, D, k)
* @param g the data graph G(V, E, l)
*/
class MStrictSim [TLabel: ClassTag] (g: MGraph [TLabel], q: MGraph [TLabel])
      extends GraphMatcher (g, q)
{
    // NOTE(review): this buffer is never appended to anywhere in this class, so
    // 'calculateTotalVertices' always returns 0 — confirm whether population was
    // meant to happen during post-processing.
    private val listOfDistinctReducedSet = new ListBuffer [SET [String]] ()   // contains total number of matches
                                                                              // after post processing
    // NOTE(review): declared but never read or written in this class.
    private val mapOfBallWithSize = Map [Int, Long] ()                        // contains balls left after
                                                                              // post processing with diameter.
    // NOTE(review): never populated, so 'calculateBallDiameterMetrics' tallies nothing.
    private val listOfMatchedBallVertices = MutableList [Int] ()              // contains list of center vertices

    private val qmet = new GraphMetrics (q.clone, false)                      // creating graph metrics object of query graph
    private val dataSize = g.size                                             // size of the data graph
    private val querySize = q.size                                            // size of the query graph

    // Initial feasible mapping from plain dual simulation over the whole data graph.
    private val phi0 = new MDualSim (g, q).mappings ()
    println (s"phi0 = ${phi0.deep}")

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Apply the Strict Graph Simulation pattern matching algorithm to find the mappings
     *  from the query graph 'q' to the data graph 'g'.  These are represented by a
     *  multi-valued function 'phi' that maps each query graph vertex 'u' to a
     *  set of data graph vertices '{v}'.
     */
    def mappings (): Array [SET [Int]] = merge (mappings2 ())

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Mapping results per ball (center vertex -> per-vertex match sets). */
    def mappings2 (): HashMap [Int, Array [SET [Int]]] = strictSim (phi0)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Merged mapping results, the union over all balls. */
    def merge (matches: Map [Int, Array [SET [Int]]]): Array [SET [Int]] =
    {
        val phi_all = Array.ofDim [SET [Int]] (querySize)
        for (i <- 0 until querySize) phi_all (i) = SET [Int] ()
        for ((c, phi_c) <- matches) {
            println (s"(c, phi_c) = ($c, ${phi_c.deep})")
            for (i <- 0 until querySize) phi_all(i) ++= phi_c(i)
        } // for
        phi_all
    } // merge

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Performs strict simulation to find mappings with balls.
     *  For every data vertex matched by dual simulation, builds a ball of radius
     *  = query diameter around it and re-runs dual filtering inside that ball.
     *  @param phi  the initial mapping after applying Dual to the whole graph
     */
    def strictSim (phi: Array [SET [Int]]): HashMap [Int, Array [SET [Int]]] =
    {
        if (phi.size == 0) { println ("No dual match."); return null }        // exit if no match after dual simulation
        println (s"phi = ${phi.deep}")
        val newGraph = filterGraph (phi)                                      // if doing strong sim more than once, must clone g
        val prunedSize = phi.clone.flatten.toSet.size                         // size of feasible matches after strict simulation
        val qDiameter = qmet.diam                                             // get the query diameter
        val balls = HashMap [Int, Ball [TLabel]] ()                           // map of balls: center -> ball
        val matches = HashMap [Int, Array [SET [Int]]] ()                     // map of matches in balls: center -> match
        val gCenters = (0 until q.size).flatMap(phi(_))                       // set of mapped data graph centers
        val bCenters = SET [Int] ()                                           // set of centers for all balls
        var ballSum = 0

        for (center <- gCenters) {                                            // for each mapped data graph center
            val ball = new Ball (newGraph, center, qDiameter)                 // create a new ball for that center vertex
            ballSum += ball.nodesInBall.size                                  // calculate ball size
            val mat = dualFilter (phi.clone, ball)                            // perform dual filter on the ball
            println (s"center = $center, mat = ${mat.deep}")
            balls.put (center, ball)
            if (mat.size != 0) { bCenters += center; matches += center -> mat }
            else println ("No match for ball centered at " + center + "\\n")
        } // for

        println ("SEQUENTIAL: Data Graph Name: " + g.name +
                "\\n Number of Data Graph Nodes: " + dataSize +
                "\\n Query Graph Name: " + q.name +
                "\\n Number of Query Graph Nodes: " + querySize +
                "\\n Number of Strict Matches: " + bCenters.size +
                "\\n Graph Size after Pruning: " + prunedSize + " nodes" +
                "\\n Query Diameter: " + qDiameter +
                "\\n Average Ball Size: " + (ballSum / prunedSize.toDouble) +
                "\\n Total Distinct Edges: " + calculateTotalEdges (g, balls, bCenters) +
                "\\n Total Distinct Vertices: " + calculateTotalVertices ())
        println ("Ball Diameter Metrics(Min, Max, Mean, StdDev): " + calculateBallDiameterMetrics (balls) )
        matches
    } // strictSim

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Prune the data graph by consider only those vertices and edges which
     *  are part of feasible matches after performing initial dual simulation.
     *  NOTE(review): mutates 'g.ch' in place (see the in-line comment in strictSim
     *  about cloning 'g' before repeated runs).
     *  @param phi  mappings from a query vertex u_q to { graph vertices v_g }
     */
    def filterGraph (phi: Array [SET [Int]]): MGraph [TLabel] =
    {
        val nodesInSimset = phi.flatten.toSet                                 // get all the vertices of feasible matches
        for (i <- 0 until dataSize) g.ch(i) &= nodesInSimset                  // prune via intersection

        val newCh = Array.ofDim [SET [Int]] (dataSize)
        for (i <- 0 until dataSize) newCh(i) = SET [Int] ()

        for (u <- 0 until q.size; w <- phi(u)) {                              // new ch and pa set for data graph based upon feasible vertices
            for (v <- q.ch(u)) newCh(w) |= (g.ch(w) & phi(v))
        } // for
        new MGraph (newCh, g.label, g.elabel, g.inverse, g.name + "2")        // create a new data graph
    } // filterGraph

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Perform dual simulation onto the ball: repeatedly drops (u, v) pairs whose
     *  child/parent support inside the ball is empty, then keeps only the maximal
     *  connected subgraph reachable from the ball's center.
     *  @param phi   mappings from a query vertex u_q to { graph vertices v_g }
     *  @param ball  the Ball B(Graph, Center, Radius)
     */
    def dualFilter (phi: Array [SET [Int]], ball: Ball [TLabel]): Array [SET [Int]] =
    {
        for (v <- phi.indices) phi(v) &= ball.nodesInBall                     // project simset onto ball
        val filterSet = new ArrayStack [(Int, Int)] ()
        var filtered = false
        for (u <- phi.indices; v <- phi(u) if ball.borderNodes contains v) {
            filtered = false                                                  // filtering ball based on child relationship
            breakable { for (u1 <- q.ch(u)) {
                if ((ball.post (v) & phi (u1)).isEmpty) {
                    filterSet.push ((u, v))
                    filtered = true
                    break
                } // if
            }} // breakable for

            if (! filtered) {                                                 // filtering ball based on parent relationship,
                breakable { for (u2 <- q.pa(u)) {                             // if no child has been filtered out
                    if ((ball.pre (v) & phi(u2)).isEmpty) {
                        filterSet.push ((u, v))
                        break
                    } // if
                }} // breakable for
            } // if
        } // for

        while (! filterSet.isEmpty) {                                         // refine ch and pa relationship for the vertex v,
            val (u, v) = filterSet.pop ()                                     // which is now not a feasible match
            phi(u) -= v
            for (u2 <- q.pa(u); v2 <- (ball.pre (v) & phi(u2)) if (ball.post (v2) & phi(u)).isEmpty)
                filterSet.push ((u2, v2))
            for (u1 <- q.ch(u); v1 <- (ball.post (v) & phi(u1)) if (ball.pre (v1) & phi(u)).isEmpty)
                filterSet.push ((u1, v1))
        } // while

        val chSet = HashMap [Int, SET [Int]] ()
        val paSet = HashMap [Int, SET [Int]] ()
        // create new ch and pa set for the ball after above pruning
        for (u <- phi.indices; v <- phi(u); uc <- q.ch(u); vc <- (ball.post (v) & phi(uc))) {
            chSet.getOrElseUpdate (v, SET [Int] ()) += vc
            paSet.getOrElseUpdate (vc, SET [Int] ()) += v
        } // for

        // Finding max perfect subgraph: DFS from the center over the pruned edges
        val stack = new ArrayStack [Int] ()
        val visited = SET (ball.center)
        stack.push (ball.center)
        while (! stack.isEmpty) {
            val v = stack.pop ()
            for (child <- (chSet.getOrElse (v, SET ()) | paSet.getOrElse (v, SET ()))) {
                if (! visited.contains (child)) {
                    stack.push (child)
                    visited += child
                } // if
            } // for
        } // while
        for ( v <- phi.indices) phi(v) = phi(v) & visited

        //fixes the edges in the ball
        //(note that it does not change the parent set; this is only used for printing)
        //uncomment if you want to see the ball after finding maximum perfect subgraph
        ball.chMap = Map [Int, SET [Int]] ()
        val matchNodes = phi.flatten.toSet
        for ((n, nset) <- chSet; nc <- nset) {
            if ((matchNodes contains n) && (matchNodes contains nc)) ball.chMap.getOrElseUpdate (n, SET () ) += nc
        } // for

        for (v <- phi.indices if phi(v).isEmpty) return Array [SET [Int]] ()
        phi
    } //dualFilter

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count distinct vertices left after post processing.
     *  NOTE(review): 'listOfDistinctReducedSet' is never filled in this class, so
     *  this currently always returns 0 — confirm the intended producer.
     */
    def calculateTotalVertices (): Int =
    {
        val totalSet = SET [String] ()
        for (i <- 0 until listOfDistinctReducedSet.length) totalSet ++= listOfDistinctReducedSet(i)
        totalSet.size
    } // calculateTotalVertices

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count distinct edges left after post processing.
     *  @param g             the data graph  G(V, E, l)
     *  @param balls         mappings from a center vertex to the Ball B(Graph, Center, Radius)
     *  @param matchCenters  set of all vertices which are considered as center
     */
    def calculateTotalEdges (g: MGraph [TLabel], balls: Map [Int, Ball [TLabel]], matchCenters: SET [Int]): Int =
    {
        val distinctEdges = SET [String] ()
        for (vert_id <- 0 until g.ch.length; if balls.keySet.contains (vert_id)) {
            // NOTE(review): 'balls.get (vert_id).get' is Option.get; the key is known to
            // exist here, but 'balls(vert_id)' would express that more directly.
            balls.get (vert_id).get.chMap.foreach (i => i._2.foreach (j => distinctEdges += (i._1.toString + "_" + j.toString)))
        } // for
        distinctEdges.size
    } // calculateTotalEdges

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Calculate statistics (e.g., min, max, average diameter and standard deviation)
     *  on the balls left after post-processing.
     *  NOTE(review): 'listOfMatchedBallVertices' is never populated, so the returned
     *  Statistic currently tallies no samples.
     *  @param balls  mappings from a center vertex to the Ball B(Graph, Center, Radius)
     */
    def calculateBallDiameterMetrics (balls: Map [Int, Ball [TLabel]]): Statistic =
    {
        val ballStats = new Statistic ()
        for (vert_id <- listOfMatchedBallVertices) ballStats.tally (balls.get (vert_id).get.getBallDiameter)
        ballStats
    } // calculateBallDiameterMetrics

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the vertex from an array of central vertices, those which have
     *  highest 'ch' set size and lowest frequency of label in the query graph, i.e.,
     *  highest ratio.
     *  @param qmet  graph metrics whose 'central' vertices are candidates
     */
    def selectivityCriteria (qmet: GraphMetrics [TLabel]): Int =
    {
        var index = 0
        var max = 0.0
        for (ctr <- qmet.central) {
            val ratio = qmet.g.ch(ctr).size.toDouble / qmet.g.labelMap (qmet.g.label(ctr)).size.toDouble
            if (max < ratio) { max = ratio; index = ctr }
        } // for
        index
    } // selectivityCriteria

} // MStrictSim class
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ::::::::::::
/** The `MStrictSimTest` object is used to test the `MStrictSim` class on the
 *  example data graph 'g1p' and query graph 'q1p'.
 *  > run-main scalation.graphalytics.mutable.MStrictSimTest
 */
object MStrictSimTest extends App
{
    val g = EX_GRAPH.g1p                                     // data graph
    val q = EX_GRAPH.q1p                                     // query graph
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MStrictSim (g, q)                      // Strict Graph Simulation Pattern Matcher
    matcher.test ("MStrictSim")
} // MStrictSimTest object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ::::::::::::
/** The `MStrictSimTest2` object is used to test the `MStrictSim` class on the
 *  example data graph 'g2p' and query graph 'q2p'.
 *  > run-main scalation.graphalytics.mutable.MStrictSimTest2
 */
object MStrictSimTest2 extends App
{
    val g = EX_GRAPH.g2p                                     // data graph
    val q = EX_GRAPH.q2p                                     // query graph
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MStrictSim (g, q)                      // Strict Graph Simulation Pattern Matcher
    matcher.test ("MStrictSim")
} // MStrictSimTest2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MStrictSimTest3` object tests the `MStrictSim` class by loading the data
 *  and query graphs from relative file paths.
 *  > run-main scalation.graphalytics.mutable.MStrictSimTest3
 */
object MStrictSimTest3 extends App
{
    val g = MGraphIO [Double] ("gfile")                      // data graph read from file
    val q = MGraphIO [Double] ("qfile")                      // query graph read from file
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MStrictSim (g, q)                      // Strict Graph Simulation Pattern Matcher
    matcher.test ("MStrictSim")
} // MStrictSimTest3 object
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/graphalytics/mutable/MStrictSim.scala | Scala | mit | 15,158 |
package c2corg
import scala.concurrent.duration._
import io.gatling.core.Predef._
import io.gatling.http.Predef._
// Gatling load-test simulation: an anonymous user browsing the topoguide.
class ConsultationTopoguideAnonyme extends Simulation {
  // Shared HTTP configuration: base URL and resource inference come from C2corgConf.
  val httpProtocol = http
    .baseURL(C2corgConf.ui_url)
    .inferHtmlResources(C2corgConf.black_list, WhiteList())
    .acceptHeader("*/*")
    .acceptEncodingHeader("gzip, deflate")
  // Scenario: homepage, then an outing, a route and a waypoint, with think-time
  // pauses (seconds) between steps.
  val scn = scenario("ConsultationTopoguideAnonyme")
    .exec(Homepage.init)
    .pause(8)
    .exec(Homepage.browse)
    .pause(6)
    .exec(Outing.view)
    .pause(14)
    .exec(Route.view)
    .pause(10)
    .exec(Waypoint.view)
  // Overridable via -Dusers=... and -Dramp=... system properties.
  val numUsers = Integer.getInteger("users", 100)
  val rampSec = Integer.getInteger("ramp", 300)
  // Ramp up numUsers virtual users linearly over rampSec seconds.
  setUp(scn.inject(rampUsers(numUsers) over (rampSec seconds))).protocols(httpProtocol)
}
| c2corg/v6_api | c2corg_api/scripts/loadtests/gatling/user-files/simulations/c2corg/scenarios/ConsultationTopoguideAnonyme.scala | Scala | agpl-3.0 | 786 |
package uk.co.morleydev.ghosthunt.model.component.game
import org.jsfml.system.Vector2f
/**
 * Global useful values related to actors, such as the width and height of the sprite.
 */
object ActorDetails {
  val width = 28.0f                                        // sprite width (presumably pixels -- confirm)
  val height = 28.0f                                       // sprite height
  val speed = 100.0f                                       // movement speed (units per second -- confirm)
  val dimensions = new Vector2f(width, height)             // full sprite size as a vector
  val halfDimensions = new Vector2f(width / 2.0f, height / 2.0f) // offset from corner to sprite center
  val los = 96.0f                                          // line-of-sight radius
}
/**
 * The actor is an entity that has a position and direction (aka velocity), used to represent either a ghost or a hero.
 *
 * @param position  the actor's current location
 * @param direction the actor's movement vector; defaults to stationary (0, 0)
 */
case class Actor(position : Vector2f,
                 direction : Vector2f = new Vector2f(0.0f, 0.0f))
| MorleyDev/GhostHunt | src/main/scala/uk/co/morleydev/ghosthunt/model/component/game/Actor.scala | Scala | mit | 674 |
package io.udash.web.guide.views
import io.udash.web.commons.components.CodeBlock
import io.udash.web.guide.styles.partials.GuideStyles
import scalatags.JsDom.Modifier
package object rest {
  /** Renders the introductory REST interface example as a highlighted code block. */
  def simpleExample(): Modifier = {
    val snippet =
      """import io.udash.rest._
        |
        |trait MainServerREST {
        | def simple(): SimpleServerREST
        |}
        |
        |trait SimpleServerREST {
        | @GET def string(): Future[String]
        | @GET def int(): Future[Int]
        | @GET @RESTName("class") def cls(): Future[RestExampleClass]
        |}""".stripMargin
    CodeBlock(snippet)(GuideStyles)
  }
}
| UdashFramework/udash-guide | guide/src/main/scala/io/udash/web/guide/views/rest/package.scala | Scala | gpl-3.0 | 615 |
package com.azavea.maml.error
import com.azavea.maml.ast._
import io.circe._
import io.circe.syntax._
/** Custom, MAML-specific errors */
trait MamlError {
  // Human-readable description of the error.
  def repr: String
}

object MamlError {
  // Errors serialize to JSON as their `repr` string.
  implicit val encodeMamlError: Encoder[MamlError] =
    Encoder.encodeString.contramap[MamlError](_.repr)
}
/** Error which signifies that a node's argument count is incorrect */
case class IncorrectArgCount(exp: Expression, expectedArgs: Int) extends MamlError {
  def repr = s"Expected $expectedArgs arguments to ${exp}; instead, found ${exp.children.size}"
}
/** Error to use when an unhandled node is encountered during evaluation */
case class UnhandledCase(exp: Expression, kind: MamlKind) extends MamlError {
  def repr = s"A branch of Interpreter logic has yet to be implemented for the expression ${exp} and the kind $kind"
}

/** The raw input string could not be parsed as JSON at all. */
case class ASTParseError(json: String, reason: String) extends MamlError {
  def repr = s"Unable to parse ${json} as JSON: ${reason}"
}

/** The input was valid JSON but could not be decoded into a MAML AST. */
case class ASTDecodeError(json: Json, reason: String) extends MamlError {
  def repr = s"Unable to decode the json ${json} as AST: ${reason}"
}

/** The tree evaluated to a kind other than one of the expected kinds. */
case class DivergingTypes(found: String, expected: List[String]) extends MamlError {
  def repr: String = s"Expected to evaluate tree as one of $expected; instead found $found"
}

/** A `Variable` node has no corresponding literal in the supplied bindings. */
case class NoVariableBinding(variable: Variable, bindings: Map[String, Literal]) extends MamlError {
  def repr: String = s"No binding for ${variable.name} found in ${bindings.keys.toList}"
}

/** No logic is defined to bind a literal value into the tree for this expression. */
case class BindingUndefined(exp: Expression) extends MamlError {
  def repr: String = s"No logic defined to bind literal value to tree for $exp"
}
/** An expression could not be evaluated; `reason` optionally explains why. */
case class NonEvaluableNode(exp: Expression, reason: Option[String]) extends MamlError {
  def repr: String =
    s"Unable to evaluate $exp due to ${reason.getOrElse("an unknown reason")}"
}
| geotrellis/maml | shared/src/main/scala/error/MamlError.scala | Scala | apache-2.0 | 1,917 |
package com.twitter.finagle.stats
/**
* A RollupStatsReceiver reports stats on multiple Counter/Stat/Gauge based on the sequence of
* names you pass.
* e.g.
* counter("errors", "clientErrors", "java_net_ConnectException").incr()
* will actually increment those three counters:
* - "/errors"
* - "/errors/clientErrors"
* - "/errors/clientErrors/java_net_ConnectException"
*/
class RollupStatsReceiver(val self: StatsReceiver)
  extends StatsReceiver with Proxy
{
  val repr = self.repr

  /**
   * All non-empty prefixes of `s`, shortest first, e.g.
   * Seq(a, b, c) => Seq(Seq(a), Seq(a, b), Seq(a, b, c)).
   *
   * Returns an empty Seq for empty input. The previous pattern match only
   * covered one-element and multi-element sequences, so calling counter(),
   * stat() or addGauge() with zero names threw a MatchError.
   */
  private[this] def tails[A](s: Seq[A]): Seq[Seq[A]] =
    (1 to s.length) map { n => s take n }

  override def toString(): String = self.toString

  // Increments every counter along the name prefix chain.
  def counter(names: String*): Counter = new Counter {
    private[this] val allCounters = BroadcastCounter(
      tails(names) map (self.counter(_: _*))
    )
    def incr(delta: Int) = allCounters.incr(delta)
  }

  // Records the value into every stat along the name prefix chain.
  def stat(names: String*): Stat = new Stat {
    private[this] val allStats = BroadcastStat(
      tails(names) map (self.stat(_: _*))
    )
    def add(value: Float) = allStats.add(value)
  }

  // Registers a gauge at every name prefix; remove() deregisters all of them.
  def addGauge(names: String*)(f: => Float): Gauge = new Gauge {
    private[this] val underlying = tails(names) map { self.addGauge(_: _*)(f) }
    def remove() = underlying foreach { _.remove() }
  }
}
| tdyas/util | util-stats/src/main/scala/com/twitter/finagle/stats/RollupStatsReceiver.scala | Scala | apache-2.0 | 1,374 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
import breeze.optimize.FirstOrderMinimizer._
/**
* Created by tl on 4/13/16.
*/
package object optimizerexitcodes {

  /** Bridges breeze's `ConvergenceReason` into Blaze's `OptimizationResult`. */
  final implicit class ConvergenceReasonFunctions(cr: ConvergenceReason) {

    // Maps each breeze convergence/failure reason onto the matching Blaze exit
    // code, carrying the iteration and sample counts through unchanged.
    def toOptimizationResult(iterationNo: Long,
                             noSamples:   Long)
    : OptimizationResult = cr match {

      case MaxIterations =>
        OptimizationResult.derive(
          NoIterationsLimit(),
          iterationNo,
          noSamples
        )

      case FunctionValuesConverged =>
        OptimizationResult.derive(
          ThirdParty.convergence(cr.reason),
          iterationNo,
          noSamples
        )

      case GradientConverged =>
        OptimizationResult.derive(
          ThirdParty.convergence(cr.reason),
          iterationNo,
          noSamples
        )

      case SearchFailed =>
        OptimizationResult.derive(
          LineSearchFailed(),
          iterationNo,
          noSamples
        )

      case ObjectiveNotImproving =>
        OptimizationResult.derive(
          ThirdParty.failure(cr.reason),
          iterationNo,
          noSamples
        )

      // TODO: Update once we have newer breeze support.
      /*
      case MonitorFunctionNotImproving =>
        OptimizationResult.derive(
          ThirdParty.failure(cr.reason),
          iterationNo,
          noSamples
        )

      case ProjectedStepConverged =>
        OptimizationResult.derive(
          ThirdParty.convergence(cr.reason),
          iterationNo,
          noSamples
        )
      */

      // Explicit MatchError keeps the failure loud if breeze adds new reasons.
      case _ =>
        throw new MatchError(cr)
    }

  }

}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/optimizerexitcodes/package.scala | Scala | apache-2.0 | 2,319 |
package com.ubirch.avatar.test.tools.model
import java.util.UUID
import com.ubirch.avatar.model.rest.device.DeviceHistory
import com.ubirch.util.uuid.UUIDUtil
import org.joda.time.{DateTime, DateTimeZone}
import org.json4s.JsonAST.JValue
import org.json4s.native.JsonMethods._
import scala.collection.mutable.ListBuffer
/**
* author: cvandrei
* since: 2016-10-25
*/
object DummyDeviceHistory {

  /** Build a single dummy `DeviceHistory` record. Every field has a default so
    * tests only override what they care about; ids are freshly generated.
    */
  def data(deviceId: String = UUIDUtil.uuidStr,
           messageId: UUID = UUIDUtil.uuid,
           deviceType: String = "lightsLamp",
           timestamp: DateTime = DateTime.now,
           deviceTags: Set[String] = Set("ubirch#0", "actor"),
           deviceMessage: JValue = parse("""{"foo": 23, "bar": 42}""")
          ): DeviceHistory =
    DeviceHistory(
      messageId = messageId,
      deviceDataRawId = UUIDUtil.uuid,
      deviceId = deviceId,
      deviceName = s"$deviceType $deviceId",
      deviceType = deviceType,
      deviceTags = deviceTags,
      deviceMessage = deviceMessage,
      timestamp = timestamp
    )

  /** Build `elementCount` records spaced `intervalMillis` apart, oldest first.
    * The newest timestamp is now (UTC) minus `timestampOffset`.
    * NOTE(review): the negative default offset makes `minus` shift the series
    * one hour into the future -- confirm this is intended.
    */
  def dataSeries(deviceId: String = UUIDUtil.uuidStr,
                 dType: String = "lightsLamp",
                 tags: Set[String] = Set("ubirch#0", "actor"),
                 message: JValue = parse("""{"foo": 23, "bar": 42}"""),
                 intervalMillis: Long = 1000 * 10, // 10s
                 timestampOffset: Long = -1000 * 60 * 60, // 1h
                 elementCount: Int = 5
                ): List[DeviceHistory] = {
    val newestDateTime = DateTime.now(DateTimeZone.UTC).minus(timestampOffset)
    // Counting the index down keeps the oldest record first, matching the
    // prepend order of the previous mutable-ListBuffer implementation.
    ((elementCount - 1) to 0 by -1).toList.map { i =>
      data(
        deviceId = deviceId,
        deviceType = dType,
        timestamp = newestDateTime.minus(i * intervalMillis),
        deviceTags = tags,
        deviceMessage = message
      )
    }
  }

}
| ubirch/ubirch-avatar-service | test-tools/src/main/scala/com/ubirch/avatar/test/tools/model/DummyDeviceHistory.scala | Scala | apache-2.0 | 1,977 |
package com.avsystem.commons
package mongo.core.ops
import org.bson.BsonValue
/** Combines key handling with value encoding: values are converted to BSON via
  * the codec carried by the document key (`docKey`).
  */
trait DocKeyKeyValueHandling[T] extends Any with KeyValueHandling[T] with DocKeyKeyHandling[T] {
  override protected def encode(t: T): BsonValue = docKey.codec.toBson(t)
}
| AVSystem/scala-commons | commons-mongo/jvm/src/main/scala/com/avsystem/commons/mongo/core/ops/DocKeyKeyValueHandling.scala | Scala | mit | 253 |
/*
* Copyright (c) 2014-2019 Israel Herraiz <isra@herraiz.org>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// ---------------------
// Test for example 6.03
// ---------------------
package chap06
import org.specs2.mutable._
import rng._
object Ex03Spec extends Specification {

  // Mutable seed state, used only while building the fixture list below.
  var r1: RNG = SimpleRNG(-123456)

  // Fixture: 10000 successive generator states derived from the seed.
  val listOfRNGs = (1 to 10000).map { _ =>
    val next = r1.nextInt._2
    r1 = next
    next
  }

  "The intDouble method" should {
    "return always nonNegative ints and double numbers 0<=x<1" in {
      val pairs = listOfRNGs.map(RNG.intDouble)

      pairs.map(_._1._1) must contain(be_>=(0)).forall
      pairs.map(_._1._2) must contain(be_>=(0.0) and be_<(1.0)).forall
    }
  }

  "The doubleInt method" should {
    "return always nonNegative ints and double numbers 0<=x<1" in {
      val pairs = listOfRNGs.map(RNG.doubleInt)

      pairs.map(_._1._2) must contain(be_>=(0)).forall
      pairs.map(_._1._1) must contain(be_>=(0.0) and be_<(1.0)).forall
    }
  }

  "The double3 method" should {
    "return always double numbers 0<=x<1" in {
      val triples = listOfRNGs.map(RNG.double3)

      triples.map(_._1._1) must contain(be_>=(0.0) and be_<(1.0)).forall
      triples.map(_._1._2) must contain(be_>=(0.0) and be_<(1.0)).forall
      triples.map(_._1._3) must contain(be_>=(0.0) and be_<(1.0)).forall
    }
  }
}
| iht/fpinscala | src/test/scala/chap06/ex03Spec.scala | Scala | mit | 2,490 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils
import scala.util.matching.Regex
/**
* Borrowed from kafka 0.8.1.1
* https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=blob;f=core/src/main/scala/kafka/common/Topic.scala
*/
object Topic {
  import kafka.manager.utils.TopicErrors._

  val legalChars = "[a-zA-Z0-9\\\\._\\\\-]"
  val maxNameLength = 255
  private val rgx = new Regex(legalChars + "+")

  /** Validate a topic name, failing (in order) when it is empty, equals "." or
    * "..", exceeds `maxNameLength`, or contains a character outside `legalChars`.
    */
  def validate(topic: String): Unit = {
    checkCondition(topic.nonEmpty, TopicNameEmpty)
    checkCondition(topic != "." && topic != "..", InvalidTopicName)
    checkCondition(topic.length <= maxNameLength, InvalidTopicLength)
    // The whole name must be one maximal legal-character run.
    checkCondition(rgx.findFirstIn(topic).exists(_ == topic), IllegalCharacterInName(topic))
  }
}
/** Error values raised by topic validation and topic management operations.
  * Constructors are private; use the vals / factory methods at the bottom.
  */
object TopicErrors {
  class TopicNameEmpty private[TopicErrors] extends UtilError("topic name is illegal, can't be empty")
  class InvalidTopicName private[TopicErrors] extends UtilError("topic name cannot be \\".\\" or \\"..\\"")
  class InvalidTopicLength private[TopicErrors] extends UtilError(
    "topic name is illegal, can't be longer than " + Topic.maxNameLength + " characters")
  class IllegalCharacterInName private[TopicErrors] (topic: String) extends UtilError(
    "topic name " + topic + " is illegal, contains a character other than ASCII alphanumerics, '.', '_' and '-'")
  class PartitionsGreaterThanZero private[TopicErrors] extends UtilError(s"number of partitions must be greater than 0!")
  class ReplicationGreaterThanZero private[TopicErrors] extends UtilError(s"replication factor must be greater than 0!")
  class ReplicationGreaterThanNumBrokers private[TopicErrors](replicationFactor: Int, numBrokers: Int) extends UtilError(
    s"replication factor: $replicationFactor larger than available brokers $numBrokers")
  class InconsistentPartitionReplicas private[TopicErrors] extends UtilError("All partitions should have the same number of replicas.")
  class TopicAlreadyExists private[TopicErrors] (topic: String) extends UtilError(s"Topic already exists : $topic")
  class DuplicateReplicaAssignment private[TopicErrors] (topic: String, part: Int, replicas: Seq[Int]) extends UtilError(
    s"Duplicate replica assignment topic=$topic, partition=$part, replicas=$replicas"
  )
  class CannotAddZeroPartitions private[TopicErrors] (topic: String, currentPartitions: Int, newPartitions: Int) extends UtilError(
    s"Cannot add zero partitions topic=$topic, currentPartitions=$currentPartitions, newPartitions=$newPartitions"
  )
  class FailedToAddNewPartitions private[TopicErrors] (topic: String, newPartitions: Int, found: Int) extends UtilError(
    s"Failed to add new partitions topic=$topic, newPartitions=$newPartitions, after adding new partitions to assignment found=$found"
  )
  class TopicDoesNotExist private[TopicErrors] (topic: String) extends UtilError(s"Topic does not exist : $topic")

  // Singleton instances for parameterless errors.
  val TopicNameEmpty = new TopicNameEmpty
  val InvalidTopicName = new InvalidTopicName
  val InvalidTopicLength = new InvalidTopicLength
  def IllegalCharacterInName(topic: String) = new IllegalCharacterInName(topic)
  val PartitionsGreaterThanZero = new PartitionsGreaterThanZero
  val ReplicationGreaterThanZero = new ReplicationGreaterThanZero
  def ReplicationGreaterThanNumBrokers(rf: Int, nb: Int) = new ReplicationGreaterThanNumBrokers(rf,nb)
  val InconsistentPartitionReplicas = new InconsistentPartitionReplicas
  def TopicAlreadyExists(topic: String) = new TopicAlreadyExists(topic)
  def DuplicateReplicAssignment(topic: String, part: Int, replicas: Seq[Int]) = new DuplicateReplicaAssignment(topic,part,replicas)
  def CannotAddZeroPartitions(topic: String, currentPartitions: Int, newPartitions:Int) = new CannotAddZeroPartitions(topic,currentPartitions,newPartitions)
  def FailedToAddNewPartitions(topic: String, newPartitions:Int, found: Int) = new FailedToAddNewPartitions(topic,newPartitions,found)
  def TopicDoesNotExist(topic: String) = new TopicDoesNotExist(topic)
}
| hecran/FUCKWHENXIN | app/kafka/manager/utils/Topic.scala | Scala | apache-2.0 | 4,869 |
package filodb.spark
import org.apache.spark.sql.{SQLContext, SaveMode, DataFrame, Row}
import org.apache.spark.sql.types.StructType
import scala.concurrent.duration._
import scala.language.postfixOps
import filodb.core._
import filodb.core.metadata.{Column, DataColumn, Dataset}
import filodb.coordinator.client.ClientException
import filodb.coordinator.IngestionCommands
/**
* Class implementing insert and save Scala APIs.
* Don't directly instantiate this, instead use the implicit conversion function.
*/
class FiloContext(val sqlContext: SQLContext) extends AnyVal {
  // NOTE(review): Flush/Flushed look unused in this class -- confirm before removing.
  import IngestionCommands.{Flush, Flushed}
  import FiloRelation._

  /**
   * Creates a DataFrame from a FiloDB table.  Does no reading until a query is run, but it does
   * read the schema for the table.
   * @param dataset the name of the FiloDB dataset to read from
   * @param database the database / Cassandra keyspace to read the dataset from
   * @param version the version number to read from
   * @param splitsPerNode the parallelism or number of splits per node
   */
  def filoDataset(dataset: String,
                  database: Option[String] = None,
                  version: Int = 0,
                  splitsPerNode: Int = 4): DataFrame =
    sqlContext.baseRelationToDataFrame(FiloRelation(DatasetRef(dataset, database), version, splitsPerNode)
                                                   (sqlContext))

  // Convenience method for Java programmers
  def filoDataset(dataset: String): DataFrame = filoDataset(dataset, None)

  /**
   * Creates (or recreates) a FiloDB dataset with certain row, segment, partition keys.  Only creates the
   * dataset/projection definition and persists the dataset metadata; does not actually create the column
   * definitions (that is done by the insert step).  The exact behavior depends on the mode:
   *   Append  - creates the dataset if it doesn't exist
   *   Overwrite - creates the dataset, deleting the old definition first if needed
   *   ErrorIfExists - throws an error if the dataset already exists
   *
   * For the other parameter definitions, please see saveAsFiloDataset().
   */
  private[spark] def createOrUpdateDataset(schema: StructType,
                                           dataset: DatasetRef,
                                           rowKeys: Seq[String],
                                           segmentKey: String,
                                           partitionKeys: Seq[String],
                                           chunkSize: Option[Int] = None,
                                           resetSchema: Boolean = false,
                                           mode: SaveMode = SaveMode.Append): Unit = {
    FiloDriver.init(sqlContext.sparkContext)
    // Fall back to the constant default partition key when none is given.
    val partKeys = if (partitionKeys.nonEmpty) partitionKeys else Seq(Dataset.DefaultPartitionColumn)
    val dfColumns = dfToFiloColumns(schema)
    // Look up any existing dataset definition; None means it doesn't exist yet.
    val datasetObj = try {
      Some(getDatasetObj(dataset))
    } catch {
      case e: NotFoundError => None
    }
    (datasetObj, mode) match {
      // No existing dataset: create it regardless of mode.
      case (None, SaveMode.Append) | (None, SaveMode.Overwrite) | (None, SaveMode.ErrorIfExists) =>
        val ds = makeAndVerifyDataset(dataset, rowKeys, segmentKey, partKeys, chunkSize, dfColumns)
        createNewDataset(ds)
      case (Some(dsObj), SaveMode.ErrorIfExists) =>
        throw new RuntimeException(s"Dataset $dataset already exists!")
      // Overwrite + resetSchema: drop and recreate with the new definition.
      case (Some(dsObj), SaveMode.Overwrite) if resetSchema =>
        val ds = makeAndVerifyDataset(dataset, rowKeys, segmentKey, partKeys, chunkSize, dfColumns)
        deleteDataset(dataset)
        createNewDataset(ds)
      // Existing dataset, no schema reset requested: keep the definition as is.
      case (_, _) =>
        sparkLogger.info(s"Dataset $dataset definition not changed")
    }
  }

  /**
   * Saves a DataFrame in a FiloDB Table
   * - Creates columns in FiloDB from DF schema if needed
   *
   * @param df the DataFrame to write to FiloDB
   * @param dataset the name of the FiloDB table/dataset to read from
   * @param rowKeys the name of the column(s) used as the row primary key within each partition.
   *                May be computed functions.  Only used if mode is Overwrite and
   * @param segmentKey the name of the column or computed function used to group rows into segments and
   *                   to sort the partition by.
   * @param partitionKeys column name(s) used for partition key.  If empty, then the default Dataset
   *                      partition key of `:string /0` (a constant) will be used.
   *
   *          Partitioning columns could be created using an expression on another column
   *          {{{
   *            val newDF = df.withColumn("partition", df("someCol") % 100)
   *          }}}
   *          or even UDFs:
   *          {{{
   *            val idHash = sqlContext.udf.register("hashCode", (s: String) => s.hashCode())
   *            val newDF = df.withColumn("partition", idHash(df("id")) % 100)
   *          }}}
   *
   *          However, note that the above methods will lead to a physical column being created, so
   *          use of computed columns is probably preferable.
   *
   * @param database the database/keyspace to write to, optional.  Default behavior depends on ColumnStore.
   * @param mode the Spark SaveMode - ErrorIfExists, Append, Overwrite, Ignore
   * @param options various IngestionOptions, such as timeouts, version to write to, etc.
   */
  def saveAsFilo(df: DataFrame,
                 dataset: String,
                 rowKeys: Seq[String],
                 segmentKey: String,
                 partitionKeys: Seq[String],
                 database: Option[String] = None,
                 mode: SaveMode = SaveMode.Append,
                 options: IngestionOptions = IngestionOptions()): Unit = {
    val IngestionOptions(version, chunkSize, writeTimeout,
                         flushAfterInsert, resetSchema) = options
    val ref = DatasetRef(dataset, database)
    // Ensure the dataset definition exists/matches, then ingest the rows.
    createOrUpdateDataset(df.schema, ref, rowKeys, segmentKey, partitionKeys, chunkSize, resetSchema, mode)
    insertIntoFilo(df, dataset, version, mode == SaveMode.Overwrite,
                   database, writeTimeout, flushAfterInsert)
  }

  /**
   * Implements INSERT INTO into a Filo Dataset.  The dataset must already have been created.
   * Will check and add any extra columns from the DataFrame into the dataset, but column type
   * mismatches will result in an error.
   * @param overwrite if true, first truncate the dataset before writing
   */
  def insertIntoFilo(df: DataFrame,
                     datasetName: String,
                     version: Int = 0,
                     overwrite: Boolean = false,
                     database: Option[String] = None,
                     writeTimeout: FiniteDuration = DefaultWriteTimeout,
                     flushAfterInsert: Boolean = true): Unit = {
    val filoConfig = FiloDriver.initAndGetConfig(sqlContext.sparkContext)
    val dfColumns = dfToFiloColumns(df)
    val columnNames = dfColumns.map(_.name)
    val dataset = DatasetRef(datasetName, database)
    // Reconcile DataFrame columns with the dataset's column definitions.
    checkAndAddColumns(dfColumns, dataset, version)
    if (overwrite) {
      FiloDriver.client.truncateDataset(dataset, version)
    }

    val numPartitions = df.rdd.partitions.size
    sparkLogger.info(s"Inserting into ($dataset/$version) with $numPartitions partitions")
    sparkLogger.debug(s"   Dataframe schema = $dfColumns")

    // For each partition, start the ingestion
    df.rdd.mapPartitionsWithIndex { case (index, rowIter) =>
      // Everything within this function runs on each partition/executor, so need a local datastore & system
      FiloExecutor.init(filoConfig)
      sparkLogger.info(s"Starting ingestion of DataFrame for dataset $dataset, partition $index...")
      ingestRddRows(FiloExecutor.coordinatorActor, dataset, columnNames, version, rowIter,
                    writeTimeout, index)
      Iterator.empty
    }.count()

    // This is the only time that flush is explicitly called
    if (flushAfterInsert) {
      try {
        val nodesFlushed = FiloDriver.client.flushCompletely(dataset, version)
        sparkLogger.info(s"Flush completed on $nodesFlushed nodes for dataset $dataset")
      } catch {
        // Flush failures are logged but not fatal: data is already ingested.
        case ClientException(msg) =>
          sparkLogger.warn(s"Could not flush due to client exception $msg on dataset $dataset...")
        case e: Exception =>
          sparkLogger.warn(s"Exception from flushing nodes for $dataset/$version", e)
      }
    }
    syncToHive(sqlContext)
  }
}
| markhamstra/FiloDB | spark/src/main/scala/filodb.spark/FiloContext.scala | Scala | apache-2.0 | 8,458 |
package com.larskroll.neo4j
import spray.json._
/** spray-json protocol for the Neo4j REST/transactional HTTP API.
  *
  * Request types (statements, traversals) are write-only; response types are
  * read-only -- the unused direction throws.  All malformed response payloads
  * are now reported uniformly via `DeserializationException` (previously
  * `TraversalResultFormat.read` fell through with a raw `MatchError`).
  */
object Neo4JsonProtocol extends DefaultJsonProtocol {

  implicit object StatementFormat extends JsonFormat[StatementBlock] {
    def write(m: StatementBlock): JsValue = {
      // Inline the statement's parameters into the Cypher text before sending.
      val paramCypher = m.statement.replaceParams()
      JsObject("statement" -> JsString(paramCypher))
    }
    def read(value: JsValue): StatementBlock = {
      throw new Exception("No necessity to deserialize StatementBlock")
    }
  }

  implicit object BlockFormat extends RootJsonFormat[StatementsBlock] {
    def write(m: StatementsBlock) =
      JsObject("statements" -> JsArray(m.statements.map(_.toJson).toList))
    def read(value: JsValue): StatementsBlock = {
      throw new Exception("No necessity to deserialize StatementsBlock")
    }
  }

  implicit object StatementResultFormat extends JsonFormat[StatementResult] {
    def write(m: StatementResult): JsValue = {
      throw new Exception("No necessity to serialize StatementResult")
    }
    def read(value: JsValue): StatementResult = {
      value.asJsObject.getFields("columns", "data") match {
        case Seq(JsArray(columns), JsArray(data)) =>
          val cols = columns.map {
            case JsString(s) => s
            case _ => throw new DeserializationException("Unkown result format!")
          }
          StatementResult(cols, data)
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }

  implicit object QueryErrorFormat extends JsonFormat[QueryError] {
    def write(m: QueryError): JsValue = {
      throw new Exception("No necessity to serialize StatementResult")
    }
    def read(value: JsValue): QueryError = {
      value.asJsObject.getFields("code", "message") match {
        case Seq(JsString(code), JsString(message)) => QueryError(code, message)
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }

  implicit object TxResultFormat extends RootJsonFormat[TxResult] {
    def write(m: TxResult): JsValue = {
      throw new Exception("No necessity to serialize TxResult")
    }
    def read(value: JsValue): TxResult = {
      value.asJsObject.getFields("results", "errors") match {
        case Seq(JsArray(results), JsArray(errors)) =>
          val res = results.map(StatementResultFormat.read(_))
          val errs = errors.map(QueryErrorFormat.read(_))
          TxResult(res, errs)
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }

  implicit object NodeFormat extends JsonFormat[Node] {
    def write(n: Node): JsValue = {
      JsString(n.endpoint)
    }
    def read(value: JsValue): Node = {
      value match {
        case JsString(endpoint) =>
          // The node id is the last path segment of its endpoint URL.
          val id = endpoint.split("/").last.toInt
          Node(id, endpoint)
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }

  implicit object RelationshipFormat extends JsonFormat[Relationship] {
    def write(n: Relationship): JsValue = {
      JsString(n.endpoint)
    }
    def read(value: JsValue): Relationship = {
      value match {
        case JsString(endpoint) =>
          // The relationship id is the last path segment of its endpoint URL.
          val id = endpoint.split("/").last.toInt
          Relationship(id, endpoint)
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }

  implicit object TraversalFormat extends RootJsonFormat[Traversal] {
    import Graph._
    def write(m: Traversal): JsValue = {
      // Each algorithm carries exactly one extra property.
      // NOTE(review): this match covers only shortestPath and dijkstra --
      // confirm no other algorithms exist before relying on exhaustiveness.
      val algoProp = m.algo match {
        case `shortestPath`(maxDepth) => ("max_depth" -> JsNumber(maxDepth))
        case `dijkstra`(costProp) => ("cost_property" -> JsString(costProp))
      }
      val rel = "relationships" -> JsObject("type" -> JsString(m.edgeType), "direction" -> JsString(m.edgeDirection.getClass().getSimpleName().stripSuffix("$")))
      JsObject("to" -> NodeFormat.write(m.to), algoProp, rel, "algorithm" -> JsString(m.algo.getClass().getSimpleName()))
    }
    def read(value: JsValue): Traversal = {
      throw new Exception("No necessity to deserialize Traversal")
    }
  }

  implicit object TraversalResultFormat extends RootJsonFormat[TraversalResult] {
    import Graph._
    def write(m: TraversalResult): JsValue = {
      throw new Exception("No necessity to serialize TraversalResult")
    }
    def read(value: JsValue): TraversalResult = {
      value.asJsObject.getFields("weight", "start", "nodes", "length", "relationships", "end") match {
        // Weighted path (dijkstra responses include "weight").
        case Seq(JsNumber(weight), start: JsString, JsArray(nodes), JsNumber(length), JsArray(relationships), end: JsString) =>
          val nds = nodes.map(NodeFormat.read)
          val rels = relationships.map(RelationshipFormat.read)
          TraversalResult(Some(weight.toFloat), NodeFormat.read(start), nds, length.toInt, rels, NodeFormat.read(end))
        // Unweighted path (shortestPath responses).
        case Seq(start: JsString, JsArray(nodes), JsNumber(length), JsArray(relationships), end: JsString) =>
          val nds = nodes.map(NodeFormat.read)
          val rels = relationships.map(RelationshipFormat.read)
          TraversalResult(None, NodeFormat.read(start), nds, length.toInt, rels, NodeFormat.read(end))
        // Previously missing: any other shape raised a raw MatchError.
        case _ => throw new DeserializationException("Unkown result format!")
      }
    }
  }
} | Bathtor/Whloot | src/main/scala/com/larskroll/neo4j/Neo4JsonProtocol.scala | Scala | apache-2.0 | 6,096 |
object SCL4353B {
def goo[T](x: Int => T): T = x(1)
implicit def f(x: Int): String = ""
def foo(x: Int): Int = x + 1
val x: String = goo(/*start*/foo _/*end*/)
}
//(Int) => String | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL4353B.scala | Scala | apache-2.0 | 187 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.rewrite
import laika.rewrite.LinkTargets._
import laika.tree.Elements._
import laika.tree.Paths.{Current, Path}
import scala.annotation.tailrec
/** The default rewrite rules responsible for resolving link references that get
* applied to the raw document tree after parsing.
*
* These rules resolve references to images, footnotes, citations and other
* inline targets, and generate the identifiers for targets with auto-generated
* ids like auto-number footnotes.
*
* The rules replace references pointing to internal or external targets
* with the corresponding resolved link elements, as well as the targets
* themselves with nodes that contain their final ids.
*
* In case of duplicate target ids or unresolvable references system messages
* get inserted into the final document tree.
*
* @author Jens Halm
*/
object LinkResolver extends (DocumentCursor => RewriteRule) {
/** The default rules for resolving link references
* to be applied to the document.
*/
def apply (cursor: DocumentCursor): RewriteRule = {
// all link targets defined within the current document
val targets = cursor.target.linkTargets
// lookup from a header's original id to its final (deduplicated) id
val headerId = targets.headerIds
// replaces a header element, translating its original id via `lookup` first
def replaceHeader (h: Block, origId: String, lookup: String => Option[String]): Option[Element] = lookup(origId).flatMap(replace(h,_))
// replaces a target element with its final form, or None when no target
// is registered locally for the given selector
def replace (element: Element, selector: Selector): Option[Element] =
targets.local.get(selector).flatMap(_.replaceTarget(element))
// resolves a reference to its target; when the target cannot be found the
// reference is replaced by an InvalidSpan carrying `msg` (lazily built).
// `global = true` additionally searches parent trees / path-qualified targets.
def resolve (ref: Reference, selector: Selector, msg: => String, global: Boolean = false): Option[Element] = {
// walks up the tree hierarchy until the selector matches or the root is reached
def selectFromParent = {
@tailrec def select (path: Path): (Option[TargetResolver],Option[Path]) = {
val tree = cursor.root.target.selectSubtree(path)
val target = tree.flatMap(_.selectTarget(selector))
if (target.isDefined || path.parent == path) (target,Some(cursor.target.path))
else select(path.parent)
}
val path = cursor.parent.target.path
select(Path(Current, path.components))
}
// resolves a "path:name" style reference relative to the parent tree
def selectFromRoot (path: String, name: String) =
(cursor.root.target.selectTarget(PathSelector(cursor.parent.target.path / Path(path), name)),Some(cursor.target.path))
val (target, path) = {
// local targets always win; global lookup applies to unique selectors only
val local = targets.local.get(selector)
if (local.isDefined) (local, None)
else (selector, global) match {
case (UniqueSelector(targetName), true) =>
// a ':' separates an explicit path prefix from the target name
val index = targetName.indexOf(":")
if (index == -1) selectFromParent
else selectFromRoot(targetName take index, targetName drop (index+1))
case _ => (None,None)
}
}
Some(target.flatMap(_.resolveReference(ref,path))
.getOrElse(InvalidSpan(SystemMessage(laika.tree.Elements.Error, msg), Text(ref.source))))
}
// the rewrite rule itself: a partial function applied to each tree element
{
case f: FootnoteDefinition => f.label match {
case NumericLabel(num) => replace(f, num.toString)
case AutonumberLabel(id) => replace(f, id)
case Autonumber => replace(f, AutonumberSelector)
case Autosymbol => replace(f, AutosymbolSelector)
}
case c: Citation => replace(c, c.label)
case h: DecoratedHeader => replaceHeader(h, h.options.id.get, headerId)
case h@ Header(_,_,Id(id)) => replaceHeader(h, id, headerId)
case c @ CitationReference(label,_,_) => resolve(c, label, s"unresolved citation reference: $label")
case ref: FootnoteReference => ref.label match {
case NumericLabel(num) => resolve(ref, num.toString, s"unresolved footnote reference: $num")
case AutonumberLabel(id) => resolve(ref, id, s"unresolved footnote reference: $id")
case Autonumber => resolve(ref, AutonumberSelector, "too many autonumber references")
case Autosymbol => resolve(ref, AutosymbolSelector, "too many autosymbol references")
}
// an empty id means an anonymous link reference, matched by position
case ref: LinkReference => if (ref.id.isEmpty) resolve(ref, AnonymousSelector, "too many anonymous link references")
else resolve(ref, ref.id, s"unresolved link reference: ${ref.id}", global = true)
case ref: ImageReference => resolve(ref, ref.id, s"unresolved image reference: ${ref.id}", global = true)
// images without path info get their URI resolved against the parent tree's path
case img @ Image(_,URI(uri, None),_,_) => Some(img.copy(uri = URI(uri, PathInfo.fromURI(uri, cursor.parent.target.path))))
case _: Temporary => None
case c: Customizable if c.options.id.isDefined => replace(c, c.options.id.get)
}
}
}
| amuramatsu/Laika | core/src/main/scala/laika/rewrite/LinkResolver.scala | Scala | apache-2.0 | 5,240 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Hao Peng, John Miller
* @version 1.2
* @date Thu Nov 19 18:43:58 EST 2015
* @see LICENSE (MIT style license file).
*/
package scalation.graphalytics
import scala.collection.immutable.{Set => SET}
import TrafficLight._
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `TopSort` object provides the 'topSort' method for creating a
* topological sort of the vertices in a directed graph. It also perform
* cycle detection.
*/
object TopSort
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Topological sort that returns an edge compatible ordering of the vertices.
* Translated from pseudo-code and implements Tarjan's algorithm.
* The result will contain negative values if there is a cycle: the entry
* at the last filled position is the sentinel -2, and positions never
* filled keep their initial -1.
* @see en.wikipedia.org/wiki/Topological_sorting
* @param g the graph
*/
def topSort (g: Graph): Array [Int] =
{
val n = g.size // the number of vertices in g
val color = Array.fill (n)(G_N) // traffic light: GreeN (unvisited), YelloW (on current DFS path) or ReD (finished)
val vList = Array.fill (n)(-1) // ordered list of vertices
var last = n - 1 // last open position in vList (filled back to front)
var acyclic = true // assume acyclic until cycle detected
// launch a DFS from every still-unvisited vertex; referencing the local
// def 'dfs' before its definition is legal since defs are not evaluated here
for (v <- color.indices if acyclic && color(v) == G_N) dfs (v)
/* Recursively visit vertices, adding vertices onto a list at the end.
* Revisiting a YelloW vertex means a back edge was found, i.e. a cycle.
* @param u the current vertex
*/
def dfs (u: Int)
{
if (acyclic) {
if (color(u) == Y_W) {
vList(last) = -2 // sentinel: a cycle passes through here
acyclic = false // detected cycle
} else if (color(u) == G_N) {
color(u) = Y_W
for (v <- g.ch(u)) dfs (v)
color(u) = R_D
if (vList(last) != -2) { vList(last) = u; last -= 1 } // prepend to front of list
} // if
} // if
} // dfs
vList
} // topSort
} // TopSort object
import TopSort.topSort
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `TopSortTest` object tests the `TopSort` object using a directed
* graph. Graphs are created by passing in an array of adjacency sets (one for
* each vertex).
* > run-main scalation.graphalytics.TopSortTest
*/
object TopSortTest extends App
{
    /** Print the banner, the graph itself and the ordering computed by
     *  'topSort' for the given graph.
     *  @param banner  heading line identifying the test case
     *  @param label   label used when printing the resulting ordering
     *  @param g       the graph to sort
     */
    private def runCase (banner: String, label: String, g: Graph)
    {
        println (banner)
        g.printG ()
        println (label + " order = " + topSort (g).deep)
    } // runCase

    /** Test graph 1 (acyclic): 0 -> 1, 0 -> 2, 1 -> 2 */
    val pg1 = new Graph (Array (SET (1, 2),
                                SET (2),
                                SET [Int] ()))
    runCase ("Precedence Graph pg1: --------------------------------------------", "pg1", pg1)

    /** Test graph 2 (cyclic): 0 -> 1, 0 -> 2, 1 -> 2, 2 -> 0 */
    val pg2 = new Graph (Array (SET (1, 2),
                                SET (2),
                                SET (0)))
    runCase ("Precedence Digraph pg2: --------------------------------------------", "pg2", pg2)
} // TopSortTest object
| scalation/fda | scalation_1.2/src/main/scala/scalation/graphalytics/TopSort.scala | Scala | mit | 3,572 |
package edu.gemini.model.p1.pdf
import org.specs2.mutable.SpecificationWithJUnit
import javax.xml.transform.stream.{StreamResult, StreamSource}
import java.io.StringWriter
import javax.xml.transform.TransformerFactory
import edu.gemini.model.p1.pdf.P1PDF.{Template, P1PdfUriResolver}
import scala.xml.XML
class P1TemplatesSpec extends SpecificationWithJUnit {
// These test transform the proposal files using the templates
// and validate the output of the transformation
//
// Though this doesn't test the PDF directly we should trust FOP to produce de desired result
// at the end
"The P1 DEFAULT Template" should {
"write on the ObservingMode Queue if there is no ToO option set" in {
  val result = transformProposal("proposal_no_too.xml")
  // With no ToO option set, the observing-mode line must read plain "Queue".
  // (Removed a leftover commented-out debug loop that printed every block.)
  XML.loadString(result) must \\\\("block") \\>~ """\\s*Observing Mode: Queue\\s*"""
}
"write on the ObservingMode Queue + Rapid ToO, REL-646" in {
val result = transformProposal("proposal_rapid_too.xml")
XML.loadString(result) must \\\\("block") \\>~ """\\s*Observing Mode: Queue \\+ Rapid ToO\\s*"""
}
"write on the ObservingMode Queue + Standard ToO, REL-646" in {
val result = transformProposal("proposal_standard_too.xml")
XML.loadString(result) must \\\\("block") \\>~ """\\s*Observing Mode: Queue \\+ Standard ToO\\s*"""
}
"include TAC information in case all are approved, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_all_approved.xml")
val accepted = (XML.loadString(result) \\\\ "table-row" \\ "table-cell") collect {
case e if e.text.matches( """\\s*Accepted\\s*""") => true
}
// Check there is a table
XML.loadString(result) must \\\\("block") \\ "inline" \\>~ """\\s*TAC information\\s*"""
// Check there is three accepted
accepted must be size 3
}
"include TAC information with accepts and rejects, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_one_approved_one_rejected.xml")
val accepted = (XML.loadString(result) \\\\ "table-row" \\ "table-cell") collect {
case e if e.text.matches( """\\s*Accepted\\s*""") => true
}
val rejected = (XML.loadString(result) \\\\ "table-row" \\ "table-cell") collect {
case e if e.text.matches( """\\s*Rejected\\s*""") => true
}
// Check there is a table
XML.loadString(result) must \\\\("block") \\ "inline" \\>~ """\\s*TAC information\\s*"""
// Check there is one accepted
accepted must be size 1
// And one rejected
rejected must be size 1
}
"include TAC information with only one response, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_one_decision.xml")
val accepted = (XML.loadString(result) \\\\ "table-row" \\ "table-cell") collect {
case e if e.text.matches( """\\s*Accepted\\s*""") => true
}
val rejected = (XML.loadString(result) \\\\ "table-row" \\ "table-cell") collect {
case e if e.text.matches( """\\s*Rejected\\s*""") => true
}
// Check there is a table
XML.loadString(result) must \\\\("block") \\ "inline" \\>~ """\\s*TAC information\\s*"""
// Check there is one accepted
accepted must be size 1
// And none rejected
rejected must beEmpty
}
"skip TAC information when there are no responses, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_no_decisions.xml")
// Check there is no TAC table
XML.loadString(result) must not(\\\\("block") \\ "inline" \\>~ """\\s*TAC information\\s*""")
}
"include TAC partner ranking, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_all_approved.xml")
// Check there is a block with text 4.0
XML.loadString(result) must \\\\("block") \\> "4.0"
}
"include recommended and min recommended time, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_all_approved.xml")
// Check there is a block with text 2.0 hr (1.0 hr)
XML.loadString(result) must \\\\("block") \\>~ """2\\.0.hr.\\(1\\.0.hr\\)"""
}
"include text with scheduling requests, REL-687" in {
val result = transformProposal("proposal_with_schedule.xml")
val schedRegex = """\\s*This proposal has the following scheduling restrictions.*""".r
val foundMatches = (XML.loadString(result) \\\\ "block") collect {
case e if schedRegex.findFirstIn(e.text).isDefined => true
}
// Check there is a scheduling element
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ """\\s*Scheduling Constraints\\s*""")
// Check there is a scheduling text
foundMatches must be size 1
}
"use new text for observations with guiding between 50% and less than 100%, REL-640" in {
val result = transformProposal("proposal_guiding_caution.xml")
// Check there is a scheduling element
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ """.*Some PAs do not have suitable guide stars \\(\\d\\d%\\).*""")
}
"use new text for observations with guiding between 0% and less than 50%, REL-640" in {
val result = transformProposal("proposal_guiding_warning.xml")
// Check there is a scheduling element
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ """.*Many PAs do not have suitable guide stars \\(\\d\\d%\\).*""")
}
"use new text for observations with guiding equals to 0%, REL-640" in {
val result = transformProposal("proposal_guiding_bad.xml")
// Check there is a scheduling element
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ """.*Guiding is problematic \\(0%\\).*""")
}
"present the correct name when using GSAOI, REL-693" in {
val result = transformProposal("proposal_with_gsaoi.xml")
// Check that we use the proper public name of GSOAI
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "GSAOI")
}
"present the correct name when using Texes, REL-1062" in {
val result = transformProposal("proposal_with_texes.xml")
// Check that we use the proper public name of Texes
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Texes - Gemini North")
}
"present the correct name when using Dssi, REL-1061" in {
val result = transformProposal("proposal_with_dssi.xml")
// Check that we use the proper public name of DSSI
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "DSSI - Gemini North")
}
"present the correct name when using Visitor GN, REL-1090" in {
val result = transformProposal("proposal_with_visitor_gn.xml")
// Check that we use the proper public name of a north visitor
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Visitor - Gemini North - My instrument")
}
"present the correct name when using Visitor GS, REL-1090" in {
val result = transformProposal("proposal_with_visitor_gs.xml")
// Check that we use the proper public name of a south visitor
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Visitor - Gemini South - Super Camera")
}
"present Phoenix's Site, REL-2463" in {
val result = transformProposal("proposal_with_phoenix.xml")
// Check that we use the proper public name of a south visitor
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Phoenix - Gemini South")
}
"present Texes' Site, REL-2463" in {
val result = transformProposal("proposal_with_texes.xml")
// Check that we use the proper public name of a south visitor
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Texes - Gemini North")
}
"present DSSI' Site, REL-2463" in {
val result = transformProposal("proposal_with_dssi.xml")
// Check that we use the proper public name of a south visitor
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "DSSI - Gemini North")
}
"show an ITAC information section if the proposal contains a comment, REL-1165" in {
val result = transformProposal("proposal_with_itac_comment.xml")
// Check that we have an ITAC information section with the comment
XML.loadString(result) must (\\\\("block") \\ "inline" \\> "ITAC Information")
XML.loadString(result) must \\\\("block") \\>~ "An Itac comment"
}
"show an ITAC information section if the proposal contains multiple comments, REL-1165" in {
val result = transformProposal("proposal_with_itac_and_several_comments.xml")
// Check that we have an ITAC information section with the comment
XML.loadString(result) must (\\\\("block") \\ "inline" \\> "ITAC Information")
XML.loadString(result) must \\\\("block") \\>~ "One Itac comment"
XML.loadString(result) must \\\\("block") \\>~ "Another itac comment"
}
"if there is no ITAC section in the proposal, no ITAC Information section should be included, REL-1165" in {
val result = transformProposal("proposal_with_gsaoi.xml")
// Check that there is no ITAC information section
XML.loadString(result) must not (\\\\("block") \\ "inline" \\> "ITAC Information")
}
"show an ITAC information section if the proposal contains a multiline comment, REL-1165" in {
val result = transformProposal("proposal_with_itac_and_ntac_comments.xml")
// Check that we have an ITAC information section with the comment
XML.loadString(result) must \\\\("block") \\ "inline" \\> "ITAC Information"
}
"present the correct name when using GPI, REL-1193" in {
val result = transformProposal("proposal_with_gpi.xml")
// Check that we use the proper public name of GPI
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "GPI")
}
"present the correct name when using GRACES, REL-1356" in {
val result = transformProposal("proposal_with_graces.xml")
// Check that we use the proper public name of GPI
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "GRACES")
}
"present the correct instrument name when using Phoenix, REL-2356" in {
val result = transformProposal("proposal_with_phoenix.xml")
// Check that we use the proper public name of Phoenix
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Phoenix - Gemini South")
}
"Supports Large Programs, REL-1614" in {
val result = transformProposal("large_program.xml")
val proposalXml = XML.loadString(result)
// Check that the Observing Mode is Large Program
val largeProgramMode = (proposalXml \\\\ "table-cell" \\ "block") collect {
case e if e.text.matches( """\\s*Observing Mode:.Large Program\\s*""") => true
}
largeProgramMode must be size 1
// LPTAC table
proposalXml must (\\\\("inline") \\>~ """\\s*LPTAC information\\s*""")
}
"Shows Visitors on each site on the summary" in {
val result = transformProposal("proposal_with_visitor_gn_and_gs.xml")
val proposalXml = XML.loadString(result)
proposalXml must (\\\\("table-cell") \\ "block" \\>~ """GPI, GRACES, GMOS North, GNIRS, GMOS South, Flamingos2, NIRI, GSAOI, NIFS, Visitor - Gemini South - GS Cam, Visitor - Gemini North - DSSI""")
}
"show correct Observing Mode for FT, REL-1894" in {
val result = transformProposal("proposal_fast_turnaround.xml")
val proposalXml = XML.loadString(result)
// Check that Observing Mode is correct
val ftMode = (proposalXml \\\\ "table-cell" \\ "block") collect {
case e if e.text.matches( """\\s*Observing Mode:.Fast Turnaround\\s*""") => true
}
ftMode must be size 1
}
"display reviewer for FT, REL-1894" in {
val result = transformProposal("proposal_fast_turnaround.xml", P1PDF.NOAO)
val proposalXml = XML.loadString(result)
// Check that Observing Mode is correct
proposalXml must (\\\\("inline") \\>~ """\\s*Reviewer:\\s*""")
proposalXml must (\\\\("block") \\>~ """.*Andrew.Stephens\\s*""")
}
"display mentor for FT, REL-1894" in {
val result = transformProposal("proposal_fast_turnaround.xml", P1PDF.NOAO)
val proposalXml = XML.loadString(result)
// Check that Observing Mode is correct
proposalXml must (\\\\("inline") \\>~ """\\s*Mentor:\\s*""")
proposalXml must (\\\\("block") \\>~ """.*John.Doe\\s*""")
}
"calculate the total time for all observations for GN and GS, REL-1298" in {
val result = transformProposal("proposal_with_gn_and_gs.xml")
val proposalXml = XML.loadString(result)
// Check values manually calculated
// Band 1/2 GN
proposalXml must (\\\\("block") \\>~ """11.1 hr\\s*""")
// Band 1/2 GS
proposalXml must (\\\\("block") \\>~ """8.4 hr\\s*""")
// Band 3 GN
proposalXml must (\\\\("block") \\>~ """3.0 hr\\s*""")
// Band 3 GS
proposalXml must (\\\\("block") \\>~ """1.0 hr\\s*""")
}
}
"The P1 NOAO Template" should {
"include text with scheduling requests, REL-687" in {
val result = transformProposal("proposal_with_schedule.xml", P1PDF.NOAO)
val schedRegex = """\\s*This proposal has the following scheduling restrictions.*""".r
val foundMatches = (XML.loadString(result) \\\\ "block") collect {
case e if schedRegex.findFirstIn(e.text).isDefined => true
}
// Check there is a scheduling element
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ """\\s*Scheduling Constraints:\\s*""")
// Check there is a scheduling text
foundMatches must be size 1
}
"include TAC partner ranking, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_all_approved.xml", P1PDF.NOAO)
// Check there is a block with text 4.0
XML.loadString(result) must \\\\("block") \\> "4.0"
}
"include recommended and min recommended time, REL-677" in {
val result = transformProposal("proposal_submitted_to_tac_all_approved.xml", P1PDF.NOAO)
// Check there is a block with text 2.0 hr (1.0 hr)
XML.loadString(result) must \\\\("block") \\>~ """2\\.0.hr.\\(1\\.0.hr\\)"""
}
"show that GSAOI is in Gemini South, REL-693" in {
val result = transformProposal("proposal_with_gsaoi.xml", P1PDF.NOAO)
// Check that GSAOI is shown in Gemini South
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini South")
}
"show that Texes is in Gemini North, REL-1062" in {
val result = transformProposal("proposal_with_texes.xml", P1PDF.NOAO)
// Check that Texes is shown in Gemini North
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini North")
}
"show that Dssi is in Gemini North, REL-1061" in {
val result = transformProposal("proposal_with_dssi.xml", P1PDF.NOAO)
// Check that Speckle is shown in Gemini North
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini North")
}
"show that a GN visitor is in Gemini North, REL-1090" in {
val result = transformProposal("proposal_with_visitor_gn.xml", P1PDF.NOAO)
// Check that Speckle is shown in Gemini North
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini North")
}
"show that a GS visitor is in Gemini North, REL-1090" in {
val result = transformProposal("proposal_with_visitor_gs.xml", P1PDF.NOAO)
// Check that Speckle is shown in Gemini North
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini South")
}
"show that a Phoenix site is displayed, REL-1090" in {
val result = transformProposal("proposal_with_phoenix.xml", P1PDF.NOAO)
// Check that Phoenix is shown in Gemini South
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\> "Gemini South")
}
"show an ITAC information section if the proposal contains a comment, REL-1165" in {
val result = transformProposal("proposal_with_itac_comment.xml", P1PDF.NOAO)
// Check that we have an ITAC information section with the comment
XML.loadString(result) must \\\\("block") \\ "inline" \\>~ "ITAC Information.*"
XML.loadString(result) must \\\\("block") \\>~ "An Itac comment"
}
"show an ITAC information section if the proposal contains multiple comment, REL-1165" in {
val result = transformProposal("proposal_with_itac_and_several_comments.xml", P1PDF.NOAO)
// Check that we have an ITAC information section with the comment
XML.loadString(result) must \\\\("block") \\ "inline" \\>~ "ITAC Information.*"
XML.loadString(result) must \\\\("block") \\>~ "One Itac comment"
XML.loadString(result) must \\\\("block") \\>~ "Another itac comment"
}
"show an ITAC information section if the proposal contains itac and ntac comments, REL-1165" in {
val result = transformProposal("proposal_with_itac_and_ntac_comments.xml", P1PDF.NOAO)
// Check that we have an ITAC information section with the comment
XML.loadString(result) must (\\\\("block") \\ "inline" \\>~ "ITAC Information:.*")
}
"if there is no ITAC section in the proposal, no ITAC Information section should be included, REL-1165" in {
val result = transformProposal("proposal_with_gsaoi.xml", P1PDF.NOAO)
// Check that there is no ITAC information section
XML.loadString(result) must not (\\\\("block") \\ "inline" \\> "ITAC Information: ")
}
"show that GPI is in GS, REL-1193" in {
val result = transformProposal("proposal_with_gpi.xml", P1PDF.NOAO)
// Check that GPI is shown in Gemini South
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\>~ "Gemini South")
}
"show that GRACES is in GN, REL-1356" in {
val result = transformProposal("proposal_with_graces.xml", P1PDF.NOAO)
// Check that Graces is shown in Gemini South
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\>~ "Gemini North")
}
"show that Phoenix is in GS, REL-2356" in {
val result = transformProposal("proposal_with_phoenix.xml", P1PDF.NOAO)
// Check that Phoenix is shown in Gemini South
XML.loadString(result) must (\\\\("table-cell") \\ "block" \\>~ "Gemini South")
}
"show correct Observing Mode for FT, REL-1894" in {
val result = transformProposal("proposal_fast_turnaround.xml", P1PDF.NOAO)
val proposalXml = XML.loadString(result)
// Check that Observing Mode is correct
val ftMode = (proposalXml \\\\ "table-cell" \\ "block") collect {
case e if e.text.contains("""Fast Turnaround""") => true
}
ftMode must be size 1
}
"calculate the total time for all observations for GN and GS, REL-1298" in {
val result = transformProposal("proposal_with_gn_and_gs.xml", P1PDF.NOAO)
val proposalXml = XML.loadString(result)
// Check values manually calculated
// Band 1/2 GN
proposalXml must (\\\\("block") \\>~ """11.1 hr\\s*""")
// Band 1/2 GS
proposalXml must (\\\\("block") \\>~ """8.4 hr\\s*""")
// Band 3 GN
proposalXml must (\\\\("block") \\>~ """3.0 hr\\s*""")
// Band 3 GS
proposalXml must (\\\\("block") \\>~ """1.0 hr\\s*""")
}
}
"The P1 AU Template" should {
"includes the program id with a sensible default, REL-813" in {
// Proposal carries no submission, so the id falls back to a default.
val result = transformProposal("proposal_au_no_submission.xml", P1PDF.AU)
val proposalXml = XML.loadString(result)
// The generated id must start with the "AU-<semester>-" prefix.
proposalXml must (\\\\("block") \\>~ "AU-2013B-.*")
}
}
/** Runs the XSLT transformation of the given template over the given
 *  proposal XML (both loaded from classpath resources) and returns the
 *  resulting XSL-FO document as a String.
 *  Fails fast with IllegalArgumentException when either resource is
 *  missing (previously this surfaced later as an obscure NPE), and closes
 *  both resource streams, which were previously leaked.
 *  @param proposal classpath resource name of the proposal XML
 *  @param template the PDF template whose stylesheet drives the transform
 */
def transformProposal(proposal: String, template: Template = P1PDF.DEFAULT): String = {
  val xslStream = getClass.getResourceAsStream(template.location)
  require(xslStream != null, s"Missing template resource: ${template.location}")
  val xmlStream = getClass.getResourceAsStream(proposal)
  require(xmlStream != null, s"Missing proposal resource: $proposal")
  try {
    // Setup XSLT; the custom resolver handles includes/imports in the stylesheet
    val factory = TransformerFactory.newInstance()
    factory.setURIResolver(P1PdfUriResolver)
    val transformer = factory.newTransformer(new StreamSource(xslStream))
    transformer.setURIResolver(P1PdfUriResolver)
    template.parameters.foreach(p => transformer.setParameter(p._1, p._2))
    // Do XSLT Transform into an in-memory writer
    val writer = new StringWriter()
    transformer.transform(new StreamSource(xmlStream), new StreamResult(writer))
    writer.toString
  } finally {
    xmlStream.close()
    xslStream.close()
  }
}
}
| arturog8m/ocs | bundle/edu.gemini.model.p1.pdf/src/test/scala/edu/gemini/model/p1/pdf/P1TemplatesSpec.scala | Scala | bsd-3-clause | 20,263 |
package com.swoop.scala.sugar
import com.swoop.scala.sugar.regex.MatchedInString
import scala.util.matching.Regex
package object Implicits {
/**
* ap() allows arbitrary function chaining. The name comes from ap(plying) a function.
* This is similar to one use of Ruby's Object#try (the other being taken care of by Option).
* It is also convenient for injecting logic into a method chain, similar to Ruby's Object#tap.
*
* @example {{{
* import com.swoop.scala.sugar.Implicits._
* 5.ap(x => x + x)
* // res0: Int = 10
*
* 5.ap{x => println(s"I got \\${x}"); x}.toFloat
* // I got 5
* // res1: Float = 5.0
* }}}
*
* @tparam A the type of the wrapped value x
* @param x the value the function passed to 'ap' will be applied to
*/
implicit class ApOp[A](val x: A) extends AnyVal {
/** ap(ply) a function to the class constructor's x value
*
* @tparam B the return type of the function being applied
* @param f the function to be applied to x
* @return the result of type B from applying the function f to this instance's x
*/
def ap[B](f: A => B): B = f(x)
}
/**
* The |> (pipe) operator is borrowed from F# (inspired by Unix pipes).
* It allows function composition to become akin to method chaining.
* x |> f |> g is equivalent to g(f(x))
*
* @example {{{
* import com.swoop.scala.sugar.Implicits._
* def f(x: Int) = x * 10
* def g(x: Int) = x + 10
*
* g(f(5))
* // res0: Int = 60
* 5 |> f |> g
* // res1: Int = 60
* }}}
* @tparam A the type of the wrapped value
* @param value the value which will be passed to the piped function as an argument
*/
implicit class PipelineOperator[A](val value: A) extends AnyVal {
/**
* compose a function using method chaining
*
* @tparam B the return type of the function being chained
* @param f the function to be applied to this instance's value
*/
def |>[B] (f: A => B) = f(value)
}
/**
* regex.test(str) checks for a partial match and returns a Boolean (inspired by JavaScript).
* This is considerably simpler than !regex.findFirstIn(str).isEmpty and semantically less
* awkward than regex.findFirstIn(str).isDefined.
*
* @example {{{
* import com.swoop.scala.sugar.Implicits._
*
* "a+b+".r.test("xyz aabbb")
* // res0: Boolean = true
*
* }}}
*
* @param re the regular expression being enriched
*/
implicit class RegexOps(val re: Regex) extends AnyVal {
/**
* Perform a boolean test for a regular expression in a character sequence.
*
* @param source the CharSequence to match this instance's re against
* @return true if the expression matches somewhere in source
*/
def test(source: CharSequence) = re.findFirstIn(source).isDefined
/**
* regex.matchIn(str) returns the first match in an object that makes optional extraction easy.
* @param source the CharSequence to match this instance's re against
* @return a [[com.swoop.scala.sugar.regex.MatchedInString]] wrapping the first match (if any)
*/
def matchIn(source: CharSequence) = {
new MatchedInString(re.findFirstMatchIn(source))
}
}
/**
* str.extract(regex) is sugar for regex.matchIn(str)
*
* @example {{{
* import com.swoop.scala.sugar.Implicits._
*
* val m = "aabb".extract("a+(b+)".r)
* // m: com.swoop.scala.sugar.regex.MatchedInString = Some(aabb)
*
* m.group(1).get
* // res1: String = bb
*
* "no match here".extract("a+(b+)".r).group(1).getOrElse("nope")
* // res2: String = nope
* }}}
*
* @param source the CharSequence to match a regular expression against
*/
implicit class StringRegexOps(val source: CharSequence) extends AnyVal {
/**
* extracts a MatchedInString from the source
* @param re a regular expression to match against the source
* @return a MatchedInString of the first match of re in this instance's source
*/
def extract(re: Regex) = re.matchIn(source)
}
}
| swoop-inc/scala-sugar | src/main/scala/com/swoop/scala/sugar/Implicits/package.scala | Scala | mit | 4,174 |
/*
* Grammar of Graphics in Scala
* Copyright (c) 2011, ggscala.org
*/
package org.ggscala.io
import java.io.File
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.HashMap
import org.ggscala.model.MultiColumnSource._
import org.ggscala.model.TypeCode._
import org.ggscala.model.DataFrame.TempStringDataFrame
object Csv
{
case class LineReaderCallback( pattern:String=>Boolean, action:String=>Unit )
class CallbackLineReader[A]( protected val filePath:File, protected val default : String=>A )
extends Iterable[A]
{
protected val callbacks = new ListBuffer[LineReaderCallback]
private lazy val _open = io.Source.fromFile( filePath.getAbsolutePath ).getLines
def iterator = new Iterator[A]
{
var nextLine : Option[String] = None
var _hasNext = true
advance
private def advance = {
nextLine = None
while( !nextLine.isDefined && _open.hasNext )
{
nextLine = Some(_open.next)
val callback = callbacks.find( _.pattern(nextLine.get) )
callback.map( _.action(nextLine.get) )
if ( callback.isDefined )
nextLine = None
}
_hasNext = nextLine.isDefined
}
def hasNext = _hasNext
def next = {
val line = nextLine.get
advance
default(line)
}
}
}
/** Provides simple iteration over a delimited line file, returning List[String] for each row.
* Optional configuration includes
* <ul>
* <li>setting an arbitrary regex delimiter</li>
* <li>recording lines which are considered "metadata"</li>
* <li>skipping arbitrary lines</li>
* </ul>
* */
class DelimitedLineReader( filePath:File,
protected val delimiter:String = ",",
protected val metadataFilter:String=>Boolean = {s=>false},
protected val skipFilter:String=>Boolean = {s=>false} ) extends
CallbackLineReader( filePath, { l => l.split( delimiter ).toList } )
{
/** constructor from a file path string instead of java.io.File **/
def this( f:String ) = this( new File(f) )
callbacks += LineReaderCallback( metadataFilter, {l=>metadata+=l} )
callbacks += LineReaderCallback( skipFilter, {l=>()} )
val metadata = new ListBuffer[String]
}
/** Provides type-specific manipulation of columns from a CSV file. */
class DataFrameCsv( filePath:File ) extends MultiColumnSourceDelegate
{
def this( f:String ) = this( new File(f) )
val csv = new DelimitedLineReader(filePath)
protected var colTypes : Option[Seq[TypeCode]] = None
// These three (identical) methods provide clues to the parser for how to assign types for each column
def setColTypes( newColTypes:Seq[TypeCode] ) = colTypes = Some(newColTypes)
def setColTypes( newColTypes:String ) : Unit = setColTypes( newColTypes.split(",").toArray.map(strToTypeCode) )
def as( newColTypes:String ) = { setColTypes(newColTypes); this }
/**
* The actual data for this DataFrameCsv is initialized lazily.
* Read the CSV file into memory and convert to appropriate type-specific columns.
**/
protected def columnSource =
{
require( colTypes.isDefined, "Column types must be defined before reading from a DataFrameCsv" )
val cols = new TempStringDataFrame( colTypes.get )
csv.iterator.zipWithIndex.foreach
{
_ match {
case (line,0) => cols.setIds(line)
// all of the data is initially stored as String
case (line,_) => cols.addLine(line)
}
}
// this call unmarshals the columns to the appropriate types
cols.unmarshalAll
cols
}
}
  /** Convenience factory for the mini CSV/data-frame DSL: returns a
    * DataFrameCsv backed by the file at `filePath`. */
  def csv( filePath:String ) = new DataFrameCsv(filePath)
}
| drkeoni/ggscala | src/main/scala/org/ggscala/io/Csv.scala | Scala | mit | 3,848 |
// Happy New Year in Scala: prints a greeting when run.
object HappyNewYear {
  def main(args: Array[String]): Unit =
    println("Happy New Year")
}
package net.iakovlev.scopetranslatortest
import net.iakovlev.scopetranslatortest.AttractorPB.AttractorModePB.AttractorContentPB
import net.iakovlev.scopetranslatortest.AttractorPB.{
AttractorModePB,
ReferenceSystemPB
}
// Hand-written domain types; presumably these mirror the *PB (protobuf-style)
// types below so a translator between the two can be exercised — TODO confirm.

// 2-D point in a local Cartesian frame.
case class CartesianCoordinate2(X: Double, Y: Double)
// Geographic position; altitude may be absent.
case class Coordinate(latitude: Double,
                      longitude: Double,
                      altitude: Option[Double])
// An attractor anchored to a local reference system, with one or more modes.
case class Attractor(LocalReference: ReferenceSystem,
                     DetectionRadius: Double,
                     Modes: List[AttractorMode])
// One mode of an attractor: geometry plus its recorded content.
case class AttractorMode(Radius: Double,
                         Tolerance: Option[Double],
                         SquareRadius: Option[Double],
                         Center: CartesianCoordinate2,
                         Content: List[AttractorContent])
// Timestamped center observation belonging to an AttractorMode.
case class AttractorContent(TimestampMs: Long, Center: CartesianCoordinate2)
// Protobuf-style counterpart of CartesianCoordinate2 (lower-case field names).
final case class CartesianCoordinate2PB(
    x: _root_.scala.Double,
    y: _root_.scala.Double
)
// Local reference frame: origin coordinate plus lat/long scaling factors.
case class ReferenceSystem(Coordinates: Coordinate,
                           LatitudeFactor: Double,
                           LongitudeFactor: Double)
// Companion namespace holding the nested protobuf-style message types used by
// AttractorPB.
object AttractorPB {
  // Protobuf-style counterpart of AttractorMode.
  final case class AttractorModePB(
      radius: _root_.scala.Double,
      tolerance: scala.Option[_root_.scala.Double] = None,
      squareRadius: scala.Option[_root_.scala.Double] = None,
      center: CartesianCoordinate2PB,
      content: _root_.scala.collection.Seq[AttractorContentPB] =
        _root_.scala.collection.Seq.empty
  )
  object AttractorModePB {
    // Protobuf-style counterpart of AttractorContent.
    final case class AttractorContentPB(
        timestampMs: _root_.scala.Long,
        center: CartesianCoordinate2PB
    )
  }
  // Protobuf-style counterpart of ReferenceSystem.
  final case class ReferenceSystemPB(
      coordinates: CoordinatePB,
      latitudeFactor: _root_.scala.Double,
      longitudeFactor: _root_.scala.Double
  )
}
// Protobuf-style counterpart of Attractor.
final case class AttractorPB(
    localReference: ReferenceSystemPB,
    detectionRadius: _root_.scala.Double,
    modes: _root_.scala.collection.Seq[AttractorModePB] =
      _root_.scala.collection.Seq.empty
)
// Protobuf-style counterpart of Coordinate; altitude may be absent.
final case class CoordinatePB(
    latitude: _root_.scala.Double,
    longitude: _root_.scala.Double,
    altitude: scala.Option[_root_.scala.Double] = None
)
| RomanIakovlev/scope-translator | src/test/scala/net/iakovlev/scopetranslatortest/TestClasses.scala | Scala | mit | 2,207 |
package maven2sbt.core
import Repository._
import cats.Show
import cats.syntax.all._
import scala.xml.Elem
/** A repository declaration: optional id and display name plus the required
  * URL.
  *
  * @author Kevin Lee
  * @since 2019-04-21
  */
final case class Repository(id: Option[RepoId], name: Option[RepoName], url: RepoUrl)
/** Companion holding the zero-cost newtypes used by [[Repository]]. Each
  * newtype is an opaque alias of String with an apply/value round-trip, a
  * `CanEqual` instance, and an `unapply` extractor for pattern matching.
  */
object Repository extends RepositoryPlus {

  type RepoId = RepoId.RepoId
  /** Repository id newtype. */
  object RepoId {
    opaque type RepoId = String
    def apply(repoId: String): RepoId = repoId

    /** Extractor enabling `case RepoId(raw)` pattern matches. */
    def unapply(repoId: RepoId): Option[String] =
      repoId.value.some

    given repoIdCanEqual: CanEqual[RepoId, RepoId] = CanEqual.derived

    /** Unwraps the underlying String. */
    extension (repoId: RepoId) def value: String = repoId

    given show: Show[RepoId] = _.toString
  }

  type RepoName = RepoName.RepoName
  /** Repository display-name newtype. */
  object RepoName {
    opaque type RepoName = String
    def apply(repoName: String): RepoName = repoName

    given repoNameCanEqual: CanEqual[RepoName, RepoName] = CanEqual.derived

    /** Extractor enabling `case RepoName(raw)` pattern matches. */
    def unapply(repoName: RepoName): Option[String] =
      repoName.value.some

    /** Unwraps the underlying String. */
    extension (repoName: RepoName) def value: String = repoName

    given show: Show[RepoName] = _.toString
  }

  type RepoUrl = RepoUrl.RepoUrl
  /** Repository URL newtype. */
  object RepoUrl {
    opaque type RepoUrl = String
    def apply(repoUrl: String): RepoUrl = repoUrl

    /** Extractor enabling `case RepoUrl(raw)` pattern matches; added for
      * consistency with RepoId and RepoName, which both provide one.
      */
    def unapply(repoUrl: RepoUrl): Option[String] =
      repoUrl.value.some

    given repoUrlCanEqual: CanEqual[RepoUrl, RepoUrl] = CanEqual.derived

    /** Unwraps the underlying String. */
    extension (repoUrl: RepoUrl) def value: String = repoUrl

    // NOTE(review): unlike RepoId/RepoName there is no Show instance here;
    // confirm one isn't provided elsewhere (e.g. RepositoryPlus) before adding
    // it, to avoid ambiguous givens.
  }
}
| Kevin-Lee/maven2sbt | core/src/main/scala-3/maven2sbt/core/Repository.scala | Scala | mit | 1,364 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of Scala code snippets matching the specified criteria, giving a surface-level overview of the dataset's contents rather than a deeper analysis.