| code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
package builder
case class CarDirector(carBuilder: CarBuilder) {
def build: CarBuilder = {
carBuilder
.buildBodyStyle
.buildEngine
.buildPower
.buildFuelType
.buildBreaks
.buildSeats
.buildWindows
}
}
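// A minimal usage sketch (SportsCarBuilder is an assumed CarBuilder implementation, not part of this exercise):
//   val configured: CarBuilder = CarDirector(new SportsCarBuilder).build
// The director only fixes the order of the build steps; each concrete builder decides what the steps produce.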
|
BBK-PiJ-2015-67/sdp-portfolio
|
exercises/week08/src/main/scala/builder/CarDirector.scala
|
Scala
|
unlicense
| 252
|
package org.jetbrains.plugins.scala.decompiler.scalasig
import java.io.IOException
import java.lang.Double.longBitsToDouble
import java.lang.Float.intBitsToFloat
import org.jetbrains.plugins.scala.decompiler.scalasig.PickleFormat._
import org.jetbrains.plugins.scala.decompiler.scalasig.TagGroups._
import scala.annotation.switch
import scala.language.implicitConversions
import scala.reflect.ClassTag
import scala.reflect.internal.pickling.PickleFormat._
/**
* Nikolay.Tropin
* 18-Jul-17
*/
//Some parts of scala.reflect.internal.pickling.UnPickler used
object Parser {
def parseScalaSig(bytes: Array[Byte], fileName: String): ScalaSig = {
try {
new Builder(bytes).readAll()
} catch {
case ex: IOException =>
throw ex
case ex: Throwable =>
val message = s"Error parsing scala signature of $fileName"
System.err.println(message)
ex.printStackTrace()
throw ex
}
}
private class Builder(bytes: Array[Byte]) extends ScalaSigReader(bytes) {
val index: Array[Int] = createIndex()
private val entries = new Array[Entry](index.length)
implicit val scalaSig: ScalaSig = new ScalaSig(entries)
def readAll(): ScalaSig = {
var i = 0
while (i < index.length) {
entries(i) = readEntry(i)
i += 1
}
scalaSig.finished()
scalaSig
}
def readEntry(i: Int): Entry = {
readIndex = index(i)
val tag = readByte()
(tag: @switch) match {
case TERMname => readName()
case TYPEname => readName()
case NONEsym => NoSymbol
case TYPEsym => readSymbol(tag)
case ALIASsym => readSymbol(tag)
case CLASSsym => readSymbol(tag)
case MODULEsym => readSymbol(tag)
case VALsym => readSymbol(tag)
case EXTref => readExtSymbol(tag)
case EXTMODCLASSref => readExtSymbol(tag)
case NOtpe => NoType
case NOPREFIXtpe => NoPrefixType
case THIStpe => readType(tag)
case SINGLEtpe => readType(tag)
case CONSTANTtpe => readType(tag)
case TYPEREFtpe => readType(tag)
case TYPEBOUNDStpe => readType(tag)
case REFINEDtpe => readType(tag)
case CLASSINFOtpe => readType(tag)
case METHODtpe => readType(tag)
case POLYtpe => readType(tag)
case IMPLICITMETHODtpe => readType(tag)
case LITERALunit => readLiteral(tag)
case LITERALboolean => readLiteral(tag)
case LITERALbyte => readLiteral(tag)
case LITERALshort => readLiteral(tag)
case LITERALchar => readLiteral(tag)
case LITERALint => readLiteral(tag)
case LITERALlong => readLiteral(tag)
case LITERALfloat => readLiteral(tag)
case LITERALdouble => readLiteral(tag)
case LITERALstring => readLiteral(tag)
case LITERALnull => readLiteral(tag)
case LITERALclass => readLiteral(tag)
case LITERALenum => readLiteral(tag)
case LITERALsymbol => readLiteral(tag)
case SYMANNOT => readSymbolAnnotation()
case CHILDREN => Children
case ANNOTATEDtpe => readType(tag)
case ANNOTINFO => AnnotInfo
case ANNOTARGARRAY => readAnnotArgArray()
case SUPERtpe => readType(tag)
case DEBRUIJNINDEXtpe => readType(tag)
case EXISTENTIALtpe => readType(tag)
case TREE => Tree
case MODIFIERS => readModifiers()
case SUPERtpe2 => readType(tag)
}
}
def tagAt(i: Int): Byte = bytes(index(i))
def tryReadRef[T <: Entry : ClassTag](tagCondition: Int => Boolean,
constructor: Int => Ref[T],
entryEnd: Int): Option[Ref[T]] = {
if (readIndex >= entryEnd) return None
val savedIdx = readIndex
val ref = readNat()
if (tagCondition(tagAt(ref))) Some(constructor(ref))
else {
readIndex = savedIdx
None
}
}
def readNameRef(): Ref[Name] = Ref.to[Name](readNat())
def readSymbolRef(): Ref[Symbol] = Ref.to[Symbol](readNat())
def readTypeRef(): Ref[Type] = Ref.to[Type](readNat())
def readConstantRef(): Ref[Constant] = Ref.to[Constant](readNat())
def readConstantAnnotArgRef(): Ref[ConstAnnotArg] = Ref.to[ConstAnnotArg](readNat())
def readScalaSymbol(): Ref[ScalaSymbol] = readNameRef().map(n => ScalaSymbol(n.value))
def tryReadTypeRef(end: Int): Option[Ref[Type]] = tryReadRef(isTypeTag, Ref.to[Type], end)
def tryReadSymbolRef(end: Int): Option[Ref[Symbol]] = tryReadRef(isSymbolTag, Ref.to[Symbol], end)
def readSymbolInfo(end: Int): SymbolInfo = {
val name = readNameRef()
val owner = readSymbolRef()
val flags = readNat()
val privateWithin = tryReadRef(isSymbolTag, Ref.to[Symbol], end)
val typeInfo = readTypeRef()
SymbolInfo(name, owner, flags, privateWithin, typeInfo)
}
def readSymbol(tag: Int): Symbol = {
val end = readEnd()
val symbol = tag match {
case TYPEsym => TypeSymbol(readSymbolInfo(end))
case ALIASsym => AliasSymbol(readSymbolInfo(end))
case CLASSsym =>
val clazz = ClassSymbol(readSymbolInfo(end), tryReadTypeRef(end))
scalaSig.addClass(clazz)
clazz
case MODULEsym =>
val obj = ObjectSymbol(readSymbolInfo(end))
scalaSig.addObject(obj)
obj
case VALsym =>
MethodSymbol(readSymbolInfo(end), tryReadSymbolRef(end))
case _ => errorBadSignature("bad symbol tag: " + tag)
}
scalaSig.addChild(symbol.parentRef, symbol)
symbol
}
def readExtSymbol(tag: Int): ExternalSymbol = {
val end = readEnd()
val name = readNameRef()
val owner = tryReadSymbolRef(end)
val isObject = tag == EXTMODCLASSref
ExternalSymbol(name, owner, isObject)
}
def readTypes(end: Int): List[Ref[Type]] = until(end, readTypeRef _)
def readSymbols(end: Int): List[Ref[Symbol]] = until(end, readSymbolRef _)
def readName(): Name = Name(readUtf8(readNat()))
def readType(tag: Int): Type = {
val end = readEnd()
def polyOrNullaryType(restpe: Ref[Type], tparams: List[Ref[Symbol]]): Type = tparams match {
case Nil => NullaryMethodType(restpe)
case _ => PolyType(restpe, tparams)
}
(tag: @switch) match {
case NOtpe => NoType
case NOPREFIXtpe => NoPrefixType
case THIStpe => ThisType(readSymbolRef())
case SINGLEtpe => SingleType(readTypeRef(), readSymbolRef()) // SI-7596 account for overloading
case SUPERtpe => SuperType(readTypeRef(), readTypeRef())
case CONSTANTtpe => ConstantType(readConstantRef())
case TYPEREFtpe => TypeRefType(readTypeRef(), readSymbolRef(), readTypes(end))
case TYPEBOUNDStpe => TypeBoundsType(readTypeRef(), readTypeRef())
case REFINEDtpe => RefinedType(readSymbolRef(), readTypes(end))
case CLASSINFOtpe => ClassInfoType(readSymbolRef(), readTypes(end))
case METHODtpe => MethodType(readTypeRef(), readSymbols(end))
case POLYtpe => polyOrNullaryType(readTypeRef(), readSymbols(end))
case DEBRUIJNINDEXtpe => DeBruijnIndexType(readNat(), readNat())
case EXISTENTIALtpe => ExistentialType(readTypeRef(), readSymbols(end))
case ANNOTATEDtpe => AnnotatedType(readTypeRef())
case _ => errorBadSignature("bad type tag: " + tag)
}
}
def readLiteral(tag: Int): Constant = {
val len = readNat()
(tag: @switch) match {
case LITERALunit => Constant(())
case LITERALboolean => Constant(readLong(len) != 0L)
case LITERALbyte => Constant(readLong(len).toByte)
case LITERALshort => Constant(readLong(len).toShort)
case LITERALchar => Constant(readLong(len).toChar)
case LITERALint => Constant(readLong(len).toInt)
case LITERALlong => Constant(readLong(len))
case LITERALfloat => Constant(intBitsToFloat(readLong(len).toInt))
case LITERALdouble => Constant(longBitsToDouble(readLong(len)))
case LITERALstring => Constant(readNameRef())
case LITERALnull => Constant(null)
case LITERALclass => Constant(readTypeRef())
case LITERALenum => Constant(readSymbolRef())
case LITERALsymbol => Constant(readScalaSymbol())
case _ => errorBadSignature("bad constant tag: " + tag)
}
}
protected def readSymbolAnnotation(): SymAnnot = {
val end = readEnd()
val sym = readSymbolRef()
val info = readTypeRef()
val args = until(end, () => tryReadRef(isConstAnnotArgTag, Ref.to[ConstAnnotArg], end)).flatten
val namedArgs = until(end, () => (readNameRef(), readConstantAnnotArgRef()))
val annot = SymAnnot(sym, info, args, namedArgs)
scalaSig.addAttribute(annot)
annot
}
def readAnnotArgArray(): AnnotArgArray = {
val end = readEnd()
val args = until(end, readConstantAnnotArgRef _)
AnnotArgArray(args)
}
//implementation from scala.reflect.internal.pickling.UnPickler.Scan.readModifiers
def readModifiers(): Modifiers = {
readEnd()
val pflagsHi = readNat()
val pflagsLo = readNat()
val pflags = (pflagsHi.toLong << 32) + pflagsLo
val flags = scala.reflect.internal.Flags.pickledToRawFlags(pflags)
val privateWithin = readNameRef()
Modifiers(flags, privateWithin)
}
private def readEnd() = readNat() + readIndex
protected def errorBadSignature(msg: String) =
throw new RuntimeException(s"malformed Scala signature at $readIndex; $msg")
}
}
|
jastice/intellij-scala
|
scala/decompiler/src/org/jetbrains/plugins/scala/decompiler/scalasig/Parser.scala
|
Scala
|
apache-2.0
| 10,302
|
package com.holdenkarau.spark.validator
import java.sql.Timestamp
import scala.collection.immutable.Seq
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext, SparkSession, functions}
case class HistoricData(
counters: scala.collection.Map[String, Long], date: Timestamp) {
/**
* Saves historic data to the given path.
*/
def saveHistoricData(session: SparkSession, path: String): Unit = {
// creates accumulator DataFrame
val schema = StructType(List(
StructField("counterName", StringType, false),
StructField("value", LongType, false)))
val rows =
session.sparkContext.parallelize(counters.toList).
map(kv => Row(kv._1, kv._2))
val data = session.createDataFrame(rows, schema)
// save accumulators DataFrame
val writePath = s"$path/date=$date"
data.write.parquet(writePath)
}
}
object HistoricData {
/**
* Converts both Spark counters & user counters into a HistoricData object
*/
def apply(
accumulators: TypedAccumulators, vl: ValidationListener, date: Timestamp):
HistoricData = {
val counters = accumulators.toMap() ++ vl.toMap()
HistoricData(counters, date)
}
/**
* Gets the Historic Data as an Array.
*/
def loadHistoricData(session: SparkSession, path: String): Array[HistoricData] = {
import session.implicits._
val countersDF = loadHistoricDataDataFrame(session, path)
countersDF match {
case Some(df) =>
val countersDataset = df.select(
df("date"), functions.map(df("counterName"), df("value")).alias("counter"))
.groupBy(df("date"))
.agg(functions.collect_list("counter").alias("counters"))
val result = countersDataset.map(x =>
(HistoricData(
// fields are ordered by name
x.getSeq[Map[String, Long]](1).reduceLeft(_ ++ _),
x.getTimestamp(0)
)))
result.collect()
case None =>
new Array[HistoricData](0)
}
}
/**
* Returns a DataFrame of the old counters (for SQL funtimes).
*/
private def loadHistoricDataDataFrame(
session: SparkSession, path: String): Option[DataFrame] = {
// Spark SQL doesn't handle empty directories very well...
val hadoopConf = session.sparkContext.hadoopConfiguration
val fs =
org.apache.hadoop.fs.FileSystem.get(hadoopConf)
if (fs.exists(new org.apache.hadoop.fs.Path(path))) {
val inputDF = session.read.parquet(path)
Some(inputDF)
} else {
None
}
}
def getPath(jobBasePath: String, jobName: String, success: Boolean): String = {
val status = success match {
case true => "SUCCESS"
case false => "FAILURE"
}
val path = s"$jobBasePath/$jobName/validator/HistoricDataParquet/status=$status"
path
}
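// Illustrative result with hypothetical arguments (path layout taken from the line above):
//   getPath("/data/validation", "nightly-job", success = true)
//   // => "/data/validation/nightly-job/validator/HistoricDataParquet/status=SUCCESS"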
}
|
holdenk/spark-validator
|
src/main/scala/com/holdenkarau/spark/validator/HistoricData.scala
|
Scala
|
apache-2.0
| 2,921
|
package coursier.cli.options
import caseapp.{ExtraName => Short, HelpMessage => Help, ValueDescription => Value, _}
import coursier.core.{Classifier, Resolution, Type}
import coursier.install.RawAppDescriptor
// format: off
final case class ArtifactOptions(
@Group(OptionGroup.fetch)
@Hidden
@Help("Classifiers that should be fetched")
@Value("classifier1,classifier2,...")
@Short("C")
classifier: List[String] = Nil,
@Group(OptionGroup.fetch)
@Help("Fetch source artifacts")
sources: Boolean = false,
@Group(OptionGroup.fetch)
@Help("Fetch javadoc artifacts")
javadoc: Boolean = false,
@Group(OptionGroup.fetch)
@Help("Fetch default artifacts (default: false if --sources or --javadoc or --classifier are passed, true else)")
default: Option[Boolean] = None,
@Group(OptionGroup.fetch)
@Hidden
@Help("Artifact types that should be retained (e.g. jar, src, doc, etc.) - defaults to jar,bundle")
@Value("type1,type2,...")
@Short("A")
artifactType: List[String] = Nil,
@Group(OptionGroup.fetch)
@Hidden
@Help("Fetch artifacts even if the resolution is errored")
forceFetch: Boolean = false
) {
// format: on
// to deprecate
lazy val classifier0 =
classifier.flatMap(_.split(',')).filter(_.nonEmpty).map(Classifier(_)).toSet
// to deprecate
def default0: Boolean =
default.getOrElse {
(!sources && !javadoc && classifier0.isEmpty) ||
classifier0(Classifier("_"))
}
// deprecated
def artifactTypes: Set[Type] = {
val types0 = artifactType
.flatMap(_.split(',').toSeq)
.filter(_.nonEmpty)
.map(Type(_))
.toSet
if (types0.isEmpty) {
val sourceTypes =
Some(Type.source).filter(_ => sources || classifier0(Classifier.sources)).toSet
val javadocTypes =
Some(Type.doc).filter(_ => javadoc || classifier0(Classifier.javadoc)).toSet
val defaultTypes = if (default0) Resolution.defaultTypes else Set()
sourceTypes ++ javadocTypes ++ defaultTypes
}
else if (types0(Type.all))
Set(Type.all)
else
types0
}
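// Hedged examples of how the defaulting above plays out (derived only from the logic in this file):
//   no options given                       => Resolution.defaultTypes
//   sources = true                         => Set(Type.source)
//   sources = true, default = Some(true)   => Set(Type.source) ++ Resolution.defaultTypes
//   artifactType = List("all")             => Set(Type.all)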
def addApp(app: RawAppDescriptor): ArtifactOptions =
copy(
classifier = {
val previous = classifier
previous ++ app.classifiers.filterNot(previous.toSet + "_")
},
default = default.orElse {
if (app.classifiers.contains("_"))
Some(true)
else
None
},
artifactType = {
val previous = artifactType
previous ++ app.artifactTypes.filterNot(previous.toSet)
}
)
}
object ArtifactOptions {
implicit val parser = Parser[ArtifactOptions]
implicit val help = caseapp.core.help.Help[ArtifactOptions]
}
|
coursier/coursier
|
modules/cli/src/main/scala/coursier/cli/options/ArtifactOptions.scala
|
Scala
|
apache-2.0
| 2,712
|
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.sexp.formats
import org.ensime.sexp._
class BasicFormatsSpec extends FormatSpec with BasicFormats {
"BasicFormats" should "support Int" in {
assertFormat(13, SexpNumber(13))
assertFormat(-1, SexpNumber(-1))
assertFormat(0, SexpNumber(0))
assertFormat(Int.MaxValue, SexpNumber(Int.MaxValue))
assertFormat(Int.MinValue, SexpNumber(Int.MinValue))
}
it should "support Long" in {
assertFormat(13L, SexpNumber(13))
assertFormat(-1L, SexpNumber(-1))
assertFormat(0L, SexpNumber(0))
assertFormat(Long.MaxValue, SexpNumber(Long.MaxValue))
assertFormat(Long.MinValue, SexpNumber(Long.MinValue))
}
it should "support Float" in {
assertFormat(13.0f, SexpNumber(13.0f))
assertFormat(-1.0f, SexpNumber(-1.0f))
assertFormat(0.0f, SexpNumber(0.0f))
assertFormat(Float.MaxValue, SexpNumber(Float.MaxValue))
//assertFormat(Float.MinValue, SexpNumber(Float.MinValue)) // implicit widening?
assertFormat(Float.NegativeInfinity, SexpNegInf)
assertFormat(Float.PositiveInfinity, SexpPosInf)
// remember NaN != NaN
Float.NaN.toSexp should ===(SexpNaN)
SexpNaN.convertTo[Float].isNaN shouldBe true
}
it should "support Double" in {
assertFormat(13.0d, SexpNumber(13.0d))
assertFormat(-1.0d, SexpNumber(-1.0d))
assertFormat(0.0d, SexpNumber(0.0d))
assertFormat(Double.MaxValue, SexpNumber(Double.MaxValue))
assertFormat(Double.MinValue, SexpNumber(Double.MinValue))
assertFormat(Double.NegativeInfinity, SexpNegInf)
assertFormat(Double.PositiveInfinity, SexpPosInf)
// remember NaN != NaN
Double.NaN.toSexp should ===(SexpNaN)
SexpNaN.convertTo[Double].isNaN shouldBe true
}
it should "support Boolean" in {
assertFormat(true, SexpSymbol("t"))
assertFormat(false, SexpNil)
}
it should "support Char" in {
assertFormat('t', SexpChar('t'))
}
it should "support Unit" in {
assertFormat((), SexpNil)
}
it should "support Symbol" in {
assertFormat('blah, SexpString("blah"))
}
}
|
yyadavalli/ensime-server
|
s-express/src/test/scala/org/ensime/sexp/formats/BasicFormatsSpec.scala
|
Scala
|
gpl-3.0
| 2,183
|
package simx.core.helper
import simplex3d.math.double._
/**
* Created by martin
* on 30/07/15.
*/
object Vector3 {
def centerOf(vectors : Iterable[ConstVec3]) : ConstVec3 =
vectors.foldLeft(Vec3.Zero)(_ + _) / vectors.size
}
|
simulator-x/core
|
src/simx/core/helper/Vector3.scala
|
Scala
|
apache-2.0
| 237
|
package uk.co.turingatemyhamster
package owl2
/**
* An abstraction of: http://www.w3.org/TR/2012/REC-owl2-syntax-20121211/#Entity_Declarations_and_Typing
*
* @author Matthew Pocock
*/
trait DeclarationModule {
importedModules : owl2.IriModule with owl2.OntologyModule =>
type Declaration <: Axiom
}
|
drdozer/owl2
|
core/src/main/scala/uk/co/turingatemyhamster/owl2/DeclarationModule.scala
|
Scala
|
apache-2.0
| 311
|
package io.iohk.ethereum.ets.vm
import akka.util.ByteString
import io.iohk.ethereum.domain.Address
import io.iohk.ethereum.ets.common.AccountState
case class VMScenario(
env: Env,
exec: Exec,
callcreates: Option[List[CallCreate]],
pre: Map[Address, AccountState],
post: Option[Map[Address, AccountState]],
logs: Option[ByteString],
gas: Option[BigInt],
out: Option[ByteString]
)
case class Env(
currentCoinbase: Address,
currentDifficulty: BigInt,
currentGasLimit: BigInt,
currentNumber: BigInt,
currentTimestamp: Long,
previousHash: Option[ByteString]
)
case class Exec(
address: Address,
origin: Address,
caller: Address,
value: BigInt,
data: ByteString,
code: ByteString,
gasPrice: BigInt,
gas: BigInt
)
case class CallCreate(
data: ByteString,
destination: Option[Address],
gasLimit: BigInt,
value: BigInt
)
|
input-output-hk/etc-client
|
src/ets/scala/io/iohk/ethereum/ets/vm/scenario.scala
|
Scala
|
mit
| 870
|
package net.bmjames.opts.test.example
import net.bmjames.opts._
import scalaz.syntax.apply._
case class Options(globalOpt: String, globalFlag: Boolean, command: Command)
sealed trait Command
case class Add(paths: List[String]) extends Command
case class Commit(message: String) extends Command
object SubparserExample {
val parseOpts: Parser[Options] =
^^(strOption(long("globalOpt"), help("Option that applies to all commands")),
switch(long("globalFlag"), help("Switch that applies to all commands")),
subparser[Command](command("add", info(many(strArgument(metavar("PATH"))).map(Add))),
command("commit", info(strArgument(metavar("MESSAGE")).map(Commit))))
)(Options)
def main(args: Array[String]) {
val opts = info(parseOpts <*> helper, progDesc("A program with some global opts and command subparsers"))
println(execParser(args, "SubparserExample", opts))
}
}
/* Notes:
* If you fail to provide the required --globalOpt option, but do provide the
* required arguments to a subparser, the error message shows only the usage for the
* subparser, and does not mention --globalOpt at all! This is confusing, however
* optparse-applicative also appears to have the same behaviour (bug?).
*/
|
bmjames/scala-optparse-applicative
|
src/test/scala/net/bmjames/opts/test/example/SubparserExample.scala
|
Scala
|
bsd-3-clause
| 1,273
|
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis.controls
import org.orbeon.dom.{Element, QName}
import org.orbeon.oxf.common.ValidationException
import org.orbeon.oxf.xforms.analysis._
import org.orbeon.oxf.xforms.itemset.Itemset
import org.orbeon.oxf.xforms.model.StaticDataModel
import org.orbeon.saxon.om
import org.orbeon.xforms.XFormsNames._
trait SelectionControlTrait
extends InputValueControl
with SelectAppearanceTrait
with WithChildrenTrait {
if (element.attributeValue("selection") == "open")
throw new ValidationException("Open selection is currently not supported.", locationData)
val excludeWhitespaceTextNodesForCopy: Boolean =
element.attributeValue(EXCLUDE_WHITESPACE_TEXT_NODES_QNAME) == "true"
val isNorefresh: Boolean =
element.attributeValue(XXFORMS_REFRESH_ITEMS_QNAME) == "false"
final var itemsetAnalysis: Option[XPathAnalysis] = None
def staticItemset: Option[Itemset]
def useCopy: Boolean
def mustEncodeValues: Option[Boolean]
override def isAllowedBoundItem(item: om.Item): Boolean =
if (useCopy)
StaticDataModel.isAllowedBoundItem(item)
else
super.isAllowedBoundItem(item)
override def freeTransientState(): Unit = {
super.freeTransientState()
itemsetAnalysis foreach (_.freeTransientState())
}
}
object SelectionControlUtil {
val AttributesToPropagate = List(CLASS_QNAME, STYLE_QNAME, XXFORMS_OPEN_QNAME)
val TopLevelItemsetQNames = Set(XFORMS_ITEM_QNAME, XFORMS_ITEMSET_QNAME, XFORMS_CHOICES_QNAME)
def isTopLevelItemsetElement(e: Element): Boolean = TopLevelItemsetQNames(e.getQName)
def getAttributes(itemChoiceItemset: Element): List[(QName, String)] =
for {
attributeName <- AttributesToPropagate
attributeValue = itemChoiceItemset.attributeValue(attributeName)
if attributeValue ne null
} yield
attributeName -> attributeValue
}
|
orbeon/orbeon-forms
|
xforms-analysis/shared/src/main/scala/org/orbeon/oxf/xforms/analysis/controls/SelectionControlTrait.scala
|
Scala
|
lgpl-2.1
| 2,537
|
package org.hammerlab.guacamole.readsets.rdd
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.hammerlab.genomics.reference.test.{RegionsUtil, TestRegion}
trait RegionsRDDUtil
extends RegionsUtil {
def sc: SparkContext
def makeRegionsRDD(numPartitions: Int, reads: (String, Int, Int, Int)*): RDD[TestRegion] =
sc.parallelize(makeRegions(reads).toSeq, numPartitions)
}
|
hammerlab/guacamole
|
src/test/scala/org/hammerlab/guacamole/readsets/rdd/RegionsRDDUtil.scala
|
Scala
|
apache-2.0
| 412
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
import scala.language.implicitConversions
import scala.util.parsing.combinator.Parsers
import scala.util.parsing.input.CharSequenceReader
import org.junit.Test
import org.junit.Assert.assertEquals
class gh72 {
class TestParsers extends Parsers {
type Elem = Char
val left: Parser[String] = 'a' ~ 'b' ~ 'c' ^^^ "left" withFailureMessage "failure on left"
val right: Parser[String] = 'a' ~ 'b' ~ 'c' ^^^ "right" withFailureMessage "failure on right"
def p: Parser[String] = left ||| right
}
@Test
def test(): Unit = {
val tstParsers = new TestParsers
val s = new CharSequenceReader("abc")
assertEquals("[1.4] parsed: left", tstParsers.p(s).toString)
val t = new CharSequenceReader("def")
val expectedFailure = """[1.1] failure: failure on left
def
^"""
assertEquals(expectedFailure, tstParsers.p(t).toString)
}
}
|
scala/scala-parser-combinators
|
shared/src/test/scala/scala/util/parsing/combinator/gh72.scala
|
Scala
|
apache-2.0
| 1,156
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.{FileNotFoundException, IOException}
import scala.collection.mutable
import org.apache.spark.{Partition => RDDPartition, TaskContext, TaskKilledException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.{InputFileBlockHolder, RDD}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.vectorized.ColumnarBatch
import org.apache.spark.util.NextIterator
/**
* A part (i.e. "block") of a single file that should be read, along with partition column values
* that need to be prepended to each row.
*
* @param partitionValues value of partition columns to be prepended to each row.
* @param filePath path of the file to read
* @param start the beginning offset (in bytes) of the block.
* @param length number of bytes to read.
* @param locations locality information (list of nodes that have the data).
*/
case class PartitionedFile(
partitionValues: InternalRow,
filePath: String,
start: Long,
length: Long,
@transient locations: Array[String] = Array.empty) {
override def toString: String = {
s"path: $filePath, range: $start-${start + length}, partition values: $partitionValues"
}
}
/**
* A collection of file blocks that should be read as a single task
* (possibly from multiple partitioned directories).
*/
case class FilePartition(index: Int, files: Seq[PartitionedFile]) extends RDDPartition
/**
* An RDD that scans a list of file partitions.
*/
class FileScanRDD(
@transient private val sparkSession: SparkSession,
readFunction: (PartitionedFile) => Iterator[InternalRow],
@transient val filePartitions: Seq[FilePartition])
extends RDD[InternalRow](sparkSession.sparkContext, Nil) {
private val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
override def compute(split: RDDPartition, context: TaskContext): Iterator[InternalRow] = {
val iterator = new Iterator[Object] with AutoCloseable {
private val inputMetrics = context.taskMetrics().inputMetrics
private val existingBytesRead = inputMetrics.bytesRead
// Find a function that will return the FileSystem bytes read by this thread. Do this before
// applying readFunction, because it might read some bytes.
private val getBytesReadCallback =
SparkHadoopUtil.get.getFSBytesReadOnThreadCallback()
// We get our input bytes from thread-local Hadoop FileSystem statistics.
// If we do a coalesce, however, we are likely to compute multiple partitions in the same
// task and in the same thread, in which case we need to avoid overriding values written by
// previous partitions (SPARK-13071).
private def updateBytesRead(): Unit = {
inputMetrics.setBytesRead(existingBytesRead + getBytesReadCallback())
}
// If we can't get the bytes read from the FS stats, fall back to the file size,
// which may be inaccurate.
private def updateBytesReadWithFileSize(): Unit = {
if (currentFile != null) {
inputMetrics.incBytesRead(currentFile.length)
}
}
private[this] val files = split.asInstanceOf[FilePartition].files.toIterator
private[this] var currentFile: PartitionedFile = null
private[this] var currentIterator: Iterator[Object] = null
def hasNext: Boolean = {
// Kill the task in case it has been marked as killed. This logic is from
// InterruptibleIterator, but we inline it here instead of wrapping the iterator in order
// to avoid performance overhead.
context.killTaskIfInterrupted()
(currentIterator != null && currentIterator.hasNext) || nextIterator()
}
def next(): Object = {
val nextElement = currentIterator.next()
// TODO: we should have a better separation of row based and batch based scan, so that we
// don't need to run this `if` for every record.
if (nextElement.isInstanceOf[ColumnarBatch]) {
inputMetrics.incRecordsRead(nextElement.asInstanceOf[ColumnarBatch].numRows())
} else {
inputMetrics.incRecordsRead(1)
}
if (inputMetrics.recordsRead % SparkHadoopUtil.UPDATE_INPUT_METRICS_INTERVAL_RECORDS == 0) {
updateBytesRead()
}
nextElement
}
private def readCurrentFile(): Iterator[InternalRow] = {
try {
readFunction(currentFile)
} catch {
case e: FileNotFoundException =>
throw new FileNotFoundException(
e.getMessage + "\n" +
"It is possible the underlying files have been updated. " +
"You can explicitly invalidate the cache in Spark by " +
"running 'REFRESH TABLE tableName' command in SQL or " +
"by recreating the Dataset/DataFrame involved.")
}
}
/** Advances to the next file. Returns true if a new non-empty iterator is available. */
private def nextIterator(): Boolean = {
updateBytesReadWithFileSize()
if (files.hasNext) {
currentFile = files.next()
logInfo(s"Reading File $currentFile")
// Sets InputFileBlockHolder for the file block's information
InputFileBlockHolder.set(currentFile.filePath, currentFile.start, currentFile.length)
if (ignoreCorruptFiles) {
currentIterator = new NextIterator[Object] {
// The readFunction may read some bytes before consuming the iterator, e.g.,
// vectorized Parquet reader. Here we use lazy val to delay the creation of
// iterator so that we will throw exception in `getNext`.
private lazy val internalIter = readCurrentFile()
override def getNext(): AnyRef = {
try {
if (internalIter.hasNext) {
internalIter.next()
} else {
finished = true
null
}
} catch {
// Throw FileNotFoundException even if `ignoreCorruptFiles` is true
case e: FileNotFoundException => throw e
case e @ (_: RuntimeException | _: IOException) =>
logWarning(
s"Skipped the rest of the content in the corrupted file: $currentFile", e)
finished = true
null
}
}
override def close(): Unit = {}
}
} else {
currentIterator = readCurrentFile()
}
hasNext
} else {
currentFile = null
InputFileBlockHolder.unset()
false
}
}
override def close(): Unit = {
updateBytesRead()
updateBytesReadWithFileSize()
InputFileBlockHolder.unset()
}
}
// Register an on-task-completion callback to close the input stream.
context.addTaskCompletionListener(_ => iterator.close())
iterator.asInstanceOf[Iterator[InternalRow]] // This is an erasure hack.
}
override protected def getPartitions: Array[RDDPartition] = filePartitions.toArray
override protected def getPreferredLocations(split: RDDPartition): Seq[String] = {
val files = split.asInstanceOf[FilePartition].files
// Computes the total number of bytes that can be retrieved from each host.
val hostToNumBytes = mutable.HashMap.empty[String, Long]
files.foreach { file =>
file.locations.filter(_ != "localhost").foreach { host =>
hostToNumBytes(host) = hostToNumBytes.getOrElse(host, 0L) + file.length
}
}
// Takes the first 3 hosts with the most data to be retrieved
hostToNumBytes.toSeq.sortBy {
case (host, numBytes) => numBytes
}.reverse.take(3).map {
case (host, numBytes) => host
}
}
}
|
minixalpha/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
|
Scala
|
apache-2.0
| 8,768
|
object Main{
def main(args: Array[String]){
println("Hello 99 Scala Problem World!")
}
}
|
sakabar/ninetyNineScala
|
src/main/scala/Main.scala
|
Scala
|
gpl-3.0
| 97
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Since
import org.apache.spark.mllib.linalg.{BLAS, Vector}
import org.apache.spark.mllib.optimization._
import org.apache.spark.mllib.pmml.PMMLExportable
import org.apache.spark.mllib.regression.impl.GLMRegressionModel
import org.apache.spark.mllib.util.{Loader, Saveable}
/**
* Regression model trained using Lasso.
*
* @param weights Weights computed for every feature.
* @param intercept Intercept computed for this model.
*
*/
@Since("0.8.0")
class LassoModel @Since("1.1.0") (
@Since("1.0.0") override val weights: Vector,
@Since("0.8.0") override val intercept: Double)
extends GeneralizedLinearModel(weights, intercept)
with RegressionModel with Serializable with Saveable with PMMLExportable {
override protected def predictPoint(
dataMatrix: Vector,
weightMatrix: Vector,
intercept: Double): Double = {
BLAS.dot(weightMatrix, dataMatrix) + intercept
}
@Since("1.3.0")
override def save(sc: SparkContext, path: String): Unit = {
GLMRegressionModel.SaveLoadV1_0.save(sc, path, this.getClass.getName, weights, intercept)
}
}
@Since("1.3.0")
object LassoModel extends Loader[LassoModel] {
@Since("1.3.0")
override def load(sc: SparkContext, path: String): LassoModel = {
val (loadedClassName, version, metadata) = Loader.loadMetadata(sc, path)
// Hard-code class name string in case it changes in the future
val classNameV1_0 = "org.apache.spark.mllib.regression.LassoModel"
(loadedClassName, version) match {
case (className, "1.0") if className == classNameV1_0 =>
val numFeatures = RegressionModel.getNumFeatures(metadata)
val data = GLMRegressionModel.SaveLoadV1_0.loadData(sc, path, classNameV1_0, numFeatures)
new LassoModel(data.weights, data.intercept)
case _ => throw new Exception(
s"LassoModel.load did not recognize model with (className, format version):" +
s"($loadedClassName, $version). Supported:\\n" +
s" ($classNameV1_0, 1.0)")
}
}
}
/**
* Train a regression model with L1-regularization using Stochastic Gradient Descent.
* This solves the l1-regularized least squares regression formulation
* f(weights) = 1/2n ||A weights-y||^2^ + regParam ||weights||_1
* Here the data matrix has n rows, and the input RDD holds the set of rows of A, each with
* its corresponding right hand side label y.
* See also the documentation for the precise formulation.
*/
@Since("0.8.0")
class LassoWithSGD private[mllib] (
private var stepSize: Double,
private var numIterations: Int,
private var regParam: Double,
private var miniBatchFraction: Double)
extends GeneralizedLinearAlgorithm[LassoModel] with Serializable {
private val gradient = new LeastSquaresGradient()
private val updater = new L1Updater()
@Since("0.8.0")
override val optimizer = new GradientDescent(gradient, updater)
.setStepSize(stepSize)
.setNumIterations(numIterations)
.setRegParam(regParam)
.setMiniBatchFraction(miniBatchFraction)
override protected def createModel(weights: Vector, intercept: Double) = {
new LassoModel(weights, intercept)
}
}
|
ueshin/apache-spark
|
mllib/src/main/scala/org/apache/spark/mllib/regression/Lasso.scala
|
Scala
|
apache-2.0
| 4,062
|
class Test {
def remove[S](a: S | Int, f: Int => S):S = a match {
case a: S => a // error
case a: Int => f(a)
}
val t: Int | String = 5
val t1 = remove[String](t, _.toString)
}
|
som-snytt/dotty
|
tests/neg-custom-args/isInstanceOf/1828.scala
|
Scala
|
apache-2.0
| 206
|
package chandu0101.scalajs.rn.components
import chandu0101.scalajs.rn.ReactNative
import japgolly.scalajs.react.ReactComponentU_
import scala.scalajs.js
import scala.scalajs.js.{UndefOr, undefined}
/**
*
*
* key: PropTypes.string,
style: PropTypes.js.Any,
ref: PropTypes.String,
maximumTrackTintColor: PropTypes.string,
maximumValue: PropTypes.Double,
minimumTrackTintColor: PropTypes.string,
minimumValue: PropTypes.Double,
onSlidingComplete: PropTypes.Double => Unit,
onValueChange: PropTypes.Double => Unit,
value: PropTypes.Double,
*/
object SliderIOS {
def apply(style : js.UndefOr[js.Any] = js.undefined,
minimumTrackTintColor : js.UndefOr[String] = js.undefined,
minimumValue : js.UndefOr[Double] = js.undefined,
onSlidingComplete : js.UndefOr[Double => Unit] = js.undefined,
ref : js.UndefOr[String] = js.undefined,
maximumTrackTintColor : js.UndefOr[String] = js.undefined,
key : js.UndefOr[String] = js.undefined,
onValueChange : js.UndefOr[Double => Unit] = js.undefined,
value : js.UndefOr[Double] = js.undefined,
maximumValue : js.UndefOr[Double] = js.undefined) = {
val p = js.Dynamic.literal()
style.foreach(v => p.updateDynamic("style")(v))
minimumTrackTintColor.foreach(v => p.updateDynamic("minimumTrackTintColor")(v))
minimumValue.foreach(v => p.updateDynamic("minimumValue")(v))
onSlidingComplete.foreach(v => p.updateDynamic("onSlidingComplete")(v))
ref.foreach(v => p.updateDynamic("ref")(v))
maximumTrackTintColor.foreach(v => p.updateDynamic("maximumTrackTintColor")(v))
key.foreach(v => p.updateDynamic("key")(v))
onValueChange.foreach(v => p.updateDynamic("onValueChange")(v))
value.foreach(v => p.updateDynamic("value")(v))
maximumValue.foreach(v => p.updateDynamic("maximumValue")(v))
val f = ReactNative.createFactory(ReactNative.SliderIOS)
f(p).asInstanceOf[ReactComponentU_]
}
}
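// A hedged usage sketch (the handler body is an assumed example; relies on Scala.js' implicit A => js.UndefOr[A]):
//   SliderIOS(minimumValue = 0.0, maximumValue = 1.0, onValueChange = (v: Double) => println(s"value: $v"))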
|
beni55/scalajs-react-native
|
core/src/main/scala/chandu0101/scalajs/rn/components/SliderIOS.scala
|
Scala
|
apache-2.0
| 2,020
|
package sexamples.basics.pingpong;
import se.sics.kompics.sl._
// #header_and_port
// #header_only
class Pinger extends ComponentDefinition {
// #header_only
val ppp = requires(PingPongPort);
// #header_and_port
ctrl uponEvent {
case _: Start => {
trigger(Ping -> ppp);
}
}
ppp uponEvent {
case Pong => {
log.info(s"Got Pong!");
trigger(Ping -> ppp);
}
}
// #header_and_port
// #header_only
}
// #header_only
// #header_and_port
|
kompics/kompics-scala
|
docs/src/main/scala/sexamples/basics/pingpong/Pinger.scala
|
Scala
|
gpl-2.0
| 478
|
package controllers
import io.apibuilder.api.v0.models.UserUpdateForm
import javax.inject.Inject
import play.api.data._
import play.api.data.Forms._
import scala.concurrent.Future
class AccountProfileController @Inject() (
val apibuilderControllerComponents: ApibuilderControllerComponents
) extends ApibuilderController {
private[this] implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
def redirect = Action { implicit request =>
Redirect(routes.AccountProfileController.index())
}
def index() = Identified { implicit request =>
val tpl = request.mainTemplate(Some("Profile"))
Ok(views.html.account.profile.index(tpl, request.user))
}
def edit() = Identified { implicit request =>
val form = AccountProfileController.profileForm.fill(
AccountProfileController.ProfileData(
email = request.user.email,
nickname = request.user.nickname,
name = request.user.name
)
)
val tpl = request.mainTemplate(Some("Edit Profile"))
Ok(views.html.account.profile.edit(tpl, request.user, form))
}
def postEdit = Identified.async { implicit request =>
val tpl = request.mainTemplate(Some("Edit Profile"))
val form = AccountProfileController.profileForm.bindFromRequest
form.fold (
_ => Future {
Ok(views.html.account.profile.edit(tpl, request.user, form))
},
valid => {
request.api.users.putByGuid(
request.user.guid,
UserUpdateForm(
email = valid.email,
nickname = valid.nickname,
name = valid.name
)
).map { _ =>
Redirect(routes.AccountProfileController.index()).flashing("success" -> "Profile updated")
}.recover {
case r: io.apibuilder.api.v0.errors.ErrorsResponse => {
Ok(views.html.account.profile.edit(tpl, request.user, form, r.errors.map(_.message)))
}
}
}
)
}
}
object AccountProfileController {
case class ProfileData(
email: String,
nickname: String,
name: Option[String]
)
private[controllers] val profileForm = Form(
mapping(
"email" -> nonEmptyText,
"nickname" -> nonEmptyText,
"name" -> optional(text)
)(ProfileData.apply)(ProfileData.unapply)
)
}
|
mbryzek/apidoc
|
app/app/controllers/AccountProfileController.scala
|
Scala
|
mit
| 2,301
|
package org.mandrake.runners.slick2d
import org.mandrake.simulation.{Event, Simulation}
import org.newdawn.slick.{AppGameContainer, BasicGame, GameContainer, Graphics}
object Slick2DRunner {
def run(rootSimulation: Simulation): Unit = {
val app = new AppGameContainer(new BasicGame("") {
private var currentSimulation = rootSimulation
private var outEvents = Vector[Event]()
override def init(container: GameContainer): Unit = {}
override def update(container: GameContainer, delta: Int): Unit =
currentSimulation(Vector(SlickInput(container, delta))) match {
case (simulation, events) =>
currentSimulation = simulation
outEvents = events
}
override def render(container: GameContainer, g: Graphics): Unit = {
g.setAntiAlias(true)
outEvents.foreach(renderer(container, g))
}
})
app.setDisplayMode(app.getScreenWidth, app.getScreenHeight, false)
app.start()
}
def renderer(container: GameContainer, g: Graphics)(event: Event): Unit = event match {
case event: RenderEvent => event.render(g)
case _ =>
}
trait RenderEvent extends Event {
def render(g: Graphics): Unit
}
case class SlickInput(container: GameContainer, delta: Int) extends Event
}
|
louis-mon/mandrake
|
src/main/scala/org/mandrake/runners/slick2d/Slick2DRunner.scala
|
Scala
|
mit
| 1,296
|
package gov.uk.dvla.vehicles.acquire.runner
import cucumber.api.CucumberOptions
import cucumber.api.junit.Cucumber
import org.junit.runner.RunWith
@RunWith(classOf[Cucumber])
@CucumberOptions(
features = Array("acceptance-tests/src/test/resources/gherkin/BruteForceForVehicleKeeperLookUpService.feature"),
glue = Array("gov.uk.dvla.vehicles.acquire.stepdefs"),
tags = Array("@working","~@Ignore")
)
class BruteForceForVehicleKeeperLookUpService {
}
|
dvla/vehicles-acquire-online
|
acceptance-tests/src/test/scala/gov/uk/dvla/vehicles/acquire/runner/BruteForceForVehicleKeeperLookUpService.scala
|
Scala
|
mit
| 458
|
package com.twitter.finagle.memcached.protocol.text
import org.jboss.netty.buffer.ChannelBuffer
sealed abstract class Decoding
case class Tokens(tokens: Seq[ChannelBuffer]) extends Decoding
case class TokensWithData(
tokens: Seq[ChannelBuffer],
data: ChannelBuffer,
casUnique: Option[ChannelBuffer] = None)
extends Decoding
case class ValueLines(lines: Seq[TokensWithData]) extends Decoding
case class StatLines(lines: Seq[Tokens]) extends Decoding
|
olix0r/finagle
|
finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/Decodings.scala
|
Scala
|
apache-2.0
| 553
|
package com.glowingavenger.plan.impl
import com.glowingavenger.plan.model.Problem
import scala.collection.immutable.Queue
import org.jgrapht.DirectedGraph
import com.glowingavenger.plan.{ActionEdge, PlanDescription}
import com.glowingavenger.plan.util.ReachGraph._
import com.glowingavenger.plan.model.action.Question
import com.glowingavenger.plan.model.state.BeliefState
import org.jgrapht.graph.DirectedMultigraph
trait ProblemAware {
def problem: Problem
}
abstract class AbstractPlanner extends Successors with Axioms with ProblemAware with PlanInitializer {
def build(): PlanDescription = {
val init = initState()
val front = Queue(init)
val empty = new DirectedMultigraph[BeliefState, ActionEdge](classOf[ActionEdge])
val graph = buildRec(front, empty)
PlanDescription(init, graph, problem)
}
private def buildRec(front: Queue[BeliefState], plan: DirectedGraph[BeliefState, ActionEdge]): DirectedGraph[BeliefState, ActionEdge] = {
if (front.isEmpty) plan
else {
val (next, nextQueue) = front.dequeue
val (edges, queued) = successors(next)
buildRec(nextQueue ++ queued.toSet, plan ++ edges.toSet)
}
}
}
|
dreef3/glowing-avenger
|
src/main/scala/com/glowingavenger/plan/impl/AbstractPlanner.scala
|
Scala
|
mit
| 1,174
|
//package io.skysail.server.demo
//
//import java.net.URL
//
//import io.skysail.domain.resources.AsyncStaticResource
//import io.skysail.domain.{HtmlResponseEvent, RequestEvent}
//
//abstract case class DocResource() extends AsyncStaticResource {
// protected def getHtml(requestEvent: RequestEvent, path: String) = {
// val url: URL = bundleContext.getBundle.getResource(path)
// val is = url.openConnection().getInputStream()
// val content = scala.io.Source.fromInputStream(is).mkString
// requestEvent.controllerActor ! HtmlResponseEvent(requestEvent, content)
// }
//
//}
//
//class MetaDocResource() extends DocResource {
// override def get(requestEvent: RequestEvent): Unit = getHtml(requestEvent, "assets/html5/meta.html")
//}
//
//class DevDocResource() extends DocResource {
// override def get(requestEvent: RequestEvent): Unit = getHtml(requestEvent, "assets/html5/developer.html")
//}
//
//class HistoryDocResource() extends DocResource {
// override def get(requestEvent: RequestEvent): Unit = getHtml(requestEvent, "assets/html5/history.html")
//}
|
evandor/skysail-server
|
skysail.server.demo/src/io/skysail/server/demo/MetaDocResource.scala
|
Scala
|
apache-2.0
| 1,082
|
package co.rc.tokenmanager.hmac.base
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ValueReader
/**
* Utility class that defines finite time duration
* @param unit Time unit
* @param length Time length value
*/
case class TimeDuration( unit: String, length: Int ) {
require( List(
"s", "second", "seconds",
"m", "minute", "minutes",
"h", "hour", "hours",
"d", "day", "days",
"w", "week", "weeks" ).contains( unit ), "Invalid unit for time duration" )
}
object TimeDuration {
// Implicit value reader for ficus config
implicit val reader: ValueReader[ TimeDuration ] = ValueReader.relative { config =>
TimeDuration(
config.as[ String ]( "unit" ),
config.as[ Int ]( "length" )
)
}
}
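// A minimal sketch of reading a TimeDuration through the implicit reader above
// (the "token.expiration" path and Typesafe Config loading are assumptions, not part of this file):
//   import com.typesafe.config.ConfigFactory
//   import net.ceedubs.ficus.Ficus._
//   val expiration: TimeDuration = ConfigFactory.load().as[TimeDuration]("token.expiration")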
|
rodricifuentes1/token-manager
|
src/main/scala/co/rc/tokenmanager/hmac/base/TimeDuration.scala
|
Scala
|
mit
| 757
|
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.persistence
import java.util.concurrent.CompletionStage
import java.util.{ Optional, UUID }
import akka.japi.Pair
import akka.stream.javadsl
import akka.{ Done, NotUsed }
import com.lightbend.lagom.javadsl.persistence.Offset.{ Sequence, TimeBasedUUID }
import scala.concurrent.duration._
/**
* At system startup all [[PersistentEntity]] classes must be registered here
* with [[PersistentEntityRegistry#register]].
*
* Later, [[PersistentEntityRef]] can be retrieved with [[PersistentEntityRegistry#refFor]].
* Commands are sent to a [[PersistentEntity]] using a `PersistentEntityRef`.
*/
trait PersistentEntityRegistry {
/**
* At system startup all [[PersistentEntity]] classes must be registered
* with this method.
*/
def register[C, E, S](entityClass: Class[_ <: PersistentEntity[C, E, S]]): Unit
/**
* Retrieve a [[PersistentEntityRef]] for a given [[PersistentEntity]] class
* and identifier. Commands are sent to a `PersistentEntity` using a `PersistentEntityRef`.
*/
def refFor[C](entityClass: Class[_ <: PersistentEntity[C, _, _]], entityId: String): PersistentEntityRef[C]
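  // A hedged usage sketch (OrderEntity, OrderCommand and how `registry` is obtained are assumed, not part of this API):
  //   registry.register(classOf[OrderEntity])
  //   val ref: PersistentEntityRef[OrderCommand] = registry.refFor(classOf[OrderEntity], "order-42")
  //   ref.ask(new PlaceOrder(...))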
/**
* A stream of the persistent events that have the given `aggregateTag`, e.g.
* all persistent events of all `Order` entities.
*
* The type of the offset is journal dependent, some journals use time-based
* UUID offsets, while others use sequence numbers. The passed in `fromOffset`
* must either be [[Offset#NONE]], or an offset that has previously been produced
* by this journal.
*
* The stream will begin with events starting ''after'' `fromOffset`.
* To resume an event stream, store the `Offset` corresponding to the most
* recently processed `Event`, and pass that back as the value for
* `fromOffset` to start the stream from events following that one.
*
* @throws IllegalArgumentException If the `fromOffset` type is not supported
* by this journal.
*/
def eventStream[Event <: AggregateEvent[Event]](
aggregateTag: AggregateEventTag[Event],
fromOffset: Offset
): javadsl.Source[Pair[Event, Offset], NotUsed]
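  // A hedged resume sketch (ShoppingCartEvent.TAG, handleEvent and the offset store are assumed, not part of this API):
  //   registry.eventStream(ShoppingCartEvent.TAG, offsetStore.loadSavedOffset())
  //     .mapAsync(1, pair => handleEvent(pair.first).thenApply(_ => offsetStore.save(pair.second)))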
/**
* A stream of the persistent events that have the given `aggregateTag`, e.g.
* all persistent events of all `Order` entities.
*
* This method will only work with journals that support UUID offsets. Journals that
* produce sequence offsets will fail during stream handling.
*/
@deprecated("Use eventStream(AggregateEventTag, Offset) instead", "1.2.0")
def eventStream[Event <: AggregateEvent[Event]](
aggregateTag: AggregateEventTag[Event],
fromOffset: Optional[UUID]
): javadsl.Source[Pair[Event, UUID], NotUsed] = {
val offset = if (fromOffset.isPresent) {
Offset.timeBasedUUID(fromOffset.get())
} else Offset.NONE
eventStream(aggregateTag, offset).asScala.map { pair =>
val uuid = pair.second match {
case timeBased: TimeBasedUUID => timeBased.value()
case sequence: Sequence =>
// While we *could* translate the sequence number to a time-based UUID, this would be very bad, since the UUID
// would either be non unique (violating the fundamental aim of UUIDs), or it would change every time the
// event was loaded. Also, a sequence number is not a timestamp.
throw new IllegalStateException("Sequence based offset is not supported in a UUID event stream")
}
Pair(pair.first, uuid)
}.asJava
}
/**
* Gracefully stop the persistent entities and leave the cluster.
* The persistent entities will be started on another node when
* new messages are sent to them.
*
* @return the `CompletionStage` is completed when the node has been
* removed from the cluster
*/
def gracefulShutdown(timeout: FiniteDuration): CompletionStage[Done]
}
|
edouardKaiser/lagom
|
persistence/javadsl/src/main/scala/com/lightbend/lagom/javadsl/persistence/PersistentEntityRegistry.scala
|
Scala
|
apache-2.0
| 3,922
|
package whitespace
import skinny.orm._, feature._
import scalikejdbc._
import org.joda.time._
case class Tag(
id: Long,
name: String,
createdAt: DateTime,
updatedAt: Option[DateTime] = None
)
object Tag extends SkinnyCRUDMapper[Tag] with TimestampsFeature[Tag] {
override val connectionPoolName = Symbol("ws")
override val tableName = "tags"
override val defaultAlias = createAlias("t")
override def extract(rs: WrappedResultSet, rn: ResultName[Tag]): Tag = new Tag(
id = rs.get(rn.id),
name = rs.get(rn.name),
createdAt = rs.get(rn.createdAt),
updatedAt = rs.get(rn.updatedAt)
)
}
|
skinny-framework/skinny-framework
|
factory-girl/src/test/scala/whitespace/Tag.scala
|
Scala
|
mit
| 643
|
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
package typedef
import com.intellij.execution.junit.JUnitUtil
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.project.DumbService
import com.intellij.openapi.util.Key
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi._
import com.intellij.psi.impl.PsiClassImplUtil.MemberType
import com.intellij.psi.impl.{PsiClassImplUtil, PsiSuperMethodImplUtil}
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.scope.processor.MethodsProcessor
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.util.{PsiTreeUtil, PsiUtil}
import org.jetbrains.plugins.scala.caches.{CachesUtil, ScalaShortNamesCacheManager}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil.isLineTerminator
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSelfTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers
import org.jetbrains.plugins.scala.lang.psi.light.ScFunctionWrapper
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScThisType
import org.jetbrains.plugins.scala.lang.psi.types.result.{TypeResult, Typeable, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInsidePsiElement, ModCount}
import scala.collection.JavaConverters._
/**
* @author ven
*/
trait ScTemplateDefinition extends ScNamedElement with PsiClass with Typeable {
import com.intellij.psi.PsiMethod
def qualifiedName: String = null
def additionalJavaNames: Array[String] = Array.empty
def originalElement: Option[ScTemplateDefinition] = Option(getUserData(originalElemKey))
def setDesugared(actualElement: ScTypeDefinition): ScTemplateDefinition = {
putUserData(originalElemKey, actualElement)
members.foreach { member =>
member.setSynthetic(actualElement)
member.setSyntheticContainingClass(actualElement)
}
this
}
def isDesugared: Boolean = originalElement.isDefined
def desugaredElement: Option[ScTemplateDefinition] = None
@Cached(ModCount.anyScalaPsiModificationCount, this)
def physicalExtendsBlock: ScExtendsBlock = this.stubOrPsiChild(ScalaElementTypes.EXTENDS_BLOCK).orNull
def extendsBlock: ScExtendsBlock = desugaredElement.map(_.extendsBlock).getOrElse(physicalExtendsBlock)
def innerExtendsListTypes: Array[PsiClassType] = {
val eb = extendsBlock
if (eb != null) {
val tp = eb.templateParents
implicit val elementScope = ElementScope(getProject)
tp match {
case Some(tp1) => (for (te <- tp1.allTypeElements;
t = te.getType(TypingContext.empty).getOrAny;
asPsi = t.toPsiType
if asPsi.isInstanceOf[PsiClassType]) yield asPsi.asInstanceOf[PsiClassType]).toArray[PsiClassType]
case _ => PsiClassType.EMPTY_ARRAY
}
} else PsiClassType.EMPTY_ARRAY
}
def showAsInheritor: Boolean = extendsBlock.templateBody.isDefined
override def findMethodBySignature(patternMethod: PsiMethod, checkBases: Boolean): PsiMethod = {
PsiClassImplUtil.findMethodBySignature(this, patternMethod, checkBases)
}
override def findMethodsBySignature(patternMethod: PsiMethod, checkBases: Boolean): Array[PsiMethod] = {
PsiClassImplUtil.findMethodsBySignature(this, patternMethod, checkBases)
}
override def findMethodsByName(name: String, checkBases: Boolean): Array[PsiMethod] = {
val toSearchWithIndices = Set("main", JUnitUtil.SUITE_METHOD_NAME) //these methods may be searched from EDT, search them without building a whole type hierarchy
def withIndices(): Array[PsiMethod] = {
val inThisClass = functionsByName(name)
val files = this.allSupers.flatMap(_.containingVirtualFile).asJava
val scope = GlobalSearchScope.filesScope(getProject, files)
val manager = ScalaShortNamesCacheManager.getInstance(getProject)
val candidates = manager.getMethodsByName(name, scope)
val inBaseClasses = candidates.filter(m => this.isInheritor(m.containingClass, deep = true))
(inThisClass ++ inBaseClasses).toArray
}
if (toSearchWithIndices.contains(name)) withIndices()
else PsiClassImplUtil.findMethodsByName(this, name, checkBases)
}
override def findFieldByName(name: String, checkBases: Boolean): PsiField = {
PsiClassImplUtil.findFieldByName(this, name, checkBases)
}
override def findInnerClassByName(name: String, checkBases: Boolean): PsiClass = {
PsiClassImplUtil.findInnerByName(this, name, checkBases)
}
import java.util.{Collection => JCollection, List => JList}
import com.intellij.openapi.util.{Pair => IPair}
def getAllFields: Array[PsiField] = {
PsiClassImplUtil.getAllFields(this)
}
override def findMethodsAndTheirSubstitutorsByName(name: String,
checkBases: Boolean): JList[IPair[PsiMethod, PsiSubstitutor]] = {
//the reordering is a hack to enable 'go to test location' for junit test methods defined in traits
import scala.collection.JavaConversions._
PsiClassImplUtil.findMethodsAndTheirSubstitutorsByName(this, name, checkBases).toList.sortBy(myPair =>
myPair.first match {
case wrapper: ScFunctionWrapper if wrapper.delegate.isInstanceOf[ScFunctionDeclaration] => 1
case wrapper: ScFunctionWrapper if wrapper.delegate.isInstanceOf[ScFunctionDefinition] => wrapper.containingClass match {
case myClass: ScTemplateDefinition if myClass.members.contains(wrapper.delegate) => 0
case _ => 1
}
case _ => 1
})
}
override def getAllMethodsAndTheirSubstitutors: JList[IPair[PsiMethod, PsiSubstitutor]] = {
PsiClassImplUtil.getAllWithSubstitutorsByMap(this, MemberType.METHOD)
}
@CachedInsidePsiElement(this, CachesUtil.libraryAwareModTracker(this))
override def getVisibleSignatures: JCollection[HierarchicalMethodSignature] = {
PsiSuperMethodImplUtil.getVisibleSignatures(this)
}
def getTypeWithProjections(ctx: TypingContext, thisProjections: Boolean = false): TypeResult[ScType]
def members: Seq[ScMember] = extendsBlock.members ++ syntheticMembers
def functions: Seq[ScFunction] = extendsBlock.functions
def aliases: Seq[ScTypeAlias] = extendsBlock.aliases
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMethodsWithOverride: Seq[PsiMethod] = syntheticMethodsWithOverrideImpl
/**
* Implement it carefully to avoid recursion.
*/
protected def syntheticMethodsWithOverrideImpl: Seq[PsiMethod] = Seq.empty
def allSynthetics: Seq[PsiMethod] = syntheticMethodsNoOverride ++ syntheticMethodsWithOverride
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMethodsNoOverride: Seq[PsiMethod] = syntheticMethodsNoOverrideImpl
protected def syntheticMethodsNoOverrideImpl: Seq[PsiMethod] = Seq.empty
def typeDefinitions: Seq[ScTypeDefinition] = extendsBlock.typeDefinitions
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticTypeDefinitions: Seq[ScTypeDefinition] = syntheticTypeDefinitionsImpl
protected def syntheticTypeDefinitionsImpl: Seq[ScTypeDefinition] = Seq.empty
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMembers: Seq[ScMember] = syntheticMembersImpl
protected def syntheticMembersImpl: Seq[ScMember] = Seq.empty
def selfTypeElement: Option[ScSelfTypeElement] = {
val qual = qualifiedName
if (qual != null && (qual == "scala.Predef" || qual == "scala")) return None
extendsBlock.selfTypeElement
}
def selfType: Option[ScType] = extendsBlock.selfType
def superTypes: List[ScType] = extendsBlock.superTypes
def supers: Seq[PsiClass] = extendsBlock.supers
def allTypeAliases: Seq[(PsiNamedElement, ScSubstitutor)] = TypeDefinitionMembers.getTypes(this).allFirstSeq().flatMap(n => n.map {
case (_, x) => (x.info, x.substitutor)
}) ++ syntheticTypeDefinitions.filter(!_.isObject).map((_, ScSubstitutor.empty))
def allTypeAliasesIncludingSelfType: Seq[(PsiNamedElement, ScSubstitutor)] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getTypes(c, Some(clazzType), this).allFirstSeq().
flatMap(_.map { case (_, n) => (n.info, n.substitutor) })
case _ =>
allTypeAliases
}
case _ =>
allTypeAliases
}
}
def allVals: Seq[(PsiNamedElement, ScSubstitutor)] =
TypeDefinitionMembers.getSignatures(this).allFirstSeq()
.flatMap(n => n.filter {
case (_, x) => !x.info.isInstanceOf[PhysicalSignature] &&
(x.info.namedElement match {
case v =>
ScalaPsiUtil.nameContext(v) match {
case _: ScVariable => v.name == x.info.name
case _: ScValue => v.name == x.info.name
case _ => true
}
})
})
.distinctBy { case (_, y) => y.info.namedElement }
.map { case (_, n) => (n.info.namedElement, n.substitutor) }
def allValsIncludingSelfType: Seq[(PsiNamedElement, ScSubstitutor)] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().flatMap(n => n.filter{
case (_, x) => !x.info.isInstanceOf[PhysicalSignature] &&
(x.info.namedElement match {
case v =>
ScalaPsiUtil.nameContext(v) match {
case _: ScVariable => v.name == x.info.name
case _: ScValue => v.name == x.info.name
case _ => true
}
})}).map { case (_, n) => (n.info.namedElement, n.substitutor) }
case _ =>
allVals
}
case _ =>
allVals
}
}
def allMethods: Iterable[PhysicalSignature] =
TypeDefinitionMembers.getSignatures(this).allFirstSeq().flatMap(_.filter {
case (_, n) => n.info.isInstanceOf[PhysicalSignature]}).
map { case (_, n) => n.info.asInstanceOf[PhysicalSignature] } ++
syntheticMethodsNoOverride.map(new PhysicalSignature(_, ScSubstitutor.empty))
def allMethodsIncludingSelfType: Iterable[PhysicalSignature] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().flatMap(_.filter {
case (_, n) => n.info.isInstanceOf[PhysicalSignature]}).
map { case (_, n) => n.info.asInstanceOf[PhysicalSignature] } ++
syntheticMethodsNoOverride.map(new PhysicalSignature(_, ScSubstitutor.empty))
case _ =>
allMethods
}
case _ =>
allMethods
}
}
def allSignatures: Seq[Signature] = TypeDefinitionMembers.getSignatures(this).allFirstSeq().flatMap(_.map { case (_, n) => n.info })
def allSignaturesIncludingSelfType: Seq[Signature] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().
flatMap(_.map { case (_, n) => n.info })
case _ =>
allSignatures
}
case _ =>
allSignatures
}
}
def isScriptFileClass: Boolean = getContainingFile match {
case file: ScalaFile => file.isScriptFile
case _ => false
}
def processDeclarations(processor: PsiScopeProcessor,
oldState: ResolveState,
lastParent: PsiElement,
place: PsiElement) : Boolean = {
if (!processor.isInstanceOf[BaseProcessor]) {
val lastChild = this.lastChildStub.orNull
val languageLevel: LanguageLevel =
processor match {
case methodProcessor: MethodsProcessor => methodProcessor.getLanguageLevel
case _ => PsiUtil.getLanguageLevel(place)
}
return PsiClassImplUtil.processDeclarationsInClass(this, processor, oldState, null, lastChild, place, languageLevel, false)
}
if (extendsBlock.templateBody.isDefined &&
PsiTreeUtil.isContextAncestor(extendsBlock.templateBody.get, place, false) && lastParent != null) return true
processDeclarationsForTemplateBody(processor, oldState, lastParent, place)
}
def processDeclarationsForTemplateBody(processor: PsiScopeProcessor,
oldState: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (DumbService.getInstance(getProject).isDumb) return true
var state = oldState
//exception cases
this match {
case s: ScTypeParametersOwner => s.typeParametersClause match {
case Some(tpc) if PsiTreeUtil.isContextAncestor(tpc, place, false) => return true
case _ =>
}
case _ =>
}
// Process selftype reference
selfTypeElement match {
case Some(se) if se.name != "_" => if (!processor.execute(se, state)) return false
case _ =>
}
state = state.put(BaseProcessor.FROM_TYPE_KEY,
if (ScalaPsiUtil.isPlaceTdAncestor(this, place)) ScThisType(this)
else ScalaType.designator(this))
val eb = extendsBlock
eb.templateParents match {
case Some(p) if PsiTreeUtil.isContextAncestor(p, place, false) =>
eb.earlyDefinitions match {
case Some(ed) => for (m <- ed.members) {
ProgressManager.checkCanceled()
m match {
case _var: ScVariable => for (declared <- _var.declaredElements) {
ProgressManager.checkCanceled()
if (!processor.execute(declared, state)) return false
}
case _val: ScValue => for (declared <- _val.declaredElements) {
ProgressManager.checkCanceled()
if (!processor.execute(declared, state)) return false
}
}
}
case None =>
}
true
case _ =>
eb.earlyDefinitions match {
case Some(ed) if PsiTreeUtil.isContextAncestor(ed, place, true) =>
case _ =>
extendsBlock match {
case e: ScExtendsBlock if e != null =>
if (PsiTreeUtil.isContextAncestor(e, place, true) ||
ScalaPsiUtil.isSyntheticContextAncestor(e, place) ||
!PsiTreeUtil.isContextAncestor(this, place, true)) {
this match {
case t: ScTypeDefinition if selfTypeElement.isDefined &&
!PsiTreeUtil.isContextAncestor(selfTypeElement.get, place, true) &&
PsiTreeUtil.isContextAncestor(e.templateBody.orNull, place, true) &&
processor.isInstanceOf[BaseProcessor] && !t.isInstanceOf[ScObject] =>
selfTypeElement match {
case Some(_) => processor.asInstanceOf[BaseProcessor].processType(ScThisType(t), place, state)
case _ =>
if (!TypeDefinitionMembers.processDeclarations(this, processor, state, lastParent, place)) {
return false
}
}
case _ =>
if (!TypeDefinitionMembers.processDeclarations(this, processor, state, lastParent, place)) return false
}
}
case _ =>
}
}
true
}
}
def addMember(member: ScMember, anchor: Option[PsiElement]): ScMember = {
implicit val projectContext = member.projectContext
extendsBlock.templateBody.map {
_.getNode
}.map { node =>
val beforeNode = anchor.map {
_.getNode
}.getOrElse {
val last = node.getLastChildNode
last.getTreePrev match {
case result if isLineTerminator(result.getPsi) => result
case _ => last
}
}
val before = beforeNode.getPsi
if (isLineTerminator(before))
node.addChild(createNewLineNode(), beforeNode)
node.addChild(member.getNode, beforeNode)
val newLineNode = createNewLineNode()
if (isLineTerminator(before)) {
node.replaceChild(beforeNode, newLineNode)
} else {
node.addChild(newLineNode, beforeNode)
}
member
}.getOrElse {
val node = extendsBlock.getNode
node.addChild(createWhitespace.getNode)
node.addChild(createBodyFromMember(member.getText).getNode)
members.head
}
}
def deleteMember(member: ScMember) {
member.getParent.getNode.removeChild(member.getNode)
}
def functionsByName(name: String): Seq[PsiMethod] = {
(for ((p: PhysicalSignature, _) <- TypeDefinitionMembers.getSignatures(this).forName(name)._1) yield p.method).
++(syntheticMethodsNoOverride.filter(_.name == name))
}
override def isInheritor(baseClass: PsiClass, deep: Boolean): Boolean = {
val basePath = Path.of(baseClass)
    // These don't appear in the superTypes at the moment, so a special case is required.
if (basePath == Path.javaObject || (basePath == Path.scalaObject && !baseClass.isDeprecated)) return true
if (basePath.kind.isFinal) return false
if (deep) superPathsDeep.contains(basePath)
else superPaths.contains(basePath)
}
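  // Illustrative sketch (hypothetical `objectClass` resolving to java.lang.Object):
  // isInheritor(objectClass, deep = false) returns true through the special case above even
  // when superPaths does not contain Path.javaObject, while a base whose Path.kind.isFinal
  // (for example an object) short-circuits to false before any path lookup.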
@Cached(ModCount.getModificationCount, this)
def cachedPath: Path = {
val kind = this match {
case _: ScTrait => Kind.ScTrait
case _: ScClass => Kind.ScClass
case _: ScObject => Kind.ScObject
case _: ScNewTemplateDefinition => Kind.ScNewTd
case s: ScSyntheticClass if s.className != "AnyRef" && s.className != "AnyVal" => Kind.SyntheticFinal
case _ => Kind.NonScala
}
Path(name, Option(qualifiedName), kind)
}
@Cached(ModCount.getModificationCount, this)
private def superPaths: Set[Path] = {
if (DumbService.getInstance(getProject).isDumb) return Set.empty //to prevent failing during indexes
supers.map(Path.of).toSet
}
@Cached(ModCount.getModificationCount, this)
private def superPathsDeep: Set[Path] = {
if (DumbService.getInstance(getProject).isDumb) return Set.empty //to prevent failing during indexes
var collected = Set[Path]()
def addForClass(c: PsiClass): Unit = {
val path = c match {
case td: ScTemplateDefinition => td.cachedPath
case _ => Path.of(c)
}
if (!collected.contains(path)) {
collected += path
c match {
case td: ScTemplateDefinition =>
val supersIterator = td.supers.iterator
while (supersIterator.hasNext) {
addForClass(supersIterator.next())
}
case other =>
val supersIterator = other.getSuperTypes.iterator
while (supersIterator.hasNext) {
val psiT = supersIterator.next()
val next = psiT.resolveGenerics.getElement
if (next != null) {
addForClass(next)
}
}
}
}
}
addForClass(this)
collected - cachedPath
}
}
object ScTemplateDefinition {
object ExtendsBlock {
def unapply(definition: ScTemplateDefinition): Some[ScExtendsBlock] = Some(definition.extendsBlock)
}
sealed abstract class Kind(val isFinal: Boolean)
object Kind {
object ScClass extends Kind(false)
object ScTrait extends Kind(false)
object ScObject extends Kind(true)
object ScNewTd extends Kind(true)
object SyntheticFinal extends Kind(true)
object NonScala extends Kind(false)
}
case class Path(name: String, qName: Option[String], kind: Kind)
object Path {
def of(c: PsiClass): Path = {
c match {
case td: ScTemplateDefinition =>
td.cachedPath
case s: ScSyntheticClass if s.className != "AnyRef" && s.className != "AnyVal" =>
Path(c.name, Option(c.qualifiedName), Kind.SyntheticFinal)
case s: ScSyntheticClass =>
Path(c.name, Option(c.qualifiedName), Kind.ScClass)
case _ =>
Path(c.name, Option(c.qualifiedName), Kind.NonScala)
}
}
val javaObject = Path("Object", Some("java.lang.Object"), Kind.NonScala)
val scalaObject = Path("ScalaObject", Some("scala.ScalaObject"), Kind.ScTrait)
}
private val originalElemKey: Key[ScTemplateDefinition] = Key.create("ScTemplateDefinition.originalElem")
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/typedef/ScTemplateDefinition.scala
|
Scala
|
apache-2.0
| 22,059
|
package pl.touk.nussknacker.ui.definition.additionalproperty
import pl.touk.nussknacker.engine.api.definition.{SimpleParameterEditor, StringParameterEditor}
import pl.touk.nussknacker.engine.api.component.AdditionalPropertyConfig
object UiAdditionalPropertyEditorDeterminer {
def determine(config: AdditionalPropertyConfig): SimpleParameterEditor = {
config.editor match {
case Some(editor: SimpleParameterEditor) => editor
case None => StringParameterEditor
}
}
}
|
TouK/nussknacker
|
ui/server/src/main/scala/pl/touk/nussknacker/ui/definition/additionalproperty/UiAdditionalPropertyEditorDeterminer.scala
|
Scala
|
apache-2.0
| 492
|
/*
* Copyright (c) 2018 OVO Energy
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package com.ovoenergy.comms.model
package email
import com.ovoenergy.comms.KafkaMessage
import com.ovoenergy.comms.model._
import com.ovoenergy.comms.model.types.ProgressedEventV2
import com.sksamuel.avro4s.AvroDoc
@KafkaMessage
case class EmailProgressedV2(
metadata: MetadataV2,
internalMetadata: InternalMetadata,
@AvroDoc("Status of given Email (Delivered, Bounced etc)") status: EmailStatus,
@AvroDoc("The Gateway from which the given event originated") gateway: Gateway,
@AvroDoc(
"The ID given to the event by the origin gateway. This field is optional because Mailgun's webhooks are inconsistent. They include the messageId in some events but not others") gatewayMessageId: Option[
String] = None,
@AvroDoc("Providing context to the progression, i.e. a reason for a delivery failure") reason: Option[
String] = None
) extends LoggableEvent
with ProgressedEventV2 {
override def loggableString: Option[String] = prettyPrint(this, Seq.empty)
override def mdcMap: Map[String, String] = {
val eventMDC = Map("gateway" -> gateway.toString)
combineMDCS(metadata.mdcMap, internalMetadata.mdcMap, eventMDC)
}
}
|
ovotech/comms-kafka-messages
|
modules/core/src/main/scala/com/ovoenergy/comms/model/email/EmailProgressedV2.scala
|
Scala
|
mit
| 2,286
|
package com.pourtois.symbolette.expr
import Expr._
import org.scalatest._
/**
* Created by manu on 24.10.16.
*/
class TestUnary extends FunSuite {
test("Soon your pocahontas compilable test name here") {
val z = symbol("z")
val r = symbol("r")
val x = symbol("x", r :: z :: Nil)
val diff = -(-x)
val opti = diff.optimized
val best = diff.best
println("Diff " + diff)
printOpti(opti)
println("Best " + best)
assert(best == x)
assert((-(-(-(-x)))).best == x)
assert((-(-(-(-(-x))))).best == (-x))
}
test("binary") {
val a = symbol("a")
val b = symbol("b")
val c = symbol("c")
val sum = a + a
// printOpti(sum.optimized)
assert((a + a).best == Integer(2) * a)
val sum4 = a + a + a + a
//printOpti(sum4.optimized)
check(a + a + a + a, Integer(4) * a)
check(a + a + a - a, Integer(2) * a)
check(a * b + a * c, a * (b + c))
check(a * b + c * a, a * (b + c))
check((a + 7) * b + c * (a + 7), (a + 7) * (b + c))
check(a * 0, ZERO)
check(a + a - a - a, ZERO)
check(a - a, ZERO)
check(a / a, ONE)
check(a / a, ONE)
assert((a * (b + c)) == (a * (b + c)))
check((a * (b + c)) / (a * (b + c)), ONE)
check((a * (b + c)) / ((b + c)*a), ONE)
check((a * (c + b)) / ((b + c)*a), ONE)
check(exp(a+c)*exp(b), exp((a+c)+b))
}
def check(e: Expr, expected: Expr): Unit = {
val best = e.best
assert(best == expected)
}
def printOpti(o: Seq[Expr]) = {
println("Optimized")
o.foreach(println(_))
}
}
|
ManuelPourtois/Symbolette
|
src/com/pourtois/symbolette/expr/TestUnary.scala
|
Scala
|
mit
| 1,568
|
package latis.reader.adapter
import latis.data._
import latis.metadata._
import latis.util.StringUtils
import java.security.cert.X509Certificate
import scala.io.Source
import javax.net.ssl.HostnameVerifier
import javax.net.ssl.HttpsURLConnection
import javax.net.ssl.SSLContext
import javax.net.ssl.SSLSession
import javax.net.ssl.X509TrustManager
import latis.dm._
class AsciiAdapter3(metadata: Metadata3, config: AdapterConfig)
extends IterativeAdapter3[String](metadata, config) {
//---- Manage data source ---------------------------------------------------
private lazy val source = Source.fromURL(getUrl)
override def close = if (source != null) source.close
//---- Adapter Properties ---------------------------------------------------
/**
* Get the String (one or more characters) that is used at the start of a
* line to indicate that it should not be read as data.
* Defaults to null, meaning that no line should be ignored (except empty lines).
* Return null if there are no comments to skip.
* Use a lazy val since this will be used for every line.
*/
lazy val getCommentCharacter: String = getProperty("commentCharacter") match {
case Some(s) => s
case None => null
}
/**
* Get the String (one or more characters) that is used to separate data values.
* Default to comma (",").
*/
def getDelimiter: String = getProperty("delimiter", ",")
//TODO: reconcile with ability to define delimiter in tsml as regex,
// but need to be able to insert into data
/**
* Return the number of lines (as returned by Source.getLines) that make up
* each data record.
*/
def getLinesPerRecord: Int = getProperty("linesPerRecord") match {
case Some(s) => s.toInt
case None => 1
}
/**
* Return the number of lines (as returned by Source.getLines) that should
* be skipped before reading data.
*/
def getLinesToSkip: Int = getProperty("skip") match {
case Some(s) => s.toInt
case None => 0
}
/**
* Get the String used as the data marker from tsml file.
* Use a lazy val since this will be used for every line.
*/
lazy val getDataMarker: String = getProperty("marker") match {
case Some(s) => s
case None => null
}
/**
* Keep track of whether we have encountered a data marker.
*/
private var foundDataMarker = false
//---- Parse operations -----------------------------------------------------
/**
* Return an Iterator of data records. Group multiple lines of text for each record.
*/
def getRecordIterator: Iterator[String] = {
val lpr = getLinesPerRecord
val dlm = getDelimiter
val records = getLineIterator.grouped(lpr).map(_.mkString(dlm))
//TODO: apply length of Function if given
getProperty("limit") match {
case Some(s) => records.take(s.toInt) //TODO: deal with bad value
case None => records
}
}
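  /*
   * Sketch of the grouping behaviour, assuming hypothetical property values
   * linesPerRecord = 2 and delimiter = ";": the source lines
   *   "1970-01-01 0.1"
   *   "1970-01-02 0.2"
   * become the single record "1970-01-01 0.1;1970-01-02 0.2", and a "limit"
   * property of 1 would keep only that first record.
   */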
/**
* Return Iterator of lines, filter out lines deemed unworthy by "shouldSkipLine".
*/
def getLineIterator: Iterator[String] = {
//TODO: does using 'drop' cause premature reading of data?
val skip = getLinesToSkip
source.getLines.drop(skip).filterNot(shouldSkipLine(_))
}
/**
* This method will be used by the lineIterator to skip lines from the data source
* that we don't want in the data.
* Note that the "isEmpty" test bypasses an end of file problem iterating over the
* iterator from Source.getLines.
*/
def shouldSkipLine(line: String): Boolean = {
val d = getDataMarker
val c = getCommentCharacter
if (d == null || foundDataMarker) {
// default behavior: ignore empty lines and lines that start with comment characters
line.isEmpty() || (c != null && line.startsWith(c))
} else {
// We have a data marker and we haven't found it yet,
// therefore we should ignore everything until we
// find it. We should also exclude the data marker itself
// when we find it.
if (line.matches(d)) foundDataMarker = true;
true
}
}
/**
* Return Map with Variable name to value(s) as Data.
*/
def parseRecord(record: String): Option[Map[String,Any]] = {
/*
* TODO: consider nested functions
* if not flattened, lines per record will be length of inner Function (assume cartesian?)
* deal with here or use algebra?
*/
//assume one value per scalar per record
val vnames = getScalarNames
val values = extractValues(record)
val datas: Seq[Any] = (values zip vnames) map { p =>
metadata.findVariableProperty(p._2, "type") match {
//TODO: handle conversion errors
case Some("integer") => p._1.toLong
case Some("real") => p._1.toDouble
case Some("text") => p._1
}
}
Some((vnames zip datas).toMap)
}
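  /*
   * Sketch of parseRecord with hypothetical scalar names ("time", "value", "flag") whose
   * declared types are integer, real and text: the record "3,1.5,ok" yields
   * Some(Map("time" -> 3L, "value" -> 1.5, "flag" -> "ok")); any other declared type is not
   * handled by the match and would fail at runtime (see the TODO above about conversion errors).
   */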
/**
* Extract the Variable values from the given record.
*/
def extractValues(record: String): Seq[String] = splitAtDelim(record)
def splitAtDelim(str: String): Array[String] = str.trim.split(getDelimiter, -1)
//Note, use "-1" so trailing ","s will yield empty strings.
}
|
dlindhol/LaTiS
|
src/main/scala/latis/reader/adapter/AsciiAdapter3.scala
|
Scala
|
epl-1.0
| 5,168
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.collection.immutable.IndexedSeq
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, SubqueryExpression}
import org.apache.spark.sql.catalyst.optimizer.EliminateResolvedHint
import org.apache.spark.sql.catalyst.plans.logical.{IgnoreCachedData, LogicalPlan, ResolvedHint}
import org.apache.spark.sql.execution.columnar.InMemoryRelation
import org.apache.spark.sql.execution.command.CommandUtils
import org.apache.spark.sql.execution.datasources.{FileIndex, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, FileTable}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.storage.StorageLevel.MEMORY_AND_DISK
/** Holds a cached logical plan and its data */
case class CachedData(plan: LogicalPlan, cachedRepresentation: InMemoryRelation)
/**
* Provides support in a SQLContext for caching query results and automatically using these cached
* results when subsequent queries are executed. Data is cached using byte buffers stored in an
 * InMemoryRelation. This relation is automatically substituted into query plans that return the
* `sameResult` as the originally cached query.
*
* Internal to Spark SQL.
*/
class CacheManager extends Logging {
/**
* Maintains the list of cached plans as an immutable sequence. Any updates to the list
* should be protected in a "this.synchronized" block which includes the reading of the
* existing value and the update of the cachedData var.
*/
@transient @volatile
private var cachedData = IndexedSeq[CachedData]()
/** Clears all cached tables. */
def clearCache(): Unit = this.synchronized {
cachedData.foreach(_.cachedRepresentation.cacheBuilder.clearCache())
cachedData = IndexedSeq[CachedData]()
}
/** Checks if the cache is empty. */
def isEmpty: Boolean = {
cachedData.isEmpty
}
/**
* Caches the data produced by the logical representation of the given [[Dataset]].
* Unlike `RDD.cache()`, the default storage level is set to be `MEMORY_AND_DISK` because
* recomputing the in-memory columnar representation of the underlying table is expensive.
*/
def cacheQuery(
query: Dataset[_],
tableName: Option[String] = None,
storageLevel: StorageLevel = MEMORY_AND_DISK): Unit = {
val planToCache = query.logicalPlan
if (lookupCachedData(planToCache).nonEmpty) {
logWarning("Asked to cache already cached data.")
} else {
// Turn off AQE so that the outputPartitioning of the underlying plan can be leveraged.
val sessionWithAqeOff = QueryExecution.getOrCloneSessionWithAqeOff(query.sparkSession)
val inMemoryRelation = sessionWithAqeOff.withActive {
val qe = sessionWithAqeOff.sessionState.executePlan(planToCache)
InMemoryRelation(
sessionWithAqeOff.sessionState.conf.useCompression,
sessionWithAqeOff.sessionState.conf.columnBatchSize, storageLevel,
qe.executedPlan,
tableName,
optimizedPlan = qe.optimizedPlan)
}
this.synchronized {
if (lookupCachedData(planToCache).nonEmpty) {
logWarning("Data has already been cached.")
} else {
cachedData = CachedData(planToCache, inMemoryRelation) +: cachedData
}
}
}
}
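  // Usage sketch (hypothetical names): given `spark: SparkSession` and `df: Dataset[_]`,
  // spark.sharedState.cacheManager.cacheQuery(df) builds an InMemoryRelation for df's logical
  // plan with AQE turned off; any later plan fragment that `sameResult`-matches it is then
  // rewritten by useCachedData to read from that cached relation.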
/**
* Un-cache the given plan or all the cache entries that refer to the given plan.
* @param query The [[Dataset]] to be un-cached.
* @param cascade If true, un-cache all the cache entries that refer to the given
* [[Dataset]]; otherwise un-cache the given [[Dataset]] only.
*/
def uncacheQuery(
query: Dataset[_],
cascade: Boolean): Unit = {
uncacheQuery(query.sparkSession, query.logicalPlan, cascade)
}
/**
* Un-cache the given plan or all the cache entries that refer to the given plan.
* @param spark The Spark session.
* @param plan The plan to be un-cached.
* @param cascade If true, un-cache all the cache entries that refer to the given
* plan; otherwise un-cache the given plan only.
* @param blocking Whether to block until all blocks are deleted.
*/
def uncacheQuery(
spark: SparkSession,
plan: LogicalPlan,
cascade: Boolean,
blocking: Boolean = false): Unit = {
val shouldRemove: LogicalPlan => Boolean =
if (cascade) {
_.find(_.sameResult(plan)).isDefined
} else {
_.sameResult(plan)
}
val plansToUncache = cachedData.filter(cd => shouldRemove(cd.plan))
this.synchronized {
cachedData = cachedData.filterNot(cd => plansToUncache.exists(_ eq cd))
}
plansToUncache.foreach { _.cachedRepresentation.cacheBuilder.clearCache(blocking) }
// Re-compile dependent cached queries after removing the cached query.
if (!cascade) {
recacheByCondition(spark, cd => {
// If the cache buffer has already been loaded, we don't need to recompile the cached plan,
// as it does not rely on the plan that has been uncached anymore, it will just produce
// data from the cache buffer.
// Note that the `CachedRDDBuilder.isCachedColumnBuffersLoaded` call is a non-locking
        // status test and may not return the most accurate cache buffer state. So the worst-case
        // scenario can be:
        // 1) The buffer has been loaded, but `isCachedColumnBuffersLoaded` returns false, then we
        //    will clear the buffer and re-compile the plan. It is inefficient but doesn't affect
// correctness.
// 2) The buffer has been cleared, but `isCachedColumnBuffersLoaded` returns true, then we
// will keep it as it is. It means the physical plan has been re-compiled already in the
// other thread.
val cacheAlreadyLoaded = cd.cachedRepresentation.cacheBuilder.isCachedColumnBuffersLoaded
cd.plan.find(_.sameResult(plan)).isDefined && !cacheAlreadyLoaded
})
}
}
// Analyzes column statistics in the given cache data
private[sql] def analyzeColumnCacheQuery(
sparkSession: SparkSession,
cachedData: CachedData,
column: Seq[Attribute]): Unit = {
val relation = cachedData.cachedRepresentation
val (rowCount, newColStats) =
CommandUtils.computeColumnStats(sparkSession, relation, column)
relation.updateStats(rowCount, newColStats)
}
/**
* Tries to re-cache all the cache entries that refer to the given plan.
*/
def recacheByPlan(spark: SparkSession, plan: LogicalPlan): Unit = {
recacheByCondition(spark, _.plan.find(_.sameResult(plan)).isDefined)
}
/**
   * Re-caches all the cache entries that satisfy the given `condition`.
*/
private def recacheByCondition(
spark: SparkSession,
condition: CachedData => Boolean): Unit = {
val needToRecache = cachedData.filter(condition)
this.synchronized {
      // Remove the cache entry before creating new ones.
cachedData = cachedData.filterNot(cd => needToRecache.exists(_ eq cd))
}
needToRecache.map { cd =>
cd.cachedRepresentation.cacheBuilder.clearCache()
// Turn off AQE so that the outputPartitioning of the underlying plan can be leveraged.
val sessionWithAqeOff = QueryExecution.getOrCloneSessionWithAqeOff(spark)
val newCache = sessionWithAqeOff.withActive {
val qe = sessionWithAqeOff.sessionState.executePlan(cd.plan)
InMemoryRelation(
cacheBuilder = cd.cachedRepresentation.cacheBuilder.copy(cachedPlan = qe.executedPlan),
optimizedPlan = qe.optimizedPlan)
}
val recomputedPlan = cd.copy(cachedRepresentation = newCache)
this.synchronized {
if (lookupCachedData(recomputedPlan.plan).nonEmpty) {
logWarning("While recaching, data was already added to cache.")
} else {
cachedData = recomputedPlan +: cachedData
}
}
}
}
/** Optionally returns cached data for the given [[Dataset]] */
def lookupCachedData(query: Dataset[_]): Option[CachedData] = {
lookupCachedData(query.logicalPlan)
}
/** Optionally returns cached data for the given [[LogicalPlan]]. */
def lookupCachedData(plan: LogicalPlan): Option[CachedData] = {
cachedData.find(cd => plan.sameResult(cd.plan))
}
/** Replaces segments of the given logical plan with cached versions where possible. */
def useCachedData(plan: LogicalPlan): LogicalPlan = {
val newPlan = plan transformDown {
case command: IgnoreCachedData => command
case currentFragment =>
lookupCachedData(currentFragment).map { cached =>
// After cache lookup, we should still keep the hints from the input plan.
val hints = EliminateResolvedHint.extractHintsFromPlan(currentFragment)._2
val cachedPlan = cached.cachedRepresentation.withOutput(currentFragment.output)
// The returned hint list is in top-down order, we should create the hint nodes from
// right to left.
hints.foldRight[LogicalPlan](cachedPlan) { case (hint, p) =>
ResolvedHint(p, hint)
}
}.getOrElse(currentFragment)
}
newPlan transformAllExpressions {
case s: SubqueryExpression => s.withNewPlan(useCachedData(s.plan))
}
}
/**
* Tries to re-cache all the cache entries that contain `resourcePath` in one or more
* `HadoopFsRelation` node(s) as part of its logical plan.
*/
def recacheByPath(spark: SparkSession, resourcePath: String): Unit = {
val (fs, qualifiedPath) = {
val path = new Path(resourcePath)
val fs = path.getFileSystem(spark.sessionState.newHadoopConf())
(fs, fs.makeQualified(path))
}
recacheByCondition(spark, _.plan.find(lookupAndRefresh(_, fs, qualifiedPath)).isDefined)
}
/**
* Traverses a given `plan` and searches for the occurrences of `qualifiedPath` in the
* [[org.apache.spark.sql.execution.datasources.FileIndex]] of any [[HadoopFsRelation]] nodes
* in the plan. If found, we refresh the metadata and return true. Otherwise, this method returns
* false.
*/
private def lookupAndRefresh(plan: LogicalPlan, fs: FileSystem, qualifiedPath: Path): Boolean = {
plan match {
case lr: LogicalRelation => lr.relation match {
case hr: HadoopFsRelation =>
refreshFileIndexIfNecessary(hr.location, fs, qualifiedPath)
case _ => false
}
case DataSourceV2Relation(fileTable: FileTable, _, _, _, _) =>
refreshFileIndexIfNecessary(fileTable.fileIndex, fs, qualifiedPath)
case _ => false
}
}
/**
* Refresh the given [[FileIndex]] if any of its root paths starts with `qualifiedPath`.
* @return whether the [[FileIndex]] is refreshed.
*/
private def refreshFileIndexIfNecessary(
fileIndex: FileIndex,
fs: FileSystem,
qualifiedPath: Path): Boolean = {
val prefixToInvalidate = qualifiedPath.toString
val needToRefresh = fileIndex.rootPaths
.map(_.makeQualified(fs.getUri, fs.getWorkingDirectory).toString)
.exists(_.startsWith(prefixToInvalidate))
if (needToRefresh) fileIndex.refresh()
needToRefresh
}
}
|
goldmedal/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/CacheManager.scala
|
Scala
|
apache-2.0
| 12,211
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
/* NOTE
* Most of this file is copy-pasted from
* https://github.com/scala/scala-partest-interface
* It is unfortunately not configurable enough, hence the duplication
*/
package scala.tools.partest
package scalajs
import scala.language.reflectiveCalls
import _root_.sbt.testing._
import java.net.URLClassLoader
import java.io.File
object Framework {
// as partest is not driven by test classes discovered by sbt, need to add this marker fingerprint to definedTests
val fingerprint = new AnnotatedFingerprint { def isModule = true; def annotationName = "partest" }
// TODO how can we export `fingerprint` so that a user can just add this to their build.sbt
// definedTests in Test += new sbt.TestDefinition("partest", fingerprint, true, Array())
}
class Framework extends _root_.sbt.testing.Framework {
def fingerprints: Array[Fingerprint] = Array[Fingerprint](Framework.fingerprint)
def name: String = "partest"
def runner(args: Array[String], remoteArgs: Array[String], testClassLoader: ClassLoader): _root_.sbt.testing.Runner =
new Runner(args, remoteArgs, testClassLoader)
}
/** Represents one run of a suite of tests.
*/
case class Runner(args: Array[String], remoteArgs: Array[String], testClassLoader: ClassLoader) extends _root_.sbt.testing.Runner {
/** Returns an array of tasks that when executed will run tests and suites determined by the
* passed <code>TaskDef</code>s.
*
* <p>
* Each returned task, when executed, will run tests and suites determined by the
* test class name, fingerprints, "explicitly specified" field, and selectors of one of the passed <code>TaskDef</code>s.
* </p>
*
* <p>
* This <code>tasks</code> method may be called with <code>TaskDef</code>s containing the same value for <code>testClassName</code> but
* different fingerprints. For example, if both a class and its companion object were test classes, the <code>tasks</code> method could be
* passed an array containing <code>TaskDef</code>s with the same name but with a different value for <code>fingerprint.isModule</code>.
* </p>
*
* <p>
* A test framework may "reject" a requested task by returning no <code>Task</code> for that <code>TaskDef</code>.
* </p>
*
* @param taskDefs the <code>TaskDef</code>s for requested tasks
* @return an array of <code>Task</code>s
* @throws IllegalStateException if invoked after <code>done</code> has been invoked.
*/
def tasks(taskDefs: Array[TaskDef]): Array[_root_.sbt.testing.Task] =
taskDefs map (PartestTask(_, args): _root_.sbt.testing.Task)
/** Indicates the client is done with this <code>Runner</code> instance.
*
* @return a possibly multi-line summary string, or the empty string if no summary is provided -- TODO
*/
def done(): String = ""
}
/** Run partest in this VM. Assumes we're running in a forked VM!
*
* TODO: make configurable
*/
case class PartestTask(taskDef: TaskDef, args: Array[String]) extends Task {
// Get scala version through test name
val scalaVersion = taskDef.fullyQualifiedName.stripPrefix("partest-")
/** Executes this task, possibly returning to the client new tasks to execute. */
def execute(eventHandler: EventHandler, loggers: Array[Logger]): Array[Task] = {
val forkedCp = scala.util.Properties.javaClassPath
val classLoader = new URLClassLoader(forkedCp.split(java.io.File.pathSeparator).map(new File(_).toURI.toURL))
if (Runtime.getRuntime().maxMemory() / (1024*1024) < 800)
loggers foreach (_.warn(s"""Low heap size detected (~ ${Runtime.getRuntime().maxMemory() / (1024*1024)}M). Please add the following to your build.sbt: javaOptions in Test += "-Xmx1G""""))
val maybeOptions =
ScalaJSPartestOptions(args, str => loggers.foreach(_.error(str)))
maybeOptions foreach { options =>
val runner = SBTRunner(
Framework.fingerprint, eventHandler, loggers,
new File(s"../../partest/fetchedSources/${scalaVersion}"),
classLoader, null, null, Array.empty[String], Array("run", "pos", "neg"), options, scalaVersion)
try runner.run()
catch {
case ex: ClassNotFoundException =>
loggers foreach { l => l.error("Please make sure partest is running in a forked VM by including the following line in build.sbt:\nfork in Test := true") }
throw ex
}
}
Array()
}
type SBTRunner = { def run(): Unit }
// use reflection to instantiate scala.tools.partest.scalajs.ScalaJSSBTRunner,
// casting to the structural type SBTRunner above so that method calls on the result will be invoked reflectively as well
private def SBTRunner(partestFingerprint: Fingerprint, eventHandler: EventHandler, loggers: Array[Logger], testRoot: File, testClassLoader: URLClassLoader, javaCmd: File, javacCmd: File, scalacArgs: Array[String], args: Array[String], options: ScalaJSPartestOptions, scalaVersion: String): SBTRunner = {
// The test root for partest is read out through the system properties, not passed as an argument
System.setProperty("partest.root", testRoot.getAbsolutePath)
// Partests take at least 5h. We double, just to be sure. (default is 4 hours)
System.setProperty("partest.timeout", "10 hours")
val runnerClass = Class.forName("scala.tools.partest.scalajs.ScalaJSSBTRunner")
runnerClass.getConstructors()(0).newInstance(partestFingerprint, eventHandler, loggers, testClassLoader, javaCmd, javacCmd, scalacArgs, args, options, scalaVersion).asInstanceOf[SBTRunner]
}
/** A possibly zero-length array of string tags associated with this task. */
def tags: Array[String] = Array()
}
|
scala-js/scala-js
|
partest/src/main/scala/scala/tools/partest/scalajs/PartestInterface.scala
|
Scala
|
apache-2.0
| 5,931
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.nio.ByteBuffer
import kafka.api.ApiUtils._
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.utils.Utils._
object BrokerEndPoint {
  private val uriParseExp = """\[?([0-9a-zA-Z\-%._:]*)\]?:([0-9]+)""".r
/**
* BrokerEndPoint URI is host:port or [ipv6_host]:port
* Note that unlike EndPoint (or listener) this URI has no security information.
*/
def parseHostPort(connectionString: String): Option[(String, Int)] = {
connectionString match {
case uriParseExp(host, port) => try Some(host, port.toInt) catch { case _: NumberFormatException => None }
case _ => None
}
}
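  // Expected parse results (illustrative values):
  //   parseHostPort("broker1:9092") == Some(("broker1", 9092))
  //   parseHostPort("[::1]:9092")   == Some(("::1", 9092))
  //   parseHostPort("broker1")      == None  // the port is mandatory
  //   parseHostPort("broker1:x")    == None  // a non-numeric port fails the regex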
/**
* BrokerEndPoint URI is host:port or [ipv6_host]:port
* Note that unlike EndPoint (or listener) this URI has no security information.
*/
def createBrokerEndPoint(brokerId: Int, connectionString: String): BrokerEndPoint = {
parseHostPort(connectionString).map { case (host, port) => new BrokerEndPoint(brokerId, host, port) }.getOrElse {
throw new KafkaException("Unable to parse " + connectionString + " to a broker endpoint")
}
}
def readFrom(buffer: ByteBuffer): BrokerEndPoint = {
val brokerId = buffer.getInt()
val host = readShortString(buffer)
val port = buffer.getInt()
BrokerEndPoint(brokerId, host, port)
}
}
/**
* BrokerEndpoint is used to connect to specific host:port pair.
* It is typically used by clients (or brokers when connecting to other brokers)
* and contains no information about the security protocol used on the connection.
* Clients should know which security protocol to use from configuration.
* This allows us to keep the wire protocol with the clients unchanged where the protocol is not needed.
*/
case class BrokerEndPoint(id: Int, host: String, port: Int) {
def connectionString(): String = formatAddress(host, port)
def writeTo(buffer: ByteBuffer): Unit = {
buffer.putInt(id)
writeShortString(buffer, host)
buffer.putInt(port)
}
def sizeInBytes: Int =
4 + /* broker Id */
4 + /* port */
shortStringLength(host)
override def toString: String = {
s"BrokerEndPoint(id=$id, host=$host:$port)"
}
}
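/*
 * Worked size sketch (illustrative values, assuming shortStringLength is the two-byte length
 * prefix plus the UTF-8 bytes of the host): BrokerEndPoint(1, "broker1", 9092) serialises to
 * 4 (id) + 2 + 7 (host) + 4 (port) = 17 bytes, and connectionString() is "broker1:9092".
 */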
|
KevinLiLu/kafka
|
core/src/main/scala/kafka/cluster/BrokerEndPoint.scala
|
Scala
|
apache-2.0
| 3,003
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.RowOrdering
import org.apache.spark.sql.types._
/**
* Helper functions to check for valid data types.
*/
object TypeUtils {
def checkForNumericExpr(dt: DataType, caller: String): TypeCheckResult = {
if (dt.isInstanceOf[NumericType] || dt == NullType) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(s"$caller requires numeric types, not $dt")
}
}
def checkForOrderingExpr(dt: DataType, caller: String): TypeCheckResult = {
if (RowOrdering.isOrderable(dt)) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(s"$caller does not support ordering on type $dt")
}
}
def checkForSameTypeInputExpr(types: Seq[DataType], caller: String): TypeCheckResult = {
if (types.size <= 1) {
TypeCheckResult.TypeCheckSuccess
} else {
val firstType = types.head
types.foreach { t =>
if (!t.sameType(firstType)) {
return TypeCheckResult.TypeCheckFailure(
s"input to $caller should all be the same type, but it's " +
types.map(_.simpleString).mkString("[", ", ", "]"))
}
}
TypeCheckResult.TypeCheckSuccess
}
}
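  // Illustrative behaviour (hypothetical caller name "coalesce"):
  // checkForSameTypeInputExpr(Seq(IntegerType, IntegerType), "coalesce") succeeds, while
  // Seq(IntegerType, StringType) yields a TypeCheckFailure naming both types; an empty or
  // single-element sequence succeeds trivially.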
def getNumeric(t: DataType): Numeric[Any] =
t.asInstanceOf[NumericType].numeric.asInstanceOf[Numeric[Any]]
def getInterpretedOrdering(t: DataType): Ordering[Any] = {
t match {
case i: AtomicType => i.ordering.asInstanceOf[Ordering[Any]]
case a: ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case s: StructType => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
case udt: UserDefinedType[_] => getInterpretedOrdering(udt.sqlType)
}
}
def compareBinary(x: Array[Byte], y: Array[Byte]): Int = {
for (i <- 0 until x.length; if i < y.length) {
val v1 = x(i) & 0xff
val v2 = y(i) & 0xff
val res = v1 - v2
if (res != 0) return res
}
x.length - y.length
}
}
|
esi-mineset/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/TypeUtils.scala
|
Scala
|
apache-2.0
| 2,913
|
package me.lsbengine.api.public
import com.github.nscala_time.time.Imports.DateTime
import me.lsbengine.api.ProjectsAccessor
import me.lsbengine.database.DatabaseAccessor
import me.lsbengine.database.model.{MongoCollections, Project}
import me.lsbengine.database.model.MongoFormats._
import reactivemongo.api.DefaultDB
import reactivemongo.bson.{BSONDateTime, BSONDocument}
import scala.concurrent.Future
class PublicProjectsAccessor(db: DefaultDB)
extends DatabaseAccessor[Project](db, MongoCollections.projectsCollectionName)
with ProjectsAccessor {
def getProject(id: Int): Future[Option[Project]] = {
val now = DateTime.now
val query = BSONDocument("id" -> id,
"published" -> BSONDocument(
"$lte" -> BSONDateTime(now.getMillis)
))
super.getItem(query)
}
def listProjects: Future[List[Project]] = {
val now = DateTime.now
val sort = BSONDocument("published" -> -1)
val query = BSONDocument("published" -> BSONDocument(
"$lte" -> BSONDateTime(now.getMillis)
))
super.getItems(query = query, sort = sort)
}
}
|
jrabasco/LSBEngine
|
src/main/scala/me/lsbengine/api/public/PublicProjectsAccessor.scala
|
Scala
|
mit
| 1,087
|
trait Observable {
type Handle
protected var callbacks = Map[Handle, this.type => Unit]()
def observe(callback : this.type => Unit) : Handle = {
val handle = createHandle(callback)
callbacks += (handle -> callback)
handle
}
def unobserve(handle : Handle) : Unit = {
callbacks -= handle
}
protected def notifyListeners() : Unit =
for(callback <- callbacks.values) callback(this)
/**
* Subclasses override this to provide their own callback disambiguation scheme.
*/
protected def createHandle(callback : this.type => Unit) : Handle
}
trait DefaultHandles extends Observable {
type Handle = (this.type => Unit)
protected def createHandle(callback : this.type => Unit) : Handle = callback
}
class VariableStore[X](private var value : X) extends Observable with DefaultHandles {
def get : X = value
def set(newValue : X) : Unit = {
value = newValue
notifyListeners()
}
override def toString : String = "VariableStore(" + value + ")"
}
/**
Welcome to Scala version 2.8.0.RC3 (OpenJDK 64-Bit Server VM, Java 1.6.0_18).
Type in expressions to have them evaluated.
Type :help for more information.
scala> val x = new VariableStore(5)
x: VariableStore[Int] = VariableStore(5)
scala> val handle = x.observe(println)
handle: (x.type) => Unit = <function1>
scala> x.set(2)
VariableStore(2)
scala> x.unobserve(handle)
scala> x.set(4)
*/
/**
* Type difference != runtime difference.
scala> val x = new VariableStore(5)
x: VariableStore[Int] = VariableStore(5)
scala> val y = new VariableStore(2)
y: VariableStore[Int] = VariableStore(2)
scala> val callback = println(_ : Any)
callback: (Any) => Unit = <function1>
scala> val handle1 = x.observe(callback)
handle1: (x.type) => Unit = <function1>
scala> val handle2 = y.observe(callback)
handle2: (y.type) => Unit = <function1>
scala> y.set(3)
VariableStore(3)
scala> x.set(5)
VariableStore(5)
scala> y.unobserve(handle1)
<console>:10: error: type mismatch;
found : (x.type) => Unit
required: (y.type) => Unit
y.unobserve(handle1)
^
scala> handle
handle1 handle2
scala> handle1 == handle2
res3: Boolean = true
*/
|
XClouded/t4f-core
|
scala/src/tmp/chapter6/dependent-types/callbacks.scala
|
Scala
|
apache-2.0
| 2,212
|
package beer.data.judgments
import java.io.File
import scala.io.Source
import scala.collection.mutable.{Map => MutableMap}
private class WMT14 (dir:String) {
val csv = dir+"/judgements-2014-05-14.csv"
val references_dir = dir+"/baselines/data/plain/references" // newstest2014-ref.en-fr
val system_dir = dir+"/baselines/data/plain/system-outputs/newstest2014" // lang pairs dirs
val langs = List("cs", "de", "hi", "fr", "ru", "en")
val lang_pairs = List("cs-en", "de-en", "hi-en", "fr-en", "ru-en", "en-cs", "en-de", "en-hi", "en-fr", "en-ru")
type LangPair = String
type Lang = String
type System = String
val system_sents = MutableMap[LangPair, MutableMap[System, Array[String]]]()
val ref_sents = MutableMap[LangPair, Array[String]]()
var judgments = List[Judgment]()
def load() : Unit = {
load_system_translations();
load_references();
load_csv("wmt14", csv);
}
private def load_csv(dataset_name:String, csv_fn:String) : Unit = {
var line_id = 0
val file_iterator = Source.fromFile(csv_fn).getLines()
    file_iterator.next() // skip the first line
for(line <- file_iterator){
val fields = line.split(",")
val src_lang_long = fields(0)
val tgt_lang_long = fields(1)
val src_lang_short = WMT14.long_to_short(src_lang_long)
val tgt_lang_short = WMT14.long_to_short(tgt_lang_long)
val sys_names = List(fields(7), fields(9), fields(11), fields(13), fields(15))
val rankings = List(fields(16).toInt, fields(17).toInt, fields(18).toInt, fields(19).toInt, fields(20).toInt);
val sentId = fields(2).toInt-1
val lp = s"$src_lang_short-$tgt_lang_short"
val sents : List[String] = sys_names.map{system_sents(lp)(_)(sentId)}
val ref : String = ref_sents(lp)(sentId)
judgments ::= new Judgment(dataset_name, src_lang_short, tgt_lang_short, sentId, sys_names, rankings, sents, ref)
}
}
private def load_system_translations() : Unit = {
for(lp <- lang_pairs){
val system_sents = scala.collection.mutable.Map[String, Array[String]]()
for(file <- WMT14.getListOfFiles(system_dir+"/"+lp)){
val system = file.getName
system_sents(system) = WMT14.loadContent(file)
}
this.system_sents(lp) = system_sents
}
}
private def load_references():Unit={
for(lp <- lang_pairs){
val fn = references_dir+"/newstest2014-ref."+lp
ref_sents(lp) = WMT14.loadContent(new File(fn))
}
}
}
object WMT14 {
def loadJudgments(dir:String) : List[Judgment] = {
val loader = new WMT14(dir)
loader.load()
loader.judgments
}
private def loadContent(file:File) : Array[String] = {
Source.fromFile(file, "UTF-8").getLines().toArray
}
private def getListOfFiles(dir: String):List[File] = {
val d = new File(dir)
if (d.exists && d.isDirectory) {
d.listFiles.filter(_.isFile).toList
} else {
List[File]()
}
}
private def long_to_short(lang:String) : String = {
lang match {
case "English" => "en"
case "Hindi" => "hi"
case "Czech" => "cs"
case "Russian" => "ru"
case "Spanish" => "es"
case "French" => "fr"
case "German" => "de"
}
}
private def short_to_long(lang:String) : String = {
lang match {
case "en" => "English"
case "hi" => "Hindi"
case "cs"|"cz" => "Czech"
case "ru" => "Russian"
case "es" => "Spanish"
case "fr" => "French"
case "de" => "German"
}
}
}
|
qingsongma/blend
|
tools/beer_2.0/src/beer/data/judgments/WMT14.scala
|
Scala
|
gpl-3.0
| 3,575
|
package name.abhijitsarkar.akka.service
import akka.NotUsed
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods.GET
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri.apply
import akka.stream.scaladsl.Source
import akka.stream.{Graph, SinkShape}
import akka.util.ByteString
import name.abhijitsarkar.akka.util.ActorPlumbing
import org.slf4j.LoggerFactory
class MeetupStreamingService(val sink: Graph[SinkShape[ByteString], NotUsed])(implicit val actorPlumbing: ActorPlumbing) {
private val log = LoggerFactory.getLogger(getClass())
private val baseUri = "http://stream.meetup.com/2/rsvps"
import actorPlumbing._
def stream = {
val httpRequest = HttpRequest(uri = baseUri, method = GET)
val flow = {
val host = httpRequest.uri.authority.host.address()
Http().newHostConnectionPoolHttps[Int](host)
}
Source.single(httpRequest -> 42)
.via(flow)
.flatMapConcat(_._1.get.entity.dataBytes)
.runWith(sink)
}
}
|
asarkar/akka
|
akka-streams-learning/meetup-streaming/src/main/scala/name/abhijitsarkar/akka/service/MeetupStreamingService.scala
|
Scala
|
gpl-3.0
| 1,017
|
package vonsim.webapp
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.annotation.JSName
import com.scalawarrior.scalajs.ace.Editor
import com.scalawarrior.scalajs.ace.Annotation
import com.scalawarrior.scalajs.ace.Position
import com.scalawarrior.scalajs.ace.Range
@ScalaJSDefined
@JSName("Ace")
trait MyAce extends js.Object {
def edit(): Editor
}
import js.Dynamic.global
import com.scalawarrior.scalajs.ace.IEditSession
import scala.io.Position
object Annotation {
def apply(row:Double,column:Double,text:String,`type`:String): Annotation =
js.Dynamic.literal(row = row, column= column,text = text, `type`=`type`).asInstanceOf[Annotation]
}
package object webapp {
lazy val myace: MyAce = global.ace.asInstanceOf[MyAce]
}
@js.native
@JSName("AceRange")
class AceRange protected () extends Range {
def this(startRow: Double, startColumn: Double, endRow: Double, endColumn: Double) = this()
}
|
facundoq/vonsim
|
src/main/scala/vonsim/webapp/MyAce.scala
|
Scala
|
agpl-3.0
| 983
|
package uk.gov.bis.levyApiMock.data.oauth2
import org.joda.time.DateTime
import scala.concurrent.{ExecutionContext, Future}
case class AuthRecord(
accessToken: String,
refreshToken: Option[String],
refreshedAt: Option[DateTime],
gatewayID: String,
scope: Option[String],
expiresIn: Long,
createdAt: DateTime,
clientID: String,
privileged: Option[Boolean]) {
val eighteenMonths: Long = 18 * 30 * 24 * 60 * 60 * 1000L
val accessTokenExpiresAt: Long = refreshedAt.getOrElse(createdAt).getMillis() + expiresIn * 1000L
val refreshTokenExpiresAt: Long = createdAt.getMillis() + eighteenMonths
val isPrivileged: Boolean = privileged.getOrElse(false)
def accessTokenExpired(referenceTimeInMills: Long): Boolean = accessTokenExpiresAt <= referenceTimeInMills
def refreshTokenExpired(referenceTimeInMills: Long): Boolean = refreshTokenExpiresAt <= referenceTimeInMills
}
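/*
 * Worked example with illustrative values: with createdAt at the epoch, refreshedAt = None and
 * expiresIn = 14400 (seconds), accessTokenExpiresAt is 14400 * 1000L = 14,400,000 ms (four
 * hours), while refreshTokenExpiresAt is the fixed eighteenMonths offset of 46,656,000,000 ms,
 * so accessTokenExpired(t) becomes true long before refreshTokenExpired(t).
 */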
trait AuthRecordOps {
def forRefreshToken(refreshToken: String)(implicit ec: ExecutionContext): Future[Option[AuthRecord]]
def forAccessToken(accessToken: String)(implicit ec: ExecutionContext): Future[Option[AuthRecord]]
def find(accessToken: String)(implicit ec: ExecutionContext): Future[Option[AuthRecord]]
def find(gatewayId: String, clientId: Option[String])(implicit ec: ExecutionContext): Future[Option[AuthRecord]]
def create(record: AuthRecord)(implicit ec: ExecutionContext): Future[Unit]
def deleteExistingAndCreate(existing: AuthRecord, created: AuthRecord)(implicit ec: ExecutionContext): Future[Unit]
}
|
SkillsFundingAgency/das-alpha-hmrc-api-mock
|
src/main/scala/uk/gov/bis/levyApiMock/data/oauth2/AuthRecordOps.scala
|
Scala
|
mit
| 1,724
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.codegen.calls
import java.lang.reflect.Method
import org.apache.flink.table.codegen.calls.CallGenerator._
import org.apache.flink.table.codegen.{CodeGenerator, GeneratedExpression}
/**
* Generates a function call that calls a method which returns the same type that it
* takes as first argument.
*/
class MultiTypeMethodCallGen(method: Method) extends CallGenerator {
override def generate(
codeGenerator: CodeGenerator,
operands: Seq[GeneratedExpression])
: GeneratedExpression = {
generateCallIfArgsNotNull(codeGenerator.nullCheck, operands.head.resultType, operands) {
(operandResultTerms) =>
s"""
|${method.getDeclaringClass.getCanonicalName}.
| ${method.getName}(${operandResultTerms.mkString(", ")})
""".stripMargin
}
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/codegen/calls/MultiTypeMethodCallGen.scala
|
Scala
|
apache-2.0
| 1,648
|
package com.sksamuel.elastic4s.requests.security.roles.admin
case class DeleteRoleResponse(found: Boolean)
|
sksamuel/elastic4s
|
elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/security/roles/admin/DeleteRoleResponse.scala
|
Scala
|
apache-2.0
| 107
|
package paperdoll.scalaz
import scalaz.ReaderT
import scalaz.Reader
import paperdoll.core.effect.Effects.sendTU
import scalaz.Functor
import paperdoll.core.effect.Effects
import paperdoll.core.layer.Layer
object ReaderTLayer {
def sendReaderT[F[_]: Functor, I, A](readerT: ReaderT[F, I, A]): Effects.Two[Reader_[I], Layer.Aux[F], A] =
sendTU[Reader[I, F[A]], F[A]](Reader(readerT.run))
}
|
m50d/paperdoll
|
scalaz/src/main/scala/paperdoll/scalaz/ReaderTLayer.scala
|
Scala
|
apache-2.0
| 398
|
package co.ledger.wallet.web.ripple.debug
import co.ledger.wallet.core.device.Device
import co.ledger.wallet.core.device.ripple.LedgerApi
import co.ledger.wallet.core.utils.{DerivationPath, HexUtils}
import co.ledger.wallet.core.wallet.ripple.{XRP, RippleAccount}
import co.ledger.wallet.web.ripple.services.{DeviceService, SessionService}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.scalajs.js.UndefOr
import scala.scalajs.js.annotation.JSExport
import scala.util.{Failure, Success}
/**
*
* TransactionDebugInterface
* ledger-wallet-ripple-chrome
*
* Created by Pierre Pollastri on 18/10/2016.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
trait TransactionDebugInterface extends BaseDebugInterface {
def wallet = SessionService.instance.currentSession.get.wallet
/*@JSExport
def getAddress(path: String): Future[String] = {
DeviceService.instance.lastConnectedDevice() flatMap {(d) =>
LedgerApi(d).derivePublicAddress(DerivationPath(path), false)
} map {(address) =>
log(s"Derive path [$path] => ${address.account.toChecksumString}")
address.account.toChecksumString
}
}
@JSExport
def getBalance(address: String): Future[String] = {
wallet.asInstanceOf[AbstractApiWalletClient].transactionRestClient.getAccountBalance(address) map {(balance) =>
log(s"Balance [$address] => ${balance.toEther.toString()} ETH")
balance.toEther.toString()
}
}
def getNonce(address: String): Future[Long] = {
wallet.asInstanceOf[AbstractApiWalletClient].transactionRestClient.getAccountNonce(address) map {(nonce) =>
log(s"Nonce [$address] => ${nonce.toLong}")
nonce.toLong
}
}
@JSExport
def signTransaction(path: String,
to: String,
value: String,
gasLimit: UndefOr[String],
gasPrice: UndefOr[String],
data: UndefOr[String]) = {
var device: LedgerApi = null
var nonce: Long = 0L
val limit = gasLimit.getOrElse("210000")
val sentData = data.map(HexUtils.decodeHex).getOrElse(Array.empty[Byte])
getGasPrice() flatMap {(p) =>
val sentGasPrice = gasPrice.getOrElse(p.toString())
getAddress(path) flatMap {(address) =>
getNonce(address) map {(n) =>
nonce = n
} flatMap {(_) =>
DeviceService.instance.lastConnectedDevice()
} flatMap {(d) =>
LedgerApi(d).signTransaction(
BigInt(nonce),
BigInt(sentGasPrice),
BigInt(limit),
DerivationPath(path),
RippleAccount(to),
(BigDecimal(value.replace(',', '.').replace(" ", "")) * BigDecimal(10).pow(18)).toBigInt(),
sentData
)
}
}
} onComplete {
case Success(v) =>
log("Signed TX: "+ HexUtils.encodeHex(v.signedTx))
case Failure(ex) => ex.printStackTrace()
}
}
@JSExport
def pushTransaction(tx: String): Unit = {
wallet.pushTransaction(HexUtils.decodeHex(tx)) onComplete {
case Success(_) =>
log("Transaction pushed")
case Failure(ex) =>
ex.printStackTrace()
}
}
@JSExport
def getGasPrice(): Future[BigInt] = {
wallet.estimatedGasPrice() map {(price) =>
log(s"Estimated gas price: ${price.toBigInt.toString()}")
price.toBigInt
}
}*/
}
|
LedgerHQ/ledger-wallet-ripple
|
src/main/scala/co/ledger/wallet/web/ripple/debug/TransactionDebugInterface.scala
|
Scala
|
mit
| 4,545
|
package com.wuyuntao.aeneas.migration.example.migrations
import com.wuyuntao.aeneas.migration.Migration
import com.wuyuntao.aeneas.migration.dsl.DbModifier
class CreateUserByIdViewTable extends Migration {
def version = 20151030185645595L
def up(db: DbModifier) = {
db.executeSql("""CREATE TABLE user_by_id_views (
| id timeuuid PRIMARY KEY,
| email text,
| username text,
| last_login_time timestamp
|)
|""".stripMargin)
}
def down(db: DbModifier) = {
db.executeSql("DROP TABLE user_by_id_views")
}
}
|
wuyuntao/Aeneas
|
aeneas-migration-example/src/main/scala/com/wuyuntao/aeneas/migration/example/migrations/V20151030185645595_CreateUserByIdViewTable.scala
|
Scala
|
apache-2.0
| 583
|
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatestexamples.fixture.funsuite
import org.scalatest.fixture.FixtureFunSuite
import java.io.FileReader
import java.io.FileWriter
import java.io.File
class WithTempFileFromConfigMapExampleSuite extends FixtureFunSuite {
type FixtureParam = FileReader
def withFixture(test: OneArgTest) {
import test.configMap
val FileName = configMap("tempFileName").asInstanceOf[String]
// Set up the temp file needed by the test
val writer = new FileWriter(FileName)
try {
writer.write("Hello, test!")
}
finally {
writer.close()
}
// Create the reader needed by the test
val reader = new FileReader(FileName)
try {
// Run the test using the temp file
test(reader)
}
finally {
// Close and delete the temp file
reader.close()
val file = new File(FileName)
file.delete()
}
}
test("reading from the temp file") { reader =>
var builder = new StringBuilder
var c = reader.read()
while (c != -1) {
builder.append(c.toChar)
c = reader.read()
}
assert(builder.toString === "Hello, test!")
}
test("first char of the temp file") { reader =>
assert(reader.read() === 'H')
}
}
|
kevinwright/scalatest
|
src/examples/scala/org/scalatestexamples/fixture/funsuite/WithTempFileFromConfigMapExampleSuite.scala
|
Scala
|
apache-2.0
| 1,830
|
// Copyright 2014-2018 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import sbt._, Keys._
import au.com.cba.omnia.uniform.core.standard.StandardProjectPlugin._
import au.com.cba.omnia.uniform.core.version.UniqueVersionPlugin._
import au.com.cba.omnia.uniform.dependency.UniformDependencyPlugin._
object build extends Build {
val omnitoolVersion = "1.15.9-20190730073144-b52646c"
lazy val standardSettings =
Defaults.coreDefaultSettings ++
uniformDependencySettings ++
uniform.docSettings("https://github.com/CommBank/answer") ++
Seq(updateOptions := updateOptions.value.withCachedResolution(true))
lazy val all = Project(
id = "all"
, base = file(".")
, settings =
standardSettings
++ uniform.project("answer-all", "au.com.cba.omnia.answer")
++ uniform.ghsettings
++ Seq(
publishArtifact := false
)
, aggregate = Seq(core, macros)
)
lazy val core = Project(
id = "core"
, base = file("core")
, settings =
standardSettings
++ uniform.project("answer-core", "au.com.cba.omnia.answer.core")
++ Seq(
libraryDependencies ++=
depend.scalaz()
++ depend.testing() ++ depend.time()
++ depend.omnia("omnitool-core", omnitoolVersion)
++ depend.omnia("omnitool-core", omnitoolVersion, "test").map(_ classifier "tests")
++ hadoopCP.modules.find(_.name == "commons-logging") // for scalikejdbc, depend on our CDH commons-logging (without the rest of CDH)
++ depend.scalikejdbc() ++ depend.hsqldb().map(_ % "test")
)
)
lazy val macros = Project(
id = "macros"
, base = file("macros")
, settings =
standardSettings
++ uniform.project("answer-macros", "au.com.cba.omnia.answer.macros")
).dependsOn(core % "compile->compile;test->test")
}
|
CommBank/answer
|
project/build.scala
|
Scala
|
apache-2.0
| 2,369
|
package aia.channels
// start with multi-jvm:test-only aia.channels.ReliableProxySampleSpec
import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers}
import akka.testkit.ImplicitSender
import akka.actor.{Props, Actor}
/**
* Hooks up MultiNodeSpec with ScalaTest
*/
import akka.remote.testkit.MultiNodeSpecCallbacks
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
trait STMultiNodeSpec
extends MultiNodeSpecCallbacks
with WordSpecLike
with MustMatchers
with BeforeAndAfterAll {
override def beforeAll() = multiNodeSpecBeforeAll()
override def afterAll() = multiNodeSpecAfterAll()
}
object ReliableProxySampleConfig extends MultiNodeConfig {
val client = role("Client")
val server = role("Server")
testTransport(on = true)
}
class ReliableProxySampleSpecMultiJvmNode1 extends ReliableProxySample
class ReliableProxySampleSpecMultiJvmNode2 extends ReliableProxySample
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import scala.concurrent.duration._
import concurrent.Await
import akka.contrib.pattern.ReliableProxy
class ReliableProxySample
extends MultiNodeSpec(ReliableProxySampleConfig)
with STMultiNodeSpec
with ImplicitSender {
import ReliableProxySampleConfig._
def initialParticipants = roles.size
"A MultiNodeSample" must {
"wait for all nodes to enter a barrier" in {
enterBarrier("startup")
}
"send to and receive from a remote node" in {
runOn(client) {
enterBarrier("deployed")
val pathToEcho = node(server) / "user" / "echo"
val echo = system.actorSelection(pathToEcho)
val proxy = system.actorOf(
ReliableProxy.props(pathToEcho, 500.millis), "proxy")
proxy ! "message1"
expectMsg("message1")
Await.ready(
testConductor.blackhole( client, server, Direction.Both),
1 second)
echo ! "DirectMessage"
proxy ! "ProxyMessage"
expectNoMsg(3 seconds)
Await.ready(
testConductor.passThrough( client, server, Direction.Both),
1 second)
expectMsg("ProxyMessage")
echo ! "DirectMessage2"
expectMsg("DirectMessage2")
}
runOn(server) {
system.actorOf(Props(new Actor {
def receive = {
case msg: AnyRef => {
sender() ! msg
}
}
}), "echo")
enterBarrier("deployed")
}
enterBarrier("finished")
}
}
}
|
RayRoestenburg/akka-in-action
|
chapter-channels/src/multi-jvm/scala/aia/channels/ProxyMultiJvm.scala
|
Scala
|
mit
| 2,518
|
package bot.line.model.event
trait Source {
val `type`: String
}
case class UserSource(id: String) extends Source {
override val `type`: String = "user"
}
case class GroupSource(id: String) extends Source {
override val `type`: String = "group"
}
case class RoomSource(id: String) extends Source {
override val `type`: String = "room"
}
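// A small illustrative sketch (not part of the original repo): pattern matching on the
// Source hierarchy to extract the id of whoever sent an event.
object SourceExample {
  def sourceId(source: Source): String = source match {
    case UserSource(id)  => id
    case GroupSource(id) => id
    case RoomSource(id)  => id
  }
}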
|
xoyo24/akka-http-line-bot
|
src/main/scala/bot/line/model/event/Source.scala
|
Scala
|
mit
| 349
|
import scala.collection.immutable.Map
import scala.collection.Seq
object ETL {
def transform(data: Map[Int, Seq[String]]): Map[String, Int] = {
Map(
data
.map { case (k, v) => v.zip(List.fill(v.length)(k)) }
.toSeq
.flatMap(x => x)
.map(t => (t._1.toLowerCase, t._2)): _*)
}
}
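// A minimal usage sketch (hypothetical sample data, not from the original repo):
// transform inverts a score -> words map into a lower-cased word -> score map.
object EtlExample extends App {
  val legacy = Map(1 -> Seq("A", "E"), 10 -> Seq("Q", "Z"))
  // yields a -> 1, e -> 1, q -> 10, z -> 10 (the Map's toString ordering may vary)
  println(ETL.transform(legacy))
}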
|
stanciua/exercism
|
scala/etl/src/main/scala/Etl.scala
|
Scala
|
mit
| 324
|
package net.tomasherman.specus.common.api.grid.config
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
object Constants {
val akkRemoteServerPort = 7117
val remoteServerConnectorId = "remoteServerConnector"
}
|
tomasherman/specus
|
common_api/src/main/scala/grid/config/Constants.scala
|
Scala
|
gpl-3.0
| 841
|
package com.geteit.rcouch.actors
import akka.actor.{Props, Actor, ActorLogging}
import com.geteit.rcouch.Settings.NodeConfig
import java.net.InetSocketAddress
import com.geteit.rcouch.memcached.Memcached
import com.geteit.rcouch.actors.NodeActor.MemcachedAddress
import com.geteit.rcouch.couchbase.Couchbase.Node
/**
* Single couchbase node.
*/
class NodeActor(node: Node, config: NodeConfig) extends Actor with ActorLogging {
import context._
val memcached = {
val MemcachedAddress(address) = node
system.actorOf(MemcachedIo.props(address, self, config.memcached))
}
val view = system.actorOf(ViewActor.props(node.couchApiBase.get))
override def preStart(): Unit = {
super.preStart()
}
def receive: Actor.Receive = {
case c: Memcached.Command => memcached.forward(c)
case c: ViewActor.Command => view.forward(c)
}
}
object NodeActor {
def apply(n: Node, c: NodeConfig) = Props.create(classOf[NodeActor], n, c)
object MemcachedAddress {
def unapply(n: Node): Option[InetSocketAddress] = {
val i = n.hostname.indexOf(':')
val host = if (i > 0) n.hostname.substring(0, i) else n.hostname
Some(new InetSocketAddress(host, n.ports.direct))
}
}
}
|
zbsz/reactive-couch
|
src/main/scala/com/geteit/rcouch/actors/NodeActor.scala
|
Scala
|
apache-2.0
| 1,225
|
package nl.lpdiy.incubator.gpio
import akka.actor.{Actor, Cancellable}
import scala.concurrent.duration.{FiniteDuration, _}
import scala.language.postfixOps
trait TimerTaskActor {
this: Actor =>
private var timer: Option[Cancellable] = None
def startTimer(callback: () => Unit, interval: FiniteDuration) = {
implicit val ec = context.system.dispatcher
timer = Some(context.system.scheduler.schedule(0 seconds, interval, new Runnable {
def run() = {
callback()
}
}))
}
def cancelTimer() = timer.map(_.cancel())
}
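// A hypothetical usage sketch (not part of the original repo, reusing the Actor and
// duration imports above): an actor mixes in TimerTaskActor, starts a periodic tick
// on preStart and cancels it on postStop.
class TickingActor extends Actor with TimerTaskActor {
  override def preStart(): Unit = startTimer(() => self ! "tick", 1.second)
  override def postStop(): Unit = {
    cancelTimer()
  }
  def receive: Receive = {
    case "tick" => println("tick")
  }
}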
|
dragoslav/incubator
|
core/src/main/scala/nl/lpdiy/incubator/gpio/TimerTaskActor.scala
|
Scala
|
apache-2.0
| 560
|
package controllers
import models.Product
import play.api.data.Form
import play.api.data.Forms._
import play.api.i18n.Messages
import play.api.mvc.{Flash, Action, Controller}
/**
* Authors: Hilton et al., 2013
* Re-Created by bpupadhyaya on 7/9/16.
*/
object Products extends Controller {
/**
*
* @param ean the EAN to check
   * @return true if the checksum is correct, false otherwise
*/
private def eanCheck(ean: Long) = {
    def sumDigits(digits: IndexedSeq[(Char,Int)]): Int = {
      // sum the numeric digit values (asDigit) rather than the characters' code points
      digits.map { _._1.asDigit }.sum
    }
val (singles, triples) = ean.toString.reverse.zipWithIndex.partition {
_._2 % 2 == 0
}
(sumDigits(singles) + sumDigits(triples) * 3) % 10 == 0
}
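  // Worked example (illustrative, not part of the original source): for the valid EAN
  // 5901234123457 the reversed even-position digits sum to 24 and the odd-position digits
  // to 22; 24 + 22 * 3 = 90 is divisible by 10, so eanCheck returns true.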
private def makeProductForm(error: String, constraint: (Long) => Boolean) = Form(
mapping (
"ean" -> longNumber.verifying("validation.ean.checksum", eanCheck _).verifying(
error, constraint),
"name" -> nonEmptyText,
"description" -> nonEmptyText
) (Product.apply) (Product.unapply)
)
private def isUniqueEan(ean: Long): Boolean = Product.findByEan(ean).isEmpty
private val productForm = makeProductForm("validation.ean.duplicate", isUniqueEan(_))
private def updateProductForm(ean: Long) =
makeProductForm("validation.ean.duplicate", {newEan =>
newEan == ean || isUniqueEan(newEan)
})
def list = Action { implicit request =>
Ok(views.html.products.list(Product.findAll))
}
def newProduct = Action { implicit request =>
val form = if(flash.get("error").isDefined) {
val errorForm = productForm.bind(flash.data)
errorForm
} else
productForm
Ok(views.html.products.editProduct(form))
}
def show(ean: Long) = Action { implicit request =>
Product.findByEan(ean).map { product =>
Ok(views.html.products.details(product))
}.getOrElse(NotFound)
}
def save = Action { implicit request =>
val newProductForm = productForm.bindFromRequest()
newProductForm.fold(
hasErrors = { form =>
Redirect(routes.Products.newProduct()).flashing(Flash(form.data) +
("error" -> Messages("validation.errors")))
},
success = { newProduct =>
Product.add(newProduct)
val successMessage = ("success" -> Messages("products.new.success", newProduct.name))
Redirect(routes.Products.show(newProduct.ean)).flashing(successMessage)
}
)
}
def edit(ean: Long) = Action { implicit request =>
val form = if (flash.get("error").isDefined)
updateProductForm(ean).bind(flash.data)
else
updateProductForm(ean).fill(Product.findByEan(ean).get)
Ok(views.html.products.editProduct(form, Some(ean)))
}
def update(ean: Long) = Action { implicit request =>
if(Product.findByEan(ean).isEmpty)
NotFound
else {
val updatedProductForm = updateProductForm(ean).bindFromRequest()
updatedProductForm.fold(
hasErrors = { form =>
Redirect(routes.Products.edit(ean)).flashing(Flash(form.data) +
("error" -> Messages("validation.errors")))
},
success = { updatedProduct =>
Product.remove(Product.findByEan(ean).get)
Product.add(updatedProduct)
val successMessage = "success" -> Messages("products.update.success", updatedProduct.name)
Redirect(routes.Products.show(updatedProduct.ean)).flashing(successMessage)
}
)
}
}
}
|
bpupadhyaya/PlayingWithPlay
|
PlayingWithPlayScala/app/controllers/Products.scala
|
Scala
|
mit
| 3,467
|
/*
* Copyright 2011 Simple Finance, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.banksimple.clipping.ManagementStrategies
import com.banksimple.clipping.{PersistentVar,PersistingStrategy,StateManagementStrategy, PersistenceError}
import java.util.concurrent._
import java.util.concurrent.locks.{ReentrantReadWriteLock,ReentrantLock}
trait SyncronousManagementStrategy[A] extends StateManagementStrategy[A] {
self: PersistentVar[A] with PersistingStrategy[A] =>
private val rwLock = new ReentrantReadWriteLock()
override def putIf(test: A => Boolean, produce: () => A): A = {
rwLock.writeLock().lock()
try {
val currentVal = get() // Get will get the readlock, but that's Okay
if (test(currentVal)) {
produce() match {
case v if (v != null) => {
storedValue = Some(v)
persist(v)
v
}
case _ => currentVal
}
} else {
currentVal
}
} catch {
case PersistenceError(cause) => {
log.error("Could not persist value because: %s. Continuing without persisting.".format(cause), cause)
get()
}
case x => throw x // We probably want to fail out aggressively here,
// it's almost certainly a code error.
} finally {
rwLock.writeLock().unlock()
}
}
override def get(): A = {
rwLock.readLock().lock()
try {
if (storedValue.isEmpty) { // Uninitialized case
storedValue = Some(
reify() match {
case Some(v) => v.asInstanceOf[A]
case _ => defaultValue
})
}
storedValue.get // What we actually want
} catch {
case PersistenceError(cause) => {
// This only occurs during the initial read, so populate with default
log.error("Problem attempting to reify var. Using default. Error: %s".format(cause))
if(storedValue.isEmpty) { storedValue = Some(defaultValue) }
storedValue.get
}
case e => {
// Almost certainly a code error we should pass to the user
log.error("Problem attempting to get() var. Error: %s".format(e), e)
throw e
}
} finally {
rwLock.readLock().unlock()
}
}
}
|
KirinDave/Clipping
|
src/main/scala/com/banksimple/clipping/ManagementStrategies/SynchronousManagementStrategy.scala
|
Scala
|
apache-2.0
| 2,786
|
package org.scalacoin.util
import org.scalacoin.protocol.script.ScriptSignature
import org.scalacoin.protocol.CompactSizeUIntImpl
import org.scalacoin.script.constant.ScriptNumberImpl
import org.scalatest.{FlatSpec, MustMatchers}
/**
* Created by chris on 2/8/16.
*/
class NumberUtilTest extends FlatSpec with MustMatchers with NumberUtil {
"NumberUtil" must "convert a positive hex number to its corresponding long number" in {
val hex = "01"
val long = toLong(hex)
long must be (1)
//127
val hex1 = "7f"
val long1 = toLong(hex1)
long1 must be (127)
//128
val hex2 = "8000"
val long2 = toLong(hex2)
long2 must be (128)
//32767
val hex3 = "ff7f"
val long3 = toLong(hex3)
long3 must be (32767)
//32768
val hex4 = "008000"
val long4 = toLong(hex4)
long4 must be (32768)
//20
val hex5 = "14"
val long5 = toLong(hex5)
long5 must be (20)
//0
val hex6 = "00"
val long6 = toLong(hex6)
long6 must be (0)
}
it must "convert a negative hex number to its corresponding long number" in {
//-1
val hex = "81"
val long = toLong(hex)
long must be (-1)
//-127
val hex1 = "ff"
val long1 = toLong(hex1)
long1 must be (-127)
//-128
val hex2 = "8080"
val long2 = toLong(hex2)
long2 must be (-128)
//-32767
val hex3 = "ffff"
val long3 = toLong(hex3)
long3 must be (-32767)
//-32768
val hex4 = "008080"
val long4 = toLong(hex4)
long4 must be (-32768)
}
it must "determine if a hex string is a positive number" in {
val hex = "01"
val hexIsPositive = isPositive(hex)
hexIsPositive must be (true)
//128
val hex1 = "8000"
val hexIsPositive1 = isPositive(hex1)
hexIsPositive1 must be (true)
val hex2 = "ff7f"
val hexIsPositive2 = isPositive(hex2)
hexIsPositive2 must be (true)
}
it must "determine if a hex string is a negative number" in {
//-1
val hex = "81"
val hexIsNegative = isNegative(hex)
hexIsNegative must be (true)
//-128
val hex1 = "8080"
val hexIsNegative1 = isNegative(hex1)
hexIsNegative1 must be (true)
//-32767
val hex2 = "ffff"
val hexIsNegative2 = isNegative(hex2)
hexIsNegative2 must be (true)
//must also work for bytes
isNegative(BitcoinSUtil.decodeHex(hex2)) must be (true)
}
it must "change a sign bit from negative to positive" in {
val hex = "ff"
val expectedHex = "7f"
BitcoinSUtil.encodeHex(changeSignBitToPositive(BitcoinSUtil.decodeHex(hex))) must be (expectedHex)
//-32767
val hex1 = "ffff"
val expectedHex1 = "7fff"
BitcoinSUtil.encodeHex(changeSignBitToPositive(hex1)) must be (expectedHex1)
}
it must "change a sign bit from positive to negative" in {
val hex = "01"
val expectedHex = "81"
BitcoinSUtil.encodeHex(changeSignBitToNegative(hex)) must be (expectedHex)
//32767
val hex1 = "7fff"
val expectedHex1 = "ffff"
BitcoinSUtil.encodeHex(changeSignBitToNegative(hex1)) must be (expectedHex1)
//128
val hex2 = "8000"
val expectedHex2 = "8000"
BitcoinSUtil.encodeHex(changeSignBitToNegative(hex2)) must be (expectedHex2)
}
it must "detect if the last two bytes are all zeros" in {
val hex = "00"
firstByteAllZeros(hex) must be (true)
val hex1 = "8001"
firstByteAllZeros(hex1) must be (false)
val hex2 = "80"
firstByteAllZeros(hex2) must be (false)
}
it must "serialize negative numbers to the correct hex value" in {
val hex = longToHex(-1)
val expectedHex = "81"
hex must be (expectedHex)
val hex1 = longToHex(-127)
val expectedHex1 = "ff"
hex1 must be (expectedHex1)
val hex2 = longToHex(-128)
val expectedHex2 = "8080"
hex2 must be (expectedHex2)
val hex3 = longToHex(-32767)
val expectedHex3 = "ffff"
hex3 must be (expectedHex3)
val hex4 = longToHex(-32768)
val expectedHex4 = "008080"
hex4 must be (expectedHex4)
}
it must "serialize a positive number to the correct hex value" in {
val hex = longToHex(0L)
val expectedHex = "00"
hex must be (expectedHex)
val hex1 = longToHex(1)
val expectedHex1 = "01"
hex1 must be (expectedHex1)
val hex2 = longToHex(127)
val expectedHex2 = "7f"
hex2 must be (expectedHex2)
val hex3 = longToHex(128)
val expectedHex3 = "8000"
hex3 must be (expectedHex3)
val hex4 = longToHex(32767)
val expectedHex4 = "ff7f"
hex4 must be (expectedHex4)
val hex5 = longToHex(32768)
val expectedHex5 = "008000"
hex5 must be (expectedHex5)
}
it must "parse a variable length integer (VarInt)" in {
val str = "fdfd00"
parseCompactSizeUInt(str) must be (CompactSizeUIntImpl(253,3))
val str1 = "00"
parseCompactSizeUInt(str1) must be (CompactSizeUIntImpl(0,1))
val str2 = "ffffffffff"
parseCompactSizeUInt(str2) must be (CompactSizeUIntImpl(4294967295L,9))
}
it must "parse a variable length integer the same from a tx input and a script sig" in {
parseCompactSizeUInt(TestUtil.txInput.head.scriptSignature) must be (TestUtil.txInput.head.scriptSigCompactSizeUInt)
}
it must "parse multiple variable length integers correctly for a multi input tx" in {
parseCompactSizeUInt(TestUtil.txInputs.head.scriptSignature) must be (TestUtil.txInputs.head.scriptSigCompactSizeUInt)
parseCompactSizeUInt(TestUtil.txInputs(1).scriptSignature) must be (TestUtil.txInputs(1).scriptSigCompactSizeUInt)
}
it must "parse the variable length integer of the empty script" in {
parseCompactSizeUInt(ScriptSignature.empty) must be (CompactSizeUIntImpl(0,1))
}
}
|
TomMcCabe/scalacoin
|
src/test/scala/org/scalacoin/util/NumberUtilTest.scala
|
Scala
|
mit
| 5,732
|
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa
import java.lang.reflect.InvocationTargetException
import javax.swing.SwingUtilities
import com.intellij.openapi.application.{ApplicationManager, Result}
import com.intellij.openapi.command.{CommandProcessor, WriteCommandAction}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Computable
import com.intellij.psi.{PsiClass, PsiMethod, PsiModifier, PsiNamedElement}
import org.argus.cit.intellij.jawa.lang.psi.api.toplevel.{JawaNamedElement, JawaTypeDefinition}
import org.argus.jawa.core.JawaClass
import org.jetbrains.annotations.NotNull
import scala.runtime.NonLocalReturnControl
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
package object extensions {
implicit class PsiClassExt(val clazz: PsiClass) extends AnyVal {
/**
* Second match branch is for Java only.
*/
def qualifiedName: String = {
clazz match {
case t: JawaTypeDefinition => t.qualifiedName
case _ => clazz.getQualifiedName
}
}
def constructors: Array[PsiMethod] = {
clazz match {
case c: JawaClass => c.constructors
case _ => clazz.getConstructors
}
}
// def isEffectivelyFinal: Boolean = clazz match {
// case jawaClass: JawaClass => jawaClass.hasFinalModifier
// case _ => clazz.hasModifierProperty(PsiModifier.FINAL)
// }
//
//
// def processPsiMethodsForNode(node: SignatureNodes.Node, isStatic: Boolean, isInterface: Boolean)
// (processMethod: PsiMethod => Unit, processName: String => Unit = _ => ()): Unit = {
//
// def concreteClassFor(typedDef: ScTypedDefinition): Option[PsiClass] = {
// if (typedDef.isAbstractMember) return None
// clazz match {
// case wrapper: PsiClassWrapper if wrapper.definition.isInstanceOf[ScObject] =>
// return Some(wrapper) //this is static case, when containing class should be wrapper
// case _ =>
// }
//
// ScalaPsiUtil.nameContext(typedDef) match {
// case m: ScMember =>
// m.containingClass match {
// case t: ScTrait =>
// val linearization = MixinNodes.linearization(clazz)
// .flatMap(_.extractClass(clazz.getProject)(clazz.typeSystem))
// var index = linearization.indexWhere(_ == t)
// while (index >= 0) {
// val cl = linearization(index)
// if (!cl.isInterface) return Some(cl)
// index -= 1
// }
// Some(clazz)
// case _ => None
// }
// case _ => None
// }
// }
//
// node.info.namedElement match {
// case fun: ScFunction if !fun.isConstructor =>
// val wrappers = fun.getFunctionWrappers(isStatic, isInterface = fun.isAbstractMember, concreteClassFor(fun))
// wrappers.foreach(processMethod)
// wrappers.foreach(w => processName(w.name))
// case method: PsiMethod if !method.isConstructor =>
// if (isStatic) {
// if (method.containingClass != null && method.containingClass.qualifiedName != "java.lang.Object") {
// processMethod(StaticPsiMethodWrapper.getWrapper(method, clazz))
// processName(method.getName)
// }
// }
// else {
// processMethod(method)
// processName(method.getName)
// }
// case t: ScTypedDefinition if t.isVal || t.isVar ||
// (t.isInstanceOf[ScClassParameter] && t.asInstanceOf[ScClassParameter].isCaseClassVal) =>
//
// PsiTypedDefinitionWrapper.processWrappersFor(t, concreteClassFor(t), node.info.name, isStatic, isInterface, processMethod, processName)
// case _ =>
// }
// }
//
// def namedElements: Seq[PsiNamedElement] = {
// clazz match {
// case td: ScTemplateDefinition =>
// td.members.flatMap {
// case holder: ScDeclaredElementsHolder => holder.declaredElements
// case named: ScNamedElement => Seq(named)
// case _ => Seq.empty
// }
// case _ => clazz.getFields ++ clazz.getMethods
// }
// }
}
implicit class PsiNamedElementExt(val named: PsiNamedElement) extends AnyVal {
/**
* Second match branch is for Java only.
*/
def name: String = {
named match {
case nd: JawaNamedElement => nd.name
case nd => nd.getName
}
}
}
def startCommand(project: Project, commandName: String)(body: => Unit): Unit = {
CommandProcessor.getInstance.executeCommand(project, new Runnable {
def run() {
inWriteAction {
body
}
}
}, commandName, null)
}
def inWriteAction[T](body: => T): T = {
val application = ApplicationManager.getApplication
if (application.isWriteAccessAllowed) body
else {
application.runWriteAction(
new Computable[T] {
def compute: T = body
}
)
}
}
def inWriteCommandAction[T](project: Project, commandName: String = "Undefined")(body: => T): T = {
val computable = new Computable[T] {
override def compute(): T = body
}
new WriteCommandAction[T](project, commandName) {
protected def run(@NotNull result: Result[T]) {
result.setResult(computable.compute())
}
}.execute.getResultObject
}
def inReadAction[T](body: => T): T = {
val application = ApplicationManager.getApplication
if (application.isReadAccessAllowed) body
else {
application.runReadAction(
new Computable[T] {
override def compute(): T = body
}
)
}
}
def invokeAndWait[T](body: => Unit) {
preservingControlFlow {
SwingUtilities.invokeAndWait(new Runnable {
def run() {
body
}
})
}
}
private def preservingControlFlow(body: => Unit) {
try {
body
} catch {
case e: InvocationTargetException => e.getTargetException match {
case control: NonLocalReturnControl[_] => throw control
case _ => throw e
}
}
}
implicit class PipedObject[T](val value: T) extends AnyVal {
def |>[R](f: T => R) = f(value)
}
}
|
arguslab/argus-cit-intellij
|
src/main/scala/org/argus/cit/intellij/jawa/extensions/package.scala
|
Scala
|
epl-1.0
| 6,680
|
package org.sandbox.chat.cluster
import scala.concurrent.duration.DurationInt
import org.sandbox.chat.SettingsActor
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.OneForOneStrategy
import akka.actor.Props
import akka.actor.SupervisorStrategy
import akka.actor.Terminated
import akka.actor.actorRef2Scala
import akka.cluster.Cluster
import akka.cluster.Member
import akka.util.Timeout
class ChatClusterReaper(watchee: ActorRef) extends Actor with ActorLogging with SettingsActor
with ClusterEventReceiver
{
import ChatClusterReaper._
override val cluster = Cluster(context.system)
override implicit val timeout = Timeout(1 second)
val clusterReapers =
new ChatClusterActors(ClusterReaperRole, context, timeout, log)
/** Try to restart faulty child actors up to 3 times. */
override val supervisorStrategy = OneForOneStrategy(3)(SupervisorStrategy.defaultDecider)
context.watch(watchee)
override def receive: Receive =
clusterEventReceive orElse handleShutdown
/** Upon termination of a child actor shutdown the actor system. */
def handleShutdown: Receive = {
case Terminated(actorRef) =>
log.warning(s"Shutting down, because ${actorRef.path} has terminated!")
shutdown
case Shutdown =>
clusterReapers foreach { case (_, reaper) => reaper ! Shutdown }
shutdown
}
/** Shutdown the actor system. */
protected def shutdown = context.system.shutdown()
override def onMemberUp(member: Member): Unit = clusterReapers.onMemberUp(member)
override def onMemberDown(member: Member): Unit = clusterReapers.onMemberDown(member)
override def onTerminated(actor: ActorRef): Unit = clusterReapers.onTerminated(actor)
}
object ChatClusterReaper {
def props(watchee: ActorRef) = Props(new ChatClusterReaper(watchee))
sealed trait ChatClusterReaperMsg
case object Shutdown extends ChatClusterReaperMsg
}
|
hustbill/ScalaDemo
|
src/main/scala/org/sandbox/chat/cluster/ChatClusterReaper.scala
|
Scala
|
gpl-2.0
| 1,939
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.util
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.hive.DistributionUtil
import org.apache.carbondata.common.CarbonIterator
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.dictionary.Dictionary
import org.apache.carbondata.core.carbon.datastore.block.{BlockletInfos, TableBlockInfo}
import org.apache.carbondata.core.carbon.datastore.SegmentTaskIndexStore
import org.apache.carbondata.core.carbon.querystatistics.{QueryStatistic, QueryStatisticsConstants}
import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory
import org.apache.carbondata.hadoop.{CarbonInputFormat, CarbonInputSplit}
import org.apache.carbondata.lcm.status.SegmentStatusManager
import org.apache.carbondata.scan.executor.QueryExecutor
import org.apache.carbondata.scan.executor.QueryExecutorFactory
import org.apache.carbondata.scan.expression.Expression
import org.apache.carbondata.scan.model.QueryModel
import org.apache.carbondata.scan.result.BatchResult
import org.apache.carbondata.scan.result.iterator.ChunkRowIterator
import org.apache.carbondata.spark.RawValue
import org.apache.carbondata.spark.load.CarbonLoaderUtil
import org.apache.carbondata.spark.util.QueryPlanUtil
class CarbonSparkPartition(rddId: Int, val idx: Int,
val locations: Array[String],
val tableBlockInfos: util.List[TableBlockInfo])
extends Partition {
override val index: Int = idx
// val serializableHadoopSplit = new SerializableWritable[Array[String]](locations)
override def hashCode(): Int = {
41 * (41 + rddId) + idx
}
}
/**
 * This RDD is used to perform queries on CarbonData files. Before sending tasks to scan
 * CarbonData files, this RDD leverages CarbonData's index information to do CarbonData file
 * level filtering on the driver side.
*/
class CarbonScanRDD[V: ClassTag](
sc: SparkContext,
queryModel: QueryModel,
filterExpression: Expression,
keyClass: RawValue[V],
@transient conf: Configuration,
tableCreationTime: Long,
schemaLastUpdatedTime: Long,
baseStoreLocation: String)
extends RDD[V](sc, Nil) with Logging {
override def getPartitions: Array[Partition] = {
var defaultParallelism = sparkContext.defaultParallelism
val statisticRecorder = CarbonTimeStatisticsFactory.createDriverRecorder()
val (carbonInputFormat: CarbonInputFormat[Array[Object]], job: Job) =
QueryPlanUtil.createCarbonInputFormat(queryModel.getAbsoluteTableIdentifier)
// initialise query_id for job
job.getConfiguration.set("query.id", queryModel.getQueryId)
val result = new util.ArrayList[Partition](defaultParallelism)
val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
val validAndInvalidSegments = new SegmentStatusManager(queryModel.getAbsoluteTableIdentifier)
.getValidAndInvalidSegments
// set filter resolver tree
try {
// before applying filter check whether segments are available in the table.
if (!validAndInvalidSegments.getValidSegments.isEmpty) {
val filterResolver = carbonInputFormat
.getResolvedFilter(job.getConfiguration, filterExpression)
CarbonInputFormat.setFilterPredicates(job.getConfiguration, filterResolver)
queryModel.setFilterExpressionResolverTree(filterResolver)
CarbonInputFormat
.setSegmentsToAccess(job.getConfiguration,
validAndInvalidSegments.getValidSegments
)
SegmentTaskIndexStore.getInstance()
.removeTableBlocks(validAndInvalidSegments.getInvalidSegments,
queryModel.getAbsoluteTableIdentifier
)
}
}
catch {
case e: Exception =>
LOGGER.error(e)
sys.error("Exception occurred in query execution :: " + e.getMessage)
}
// get splits
val splits = carbonInputFormat.getSplits(job)
if (!splits.isEmpty) {
val carbonInputSplits = splits.asScala.map(_.asInstanceOf[CarbonInputSplit])
queryModel.setInvalidSegmentIds(validAndInvalidSegments.getInvalidSegments)
val blockListTemp = carbonInputSplits.map(inputSplit =>
new TableBlockInfo(inputSplit.getPath.toString,
inputSplit.getStart, inputSplit.getSegmentId,
inputSplit.getLocations, inputSplit.getLength,
new BlockletInfos(inputSplit.getNumberOfBlocklets, 0, inputSplit.getNumberOfBlocklets)
)
)
var activeNodes = Array[String]()
if(blockListTemp.nonEmpty) {
activeNodes = DistributionUtil
.ensureExecutorsAndGetNodeList(blockListTemp.toArray, sparkContext)
}
defaultParallelism = sparkContext.defaultParallelism
val blockList = CarbonLoaderUtil.
distributeBlockLets(blockListTemp.asJava, defaultParallelism).asScala
if (blockList.nonEmpty) {
var statistic = new QueryStatistic()
// group blocks to nodes, tasks
val nodeBlockMapping =
CarbonLoaderUtil.nodeBlockTaskMapping(blockList.asJava, -1, defaultParallelism,
activeNodes.toList.asJava
)
statistic.addStatistics(QueryStatisticsConstants.BLOCK_ALLOCATION, System.currentTimeMillis)
statisticRecorder.recordStatisticsForDriver(statistic, queryModel.getQueryId())
statistic = new QueryStatistic()
var i = 0
// Create Spark Partition for each task and assign blocks
nodeBlockMapping.asScala.foreach { entry =>
entry._2.asScala.foreach { blocksPerTask => {
val tableBlockInfo = blocksPerTask.asScala.map(_.asInstanceOf[TableBlockInfo])
if (blocksPerTask.size() != 0) {
result
.add(new CarbonSparkPartition(id, i, Seq(entry._1).toArray, tableBlockInfo.asJava))
i += 1
}
}
}
}
val noOfBlocks = blockList.size
val noOfNodes = nodeBlockMapping.size
val noOfTasks = result.size()
logInfo(s"Identified no.of.Blocks: $noOfBlocks,"
+ s"parallelism: $defaultParallelism , " +
s"no.of.nodes: $noOfNodes, no.of.tasks: $noOfTasks"
)
statistic.addStatistics(QueryStatisticsConstants.BLOCK_IDENTIFICATION,
System.currentTimeMillis)
statisticRecorder.recordStatisticsForDriver(statistic, queryModel.getQueryId())
statisticRecorder.logStatisticsAsTableDriver()
result.asScala.foreach { r =>
val cp = r.asInstanceOf[CarbonSparkPartition]
logInfo(s"Node : " + cp.locations.toSeq.mkString(",")
+ ", No.Of Blocks : " + cp.tableBlockInfos.size()
)
}
} else {
logInfo("No blocks identified to scan")
}
}
else {
logInfo("No valid segments found to scan")
}
result.toArray(new Array[Partition](result.size()))
}
override def compute(thepartition: Partition, context: TaskContext): Iterator[V] = {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
val iter = new Iterator[V] {
var rowIterator: CarbonIterator[Array[Any]] = _
var queryStartTime: Long = 0
val queryExecutor = QueryExecutorFactory.getQueryExecutor()
try {
context.addTaskCompletionListener(context => {
clearDictionaryCache(queryModel.getColumnToDictionaryMapping)
logStatistics()
queryExecutor.finish
})
val carbonSparkPartition = thepartition.asInstanceOf[CarbonSparkPartition]
if(!carbonSparkPartition.tableBlockInfos.isEmpty) {
queryModel.setQueryId(queryModel.getQueryId + "_" + carbonSparkPartition.idx)
// fill table block info
queryModel.setTableBlockInfos(carbonSparkPartition.tableBlockInfos)
queryStartTime = System.currentTimeMillis
val carbonPropertiesFilePath = System.getProperty("carbon.properties.filepath", null)
logInfo("*************************" + carbonPropertiesFilePath)
if (null == carbonPropertiesFilePath) {
System.setProperty("carbon.properties.filepath",
System.getProperty("user.dir") + '/' + "conf" + '/' + "carbon.properties")
}
// execute query
rowIterator = new ChunkRowIterator(
queryExecutor.execute(queryModel).
asInstanceOf[CarbonIterator[BatchResult]]).asInstanceOf[CarbonIterator[Array[Any]]]
}
} catch {
case e: Exception =>
LOGGER.error(e)
if (null != e.getMessage) {
sys.error("Exception occurred in query execution :: " + e.getMessage)
} else {
sys.error("Exception occurred in query execution.Please check logs.")
}
}
var havePair = false
var finished = false
var recordCount = 0
override def hasNext: Boolean = {
if (!finished && !havePair) {
finished = (null == rowIterator) || (!rowIterator.hasNext)
havePair = !finished
}
!finished
}
override def next(): V = {
if (!hasNext) {
throw new java.util.NoSuchElementException("End of stream")
}
havePair = false
recordCount += 1
keyClass.getValue(rowIterator.next())
}
def clearDictionaryCache(columnToDictionaryMap: java.util.Map[String, Dictionary]) = {
if (null != columnToDictionaryMap) {
org.apache.carbondata.spark.util.CarbonQueryUtil
.clearColumnDictionaryCache(columnToDictionaryMap)
}
}
def logStatistics(): Unit = {
if (null != queryModel.getStatisticsRecorder) {
var queryStatistic = new QueryStatistic()
queryStatistic
.addFixedTimeStatistic(QueryStatisticsConstants.EXECUTOR_PART,
System.currentTimeMillis - queryStartTime
)
queryModel.getStatisticsRecorder.recordStatistics(queryStatistic)
// result size
queryStatistic = new QueryStatistic()
queryStatistic.addCountStatistic(QueryStatisticsConstants.RESULT_SIZE, recordCount)
queryModel.getStatisticsRecorder.recordStatistics(queryStatistic)
// print executor query statistics for each task_id
queryModel.getStatisticsRecorder.logStatisticsAsTableExecutor()
}
}
}
iter
}
/**
* Get the preferred locations where to launch this task.
*/
override def getPreferredLocations(partition: Partition): Seq[String] = {
val theSplit = partition.asInstanceOf[CarbonSparkPartition]
val firstOptionLocation = theSplit.locations.filter(_ != "localhost")
val tableBlocks = theSplit.tableBlockInfos
// node name and count mapping
val blockMap = new util.LinkedHashMap[String, Integer]()
tableBlocks.asScala.foreach(tableBlock => tableBlock.getLocations.foreach(
location => {
if (!firstOptionLocation.exists(location.equalsIgnoreCase(_))) {
val currentCount = blockMap.get(location)
if (currentCount == null) {
blockMap.put(location, 1)
} else {
blockMap.put(location, currentCount + 1)
}
}
}
)
)
val sortedList = blockMap.entrySet().asScala.toSeq.sortWith((nodeCount1, nodeCount2) => {
nodeCount1.getValue > nodeCount2.getValue
}
)
val sortedNodesList = sortedList.map(nodeCount => nodeCount.getKey).take(2)
firstOptionLocation ++ sortedNodesList
}
}
|
foryou2030/incubator-carbondata
|
integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
|
Scala
|
apache-2.0
| 12,626
|
package engine
trait Cache {
private var _cache: Map[String, Any] = Map()
def put[T](name: String, value: T) = _cache += (name -> value)
def get[T](name: String): Option[T] = {
if (_cache contains name)
Some(_cache(name).asInstanceOf[T])
else
None
}
def contains(name: String): Boolean = _cache contains name
}
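// A minimal sketch (hypothetical, not part of the original repo): mixing Cache into an
// anonymous instance and round-tripping a typed value; get is unchecked at runtime, so
// the caller chooses the expected type.
object CacheExample extends App {
  val cache = new Cache {}
  cache.put("answer", 42)
  println(cache.get[Int]("answer"))  // Some(42)
  println(cache.contains("missing")) // false
}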
|
mpod/scala-workflow
|
backend/src/main/scala/engine/Cache.scala
|
Scala
|
gpl-3.0
| 346
|
package io.coral.lib
import scala.collection.mutable.{ArrayBuffer, Queue, Stack}
case class Node[K](value:K, var left:Option[Node[K]], var right:Option[Node[K]],var parent:Option[Node[K]]) {
  def hasLeft: Boolean = left.isDefined
  def hasRight: Boolean = right.isDefined
  def hasParent: Boolean = parent.isDefined
def isLeaf:Boolean = !hasLeft && !hasRight
def isParent(n:Node[K]):Boolean = {
if (!isLeaf) {
val l = if (hasLeft) left.get else null
val r = if (hasRight) right.get else null
l==n || r==n
}
else false
}
}
abstract class BinaryTree[K] {
def add(value:K)
def remove(value:K):Boolean
def height:Int
def size:Int
}
class Tree[K](implicit ord:K=>Ordered[K]) extends BinaryTree[K] {
var root:Option[Node[K]] = None
private var count = 0
override def add(value:K) {
root match {
case None => root = Some(new Node[K](value, None, None, None)); count = 1
case Some(node) => if(insert(node,value)) count+=1
}
}
def insert(node:Node[K],newVal:K):Boolean= {
    if (newVal < node.value) { // strict comparison so equal values fall through to the duplicate case below
node match {
case Node(_, None, _, _) => node.left =
Some(new Node[K](newVal, None, None, Some(node)))
true
case Node(_, Some(left), _, _) => insert(left, newVal)
}
} else if (newVal > node.value) {
node match {
case Node(_, _, None, _) => node.right =
Some(new Node[K](newVal, None, None, Some(node)))
true
case Node(_, _, Some(right), _) => insert(right, newVal)
}
} else false //this removes the duplicate values from tree
}
override def remove(value:K):Boolean= {
root match {
case None => false
case Some(node) => {
binarySearch(value,node) match {
case None => false
case Some(node) => {
count-=1
delete(node)
true
}
}
}
}
}
def delete(node:Node[K]) {
node match {
case Node(value, None, None, Some(parent)) => updateParent(parent,value,None)
case Node(value,Some(child),None,Some(parent)) => {
updateParent(parent,value,Some(child))
child.parent = Some(parent)
}
case Node(value, None, Some(child), Some(parent)) => {
updateParent(parent,value,Some(child))
child.parent = Some(parent)
}
case Node(_, Some(child), None, None) => {
root = Some(child)
child.parent = None
}
case Node(_, None, Some(child), None) => {
root = Some(child)
child.parent = None
}
case Node(_, Some(left), Some(right), _) => {
var child = right
while(child.left!=None) {
child=child.left.get
}
node.parent match {
case Some(parent) => updateParent(parent,node.value,Some(child))
case None => root = Some(child)
}
child.left = node.left
child.right = node.right
left.parent = Some(child)
right.parent = Some(child)
if (child.hasParent && child.parent.get.hasLeft && child.parent.get.left.get == child)
child.parent.get.left=None
else child.parent.get.right=None
child.parent = node.parent
}
case _ =>
}
def updateParent(parent:Node[K], value:K, newChild:Option[Node[K]]) {
if (value< parent.value) parent.left = newChild
else parent.right = newChild
}
}
def binarySearch(value:K, node:Node[K]): Option[Node[K]]= {
if (value == node.value)
Some(node)
else if (value <= node.value) {
node match {
case Node(_,None,_,_) => None
case Node(_,Some(left),_,_) => binarySearch(value,left)
}
} else {
node match{
case Node(_,_,None,_) => None
case Node(_,_,Some(right),_) => binarySearch(value,right)
}
}
}
def inorder:Seq[K]= {
val nodes = new ArrayBuffer[K]()
val stack = new Stack[Node[K]]()
if (size!=0) {
var cur = root
while(!stack.isEmpty || cur!=None) {
cur match {
case Some(node) => {
stack.push(node)
cur = node.left
}
case None=> {
val tmp = stack.pop()
nodes += tmp.value
cur = tmp.right
}
}
}
}
nodes
}
def preorder:Seq[K]= {
val nodes = new ArrayBuffer[K]()
val stack = new Stack[Node[K]]()
if (size!=0) {
var cur = root
while(!stack.isEmpty || cur!=None) {
cur match {
case Some(node) => {
stack.push(node)
nodes += node.value
cur = node.left
}
case None=> {
val tmp = stack.pop()
cur = tmp.right
}
}
}
}
nodes
}
def postorder: Seq[K]= {
val nodes = new ArrayBuffer[K]()
val stack = new Stack[Node[K]]()
if (size!=0) {
var prev:Option[Node[K]] = None
stack.push(root.get)
while(!stack.isEmpty) {
val cur = stack.top
prev match {
case None=> if (cur.hasLeft) stack.push(cur.left.get) else if (cur.hasRight) stack.push(cur.right.get)
case Some(node)=>{
if(!cur.isParent(node) && cur.hasLeft) stack.push(cur.left.get)
else if (!cur.isParent(node) && cur.hasRight) stack.push(cur.right.get)
else if (cur.isParent(node) && cur.hasRight && cur.hasLeft && cur.left.get==node) stack.push(cur.right.get)
else {
stack.pop()
nodes+=cur.value
}
}
}
prev=Some(cur)
}
}
nodes
}
// def postorder(node:Option[Node[K]]) {
// node match{
// case None=>
// case Some(n)=>{
// postorder(n.left)
// postorder(n.right)
// println(n.value)
// }
// }
// }
override def toString:String= {
postorder.mkString(" : ")
}
override def height:Int= depth(root) - 1
def depth(node:Option[Node[K]]):Int = {
node match {
case None => 0
case Some(n) => 1 + scala.math.max(depth(n.left), depth(n.right))
}
}
def size = count
}
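// Illustrative usage sketch (not part of the original repo): build an ordered Int tree
// and traverse it; inorder yields the values in ascending order.
object TreeExample extends App {
  val tree = new Tree[Int]()
  List(5, 3, 8, 1).foreach(tree.add)
  println(tree.size)    // 4
  println(tree.inorder) // ArrayBuffer(1, 3, 5, 8)
  tree.remove(3)
  println(tree.inorder) // ArrayBuffer(1, 5, 8)
}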
|
daishichao/coral
|
runtime-api/src/main/scala/io/coral/lib/Tree.scala
|
Scala
|
apache-2.0
| 6,212
|
import java.lang.System._
import java.text.DecimalFormat
import com.datastax.driver.core.exceptions.InvalidQueryException
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql.CassandraConnector
import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Minutes, Duration, Seconds, StreamingContext}
/**
* @author <a href="mailto:kgrodzicki@gmail.com">Krzysztof Grodzicki</a> 13/02/16.
*/
object App {
object Model {
final case class OriginDest(origin: String, depDelayMinutes: Option[Double], dest: String)
}
def parseDouble(s: String) = try {
Some(s.toDouble)
} catch {
case _: Throwable => None
}
def main(args: Array[String]) {
if (args.length < 4) {
err.println(
s"""
           | Usage: App <kafkaBrokers> <topics> <cassandraBrokers> <timeout>
           | <kafkaBrokers> is a list of one or more Kafka brokers
           | <topics> is a list of one or more Kafka topics to consume from
           | <cassandraBrokers> is a list of one or more Cassandra brokers
| <timeout> await termination in minutes
|
""".stripMargin)
exit(1)
}
val Array(kafkaBrokers, topics, cassandraBrokers, timeout) = args
val batchDuration: Duration = Seconds(5)
val cassandraHost: String = cassandraBrokers.split(":")(0)
val cassandraPort: String = cassandraBrokers.split(":")(1)
    val keepAliveMs = batchDuration.+(Seconds(5)).milliseconds // keep the connection alive between batches
val conf = new SparkConf(true)
.set("spark.cassandra.connection.host", cassandraHost)
.set("spark.cassandra.connection.port", cassandraPort)
.set("spark.cassandra.auth.username", "cassandra")
.set("spark.cassandra.auth.password", "cassandra")
.set("spark.cassandra.connection.keep_alive_ms", s"$keepAlliveMs")
.set("spark.cassandra.output.consistency.level", "ANY") // no need for strong consistency here
.setAppName("Spark Task 2 group 2 question 2")
/** Creates the keyspace and table in Cassandra. */
CassandraConnector(conf).withSessionDo { session =>
session.execute(s"CREATE KEYSPACE IF NOT EXISTS capstone WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1 }")
session.execute(s"CREATE TABLE IF NOT EXISTS capstone.airport (code text PRIMARY KEY, top_carriers list<text>)")
try {
session.execute(s"alter table capstone.airport add top_dest list<text>")
}
catch {
case _: InvalidQueryException =>
//pass as column already exists
}
// session.execute(s"TRUNCATE capstone.airport")
}
val ssc = new StreamingContext(conf, batchDuration)
ssc.checkpoint("checkpoint")
val topicsSet = topics.split(",").toSet
val kafkaParams = Map[String, String]("metadata.broker.list" -> kafkaBrokers)
val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topicsSet)
val lines: DStream[String] = messages.map(_._2)
import Model.OriginDest
val formatter = new DecimalFormat("#.###")
val result = lines.map { line: String =>
// split each line
line.split(",") match {
case Array(origin, depDelayMinutes, dest) => OriginDest(origin, parseDouble(depDelayMinutes), dest)
}
}.filter(_.depDelayMinutes match {
case Some(d) => true
case _ => false
}).map {
// make a tuple for each airport
case a@OriginDest(origin, depDelayMinutes, dest) =>
if (depDelayMinutes.get <= 0.0)
// on time
(origin, (dest, 1))
else
(origin, (dest, 0))
}.groupByKey()
.updateStateByKey(updateState)
.map(a => {
val origin: String = a._1
val dests: Iterable[(String, Int)] = a._2
val topTenDest = dests.groupBy(_._1).map(each => {
val dest = each._1
val onTime = each._2.map(_._2)
val performance: Double = (onTime.sum / onTime.size.toDouble) * 100
(dest, performance)
}).toSeq.sortWith(_._2 > _._2)
.take(10)
.map(e => (e._1, formatter.format(e._2)))
(origin, topTenDest)
})
result.foreachRDD(_.saveToCassandra("capstone", "airport", SomeColumns("code", "top_dest")))
ssc.start()
ssc.awaitTerminationOrTimeout(Minutes(timeout.toLong).milliseconds)
ssc.stop(stopSparkContext = true, stopGracefully = true)
}
def updateState(newValues: Seq[Iterable[(String, Int)]], runningCount: Option[Iterable[(String, Int)]]): Option[Seq[(String, Int)]] = {
runningCount match {
case Some(rc) =>
Some(newValues.flatten ++ rc)
case _ =>
Some(newValues.flatten)
}
}
}
|
kgrodzicki/cloud-computing-specialization
|
capstone/workspace/task2/task2group2q2/src/main/scala/App.scala
|
Scala
|
mit
| 4,861
|
//-*- coding: utf-8-unix -*-
/**
* (C) IT Sky Consulting GmbH 2014
* http://www.it-sky-consulting.com/
* Author: Karl Brodowsky
* Date: 2014-05-20
* License: GPL v2 (See https://de.wikipedia.org/wiki/GNU_General_Public_License )
*
*/
object ClosureWithoutClass {
def main(args : Array[String]) : Unit = {
val x : Int = args(0).toInt
val y : Int = args(1).toInt
val f : ((Int) => Int) = adder(x);
val arr = (1 to y).map(f)
println(arr.toString)
}
def adder(x : Int) : ((Int) => Int) = {
(y => x+y)
}
}
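// Illustrative run (not part of the original source): with arguments "3 5" the closure
// returned by adder(3) is mapped over 1 to 5, printing Vector(4, 5, 6, 7, 8).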
|
8l/sysprogramming-examples
|
scala/ClosureWithoutClass.scala
|
Scala
|
gpl-2.0
| 541
|
package org.http4s
import org.http4s.Uri.{Authority, Host, IPv4, IPv6, RegName, Scheme}
import org.http4s.UriTemplate._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Success, Try}
/**
 * Simple representation of a URI Template that can be rendered as an
 * RFC 6570 conformant string.
*
* This model reflects only a subset of RFC6570.
*
* Level 1 and Level 2 are completely modeled and
* Level 3 features are limited to:
* - Path segments, slash-prefixed
* - Form-style query, ampersand-separated
* - Fragment expansion
*/
final case class UriTemplate(
scheme: Option[Scheme] = None,
authority: Option[Authority] = None,
path: Path = Nil,
query: UriTemplate.Query = Nil,
fragment: Fragment = Nil) {
/**
* Replaces any expansion type that matches the given `name`. If no matching
* `expansion` could be found the same instance will be returned.
*/
def expandAny[T: QueryParamEncoder](name: String, value: T): UriTemplate =
expandPath(name, value).expandQuery(name, value).expandFragment(name, value)
/**
* Replaces any expansion type in `fragment` that matches the given `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandFragment[T: QueryParamEncoder](name: String, value: T): UriTemplate = {
if (fragment.isEmpty) this
else copy(fragment = expandFragmentN(fragment, name, String.valueOf(value)))
}
/**
* Replaces any expansion type in `path` that matches the given `name`. If no
* matching `expansion` could be found the same instance will be returned.
*/
def expandPath[T: QueryParamEncoder](name: String, values: List[T]): UriTemplate =
copy(path = expandPathN(path, name, values.map(QueryParamEncoder[T].encode)))
/**
* Replaces any expansion type in `path` that matches the given `name`. If no
* matching `expansion` could be found the same instance will be returned.
*/
def expandPath[T: QueryParamEncoder](name: String, value: T): UriTemplate =
copy(path = expandPathN(path, name, QueryParamEncoder[T].encode(value)::Nil))
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery[T: QueryParamEncoder](name: String, values: List[T]): UriTemplate = {
if (query.isEmpty) this
else copy(query = expandQueryN(query, name, values.map(QueryParamEncoder[T].encode(_).value)))
}
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery(name: String): UriTemplate = expandQuery(name, List[String]())
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery[T: QueryParamEncoder](name: String, values: T*): UriTemplate =
expandQuery(name, values.toList)
override lazy val toString =
renderUriTemplate(this)
/**
   * If no expansions remain, a `Success` containing the corresponding `Uri` is
   * returned; otherwise a `Failure` is returned.
*/
def toUriIfPossible: Try[Uri] =
    if (containsExpansions(this)) Failure(new IllegalStateException(s"all expansions must be resolved to be convertible: $this"))
else Success(toUri(this))
}
object UriTemplate {
type Path = List[PathDef]
type Query = List[QueryDef]
type Fragment = List[FragmentDef]
protected val unreserved = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9') :+ '-' :+ '.' :+ '_' :+ '~').toSet
// protected val genDelims = ':' :: '/' :: '?' :: '#' :: '[' :: ']' :: '@' :: Nil
  // protected val subDelims = '!' :: '$' :: '&' :: '\'' :: '(' :: ')' :: '*' :: '+' :: ',' :: ';' :: '=' :: Nil
// protected val reserved = genDelims ::: subDelims
def isUnreserved(s: String): Boolean = s.forall(unreserved.contains)
protected def expandPathN(path: Path, name: String, values: List[QueryParameterValue]): Path = {
val acc = new ArrayBuffer[PathDef]()
def appendValues() = values foreach { v => acc.append(PathElm(v.value)) }
path foreach {
case p@PathElm(_) => acc.append(p)
case p@VarExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p@VarExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(VarExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p@ReservedExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p@ReservedExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(VarExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p@PathExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p@PathExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(PathExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def expandQueryN(query: Query, name: String, values: List[String]): Query = {
val acc = new ArrayBuffer[QueryDef]()
query.foreach {
case p@ParamElm(_, _) => acc.append(p)
case p@ParamVarExp(r, List(n)) =>
if (n == name) acc.append(ParamElm(r, values))
else acc.append(p)
case p@ParamVarExp(r, ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(r, values))
acc.append(ParamVarExp(r, ns.filterNot(_ == name)))
} else acc.append(p)
case p@ParamReservedExp(r, List(n)) =>
if (n == name) acc.append(ParamElm(r, values))
else acc.append(p)
case p@ParamReservedExp(r, ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(r, values))
acc.append(ParamReservedExp(r, ns.filterNot(_ == name)))
} else acc.append(p)
case p@ParamExp(Seq(n)) =>
if (n == name) acc.append(ParamElm(name, values))
else acc.append(p)
case p@ParamExp(ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(name, values))
acc.append(ParamExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p@ParamContExp(Seq(n)) =>
if (n == name) acc.append(ParamElm(name, values))
else acc.append(p)
case p@ParamContExp(ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(name, values))
acc.append(ParamContExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def expandFragmentN(fragment: Fragment, name: String, value: String): Fragment = {
val acc = new ArrayBuffer[FragmentDef]()
fragment.foreach {
case p@FragmentElm(_) => acc.append(p)
case p@SimpleFragmentExp(n) => if (n == name) acc.append(FragmentElm(value)) else acc.append(p)
case p@MultiFragmentExp(Seq(n)) => if (n == name) acc.append(FragmentElm(value)) else acc.append(p)
case p@MultiFragmentExp(ns) =>
if (ns.contains(name)) {
acc.append(FragmentElm(value))
acc.append(MultiFragmentExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def renderAuthority(a: Authority): String = a match {
case Authority(Some(u), h, None) => u + "@" + renderHost(h)
case Authority(Some(u), h, Some(p)) => u + "@" + renderHost(h) + ":" + p
case Authority(None, h, Some(p)) => renderHost(h) + ":" + p
case Authority(_, h, _) => renderHost(h)
case _ => ""
}
protected def renderHost(h: Host): String = h match {
case RegName(n) => n.toString
case IPv4(a) => a.toString
case IPv6(a) => "[" + a.toString + "]"
case _ => ""
}
protected def renderScheme(s: Scheme): String = s + ":"
protected def renderSchemeAndAuthority(t: UriTemplate): String = t match {
case UriTemplate(None, None, _, _, _) => ""
case UriTemplate(Some(s), Some(a), _, _, _) => renderScheme(s) + "//" + renderAuthority(a)
case UriTemplate(Some(s), None, _, _, _) => renderScheme(s)
case UriTemplate(None, Some(a), _, _, _) => renderAuthority(a)
}
protected def renderQuery(ps: Query): String = {
val parted = ps partition {
case ParamElm(_, _) => false
case ParamVarExp(_, _) => false
case ParamReservedExp(_, _) => false
case ParamExp(_) => true
case ParamContExp(_) => true
}
val elements = new ArrayBuffer[String]()
parted._2 foreach {
case ParamElm(n, Nil) => elements.append(n)
case ParamElm(n, List(v)) => elements.append(n + "=" + v)
case ParamElm(n, vs) => vs.foreach(v => elements.append(n + "=" + v))
case ParamVarExp(n, vs) => elements.append(n + "=" + "{" + vs.mkString(",") + "}")
case ParamReservedExp(n, vs) => elements.append(n + "=" + "{+" + vs.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
val exps = new ArrayBuffer[String]()
def separator = if (elements.isEmpty && exps.isEmpty) "?" else "&"
parted._1 foreach {
case ParamExp(ns) => exps.append("{" + separator + ns.mkString(",") + "}")
case ParamContExp(ns) => exps.append("{" + separator + ns.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
if (elements.isEmpty) exps.mkString
else "?" + elements.mkString("&") + exps.mkString
}
protected def renderFragment(f: Fragment): String = {
val elements = new mutable.ArrayBuffer[String]()
val expansions = new mutable.ArrayBuffer[String]()
f map {
case FragmentElm(v) => elements.append(v)
case SimpleFragmentExp(n) => expansions.append(n)
case MultiFragmentExp(ns) => expansions.append(ns.mkString(","))
}
if (elements.nonEmpty && expansions.nonEmpty) {
"#" + elements.mkString(",") + "{#" + expansions.mkString(",") + "}"
}
else if (elements.nonEmpty) {
"#" + elements.mkString(",")
}
else if (expansions.nonEmpty) {
"{#" + expansions.mkString(",") + "}"
}
else {
"#"
}
}
protected def renderFragmentIdentifier(f: Fragment): String = {
val elements = new mutable.ArrayBuffer[String]()
f map {
case FragmentElm(v) => elements.append(v)
case SimpleFragmentExp(_) => throw new IllegalStateException("SimpleFragmentExp cannot be converted to a Uri")
case MultiFragmentExp(_) => throw new IllegalStateException("MultiFragmentExp cannot be converted to a Uri")
}
if (elements.isEmpty) ""
else elements.mkString(",")
}
protected def buildQuery(q: Query): org.http4s.Query = {
val elements = Query.newBuilder
q map {
case ParamElm(n, Nil) => elements += ((n, None))
case ParamElm(n, List(v)) => elements += ((n, Some(v)))
case ParamElm(n, vs) => vs.foreach(v => elements += ((n, Some(v))))
case u => throw new IllegalStateException(s"${u.getClass.getName} cannot be converted to a Uri")
}
elements.result()
}
protected def renderPath(p: Path): String = p match {
case Nil => "/"
case ps =>
val elements = new ArrayBuffer[String]()
ps foreach {
case PathElm(n) => elements.append("/" + n)
case VarExp(ns) => elements.append("{" + ns.mkString(",") + "}")
case ReservedExp(ns) => elements.append("{+" + ns.mkString(",") + "}")
case PathExp(ns) => elements.append("{/" + ns.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
elements.mkString
}
protected def renderPathAndQueryAndFragment(t: UriTemplate): String = t match {
case UriTemplate(_, _, Nil, Nil, Nil) => "/"
case UriTemplate(_, _, Nil, Nil, f) => "/" + renderFragment(f)
case UriTemplate(_, _, Nil, query, Nil) => "/" + renderQuery(query)
case UriTemplate(_, _, Nil, query, f) => "/" + renderQuery(query) + renderFragment(f)
case UriTemplate(_, _, path, Nil, Nil) => renderPath(path)
case UriTemplate(_, _, path, query, Nil) => renderPath(path) + renderQuery(query)
case UriTemplate(_, _, path, Nil, f) => renderPath(path) + renderFragment(f)
case UriTemplate(_, _, path, query, f) => renderPath(path) + renderQuery(query) + renderFragment(f)
case _ => ""
}
protected def renderUriTemplate(t: UriTemplate): String = t match {
case UriTemplate(None, None, Nil, Nil, Nil) => "/"
case UriTemplate(Some(s), Some(a), Nil, Nil, Nil) => renderSchemeAndAuthority(t)
case UriTemplate(Some(s), Some(a), List(), Nil, Nil) => renderSchemeAndAuthority(t)
case UriTemplate(scheme, authority, path, params, fragment) => renderSchemeAndAuthority(t) + renderPathAndQueryAndFragment(t)
case _ => ""
}
protected def fragmentExp(f: FragmentDef): Boolean = f match {
case FragmentElm(_) => false
case SimpleFragmentExp(_) => true
case MultiFragmentExp(_) => true
}
protected def pathExp(p: PathDef): Boolean = p match {
case PathElm(n) => false
case VarExp(ns) => true
case ReservedExp(ns) => true
case PathExp(ns) => true
}
protected def queryExp(q: QueryDef): Boolean = q match {
case ParamElm(_, _) => false
case ParamVarExp(_, _) => true
case ParamReservedExp(_, _) => true
case ParamExp(_) => true
case ParamContExp(_) => true
}
protected def containsExpansions(t: UriTemplate): Boolean = t match {
case UriTemplate(_, _, Nil, Nil, Nil) => false
case UriTemplate(_, _, Nil, Nil, f) => f exists fragmentExp
case UriTemplate(_, _, Nil, q, Nil) => q exists queryExp
case UriTemplate(_, _, Nil, q, f) => (q exists queryExp) || (f exists fragmentExp)
case UriTemplate(_, _, p, Nil, Nil) => p exists pathExp
case UriTemplate(_, _, p, Nil, f) => (p exists pathExp) || (f exists fragmentExp)
case UriTemplate(_, _, p, q, Nil) => (p exists pathExp) || (q exists queryExp)
case UriTemplate(_, _, p, q, f) => (p exists pathExp) || (q exists queryExp) || (f exists fragmentExp)
}
protected def toUri(t: UriTemplate): Uri = t match {
case UriTemplate(s, a, Nil, Nil, Nil) => Uri(s, a)
case UriTemplate(s, a, Nil, Nil, f) => Uri(s, a, fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, Nil, q, Nil) => Uri(s, a, query = buildQuery(q))
case UriTemplate(s, a, Nil, q, f) => Uri(s, a, query = buildQuery(q), fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, p, Nil, Nil) => Uri(s, a, renderPath(p))
case UriTemplate(s, a, p, q, Nil) => Uri(s, a, renderPath(p), buildQuery(q))
case UriTemplate(s, a, p, Nil, f) => Uri(s, a, renderPath(p), fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, p, q, f) => Uri(s, a, renderPath(p), buildQuery(q), Some(renderFragmentIdentifier(f)))
}
sealed trait PathDef
/** Static path element */
final case class PathElm(value: String) extends PathDef
sealed trait QueryDef
sealed trait QueryExp extends QueryDef
/** Static query parameter element */
final case class ParamElm(name: String, values: List[String]) extends QueryDef
object ParamElm {
def apply(name: String): ParamElm = new ParamElm(name, Nil)
def apply(name: String, values: String*): ParamElm = new ParamElm(name, values.toList)
}
/**
* Simple string expansion for query parameter
*/
final case class ParamVarExp(name: String, variables: List[String]) extends QueryDef {
require(variables forall isUnreserved, "all variables must consist of unreserved characters")
}
object ParamVarExp {
def apply(name: String): ParamVarExp = new ParamVarExp(name, Nil)
def apply(name: String, variables: String*): ParamVarExp = new ParamVarExp(name, variables.toList)
}
/**
* Reserved string expansion for query parameter
*/
final case class ParamReservedExp(name: String, variables: List[String]) extends QueryDef {
require(variables forall isUnreserved, "all variables must consist of unreserved characters")
}
object ParamReservedExp {
def apply(name: String): ParamReservedExp = new ParamReservedExp(name, Nil)
def apply(name: String, variables: String*): ParamReservedExp = new ParamReservedExp(name, variables.toList)
}
/**
* URI Templates are similar to a macro language with a fixed set of macro
* definitions: the expression type determines the expansion process.
*
* The default expression type is simple string expansion (Level 1), wherein a
* single named variable is replaced by its value as a string after
* pct-encoding any characters not in the set of unreserved URI characters
* (<a href="http://tools.ietf.org/html/rfc6570#section-1.5">Section 1.5</a>).
*
* Level 2 templates add the plus ("+") operator, for expansion of values that
* are allowed to include reserved URI characters
* (<a href="http://tools.ietf.org/html/rfc6570#section-1.5">Section 1.5</a>),
* and the crosshatch ("#") operator for expansion of fragment identifiers.
*
* Level 3 templates allow multiple variables per expression, each
* separated by a comma, and add more complex operators for dot-prefixed
* labels, slash-prefixed path segments, semicolon-prefixed path
* parameters, and the form-style construction of a query syntax
* consisting of name=value pairs that are separated by an ampersand
* character.
*/
sealed trait ExpansionType
sealed trait FragmentDef
/** Static fragment element */
final case class FragmentElm(value: String) extends FragmentDef
/**
* Fragment expansion, crosshatch-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.4">Section 3.2.4</a>)
*/
final case class SimpleFragmentExp(name: String) extends FragmentDef {
require(name.nonEmpty, "at least one character must be set")
require(isUnreserved(name), "name must consist of unreserved characters")
}
/**
* Level 1 allows string expansion
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.2">Section 3.2.2</a>)
*
* Level 3 allows string expansion with multiple variables
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.2">Section 3.2.2</a>)
*/
final case class VarExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object VarExp {
def apply(names: String*): VarExp = new VarExp(names.toList)
}
/**
* Level 2 allows reserved string expansion
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.3">Section 3.2.3</a>)
*
* Level 3 allows reserved expansion with multiple variables
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.3">Section 3.2.3</a>)
*/
final case class ReservedExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object ReservedExp {
def apply(names: String*): ReservedExp = new ReservedExp(names.toList)
}
/**
* Fragment expansion with multiple variables, crosshatch-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.4">Section 3.2.4</a>)
*/
final case class MultiFragmentExp(names: List[String]) extends FragmentDef {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object MultiFragmentExp {
def apply(names: String*): MultiFragmentExp = new MultiFragmentExp(names.toList)
}
/**
* Path segments, slash-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.6">Section 3.2.6</a>)
*/
final case class PathExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object PathExp {
def apply(names: String*): PathExp = new PathExp(names.toList)
}
/**
* Form-style query, ampersand-separated
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.8">Section 3.2.8</a>)
*/
final case class ParamExp(names: List[String]) extends QueryExp {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object ParamExp {
def apply(names: String*): ParamExp = new ParamExp(names.toList)
}
/**
* Form-style query continuation
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.9">Section 3.2.9</a>)
*/
final case class ParamContExp(names: List[String]) extends QueryExp {
require(names.nonEmpty, "at least one name must be set")
require(names forall isUnreserved, "all names must consist of unreserved characters")
}
object ParamContExp {
def apply(names: String*): ParamContExp = new ParamContExp(names.toList)
}
}
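// A minimal usage sketch (the object and values below are illustrative additions, not part of the
// original http4s sources): build a template with a simple path variable and a form-style query
// expansion, expand both variables, and convert the fully resolved template into a Uri.
object UriTemplateUsageExample {
  def main(args: Array[String]): Unit = {
    val template = UriTemplate(
      path = List(PathElm("orders"), VarExp("id")),
      query = List(ParamExp("page")))
    // Renders roughly as "/orders{id}{?page}"
    println(template)
    // Once every remaining expansion has been replaced, toUriIfPossible succeeds.
    val resolved = template.expandPath("id", 42).expandQuery("page", 1)
    // Prints roughly Success(/orders/42?page=1)
    println(resolved.toUriIfPossible)
  }
}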
|
hvesalai/http4s
|
core/src/main/scala/org/http4s/UriTemplate.scala
|
Scala
|
apache-2.0
| 21,189
|
package io.sqooba.oss.timeseries
import java.util.concurrent.TimeUnit
import io.sqooba.oss.timeseries.immutable.{ContiguousTimeDomain, EmptyTimeSeries, TSEntry}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should
// scalastyle:off magic.number
// scalastyle:off file.size.limit
trait TimeSeriesTestBench extends should.Matchers { this: AnyFlatSpec =>
/**
* *Main test bench* for a timeseries implementation. This tests all the functions
   * defined by the TimeSeries trait for a non-empty, non-singleton (i.e. not a single TSEntry)
   * timeseries implementation. All the tests use double-valued series.
*
* @note The mapping functions are only tested without compression. Use
* 'nonEmptyNonSingletonDoubleTimeSeriesWithCompression' to test that.
*
* @param newTs constructor method for the timeseries implementation to test
*/
def nonEmptyNonSingletonDoubleTimeSeries(newTs: Seq[TSEntry[Double]] => TimeSeries[Double]): Unit = {
// Two contiguous entries
val contig2 = newTs(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)))
// Two entries with a gap in between
val discon2 = newTs(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)))
// Three entries, gap between first and second
val three = newTs(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)))
val anotherThree = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(10, 222d, 10), TSEntry(20, 444d, 10)))
val tri = newTs(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)))
it should "give correct values for at()" in {
// Check two contiguous values
assert(2 === contig2.size)
assert(contig2.nonEmpty)
assert(contig2.at(0).isEmpty)
assert(contig2.at(1).contains(111d))
assert(contig2.at(10).contains(111d))
assert(contig2.at(11).contains(222d))
assert(contig2.at(20).contains(222d))
assert(contig2.at(21).isEmpty)
// Check two non contiguous values
assert(discon2.size === 2)
assert(discon2.at(0).isEmpty)
assert(discon2.at(1).contains(111d))
assert(discon2.at(10).contains(111d))
assert(discon2.at(11).isEmpty)
assert(discon2.at(12).contains(222d))
assert(discon2.at(21).contains(222d))
assert(discon2.at(22).isEmpty)
}
it should "be correctly defined" in {
// Check two contiguous values
assert(!contig2.defined(0))
assert(contig2.defined(1))
assert(contig2.defined(10))
assert(contig2.defined(11))
assert(contig2.defined(20))
assert(!contig2.defined(21))
// Check two non contiguous values
assert(!discon2.defined(0))
assert(discon2.defined(1))
assert(discon2.defined(10))
assert(!discon2.defined(11))
assert(discon2.defined(12))
assert(discon2.defined(21))
assert(!discon2.defined(22))
}
it should "correctly trim on the left for contiguous entries" in {
// Two contiguous entries
// Left of the domain
assert(contig2.entries === contig2.trimLeft(0).entries)
assert(contig2.entries === contig2.trimLeft(1).entries)
// Trimming on the first entry
assert(Seq(TSEntry(2, 111d, 9), TSEntry(11, 222d, 10)) === contig2.trimLeft(2).entries)
assert(Seq(TSEntry(10, 111d, 1), TSEntry(11, 222d, 10)) === contig2.trimLeft(10).entries)
// Trimming at the boundary between entries:
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeft(11).entries)
// ... and on the second entry:
assert(Seq(TSEntry(12, 222d, 9)) === contig2.trimLeft(12).entries)
assert(Seq(TSEntry(20, 222d, 1)) === contig2.trimLeft(20).entries)
// ... and after the second entry:
assert(contig2.trimLeft(21).isEmpty)
}
it should "correctly trim on the left for not contiguous entries" in {
// Two non-contiguous entries
// Trimming left of the first entry
assert(discon2.entries === discon2.trimLeft(0).entries)
assert(discon2.entries === discon2.trimLeft(1).entries)
// Trimming on the first entry
assert(Seq(TSEntry(2, 111d, 9), TSEntry(12, 222d, 10)) === discon2.trimLeft(2).entries)
assert(Seq(TSEntry(10, 111d, 1), TSEntry(12, 222d, 10)) === discon2.trimLeft(10).entries)
// Trimming between entries:
assert(Seq(TSEntry(12, 222d, 10)) === discon2.trimLeft(11).entries)
assert(Seq(TSEntry(12, 222d, 10)) === discon2.trimLeft(12).entries)
// ... and on the second
assert(Seq(TSEntry(13, 222d, 9)) === discon2.trimLeft(13).entries)
assert(Seq(TSEntry(21, 222d, 1)) === discon2.trimLeft(21).entries)
// ... and after the second entry:
assert(discon2.trimLeft(22).isEmpty)
// Trim on a three element time series with a discontinuity
// Left of the first entry
assert(three.entries === three.trimLeft(0).entries)
assert(three.entries === three.trimLeft(1).entries)
// Trimming on the first entry
assert(
Seq(TSEntry(2, 111d, 9), TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)) ===
three.trimLeft(2).entries
)
assert(
Seq(TSEntry(10, 111d, 1), TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)) ===
three.trimLeft(10).entries
)
// Trimming between entries:
assert(Seq(TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)) === three.trimLeft(11).entries)
assert(Seq(TSEntry(12, 222d, 10), TSEntry(22, 333d, 10)) === three.trimLeft(12).entries)
// ... and on the second
assert(Seq(TSEntry(13, 222d, 9), TSEntry(22, 333d, 10)) === three.trimLeft(13).entries)
assert(Seq(TSEntry(21, 222d, 1), TSEntry(22, 333d, 10)) === three.trimLeft(21).entries)
// ... on the border between second and third
assert(Seq(TSEntry(22, 333d, 10)) === three.trimLeft(22).entries)
// on the third
assert(Seq(TSEntry(23, 333d, 9)) === three.trimLeft(23).entries)
assert(Seq(TSEntry(31, 333d, 1)) === three.trimLeft(31).entries)
// ... and after every entry.
assert(three.trimLeft(32).isEmpty)
}
it should "correctly trim on the left for discrete entries" in {
// Two contiguous entries
// Left of the domain
assert(contig2.entries === contig2.trimLeftDiscrete(0, true).entries)
assert(contig2.entries === contig2.trimLeftDiscrete(0, false).entries)
assert(contig2.entries === contig2.trimLeftDiscrete(1, true).entries)
assert(contig2.entries === contig2.trimLeftDiscrete(1, false).entries)
// Trimming on the first entry
assert(contig2.entries === contig2.trimLeftDiscrete(2, true).entries)
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(2, false).entries)
assert(contig2.entries === contig2.trimLeftDiscrete(10, true).entries)
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(2, false).entries)
// Trimming at the boundary between entries:
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(11, true).entries)
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(11, false).entries)
// ... and on the second entry:
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(12, true).entries)
assert(Seq() === contig2.trimLeftDiscrete(12, false).entries)
assert(Seq(TSEntry(11, 222d, 10)) === contig2.trimLeftDiscrete(20, true).entries)
assert(Seq() === contig2.trimLeftDiscrete(20, false).entries)
// ... and after the second entry:
assert(contig2.trimLeftDiscrete(21, true).isEmpty)
assert(contig2.trimLeftDiscrete(21, false).isEmpty)
}
it should "correctly trim on the right for contiguous entries" in {
// Two contiguous entries:
// Right of the domain:
assert(contig2.entries === contig2.trimRight(22).entries)
assert(contig2.entries === contig2.trimRight(21).entries)
// On the second entry
contig2.trimRight(20).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 9))
contig2.trimRight(12).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 1))
// On the boundary
contig2.trimRight(11).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10))
// On the first entry
assert(TSEntry(1, 111d, 9) === contig2.trimRight(10))
assert(TSEntry(1, 111d, 1) === contig2.trimRight(2))
// Before the first entry
assert(contig2.trimRight(1).isEmpty)
assert(contig2.trimRight(0).isEmpty)
}
it should "correctly trim on the right for not contiguous entries" in {
// Two non-contiguous entries
// Trimming right of the second entry
assert(discon2.entries === discon2.trimRight(23).entries)
assert(discon2.entries === discon2.trimRight(22).entries)
// Trimming on the last entry
discon2.trimRight(21).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 9))
discon2.trimRight(13).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 1))
// Trimming between entries:
discon2.trimRight(12).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10))
discon2.trimRight(11).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 10))
// ... and on the first
discon2.trimRight(10).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 9))
discon2.trimRight(2).entries should contain theSameElementsInOrderAs
Seq(TSEntry(1, 111d, 1))
// ... and before the first entry:
assert(discon2.trimRight(1).isEmpty)
assert(discon2.trimRight(0).isEmpty)
// Trim on a three element time series with a discontinuity
// Right of the last entry
assert(three.entries === three.trimRight(33).entries)
assert(three.entries === three.trimRight(32).entries)
// Trimming on the last entry
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10), TSEntry(22, 333d, 9)) ===
three.trimRight(31).entries
)
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10), TSEntry(22, 333d, 1)) ===
three.trimRight(23).entries
)
// Trimming between 2nd and 3rd entries:
assert(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)) === three.trimRight(22).entries)
// ... and on the second
assert(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 9)) === three.trimRight(21).entries)
assert(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 1)) === three.trimRight(13).entries)
// ... on the border between 1st and 2nd
assert(Seq(TSEntry(1, 111d, 10)) === three.trimRight(12).entries)
assert(Seq(TSEntry(1, 111d, 10)) === three.trimRight(11).entries)
// ... on the first
assert(Seq(TSEntry(1, 111d, 9)) === three.trimRight(10).entries)
assert(Seq(TSEntry(1, 111d, 1)) === three.trimRight(2).entries)
// ... and after every entry.
assert(three.trimRight(1).isEmpty)
assert(three.trimRight(0).isEmpty)
}
it should "correctly trim on the right for discrete entries" in {
// Two contiguous entries:
// Right of the domain:
assert(contig2.entries === contig2.trimRightDiscrete(22, true).entries)
assert(contig2.entries === contig2.trimRightDiscrete(22, false).entries)
assert(contig2.entries === contig2.trimRightDiscrete(21, true).entries)
assert(contig2.entries === contig2.trimRightDiscrete(21, false).entries)
// On the second entry
assert(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)) === contig2.trimRightDiscrete(20, true).entries)
assert(Seq(TSEntry(1, 111d, 10)) === contig2.trimRightDiscrete(20, false).entries)
assert(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)) === contig2.trimRightDiscrete(12, true).entries)
assert(Seq(TSEntry(1, 111d, 10)) === contig2.trimRightDiscrete(12, false).entries)
// On the boundary
assert(Seq(TSEntry(1, 111d, 10)) === contig2.trimRightDiscrete(11, true).entries)
assert(Seq(TSEntry(1, 111d, 10)) === contig2.trimRightDiscrete(11, false).entries)
// On the first entry
assert(TSEntry(1, 111d, 10) === contig2.trimRightDiscrete(10, true))
assert(contig2.trimRightDiscrete(2, false).isEmpty)
// Before the first entry
assert(contig2.trimRightDiscrete(1, true).isEmpty)
assert(contig2.trimRightDiscrete(1, false).isEmpty)
assert(contig2.trimRightDiscrete(0, true).isEmpty)
assert(contig2.trimRightDiscrete(0, false).isEmpty)
}
it should "correctly split a timeseries of three entries" in {
val (l1, r1) = anotherThree.split(-1)
l1 shouldBe EmptyTimeSeries
r1.entries shouldBe anotherThree.entries
val (l2, r2) = anotherThree.split(1)
l2 shouldBe EmptyTimeSeries
r2.entries shouldBe anotherThree.entries
val (l3, r3) = anotherThree.split(1)
l3.entries shouldBe anotherThree.trimRight(1).entries
r3.entries shouldBe anotherThree.trimLeft(1).entries
val (l4, r4) = anotherThree.split(9)
l4.entries shouldBe anotherThree.trimRight(9).entries
r4.entries shouldBe anotherThree.trimLeft(9).entries
val (l5, r5) = anotherThree.split(10)
l5.entries shouldBe anotherThree.trimRight(10).entries
r5.entries shouldBe anotherThree.trimLeft(10).entries
val (l6, r6) = anotherThree.split(11)
l6.entries shouldBe anotherThree.trimRight(11).entries
r6.entries shouldBe anotherThree.trimLeft(11).entries
val (l7, r7) = anotherThree.split(19)
l7.entries shouldBe anotherThree.trimRight(19).entries
r7.entries shouldBe anotherThree.trimLeft(19).entries
val (l8, r8) = anotherThree.split(20)
l8.entries shouldBe anotherThree.trimRight(20).entries
r8.entries shouldBe anotherThree.trimLeft(20).entries
val (l9, r9) = anotherThree.split(21)
l9.entries shouldBe anotherThree.trimRight(21).entries
r9.entries shouldBe anotherThree.trimLeft(21).entries
val (l10, r10) = anotherThree.split(29)
l10.entries shouldBe anotherThree.trimRight(29).entries
r10.entries shouldBe anotherThree.trimLeft(29).entries
val (l11, r11) = anotherThree.split(30)
l11.entries shouldBe anotherThree.entries
r11 shouldBe EmptyTimeSeries
val (l12, r12) = anotherThree.split(31)
l12.entries shouldBe anotherThree.entries
r12 shouldBe EmptyTimeSeries
}
it should "correctly map a timeseries of three entries" in {
val up = anotherThree.map(_.toString + "asdf")
assert(up.size === 3)
assert(up.at(1).contains("111.0asdf"))
assert(up.at(10).contains("222.0asdf"))
assert(up.at(20).contains("444.0asdf"))
}
it should "correctly map a timeseries of three entries without compression" in {
val up = anotherThree.map(s => 42, compress = false)
assert(up.entries === Seq(TSEntry(1, 42, 9), TSEntry(10, 42, 10), TSEntry(20, 42, 10)))
}
it should "correctly map with time a timeseries of three entries" in {
val up = anotherThree.mapEntries(e => e.value.toString + "_" + e.timestamp)
assert(3 === up.size)
assert(up.at(1).contains("111.0_1"))
assert(up.at(10).contains("222.0_10"))
assert(up.at(20).contains("444.0_20"))
}
it should "correctly map with time a timeseries of three entries without compression" in {
val up = anotherThree.mapEntries(_ => 42, compress = false)
assert(up.entries === Seq(TSEntry(1, 42, 9), TSEntry(10, 42, 10), TSEntry(20, 42, 10)))
}
it should "correctly filter a timeseries of three entries" in {
val ts = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(15, 222d, 15), TSEntry(30, 444d, 20)))
assert(
ts.filterEntries(_.timestamp < 15) === TSEntry(1, 111d, 9)
)
assert(
ts.filterEntries(_.validity > 10).entries === Seq(TSEntry(15, 222d, 15), TSEntry(30, 444d, 20))
)
assert(
ts.filterEntries(_.value > 10).entries === ts.entries
)
assert(
ts.filterEntries(_.value < 0) === EmptyTimeSeries
)
}
it should "correctly filter the values of a timeseries of three entries" in {
val ts = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(15, 222d, 15), TSEntry(30, 444d, 20)))
assert(
ts.filter(_ > 10).entries === ts.entries
)
assert(
ts.filter(_ < 0) === EmptyTimeSeries
)
}
it should "filter & map the values of a timeseries of three entries" in {
val ts = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(15, 222d, 15), TSEntry(30, 444d, 20)))
ts.filterMap(
v => if (v > 300) Some(2 * v) else None,
compress = false
)
.entries shouldBe Seq(TSEntry(30, 888d, 20))
}
it should "filter & map the entries of a timeseries of three entries" in {
val ts = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(15, 222d, 15), TSEntry(30, 444d, 20)))
ts.filterMapEntries(
entry => if ((entry.value - entry.validity) % 2 == 0) Some(entry.timestamp) else None,
compress = false
)
.entries shouldBe Seq(TSEntry(1, 1L, 9), TSEntry(30, 30L, 20))
}
it should "not fill a contiguous timeseries of three entries" in {
val tri = anotherThree
assert(tri.fill(333d).entries === tri.entries)
}
it should "fill a timeseries of three entries" in {
val tri = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(20, 222d, 10), TSEntry(40, 444d, 10)))
assert(
tri.fill(333d).entries ===
Seq(
TSEntry(1, 111d, 9),
TSEntry(10, 333d, 10),
TSEntry(20, 222d, 10),
TSEntry(30, 333d, 10),
TSEntry(40, 444d, 10)
)
)
assert(
tri.fill(111d).entries ===
Seq(
TSEntry(1, 111d, 19),
TSEntry(20, 222d, 10),
TSEntry(30, 111d, 10),
TSEntry(40, 444d, 10)
)
)
assert(
tri.fill(222d).entries ===
Seq(
TSEntry(1, 111d, 9),
TSEntry(10, 222d, 30),
TSEntry(40, 444d, 10)
)
)
assert(
tri.fill(444d).entries ===
Seq(
TSEntry(1, 111d, 9),
TSEntry(10, 444d, 10),
TSEntry(20, 222d, 10),
TSEntry(30, 444d, 20)
)
)
}
it should "return the correct values" in {
assert(tri.values == tri.entries.map(_.value))
}
it should "return the correct head" in {
assert(tri.head === TSEntry(1, 111d, 10))
}
it should "return the correct head option" in {
assert(tri.headOption.contains(TSEntry(1, 111d, 10)))
}
it should "return the correct head value" in {
assert(tri.headValue === 111d)
}
it should "return the correct head value option" in {
assert(tri.headValueOption.contains(111d))
}
it should "return the correct last" in {
assert(tri.last === TSEntry(22, 333d, 10))
}
it should "return the correct last option" in {
assert(tri.lastOption.contains(TSEntry(22, 333d, 10)))
}
it should "return the correct last value" in {
assert(tri.lastValue === 333d)
}
it should "return the correct last value option" in {
assert(tri.lastValueOption.contains(333d))
}
it should "append entries correctly" in {
val tri =
newTs(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10)))
// Appending after...
tri.append(TSEntry(32, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10), TSEntry(32, "Hy", 10))
tri.append(TSEntry(31, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10), TSEntry(31, "Hy", 10))
// Appending on last entry
tri.append(TSEntry(30, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 9), TSEntry(30, "Hy", 10))
tri.append(TSEntry(22, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 1), TSEntry(22, "Hy", 10))
// ... just after and on second entry
tri.append(TSEntry(21, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, "Hy", 10))
tri.append(TSEntry(20, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 9), TSEntry(20, "Hy", 10))
tri.append(TSEntry(12, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 1), TSEntry(12, "Hy", 10))
// ... just after and on first
tri.append(TSEntry(11, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, "Hy", 10))
tri.append(TSEntry(10, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 9), TSEntry(10, "Hy", 10))
tri.append(TSEntry(2, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 1), TSEntry(2, "Hy", 10))
// And complete override
tri.append(TSEntry(1, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, "Hy", 10))
}
it should "prepend entries correctly" in {
val tri =
newTs(Seq(TSEntry(5, 111d, 6), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10)))
// Prepending before...
tri.prepend(TSEntry(1, "Hy", 3), compress = false).entries shouldBe
Seq(TSEntry(1, "Hy", 3), TSEntry(5, 111d, 6), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
tri.prepend(TSEntry(2, "Hy", 3), compress = false).entries shouldBe
Seq(TSEntry(2, "Hy", 3), TSEntry(5, 111d, 6), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
// Overlaps with first entry
tri.prepend(TSEntry(1, "Hy", 5), compress = false).entries shouldBe
Seq(TSEntry(1, "Hy", 5), TSEntry(6, 111d, 5), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
tri.prepend(TSEntry(5, "Hy", 5), compress = false).entries shouldBe
Seq(TSEntry(5, "Hy", 5), TSEntry(10, 111d, 1), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
tri.prepend(TSEntry(1, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(1, "Hy", 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
// ... second entry
tri.prepend(TSEntry(2, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(2, "Hy", 10), TSEntry(12, 222d, 9), TSEntry(21, 444d, 10))
tri.prepend(TSEntry(10, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(10, "Hy", 10), TSEntry(20, 222d, 1), TSEntry(21, 444d, 10))
tri.prepend(TSEntry(11, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(11, "Hy", 10), TSEntry(21, 444d, 10))
// ... third entry
tri.prepend(TSEntry(12, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(12, "Hy", 10), TSEntry(22, 444d, 9))
tri.prepend(TSEntry(20, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(20, "Hy", 10), TSEntry(30, 444d, 1))
// Complete override
tri.prepend(TSEntry(21, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(21, "Hy", 10))
tri.prepend(TSEntry(22, "Hy", 10), compress = false).entries shouldBe
Seq(TSEntry(22, "Hy", 10))
}
def testTs(startsAt: Long): TimeSeries[Double] =
newTs(
Seq(
TSEntry(startsAt, 123d, 10),
TSEntry(startsAt + 10, 234d, 10),
TSEntry(startsAt + 20, 345d, 10)
)
)
it should "append a multi-entry TS at various times on the entry" in {
val tri =
newTs(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10)))
// Append after all entries
tri.append(testTs(31), compress = false).entries shouldBe
tri.entries ++ testTs(31).entries
tri.append(testTs(32), compress = false).entries shouldBe
tri.entries ++ testTs(32).entries
// On last
tri.append(testTs(30), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 9)) ++ testTs(30).entries
tri.append(testTs(22), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 1)) ++ testTs(22).entries
tri.append(testTs(21), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)) ++ testTs(21).entries
// On second
tri.append(testTs(20), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 9)) ++ testTs(20).entries
tri.append(testTs(12), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 1)) ++ testTs(12).entries
tri.append(testTs(11), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 10)) ++ testTs(11).entries
// On first
tri.append(testTs(10), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 9)) ++ testTs(10).entries
tri.append(testTs(2), compress = false).entries shouldBe
Seq(TSEntry(1, 111d, 1)) ++ testTs(2).entries
tri.append(testTs(1), compress = false).entries shouldBe
testTs(1).entries
}
it should "prepend a multi-entry TS at various times on the entry" in {
val tri =
newTs(Seq(TSEntry(101, 111d, 10), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10)))
// Before all entries
tri.prepend(testTs(70), compress = false).entries shouldBe
testTs(70).entries ++ tri.entries
tri.prepend(testTs(71), compress = false).entries shouldBe
testTs(71).entries ++ tri.entries
// On first
tri.prepend(testTs(72), compress = false).entries shouldBe
testTs(72).entries ++ Seq(TSEntry(102, 111d, 9), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
tri.prepend(testTs(80), compress = false).entries shouldBe
testTs(80).entries ++ Seq(TSEntry(110, 111d, 1), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
tri.prepend(testTs(81), compress = false).entries shouldBe
testTs(81).entries ++ Seq(TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
// On second
tri.prepend(testTs(82), compress = false).entries shouldBe
testTs(82).entries ++ Seq(TSEntry(112, 222d, 9), TSEntry(121, 444d, 10))
tri.prepend(testTs(90), compress = false).entries shouldBe
testTs(90).entries ++ Seq(TSEntry(120, 222d, 1), TSEntry(121, 444d, 10))
tri.prepend(testTs(91), compress = false).entries shouldBe
testTs(91).entries ++ Seq(TSEntry(121, 444d, 10))
// On third
tri.prepend(testTs(92), compress = false).entries shouldBe
testTs(92).entries ++ Seq(TSEntry(122, 444d, 9))
tri.prepend(testTs(100), compress = false).entries shouldBe
testTs(100).entries ++ Seq(TSEntry(130, 444d, 1))
tri.prepend(testTs(101), compress = false).entries shouldBe
testTs(101).entries
tri.prepend(testTs(102), compress = false).entries shouldBe
testTs(102).entries
}
it should "do a step integral" in {
val tri = newTs(Seq(TSEntry(100, 1, 10), TSEntry(110, 2, 10), TSEntry(120, 3, 10)))
assert(
tri.stepIntegral(10, TimeUnit.SECONDS).entries ===
Seq(TSEntry(100, 10.0, 10), TSEntry(110, 30.0, 10), TSEntry(120, 60.0, 10))
)
val withSampling = TSEntry(100, 1, 30)
assert(
withSampling.stepIntegral(10, TimeUnit.SECONDS).entries ===
Seq(TSEntry(100, 10.0, 10), TSEntry(110, 20.0, 10), TSEntry(120, 30.0, 10))
)
}
it should "split up the entries of a timeseries" in {
val withSlicing = TSEntry(100, 1, 30)
assert(
withSlicing.splitEntriesLongerThan(10).entries ===
Seq(TSEntry(100, 1, 10), TSEntry(110, 1, 10), TSEntry(120, 1, 10))
)
assert(
withSlicing.splitEntriesLongerThan(20).entries ===
Seq(TSEntry(100, 1, 20), TSEntry(120, 1, 10))
)
}
it should "split a timeseries into buckets" in {
val buckets = Stream.from(0, 10).map(_.toLong)
val tri =
newTs(Seq(TSEntry(10, 1, 10), TSEntry(20, 2, 5), TSEntry(25, 3, 5)))
val result = tri.bucket(buckets)
val expected = Stream(
(0, EmptyTimeSeries),
(10, TSEntry(10, 1, 10)),
(20, newTs(Seq(TSEntry(20, 2, 5), TSEntry(25, 3, 5)))),
(30, EmptyTimeSeries)
)
(expected, result).zipped.foreach {
case ((eTs, eSeries), (rTs, rSeries)) =>
rTs shouldBe eTs
rSeries.entries shouldBe eSeries.entries
}
}
it should "do a sliding integral of a timeseries" in {
val triA = TimeSeries(
Seq(
TSEntry(10, 1, 10),
TSEntry(20, 2, 2),
TSEntry(22, 3, 10)
)
)
triA.slidingIntegral(2, 2, TimeUnit.SECONDS).entries shouldBe Seq(
TSEntry(10, 2, 2),
TSEntry(12, 4, 8),
TSEntry(20, 6, 2),
TSEntry(22, 10, 2),
TSEntry(24, 12, 8)
)
triA.slidingIntegral(4, 2, TimeUnit.SECONDS).entries shouldBe Seq(
TSEntry(10, 2, 2),
TSEntry(12, 4, 2),
TSEntry(14, 6, 6),
TSEntry(20, 8, 2),
TSEntry(22, 12, 2),
TSEntry(24, 16, 2),
TSEntry(26, 18, 6)
)
triA.slidingIntegral(12, 8, TimeUnit.SECONDS).entries shouldBe Seq(
TSEntry(10, 8.0, 8),
TSEntry(18, 24.0, 8),
TSEntry(26, 48.0, 4),
TSEntry(30, 40.0, 4)
)
}
it should "return an empty timeseries if one filters all values" in {
val ts = newTs(
Seq(
TSEntry(1, 1, 1),
TSEntry(2, 2, 2),
TSEntry(3, 3, 3)
)
)
assert(ts.filterEntries(_ => false) === EmptyTimeSeries)
}
it should "return a correct loose domain" in {
tri.looseDomain shouldBe ContiguousTimeDomain(tri.head.timestamp, tri.last.definedUntil)
}
it should "calculate the support ratio" in {
val threeFourths = newTs(Seq(TSEntry(1, 1234d, 2), TSEntry(4, 5678d, 1)))
threeFourths.supportRatio shouldBe 0.75
}
}
/**
* Tests the functions defined by the trait TimeSeries for a given implementation
   * that is capable of taking generic values. (It is tested with Strings here.)
*
* @note The mapping functions are only tested without compression. Use
* 'nonEmptyNonSingletonDoubleTimeSeriesWithCompression' to test that.
*
* @param newTsString constructor method for the timeseries implementation that
* shall be tested
*/
def nonEmptyNonSingletonGenericTimeSeries(
newTsString: Seq[TSEntry[String]] => TimeSeries[String]
): Unit = {
val threeStrings = newTsString(Seq(TSEntry(0, "Hi", 10), TSEntry(10, "Ho", 10), TSEntry(20, "Hu", 10)))
it should "correctly map a timeseries of three strings" in {
val up = threeStrings.map(s => s.toUpperCase())
assert(up.size === 3)
assert(up.at(0).contains("HI"))
assert(up.at(10).contains("HO"))
assert(up.at(20).contains("HU"))
}
it should "correctly map the entries of a timeseries of three strings" in {
val up = threeStrings.mapEntries(e => e.value.toUpperCase() + e.timestamp)
assert(3 === up.size)
assert(up.at(0).contains("HI0"))
assert(up.at(10).contains("HO10"))
assert(up.at(20).contains("HU20"))
}
it should "correctly filter a timeseries of three strings" in {
val ts = newTsString(Seq(TSEntry(0, "Hi", 10), TSEntry(15, "Ho", 15), TSEntry(30, "Hu", 20)))
assert(
ts.filterEntries(_.value.startsWith("H")).entries === ts.entries
)
assert(
ts.filterEntries(_.value.endsWith("H")) === EmptyTimeSeries
)
}
it should "fill a timeseries of three strings" in {
val tri = newTsString(Seq(TSEntry(0, "Hi", 10), TSEntry(20, "Ho", 10), TSEntry(40, "Hu", 10)))
assert(
tri.fill("Ha").entries ===
Seq(
TSEntry(0, "Hi", 10),
TSEntry(10, "Ha", 10),
TSEntry(20, "Ho", 10),
TSEntry(30, "Ha", 10),
TSEntry(40, "Hu", 10)
)
)
assert(
tri.fill("Hi").entries ===
Seq(
TSEntry(0, "Hi", 20),
TSEntry(20, "Ho", 10),
TSEntry(30, "Hi", 10),
TSEntry(40, "Hu", 10)
)
)
assert(
tri.fill("Ho").entries ===
Seq(
TSEntry(0, "Hi", 10),
TSEntry(10, "Ho", 30),
TSEntry(40, "Hu", 10)
)
)
assert(
tri.fill("Hu").entries ===
Seq(
TSEntry(0, "Hi", 10),
TSEntry(10, "Hu", 10),
TSEntry(20, "Ho", 10),
TSEntry(30, "Hu", 20)
)
)
}
}
/**
* *Main test bench* for a timeseries implementation. This tests all the functions
   * defined by the TimeSeries trait for a non-empty, non-singleton (i.e. not a single TSEntry)
   * timeseries implementation. All the tests use double-valued series.
*
* @param newTs constructor method for the timeseries implementation to test
*/
def nonEmptyNonSingletonDoubleTimeSeriesWithCompression(
newTs: Seq[TSEntry[Double]] => TimeSeries[Double]
): Unit = {
val anotherThree = newTs(Seq(TSEntry(1, 111d, 9), TSEntry(10, 222d, 10), TSEntry(20, 444d, 10)))
it should "correctly map a timeseries of three entries with compression" in {
val up = anotherThree.map(s => 42, compress = true)
up.entries shouldBe Seq(TSEntry(1, 42, 29))
}
it should "filter & map the entries of a timeseries of three entries correctly" in {
val ts = newTs(Seq(TSEntry(1, 111d, 15), TSEntry(15, 222d, 15), TSEntry(30, 444d, 20)))
ts.filterMapEntries(
entry => if (entry.timestamp < 25) Some(123.456) else None,
compress = true
)
.entries shouldBe Seq(TSEntry(1, 123.456, 29))
}
it should "correctly map with time a timeseries of three entries with compression" in {
val ts = anotherThree
val up = ts.mapEntries(_ => 42, compress = true)
assert(up.entries === Seq(TSEntry(1, 42, 29)))
}
it should "append entries correctly with compression" in {
val tri =
newTs(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10)))
// Appending after...
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10), TSEntry(32, "Hy", 10))
=== tri.append(TSEntry(32, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10), TSEntry(31, "Hy", 10))
=== tri.append(TSEntry(31, "Hy", 10)).entries
)
// Appending on last entry
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 9), TSEntry(30, "Hy", 10))
=== tri.append(TSEntry(30, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 1), TSEntry(22, "Hy", 10))
=== tri.append(TSEntry(22, "Hy", 10)).entries
)
// ... just after and on second entry
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, "Hy", 10))
=== tri.append(TSEntry(21, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 9), TSEntry(20, "Hy", 10))
=== tri.append(TSEntry(20, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 1), TSEntry(12, "Hy", 10))
=== tri.append(TSEntry(12, "Hy", 10)).entries
)
// ... just after and on first
assert(
Seq(TSEntry(1, 111d, 10), TSEntry(11, "Hy", 10))
=== tri.append(TSEntry(11, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 9), TSEntry(10, "Hy", 10))
=== tri.append(TSEntry(10, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, 111d, 1), TSEntry(2, "Hy", 10))
=== tri.append(TSEntry(2, "Hy", 10)).entries
)
// And complete override
assert(
Seq(TSEntry(1, "Hy", 10))
=== tri.append(TSEntry(1, "Hy", 10)).entries
)
}
it should "prepend entries correctly with compression" in {
val tri =
newTs(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10)))
// Prepending before...
assert(
Seq(TSEntry(-10, "Hy", 10), TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(-10, "Hy", 10)).entries
)
assert(
Seq(TSEntry(-9, "Hy", 10), TSEntry(1, 111d, 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(-9, "Hy", 10)).entries
)
// Overlaps with first entry
assert(
Seq(TSEntry(-8, "Hy", 10), TSEntry(2, 111d, 9), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(-8, "Hy", 10)).entries
)
assert(
Seq(TSEntry(0, "Hy", 10), TSEntry(10, 111d, 1), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(0, "Hy", 10)).entries
)
assert(
Seq(TSEntry(1, "Hy", 10), TSEntry(11, 222d, 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(1, "Hy", 10)).entries
)
// ... second entry
assert(
Seq(TSEntry(2, "Hy", 10), TSEntry(12, 222d, 9), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(2, "Hy", 10)).entries
)
assert(
Seq(TSEntry(10, "Hy", 10), TSEntry(20, 222d, 1), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(10, "Hy", 10)).entries
)
assert(
Seq(TSEntry(11, "Hy", 10), TSEntry(21, 444d, 10))
=== tri.prepend(TSEntry(11, "Hy", 10)).entries
)
// ... third entry
assert(
Seq(TSEntry(12, "Hy", 10), TSEntry(22, 444d, 9))
=== tri.prepend(TSEntry(12, "Hy", 10)).entries
)
assert(
Seq(TSEntry(20, "Hy", 10), TSEntry(30, 444d, 1))
=== tri.prepend(TSEntry(20, "Hy", 10)).entries
)
// Complete override
assert(
Seq(TSEntry(21, "Hy", 10))
=== tri.prepend(TSEntry(21, "Hy", 10)).entries
)
assert(
Seq(TSEntry(22, "Hy", 10))
=== tri.prepend(TSEntry(22, "Hy", 10)).entries
)
}
def testTs(startsAt: Long): TimeSeries[Double] =
newTs(
Seq(
TSEntry(startsAt, 123d, 10),
TSEntry(startsAt + 10, 234d, 10),
TSEntry(startsAt + 20, 345d, 10)
)
)
it should "append a multi-entry TS at various times on the entry with compression" in {
val tri =
newTs(Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 10), TSEntry(31, 444d, 10)))
// Append after all entries
assert(tri.entries ++ testTs(41).entries === tri.append(testTs(41)).entries)
assert(tri.entries ++ testTs(42).entries === tri.append(testTs(42)).entries)
// On last
assert(
Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 10), TSEntry(31, 444d, 9)) ++ testTs(40).entries
=== tri.append(testTs(40)).entries
)
assert(
Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 10), TSEntry(31, 444d, 1)) ++ testTs(32).entries
=== tri.append(testTs(32)).entries
)
assert(
Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 10)) ++ testTs(31).entries
=== tri.append(testTs(31)).entries
)
// On second
assert(
Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 9)) ++ testTs(30).entries
=== tri.append(testTs(30)).entries
)
assert(
Seq(TSEntry(11, 111d, 10), TSEntry(21, 222d, 1)) ++ testTs(22).entries
=== tri.append(testTs(22)).entries
)
assert(
Seq(TSEntry(11, 111d, 10)) ++ testTs(21).entries
=== tri.append(testTs(21)).entries
)
// On first
assert(
Seq(TSEntry(11, 111d, 9)) ++ testTs(20).entries
=== tri.append(testTs(20)).entries
)
assert(
Seq(TSEntry(11, 111d, 1)) ++ testTs(12).entries
=== tri.append(testTs(12)).entries
)
assert(testTs(11).entries === tri.append(testTs(11)).entries)
assert(testTs(10).entries === tri.append(testTs(10)).entries)
}
it should "prepend a multi-entry TS at various times on the entry with compression" in {
val tri =
newTs(Seq(TSEntry(101, 111d, 10), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10)))
// Before all entries
assert(testTs(70).entries ++ tri.entries === tri.prepend(testTs(70)).entries)
assert(testTs(71).entries ++ tri.entries === tri.prepend(testTs(71)).entries)
// On first
assert(
testTs(72).entries ++ Seq(TSEntry(102, 111d, 9), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
=== tri.prepend(testTs(72)).entries
)
assert(
testTs(80).entries ++ Seq(TSEntry(110, 111d, 1), TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
=== tri.prepend(testTs(80)).entries
)
assert(
testTs(81).entries ++ Seq(TSEntry(111, 222d, 10), TSEntry(121, 444d, 10))
=== tri.prepend(testTs(81)).entries
)
// On second
assert(
testTs(82).entries ++ Seq(TSEntry(112, 222d, 9), TSEntry(121, 444d, 10))
=== tri.prepend(testTs(82)).entries
)
assert(
testTs(90).entries ++ Seq(TSEntry(120, 222d, 1), TSEntry(121, 444d, 10))
=== tri.prepend(testTs(90)).entries
)
assert(
testTs(91).entries ++ Seq(TSEntry(121, 444d, 10))
=== tri.prepend(testTs(91)).entries
)
// On third
assert(
testTs(92).entries ++ Seq(TSEntry(122, 444d, 9))
=== tri.prepend(testTs(92)).entries
)
assert(
testTs(100).entries ++ Seq(TSEntry(130, 444d, 1))
=== tri.prepend(testTs(100)).entries
)
assert(testTs(101).entries === tri.prepend(testTs(101)).entries)
assert(testTs(102).entries === tri.prepend(testTs(102)).entries)
}
}
}
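// A minimal usage sketch (the implementation name is an assumption, not part of this file): a
// concrete spec mixes in the bench and delegates to its shared behaviours, passing the
// implementation's constructor so that every test above runs against it.
//
//   class MyTimeSeriesSpec extends AnyFlatSpec with TimeSeriesTestBench {
//     "MyTimeSeries" should behave like nonEmptyNonSingletonDoubleTimeSeries(MyTimeSeries(_))
//     it should behave like nonEmptyNonSingletonDoubleTimeSeriesWithCompression(MyTimeSeries(_))
//     it should behave like nonEmptyNonSingletonGenericTimeSeries(MyTimeSeries(_))
//   }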
|
Shastick/scala-timeseries-lib
|
src/test/scala/io/sqooba/oss/timeseries/TimeSeriesTestBench.scala
|
Scala
|
apache-2.0
| 42,660
|
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package blazecore
package util
import cats.effect.Async
import cats.effect.std.Dispatcher
import fs2._
import org.http4s.blaze.pipeline.TailStage
import org.http4s.util.StringWriter
import java.nio.ByteBuffer
import scala.concurrent._
private[http4s] class FlushingChunkWriter[F[_]](pipe: TailStage[ByteBuffer], trailer: F[Headers])(
implicit
protected val F: Async[F],
protected val ec: ExecutionContext,
protected val dispatcher: Dispatcher[F],
) extends Http1Writer[F] {
import ChunkWriter._
protected def writeBodyChunk(chunk: Chunk[Byte], flush: Boolean): Future[Unit] =
if (chunk.isEmpty) FutureUnit
else pipe.channelWrite(encodeChunk(chunk, Nil))
protected def writeEnd(chunk: Chunk[Byte]): Future[Boolean] = {
if (!chunk.isEmpty) writeBodyChunk(chunk, true).flatMap { _ =>
writeTrailer(pipe, trailer)
}
else writeTrailer(pipe, trailer)
}.map(_ => false)
override def writeHeaders(headerWriter: StringWriter): Future[Unit] =
// It may be a while before we get another chunk, so we flush now
pipe.channelWrite(
List(Http1Writer.headersToByteBuffer(headerWriter.result), TransferEncodingChunked)
)
}
|
http4s/http4s
|
blaze-core/src/main/scala/org/http4s/blazecore/util/FlushingChunkWriter.scala
|
Scala
|
apache-2.0
| 1,795
|
package org.garage.guru.domain
import org.scalacheck.{Properties, Gen}
import org.scalacheck.Prop._
object VehicleIdProperties extends Properties("Vehicle id"){
val validVehicleIdStrGen = Gen.identifier
val validVehicleIdGen = validVehicleIdStrGen.map(Vehicle.id)
val invalidVehicleIdGen = Gen.oneOf("", " ", " ").map(Vehicle.id)
property("vehicle id creation successful") = forAll(validVehicleIdGen)(_.isDefined)
property("vehicle id creation failure") = forAll(invalidVehicleIdGen){_.isEmpty}
}
object VehicleProperties extends Properties("Vehicle"){
import VehicleIdProperties._
  val validVehicleTypeGen = Gen.oneOf("car", "motorbike")
  val validVehicleGen = for {
    id <- validVehicleIdStrGen
    t <- validVehicleTypeGen
  } yield Vehicle(t, id)
  val invalidVehicleTypeGen = Gen.alphaStr suchThat (s => s != "car" && s != "motorbike")
  val invalidVehicleGen = for {
    id <- validVehicleIdStrGen
    t <- invalidVehicleTypeGen
  } yield Vehicle(t, id)
property("vehicle successful creation") = forAll(validVehicleGen)(_.isDefined)
property("vehicle fail creation") = forAll(invalidVehicleGen)(_.isEmpty)
}
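// A usage note (build wiring is assumed, not shown in this file): with ScalaCheck configured as a
// test framework these objects run under `sbt test`; each property(...) call above registers a
// named check (e.g. "Vehicle id.vehicle id creation successful") that is evaluated against many
// generated inputs (100 by default).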
|
ddd-fun/garage-guru-fun
|
src/test/scala/org/garage/guru/domain/VehicleProperties.scala
|
Scala
|
apache-2.0
| 1,157
|
package thistle.core
case class MatchSequence[T](sequence: Seq[T]) {
private lazy implicit val series =
sequence.toVector
def filter(ep: ElementPredicate[T]) =
series
.view
.zipWithIndex
.filter{case(k, i) => ep(new ElementState(i){override lazy val value = k})}
.map(_._1)
.force
def filterNot(ep: ElementPredicate[T]) =
filter(!ep)
def exists(ep: ElementPredicate[T]): Boolean =
series
.zipWithIndex
.exists{case(k, i) => ep(ElementState(i))}
def forall(ep: ElementPredicate[T]) =
!exists(!ep)
def count(ep: ElementPredicate[T]): Int =
filter(ep).length
def find(ep: ElementPredicate[T]): Option[T] =
series
.view
.zipWithIndex
.find{case(k, i) => ep(new ElementState(i){override lazy val value = k})}
.map(_._1)
def exists(q: Query[T]): Boolean =
MatchTreeBuilder(q).isComplete
}
trait MatchSequenceImplicits {
implicit def sequence2MatchSequence[T](seq: Seq[T]): MatchSequence[T] =
MatchSequence(seq)
implicit def matchSequence2Sequence[T](matchSequence: MatchSequence[T]): Seq[T] =
matchSequence.sequence
}
object MatchSequence extends MatchSequenceImplicits {}
|
smarden1/thistle
|
src/main/scala/thistle/core/MatchSequence.scala
|
Scala
|
mit
| 1,203
|
package io.therealbuggy.remotechest
import java.io.File
import java.util.logging.Logger
import io.therealbuggy.remotechest.api.{RemoteChestAPI, API}
import io.therealbuggy.remotechest.configuration.Configuration
import io.therealbuggy.remotechest.listeners.RemoteChestListener
import io.therealbuggy.remotechest.saver.SaveLoad
import org.bukkit.Bukkit
import org.bukkit.plugin.java.JavaPlugin
/**
* Created by jonathan on 20/12/15.
*/
class RemoteChest extends JavaPlugin {
private var logger: Logger = null
private var config: Configuration = null
private var api: API = null
private var saveLoad: SaveLoad = null
private var dataFile: File = null
private var backupSaveLoad: SaveLoad = null
private var backupFile: File = null
override def onEnable(): Unit ={
logger = getLogger
logger.info("Loading configuration...")
saveDefaultConfig()
logger.info("Configuring plugin...")
dataFile = new File(getDataFolder, "data.dat")
backupFile = new File(getDataFolder, "backup_data.dat")
config = new Configuration(this, getConfig)
api = new RemoteChestAPI(this)
backupSaveLoad = new SaveLoad(api, backupFile)
logger.info("Loading chests...")
saveLoad = new SaveLoad(api, dataFile)
var loadedChests: Int = 0
try{
loadedChests = saveLoad.load()
}catch {
case x:Exception =>
x.printStackTrace()
logger.warning("Error loading the main data file, trying to load the backup...")
logger.info("Loading the backup...")
try{
loadedChests = backupSaveLoad.load()
}catch{
case x2:Exception =>
x2.printStackTrace()
logger.severe("Error loading the backup! Resetting the files...")
try{
dataFile.delete()
backupFile.delete()
saveLoad.load()
backupSaveLoad.load()
}catch{
case ex:Exception =>
ex.printStackTrace()
logger.severe("Critical error! Unable to create the files required for the plugin to work!")
this.getPluginLoader.disablePlugin(this)
return
}
}
}
logger.info("Loaded "+loadedChests+" chests!")
logger.info("Registering listener...")
Bukkit.getPluginManager.registerEvents(new RemoteChestListener(this), this)
logger.info("Registering scheduler...")
Bukkit.getScheduler.scheduleSyncRepeatingTask(this, new Runnable {
override def run(): Unit = {
logger.info("Saving chests...")
backupSaveLoad.save()
saveLoad.save()
logger.info("Chests saved!")
}
}, 0L, config.tempoDeSalvamento*60*20L)
logger.info("Save scheduler configured to run every "+config.tempoDeSalvamento+" minutes!")
logger.info("Plugin enabled!")
}
override def onDisable() : Unit ={
logger.info("Disabling...")
Bukkit.getScheduler.cancelTasks(this)
logger.info("Saving chests...")
saveLoad.save()
logger.info("Chests saved!")
config = null
backupSaveLoad = null
api = null
dataFile = null
logger.info("Disabled!")
logger = null
}
def obatinLogger = logger
def obtainConfig = config
def obtainAPI = api
def obtainSaver = saveLoad
def obtainDataFile = dataFile
}
|
JonathanxD/RemoteChest
|
src/io/therealbuggy/remotechest/RemoteChest.scala
|
Scala
|
agpl-3.0
| 3,367
|
/*
* Copyright 2016 Baltnet Communications LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ee.risk.sbt.plugins.wsdl2java.ssl
import java.io._
import java.security.KeyStore
import java.security.cert.X509Certificate
import sbt.Logger
/**
* Created by The Ranger (ranger@risk.ee) on 2016-08-19
* for Baltnet Communications LLC (info@baltnet.ee)
*/
class TrustStore(log: Logger) {
private val keyStore = KeyStore.getInstance(KeyStore.getDefaultType)
private val defaultPassword = "changeit"
def getKeyStore = keyStore
def load(): Unit = {
val separator = java.io.File.separatorChar
val sysDir = new File(System.getProperty("java.home") + separator + "lib" + separator + "security")
var file = new File(sysDir, "jssecacerts")
if (!file.isFile) file = new File(sysDir, "cacerts")
load(file)
}
def load(file: File, password: String = defaultPassword): Unit = {
try {
val inputStream = new FileInputStream(file)
log.info("Using trust store at " + file.getAbsolutePath)
keyStore.load(inputStream, password.toCharArray)
inputStream.close()
}
catch {
case _: FileNotFoundException =>
keyStore.load(null, password.toCharArray)
log.warn("Could not load data from store " + file.getAbsolutePath + ", skipping")
}
}
def save(file: File, password: String = defaultPassword): Unit = {
try {
log.info("Saving keystore to file " + file.getAbsolutePath)
keyStore.store(new FileOutputStream(file), password.toCharArray)
}
catch {
case ex: IOException => throw new SSLException("Could not save keystore to file " + ex.getMessage)
}
}
def addCertificate(certificate: X509Certificate): Unit = {
keyStore.setCertificateEntry("test", certificate)
}
}
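// A minimal usage sketch (added for illustration, not part of the original source): inside an
// sbt task one could load the JRE's default trust store and persist a copy to a chosen file.
// The `log` and `out` parameters are assumed to be supplied by the caller (e.g. streams.value.log).
object TrustStoreExample {
  def copyDefaultStore(log: Logger, out: File): Unit = {
    val store = new TrustStore(log)
    store.load()    // falls back to jssecacerts/cacerts under java.home
    store.save(out) // persisted with the default "changeit" password
  }
}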
|
theranger/sbt-wsdl2java
|
src/main/scala/ee/risk/sbt/plugins/wsdl2java/ssl/TrustStore.scala
|
Scala
|
apache-2.0
| 2,236
|
/*
* Copyright (c) 2015-2022 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Attribution Notice under the terms of the Apache License 2.0
*
* This work was created by the collective efforts of the openCypher community.
* Without limiting the terms of Section 6, any Derivative Work that is not
* approved by the public consensus process of the openCypher Implementers Group
* should not be described as “Cypher” (and Cypher® is a registered trademark of
* Neo4j Inc.) or as "openCypher". Extensions by implementers or prototypes or
* proposals for change that have been documented or implemented should only be
* described as "implementation extensions to Cypher" or as "proposed changes to
* Cypher that are not yet approved by the openCypher community".
*/
package org.opencypher.tools.tck.inspection.diff
import java.net.URI
import java.util
import org.opencypher.tools.tck.api.Dummy
import org.opencypher.tools.tck.api.Measure
import org.opencypher.tools.tck.api.Scenario
import org.opencypher.tools.tck.api.Step
import org.opencypher.tools.tck.api.groups.Feature
import org.opencypher.tools.tck.api.groups.Group
import org.opencypher.tools.tck.api.groups.ScenarioCategory
import org.opencypher.tools.tck.api.groups.ScenarioOutline
import org.opencypher.tools.tck.api.groups.Tag
import org.opencypher.tools.tck.api.groups.TckTree
import org.opencypher.tools.tck.api.groups.Total
import org.opencypher.tools.tck.inspection.diff
import org.opencypher.tools.tck.inspection.diff.ScenarioDiffTag._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class TckTreeDiffTest extends AnyFunSuite with Matchers {
private val dummyPickle = new io.cucumber.core.gherkin.Pickle() {
override def getKeyword: String = ""
override def getLanguage: String = "EN"
override def getName: String = "name"
override def getLocation: io.cucumber.core.gherkin.Location = new io.cucumber.core.gherkin.Location() {
override def getLine: Int = 1
override def getColumn: Int = 1
}
override def getScenarioLocation: io.cucumber.core.gherkin.Location = new io.cucumber.core.gherkin.Location() {
override def getLine: Int = 1
override def getColumn: Int = 1
}
override def getSteps: util.List[io.cucumber.core.gherkin.Step] = new util.ArrayList[io.cucumber.core.gherkin.Step]()
override def getTags: util.List[String] = new util.ArrayList[String]()
override def getUri: URI = new URI("http://www.opencypher.org/")
override def getId: String = "id"
}
private def namedDummyPickleStep(name: String): io.cucumber.core.gherkin.Step = new io.cucumber.core.gherkin.Step() {
override def getLine: Int = 1
override def getArgument: io.cucumber.core.gherkin.Argument = new io.cucumber.core.gherkin.DocStringArgument() {
override def getContent: String = "text"
override def getContentType: String = ""
override def getLine: Int = 1
}
override def getKeyWord: String = "keyWord"
override def getType: io.cucumber.core.gherkin.StepType = io.cucumber.core.gherkin.StepType.GIVEN
override def getPreviousGivenWhenThenKeyWord: String = ""
override def getText: String = name
override def getId: String = "id"
}
private val dummyPickleStep = namedDummyPickleStep("")
private def dummyStep(name: String): Step = Dummy(namedDummyPickleStep(name))
private def dummyPath(path: String): java.nio.file.Path = new java.io.File(path).toPath
test("Diff with one scenario added to the same top-level feature without tags") {
val scrA = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrB)
val scenariosAfter: Seq[Scenario] = Seq(scrA, scrB)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrB), Seq(scrA, scrB)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrB), Seq(scrA, scrB)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with one scenario removed from the same top-level feature without tags") {
val scrA = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA, scrB)
val scenariosAfter: Seq[Scenario] = Seq(scrB)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA, scrB), Seq(scrB)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA, scrB), Seq(scrB)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with one scenario moved to another top-level feature without tags") {
val scrA1 = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrA2 = Scenario(List[String](), "ftr2", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr2.feature"))
val scrB = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA1, scrB)
val scenariosAfter: Seq[Scenario] = Seq(scrA2, scrB)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val scrA1scrA2Diff = ScenarioDiff(scrA1, scrA2)
scrA1scrA2Diff.diffTags should equal(Set(Moved))
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA1, scrB), Seq(scrA2, scrB)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA1, scrB), Seq(scrB)),
Feature("ftr2", Total) -> GroupDiff(Seq(), Seq(scrA2)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with one scenario moved to another sub-level feature without tags") {
val scrA1 = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrA2 = Scenario(List[String]("X"), "ftr2", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("X/ftr2.feature"))
val scrB = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA1, scrB)
val scenariosAfter: Seq[Scenario] = Seq(scrA2, scrB)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val scrA1scrA2Diff = ScenarioDiff(scrA1, scrA2)
scrA1scrA2Diff.diffTags should equal(Set(Moved))
val catX = ScenarioCategory("X", Total)
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA1, scrB), Seq(scrA2, scrB)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA1, scrB), Seq(scrB)),
catX -> GroupDiff(Seq(), Seq(scrA2)),
Feature("ftr2", catX) -> GroupDiff(Seq(), Seq(scrA2)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with one scenario moved to another top-level feature and a changed tag") {
val scrA1 = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](dummyStep("A")), dummyPickle, dummyPath("ftr1.feature"))
val scrA2 = Scenario(List[String](), "ftr2", Some(1), "scrA", None, None, Set[String](), List[Step](dummyStep("A")), dummyPickle, dummyPath("ftr2.feature"))
val scrB1 = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String]("A"), List[Step](dummyStep("B")), dummyPickle, dummyPath("ftr1.feature"))
val scrB2 = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String]("B"), List[Step](dummyStep("B")), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA1, scrB1)
val scenariosAfter: Seq[Scenario] = Seq(scrA2, scrB2)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val scrA1scrA2Diff = ScenarioDiff(scrA1, scrA2)
scrA1scrA2Diff.diffTags should equal(Set(Moved))
val scrB1scrB2Diff = ScenarioDiff(scrB1, scrB2)
scrB1scrB2Diff.diffTags should equal(Set(Retagged))
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA1, scrB1), Seq(scrA2, scrB2)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA1, scrB1), Seq(scrB2)),
Feature("ftr2", Total) -> GroupDiff(Seq(), Seq(scrA2)),
Tag("A") -> GroupDiff(Seq(scrB1), Seq()),
Tag("B") -> GroupDiff(Seq(), Seq(scrB2)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with two scenarios from an outline in a top-level feature having changed tags") {
val scrA = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB0 = Scenario(List[String](), "ftr1", Some(2), "scrB", Some(0), None, Set[String]("A"), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB1 = Scenario(List[String](), "ftr1", Some(2), "scrB", Some(1), Some("a"), Set[String]("A"), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrC = Scenario(List[String](), "ftr1", Some(3), "scrC", None, None, Set[String]("A"), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB0x = Scenario(List[String](), "ftr1", Some(2), "scrB", Some(0), None, Set[String]("B"), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scrB1x = Scenario(List[String](), "ftr1", Some(2), "scrB", Some(1), Some("a"), Set[String]("B"), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA, scrB0, scrB1, scrC)
val scenariosAfter: Seq[Scenario] = Seq(scrA, scrB0x, scrB1x, scrC)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val scrB1scrB1xDiff = ScenarioDiff(scrB1, scrB1x)
scrB1scrB1xDiff.diffTags should equal(Set(Retagged))
val scrB0scrB0xDiff = ScenarioDiff(scrB0, scrB0x)
scrB0scrB0xDiff.diffTags should equal(Set(Retagged))
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA, scrB0, scrB1, scrC), Seq(scrA, scrB0x, scrB1x, scrC)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA, scrB0, scrB1, scrC), Seq(scrA, scrB0x, scrB1x, scrC)),
Tag("A") -> GroupDiff(Seq(scrB0, scrB1, scrC), Seq(scrC)),
Tag("B") -> GroupDiff(Seq(), Seq(scrB0x, scrB1x)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
test("Diff with one scenario changed in categories, tags, and content of steps") {
val stepsA1 = List[Step](Dummy(dummyPickleStep), Measure(dummyPickleStep))
val stepsA2 = List[Step](Measure(dummyPickleStep), Dummy(dummyPickleStep))
val scrA1 = Scenario(List[String](), "ftr1", Some(1), "scrA", None, None, Set[String]("T"), stepsA1, dummyPickle, dummyPath("ftr1.feature"))
val scrA2 = Scenario(List[String](), "ftr2", Some(1), "scrA", None, None, Set[String]("X"), stepsA2, dummyPickle, dummyPath("ftr2.feature"))
val scrB = Scenario(List[String](), "ftr1", Some(2), "scrB", None, None, Set[String](), List[Step](), dummyPickle, dummyPath("ftr1.feature"))
val scenariosBefore: Seq[Scenario] = Seq(scrA1, scrB)
val scenariosAfter: Seq[Scenario] = Seq(scrA2, scrB)
val collectBefore = TckTree(scenariosBefore)
val collectAfter = TckTree(scenariosAfter)
val scrA1scrA2Diff = ScenarioDiff(scrA1, scrA2)
scrA1scrA2Diff.diffTags should equal(Set(Moved, Retagged, StepsChanged))
val expectedResult = Map[Group, GroupDiff](
Total -> GroupDiff(Seq(scrA1, scrB), Seq(scrA2, scrB)),
Feature("ftr1", Total) -> GroupDiff(Seq(scrA1, scrB), Seq(scrB)),
Feature("ftr2", Total) -> GroupDiff(Seq(), Seq(scrA2)),
Tag("T") -> GroupDiff(Seq(scrA1), Seq()),
Tag("X") -> GroupDiff(Seq(), Seq(scrA2)),
)
TckTreeDiff(collectBefore, collectAfter).diffs should equal(expectedResult)
}
}
|
opencypher/openCypher
|
tools/tck-inspection/src/test/scala/org/opencypher/tools/tck/inspection/diff/TckTreeDiffTest.scala
|
Scala
|
apache-2.0
| 13,244
|
package avrohugger
package generators
import avrohugger.format.abstractions.SourceFormat
import avrohugger.input.DependencyInspector
import avrohugger.input.NestedSchemaExtractor
import avrohugger.input.reflectivecompilation.schemagen._
import avrohugger.input.parsers.{ FileInputParser, StringInputParser}
import avrohugger.matchers.TypeMatcher
import avrohugger.stores.{ ClassStore, SchemaStore }
import java.io.{File, FileNotFoundException, IOException}
import org.apache.avro.{ Protocol, Schema }
import org.apache.avro.Schema.Type.ENUM
// Unable to overload this class' methods because outDir uses a default value
private[avrohugger] object FileGenerator {
def schemaToFile(
schema: Schema,
outDir: String,
format: SourceFormat,
classStore: ClassStore,
schemaStore: SchemaStore,
typeMatcher: TypeMatcher,
restrictedFields: Boolean,
targetScalaPartialVersion: String): Unit = {
val topNS: Option[String] = DependencyInspector.getReferredNamespace(schema)
val topLevelSchemas: List[Schema] =
NestedSchemaExtractor.getNestedSchemas(schema, schemaStore, typeMatcher)
// most-nested classes processed first
topLevelSchemas.reverse.distinct.foreach(schema => {
// pass in the top-level schema's namespace if the nested schema has none
val ns = DependencyInspector.getReferredNamespace(schema) orElse topNS
format.compile(classStore, ns, Left(schema), outDir, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
})
}
def protocolToFile(
protocol: Protocol,
outDir: String,
format: SourceFormat,
classStore: ClassStore,
schemaStore: SchemaStore,
typeMatcher: TypeMatcher,
restrictedFields: Boolean,
targetScalaPartialVersion: String): Unit = {
val ns = Option(protocol.getNamespace)
format.compile(classStore, ns, Right(protocol), outDir, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
}
def stringToFile(
str: String,
outDir: String,
format: SourceFormat,
classStore: ClassStore,
schemaStore: SchemaStore,
stringParser: StringInputParser,
typeMatcher: TypeMatcher,
restrictedFields: Boolean,
targetScalaPartialVersion: String): Unit = {
val schemaOrProtocols = stringParser.getSchemaOrProtocols(str, schemaStore)
schemaOrProtocols.foreach(schemaOrProtocol => {
schemaOrProtocol match {
case Left(schema) => {
schemaToFile(schema, outDir, format, classStore, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
}
case Right(protocol) => {
protocolToFile(protocol, outDir, format, classStore, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
}
}
})
}
def fileToFile(
inFile: File,
outDir: String,
format: SourceFormat,
classStore: ClassStore,
schemaStore: SchemaStore,
fileParser: FileInputParser,
typeMatcher: TypeMatcher,
classLoader: ClassLoader,
restrictedFields: Boolean,
targetScalaPartialVersion: String): Unit = {
val schemaOrProtocols: List[Either[Schema, Protocol]] =
fileParser.getSchemaOrProtocols(inFile, format, classStore, classLoader)
schemaOrProtocols.foreach(schemaOrProtocol => schemaOrProtocol match {
case Left(schema) => {
schemaToFile(schema, outDir, format, classStore, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
}
case Right(protocol) => {
protocolToFile(protocol, outDir, format, classStore, schemaStore, typeMatcher, restrictedFields, targetScalaPartialVersion)
}
})
}
}
|
julianpeeters/avrohugger
|
avrohugger-core/src/main/scala/generators/FileGenerator.scala
|
Scala
|
apache-2.0
| 3,657
|
package api
import akka.testkit.TestActorRef
import domain._
import org.specs2.matcher.Scope
import org.specs2.mock._
import org.specs2.mutable.Specification
import spray.http.StatusCodes
import spray.httpx.Json4sSupport
import spray.testkit.Specs2RouteTest
import scala.concurrent.duration.FiniteDuration
import akka.actor._
class WeatherApiSpecs extends Specification with Specs2RouteTest with Mockito with Json4sSupport with JsonFormats {
implicit val routeTestTimeout = RouteTestTimeout(FiniteDuration(5, "second"))
class MockedScope(val forecastRef: ActorRef) extends Scope with WeatherApi {
def actorRefFactory = system
}
val sampleWeatherData = WeatherData(
shortterm = List(DailyData(
day = "2015-06-17",
data = List(HourData(
startTime = "02:00",
temperature = 21,
symbol = 1
))
)),
longterm = List(
)
)
val location = "Oslo"
val sampleForecast = Forecast(location = location)
"GET /api/weather" should {
val successForecastRef = TestActorRef(new Actor {
def receive = {
case `sampleForecast` => sender() ! Right(sampleWeatherData)
}
})
"returns the weather data for the given location" in new MockedScope(successForecastRef) {
Get(s"/api/weather?location=$location") ~> weatherRoute ~> check {
status === StatusCodes.OK
responseAs[WeatherData] mustEqual(sampleWeatherData)
}
}
val failForecastRef = TestActorRef(new Actor {
def receive = {
case Forecast(_) => sender() ! Left(ForecastActor.InternalServerError)
}
})
"returns 404 if there are errors coming from the forecasting actor" in new MockedScope(failForecastRef) {
Get(s"/api/weather?location=$location") ~> weatherRoute ~> check {
status === StatusCodes.NotFound
responseAs[ErrorWrapper] mustEqual (ErrorWrapper(ForecastActor.InternalServerError))
}
}
}
}
|
sbondor/sunwatch
|
server/src/test/scala/api/WeatherApiSpecs.scala
|
Scala
|
mit
| 1,943
|
package com.codesimples.objectives.persistence.adapter.user
trait NewUserPersistenceAdapter {
def saveUser(map: Map[String, AnyRef])
def findUserById(id: String): Map[String, AnyRef]
}
|
agnaldo4j/estudos_arquitetura_limpa_scala
|
planner-usecases/src/main/scala/com/codesimples/objectives/persistence/adapter/user/NewUserPersistenceAdapter.scala
|
Scala
|
mit
| 190
|
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2019 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.ui.tracking
import scalafx.Includes._
import scalafx.geometry.Insets
import scalafx.scene.layout.BorderPane
import trove.core.Project
class AccountPane(eventSubscriberGroup: Int, project: Project) extends BorderPane {
private[this] val accountFunctions = new AccountFunctions(project.accountsService, project)
padding = Insets(10)
center = new MainAccountsView(eventSubscriberGroup, project.accountsService)
top = new AccountsButtonBar(
addAccountFn = accountFunctions.addAccount
)
minWidth = 300
prefWidth = 300
// Sets margin for center and top items in border pane; net result is that 10 px will be inserted.
BorderPane.setMargin(center(), Insets(5))
BorderPane.setMargin(top(), Insets(5))
}
|
emanchgo/budgetfree
|
src/main/scala/trove/ui/tracking/AccountPane.scala
|
Scala
|
gpl-3.0
| 1,648
|
package com.plasmaconduit.framework.routes.destinations
import com.plasmaconduit.framework.mvc.Controller
import com.plasmaconduit.framework.{HttpRouteResult, HttpRoute, HttpRequest}
import com.plasmaconduit.framework.string.StringMatcher
final case class HttpVhostRoute[R <: HttpRequest[R]](host: StringMatcher, controller: Controller[R]) extends HttpRoute[R] {
override def test(request: R): Option[HttpRouteResult[R]] = {
request
.headers
.get("Host")
.filter(n => host.matches(n))
.map(_ => HttpRouteResult(request, controller))
}
}
|
plasmaconduit/plasmaconduit-framework
|
src/main/scala/com/plasmaconduit/framework/routes/destinations/HttpVhostRoute.scala
|
Scala
|
mit
| 572
|
/**
* Copyright (c) 2012-2013, Tomasz Kaczmarzyk.
*
* This file is part of BeanDiff.
*
* BeanDiff is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* BeanDiff is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with BeanDiff; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.beandiff.equality
import org.beandiff.core.DiffEngine
class DiffEqualityInvestigator(
diffEngine: DiffEngine) extends EqualityInvestigator {
override def areEqual(o1: Any, o2: Any) = !diffEngine.calculateDiff(o1, o2).hasDifference
}
|
tkaczmarzyk/beandiff
|
src/main/scala/org/beandiff/equality/DiffEqualityInvestigator.scala
|
Scala
|
lgpl-3.0
| 1,071
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.nn.CMulTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{FlatSpec, Matchers}
class MultiplySpec extends FlatSpec with Matchers {
"Multiply operation" should "work correctly" in {
import com.intel.analytics.bigdl.numeric.NumericFloat
val input =
T(
Tensor(T(1f, 2f, 3f)),
Tensor(T(2f, 2f, 4f)),
Tensor(T(7f, 3f, 1f))
)
val expectOutput = Tensor(T(14f, 12f, 12f))
val output = CMulTable().forward(input)
output should be(expectOutput)
}
}
|
wzhongyuan/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/MultplySpec.scala
|
Scala
|
apache-2.0
| 1,244
|
package huffman
import collection.mutable.PriorityQueue
sealed abstract class Tree[T] extends Ordered[Tree[T]] {
def weight: Int
override def compare(that: Tree[T]): Int = that.weight compare this.weight
}
case class Leaf[T](item: T, weight: Int) extends Tree[T]
case class Fork[T](left: Tree[T], right: Tree[T]) extends Tree[T] {
override def weight: Int = left.weight + right.weight
}
object Tree {
def construct[T](frequencies: Map[T, Int]): Tree[T] = {
require(frequencies.size >= 2)
val queue = PriorityQueue[Tree[T]]()
frequencies foreach {
case (item, weight) => queue += Leaf(item, weight)
}
while (queue.size > 1) {
val (x, y) = (queue.dequeue, queue.dequeue)
queue += Fork(x, y)
}
queue.head
}
}
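// A short usage sketch (added for illustration, not part of the original file): building a
// Huffman tree from hypothetical character frequencies. The reversed ordering defined on Tree
// makes the priority queue behave as a min-heap, so the two lightest trees are merged first.
object TreeExample extends App {
  val frequencies = Map('a' -> 5, 'b' -> 2, 'c' -> 1)
  val tree: Tree[Char] = Tree.construct(frequencies)
  println(tree) // the heaviest symbol ('a') ends up closest to the root
}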
|
anishathalye/huffman
|
src/main/scala/huffman/Tree.scala
|
Scala
|
mit
| 774
|
import com.twitter.finagle.Mysql
import com.twitter.finagle.mysql.{ResultSet, Row, QueryRequest, LongValue, IntValue}
import com.twitter.util.Await
import com.twitter.finagle.client.DefaultPool
import com.twitter.conversions.DurationOps._
object Shared {
//#processRow
def processRow(row: Row): Option[Long] =
row.getLong("product")
//#processRow
}
object ServiceFactoryExample {
import Shared._
//#client
val client = Mysql.client
.withCredentials("<user>", "<password>")
.withDatabase("test")
.configured(DefaultPool
.Param(low = 0, high = 10, idleTime = 5.minutes, bufferSize = 0, maxWaiters = Int.MaxValue))
.newClient("127.0.0.1:3306")
//#client
def main(args: Array[String]): Unit = {
//#query0
val product = client().flatMap { service =>
// `service` is checked out from the pool.
service(QueryRequest("SELECT 5*5 AS `product`"))
.map {
case rs: ResultSet => rs.rows.map(processRow)
case _ => Seq.empty
}.ensure {
// put `service` back into the pool.
service.close()
}
}
//#query0
println(Await.result(product))
}
}
object RichExample {
import Shared._
//#richClient
val richClient = Mysql.client
.withCredentials("<user>", "<password>")
.withDatabase("test")
.configured(DefaultPool
.Param(low = 0, high = 10, idleTime = 5.minutes, bufferSize = 0, maxWaiters = Int.MaxValue))
.newRichClient("127.0.0.1:3306")
//#richClient
def main(args: Array[String]): Unit = {
//#query1
val product = richClient.select("SELECT 5*5 AS `product`")(processRow)
//#query1
println(Await.result(product))
}
}
|
luciferous/finagle
|
doc/src/sphinx/code/protocols/mysql.scala
|
Scala
|
apache-2.0
| 1,695
|
/*
* Commentary
* Copyright (C) 2017 Michael Dippery <michael@monkey-robot.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.mipadi.commentary.reddit.db.mongodb
import java.net.URI
import com.mipadi.jupiter.net.{Addressable, RichURI}
import org.mongodb.scala.Document
import com.mipadi.commentary.reddit.net.Comment
private object internal {
implicit class RichMongoURI(uri: URI)(implicit ev: Addressable[URI])
extends RichURI[URI](uri) {
lazy val database: Option[String] = uri.pathComponents.lift(1)
lazy val collection: Option[String] = uri.pathComponents.lift(2)
}
implicit class MongoDBComment(val c: Comment) {
def toDocument: Document =
Document(
"_id" -> c.id,
"author" -> c.author,
"subreddit" -> c.subreddit,
"title" -> c.title,
"threadId" -> c.thread,
"body" -> c.body,
"timestamp" -> c.timestamp)
}
}
|
mdippery/commentary
|
src/main/scala/com/mipadi/commentary/reddit/db/mongodb/internal.scala
|
Scala
|
gpl-3.0
| 1,529
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import cats.laws._
import cats.laws.discipline._
import monix.execution.exceptions.DummyException
import scala.util.{Failure, Success}
object CoevalNowSuite extends BaseTestSuite {
test("Coeval.now should work") { implicit s =>
var wasTriggered = false
def trigger(): String = { wasTriggered = true; "result" }
val task = Coeval.now(trigger())
assert(wasTriggered, "wasTriggered")
val r = task.runTry()
assertEquals(r, Success("result"))
}
test("Coeval.now.isSuccess") { implicit s =>
assert(Coeval.Now(1).isSuccess, "isSuccess")
assert(!Coeval.Now(1).isError, "!isFailure")
}
test("Coeval.error should work synchronously") { implicit s =>
var wasTriggered = false
val dummy = DummyException("dummy")
def trigger(): Throwable = { wasTriggered = true; dummy }
val task = Coeval.raiseError(trigger())
assert(wasTriggered, "wasTriggered")
val r = task.runTry()
assertEquals(r, Failure(dummy))
}
test("Coeval.now.map should work") { implicit s =>
Coeval.now(1).map(_ + 1).value()
check1 { a: Int =>
Coeval.now(a).map(_ + 1) <-> Coeval.now(a + 1)
}
}
test("Coeval.error.map should be the same as Coeval.error") { implicit s =>
check {
val dummy = DummyException("dummy")
Coeval.raiseError[Int](dummy).map(_ + 1) <-> Coeval.raiseError[Int](dummy)
}
}
test("Coeval.error.flatMap should be the same as Coeval.flatMap") { implicit s =>
check {
val dummy = DummyException("dummy")
Coeval.raiseError[Int](dummy).flatMap(Coeval.now) <-> Coeval.raiseError(dummy)
}
}
test("Coeval.error.flatMap should be protected") { implicit s =>
check {
val dummy = DummyException("dummy")
val err = DummyException("err")
Coeval.raiseError[Int](dummy).flatMap[Int](_ => throw err) <-> Coeval.raiseError(dummy)
}
}
test("Coeval.now.flatMap should protect against user code") { implicit s =>
val ex = DummyException("dummy")
val t = Coeval.now(1).flatMap[Int](_ => throw ex)
check(t <-> Coeval.raiseError(ex))
}
test("Coeval.now.flatMap should be tail recursive") { implicit s =>
def loop(n: Int, idx: Int): Coeval[Int] =
Coeval.now(idx).flatMap { _ =>
if (idx < n) loop(n, idx + 1).map(_ + 1)
else
Coeval.now(idx)
}
val iterations = s.executionModel.recommendedBatchSize * 20
val r = loop(iterations, 0).runTry()
assertEquals(r, Success(iterations * 2))
}
test("Coeval.now.materialize should work") { implicit s =>
val task = Coeval.now(1).materialize
assertEquals(task.value(), Success(1))
}
test("Coeval.error.materialize should work") { implicit s =>
val dummy = DummyException("dummy")
val task = Coeval.raiseError(dummy).materialize
assertEquals(task.value(), Failure(dummy))
}
}
|
monifu/monix
|
monix-eval/shared/src/test/scala/monix/eval/CoevalNowSuite.scala
|
Scala
|
apache-2.0
| 3,541
|
/*
* Copyright 2012 IL <iron9light AT gmali DOT com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ideacolorschemes.ideacolor
import org.jdom.Element
import java.lang.String
import com.intellij.openapi.options.FontSize
import java.awt.{Color, Font}
import com.intellij.openapi.editor.colors._
import com.intellij.openapi.editor.markup.TextAttributes
/**
* @author il
* @version 12/22/11 3:02 PM
*/
object Sandbox {
def run() {
// val manager = EditorColorsManager.getInstance()
// for{
// scheme <- manager.getAllSchemes
// } {
// println("%s[%s] - %s" format (scheme, manager.isDefaultScheme(scheme), scheme.getClass))
// }
// SchemeBookManager.addBooks()
// val editorColorsManager = EditorColorsManager.getInstance()
// editorColorsManager.addColorsScheme(new MockEditorColorScheme("mock"))
}
}
class MockEditorColorScheme(name: String) extends EditorColorsScheme {
val defaultEditorColorsScheme = EditorColorsManager.getInstance.getScheme(EditorColorsManager.DEFAULT_SCHEME_NAME)
def getName = name
def readExternal(element: Element) {}
def writeExternal(element: Element) {}
def setName(name: String) {}
def getAttributes(key: TextAttributesKey) = defaultEditorColorsScheme.getAttributes(key)
def setAttributes(key: TextAttributesKey, attributes: TextAttributes) {}
def getDefaultBackground = defaultEditorColorsScheme.getDefaultBackground
def getDefaultForeground = defaultEditorColorsScheme.getDefaultForeground
def getColor(key: ColorKey) = defaultEditorColorsScheme.getColor(key)
def setColor(key: ColorKey, color: Color) {}
def getEditorFontSize = defaultEditorColorsScheme.getEditorFontSize
def setEditorFontSize(fontSize: Int) {}
def getQuickDocFontSize = defaultEditorColorsScheme.getQuickDocFontSize
def setQuickDocFontSize(fontSize: FontSize) {}
def getEditorFontName = defaultEditorColorsScheme.getEditorFontName
def setEditorFontName(fontName: String) {}
def getFont(key: EditorFontType) = defaultEditorColorsScheme.getFont(key)
def setFont(key: EditorFontType, font: Font) {}
def getLineSpacing = defaultEditorColorsScheme.getLineSpacing
def setLineSpacing(lineSpacing: Float) {}
def getConsoleFontName = defaultEditorColorsScheme.getConsoleFontName
def setConsoleFontName(fontName: String) {}
def getConsoleFontSize = defaultEditorColorsScheme.getConsoleFontSize
def setConsoleFontSize(fontSize: Int) {}
def getConsoleLineSpacing = defaultEditorColorsScheme.getConsoleLineSpacing
def setConsoleLineSpacing(lineSpacing: Float) {}
}
|
iron9light/ideacolorschemes-plugin
|
src/main/scala/com/ideacolorschemes/ideacolor/Sandbox.scala
|
Scala
|
apache-2.0
| 3,099
|
package scray.hdfs.index.format
import java.util.ArrayList
import java.util.concurrent.Executors
import java.util.concurrent.Future
import com.typesafe.scalalogging.LazyLogging
class Buffer(batchSize: Int, path: String) extends LazyLogging {
var idxBuffer = new ArrayList[IndexFileRecord](batchSize)
var previousIdxBufferWriterTask: Future[Boolean] = null
var dataBuffer = new ArrayList[BlobFileRecord](batchSize)
var previousDataBufferWriterTask: Future[Boolean] = null
val writerExecutor = Executors.newFixedThreadPool(5);
var datafilePossition = 0L // Byte position in data file.
def addValue(value: BlobFileRecord, flush: Boolean): Unit = synchronized {
dataBuffer.add(value)
if (dataBuffer.size() == batchSize || flush) {
flushData
}
}
def addValue(value: IndexFileRecord, flush: Boolean): Unit = synchronized {
idxBuffer.add(value)
if (idxBuffer.size() == batchSize || flush) {
flushIdx
}
}
def addValue(value: Tuple2[BlobFileRecord, IndexFileRecord], flush: Boolean): Unit = synchronized {
this.addValue(value._1, flush)
this.addValue(value._2, flush)
}
def flushIdx = synchronized {
if (previousIdxBufferWriterTask != null) {
previousIdxBufferWriterTask.get
}
// Write full buffer
previousIdxBufferWriterTask = writerExecutor.submit(new HDFSWriter(path, idxBuffer))
// Provide empty buffer
idxBuffer = new ArrayList[IndexFileRecord](batchSize)
if (previousIdxBufferWriterTask != null) {
previousIdxBufferWriterTask.get
}
}
def flushData = synchronized {
logger.debug(s"Flush to HDFS")
// Start the next write process if the previous one completed
if (previousDataBufferWriterTask != null) {
previousDataBufferWriterTask.get
}
// Write full buffer
previousDataBufferWriterTask = writerExecutor.submit(new HDFSWriter(path, dataBuffer))
// Provide empty buffer
dataBuffer = new ArrayList[BlobFileRecord](batchSize)
}
def flush = {
flushIdx
flushData
}
}
|
scray/scray
|
scray-hdfs/modules/scray-hdfs-service-adapter/src/main/scala/scray/hdfs/index/format/Buffer.scala
|
Scala
|
apache-2.0
| 2,048
|
package com.wlangiewicz
import java.net.InetAddress
import java.util.concurrent.Future
import org.bitcoinj.core._
import org.bitcoinj.params.TestNet3Params
import org.bitcoinj.store.{MemoryBlockStore, BlockStore}
import org.bitcoinj.utils.BriefLogFormatter
/**
* Downloads the block given a block hash from the localhost node and prints it out.
*
* Usage:
* sbt "run-main com.wlangiewicz.FetchBlock <blockHash>"
*
* Example:
* sbt "run-main com.wlangiewicz.FetchBlock 000000000000000016603a15ec1538514af4ba5db4001a6449edcab57d9cc64e"
*
*/
object FetchBlockTestNet extends App {
override def main(args: Array[String]): Unit = {
if (args.length != 1) {
Console.println("Usage: sbt \"run-main com.wlangiewicz.FetchBlock <blockHash>\"")
}
else {
performFetch(args(0))
}
}
def performFetch(blockHashString: String): Unit ={
BriefLogFormatter.init()
Console.println("Connecting to node")
val params: NetworkParameters = TestNet3Params.get()
val blockStore: BlockStore = new MemoryBlockStore(params)
val chain: BlockChain = new BlockChain(params, blockStore)
val peerGroup: PeerGroup = new PeerGroup(params, chain)
peerGroup.startAsync()
peerGroup.awaitRunning()
val address: PeerAddress = new PeerAddress(InetAddress.getLocalHost(), params.getPort())
peerGroup.addAddress(address)
peerGroup.waitForPeers(1).get()
val peer: Peer = peerGroup.getConnectedPeers().get(0)
val blockHash: Sha256Hash = new Sha256Hash(blockHashString)
val future: Future[Block] = peer.getBlock(blockHash)
Console.println("Waiting for node to send us the requested block: " + blockHash)
val block: Block = future.get()
Console.println(block)
peerGroup.stopAsync()
}
}
|
wlk/bitcoinj-scala-examples
|
src/main/scala/FetchBlockTestNet.scala
|
Scala
|
apache-2.0
| 1,773
|
import org.scalatest._
import pushka._
import pushka.annotation.pushka
import scala.annotation.StaticAnnotation
object SealedTraitSpec {
abstract class Rgb(r: Int, g: Int, b: Int)
@pushka sealed trait Color
object Color {
case object Red extends Rgb(255, 0, 0) with Color
case object Green extends Rgb(0, 255, 0) with Color
case object Blue extends Rgb(0, 0, 255) with Color
}
@pushka sealed trait WithBody {
def x: Int
}
object WithBody {
case object A extends WithBody { val x = 0 }
case class B(y: Int) extends WithBody { val x = 1 }
}
@pushka sealed trait User
object User {
case object Empty extends User
case class Name(first: String, last: String) extends User
case object Anonymous extends User
}
@pushka case class Container(user: User, anotherField: Int)
class theAnnotation(a: String, b: String) extends StaticAnnotation
@pushka sealed trait Base
object Base {
@theAnnotation("Some message", "Today")
case class Descendant(value: Int) extends Base
object Descendant {
def haha(): Unit = {}
}
}
}
class SealedTraitSpec extends FlatSpec with Matchers {
import SealedTraitSpec._
"ADT based on case class" should "writes by case class rules in property" in {
val instance = Container(User.Name("John", "Doe"), 10)
write(instance) should be(
Ast.Obj(Map(
"user" → Ast.Obj(Map(
"name" → Ast.Obj(Map(
"first" → Ast.Str("John"),
"last" → Ast.Str("Doe"))
))),
"anotherField" → Ast.Num("10")
))
)
}
"ADT based on case object" should "writes as simple string" in {
val instance = Container(User.Empty, 10)
write(instance) should be(
Ast.Obj(Map(
"user" → Ast.Str("empty"),
"anotherField" → Ast.Num("10")
))
)
}
"Sealed trait with body" should "be processed" in {
write[WithBody](WithBody.A) shouldEqual Ast.Str("a")
write[WithBody](WithBody.B(1)) shouldEqual Ast.Obj(Map("b" → Ast.Num(1)))
}
"Deprecated annotation in case classes" should "not breaks writing" in {
write[Base](Base.Descendant(42))
}
"Color" should "be read correctly" in {
read[Color](Ast.Str("red")) shouldBe Color.Red
}
it should "throw exception with correct message if Ast is invalid" in {
val invalidAst = Ast("foo" → "bar")
val exception = intercept[PushkaException] {
read[Color](invalidAst)
}
exception.message should be(s"Error while reading AST $invalidAst to Color")
}
}
|
fomkin/pushka
|
core/src/test/scala/SealedTraitSpec.scala
|
Scala
|
apache-2.0
| 2,576
|
package org.jetbrains.plugins.scala.dfa
import org.jetbrains.plugins.scala.dfa.BoolSemiLat.{False, Top, True}
import org.jetbrains.plugins.scala.dfa.lattice.{JoinSemiLattice, JoinSemiLatticeSpec, HasBottom}
import org.scalatest.prop.TableFor3
class BoolSemiLatSpec extends JoinSemiLatticeSpec[BoolSemiLat] {
import BoolSemiLat._
override protected lazy val lattice: JoinSemiLattice[BoolSemiLat] = BoolSemiLat.joinSemiLattice
override protected lazy val latticeHasBottom: Option[HasBottom[BoolSemiLat]] = None
override protected lazy val latticeElementSamples: Seq[BoolSemiLat] = BoolSemiLatSpec.latticeElementSamples
override protected lazy val latticeJoinSamples: TableFor3[BoolSemiLat, BoolSemiLat, BoolSemiLat] =
Table(
("A", "B", "A join B"),
BoolSemiLatSpec.latticeJoinSamples:_*
)
val boolLat: BoolLat = Top
val boolSemiLat: BoolSemiLat = Top
property("BoolSemiLat join BoolSemiLat should give BoolLat") {
val r: BoolSemiLat = boolSemiLat join boolSemiLat
r shouldBe Top
}
property("BoolLat join BoolSemiLat should gives BoolLat") {
val r1: BoolLat = boolSemiLat join boolLat
val r2: BoolLat = boolLat join boolSemiLat
r1 shouldBe Top
r2 shouldBe Top
}
property("BoolSemiLat meet BoolLat should give BoolLat") {
val r1: BoolLat = boolSemiLat meet boolLat
val r2: BoolLat = boolLat meet boolSemiLat
r1 shouldBe Top
r2 shouldBe Top
}
}
object BoolSemiLatSpec {
val latticeElementSamples: Seq[BoolSemiLat] = Seq(Top, True, False)
val latticeJoinSamples: Seq[(BoolSemiLat, BoolSemiLat, BoolSemiLat)] =
Seq(
(True, True, True),
(False, False, False),
(True, False, Top),
(False, True, Top),
) ++ (
// pairs with top
for {
a <- latticeElementSamples
b <- latticeElementSamples
if (a == Top || b == Top)
} yield (a, b, Top)
)
}
|
JetBrains/intellij-scala
|
scala/dfa/test/org/jetbrains/plugins/scala/dfa/BoolSemiLatSpec.scala
|
Scala
|
apache-2.0
| 1,913
|
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.components.utils
import hydrograph.engine.core.component.entity.base.InputOutputEntityBase
import org.apache.spark.sql.types.{DataType, StructField}
import org.slf4j.{Logger, LoggerFactory}
/**
* The Class SchemaUtils.
*
* @author Bitwise
*
*/
case class SchemaUtils() {
val LOG: Logger = LoggerFactory.getLogger(classOf[SchemaUtils])
def getCodec(outputFileEntity: InputOutputEntityBase): String = {
if (outputFileEntity.getRuntimeProperties != null &&
outputFileEntity.getRuntimeProperties.containsKey("hydrograph.output.compression.codec")){
outputFileEntity.getRuntimeProperties.getProperty("hydrograph.output.compression.codec")
} else {
null
}
}
/*
* Compares two schemas and checks whether every field of @inputReadSchema exists in @metaDataSchema
* @param inputReadSchema schema from input
* @param metaDataSchema schema from metadata
* @return Boolean true if the schemas match; otherwise a SchemaMisMatchException is thrown
*/
def compareSchema(inputReadSchema: List[StructField], metaDataSchema: List[StructField]): Boolean = {
var dbDataType: DataType = null
var dbFieldName: String = null
inputReadSchema.foreach(inSchema => {
val fieldExist = metaDataSchema.exists(ds => {
dbDataType = ds.dataType
dbFieldName = ds.name
ds.name.equals(inSchema.name)
})
if (fieldExist) {
if (!(inSchema.dataType.typeName.equalsIgnoreCase(dbDataType.typeName))) {
LOG.error("Field '" + inSchema.name + "', data type does not match expected type:" + dbDataType + ", got type:" + inSchema.dataType)
throw SchemaMisMatchException("Field '" + inSchema.name + "' data type does not match expected type:" + dbDataType + ", got type:" + inSchema.dataType)
}
} else {
LOG.error("Field '" + inSchema.name + "' does not exist in metadata")
throw SchemaMisMatchException("Input schema does not match with metadata schema, "
+ "Field '" + inSchema.name + "' does not exist in metadata")
}
})
true
}
}
case class SchemaMisMatchException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause)
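// A hypothetical usage sketch (added for illustration, not part of the original source):
// compareSchema returns true when every input field exists in the metadata schema with a
// matching data type, and throws SchemaMisMatchException otherwise.
object SchemaUtilsExample {
  import org.apache.spark.sql.types.{IntegerType, StringType}
  def demo(): Boolean = {
    val inputReadSchema = List(StructField("id", IntegerType), StructField("name", StringType))
    val metaDataSchema = List(StructField("id", IntegerType), StructField("name", StringType))
    SchemaUtils().compareSchema(inputReadSchema, metaDataSchema) // true
  }
}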
|
capitalone/Hydrograph
|
hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/utils/SchemaUtils.scala
|
Scala
|
apache-2.0
| 2,962
|
package com.twitter.finagle.cacheresolver
import com.twitter.common.quantity.{Time, Amount}
import com.twitter.common.zookeeper.ZooKeeperClient
import com.twitter.concurrent.Broker
import com.twitter.conversions.time._
import com.twitter.finagle.service.Backoff
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Duration, JavaTimer, FuturePool}
import org.apache.zookeeper.Watcher.Event.KeeperState
import org.apache.zookeeper.{WatchedEvent, Watcher}
import scala.collection.JavaConversions._
object ZookeeperStateMonitor {
val DefaultZkConnectionRetryBackoff =
(Backoff.exponential(1.second, 2) take 6) ++ Backoff.const(60.seconds)
val DefaultFuturePool = FuturePool.unboundedPool
val DefaultTimer = new JavaTimer(isDaemon = true)
val DefaultZKWaitTimeout = 10.seconds
}
/**
* A zk monitor trait that assists with monitoring a given zk path for any node data change,
* in which the provided zk data handling implementation will be invoked.
*
* This monitor maintains a queue so that every work item triggered by a zk event is
* processed in order with a back-off policy. It also sets up a zookeeper connection watcher
* by default to re-set the data change watcher even during zk re-connect.
*
* The monitor sets up all watchers properly and kicks off the loop to process future events;
* you can also invoke loadZKData() in your class anytime to force reading zk data and apply it.
*
* Example use cases are:
* - zookeeper based CachePoolCluster uses this to monitor cache pool members change
* - zookeeper based MigrationClient uses this to monitor migration state transitioning
*/
trait ZookeeperStateMonitor {
protected val zkPath: String
protected val zkClient: ZooKeeperClient
protected val statsReceiver: StatsReceiver
import ZookeeperStateMonitor._
private[this] val zkWorkFailedCounter = statsReceiver.counter("zkWork.failed")
private[this] val zkWorkSucceededCounter = statsReceiver.counter("zkWork.succeeded")
private[this] val loadZKDataCounter = statsReceiver.counter("loadZKData")
private[this] val loadZKChildrenCounter = statsReceiver.counter("loadZKChildren")
private[this] val reconnectZKCounter = statsReceiver.counter("reconnectZK")
private[this] val zookeeperWorkQueue = new Broker[() => Unit]
// zk watcher for connection, data, children event
private[this] val zkWatcher: Watcher = new Watcher() {
// NOTE: Ensure that the processing of events is not a blocking operation.
override def process(event: WatchedEvent) = {
event.getState match {
// actively trying to re-establish the zk connection (hence re-register the zk path
// data watcher) whenever an zookeeper connection expired or disconnected. We could also
// only do this when SyncConnected happens, but that would be assuming other components
// sharing the zk client (e.g. ZookeeperServerSetCluster) will always actively attempts
// to re-establish the zk connection. For now, I'm making it actively re-establishing
// the connection itself here.
case (KeeperState.Disconnected | KeeperState.Expired) if event.getType == Watcher.Event.EventType.None =>
statsReceiver.counter("zkConnectionEvent." + event.getState).incr()
zookeeperWorkQueue ! reconnectZK
case KeeperState.SyncConnected =>
statsReceiver.counter("zkNodeChangeEvent." + event.getType).incr()
event.getType match {
case Watcher.Event.EventType.NodeDataChanged =>
zookeeperWorkQueue ! loadZKData
case Watcher.Event.EventType.NodeChildrenChanged =>
zookeeperWorkQueue ! loadZKChildren
case _ =>
}
case _ =>
}
}
}
/**
* Read work items off the broker and schedule the work with the future pool. If the scheduled work
* fails, it will repeatedly retry itself in a back-off manner (with a timer) until it succeeds,
* and then recursively call this method to process the next work item.
*
* This function guarantees that at any given time there will be only one thread (future pool thread)
* blocking on zookeeper IO work. Multiple ZK connection events or cache pool change events only
* queue up the work, and each work item will be picked up only after the previous one finishes successfully.
*/
private[this] def loopZookeeperWork(): Unit = {
def scheduleReadCachePoolConfig(
op: () => Unit,
backoff: Stream[Duration] = DefaultZkConnectionRetryBackoff
): Unit = {
DefaultFuturePool {
op()
} onFailure { ex =>
zkWorkFailedCounter.incr()
backoff match {
case wait #:: rest =>
DefaultTimer.doLater(wait) { scheduleReadCachePoolConfig(op, rest) }
}
} onSuccess { _ =>
zkWorkSucceededCounter.incr()
loopZookeeperWork
}
}
// get one work item off the broker and schedule it into the future pool
zookeeperWorkQueue.recv.sync() onSuccess {
case op: (() => Unit) => {
scheduleReadCachePoolConfig(op)
}
}
}
/**
* Load the zookeeper node data as well as leaving a data watch, then invoke the
* applyZKData implementation to process the data string.
*/
def applyZKData(data: Array[Byte]): Unit
def loadZKData = () => synchronized {
loadZKDataCounter.incr()
// read cache pool config data and leave a node data watch
val data = zkClient
.get(Amount.of(DefaultZKWaitTimeout.inMilliseconds, Time.MILLISECONDS))
.getData(zkPath, true, null)
applyZKData(data)
}
/**
* Load the zookeeper node children as well as leaving a children watch, then invoke the
* applyZKChildren implementation to process the children list.
*/
def applyZKChildren(children: List[String]): Unit = {} // no-op by default
def loadZKChildren = () => synchronized {
loadZKChildrenCounter.incr()
// get children list and leave a node children watch
val children = zkClient
.get(Amount.of(DefaultZKWaitTimeout.inMilliseconds, Time.MILLISECONDS))
.getChildren(zkPath, true, null)
applyZKChildren(children.toList)
}
/**
* Reconnect to the zookeeper. This may be invoked when the zookeeper connection expired and the
* node data watcher previously registered got dropped, hence re-attach the data watcher here.
*/
def reconnectZK = () => synchronized {
reconnectZKCounter.incr()
// reset watch for node data and children
val data = zkClient
.get(Amount.of(DefaultZKWaitTimeout.inMilliseconds, Time.MILLISECONDS))
.getData(zkPath, true, null)
val children = zkClient
.get(Amount.of(DefaultZKWaitTimeout.inMilliseconds, Time.MILLISECONDS))
.getChildren(zkPath, true, null)
}
// Register top-level connection watcher to monitor zk change.
// This watcher will live across different zk connection
zkClient.register(zkWatcher)
// attach data/children change event watcher to the zk client for the first time during constructing
zookeeperWorkQueue ! loadZKData
zookeeperWorkQueue ! loadZKChildren
// Kick off the loop to process zookeeper work queue
loopZookeeperWork()
}
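// A hypothetical concrete monitor (added for illustration, not part of the original source)
// showing how the trait above is typically mixed in: the constructor supplies the abstract
// members and applyZKData interprets the node payload. The class and field names are illustrative.
class ConfigStateMonitor(
  protected val zkPath: String,
  protected val zkClient: ZooKeeperClient,
  protected val statsReceiver: StatsReceiver
) extends ZookeeperStateMonitor {
  @volatile private[this] var latestConfig: String = ""
  // Invoked with the node data at construction time and on every NodeDataChanged event.
  def applyZKData(data: Array[Byte]): Unit = {
    latestConfig = new String(data, "UTF-8")
  }
  def currentConfig: String = latestConfig
}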
|
lukiano/finagle
|
finagle-cacheresolver/src/main/scala/com/twitter/finagle/cacheresolver/ZookeeperStateMonitor.scala
|
Scala
|
apache-2.0
| 7,225
|
package ru.maizy.ambient7.core.tests
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016-2017
* See LICENSE.txt for details.
*/
import java.nio.file.Paths
import org.scalatest.{ FlatSpec, Matchers }
abstract class BaseSpec extends FlatSpec with Matchers {
def getResourcePathString(relPath: String): String = {
val normPath = if (!relPath.startsWith("/")) "/" + relPath else relPath
Paths.get(this.getClass.getResource(normPath).toURI).toString
}
}
|
maizy/ambient7
|
core/src/test/scala/ru/maizy/ambient7/core/tests/BaseSpec.scala
|
Scala
|
apache-2.0
| 468
|
package regolic.dpllt.qfeuf
import regolic.asts.core.Trees._
import regolic.asts.core.Manip._
import regolic.asts.fol.Trees._
/*
* Flatten nested function calls by introducing auxiliary variables
 * Needs one extra equality compared to the "Fast congruence closure with
 * extensions" paper.
*/
object Flattener {
private def freshVar(sort: Sort) = freshVariable("variable", sort)
private val terms = collection.mutable.Map[FunctionApplication, (List[PredicateApplication], Variable)]()
def transform(term: Term): (Term, Map[String, FunctionApplication]) = {
var eqs: Map[String, FunctionApplication] = Map()
val newTerm = mapPostorder(term, (f: Formula) => f, (t: Term) => t match {
case app@FunctionApplication(fun, arg::args) => {
val fv = freshVariable(fun.name, fun.returnSort)
eqs += (fv.name -> app)
FunctionApplication(FunctionSymbol(fv.name, Nil, fv.sort), Nil)
}
case t => t
})
(newTerm, eqs)
}
def transform(formula: Formula): (Formula, Map[String, FunctionApplication]) = {
var eqs: Map[String, FunctionApplication] = Map()
var names: Map[FunctionApplication, String] = Map()
val newFormula = mapPostorder(formula, (f: Formula) => f, (t: Term) => t match {
case app@FunctionApplication(fun, arg::args) => names.get(app) match {
case None =>
val fv = freshVariable(fun.name, fun.returnSort)
eqs += (fv.name -> app)
names += (app -> fv.name)
FunctionApplication(FunctionSymbol(fv.name, Nil, fv.sort), Nil)
case Some(n) => Variable(n, fun.returnSort)
}
case t => t
})
(newFormula, eqs)
}
private def extract(f: FunctionApplication, acc: List[PredicateApplication] = Nil): (List[PredicateApplication], Variable) = {
if(terms.contains(f)) {
terms(f)
} else {
val retVal = f match {
case Apply((t1: Variable), (t2: Variable)) => {
val fv = freshVar(f.fSymbol.returnSort)
(Equals(f, fv) :: acc, fv)
}
case Apply((t1: FunctionApplication), (t2: Variable)) => {
val (l, lVar) = extract(t1, acc)
val fv = freshVar(f.fSymbol.returnSort)
(Equals(Apply(lVar, t2), fv) :: l, fv)
}
case Apply((t1: Variable), (t2: FunctionApplication)) => {
val (r, rVar) = extract(t2, acc)
val fv = freshVar(f.fSymbol.returnSort)
(Equals(Apply(t1, rVar), fv) :: r, fv)
}
case Apply((t1: FunctionApplication), (t2: FunctionApplication)) => {
val (l, lVar) = extract(t1, acc)
val (r, rVar) = extract(t2, l)
val fv = freshVar(f.fSymbol.returnSort)
(Equals(Apply(lVar, rVar), fv) :: r, fv)
}
case _ => throw new Exception("Unsupported function "+ f)
}
terms(f) = retVal
retVal
}
}
private def flatten(eq: PredicateApplication): List[PredicateApplication] = {
eq match {
case Equals((t1: Variable), (t2: Variable)) =>
Equals(t1, t2) :: Nil
case Equals((t1: Variable), (t2: FunctionApplication)) => {
val (r, rVar) = extract(t2)
Equals(t1, rVar) :: r
}
case Equals((t1: FunctionApplication), (t2: Variable)) => {
val (l, lVar) = extract(t1)
Equals(lVar, t2) :: l
}
case Equals((t1: FunctionApplication), (t2: FunctionApplication)) => {
val (l, lVar) = extract(t1)
val (r, rVar) = extract(t2)
Equals(lVar, rVar) :: l ::: r
}
case _ => throw new Exception("Unsupported terms "+ eq)
}
}
def apply(eq: PredicateApplication): List[PredicateApplication] = flatten(eq)
}
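/*
 * Hedged illustration, not part of the original file: a self-contained toy version of the
 * flattening idea documented above, using a hypothetical ToyTerm type instead of the regolic
 * AST. It only shows how nested applications turn into a list of definitional equations over
 * fresh variables; the names ToyTerm, ToyVar, ToyApp and flattenToy are made up for this sketch.
 */
object FlattenerIllustration {
  sealed trait ToyTerm
  case class ToyVar(name: String) extends ToyTerm
  case class ToyApp(fun: ToyTerm, arg: ToyTerm) extends ToyTerm

  private var counter = 0
  private def fresh(): ToyVar = { counter += 1; ToyVar(s"v$counter") }

  /** Returns a variable standing for `t` plus the definitional equations introduced. */
  def flattenToy(t: ToyTerm): (ToyVar, List[(ToyVar, ToyTerm)]) = t match {
    case v: ToyVar => (v, Nil)
    case ToyApp(f, a) =>
      val (fv, fEqs) = flattenToy(f)
      val (av, aEqs) = flattenToy(a)
      val v = fresh()
      (v, (v, ToyApp(fv, av)) :: fEqs ::: aEqs)
  }

  // Example: flattening f(g(a))(b), written as ToyApp(ToyApp(ToyVar("f"), ToyApp(ToyVar("g"), ToyVar("a"))), ToyVar("b")),
  // yields equations v1 = g(a), v2 = f(v1), v3 = v2(b) (collected most-recent first),
  // with v3 standing for the whole term.
}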
|
regb/scabolic
|
src/main/scala/regolic/dpllt/qfeuf/Flattener.scala
|
Scala
|
mit
| 3,681
|
import com.mohiva.play.silhouette.api.actions.SecuredErrorHandler
import javax.inject._
import play.api.http.DefaultHttpErrorHandler
import play.api._
import play.api.i18n.{I18nSupport, Messages, MessagesApi}
import play.api.mvc.Results.{Forbidden, Unauthorized}
import play.api.mvc._
import play.api.routing.Router
import scala.concurrent.Future
@Singleton
class ErrorHandler @Inject()(env: Environment,
config: Configuration,
sourceMapper: OptionalSourceMapper,
router: Provider[Router],
val messagesApi: MessagesApi)
extends DefaultHttpErrorHandler(env, config, sourceMapper, router)
with SecuredErrorHandler
with I18nSupport {
override def onNotAuthenticated(implicit request: RequestHeader): Future[Result] =
Future.successful(Unauthorized(Messages("user.notAuthorised")))
override def onNotAuthorized(implicit request: RequestHeader): Future[Result] =
Future.successful(Forbidden(Messages("notAllowed")))
}
|
scalableminds/webknossos
|
app/ErrorHandler.scala
|
Scala
|
agpl-3.0
| 1,058
|
package org.neilconcepts
import scalaz._; import Scalaz._
import org.scalameter._
import scalaz.Memo._
object SimpleKeyStore extends App {
private val mutCache = scala.collection.mutable.Map[Int, String]()
private def setCache(a: Int): String = s"entry:$a"
private val cache = immutableHashMapMemo {
entry: Int => setCache(entry)
}
  private def init: Unit = {
val time = measure {
for (i <- 1 |-> 1000000) {
val r = scala.util.Random.nextInt(1000)
cache(r)
}
}
println(s"scalaz) Total time: $time")
val time2 = measure {
for (i <- 1 |-> 1000000) {
val r = scala.util.Random.nextInt(1000)
        mutCache.getOrElseUpdate(r, setCache(r)) // compute the same value as the scalaz memo for a fair comparison
}
}
println(s"scala) Total time: $time2")
}
init
}
|
bneil/scalaz_playground
|
src/main/scala/org/neilconcepts/SimpleKeyStore.scala
|
Scala
|
mit
| 773
|
package mesosphere.marathon.upgrade
import akka.actor.{ ActorLogging, ActorRef, Actor }
import mesosphere.marathon.core.readiness.{ ReadinessCheckExecutor, ReadinessCheckResult }
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.tracker.TaskTracker
import mesosphere.marathon.core.event.{ DeploymentStatus, HealthStatusChanged, MesosStatusUpdateEvent }
import mesosphere.marathon.state.{ AppDefinition, PathId, Timestamp }
import mesosphere.marathon.upgrade.DeploymentManager.ReadinessCheckUpdate
import mesosphere.marathon.upgrade.ReadinessBehavior.{ ReadinessCheckSubscriptionKey, ScheduleReadinessCheckFor }
import rx.lang.scala.Subscription
/**
* ReadinessBehavior makes sure all tasks are healthy and ready depending on the app definition.
* Listens for TaskStatusUpdate events, HealthCheck events and ReadinessCheck events.
 * If a task becomes healthy or ready, the taskStatusChanged hook is called.
*
* Assumptions:
* - the actor is attached to the event stream for HealthStatusChanged and MesosStatusUpdateEvent
*/
trait ReadinessBehavior { this: Actor with ActorLogging =>
import context.dispatcher
//dependencies
def app: AppDefinition
def readinessCheckExecutor: ReadinessCheckExecutor
def deploymentManager: ActorRef
def taskTracker: TaskTracker
def status: DeploymentStatus
//computed values to have stable identifier in pattern matcher
val appId: PathId = app.id
val version: Timestamp = app.version
val versionString: String = version.toString
//state managed by this behavior
private[this] var healthy = Set.empty[Task.Id]
private[this] var ready = Set.empty[Task.Id]
private[this] var subscriptions = Map.empty[ReadinessCheckSubscriptionKey, Subscription]
/**
 * Hook method which is called whenever a task becomes healthy or ready.
*/
def taskStatusChanged(taskId: Task.Id): Unit
/**
 * Indicates whether the given target count has been reached.
*/
def taskTargetCountReached(count: Int): Boolean = healthy.size == count && ready.size == count
/**
 * Actors extending this trait should call this method when they detect that a task has terminated.
 * The task will be removed from the healthy and ready sets, and all of its readiness-check
 * subscriptions will be canceled.
*
* @param taskId the id of the task that has been terminated.
*/
def taskTerminated(taskId: Task.Id): Unit = {
healthy -= taskId
ready -= taskId
subscriptions.keys.filter(_.taskId == taskId).foreach { key =>
subscriptions(key).unsubscribe()
subscriptions -= key
}
}
def healthyTasks: Set[Task.Id] = healthy
def readyTasks: Set[Task.Id] = ready
def subscriptionKeys: Set[ReadinessCheckSubscriptionKey] = subscriptions.keySet
override def postStop(): Unit = {
subscriptions.values.foreach(_.unsubscribe())
}
/**
* Depending on the app definition, this method handles:
* - app without health checks and without readiness checks
* - app with health checks and without readiness checks
* - app without health checks and with readiness checks
* - app with health checks and with readiness checks
*
 * The taskStatusChanged hook is called when the task is ready according to the app definition.
*/
val readinessBehavior: Receive = {
def taskRunBehavior: Receive = {
def markAsHealthyAndReady(taskId: Task.Id): Unit = {
log.debug(s"Started task is ready: $taskId")
healthy += taskId
ready += taskId
taskStatusChanged(taskId)
}
def markAsHealthyAndInitiateReadinessCheck(taskId: Task.Id): Unit = {
healthy += taskId
initiateReadinessCheck(taskId)
}
def taskIsRunning(taskFn: Task.Id => Unit): Receive = {
case MesosStatusUpdateEvent(slaveId, taskId, "TASK_RUNNING", _, `appId`, _, _, _, `versionString`, _, _) =>
taskFn(taskId)
}
taskIsRunning(if (app.readinessChecks.isEmpty) markAsHealthyAndReady else markAsHealthyAndInitiateReadinessCheck)
}
def taskHealthBehavior: Receive = {
def initiateReadinessOnRun: Receive = {
case MesosStatusUpdateEvent(_, taskId, "TASK_RUNNING", _, `appId`, _, _, _, `versionString`, _, _) =>
initiateReadinessCheck(taskId)
}
def handleTaskHealthy: Receive = {
case HealthStatusChanged(`appId`, taskId, `version`, true, _, _) if !healthy(taskId) =>
log.info(s"Task $taskId now healthy for app ${app.id.toString}")
healthy += taskId
if (app.readinessChecks.isEmpty) ready += taskId
taskStatusChanged(taskId)
}
val handleTaskRunning = if (app.readinessChecks.nonEmpty) initiateReadinessOnRun else Actor.emptyBehavior
handleTaskRunning orElse handleTaskHealthy
}
def initiateReadinessCheck(taskId: Task.Id): Unit = {
log.debug(s"Initiate readiness check for task: $taskId")
val me = self
taskTracker.task(taskId).map { taskOption =>
for {
task <- taskOption
launched <- task.launched
} me ! ScheduleReadinessCheckFor(task, launched)
}
}
def readinessCheckBehavior: Receive = {
case ScheduleReadinessCheckFor(task, launched) =>
log.debug(s"Schedule readiness check for task: ${task.taskId}")
ReadinessCheckExecutor.ReadinessCheckSpec.readinessCheckSpecsForTask(app, task, launched).foreach { spec =>
val subscriptionName = ReadinessCheckSubscriptionKey(task.taskId, spec.checkName)
val subscription = readinessCheckExecutor.execute(spec).subscribe(self ! _)
subscriptions += subscriptionName -> subscription
}
case result: ReadinessCheckResult =>
log.info(s"Received readiness check update for task ${result.taskId} with ready: ${result.ready}")
deploymentManager ! ReadinessCheckUpdate(status.plan.id, result)
//TODO(MV): this code assumes only one readiness check per app (validation rules enforce this)
if (result.ready) {
log.info(s"Task ${result.taskId} now ready for app ${app.id.toString}")
ready += result.taskId
val subscriptionName = ReadinessCheckSubscriptionKey(result.taskId, result.name)
subscriptions.get(subscriptionName).foreach(_.unsubscribe())
subscriptions -= subscriptionName
taskStatusChanged(result.taskId)
}
}
val startBehavior = if (app.healthChecks.nonEmpty) taskHealthBehavior else taskRunBehavior
val readinessBehavior = if (app.readinessChecks.nonEmpty) readinessCheckBehavior else Actor.emptyBehavior
startBehavior orElse readinessBehavior
}
}
object ReadinessBehavior {
case class ReadinessCheckSubscriptionKey(taskId: Task.Id, readinessCheck: String)
case class ScheduleReadinessCheckFor(task: Task, launched: Task.Launched)
}
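/*
 * Hedged illustration, not part of the original file: a self-contained sketch of the
 * Receive-composition pattern used by `readinessBehavior` above, where optional partial
 * behaviors are selected and chained with `orElse`. ToyComposedActor and its string
 * messages are hypothetical and only demonstrate the composition, not Marathon semantics.
 */
object ReadinessBehaviorCompositionSketch {
  import akka.actor.Actor

  class ToyComposedActor(withHealthChecks: Boolean, withReadinessChecks: Boolean) extends Actor {
    private val healthBehavior: Receive = {
      case "healthy" => // record health, as taskHealthBehavior does for HealthStatusChanged
    }
    private val readinessBehavior: Receive = {
      case "ready" => // record readiness, as readinessCheckBehavior does for check results
    }
    // The effective behavior is built from whichever partial behaviors apply, falling back
    // to Actor.emptyBehavior, mirroring startBehavior orElse readinessBehavior above.
    override def receive: Receive =
      (if (withHealthChecks) healthBehavior else Actor.emptyBehavior) orElse
        (if (withReadinessChecks) readinessBehavior else Actor.emptyBehavior)
  }
}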
|
timcharper/marathon
|
src/main/scala/mesosphere/marathon/upgrade/ReadinessBehavior.scala
|
Scala
|
apache-2.0
| 6,823
|
package com.taxis99.aws
import scala.collection.JavaConversions._
import com.amazonaws.{ Protocol, ClientConfiguration }
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3.AmazonS3Client
/**
 * Helper for working with the S3 interface.
*/
class S3Helper(accessKey: String, secretKey: String, bucketName: String, endpoint: String) {
def createClient(): AmazonS3Client = {
val clientConfig = new ClientConfiguration()
clientConfig.setProtocol(Protocol.HTTP)
val client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey), clientConfig)
client.setEndpoint(endpoint)
client
}
lazy val client = createClient()
def listFiles(prefix: String) = {
client.listObjects(bucketName, prefix).getObjectSummaries.sortBy(_.getLastModified).reverse
}
}
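/*
 * Hedged usage sketch, not part of the original file: shows how the helper above might be
 * used. The credentials, bucket name, endpoint and prefix are placeholders.
 */
object S3HelperUsageSketch {
  def main(args: Array[String]): Unit = {
    val helper = new S3Helper("ACCESS_KEY", "SECRET_KEY", "my-bucket", "s3.sa-east-1.amazonaws.com")
    // listFiles sorts the object summaries by last-modified time, newest first.
    helper.listFiles("reports/").take(5).foreach(summary => println(summary.getKey))
  }
}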
|
mtrovo/awsscala
|
src/main/scala/com/taxis99/aws/S3Helper.scala
|
Scala
|
apache-2.0
| 809
|
package com.github.dronegator.nlp.vocabulary
import com.github.dronegator.nlp.common.Probability
import com.github.dronegator.nlp.component.tokenizer.Tokenizer.{Token, Word}
/**
* Created by cray on 9/18/16.
*/
object VocabularyImplStored {
def apply(vocabulary: Vocabulary) =
new VocabularyImplStored(
vocabulary.wordMap,
vocabulary.pToken.map(identity),
vocabulary.pNGram2,
vocabulary.pNGram3,
vocabulary.map1ToNext,
vocabulary.map2ToNext,
vocabulary.map1ToPrev,
vocabulary.map2ToPrev,
vocabulary.map2ToMiddle,
vocabulary.map1ToNextPhrase,
vocabulary.map1ToTheSamePhrase,
vocabulary.statements,
vocabulary.nGram1,
vocabulary.nGram2,
vocabulary.nGram3,
vocabulary.tokenMap,
vocabulary.phraseCorrelationRepeated,
vocabulary.phraseCorrelationConsequent,
vocabulary.phraseCorrelationInner,
vocabulary.meaningMap,
vocabulary.sense,
vocabulary.auxiliary
)
}
case class VocabularyImplStored(wordMap: Map[Token, Word],
pToken: Map[List[Token], Double],
pNGram2: Map[List[Token], Double],
pNGram3: Map[List[Token], Double],
map1ToNext: Map[List[Token], List[(Double, Token)]],
map2ToNext: Map[List[Token], List[(Double, Token)]],
map1ToPrev: Map[List[Token], List[(Double, Token)]],
map2ToPrev: Map[List[Token], List[(Double, Token)]],
map2ToMiddle: Map[List[Token], List[(Double, Token)]],
map1ToNextPhrase: Map[Token, List[(Token, Double)]],
map1ToTheSamePhrase: Map[Token, List[(Token, Probability)]],
statements: List[List[Token]],
nGram1: Map[List[Token], Int],
nGram2: Map[List[Token], Int],
nGram3: Map[List[Token], Int],
tokenMap: Map[Word, List[Token]],
phraseCorrelationRepeated: Map[Token, Int],
phraseCorrelationConsequent: Map[List[Token], Int],
phraseCorrelationInner: Map[List[Token], Int],
meaningMap: Map[(Token, Token), (Probability, Probability)],
sense: Set[Token],
auxiliary: Set[Token])
extends Vocabulary
with VocabularyImplTrait
|
dronegator/nlp
|
wordmetrix/src/main/scala/com/github/dronegator/nlp/vocabulary/VocabularyImplStored.scala
|
Scala
|
apache-2.0
| 2,674
|
def canEqual(other: Any) = other.isInstanceOf[Rectangle]
override def equals(other: Any) =
if (other.isInstanceOf[Rectangle]) {
val otherRectangle = other.asInstanceOf[Rectangle]
otherRectangle.canEqual(this) &&
this.a == otherRectangle.a &&
this.b == otherRectangle.b
}
else false
override def hashCode = (a + b).toInt
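// Hedged illustration, not part of the original exercise file: one possible enclosing class
// for the members above, assuming `a` and `b` are the rectangle's side lengths. The name
// RectangleSketch is made up so it does not clash with the exercise's own Rectangle.
class RectangleSketch(val a: Double, val b: Double) {
  def canEqual(other: Any): Boolean = other.isInstanceOf[RectangleSketch]
  override def equals(other: Any): Boolean = other match {
    case that: RectangleSketch => that.canEqual(this) && this.a == that.a && this.b == that.b
    case _ => false
  }
  // Equal rectangles hash equally; a weak but consistent choice, mirroring the step above.
  override def hashCode: Int = (a + b).toInt
}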
|
grzegorzbalcerek/scala-exercises
|
Figures/stepRectangleEquals.scala
|
Scala
|
bsd-2-clause
| 342
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.continuous
import java.io.{File, InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import scala.reflect.ClassTag
import scala.util.control.ControlThrowable
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.spark.{SparkContext, SparkEnv}
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.Range
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2ScanExec, WriteToDataSourceV2Exec}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.continuous._
import org.apache.spark.sql.execution.streaming.sources.MemorySinkV2
import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreConf, StateStoreId, StateStoreProvider}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.StreamSourceProvider
import org.apache.spark.sql.streaming.{StreamTest, Trigger}
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.test.TestSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class ContinuousSuiteBase extends StreamTest {
// We need more than the default local[2] to be able to schedule all partitions simultaneously.
override protected def createSparkSession = new TestSparkSession(
new SparkContext(
"local[10]",
"continuous-stream-test-sql-context",
sparkConf.set("spark.sql.testkey", "true")))
protected def waitForRateSourceTriggers(query: StreamExecution, numTriggers: Int): Unit = {
query match {
case s: ContinuousExecution =>
assert(numTriggers >= 2, "must wait for at least 2 triggers to ensure query is initialized")
val reader = s.lastExecution.executedPlan.collectFirst {
case DataSourceV2ScanExec(_, r: ContinuousRateStreamReader) => r
}.get
val deltaMs = numTriggers * 1000 + 300
while (System.currentTimeMillis < reader.creationTime + deltaMs) {
Thread.sleep(reader.creationTime + deltaMs - System.currentTimeMillis)
}
}
}
  // A continuous trigger with an interval long enough that it fires only once for the duration
  // of a test. This allows clean testing with manual epoch advancement.
protected val longContinuousTrigger = Trigger.Continuous("1 hour")
}
class ContinuousSuite extends ContinuousSuiteBase {
import testImplicits._
test("basic rate source") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 10).map(Row(_))),
StopStream,
StartStream(longContinuousTrigger),
AwaitEpoch(2),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 20).map(Row(_))),
StopStream)
}
test("map") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
.map(r => r.getLong(0) * 2)
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
Execute(waitForRateSourceTriggers(_, 4)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 40, 2).map(Row(_))))
}
test("flatMap") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
.flatMap(r => Seq(0, r.getLong(0), r.getLong(0) * 2))
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
Execute(waitForRateSourceTriggers(_, 4)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 20).flatMap(n => Seq(0, n, n * 2)).map(Row(_))))
}
test("filter") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
.where('value > 5)
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
Execute(waitForRateSourceTriggers(_, 4)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(6, 20).map(Row(_))))
}
test("deduplicate") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
.dropDuplicates()
val except = intercept[AnalysisException] {
testStream(df, useV2Sink = true)(StartStream(longContinuousTrigger))
}
assert(except.message.contains(
"Continuous processing does not support Deduplicate operations."))
}
test("timestamp") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select(current_timestamp())
val except = intercept[AnalysisException] {
testStream(df, useV2Sink = true)(StartStream(longContinuousTrigger))
}
assert(except.message.contains(
"Continuous processing does not support current time operations."))
}
test("repeatedly restart") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "5")
.load()
.select('value)
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 10).map(Row(_))),
StopStream,
StartStream(longContinuousTrigger),
StopStream,
StartStream(longContinuousTrigger),
StopStream,
StartStream(longContinuousTrigger),
AwaitEpoch(2),
Execute(waitForRateSourceTriggers(_, 2)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 20).map(Row(_))),
StopStream)
}
test("query without test harness") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "2")
.option("rowsPerSecond", "2")
.load()
.select('value)
val query = df.writeStream
.format("memory")
.queryName("noharness")
.trigger(Trigger.Continuous(100))
.start()
val continuousExecution =
query.asInstanceOf[StreamingQueryWrapper].streamingQuery.asInstanceOf[ContinuousExecution]
continuousExecution.awaitEpoch(0)
waitForRateSourceTriggers(continuousExecution, 2)
query.stop()
val results = spark.read.table("noharness").collect()
assert(Set(0, 1, 2, 3).map(Row(_)).subsetOf(results.toSet))
}
}
class ContinuousStressSuite extends ContinuousSuiteBase {
import testImplicits._
test("only one epoch") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "500")
.load()
.select('value)
testStream(df, useV2Sink = true)(
StartStream(longContinuousTrigger),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 201)),
IncrementEpoch(),
Execute { query =>
val data = query.sink.asInstanceOf[MemorySinkV2].allData
val vals = data.map(_.getLong(0)).toSet
assert(scala.Range(0, 25000).forall { i =>
vals.contains(i)
})
})
}
test("automatic epoch advancement") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "500")
.load()
.select('value)
testStream(df, useV2Sink = true)(
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(0),
Execute(waitForRateSourceTriggers(_, 201)),
IncrementEpoch(),
CheckAnswerRowsContains(scala.Range(0, 25000).map(Row(_))))
}
test("restarts") {
val df = spark.readStream
.format("rate")
.option("numPartitions", "5")
.option("rowsPerSecond", "500")
.load()
.select('value)
testStream(df, useV2Sink = true)(
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(10),
StopStream,
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(20),
StopStream,
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(21),
StopStream,
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(22),
StopStream,
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(25),
StopStream,
StartStream(Trigger.Continuous(2012)),
StopStream,
StartStream(Trigger.Continuous(2012)),
AwaitEpoch(50),
CheckAnswerRowsContains(scala.Range(0, 25000).map(Row(_))))
}
}
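/*
 * Hedged illustration, not part of the original suite: a minimal sketch of starting a
 * continuous-mode query directly, without the StreamTest harness used above. The sink
 * format, query name, trigger interval and run duration are placeholders.
 */
object ContinuousQuerySketch {
  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.streaming.Trigger

  def run(spark: SparkSession): Unit = {
    import spark.implicits._
    val query = spark.readStream
      .format("rate")
      .option("numPartitions", "2")
      .option("rowsPerSecond", "2")
      .load()
      .select('value)
      .writeStream
      .format("memory")
      .queryName("continuous_sketch")
      .trigger(Trigger.Continuous("1 second"))
      .start()
    query.awaitTermination(10000L) // let it run for roughly 10 seconds
    query.stop()
  }
}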
|
saltstar/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousSuite.scala
|
Scala
|
apache-2.0
| 10,382
|