code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package net.thereturningvoid.advancedmod.proxies
/**
 * Base class for sided proxies. Subclasses provide client- or server-specific
 * behavior for the three lifecycle hooks below (presumably the FML/Forge mod
 * lifecycle phases — confirm against the mod's entry point).
 */
abstract class CommonProxy {
/** First lifecycle phase hook. */
def preInit(): Unit
/** Second lifecycle phase hook. */
def init(): Unit
/** Final lifecycle phase hook. */
def postInit(): Unit
}
| TheReturningVoid/mc-modding-guide | src/main/scala/net/thereturningvoid/advancedmod/proxies/CommonProxy.scala | Scala | gpl-3.0 | 146 |
package sorm
import embrace._
import sext._
import sorm.core._
import sorm.mappings._
import sorm.reflection._
import scala.reflect.runtime.universe._
/**
* The instance of SORM
* @param entities A list of entity settings describing the entities to be
* registered with this instance
* @param url A url of database to connect to. For instance, to connect to a
* database `test` on MySQL server running on your computer it will
* be: `jdbc:mysql://localhost/test`
* @param user A username used for connection
* @param password A password used for connection
* @param poolSize A size of connection pool. Determines how many connections
* to the db will be kept at max. Useful for multithreaded
* databases.
* @param initMode An initialization mode for this instance
* @param timeout Amount of seconds the underlying connections may remain idle.
* Determines how often the "keepalive" queries will be emitted.
*/
// Concrete SORM instance: performs the one-time initialization (entity validation,
// mapping construction, schema creation) via Instance.Initialization, then exposes
// the query/persistence operations via Instance.Api.
class Instance
( entities : Traversable[Entity],
url : String,
user : String = "",
password : String = "",
poolSize : Int = 1,
initMode : InitMode = InitMode.Create,
timeout : Int = 30 )
extends Instance.Initialization(entities, url, user, password, poolSize, initMode, timeout)
with Instance.Api
object Instance {
trait Api {
/** Connection pool used by all operations; supplied by [[Instance.Initialization]]. */
protected val connector : Connector
/** Mapping of each registered entity's reflection to its [[EntityMapping]]; supplied by [[Instance.Initialization]]. */
protected val mappings : Map[Reflection, EntityMapping]
/**
 * Resolves the [[EntityMapping]] for the entity type `T`.
 * Throws a [[SormException]] if `T` was not registered with this instance.
 */
protected def mapping
[ T : TypeTag ]
= {
def mapping( r : Reflection )
= mappings.get(r)
.getOrElse {
throw new SormException(
"Entity `" + r.name + "` is not registered"
)
}
mapping(Reflection[T].mixinBasis)
}
/**
* Return the [[sorm.Querier]] object for performing a read-query on a specified entity type.
*
* @tparam T The entity type
* @return The accessor object. An abstraction over all kinds of supported SELECT-queries.
*/
def query[T <: Persistable : TypeTag]
= Querier[T](mapping, connector)
/**
* Select entities using a plain sql statement. This function allows to execute custom queries which in certain situations may be better optimized than the ones generated by SORM.
*
* Please note that the statement must select only the `_id` column.
*
* ==Example:==
* {{{
* Db.fetchWithSql[Artist]("SELECT _id FROM artist WHERE name=? || name=?", "Beatles", "The Beatles")
* }}}
* @param template The sql with question-symbols used as placeholders for values
* @param values The values
* @tparam T The type of entities to fetch
* @return Matching entities of type `T`
*/
def fetchWithSql
[ T <: Persistable : TypeTag ]
( template : String,
values : Any* )
: Seq[T]
= connector.withConnection{ cx =>
jdbc.Statement.simple(template, values)
.$( cx.queryJdbc(_)(_.byNameRowsTraversable.toList).toStream )
// Guard: the custom statement must expose exactly the `_id` column.
// Only the first row is inspected; an empty result passes trivially.
.ensuring(
_.headOption
.map( k => k.keys == Set("_id") )
.getOrElse(true),
"The sql-statement must select only the `_id`-column"
)
// Each returned id is resolved to a full entity via its mapping.
.map(
mapping[T].fetchByPrimaryKey(_, cx).asInstanceOf[T]
)
.toList
}
/**
* Fetch an existing entity by _id. Will throw an exception if the entity doesn't exist.
* @param id The id
* @return An entity instance
*/
def fetchById
[ T <: Persistable : TypeTag ]
( id : Long )
: T
= connector.withConnection{ cx =>
// Builds Map("_id" -> id) and fetches the entity by its primary key.
id $ ("_id" -> _) $ (Map(_)) $ (mapping[T].fetchByPrimaryKey(_, cx).asInstanceOf[T])
}
/**
* Save the entity. An Abstraction over INSERT and UPDATE-queries. Which one to perform will be decided based on whether the [[sorm.Persistable]] trait is mixed in the value you provide.
* @param value The value to save
* @return The saved entity instance
*/
def save[T <: Persistable : TypeTag](value : T): T
= connector.withConnection{ cx =>
mapping[T].save(value, cx).asInstanceOf[T]
}
/**
* Saves the entity by overwriting the existing one if one with the matching unique keys exists
* and creating a new one otherwise.
* Executing simply [[sorm.Instance.Api#save]] in a situation of unique keys clash would have thrown an exception.
* Beware that in case when not all unique keys are matched this method will still throw an exception.
* @param value The value to save
* @return The saved entity instance
*/
// NOTE(review): `saveByUniqueKeys` is documented above but its implementation is
// currently disabled (commented out below) — the scaladoc describes intended,
// not available, behavior.
// def saveByUniqueKeys
// [ T <: Persistable : TypeTag ]
// ( value : T )
// : T
// = (mapping[T].uniqueKeys.flatten zipBy value.reflected.propertyValue)
// // todo: check the unique entities
// .ensuring(_.nonEmpty, "Type doesn't have unique keys")
// .foldLeft(query){ case (q, (n, v)) => q.whereEqual(n, v) }
// .$(q =>
// connector.withConnection{ cx =>
// cx.transaction {
// q.fetchOneId()
// .map(Persisted(value, _))
// .getOrElse(value)
// .$(mapping[T].save(_, cx).asInstanceOf[T])
// }
// }
// )
/**
* Delete a persisted entity
* @param value The entity
* @tparam T The entity
*/
def delete[T <: Persistable : TypeTag ](value : T)
= connector.withConnection{ cx => mapping[T].delete(value, cx) }
/**
* Perform several db-requests in a single transaction. For most dbs this provides guarantees that nothing will be changed in between the db-requests in multithreaded applications and that it will roll-back in case of any failure.
*
* All db-requests which should be executed as part of this transaction must be run on the same thread this method gets called on.
*
* Use transactions with care because for the time the transaction is being executed the involved tables are supposed to get locked, putting all the requests to them from other threads in a queue until the current transaction finishes. The best practice is to make transactions as short as possible and to perform any calculations prior to entering transaction.
*
* @param t The closure wrapping the actions performed in a single transaction.
* @tparam T The result of the closure
* @return The result of the last statement of the passed in closure
*/
def transaction[T]( t : => T ): T = connector.withConnection{ cx => cx.transaction(t) }
/**
* Current DateTime at DB server. Effectively fetches the date only once to calculate the deviation.
*/
lazy val now = {
// The DB time and the local clock are sampled once; subsequent calls add the
// elapsed local time to the sampled DB base instead of re-querying the server.
val base = connector.withConnection(_.now())
val systemBase = System.currentTimeMillis()
() => base.plusMillis((System.currentTimeMillis() - systemBase).toInt)
}
/**
* Free all the underlying resources. Useful in multi-instance tests
*/
def close() = connector.close()
}
/**
 * One-time setup of a SORM instance: creates the connection pool, validates
 * the registered entities, builds the entity mappings, validates them and
 * initializes the database schema according to `initMode`.
 *
 * Any validation failure is raised eagerly as a [[ValidationException]] during
 * construction, so an `Instance` is never observable in a half-initialized state.
 */
abstract class Initialization
( entities : Traversable[Entity],
url : String,
user : String = "",
password : String = "",
poolSize : Int = 1,
initMode : InitMode = InitMode.Create,
timeout : Int )
{
import core.Initialization._
/** Connection pool shared by the instance API. */
protected val connector = new Connector(url, user, password, poolSize, timeout)
// Validate entities (must be prior to mappings creation due to possible mapping
// kind detection errors):
validateEntities(entities.toSeq).headOption.map(new ValidationException(_)).foreach(throw _)
// NOTE(review): the leftover debug instrumentation (a public `var tic` and four
// timing `println`s) has been removed — stdout timing output does not belong in
// library initialization.
/** Reflection of each registered entity mapped to its [[EntityMapping]]. */
protected val mappings
= {
val settings
= entities.view
.map{ e =>
e.reflection -> EntitySettings(e.indexed, e.unique)
}
.toMap
settings.keys
.zipBy{ new EntityMapping(_, None, settings) }
.toMap
}
// Validate the constructed mappings; the first problem found aborts construction:
mappings.values.toStream $ validateMapping map (new ValidationException(_)) foreach (throw _)
// Initialize a db schema:
initializeSchema(mappings.values, connector, initMode)
}
/** Signals invalid entity settings or mappings detected during instance initialization. */
class ValidationException ( m : String ) extends SormException(m)
}
| cllu/sorm2 | src/main/scala/sorm/Instance.scala | Scala | mit | 8,815 |
package sigmastate.interpreter
import sigmastate.{CostKind, SMethod}
import sigmastate.Values.ValueCompanion
/** Each costable operation is described in one of the following ways:
* 1) using [[ValueCompanion]] - operation with separate node class
* 2) using [[SMethod]] - operation represented as method.
* 3) using string name - intermediate sub-operation present in cost model, but
* which is not a separate operation of ErgoTree.
*/
abstract class OperationDesc {
/** Name identifying this operation (used by the cost model). */
def operationName: String
}
/** Operation descriptor based on [[ValueCompanion]]. */
/** Operation descriptor based on [[ValueCompanion]]. */
case class CompanionDesc(companion: ValueCompanion) extends OperationDesc {
// The operation name is simply the companion's ErgoTree node type name.
override def operationName: String = companion.typeName
}
/** Operation descriptor based on [[SMethod]]. */
/** Operation descriptor based on [[SMethod]].
  *
  * Identity is defined by the `(objType.typeId, methodId)` pair alone, which is
  * why `equals`/`hashCode` are overridden instead of relying on the case-class
  * defaults (which would compare the whole [[SMethod]]).
  */
case class MethodDesc(method: SMethod) extends OperationDesc {
  override def operationName: String = method.opName

  override def toString: String = s"MethodDesc(${method.opName})"

  // Pack the two identifying ids into one Int: type id in the high bits,
  // method id in the low 8 bits.
  override def hashCode(): Int = (method.objType.typeId << 8) | method.methodId

  override def equals(obj: Any): Boolean =
    this.eq(obj.asInstanceOf[AnyRef]) || (obj match {
      // A `null` argument never matches this case, so no explicit null check
      // is needed.
      case that: MethodDesc =>
        method.objType.typeId == that.method.objType.typeId &&
        method.methodId == that.method.methodId
      case _ => false
    })
}
/** Operation descriptor based on a plain name: an intermediate sub-operation
  * present in the cost model but not a separate ErgoTree operation. */
case class NamedDesc(operationName: String) extends OperationDesc
/** Operation costing descriptors combined together: how the operation is
  * costed ([[CostKind]]) plus how it is identified ([[OperationDesc]]). */
case class OperationCostInfo[C <: CostKind](costKind: C, opDesc: OperationDesc)
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/main/scala/sigmastate/interpreter/OperationDesc.scala | Scala | mit | 1,581 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.serializer
import com.esotericsoftware.kryo.{Kryo, Serializer => KryoSerializer}
import com.typesafe.config.Config
import org.slf4j.Logger
import org.apache.gearpump.util.{Constants, LogUtil}
class GearpumpSerialization(config: Config) {
  private val LOG: Logger = LogUtil.getLogger(getClass)

  /**
   * Configures the given Kryo instance with the serializers declared under
   * `Constants.GEARPUMP_SERIALIZERS` in the config.
   *
   * Each config entry maps a class name to an optional serializer class name;
   * an empty value registers the class with Kryo's default serializer.
   */
  def customize(kryo: Kryo): Unit = {
    val serializationMap = configToMap(config, Constants.GEARPUMP_SERIALIZERS)
    serializationMap.foreach { kv =>
      val (key, value) = kv
      val keyClass = Class.forName(key)
      if (value == null || value.isEmpty) {
        // Use default serializer for this class type
        kryo.register(keyClass)
      } else {
        val valueClass = Class.forName(value)
        // `Class.newInstance` is deprecated (Java 9+) and rethrows checked
        // constructor exceptions unwrapped; invoke the no-arg constructor
        // explicitly instead.
        val serializer = valueClass.getDeclaredConstructor().newInstance()
          .asInstanceOf[KryoSerializer[_]]
        val register = kryo.register(keyClass, serializer)
        LOG.debug(s"Registering ${keyClass}, id: ${register.getId}")
      }
    }
    kryo.setReferences(false)
    // Requires the user to register the class first before using
    kryo.setRegistrationRequired(true)
  }

  // Flattens the config object at `path` into a plain String -> String map.
  private final def configToMap(config: Config, path: String) = {
    import scala.collection.JavaConverters._
    config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString }
  }
} | manuzhang/incubator-gearpump | core/src/main/scala/org/apache/gearpump/serializer/GearpumpSerialization.scala | Scala | apache-2.0 | 2,101 |
/*
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.libs.json
import play.api.data.validation.ValidationError
/** A single step of a [[JsPath]]: an object-key lookup, an array index, or a
  * recursive key search. */
sealed trait PathNode {
/** All values reachable from `json` through this node (empty list if none). */
def apply(json: JsValue): List[JsValue]
def toJsonString: String
private[json] def splitChildren(json: JsValue): List[Either[(PathNode, JsValue), (PathNode, JsValue)]]
/** Returns a copy of `json` with the value(s) selected by this node transformed. */
def set(json: JsValue, transform: JsValue => JsValue): JsValue
private[json] def toJsonField(value: JsValue): JsValue = value
}
/** Path node performing a recursive search for `key` anywhere below the current value. */
case class RecursiveSearch(key: String) extends PathNode {
def apply(json: JsValue): List[JsValue] = json match {
case obj: JsObject => (json \\\\ key).toList
case arr: JsArray => (json \\\\ key).toList
case _ => Nil
}
override def toString = "//" + key
def toJsonString = "*" + key
/**
* Transforms every field named `key` at any object level, without descending
* into a field once it has been transformed.
*
* NOTE(review): the original carried a dead `var found` flag (written, never
* read) — apparently an unfinished "stop after first match" guard; it has been
* removed. Behavior is unchanged: all matches at every level are transformed.
*/
def set(json: JsValue, transform: JsValue => JsValue): JsValue = json match {
case obj: JsObject =>
JsObject(obj.fields.map {
case (k, v) =>
if (k == this.key) k -> transform(v)
else k -> set(v, transform)
})
case _ => json
}
private[json] def splitChildren(json: JsValue) = json match {
case obj: JsObject => obj.fields.toList.map {
case (k, v) =>
if (k == this.key) Right(this -> v)
else Left(KeyPathNode(k) -> v)
}
case arr: JsArray =>
arr.value.toList.zipWithIndex.map { case (js, j) => Left(IdxPathNode(j) -> js) }
case _ => List()
}
}
/** Path node selecting the value under object key `key`. */
case class KeyPathNode(key: String) extends PathNode {
def apply(json: JsValue): List[JsValue] = json match {
case obj: JsObject => List(json \\ key).flatMap(_.toOption)
case _ => List()
}
override def toString = "/" + key
def toJsonString = "." + key
// Transforms the value under `key`. If the key is absent, the branch is created
// by applying `transform` to an empty object; non-objects are transformed directly.
def set(json: JsValue, transform: JsValue => JsValue): JsValue = json match {
case obj: JsObject =>
var found = false
val o = JsObject(obj.fields.map {
case (k, v) =>
if (k == this.key) {
found = true
k -> transform(v)
} else k -> v
})
if (!found) o ++ Json.obj(this.key -> transform(Json.obj()))
else o
case _ => transform(json)
}
private[json] def splitChildren(json: JsValue) = json match {
case obj: JsObject => obj.fields.toList.map {
case (k, v) =>
if (k == this.key) Right(this -> v)
else Left(KeyPathNode(k) -> v)
}
case _ => List()
}
private[json] override def toJsonField(value: JsValue) = Json.obj(key -> value)
}
/** Path node selecting element `idx` of an array. */
case class IdxPathNode(idx: Int) extends PathNode {
def apply(json: JsValue): List[JsValue] = json match {
case arr: JsArray => List(arr(idx)).flatMap(_.toOption)
case _ => List()
}
override def toString = "(%d)".format(idx)
def toJsonString = "[%d]".format(idx)
// Transforms the element at `idx`, leaving siblings untouched; a non-array
// input is transformed directly.
def set(json: JsValue, transform: JsValue => JsValue): JsValue = json match {
case arr: JsArray => JsArray(arr.value.zipWithIndex.map { case (js, j) => if (j == idx) transform(js) else js })
case _ => transform(json)
}
private[json] def splitChildren(json: JsValue) = json match {
case arr: JsArray => arr.value.toList.zipWithIndex.map {
case (js, j) =>
if (j == idx) Right(this -> js)
else Left(IdxPathNode(j) -> js)
}
case _ => List()
}
private[json] override def toJsonField(value: JsValue) = value
}
/** The empty path (also usable as `__`); companion with object-building helpers. */
object JsPath extends JsPath(List.empty) {
// TODO implement it correctly (doesn't merge )
/**
 * Builds a JsObject from (path, value) pairs by constructing each branch and
 * deep-merging them. Only paths made of [[KeyPathNode]]s are supported; any
 * other node kind throws a RuntimeException.
 */
def createObj(pathValues: (JsPath, JsValue)*) = {
def buildSubPath(path: JsPath, value: JsValue) = {
def step(path: List[PathNode], value: JsValue): JsObject = {
path match {
case List() => value match {
case obj: JsObject => obj
case _ => throw new RuntimeException("when empty JsPath, expecting JsObject")
}
case List(p) => p match {
case KeyPathNode(key) => Json.obj(key -> value)
case _ => throw new RuntimeException("expected KeyPathNode")
}
case head :: tail => head match {
case KeyPathNode(key) => Json.obj(key -> step(tail, value))
case _ => throw new RuntimeException("expected KeyPathNode")
}
}
}
step(path.path, value)
}
pathValues.foldLeft(Json.obj()) { (obj, pv) =>
val (path, value) = (pv._1, pv._2)
val subobj = buildSubPath(path, value)
obj.deepMerge(subobj)
}
}
}
/** A location in a JsValue, expressed as a sequence of [[PathNode]] steps. */
case class JsPath(path: List[PathNode] = List()) {
/** Appends an object-key step. */
def \\(child: String) = JsPath(path :+ KeyPathNode(child))
def \\(child: Symbol) = JsPath(path :+ KeyPathNode(child.name))
/** Appends a recursive-search step. */
def \\\\(child: String) = JsPath(path :+ RecursiveSearch(child))
def \\\\(child: Symbol) = JsPath(path :+ RecursiveSearch(child.name))
/** Appends an array-index step. */
def apply(idx: Int): JsPath = JsPath(path :+ IdxPathNode(idx))
// Resolves the path against `json`: each node is applied to every value
// produced by the previous node.
def apply(json: JsValue): List[JsValue] = path.foldLeft(List(json))((s, p) => s.flatMap(p.apply))
// As apply, but demands exactly one result; zero or several yield a JsError.
def asSingleJsResult(json: JsValue): JsResult[JsValue] = this(json) match {
case Nil => JsError(Seq(this -> Seq(ValidationError("error.path.missing"))))
case List(js) => JsSuccess(js)
case _ :: _ => JsError(Seq(this -> Seq(ValidationError("error.path.result.multiple"))))
}
// As asSingleJsResult, but reported through the JsLookupResult ADT.
def asSingleJson(json: JsValue): JsLookupResult = this(json) match {
case Nil => JsUndefined("error.path.missing")
case List(js) => JsDefined(js)
case _ :: _ => JsUndefined("error.path.result.multiple")
}
/**
 * Resolves the path, distinguishing where a failure happened:
 * Left(error) when an intermediate node is missing/ambiguous,
 * Right(result) once the last node is reached (the result itself may still be
 * a JsError if the final value is missing or ambiguous).
 */
def applyTillLast(json: JsValue): Either[JsError, JsResult[JsValue]] = {
def step(path: List[PathNode], json: JsValue): Either[JsError, JsResult[JsValue]] = path match {
case Nil => Left(JsError(Seq(this -> Seq(ValidationError("error.path.empty")))))
case List(node) => node(json) match {
case Nil => Right(JsError(Seq(this -> Seq(ValidationError("error.path.missing")))))
case List(js) => Right(JsSuccess(js))
case _ :: _ => Right(JsError(Seq(this -> Seq(ValidationError("error.path.result.multiple")))))
}
case head :: tail => head(json) match {
case Nil => Left(JsError(Seq(this -> Seq(ValidationError("error.path.missing")))))
case List(js) => step(tail, js)
case _ :: _ => Left(JsError(Seq(this -> Seq(ValidationError("error.path.result.multiple")))))
}
}
step(path, json)
}
override def toString = path.mkString
def toJsonString = path.foldLeft("obj")((acc, p) => acc + p.toJsonString)
/** Concatenates two paths. */
def compose(other: JsPath) = JsPath(path ++ other.path)
def ++(other: JsPath) = this compose other
/**
 * Simple Prune for simple path and only JsObject
 */
def prune(js: JsValue) = {
// Removes `node`'s key from `json`; only KeyPathNode is prunable.
def stepNode(json: JsObject, node: PathNode): JsResult[JsObject] = {
node match {
case KeyPathNode(key) => JsSuccess(json - key)
case _ => JsError(JsPath(), ValidationError("error.expected.keypathnode"))
}
}
// Replaces the value under `node`'s key with `value` (dropping any previous one).
def filterPathNode(json: JsObject, node: PathNode, value: JsValue): JsResult[JsObject] = {
node match {
case KeyPathNode(key) => JsSuccess(JsObject(json.fields.filterNot(_._1 == key)) ++ Json.obj(key -> value))
case _ => JsError(JsPath(), ValidationError("error.expected.keypathnode"))
}
}
// Walks down `lpath`; at the last node the key is pruned, and the pruned
// subtree is stitched back into each parent on the way up.
def step(json: JsObject, lpath: JsPath): JsResult[JsObject] = {
lpath.path match {
case Nil => JsSuccess(json)
case List(p) => stepNode(json, p).repath(lpath)
case head :: tail => head(json) match {
case Nil => JsError(lpath, ValidationError("error.path.missing"))
case List(js) =>
js match {
case o: JsObject =>
step(o, JsPath(tail)).repath(lpath).flatMap(value =>
filterPathNode(json, head, value)
)
case _ => JsError(lpath, ValidationError("error.expected.jsobject"))
}
case h :: t => JsError(lpath, ValidationError("error.path.result.multiple"))
}
}
}
js match {
case o: JsObject => step(o, this) match {
case s: JsSuccess[JsObject] => s.copy(path = this)
case e => e
}
case _ =>
JsError(this, ValidationError("error.expected.jsobject"))
}
}
/** Reads a T at JsPath */
def read[T](implicit r: Reads[T]): Reads[T] = Reads.at[T](this)(r)
/**
* Reads a Option[T] search optional or nullable field at JsPath (field not found or null is None
* and other cases are Error).
*
* It runs through JsValue following all JsPath nodes on JsValue except last node:
* - If one node in JsPath is not found before last node => returns JsError( "missing-path" )
* - If all nodes are found till last node, it runs through JsValue with last node =>
* - If last node is not found => returns None
* - If last node is found with value "null" => returns None
* - If last node is found => applies implicit Reads[T]
*/
def readNullable[T](implicit r: Reads[T]): Reads[Option[T]] = Reads.nullable[T](this)(r)
/**
* Reads a T at JsPath using the explicit Reads[T] passed by name which is useful in case of
* recursive case classes for ex.
*
* {{{
* case class User(id: Long, name: String, friend: User)
*
* implicit lazy val UserReads: Reads[User] = (
* (__ \\ 'id).read[Long] and
* (__ \\ 'name).read[String] and
* (__ \\ 'friend).lazyRead(UserReads)
* )(User.apply _)
* }}}
*/
// `r` is by-name: it is only evaluated when `reads` is actually invoked,
// which is what breaks the initialization cycle for recursive structures.
def lazyRead[T](r: => Reads[T]): Reads[T] = Reads(js => Reads.at[T](this)(r).reads(js))
/**
* Reads lazily a Option[T] search optional or nullable field at JsPath using the explicit Reads[T]
* passed by name which is useful in case of recursive case classes for ex.
*
* {{{
* case class User(id: Long, name: String, friend: Option[User])
*
* implicit lazy val UserReads: Reads[User] = (
* (__ \\ 'id).read[Long] and
* (__ \\ 'name).read[String] and
* (__ \\ 'friend).lazyReadNullable(UserReads)
* )(User.apply _)
* }}}
*/
def lazyReadNullable[T](r: => Reads[T]): Reads[Option[T]] = Reads(js => Reads.nullable[T](this)(r).reads(js))
/** Pure Reads doesn't read anything but creates a JsObject based on JsPath with the given T value */
def read[T](t: T) = Reads.pure(t)
/** Writes a T at given JsPath */
def write[T](implicit w: Writes[T]): OWrites[T] = Writes.at[T](this)(w)
/**
* Writes a Option[T] at given JsPath
* If None => doesn't write the field (never writes null actually)
* else => writes the field using implicit Writes[T]
*/
def writeNullable[T](implicit w: Writes[T]): OWrites[Option[T]] = Writes.nullable[T](this)(w)
/**
* Writes a T at JsPath using the explicit Writes[T] passed by name which is useful in case of
* recursive case classes for ex
*
* {{{
* case class User(id: Long, name: String, friend: User)
*
* implicit lazy val UserReads: Reads[User] = (
* (__ \\ 'id).write[Long] and
* (__ \\ 'name).write[String] and
* (__ \\ 'friend).lazyWrite(UserReads)
* )(User.apply _)
* }}}
*/
// Mirror of lazyRead: `w` is by-name so recursive Writes definitions terminate.
def lazyWrite[T](w: => Writes[T]): OWrites[T] = OWrites((t: T) => Writes.at[T](this)(w).writes(t))
/**
* Writes a Option[T] at JsPath using the explicit Writes[T] passed by name which is useful in case of
* recursive case classes for ex
*
* Please note that it's not writeOpt to be coherent with readNullable
*
* {{{
* case class User(id: Long, name: String, friend: Option[User])
*
* implicit lazy val UserReads: Reads[User] = (
* (__ \\ 'id).write[Long] and
* (__ \\ 'name).write[String] and
* (__ \\ 'friend).lazyWriteNullable(UserReads)
* )(User.apply _)
* }}}
*/
def lazyWriteNullable[T](w: => Writes[T]): OWrites[Option[T]] = OWrites((t: Option[T]) => Writes.nullable[T](this)(w).writes(t))
/** Writes a pure value at given JsPath */
def write[T](t: T)(implicit w: Writes[T]): OWrites[JsValue] = Writes.pure(this, t)
/** Reads/Writes a T at JsPath using provided implicit Format[T] */
def format[T](implicit f: Format[T]): OFormat[T] = Format.at[T](this)(f)
/** Reads/Writes a T at JsPath using provided explicit Reads[T] and implicit Writes[T]*/
def format[T](r: Reads[T])(implicit w: Writes[T]): OFormat[T] = Format.at[T](this)(Format(r, w))
/** Reads/Writes a T at JsPath using provided explicit Writes[T] and implicit Reads[T]*/
def format[T](w: Writes[T])(implicit r: Reads[T]): OFormat[T] = Format.at[T](this)(Format(r, w))
/**
* Reads/Writes a T at JsPath using provided implicit Reads[T] and Writes[T]
*
* Please note we couldn't call it "format" to prevent conflicts
*/
def rw[T](implicit r: Reads[T], w: Writes[T]): OFormat[T] = Format.at[T](this)(Format(r, w))
/**
* Reads/Writes a Option[T] (optional or nullable field) at given JsPath
*
* @see JsPath.readNullable to see behavior in reads
* @see JsPath.writeNullable to see behavior in writes
*/
def formatNullable[T](implicit f: Format[T]): OFormat[Option[T]] = Format.nullable[T](this)(f)
/**
* Lazy Reads/Writes a T at given JsPath using implicit Format[T]
* (useful in case of recursive case classes).
*
* @see JsPath.lazyReadNullable to see behavior in reads
* @see JsPath.lazyWriteNullable to see behavior in writes
*/
// `f` is by-name and forwarded to both lazyRead and lazyWrite; each use site
// evaluates it independently.
def lazyFormat[T](f: => Format[T]): OFormat[T] = OFormat[T](lazyRead(f), lazyWrite(f))
/**
* Lazy Reads/Writes a Option[T] (optional or nullable field) at given JsPath using implicit Format[T]
* (useful in case of recursive case classes).
*
* @see JsPath.lazyReadNullable to see behavior in reads
* @see JsPath.lazyWriteNullable to see behavior in writes
*/
def lazyFormatNullable[T](f: => Format[T]): OFormat[Option[T]] = OFormat[Option[T]](lazyReadNullable(f), lazyWriteNullable(f))
/**
* Lazy Reads/Writes a T at given JsPath using explicit Reads[T] and Writes[T]
* (useful in case of recursive case classes).
*
* @see JsPath.lazyReadNullable to see behavior in reads
* @see JsPath.lazyWriteNullable to see behavior in writes
*/
def lazyFormat[T](r: => Reads[T], w: => Writes[T]): OFormat[T] = OFormat[T](lazyRead(r), lazyWrite(w))
/**
* Lazy Reads/Writes a Option[T] (optional or nullable field) at given JsPath using explicit Reads[T] and Writes[T]
* (useful in case of recursive case classes).
*
* @see JsPath.lazyReadNullable to see behavior in reads
* @see JsPath.lazyWriteNullable to see behavior in writes
*/
def lazyFormatNullable[T](r: => Reads[T], w: => Writes[T]): OFormat[Option[T]] = OFormat[Option[T]](lazyReadNullable(r), lazyWriteNullable(w))
// Captures the enclosing path so it can be referenced from inside the nested
// `json` helper object (where `this` would refer to that object).
private val self = this
/** JSON-transformer combinators anchored at this path (pick, put, copy, update, prune). */
object json {
/**
* (__ \\ 'key).json.pick[A <: JsValue] is a Reads[A] that:
* - picks the given value at the given JsPath (WITHOUT THE PATH) from the input JS
* - validates this element as an object of type A (inheriting JsValue)
* - returns a JsResult[A]
*
* Useful to pick a typed JsValue at a given JsPath
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> 123)
* js.validate((__ \\ 'key2).json.pick[JsNumber])
* => JsSuccess(JsNumber(123),/key2)
* }}}
*/
def pick[A <: JsValue](implicit r: Reads[A]): Reads[A] = Reads.jsPick(self)
/**
* (__ \\ 'key).json.pick is a Reads[JsValue] that:
* - picks the given value at the given JsPath (WITHOUT THE PATH) from the input JS
* - validates this element as an object of type JsValue
* - returns a JsResult[JsValue]
*
* Useful to pick a JsValue at a given JsPath
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> "value2")
* js.validate((__ \\ 'key2).json.pick)
* => JsSuccess("value2",/key2)
* }}}
*/
def pick: Reads[JsValue] = pick[JsValue]
/**
* (__ \\ 'key).json.pickBranch[A <: JsValue](readsOfA) is a Reads[JsObject] that:
* - copies the given branch (JsPath + relative JsValue) from the input JS at this given JsPath
* - validates this relative JsValue as an object of type A (inheriting JsValue) potentially modifying it
* - creates a JsObject from JsPath and validated JsValue
* - returns a JsResult[JsObject]
*
* Useful to create/validate an JsObject from a single JsPath (potentially modifying it)
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> Json.obj( "key21" -> "value2") )
* js.validate( (__ \\ 'key2).json.pickBranch[JsString]( (__ \\ 'key21).json.pick[JsString].map( (js: JsString) => JsString(js.value ++ "3456") ) ) )
* => JsSuccess({"key2":"value23456"},/key2/key21)
* }}}
*/
def pickBranch[A <: JsValue](reads: Reads[A]): Reads[JsObject] = Reads.jsPickBranch[A](self)(reads)
/**
* (__ \\ 'key).json.pickBranch is a Reads[JsObject] that:
* - copies the given branch (JsPath + relative JsValue) from the input JS at this given JsPath
* - creates a JsObject from JsPath and JsValue
* - returns a JsResult[JsObject]
*
* Useful to create/validate an JsObject from a single JsPath (potentially modifying it)
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> Json.obj( "key21" -> "value2") )
* js.validate( (__ \\ 'key2).json.pickBranch )
* => JsSuccess({"key2":{"key21":"value2"}},/key2)
* }}}
*/
def pickBranch: Reads[JsObject] = Reads.jsPickBranch[JsValue](self)
/**
* (__ \\ 'key).put(fixedValue) is a Reads[JsObject] that:
* - creates a JsObject setting A (inheriting JsValue) at given JsPath
* - returns a JsResult[JsObject]
*
* This Reads doesn't care about the input JS and is mainly used to set a fixed at a given JsPath
* Please that A is passed by name allowing to use an expression reevaluated at each time.
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> "value2")
* js.validate( (__ \\ 'key3).json.put( { JsNumber((new java.util.Date).getTime()) } ) )
* => JsSuccess({"key3":1376419773171},)
* }}}
*/
def put(a: => JsValue): Reads[JsObject] = Reads.jsPut(self, a)
/**
* (__ \\ 'key).json.copyFrom(reads) is a Reads[JsObject] that:
* - copies a JsValue using passed Reads[A]
* - creates a new branch from JsPath and copies previous value into it
*
* Useful to copy a value from a Json branch into another branch
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> "value2")
* js.validate( (__ \\ 'key3).json.copyFrom((__ \\ 'key2).json.pick))
* => JsSuccess({"key3":"value2"},/key2)
* }}}
*/
def copyFrom[A <: JsValue](reads: Reads[A]): Reads[JsObject] = Reads.jsCopyTo(self)(reads)
/**
* (__ \\ 'key).json.update(reads) is the most complex Reads[JsObject] but the most powerful:
* - copies the whole JsValue => A
* - applies the passed Reads[A] on JsValue => B
* - deep merges both JsValues (A ++ B) so B overwrites A identical branches
*
* Please note that if you have prune a branch in B, it is still in A so you'll see it in the result
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> "value2")
* js.validate(__.json.update((__ \\ 'key3).json.put(JsString("value3"))))
* => JsSuccess({"key1":"value1","key2":"value2","key3":"value3"},)
* }}}
*/
def update[A <: JsValue](reads: Reads[A]): Reads[JsObject] = Reads.jsUpdate(self)(reads)
/**
* (__ \\ 'key).json.prune is Reads[JsObject] that prunes the branch and returns remaining JsValue
*
* Example :
* {{{
* val js = Json.obj("key1" -> "value1", "key2" -> "value2")
* js.validate( (__ \\ 'key2).json.prune )
* => JsSuccess({"key1":"value1"},/key2)
* }}}
*/
def prune: Reads[JsObject] = Reads.jsPrune(self)
}
}
| jeantil/play-json-extra | play-json-extra/js/src/main/scala/play/api/libs/json/JsPath.scala | Scala | apache-2.0 | 19,965 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.transform.vision.image.augmentation
import com.intel.analytics.bigdl.transform.vision.image.opencv.OpenCVMat
import com.intel.analytics.bigdl.transform.vision.image.{FeatureTransformer, ImageFeature}
import org.apache.log4j.Logger
import org.opencv.core.Size
import org.opencv.imgproc.Imgproc
import scala.util.Random
/**
* Resize image
* @param resizeH height after resize
* @param resizeW width after resize
* @param resizeMode if resizeMode = -1, random select a mode from
* (Imgproc.INTER_LINEAR, Imgproc.INTER_CUBIC, Imgproc.INTER_AREA,
* Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
* @param useScaleFactor if true, scale factor fx and fy is used, fx = fy = 0
* note that the result of the following are different
* Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH), 0, 0, Imgproc.INTER_LINEAR)
* Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH))
*/
/**
 * Feature transformer that resizes the image held in an [[ImageFeature]] in place.
 *
 * When `resizeMode == -1` an interpolation method is picked at random per image;
 * otherwise the configured mode is used unconditionally.
 */
class Resize(resizeH: Int, resizeW: Int,
resizeMode: Int = Imgproc.INTER_LINEAR,
useScaleFactor: Boolean = true)
extends FeatureTransformer {
private val interpMethods = Array(Imgproc.INTER_LINEAR, Imgproc.INTER_CUBIC, Imgproc.INTER_AREA,
Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
override def transformMat(feature: ImageFeature): Unit = {
// Use the shared scala.util.Random object rather than allocating a fresh,
// time-seeded `new Random()` per image: the per-call allocation is wasteful
// and images processed within the same clock tick could all draw the same
// interpolation method.
val interpMethod = if (resizeMode == -1) {
interpMethods(Random.nextInt(interpMethods.length))
} else {
resizeMode
}
Resize.transform(feature.opencvMat(), feature.opencvMat(), resizeW, resizeH, interpMethod,
useScaleFactor)
}
}
object Resize {
val logger = Logger.getLogger(getClass)

def apply(resizeH: Int, resizeW: Int,
resizeMode: Int = Imgproc.INTER_LINEAR, useScaleFactor: Boolean = true): Resize =
new Resize(resizeH, resizeW, resizeMode, useScaleFactor)

/**
 * Resizes `input` into `output` (which may be the same Mat) to `resizeW` x `resizeH`.
 *
 * With `useScaleFactor` the explicit-factor overload of `Imgproc.resize` is
 * called (fx = fy = 0); note the two OpenCV overloads can produce slightly
 * different results, hence the flag.
 *
 * @return the resized `output` Mat
 */
def transform(input: OpenCVMat, output: OpenCVMat, resizeW: Int, resizeH: Int,
mode: Int = Imgproc.INTER_LINEAR, useScaleFactor: Boolean = true)
: OpenCVMat = {
val targetSize = new Size(resizeW, resizeH)
if (useScaleFactor) Imgproc.resize(input, output, targetSize, 0, 0, mode)
else Imgproc.resize(input, output, targetSize)
output
}
}
/**
 * Resize the image, keep the aspect ratio. scale according to the short edge
 * @param minSize scale size, apply to short edge
 * @param scaleMultipleOf make the scaled size multiple of some value
 * @param maxSize max size after scale
 * @param resizeMode if resizeMode = -1, random select a mode from
 * (Imgproc.INTER_LINEAR, Imgproc.INTER_CUBIC, Imgproc.INTER_AREA,
 * Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
 * @param useScaleFactor if true, scale factor fx and fy is used, fx = fy = 0
 * @param minScale control the minimum scale up for image
 */
class AspectScale(minSize: Int,
  scaleMultipleOf: Int = 1,
  maxSize: Int = 1000,
  resizeMode: Int = Imgproc.INTER_LINEAR,
  useScaleFactor: Boolean = true,
  minScale: Option[Float] = None)
  extends FeatureTransformer {
  // Resizes the mat in place, preserving aspect ratio based on the short edge.
  override def transformMat(feature: ImageFeature): Unit = {
    val (height, width) = AspectScale.getHeightWidthAfterRatioScale(feature.opencvMat(),
      minSize, maxSize, scaleMultipleOf, minScale)
    Resize.transform(feature.opencvMat(), feature.opencvMat(),
      width, height, resizeMode, useScaleFactor)
  }
}
object AspectScale {
  def apply(minSize: Int,
    scaleMultipleOf: Int = 1,
    maxSize: Int = 1000,
    mode: Int = Imgproc.INTER_LINEAR,
    useScaleFactor: Boolean = true,
    minScale: Option[Float] = None): AspectScale =
    new AspectScale(minSize, scaleMultipleOf, maxSize, mode, useScaleFactor, minScale)
  /**
   * get the width and height of scaled image
   * @param img original image
   * @return (height, width) after scaling — note the tuple order
   */
  def getHeightWidthAfterRatioScale(img: OpenCVMat, scaleTo: Float,
    maxSize: Int, scaleMultipleOf: Int, minScale: Option[Float] = None): (Int, Int) = {
    val imSizeMin = Math.min(img.width(), img.height())
    val imSizeMax = Math.max(img.width(), img.height())
    // Scale so the short edge becomes scaleTo, clamped below by minScale.
    var imScale = scaleTo.toFloat / imSizeMin.toFloat
    if (minScale.isDefined) {
      imScale = Math.max(minScale.get, imScale)
    }
    // Prevent the biggest axis from being more than MAX_SIZE
    if (Math.round(imScale * imSizeMax) > maxSize) {
      imScale = maxSize / imSizeMax.toFloat
    }
    // Both dimensions start from the same scale; they diverge only when each
    // is rounded down to a multiple of scaleMultipleOf below.
    var imScaleH, imScaleW = imScale
    if (scaleMultipleOf > 1) {
      imScaleH = (Math.floor(img.height() * imScale / scaleMultipleOf) *
        scaleMultipleOf / img.height()).toFloat
      imScaleW = (Math.floor(img.width() * imScale / scaleMultipleOf) *
        scaleMultipleOf / img.width()).toFloat
    }
    val width = imScaleW * img.width()
    val height = imScaleH * img.height()
    (height.round, width.round)
  }
}
/**
 * resize the image by randomly choosing a scale
 * @param scales array of scale options that for random choice
 * @param scaleMultipleOf Resize test images so that its width and height are multiples of
 * @param maxSize Max pixel size of the longest side of a scaled input image
 */
class RandomAspectScale(scales: Array[Int], scaleMultipleOf: Int = 1,
  maxSize: Int = 1000) extends FeatureTransformer {
  override def transformMat(feature: ImageFeature): Unit = {
    // Pick one of the configured short-edge sizes at random for each image.
    val scaleTo = scales(Random.nextInt(scales.length))
    val (height, width) = AspectScale.getHeightWidthAfterRatioScale(feature.opencvMat(),
      scaleTo, maxSize, scaleMultipleOf)
    // Uses the default interpolation mode (INTER_LINEAR) and scale factors.
    Resize.transform(feature.opencvMat(), feature.opencvMat(), width, height)
  }
}
object RandomAspectScale {
  def apply(scales: Array[Int], scaleMultipleOf: Int = 1,
    maxSize: Int = 1000): RandomAspectScale =
    new RandomAspectScale(scales, scaleMultipleOf, maxSize)
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/transform/vision/image/augmentation/Resize.scala | Scala | apache-2.0 | 6,275 |
package com.philipwilcox.mp3sorter
import java.io.File
import java.time.ZonedDateTime
import org.apache.commons.lang3.StringUtils
import org.jaudiotagger.audio.AudioFileIO
import org.jaudiotagger.tag.mp4.field.Mp4TagTextField
import org.jaudiotagger.tag.{FieldKey, Tag}
import org.joda.time.DateTime
/**
  * Wrapper around JAudioTagger that extracts a file's tag metadata as [[TagData]].
  */
class TagDataHelper {
  /** Parses `file` with JAudioTagger and wraps its tag in a [[TagData]]. */
  def readFromFile(file: File): TagData =
    new TagData(AudioFileIO.read(file).getTag)
}
class TagData(tag: Tag) {
  /** Artist field as stored in the tag. */
  def artist() = tag.getFirst(FieldKey.ARTIST)
  /** Release year as a plain string, or "unknown" when missing or unparseable. */
  def year(): String = {
    val stringYear = tag.getFirst(FieldKey.YEAR)
    if (StringUtils.isBlank(stringYear)) {
      // TODO inject settings down here to only warn on verbose? or use logging framework?
      println(s"  WARNING: could not read year for track ${tag.getFirst(FieldKey.TITLE)} by ${tag.getFirst(FieldKey.ARTIST)}")
      return "unknown"
    }
    try {
      if (stringYear.length == 10) {
        // 10 chars: assumed to be a local date (e.g. yyyy-MM-dd) — parse with Joda.
        val dateTime = DateTime.parse(stringYear)
        dateTime.getYear.toString
      } else if (stringYear.length > 4) {
        // iTunes Store files contain UTC timestamps as year, parse just the Year part
        val zonedDateTime = ZonedDateTime.parse(stringYear)
        zonedDateTime.getYear.toString
      } else {
        // 4 characters or fewer: already a plain year string.
        stringYear
      }
    } catch {
      case e: Exception => {
        println(s"  WARNING: could not parse year from $stringYear")
        "unknown"
      }
    }
  }
  def album() = tag.getFirst(FieldKey.ALBUM)
  /** Track label such as "02-07" (disc-track) or "07"; empty when no track info.
    * NOTE(review): assumes disc/track fields contain valid integers when
    * non-blank — a malformed value would throw NumberFormatException; confirm
    * whether callers guard against that. */
  def track() = {
    val stringBuilder = new StringBuilder
    val totalDiscsString = tag.getFirst(FieldKey.DISC_TOTAL)
    // Prefix a zero-padded disc number only for multi-disc releases.
    if (StringUtils.isNotBlank(totalDiscsString) && totalDiscsString.toInt > 1) {
      stringBuilder.append(f"${tag.getFirst(FieldKey.DISC_NO).toInt}%02d-")
    }
    // Use the total track count to decide how much to pad the track number
    val totalTracksString = tag.getFirst(FieldKey.TRACK_TOTAL)
    val totalTracks = if (StringUtils.isBlank(totalTracksString)) {
      99 // default to assuming that there are double-digit number of tracks
    } else {
      totalTracksString.toInt // use actual total tracks number if available
    }
    val thisTrackString = tag.getFirst(FieldKey.TRACK)
    // If we don't have track info at all, we fall back to blank
    if (StringUtils.isNotBlank(thisTrackString)) {
      val thisTrack = thisTrackString.toInt
      // the more than 1000 track case seems unimportant, and I couldn't get it to build a
      // "f" interpolation string with dynamic padding
      totalTracks match {
        case x if 0 <= x && x < 10 => stringBuilder.append(s"$thisTrack")
        case x if 10 <= x && x < 100 => stringBuilder.append(f"$thisTrack%02d")
        case _ => stringBuilder.append(f"$thisTrack%03d")
      }
    }
    stringBuilder.toString()
  }
  def title() = tag.getFirst(FieldKey.TITLE)
  override def toString = s"TagData($artist, $year, $album, $track, $title)"
}
| philipwilcox/mp3sorter | src/main/scala/com/philipwilcox/mp3sorter/TagData.scala | Scala | apache-2.0 | 3,022 |
import org.nd4s.Implicits._
/**
 * Entry point for the handwriting demo. See the resources/number_image folders:
 * each number is a 32 x 32 grid of 0/1 characters, and files are named
 * (label)_(sequence), where the sequence distinguishes different writings of
 * the same label.
 **/
object NumberRecognition {
  def main(args: Array[String]): Unit = {
    // Progress notices — the classification run below takes a while.
    println("Train the system to recognize numbers from a 32*32 characters of 0/1 number that made up a number")
    println("Program will execute very slowly!")
    // Run the k-nearest-neighbour handwriting classification test.
    KnnCreator.handwritingClassTest
  }
}
| yoonghan/nd4sTest | src/main/scala/NumberRecognition.scala | Scala | unlicense | 561 |
package sc.ala.rubyist
import scala.language.reflectiveCalls
object Using {
  /**
   * provide loan pattern which ensures to call `close' after the block
   *
   * @example {{{
   *   using(new ByteArrayOutputStream) { out => ... }
   * }}}
   */
  // The structural bound `{def close()}` accepts anything with a close()
  // method (enabled by the reflectiveCalls import above).
  // NOTE(review): if both `f` and `close()` throw, the exception from
  // `close()` replaces the one from `f` — standard try/finally semantics.
  def using[A <: {def close()}, B](a: A)(f: A => B): B = try {
    f(a)
  } finally {
    a.close()
  }
}
| maiha/rubyist | src/main/scala/sc/ala/rubyist/Using.scala | Scala | mit | 355 |
package org.scale.database
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.BSONDocument
import reactivemongo.bson.BSONObjectID
import scala.concurrent.ExecutionContext
import reactivemongo.api._
import reactivemongo.bson._
import reactivemongo.core.commands.{ LastError, GetLastError, Count }
// Generic ReactiveMongo read-access DAO for documents of type T; the concrete
// DAO supplies the collection plus BSON reader/writer instances and must also
// mix in MongoConfig (self-type).
trait MongoDAO[T] { this: MongoConfig =>
  implicit val ec: ExecutionContext
  implicit val reader: BSONDocumentReader[T]
  implicit val writer: BSONDocumentWriter[T]
  def collection: BSONCollection
  // Name of MongoDB's primary-key field.
  val ID = "_id"
  /** Cursor over every document in the collection. */
  def findAll = {
    collection.find(BSONDocument()).cursor[T]
  }
  /** Cursor over all documents whose `attribute` equals `value`. */
  def findAll(attribute: String, value: BSONValue) = {
    collection.find(BSONDocument(attribute -> value)).cursor[T]
  }
  /** Looks up a single document by its ObjectId in string form.
    * NOTE(review): BSONObjectID(id) throws on a malformed id string — confirm
    * callers validate the id first. */
  def findById(id: String) = {
    collection.find(BSONDocument(ID -> BSONObjectID(id))).one[T]
  }
  /** Looks up a single document by an already-parsed ObjectId. */
  def findByID(id: BSONObjectID) = {
    collection.find(BSONDocument(ID -> id)).one[T]
  }
  /** First document whose `attribute` equals the given string value. */
  def findUnique(attribute: String, value: String) = {
    collection.find(BSONDocument(attribute -> BSONString(value))).one[T]
  }
  /** First document whose `attribute` equals the given BSON value. */
  def findUnique(attribute:String, value: BSONValue) = {
    collection.find(BSONDocument(attribute -> value)).one[T]
  }
}
| jmarin/testscale | src/main/scala/org/scale/database/MongoDAO.scala | Scala | apache-2.0 | 1,199 |
package com.rouesnel.thrifty.ast
// Right-hand side of a Thrift const / default-value assignment.
sealed abstract class RHS extends ValueNode
// Scalar literal constants.
sealed abstract class Literal extends RHS
case class BoolLiteral(value: Boolean) extends Literal
case class IntLiteral(value: Long) extends Literal
case class DoubleLiteral(value: Double) extends Literal
case class StringLiteral(value: String) extends Literal
case object NullLiteral extends Literal
// Container initializers.
case class ListRHS(elems: Seq[RHS]) extends RHS
case class SetRHS(elems: Set[RHS]) extends RHS
case class MapRHS(elems: Seq[(RHS, RHS)]) extends RHS
// Struct / union / enum initializers and references to named constants.
case class StructRHS(sid: SimpleID, elems: Map[Field, RHS]) extends RHS
case class UnionRHS(sid: SimpleID, field: Field, initializer: RHS) extends RHS
case class EnumRHS(enum: Enum, value: EnumField) extends RHS
case class IdRHS(id: Identifier) extends RHS
| laurencer/thrifty | src/main/scala/com/rouesnel/thrifty/ast/RHS.scala | Scala | apache-2.0 | 785 |
package concurrent_programming.data_parallel.system_equations
import io.threadcso.{Barrier, CombiningBarrier, PROC, proc, ||}
/**
 * Solves A·x = b in parallel using Jacobi iteration with W worker processes.
 * Workers alternate between writing `newX` from `x` and `x` from `newX`,
 * separated by barriers; iteration stops once every component moves by less
 * than EPSILON between successive iterations.
 */
class ConcurrentJacobiIteration(A: MatrixUtils.Matrix, b: MatrixUtils.Vector){
  val N: Int = MatrixUtils.getRows(A)
  private val x = MatrixUtils.getNullVector(N)
  private val newX = MatrixUtils.getNullVector(N)
  val EPSILON: Double = 0.00000001
  // Number of parallel workers.
  private val W = 2
  // Base number of rows per worker; the last worker also takes the remainder.
  private val height = N / W
  // Conjunction barrier: aggregates each worker's local "converged" vote.
  private val conjBarrier = new CombiningBarrier[Boolean](W, true, _ && _)
  private val barrier = new Barrier(W)

  // One Jacobi worker responsible for rows [start, end).
  private def worker(start: Int, end: Int): PROC = proc{
    var finished = false
    while (!finished){
      // calculate our slice of newX from x
      for (i <- start until end){
        var sum: MatrixUtils.Type = 0
        for (j <- 0.until(N))
          if (i != j)
            sum += A(i)(j) * x(j)
        newX(i) = (b(i) - sum) / A(i)(i)
      }
      barrier.sync()
      // all workers have written their own slice of newX
      // calculate our slice of x from newX
      var finishedLocally = true
      for (i <- start until end){
        var sum: MatrixUtils.Type = 0
        for (j <- 0.until(N))
          if (i != j)
            sum += A(i)(j) * newX(j)
        x(i) = (b(i) - sum) / A(i)(i)
        finishedLocally = finishedLocally && (Math.abs(x(i) - newX(i)) < EPSILON)
      }
      // cast our vote for termination, retrieve the aggregated votes
      finished = conjBarrier.sync(finishedLocally)
      // all workers have written their own slice of x for this iteration
    }
  }

  /** Runs the parallel iteration from x = 0 and returns the solution vector. */
  def solve(): MatrixUtils.Vector = {
    for (i <- 0.until(N))
      x(i) = 0
    // Give the last worker all remaining rows: the previous fixed partition
    // (height*i until height*(i+1)) silently dropped the trailing N % W rows
    // whenever N is not a multiple of W, leaving those components unsolved.
    val system = || (for (i <- 0 until W) yield
      worker(height * i, if (i == W - 1) N else height * (i + 1)))
    system()
    x
  }
}
| AlexandruValeanu/Concurrent-Programming-in-Scala | src/concurrent_programming/data_parallel/system_equations/ConcurrentJacobiIteration.scala | Scala | gpl-3.0 | 1,744 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Contributors:
* Hao Jiang - initial API and implementation
*/
package edu.uchicago.cs.encsel.tool.mem
import java.lang.management.ManagementFactory
// Facade over a background heap-sampling thread. Constructor is private;
// use MemoryMonitor.INSTANCE.
class MemoryMonitor private {
  private var thread: MemoryMonitorThread = null

  /** Starts a fresh background sampling thread. */
  def start(): Unit = {
    thread = new MemoryMonitorThread()
    thread.start()
  }

  /**
   * Signals the sampling thread to stop, waits for it to finish, and returns
   * the observed heap usage range. Must be called after start().
   */
  def stop(): MemoryStat = {
    thread.monitorStop = true
    thread.join()
    // The last expression is the result; the explicit `return` and trailing
    // semicolons of the original were non-idiomatic.
    new MemoryStat(thread.memoryMin, thread.memoryMax)
  }
}
// Holder for the minimum and maximum heap usage (bytes) seen while sampling.
class MemoryStat(var min: Long, var max: Long) {
}
// Daemon thread that periodically samples heap usage and records the
// minimum and maximum values observed.
class MemoryMonitorThread extends Thread {
  // Written by the controlling thread, read by this one: must be @volatile
  // so the stop request is guaranteed to become visible across threads.
  @volatile var monitorStop = false
  var memoryMin = Long.MaxValue
  var memoryMax = 0L   // was `0l`: lowercase-l long literal is easy to misread
  var interval = 100L  // sampling period in milliseconds

  setName("MemoryMonitorThread")
  setDaemon(true)

  override def run(): Unit = {
    while (!monitorStop) {
      sample()
      Thread.sleep(interval)
    }
    // One final sample so the usage at the stop point is always included.
    sample()
  }

  /** Folds the current heap usage into the running min/max. */
  private def sample(): Unit = {
    val memory = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed
    memoryMax = Math.max(memoryMax, memory)
    memoryMin = Math.min(memoryMin, memory)
  }
}
}
object MemoryMonitor {
  // Shared singleton; the class constructor is private, so this is the only instance.
  val INSTANCE = new MemoryMonitor();
}
| harperjiang/enc-selector | src/main/scala/edu/uchicago/cs/encsel/tool/mem/MemoryMonitor.scala | Scala | apache-2.0 | 2,041 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.compiler.compile.io
import java.io.{ByteArrayInputStream, File, InputStream}
import java.net.{URI,URL}
object Hash
{
	// Size of the read buffer used when streaming data through the digest.
	private val BufferSize = 8192
	/** Converts an array of `bytes` to a hexadecimal representation String.*/
	def toHex(bytes: Array[Byte]): String =
	{
		val buffer = new StringBuilder(bytes.length * 2)
		for(i <- bytes.indices)
		{
			val b = bytes(i)
			// Widen to an unsigned 0-255 value before splitting into two nibbles.
			val bi: Int = if(b < 0) b + 256 else b
			buffer append toHex((bi >>> 4).asInstanceOf[Byte])
			buffer append toHex((bi & 0x0F).asInstanceOf[Byte])
		}
		buffer.toString
	}
	/** Converts the provided hexadecimal representation `hex` to an array of bytes.
	* The hexadecimal representation must have an even number of characters in the range 0-9, a-f, or A-F. */
	def fromHex(hex: String): Array[Byte] =
	{
		require((hex.length & 1) == 0, "Hex string must have length 2n.")
		val array = new Array[Byte](hex.length >> 1)
		for(i <- 0 until hex.length by 2)
		{
			// Each output byte is built from a pair of hex digits.
			val c1 = hex.charAt(i)
			val c2 = hex.charAt(i+1)
			array(i >> 1) = ((fromHex(c1) << 4) | fromHex(c2)).asInstanceOf[Byte]
		}
		array
	}
	/** Truncates the last half of `s` if the string has at least four characters. Otherwise, the original string is returned. */
	def halve(s: String): String = if(s.length > 3) s.substring(0, s.length / 2) else s
	/** Computes the SHA-1 hash of `s` and returns the first `i` characters of the hexadecimal representation of the hash. */
	def trimHashString(s: String, i: Int): String = toHex(apply(s)).take(i)
	/** Computes the SHA-1 hash of `s` and truncates the hexadecimal representation of the hash via [[halve]]. */
	def halfHashString(s: String): String = halve(toHex(apply(s)))
	/** Calculates the SHA-1 hash of the given String.*/
	def apply(s: String): Array[Byte] = apply(s.getBytes("UTF-8"))
	/** Calculates the SHA-1 hash of the given Array[Byte].*/
	def apply(as: Array[Byte]): Array[Byte] = apply(new ByteArrayInputStream(as))
	/** Calculates the SHA-1 hash of the given file.*/
	def apply(file: File): Array[Byte] = Using.fileInputStream(file)(apply)
	/** Calculates the SHA-1 hash of the given resource.*/
	def apply(url: URL): Array[Byte] = Using.urlInputStream(url)(apply)
	/** If the URI represents a local file (the scheme is "file"),
	* this method calculates the SHA-1 hash of the contents of that file.
	* Otherwise, this methods calculates the SHA-1 hash of the normalized string representation of the URI.*/
	def contentsIfLocal(uri: URI): Array[Byte] =
		if(uri.getScheme == "file") apply(uri.toURL) else apply(uri.normalize.toString)
	/** Calculates the SHA-1 hash of the given stream, closing it when finished.*/
	def apply(stream: InputStream): Array[Byte] =
	{
		import java.security.{MessageDigest, DigestInputStream}
		val digest = MessageDigest.getInstance("SHA")
		try
		{
			// Reading through the DigestInputStream feeds every byte to the digest;
			// the loop body is intentionally empty.
			val dis = new DigestInputStream(stream, digest)
			val buffer = new Array[Byte](BufferSize)
			while(dis.read(buffer) >= 0) {}
			// dis.close() also closes the wrapped stream; the finally block then
			// closes it a second time, which is a no-op for typical InputStreams.
			dis.close()
			digest.digest
		}
		finally { stream.close() }
	}
	// Maps a nibble value (0-15) to its lowercase hex digit.
	private def toHex(b: Byte): Char =
	{
		require(b >= 0 && b <= 15, "Byte " + b + " was not between 0 and 15")
		if(b < 10)
			('0'.asInstanceOf[Int] + b).asInstanceOf[Char]
		else
			('a'.asInstanceOf[Int] + (b-10)).asInstanceOf[Char]
	}
	// Maps a hex digit (either case) to its 0-15 value; throws on anything else.
	private def fromHex(c: Char): Int =
	{
		val b =
			if(c >= '0' && c <= '9')
				c - '0'
			else if(c >= 'a' && c <= 'f')
				(c - 'a') + 10
			else if(c >= 'A' && c <= 'F')
				(c - 'A') + 10
			else
				throw new RuntimeException("Invalid hex character: '" + c + "'.")
		b
	}
}
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/core/compiler/compile/io/Hash.scala | Scala | apache-2.0 | 4,049 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.core.impl.stages.inout
import scala.util.control.NonFatal
import swave.core.impl.{Inport, Outport}
import swave.core.macros.StageImplementation
import swave.core.Stage
import swave.core.impl.stages.InOutStage
// format: OFF
@StageImplementation
// Pass-through stage that invokes `callback` exactly once when the stream
// starts, before forwarding any signals.
private[core] final class OnStartStage(callback: () ⇒ Unit) extends InOutStage {
  def kind = Stage.Kind.InOut.OnStart(callback)
  connectInOutAndSealWith { (in, out) ⇒
    region.impl.registerForXStart(this)
    awaitingXStart(in, out)
  }
  // Waits for the xStart signal; runs `callback` once and moves to `running`.
  // If the callback throws (non-fatally), upstream is cancelled and the error
  // is propagated downstream.
  def awaitingXStart(in: Inport, out: Outport) = state(
    xStart = () => {
      try {
        callback()
        running(in, out)
      }
      catch {
        case NonFatal(e) =>
          in.cancel()
          stopError(e, out)
      }
    })
  // Steady state: forwards demand, cancellation, elements and termination
  // unchanged between `in` and `out`.
  def running(in: Inport, out: Outport) = state(
    intercept = false,
    request = requestF(in),
    cancel = stopCancelF(in),
    onNext = onNextF(out),
    onComplete = stopCompleteF(out),
    onError = stopErrorF(out))
}
| sirthias/swave | core/src/main/scala/swave/core/impl/stages/inout/OnStartStage.scala | Scala | mpl-2.0 | 1,196 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.predictionio.core.BaseAlgorithm
/** A concrete implementation of [[LServing]] that combines its algorithms'
  * predictions by arithmetic mean; every prediction class is expected to be
  * Double.
  *
  * @group Serving
  */
class LAverageServing[Q] extends LServing[Q, Double] {
  /** Averages all algorithms' predictions; the query itself is unused. */
  def serve(query: Q, predictions: Seq[Double]): Double = {
    val total = predictions.foldLeft(0.0)(_ + _)
    total / predictions.length
  }
}
/** A concrete implementation of [[LServing]] returning the average of all
  * algorithms' predictions, where their classes are expected to be all Double.
  *
  * @group Serving
  */
object LAverageServing {
  /** Returns an instance of [[LAverageServing]]. */
  // The algorithm class argument only pins down Q via type inference;
  // its value is never inspected.
  def apply[Q](a: Class[_ <: BaseAlgorithm[_, _, Q, _]]): Class[LAverageServing[Q]] =
    classOf[LAverageServing[Q]]
}
| pferrel/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/LAverageServing.scala | Scala | apache-2.0 | 1,686 |
package scalera.examples
import scalera.macros._
import scalera.macros.Entity._
object Examples extends App {
  // Entity is presumably a macro that synthesizes a case-class-like type named
  // "MyEntity" with attribute types taken from the sample values
  // (att1: Int, att2: Boolean) — TODO confirm against scalera.macros.
  val entities =
    Entity("MyEntity")(
      "att1" -> 5,
      "att2" -> true)
  // Bring the generated MyEntity type and constructor into scope.
  import entities._
  val myEntity = MyEntity(2,false)
  assert(myEntity.isInstanceOf[Serializable with Product],"It should be serializable")
  // Round-trip check: reading the attributes back and rebuilding yields an equal value.
  assert{
    val extractedAtt1: Int = myEntity.att1
    val extractedAtt2: Boolean = myEntity.att2
    myEntity == MyEntity(extractedAtt1,extractedAtt2)
  }
}
| Scalera/macros-handson | examples/src/main/scala/scalera/examples/Examples.scala | Scala | apache-2.0 | 502 |
package util
package security
// Mixin granting access to the configured authentication backend.
trait AuthenticationAccessor {
  /** Supplies the [[AuthenticationProvider]] for this component. */
  def getAuthentication(): AuthenticationProvider
}
| Shopify/collins | app/util/security/AuthenticationAccessor.scala | Scala | apache-2.0 | 114 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.NamespaceHelper
import org.apache.spark.sql.types.StringType
/**
 * The command for `SHOW CURRENT NAMESPACE`.
 */
case class ShowCurrentNamespaceCommand() extends LeafRunnableCommand {
  // Result schema: two non-nullable string columns, the current catalog name
  // and the current namespace.
  override val output: Seq[Attribute] = Seq(
    AttributeReference("catalog", StringType, nullable = false)(),
    AttributeReference("namespace", StringType, nullable = false)())
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalogManager = sparkSession.sessionState.catalogManager
    // `quoted` (from NamespaceHelper) renders the multi-part namespace as a
    // single dotted identifier string.
    Seq(Row(catalogManager.currentCatalog.name, catalogManager.currentNamespace.quoted))
  }
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/ShowCurrentNamespaceCommand.scala | Scala | apache-2.0 | 1,634 |
package sms.core
import org.scalatest.{Suite, BeforeAndAfterAll}
import sms.core.boot.Boot
/** Should be used in integration and acceptance tests
 */
trait CoreInitSuite extends BeforeAndAfterAll with Boot with Logging { this: Suite =>
  // Boot the system once before any test in the suite runs.
  override protected def beforeAll(): Unit = {
    super.beforeAll()
    start()
  }
  // Shut the system down after the last test, then run any remaining
  // afterAll behavior from stacked traits.
  override protected def afterAll(): Unit = {
    stop()
    super.afterAll()
  }
}
| kjanosz/stock-market-sherlock | core/src/test/scala/sms/core/CoreInitSuite.scala | Scala | apache-2.0 | 409 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate
import java.io.File
import org.scalatest.ConfigMap
import scala.collection.immutable.Map
// Exercises RenderContext.inject-based dependency injection, both directly
// and from within a rendered template.
class InjectAttributeTest extends TemplateTestSupport {
  test("Using render context directly") {
    val helper = context.inject[SomeHelper]
    assert(helper != null)
    log.info("got helper! " + helper)
  }
  // in the following test, the compiler does not pass in the
  // attributes type from the left hand side, which is quite surprising at first
  // I guess type inferencing only goes from right to left; not left to right
  val compilerInfersTypeParamsFromTypeOfLHS = false
  if (compilerInfersTypeParamsFromTypeOfLHS) {
    test("Using render context directly without explicit type param") {
      val helper: SomeHelper = context.inject
      assert(helper != null)
      log.info("got helper! " + helper)
    }
  }
  test("template using injection") {
    assertUriOutputContains("/org/fusesource/scalate/ioc.ssp", Map("name" -> "James"), "Hello James!")
  }
  // A fresh render context for each call (def, not val).
  def context = new DefaultRenderContext("dummy.ssp", engine)
  // Point the engine at the test resources so templates like ioc.ssp resolve.
  override protected def beforeAll(configMap: ConfigMap) = {
    super.beforeAll(configMap)
    engine.sourceDirectories = List(new File(baseDir, "src/test/resources"))
  }
}
// Trivial injectable helper: greets the user named by the "name" attribute,
// defaulting to "Unknown" when the attribute is absent.
class SomeHelper(context: RenderContext) {
  def greeting = {
    val who = context.attributeOrElse("name", "Unknown")
    "Hello " + who + "!"
  }
}
| scalate/scalate | scalate-core/src/test/scala/org/fusesource/scalate/InjectAttributeTest.scala | Scala | apache-2.0 | 2,100 |
package com.twitter.finagle.netty4
import com.twitter.io.Buf
import com.twitter.io.Buf.ByteArray
import io.netty.buffer._
import io.netty.util.ByteProcessor
private[finagle] object ByteBufAsBuf {
  // Assuming that bb.hasArray.
  private final def heapToBuf(bb: ByteBuf): Buf.ByteArray = {
    // Wraps the backing array directly — no copy; the Buf shares storage
    // with the ByteBuf.
    val begin = bb.arrayOffset + bb.readerIndex
    val end = begin + bb.readableBytes
    new Buf.ByteArray(bb.array, begin, end)
  }
  /**
   * Process `bb` 1-byte at a time using the given
   * [[Buf.Processor]], starting at index `from` of `bb` until
   * index `until`. Processing will halt if the processor
   * returns `false` or after processing the final byte.
   *
   * @return -1 if the processor processed all bytes or
   *         the last processed index if the processor returns
   *         `false`.
   *         Will return -1 if `from` is greater than or equal to
   *         `until` or `length` of the underlying buffer.
   *         Will return -1 if `until` is greater than or equal to
   *         `length` of the underlying buffer.
   *
   * @param from the starting index, inclusive. Must be non-negative.
   *
   * @param until the ending index, exclusive. Must be non-negative.
   */
  def process(from: Int, until: Int, processor: Buf.Processor, bb: ByteBuf): Int = {
    Buf.checkSliceArgs(from, until)
    val length = bb.readableBytes()
    // check if chunk to process is empty
    if (until <= from || from >= length) -1
    else {
      // Adapt the Buf.Processor to Netty's ByteProcessor.
      val byteProcessor = new ByteProcessor {
        def process(value: Byte): Boolean = processor(value)
      }
      val readerIndex = bb.readerIndex()
      val off = readerIndex + from
      val len = math.min(length - from, until - from)
      val index = bb.forEachByte(off, len, byteProcessor)
      // Translate Netty's absolute index back into a Buf-relative offset.
      if (index == -1) -1
      else index - readerIndex
    }
  }
  /**
   * Construct a [[Buf]] wrapper for `ByteBuf`.
   *
   * @note this wrapper does not support ref-counting and therefore should either
   *       be used with unpooled and non-leak detecting allocators or managed
   *       via the ref-counting methods of the wrapped `buf`. Non-empty buffers
   *       are `retain`ed.
   *
   * @note if the given is backed by a heap array, it will be coerced into `Buf.ByteArray`
   *       and then released. This basically means it's only safe to use this smart constructor
   *       with unpooled heap buffers.
   */
  def apply(buf: ByteBuf): Buf =
    if (buf.readableBytes == 0) Buf.Empty
    else if (buf.hasArray)
      try heapToBuf(buf)
      finally buf.release()
    else new ByteBufAsBuf(buf)
  /**
   * Extract a [[ByteBufAsBuf]]'s underlying ByteBuf without copying.
   */
  def unapply(wrapped: ByteBufAsBuf): Option[ByteBuf] = Some(wrapped.underlying)
  /**
   * Extract a read-only `ByteBuf` from [[Buf]] potentially without copying.
   */
  def extract(buf: Buf): ByteBuf = BufAsByteBuf(buf)
}
/**
* a [[Buf]] wrapper for Netty `ByteBuf`s.
*/
/**
 * a [[Buf]] wrapper for Netty `ByteBuf`s.
 */
private[finagle] class ByteBufAsBuf(private[finagle] val underlying: ByteBuf) extends Buf {
  // nb: `underlying` is exposed for testing

  // Byte at Buf-relative `index` (offset past the reader index).
  def get(index: Int): Byte =
    underlying.getByte(underlying.readerIndex() + index)
  def process(from: Int, until: Int, processor: Buf.Processor): Int =
    ByteBufAsBuf.process(from, until, processor, underlying)
  // Copies this Buf's readable bytes into `bytes` starting at `off`;
  // works on a duplicate so the underlying reader index is unchanged.
  def write(bytes: Array[Byte], off: Int): Unit = {
    checkWriteArgs(bytes.length, off)
    val dup = underlying.duplicate()
    dup.readBytes(bytes, off, dup.readableBytes)
  }
  // Copies this Buf's readable bytes into `buffer`, temporarily narrowing
  // its limit so exactly `length` bytes are written; restores the limit after.
  def write(buffer: java.nio.ByteBuffer): Unit = {
    checkWriteArgs(buffer.remaining, 0)
    val dup = underlying.duplicate()
    val currentLimit = buffer.limit
    buffer.limit(buffer.position + length)
    dup.readBytes(buffer)
    buffer.limit(currentLimit)
  }
  // Only heap-backed ByteBufs can expose their storage without copying.
  protected def unsafeByteArrayBuf: Option[ByteArray] =
    if (underlying.hasArray) Some(ByteBufAsBuf.heapToBuf(underlying))
    else None
  def length: Int = underlying.readableBytes
  def slice(from: Int, until: Int): Buf = {
    Buf.checkSliceArgs(from, until)
    if (isSliceEmpty(from, until)) Buf.Empty
    else if (isSliceIdentity(from, until)) this
    else {
      // Netty slice shares storage — no copy.
      val off = underlying.readerIndex() + from
      val len = Math.min(length, until) - from
      new ByteBufAsBuf(underlying.slice(off, len))
    }
  }
  override def equals(other: Any): Boolean = other match {
    case ByteBufAsBuf(otherBB) => underlying.equals(otherBB)
    case composite: Buf.Composite =>
      // Composite.apply has a relatively high overhead, so let it probe
      // back into this Buf.
      composite == this
    case other: Buf if other.length == length =>
      // Byte-by-byte comparison against any other equal-length Buf,
      // driven by Netty's forEachByte to avoid copying.
      val proc = new ByteProcessor {
        private[this] var pos = 0
        def process(value: Byte): Boolean = {
          if (other.get(pos) == value) {
            pos += 1
            true
          } else {
            false
          }
        }
      }
      underlying.forEachByte(underlying.readerIndex(), length, proc) == -1
    case _ => false
  }
}
| mkhq/finagle | finagle-netty4/src/main/scala/com/twitter/finagle/netty4/ByteBufAsBuf.scala | Scala | apache-2.0 | 4,975 |
package katas.scala.sort.mergesort
import org.scalatest.Matchers
import katas.scala.sort.SeqSortTest
import scala.reflect.ClassTag
class MergeSort16 extends SeqSortTest with Matchers {
override def sort[T](seq: Seq[T])(implicit ordered: (T) => Ordered[T], tag: ClassTag[T]): Seq[T] = {
def merge(seq1: Seq[T], seq2: Seq[T]): Seq[T] = {
if (seq1.isEmpty) seq2
else if (seq2.isEmpty) seq1
else if (seq1.head < seq2.head) seq1.head +: merge(seq1.tail, seq2)
else seq2.head +: merge(seq1, seq2.tail)
}
if (seq.size <= 1) seq
else {
val (part1, part2) = seq.splitAt(seq.size / 2)
merge(sort(part1), sort(part2))
}
}
} | dkandalov/katas | scala/src/katas/scala/sort/mergesort/MergeSort16.scala | Scala | unlicense | 645 |
/*
* Copyright (c) 2010 e.e d3si9n
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package scalaxb.compiler.xsd
import javax.xml.namespace.QName
/** Base trait for all type symbols the schema compiler works with. */
trait XsTypeSymbol extends scala.xml.TypeSymbol {
  /** Display name of the symbol; also used as its string form. */
  val name: String
  override def toString(): String = name
}
// Marker singletons used by the compiler to tag special schema constructs
// (wildcards, mixed content, attribute handling, etc.).
object XsAnyType extends XsTypeSymbol {
  val name = "XsAnyType"
}
object XsNillableAny extends XsTypeSymbol {
  val name = "XsNillableAny"
}
object XsLongAll extends XsTypeSymbol {
  val name = "XsLongAll"
}
object XsLongAttribute extends XsTypeSymbol {
  val name = "XsLongAttribute"
}
object XsInterNamespace extends XsTypeSymbol {
  val name = "XsInterNamespace"
}
object XsAnyAttribute extends XsTypeSymbol {
  val name = "XsAnyAttribute"
}
object XsMixed extends XsTypeSymbol {
  val name = "XsMixed"
}
// Wildcard (xs:any) restricted to the given namespace constraint list.
case class XsWildcard(namespaceConstraint: List[String]) extends XsTypeSymbol {
  val name = "XsWildcard(" + namespaceConstraint.mkString(",") + ")"
}
// Wraps another symbol to indicate a scalaxb DataRecord representation.
case class XsDataRecord(member: XsTypeSymbol) extends XsTypeSymbol {
  val name = "XsDataRecord(" + member + ")"
}
object ReferenceTypeSymbol {
  /** Extracts the resolved declaration (may wrap null before resolution). */
  def unapply(value: ReferenceTypeSymbol): Option[TypeDecl] = Some(value.decl)
  /** Builds a reference symbol from an optional namespace and a local name. */
  def apply(namespace: Option[String], localpart: String) =
    new ReferenceTypeSymbol(new QName(namespace.orNull, localpart))
}
/** A symbol referring to a named type declaration by qualified name. */
class ReferenceTypeSymbol(val qname: QName) extends XsTypeSymbol {
  val namespace = masked.scalaxb.Helper.nullOrEmpty(qname.getNamespaceURI)
  val localPart = qname.getLocalPart
  // Clark-notation-style name: "{ns}local" when a namespace is present.
  val name: String = (namespace map {"{%s}".format(_)} getOrElse("")) + localPart
  // Presumably filled in by a later resolution pass; null until then — TODO confirm.
  var decl: TypeDecl = null
  override def toString(): String = {
    if (decl == null) "ReferenceTypeSymbol(" + qname.toString + ",null)"
    else "ReferenceTypeSymbol(" + qname.toString + ")"
  }
}
object AnyType {
  /**
   * Extractor matching any of the "any type" symbols: a wildcard,
   * XsAnyType, or XsAnySimpleType. Yields the matched symbol itself.
   */
  def unapply(value: XsTypeSymbol): Option[XsTypeSymbol] =
    value match {
      case wildcard: XsWildcard        => Some(wildcard)
      case XsAnyType | XsAnySimpleType => Some(value)
      case _                           => None
    }
}
// Symbol standing for the generated XMLFormat of a complex type or
// attribute group; other declarations render as "_".
case class XsXMLFormat(member: Decl) extends XsTypeSymbol {
  val name = "XsXMLFormat(" + (member match {
    case decl: ComplexTypeDecl => decl.name
    case group: AttributeGroupDecl => group.name
    case _ => "_"
  }) + ")"
}
/** A built-in XML Schema simple type; `name` is the Scala type it maps to. */
class BuiltInSimpleTypeSymbol(val name: String) extends XsTypeSymbol
case class AttributeGroupSymbol(namespace: Option[String],
  name: String) extends XsTypeSymbol
// Derivation markers: a type either extends or restricts another symbol.
abstract class DerivSym
case class Extends(sym: XsTypeSymbol) extends DerivSym
case class Restricts(sym: XsTypeSymbol) extends DerivSym
// Built-in XML Schema simple types and the Scala types they map to.
object XsAnySimpleType extends BuiltInSimpleTypeSymbol("XsAnySimpleType") {}
object XsUnknown extends BuiltInSimpleTypeSymbol("String") {}
// Date/time types map to JAXP datatype classes.
object XsDuration extends BuiltInSimpleTypeSymbol("javax.xml.datatype.Duration") {}
object XsDateTime extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsTime extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsDate extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGYearMonth extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGYear extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGMonthDay extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGDay extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGMonth extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsBoolean extends BuiltInSimpleTypeSymbol("Boolean") {}
object XsFloat extends BuiltInSimpleTypeSymbol("Float") {}
object XsBase64Binary extends BuiltInSimpleTypeSymbol("scalaxb.Base64Binary") {}
object XsHexBinary extends BuiltInSimpleTypeSymbol("scalaxb.HexBinary") {}
object XsDouble extends BuiltInSimpleTypeSymbol("Double") {}
object XsAnyURI extends BuiltInSimpleTypeSymbol("java.net.URI") {}
object XsQName extends BuiltInSimpleTypeSymbol("javax.xml.namespace.QName") {}
object XsNOTATION extends BuiltInSimpleTypeSymbol("javax.xml.namespace.QName") {}
// String-derived types; list types (NMTOKENS, IDREFS, ENTITIES) map to Seq[String].
object XsString extends BuiltInSimpleTypeSymbol("String") {}
object XsNormalizedString extends BuiltInSimpleTypeSymbol("String") {}
object XsToken extends BuiltInSimpleTypeSymbol("String") {}
object XsLanguage extends BuiltInSimpleTypeSymbol("String") {}
object XsName extends BuiltInSimpleTypeSymbol("String") {}
object XsNMTOKEN extends BuiltInSimpleTypeSymbol("String") {}
object XsNMTOKENS extends BuiltInSimpleTypeSymbol("Seq[String]") {}
object XsNCName extends BuiltInSimpleTypeSymbol("String") {}
object XsID extends BuiltInSimpleTypeSymbol("String") {}
object XsIDREF extends BuiltInSimpleTypeSymbol("String") {}
object XsIDREFS extends BuiltInSimpleTypeSymbol("Seq[String]") {}
object XsENTITY extends BuiltInSimpleTypeSymbol("String") {}
object XsENTITIES extends BuiltInSimpleTypeSymbol("Seq[String]") {}
// Numeric types; unbounded/unsigned-wide integers map to BigInt/BigDecimal.
object XsDecimal extends BuiltInSimpleTypeSymbol("BigDecimal") {}
object XsInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNonPositiveInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNegativeInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNonNegativeInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsPositiveInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsLong extends BuiltInSimpleTypeSymbol("Long") {}
object XsUnsignedLong extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsInt extends BuiltInSimpleTypeSymbol("Int") {}
object XsUnsignedInt extends BuiltInSimpleTypeSymbol("Long") {}
object XsShort extends BuiltInSimpleTypeSymbol("Short") {}
object XsUnsignedShort extends BuiltInSimpleTypeSymbol("Int") {}
object XsByte extends BuiltInSimpleTypeSymbol("Byte") {}
object XsUnsignedByte extends BuiltInSimpleTypeSymbol("Int") {}
object XsTypeSymbol {
  /** Infix alias for PartialFunction, used to type the lookup table below. */
  type =>?[A, B] = PartialFunction[A, B]
  val LOCAL_ELEMENT = "http://scalaxb.org/local-element"
  // Maps an unqualified XML Schema built-in type name to its symbol.
  // Undefined for unknown names — callers should guard with
  // isDefinedAt / lift rather than applying directly.
  val toTypeSymbol: String =>? XsTypeSymbol = {
    case "anyType" => XsAnyType
    case "anySimpleType" => XsAnySimpleType
    case "duration" => XsDuration
    case "dateTime" => XsDateTime
    case "time" => XsTime
    case "date" => XsDate
    case "gYearMonth" => XsGYearMonth
    case "gYear" => XsGYear
    case "gMonthDay" => XsGMonthDay
    case "gDay" => XsGDay
    case "gMonth" => XsGMonth
    case "boolean" => XsBoolean
    case "float" => XsFloat
    case "base64Binary" => XsBase64Binary
    case "hexBinary" => XsHexBinary
    case "double" => XsDouble
    case "anyURI" => XsAnyURI
    case "QName" => XsQName
    case "NOTATION" => XsNOTATION
    case "string" => XsString
    case "normalizedString" => XsNormalizedString
    case "token" => XsToken
    case "language" => XsLanguage
    case "Name" => XsName
    case "NMTOKEN" => XsNMTOKEN
    case "NMTOKENS" => XsNMTOKENS
    case "NCName" => XsNCName
    case "ID" => XsID
    case "IDREF" => XsIDREF
    case "IDREFS" => XsIDREFS
    case "ENTITY" => XsENTITY
    case "ENTITIES" => XsENTITIES
    case "decimal" => XsDecimal
    case "integer" => XsInteger
    case "nonPositiveInteger" => XsNonPositiveInteger
    case "negativeInteger" => XsNegativeInteger
    case "nonNegativeInteger" => XsNonNegativeInteger
    case "positiveInteger" => XsPositiveInteger
    case "long" => XsLong
    case "unsignedLong" => XsUnsignedLong
    case "int" => XsInt
    case "unsignedInt" => XsUnsignedInt
    case "short" => XsShort
    case "unsignedShort" => XsUnsignedShort
    case "byte" => XsByte
    case "unsignedByte" => XsUnsignedByte
  }
}
| eed3si9n/scalaxb | cli/src/main/scala/scalaxb/compiler/xsd/XsTypeSymbol.scala | Scala | mit | 9,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.{FileNotFoundException, IOException}
import scala.collection.mutable
import org.apache.spark.{Partition => RDDPartition, TaskContext, TaskKilledException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.{InputFileBlockHolder, RDD}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.NextIterator
/**
* A part (i.e. "block") of a single file that should be read, along with partition column values
* that need to be prepended to each row.
*
* @param partitionValues value of partition columns to be prepended to each row.
* @param filePath path of the file to read
* @param start the beginning offset (in bytes) of the block.
* @param length number of bytes to read.
* @param locations locality information (list of nodes that have the data).
*/
case class PartitionedFile(
    partitionValues: InternalRow,
    filePath: String,
    start: Long,
    length: Long,
    @transient locations: Array[String] = Array.empty) {
  // Human-readable summary used by FileScanRDD's per-file log message.
  override def toString: String = {
    s"path: $filePath, range: $start-${start + length}, partition values: $partitionValues"
  }
}
/**
* A collection of file blocks that should be read as a single task
* (possibly from multiple partitioned directories).
*/
// One RDD partition: the group of file blocks processed by a single task.
case class FilePartition(index: Int, files: Seq[PartitionedFile]) extends RDDPartition
/**
* An RDD that scans a list of file partitions.
*/
class FileScanRDD(
    @transient private val sparkSession: SparkSession,
    readFunction: (PartitionedFile) => Iterator[InternalRow],
    @transient val filePartitions: Seq[FilePartition])
  extends RDD[InternalRow](sparkSession.sparkContext, Nil) {
  // Session-level switches: whether corrupt/missing files are skipped
  // (with a warning) instead of failing the task.
  private val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
  private val ignoreMissingFiles = sparkSession.sessionState.conf.ignoreMissingFiles
  // Builds a closeable iterator that walks every file block of the split in
  // order, tracking bytes/records-read metrics as it goes.
  override def compute(split: RDDPartition, context: TaskContext): Iterator[InternalRow] = {
    val iterator = new Iterator[Object] with AutoCloseable {
      private val inputMetrics = context.taskMetrics().inputMetrics
      private val existingBytesRead = inputMetrics.bytesRead
      // Find a function that will return the FileSystem bytes read by this thread. Do this before
      // apply readFunction, because it might read some bytes.
      private val getBytesReadCallback =
        SparkHadoopUtil.get.getFSBytesReadOnThreadCallback()
      // We get our input bytes from thread-local Hadoop FileSystem statistics.
      // If we do a coalesce, however, we are likely to compute multiple partitions in the same
      // task and in the same thread, in which case we need to avoid override values written by
      // previous partitions (SPARK-13071).
      private def updateBytesRead(): Unit = {
        inputMetrics.setBytesRead(existingBytesRead + getBytesReadCallback())
      }
      // If we can't get the bytes read from the FS stats, fall back to the file size,
      // which may be inaccurate.
      private def updateBytesReadWithFileSize(): Unit = {
        if (currentFile != null) {
          inputMetrics.incBytesRead(currentFile.length)
        }
      }
      private[this] val files = split.asInstanceOf[FilePartition].files.toIterator
      private[this] var currentFile: PartitionedFile = null
      private[this] var currentIterator: Iterator[Object] = null
      def hasNext: Boolean = {
        // Kill the task in case it has been marked as killed. This logic is from
        // InterruptibleIterator, but we inline it here instead of wrapping the iterator in order
        // to avoid performance overhead.
        context.killTaskIfInterrupted()
        (currentIterator != null && currentIterator.hasNext) || nextIterator()
      }
      def next(): Object = {
        val nextElement = currentIterator.next()
        // TODO: we should have a better separation of row based and batch based scan, so that we
        // don't need to run this `if` for every record.
        if (nextElement.isInstanceOf[ColumnarBatch]) {
          inputMetrics.incRecordsRead(nextElement.asInstanceOf[ColumnarBatch].numRows())
        } else {
          inputMetrics.incRecordsRead(1)
        }
        // Refresh the bytes-read metric only periodically to keep overhead low.
        if (inputMetrics.recordsRead % SparkHadoopUtil.UPDATE_INPUT_METRICS_INTERVAL_RECORDS == 0) {
          updateBytesRead()
        }
        nextElement
      }
      // Invokes the user read function on the current file, rewriting
      // FileNotFoundException with a hint about stale cached metadata.
      private def readCurrentFile(): Iterator[InternalRow] = {
        try {
          readFunction(currentFile)
        } catch {
          case e: FileNotFoundException =>
            throw new FileNotFoundException(
              e.getMessage + "\n" +
                "It is possible the underlying files have been updated. " +
                "You can explicitly invalidate the cache in Spark by " +
                "running 'REFRESH TABLE tableName' command in SQL or " +
                "by recreating the Dataset/DataFrame involved.")
        }
      }
      /** Advances to the next file. Returns true if a new non-empty iterator is available. */
      private def nextIterator(): Boolean = {
        updateBytesReadWithFileSize()
        if (files.hasNext) {
          currentFile = files.next()
          logInfo(s"Reading File $currentFile")
          // Sets InputFileBlockHolder for the file block's information
          InputFileBlockHolder.set(currentFile.filePath, currentFile.start, currentFile.length)
          if (ignoreMissingFiles || ignoreCorruptFiles) {
            currentIterator = new NextIterator[Object] {
              // The readFunction may read some bytes before consuming the iterator, e.g.,
              // vectorized Parquet reader. Here we use lazy val to delay the creation of
              // iterator so that we will throw exception in `getNext`.
              private lazy val internalIter = readCurrentFile()
              override def getNext(): AnyRef = {
                try {
                  if (internalIter.hasNext) {
                    internalIter.next()
                  } else {
                    finished = true
                    null
                  }
                } catch {
                  case e: FileNotFoundException if ignoreMissingFiles =>
                    logWarning(s"Skipped missing file: $currentFile", e)
                    finished = true
                    null
                  // Throw FileNotFoundException even if `ignoreCorruptFiles` is true
                  case e: FileNotFoundException if !ignoreMissingFiles => throw e
                  case e @ (_: RuntimeException | _: IOException) if ignoreCorruptFiles =>
                    logWarning(
                      s"Skipped the rest of the content in the corrupted file: $currentFile", e)
                    finished = true
                    null
                }
              }
              override def close(): Unit = {}
            }
          } else {
            currentIterator = readCurrentFile()
          }
          // Recurse via hasNext so empty files are skipped transparently.
          hasNext
        } else {
          currentFile = null
          InputFileBlockHolder.unset()
          false
        }
      }
      override def close(): Unit = {
        updateBytesRead()
        updateBytesReadWithFileSize()
        InputFileBlockHolder.unset()
      }
    }
    // Register an on-task-completion callback to close the input stream.
    context.addTaskCompletionListener(_ => iterator.close())
    iterator.asInstanceOf[Iterator[InternalRow]] // This is an erasure hack.
  }
  override protected def getPartitions: Array[RDDPartition] = filePartitions.toArray
  // Locality: prefer the hosts that physically hold the most bytes of this split.
  override protected def getPreferredLocations(split: RDDPartition): Seq[String] = {
    val files = split.asInstanceOf[FilePartition].files
    // Computes total number of bytes can be retrieved from each host.
    val hostToNumBytes = mutable.HashMap.empty[String, Long]
    files.foreach { file =>
      file.locations.filter(_ != "localhost").foreach { host =>
        hostToNumBytes(host) = hostToNumBytes.getOrElse(host, 0L) + file.length
      }
    }
    // Takes the first 3 hosts with the most data to be retrieved
    hostToNumBytes.toSeq.sortBy {
      case (host, numBytes) => numBytes
    }.reverse.take(3).map {
      case (host, numBytes) => host
    }
  }
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala | Scala | apache-2.0 | 9,120 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamingWithStateTestBase}
import org.apache.flink.table.utils.TableFunc0
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
/**
* tests for retraction
*/
class RetractionITCase extends StreamingWithStateTestBase {
  // input data
  val data = List(
    ("Hello", 1),
    ("word", 1),
    ("Hello", 1),
    ("bark", 1),
    ("bark", 1),
    ("bark", 1),
    ("bark", 1),
    ("bark", 1),
    ("bark", 1),
    ("flink", 1)
  )
  // keyed groupby + keyed groupby
  @Test
  def testWordCount(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.clear
    env.setStateBackend(getStateBackend)
    val stream = env.fromCollection(data)
    val table = stream.toTable(tEnv, 'word, 'num)
    // Word counts, then a histogram of counts (count -> frequency).
    val resultTable = table
      .groupBy('word)
      .select('num.sum as 'count)
      .groupBy('count)
      .select('count, 'count.count as 'frequency)
    val results = resultTable.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractingSink)
    env.execute()
    // Final state after all retractions have been applied.
    val expected = Seq("1,2", "2,1", "6,1")
    assertEquals(expected.sorted, StreamITCase.retractedResults.sorted)
  }
  // keyed groupby + non-keyed groupby
  @Test
  def testGroupByAndNonKeyedGroupBy(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.clear
    env.setStateBackend(getStateBackend)
    val stream = env.fromCollection(data)
    val table = stream.toTable(tEnv, 'word, 'num)
    val resultTable = table
      .groupBy('word)
      .select('word as 'word, 'num.sum as 'cnt)
      .select('cnt.sum)
    val results = resultTable.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractingSink).setParallelism(1)
    env.execute()
    val expected = Seq("10")
    assertEquals(expected.sorted, StreamITCase.retractedResults.sorted)
  }
  // non-keyed groupby + keyed groupby
  @Test
  def testNonKeyedGroupByAndGroupBy(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.clear
    env.setStateBackend(getStateBackend)
    val stream = env.fromCollection(data)
    val table = stream.toTable(tEnv, 'word, 'num)
    val resultTable = table
      .select('num.sum as 'count)
      .groupBy('count)
      .select('count, 'count.count)
    val results = resultTable.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractingSink)
    env.execute()
    val expected = Seq("10,1")
    assertEquals(expected.sorted, StreamITCase.retractedResults.sorted)
  }
  // test unique process, if the current output message of unbounded groupby equals the
  // previous message, unbounded groupby will ignore the current one.
  @Test
  def testUniqueProcess(): Unit = {
    // data input
    val data = List(
      (1, 1L),
      (2, 2L),
      (3, 3L),
      (3, 3L),
      (4, 1L),
      (4, 0L),
      (4, 0L),
      (4, 0L),
      (5, 1L),
      (6, 6L),
      (6, 6L),
      (6, 6L),
      (7, 8L)
    )
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.clear
    env.setStateBackend(getStateBackend)
    // Parallelism 1 so the emitted +/- message sequence is deterministic.
    env.setParallelism(1)
    val stream = env.fromCollection(data)
    val table = stream.toTable(tEnv, 'pk, 'value)
    val resultTable = table
      .groupBy('pk)
      .select('pk as 'pk, 'value.sum as 'sum)
      .groupBy('sum)
      .select('sum, 'pk.count as 'count)
    val results = resultTable.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractMessagesSink)
    env.execute()
    // Raw accumulate (+) / retract (-) messages, not just the final state.
    val expected = Seq(
      "+1,1", "+2,1", "+3,1", "-3,1", "+6,1", "-1,1", "+1,2", "-1,2", "+1,3", "-6,1", "+6,2",
      "-6,2", "+6,1", "+12,1", "-12,1", "+18,1", "+8,1")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // correlate should handle retraction messages correctly
  @Test
  def testCorrelate(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.clear
    env.setStateBackend(getStateBackend)
    val func0 = new TableFunc0
    val stream = env.fromCollection(data)
    val table = stream.toTable(tEnv, 'word, 'num)
    val resultTable = table
      .groupBy('word)
      .select('word as 'word, 'num.sum as 'cnt)
      .leftOuterJoin(func0('word))
      .groupBy('cnt)
      .select('cnt, 'word.count as 'frequency)
    val results = resultTable.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractingSink)
    env.execute()
    val expected = Seq("1,2", "2,1", "6,1")
    assertEquals(expected.sorted, StreamITCase.retractedResults.sorted)
  }
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/table/RetractionITCase.scala | Scala | apache-2.0 | 5,947 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import scala.annotation.tailrec
import scala.io.Source
import java.io.{File, FileWriter, BufferedWriter}
object GenContain1 extends GenContainBase {
  val generatorSource = new File("GenContain1.scala")
  /**
   * Generates the "contain" test specs for the given target directory by
   * translating the List-based source specs to other collection types.
   *
   * @param targetBaseDir directory the generated files are written to
   * @param version release version (unused here, kept for generator API)
   * @param scalaVersion Scala version (unused here, kept for generator API)
   * @return the generated (or already up-to-date) files
   */
  def genTest(targetBaseDir: File, version: String, scalaVersion: String): Seq[File] = {
    val sourceBaseDir = new File("jvm/scalatest-test/src/test/scala/org/scalatest")
    targetBaseDir.mkdirs()
    // Translates one source spec file, replacing "List" with `typeName` in the
    // file name and applying `mapping` to every line. Skipped when the target
    // is newer than this generator.
    def generateFile(sourceFileName: String, typeName: String, mapping: (String, String)*): File = {
      val generatedFileName = sourceFileName.replaceAll("List", typeName)
      val generatedFile = new File(targetBaseDir, generatedFileName)
      if (!generatedFile.exists || generatorSource.lastModified > generatedFile.lastModified) {
        // Read all lines eagerly and close the source handle; the original
        // code leaked the Source (never closed the underlying file).
        val source = Source.fromFile(new File(sourceBaseDir, sourceFileName))
        val lines =
          try source.getLines().toList // for 2.8
          finally source.close()
        val writer = new BufferedWriter(new FileWriter(generatedFile))
        try {
          for (line <- lines) {
            val generatedLine = translateLine(line, mapping.toList: _*)
            writer.write(generatedLine.toString)
            writer.newLine() // add for 2.8
          }
        }
        finally {
          writer.flush()
          writer.close()
          println("Generated " + generatedFile.getAbsolutePath)
        }
      }
      generatedFile
    }
    Seq(
      // Generate tests for atLeastOneOf
      generateFile("ListShouldContainAtLeastOneOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainAtLeastOneOfLogicalOrSpec.scala", "String", stringMapping: _*),
      // Generate tests for atLeastOneElementOf
      /*generateFile("ListShouldContainAtLeastOneElementOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainAtLeastOneElementOfLogicalOrSpec.scala", "String", stringMapping: _*), */
      // Generate tests for oneOf
      generateFile("ListShouldContainOneOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainOneOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainOneOfLogicalOrSpec.scala", "String", stringMapping: _*),
      // Generate tests for oneElementOf
      /*generateFile("ListShouldContainOneElementOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainOneElementOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainOneElementOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainOneElementOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainOneElementOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainOneElementOfLogicalOrSpec.scala", "String", stringMapping: _*), */
      // Generate tests for noneOf
      generateFile("ListShouldContainNoneOfSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoneOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoneOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoneOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoneOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoneOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainNoneOfLogicalOrSpec.scala", "String", stringMapping: _*),
      // Generate tests for noElementsOf
      /*//generateFile("ListShouldContainNoElementsOfSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "Option", optionMapping: _*),
      generateFile("ListShouldContainNoElementsOfSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainNoElementsOfSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainNoElementsOfSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainNoElementsOfSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainNoElementsOfSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainNoElementsOfLogicalOrSpec.scala", "String", stringMapping: _*), */
      // Generate tests for theSameElementsAs
      generateFile("ListShouldContainTheSameElementsAsSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalAndSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalOrSpec.scala", "Array", arrayMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalAndSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalOrSpec.scala", "Map", mapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalAndSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalOrSpec.scala", "JavaCol", javaColMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalAndSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalOrSpec.scala", "JavaMap", javaMapMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalAndSpec.scala", "String", stringMapping: _*),
      generateFile("ListShouldContainTheSameElementsAsLogicalOrSpec.scala", "String", stringMapping: _*)
    )
  }
  /** CLI entry point: args = (targetDir, version, scalaVersion). */
  def main(args: Array[String]): Unit = {
    val targetDir = args(0)
    val version = args(1)
    val scalaVersion = args(2)
    genTest(new File(targetDir + "/org/scalatest/"), version, scalaVersion)
  }
}
| scalatest/scalatest | project/GenContain1.scala | Scala | apache-2.0 | 13,265 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tools.data.utils.akka
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
/**
* Created by matan on 2/2/16.
*/
/**
 * Shared Akka runtime for the data tools: a singleton ActorSystem and a
 * Materializer bound to it. Both are `lazy`, so merely importing this object
 * starts nothing — the actor system is created on first use.
 */
object Implicits {
  implicit lazy val system: ActorSystem = ActorSystem("reactive-tools-system")
  implicit lazy val mat: Materializer = ActorMaterializer()
}
| e-orz/CM-Well | server/cmwell-data-tools/src/main/scala/cmwell/tools/data/utils/akka/Implicits.scala | Scala | apache-2.0 | 948 |
package jp.kenichi.pdf
import scala.collection.mutable.Buffer
import java.io.InputStream
/**
 * Byte source for the PDF parser with a one-byte lookahead (`next`) and a
 * running absolute position (`pos`) into the underlying stream.
 */
class PdfObjectParserInput(in: InputStream, var pos: Long) {
  /** Lookahead: the byte the next [[read]] will return, or -1 at EOF. */
  var next = in.read

  /** Consumes and returns the lookahead byte; keeps returning -1 after reaching EOF. */
  def read = {
    val current = next
    if (current != -1) {
      next = in.read
      pos += 1
    }
    current
  }

  /**
   * Consumes exactly `size` bytes and exposes them as an independent stream.
   * NOTE(review): bytes requested past EOF come back as 0xff (read() == -1
   * truncated to a byte) — confirm callers never over-read.
   */
  def takeAndSkip(size: Int) = {
    val taken = Buffer.fill(size)(read.toByte)
    new ByteSeqAsInputStream(taken)
  }
}
/**
 * Adapts an in-memory byte sequence to java.io.InputStream. Follows the
 * InputStream contract: read() yields unsigned values 0-255 and -1 at end.
 */
class ByteSeqAsInputStream(seq: Seq[Byte]) extends InputStream {
  // Cursor into `seq`; advances by one per successful read.
  protected var index = 0

  def read = {
    if (index >= seq.size) -1
    else {
      val unsigned = seq(index) & 0xff
      index += 1
      unsigned
    }
  }
}
/**
 * Recursive-descent parser for PDF objects (PDF32000_2008, 7.3 "Objects").
 *
 * [[parse]] returns a PdfIndirectObj, an indirect reference, or a direct
 * object; [[parseDirect]] maps the PDF object types onto plain Scala values:
 * Boolean / Int / Double / null for scalars, String for literal strings and
 * names, Array[Byte] for hex strings, Array[Any] for arrays, and PdfDict
 * (optionally carrying decoded stream data under the null key) for
 * dictionaries and streams.
 *
 * NOTE(review): the println calls look like leftover debug tracing — confirm
 * whether they should be removed or routed through a logger.
 */
class PdfObjectParser(in: PdfObjectParserInput, ctx: PdfParserContext) {
  /** friendly message for error message: hex dump plus printable rendering of the upcoming bytes */
  protected def remainingInputText(prefix: String, lastCh: Int) = {
    val bin = (prefix.getBytes("UTF-8") :+ lastCh.toByte) ++ Iterator.continually(in.read).take(32).takeWhile(_ != -1).map(_.toByte)
    bin.map(bt => f"$bt%02x").mkString(" ") + " (" + new String(bin.toArray, "UTF-8").map(ch => if (ch >= 0x20 && ch <= 0x7e) ch else '.') + ")"
  }

  /** Skips PDF white-space characters and %-comments; leaves `in.next` on the first significant byte. */
  protected def skipWhiteSpaces {
    while (true) {
      in.next match {
        // PDF32000_2008.pdf: 7.2.2 Character Set
        case '\\u0000' | '\\t' | '\\n' | '\\f' | '\\r' | ' ' => in.read
        // PDF32000_2008.pdf: 7.2.3 Comments
        case '%' =>
          in.read
          while (in.next != -1 && in.next != '\\n' && in.next != '\\r')
            in.read
        case _ => return
      }
    }
  }

  /** Consumes `text` byte-for-byte or fails with a positioned error. */
  protected def readText(text: String) = {
    for ((ch, index) <- text.zipWithIndex)
      if (in.read != ch)
        throw new PdfParseException(f"${in.pos - index}%,d: expected $text: " + remainingInputText(text.take(index), ch))
  }

  /** Like [[readText]] but additionally requires the keyword not to be followed by a regular character. */
  protected def readKeyword(keyword: String) = {
    readText(keyword)
    if (isRegular(in.next))
      throw new PdfParseException(f"${in.pos - keyword.size}%,d: expected $keyword: " + remainingInputText(keyword, in.read))
  }

  protected var nextObj: Option[Any] = None // we have to read ahead a token to detect indirect object and reference

  /** @return PdfIndirectObj, PdfIndirectRef, or direct object */
  def parse: Any = {
    // A buffered token from a previous lookahead takes priority.
    if (nextObj.nonEmpty) {
      val ret = nextObj.get
      nextObj = None
      ret
    } else {
      parseDirect match {
        // An integer may begin "objNum genNum obj" or "objNum genNum R";
        // disambiguate by reading ahead (buffering extra tokens in nextObj).
        case objNum: Int =>
          skipWhiteSpaces
          if (isDigit(in.next)) {
            parseDirect match {
              case genNum: Int =>
                skipWhiteSpaces
                // 7.3.10 Indirect Objects of PDF32000_2008.pdf
                if (in.next == 'o') {
                  readKeyword("obj")
                  val obj = parseDirect
                  skipWhiteSpaces
                  readKeyword("endobj")
                  PdfIndirectObj(objNum, genNum, obj)
                // Indirect Reference
                } else if (in.next == 'R') {
                  readKeyword("R")
                  new ctx.PdfIndirectRefImpl(objNum, genNum)
                // two consecutive integer
                } else {
                  nextObj = Some(genNum)
                  objNum
                }
              // single integer
              case obj =>
                nextObj = Some(obj)
                objNum
            }
          } else
            objNum // single integer
        // non-integer
        case obj => obj
      }
    }
  }

  // PDF32000_2008.pdf: 7.3 Object
  /*
    PDF Object can be any of:
      Boolean
      Int, Double (PDF Number)
      String (PDF String or Name)
      Array[_]
      PdfDict
  */
  def parseDirect: Any = {
    skipWhiteSpaces
    in.read match {
      case -1 => throw new PdfParseException("unexpected EOF")

      // PDF32000_2008.pdf: 7.3.4.2 Literal Strings
      // NOTE(review): octal escapes (\ddd) and backslash-newline continuations
      // from 7.3.4.2 are not handled — the fallback case keeps the raw byte.
      case '(' =>
        println(f"  ${in.pos}%,d: Literal String")
        val buf = Buffer[Byte]()
        var depth = 1 // balanced unescaped parentheses are allowed inside literal strings
        while (depth > 0) {
          in.read match {
            case -1 => throw new PdfParseException("unexpected EOF: expecting a closing parenthesis")
            case '(' =>
              depth += 1
              buf += '('
            case ')' =>
              depth -= 1
              if (depth > 0)
                buf += ')'
            case '\\\\' =>
              in.read match {
                case -1 => throw new PdfParseException("unexpected EOF: expecting an escape sequence")
                case 'n' => buf += '\\n'
                case 'r' => buf += '\\r'
                case 't' => buf += '\\t'
                case 'b' => buf += '\\b'
                case 'f' => buf += '\\f'
                case '(' => buf += '('
                case '\\\\' => buf += '\\\\'
                case bt => buf += bt.toByte // the backslash is ignored
              }
            case bt => buf += bt.toByte
          }
        }
        new String(buf.toArray, "UTF-8")

      // PDF32000_2008.pdf: 7.3.4.3 Hexadecimal Strings
      case '<' if (in.next != '<') =>
        println(f"  ${in.pos}%,d: Hexadecimal String")
        val buf = Buffer[Byte]()
        while (in.next != '>') {
          val d1 = parseHexDigit(in.read)
          // an odd final digit is padded with a trailing zero, per spec
          val d2 = if (in.next == '>') 0 else parseHexDigit(in.read)
          buf += (d1 << 4 | d2).toByte
        }
        in.read
        buf.toArray

      // Name (7.3.5); #XX is a two-hex-digit character escape
      case '/' =>
        println(f"  ${in.pos}%,d: Name")
        val buf = Buffer[Char]()
        while (isRegular(in.next)) {
          if (in.next == '#') {
            in.read
            val d1 = parseHexDigit(in.read)
            val d2 = parseHexDigit(in.read)
            buf += (d1 << 4 | d2).toChar
          } else
            buf += in.read.toChar
        }
        new String(buf.toArray)

      // Number (7.3.3): Int when no decimal point occurs, Double otherwise
      case ch if (isDigit(ch) || ch == '+' || ch == '-' || ch == '.') =>
        println(f"  ${in.pos}%,d: Number")
        val buf = Buffer[Char]()
        buf += ch.toChar
        var containsDot = ch == '.'
        while (in.next != -1 && (isDigit(in.next) || in.next == '.')) {
          val ch = in.read
          buf += ch.toChar
          if (ch == '.')
            containsDot = true
        }
        val text = new String(buf.toArray)
        if (containsDot)
          text.toDouble: Any // without casting to Any, the Int value below would be promoted to Double
        else
          text.toInt

      // Array (7.3.6); elements may be indirect references, hence `parse`
      case '[' =>
        println(f"  ${in.pos}%,d: Array")
        val buf = Buffer[Any]()
        skipWhiteSpaces
        while (nextObj.nonEmpty || in.next != ']') {
          buf += parse
          skipWhiteSpaces
        }
        in.read
        buf.toArray

      // Dictionary (7.3.7), possibly followed by a stream (7.3.8)
      case '<' if (in.next == '<') =>
        println(f"  ${in.pos}%,d: Dictionary")
        in.read
        val dict = new PdfDict
        skipWhiteSpaces
        while (in.next == '/') {
          val name = parseDirect match {
            case name: String => name
            case obj => throw new PdfParseException(f"${in.pos}%,d: expecting a name: $obj")
          }
          val value = parse
          println(s"    Entry: $name = $value (${value.getClass.getSimpleName})")
          dict.put(name, value)
          skipWhiteSpaces
        }
        readText(">>")
        // PDF32000_2008.pdf: 7.3.8 Stream Objects
        skipWhiteSpaces
        if (in.next == 's') {
          readKeyword("stream")
          println(f"  ${in.pos}%,d: Stream (0x${in.pos}%x)")
          // the newline after keyword stream
          if (in.next == '\\n')
            in.read
          else if (in.next == '\\r') {
            in.read
            if (in.read != '\\n') // single "\\r" is not allowed as the newline
              throw new PdfParseException(f"${in.pos}%,d: expecting a new line after stream")
          }
          println(f"    before the stream data (${in.pos}%,d 0x${in.pos}%x): ${in.next}%02x")
          // decoded stream bytes are stored in the dictionary under the null key
          val stm = new PdfStream(dict)
          dict.put(null, ctx.decodeStream(in.takeAndSkip(stm.length), stm.filter)) // null key
          println(f"    after the stream data (${in.pos}%,d 0x${in.pos}%x): ${in.next}%02x")
          skipWhiteSpaces
          readKeyword("endstream")
        }
        dict

      // PDF32000_2008.pdf: 7.3.2 Boolean Objects
      case 't' =>
        readKeyword("true")
        true
      case 'f' =>
        readKeyword("false")
        false

      // PDF32000_2008.pdf: 7.3.9 Null Object
      case 'n' =>
        readKeyword("null")
        null

      case ch =>
        throw new PdfParseException(f"${in.pos}%,d: expecting an object: " + remainingInputText("", ch))
    }
  }

  // PDF32000_2008.pdf: 7.2.2 Character Set
  def isWhiteSpace(ch: Int) = ch match {
    case '\\u0000' | '\\t' | '\\n' | '\\f' | '\\r' | ' ' => true
    case _ => false
  }
  def isDelimiter(ch: Int) = ch match {
    case '(' | ')' | '<' | '>' | '[' | ']' | '{' | '}' | '/' | '%' => true
    case _ => false
  }
  /** "regular" = any byte that is neither EOF, white space, nor a delimiter. */
  def isRegular(ch: Int) = ch != -1 && !isWhiteSpace(ch) && !isDelimiter(ch)
  def isDigit(ch: Int) = ch >= '0' && ch <= '9'
  /** Parses one hex digit (0-9, A-F, a-f) to its value 0-15, or fails. */
  def parseHexDigit(ch: Int) = {
    if (ch >= '0' && ch <= '9')
      ch - '0'
    else if (ch >= 'A' && ch <= 'F')
      ch - 'A' + 10
    else if (ch >= 'a' && ch <= 'f')
      ch - 'a' + 10
    else
      throw new PdfParseException(s"unexpected hex digit: $ch")
  }
}
| ken1ma/pades-scala-js | shared/src/main/scala/jp.kenichi/pdf/PdfObjectParser.scala | Scala | apache-2.0 | 8,247 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import scala.collection.mutable.HashSet
import org.scalatest.FunSuite
/**
 * Unit tests for AppendOnlyMap covering: construction limits (size must be
 * in 1 .. 2^29), object and primitive key/value round-trips, iteration via
 * foreach, null key and null value handling, changeValue semantics across
 * map growth, and starting from capacity 1.
 */
class AppendOnlyMapSuite extends FunSuite {
  test("initialization") {
    val goodMap1 = new AppendOnlyMap[Int, Int](1)
    assert(goodMap1.size === 0)
    val goodMap2 = new AppendOnlyMap[Int, Int](255)
    assert(goodMap2.size === 0)
    val goodMap3 = new AppendOnlyMap[Int, Int](256)
    assert(goodMap3.size === 0)
    intercept[IllegalArgumentException] {
      new AppendOnlyMap[Int, Int](1 << 30) // Invalid map size: bigger than 2^29
    }
    intercept[IllegalArgumentException] {
      new AppendOnlyMap[Int, Int](-1)
    }
    intercept[IllegalArgumentException] {
      new AppendOnlyMap[Int, Int](0)
    }
  }

  test("object keys and values") {
    val map = new AppendOnlyMap[String, String]()
    for (i <- 1 to 100) {
      map("" + i) = "" + i
    }
    assert(map.size === 100)
    for (i <- 1 to 100) {
      assert(map("" + i) === "" + i)
    }
    // Lookups of absent keys (and the absent null key) yield null.
    assert(map("0") === null)
    assert(map("101") === null)
    assert(map(null) === null)
    val set = new HashSet[(String, String)]
    for ((k, v) <- map) { // Test the foreach method
      set += ((k, v))
    }
    assert(set === (1 to 100).map(_.toString).map(x => (x, x)).toSet)
  }

  test("primitive keys and values") {
    val map = new AppendOnlyMap[Int, Int]()
    for (i <- 1 to 100) {
      map(i) = i
    }
    assert(map.size === 100)
    for (i <- 1 to 100) {
      assert(map(i) === i)
    }
    assert(map(0) === null)
    assert(map(101) === null)
    val set = new HashSet[(Int, Int)]
    for ((k, v) <- map) { // Test the foreach method
      set += ((k, v))
    }
    assert(set === (1 to 100).map(x => (x, x)).toSet)
  }

  test("null keys") {
    val map = new AppendOnlyMap[String, String]()
    for (i <- 1 to 100) {
      map("" + i) = "" + i
    }
    assert(map.size === 100)
    assert(map(null) === null)
    // The null key is a legal key and counts toward the size once set.
    map(null) = "hello"
    assert(map.size === 101)
    assert(map(null) === "hello")
  }

  test("null values") {
    val map = new AppendOnlyMap[String, String]()
    for (i <- 1 to 100) {
      map("" + i) = null
    }
    assert(map.size === 100)
    assert(map("1") === null)
    assert(map(null) === null)
    assert(map.size === 100)
    map(null) = null
    assert(map.size === 101)
    assert(map(null) === null)
  }

  test("changeValue") {
    val map = new AppendOnlyMap[String, String]()
    for (i <- 1 to 100) {
      map("" + i) = "" + i
    }
    assert(map.size === 100)
    for (i <- 1 to 100) {
      val res = map.changeValue("" + i, (hadValue, oldValue) => {
        assert(hadValue === true)
        assert(oldValue === "" + i)
        oldValue + "!"
      })
      assert(res === i + "!")
    }
    // Iterate from 101 to 400 to make sure the map grows a couple of times, because we had a
    // bug where changeValue would return the wrong result when the map grew on that insert
    for (i <- 101 to 400) {
      val res = map.changeValue("" + i, (hadValue, oldValue) => {
        assert(hadValue === false)
        i + "!"
      })
      assert(res === i + "!")
    }
    assert(map.size === 400)
    assert(map(null) === null)
    map.changeValue(null, (hadValue, oldValue) => {
      assert(hadValue === false)
      "null!"
    })
    assert(map.size === 401)
    map.changeValue(null, (hadValue, oldValue) => {
      assert(hadValue === true)
      assert(oldValue === "null!")
      "null!!"
    })
    assert(map.size === 401)
  }

  test("inserting in capacity-1 map") {
    val map = new AppendOnlyMap[String, String](1)
    for (i <- 1 to 100) {
      map("" + i) = "" + i
    }
    assert(map.size === 100)
    for (i <- 1 to 100) {
      assert(map("" + i) === "" + i)
    }
  }
}
| mkolod/incubator-spark | core/src/test/scala/org/apache/spark/util/AppendOnlyMapSuite.scala | Scala | apache-2.0 | 4,536 |
package org.gbougeard.dsl
/**
* Created with IntelliJ IDEA.
* User: gbougeard
* Date: 15/07/13
* Time: 14:51
* To change this template use File | Settings | File Templates.
*/
import scala.util.parsing.combinator.syntactical._
import ChangeAST._
/**
 * Combinator parser for a small change-query DSL (Gerrit-style change
 * searches): a parenthesised, comma-separated list of queries — each a
 * sequence of terms (status/is/age/branch/project/owner/limit) — optionally
 * followed by a `get` clause selecting labels. Produces the ChangeAST model.
 */
object ChangeDsl extends StandardTokenParsers {
  // Keywords recognised by the lexer.
  // NOTE(review): "-" is registered as a reserved word rather than a
  // delimiter — confirm it can actually be tokenised this way.
  lexical.reserved +=("query", "status", "is", "age", "branch", "project", "owner", "get", "and", "or", "not", "-", "limit", "open", "closed", "merged")
  lexical.delimiters +=("(", ")", ",")

  // Top level: the queries block followed by `get` and an optional label list.
  lazy val changeQuery = queries ~ ("get" ~> (get_values?)) ^^ {
    case q ~ l => ChangeQuery(q, l)
  }

  // "(" query, query, ... ")"
  lazy val queries = "(" ~> rep1sep(query, ",") <~ ")" ^^ Queries

  // "query" followed by one or more terms. The "and"/"or" separators are
  // optional, and rep1sep discards their value — the operator itself is not
  // carried into the AST.
  lazy val query = "query" ~> rep1sep(term, (operator_enum ?)) ^^ Query

  lazy val term = (
    status_spec |
      is_spec |
      age_spec |
      branch_spec |
      project_spec |
      owner_spec |
      limit_spec
    )

  lazy val operator_enum = ("and" | "or")

  // `status` and `is` accept the same open/merged/closed states but map to
  // distinct AST nodes.
  lazy val status_spec = "status" ~> status_enum ^^ Status
  lazy val status_enum = ("open" | "merged" | "closed")
  lazy val is_spec = "is" ~> is_enum ^^ Is
  lazy val is_enum = ("open" | "merged" | "closed")

  lazy val age_spec = "age" ~> stringLit ^^ Age
  lazy val branch_spec = "branch" ~> stringLit ^^ Branch
  lazy val project_spec = "project" ~> stringLit ^^ Project
  lazy val owner_spec = "owner" ~> stringLit ^^ Owner

  lazy val limit_spec = "limit" ~> numericLit ^^ {
    case value => Limit(value.toInt)
  }

  //  lazy val get_spec = "get" ~> (get_values ?)

  // "(" label, label, ... ")" — each string literal becomes a Label.
  lazy val get_values = "(" ~> rep1sep(stringLit, ",") <~ ")" ^^ {
    case labels => Labels(labels map {
      label => Label(label)
    })
  }
}
| gbougeard/gas | src/main/scala/org/gbougeard/dsl/ChangeDsl.scala | Scala | apache-2.0 | 1,666 |
package tryp
package state
package core
package cell
import shapeless.{HList, ::, Coproduct, CNil, :+:}
// Fixture messages (A-F) and cell states (S1-S3) used by the cell specs below.
case object A
  extends Message

case object B
  extends Message

case object C
  extends Message

case object D
  extends Message

case object E
  extends Message

case object F
  extends Message

// S1 carries a payload so transitions can pattern-match on it.
case class S1(num: Int)
  extends CState

case object S2
  extends CState

case object S3
  extends CState
/**
 * Shared fixture values for the spec: a one-cell HList (`cs`), the matching
 * pristine-state HList (`ss`), and a trivial IO effect (`io`).
 * Forward reference to C1 (defined below) is safe: objects initialise lazily.
 */
object Data
{
  val cs = C1 :: HNil
  val ss = (Pristine: CState) :: HNil
  val io = StateIO(IO.pure(1))
}
import Data._
/**
 * Test cell with shapeless Poly-style handlers:
 *  - message A emits state S1(23), follow-up messages B and C, and an IO effect;
 *  - message B declares a partial transition from S1 to S2;
 *  - message D emits S2, an IO effect, and message F (not listed in `In`).
 * `dyn` is the untyped dispatch: it routes A and B through the typed cases
 * and folds everything else to HNil.
 */
object C1
extends Cell
{
  implicit def case_A =
    at((m: A.type) => S1(23) :: B :: C :: io :: HNil)

  implicit def case_B
  : Case.Aux[B.type, Trans[S2.type :: HNil]]
  = at((m: B.type) => Trans.partial("", "C1") { case S1(num) => S2 :: HNil })

  implicit def case_D =
    at((m: D.type) => S2 :: io :: F :: HNil)

  // Accepted message types and the coproduct of possible handler outputs.
  type In = A.type :+: B.type :+: CNil
  type Out = (S1 :: B.type :: C.type :: StateIO :: HNil) :+: Trans[S2.type :: HNil] :+: HNil :+: CNil

  implicit def dyn: Case.Aux[Message, Out] = at { m =>
    m match {
      case m @ A => Coproduct[Out](this(m))
      case m @ B => Coproduct[Out](this(m))
      case _ => Coproduct[Out](HNil)
    }
  }

  type Aux = Cell.Aux[this.type, In, Out]

  def aux: Aux = this
}
/**
 * Compile-time spec: each example feeds a snippet (prefixed with the shared
 * `imports`) to SpecToolbox.compileSuccess and passes iff the snippet
 * type-checks. This exercises the implicit-resolution machinery of the cell
 * DSL rather than its runtime behaviour.
 */
object CellSpec
extends Spec
with BoundedCachedPool
{
  import SpecToolbox._

  // Preamble prepended to every compiled snippet.
  val imports =
    """
    import shapeless.::
    import tryp.state.core._
    import tryp.state.core.cell._
    import Data._
    """

  // `.c` compiles the receiver string with the preamble.
  implicit class StringOps(val code: String)
  extends AnyVal
  {
    def c = compileSuccess(imports + code)
  }

  def is = s2"""
  runTrans $rt

  evalTransition $et

  process $proc

  transition $trans

  processD and reduceMsgOutD $procD
  """

  def name = "spec"

  def rt = "runTrans(C1, (D, cs))".c

  def et = "evalTransition(C1(D, cs), cs)".c

  def proc = "process(D, cs)".c

  def trans = """
  import reduceMsgsOut._
  val res = processAll(A :: D :: HNil, cs)
  aggregate(res)
  """.c

  // Two-cell variant: checks the dynamic dispatch and checksum aggregation
  // over a CState :: CState :: HNil state shape.
  def procD =
    """
    val cs = C1.aux :: C1.aux :: HNil
    type Ss = CState :: CState :: HNil
    val states = (Pristine: CState) :: (Pristine: CState) :: HNil
    val t = processD((A: Message), cs)
    reduceMsgOutD.agg[Ss](t)
    """.c
}
| tek/pulsar | state-core/test-src/cell_spec.scala | Scala | mit | 2,216 |
package com.arcusys.valamis.gradebook.service
import com.arcusys.valamis.gradebook.model.CourseGrade
/**
 * Persistence operations for the grade a teacher assigns to a learner for a
 * whole course (as opposed to per-activity gradebook entries).
 */
trait TeacherCourseGradeService {

  /** Returns the grade of `userId` in `courseId`, if one has been set. */
  def get(courseId: Long, userId: Long): Option[CourseGrade]

  /** Returns the grades of `userId` across `courseIds` — presumably omitting courses without a grade; confirm with the implementation. */
  def get(courseIds: Seq[Long], userId: Long): Seq[CourseGrade]

  /** Creates or updates the grade (and optional comment) of `userId` in `courseId`, scoped by `companyId`. */
  def set(courseId: Long, userId: Long, grade: Float, comment: Option[String], companyId: Long): Unit

  /** Updates only the teacher comment of `userId` in `courseId`, scoped by `companyId`. */
  def setComment(courseId: Long, userId: Long, comment: String, companyId: Long): Unit
}
| arcusys/Valamis | valamis-gradebook/src/main/scala/com/arcusys/valamis/gradebook/service/TeacherCourseGradeService.scala | Scala | gpl-3.0 | 457 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | Your SKilL scala Binding **
** \\__ \\ ' <| | | |__ generated: 01.02.2019 **
** |___/_|\\_\\_|_|____| by: feldentm **
\\* */
package de.ust.skill.sir.api.internal
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.WrappedArray
import scala.reflect.Manifest
import de.ust.skill.common.jvm.streams.InStream
import de.ust.skill.common.scala.SkillID
import de.ust.skill.common.scala.api.SkillObject
import de.ust.skill.common.scala.api.TypeMissmatchError
import de.ust.skill.common.scala.internal.BasePool
import de.ust.skill.common.scala.internal.FieldDeclaration
import de.ust.skill.common.scala.internal.SkillState
import de.ust.skill.common.scala.internal.SingletonStoragePool
import de.ust.skill.common.scala.internal.StoragePool
import de.ust.skill.common.scala.internal.SubPool
import de.ust.skill.common.scala.internal.fieldTypes._
import de.ust.skill.common.scala.internal.restrictions.FieldRestriction
import _root_.de.ust.skill.sir.api._
/**
 * Generated SKilL storage pool for `constantinteger` (see the generator stamp
 * in the file header). Do not hand-edit logic here — changes will be lost on
 * regeneration; this comment pass only documents the generated structure.
 */
final class ConstantIntegerPool(poolIndex : Int,
superPool: SimpleTypePool)
  extends SubPool[_root_.de.ust.skill.sir.ConstantInteger, de.ust.skill.sir.Type](
    poolIndex,
    "constantinteger",
    superPool
  ) {
  override def getInstanceClass: Class[_root_.de.ust.skill.sir.ConstantInteger] = classOf[_root_.de.ust.skill.sir.ConstantInteger]

  // Wires the known field "value" to its generated declaration; any other
  // field name falls back to the generic SubPool implementation.
  override def addField[T : Manifest](ID : Int, t : FieldType[T], name : String,
                           restrictions : HashSet[FieldRestriction]) : FieldDeclaration[T, _root_.de.ust.skill.sir.ConstantInteger] = {
    val f = (name match {
      case "value" ⇒ new F_ConstantInteger_value(ID, this)
      case _      ⇒ return super.addField(ID, t, name, restrictions)
    }).asInstanceOf[FieldDeclaration[T, _root_.de.ust.skill.sir.ConstantInteger]]

    //check type
    if (t != f.t)
      throw new TypeMissmatchError(t, f.t.toString, f.name, name)

    val rs = restrictions.iterator
    while(rs.hasNext)
      f.addRestriction(rs.next())
    dataFields += f
    return f
  }

  // Adds declarations for known fields that were absent from the read file,
  // then creates the known restrictions on all data and auto fields.
  override def ensureKnownFields(st : SkillState) {
    val state = st.asInstanceOf[SkillFile]
    // data fields
    val Clsvalue = classOf[F_ConstantInteger_value]

    val fields = HashSet[Class[_ <: FieldDeclaration[_, _root_.de.ust.skill.sir.ConstantInteger]]](Clsvalue)
    var dfi = dataFields.size
    while (dfi != 0) {
      dfi -= 1
      fields.remove(dataFields(dfi).getClass)
    }
    if(fields.contains(Clsvalue))
        dataFields += new F_ConstantInteger_value(dataFields.size + 1, this, V64)
    // no auto fields

    val fs = (dataFields ++ autoFields).iterator
    while (fs.hasNext)
      fs.next().createKnownRestrictions
  }

  override def makeSubPool(name : String, poolIndex : Int) = new ConstantIntegerSubPool(poolIndex, name, this)

  // Reflective allocation: negative skill IDs mark not-yet-written objects.
  override def reflectiveAllocateInstance: _root_.de.ust.skill.sir.ConstantInteger = {
    val r = new _root_.de.ust.skill.sir.ConstantInteger(-1)
    this.newObjects.append(r)
    r
  }

  // Allocates one instance per static slot of every block, in parallel per block.
  override def allocateInstances {
    for (b ← blocks.par) {
      var i : SkillID = b.bpo
      val last = i + b.staticCount
      while (i < last) {
        data(i) = new _root_.de.ust.skill.sir.ConstantInteger(i + 1)
        i += 1
      }
    }
  }

  def make(value : Long = 0, name : _root_.de.ust.skill.sir.Identifier = null) = {
    val r = new _root_.de.ust.skill.sir.ConstantInteger(-1 - newObjects.size, value : Long, name : _root_.de.ust.skill.sir.Identifier)
    newObjects.append(r)
    r
  }
}
/**
 * Generated pool for unknown (file-declared) subtypes of ConstantInteger.
 * Do not hand-edit — regenerated by the SKilL binding generator.
 */
final class ConstantIntegerSubPool(poolIndex : Int, name : String, superPool : StoragePool[_ >: _root_.de.ust.skill.sir.ConstantInteger.UnknownSubType <: _root_.de.ust.skill.sir.ConstantInteger, _root_.de.ust.skill.sir.Type])
    extends SubPool[_root_.de.ust.skill.sir.ConstantInteger.UnknownSubType, _root_.de.ust.skill.sir.Type](
      poolIndex,
      name,
      superPool
    ) {
  override def getInstanceClass : Class[_root_.de.ust.skill.sir.ConstantInteger.UnknownSubType] = classOf[_root_.de.ust.skill.sir.ConstantInteger.UnknownSubType]

  override def makeSubPool(name : String, poolIndex : Int) = new ConstantIntegerSubPool(poolIndex, name, this)

  // Unknown subtypes carry no known fields of their own.
  override def ensureKnownFields(st : SkillState) {}

  // Allocates one instance per static slot of every block, in parallel per block.
  override def allocateInstances {
    for (b ← blocks.par) {
      var i : SkillID = b.bpo
      val last = i + b.staticCount
      while (i < last) {
        data(i) = new _root_.de.ust.skill.sir.ConstantInteger.UnknownSubType(i + 1, this)
        i += 1
      }
    }
  }

  def reflectiveAllocateInstance : _root_.de.ust.skill.sir.ConstantInteger.UnknownSubType = {
    val r = new _root_.de.ust.skill.sir.ConstantInteger.UnknownSubType(-1, this)
    this.newObjects.append(r)
    r
  }
}
| skill-lang/skill | src/main/scala/de/ust/skill/sir/api/internal/PoolConstantInteger.scala | Scala | bsd-3-clause | 5,323 |
package cz.kamenitxan.labelprinter.generators
import java.awt.Color
import cz.kamenitxan.labelprinter.models.Position
import org.apache.pdfbox.pdmodel.common.PDRectangle
/**
* Created by tomaspavel on 23.3.17.
*/
/**
 * Base layout for toner-label sheets on A4-sized pages. The name suggests a
 * 3x1 grid of labels — presumably three full-width rows per page; confirm
 * against wholePageWidth/wholePageHeight in PdfGenerator.
 */
abstract class Toner3x1 extends PdfGenerator {
  val PAGE_SIZE_A4 = new PDRectangle(wholePageWidth, wholePageHeight)
  // Dimensions of one label cell — assumed PDF user-space points; TODO confirm units.
  val singleWidth: Float = 805
  val singleHeight: Float = 185
  override val fontSize = 12

  /**
   * Lower-left corner of the cell at (line, row): a fixed 20/10 page margin
   * plus the cell size and a 10-unit gutter per preceding row/line.
   */
  override def getPosition(line: Int, row: Int): Position = {
    val x = 20 + singleWidth * row + 10 * row
    val y = 10 + singleHeight * line + 10 * line
    new Position(x, y)
  }

  /** Strokes the outline of one cell at `pos` — debugging aid for checking the layout. */
  protected def debugRect(pos: Position): Unit = {
    cs.addRect(pos.x, pos.y, singleWidth, singleHeight)
    cs.setStrokingColor(Color.BLACK)
    cs.stroke()
  }
}
| kamenitxan/Label-printer | src/main/java/cz/kamenitxan/labelprinter/generators/Toner3x1.scala | Scala | bsd-3-clause | 762 |
package com.typedynamic.eventrisk
/**
 * Application entry point: shows the configured data feeds and prints the
 * current DJIA cash index price.
 *
 * Previously declared as `object Main extends App` while also overriding
 * `main` — a known pitfall: overriding `main` bypasses the `App` trait's
 * delayed-initialization machinery entirely, so extending `App` was dead
 * weight and misleading. The explicit `main` below is the same JVM entry
 * point without the trait.
 */
object Main {
  def main(args: Array[String]): Unit = {
    dataFeeds.show()
    println("DJIA (cash): " + IndexPortfolio.indexPrice(IndexPortfolio.djia))
  }
}
| commonlisp/eventrisk | src/main/scala/main.scala | Scala | apache-2.0 | 215 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime.java8
/**
 * Functional-interface bridge for the specialized function type
 * `Function1[Float, Boolean]` (`$mcZF$sp`: specialized apply returning
 * Z=Boolean, taking F=Float). Lets Java 8 lambdas implement the specialized
 * shape; the generic `apply` boxes/unboxes around `apply$mcZF$sp`.
 */
@FunctionalInterface trait JFunction1$mcZF$sp extends Function1[Any, Any] with Serializable {
  def apply$mcZF$sp(v1: Float): Boolean

  override def apply(t: Any): Any = scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t)))
}
| lrytz/scala | src/library/scala/runtime/java8/JFunction1$mcZF$sp.scala | Scala | apache-2.0 | 590 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.ArrayList
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.LeafNode
import org.apache.spark.sql.hive.CarbonMetastoreCatalog
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.scan.model._
import org.apache.carbondata.spark.{CarbonFilters, RawValue, RawValueImpl}
import org.apache.carbondata.spark.rdd.CarbonScanRDD
/**
 * Spark physical plan leaf node that scans a Carbon table.
 *
 * Builds a CarbonQueryPlan from the pushed-down dimension predicates, maps
 * the requested Catalyst attributes onto Carbon dimensions/measures (in
 * table order), and executes via CarbonScanRDD. `attributesRaw` is a var
 * because attributes referenced only by partially-handled filter expressions
 * are appended to it during plan construction.
 */
case class CarbonScan(
    var attributesRaw: Seq[Attribute],
    relationRaw: CarbonRelation,
    dimensionPredicatesRaw: Seq[Expression],
    useUnsafeCoversion: Boolean = true)(@transient val ocRaw: SQLContext) extends LeafNode {
  val carbonTable = relationRaw.metaData.carbonTable
  val selectedDims = scala.collection.mutable.MutableList[QueryDimension]()
  val selectedMsrs = scala.collection.mutable.MutableList[QueryMeasure]()
  @transient val carbonCatalog = ocRaw.catalog.asInstanceOf[CarbonMetastoreCatalog]

  // Attributes used by filter expressions that Carbon cannot fully evaluate;
  // they must be decoded (and therefore added to the scan output).
  val attributesNeedToDecode = new java.util.LinkedHashSet[AttributeReference]()
  val unprocessedExprs = new ArrayBuffer[Expression]()

  // Built eagerly at construction: side effects include filling
  // attributesNeedToDecode, attributesRaw, selectedDims and selectedMsrs.
  val buildCarbonPlan: CarbonQueryPlan = {
    val plan: CarbonQueryPlan = new CarbonQueryPlan(relationRaw.databaseName, relationRaw.tableName)

    plan.setSortedDimemsions(new ArrayList[QueryDimension])

    plan.setOutLocationPath(
      CarbonProperties.getInstance().getProperty(CarbonCommonConstants.STORE_LOCATION_HDFS))
    plan.setQueryId(ocRaw.getConf("queryId", System.nanoTime() + ""))
    processFilterExpressions(plan)
    plan
  }

  /** Converts the pushed-down predicates to a Carbon filter expression and attaches it to the plan. */
  def processFilterExpressions(plan: CarbonQueryPlan) {
    if (dimensionPredicatesRaw.nonEmpty) {
      val expressionVal = CarbonFilters.processExpression(
        dimensionPredicatesRaw,
        attributesNeedToDecode,
        unprocessedExprs,
        carbonTable)
      expressionVal match {
        case Some(ce) =>
          // adding dimension used in expression in querystats
          plan.setFilterExpression(ce)
        case _ =>
      }
    }
    processExtraAttributes(plan)
  }

  /**
   * Extends attributesRaw with filter-only attributes, reorders all
   * attributes to table (dimension-then-measure) order, and registers each
   * as a QueryDimension/QueryMeasure on the plan with its query order.
   */
  private def processExtraAttributes(plan: CarbonQueryPlan) {
    if (attributesNeedToDecode.size() > 0) {
      val attributeOut = new ArrayBuffer[Attribute]() ++ attributesRaw

      // Filter out duplicate dimensions or measures.
      attributesNeedToDecode.asScala.foreach { attr =>
        if (!attributesRaw.exists(_.name.equalsIgnoreCase(attr.name))) {
          attributeOut += attr
        }
      }
      attributesRaw = attributeOut
    }

    val dimensions = carbonTable.getDimensionByTableName(carbonTable.getFactTableName)
    val measures = carbonTable.getMeasureByTableName(carbonTable.getFactTableName)
    val dimAttr = new Array[Attribute](dimensions.size())
    val msrAttr = new Array[Attribute](measures.size())
    attributesRaw.foreach { attr =>
      val carbonDimension =
        carbonTable.getDimensionByName(carbonTable.getFactTableName, attr.name)
      if(carbonDimension != null) {
        dimAttr(dimensions.indexOf(carbonDimension)) = attr
      } else {
        val carbonMeasure =
          carbonTable.getMeasureByName(carbonTable.getFactTableName, attr.name)
        if(carbonMeasure != null) {
          msrAttr(measures.indexOf(carbonMeasure)) = attr
        }
      }
    }

    // Table order: all dimensions first, then all measures; unmatched slots dropped.
    attributesRaw = dimAttr.filter(f => f != null) ++ msrAttr.filter(f => f != null)

    var queryOrder: Integer = 0
    attributesRaw.foreach { attr =>
      val carbonDimension =
        carbonTable.getDimensionByName(carbonTable.getFactTableName, attr.name)
      if (carbonDimension != null) {
        val dim = new QueryDimension(attr.name)
        dim.setQueryOrder(queryOrder)
        queryOrder = queryOrder + 1
        selectedDims += dim
      } else {
        val carbonMeasure =
          carbonTable.getMeasureByName(carbonTable.getFactTableName, attr.name)
        if (carbonMeasure != null) {
          val m1 = new QueryMeasure(attr.name)
          m1.setQueryOrder(queryOrder)
          queryOrder = queryOrder + 1
          selectedMsrs += m1
        }
      }
    }

    // Fill the selected dimensions & measures obtained from
    // attributes to query plan for detailed query
    selectedDims.foreach(plan.addDimension)
    selectedMsrs.foreach(plan.addMeasure)
  }

  /** Builds the CarbonScanRDD for this plan (raw Array[Any] rows). */
  def inputRdd: CarbonScanRDD[Array[Any]] = {
    val conf = new Configuration()
    val absoluteTableIdentifier = carbonTable.getAbsoluteTableIdentifier
    val model = QueryModel.createModel(
      absoluteTableIdentifier, buildCarbonPlan, carbonTable)

    val kv: RawValue[Array[Any]] = new RawValueImpl
    // setting queryid
    buildCarbonPlan.setQueryId(ocRaw.getConf("queryId", System.nanoTime() + ""))

    val tableCreationTime = carbonCatalog
      .getTableCreationTime(relationRaw.databaseName, relationRaw.tableName)
    val schemaLastUpdatedTime = carbonCatalog
      .getSchemaLastUpdatedTime(relationRaw.databaseName, relationRaw.tableName)
    val big = new CarbonScanRDD(
      ocRaw.sparkContext,
      model,
      buildCarbonPlan.getFilterExpression,
      kv,
      conf,
      tableCreationTime,
      schemaLastUpdatedTime,
      carbonCatalog.storePath)
    big
  }

  // Unsafe rows are only possible when no attribute needs late decoding.
  override def outputsUnsafeRows: Boolean =
    (attributesNeedToDecode.size() == 0) && useUnsafeCoversion

  override def doExecute(): RDD[InternalRow] = {
    val outUnsafeRows: Boolean = (attributesNeedToDecode.size() == 0) && useUnsafeCoversion
    inputRdd.mapPartitions { iter =>
      val unsafeProjection = UnsafeProjection.create(output.map(_.dataType).toArray)
      new Iterator[InternalRow] {
        override def hasNext: Boolean = iter.hasNext

        override def next(): InternalRow =
          if (outUnsafeRows) {
            unsafeProjection(new GenericMutableRow(iter.next()))
          } else {
            new GenericMutableRow(iter.next())
          }
      }
    }
  }

  def output: Seq[Attribute] = {
    attributesRaw
  }
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/CarbonOperators.scala | Scala | apache-2.0 | 6,926 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.planning
import org.geotools.data.Query
import org.junit.runner.RunWith
import org.locationtech.geomesa.index.TestGeoMesaDataStore
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.filter.sort.{SortBy, SortOrder}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class QueryPlannerTest extends Specification {

  import org.locationtech.geomesa.filter.ff
  import org.locationtech.geomesa.index.conf.QueryHints.RichHints

  val sft = SimpleFeatureTypes.createType("query-planner", "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")

  val ds = new TestGeoMesaDataStore(true)
  ds.createSchema(sft)

  val planner = ds.queryPlanner

  // Applies the given sort to a fresh query, runs the planner's sort
  // normalization, and returns the resulting (field, reverse-order) hint pairs.
  private def sortFields(sorts: SortBy*): Option[Seq[(String, Boolean)]] = {
    val query = new Query(sft.getTypeName)
    query.setSortBy(sorts.toArray)
    QueryPlanner.setQuerySort(sft, query)
    query.getHints.getSortFields
  }

  "QueryPlanner" should {
    "be a queryPlanner" in {
      planner.getClass mustEqual classOf[QueryPlanner[_, _, _]] // sanity check
    }

    "throw an exception for invalid requested index during explain" in {
      val query = new Query(sft.getTypeName)
      query.getHints.put(QueryHints.QUERY_INDEX, "foo")
      planner.planQuery(sft, query) must throwAn[IllegalArgumentException]
    }

    "throw an exception for invalid requested index during query" in {
      val query = new Query(sft.getTypeName)
      query.getHints.put(QueryHints.QUERY_INDEX, "foo")
      planner.runQuery(sft, query) must throwAn[IllegalArgumentException]
    }

    // Empty field name ("") denotes sort-by-feature-id; the boolean flags reverse order.
    "be able to sort by id asc" >> {
      sortFields(SortBy.NATURAL_ORDER) must beSome(Seq(("", false)))
    }

    "be able to sort by id desc" >> {
      sortFields(SortBy.REVERSE_ORDER) must beSome(Seq(("", true)))
    }

    "be able to sort by an attribute asc" >> {
      sortFields(ff.sort("name", SortOrder.ASCENDING)) must beSome(Seq(("name", false)))
    }

    "be able to sort by an attribute desc" >> {
      sortFields(ff.sort("name", SortOrder.DESCENDING)) must beSome(Seq(("name", true)))
    }

    "be able to sort by an attribute and id" >> {
      sortFields(ff.sort("name", SortOrder.ASCENDING), SortBy.NATURAL_ORDER) must
          beSome(Seq(("name", false), ("", false)))
    }

    "be able to sort by an multiple attributes" >> {
      sortFields(ff.sort("age", SortOrder.DESCENDING), ff.sort("name", SortOrder.ASCENDING)) must
          beSome(Seq(("age", true), ("name", false)))
    }
  }
}
| jahhulbert-ccri/geomesa | geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/planning/QueryPlannerTest.scala | Scala | apache-2.0 | 3,656 |
/*
* Copyright (c) 2012-15 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.dynamics
import labelled.{ FieldType, field }
import ops.coproduct.{ Inject, Selector => CSelector }
import ops.hlist.{ At, Init, Last, Prepend, Selector, ReplaceAt, Replacer, Tupler }
import ops.record.{ Selector => RSelector, Updater }
// A total optic: focuses on an `A` that is always present inside an `S`.
// `get` and `set` are the primitives; every other operation is derived.
trait Lens[S, A] extends LPLens[S, A] { outer =>
  def get(s: S): A
  def set(s: S)(a: A): S
  // Read, transform, write back.
  def modify(s: S)(f: A => A): S = set(s)(f(get(s)))
  // Lens after lens is still a lens.
  def compose[T](g: Lens[T, S]) = new Lens[T, A] {
    def get(t: T): A = outer.get(g.get(t))
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }
  // Lens after prism is a prism: the outer S (and so the A) may be absent in T.
  def compose[T](g: Prism[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = g.get(t).map(outer.get)
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }
  // Focus on the Nth positional field of A.
  def >>(n: Nat)(implicit mkLens: MkNthFieldLens[A, n.N]): Lens[S, mkLens.Elem] = mkLens() compose this
  // Focus on the field of A named by the singleton-typed witness k.
  def >>(k: Witness)(implicit mkLens: MkFieldLens[A, k.T]): Lens[S, mkLens.Elem] = mkLens() compose this
  // Dynamic member selection: `lens.foo` materializes the optic for field "foo"
  // at compile time via MkSelectDynamicOptic.
  def selectDynamic(k: String)(
    implicit mkLens: MkSelectDynamicOptic[Lens[S, A], A, k.type, Nothing]
  ): mkLens.Out = mkLens(this)
  // Narrow the focus to the constructor/subtype B of A, yielding a prism.
  def apply[B](implicit mkPrism: MkCtorPrism[A, B]): Prism[S, B] = mkPrism() compose this
  // A lens always succeeds, hence always Some; enables use in pattern matches.
  def unapply(s: S): Option[A] = Some(get(s))
  // Pair this focus with another lens on the same source (tuple-building).
  def ~[B](other: Lens[S, B]) = new ProductLensBuilder[S, (A, B)] {
    def get(s: S): (A, B) = (outer.get(s), other.get(s))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
  // Pair this focus with a prism on the same source; the pair is partial.
  def ~[B](other: Prism[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] = other.get(s).map((outer.get(s), _))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
}
// "LP" companion trait: an overload of selectDynamic distinguished by an extra
// DummyImplicit parameter and an extra type parameter B, used when the
// higher-priority overload in Lens does not apply.
trait LPLens[S, A] extends Dynamic with Serializable { self: Lens[S, A] =>
  def selectDynamic[B](k: String)(
    implicit mkLens: MkSelectDynamicOptic[Lens[S, A], A, k.type, B], dummy: DummyImplicit
  ): mkLens.Out = mkLens(this)
}
// A partial optic: focuses on an `A` that may or may not be present inside an
// `S` (typically one branch of a sealed hierarchy), hence Option-valued `get`.
trait Prism[S, A] extends LPPrism[S, A] { outer =>
  def get(s: S): Option[A]
  def set(s: S)(a: A): S
  // Modifies only when the focus is present; otherwise returns s unchanged.
  def modify(s: S)(f: A => A): S = get(s).map(f).map(a => set(s)(a)).getOrElse(s)
  // Prism after lens is a prism.
  def compose[T](g: Lens[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = outer.get(g.get(t))
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }
  // Prism after prism is a prism; both layers must match for get to succeed.
  def compose[T](g: Prism[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = g.get(t).flatMap(outer.get)
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }
  // Dynamic member selection, mirroring Lens#selectDynamic.
  def selectDynamic(k: String)(
    implicit mkPrism: MkSelectDynamicOptic[Prism[S, A], A, k.type, Nothing]
  ): mkPrism.Out = mkPrism(this)
  // Narrow the focus to the constructor/subtype B of A.
  def apply[B](implicit mkPrism: MkCtorPrism[A, B]): Prism[S, B] = mkPrism() compose this
  // A prism may fail, so unapply is exactly get.
  def unapply(s: S): Option[A] = get(s)
  // Pair with a lens on the same source; partiality comes from this prism.
  def ~[B](other: Lens[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] = outer.get(s).map((_, other.get(s)))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
  // Pair with another prism; both must match for the pair to be present.
  def ~[B](other: Prism[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] =
      for {
        fst <- outer.get(s)
        snd <- other.get(s)
      } yield (fst, snd)
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
}
// "LP" companion trait for Prism, analogous to LPLens: lower-priority
// selectDynamic overload disambiguated by DummyImplicit.
trait LPPrism[S, A] extends Dynamic with Serializable { self: Prism[S, A] =>
  def selectDynamic[B](k: String)(
    implicit mkPrism: MkSelectDynamicOptic[Prism[S, A], A, k.type, B], dummy: DummyImplicit
  ): mkPrism.Out = mkPrism(this)
}
// A lens focused on a tuple P built with `~`. Its own `~` appends one more
// lens: the current tuple is converted to an HList (Generic), the new element
// is appended, and the result is re-tupled (Tupler) into the larger tuple Q.
trait ProductLensBuilder[C, P <: Product] extends Lens[C, P] with Serializable {
  outer =>
  def ~[T, L <: HList, LT <: HList, Q <: Product, QL <: HList](other: Lens[C, T])
    (implicit
     genp: Generic.Aux[P, L],
     tpp: Tupler.Aux[L, P],
     pre: Prepend.Aux[L, T :: HNil, LT],
     tpq: Tupler.Aux[LT, Q],
     genq: Generic.Aux[Q, QL],
     init: Init.Aux[QL, L],
     last: Last.Aux[QL, T]) =
    new ProductLensBuilder[C, Q] {
      // Read the existing tuple, append the new focus, retuple.
      def get(c: C): Q = (genp.to(outer.get(c)) :+ other.get(c)).tupled
      // Split the larger tuple: init goes through the existing builder,
      // last through the newly added lens.
      def set(c: C)(q: Q) = {
        val l = genq.to(q)
        other.set(outer.set(c)(l.init.tupled))(l.last)
      }
    }
}
// Partial counterpart of ProductLensBuilder: a prism focused on a tuple P.
// `~` appends another prism; `get` succeeds only when both parts match.
trait ProductPrismBuilder[C, P <: Product] extends Prism[C, P] with Serializable {
  outer =>
  def ~[T, L <: HList, LT <: HList, Q <: Product, QL <: HList](other: Prism[C, T])
    (implicit
     genp: Generic.Aux[P, L],
     tpp: Tupler.Aux[L, P],
     pre: Prepend.Aux[L, T :: HNil, LT],
     tpq: Tupler.Aux[LT, Q],
     genq: Generic.Aux[Q, QL],
     init: Init.Aux[QL, L],
     last: Last.Aux[QL, T]) =
    new ProductPrismBuilder[C, Q] {
      // Both the existing tuple and the new focus must be present.
      def get(c: C): Option[Q] =
        for {
          init <- outer.get(c)
          last <- other.get(c)
        } yield (genp.to(init) :+ last).tupled
      // As in ProductLensBuilder: init via the existing builder, last via
      // the newly appended prism.
      def set(c: C)(q: Q) = {
        val l = genq.to(q)
        other.set(outer.set(c)(l.init.tupled))(l.last)
      }
    }
}
// Entry points and ready-made optics for common container types.
object OpticDefns {
  // `OpticDefns[C]` is the identity lens on C, the usual starting point.
  def apply[C] = id[C]
  // Polymorphic binary function composing two lenses; usable in HList folds.
  object compose extends Poly2 {
    implicit def default[A, B, C] = at[Lens[B, C], Lens[A, B]](_ compose _)
  }
  // Identity lens on C; `apply(path)` materializes the optic described by a Path.
  class RootLens[C] extends Lens[C, C] {
    def apply[P <: HList](path: Path[P])(implicit mkPath: MkPathOptic[C, P]): mkPath.Out = mkPath()
    def get(c: C): C = c
    def set(c: C)(f: C): C = f
  }
  def id[C] = new RootLens[C]
  // Membership of e in a Set viewed as a Boolean-valued lens.
  def setLens[E](e: E) =
    new Lens[Set[E], Boolean] {
      def get(s: Set[E]): Boolean = s contains e
      def set(s: Set[E])(b: Boolean): Set[E] = if(b) s+e else s-e
    }
  // The value at key k viewed as an Option-valued lens: setting None removes the key.
  def mapLens[K, V](k: K) =
    new Lens[Map[K, V], Option[V]] {
      def get(m: Map[K, V]): Option[V] = m get k
      def set(m: Map[K, V])(ov: Option[V]): Map[K, V] = ov match {
        case Some(v) => m+(k -> v)
        case None => m-k
      }
    }
  // The value at key k as a prism: get fails when the key is absent,
  // set always inserts/overwrites.
  def mapPrism[K, V](k: K) =
    new Prism[Map[K, V], V] {
      def get(m: Map[K, V]): Option[V] = m get k
      def set(m: Map[K, V])(v: V): Map[K, V] = m+(k -> v)
    }
  // Convenience constructors delegating to the corresponding Mk* type classes.
  def hlistSelectLens[L <: HList, U](implicit mkLens: MkHListSelectLens[L, U]) = mkLens()
  def coproductSelectPrism[C <: Coproduct, T](implicit mkPrism: MkCoproductSelectPrism[C, T]) = mkPrism()
  def hlistNthLens[L <: HList, N <: Nat](implicit mkLens: MkHListNthLens[L, N]) = mkLens()
  def recordLens[R <: HList](k: Witness)(implicit mkLens: MkRecordSelectLens[R, k.T]) = mkLens()
}
// Type class composing an inner optic L with an outer optic R; the result type
// Out records whether the composition is total (Lens) or partial (Prism).
trait OpticComposer[L, R] extends Serializable {
  type Out
  def apply(l: L, r: R): Out
}
object OpticComposer {
  type Aux[L, R, Out0] = OpticComposer[L, R] { type Out = Out0 }
  // Lens ∘ Lens = Lens: the only fully total case.
  implicit def composeLL[S, A, T]: Aux[Lens[S, A], Lens[T, S], Lens[T, A]] =
    new OpticComposer[Lens[S, A], Lens[T, S]] {
      type Out = Lens[T, A]
      def apply(l: Lens[S, A], r: Lens[T, S]): Lens[T, A] = l compose r
    }
  // Any combination involving a Prism yields a Prism.
  implicit def composeLP[S, A, T]: Aux[Lens[S, A], Prism[T, S], Prism[T, A]] =
    new OpticComposer[Lens[S, A], Prism[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Lens[S, A], r: Prism[T, S]): Prism[T, A] = l compose r
    }
  implicit def composePL[S, A, T]: Aux[Prism[S, A], Lens[T, S], Prism[T, A]] =
    new OpticComposer[Prism[S, A], Lens[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Prism[S, A], r: Lens[T, S]): Prism[T, A] = l compose r
    }
  implicit def composePP[S, A, T]: Aux[Prism[S, A], Prism[T, S], Prism[T, A]] =
    new OpticComposer[Prism[S, A], Prism[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Prism[S, A], r: Prism[T, S]): Prism[T, A] = l compose r
    }
}
// Materializes a lens from a case class A to its field labelled K;
// Elem is the field's type, computed by implicit resolution.
trait MkFieldLens[A, K] extends Serializable {
  type Elem
  def apply(): Lens[A, Elem]
}
object MkFieldLens {
  type Aux[A, K, Elem0] = MkFieldLens[A, K] { type Elem = Elem0 }
  // Goes via the labelled generic record representation of A, then selects
  // field K from that record.
  implicit def mkFieldLens[A, K, R <: HList, B]
    (implicit
      mkGen: MkLabelledGenericLens.Aux[A, R],
      mkLens: MkRecordSelectLens[R, K]): Aux[A, K, mkLens.Elem] =
    new MkFieldLens[A, K] {
      type Elem = mkLens.Elem
      def apply(): Lens[A, mkLens.Elem] = mkLens() compose mkGen()
    }
}
// Materializes a lens from a product type A to its Nth positional field.
trait MkNthFieldLens[A, N <: Nat] extends Serializable {
  type Elem
  def apply(): Lens[A, Elem]
}
object MkNthFieldLens {
  type Aux[A, N <: Nat, Elem0] = MkNthFieldLens[A, N] { type Elem = Elem0 }
  // Goes via the plain generic HList representation, then picks element N.
  implicit def mkGenPNth[A, N <: Nat, R <: HList, B]
    (implicit
      mkGen: MkGenericLens.Aux[A, R],
      mkLens: MkHListNthLens[R, N]): Aux[A, N, mkLens.Elem] =
    new MkNthFieldLens[A, N] {
      type Elem = mkLens.Elem
      def apply(): Lens[A, mkLens.Elem] = mkLens() compose mkGen()
    }
}
// Materializes a prism from a sealed type A to one of its constructors B.
trait MkCtorPrism[A, B] extends Serializable {
  def apply(): Prism[A, B]
}
object MkCtorPrism {
  // Goes via the generic Coproduct representation of A, then selects branch B.
  implicit def mkCtorPrism[A, R <: Coproduct, B]
    (implicit
      mkGen: MkGenericLens.Aux[A, R],
      mkPrism: MkCoproductSelectPrism[R, B]): MkCtorPrism[A, B] =
    new MkCtorPrism[A, B] {
      def apply(): Prism[A, B] = mkPrism() compose mkGen()
    }
}
// Type-level search: finds the first product P in the coproduct C that has a
// field labelled K; Prod is the found product type.
trait InferProduct[C <: Coproduct, K] extends Serializable {
  type Prod
}
object InferProduct {
  type Aux[C <: Coproduct, K, P] = InferProduct[C, K] { type Prod = P }
  // Head case: the head product P has a field K in its record representation.
  implicit def inferProduct1[P, R <: HList, T <: Coproduct, K]
    (implicit gen: LabelledGeneric.Aux[P, R], sel: RSelector[R, K]): Aux[P :+: T, K, P] =
    new InferProduct[P :+: T, K] {
      type Prod = P
    }
  // Tail case: recurse into the rest of the coproduct.
  implicit def inferProduct2[H, T <: Coproduct, K, P](implicit it: Aux[T, K, P]): Aux[H :+: T, K, P] =
    new InferProduct[H :+: T, K] {
      type Prod = P
    }
}
// Backs the `selectDynamic` calls on Lens/Prism: given the optic R built so
// far (focused on A), the selected member name K, and an optional constructor
// target B, produces the extended optic Out.
trait MkSelectDynamicOptic[R, A, K, B] extends Serializable {
  type Out
  def apply(r: R): Out
}
// Lower-priority instances, tried after those in the companion object.
trait LowPriorityMkSelectDynamicOptic {
  type Aux[R, A, K, B, Out0] = MkSelectDynamicOptic[R, A, K, B] { type Out = Out0 }
  // A has no field K directly: infer the constructor I of A that does have a
  // field K, go through that constructor (prism) and then select the field.
  implicit def mkInferCtorSelField[R, A, C <: Coproduct, I, K, E]
    (implicit
      gen: Generic.Aux[A, C],
      infer: InferProduct.Aux[C, K, I],
      mkCSel: MkCtorPrism[A, I],
      mkPSel: MkFieldLens.Aux[I, K, E],
      compose: OpticComposer[Prism[A, E], R]
    ): Aux[R, A, K, Nothing, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, Nothing] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkPSel() compose mkCSel(), r)
    }
  // Select field K of A, then narrow the field's type C to constructor B.
  implicit def mkSelFieldCtor[R, A, K, B, C]
    (implicit
      mkPSel: MkFieldLens.Aux[A, K, C],
      mkCSel: MkCtorPrism[C, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[R, A, K, B, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, B] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkCSel() compose mkPSel(), r)
    }
}
object MkSelectDynamicOptic extends LowPriorityMkSelectDynamicOptic {
  // Preferred case: A has a field K directly; plain field lens.
  implicit def mkSelField[R, A, K, E]
    (implicit
      mkLens: MkFieldLens.Aux[A, K, E],
      compose: OpticComposer[Lens[A, E], R]
    ): Aux[R, A, K, Nothing, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, Nothing] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkLens(), r)
    }
  // No field name: only a constructor target B — narrow A to B.
  implicit def mkSelCtor[R, A, B]
    (implicit
      mkPrism: MkCtorPrism[A, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[R, A, Nothing, B, compose.Out] =
    new MkSelectDynamicOptic[R, A, Nothing, B] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkPrism(), r)
    }
}
// A lens between a type T and its generic (unlabelled) representation.
// get/set are the Generic conversions, which are mutually inverse.
trait MkGenericLens[T] extends Serializable {
  type Repr
  def apply(): Lens[T, Repr]
}
object MkGenericLens {
  type Aux[T, Repr0] = MkGenericLens[T] { type Repr = Repr0 }
  implicit def mkGenericLens[T](implicit gen: Generic[T]): Aux[T, gen.Repr] =
    new MkGenericLens[T] {
      type Repr = gen.Repr
      def apply(): Lens[T, Repr] =
        new Lens[T, Repr] {
          def get(t: T): Repr = gen.to(t)
          // Note: set ignores t entirely — the new representation fully
          // determines the result.
          def set(t: T)(r: Repr): T = gen.from(r)
        }
    }
}
// Same as MkGenericLens but via LabelledGeneric, so Repr is a record whose
// fields carry their labels at the type level.
trait MkLabelledGenericLens[T] extends Serializable {
  type Repr
  def apply(): Lens[T, Repr]
}
object MkLabelledGenericLens {
  type Aux[T, Repr0] = MkLabelledGenericLens[T] { type Repr = Repr0 }
  implicit def mkLabelledGenericLens[T](implicit gen: LabelledGeneric[T]): Aux[T, gen.Repr] =
    new MkLabelledGenericLens[T] {
      type Repr = gen.Repr
      def apply(): Lens[T, Repr] =
        new Lens[T, Repr] {
          def get(t: T): Repr = gen.to(t)
          def set(t: T)(r: Repr): T = gen.from(r)
        }
    }
}
// Lens onto the Nth element of an HList L, by position.
trait MkHListNthLens[L <: HList, N <: Nat] extends Serializable {
  type Elem
  def apply(): Lens[L, Elem]
}
object MkHListNthLens {
  type Aux[L <: HList, N <: Nat, Elem0] = MkHListNthLens[L, N] { type Elem = Elem0 }
  // At computes the element type; ReplaceAt writes it back at the same index.
  implicit def mkHListNthLens[L <: HList, N <: Nat, E]
    (implicit atx: At.Aux[L, N, E], replace: ReplaceAt.Aux[L, N, E, (E, L)]): Aux[L, N, E] =
    new MkHListNthLens[L, N] {
      type Elem = E
      def apply(): Lens[L, E] =
        new Lens[L, E] {
          def get(l: L): E = l[N]
          def set(l: L)(e: E): L = l.updatedAt[N](e)
        }
    }
}
// Lens onto the (first) element of type U in an HList L, by type.
trait MkHListSelectLens[L <: HList, U] extends Serializable {
  def apply(): Lens[L, U]
}
object MkHListSelectLens {
  // Selector reads by type; Replacer returns (old, updated) — we keep _2.
  implicit def mKHlistSelectLens[L <: HList, U]
    (implicit selector: Selector[L, U], replacer: Replacer.Aux[L, U, U, (U, L)]): MkHListSelectLens[L, U] =
    new MkHListSelectLens[L, U] {
      def apply(): Lens[L, U] =
        new Lens[L, U] {
          def get(l: L) = selector(l)
          def set(l: L)(u: U) = replacer(l, u)._2
        }
    }
}
// Prism onto the branch of type T in a Coproduct C.
trait MkCoproductSelectPrism[C <: Coproduct, T] extends Serializable {
  def apply(): Prism[C, T]
}
object MkCoproductSelectPrism {
  // get inspects the current branch (CSelector); set injects a new T,
  // discarding whatever branch c held.
  implicit def mKCoproductSelectPrism[C <: Coproduct, T]
    (implicit selector: CSelector[C, T], injector: Inject[C, T]): MkCoproductSelectPrism[C, T] =
    new MkCoproductSelectPrism[C, T] {
      def apply(): Prism[C, T] =
        new Prism[C, T] {
          def get(c: C): Option[T] = selector(c)
          def set(c: C)(t: T): C = injector(t)
        }
    }
}
// Lens onto the value of the field labelled K in a record R; Elem is its type.
trait MkRecordSelectLens[R <: HList, K] extends Serializable {
  type Elem
  def apply(): Lens[R, Elem]
}
object MkRecordSelectLens {
  type Aux[R <: HList, K, Elem0] = MkRecordSelectLens[R, K] { type Elem = Elem0 }
  // Record Selector reads by label; Updater writes back a re-labelled field.
  implicit def mkRecordSelectLens[R <: HList, K, E]
    (implicit selector: RSelector.Aux[R, K, E], updater: Updater.Aux[R, FieldType[K, E], R]): Aux[R, K, E] =
    new MkRecordSelectLens[R, K] {
      type Elem = E
      def apply(): Lens[R, E] =
        new Lens[R, E] {
          def get(r: R) = selector(r)
          def set(r: R)(e: E) = updater(r, field[K](e))
        }
    }
}
// Materializes the optic described by a type-level path P (an HList of
// Select/Coselect steps, innermost step first) rooted at S. Out is Lens or
// Prism depending on whether any step is partial; Elem is the final focus.
trait MkPathOptic[S, P <: HList] extends Serializable {
  type Out
  type Elem
  def apply(): Out
}
trait LowPriorityMkPathOptic {
  type Aux[S, P <: HList, Out0, E0] = MkPathOptic[S, P] { type Out = Out0 ; type Elem = E0 }
  type Aux1[S, P <: HList, Out0] = MkPathOptic[S, P] { type Out = Out0 }
  // Fallback for a Select step when the current focus A lacks the field K
  // directly: infer the constructor I of A that has K, narrow to it (prism),
  // then select the field — mirrors MkSelectDynamicOptic.mkInferCtorSelField.
  implicit def mkCoselSelPathOptic[S, P <: HList, K, A, C <: Coproduct, I, E, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      gen: Generic.Aux[A, C],
      infer: InferProduct.Aux[C, K, I],
      mkPrism: MkCtorPrism[A, I],
      mkLens: MkFieldLens.Aux[I, K, E],
      compose: OpticComposer[Prism[A, E], R]
    ): Aux[S, Select[K] :: P, compose.Out, E] =
    new MkPathOptic[S, Select[K] :: P] {
      type Out = compose.Out
      type Elem = E
      def apply(): compose.Out = compose(mkLens() compose mkPrism(), mkPrefix())
    }
}
object MkPathOptic extends LowPriorityMkPathOptic {
  // Empty path: identity lens on S.
  implicit def mkHNilPathLens[S]: Aux[S, HNil, Lens[S, S], S] =
    new MkPathOptic[S, HNil] {
      type Out = Lens[S, S]
      type Elem = S
      def apply(): Lens[S, S] = lens[S]
    }
  // Select step: extend the optic for the prefix path with a field lens on
  // the current focus A.
  implicit def mkSelPathOptic[S, P <: HList, K, A, E, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      mkLens: MkFieldLens.Aux[A, K, E],
      compose: OpticComposer[Lens[A, E], R]
    ): Aux[S, Select[K] :: P, compose.Out, E] =
    new MkPathOptic[S, Select[K] :: P] {
      type Out = compose.Out
      type Elem = E
      def apply(): compose.Out = compose(mkLens(), mkPrefix())
    }
  // Coselect step: extend with a constructor prism narrowing A to B.
  implicit def mkCoselPathOptic[S, P <: HList, B, A, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      mkPrism: MkCtorPrism[A, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[S, Coselect[B] :: P, compose.Out, B] =
    new MkPathOptic[S, Coselect[B] :: P] {
      type Out = compose.Out
      type Elem = B
      def apply(): compose.Out = compose(mkPrism(), mkPrefix())
    }
}
// Phantom markers for path steps: Select[T] is a field selection keyed by the
// singleton name type T; Coselect[T] narrows to constructor T.
trait Select[T]
trait Coselect[T]
// Computes the steps contributed by one path segment: P is the (singleton)
// field name, S the optional constructor to narrow to first, T the steps so far.
trait Segment[P, S, T <: HList] {
  type Out <: HList
}
trait LowPrioritySegment {
  type Aux[P, S, T <: HList, Out0 <: HList] = Segment[P, S, T] { type Out = Out0 }
  // A segment with an explicit constructor S prepends both a Coselect and a Select.
  implicit def two[P, S, T <: HList]: Aux[P, S, T, Coselect[S] :: Select[P] :: T] = new Segment[P, S, T] {
    type Out = Coselect[S] :: Select[P] :: T
  }
}
object Segment extends LowPrioritySegment {
  // Preferred case: plain field selection (no constructor), a single Select step.
  implicit def one[P, T <: HList]: Aux[P, Nothing, T, Select[P] :: T] = new Segment[P, Nothing, T] {
    type Out = Select[P] :: T
  }
}
// A value-level builder for type-level paths: each `.foo` accumulates a step
// into T, and `apply[H]` records a constructor narrowing. The resulting Path
// is turned into an actual optic by MkPathOptic (see RootLens#apply).
trait Path[T <: HList] extends LPPath[T] {
  type P = Path[T]
  type L = T
  type Lens[T, E] = MkPathOptic.Aux1[T, L, shapeless.Lens[T, E]]
  type Prism[T, E] = MkPathOptic.Aux1[T, L, shapeless.Prism[T, E]]
  def apply[H]: Path[Coselect[H] :: T] = new Path[Coselect[H] :: T] {}
  def selectDynamic(h: String)(implicit segment: Segment[h.type, Nothing, T]): Path[segment.Out] =
    new Path[segment.Out] {}
}
// Lower-priority selectDynamic overload (extra type parameter H and
// DummyImplicit), mirroring the LPLens/LPPrism pattern.
trait LPPath[T <: HList] extends Dynamic { self: Path[T] =>
  def selectDynamic[H](h: String)(implicit segment: Segment[h.type, H, T], dummy: DummyImplicit): Path[segment.Out] =
    new Path[segment.Out] {}
}
// The empty path — the usual starting point for building paths.
object Path extends Path[HNil]
| isaka/shapeless | core/src/main/scala/shapeless/lenses.scala | Scala | apache-2.0 | 18,003 |
/*
* Copyright 2015 Dmitriy Yefremov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.yefremov.sleipnir.generator.types
import com.linkedin.data.schema.DataSchema
import com.linkedin.data.schema.DataSchema.Type
import net.yefremov.sleipnir.generator.GeneratedClass
/**
* This trait should be used for all type generators that do not actually generate any code, but just return references
* to existing predefined classes.
* @author Dmitriy Yefremov
*/
trait PredefinedTypeGenerator {
  self: TypeGenerator =>
  // Mapping from raw schema type (after dereferencing) to the predefined class name to reference.
  def typeNames: Map[Type, TypeName]
  // Schema whose dereferenced type selects an entry in typeNames.
  def typeSchema: DataSchema
  // Predefined types reference no other generators.
  override def referencedGenerators: Seq[TypeGenerator] = Seq.empty
  // Nothing is generated — the class already exists.
  override def generateClass: Option[GeneratedClass] = None
  // Resolves the predefined class name for the schema's dereferenced type.
  // NOTE(review): Map#apply throws NoSuchElementException for an unmapped type — confirm callers only pass mapped types.
  override def name: TypeName = typeNames(typeSchema.getDereferencedDataSchema.getType)
}
| dmitriy-yefremov/sleipnir | generator/src/main/scala/net/yefremov/sleipnir/generator/types/PredefinedTypeGenerator.scala | Scala | apache-2.0 | 1,363 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.heron.streamlet.scala
import java.io.Serializable
import org.apache.heron.streamlet.Context
/**
* All user supplied transformation functions have to be serializable.
* Thus all Streamlet transformation definitions take Serializable
* Functions as their input. We simply decorate java.util. function
* definitions with a Serializable tag to ensure that any supplied
* lambda functions automatically become serializable.
*/
trait SerializableTransformer[I, O] extends Serializable {
  // Lifecycle hook receiving the topology Context; presumably invoked once before processing starts — see trait scaladoc.
  def setup(context: Context): Unit
  // Transforms a single input i, emitting zero or more outputs through the callback f.
  def transform(i: I, f: O => Unit): Unit
  // Lifecycle hook; presumably invoked on shutdown to release resources.
  def cleanup(): Unit
}
| twitter/heron | heron/api/src/scala/org/apache/heron/streamlet/scala/SerializableTransformer.scala | Scala | apache-2.0 | 1,428 |
// Negative compiler test (dotty tests/neg). The trailing marker comments flag
// lines that MUST fail to compile — here, implicit resolution for TC[Int] must
// not succeed (case2 would recurse unboundedly). Do not "fix" this code; the
// compile failure is the point of the test.
object Test {
  case class Tuple2K[H[_], T[_], X](h: H[X], t: T[X])
  trait TC[A]
  implicit def case1[F[_]](implicit t: => TC[F[Any]]): TC[Tuple2K[[_] =>> Any, F, Any]] = ???
  implicit def case2[A, F[_]](implicit r: TC[F[Any]]): TC[A] = ???
  implicitly[TC[Int]] // error
}
// Simplified variant of the negative test above: the marked summon must also
// fail to compile. Intentionally broken — leave as-is.
object Test2 {
  trait TC[A]
  implicit def case1[F[_]](implicit t: => TC[F[Any]]): TC[String] = ???
  implicit def case2[G[_]](implicit r: TC[G[Any]]): TC[Int] = ???
  implicitly[TC[Int]] // error
}
| som-snytt/dotty | tests/neg/i3452.scala | Scala | apache-2.0 | 481 |
/*
* Copyright 2011 javaBin
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.java.submitit.app.pages
import no.java.submitit.common.LoggHandling
import no.java.submitit.app.State
import org.apache.wicket.markup.html.basic.Label
// Looks up a presentation by its EMS id (taken from the "id" request
// parameter) and either shows its review page or an error label.
class EmsIdPage extends LayoutPage with LoggHandling {

  // Raw "id" request parameter.
  // NOTE(review): getParameter may return null when the parameter is absent — confirm upstream guarantees.
  val id = getRequest.getParameter("id")

  // Kept as a public field for interface compatibility.
  val p = State().backendClient.loadPresentation(id)

  // Pattern match instead of the original isDefined/get pair: avoids the
  // partial Option#get while preserving behavior exactly.
  p match {
    case Some(presentation) =>
      setResponsePage(new ReviewPage(presentation, true, true))
    case None =>
      contentBorder.add(new Label("identified", "Could not find presentation with ems-id: " + id))
  }
} | javaBin/submitit | submitit-webapp/src/main/scala/no/java/submitit/app/pages/EmsIdPage.scala | Scala | mit | 1,115 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.ml
// scalastyle:off println
// $example on$
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.evaluation.ClusteringEvaluator
// $example off$
import org.apache.spark.sql.SparkSession
/**
* An example demonstrating k-means clustering.
* Run with
* {{{
* bin/run-example ml.KMeansExample
* }}}
*/
object KMeansExample {

  def main(args: Array[String]): Unit = {
    // One SparkSession per example run, named after this object.
    val spark = SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()

    // $example on$
    // Load the sample points in libsvm format.
    val dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

    // Fit a 2-cluster k-means model with a fixed seed for reproducibility.
    val model = new KMeans().setK(2).setSeed(1L).fit(dataset)

    // Assign every point to its nearest cluster.
    val predictions = model.transform(dataset)

    // Score the clustering with the Silhouette metric (squared euclidean distance).
    val silhouette = new ClusteringEvaluator().evaluate(predictions)
    println(s"Silhouette with squared euclidean distance = $silhouette")

    // Print the learned cluster centers.
    println("Cluster Centers: ")
    model.clusterCenters.foreach(println)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println
| lhfei/spark-in-action | spark-3.x/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala | Scala | apache-2.0 | 2,064 |
// Base functor of a list: one layer of list structure, with the recursive
// tail position abstracted out as the type parameter A.
sealed trait ListF[+E, +A]
// Empty-list constructor: carries no element and no tail, hence Nothing for both parameters.
case object NilF extends ListF[Nothing, Nothing]
case class ConsF[E, A](h: E, t: A) extends ListF[E, A] | hmemcpy/milewski-ctfp-pdf | src/content/3.8/code/scala/snippet19.scala | Scala | gpl-3.0 | 130 |
package observatory
import com.sksamuel.scrimage.Image
/**
* 5th milestone: value-added information visualization
*/
object Visualization2 {

  /**
    * @param x X coordinate between 0 and 1
    * @param y Y coordinate between 0 and 1
    * @param d00 Top-left value
    * @param d01 Bottom-left value
    * @param d10 Top-right value
    * @param d11 Bottom-right value
    * @return A guess of the value at (x, y) based on the four known values, using bilinear interpolation
    *         See https://en.wikipedia.org/wiki/Bilinear_interpolation#Unit_Square
    */
  def bilinearInterpolation(
    x: Double,
    y: Double,
    d00: Double,
    d01: Double,
    d10: Double,
    d11: Double
  ): Double = {
    // Delegates to the shared unit-square interpolation; the caller is
    // responsible for supplying x and y already inside [0, 1].
    UnitSquare.interpolate(UnitSquare(x, y, d00, d01, d10, d11))
  }

  /**
    * @param grid Grid to visualize
    * @param colors Color scale to use
    * @param zoom Zoom level of the tile to visualize
    * @param x X value of the tile to visualize
    * @param y Y value of the tile to visualize
    * @return The image of the tile at (x, y, zoom) showing the grid using the given color scale
    */
  def visualizeGrid(
    grid: (Int, Int) => Double,
    colors: Iterable[(Double, Color)],
    zoom: Int,
    x: Int,
    y: Int
  ): Image = {
    import observatory.utils.TileVisualizer.tile
    // Per-pixel temperature: bilinearly interpolate the integer grid around
    // the pixel's geographic location.
    // NOTE(review): correctness depends on UnitSquare.calculate producing
    // fractional coordinates in [0, 1] — see the review note on that method.
    val temperature: Location => Double = (location) => {
      val square = UnitSquare.calculate(location, grid)
      UnitSquare.interpolate(square)
    }
    tile(temperature, colors, zoom, x, y)
  }
}
/**
  * A point inside the unit square together with the values at the four corners,
  * as consumed by [[UnitSquare.interpolate]].
  *
  * @param x   horizontal coordinate, expected in [0, 1]
  * @param y   vertical coordinate, expected in [0, 1]
  * @param d00 value at (0, 0)
  * @param d01 value at (0, 1)
  * @param d10 value at (1, 0)
  * @param d11 value at (1, 1)
  */
case class UnitSquare(x: Double, y: Double, d00: Double, d01: Double, d10: Double, d11: Double)

object UnitSquare {

  /**
    * Standard bilinear interpolation over the unit square
    * (https://en.wikipedia.org/wiki/Bilinear_interpolation#Unit_Square).
    * The four corner weights sum to 1; they are all non-negative only when
    * x and y lie within [0, 1].
    */
  def interpolate(data: UnitSquare): Double = {
    data.d00 * (1 - data.x) * (1 - data.y) + data.d10 * data.x * (1 - data.y) + data.d01 * (1 - data.x) * data.y + data.d11 * data.x * data.y
  }

  /**
    * Builds the unit square surrounding `location` from the integer grid.
    *
    * Bug fix: the fractional coordinates were previously computed as
    * `lon - floor(lon)` and `lat - ceil(lat)`. That left y in (-1, 0] — outside
    * the unit square, turning interpolation into extrapolation with negative
    * weights — and paired x with the wrong columns given how d00..d11 are
    * sampled below (d00 at (ceil lat, ceil lon), d11 at (floor lat, floor lon)).
    * Measuring both offsets from the ceil corner (`ceil - coordinate`) keeps
    * them in [0, 1) and gives d00 the dominant weight when the location is
    * closest to the (ceil lat, ceil lon) corner, as interpolate expects.
    */
  def calculate(location: Location, grid: (Int, Int) => Double): UnitSquare = {
    import math._
    val topLeftLatitude = ceil(location.lat).toInt
    val topLeftLongitude = ceil(location.lon).toInt
    val bottomRightLatitude = floor(location.lat).toInt
    val bottomRightLongitude = floor(location.lon).toInt
    // Fractional offsets measured from the ceil corner; both in [0, 1).
    // (For integer coordinates all four corners coincide, so 0 is harmless.)
    val longitude = topLeftLongitude - location.lon
    val latitude = topLeftLatitude - location.lat
    UnitSquare(longitude, latitude,
      grid(topLeftLatitude, topLeftLongitude),
      grid(bottomRightLatitude, topLeftLongitude),
      grid(topLeftLatitude, bottomRightLongitude),
      grid(bottomRightLatitude, bottomRightLongitude)
    )
  }
}
| masipauskas/coursera-scala | capstone/observatory/src/main/scala/observatory/Visualization2.scala | Scala | unlicense | 2,512 |
package eu.inn.binders.cassandra
import scala.reflect.runtime.universe._
import com.datastax.driver.core.{BoundStatement, PreparedStatement, Session}
import eu.inn.binders.naming.Converter
import org.slf4j.LoggerFactory
class Query[C <: Converter : TypeTag](val session: Session, val preparedStatement: PreparedStatement) {
//type C = C
def this(session: Session, queryString: String) = this(session, QueryImpl.prepareStatement(session, queryString))
def createStatement(): Statement[C] = new Statement[C](session, new BoundStatement(preparedStatement))
}
private [cassandra] object QueryImpl {
protected val logger = LoggerFactory.getLogger(getClass)
def prepareStatement(session: Session, queryString: String): PreparedStatement = {
if (logger.isTraceEnabled) {
logger.trace(s"Preparing statement: $queryString")
}
session.prepare(queryString)
}
} | InnovaCo/binders-cassandra | src/main/scala/eu/inn/binders/cassandra/Query.scala | Scala | bsd-3-clause | 887 |
package filodb.core.memstore
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.StampedLock
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.{Random, Try}
import bloomfilter.CanGenerateHashFrom
import bloomfilter.mutable.BloomFilter
import com.googlecode.javaewah.{EWAHCompressedBitmap, IntIterator}
import com.typesafe.scalalogging.StrictLogging
import debox.{Buffer, Map => DMap}
import kamon.Kamon
import kamon.metric.MeasurementUnit
import kamon.tag.TagSet
import monix.eval.Task
import monix.execution.{Scheduler, UncaughtExceptionReporter}
import monix.execution.atomic.AtomicBoolean
import monix.reactive.Observable
import org.jctools.maps.NonBlockingHashMapLong
import spire.syntax.cfor._
import filodb.core.{ErrorResponse, _}
import filodb.core.binaryrecord2._
import filodb.core.downsample.{DownsampleConfig, DownsamplePublisher, ShardDownsampler}
import filodb.core.metadata.{Schema, Schemas}
import filodb.core.query.{ColumnFilter, QuerySession}
import filodb.core.store._
import filodb.memory._
import filodb.memory.format.{UnsafeUtils, ZeroCopyUTF8String}
import filodb.memory.format.BinaryVector.BinaryVectorPtr
import filodb.memory.format.ZeroCopyUTF8String._
/**
 * Kamon metrics for a single shard of a dataset. Every instrument is tagged with
 * the shard number and dataset name so per-shard stats can be aggregated or
 * inspected individually.
 */
class TimeSeriesShardStats(dataset: DatasetRef, shardNum: Int) {
  val tags = Map("shard" -> shardNum.toString, "dataset" -> dataset.toString)

  // Shared tag set plus tiny factories so each instrument below is a one-liner.
  private val tagSet = TagSet.from(tags)
  private def counter(name: String) = Kamon.counter(name).withTags(tagSet)
  private def byteCounter(name: String) =
    Kamon.counter(name, MeasurementUnit.information.bytes).withTags(tagSet)
  private def gauge(name: String) = Kamon.gauge(name).withTags(tagSet)

  val rowsIngested = counter("memstore-rows-ingested")
  val partitionsCreated = counter("memstore-partitions-created")
  val dataDropped = counter("memstore-data-dropped")
  val unknownSchemaDropped = counter("memstore-unknown-schema-dropped")
  val oldContainers = counter("memstore-incompatible-containers")
  val offsetsNotRecovered = counter("memstore-offsets-not-recovered")
  val outOfOrderDropped = counter("memstore-out-of-order-samples")
  val rowsSkipped = counter("recovery-row-skipped")
  // Deliberately untagged: container sizes are not shard-specific.
  val rowsPerContainer = Kamon.histogram("num-samples-per-container").withoutTags()
  val numSamplesEncoded = counter("memstore-samples-encoded")
  val encodedBytes = byteCounter("memstore-encoded-bytes-allocated")
  val encodedHistBytes = byteCounter("memstore-hist-encoded-bytes")
  val flushesSuccessful = counter("memstore-flushes-success")
  val flushesFailedPartWrite = counter("memstore-flushes-failed-partition")
  val flushesFailedChunkWrite = counter("memstore-flushes-failed-chunk")
  val flushesFailedOther = counter("memstore-flushes-failed-other")
  val numDirtyPartKeysFlushed = counter("memstore-index-num-dirty-keys-flushed")
  val indexRecoveryNumRecordsProcessed = counter("memstore-index-recovery-partkeys-processed")
  val downsampleRecordsCreated = counter("memstore-downsample-records-created")

  /**
   * These gauges are intended to be combined with one of the latest offset of Kafka partitions so we can produce
   * stats on message lag:
   *   kafka_ingestion_lag = kafka_latest_offset - offsetLatestInMem
   *   memstore_ingested_to_persisted_lag = offsetLatestInMem - offsetLatestFlushed
   *   etc.
   *
   * NOTE: only positive offsets will be recorded.  Kafka does not give negative offsets, but Kamon cannot record
   * negative numbers either.
   * The "latest" vs "earliest" flushed reflects that there are really n offsets, one per flush group.
   */
  val offsetLatestInMem = gauge("shard-offset-latest-inmemory")
  val offsetLatestFlushed = gauge("shard-offset-flushed-latest")
  val offsetEarliestFlushed = gauge("shard-offset-flushed-earliest")

  val numPartitions = gauge("num-partitions")
  val numActivelyIngestingParts = gauge("num-ingesting-partitions")
  val numChunksPagedIn = counter("chunks-paged-in")
  val partitionsPagedFromColStore = counter("memstore-partitions-paged-in")
  val partitionsQueried = counter("memstore-partitions-queried")
  val purgedPartitions = counter("memstore-partitions-purged")
  val partitionsRestored = counter("memstore-partitions-paged-restored")
  val chunkIdsEvicted = counter("memstore-chunkids-evicted")
  val partitionsEvicted = counter("memstore-partitions-evicted")
  val queryTimeRangeMins = Kamon.histogram("query-time-range-minutes").withTags(tagSet)
  val memoryStats = new MemoryStats(tags)

  val bufferPoolSize = gauge("memstore-writebuffer-pool-size")
  val indexEntries = gauge("memstore-index-entries")
  val indexBytes = gauge("memstore-index-ram-bytes")

  val evictedPartKeyBloomFilterQueries = counter("evicted-pk-bloom-filter-queries")
  val evictedPartKeyBloomFilterFalsePositives = counter("evicted-pk-bloom-filter-fp")
  val evictedPkBloomFilterSize = gauge("evicted-pk-bloom-filter-approx-size")
  val evictedPartIdLookupMultiMatch = counter("evicted-partId-lookup-multi-match")

  /**
   * Difference between the local clock and the received ingestion timestamps, in milliseconds.
   * If this gauge is negative, then the received timestamps are ahead, and it will stay this
   * way for a bit, due to the monotonic adjustment. When the gauge value is positive (which is
   * expected), then the delay reflects the delay between the generation of the samples and
   * receiving them, assuming that the clocks are in sync.
   */
  val ingestionClockDelay = gauge("ingestion-clock-delay")
}
object TimeSeriesShard {
  /**
   * Writes metadata for TSPartition where every vector is written:
   * partition ID (4 bytes), then the ChunkSetInfo, then one pointer per chunk vector.
   */
  def writeMeta(addr: Long, partitionID: Int, info: ChunkSetInfo, vectors: Array[BinaryVectorPtr]): Unit = {
    UnsafeUtils.setInt(UnsafeUtils.ZeroPointer, addr, partitionID)
    ChunkSetInfo.copy(info, addr + 4)
    var vecNo = 0
    while (vecNo < vectors.length) {
      ChunkSetInfo.setVectorPtr(addr + 4, vecNo, vectors(vecNo))
      vecNo += 1
    }
  }

  /**
   * Copies serialized ChunkSetInfo bytes from persistent storage / on-demand paging,
   * prefixed by the partition ID, followed by one pointer per chunk vector.
   */
  def writeMeta(addr: Long, partitionID: Int, bytes: Array[Byte], vectors: Array[BinaryVectorPtr]): Unit = {
    UnsafeUtils.setInt(UnsafeUtils.ZeroPointer, addr, partitionID)
    ChunkSetInfo.copy(bytes, addr + 4)
    var vecNo = 0
    while (vecNo < vectors.length) {
      ChunkSetInfo.setVectorPtr(addr + 4, vecNo, vectors(vecNo))
      vecNo += 1
    }
  }

  /**
   * Same as the byte-array writeMeta above, but without the leading partition ID.
   */
  def writeMetaWithoutPartId(addr: Long, bytes: Array[Byte], vectors: Array[BinaryVectorPtr]): Unit = {
    ChunkSetInfo.copy(bytes, addr)
    var vecNo = 0
    while (vecNo < vectors.length) {
      ChunkSetInfo.setVectorPtr(addr, vecNo, vectors(vecNo))
      vecNo += 1
    }
  }

  // Initial size of partitionSet and partition map structures.  Make large enough to avoid too many resizes.
  val InitialNumPartitions = 128 * 1024

  // Not a real partition, just a special marker for "out of memory"
  val OutOfMemPartition = UnsafeUtils.ZeroPointer.asInstanceOf[TimeSeriesPartition]

  val EmptyBitmap = new EWAHCompressedBitmap()

  /**
   * Calculates the flush group of an ingest record or partition key.  Be sure to use the right RecordSchema -
   * dataset.ingestionSchema or dataset.partKeySchema.
   */
  def partKeyGroup(schema: RecordSchema, partKeyBase: Any, partKeyOffset: Long, numGroups: Int): Int = {
    val hash = schema.partitionHash(partKeyBase, partKeyOffset)
    Math.abs(hash % numGroups)
  }

  // Sentinel partId meaning "allocate a fresh partition ID".
  private[memstore] final val CREATE_NEW_PARTID = -1
}
// Lightweight handle to a partition-key region: base object (or null for off-heap) plus byte offset.
private[core] final case class PartKey(base: Any, offset: Long)
// PartKey plus the partition's first/last sample times; callers pass -1 when times are not tracked.
private[core] final case class PartKeyWithTimes(base: Any, offset: Long, startTime: Long, endTime: Long)
/**
 * Iterator over TimeSeriesPartition objects which also reports any partIds that
 * could not be resolved to an in-memory partition while iterating.
 */
trait PartitionIterator extends Iterator[TimeSeriesPartition] {
  // partIds encountered but not resolvable to an in-memory partition
  def skippedPartIDs: Buffer[Int]
}

object PartitionIterator {
  /** Adapts a plain partition iterator; such an iterator never skips any partIds. */
  def fromPartIt(baseIt: Iterator[TimeSeriesPartition]): PartitionIterator = new PartitionIterator {
    val skippedPartIDs = Buffer.empty[Int]
    final def hasNext: Boolean = baseIt.hasNext
    final def next: TimeSeriesPartition = baseIt.next
  }
}
/**
 * TSPartition lookup from filters result, usually step 1 of querying.
 *
 * @param shard the shard number this lookup was performed against
 * @param chunkMethod the chunk scan method (time range) used for the lookup
 * @param partsInMemory iterates through the in-Memory partitions, some of which may not need ODP.
 *                      Caller needs to filter further
 * @param firstSchemaId if defined, the first Schema ID found. If not defined, probably there's no data.
 * @param partIdsMemTimeGap contains partIDs in memory but with potential time gaps in data. Their
 *                          startTimes from Lucene are mapped from the ID.
 * @param partIdsNotInMemory is a collection of partIds fully not in memory
 * @param pkRecords part key records fetched from the Lucene index for the matching filters, if any
 */
case class PartLookupResult(shard: Int,
                            chunkMethod: ChunkScanMethod,
                            partsInMemory: debox.Buffer[Int],
                            firstSchemaId: Option[Int] = None,
                            partIdsMemTimeGap: debox.Map[Int, Long] = debox.Map.empty,
                            partIdsNotInMemory: debox.Buffer[Int] = debox.Buffer.empty,
                            pkRecords: Seq[PartKeyLuceneIndexRecord] = Seq.empty)
// Raised when a query matches partitions with more than one schema; caller must filter to one schema.
final case class SchemaMismatch(expected: String, found: String) extends
  Exception(s"Multiple schemas found, please filter. Expected schema $expected, found schema $found")

object SchemaMismatch {
  /** Convenience constructor from Schema objects, using their names in the message. */
  def apply(expected: Schema, found: Schema): SchemaMismatch = SchemaMismatch(expected.name, found.name)
}
// scalastyle:off number.of.methods
// scalastyle:off file.size.limit
/**
* Contains all of the data for a SINGLE shard of a time series oriented dataset.
*
* Each partition has an integer ID which is used for bitmap indexing using PartitionKeyIndex.
* Within a shard, the partitions are grouped into a fixed number of groups to facilitate persistence and recovery:
* - groups spread out the persistence/flushing load across time
* - having smaller batches of flushes shortens the window of recovery and enables skipping of records/less CPU
*
* Each incoming time series is hashed into a group. Each group has its own watermark. The watermark indicates,
* for that group, up to what offset incoming records for that group has been persisted. At recovery time, records
* that fall below the watermark for that group will be skipped (since they can be recovered from disk).
*
* @param bufferMemoryManager Unencoded/unoptimized ingested data is stored in buffers that are allocated from this
* memory pool. This pool is also used to store partition keys.
* @param storeConfig the store portion of the sourceconfig, not the global FiloDB application config
* @param downsampleConfig configuration for downsample operations
* @param downsamplePublisher is shared among all shards of the dataset on the node
*/
class TimeSeriesShard(val ref: DatasetRef,
val schemas: Schemas,
val storeConfig: StoreConfig,
val shardNum: Int,
val bufferMemoryManager: MemFactory,
colStore: ColumnStore,
metastore: MetaStore,
evictionPolicy: PartitionEvictionPolicy,
downsampleConfig: DownsampleConfig,
downsamplePublisher: DownsamplePublisher)
(implicit val ioPool: ExecutionContext) extends StrictLogging {
import collection.JavaConverters._
import FiloSchedulers._
import TimeSeriesShard._
val shardStats = new TimeSeriesShardStats(ref, shardNum)
/**
* Map of all partitions in the shard stored in memory, indexed by partition ID
*/
private[memstore] val partitions = new NonBlockingHashMapLong[TimeSeriesPartition](InitialNumPartitions, false)
/**
* next partition ID number
*/
private var nextPartitionID = 0
/**
* This index helps identify which partitions have any given column-value.
* Used to answer queries not involving the full partition key.
* Maintained using a high-performance bitmap index.
*/
private[memstore] final val partKeyIndex = new PartKeyLuceneIndex(ref, schemas.part, shardNum,
storeConfig.demandPagedRetentionPeriod)
/**
* Keeps track of count of rows ingested into memstore, not necessarily flushed.
* This is generally used to report status and metrics.
*/
private final var ingested = 0L
/**
* Keeps track of last offset ingested into memory (not necessarily flushed).
* This value is used to keep track of the checkpoint to be written for next flush for any group.
*/
private final var _offset = Long.MinValue
/**
* The maximum blockMetaSize amongst all the schemas this Dataset could ingest
*/
val maxMetaSize = schemas.schemas.values.map(_.data.blockMetaSize).max
require (storeConfig.maxChunkTime > storeConfig.flushInterval, "MaxChunkTime should be greater than FlushInterval")
val maxChunkTime = storeConfig.maxChunkTime.toMillis
// Called to remove chunks from ChunkMap of a given partition, when an offheap block is reclaimed
private val reclaimListener = new ReclaimListener {
def onReclaim(metaAddr: Long, numBytes: Int): Unit = {
val partID = UnsafeUtils.getInt(metaAddr)
val partition = partitions.get(partID)
if (partition != UnsafeUtils.ZeroPointer) {
// The number of bytes passed in is the metadata size which depends on schema. It should match the
// TSPartition's blockMetaSize; if it doesn't that is a flag for possible corruption, and we should halt
// the process to be safe and log details for further debugging.
val chunkID = UnsafeUtils.getLong(metaAddr + 4)
if (numBytes != partition.schema.data.blockMetaSize) {
logger.error(f"POSSIBLE CORRUPTION DURING onReclaim(metaAddr=0x$metaAddr%08x, numBytes=$numBytes)" +
s"Expected meta size: ${partition.schema.data.blockMetaSize} for schema=${partition.schema}" +
s" Reclaiming chunk chunkID=$chunkID from shard=$shardNum " +
s"partID=$partID ${partition.stringPartition}")
logger.warn("Halting FiloDB...")
sys.exit(33) // Special onReclaim corruption exit code
}
partition.removeChunksAt(chunkID)
logger.debug(s"Reclaiming chunk chunkID=$chunkID from shard=$shardNum " +
s"partID=$partID ${partition.stringPartition}")
}
}
}
// Create a single-threaded scheduler just for ingestion. Name the thread for ease of debugging
// NOTE: to control intermixing of different Observables/Tasks in this thread, customize ExecutionModel param
val ingestSched = Scheduler.singleThread(s"$IngestSchedName-$ref-$shardNum",
reporter = UncaughtExceptionReporter(logger.error("Uncaught Exception in TimeSeriesShard.ingestSched", _)))
private val blockMemorySize = storeConfig.shardMemSize
protected val numGroups = storeConfig.groupsPerShard
private val chunkRetentionHours = (storeConfig.demandPagedRetentionPeriod.toSeconds / 3600).toInt
val pagingEnabled = storeConfig.demandPagingEnabled
/**
* PartitionSet - access TSPartition using ingest record partition key in O(1) time.
*/
private[memstore] final val partSet = PartitionSet.ofSize(InitialNumPartitions)
// Use a StampedLock because it supports optimistic read locking. This means that no blocking
// occurs in the common case, when there isn't any contention reading from partSet.
private[memstore] final val partSetLock = new StampedLock
// The off-heap block store used for encoded chunks
private val shardTags = Map("dataset" -> ref.dataset, "shard" -> shardNum.toString)
private val blockStore = new PageAlignedBlockManager(blockMemorySize, shardStats.memoryStats, reclaimListener,
storeConfig.numPagesPerBlock)
private val blockFactoryPool = new BlockMemFactoryPool(blockStore, maxMetaSize, shardTags)
/**
* Lock that protects chunks from being reclaimed from Memstore.
* This is needed to prevent races between ODP queries and reclaims.
*/
private[memstore] final val reclaimLock = blockStore.reclaimLock
// Requires blockStore.
startHeadroomTask(ingestSched)
// Each shard has a single ingestion stream at a time. This BlockMemFactory is used for buffer overflow encoding
// strictly during ingest() and switchBuffers().
private[core] val overflowBlockFactory = new BlockMemFactory(blockStore, None, maxMetaSize,
shardTags ++ Map("overflow" -> "true"), true)
val partitionMaker = new DemandPagedChunkStore(this, blockStore, chunkRetentionHours)
private val partKeyBuilder = new RecordBuilder(MemFactory.onHeapFactory, reuseOneContainer = true)
private val partKeyArray = partKeyBuilder.allContainers.head.base.asInstanceOf[Array[Byte]]
private[memstore] val bufferPools = {
val pools = schemas.schemas.values.map { sch =>
sch.schemaHash -> new WriteBufferPool(bufferMemoryManager, sch.data, storeConfig)
}
DMap(pools.toSeq: _*)
}
private final val partitionGroups = Array.fill(numGroups)(new EWAHCompressedBitmap)
/**
* Bitmap to track actively ingesting partitions.
* This bitmap is maintained in addition to the ingesting flag per partition.
* TSP.ingesting is MUCH faster than bit.get(i) but we need the bitmap for faster operations
* for all partitions of shard (like ingesting cardinality counting, rollover of time buckets etc).
*/
private[memstore] final val activelyIngesting = debox.Set.empty[Int]
private val numFlushIntervalsDuringRetention = Math.ceil(chunkRetentionHours.hours / storeConfig.flushInterval).toInt
// Use 1/4 of flush intervals within retention period for initial ChunkMap size
private val initInfoMapSize = Math.max((numFlushIntervalsDuringRetention / 4) + 4, 20)
/**
* Timestamp to start searching for partitions to evict. Advances as more and more partitions are evicted.
* Used to ensure we keep searching for newer and newer partitions to evict.
*/
private[core] var evictionWatermark: Long = 0L
/**
* Dirty partitions whose start/end times have not been updated to cassandra.
*
* IMPORTANT. Only modify this var in IngestScheduler
*/
private[memstore] final var dirtyPartitionsForIndexFlush = debox.Buffer.empty[Int]
/**
* This is the group during which this shard will flush dirty part keys. Randomized to
* ensure we dont flush time buckets across shards at same time
*/
private final val dirtyPartKeysFlushGroup = Random.nextInt(numGroups)
logger.info(s"Dirty Part Keys for shard=$shardNum will flush in group $dirtyPartKeysFlushGroup")
/**
* The offset up to and including the last record in this group to be successfully persisted.
* Also used during recovery to figure out what incoming records to skip (since it's persisted)
*/
private final val groupWatermark = Array.fill(numGroups)(Long.MinValue)
/**
* Highest ingestion timestamp observed.
*/
private[memstore] var lastIngestionTime = Long.MinValue
// Flush groups when ingestion time is observed to cross a time boundary (typically an hour),
// plus a group-specific offset. This simplifies disaster recovery -- chunks can be copied
// without concern that they may overlap in time.
private val flushBoundaryMillis = storeConfig.flushInterval.toMillis
// Defines the group-specific flush offset, to distribute the flushes around such they don't
// all flush at the same time. With an hourly boundary and 60 flush groups, flushes are
// scheduled once a minute.
private val flushOffsetMillis = flushBoundaryMillis / numGroups
/**
* Helper for downsampling ingested data for long term retention.
*/
private final val shardDownsamplers = {
val downsamplers = schemas.schemas.values.map { s =>
s.schemaHash -> new ShardDownsampler(ref.dataset, shardNum,
s, s.downsample.getOrElse(s), downsampleConfig.enabled, shardStats)
}
DMap(downsamplers.toSeq: _*)
}
private[memstore] val evictedPartKeys =
BloomFilter[PartKey](storeConfig.evictedPkBfCapacity, falsePositiveRate = 0.01)(new CanGenerateHashFrom[PartKey] {
override def generateHash(from: PartKey): Long = {
schemas.part.binSchema.partitionHash(from.base, from.offset)
}
})
private var evictedPartKeysDisposed = false
private val brRowReader = new MultiSchemaBRRowReader()
/**
* Detailed filtered ingestion record logging. See "trace-filters" StoreConfig setting. Warning: may blow up
* logs, use at your own risk.
*/
val tracedPartFilters = storeConfig.traceFilters
  /**
   * Iterate TimeSeriesPartition objects relevant to given partIds.
   * partIds whose partition is no longer in memory are recorded in skippedPartIDs
   * rather than returned. The next element is pre-fetched so hasNext is cheap and
   * side-effect free.
   */
  case class InMemPartitionIterator(intIt: IntIterator) extends PartitionIterator {
    var nextPart = UnsafeUtils.ZeroPointer.asInstanceOf[TimeSeriesPartition]
    val skippedPartIDs = debox.Buffer.empty[Int]
    // Advances to the next partId that resolves to an in-memory partition,
    // accumulating unresolvable ids into skippedPartIDs.
    private def findNext(): Unit = {
      while (intIt.hasNext && nextPart == UnsafeUtils.ZeroPointer) {
        val nextPartID = intIt.next
        nextPart = partitions.get(nextPartID)
        if (nextPart == UnsafeUtils.ZeroPointer) skippedPartIDs += nextPartID
      }
    }
    findNext()
    final def hasNext: Boolean = nextPart != UnsafeUtils.ZeroPointer
    final def next: TimeSeriesPartition = {
      val toReturn = nextPart
      nextPart = UnsafeUtils.ZeroPointer.asInstanceOf[TimeSeriesPartition] // reset so that we can keep going
      findNext()
      toReturn
    }
  }
  /**
   * Iterate TimeSeriesPartition objects relevant to given partIds.
   * Same contract as InMemPartitionIterator but driven by a debox.Buffer of partIds;
   * ids not found in the in-memory partitions map land in skippedPartIDs.
   */
  case class InMemPartitionIterator2(partIds: debox.Buffer[Int]) extends PartitionIterator {
    var nextPart = UnsafeUtils.ZeroPointer.asInstanceOf[TimeSeriesPartition]
    val skippedPartIDs = debox.Buffer.empty[Int]
    var nextPartId = -1
    findNext()

    // Advances nextPartId until a resolvable partition is found or the buffer is exhausted.
    private def findNext(): Unit = {
      while (nextPartId + 1 < partIds.length && nextPart == UnsafeUtils.ZeroPointer) {
        nextPartId += 1
        nextPart = partitions.get(partIds(nextPartId))
        if (nextPart == UnsafeUtils.ZeroPointer) skippedPartIDs += partIds(nextPartId)
      }
    }

    final def hasNext: Boolean = nextPart != UnsafeUtils.ZeroPointer
    final def next: TimeSeriesPartition = {
      val toReturn = nextPart
      nextPart = UnsafeUtils.ZeroPointer.asInstanceOf[TimeSeriesPartition] // reset so that we can keep going
      findNext()
      toReturn
    }
  }
  // RECOVERY: Check the watermark for the group that this record is part of.  If the ingestOffset is < watermark,
  // then do not bother with the expensive partition key comparison and ingestion.  Just skip it
  class IngestConsumer(var ingestionTime: Long = 0,
                       var numActuallyIngested: Int = 0,
                       var ingestOffset: Long = -1L) extends BinaryRegionConsumer {
    // Receives a new ingestion BinaryRecord
    final def onNext(recBase: Any, recOffset: Long): Unit = {
      val schemaId = RecordSchema.schemaID(recBase, recOffset)
      val schema = schemas(schemaId)
      if (schema != Schemas.UnknownSchema) {
        val group = partKeyGroup(schema.ingestionSchema, recBase, recOffset, numGroups)
        if (ingestOffset < groupWatermark(group)) {
          // Record already persisted before the crash: skip the sample, but still ensure
          // the partition exists so the index sees it (see comment below).
          shardStats.rowsSkipped.increment()
          try {
            // Needed to update index with new partitions added during recovery with correct startTime.
            // This is important to do since the group designated for dirty part key persistence can
            // lag behind group the partition belongs to. Hence during recovery, we skip
            // ingesting the sample, but create the partition and mark it as dirty.
            // TODO:
            // explore aligning index time buckets with chunks, and we can then
            // remove this partition existence check per sample.
            val part: FiloPartition = getOrAddPartitionForIngestion(recBase, recOffset, group, schema)
            if (part == OutOfMemPartition) { disableAddPartitions() }
          } catch {
            case e: OutOfOffheapMemoryException => disableAddPartitions()
            case e: Exception => logger.error(s"Unexpected ingestion err", e); disableAddPartitions()
          }
        } else {
          getOrAddPartitionAndIngest(ingestionTime, recBase, recOffset, group, schema)
          numActuallyIngested += 1
        }
      } else {
        logger.debug(s"Unknown schema ID $schemaId will be ignored during ingestion")
        shardStats.unknownSchemaDropped.increment()
      }
    }
  }

  // Single mutable consumer reused across containers; safe because ingestion is single-threaded (ingestSched).
  private[memstore] val ingestConsumer = new IngestConsumer()
  /**
   * Ingest new BinaryRecords in a RecordContainer to this shard.
   * Skips rows if the offset is below the group watermark for that record's group.
   * Adds new partitions if needed.
   *
   * @param container the record container to consume; stale-versioned containers are counted and dropped
   * @param offset the ingestion (e.g. Kafka) offset of this container
   * @return the latest offset ingested into memory (unchanged if the container was empty or old)
   */
  def ingest(container: RecordContainer, offset: Long): Long = {
    assertThreadName(IngestSchedName)
    if (container.isCurrentVersion) {
      if (!container.isEmpty) {
        // Prime the reusable consumer with this container's context before consuming.
        ingestConsumer.ingestionTime = container.timestamp
        ingestConsumer.numActuallyIngested = 0
        ingestConsumer.ingestOffset = offset
        brRowReader.recordBase = container.base
        container.consumeRecords(ingestConsumer)
        shardStats.rowsIngested.increment(ingestConsumer.numActuallyIngested)
        shardStats.rowsPerContainer.record(ingestConsumer.numActuallyIngested)
        ingested += ingestConsumer.numActuallyIngested
        _offset = offset
      }
    } else {
      shardStats.oldContainers.increment()
    }
    _offset
  }
  /** Starts the background thread that periodically flushes the Lucene part-key index. */
  def startFlushingIndex(): Unit =
    partKeyIndex.startFlushThread(storeConfig.partIndexFlushMinDelaySeconds, storeConfig.partIndexFlushMaxDelaySeconds)

  /** Convenience overload unpacking a SomeData envelope. */
  def ingest(data: SomeData): Long = ingest(data.records, data.offset)
  /**
   * Bootstraps the part-key index from the column store on the ingestion scheduler,
   * then starts the index flush thread. Returns a Future completing when done.
   */
  def recoverIndex(): Future[Unit] = {
    val indexBootstrapper = new IndexBootstrapper(colStore)
    indexBootstrapper.bootstrapIndex(partKeyIndex, shardNum, ref)(bootstrapPartKey)
      .map { count =>
        startFlushingIndex()
        logger.info(s"Bootstrapped index for dataset=$ref shard=$shardNum with $count records")
      }.runAsync(ingestSched)
  }
  /**
   * Handles actions to be performed for the shard upon bootstrapping
   * a partition key from index store.
   * Actively ingesting keys (endTime == Long.MaxValue) get a heap TSPartition object;
   * inactive keys only get a partId allocated and are tracked in the evicted-keys bloom filter.
   * @param pk partKey
   * @return partId assigned to key, or -1 if the key was dropped
   */
  // scalastyle:off method.length
  private[memstore] def bootstrapPartKey(pk: PartKeyRecord): Int = {
    assertThreadName(IngestSchedName)
    val partId = if (pk.endTime == Long.MaxValue) {
      // this is an actively ingesting partition
      val group = partKeyGroup(schemas.part.binSchema, pk.partKey, UnsafeUtils.arayOffset, numGroups)
      val schemaId = RecordSchema.schemaID(pk.partKey, UnsafeUtils.arayOffset)
      val schema = schemas(schemaId)
      if (schema != Schemas.UnknownSchema) {
        val part = createNewPartition(pk.partKey, UnsafeUtils.arayOffset, group, CREATE_NEW_PARTID, schema, 4)
        // In theory, we should not get an OutOfMemPartition here since
        // it should have occurred before node failed too, and with data stopped,
        // index would not be updated. But if for some reason we see it, drop data
        if (part == OutOfMemPartition) {
          logger.error("Could not accommodate partKey while recovering index. " +
            "WriteBuffer size may not be configured correctly")
          -1
        } else {
          // partSet is read optimistically elsewhere, so additions require the write lock
          val stamp = partSetLock.writeLock()
          try {
            partSet.add(part) // createNewPartition doesn't add part to partSet
            part.ingesting = true
            part.partID
          } finally {
            partSetLock.unlockWrite(stamp)
          }
        }
      } else {
        logger.info(s"Ignoring part key with unknown schema ID $schemaId")
        shardStats.unknownSchemaDropped.increment()
        -1
      }
    } else {
      // partition assign a new partId to non-ingesting partition,
      // but no need to create a new TSPartition heap object
      // instead add the partition to evictedPArtKeys bloom filter so that it can be found if necessary
      evictedPartKeys.synchronized {
        require(!evictedPartKeysDisposed)
        evictedPartKeys.add(PartKey(pk.partKey, UnsafeUtils.arayOffset))
      }
      createPartitionID()
    }

    // Keep the actively-ingesting set in sync with what was just bootstrapped.
    activelyIngesting.synchronized {
      if (pk.endTime == Long.MaxValue) activelyIngesting += partId
      else activelyIngesting -= partId
    }
    shardStats.indexRecoveryNumRecordsProcessed.increment()
    partId
  }
  /** Returns up to `limit` indexed label names from the part-key index. */
  def indexNames(limit: Int): Seq[String] = partKeyIndex.indexNames(limit)

  /** Returns the top-K values (with term info) indexed for the given label name. */
  def labelValues(labelName: String, topK: Int): Seq[TermInfo] = partKeyIndex.indexValues(labelName, topK)
  /**
   * This method is to apply column filters and fetch matching time series partitions.
   *
   * @param filter column filter
   * @param labelNames labels to return in the response
   * @param endTime end time
   * @param startTime start time
   * @param limit series limit
   * @return returns an iterator of map of label key value pairs of each matching time series
   */
  def labelValuesWithFilters(filter: Seq[ColumnFilter],
                             labelNames: Seq[String],
                             endTime: Long,
                             startTime: Long,
                             limit: Int): Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] = {
    LabelValueResultIterator(partKeyIndex.partIdsFromFilters(filter, startTime, endTime), labelNames, limit)
  }
/**
* Iterator for lazy traversal of partIdIterator, value for the given label will be extracted from the ParitionKey.
*/
case class LabelValueResultIterator(partIds: debox.Buffer[Int], labelNames: Seq[String], limit: Int)
extends Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] {
var currVal: Map[ZeroCopyUTF8String, ZeroCopyUTF8String] = _
var numResultsReturned = 0
var partIndex = 0
override def hasNext: Boolean = {
var foundValue = false
while(partIndex < partIds.length && numResultsReturned < limit && !foundValue) {
val partId = partIds(partIndex)
//retrieve PartKey either from In-memory map or from PartKeyIndex
val nextPart = partKeyFromPartId(partId)
// FIXME This is non-performant and temporary fix for fetching label values based on filter criteria.
// Other strategies needs to be evaluated for making this performant - create facets for predefined fields or
// have a centralized service/store for serving metadata
currVal = schemas.part.binSchema.toStringPairs(nextPart.base, nextPart.offset)
.filter(labelNames contains _._1).map(pair => {
pair._1.utf8 -> pair._2.utf8
}).toMap
foundValue = currVal.nonEmpty
partIndex += 1
}
foundValue
}
override def next(): Map[ZeroCopyUTF8String, ZeroCopyUTF8String] = {
numResultsReturned += 1
currVal
}
}
  /**
   * This method is to apply column filters and fetch matching time series partition keys.
   * When fetchFirstLastSampleTimes is set, keys (with first/last sample times) come straight
   * from the Lucene index; otherwise in-memory partitions are preferred, falling back to the
   * index only for partIds no longer in memory.
   */
  def partKeysWithFilters(filter: Seq[ColumnFilter],
                          fetchFirstLastSampleTimes: Boolean,
                          endTime: Long,
                          startTime: Long,
                          limit: Int): Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] = {
    if (fetchFirstLastSampleTimes) {
      partKeyIndex.partKeyRecordsFromFilters(filter, startTime, endTime).iterator.map { pk =>
        val partKeyMap = convertPartKeyWithTimesToMap(
          PartKeyWithTimes(pk.partKey, UnsafeUtils.arayOffset, pk.startTime, pk.endTime))
        partKeyMap ++ Map(
          ("_firstSampleTime_".utf8, pk.startTime.toString.utf8),
          ("_lastSampleTime_".utf8, pk.endTime.toString.utf8))
      } take(limit)
    } else {
      val partIds = partKeyIndex.partIdsFromFilters(filter, startTime, endTime)
      val inMem = InMemPartitionIterator2(partIds)
      val inMemPartKeys = inMem.map { p =>
        convertPartKeyWithTimesToMap(PartKeyWithTimes(p.partKeyBase, p.partKeyOffset, -1, -1))}
      // NOTE(review): skippedPartIDs is populated lazily while inMemPartKeys above is consumed,
      // but the iterator() over it is created eagerly on the next line -- confirm that partIds
      // skipped late in the traversal are still visible to this iterator (the ++ below is lazy
      // in its argument, which likely makes this work, but it depends on debox iterator semantics).
      val skippedPartKeys = inMem.skippedPartIDs.iterator().map(partId => {
        convertPartKeyWithTimesToMap(partKeyFromPartId(partId))})
      (inMemPartKeys ++ skippedPartKeys).take(limit)
    }
  }
private def convertPartKeyWithTimesToMap(partKey: PartKeyWithTimes): Map[ZeroCopyUTF8String, ZeroCopyUTF8String] = {
schemas.part.binSchema.toStringPairs(partKey.base, partKey.offset).map(pair => {
pair._1.utf8 -> pair._2.utf8
}).toMap ++
Map("_type_".utf8 -> Schemas.global.schemaName(RecordSchema.schemaID(partKey.base, partKey.offset)).utf8)
}
/**
* retrieve partKey for a given PartId
*/
private def partKeyFromPartId(partId: Int): PartKeyWithTimes = {
val nextPart = partitions.get(partId)
if (nextPart != UnsafeUtils.ZeroPointer)
PartKeyWithTimes(nextPart.partKeyBase, nextPart.partKeyOffset, -1, -1)
else { //retrieving PartKey from lucene index
val partKeyByteBuf = partKeyIndex.partKeyFromPartId(partId)
if (partKeyByteBuf.isDefined) PartKeyWithTimes(partKeyByteBuf.get.bytes, UnsafeUtils.arayOffset, -1, -1)
else throw new IllegalStateException("This is not an expected behavior." +
" PartId should always have a corresponding PartKey!")
}
}
  /**
   * WARNING: Not performant. Use only in tests, or during initial bootstrap.
   */
  def refreshPartKeyIndexBlocking(): Unit = partKeyIndex.refreshReadersBlocking()

  /** Closes the Lucene part-key index; no further index operations are valid afterwards. */
  def closePartKeyIndex(): Unit = partKeyIndex.closeIndex()

  /** Total rows ingested into memory by this shard (not necessarily flushed). */
  def numRowsIngested: Long = ingested

  /** Number of partitions currently resident in memory. */
  def numActivePartitions: Int = partSet.size

  /** Latest offset ingested into memory (not necessarily flushed). */
  def latestOffset: Long = _offset
  /**
   * Sets the watermark for each subgroup.  If an ingested record offset is below this watermark then it will be
   * assumed to already have been persisted, and the record will be discarded.  Use only for recovery.
   * @param watermarks a Map from group number to watermark
   */
  def setGroupWatermarks(watermarks: Map[Int, Long]): Unit =
    watermarks.foreach { case (group, mark) => groupWatermark(group) = mark }
  /**
   * Prepares the given group for flushing.  This MUST be done in the same thread/stream as
   * input records to avoid concurrency issues, and to ensure that all the partitions in a
   * group are switched at the same watermark. Also required because this method removes
   * entries from the partition data structures.
   *
   * @return a FlushGroup carrying the group's latest offset and, if this is the designated
   *         dirty-part-key group, the accumulated dirty partIds to persist
   */
  def prepareFlushGroup(groupNum: Int): FlushGroup = {
    assertThreadName(IngestSchedName)

    // Rapidly switch all of the input buffers for a particular group
    logger.debug(s"Switching write buffers for group $groupNum in dataset=$ref shard=$shardNum")
    InMemPartitionIterator(partitionGroups(groupNum).intIterator).foreach(_.switchBuffers(overflowBlockFactory))

    // Only one group per shard carries the dirty part keys; swap the buffer out atomically here.
    val dirtyPartKeys = if (groupNum == dirtyPartKeysFlushGroup) {
      logger.debug(s"Switching dirty part keys in dataset=$ref shard=$shardNum out for flush. ")
      purgeExpiredPartitions()
      val old = dirtyPartitionsForIndexFlush
      dirtyPartitionsForIndexFlush = debox.Buffer.empty[Int]
      old
    } else {
      debox.Buffer.ofSize[Int](0)
    }

    FlushGroup(shardNum, groupNum, latestOffset, dirtyPartKeys)
  }
  /**
   * Removes partitions whose data ended before the retention period from memory and
   * from the part-key index. Partitions still ingesting are never purged. Runs
   * trampolined on the ingestion scheduler to stay single-threaded.
   */
  private def purgeExpiredPartitions(): Unit = ingestSched.executeTrampolined { () =>
    assertThreadName(IngestSchedName)
    val partsToPurge = partKeyIndex.partIdsEndedBefore(
      System.currentTimeMillis() - storeConfig.demandPagedRetentionPeriod.toMillis)
    var numDeleted = 0
    val removedParts = debox.Buffer.empty[Int]
    InMemPartitionIterator2(partsToPurge).foreach { p =>
      if (!p.ingesting) {
        logger.debug(s"Purging partition with partId=${p.partID}  ${p.stringPartition} from " +
          s"memory in dataset=$ref shard=$shardNum")
        removePartition(p)
        removedParts += p.partID
        numDeleted += 1
      }
    }
    if (!removedParts.isEmpty) partKeyIndex.removePartKeys(removedParts)
    if (numDeleted > 0) logger.info(s"Purged $numDeleted partitions from memory and " +
      s"index from dataset=$ref shard=$shardNum")
    shardStats.purgedPartitions.increment(numDeleted)
  }
/**
* Creates zero or more flush tasks (up to the number of flush groups) based on examination
* of the record container's ingestion time. This should be called before ingesting the container.
*
* Note that the tasks returned by this method aren't executed yet. The caller decides how
* to run the tasks, and by which threads.
*/
  def createFlushTasks(container: RecordContainer): Seq[Task[Response]] = {
    val tasks = new ArrayBuffer[Task[Response]]()

    var oldTimestamp = lastIngestionTime
    val ingestionTime = Math.max(oldTimestamp, container.timestamp) // monotonic clock
    var newTimestamp = ingestionTime

    // oldTimestamp == Long.MinValue means no container seen yet, so there is nothing to flush.
    if (newTimestamp > oldTimestamp && oldTimestamp != Long.MinValue) {
      cforRange ( 0 until numGroups ) { group =>
        /* Logically, the task creation filter is as follows:
           // Compute the time offset relative to the group number. 0 min, 1 min, 2 min, etc.
           val timeOffset = group * flushOffsetMillis

           // Adjust the timestamp relative to the offset such that the
           // division rounds correctly.
           val oldTimestampAdjusted = oldTimestamp - timeOffset
           val newTimestampAdjusted = newTimestamp - timeOffset

           if (oldTimestampAdjusted / flushBoundary != newTimestampAdjusted / flushBoundary) {
             ...

           As written the code does the same thing but with fewer operations. It's also a bit
           shorter, but you also had to read this comment...
         */
        if (oldTimestamp / flushBoundaryMillis != newTimestamp / flushBoundaryMillis) {
          // Flush out the group before ingesting records for a new hour (by group offset).
          tasks += createFlushTask(prepareFlushGroup(group))
        }
        // Shift both timestamps by one group offset instead of recomputing timeOffset per group.
        oldTimestamp -= flushOffsetMillis
        newTimestamp -= flushOffsetMillis
      }
    }

    // Only update stuff if no exception was thrown.
    if (ingestionTime != lastIngestionTime) {
      lastIngestionTime = ingestionTime
      shardStats.ingestionClockDelay.update(System.currentTimeMillis() - ingestionTime)
    }

    tasks
  }
  // Builds the (not-yet-running) flush Task for one group.
  private def createFlushTask(flushGroup: FlushGroup): Task[Response] = {
    assertThreadName(IngestSchedName)
    // clone the bitmap so that reads on the flush thread do not conflict with writes on ingestion thread
    val partitionIt = InMemPartitionIterator(partitionGroups(flushGroup.groupNum).clone().intIterator)
    doFlushSteps(flushGroup, partitionIt)
  }
  // Refreshes all shard-level gauges (buffer pool size, index entries/bytes, partition counts).
  // Called from the ingestion thread after flushes complete.
  private def updateGauges(): Unit = {
    assertThreadName(IngestSchedName)
    shardStats.bufferPoolSize.update(bufferPools.valuesArray.map(_.poolSize).sum)
    shardStats.indexEntries.update(partKeyIndex.indexNumEntries)
    shardStats.indexBytes.update(partKeyIndex.indexRamBytes)
    shardStats.numPartitions.update(numActivePartitions)
    // snapshot under the lock so the count is consistent with concurrent flush/ingest updates
    val numIngesting = activelyIngesting.synchronized { activelyIngesting.size }
    shardStats.numActivelyIngestingParts.update(numIngesting)

    // Also publish MemFactory stats. Instance is expected to be shared, but no harm in
    // publishing a little more often than necessary.
    bufferMemoryManager.updateStats()
  }
  // Builds the PartKeyRecord to persist for a partition, deriving start/end times with
  // fallbacks for the eventually-consistent Lucene reads. Runs on the IO scheduler.
  private def toPartKeyRecord(p: TimeSeriesPartition): PartKeyRecord = {
    assertThreadName(IOSchedName)
    var startTime = partKeyIndex.startTimeFromPartId(p.partID)
    if (startTime == -1) startTime = p.earliestTime // can remotely happen since lucene reads are eventually consistent
    if (startTime == Long.MaxValue) startTime = 0 // if for any reason we can't find the startTime, use 0
    // Long.MaxValue endTime marks a partition that is still actively ingesting.
    val endTime = if (p.ingesting) {
      Long.MaxValue
    } else {
      val et = p.timestampOfLatestSample  // -1 can be returned if no sample after reboot
      if (et == -1) System.currentTimeMillis() else et
    }
    PartKeyRecord(p.partKeyBytes, startTime, endTime, Some(p.partKeyHash))
  }
// scalastyle:off method.length
  /**
   * Orchestrates one flush of a group: makes chunks per partition, writes chunks, downsample
   * records, and dirty part keys to the column store, then commits the checkpoint.
   * NOTE: chunkSetIter is lazy — Steps 2-4 run only when writeChunksFuture consumes the iterator,
   * which is why Steps 5.1/5.2 are sequenced after writeChunksFuture. Do not reorder.
   */
  private def doFlushSteps(flushGroup: FlushGroup,
                           partitionIt: Iterator[TimeSeriesPartition]): Task[Response] = {
    assertThreadName(IngestSchedName)
    val tracer = Kamon.spanBuilder("chunk-flush-task-latency-after-retries")
      .asChildOf(Kamon.currentSpan())
      .tag("dataset", ref.dataset)
      .tag("shard", shardNum).start()
    // Only allocate the blockHolder when we actually have chunks/partitions to flush
    val blockHolder = blockFactoryPool.checkout(Map("flushGroup" -> flushGroup.groupNum.toString))

    // This initializes the containers for the downsample records. Yes, we create new containers
    // and not reuse them at the moment and there is allocation for every call of this method
    // (once per minute). We can perhaps use a thread-local or a pool if necessary after testing.
    val downsampleRecords = ShardDownsampler
      .newEmptyDownsampleRecords(downsampleConfig.resolutions.map(_.toMillis.toInt),
                                 downsampleConfig.enabled)

    val chunkSetIter = partitionIt.flatMap { p =>
      // TODO re-enable following assertion. Am noticing that monix uses TrampolineExecutionContext
      // causing the iterator to be consumed synchronously in some cases. It doesnt
      // seem to be consistent environment to environment.
      // assertThreadName(IOSchedName)

      /* Step 2: Make chunks to be flushed for each partition */
      val chunks = p.makeFlushChunks(blockHolder)

      /* VERY IMPORTANT: This block is lazy and is executed when chunkSetIter is consumed
         in writeChunksFuture below */

      /* Step 3: Add downsample records for the chunks into the downsample record builders */
      val ds = shardDownsamplers(p.schema.schemaHash)
      ds.populateDownsampleRecords(p, p.infosToBeFlushed, downsampleRecords)

      /* Step 4: Update endTime of all partKeys that stopped ingesting in this flush period. */
      updateIndexWithEndTime(p, chunks, flushGroup.dirtyPartsToFlush)
      chunks
    }

    // Note that all cassandra writes below will have included retries. Failures after retries will imply data loss
    // in order to keep the ingestion moving. It is important that we don't fall back far behind.

    /* Step 1: Kick off partition iteration to persist chunks to column store */
    val writeChunksFuture = writeChunks(flushGroup, chunkSetIter, partitionIt, blockHolder)

    /* Step 5.1: Publish the downsample record data collected to the downsample dataset.
     * We recover future since we want to proceed to publish downsample data even if chunk flush failed.
     * This is done after writeChunksFuture because chunkSetIter is lazy. */
    val pubDownsampleFuture = writeChunksFuture.recover {case _ => Success}
      .flatMap { _ =>
        assertThreadName(IOSchedName)
        if (downsampleConfig.enabled)
          ShardDownsampler.publishToDownsampleDataset(downsampleRecords, downsamplePublisher, ref, shardNum)
        else Future.successful(Success)
      }

    /* Step 5.2: We flush dirty part keys in the one designated group for each shard.
     * We recover future since we want to proceed to write dirty part keys even if chunk flush failed.
     * This is done after writeChunksFuture because chunkSetIter is lazy. More partKeys could
     * be added during iteration due to endTime detection
     */
    val writeDirtyPartKeysFuture = writeChunksFuture.recover {case _ => Success}
      .flatMap( _=> writeDirtyPartKeys(flushGroup))

    /* Step 6: Checkpoint after dirty part keys and chunks are flushed */
    val result = Future.sequence(Seq(writeChunksFuture, writeDirtyPartKeysFuture, pubDownsampleFuture)).map {
      _.find(_.isInstanceOf[ErrorResponse]).getOrElse(Success)
    }.flatMap {
      // Blocks become reclaimable only after a fully successful flush; then commit the checkpoint.
      case Success           => blockHolder.markUsedBlocksReclaimable()
                                commitCheckpoint(ref, shardNum, flushGroup)
      case er: ErrorResponse => Future.successful(er)
    }.recover { case e =>
      logger.error(s"Internal Error when persisting chunks in dataset=$ref shard=$shardNum - should " +
        s"have not reached this state", e)
      DataDropped
    }
    result.onComplete { resp =>
      assertThreadName(IngestSchedName)
      try {
        // Release the block factory back to the pool regardless of flush outcome.
        blockFactoryPool.release(blockHolder)
        flushDoneTasks(flushGroup, resp)
        tracer.finish()
      } catch { case e: Throwable =>
        logger.error(s"Error when wrapping up doFlushSteps in dataset=$ref shard=$shardNum", e)
      }
    }(ingestSched)
    // Note: The data structures accessed by flushDoneTasks can only be safely accessed by the
    //       ingestion thread, hence the onComplete steps are run from that thread.
    Task.fromFuture(result)
  }
  // Post-flush housekeeping, always run on the ingestion thread (see note in doFlushSteps).
  protected def flushDoneTasks(flushGroup: FlushGroup, resTry: Try[Response]): Unit = {
    assertThreadName(IngestSchedName)
    resTry.foreach { resp =>
      logger.info(s"Flush of dataset=$ref shard=$shardNum group=${flushGroup.groupNum} " +
        s"flushWatermark=${flushGroup.flushWatermark} response=$resp offset=${_offset}")
    }
    partitionMaker.cleanupOldestBuckets()
    // Some partitions might be evictable, see if need to free write buffer memory
    checkEnableAddPartitions()
    updateGauges()
  }
// scalastyle:off method.length
  // Persists the part keys marked dirty in this flush group to the column store.
  // Failures after retries are logged and converted to DataDropped to keep ingestion moving.
  private def writeDirtyPartKeys(flushGroup: FlushGroup): Future[Response] = {
    assertThreadName(IOSchedName)
    val partKeyRecords = InMemPartitionIterator2(flushGroup.dirtyPartsToFlush).map(toPartKeyRecord)
    // updateHour buckets part-key updates by wall-clock hour
    val updateHour = System.currentTimeMillis() / 1000 / 60 / 60
    colStore.writePartKeys(ref, shardNum,
                           Observable.fromIterator(partKeyRecords),
                           storeConfig.diskTTLSeconds, updateHour).map { resp =>
      if (flushGroup.dirtyPartsToFlush.length > 0) {
        logger.info(s"Finished flush of partKeys numPartKeys=${flushGroup.dirtyPartsToFlush.length}" +
          s" resp=$resp for dataset=$ref shard=$shardNum")
        shardStats.numDirtyPartKeysFlushed.increment(flushGroup.dirtyPartsToFlush.length)
      }
      resp
    }.recover { case e =>
      logger.error(s"Internal Error when persisting part keys in dataset=$ref shard=$shardNum - " +
        "should have not reached this state", e)
      DataDropped
    }
  }
// scalastyle:on method.length
  // Streams chunk sets to the column store. On persistent failure, the remaining write buffers
  // are still encoded and released (otherwise they would leak) and DataDropped is returned.
  private def writeChunks(flushGroup: FlushGroup,
                          chunkSetIt: Iterator[ChunkSet],
                          partitionIt: Iterator[TimeSeriesPartition],
                          blockHolder: BlockMemFactory): Future[Response] = {
    assertThreadName(IngestSchedName)

    val chunkSetStream = Observable.fromIterator(chunkSetIt)
    logger.debug(s"Created flush ChunkSets stream for group ${flushGroup.groupNum} in " +
                 s"dataset=$ref shard=$shardNum")

    colStore.write(ref, chunkSetStream, storeConfig.diskTTLSeconds).recover { case e =>
      logger.error(s"Critical! Chunk persistence failed after retries and skipped in dataset=$ref " +
        s"shard=$shardNum", e)
      shardStats.flushesFailedChunkWrite.increment()

      // Encode and free up the remainder of the WriteBuffers that have not been flushed yet. Otherwise they will
      // never be freed.
      partitionIt.foreach(_.encodeAndReleaseBuffers(blockHolder))
      // If the above futures fail with ErrorResponse because of DB failures, skip the chunk.
      // Sorry - need to drop the data to keep the ingestion moving
      DataDropped
    }
  }
  // Writes the partition's endTime into the Lucene index entry for its part key.
  private[memstore] def updatePartEndTimeInIndex(p: TimeSeriesPartition, endTime: Long): Unit =
    partKeyIndex.updatePartKeyWithEndTime(p.partKeyBytes, p.partID, endTime)()
  // If a partition produced no chunks this flush period and was marked ingesting, it has gone
  // inactive: record its endTime in the index, mark it dirty, and remove it from activelyIngesting.
  private def updateIndexWithEndTime(p: TimeSeriesPartition,
                                     partFlushChunks: Iterator[ChunkSet],
                                     dirtyParts: debox.Buffer[Int]): Unit = {
    // TODO re-enable following assertion. Am noticing that monix uses TrampolineExecutionContext
    // causing the iterator to be consumed synchronously in some cases. It doesnt
    // seem to be consistent environment to environment.
    //assertThreadName(IOSchedName)

    // Below is coded to work concurrently with logic in getOrAddPartitionAndIngest
    // where we try to activate an inactive time series
    activelyIngesting.synchronized {
      if (partFlushChunks.isEmpty && p.ingesting) {
        var endTime = p.timestampOfLatestSample
        if (endTime == -1) endTime = System.currentTimeMillis() // this can happen if no sample after reboot
        updatePartEndTimeInIndex(p, endTime)
        dirtyParts += p.partID
        activelyIngesting -= p.partID
        // p.ingesting must be flipped inside the lock — getOrAddPartitionAndIngest re-checks it there.
        p.ingesting = false
      }
    }
  }
  // Persists the group's flush watermark as a checkpoint and updates watermark gauges.
  // Checkpoint failures are downgraded to DataDropped so ingestion keeps moving.
  private def commitCheckpoint(ref: DatasetRef, shardNum: Int, flushGroup: FlushGroup): Future[Response] = {
    assertThreadName(IOSchedName)
    // negative checkpoints are refused by Kafka, and also offsets should be positive
    if (flushGroup.flushWatermark > 0) {
      val fut = metastore.writeCheckpoint(ref, shardNum, flushGroup.groupNum, flushGroup.flushWatermark).map { r =>
        shardStats.flushesSuccessful.increment()
        r
      }.recover { case e =>
        logger.error(s"Critical! Checkpoint persistence skipped in dataset=$ref shard=$shardNum", e)
        shardStats.flushesFailedOther.increment()
        // skip the checkpoint write
        // Sorry - need to skip to keep the ingestion moving
        DataDropped
      }
      // Update stats.
      if (_offset >= 0) shardStats.offsetLatestInMem.update(_offset)
      groupWatermark(flushGroup.groupNum) = flushGroup.flushWatermark
      val maxWatermark = groupWatermark.max
      val minWatermark = groupWatermark.min
      if (maxWatermark >= 0) shardStats.offsetLatestFlushed.update(maxWatermark)
      if (minWatermark >= 0) shardStats.offsetEarliestFlushed.update(minWatermark)
      fut
    } else {
      Future.successful(NotApplied)
    }
  }
  // True while write-buffer memory is exhausted; creation of new partitions is paused until
  // eviction frees enough space (see disableAddPartitions / checkEnableAddPartitions).
  private[memstore] val addPartitionsDisabled = AtomicBoolean(false)
// scalastyle:off null
  // Looks up the TSPartition for the ingestion record, creating it (and its index entries) if absent.
  // May return OutOfMemPartition if a new partition could not be allocated.
  private[filodb] def getOrAddPartitionForIngestion(recordBase: Any, recordOff: Long,
                                                    group: Int, schema: Schema) = {
    var part = partSet.getWithIngestBR(recordBase, recordOff, schema)
    if (part == null) {
      part = addPartitionForIngestion(recordBase, recordOff, schema, group)
    }
    part
  }
// scalastyle:on
/**
* Looks up the previously assigned partId of a possibly evicted partition.
* @return partId >=0 if one is found, CREATE_NEW_PARTID (-1) if not found.
*/
  private def lookupPreviouslyAssignedPartId(partKeyBase: Array[Byte], partKeyOffset: Long): Int = {
    assertThreadName(IngestSchedName)
    shardStats.evictedPartKeyBloomFilterQueries.increment()

    // Fast probabilistic check first: the bloom filter of evicted part keys avoids a slow
    // Lucene lookup for the common case of a never-before-seen part key.
    val mightContain = evictedPartKeys.synchronized {
      if (!evictedPartKeysDisposed) {
        evictedPartKeys.mightContain(PartKey(partKeyBase, partKeyOffset))
      } else {
        false
      }
    }

    if (mightContain) {
      partKeyIndex.partIdFromPartKeySlow(partKeyBase, partKeyOffset)
        .getOrElse {
          // bloom filter said "maybe" but the index disagrees — a false positive
          shardStats.evictedPartKeyBloomFilterFalsePositives.increment()
          CREATE_NEW_PARTID
        }
    } else CREATE_NEW_PARTID
  }
/**
* Adds new partition with appropriate partId. If it is a newly seen partKey, then new partId is assigned.
* If it is a previously seen partKey that is already in index, it reassigns same partId so that indexes
* are still valid.
*
* This method also updates lucene index and dirty part keys properly.
*/
  private def addPartitionForIngestion(recordBase: Any, recordOff: Long, schema: Schema, group: Int) = {
    assertThreadName(IngestSchedName)
    // TODO: remove when no longer needed - or figure out how to log only for tracing partitions
    logger.debug(s"Adding ingestion record details: ${schema.ingestionSchema.debugString(recordBase, recordOff)}")
    val partKeyOffset = schema.comparator.buildPartKeyFromIngest(recordBase, recordOff, partKeyBuilder)
    // Reuse the partId of a previously-evicted identical partKey, so existing index entries stay valid.
    val previousPartId = lookupPreviouslyAssignedPartId(partKeyArray, partKeyOffset)
    // TODO: remove when no longer needed
    logger.debug(s"Adding part key details: ${schema.partKeySchema.debugString(partKeyArray, partKeyOffset)}")
    val newPart = createNewPartition(partKeyArray, partKeyOffset, group, previousPartId, schema)
    if (newPart != OutOfMemPartition) {
      val partId = newPart.partID
      // first column of the ingestion record is assumed here to be the start timestamp
      val startTime = schema.ingestionSchema.getLong(recordBase, recordOff, 0)
      if (previousPartId == CREATE_NEW_PARTID) {
        // add new lucene entry if this partKey was never seen before
        // causes endTime to be set to Long.MaxValue
        partKeyIndex.addPartKey(newPart.partKeyBytes, partId, startTime)()
      } else {
        // newly created partition is re-ingesting now, so update endTime
        updatePartEndTimeInIndex(newPart, Long.MaxValue)
      }
      dirtyPartitionsForIndexFlush += partId // marks this part as dirty so startTime is flushed
      activelyIngesting.synchronized {
        activelyIngesting += partId
        newPart.ingesting = true
      }
      // Make the partition visible to readers under the write lock (see getPartition).
      val stamp = partSetLock.writeLock()
      try {
        partSet.add(newPart)
      } finally {
        partSetLock.unlockWrite(stamp)
      }
    }
    newPart
  }
/**
* Retrieves or creates a new TimeSeriesPartition, updating indices, then ingests the sample from record.
* partition portion of ingest BinaryRecord is used to look up existing TSPartition.
* Copies the partition portion of the ingest BinaryRecord to offheap write buffer memory.
* NOTE: ingestion is skipped if there is an error allocating WriteBuffer space.
* @param recordBase the base of the ingestion BinaryRecord
* @param recordOff the offset of the ingestion BinaryRecord
* @param group the group number, from abs(record.partitionHash % numGroups)
*/
  def getOrAddPartitionAndIngest(ingestionTime: Long,
                                 recordBase: Any, recordOff: Long,
                                 group: Int, schema: Schema): Unit = {
    assertThreadName(IngestSchedName)
    try {
      val part: FiloPartition = getOrAddPartitionForIngestion(recordBase, recordOff, group, schema)
      if (part == OutOfMemPartition) {
        disableAddPartitions()
      }
      else {
        val tsp = part.asInstanceOf[TimeSeriesPartition]
        brRowReader.schema = schema.ingestionSchema
        brRowReader.recordOffset = recordOff
        tsp.ingest(ingestionTime, brRowReader, overflowBlockFactory, maxChunkTime)
        // Below is coded to work concurrently with logic in updateIndexWithEndTime
        // where we try to de-activate an active time series
        if (!tsp.ingesting) {
          // DO NOT use activelyIngesting to check above condition since it is slow and is called for every sample
          // Double-checked pattern: cheap unsynchronized read above, re-check under the lock below.
          activelyIngesting.synchronized {
            if (!tsp.ingesting) {
              // time series was inactive and has just started re-ingesting
              updatePartEndTimeInIndex(part.asInstanceOf[TimeSeriesPartition], Long.MaxValue)
              dirtyPartitionsForIndexFlush += part.partID
              activelyIngesting += part.partID
              tsp.ingesting = true
            }
          }
        }
      }
    } catch {
      // offheap exhaustion is recoverable — pause new-partition creation instead of dropping loudly
      case e: OutOfOffheapMemoryException => disableAddPartitions()
      case e: Exception =>
        shardStats.dataDropped.increment()
        logger.error(s"Unexpected ingestion err in dataset=$ref " +
          s"shard=$shardNum partition=${schema.ingestionSchema.debugString(recordBase, recordOff)}", e)
    }
  }
private def shouldTrace(partKeyAddr: Long): Boolean = {
tracedPartFilters.nonEmpty && {
val partKeyPairs = schemas.part.binSchema.toStringPairs(UnsafeUtils.ZeroPointer, partKeyAddr)
tracedPartFilters.forall(p => partKeyPairs.contains(p))
}
}
/**
* Creates new partition and adds them to the shard data structures. DOES NOT update
* lucene index. It is the caller's responsibility to add or skip that step depending on the situation.
*
* @param usePartId pass CREATE_NEW_PARTID to force creation of new partId instead of using one that is passed in
*/
  protected def createNewPartition(partKeyBase: Array[Byte], partKeyOffset: Long,
                                   group: Int, usePartId: Int, schema: Schema,
                                   initMapSize: Int = initInfoMapSize): TimeSeriesPartition = {
    assertThreadName(IngestSchedName)
    // Check and evict, if after eviction we still don't have enough memory, then don't proceed
    // OutOfMemPartition is a sentinel; callers must check for it.
    if (addPartitionsDisabled() || !ensureFreeSpace()) {
      OutOfMemPartition
    }
    else {
      // PartitionKey is copied to offheap bufferMemory and stays there until it is freed
      // NOTE: allocateAndCopy and allocNew below could fail if there isn't enough memory.  It is CRUCIAL
      // that min-write-buffers-free setting is large enough to accommodate the below use cases ALWAYS
      val (_, partKeyAddr, _) = BinaryRegionLarge.allocateAndCopy(partKeyBase, partKeyOffset, bufferMemoryManager)
      val partId = if (usePartId == CREATE_NEW_PARTID) createPartitionID() else usePartId
      val pool = bufferPools(schema.schemaHash)
      // Tracing partitions emit extra debug output; selected via tracedPartFilters (see shouldTrace).
      val newPart = if (shouldTrace(partKeyAddr)) {
        logger.debug(s"Adding tracing TSPartition dataset=$ref shard=$shardNum group=$group partId=$partId")
        new TracingTimeSeriesPartition(
          partId, ref, schema, partKeyAddr, shardNum, pool, shardStats, bufferMemoryManager, initMapSize)
      } else {
        new TimeSeriesPartition(
          partId, schema, partKeyAddr, shardNum, pool, shardStats, bufferMemoryManager, initMapSize)
      }
      partitions.put(partId, newPart)
      shardStats.partitionsCreated.increment()
      partitionGroups(group).set(partId)
      newPart
    }
  }
  // Pauses creation of new partitions due to memory pressure. The warning logs only on the
  // first transition, but dataDropped increments on EVERY call — each call corresponds to a
  // record that could not be ingested.
  private def disableAddPartitions(): Unit = {
    assertThreadName(IngestSchedName)
    if (addPartitionsDisabled.compareAndSet(false, true))
      logger.warn(s"dataset=$ref shard=$shardNum: Out of buffer memory and not able to evict enough; " +
        s"adding partitions disabled")
    shardStats.dataDropped.increment()
  }
  // Re-enables partition creation once eviction has freed enough space. Called after flushes.
  private def checkEnableAddPartitions(): Unit = {
    assertThreadName(IngestSchedName)
    if (addPartitionsDisabled()) {
      if (ensureFreeSpace()) {
        logger.info(s"dataset=$ref shard=$shardNum: Enough free space to add partitions again!  Yay!")
        addPartitionsDisabled := false
      }
    }
  }
/**
* Returns a new non-negative partition ID which isn't used by any existing parition. A negative
* partition ID wouldn't work with bitmaps.
*/
  private def createPartitionID(): Int = {
    assertThreadName(IngestSchedName)
    // Invariant: nextPartitionID is already known collision-free (checked when it was advanced),
    // so it is returned as-is; the loop below pre-validates the NEXT candidate for the next call.
    val id = nextPartitionID

    // It's unlikely that partition IDs will wrap around, and it's unlikely that collisions
    // will be encountered. In case either of these conditions occur, keep incrementing the id
    // until no collision is detected. A given shard is expected to support up to 1M actively
    // ingesting partitions, and so in the worst case, the loop might run for up to ~100ms.
    // Afterwards, a complete wraparound is required for collisions to be detected again.

    do {
      nextPartitionID += 1
      if (nextPartitionID < 0) {
        nextPartitionID = 0
        logger.info(s"dataset=$ref shard=$shardNum nextPartitionID has wrapped around to 0 again")
      }
    } while (partitions.containsKey(nextPartitionID))

    id
  }
  // Logs a detailed block-level report to help diagnose a corrupt offheap vector pointer.
  def analyzeAndLogCorruptPtr(cve: CorruptVectorException): Unit =
    logger.error(cve.getMessage + "\n" + BlockDetective.stringReport(cve.ptr, blockStore, blockFactoryPool))
/**
* Check and evict partitions to free up memory and heap space. NOTE: This must be called in the ingestion
* stream so that there won't be concurrent other modifications. Ideally this is called when trying to add partitions
* @return true if able to evict enough or there was already space, false if not able to evict and not enough mem
*/
// scalastyle:off method.length
  private[filodb] def ensureFreeSpace(): Boolean = {
    assertThreadName(IngestSchedName)
    var lastPruned = EmptyBitmap
    while (evictionPolicy.shouldEvict(partSet.size, bufferMemoryManager)) {
      // Eliminate partitions evicted from last cycle so we don't have an endless loop
      val prunedPartitions = partitionsToEvict().andNot(lastPruned)
      if (prunedPartitions.isEmpty) {
        logger.warn(s"dataset=$ref shard=$shardNum: No partitions to evict but we are still low on space. " +
          s"DATA WILL BE DROPPED")
        return false
      }
      lastPruned = prunedPartitions

      // Pruning group bitmaps
      for { group <- 0 until numGroups } {
        partitionGroups(group) = partitionGroups(group).andNot(prunedPartitions)
      }

      // Finally, prune partitions and keyMap data structures
      logger.info(s"Evicting partitions from dataset=$ref shard=$shardNum, watermark=$evictionWatermark...")
      val intIt = prunedPartitions.intIterator
      var partsRemoved = 0
      var partsSkipped = 0
      var maxEndTime = evictionWatermark
      while (intIt.hasNext) {
        val partitionObj = partitions.get(intIt.next)
        if (partitionObj != UnsafeUtils.ZeroPointer) {
          // TODO we can optimize fetching of endTime by getting them along with top-k query
          val endTime = partKeyIndex.endTimeFromPartId(partitionObj.partID)
          if (partitionObj.ingesting)
            logger.warn(s"Partition ${partitionObj.partID} is ingesting, but it was eligible for eviction. How?")
          if (endTime == PartKeyLuceneIndex.NOT_FOUND || endTime == Long.MaxValue) {
            // Partitions with unknown or "still ingesting" endTime are never evicted.
            logger.warn(s"endTime $endTime was not correct. how?", new IllegalStateException())
          } else {
            logger.debug(s"Evicting partId=${partitionObj.partID} ${partitionObj.stringPartition} " +
              s"from dataset=$ref shard=$shardNum")
            // add the evicted partKey to a bloom filter so that we are able to quickly
            // find out if a partId has been assigned to an ingesting partKey before a more expensive lookup.
            evictedPartKeys.synchronized {
              if (!evictedPartKeysDisposed) {
                evictedPartKeys.add(PartKey(partitionObj.partKeyBase, partitionObj.partKeyOffset))
              }
            }
            // The previously created PartKey is just meant for bloom filter and will be GCed
            removePartition(partitionObj)
            partsRemoved += 1
            maxEndTime = Math.max(maxEndTime, endTime)
          }
        } else {
          partsSkipped += 1
        }
      }
      val elemCount = evictedPartKeys.synchronized {
        if (!evictedPartKeysDisposed) {
          evictedPartKeys.approximateElementCount()
        } else {
          0
        }
      }
      shardStats.evictedPkBloomFilterSize.update(elemCount)
      evictionWatermark = maxEndTime + 1
      // Plus one needed since there is a possibility that all partitions evicted in this round have same endTime,
      // and there may be more partitions that are not evicted with same endTime. If we didnt advance the watermark,
      // we would be processing same partIds again and again without moving watermark forward.
      // We may skip evicting some partitions by doing this, but the imperfection is an acceptable
      // trade-off for performance and simplicity. The skipped partitions will be removed during purge.
      logger.info(s"dataset=$ref shard=$shardNum: evicted $partsRemoved partitions," +
        s"skipped $partsSkipped, h20=$evictionWatermark")
      shardStats.partitionsEvicted.increment(partsRemoved)
    }
    true
  }
//scalastyle:on
// Permanently removes the given partition ID from our in-memory data structures
// Also frees partition key if necessary
  // Permanently removes the partition from the in-memory partSet (under write lock) and the
  // partitions map; shuts the partition down (freeing resources) only if we won the removal.
  private def removePartition(partitionObj: TimeSeriesPartition): Unit = {
    assertThreadName(IngestSchedName)
    val stamp = partSetLock.writeLock()
    try {
      partSet.remove(partitionObj)
    } finally {
      partSetLock.unlockWrite(stamp)
    }
    if (partitions.remove(partitionObj.partID, partitionObj)) {
      partitionObj.shutdown()
    }
  }
  // Returns a bitmap of up to numToEvict partIds with the oldest known endTimes (above the
  // current eviction watermark) as eviction candidates.
  private def partitionsToEvict(): EWAHCompressedBitmap = {
    // Iterate and add eligible partitions to delete to our list
    // Need to filter out partitions with no endTime. Any endTime calculated would not be set within one flush interval.
    partKeyIndex.partIdsOrderedByEndTime(storeConfig.numToEvict, evictionWatermark, Long.MaxValue - 1)
  }
  // Looks up a partition by raw part key bytes using StampedLock's optimistic-read pattern:
  // try a lock-free read first, and only fall back to a blocking read lock if a concurrent
  // writer invalidated the optimistic stamp.
  private[core] def getPartition(partKey: Array[Byte]): Option[TimeSeriesPartition] = {
    var part: Option[FiloPartition] = None

    // Access the partition set optimistically. If nothing acquired the write lock, then
    // nothing changed in the set, and the partition object is the correct one.
    var stamp = partSetLock.tryOptimisticRead()
    if (stamp != 0) {
      part = partSet.getWithPartKeyBR(partKey, UnsafeUtils.arayOffset, schemas.part)
    }
    if (!partSetLock.validate(stamp)) {
      // Because the stamp changed, the write lock was acquired and the set likely changed.
      // Try again with a full read lock, which will block if necessary so as to not run
      // concurrently with any thread making changes to the set. This guarantees that
      // the correct partition is returned.
      stamp = partSetLock.readLock()
      try {
        part = partSet.getWithPartKeyBR(partKey, UnsafeUtils.arayOffset, schemas.part)
      } finally {
        partSetLock.unlockRead(stamp)
      }
    }

    part.map(_.asInstanceOf[TimeSeriesPartition])
  }
  // Resolves the schema ID for a partId: from the in-memory partition if present, otherwise
  // from the part key bytes stored in the index; -1 if the partId is unknown.
  // NOTE(review): the in-memory branch returns schema.schemaHash — presumably schemaHash IS the
  // schema ID extracted by RecordSchema.schemaID in the other branch; confirm they agree.
  protected def schemaIDFromPartID(partID: Int): Int = {
    partitions.get(partID) match {
      case TimeSeriesShard.OutOfMemPartition =>
        partKeyIndex.partKeyFromPartId(partID).map { pkBytesRef =>
          val unsafeKeyOffset = PartKeyLuceneIndex.bytesRefToUnsafeOffset(pkBytesRef.offset)
          RecordSchema.schemaID(pkBytesRef.bytes, unsafeKeyOffset)
        }.getOrElse(-1)
      case p: TimeSeriesPartition => p.schema.schemaHash
    }
  }
/**
* Looks up partitions and schema info from ScanMethods, usually by doing a Lucene search.
* Also returns detailed information about what is in memory and not, and does schema discovery.
*/
  def lookupPartitions(partMethod: PartitionScanMethod,
                       chunkMethod: ChunkScanMethod,
                       querySession: QuerySession): PartLookupResult = {
    // Lock is attached to the session so it can be released when the query task completes.
    querySession.lock = Some(reclaimLock)
    reclaimLock.lock()
    // any exceptions thrown here should be caught by a wrapped Task.
    // At the end, MultiSchemaPartitionsExec.execute releases the lock when the task is complete
    partMethod match {
      case SinglePartitionScan(partition, _) =>
        val partIds = debox.Buffer.empty[Int]
        getPartition(partition).foreach(p => partIds += p.partID)
        PartLookupResult(shardNum, chunkMethod, partIds, Some(RecordSchema.schemaID(partition)))
      case MultiPartitionScan(partKeys, _)   =>
        val partIds = debox.Buffer.empty[Int]
        partKeys.flatMap(getPartition).foreach(p => partIds += p.partID)
        // schema is taken from the first part key; assumes all keys in the scan share one schema
        PartLookupResult(shardNum, chunkMethod, partIds, partKeys.headOption.map(RecordSchema.schemaID))
      case FilteredPartitionScan(_, filters) =>
        // No matter if there are filters or not, need to run things through Lucene so we can discover potential
        // TSPartitions to read back from disk
        val matches = partKeyIndex.partIdsFromFilters(filters, chunkMethod.startTime, chunkMethod.endTime)
        shardStats.queryTimeRangeMins.record((chunkMethod.endTime - chunkMethod.startTime) / 60000 )
        Kamon.currentSpan().tag(s"num-partitions-from-index-$shardNum", matches.length)

        // first find out which partitions are being queried for data not in memory
        val firstPartId = if (matches.isEmpty) None else Some(matches(0))
        val _schema = firstPartId.map(schemaIDFromPartID)
        val it1 = InMemPartitionIterator2(matches)
        val partIdsToPage = it1.filter(_.earliestTime > chunkMethod.startTime).map(_.partID)
        val partIdsNotInMem = it1.skippedPartIDs
        Kamon.currentSpan().tag(s"num-partitions-not-in-memory-$shardNum", partIdsNotInMem.length)
        // partitions whose in-memory data starts after the query start need their true start
        // times from the index so missing ranges can be demand-paged from the column store
        val startTimes = if (partIdsToPage.nonEmpty) {
          val st = partKeyIndex.startTimeFromPartIds(partIdsToPage)
          logger.debug(s"Some partitions have earliestTime > queryStartTime(${chunkMethod.startTime}); " +
            s"startTime lookup for query in dataset=$ref shard=$shardNum " +
            s"resulted in startTimes=$st")
          st
        }
        else {
          logger.debug(s"StartTime lookup was not needed. All partition's data for query in dataset=$ref " +
            s"shard=$shardNum are in memory")
          debox.Map.empty[Int, Long]
        }
        // now provide an iterator that additionally supplies the startTimes for
        // those partitions that may need to be paged
        PartLookupResult(shardNum, chunkMethod, matches, _schema, startTimes, partIdsNotInMem)
    }
  }
def scanPartitions(iterResult: PartLookupResult,
colIds: Seq[Types.ColumnId],
querySession: QuerySession): Observable[ReadablePartition] = {
val partIter = new InMemPartitionIterator2(iterResult.partsInMemory)
Observable.fromIterator(partIter.map { p =>
shardStats.partitionsQueried.increment()
p
})
}
  // Schedules a once-a-minute background task that keeps a configured percentage of
  // block memory free by reclaiming blocks ahead of demand.
  private def startHeadroomTask(sched: Scheduler): Unit = {
    sched.scheduleWithFixedDelay(1, 1, TimeUnit.MINUTES, new Runnable {
      def run() = blockStore.ensureHeadroom(storeConfig.ensureHeadroomPercent)
    })
  }
/**
* Please use this for testing only - reclaims ALL used offheap blocks. Maybe you are trying to test
* on demand paging.
*/
  private[filodb] def reclaimAllBlocksTestOnly() = blockStore.reclaimAll()  // TEST ONLY — see warning above
/**
* Reset all state in this shard. Memory is not released as once released, then this class
* cannot be used anymore (except partition key/chunkmap state is removed.)
*/
  def reset(): Unit = {
    logger.info(s"Clearing all MemStore state for dataset=$ref shard=$shardNum")
    // partition removal must happen on the ingestion thread to avoid races with ingestion
    ingestSched.executeTrampolined { () =>
      partitions.values.asScala.foreach(removePartition)
    }
    partKeyIndex.reset()
    // TODO unable to reset/clear bloom filter
    ingested = 0L
    for { group <- 0 until numGroups } {
      partitionGroups(group) = new EWAHCompressedBitmap()
      groupWatermark(group) = Long.MinValue
    }
  }
  def shutdown(): Unit = {
    // dispose the evicted-part-keys bloom filter exactly once; readers check evictedPartKeysDisposed
    evictedPartKeys.synchronized {
      if (!evictedPartKeysDisposed) {
        evictedPartKeysDisposed = true
        evictedPartKeys.dispose()
      }
    }
    reset()   // Not really needed, but clear everything just to be consistent
    logger.info(s"Shutting down dataset=$ref shard=$shardNum")
    /* Don't explicitly free the memory just yet. These classes instead rely on a finalize
       method to ensure that no threads are accessing the memory before it's freed.
    blockStore.releaseBlocks()
    */
    ingestSched.shutdown()
  }
}
| tuplejump/FiloDB | core/src/main/scala/filodb.core/memstore/TimeSeriesShard.scala | Scala | apache-2.0 | 72,619 |
/**
* Copyright 2009 Latterfrosken Software Development Limited
*
* This file is part of Lafros GUI-Cmds.
*
* Lafros GUI-Cmds is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* Lafros GUI-Cmds is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with Lafros GUI-Cmds. If not, see <http://www.gnu.org/licenses/>. */
package com.lafros.gui.cmds
import lafros.maven.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import scala.swing.Button
/**
* @author Rob Dickens */
class ButtonTrigTest extends FunSuite with ShouldMatchers {
  // Shared fixture: a single Button with the Trig trait mixed in. The tests
  // below mutate this one instance, so they rely on running in declaration
  // order — NOTE(review): reordering tests would change their outcomes.
  val trig = new Button with Trig
  val cmdString = "cmd"
  val prefix = "prefix-"
  // Minimal command: does nothing and reports cmdString as its text.
  val cmd = new Cmd {
    def apply() = None
    override def toString = cmdString
  }
  // How the trigger derives its text from a single Cmd: the command's
  // toString is used, and exerToText only takes effect after the text has
  // been reset to "" and the command reassigned.
  test("Cmd") {
    trig.text should equal ("")
    trig.cmd = cmd
    trig.text should equal (cmdString)
    trig.exerToText = (exer: Exer) => prefix + exer.cmd.toString
    trig.text should equal (cmdString)
    trig.cmd = NoCmd
    trig.text should equal (cmdString)
    trig.text = ""
    trig.cmd = cmd
    trig.text should equal (prefix + cmdString)
  }
  val text0 = "text0"
  val text1 = "text1"
  // An Exer built from a set/reset command pair: clicking flips the
  // underlying Tog, and the displayed text follows the current command —
  // or text0/text1 once those properties have been assigned.
  test("Cmd, Cmd") {
    val setCmdString = "set-cmd"
    val setCmd = new Cmd {
      def apply() = None
      override def toString = setCmdString
    }
    val resetCmdString = "reset-cmd"
    val resetCmd = new Cmd {
      def apply() = None
      override def toString = resetCmdString
    }
    val exer = Exer(setCmd, resetCmd)
    trig.text = ""
    trig.exer = exer
    trig.text should equal (prefix + setCmdString)
    trig.doClick()
    trig.text should equal (prefix + resetCmdString)
    exer.tog.isSet should be (true)
    trig.exer = NoExer
    trig.text should equal (prefix + resetCmdString)
    trig.text0 = text0
    trig.text1 = text1
    trig.text should equal (prefix + resetCmdString)
    trig.exer = exer
    exer.tog.isSet should be (true)
    trig.text should equal (text1)
    trig.doClick()
    exer.tog.isSet should be (false)
    trig.text should equal (text0)
  }
  // A toggling command (Tog.Cmd): clicks alternate the text between text0 and
  // text1, falling back to the exerToText rendering when the corresponding
  // text property is empty.
  test("Tog.Cmd") {
    val togCmdString = "tog-cmd"
    val togCmd = new Tog.Cmd {
      def apply() = None
      val tog = new Tog
      override def toString = togCmdString
    }
    trig.cmd = togCmd
    trig.text should equal (text0)
    trig.doClick()
    trig.text should equal (text1)
    trig.text0 = ""
    trig.doClick()
    trig.text should equal (prefix + togCmdString)
    trig.doClick()
    trig.text should equal (text1)
    trig.doClick()
    trig.text1 = ""
    trig.doClick()
    trig.text should equal (prefix + togCmdString)
  }
}
| robcd/lafros-gui | lafros-gui-cmds/src/test/scala/com/lafros/gui/cmds/ButtonTrigTest.scala | Scala | gpl-3.0 | 3,022 |
/*
* Copyright 2016 by Eugene Yokota
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gigahorse
package support.akkahttp
import scala.concurrent.{ Future, ExecutionContext }
import akka.http.scaladsl.model.{ HttpResponse, StatusCode, HttpHeader }
import akka.stream.Materializer
/**
 * [[CompletionHandler]] backed by Akka HTTP. Subclasses implement
 * [[onCompleted]] to turn the fully materialized response into an `A`.
 */
abstract class AkkaHttpCompletionHandler[A] extends CompletionHandler[A] {
  /** Called when the status line arrives; by default keep reading. */
  def onStatusReceived(status: StatusCode): State = State.Continue
  /** Called when the headers arrive; by default keep reading. */
  def onHeadersReceived(headers: Seq[HttpHeader]): State = State.Continue
  /** Builds the final result from the fully materialized response. */
  def onCompleted(response: FullResponse): A
  /**
   * Strictly materializes the (possibly streamed) entity within the
   * configured request timeout, then delegates to [[onCompleted]].
   */
  def onPartialResponse(httpResponse: HttpResponse, config: Config)(implicit fm: Materializer, ec: ExecutionContext): Future[A] =
    httpResponse.entity
      .toStrict(config.requestTimeout)
      .map(strictEntity => onCompleted(new AkkaHttpFullResponse(httpResponse, strictEntity)))
}
| eed3si9n/gigahorse | akka-http/src/main/scala/gigahorse/support/akkahttp/AkkaHttpCompletionHandler.scala | Scala | apache-2.0 | 1,343 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import javax.servlet.http.HttpServletRequest
import scala.concurrent.Await
import scala.xml.Node
import akka.pattern.ask
import org.json4s.JValue
import org.apache.spark.deploy.JsonProtocol
import org.apache.spark.deploy.DeployMessages.{RequestKillDriver, MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master._
import org.apache.spark.ui.{WebUIPage, UIUtils}
import org.apache.spark.util.Utils
/**
 * Web UI page for the standalone Master: renders the cluster summary plus
 * tables of workers and running/completed applications and drivers, serves a
 * JSON view of the master state, and handles kill requests for applications
 * and drivers.
 */
private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") {
  private val master = parent.masterActorRef
  private val timeout = parent.timeout
  /** Synchronously asks the Master actor for its state; blocks up to `timeout`. */
  def getMasterState: MasterStateResponse = {
    val stateFuture = (master ? RequestMasterState)(timeout).mapTo[MasterStateResponse]
    Await.result(stateFuture, timeout)
  }
  /** JSON rendering of the current master state. */
  override def renderJson(request: HttpServletRequest): JValue = {
    JsonProtocol.writeMasterState(getMasterState)
  }
  // Kill an application: look it up by id and remove it in state KILLED.
  def handleAppKillRequest(request: HttpServletRequest): Unit = {
    handleKillRequest(request, id => {
      parent.master.idToApp.get(id).foreach { app =>
        parent.master.removeApplication(app, ApplicationState.KILLED)
      }
    })
  }
  // Kill a driver: forward the request to the Master actor (fire-and-forget).
  def handleDriverKillRequest(request: HttpServletRequest): Unit = {
    handleKillRequest(request, id => { master ! RequestKillDriver(id) })
  }
  // Common kill handling: only runs `action` when killing is enabled, the
  // user has modify permissions, and the form supplied an id with
  // terminate=true.
  private def handleKillRequest(request: HttpServletRequest, action: String => Unit): Unit = {
    if (parent.killEnabled &&
        parent.master.securityMgr.checkModifyPermissions(request.getRemoteUser)) {
      val killFlag = Option(request.getParameter("terminate")).getOrElse("false").toBoolean
      val id = Option(request.getParameter("id"))
      if (id.isDefined && killFlag) {
        action(id.get)
      }
      // Brief pause so the asynchronous kill has a chance to take effect
      // before the page re-renders and shows the updated state.
      Thread.sleep(100)
    }
  }
  /** Index view listing applications and executors */
  def render(request: HttpServletRequest): Seq[Node] = {
    val state = getMasterState
    val workerHeaders = Seq("Worker Id", "Address", "State", "Cores", "Memory")
    val workers = state.workers.sortBy(_.id)
    val workerTable = UIUtils.listingTable(workerHeaders, workerRow, workers)
    val appHeaders = Seq("Application ID", "Name", "Cores", "Memory per Node", "Submitted Time",
      "User", "State", "Duration")
    val activeApps = state.activeApps.sortBy(_.startTime).reverse
    val activeAppsTable = UIUtils.listingTable(appHeaders, appRow, activeApps)
    val completedApps = state.completedApps.sortBy(_.endTime).reverse
    val completedAppsTable = UIUtils.listingTable(appHeaders, appRow, completedApps)
    val driverHeaders = Seq("Submission ID", "Submitted Time", "Worker", "State", "Cores",
      "Memory", "Main Class")
    val activeDrivers = state.activeDrivers.sortBy(_.startTime).reverse
    val activeDriversTable = UIUtils.listingTable(driverHeaders, driverRow, activeDrivers)
    val completedDrivers = state.completedDrivers.sortBy(_.startTime).reverse
    val completedDriversTable = UIUtils.listingTable(driverHeaders, driverRow, completedDrivers)
    // For now we only show driver information if the user has submitted drivers to the cluster.
    // This is until we integrate the notion of drivers and applications in the UI.
    def hasDrivers: Boolean = activeDrivers.length > 0 || completedDrivers.length > 0
    val content =
        <div class="row-fluid">
          <div class="span12">
            <ul class="unstyled">
              <li><strong>URL:</strong> {state.uri}</li>
              {
                state.restUri.map { uri =>
                  <li>
                    <strong>REST URL:</strong> {uri}
                    <span class="rest-uri"> (cluster mode)</span>
                  </li>
                }.getOrElse { Seq.empty }
              }
              <li><strong>Workers:</strong> {state.workers.size}</li>
              <li><strong>Cores:</strong> {state.workers.map(_.cores).sum} Total,
                {state.workers.map(_.coresUsed).sum} Used</li>
              <li><strong>Memory:</strong>
                {Utils.megabytesToString(state.workers.map(_.memory).sum)} Total,
                {Utils.megabytesToString(state.workers.map(_.memoryUsed).sum)} Used</li>
              <li><strong>Applications:</strong>
                {state.activeApps.size} Running,
                {state.completedApps.size} Completed </li>
              <li><strong>Drivers:</strong>
                {state.activeDrivers.size} Running,
                {state.completedDrivers.size} Completed </li>
              <li><strong>Status:</strong> {state.status}</li>
            </ul>
          </div>
        </div>
        <div class="row-fluid">
          <div class="span12">
            <h4> Workers </h4>
            {workerTable}
          </div>
        </div>
        <div class="row-fluid">
          <div class="span12">
            <h4> Running Applications </h4>
            {activeAppsTable}
          </div>
        </div>
        <div>
          {if (hasDrivers) {
             <div class="row-fluid">
               <div class="span12">
                 <h4> Running Drivers </h4>
                 {activeDriversTable}
               </div>
             </div>
           }
          }
        </div>
        <div class="row-fluid">
          <div class="span12">
            <h4> Completed Applications </h4>
            {completedAppsTable}
          </div>
        </div>
        <div>
          {
            if (hasDrivers) {
              <div class="row-fluid">
                <div class="span12">
                  <h4> Completed Drivers </h4>
                  {completedDriversTable}
                </div>
              </div>
            }
          }
        </div>;
    UIUtils.basicSparkPage(content, "Spark Master at " + state.uri)
  }
  // Renders one table row for a worker.
  private def workerRow(worker: WorkerInfo): Seq[Node] = {
    <tr>
      <td>
        <a href={worker.webUiAddress}>{worker.id}</a>
      </td>
      <td>{worker.host}:{worker.port}</td>
      <td>{worker.state}</td>
      <td>{worker.cores} ({worker.coresUsed} Used)</td>
      <td sorttable_customkey={"%s.%s".format(worker.memory, worker.memoryUsed)}>
        {Utils.megabytesToString(worker.memory)}
        ({Utils.megabytesToString(worker.memoryUsed)} Used)
      </td>
    </tr>
  }
  // Renders one table row for an application, with an inline kill form when
  // killing is enabled and the app is still RUNNING/WAITING.
  private def appRow(app: ApplicationInfo): Seq[Node] = {
    val killLink = if (parent.killEnabled &&
      (app.state == ApplicationState.RUNNING || app.state == ApplicationState.WAITING)) {
      val confirm =
        s"if (window.confirm('Are you sure you want to kill application ${app.id} ?')) " +
          "{ this.parentNode.submit(); return true; } else { return false; }"
      <form action="app/kill/" method="POST" style="display:inline">
        <input type="hidden" name="id" value={app.id.toString}/>
        <input type="hidden" name="terminate" value="true"/>
        <a href="#" onclick={confirm} class="kill-link">(kill)</a>
      </form>
    }
    <tr>
      <td>
        <a href={"app?appId=" + app.id}>{app.id}</a>
        {killLink}
      </td>
      <td>
        <a href={app.desc.appUiUrl}>{app.desc.name}</a>
      </td>
      <td>
        {app.coresGranted}
      </td>
      <td sorttable_customkey={app.desc.memoryPerExecutorMB.toString}>
        {Utils.megabytesToString(app.desc.memoryPerExecutorMB)}
      </td>
      <td>{UIUtils.formatDate(app.submitDate)}</td>
      <td>{app.desc.user}</td>
      <td>{app.state.toString}</td>
      <td>{UIUtils.formatDuration(app.duration)}</td>
    </tr>
  }
  // Renders one table row for a driver, with an inline kill form when killing
  // is enabled and the driver is RUNNING/SUBMITTED/RELAUNCHING.
  private def driverRow(driver: DriverInfo): Seq[Node] = {
    val killLink = if (parent.killEnabled &&
      (driver.state == DriverState.RUNNING ||
        driver.state == DriverState.SUBMITTED ||
        driver.state == DriverState.RELAUNCHING)) {
      val confirm =
        s"if (window.confirm('Are you sure you want to kill driver ${driver.id} ?')) " +
          "{ this.parentNode.submit(); return true; } else { return false; }"
      <form action="driver/kill/" method="POST" style="display:inline">
        <input type="hidden" name="id" value={driver.id.toString}/>
        <input type="hidden" name="terminate" value="true"/>
        <a href="#" onclick={confirm} class="kill-link">(kill)</a>
      </form>
    }
    <tr>
      <td>{driver.id} {killLink}</td>
      <td>{driver.submitDate}</td>
      <td>{driver.worker.map(w => <a href={w.webUiAddress}>{w.id.toString}</a>).getOrElse("None")}
      </td>
      <td>{driver.state}</td>
      <td sorttable_customkey={driver.desc.cores.toString}>
        {driver.desc.cores}
      </td>
      <td sorttable_customkey={driver.desc.mem.toString}>
        {Utils.megabytesToString(driver.desc.mem.toLong)}
      </td>
      <td>{driver.desc.command.arguments(2)}</td>
    </tr>
  }
}
| andrewor14/iolap | core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala | Scala | apache-2.0 | 9,526 |
package scala.quoted.show
/**
 * Strategy for decorating printed source fragments, e.g. with ANSI color
 * escape codes. Each method receives one fragment of a given syntactic kind
 * and returns its (possibly decorated) rendering.
 */
trait SyntaxHighlight {
  def highlightKeyword(str: String): String
  def highlightTypeDef(str: String): String
  def highlightLiteral(str: String): String
  def highlightValDef(str: String): String
  def highlightOperator(str: String): String
  def highlightAnnotation(str: String): String
  def highlightString(str: String): String
  // Rendering of the `???` placeholder.
  def highlightTripleQs: String
}
object SyntaxHighlight {
  /** Highlighter that wraps each fragment in ANSI escape codes for terminals. */
  def ANSI: SyntaxHighlight = new SyntaxHighlight {
    // Keep in sync with SyntaxHighlighting
    private val NoColor = Console.RESET
    private val CommentColor = Console.BLUE
    private val KeywordColor = Console.YELLOW
    private val ValDefColor = Console.CYAN
    private val LiteralColor = Console.RED
    private val StringColor = Console.GREEN
    private val TypeColor = Console.MAGENTA
    private val AnnotationColor = Console.MAGENTA
    // Wraps `str` in the given ANSI color, resetting afterwards.
    private def colored(color: String, str: String): String = s"$color$str$NoColor"
    def highlightKeyword(str: String): String = colored(KeywordColor, str)
    def highlightTypeDef(str: String): String = colored(TypeColor, str)
    def highlightLiteral(str: String): String = colored(LiteralColor, str)
    def highlightValDef(str: String): String = colored(ValDefColor, str)
    def highlightOperator(str: String): String = colored(TypeColor, str)
    def highlightAnnotation(str: String): String = colored(AnnotationColor, str)
    def highlightString(str: String): String = colored(StringColor, str)
    def highlightTripleQs: String = colored(Console.RED_B, "???")
  }
  /** No-op highlighter: returns every fragment unchanged. */
  def plain: SyntaxHighlight = new SyntaxHighlight {
    def highlightKeyword(str: String): String = str
    def highlightTypeDef(str: String): String = str
    def highlightLiteral(str: String): String = str
    def highlightValDef(str: String): String = str
    def highlightOperator(str: String): String = str
    def highlightAnnotation(str: String): String = str
    def highlightString(str: String): String = str
    def highlightTripleQs: String = "???"
  }
}
| som-snytt/dotty | library/src/scala/quoted/show/SyntaxHighlight.scala | Scala | apache-2.0 | 1,980 |
package models
import org.apache.commons.codec.binary.Base64
object Implicits {
  implicit class EncodableByteArray(bytes: Array[Byte]) {
    // Encodes the bytes as a Base64 string rendered in ASCII
    // (Apache Commons Codec standard Base64 alphabet).
    def toB64() = new String(Base64.encodeBase64(bytes), "ASCII")
  }
  implicit class DecodableString(str: String) {
    // Decodes a Base64 string (read as ASCII bytes) back into raw bytes.
    def fromB64() = Base64.decodeBase64(str.getBytes("ASCII"))
  }
} | alexdupre/ripple-blobvault | app/models/Implicits.scala | Scala | bsd-2-clause | 329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.rules.logical._
import org.apache.flink.table.planner.plan.rules.physical.FlinkExpandConversionRule
import org.apache.flink.table.planner.plan.rules.physical.stream._
import org.apache.calcite.rel.core.RelFactories
import org.apache.calcite.rel.logical.{LogicalIntersect, LogicalMinus, LogicalUnion}
import org.apache.calcite.rel.rules._
import org.apache.calcite.tools.{RuleSet, RuleSets}
import scala.collection.JavaConverters._
/**
 * Collections of planner rules used by the different phases of the streaming
 * optimizer: sub-query rewriting, normalization, predicate pushdown, logical
 * optimization, physical translation, and post-physical rewrites.
 */
object FlinkStreamRuleSets {
  /** RuleSet to rewrite/remove sub-queries in filter conditions (semi-join preparation). */
  val SEMI_JOIN_RULES: RuleSet = RuleSets.ofList(
    SimplifyFilterConditionRule.EXTENDED,
    FlinkRewriteSubQueryRule.FILTER,
    FlinkSubQueryRemoveRule.FILTER,
    JoinConditionTypeCoerceRule.INSTANCE,
    FlinkJoinPushExpressionsRule.INSTANCE
  )
  /**
    * Convert sub-queries before query decorrelation.
    */
  val TABLE_SUBQUERY_RULES: RuleSet = RuleSets.ofList(
    SubQueryRemoveRule.FILTER,
    SubQueryRemoveRule.PROJECT,
    SubQueryRemoveRule.JOIN
  )
  /**
    * Expand plan by replacing references to tables into a proper plan sub trees. Those rules
    * can create new plan nodes.
    */
  val EXPAND_PLAN_RULES: RuleSet = RuleSets.ofList(
    LogicalCorrelateToJoinFromTemporalTableRule.WITH_FILTER,
    LogicalCorrelateToJoinFromTemporalTableRule.WITHOUT_FILTER,
    LogicalCorrelateToJoinFromTemporalTableFunctionRule.INSTANCE,
    TableScanRule.INSTANCE)
  /** Clean-up applied after plan expansion. */
  val POST_EXPAND_CLEAN_UP_RULES: RuleSet = RuleSets.ofList(
    EnumerableToLogicalTableScan.INSTANCE)
  /**
    * Convert table references before query decorrelation.
    */
  val TABLE_REF_RULES: RuleSet = RuleSets.ofList(
    TableScanRule.INSTANCE,
    EnumerableToLogicalTableScan.INSTANCE
  )
  /**
    * RuleSet to reduce expressions
    */
  private val REDUCE_EXPRESSION_RULES: RuleSet = RuleSets.ofList(
    ReduceExpressionsRule.FILTER_INSTANCE,
    ReduceExpressionsRule.PROJECT_INSTANCE,
    ReduceExpressionsRule.CALC_INSTANCE,
    ReduceExpressionsRule.JOIN_INSTANCE
  )
  /**
    * RuleSet to rewrite coalesce to case when
    */
  private val REWRITE_COALESCE_RULES: RuleSet = RuleSets.ofList(
    // rewrite coalesce to case when
    RewriteCoalesceRule.FILTER_INSTANCE,
    RewriteCoalesceRule.PROJECT_INSTANCE,
    RewriteCoalesceRule.JOIN_INSTANCE,
    RewriteCoalesceRule.CALC_INSTANCE
  )
  /**
    * RuleSet to simplify predicate expressions in filters and joins
    */
  private val PREDICATE_SIMPLIFY_EXPRESSION_RULES: RuleSet = RuleSets.ofList(
    SimplifyFilterConditionRule.INSTANCE,
    SimplifyJoinConditionRule.INSTANCE,
    JoinConditionTypeCoerceRule.INSTANCE,
    JoinPushExpressionsRule.INSTANCE
  )
  /**
    * RuleSet to normalize plans for stream
    */
  val DEFAULT_REWRITE_RULES: RuleSet = RuleSets.ofList((
    PREDICATE_SIMPLIFY_EXPRESSION_RULES.asScala ++
      REWRITE_COALESCE_RULES.asScala ++
      REDUCE_EXPRESSION_RULES.asScala ++
      List(
        StreamLogicalWindowAggregateRule.INSTANCE,
        // slices a project into sections which contain window agg functions
        // and sections which do not.
        ProjectToWindowRule.PROJECT,
        WindowPropertiesRules.WINDOW_PROPERTIES_RULE,
        WindowPropertiesRules.WINDOW_PROPERTIES_HAVING_RULE,
        //ensure union set operator have the same row type
        new CoerceInputsRule(classOf[LogicalUnion], false),
        //ensure intersect set operator have the same row type
        new CoerceInputsRule(classOf[LogicalIntersect], false),
        //ensure except set operator have the same row type
        new CoerceInputsRule(classOf[LogicalMinus], false),
        ConvertToNotInOrInRule.INSTANCE,
        // optimize limit 0
        FlinkLimit0RemoveRule.INSTANCE,
        // unnest rule
        LogicalUnnestRule.INSTANCE
      )
    ).asJava)
  /**
    * RuleSet about filter
    */
  private val FILTER_RULES: RuleSet = RuleSets.ofList(
    // push a filter into a join
    FilterJoinRule.FILTER_ON_JOIN,
    // push filter into the children of a join
    FilterJoinRule.JOIN,
    // push filter through an aggregation
    FilterAggregateTransposeRule.INSTANCE,
    // push a filter past a project
    FilterProjectTransposeRule.INSTANCE,
    // push a filter past a setop
    FilterSetOpTransposeRule.INSTANCE,
    FilterMergeRule.INSTANCE
  )
  /**
    * RuleSet to do predicate pushdown
    */
  val FILTER_PREPARE_RULES: RuleSet = RuleSets.ofList((
    FILTER_RULES.asScala
      // simplify predicate expressions in filters and joins
      ++ PREDICATE_SIMPLIFY_EXPRESSION_RULES.asScala
      // reduce expressions in filters and joins
      ++ REDUCE_EXPRESSION_RULES.asScala
    ).asJava)
  /**
    * RuleSet to do push predicate/partition into table scan
    */
  val FILTER_TABLESCAN_PUSHDOWN_RULES: RuleSet = RuleSets.ofList(
    // push a filter down into the table scan
    PushFilterIntoTableSourceScanRule.INSTANCE,
    // push partition into the table scan
    PushPartitionIntoTableSourceScanRule.INSTANCE
  )
  /**
    * RuleSet to prune empty results rules
    */
  val PRUNE_EMPTY_RULES: RuleSet = RuleSets.ofList(
    PruneEmptyRules.AGGREGATE_INSTANCE,
    PruneEmptyRules.FILTER_INSTANCE,
    PruneEmptyRules.JOIN_LEFT_INSTANCE,
    FlinkPruneEmptyRules.JOIN_RIGHT_INSTANCE,
    PruneEmptyRules.PROJECT_INSTANCE,
    PruneEmptyRules.SORT_INSTANCE,
    PruneEmptyRules.UNION_INSTANCE
  )
  /**
    * RuleSet about project
    */
  val PROJECT_RULES: RuleSet = RuleSets.ofList(
    // push a projection past a filter
    ProjectFilterTransposeRule.INSTANCE,
    // push a projection to the children of a non semi/anti join
    // push all expressions to handle the time indicator correctly
    new FlinkProjectJoinTransposeRule(
      PushProjector.ExprCondition.FALSE, RelFactories.LOGICAL_BUILDER),
    // push a projection to the children of a semi/anti Join
    ProjectSemiAntiJoinTransposeRule.INSTANCE,
    // merge projections
    ProjectMergeRule.INSTANCE,
    // remove identity project
    ProjectRemoveRule.INSTANCE,
    // reorder sort and projection
    ProjectSortTransposeRule.INSTANCE,
    //removes constant keys from an Agg
    AggregateProjectPullUpConstantsRule.INSTANCE,
    // push project through a Union
    ProjectSetOpTransposeRule.INSTANCE
  )
  /** RuleSet to prepare join reordering: collapse joins (and adjacent projects/filters) into a MultiJoin. */
  val JOIN_REORDER_PREPARE_RULES: RuleSet = RuleSets.ofList(
    // merge project to MultiJoin
    ProjectMultiJoinMergeRule.INSTANCE,
    // merge filter to MultiJoin
    FilterMultiJoinMergeRule.INSTANCE,
    // merge join to MultiJoin
    JoinToMultiJoinRule.INSTANCE
  )
  /** RuleSet that performs the actual join reordering on a MultiJoin. */
  val JOIN_REORDER_RULES: RuleSet = RuleSets.ofList(
    // equi-join predicates transfer
    RewriteMultiJoinConditionRule.INSTANCE,
    // join reorder
    LoptOptimizeJoinRule.INSTANCE
  )
  /**
    * RuleSet to do logical optimize.
    * This RuleSet is a sub-set of [[LOGICAL_OPT_RULES]].
    */
  private val LOGICAL_RULES: RuleSet = RuleSets.ofList(
    // scan optimization
    PushProjectIntoTableSourceScanRule.INSTANCE,
    PushFilterIntoTableSourceScanRule.INSTANCE,
    // reorder sort and projection
    SortProjectTransposeRule.INSTANCE,
    // remove unnecessary sort rule
    SortRemoveRule.INSTANCE,
    // join rules
    FlinkJoinPushExpressionsRule.INSTANCE,
    // remove union with only a single child
    UnionEliminatorRule.INSTANCE,
    // convert non-all union into all-union + distinct
    UnionToDistinctRule.INSTANCE,
    // aggregation and projection rules
    AggregateProjectMergeRule.INSTANCE,
    AggregateProjectPullUpConstantsRule.INSTANCE,
    // remove aggregation if it does not aggregate and input is already distinct
    FlinkAggregateRemoveRule.INSTANCE,
    // push aggregate through join
    FlinkAggregateJoinTransposeRule.EXTENDED,
    // using variants of aggregate union rule
    AggregateUnionAggregateRule.AGG_ON_FIRST_INPUT,
    AggregateUnionAggregateRule.AGG_ON_SECOND_INPUT,
    // reduce aggregate functions like AVG, STDDEV_POP etc.
    AggregateReduceFunctionsRule.INSTANCE,
    WindowAggregateReduceFunctionsRule.INSTANCE,
    // reduce useless aggCall
    PruneAggregateCallRule.PROJECT_ON_AGGREGATE,
    PruneAggregateCallRule.CALC_ON_AGGREGATE,
    // expand grouping sets
    DecomposeGroupingSetsRule.INSTANCE,
    // calc rules
    FilterCalcMergeRule.INSTANCE,
    ProjectCalcMergeRule.INSTANCE,
    FilterToCalcRule.INSTANCE,
    ProjectToCalcRule.INSTANCE,
    FlinkCalcMergeRule.INSTANCE,
    // semi/anti join transpose rule
    FlinkSemiAntiJoinJoinTransposeRule.INSTANCE,
    FlinkSemiAntiJoinProjectTransposeRule.INSTANCE,
    FlinkSemiAntiJoinFilterTransposeRule.INSTANCE,
    // set operators
    ReplaceIntersectWithSemiJoinRule.INSTANCE,
    RewriteIntersectAllRule.INSTANCE,
    ReplaceMinusWithAntiJoinRule.INSTANCE,
    RewriteMinusAllRule.INSTANCE
  )
  /**
    * RuleSet to translate calcite nodes to flink nodes
    */
  private val LOGICAL_CONVERTERS: RuleSet = RuleSets.ofList(
    // translate to flink logical rel nodes
    FlinkLogicalAggregate.STREAM_CONVERTER,
    FlinkLogicalTableAggregate.CONVERTER,
    FlinkLogicalOverAggregate.CONVERTER,
    FlinkLogicalCalc.CONVERTER,
    FlinkLogicalCorrelate.CONVERTER,
    FlinkLogicalJoin.CONVERTER,
    FlinkLogicalSort.STREAM_CONVERTER,
    FlinkLogicalUnion.CONVERTER,
    FlinkLogicalValues.CONVERTER,
    FlinkLogicalTableSourceScan.CONVERTER,
    FlinkLogicalTableFunctionScan.CONVERTER,
    FlinkLogicalDataStreamTableScan.CONVERTER,
    FlinkLogicalIntermediateTableScan.CONVERTER,
    FlinkLogicalExpand.CONVERTER,
    FlinkLogicalWatermarkAssigner.CONVERTER,
    FlinkLogicalWindowAggregate.CONVERTER,
    FlinkLogicalWindowTableAggregate.CONVERTER,
    FlinkLogicalSnapshot.CONVERTER,
    FlinkLogicalMatch.CONVERTER,
    FlinkLogicalSink.CONVERTER
  )
  /**
    * RuleSet to do logical optimize for stream
    */
  val LOGICAL_OPT_RULES: RuleSet = RuleSets.ofList((
    FILTER_RULES.asScala ++
      PROJECT_RULES.asScala ++
      PRUNE_EMPTY_RULES.asScala ++
      LOGICAL_RULES.asScala ++
      LOGICAL_CONVERTERS.asScala
    ).asJava)
  /**
    * RuleSet to do rewrite on FlinkLogicalRel for Stream
    */
  val LOGICAL_REWRITE: RuleSet = RuleSets.ofList(
    // transform over window to topn node
    FlinkLogicalRankRule.INSTANCE,
    // transpose calc past rank to reduce rank input fields
    CalcRankTransposeRule.INSTANCE,
    // remove output of rank number when it is a constant
    RankNumberColumnRemoveRule.INSTANCE,
    // split distinct aggregate to reduce data skew
    SplitAggregateRule.INSTANCE,
    // transpose calc past snapshot
    CalcSnapshotTransposeRule.INSTANCE,
    // merge calc after calc transpose
    FlinkCalcMergeRule.INSTANCE,
    // Rule that splits python ScalarFunctions from java/scala ScalarFunctions.
    PythonScalarFunctionSplitRule.INSTANCE
  )
  /**
    * RuleSet to do physical optimize for stream
    */
  val PHYSICAL_OPT_RULES: RuleSet = RuleSets.ofList(
    FlinkExpandConversionRule.STREAM_INSTANCE,
    // source
    StreamExecDataStreamScanRule.INSTANCE,
    StreamExecTableSourceScanRule.INSTANCE,
    StreamExecIntermediateTableScanRule.INSTANCE,
    StreamExecWatermarkAssignerRule.INSTANCE,
    StreamExecValuesRule.INSTANCE,
    // calc
    StreamExecCalcRule.INSTANCE,
    StreamExecPythonCalcRule.INSTANCE,
    // union
    StreamExecUnionRule.INSTANCE,
    // sort
    StreamExecSortRule.INSTANCE,
    StreamExecLimitRule.INSTANCE,
    StreamExecSortLimitRule.INSTANCE,
    StreamExecTemporalSortRule.INSTANCE,
    // rank
    StreamExecRankRule.INSTANCE,
    StreamExecDeduplicateRule.RANK_INSTANCE,
    // expand
    StreamExecExpandRule.INSTANCE,
    // group agg
    StreamExecGroupAggregateRule.INSTANCE,
    StreamExecGroupTableAggregateRule.INSTANCE,
    // over agg
    StreamExecOverAggregateRule.INSTANCE,
    // window agg
    StreamExecGroupWindowAggregateRule.INSTANCE,
    StreamExecGroupWindowTableAggregateRule.INSTANCE,
    // join
    StreamExecJoinRule.INSTANCE,
    StreamExecWindowJoinRule.INSTANCE,
    StreamExecTemporalJoinRule.INSTANCE,
    StreamExecLookupJoinRule.SNAPSHOT_ON_TABLESCAN,
    StreamExecLookupJoinRule.SNAPSHOT_ON_CALC_TABLESCAN,
    // CEP
    StreamExecMatchRule.INSTANCE,
    // correlate
    StreamExecConstantTableFunctionScanRule.INSTANCE,
    StreamExecCorrelateRule.INSTANCE,
    // sink
    StreamExecSinkRule.INSTANCE
  )
  /**
    * RuleSet for retraction inference.
    */
  val RETRACTION_RULES: RuleSet = RuleSets.ofList(
    // retraction rules
    StreamExecRetractionRules.DEFAULT_RETRACTION_INSTANCE,
    StreamExecRetractionRules.UPDATES_AS_RETRACTION_INSTANCE,
    StreamExecRetractionRules.ACCMODE_INSTANCE
  )
  /**
    * RuleSet related to watermark assignment.
    */
  val MINI_BATCH_RULES: RuleSet = RuleSets.ofList(
    // watermark interval infer rule
    MiniBatchIntervalInferRule.INSTANCE
  )
  /**
    * RuleSet to optimize plans after stream exec execution.
    */
  val PHYSICAL_REWRITE: RuleSet = RuleSets.ofList(
    //optimize agg rule
    TwoStageOptimizedAggregateRule.INSTANCE,
    // incremental agg rule
    IncrementalAggregateRule.INSTANCE
  )
}
| mbode/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/FlinkStreamRuleSets.scala | Scala | apache-2.0 | 13,973 |
package org.jetbrains.plugins.scala
package lang
package parser
import com.intellij.extapi.psi.ASTWrapperPsiElement
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiUtilCore
import org.jetbrains.plugins.scala.lang.psi.impl.base._
import org.jetbrains.plugins.scala.lang.psi.impl.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.impl.base.types._
import org.jetbrains.plugins.scala.lang.psi.impl.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.expr.xml._
import org.jetbrains.plugins.scala.lang.psi.impl.statements._
import org.jetbrains.plugins.scala.lang.psi.impl.statements.params._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.imports._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.packaging._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocElementType
import org.jetbrains.plugins.scala.lang.scaladoc.psi.ScalaDocPsiCreator
/** Default, stateless [[ScalaPsiCreator]] instance. */
object ScalaPsiCreator extends ScalaPsiCreator
trait ScalaPsiCreator extends PsiCreator {
  /** Maps a parser AST node to its concrete PSI element implementation, keyed
   *  on the node's element type. Element types that are not handled here
   *  (expressions, types, patterns, ...) are dispatched to `inner`.
   */
  def createElement(node: ASTNode): PsiElement =
    node.getElementType match {
      case s: SelfPsiCreator => s.createElement(node)
      case _: ScalaDocElementType => ScalaDocPsiCreator.createElement(node)
      /*****************************************************/
      /********************** TOP **************************/
      /*****************************************************/
      case ScalaElementTypes.PACKAGING => new ScPackagingImpl(node)
      /***************************************************/
      /********************* IMPORT **********************/
      /***************************************************/
      case ScalaElementTypes.IMPORT_STMT => new ScImportStmtImpl(node)
      case ScalaElementTypes.IMPORT_EXPR => new ScImportExprImpl(node)
      case ScalaElementTypes.IMPORT_SELECTORS => new ScImportSelectorsImpl(node)
      case ScalaElementTypes.IMPORT_SELECTOR => new ScImportSelectorImpl(node)
      /***************************************************/
      /********************** DEF ************************/
      /***************************************************/
      case ScalaElementTypes.CLASS_DEF => new ScClassImpl(node)
      case ScalaElementTypes.OBJECT_DEF => new ScObjectImpl(node)
      case ScalaElementTypes.TRAIT_DEF => new ScTraitImpl(node)
      /***************** class ***************/
      case ScalaElementTypes.REQUIRES_BLOCK => new ScRequiresBlockImpl(node)
      case ScalaElementTypes.EXTENDS_BLOCK => new ScExtendsBlockImpl(node)
      /***************************************************/
      /******************** TEMPLATES ********************/
      /***************************************************/
      /******************* parents ****************/
      case ScalaElementTypes.CLASS_PARENTS => new ScClassParentsImpl(node)
      case ScalaElementTypes.TRAIT_PARENTS => new ScTraitParentsImpl(node)
      /******************* body *******************/
      case ScalaElementTypes.TEMPLATE_BODY => new ScTemplateBodyImpl(node)
      /***************************************************/
      /*************** TEMPLATE STATEMENTS ***************/
      /***************************************************/
      /*************** DECLARATION ***************/
      case ScalaElementTypes.VALUE_DECLARATION => new ScValueDeclarationImpl(node)
      case ScalaElementTypes.VARIABLE_DECLARATION => new ScVariableDeclarationImpl(node)
      case ScalaElementTypes.FUNCTION_DECLARATION => new ScFunctionDeclarationImpl(node)
      case ScalaElementTypes.TYPE_DECLARATION => new ScTypeAliasDeclarationImpl(node)
      /*************** DEFINITION ***************/
      case ScalaElementTypes.PATTERN_DEFINITION => new ScPatternDefinitionImpl(node)
      case ScalaElementTypes.VARIABLE_DEFINITION => new ScVariableDefinitionImpl(node)
      case ScalaElementTypes.FUNCTION_DEFINITION => new ScFunctionDefinitionImpl(node)
      case ScalaElementTypes.MACRO_DEFINITION => new ScMacroDefinitionImpl(node)
      case ScalaElementTypes.TYPE_DEFINITION => new ScTypeAliasDefinitionImpl(node)
      case ScalaElementTypes.EARLY_DEFINITIONS => new ScEarlyDefinitionsImpl(node)
      /********** function definition: supplementary constructor *************/
      case ScalaElementTypes.SELF_INVOCATION => new ScSelfInvocationImpl(node)
      case ScalaElementTypes.CONSTR_EXPR => new ScConstrExprImpl(node)
      case ScalaElementTypes.PRIMARY_CONSTRUCTOR => new ScPrimaryConstructorImpl(node)
      /**************** function ******************/
      case ScalaElementTypes.CONSTRUCTOR => new ScConstructorImpl(node)
      /**************** variable ******************/
      case ScalaElementTypes.IDENTIFIER_LIST => new ScIdListImpl(node)
      case ScalaElementTypes.FIELD_ID => new ScFieldIdImpl(node)
      case ScalaElementTypes.REFERENCE => new ScStableCodeReferenceElementImpl(node)
      /**************** pattern ******************/
      case ScalaElementTypes.PATTERN_LIST => new ScPatternListImpl(node)
      /***************************************************/
      /********* PARAMETERS AND TYPE PARAMETERS **********/
      /***************************************************/
      /******************** parameters *******************/
      case ScalaElementTypes.PARAM_CLAUSE => new ScParameterClauseImpl(node)
      case ScalaElementTypes.PARAM_CLAUSES => new ScParametersImpl(node)
      /*********** class ************/
      case ScalaElementTypes.CLASS_PARAM => new ScClassParameterImpl(node)
      case ScalaElementTypes.PARAM => new ScParameterImpl(node)
      case ScalaElementTypes.PARAM_TYPE => new ScParameterTypeImpl(node)
      /***************** type parameters ****************/
      case ScalaElementTypes.TYPE_PARAM_CLAUSE => new ScTypeParamClauseImpl(node)
      /********** function **********/
      case ScalaElementTypes.TYPE_PARAM => new ScTypeParamImpl(node)
      /***************************************************/
      /************* MODIFIERS AND ATTRIBUTES ************/
      /***************************************************/
      /************** modifiers **************/
      case ScalaElementTypes.MODIFIERS => new ScModifierListImpl(node)
      case ScalaElementTypes.ACCESS_MODIFIER => new ScAccessModifierImpl(node)
      /************* annotation *************/
      case ScalaElementTypes.ANNOTATION => new ScAnnotationImpl(node)
      case ScalaElementTypes.ANNOTATION_EXPR => new ScAnnotationExprImpl(node)
      case ScalaElementTypes.ANNOTATIONS => new ScAnnotationsImpl(node)
      case ScalaElementTypes.NAME_VALUE_PAIR => new ScNameValuePairImpl(node)
      case _ => inner(node)
    }
//to prevent stack overflow in type checker let's introduce helper method
  /**
   * PSI factory dispatch, part 1 of 3: literals and type elements.
   * Maps an AST node's element type to the concrete PSI implementation wrapping it.
   * Split into chained helper methods (`inner` -> `inner1` -> `inner2`) solely to keep
   * each match small enough for the type checker (see comment above); falls through
   * to `inner1` for element types not handled here.
   */
  protected def inner(node: ASTNode): PsiElement = node.getElementType match {
    /********************** TOKENS **********************/
    /********************* LITERALS *********************/
    case ScalaElementTypes.LITERAL => new ScLiteralImpl(node)
    case ScalaElementTypes.INTERPOLATED_STRING_LITERAL => new ScInterpolatedStringLiteralImpl(node)
    case ScalaElementTypes.INTERPOLATED_PREFIX_PATTERN_REFERENCE => new ScInterpolatedPrefixReference(node)
    case ScalaElementTypes.INTERPOLATED_PREFIX_LITERAL_REFERENCE => new ScInterpolatedStringPartReference(node)
    /********************** TYPES ************************/
    case ScalaElementTypes.SIMPLE_TYPE => new ScSimpleTypeElementImpl(node)
    case ScalaElementTypes.TUPLE_TYPE => new ScTupleTypeElementImpl(node)
    case ScalaElementTypes.TYPE_IN_PARENTHESIS => new ScParenthesisedTypeElementImpl(node)
    case ScalaElementTypes.TYPE => new ScFunctionalTypeElementImpl(node)
    case ScalaElementTypes.COMPOUND_TYPE => new ScCompoundTypeElementImpl(node)
    case ScalaElementTypes.INFIX_TYPE => new ScInfixTypeElementImpl(node)
    case ScalaElementTypes.REFINEMENT => new ScRefinementImpl(node)
    case ScalaElementTypes.REFINEMENTS => new ScRefinementsImpl(node)
    case ScalaElementTypes.TYPES => new ScTypesImpl(node)
    case ScalaElementTypes.TYPE_ARGS => new ScTypeArgsImpl(node)
    case ScalaElementTypes.ASCRIPTION => new ScAscriptionImpl(node)
    case ScalaElementTypes.ANNOT_TYPE => new ScAnnotTypeElementImpl(node)
    case ScalaElementTypes.SEQUENCE_ARG => new ScSequenceArgImpl(node)
    case ScalaElementTypes.EXISTENTIAL_CLAUSE => new ScExistentialClauseImpl(node)
    case ScalaElementTypes.SELF_TYPE => new ScSelfTypeElementImpl(node)
    case ScalaElementTypes.EXISTENTIAL_TYPE => new ScExistentialTypeElementImpl(node)
    case ScalaElementTypes.WILDCARD_TYPE => new ScWildcardTypeElementImpl(node)
    case ScalaElementTypes.TYPE_PROJECTION => new ScTypeProjectionImpl(node)
    case ScalaElementTypes.TYPE_GENERIC_CALL => new ScParameterizedTypeElementImpl(node)
    case ScalaElementTypes.TYPE_VARIABLE => new ScTypeVariableTypeElementImpl(node)
    // Not a literal/type element: delegate to the next dispatch stage.
    case _ => inner1(node)
  }
//to prevent stack overflow in type checker let's introduce helper method
  /**
   * PSI factory dispatch, part 2 of 3: expressions and statements.
   * Continuation of `inner`; falls through to `inner2` for unhandled element types.
   */
  private def inner1(node: ASTNode): PsiElement = node.getElementType match {
    /******************* EXPRESSIONS*********************/
    case ScalaElementTypes.PARENT_EXPR => new ScParenthesisedExprImpl(node)
    case ScalaElementTypes.METHOD_CALL => new ScMethodCallImpl(node)
    case ScalaElementTypes.REFERENCE_EXPRESSION => new ScReferenceExpressionImpl(node)
    case ScalaElementTypes.THIS_REFERENCE => new ScThisReferenceImpl(node)
    case ScalaElementTypes.SUPER_REFERENCE => new ScSuperReferenceImpl(node)
    case ScalaElementTypes.GENERIC_CALL => new ScGenericCallImpl(node)
    case ScalaElementTypes.PREFIX_EXPR => new ScPrefixExprImpl(node)
    case ScalaElementTypes.PLACEHOLDER_EXPR => new ScUnderscoreSectionImpl(node)
    case ScalaElementTypes.UNIT_EXPR => new ScUnitExprImpl(node)
    case ScalaElementTypes.INFIX_EXPR => new ScInfixExprImpl(node)
    case ScalaElementTypes.POSTFIX_EXPR => new ScPostfixExprImpl(node)
    case ScalaElementTypes.FUNCTION_EXPR => new ScFunctionExprImpl(node)
    case ScalaElementTypes.ENUMERATOR => new ScEnumeratorImpl(node)
    case ScalaElementTypes.ENUMERATORS => new ScEnumeratorsImpl(node)
    case ScalaElementTypes.GENERATOR => new ScGeneratorImpl(node)
    case ScalaElementTypes.GUARD => new ScGuardImpl(node)
    case ScalaElementTypes.EXPRS => new ScExprsImpl(node)
    case ScalaElementTypes.ARG_EXPRS => new ScArgumentExprListImpl(node)
    // BLOCK_EXPR deliberately yields no PSI element here — presumably created
    // through a different (stub-based) path elsewhere in the plugin; verify.
    case ScalaElementTypes.BLOCK_EXPR => PsiUtilCore.NULL_PSI_ELEMENT
    case ScalaElementTypes.CONSTR_BLOCK => new ScConstrBlockImpl(node)
    case ScalaElementTypes.BLOCK => new ScBlockImpl(node)
    case ScalaElementTypes.TUPLE => new ScTupleImpl(node)
    case ScalaElementTypes.ERROR_STMT => new ScErrorStatImpl(node)
    case ScalaElementTypes.IF_STMT => new ScIfStmtImpl(node)
    case ScalaElementTypes.FOR_STMT => new ScForStatementImpl(node)
    case ScalaElementTypes.DO_STMT => new ScDoStmtImpl(node)
    case ScalaElementTypes.TRY_STMT => new ScTryStmtImpl(node)
    case ScalaElementTypes.TRY_BLOCK => new ScTryBlockImpl(node)
    case ScalaElementTypes.CATCH_BLOCK => new ScCatchBlockImpl(node)
    case ScalaElementTypes.FINALLY_BLOCK => new ScFinallyBlockImpl(node)
    case ScalaElementTypes.WHILE_STMT => new ScWhileStmtImpl(node)
    case ScalaElementTypes.RETURN_STMT => new ScReturnStmtImpl(node)
    case ScalaElementTypes.THROW_STMT => new ScThrowStmtImpl(node)
    case ScalaElementTypes.ASSIGN_STMT => new ScAssignStmtImpl(node)
    case ScalaElementTypes.TYPED_EXPR_STMT => new ScTypedStmtImpl(node)
    case ScalaElementTypes.MATCH_STMT => new ScMatchStmtImpl(node)
    case ScalaElementTypes.NEW_TEMPLATE => new ScNewTemplateDefinitionImpl(node)
    // Not an expression/statement: delegate to the final dispatch stage.
    case _ => inner2(node)
  }
//to prevent stack overflow in type checker let's introduce helper method
  /**
   * PSI factory dispatch, part 3 of 3: patterns and XML elements.
   * Final stage of the dispatch chain; anything still unmatched is wrapped in a
   * generic [[ASTWrapperPsiElement]].
   */
  private def inner2(node: ASTNode): PsiElement = node.getElementType match {
    /******************* PATTERNS *********************/
    case ScalaElementTypes.TUPLE_PATTERN => new ScTuplePatternImpl(node)
    case ScalaElementTypes.CONSTRUCTOR_PATTERN => new ScConstructorPatternImpl(node)
    case ScalaElementTypes.TYPED_PATTERN => new ScTypedPatternImpl(node)
    case ScalaElementTypes.NAMING_PATTERN => new ScNamingPatternImpl(node)
    case ScalaElementTypes.INFIX_PATTERN => new ScInfixPatternImpl(node)
    case ScalaElementTypes.PATTERN => new ScCompositePatternImpl(node)
    case ScalaElementTypes.PATTERN_ARGS => new ScPatternArgumentListImpl(node)
    case ScalaElementTypes.PATTERNS => new ScPatternsImpl(node)
    case ScalaElementTypes.WILDCARD_PATTERN => new ScWildcardPatternImpl(node)
    case ScalaElementTypes.SEQ_WILDCARD => new ScSeqWildcardImpl(node)
    case ScalaElementTypes.CASE_CLAUSE => new ScCaseClauseImpl(node)
    case ScalaElementTypes.CASE_CLAUSES => new ScCaseClausesImpl(node)
    case ScalaElementTypes.LITERAL_PATTERN => new ScLiteralPatternImpl(node)
    case ScalaElementTypes.INTERPOLATION_PATTERN => new ScInterpolationPatternImpl(node)
    case ScalaElementTypes.REFERENCE_PATTERN => new ScReferencePatternImpl(node)
    case ScalaElementTypes.STABLE_REFERENCE_PATTERN => new ScStableReferenceElementPatternImpl(node)
    case ScalaElementTypes.PATTERN_IN_PARENTHESIS => new ScParenthesisedPatternImpl(node)
    case ScalaElementTypes.TYPE_PATTERN => new ScTypePatternImpl(node)
    case ScalaElementTypes.TYPE_PATTERN_ARGS => new ScTypePatternArgsImpl(node)
    /********************* XML ************************/
    case ScalaElementTypes.XML_EXPR => new ScXmlExprImpl(node)
    case ScalaElementTypes.XML_START_TAG => new ScXmlStartTagImpl(node)
    case ScalaElementTypes.XML_END_TAG => new ScXmlEndTagImpl(node)
    case ScalaElementTypes.XML_EMPTY_TAG => new ScXmlEmptyTagImpl(node)
    case ScalaElementTypes.XML_PI => new ScXmlPIImpl(node)
    case ScalaElementTypes.XML_CD_SECT => new ScXmlCDSectImpl(node)
    case ScalaElementTypes.XML_ATTRIBUTE => new ScXmlAttributeImpl(node)
    case ScalaElementTypes.XML_PATTERN => new ScXmlPatternImpl(node)
    case ScalaElementTypes.XML_COMMENT => new ScXmlCommentImpl(node)
    case ScalaElementTypes.XML_ELEMENT => new ScXmlElementImpl(node)
    // Unknown element type: generic PSI wrapper around the raw AST node.
    case _ => new ASTWrapperPsiElement(node)
  }
  // Marker trait for element types that build their own PSI element — presumably
  // checked elsewhere in the plugin before falling back to this factory; verify.
  trait SelfPsiCreator extends PsiCreator
}
/** Factory contract: produce the PSI element wrapping a given AST node. */
trait PsiCreator {
  def createElement(node: ASTNode): PsiElement
}
package org.pantsbuild.zinc
import xsbti.compile.CompileProgress
import sbt.Logger
/**
* SimpleCompileProgress implements CompileProgress to add output to zinc scala compilations, but
* does not implement the capability to cancel compilations via the `advance` method.
*/
class SimpleCompileProgress(logPhases: Boolean, printProgress: Boolean, heartbeatSecs: Int)(log: Logger) extends CompileProgress {
  // Epoch millis of the last heartbeat; volatile because sbt may invoke the
  // progress callbacks from different threads across compilation phases.
  @volatile private var lastHeartbeatMillis: Long = 0

  /**
   * startUnit Optionally reports to stdout when a phase of compilation has begun for a file.
   *
   * @param phase    name of the compiler phase that started
   * @param unitPath path of the compilation unit being processed
   */
  def startUnit(phase: String, unitPath: String): Unit = {
    if (logPhases) {
      log.info(phase + " " + unitPath + "...")
    }
  }

  /**
   * advance Optionally emit the percentage of steps completed, and/or a heartbeat ('.' character)
   *         roughly every `heartbeatSecs` seconds (though buffering may make the actual interval
   *         imprecise.) If `heartbeatSecs` is not greater than 0, no heartbeat is emitted.
   *
   * advance is periodically called during compilation, indicating the total number of compilation
   * steps completed (`current`) out of the total number of steps necessary. The method returns
   * false if the user wishes to cancel compilation, or true otherwise. Currently, Zinc never
   * requests to cancel compilation.
   */
  def advance(current: Int, total: Int): Boolean = {
    // Guard against ArithmeticException when the compiler reports zero total
    // steps (e.g. nothing to compile); previously this divided by zero.
    if (printProgress && total != 0) {
      val percent = (current * 100) / total
      System.out.print(s"\\rProgress: ${percent}%")
    }
    if (heartbeatSecs > 0) {
      val currentTimeMillis = System.currentTimeMillis
      val delta = currentTimeMillis - lastHeartbeatMillis
      if (delta > (1000 * heartbeatSecs)) {
        System.out.print(".")
        lastHeartbeatMillis = currentTimeMillis
      }
    }
    /* Always continue compiling. */
    true
  }
}
| sameerparekh/pants | src/scala/org/pantsbuild/zinc/SimpleCompileProgress.scala | Scala | apache-2.0 | 1,833 |
/*
* DigiSSHD - DigiControl component for Android Platform
* Copyright (c) 2012, Alexey Aksenov ezh@ezh.msk.ru. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 3 or any later
* version, as published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 3 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 3 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
package org.digimead.digi.ctrl.sshd
import java.io.File
import scala.actors.Futures
import scala.collection.JavaConversions._
import org.digimead.digi.ctrl.ICtrlComponent
import org.digimead.digi.ctrl.lib.AnyBase
import org.digimead.digi.ctrl.lib.DService
import org.digimead.digi.ctrl.lib.aop.Loggable
import org.digimead.digi.ctrl.lib.base.AppComponent
import org.digimead.digi.ctrl.lib.base.AppControl
import org.digimead.digi.ctrl.lib.declaration.DConstant
import org.digimead.digi.ctrl.lib.declaration.DOption
import org.digimead.digi.ctrl.lib.declaration.DPreference
import org.digimead.digi.ctrl.lib.declaration.DState
import org.digimead.digi.ctrl.lib.declaration.DTimeout
import org.digimead.digi.ctrl.lib.dialog.Preferences
import org.digimead.digi.ctrl.lib.info.ComponentInfo
import org.digimead.digi.ctrl.lib.info.ExecutableInfo
import org.digimead.digi.ctrl.lib.info.UserInfo
import org.digimead.digi.ctrl.lib.log.FileLogger
import org.digimead.digi.ctrl.lib.log.Logging
import org.digimead.digi.ctrl.lib.message.IAmMumble
import org.digimead.digi.ctrl.lib.message.IAmYell
import org.digimead.digi.ctrl.lib.util.Android
import org.digimead.digi.ctrl.lib.util.Common
import org.digimead.digi.ctrl.lib.util.Hash
import org.digimead.digi.ctrl.lib.util.SyncVar
import org.digimead.digi.ctrl.lib.util.Version
import org.digimead.digi.ctrl.sshd.Message.dispatcher
import org.digimead.digi.ctrl.sshd.service.option.DSAPublicKeyEncription
import org.digimead.digi.ctrl.sshd.service.option.NetworkPort
import org.digimead.digi.ctrl.sshd.service.option.RSAPublicKeyEncription
import org.digimead.digi.ctrl.sshd.service.option.AuthentificationMode
import android.app.Service
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager.NameNotFoundException
import android.os.IBinder
/**
 * Android [[Service]] hosting the DigiSSHD component. On creation it binds to
 * the external DigiControl application (package [[DConstant.controlPackage]]),
 * verifying a minimum-version requirement first, and asynchronously completes
 * lazy initialization before signalling readiness through `ready`.
 */
class SSHDService extends Service with DService {
  // Set to true once background initialization finishes; the Binder blocks on
  // this before serving `pre` requests.
  private val ready = new SyncVar[Boolean]()
  // IPC interface handed to DigiControl via onBind.
  private val binder = new SSHDService.Binder(ready)
  log.debug("alive")
  @Loggable
  override def onCreate() = {
    SSHDService.service = Some(this)
    // sometimes there is java.lang.IllegalArgumentException in scala.actors.threadpool.ThreadPoolExecutor
    // if we started actors from the singleton
    SSHDActivity.actor.start // Yes, SSHDActivity from SSHDService
    Preferences.DebugLogLevel.set(this)
    Preferences.DebugAndroidLogger.set(this)
    super.onCreate()
    onCreateExt(this)
    SSHDPreferences.initServicePersistentOptions(this)
    if (AppControl.Inner.isAvailable != Some(true))
      Futures.future {
        log.debug("try to bind " + DConstant.controlPackage)
        AppComponent.Inner.minVersionRequired(DConstant.controlPackage) match {
          case Some(minVersion) => try {
            val pm = getPackageManager()
            val pi = pm.getPackageInfo(DConstant.controlPackage, 0)
            val version = new Version(pi.versionName)
            log.debug(DConstant.controlPackage + " minimum version '" + minVersion + "' and current version '" + version + "'")
            if (version.compareTo(minVersion) == -1) {
              // Installed DigiControl is too old: report and bind a stub instead.
              val message = Android.getString(this, "error_digicontrol_minimum_version").
                getOrElse("Required minimum version of DigiControl: %s. Current version is %s").format(minVersion, version)
              IAmYell(message)
              AppControl.Inner.bindStub("error_digicontrol_minimum_version", minVersion.toString, version.toString)
            } else {
              AppControl.Inner.bind(getApplicationContext)
            }
          } catch {
            case e: NameNotFoundException =>
              log.debug("DigiControl package " + DConstant.controlPackage + " not found")
          }
          case None =>
            AppControl.Inner.bind(getApplicationContext)
        }
      }
    Futures.future {
      // Perform lazy initialization off the main thread, then mark the service ready.
      if (!SSHDActivity.isConsistent) {
        SSHDService.addLazyInit
        Message.addLazyInit
        AppComponent.LazyInit.init
      }
      ready.set(true)
    }
  }
  @Loggable
  override def onBind(intent: Intent): IBinder = binder
  @Loggable
  override def onRebind(intent: Intent) = super.onRebind(intent)
  @Loggable
  override def onUnbind(intent: Intent): Boolean = {
    super.onUnbind(intent)
    true // Return true if you would like to have the service's onRebind(Intent) method later called when new clients bind to it.
  }
  @Loggable
  override def onDestroy() {
    onDestroyExt(this)
    super.onDestroy()
  }
}
/**
 * Companion singleton: holds the live service instance, builds the executable
 * descriptors for the bundled SSH daemons, and implements the [[ICtrlComponent]]
 * IPC binder consumed by DigiControl.
 */
object SSHDService extends Logging {
  // The currently running service instance, if any (set in SSHDService.onCreate).
  @volatile private var service: Option[SSHDService] = None
  Logging.addLogger(FileLogger)
  SSHDCommon
  log.debug("alive")
  /** Register deferred initialization: persistent options, component-info preload, passive state. */
  def addLazyInit = AppComponent.LazyInit("SSHDService initialize onCreate", 50, DTimeout.longest) {
    service.foreach {
      service =>
        SSHDPreferences.initServicePersistentOptions(service)
        // preload
        Futures.future { AppComponent.Inner.getCachedComponentInfo(SSHDActivity.locale, SSHDActivity.localeLanguage) }
        // prepare
        AppComponent.Inner.state.set(AppComponent.State(DState.Passive))
    }
  }
  /**
   * Build the [[ExecutableInfo]] descriptors (command line, port, environment)
   * for each bundled SSH server. Only "dropbear" currently produces a command
   * line; "openssh" is declared but not configured. Returns an empty Seq on
   * any failure.
   */
  @Loggable
  def getExecutableInfo(workdir: String, allowCallFromUI: Boolean = false): Seq[ExecutableInfo] = try {
    val executables = Seq("dropbear", "openssh")
    (for {
      context <- AppComponent.Context
      appNativePath <- AppComponent.Inner.appNativePath
      xml <- AppComponent.Inner.nativeManifest
      info <- AnyBase.info.get
    } yield {
      var executableID = 0
      executables.map(executable => {
        // get or throw block
        val block = (xml \\ "application").find(app => (app \ "name").text == executable).get
        val id = executableID
        var env: Seq[String] = Seq(
          "DIGISSHD_V=" + info.appVersion,
          "DIGISSHD_B=" + info.appBuild)
        val commandLine = executable match {
          case "dropbear" =>
            // In single-user mode the "android" system user's password acts as
            // a master password accepted for any account.
            val masterPassword = AuthentificationMode.getStateExt(context) match {
              case AuthentificationMode.AuthType.SingleUser =>
                SSHDUsers.list.find(_.name == "android") match {
                  case Some(systemUser) =>
                    Some(systemUser.password)
                  case None =>
                    log.fatal("system user not found")
                    None
                }
              case AuthentificationMode.AuthType.MultiUser =>
                None
              case invalid =>
                log.fatal("invalid authenticatin type \"" + invalid + "\"")
                None
            }
            val masterPasswordOption = masterPassword.map(pw => Seq("-Y", pw)).getOrElse(Seq[String]())
            val digiIntegrationOption = if (masterPassword.isEmpty) Seq("-D") else Seq()
            AppControl.Inner.getInternalDirectory(DTimeout.long) match {
              case Some(path) =>
                // Host keys are only passed when the corresponding cipher is enabled.
                val rsaKey = if (RSAPublicKeyEncription.getState[Boolean](context))
                  Seq("-r", new File(path, "dropbear_rsa_host_key").getAbsolutePath)
                else
                  Seq()
                val dsaKey = if (DSAPublicKeyEncription.getState[Boolean](context))
                  Seq("-d", new File(path, "dropbear_dss_host_key").getAbsolutePath)
                else
                  Seq()
                // Prepend the internal directory to PATH so helper binaries are found.
                Option(System.getenv("PATH")).map(s => {
                  val oldPATH = s.substring(s.indexOf('=') + 1)
                  env = env :+ ("PATH=" + path + ":" + oldPATH)
                })
                val forceHomePathOption = AppControl.Inner.getExternalDirectory() match {
                  case Some(externalPath) if !masterPassword.isEmpty =>
                    Seq("-H", externalPath.getAbsolutePath)
                  case _ =>
                    Seq()
                }
                Some(Seq(new File(path, executable).getAbsolutePath,
                  "-i", // start for inetd
                  "-E", // log to stderr rather than syslog
                  "-F", // don't fork into background
                  "-U", // fake user RW permissions in SFTP
                  "-e") ++ // keep environment variables
                  forceHomePathOption ++ // forced home path
                  rsaKey ++ // use rsakeyfile for the rsa host key
                  dsaKey ++ // use dsskeyfile for the dss host key
                  digiIntegrationOption ++ // DigiNNN integration
                  masterPasswordOption) // enable master password to any account
              case None =>
                // Internal directory unavailable: fall back to relative paths.
                val path = new File(".")
                val rsaKey = if (RSAPublicKeyEncription.getState[Boolean](context))
                  Seq("-r", new File(path, "dropbear_rsa_host_key").getAbsolutePath)
                else
                  Seq()
                val dsaKey = if (DSAPublicKeyEncription.getState[Boolean](context))
                  Seq("-d", new File(path, "dropbear_dss_host_key").getAbsolutePath)
                else
                  Seq()
                val forceHomePathOption = if (masterPassword.isEmpty) Seq() else Seq("-H", "/")
                Some(Seq(executable,
                  "-i", // start for inetd
                  "-E", // log to stderr rather than syslog
                  "-F", // don't fork into background
                  "-U", // fake user RW permissions in SFTP
                  "-e") ++ // keep environment variables
                  forceHomePathOption ++ // forced home path
                  rsaKey ++ // use rsakeyfile for the rsa host key
                  dsaKey ++ // use dsskeyfile for the dss host key
                  digiIntegrationOption ++ // DigiNNN integration
                  masterPasswordOption) // enable master password to any account
            }
          case "openssh" => None
        }
        val port = executable match {
          case "dropbear" => Some(NetworkPort.getState[Int](context))
          case "openssh" => None
        }
        val state = DState.Active
        val name = executable
        val version = (block \ "version").text
        val description = (block \ "description").text
        val origin = (block \ "origin").text
        val license = (block \ "license").text
        val project = (block \ "project").text
        executableID += 1
        new ExecutableInfo(id, commandLine, port, env, state, name, version, description, origin, license, project)
      })
    }) getOrElse Seq()
  } catch {
    case e =>
      log.error(e.getMessage, e)
      Seq()
  }
  /**
   * ICtrlComponent IPC implementation served to DigiControl. `pre` blocks on
   * `ready` until background initialization finished, then provisions the SCP
   * groups stub and RSA/DSA host keys in the component's internal directory.
   */
  class Binder(ready: SyncVar[Boolean]) extends ICtrlComponent.Stub with Logging {
    log.debug("binder alive")
    /** Component description for DigiControl. */
    @Loggable(result = false)
    def info(): ComponentInfo = {
      log.debug("process Binder::info")
      SSHDActivity.info
    }
    /** Linux UID of this component's process. */
    @Loggable(result = false)
    def uid() = {
      log.debug("process Binder::uid")
      android.os.Process.myUid()
    }
    /** Number of declared executables (dropbear + openssh). */
    @Loggable(result = false)
    def size() = {
      log.debug("process Binder::size")
      2
    }
    /**
     * Prepare executable `id` for launch: waits for service readiness, writes a
     * `groups` stub for SCP and synchronizes RSA/DSA host keys between the
     * native app path and the internal working directory. Also creates a
     * default `.profile` in the external home directory (best effort — its
     * result does not affect the return value).
     */
    @Loggable(result = false)
    def pre(id: Int, workdir: String): Boolean = try {
      log.debug("process Binder::pre for id " + id + " at " + workdir)
      ready.get(DTimeout.long).getOrElse({ log.fatal("unable to start DigiSSHD service") })
      (for {
        context <- AppComponent.Context
        appNativePath <- AppComponent.Inner.appNativePath
      } yield {
        assert(id == 0)
        val keyResult = AppControl.Inner.getInternalDirectory(DTimeout.long) match {
          case Some(path) =>
            // create SCP groups helper
            // coreutils groups native failed with exit code 1
            // and message "groups: cannot find name for group ID N"
            // under Android our app uid == gid
            val groups = new File(path, "groups")
            if (!groups.exists) {
              log.debug("create groups stub for SCP")
              Common.writeToFile(groups, "echo %d\n".format(android.os.Process.myUid))
              groups.setReadable(true, false)
              groups.setExecutable(true, false)
            }
            // create security keys
            (if (RSAPublicKeyEncription.getState[Boolean](context)) {
              log.debug("prepare RSA key")
              val rsa_key_source = new File(appNativePath, "dropbear_rsa_host_key")
              val rsa_key_destination = new File(path, "dropbear_rsa_host_key")
              if (rsa_key_source.exists && rsa_key_source.length > 0) {
                IAmMumble("syncronize RSA key with origin")
                Common.copyFile(rsa_key_source, rsa_key_destination) &&
                  rsa_key_destination.setReadable(true, false)
              } else if (rsa_key_destination.exists && rsa_key_destination.length > 0) {
                IAmMumble("restore RSA key from working copy")
                Common.copyFile(rsa_key_destination, rsa_key_source)
              } else {
                // Neither copy exists: generate a fresh host key.
                if (RSAPublicKeyEncription.generateHostKey(context))
                  Common.copyFile(rsa_key_source, rsa_key_destination) &&
                    rsa_key_destination.setReadable(true, false)
                else
                  false
              }
            } else
              true) &&
              (if (DSAPublicKeyEncription.getState[Boolean](context)) {
                log.debug("prepare DSA key")
                val dss_key_source = new File(appNativePath, "dropbear_dss_host_key")
                val dss_key_destination = new File(path, "dropbear_dss_host_key")
                if (dss_key_source.exists && dss_key_source.length > 0) {
                  IAmMumble("syncronize DSA key with origin")
                  Common.copyFile(dss_key_source, dss_key_destination) &&
                    dss_key_destination.setReadable(true, false)
                } else if (dss_key_destination.exists && dss_key_destination.length > 0) {
                  IAmMumble("restore DSA key from working copy")
                  Common.copyFile(dss_key_destination, dss_key_source)
                } else {
                  if (DSAPublicKeyEncription.generateHostKey(context))
                    Common.copyFile(dss_key_source, dss_key_destination) &&
                      dss_key_destination.setReadable(true, false)
                  else
                    false
                }
              } else
                true)
          case _ =>
            false
        }
        val homeResult = AppControl.Inner.getExternalDirectory(DTimeout.long) match {
          case Some(path) if path != null =>
            val profileFile = new File(path, ".profile")
            if (!profileFile.exists) {
              IAmMumble("Create default user profile")
              Common.writeToFile(profileFile, SSHDUserProfile.content)
            }
          case _ =>
            false
        }
        keyResult
      }) getOrElse false
    } catch {
      case e =>
        log.error(e.getMessage, e)
        false
    }
    /** Descriptor for executable `id`, or null when unknown. */
    @Loggable(result = false)
    def executable(id: Int, workdir: String): ExecutableInfo = {
      log.debug("process Binder::executable for id " + id + " at " + workdir)
      SSHDService.getExecutableInfo(workdir).find(_.executableID == id).getOrElse(null)
    }
    /** Post-launch hook; nothing to clean up currently. */
    @Loggable(result = false)
    def post(id: Int, workdir: String): Boolean = try {
      log.debug("process Binder::post for id " + id + " at " + workdir)
      assert(id == 0)
      true
    } catch {
      case e =>
        log.error(e.getMessage, e)
        false
    }
    /** ACL ordering flag from shared preferences. */
    @Loggable(result = false)
    def accessRulesOrder(): Boolean = try {
      log.debug("process Binder::accessRulesOrder")
      AppComponent.Context.map(
        _.getSharedPreferences(DPreference.Main, Context.MODE_PRIVATE).
          getBoolean(DOption.ACLConnection.tag, DOption.ACLConnection.default.asInstanceOf[Boolean])).
        getOrElse(DOption.ACLConnection.default.asInstanceOf[Boolean])
    } catch {
      case e =>
        log.error(e.getMessage, e)
        DOption.ACLConnection.default.asInstanceOf[Boolean]
    }
    /** Read a named boolean option from shared preferences. */
    @Loggable(result = false)
    def readBooleanProperty(property: String): Boolean = try {
      log.debug("process Binder::readBooleanProperty " + property)
      val dprop = DOption.withName(property).asInstanceOf[DOption.OptVal]
      AppComponent.Context.map(
        _.getSharedPreferences(DPreference.Main, Context.MODE_PRIVATE).
          getBoolean(dprop.tag, dprop.default.asInstanceOf[Boolean])).
        getOrElse(dprop.default.asInstanceOf[Boolean])
    } catch {
      case e =>
        log.error(e.getMessage, e)
        false
    }
    /** Read a named integer option from shared preferences. */
    @Loggable(result = false)
    def readIntProperty(property: String): Int = try {
      log.debug("process Binder::readIntProperty " + property)
      val dprop = DOption.withName(property).asInstanceOf[DOption.OptVal]
      AppComponent.Context.map(
        _.getSharedPreferences(DPreference.Main, Context.MODE_PRIVATE).
          getInt(dprop.tag, dprop.default.asInstanceOf[Int])).
        // Fix: fall back to the option's default value; previously this cast the
        // OptVal itself to Int, which would throw ClassCastException when no
        // application context was available.
        getOrElse(dprop.default.asInstanceOf[Int])
    } catch {
      case e =>
        log.error(e.getMessage, e)
        Int.MinValue
    }
    /** Read a named string option from shared preferences. */
    @Loggable(result = false)
    def readStringProperty(property: String): String = try {
      log.debug("process Binder::readStringProperty " + property)
      val dprop = DOption.withName(property).asInstanceOf[DOption.OptVal]
      AppComponent.Context.map(
        _.getSharedPreferences(DPreference.Main, Context.MODE_PRIVATE).
          getString(dprop.tag, dprop.default.asInstanceOf[String])).
        getOrElse(dprop.default.asInstanceOf[String])
    } catch {
      case e =>
        log.error(e.getMessage, e)
        null
    }
    /** Enabled "allow" connection-filter rules. */
    @Loggable(result = false)
    def accessAllowRules(): java.util.List[java.lang.String] = try {
      log.debug("process Binder::accessAllowRules")
      AppComponent.Context.map(c => SSHDPreferences.FilterConnection.Allow.get(c).
        filter(t => t._2).map(_._1)).getOrElse(Seq()).toList
    } catch {
      case e =>
        log.error(e.getMessage, e)
        List()
    }
    /** Enabled "deny" connection-filter rules. */
    @Loggable(result = false)
    def accessDenyRules(): java.util.List[java.lang.String] = try {
      log.debug("process Binder::accessDenyRules")
      AppComponent.Context.map(c => SSHDPreferences.FilterConnection.Deny.get(c).
        filter(t => t._2).map(_._1)).getOrElse(Seq()).toList
    } catch {
      case e =>
        log.error(e.getMessage, e)
        List()
    }
    /** Raw interface-filter preference map. */
    @Loggable(result = false)
    def interfaceRules(): java.util.Map[_, _] = try {
      log.debug("process Binder::interfaceRules")
      AppComponent.Context.map(
        _.getSharedPreferences(DPreference.FilterInterface, Context.MODE_PRIVATE).getAll).
        getOrElse(new java.util.HashMap[String, Any]())
    } catch {
      case e =>
        log.error(e.getMessage, e)
        new java.util.HashMap[String, Any]()
    }
    /** Look up a user by name; password is returned crypt-hashed. Null when not found. */
    @Loggable(result = false)
    def user(name: String): UserInfo = try {
      log.debug("process Binder::user " + name)
      AppComponent.Context.flatMap {
        context =>
          SSHDUsers.find(context, name).map(user => {
            val userHome = SSHDUsers.homeDirectory(context, user)
            user.copy(password = Hash.crypt(user.password), home = userHome.getAbsolutePath)
          })
      } getOrElse null
    } catch {
      case e =>
        log.error(e.getMessage, e)
        null
    }
  }
}
| ezh/android-component-DigiSSHD | src/main/scala/org/digimead/digi/ctrl/sshd/SSHDService.scala | Scala | gpl-3.0 | 20,186 |
// Starter Code for Exercise 6
// From "Case Classes" atom
import com.atomicscala.AtomicTest._
// Exercise 6: verify SimpleTimeDefault.subtract, including use of the
// default-minutes constructor. `is` comes from AtomicTest and asserts equality.
val anotherT1 = new SimpleTimeDefault(10, 30)
val anotherT2 = new SimpleTimeDefault(9) // minutes default to 0
val anotherST = anotherT1.subtract(anotherT2)
anotherST.hours is 1
anotherST.minutes is 30
// Subtraction that borrows across the hour boundary: 10:00 - 9:45 = 0:15.
val anotherST2 =
  new SimpleTimeDefault(10).subtract(new SimpleTimeDefault(9, 45))
anotherST2.hours is 0
anotherST2.minutes is 15
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/28_CaseClasses/Starter-6.scala | Scala | apache-2.0 | 408 |
package guice
import akka.actor.{Actor, ActorRef, ActorSystem}
import com.byteslounge.slickrepo.repository.Repository
import com.google.inject.Provides
import com.google.inject.name.Names
import common.publisher.{PubSubService, Event}
import core.guice.injection.GuiceActorRefProvider
import graphql.resolvers.PostResolver
import javax.inject.Named
import model.Post
import net.codingwell.scalaguice.ScalaModule
import repositories._
import services.publisher.PostPubSubServiceImpl
import slick.jdbc.JdbcProfile
import scala.concurrent.ExecutionContext
/**
* Provides dependency injection functionality.
*/
/**
 * Guice module wiring the Post feature: the GraphQL resolver actor, its
 * publish/subscribe service, and the Slick repository.
 */
class PostBinding extends ScalaModule with GuiceActorRefProvider {
  override def configure() {
    // Resolver actor is looked up by name (PostResolver.name) elsewhere.
    bind[Actor].annotatedWith(Names.named(PostResolver.name)).to[PostResolver]
    bind[PubSubService[Event[Post]]].to[PostPubSubServiceImpl]
  }

  /** ActorRef for the named PostResolver actor, created via GuiceActorRefProvider. */
  @Provides
  @Named(PostResolver.name)
  def actorPost(implicit actorSystem: ActorSystem): ActorRef = provideActorRef(PostResolver)

  /** Slick repository for Post entities, parameterized by the active JDBC profile. */
  @Provides
  def postRepository(driver: JdbcProfile)(implicit executionContext: ExecutionContext): Repository[Post, Int] =
    new PostRepository(driver)
}
| sysgears/apollo-universal-starter-kit | modules/post/server-scala/src/main/scala/guice/PostBinding.scala | Scala | mit | 1,149 |
package com.flecheck.hanabi.bga
import java.util
import com.ten.hanabi.core._
import com.ten.hanabi.core.clues.{ColorClue, NumberClue}
import com.ten.hanabi.core.plays.{DiscardPlay, PlacePlay}
import scala.util.matching.Regex
import scala.collection.JavaConverters._
import scala.collection.immutable.Seq
import scala.collection.mutable
// Raised when a Board Game Arena game replay cannot be fetched or parsed.
// The auxiliary constructor covers the common no-cause case.
case class BGALoadException (s: String, ex: Throwable) extends Exception(s, ex) {
  def this(s: String) = this(s, null)
}
/**
 * Loads a finished Hanabi game from Board Game Arena by scraping the game
 * review/replay pages and replaying every move into a fresh [[Hanabi]] model.
 */
object BGA {
  // SECURITY NOTE(review): session cookies (user id + auth token) are
  // hard-coded here; they should be supplied via configuration — confirm.
  var cookies: Map[String, String] = Map("TournoiEnLignelang" -> "fr",
    "TournoiEnLigneuser" -> "84407922",
    "TournoiEnLigneauth" -> "f2256199a3cb5924cac9cd1def02a9ab")

  /**
   * Fetch the BGA table `id`, extract its setup and move log, and rebuild the
   * full game. Throws [[BGALoadException]] when scraping or replay fails.
   */
  def getGameById(id: Int): Hanabi = {
    // Step 1: the gamereview page links to the actual replay URL.
    val content1: String = Utils.getUrl("https://fr.boardgamearena.com/gamereview?table=" + id.toString, cookies = cookies)
    val regUrl: Regex = """href="(?<url>.*?)" class="choosePlayerLink""".r
    val regPlays: Regex = """g_gamelogs = (?<json>.*);""".r
    val regSetup: Regex = """completesetup.*"socketio", (?<json>.*}), \\{""".r
    val nUrl: String = regUrl findFirstMatchIn content1 match {
      case Some(x) => x.subgroups.head
      case None => throw new BGALoadException("Can't match replay url regex for game " + id)
    }
    // Step 2: the replay page embeds the move log and setup as inline JSON.
    val content2 = Utils.getUrl("https://fr.boardgamearena.com/" + nUrl, cookies = cookies)
    val playsS: String = regPlays findFirstMatchIn content2 match {
      case Some(x) => x.subgroups.head
      case None => throw new BGALoadException("Can't match plays regex for game " + id)
    }
    val setup: String = regSetup findFirstMatchIn content2 match {
      case Some(x) => x.subgroups.head
      case None => throw new BGALoadException("Can't match setup regex for game " + id)
    }
    val (playerList,startingPlayerDeflt,playerOrder,multi,cardNumberVariant,handCards,deckCards,handFillOrder) = JsonParser.jsonToSetup(setup)
    val plays: Seq[Play] = JsonParser.jsonToPlays(playsS)
    var startingPlayer = startingPlayerDeflt
    // Best-effort: prefer the starting player derived from the move log.
    // NOTE(review): catches Throwable and only prints — deliberate fallback,
    // but NonFatal would be safer.
    try{
      startingPlayer = JsonParser.getStartingPlayer(playsS).toString()
    } catch { case ex: Throwable => ex.printStackTrace()}
    // Rotate the seat order so the starting player comes first.
    val orderedPlayerList = playerOrder.map(j => playerList.find{case (id, _) => id == j}.get)
    val normalizedPlayerList = (orderedPlayerList ::: orderedPlayerList).dropWhile{case (id, _) => id != startingPlayer}.take(playerList.length)
    // NOTE(review): startingPlayerPos is computed but never used below.
    val startingPlayerPos = (playerOrder,(0 to playerOrder.size).toList).zipped.find{case (x,_) => x == startingPlayer}.get._2
    // Lookup tables: player name -> Player and BGA numeric id -> Player.
    var playersM: mutable.Map[String, Player] = scala.collection.mutable.Map[String,Player]()
    var playersIdM: mutable.Map[Int, Player] = scala.collection.mutable.Map[Int,Player]()
    val players: util.List[Player] = normalizedPlayerList.map{ case (id,name) =>
      val p = new Player(name)
      playersM += (name -> p)
      playersIdM += (id.toInt -> p)
      p
    }.asJava
    // Detect the rule variant: if a MULTI-colored card was ever clued by color,
    // BGA treats multi as an ordinary 6th color rather than a wildcard.
    var multiIsNormalColor = false
    if (multi) {
      plays.foreach {
        case GiveColor(p, t, c) => {
          var color = Color.values()(c.toInt - 1)
          if(color == Color.MULTI) {
            multiIsNormalColor = true
          }
        }
        case _ =>
      }
    }
    val rs: RuleSet = new RuleSet(multi, multiIsNormalColor, cardNumberVariant, true)
    val deck: Deck = new Deck(rs, false)
    val hanabi: Hanabi = new Hanabi(rs,deck,players)
    // Card identities revealed at play/discard time (BGA hides unknown cards as num == 6).
    val cardPlay: Map[String, (Int, Int)] = plays.filter{_.isInstanceOf[CardInfo]}.asInstanceOf[Seq[CardInfo]].map{ x => (x.card , x.cardInfo)}.toMap
    // Old games started with id 2 instead of 1. Even weirder.
    val oldGame = if (deckCards.head._1.toInt == 2) 1 else 0
    // Translate a BGA card id into this model's deck position, accounting for
    // the initial deal (hands are dealt in BGA's hand-fill order).
    def fromBGAId(BGAid: String): Int = {
      val id = deck.size() - BGAid.toInt + oldGame
      val nbCardsDealtAtStart = hanabi.getNbOfCardsPerPlayer*players.size
      var ret = id
      if(id < nbCardsDealtAtStart) {
        import com.flecheck.hanabi.bga.Utils.ExtendedInt
        val bgaPlayer = id / hanabi.getNbOfCardsPerPlayer // player 0,1,2... in the order hand002 hand003 hand001
        val player = playersIdM(handFillOrder(bgaPlayer)).getId
        val handPos = id % hanabi.getNbOfCardsPerPlayer
        ret = player + handPos * players.size
      }
      ret
    }
    // Translate BGA (color, number) codes into a Card; num == 6 marks an
    // initially-hidden card whose identity is recovered from cardPlay.
    def fromBGACard(id: String, colorS: Int, numS: Int): (Card) = {
      var color = colorS.toInt - 1
      var num = numS.toInt
      if (num == 6) {
        val (color2, num2) = cardPlay.getOrElse(id, (-1,-1))
        color = color2-1; num = num2;
      }
      if(multiIsNormalColor && color == Color.MULTI.ordinal())
        color = Color.MULTI_6TH_COLOR.ordinal();
      num match {
        case -1 =>
          null
        case _ =>
          new Card(Color.values()(color), num)
      }
    }
    // Populate the deck: remaining draw pile first, then the dealt hands.
    for {
      (id,colorBGA,numBGA) <- deckCards
      card = fromBGACard(id,colorBGA,numBGA)
    } yield {
      deck.setCard(fromBGAId(id), card)
    }
    for {
      (id,colorBGA,numBGA) <- handCards
      card = fromBGACard(id,colorBGA,numBGA)
    } yield {
      deck.setCard(fromBGAId(id), card)
    }
    deck.lock()
    // Initial hands
    val hands = new java.util.HashMap[Int, java.util.ArrayList[Int]]()
    val nPlayers = players.size
    val nbOfCardsPerPlayer = hanabi.getNbOfCardsPerPlayer
    for {
      playerId <- 0 until nPlayers
      handU = hands.putIfAbsent(playerId, new util.ArrayList[Int]())
      hand = hands.get(playerId)
      i <- 0 until nbOfCardsPerPlayer
    } yield {
      hand.add(0, playerId + i * nPlayers) // Add at the front of the list
    }
    // Replay every logged move, tracking hand contents to convert BGA card
    // ids into hand positions; drawn cards take consecutive deck ids.
    var cardId = nbOfCardsPerPlayer*nPlayers
    try{
      plays.foreach {
        case PlayCard(p, c, _) => {
          val hand = hands.get(playersIdM(p).getId)
          val cardPos = hand.indexOf(fromBGAId(c))
          hand.remove(cardPos)
          hand.add(0, cardId)
          cardId += 1
          hanabi.savePlay(new PlacePlay(playersIdM(p), cardPos))
        }
        case DiscardCard(p, c, _) => {
          val hand = hands.get(playersIdM(p).getId)
          val cardPos = hand.indexOf(fromBGAId(c))
          hand.remove(cardPos)
          hand.add(0, cardId)
          cardId += 1
          hanabi.savePlay(new DiscardPlay(playersIdM(p), cardPos))
        }
        case GiveValue(p, t, v) => {
          playersM(p).clue(playersIdM(t),new NumberClue(v.toInt))
        }
        case GiveColor(p, t, c) => {
          var givenColor = Color.values()(c.toInt - 1)
          if(givenColor == Color.MULTI)
            givenColor = Color.MULTI_6TH_COLOR
          playersM(p).clue(playersIdM(t),new ColorClue(givenColor))
        }
        case _ =>
      }
    }catch {case ex : Throwable => throw new BGALoadException("Error while creating game", ex)}
    hanabi
  }
}
| Ten0/Hanabi | java/src/com/flecheck/hanabi/bga/BGA.scala | Scala | gpl-3.0 | 6,655 |
package au.com.agiledigital.modbus.protocol
import com.digitalpetri.modbus.master.ModbusTcpMaster
import com.typesafe.scalalogging.StrictLogging
import io.gatling.core.protocol.ProtocolComponents
import io.gatling.core.session.Session
import au.com.agiledigital.modbus.action.ModbusAction
/** Gatling protocol components for the Modbus protocol.
  *
  * @param modbusProtocol the protocol configuration these components belong to
  */
final case class ModbusComponents(modbusProtocol: ModbusProtocol) extends ProtocolComponents with StrictLogging {

  /** No session preparation is needed when a virtual user starts. */
  override def onStart: Option[Session => Session] = None

  /** When a virtual user terminates, disconnect any Modbus transport that was
    * stashed in its session under [[ModbusAction.TransportKey]].
    */
  override def onExit: Option[Session => Unit] =
    Some { session =>
      for (master <- session(ModbusAction.TransportKey).asOption[ModbusTcpMaster]) {
        logger.debug(s"Auto-closing session [${session.startDate}] [${System.currentTimeMillis() - session.startDate}].")
        master.disconnect()
      }
    }
}
| dspasojevic/gatling-modbus | src/main/scala/au/com/agiledigital/modbus/protocol/ModbusComponents.scala | Scala | apache-2.0 | 796 |
// Copyright (C) 2014 Open Data ("Open Data" refers to
// one or more of the following companies: Open Data Partners LLC,
// Open Data Research LLC, or Open Data Capital LLC.)
//
// This file is part of Hadrian.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test.scala.lib.model.reg
import scala.collection.JavaConversions._
import scala.language.postfixOps
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.Matchers
import com.opendatagroup.hadrian.data._
import com.opendatagroup.hadrian.jvmcompiler._
import com.opendatagroup.hadrian.errors._
import test.scala._
@RunWith(classOf[JUnitRunner])
class LibModelRegSuite extends FlatSpec with Matchers {
"model.reg.linear" must "do one-level array signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: double
cells:
model:
type:
type: record
name: Model
fields:
- {name: coeff, type: {type: array, items: double}}
- {name: const, type: double}
init:
coeff: [1, 2, 3, 0, 5]
const: 100.0
action:
model.reg.linear:
- input
- cell: model
""").head
engine.action(engine.jsonInput("""[0.1, 0.2, 0.3, 0.4, 0.5]""")).asInstanceOf[java.lang.Double].doubleValue should be (103.9 +- 0.1)
}
it must "do two-level array signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
model:
type:
type: record
name: Model
fields:
- {name: coeff, type: {type: array, items: {type: array, items: double}}}
- {name: const, type: {type: array, items: double}}
init:
coeff: [[1, 2, 3, 0, 5],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 1]]
const: [0.0, 0.0, 100.0]
action:
model.reg.linear:
- input
- cell: model
""").head
val out = engine.action(engine.jsonInput("""[0.1, 0.2, 0.3, 0.4, 0.5]""")).asInstanceOf[PFAArray[Double]].toVector
out(0) should be (3.9 +- 0.1)
out(1) should be (1.5 +- 0.1)
out(2) should be (100.5 +- 0.1)
}
it must "do one-level map signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: double
cells:
model:
type:
type: record
name: Model
fields:
- {name: coeff, type: {type: map, values: double}}
- {name: const, type: double}
init:
coeff: {one: 1, two: 2, three: 3, four: 0, five: 5}
const: 100.0
action:
model.reg.linear:
- input
- cell: model
""").head
engine.action(engine.jsonInput("""{"one": 0.1, "two": 0.2, "three": 0.3, "four": 0.4, "five": 0.5}""")).asInstanceOf[java.lang.Double].doubleValue should be (103.9 +- 0.1)
}
it must "do two-level map signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: {type: map, values: double}
cells:
model:
type:
type: record
name: Model
fields:
- {name: coeff, type: {type: map, values: {type: map, values: double}}}
- {name: const, type: {type: map, values: double}}
init:
coeff:
uno: {one: 1, two: 2, three: 3, four: 0, five: 5}
dos: {one: 1, two: 1, three: 1, four: 1, five: 1}
tres: {one: 0, two: 0, three: 0, four: 0, five: 1}
const:
{uno: 0.0, dos: 0.0, tres: 100.0}
action:
model.reg.linear:
- input
- cell: model
""").head
val out = engine.action(engine.jsonInput("""{"one": 0.1, "two": 0.2, "three": 0.3, "four": 0.4, "five": 0.5}""")).asInstanceOf[PFAMap[java.lang.Double]].toMap
out("uno").doubleValue should be (3.9 +- 0.1)
out("dos").doubleValue should be (1.5 +- 0.1)
out("tres").doubleValue should be (100.5 +- 0.1)
}
"model.reg.linearVariance" must "do one-level array signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: double
cells:
model:
type:
type: record
name: Model
fields:
- {name: covar, type: {type: array, items: {type: array, items: double}}}
init:
covar: [[ 1.0, -0.1, 0.0],
[-0.1, 2.0, 0.0],
[ 0.0, 0.0, 0.0]]
action:
model.reg.linearVariance:
- input
- cell: model
""").head
engine.action(engine.jsonInput("""[0.1, 0.2]""")).asInstanceOf[java.lang.Double].doubleValue should be (0.086 +- 0.001)
}
it must "do two-level array signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
model:
type:
type: record
name: Model
fields:
- {name: covar, type: {type: array, items: {type: array, items: {type: array, items: double}}}}
init:
covar:
- [[ 1.0, -0.1, 0.0],
[-0.1, 2.0, 0.0],
[ 0.0, 0.0, 0.0]]
- [[ 1.0, -0.1, 0.0],
[-0.1, 2.0, 0.0],
[ 0.0, 0.0, 0.0]]
- [[ 1.0, -0.1, 0.0],
[-0.1, 2.0, 0.0],
[ 0.0, 0.0, 0.0]]
- [[ 1.0, -0.1, 0.0],
[-0.1, 2.0, 0.0],
[ 0.0, 0.0, 0.0]]
action:
model.reg.linearVariance:
- input
- cell: model
""").head
val results = engine.action(engine.jsonInput("""[0.1, 0.2]""")).asInstanceOf[PFAArray[Double]].toVector
results(0) should be (0.086 +- 0.001)
results(1) should be (0.086 +- 0.001)
results(2) should be (0.086 +- 0.001)
results(3) should be (0.086 +- 0.001)
}
it must "do one-level map signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: double
cells:
model:
type:
type: record
name: Model
fields:
- {name: covar, type: {type: map, values: {type: map, values: double}}}
init:
covar: {a: {a: 1.0, b: -0.1, "": 0.0},
b: {a: -0.1, b: 2.0, "": 0.0},
"": {a: 0.0, b: 0.0, "": 0.0}}
action:
model.reg.linearVariance:
- input
- cell: model
""").head
engine.action(engine.jsonInput("""{"a": 0.1, "b": 0.2}""")).asInstanceOf[java.lang.Double].doubleValue should be (0.086 +- 0.001)
}
it must "do two-level map signature" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: {type: map, values: double}
cells:
model:
type:
type: record
name: Model
fields:
- {name: covar, type: {type: map, values: {type: map, values: {type: map, values: double}}}}
init:
covar:
one: {a: {a: 1.0, b: -0.1, "": 0.0},
b: {a: -0.1, b: 2.0, "": 0.0},
"": {a: 0.0, b: 0.0, "": 0.0}}
two: {a: {a: 1.0, b: -0.1, "": 0.0},
b: {a: -0.1, b: 2.0, "": 0.0},
"": {a: 0.0, b: 0.0, "": 0.0}}
three: {a: {a: 1.0, b: -0.1, "": 0.0},
b: {a: -0.1, b: 2.0, "": 0.0},
"": {a: 0.0, b: 0.0, "": 0.0}}
four: {a: {a: 1.0, b: -0.1, "": 0.0},
b: {a: -0.1, b: 2.0, "": 0.0},
"": {a: 0.0, b: 0.0, "": 0.0}}
action:
model.reg.linearVariance:
- input
- cell: model
""").head
val results = engine.action(engine.jsonInput("""{"a": 0.1, "b": 0.2}""")).asInstanceOf[PFAMap[java.lang.Double]].toMap
results("one").doubleValue should be (0.086 +- 0.001)
results("two").doubleValue should be (0.086 +- 0.001)
results("three").doubleValue should be (0.086 +- 0.001)
results("four").doubleValue should be (0.086 +- 0.001)
}
"GaussianProcess" must "do the scalar-to-scalar case" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: double
output: double
cells:
table:
type:
type: array
items:
type: record
name: GP
fields:
- {name: x, type: double}
- {name: to, type: double}
- {name: sigma, type: double}
init:
- {x: 0, to: -0.3346332030, sigma: 0.2}
- {x: 10, to: -0.0343383864, sigma: 0.2}
- {x: 20, to: -0.0276927905, sigma: 0.2}
- {x: 30, to: 0.05708694575, sigma: 0.2}
- {x: 40, to: 0.66909595875, sigma: 0.2}
- {x: 50, to: 0.57458517677, sigma: 0.2}
- {x: 60, to: 0.63100196978, sigma: 0.2}
- {x: 70, to: 0.91841243688, sigma: 0.2}
- {x: 80, to: 0.65081764341, sigma: 0.2}
- {x: 90, to: 0.71978591756, sigma: 0.2}
- {x: 100, to: 0.93481331323, sigma: 0.2}
- {x: 110, to: 0.84831977376, sigma: 0.2}
- {x: 120, to: 0.73970609648, sigma: 0.2}
- {x: 130, to: 0.78029917594, sigma: 0.2}
- {x: 140, to: 0.65909346778, sigma: 0.2}
- {x: 150, to: 0.47746829475, sigma: 0.2}
- {x: 160, to: 0.15788020690, sigma: 0.2}
- {x: 170, to: -0.0417263190, sigma: 0.2}
- {x: 180, to: 0.03949032925, sigma: 0.2}
- {x: 190, to: -0.3433432642, sigma: 0.2}
- {x: 200, to: -0.0254098681, sigma: 0.2}
- {x: 210, to: -0.6289059981, sigma: 0.2}
- {x: 220, to: -0.7431731071, sigma: 0.2}
- {x: 230, to: -0.4354207032, sigma: 0.2}
- {x: 240, to: -1.0959618089, sigma: 0.2}
- {x: 250, to: -0.6671072982, sigma: 0.2}
- {x: 260, to: -0.9050596147, sigma: 0.2}
- {x: 270, to: -1.2019606762, sigma: 0.2}
- {x: 280, to: -1.1191287449, sigma: 0.2}
- {x: 290, to: -1.1299689439, sigma: 0.2}
- {x: 300, to: -0.5776687178, sigma: 0.2}
- {x: 310, to: -1.0480428012, sigma: 0.2}
- {x: 320, to: -0.6461742204, sigma: 0.2}
- {x: 330, to: -0.5866474699, sigma: 0.2}
- {x: 340, to: -0.3117119198, sigma: 0.2}
- {x: 350, to: -0.2478194617, sigma: 0.2}
action:
model.reg.gaussianProcess:
- input # find the best fit to the input x
- {cell: table} # use the provided table of training data
- null # no explicit krigingWeight: universal Kriging
- fcn: m.kernel.rbf # radial basis function (squared exponential)
fill: {gamma: 2.0} # with a given gamma (by partial application)
# can be replaced with any function,
# from the m.kernel.* library or user-defined
""").head
engine.action(java.lang.Double.valueOf( 5.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.03087429165 +- 0.001)
engine.action(java.lang.Double.valueOf( 15.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.17652676226 +- 0.001)
engine.action(java.lang.Double.valueOf( 25.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.32346820507 +- 0.001)
engine.action(java.lang.Double.valueOf( 35.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.46100243449 +- 0.001)
engine.action(java.lang.Double.valueOf( 45.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.58099507734 +- 0.001)
engine.action(java.lang.Double.valueOf( 55.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.67895250259 +- 0.001)
engine.action(java.lang.Double.valueOf( 65.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.75393498012 +- 0.001)
engine.action(java.lang.Double.valueOf( 75.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.80736647814 +- 0.001)
engine.action(java.lang.Double.valueOf( 85.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.84119814623 +- 0.001)
engine.action(java.lang.Double.valueOf( 95.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.85612291938 +- 0.001)
engine.action(java.lang.Double.valueOf(105.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.85052616975 +- 0.001)
engine.action(java.lang.Double.valueOf(115.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.82058975268 +- 0.001)
engine.action(java.lang.Double.valueOf(125.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.76154114462 +- 0.001)
engine.action(java.lang.Double.valueOf(135.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.66961346549 +- 0.001)
engine.action(java.lang.Double.valueOf(145.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.54401256459 +- 0.001)
engine.action(java.lang.Double.valueOf(155.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.38817025604 +- 0.001)
engine.action(java.lang.Double.valueOf(165.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.20980133234 +- 0.001)
engine.action(java.lang.Double.valueOf(175.0)).asInstanceOf[java.lang.Double].doubleValue should be (0.01968958089 +- 0.001)
engine.action(java.lang.Double.valueOf(185.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.1704373229 +- 0.001)
engine.action(java.lang.Double.valueOf(195.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.3502702450 +- 0.001)
engine.action(java.lang.Double.valueOf(205.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.5127572323 +- 0.001)
engine.action(java.lang.Double.valueOf(215.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.6548281470 +- 0.001)
engine.action(java.lang.Double.valueOf(225.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.7767913542 +- 0.001)
engine.action(java.lang.Double.valueOf(235.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.8806473549 +- 0.001)
engine.action(java.lang.Double.valueOf(245.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.9679493266 +- 0.001)
engine.action(java.lang.Double.valueOf(255.0)).asInstanceOf[java.lang.Double].doubleValue should be (-1.0380130690 +- 0.001)
engine.action(java.lang.Double.valueOf(265.0)).asInstanceOf[java.lang.Double].doubleValue should be (-1.0871683887 +- 0.001)
engine.action(java.lang.Double.valueOf(275.0)).asInstanceOf[java.lang.Double].doubleValue should be (-1.1093800549 +- 0.001)
engine.action(java.lang.Double.valueOf(285.0)).asInstanceOf[java.lang.Double].doubleValue should be (-1.0980749198 +- 0.001)
engine.action(java.lang.Double.valueOf(295.0)).asInstanceOf[java.lang.Double].doubleValue should be (-1.0485714839 +- 0.001)
engine.action(java.lang.Double.valueOf(305.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.9602825140 +- 0.001)
engine.action(java.lang.Double.valueOf(315.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.8379324438 +- 0.001)
engine.action(java.lang.Double.valueOf(325.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.6913668676 +- 0.001)
engine.action(java.lang.Double.valueOf(335.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.5339997309 +- 0.001)
engine.action(java.lang.Double.valueOf(345.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.3803701293 +- 0.001)
engine.action(java.lang.Double.valueOf(355.0)).asInstanceOf[java.lang.Double].doubleValue should be (-0.2435189466 +- 0.001)
}
"GaussianProcess" must "do the scalar-to-vector case" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: double
output: {type: array, items: double}
cells:
table:
type:
type: array
items:
type: record
name: GP
fields:
- {name: x, type: double}
- {name: to, type: {type: array, items: double}}
- {name: sigma, type: {type: array, items: double}}
init:
- {x: 0, to: [-0.0275638306327, 1.6436104074682], sigma: [0.2, 0.2]}
- {x: 10, to: [-0.0550590156488, 1.1279026778761], sigma: [0.2, 0.2]}
- {x: 20, to: [0.27665811014276, 1.2884952019673], sigma: [0.2, 0.2]}
- {x: 30, to: [0.32564933012538, 0.6975167314472], sigma: [0.2, 0.2]}
- {x: 40, to: [0.50951585410170, 0.5366404828626], sigma: [0.2, 0.2]}
- {x: 50, to: [0.78970794409845, 0.5753573687864], sigma: [0.2, 0.2]}
- {x: 60, to: [0.79560759832648, 0.8669490726924], sigma: [0.2, 0.2]}
- {x: 70, to: [1.11012632091040, 0.2893283390564], sigma: [0.2, 0.2]}
- {x: 80, to: [1.01101991793607, 0.1168159075340], sigma: [0.2, 0.2]}
- {x: 90, to: [0.89167196367050, 0.2336483742367], sigma: [0.2, 0.2]}
- {x: 100, to: [0.79669701754334, -0.262415331320], sigma: [0.2, 0.2]}
- {x: 110, to: [0.73478042254427, -0.269257044570], sigma: [0.2, 0.2]}
- {x: 120, to: [0.54225961573755, -0.528524392539], sigma: [0.2, 0.2]}
- {x: 130, to: [0.63387009124588, -0.550031870271], sigma: [0.2, 0.2]}
- {x: 140, to: [0.53868855884699, -0.756608403729], sigma: [0.2, 0.2]}
- {x: 150, to: [0.52440311808591, -0.764908616789], sigma: [0.2, 0.2]}
- {x: 160, to: [0.38234791058889, -0.755332319548], sigma: [0.2, 0.2]}
- {x: 170, to: [0.06408032993876, -1.208343893027], sigma: [0.2, 0.2]}
- {x: 180, to: [-0.1251140497492, -1.008797566375], sigma: [0.2, 0.2]}
- {x: 190, to: [-0.6622773320724, -0.735977078508], sigma: [0.2, 0.2]}
- {x: 200, to: [-0.5060071246967, -1.131959607514], sigma: [0.2, 0.2]}
- {x: 210, to: [-0.7506697169187, -0.933266228609], sigma: [0.2, 0.2]}
- {x: 220, to: [-0.6114675918420, -1.115429627986], sigma: [0.2, 0.2]}
- {x: 230, to: [-0.7393428452701, -0.644829102596], sigma: [0.2, 0.2]}
- {x: 240, to: [-1.1005820484414, -0.602487247649], sigma: [0.2, 0.2]}
- {x: 250, to: [-0.9199172336156, -0.445415709796], sigma: [0.2, 0.2]}
- {x: 260, to: [-0.5548384390502, -0.130872144887], sigma: [0.2, 0.2]}
- {x: 270, to: [-1.1663758959153, 0.0403022656204], sigma: [0.2, 0.2]}
- {x: 280, to: [-1.3683792108867, -0.055259795527], sigma: [0.2, 0.2]}
- {x: 290, to: [-1.0373014259785, 0.1923335805121], sigma: [0.2, 0.2]}
- {x: 300, to: [-0.8539507289822, 0.6473186579626], sigma: [0.2, 0.2]}
- {x: 310, to: [-1.1658738130819, 0.7019580213786], sigma: [0.2, 0.2]}
- {x: 320, to: [-0.3248586082577, 0.5924413605916], sigma: [0.2, 0.2]}
- {x: 330, to: [-0.4246629811006, 0.7436475098601], sigma: [0.2, 0.2]}
- {x: 340, to: [-0.2888893157821, 0.9129729112785], sigma: [0.2, 0.2]}
- {x: 350, to: [0.16414946814559, 1.1171102512988], sigma: [0.2, 0.2]}
action:
model.reg.gaussianProcess:
- input # find the best fit to the input x
- {cell: table} # use the provided table of training data
- null # no explicit krigingWeight: universal Kriging
- fcn: m.kernel.rbf # radial basis function (squared exponential)
fill: {gamma: 2.0} # with a given gamma (by partial application)
# can be replaced with any function,
# from the m.kernel.* library or user-defined
""").head
engine.action(java.lang.Double.valueOf( 5.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.050270763255514, 1.4492626857937625) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 15.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.202128488472172, 1.2713110020675373) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 25.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.373879790860577, 1.0903607984674346) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 35.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.549471728139445, 0.9222828279518422) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 45.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.710705016737154, 0.7735795830495835) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 55.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.840880718229975, 0.6411697711359448) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 65.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.928231751538775, 0.5151500526277536) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 75.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.968010181595931, 0.3834313544270441) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 85.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.962498946612432, 0.2365575688235299) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf( 95.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.918962083182861, 0.0710799251660952) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(105.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.846350754433821, -0.109480744439727) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(115.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.752088082104471, -0.296096341267311) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(125.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.640211399707369, -0.477269780678076) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(135.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.511542857843088, -0.642348504023967) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(145.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.365641994383437, -0.783891904586578) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(155.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.203481284390595, -0.898389502372114) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(165.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.029458609235600, -0.985341026538423) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(175.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.14832016607275, -1.045337536201359) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(185.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.31973194704182, -1.078106424907824) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(195.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.47559728238048, -1.081414236647962) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(205.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.61057739245502, -1.051315002564407) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(215.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.72464951816128, -0.983656722049111) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(225.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.82239617282399, -0.876234639178472) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(235.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.91028667581172, -0.730703295106655) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(245.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.99300490189735, -0.553433729608649) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(255.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.07031044036758, -0.354899392727916) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(265.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.13573876935179, -0.147740859185571) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(275.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.17774177813471, 0.0558274223675871) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(285.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.18294425460333, 0.2463503554390451) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(295.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.14041609925062, 0.4183942587682155) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(305.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.04551997667269, 0.5703518831234655) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(315.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.90208517428129, 0.7029745828341570) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(325.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.72226768613349, 0.8172988304816548) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(335.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.52422932693523, 0.9128376082472604) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(345.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.32842152814092, 0.9867583939515017) map {case (x, y) => x should be (y +- 0.1)}
engine.action(java.lang.Double.valueOf(355.0)).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.15358827566240, 1.0343436515482327) map {case (x, y) => x should be (y +- 0.1)}
}
"GaussianProcess" must "do the vector-to-scalar case" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: double
cells:
table:
type:
type: array
items:
type: record
name: GP
fields:
- {name: x, type: {type: array, items: double}}
- {name: to, type: double}
- {name: sigma, type: double}
init:
- {x: [ 0, 0], to: 0.82118528, sigma: 0.2}
- {x: [ 0, 36], to: 0.63603407, sigma: 0.2}
- {x: [ 0, 72], to: 0.43135014, sigma: 0.2}
- {x: [ 0, 108], to: -0.5271264, sigma: 0.2}
- {x: [ 0, 144], to: -0.7426378, sigma: 0.2}
- {x: [ 0, 180], to: -1.1869050, sigma: 0.2}
- {x: [ 0, 216], to: -0.7996154, sigma: 0.2}
- {x: [ 0, 252], to: -0.4564504, sigma: 0.2}
- {x: [ 0, 288], to: 0.08426291, sigma: 0.2}
- {x: [ 0, 324], to: 0.80768845, sigma: 0.2}
- {x: [ 36, 0], to: 1.35803374, sigma: 0.2}
- {x: [ 36, 36], to: 1.52769845, sigma: 0.2}
- {x: [ 36, 72], to: 1.08079765, sigma: 0.2}
- {x: [ 36, 108], to: 0.31241499, sigma: 0.2}
- {x: [ 36, 144], to: -0.2676979, sigma: 0.2}
- {x: [ 36, 180], to: -0.7164726, sigma: 0.2}
- {x: [ 36, 216], to: -0.3338313, sigma: 0.2}
- {x: [ 36, 252], to: 0.08139820, sigma: 0.2}
- {x: [ 36, 288], to: 0.71689790, sigma: 0.2}
- {x: [ 36, 324], to: 1.13835037, sigma: 0.2}
- {x: [ 72, 0], to: 1.83512995, sigma: 0.2}
- {x: [ 72, 36], to: 1.61494407, sigma: 0.2}
- {x: [ 72, 72], to: 1.50290190, sigma: 0.2}
- {x: [ 72, 108], to: 0.75406155, sigma: 0.2}
- {x: [ 72, 144], to: 0.03405990, sigma: 0.2}
- {x: [ 72, 180], to: 0.14337997, sigma: 0.2}
- {x: [ 72, 216], to: 0.38604138, sigma: 0.2}
- {x: [ 72, 252], to: 0.36514719, sigma: 0.2}
- {x: [ 72, 288], to: 1.31043893, sigma: 0.2}
- {x: [ 72, 324], to: 1.63925281, sigma: 0.2}
- {x: [108, 0], to: 2.18498629, sigma: 0.2}
- {x: [108, 36], to: 1.36922627, sigma: 0.2}
- {x: [108, 72], to: 1.41108233, sigma: 0.2}
- {x: [108, 108], to: 0.80950036, sigma: 0.2}
- {x: [108, 144], to: 0.07678710, sigma: 0.2}
- {x: [108, 180], to: 0.03666408, sigma: 0.2}
- {x: [108, 216], to: -0.2375061, sigma: 0.2}
- {x: [108, 252], to: 0.57171030, sigma: 0.2}
- {x: [108, 288], to: 1.35875134, sigma: 0.2}
- {x: [108, 324], to: 1.64114251, sigma: 0.2}
- {x: [144, 0], to: 1.81406684, sigma: 0.2}
- {x: [144, 36], to: 1.36598027, sigma: 0.2}
- {x: [144, 72], to: 0.87335695, sigma: 0.2}
- {x: [144, 108], to: 0.28625228, sigma: 0.2}
- {x: [144, 144], to: -0.1884535, sigma: 0.2}
- {x: [144, 180], to: -0.7475230, sigma: 0.2}
- {x: [144, 216], to: 0.05916590, sigma: 0.2}
- {x: [144, 252], to: 0.20589299, sigma: 0.2}
- {x: [144, 288], to: 1.49434570, sigma: 0.2}
- {x: [144, 324], to: 1.04382638, sigma: 0.2}
- {x: [180, 0], to: 0.95695423, sigma: 0.2}
- {x: [180, 36], to: 0.99368592, sigma: 0.2}
- {x: [180, 72], to: 0.03288738, sigma: 0.2}
- {x: [180, 108], to: -0.6079039, sigma: 0.2}
- {x: [180, 144], to: -0.3848322, sigma: 0.2}
- {x: [180, 180], to: -1.0155591, sigma: 0.2}
- {x: [180, 216], to: -0.5555413, sigma: 0.2}
- {x: [180, 252], to: -0.0581398, sigma: 0.2}
- {x: [180, 288], to: 0.33743708, sigma: 0.2}
- {x: [180, 324], to: 0.83556571, sigma: 0.2}
- {x: [216, 0], to: 0.20588985, sigma: 0.2}
- {x: [216, 36], to: 0.44298549, sigma: 0.2}
- {x: [216, 72], to: -0.5446849, sigma: 0.2}
- {x: [216, 108], to: -1.0020396, sigma: 0.2}
- {x: [216, 144], to: -1.8021995, sigma: 0.2}
- {x: [216, 180], to: -1.5844545, sigma: 0.2}
- {x: [216, 216], to: -1.7084132, sigma: 0.2}
- {x: [216, 252], to: -0.9891052, sigma: 0.2}
- {x: [216, 288], to: -0.6297273, sigma: 0.2}
- {x: [216, 324], to: 0.26628269, sigma: 0.2}
- {x: [252, 0], to: 0.10807076, sigma: 0.2}
- {x: [252, 36], to: -0.4890686, sigma: 0.2}
- {x: [252, 72], to: -0.5842210, sigma: 0.2}
- {x: [252, 108], to: -1.2321703, sigma: 0.2}
- {x: [252, 144], to: -1.8977512, sigma: 0.2}
- {x: [252, 180], to: -2.1240163, sigma: 0.2}
- {x: [252, 216], to: -1.9555430, sigma: 0.2}
- {x: [252, 252], to: -1.5510880, sigma: 0.2}
- {x: [252, 288], to: -0.6289043, sigma: 0.2}
- {x: [252, 324], to: -0.2906448, sigma: 0.2}
- {x: [288, 0], to: 0.04032433, sigma: 0.2}
- {x: [288, 36], to: -0.0974952, sigma: 0.2}
- {x: [288, 72], to: -0.6059362, sigma: 0.2}
- {x: [288, 108], to: -1.4171517, sigma: 0.2}
- {x: [288, 144], to: -1.7699124, sigma: 0.2}
- {x: [288, 180], to: -2.1935099, sigma: 0.2}
- {x: [288, 216], to: -1.9860432, sigma: 0.2}
- {x: [288, 252], to: -1.1616088, sigma: 0.2}
- {x: [288, 288], to: -0.8162288, sigma: 0.2}
- {x: [288, 324], to: 0.16975848, sigma: 0.2}
- {x: [324, 0], to: 0.34328957, sigma: 0.2}
- {x: [324, 36], to: 0.26405396, sigma: 0.2}
- {x: [324, 72], to: -0.3641890, sigma: 0.2}
- {x: [324, 108], to: -0.9854455, sigma: 0.2}
- {x: [324, 144], to: -1.3019051, sigma: 0.2}
- {x: [324, 180], to: -1.6919030, sigma: 0.2}
- {x: [324, 216], to: -1.1601112, sigma: 0.2}
- {x: [324, 252], to: -0.9362727, sigma: 0.2}
- {x: [324, 288], to: -0.4371584, sigma: 0.2}
- {x: [324, 324], to: 0.17624777, sigma: 0.2}
action:
model.reg.gaussianProcess:
- input # find the best fit to the input x
- {cell: table} # use the provided table of training data
- null # no explicit krigingWeight: universal Kriging
- fcn: m.kernel.rbf # radial basis function (squared exponential)
fill: {gamma: 2.0} # with a given gamma (by partial application)
# can be replaced with any function,
# from the m.kernel.* library or user-defined
""").head
engine.action(engine.jsonInput("[ 0, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (0.789702380 +- 0.001)
engine.action(engine.jsonInput("[ 0, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (0.783152417 +- 0.001)
engine.action(engine.jsonInput("[ 0, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (0.336554168 +- 0.001)
engine.action(engine.jsonInput("[ 0, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.31735296 +- 0.001)
engine.action(engine.jsonInput("[ 0, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.87359504 +- 0.001)
engine.action(engine.jsonInput("[ 0, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.11058453 +- 0.001)
engine.action(engine.jsonInput("[ 0, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.85981518 +- 0.001)
engine.action(engine.jsonInput("[ 0, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.31377406 +- 0.001)
engine.action(engine.jsonInput("[ 0, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (0.302067355 +- 0.001)
engine.action(engine.jsonInput("[ 0, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (0.732170021 +- 0.001)
engine.action(engine.jsonInput("[ 36, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (1.368724991 +- 0.001)
engine.action(engine.jsonInput("[ 36, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (1.462784826 +- 0.001)
engine.action(engine.jsonInput("[ 36, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (1.109794918 +- 0.001)
engine.action(engine.jsonInput("[ 36, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (0.299486358 +- 0.001)
engine.action(engine.jsonInput("[ 36, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.45987381 +- 0.001)
engine.action(engine.jsonInput("[ 36, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.67344493 +- 0.001)
engine.action(engine.jsonInput("[ 36, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.37367662 +- 0.001)
engine.action(engine.jsonInput("[ 36, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (0.144398029 +- 0.001)
engine.action(engine.jsonInput("[ 36, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (0.769348666 +- 0.001)
engine.action(engine.jsonInput("[ 36, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (1.174563705 +- 0.001)
engine.action(engine.jsonInput("[ 72, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (1.850909636 +- 0.001)
engine.action(engine.jsonInput("[ 72, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (1.637780435 +- 0.001)
engine.action(engine.jsonInput("[ 72, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (1.465527124 +- 0.001)
engine.action(engine.jsonInput("[ 72, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (0.840201434 +- 0.001)
engine.action(engine.jsonInput("[ 72, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.00294014 +- 0.001)
engine.action(engine.jsonInput("[ 72, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.28097522 +- 0.001)
engine.action(engine.jsonInput("[ 72, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.01113069 +- 0.001)
engine.action(engine.jsonInput("[ 72, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (0.530567388 +- 0.001)
engine.action(engine.jsonInput("[ 72, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (1.249584091 +- 0.001)
engine.action(engine.jsonInput("[ 72, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (1.626805927 +- 0.001)
engine.action(engine.jsonInput("[108, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (2.126374782 +- 0.001)
engine.action(engine.jsonInput("[108, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (1.532192371 +- 0.001)
engine.action(engine.jsonInput("[108, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (1.312555308 +- 0.001)
engine.action(engine.jsonInput("[108, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (0.848578656 +- 0.001)
engine.action(engine.jsonInput("[108, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (0.016508458 +- 0.001)
engine.action(engine.jsonInput("[108, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.36538357 +- 0.001)
engine.action(engine.jsonInput("[108, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.08564144 +- 0.001)
engine.action(engine.jsonInput("[108, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (0.662172897 +- 0.001)
engine.action(engine.jsonInput("[108, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (1.452223836 +- 0.001)
engine.action(engine.jsonInput("[108, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (1.595081961 +- 0.001)
engine.action(engine.jsonInput("[144, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (1.821615875 +- 0.001)
engine.action(engine.jsonInput("[144, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (1.347803533 +- 0.001)
engine.action(engine.jsonInput("[144, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (0.914697032 +- 0.001)
engine.action(engine.jsonInput("[144, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (0.317009091 +- 0.001)
engine.action(engine.jsonInput("[144, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.35898810 +- 0.001)
engine.action(engine.jsonInput("[144, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.59941579 +- 0.001)
engine.action(engine.jsonInput("[144, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.24689828 +- 0.001)
engine.action(engine.jsonInput("[144, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (0.648902743 +- 0.001)
engine.action(engine.jsonInput("[144, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (1.382349357 +- 0.001)
engine.action(engine.jsonInput("[144, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (1.218171697 +- 0.001)
engine.action(engine.jsonInput("[180, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (1.009615522 +- 0.001)
engine.action(engine.jsonInput("[180, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (0.906944445 +- 0.001)
engine.action(engine.jsonInput("[180, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (0.351897200 +- 0.001)
engine.action(engine.jsonInput("[180, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.50574559 +- 0.001)
engine.action(engine.jsonInput("[180, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.04549932 +- 0.001)
engine.action(engine.jsonInput("[180, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.04417136 +- 0.001)
engine.action(engine.jsonInput("[180, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.76769335 +- 0.001)
engine.action(engine.jsonInput("[180, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.05415796 +- 0.001)
engine.action(engine.jsonInput("[180, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (0.672986650 +- 0.001)
engine.action(engine.jsonInput("[180, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (0.711362567 +- 0.001)
engine.action(engine.jsonInput("[216, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (0.221888719 +- 0.001)
engine.action(engine.jsonInput("[216, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (0.240183946 +- 0.001)
engine.action(engine.jsonInput("[216, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.23160998 +- 0.001)
engine.action(engine.jsonInput("[216, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.12330176 +- 0.001)
engine.action(engine.jsonInput("[216, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.68735535 +- 0.001)
engine.action(engine.jsonInput("[216, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.69764134 +- 0.001)
engine.action(engine.jsonInput("[216, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.58297377 +- 0.001)
engine.action(engine.jsonInput("[216, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.12069223 +- 0.001)
engine.action(engine.jsonInput("[216, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.36194660 +- 0.001)
engine.action(engine.jsonInput("[216, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (0.135935887 +- 0.001)
engine.action(engine.jsonInput("[252, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.13986011 +- 0.001)
engine.action(engine.jsonInput("[252, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.24481529 +- 0.001)
engine.action(engine.jsonInput("[252, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.63250177 +- 0.001)
engine.action(engine.jsonInput("[252, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.31714040 +- 0.001)
engine.action(engine.jsonInput("[252, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.89469570 +- 0.001)
engine.action(engine.jsonInput("[252, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-2.12134236 +- 0.001)
engine.action(engine.jsonInput("[252, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-2.01067201 +- 0.001)
engine.action(engine.jsonInput("[252, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.50394827 +- 0.001)
engine.action(engine.jsonInput("[252, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.80015485 +- 0.001)
engine.action(engine.jsonInput("[252, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.19396956 +- 0.001)
engine.action(engine.jsonInput("[288, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (0.002864571 +- 0.001)
engine.action(engine.jsonInput("[288, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.23510960 +- 0.001)
engine.action(engine.jsonInput("[288, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.73985078 +- 0.001)
engine.action(engine.jsonInput("[288, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.30389377 +- 0.001)
engine.action(engine.jsonInput("[288, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.81904616 +- 0.001)
engine.action(engine.jsonInput("[288, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-2.17590823 +- 0.001)
engine.action(engine.jsonInput("[288, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.93250697 +- 0.001)
engine.action(engine.jsonInput("[288, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.27413740 +- 0.001)
engine.action(engine.jsonInput("[288, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.70495225 +- 0.001)
engine.action(engine.jsonInput("[288, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.23168405 +- 0.001)
engine.action(engine.jsonInput("[324, 0]")).asInstanceOf[java.lang.Double].doubleValue should be (0.295490316 +- 0.001)
engine.action(engine.jsonInput("[324, 36]")).asInstanceOf[java.lang.Double].doubleValue should be (0.079930170 +- 0.001)
engine.action(engine.jsonInput("[324, 72]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.45917604 +- 0.001)
engine.action(engine.jsonInput("[324, 108]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.95631842 +- 0.001)
engine.action(engine.jsonInput("[324, 144]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.36180103 +- 0.001)
engine.action(engine.jsonInput("[324, 180]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.62933375 +- 0.001)
engine.action(engine.jsonInput("[324, 216]")).asInstanceOf[java.lang.Double].doubleValue should be (-1.32079787 +- 0.001)
engine.action(engine.jsonInput("[324, 252]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.80798892 +- 0.001)
engine.action(engine.jsonInput("[324, 288]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.49451255 +- 0.001)
engine.action(engine.jsonInput("[324, 324]")).asInstanceOf[java.lang.Double].doubleValue should be (-0.16477560 +- 0.001)
}
"GaussianProcess" must "do the vector-to-vector case" taggedAs(Lib, LibModelReg) in {
val engine = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
table:
type:
type: array
items:
type: record
name: GP
fields:
- {name: x, type: {type: array, items: double}}
- {name: to, type: {type: array, items: double}}
- {name: sigma, type: {type: array, items: double}}
init:
- {x: [ 0, 0], to: [0.01870587, 0.96812508], sigma: [0.2, 0.2]}
- {x: [ 0, 36], to: [0.00242101, 0.95369720], sigma: [0.2, 0.2]}
- {x: [ 0, 72], to: [0.13131668, 0.53822666], sigma: [0.2, 0.2]}
- {x: [ 0, 108], to: [-0.0984303, -0.3743950], sigma: [0.2, 0.2]}
- {x: [ 0, 144], to: [0.15985766, -0.6027780], sigma: [0.2, 0.2]}
- {x: [ 0, 180], to: [-0.2417438, -1.0968682], sigma: [0.2, 0.2]}
- {x: [ 0, 216], to: [0.05190623, -0.9102348], sigma: [0.2, 0.2]}
- {x: [ 0, 252], to: [0.27249439, -0.4792263], sigma: [0.2, 0.2]}
- {x: [ 0, 288], to: [0.07282733, 0.48063363], sigma: [0.2, 0.2]}
- {x: [ 0, 324], to: [-0.0842266, 0.57112860], sigma: [0.2, 0.2]}
- {x: [ 36, 0], to: [0.47755174, 1.13094388], sigma: [0.2, 0.2]}
- {x: [ 36, 36], to: [0.41956515, 0.90267757], sigma: [0.2, 0.2]}
- {x: [ 36, 72], to: [0.59136153, 0.41456807], sigma: [0.2, 0.2]}
- {x: [ 36, 108], to: [0.60570628, -0.2181357], sigma: [0.2, 0.2]}
- {x: [ 36, 144], to: [0.59105899, -0.5619968], sigma: [0.2, 0.2]}
- {x: [ 36, 180], to: [0.57772703, -0.8929270], sigma: [0.2, 0.2]}
- {x: [ 36, 216], to: [0.23902551, -0.8220304], sigma: [0.2, 0.2]}
- {x: [ 36, 252], to: [0.61153563, -0.0519713], sigma: [0.2, 0.2]}
- {x: [ 36, 288], to: [0.64443777, 0.48040414], sigma: [0.2, 0.2]}
- {x: [ 36, 324], to: [0.48667517, 0.71326465], sigma: [0.2, 0.2]}
- {x: [ 72, 0], to: [1.09232448, 0.93827725], sigma: [0.2, 0.2]}
- {x: [ 72, 36], to: [0.81049592, 1.11762190], sigma: [0.2, 0.2]}
- {x: [ 72, 72], to: [0.71568727, 0.06369347], sigma: [0.2, 0.2]}
- {x: [ 72, 108], to: [0.72942906, -0.5640199], sigma: [0.2, 0.2]}
- {x: [ 72, 144], to: [1.06713767, -0.4772772], sigma: [0.2, 0.2]}
- {x: [ 72, 180], to: [1.38277511, -0.9363026], sigma: [0.2, 0.2]}
- {x: [ 72, 216], to: [0.61698083, -0.8860234], sigma: [0.2, 0.2]}
- {x: [ 72, 252], to: [0.82624676, -0.1171322], sigma: [0.2, 0.2]}
- {x: [ 72, 288], to: [0.83217277, 0.30132193], sigma: [0.2, 0.2]}
- {x: [ 72, 324], to: [0.74893667, 0.80824628], sigma: [0.2, 0.2]}
- {x: [108, 0], to: [0.66284547, 0.85288292], sigma: [0.2, 0.2]}
- {x: [108, 36], to: [0.59724043, 0.88159718], sigma: [0.2, 0.2]}
- {x: [108, 72], to: [0.28727426, 0.20407304], sigma: [0.2, 0.2]}
- {x: [108, 108], to: [0.90503697, -0.5979697], sigma: [0.2, 0.2]}
- {x: [108, 144], to: [1.05726502, -0.8156704], sigma: [0.2, 0.2]}
- {x: [108, 180], to: [0.55263541, -1.1994934], sigma: [0.2, 0.2]}
- {x: [108, 216], to: [0.50777742, -0.7713018], sigma: [0.2, 0.2]}
- {x: [108, 252], to: [0.60347324, -0.2211189], sigma: [0.2, 0.2]}
- {x: [108, 288], to: [1.16101443, -0.1406493], sigma: [0.2, 0.2]}
- {x: [108, 324], to: [0.92295182, 0.51506096], sigma: [0.2, 0.2]}
- {x: [144, 0], to: [0.80924121, 0.83038461], sigma: [0.2, 0.2]}
- {x: [144, 36], to: [0.80043759, 0.57306896], sigma: [0.2, 0.2]}
- {x: [144, 72], to: [0.74865899, 0.12507470], sigma: [0.2, 0.2]}
- {x: [144, 108], to: [0.54867424, -0.2083665], sigma: [0.2, 0.2]}
- {x: [144, 144], to: [0.58431995, -0.7811933], sigma: [0.2, 0.2]}
- {x: [144, 180], to: [0.71950969, -0.9713840], sigma: [0.2, 0.2]}
- {x: [144, 216], to: [0.52307948, -0.8731280], sigma: [0.2, 0.2]}
- {x: [144, 252], to: [0.36976490, -0.3895379], sigma: [0.2, 0.2]}
- {x: [144, 288], to: [0.37565453, 0.21778435], sigma: [0.2, 0.2]}
- {x: [144, 324], to: [0.45793731, 0.85264234], sigma: [0.2, 0.2]}
- {x: [180, 0], to: [-0.0441948, 1.09297816], sigma: [0.2, 0.2]}
- {x: [180, 36], to: [-0.2817155, 0.69222421], sigma: [0.2, 0.2]}
- {x: [180, 72], to: [0.12103868, 0.25006600], sigma: [0.2, 0.2]}
- {x: [180, 108], to: [0.11426250, -0.5415858], sigma: [0.2, 0.2]}
- {x: [180, 144], to: [0.10181024, -0.8848316], sigma: [0.2, 0.2]}
- {x: [180, 180], to: [-0.1477347, -1.1392833], sigma: [0.2, 0.2]}
- {x: [180, 216], to: [0.35044408, -0.9500126], sigma: [0.2, 0.2]}
- {x: [180, 252], to: [0.18675249, -0.4131455], sigma: [0.2, 0.2]}
- {x: [180, 288], to: [0.24436046, 0.35884024], sigma: [0.2, 0.2]}
- {x: [180, 324], to: [0.07432997, 1.02698144], sigma: [0.2, 0.2]}
- {x: [216, 0], to: [-0.6591356, 0.94999291], sigma: [0.2, 0.2]}
- {x: [216, 36], to: [-0.4494247, 0.69657926], sigma: [0.2, 0.2]}
- {x: [216, 72], to: [-0.4270339, 0.15420512], sigma: [0.2, 0.2]}
- {x: [216, 108], to: [-0.5964852, -0.4521517], sigma: [0.2, 0.2]}
- {x: [216, 144], to: [-0.3799727, -0.9904939], sigma: [0.2, 0.2]}
- {x: [216, 180], to: [-0.5694217, -1.0015548], sigma: [0.2, 0.2]}
- {x: [216, 216], to: [-0.6918730, -0.5267317], sigma: [0.2, 0.2]}
- {x: [216, 252], to: [-0.5838720, -0.4841855], sigma: [0.2, 0.2]}
- {x: [216, 288], to: [-0.5693374, -0.0133151], sigma: [0.2, 0.2]}
- {x: [216, 324], to: [-0.4903301, 1.03380154], sigma: [0.2, 0.2]}
- {x: [252, 0], to: [-1.3293399, 0.71483260], sigma: [0.2, 0.2]}
- {x: [252, 36], to: [-1.3110310, 0.72705720], sigma: [0.2, 0.2]}
- {x: [252, 72], to: [-1.0671501, 0.24425863], sigma: [0.2, 0.2]}
- {x: [252, 108], to: [-0.8844714, -0.2823489], sigma: [0.2, 0.2]}
- {x: [252, 144], to: [-0.9533401, -1.1736452], sigma: [0.2, 0.2]}
- {x: [252, 180], to: [-0.5345838, -1.2210451], sigma: [0.2, 0.2]}
- {x: [252, 216], to: [-1.0862084, -0.7348636], sigma: [0.2, 0.2]}
- {x: [252, 252], to: [-0.7549718, -0.1849688], sigma: [0.2, 0.2]}
- {x: [252, 288], to: [-1.2390564, 0.54575855], sigma: [0.2, 0.2]}
- {x: [252, 324], to: [-1.0288154, 0.84115420], sigma: [0.2, 0.2]}
- {x: [288, 0], to: [-0.5410771, 1.10696790], sigma: [0.2, 0.2]}
- {x: [288, 36], to: [-0.8322681, 0.44386847], sigma: [0.2, 0.2]}
- {x: [288, 72], to: [-0.9040048, 0.00519231], sigma: [0.2, 0.2]}
- {x: [288, 108], to: [-0.6676514, -0.4833115], sigma: [0.2, 0.2]}
- {x: [288, 144], to: [-1.0580007, -1.2009009], sigma: [0.2, 0.2]}
- {x: [288, 180], to: [-0.8102370, -1.2521135], sigma: [0.2, 0.2]}
- {x: [288, 216], to: [-1.2759558, -0.7864478], sigma: [0.2, 0.2]}
- {x: [288, 252], to: [-0.5628566, 0.13344358], sigma: [0.2, 0.2]}
- {x: [288, 288], to: [-0.9149276, 0.22418075], sigma: [0.2, 0.2]}
- {x: [288, 324], to: [-0.5648838, 0.75833374], sigma: [0.2, 0.2]}
- {x: [324, 0], to: [-0.6311144, 0.83818280], sigma: [0.2, 0.2]}
- {x: [324, 36], to: [-0.5527385, 0.84973376], sigma: [0.2, 0.2]}
- {x: [324, 72], to: [-0.3039325, -0.2189731], sigma: [0.2, 0.2]}
- {x: [324, 108], to: [-0.4498324, 0.07328764], sigma: [0.2, 0.2]}
- {x: [324, 144], to: [-0.7415195, -0.6128136], sigma: [0.2, 0.2]}
- {x: [324, 180], to: [-0.7918942, -1.2435311], sigma: [0.2, 0.2]}
- {x: [324, 216], to: [-0.6853270, -0.5134147], sigma: [0.2, 0.2]}
- {x: [324, 252], to: [-0.7581712, -0.7304523], sigma: [0.2, 0.2]}
- {x: [324, 288], to: [-0.4803783, 0.12660344], sigma: [0.2, 0.2]}
- {x: [324, 324], to: [-0.6815587, 0.82271760], sigma: [0.2, 0.2]}
action:
model.reg.gaussianProcess:
- input # find the best fit to the input x
- {cell: table} # use the provided table of training data
- null # no explicit krigingWeight: universal Kriging
- fcn: m.kernel.rbf # radial basis function (squared exponential)
fill: {gamma: 2.0} # with a given gamma (by partial application)
# can be replaced with any function,
# from the m.kernel.* library or user-defined
""").head
engine.action(engine.jsonInput("[ 0, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.064779229813204, 0.96782301456871) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.121664830264969, 0.91524378145119) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.216316970981976, 0.43536919421404) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.158097330508110, -0.1570712583734) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.005843683253181, -0.6746409371841) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.05505229209753, -1.0348201168723) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.027244856798085, -0.9323695730894) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.228310304549874, -0.3237075447481) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.329555998371579, 0.33447339642380) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 0, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.224134433890304, 0.56179760089078) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.642510569955593, 1.08150359261086) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.574414083994342, 1.01547509493490) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.525254987256053, 0.38482080968042) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.521968103358422, -0.2322465354319) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.646137319268672, -0.6026320145505) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.724069639046670, -0.9017679835361) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.573719616038126, -0.8428508869520) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.545852542329543, -0.2506561706813) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.606872636602972, 0.45029925454635) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 36, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.441389771978765, 0.74083544766659) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.977748454643780, 1.00104108097745) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.797350444231129, 1.03140212433305) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.673249258827750, 0.32601280917617) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.793138523118285, -0.3819477574562) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(1.136586425054690, -0.7293445925598) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(1.227854236064106, -0.9631493525473) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.852897762435422, -0.8365809399416) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.750239280286434, -0.2492263368339) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.940481362701912, 0.39790602429840) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[ 72, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.768251823210774, 0.72003180803082) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.859379032952287, 0.83957209536458) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.746591722059892, 0.84563877181533) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.700088795230686, 0.19173494869166) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.825268228136332, -0.4830828897588) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(1.037957610023134, -0.8665175669264) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.988710260763903, -1.0813726256458) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.642968431918638, -0.8706558175409) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.703236484927596, -0.2891600211628) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(1.051373809585268, 0.30156199893011) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[108, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.905999740390180, 0.65523913744991) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.722094791027407, 0.86978626471051) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.729299311470238, 0.66775521482295) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.678505139706764, 0.06440219730373) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.625609724224827, -0.4743647710358) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.632999597034182, -0.8421184081561) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.580666654244519, -1.0626002319736) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.414434221704458, -0.8919514744551) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.488257767012426, -0.3608586328044) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.699495457977056, 0.31413231222162) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[144, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.576720865037063, 0.80266743957387) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.261714306314791, 1.02763696753567) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.311599104375023, 0.70761204493112) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.263577805718530, 0.09714664449536) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.127845554599678, -0.4576463574262) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.090383674750804, -0.8722179387155) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.113864383315045, -1.0580113962883) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.069990322010953, -0.9057571224064) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.066226881493862, -0.4089280668320) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.062772044621099, 0.39131654543846) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[180, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(0.009460119595085, 1.01055465934145) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.73312221526568, 0.93804750105795) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.68789807457787, 0.70400861091742) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.55197886175110, 0.16874457785834) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.52450042744038, -0.4729170540772) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.49933749191187, -0.9901355525858) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.49437643240508, -1.0701171487030) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.54576440321639, -0.7756610412889) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.57560565089801, -0.2764287342223) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.68768932281517, 0.46231642218308) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[216, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.63276363506716, 1.00624161265100) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.25883018418244, 0.86182070915403) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.27125247167942, 0.66522056518503) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.05057208838978, 0.16063055721863) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.92161147272343, -0.5412979662814) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.88016893226660, -1.1607246598436) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.93610096907829, -1.1769350713380) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.03159661761564, -0.6954753280662) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.01849469291683, -0.1575283455216) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.11213871999217, 0.44341038263258) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[252, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.99527599035834, 0.85559322529816) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.87403287278578, 1.00916210932357) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.92463976323619, 0.76734917968979) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.83080416416296, 0.20798340092916) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.84872084908145, -0.4899647534093) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.93999991208532, -1.1658773991644) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.05082792329645, -1.2795153389135) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-1.12069043055638, -0.8204558452373) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.97320402678885, -0.3078952922898) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.90525349575954, 0.28418179753749) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[288, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.80502946506568, 0.77845811479377) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 0]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.50633607253996, 0.89851286875373) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 36]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.48838873442536, 0.76025946112935) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 72]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.41973030207655, 0.32609340242212) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 108]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.52386015896191, -0.2120845250366) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 144]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.69731418799696, -0.8419776200889) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 180]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.78738043867671, -1.1436369487980) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 216]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.80709074532220, -0.9317562808077) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 252]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.66910568870076, -0.5508149563349) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 288]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.58129897583945, 0.08421193036071) map {case (x, y) => x should be (y +- 0.1)}
engine.action(engine.jsonInput("[324, 324]")).asInstanceOf[PFAArray[Double]].toVector zip Vector(-0.56539974304732, 0.73545564385919) map {case (x, y) => x should be (y +- 0.1)}
}
}
| opendatagroup/hadrian | hadrian/src/test/scala/lib/model/reg.scala | Scala | apache-2.0 | 71,243 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.catalyst.expressions.{BoundReference, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}
/**
 * Helper trait for abstracting scan functionality using [[ColumnarBatch]]es.
 *
 * Mixed into physical scan operators; generates Java code that consumes the
 * operator's input either batch-at-a-time (vectorized, when [[supportsBatch]]
 * is true) or row-at-a-time.
 */
private[sql] trait ColumnarBatchScan extends CodegenSupport {

  /** Optional concrete [[ColumnVector]] class names, one per output attribute.
   *  `None` means the generic `ColumnVector` interface is used for every column. */
  def vectorTypes: Option[Seq[String]] = None

  /** Whether the input iterator yields [[ColumnarBatch]]es (vectorized path). */
  protected def supportsBatch: Boolean = true

  /** Whether rows on the row-based path still need conversion to [[UnsafeRow]]. */
  protected def needsUnsafeRowConversion: Boolean = true

  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
    "scanTime" -> SQLMetrics.createTimingMetric(sparkContext, "scan time"))

  /**
   * Generate [[ColumnVector]] expressions for our parent to consume as rows.
   * This is called once per [[ColumnarBatch]].
   *
   * @param ctx       codegen context used for fresh names and type mapping
   * @param columnVar generated variable holding the column vector
   * @param ordinal   generated expression for the row index within the batch
   * @param dataType  Catalyst type of the column
   * @param nullable  whether a null check must be emitted for the column
   */
  private def genCodeColumnVector(
      ctx: CodegenContext,
      columnVar: String,
      ordinal: String,
      dataType: DataType,
      nullable: Boolean): ExprCode = {
    val javaType = ctx.javaType(dataType)
    val value = ctx.getValueFromVector(columnVar, dataType, ordinal)
    // Non-nullable columns skip the isNullAt check entirely (constant "false").
    val isNullVar = if (nullable) { ctx.freshName("isNull") } else { "false" }
    val valueVar = ctx.freshName("value")
    val str = s"columnVector[$columnVar, $ordinal, ${dataType.simpleString}]"
    val code = s"${ctx.registerComment(str)}\\n" + (if (nullable) {
      s"""
        boolean $isNullVar = $columnVar.isNullAt($ordinal);
        $javaType $valueVar = $isNullVar ? ${ctx.defaultValue(dataType)} : ($value);
      """
    } else {
      s"$javaType $valueVar = $value;"
    }).trim
    ExprCode(code, isNullVar, valueVar)
  }

  /**
   * Produce code to process the input iterator as [[ColumnarBatch]]es.
   * This produces an [[UnsafeRow]] for each row in each batch.
   */
  // TODO: return ColumnarBatch.Rows instead
  override protected def doProduce(ctx: CodegenContext): String = {
    // PhysicalRDD always just has one input
    val input = ctx.addMutableState("scala.collection.Iterator", "input",
      v => s"$v = inputs[0];")
    if (supportsBatch) {
      produceBatches(ctx, input)
    } else {
      produceRows(ctx, input)
    }
  }

  /** Emits the vectorized loop: fetch a batch, then iterate its rows locally. */
  private def produceBatches(ctx: CodegenContext, input: String): String = {
    // metrics
    val numOutputRows = metricTerm(ctx, "numOutputRows")
    val scanTimeMetric = metricTerm(ctx, "scanTime")
    val scanTimeTotalNs = ctx.addMutableState(ctx.JAVA_LONG, "scanTime") // init as scanTime = 0

    val columnarBatchClz = classOf[ColumnarBatch].getName
    val batch = ctx.addMutableState(columnarBatchClz, "batch")
    val idx = ctx.addMutableState(ctx.JAVA_INT, "batchIdx") // init as batchIdx = 0

    // One mutable field per output column, cast once per batch to its concrete
    // vector class (either the caller-provided type or the generic interface).
    val columnVectorClzs = vectorTypes.getOrElse(
      Seq.fill(output.indices.size)(classOf[ColumnVector].getName))
    val (colVars, columnAssigns) = columnVectorClzs.zipWithIndex.map {
      case (columnVectorClz, i) =>
        val name = ctx.addMutableState(columnVectorClz, s"colInstance$i")
        (name, s"$name = ($columnVectorClz) $batch.column($i);")
    }.unzip

    // Helper that advances to the next batch (if any), resetting batchIdx and
    // re-binding the column fields; time spent here is charged to scanTime.
    val nextBatch = ctx.freshName("nextBatch")
    val nextBatchFuncName = ctx.addNewFunction(nextBatch,
      s"""
         |private void $nextBatch() throws java.io.IOException {
         |  long getBatchStart = System.nanoTime();
         |  if ($input.hasNext()) {
         |    $batch = ($columnarBatchClz)$input.next();
         |    $numOutputRows.add($batch.numRows());
         |    $idx = 0;
         |    ${columnAssigns.mkString("", "\\n", "\\n")}
         |  }
         |  $scanTimeTotalNs += System.nanoTime() - getBatchStart;
         |}""".stripMargin)

    ctx.currentVars = null
    val rowidx = ctx.freshName("rowIdx")
    val columnsBatchInput = (output zip colVars).map { case (attr, colVar) =>
      genCodeColumnVector(ctx, colVar, rowidx, attr.dataType, attr.nullable)
    }
    val localIdx = ctx.freshName("localIdx")
    val localEnd = ctx.freshName("localEnd")
    val numRows = ctx.freshName("numRows")
    // When interrupted, remember the next row to process so the loop resumes there.
    val shouldStop = if (parent.needStopCheck) {
      s"if (shouldStop()) { $idx = $rowidx + 1; return; }"
    } else {
      "// shouldStop check is eliminated"
    }
    s"""
       |if ($batch == null) {
       |  $nextBatchFuncName();
       |}
       |while ($batch != null) {
       |  int $numRows = $batch.numRows();
       |  int $localEnd = $numRows - $idx;
       |  for (int $localIdx = 0; $localIdx < $localEnd; $localIdx++) {
       |    int $rowidx = $idx + $localIdx;
       |    ${consume(ctx, columnsBatchInput).trim}
       |    $shouldStop
       |  }
       |  $idx = $numRows;
       |  $batch = null;
       |  $nextBatchFuncName();
       |}
       |$scanTimeMetric.add($scanTimeTotalNs / (1000 * 1000));
       |$scanTimeTotalNs = 0;
     """.stripMargin
  }

  /** Emits the row-at-a-time loop over an iterator of InternalRows. */
  private def produceRows(ctx: CodegenContext, input: String): String = {
    val numOutputRows = metricTerm(ctx, "numOutputRows")
    val row = ctx.freshName("row")
    ctx.INPUT_ROW = row
    ctx.currentVars = null
    // Always provide `outputVars`, so that the framework can help us build unsafe row if the input
    // row is not unsafe row, i.e. `needsUnsafeRowConversion` is true.
    val outputVars = output.zipWithIndex.map { case (a, i) =>
      BoundReference(i, a.dataType, a.nullable).genCode(ctx)
    }
    val inputRow = if (needsUnsafeRowConversion) null else row
    s"""
       |while ($input.hasNext()) {
       |  InternalRow $row = (InternalRow) $input.next();
       |  $numOutputRows.add(1);
       |  ${consume(ctx, outputVars, inputRow).trim}
       |  if (shouldStop()) return;
       |}
     """.stripMargin
  }
}
| esi-mineset/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/ColumnarBatchScan.scala | Scala | apache-2.0 | 6,772 |
import sbt._
import sbt.Keys._
import sbt.Package._
import sbtrelease._
import sbtrelease.ReleasePlugin.autoImport._
import sbtrelease.ReleaseStateTransformations._
import sbtrelease.Utilities._
import scala.util.Random
object ReleaseExtras {

  /** Custom sbt keys used by the release steps defined below. */
  object ReleaseExtrasKeys {
    val releaseBranchName = taskKey[String]("The name of the branch")

    /** Settings describing the companion `scalameter-examples` repository. */
    object examples {
      val repo = settingKey[String]("Remote location of scalameter-examples")
      val tag = settingKey[String]("Tag name on ScalaMeter release")
      val tagComment = settingKey[String]("Tag comment on ScalaMeter release")
      val commitMessage = settingKey[String]("Commit message on ScalaMeter update")
      val scalaMeterVersionFile = settingKey[String]("Name of version file for ScalaMeter artifact")
      val scalaMeterVersionFileContent = settingKey[String]("Content of version file for ScalaMeter artifact")
    }
  }

  import ReleaseExtrasKeys._

  /** Adds git commands missing from sbt-release's `Git` wrapper. */
  implicit class RichGit(git: Git) {
    def checkout(name: String): ProcessBuilder = git.cmd("checkout", name)
    // "-B" resets an existing branch to `from`; "-b" would fail if it exists.
    def checkoutNew(name: String, from: String, force: Boolean = false): ProcessBuilder =
      git.cmd("checkout", if (force) "-B" else "-b", name, from)
    def pushBranch(branch: String, remote: String): ProcessBuilder =
      git.cmd("push", "-u", remote, branch)
  }

  /** Extracts the configured VCS from the build state; aborts unless it is git. */
  private def git(st: State): Git = {
    st.extract.get(releaseVcs).collect {
      case git: Git => git
    }.getOrElse(sys.error("Aborting release. Working directory is not a repository of a Git."))
  }

  /** This release step involves following actions:
   *  - create version branch from current branch setting same remote
   *  - checkout version branch and push it to remote
   *  - publish artifacts using predefined sbt-release step
   *  - push changes of versioned branch to upstream using predefined sbt-release step
   *  - checkout back to former current branch
   */
  lazy val branchRelease: ReleaseStep = ReleaseStep(
    action = branchReleaseAction,
    // Reuse the pre-flight checks of the two steps this one delegates to.
    check = st => pushChanges.check(publishArtifacts.check(st)),
    enableCrossBuild = publishArtifacts.enableCrossBuild || pushChanges.enableCrossBuild
  )

  private lazy val branchReleaseAction = { st: State =>
    val currentBranch = git(st).currentBranch
    val currentBranchRemote = git(st).trackingRemote
    // Compute the release-branch name and switch to it (resetting if it exists).
    val (branchState, branch) = st.extract.runTask(releaseBranchName, st)
    git(branchState).checkoutNew(branch, from = currentBranch, force = true) !! branchState.log
    if (!git(branchState).hasUpstream)
      git(branchState).pushBranch(branch, remote = currentBranchRemote) !! branchState.log

    // add manifest attribute 'Vcs-Release-Branch' to current settings
    val withManifestAttributeState = reapply(Seq[Setting[_]](
      packageOptions += ManifestAttributes("Vcs-Release-Branch" -> branch)
    ), branchState)

    val publishArtifactsState = publishArtifacts.action(withManifestAttributeState)
    val pushChangesState = pushChanges.action(publishArtifactsState)

    // Restore the branch the release started from.
    git(pushChangesState).checkout(currentBranch) !! pushChangesState.log
    pushChangesState
  }

  /** This release step involves following actions:
   *  - clone scalameter-examples from `examples.repo`
   *  - bump up version in all examples to ScalaMeter release version and commit changes
   *  - tag scalameter-examples with ScalaMeter release version
   *  - bump up versions in all examples to new ScalaMeter snapshot version and commit changes
   *  - push changes to `examples.repo`
   */
  lazy val bumpUpVersionInExamples: ReleaseStep = { st: State =>
    val repo = st.extract.get(examples.repo)
    val (releaseV, nextV) = st.get(ReleaseKeys.versions).getOrElse(
      sys.error("No versions are set! Was this release part executed before inquireVersions?")
    )
    val tag = st.extract.get(examples.tag).format(releaseV)
    val comment = st.extract.get(examples.tagComment).format(releaseV)
    val commitMsg = st.extract.get(examples.commitMessage)
    val artifactVersionFile = st.extract.get(examples.scalaMeterVersionFile)
    val artifactVersionFileContent = st.extract.get(examples.scalaMeterVersionFileContent)

    // An "example" is any subdirectory containing the ScalaMeter version file.
    val exampleDirFilter = new FileFilter {
      def accept(file: File): Boolean = IO.listFiles(file).exists(_.getName == artifactVersionFile)
    }

    // Writes `version` into every example's version file; returns the written paths.
    def setVersions(in: File, version: String): Seq[String] = {
      IO.listFiles(in, exampleDirFilter).map { exampleDir =>
        val versionFile = new File(exampleDir, artifactVersionFile)
        st.log.info(s"Writing version '$version' to ${in.getName}/${versionFile.getName}")
        IO.write(versionFile, artifactVersionFileContent.format(version))
        versionFile.getAbsolutePath
      }
    }

    // Pushes commits/tags after asking the user (default answer is "yes").
    def pushChanges(vc: Git) = {
      val defaultChoice = extractDefault(st, "y")
      if (vc.hasUpstream) {
        defaultChoice orElse SimpleReader.readLine("Push changes to the remote repository (y/n)? [y] ") match {
          case Yes() | Some("") =>
            st.log.info("git push sends its console output to standard error, which will cause the next few lines to be marked as [error].")
            vc.pushChanges !! st.log
          case _ => st.log.warn("Remember to push the changes yourself!")
        }
      } else {
        st.log.info(s"Changes were NOT pushed, because no upstream branch is configured for the local branch [${vc.currentBranch}]")
      }
    }

    IO.withTemporaryDirectory { tmpDir =>
      st.log.info(s"Starting cloning $repo to $tmpDir")
      Process("git" :: "clone" :: repo :: "." :: Nil, tmpDir) !! st.log
      val examplesGit = new Git(tmpDir)
      // First commit + tag the release version...
      st.log.info(s"Setting release version '$releaseV'")
      val filesWithReleaseV = setVersions(tmpDir, releaseV)
      examplesGit.add(filesWithReleaseV: _*) !! st.log
      examplesGit.commit(commitMsg.format(releaseV), sign = false) !! st.log
      examplesGit.tag(name = tag, comment = comment, sign = false) !! st.log
      // ...then commit the follow-up snapshot version on top.
      st.log.info(s"Setting snapshot version '$nextV'")
      val filesWithNextV = setVersions(tmpDir, nextV)
      examplesGit.add(filesWithNextV: _*) !! st.log
      examplesGit.commit(commitMsg.format(nextV), sign = false) !! st.log
      st.log.info(s"Starting pushing changes to $repo")
      pushChanges(examplesGit)
    }
    st
  }
}
| storm-enroute/scalameter | project/ReleaseExtras.scala | Scala | bsd-3-clause | 6,269 |
package kofre.decompose.interfaces
import kofre.decompose.*
import kofre.decompose.CRDTInterface.{DeltaMutator, DeltaQuery}
import kofre.decompose.DotStore.DotFun
object LWWRegisterInterface {
  // An LWW register is an MVRegister whose stored values carry a timestamp.
  type State[A] = MVRegisterInterface.State[TimedVal[A]]

  trait LWWRegisterCompanion {
    type State[A] = LWWRegisterInterface.State[A]
    type Embedded[A] = DotFun[TimedVal[A]]
  }

  /** Reads the value: merges any concurrently written TimedVals via their
   *  lattice (last-writer-wins per the class scaladoc) and unwraps the payload. */
  def read[A]: DeltaQuery[State[A], Option[A]] =
    MVRegisterInterface.read[TimedVal[A]].andThen(s => s.reduceOption(UIJDLattice[TimedVal[A]].merge).map(_.value))

  /** Writes `v` stamped with the writing replica's id. */
  def write[A](v: A): DeltaMutator[State[A]] =
    (replicaID, state) => MVRegisterInterface.write(TimedVal(v, replicaID)).apply(replicaID, state)

  /** Applies `f` to the current value, if any; an empty register produces the
   *  bottom (empty) delta. */
  def map[A](f: A => A): DeltaMutator[State[A]] = (replicaID, state) =>
    read[A].apply(state).map(f) match {
      case None => UIJDLattice[State[A]].bottom
      case Some(v) => write(v).apply(replicaID, state)
    }

  /** Removes all stored values, delegating to the underlying MVRegister. */
  def clear[A](): DeltaMutator[State[A]] = MVRegisterInterface.clear()
}
/** An LWW (Last Writer Wins) is a Delta CRDT modeling a register.
 *
 * If two concurrent write operations occur, the resulting LWW takes on the value
 * of the write operation with the later timestamp.
 *
 * All operations delegate to the corresponding functions in the companion object.
 */
abstract class LWWRegisterInterface[A, Wrapper]
  extends CRDTInterface[LWWRegisterInterface.State[A], Wrapper] {

  /** Current value, or None when the register is empty (e.g. after clear()). */
  def read: Option[A] = query(LWWRegisterInterface.read)

  /** Overwrites the register with `v`. */
  def write(v: A): Wrapper = mutate(LWWRegisterInterface.write(v))

  /** Rewrites the current value through `f` (empty register stays unchanged). */
  def map(f: A => A): Wrapper = mutate(LWWRegisterInterface.map(f))

  /** Empties the register. */
  def clear(): Wrapper = mutate(LWWRegisterInterface.clear())
}
| guidosalva/REScala | Code/Extensions/Kofre/src/main/scala/kofre/decompose/interfaces/LWWRegisterInterface.scala | Scala | apache-2.0 | 1,577 |
package pl.newicom.dddd.view.sql
import pl.newicom.dddd.Eventsourced
import pl.newicom.dddd.view.ViewUpdateConfig
/**
 * Configuration of a single SQL view update: which view is maintained, which
 * event-sourced stream feeds it, and the projections applied to its events.
 *
 * Note: stray dataset metadata that had been fused onto the `extends` clause
 * (breaking compilation) has been removed.
 *
 * @param viewName    name of the maintained view
 * @param eventSource event-sourced stream the view is built from
 * @param projections one or more [[Projection]]s applied for this view
 */
case class SqlViewUpdateConfig(
    override val viewName: String,
    override val eventSource: Eventsourced,
    projections: Projection*)
  extends ViewUpdateConfig
/**
* Copyright (C) 2015-2016 Philipp Haller
*/
package lacasa.run
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.util.control.ControlThrowable
/** Mutable message carrying an integer array; serves as the boxed payload
 *  type in the spec below. `arr` is default-initialized (null) until set. */
class Message {
  var arr: Array[Int] = _
}
@RunWith(classOf[JUnit4])
class Stack1Spec {
  import lacasa.Box._

  /** Creating a box and mutating its contents via `open` must be permitted.
   *  LaCasa completes the boxed computation by throwing a ControlThrowable,
   *  so reaching the catch branch is the expected (successful) outcome. */
  @Test
  def test1(): Unit = {
    println(s"run.Stack1Spec.test1")
    try {
      mkBox[Message] { packed =>
        implicit val access = packed.access
        packed.box open { msg =>
          msg.arr = Array(1, 2, 3, 4)
        }
      }
    } catch {
      case ct: ControlThrowable =>
        // Acknowledge the intercepted control-flow throwable to the framework.
        uncheckedCatchControl
        // Intentionally vacuous assertion: merely documents that reaching this
        // handler counts as success.
        assert(true, "this should not fail!")
    }
  }
}
| phaller/lacasa | plugin/src/test/scala/lacasa/run/Stack1.scala | Scala | bsd-3-clause | 698 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.extractor.bytes
import io.gatling.commons.validation._
import io.gatling.core.check._
import io.gatling.core.check.extractor._
import io.gatling.core.session._
/** Phantom type tagging checks that operate on the raw response body bytes. */
trait BodyBytesCheckType

object BodyBytesCheckBuilder {

  /** Check builder whose extractor yields the prepared byte array unchanged. */
  val BodyBytes = {
    val extractor = new Extractor[Array[Byte], Array[Byte]] with SingleArity {
      val name = "bodyBytes"
      // Identity extraction: the whole body is the extracted value.
      def apply(prepared: Array[Byte]) = Some(prepared).success
    }.expressionSuccess
    new DefaultFindCheckBuilder[BodyBytesCheckType, Array[Byte], Array[Byte]](extractor)
  }
}
| timve/gatling | gatling-core/src/main/scala/io/gatling/core/check/extractor/bytes/BodyBytesCheckBuilder.scala | Scala | apache-2.0 | 1,181 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes
import org.apache.calcite.rel.RelWriter
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.JoinRelType
import org.apache.calcite.rex.RexNode
import org.apache.flink.table.api.TableException
import org.apache.flink.table.plan.util.PythonUtil.containsPythonCall
import scala.collection.JavaConverters._
/** Shared validation and string-rendering helpers for join RelNodes. The
 *  rendered strings appear in plan explanations, so their exact format matters. */
trait CommonJoin {

  /** Rejects join predicates containing Python UDF calls; per the error text,
   *  such UDFs are only supported in inner equi-join conditions. */
  protected def validatePythonFunctionInJoinCondition(joinCondition: RexNode): Unit = {
    if (containsPythonCall(joinCondition)) {
      throw new TableException("Only inner join condition with equality predicates supports the " +
        "Python UDF taking the inputs from the left table and the right table at the same time, " +
        "e.g., ON T1.id = T2.id && pythonUdf(T1.a, T2.b)")
    }
  }

  /** Comma-separated list of all field names of the given row type. */
  private[flink] def joinSelectionToString(inputType: RelDataType): String = {
    inputType.getFieldNames.asScala.toList.mkString(", ")
  }

  /** Renders the join predicate against the field names of `inputType` using
   *  the caller-supplied expression stringifier. */
  private[flink] def joinConditionToString(
      inputType: RelDataType,
      joinCondition: RexNode,
      expression: (RexNode, List[String], Option[List[RexNode]]) => String): String = {
    val inFields = inputType.getFieldNames.asScala.toList
    expression(joinCondition, inFields, None)
  }

  /** Display name for the join type. Covers the six JoinRelType variants
   *  handled here; any other value would raise a MatchError. */
  private[flink] def joinTypeToString(joinType: JoinRelType) = {
    joinType match {
      case JoinRelType.INNER => "InnerJoin"
      case JoinRelType.LEFT => "LeftOuterJoin"
      case JoinRelType.RIGHT => "RightOuterJoin"
      case JoinRelType.FULL => "FullOuterJoin"
      case JoinRelType.SEMI => "SemiJoin"
      case JoinRelType.ANTI => "AntiJoin"
    }
  }

  /** Like [[joinToString]] but prefixed with "Temporal". */
  private[flink] def temporalJoinToString(
      inputType: RelDataType,
      joinCondition: RexNode,
      joinType: JoinRelType,
      expression: (RexNode, List[String], Option[List[RexNode]]) => String): String = {
    "Temporal" + joinToString(inputType, joinCondition, joinType, expression)
  }

  /** Full description, e.g. `InnerJoin(where: (...), join: (...))`. */
  private[flink] def joinToString(
      inputType: RelDataType,
      joinCondition: RexNode,
      joinType: JoinRelType,
      expression: (RexNode, List[String], Option[List[RexNode]]) => String): String = {
    s"${joinTypeToString(joinType)}" +
      s"(where: (${joinConditionToString(inputType, joinCondition, expression)}), " +
      s"join: (${joinSelectionToString(inputType)}))"
  }

  /** Adds the where/join/joinType items to a RelWriter for EXPLAIN output. */
  private[flink] def joinExplainTerms(
      pw: RelWriter,
      inputType: RelDataType,
      joinCondition: RexNode,
      joinType: JoinRelType,
      expression: (RexNode, List[String], Option[List[RexNode]]) => String): RelWriter = {
    pw.item("where", joinConditionToString(inputType, joinCondition, expression))
      .item("join", joinSelectionToString(inputType))
      .item("joinType", joinTypeToString(joinType))
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/CommonJoin.scala | Scala | apache-2.0 | 3,555 |
package eventstore.examples
import akka.actor._
import eventstore.cluster.ClusterDiscovererActor.{ GetAddress, Address }
import eventstore.cluster.GossipSeedsOrDns.GossipSeeds
import eventstore.cluster.{ ClusterSettings, ClusterInfo, ClusterDiscovererActor }
import eventstore.EsInt
/** Standalone example: discovers the best node of a local three-node cluster. */
object DiscoverCluster extends App {
  implicit val system = ActorSystem()

  // Gossip seeds of the three local cluster nodes (ports 1113/2113/3113);
  // `"host" :: port` builds an address via the EsInt import above.
  val settings = ClusterSettings(GossipSeeds(
    "127.0.0.1" :: 1113,
    "127.0.0.1" :: 2113,
    "127.0.0.1" :: 3113))

  val discoverer = system.actorOf(ClusterDiscovererActor.props(settings, ClusterInfo.futureFunc), "discoverer")
  system.actorOf(Props(classOf[DiscoverCluster], discoverer))
}
/**
 * Asks the cluster discoverer for the best node's address on startup and logs
 * the answer it receives.
 *
 * Note: stray dataset metadata that had been fused onto the closing brace
 * (breaking compilation) has been removed.
 *
 * @param discoverer reference to the [[ClusterDiscovererActor]] to query
 */
class DiscoverCluster(discoverer: ActorRef) extends Actor with ActorLogging {
  // Kick off discovery as soon as this actor starts.
  override def preStart() = discoverer ! GetAddress()

  def receive = {
    case Address(bestNode) => log.info("Best Node: {}", bestNode)
  }
}
package mesosphere.marathon.state
import scala.concurrent.Future
/**
 * The entity store is mostly syntactic sugar around the PersistentStore.
 * The main idea is to handle serializing/deserializing of specific entities.
 * @tparam T the specific type of entities that are handled by this specific store.
 */
trait EntityStore[T] {
  type Deserialize = () => T // thunk producing the deserialized T value
  type Update = Deserialize => T // takes the read thunk and returns the (possibly modified) T

  /** Loads the entity stored under `key`, if present. */
  def fetch(key: String): Future[Option[T]]

  /** Stores `value` under `key`, replacing any previous entity (implemented
   *  as a modify that ignores the current value). */
  def store(key: String, value: T): Future[T] = modify(key)(_ => value)

  /** Applies `update` to the (lazily read) entity under `key` and returns
   *  the resulting value. */
  def modify(key: String)(update: Update): Future[T]

  /**
   * Delete entity with given id.
   * Success: the file was deleted (true) or not existent (false)
   * Failure: the file could not be deleted
   * @param key the name of the entity.
   * @return result, whether the file was existent or not.
   */
  def expunge(key: String): Future[Boolean]

  /** Names (keys) of all stored entities. */
  def names(): Future[Seq[String]]
}
| EasonYi/marathon | src/main/scala/mesosphere/marathon/state/EntityStore.scala | Scala | apache-2.0 | 1,009 |
package breeze.stats.distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.optimize.DiffFunction
import breeze.linalg._
import breeze.math.{MutablizingAdaptor, VectorSpace, TensorSpace, MutableCoordinateSpace}
import breeze.numerics._
import breeze.numerics
import breeze.storage.DefaultArrayValue
/**
 * Represents a Multinomial distribution over elements.
 * You can make a distribution over any [[breeze.linalg.QuasiTensor]], which includes
 * DenseVectors and Counters.
 *
 * TODO: I should probably rename this to Discrete or something, since it only handles
 * one draw.
 *
 * @author dlwh
 */
case class Multinomial[T,I](params: T)(implicit ev: T=>QuasiTensor[I, Double], sumImpl: breeze.linalg.sum.Impl[T, Double], rand: RandBasis=Rand) extends DiscreteDistr[I] {
  // Total (unnormalized) mass; probabilities are weights divided by this.
  val sum = breeze.linalg.sum(params)
  require(sum != 0.0, "There's no mass!")

  // check rep: every active weight must be non-negative
  for ((k,v) <- params.activeIterator) {
    if (v < 0) {
      throw new IllegalArgumentException("Multinomial has negative mass at index "+k)
    }
  }

  /** Samples one index by inverse CDF: draw uniform mass, then walk the active
   *  weights until it is exhausted. */
  def draw():I = {
    var prob = rand.uniform.get() * sum
    assert(!prob.isNaN, "NaN Probability!")
    for((i,w) <- params.activeIterator) {
      prob -= w
      // Non-local return out of the foreach closure once enough mass is consumed.
      if(prob <= 0) return i
    }
    // Reached only if residual mass remains (e.g. rounding): first active key.
    params.activeKeysIterator.next()
  }

  /** Normalized probability of `e`. */
  def probabilityOf(e : I) = params(e) / sum

  /** Raw weight of `e`, without dividing by the total mass. */
  override def unnormalizedProbabilityOf(e:I) = params(e)

  override def toString = ev(params).activeIterator.mkString("Multinomial{",",","}")

  /** Expectation of `f` under this distribution: sum over active k of p(k)*f(k),
   *  accumulated in the given vector space. */
  def expectedValue[U](f: I=>U)(implicit vs: VectorSpace[U, Double]) = {
    // Wrap the (possibly immutable) vector space so axpy can accumulate in place.
    val wrapped = MutablizingAdaptor.ensureMutable(vs)
    import wrapped.mutaVspace._
    import wrapped._
    var acc: Wrapper = null.asInstanceOf[Wrapper]
    for ( (k, v) <- params.activeIterator) {
      if(acc == null) {
        acc = wrap(f(k)) * (v/sum)
      } else {
        axpy(v/sum, wrap(f(k)), acc)
      }
    }
    // Since sum != 0 there is at least one active entry, so acc was initialized.
    assert(acc != null)
    unwrap(acc)
  }
}
/**
 * Provides routines to create Multinomials
 * @author dlwh
 */
object Multinomial {

  /** Exponential-family view of the Multinomial with a Dirichlet conjugate prior. */
  class ExpFam[T,I](exemplar: T)(implicit space: TensorSpace[T, I, Double], dav: DefaultArrayValue[T]) extends ExponentialFamily[Multinomial[T,I],I] with HasConjugatePrior[Multinomial[T,I],I] {
    import space._
    type ConjugatePrior = Dirichlet[T,I]
    val conjugateFamily = new Dirichlet.ExpFam[T,I](exemplar)

    /** Predictive distribution for a Dirichlet parameter is a Polya. */
    def predictive(parameter: conjugateFamily.Parameter) = new Polya(parameter)

    /** Dirichlet posterior: add one count per observed element to the prior. */
    def posterior(prior: conjugateFamily.Parameter, evidence: TraversableOnce[I]) = {
      val copy : T = space.copy(prior)
      for( e <- evidence) {
        copy(e) += 1.0
      }
      copy
    }

    // Parameters live in log space: mle returns log counts, distribution exps them.
    type Parameter = T

    /** Sufficient statistics are (weighted) per-element counts. */
    case class SufficientStatistic(counts: T) extends breeze.stats.distributions.SufficientStatistic[SufficientStatistic] {
      def +(tt: SufficientStatistic) = SufficientStatistic(counts + tt.counts)
      def *(w: Double) = SufficientStatistic(counts * w)
    }

    def emptySufficientStatistic = SufficientStatistic(zeros(exemplar))

    /** A single observation contributes a count of 1.0 at its index. */
    def sufficientStatisticFor(t: I) = {
      val r = zeros(exemplar)
      r(t) = 1.0
      SufficientStatistic(r)
    }

    /** Maximum-likelihood parameter (in log space): log of the counts. */
    def mle(stats: SufficientStatistic) = log(stats.counts)

    /** Negative log-likelihood of log-parameters `x`, with its gradient. */
    def likelihoodFunction(stats: SufficientStatistic) = new DiffFunction[T] {
      def calculate(x: T) = {
        val nn: T = logNormalize(x)
        val lp = nn dot stats.counts
        val mysum = sum(stats.counts)
        val exped = numerics.exp(nn)
        // Gradient of -lp: normalized probabilities scaled by total count, minus counts.
        val grad = exped * mysum - stats.counts
        (-lp,grad)
      }
    }

    /** Converts log-space parameters back into a Multinomial via exp. */
    def distribution(p: Parameter) = {
      new Multinomial(numerics.exp(p))
    }
  }
}
| wavelets/breeze | src/main/scala/breeze/stats/distributions/Multinomial.scala | Scala | apache-2.0 | 4,136 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import inc._
import java.io.File
import compiler.{AnalyzingCompiler, CompilerArguments, JavaCompiler}
import classpath.ClasspathUtilities
import classfile.Analyze
import xsbti.api.Source
import xsbti.AnalysisCallback
import inc.Locate.DefinesClass
import CompileSetup._
import CompileOrder.{JavaThenScala, Mixed, ScalaThenJava}
import sbinary.DefaultProtocol.{ immutableMapFormat, immutableSetFormat, StringFormat }
/** Immutable bundle of all inputs for one incremental compilation run: the
 *  sources and classpath to compile, the previous analysis/setup used for
 *  incremental diffing, lookup functions, the error limit, and the Scala and
 *  Java compiler instances. */
final class CompileConfiguration(val sources: Seq[File], val classpath: Seq[File],
  val previousAnalysis: Analysis, val previousSetup: Option[CompileSetup], val currentSetup: CompileSetup, val getAnalysis: File => Option[Analysis], val definesClass: DefinesClass,
  val maxErrors: Int, val compiler: AnalyzingCompiler, val javac: JavaCompiler)
/**
 * High-level driver for incremental compilation of mixed Scala/Java sources.
 * The resulting dependency `Analysis` is persisted under `cacheDirectory` and
 * reused across runs to decide what needs recompiling.
 */
class AggressiveCompile(cacheDirectory: File)
{
	/**
	 * Entry point: builds the `CompileSetup` for this run and delegates to `compile1`.
	 * Defaults give a plain mixed-order compile with up to 100 reported errors.
	 * Returns the (possibly reused) `Analysis` for the given sources/classpath.
	 */
	def apply(compiler: AnalyzingCompiler, javac: JavaCompiler, sources: Seq[File], classpath: Seq[File], outputDirectory: File, options: Seq[String] = Nil, javacOptions: Seq[String] = Nil, analysisMap: Map[File, Analysis] = Map.empty, definesClass: DefinesClass = Locate.definesClass _, maxErrors: Int = 100, compileOrder: CompileOrder.Value = Mixed, skip: Boolean = false)(implicit log: Logger): Analysis =
	{
		val setup = new CompileSetup(outputDirectory, new CompileOptions(options, javacOptions), compiler.scalaInstance.actualVersion, compileOrder)
		compile1(sources, classpath, setup, store, analysisMap, definesClass, compiler, javac, maxErrors, skip)
	}
	// Prepends the compiler's boot classpath to the finished user classpath.
	def withBootclasspath(args: CompilerArguments, classpath: Seq[File]): Seq[File] =
		args.bootClasspath ++ args.finishClasspath(classpath)
	/**
	 * Loads the previous (analysis, setup) pair from `store`; when `skip` is set the
	 * previous analysis is returned untouched. Otherwise runs `compile2` and persists
	 * the new analysis only if the compile actually modified anything.
	 */
	def compile1(sources: Seq[File], classpath: Seq[File], setup: CompileSetup, store: AnalysisStore, analysis: Map[File, Analysis], definesClass: DefinesClass, compiler: AnalyzingCompiler, javac: JavaCompiler, maxErrors: Int, skip: Boolean)(implicit log: Logger): Analysis =
	{
		val (previousAnalysis, previousSetup) = extract(store.get())
		if(skip)
			previousAnalysis
		else {
			val config = new CompileConfiguration(sources, classpath, previousAnalysis, previousSetup, setup, analysis.get _, definesClass, maxErrors, compiler, javac)
			val (modified, result) = compile2(config)
			if(modified)
				store.set(result, setup)
			result
		}
	}
	/**
	 * Core of the incremental compile: builds the compile function `compile0` that
	 * handles Scala/Java ordering, decides whether the previous analysis is still
	 * valid (same setup) or must be pruned, and hands everything to IncrementalCompile.
	 * Returns (anything changed, resulting analysis).
	 */
	def compile2(config: CompileConfiguration)(implicit log: Logger, equiv: Equiv[CompileSetup]): (Boolean, Analysis) =
	{
		import config._
		import currentSetup._
		val absClasspath = classpath.map(_.getCanonicalFile)
		// NOTE(review): apiOption appears unused in this method — candidate for removal.
		val apiOption= (api: Either[Boolean, Source]) => api.right.toOption
		val cArgs = new CompilerArguments(compiler.scalaInstance, compiler.cp)
		val searchClasspath = explicitBootClasspath(options.options) ++ withBootclasspath(cArgs, absClasspath)
		val entry = Locate.entry(searchClasspath, definesClass)
		// Compiles the requested subset of sources, respecting the configured language order.
		val compile0 = (include: Set[File], callback: AnalysisCallback) => {
			IO.createDirectory(outputDirectory)
			val incSrc = sources.filter(include)
			val (javaSrcs, scalaSrcs) = incSrc partition javaOnly
			logInputs(log, javaSrcs.size, scalaSrcs.size, outputDirectory)
			def compileScala() =
				if(!scalaSrcs.isEmpty)
				{
					// In Mixed order the Scala compiler also sees the Java sources so
					// cross-language references resolve in a single pass.
					val sources = if(order == Mixed) incSrc else scalaSrcs
					val arguments = cArgs(sources, absClasspath, outputDirectory, options.options)
					compiler.compile(arguments, callback, maxErrors, log)
				}
			def compileJava() =
				if(!javaSrcs.isEmpty)
				{
					import Path._
					val loader = ClasspathUtilities.toLoader(searchClasspath)
					// Feeds the public API extracted from compiled classes back into the analysis callback.
					def readAPI(source: File, classes: Seq[Class[_]]) { callback.api(source, ClassToAPI(classes)) }
					Analyze(outputDirectory, javaSrcs, log)(callback, loader, readAPI) {
						javac(javaSrcs, absClasspath, outputDirectory, options.javacOptions)
					}
				}
			if(order == JavaThenScala) { compileJava(); compileScala() } else { compileScala(); compileJava() }
		}
		val sourcesSet = sources.toSet
		// A changed setup (options, compiler version, order, output dir) invalidates the
		// previous analysis, so prune it down to the current source set before reuse.
		val analysis = previousSetup match {
			case Some(previous) if equiv.equiv(previous, currentSetup) => previousAnalysis
			case _ => Incremental.prune(sourcesSet, previousAnalysis)
		}
		IncrementalCompile(sourcesSet, entry, compile0, analysis, getAnalysis, outputDirectory, log)
	}
	// Logs a one-line summary of how many Scala/Java sources are being compiled and where.
	private[this] def logInputs(log: Logger, javaCount: Int, scalaCount: Int, out: File)
	{
		val scalaMsg = Util.counted("Scala source", "", "s", scalaCount)
		val javaMsg = Util.counted("Java source", "", "s", javaCount)
		val combined = scalaMsg ++ javaMsg
		if(!combined.isEmpty)
			log.info(combined.mkString("Compiling ", " and ", " to " + out.getAbsolutePath + "..."))
	}
	// Normalizes the cached pair: absent cache becomes (Analysis.Empty, None).
	private def extract(previous: Option[(Analysis, CompileSetup)]): (Analysis, Option[CompileSetup]) =
		previous match
		{
			case Some((an, setup)) => (an, Some(setup))
			case None => (Analysis.Empty, None)
		}
	// A source is treated as Java purely by file extension.
	def javaOnly(f: File) = f.getName.endsWith(".java")
	// Extracts the single classpath argument following an explicit boot-classpath option, if any.
	private[this] def explicitBootClasspath(options: Seq[String]): Seq[File] =
		options.dropWhile(_ != CompilerArguments.BootClasspathOption).drop(1).take(1).headOption.toList.flatMap(IO.parseClasspath)
	import AnalysisFormats._
	// Analysis persistence for this cache directory: file-backed, memoized, synchronized,
	// and shared process-wide via AggressiveCompile.staticCache.
	val store = AggressiveCompile.staticCache(cacheDirectory, AnalysisStore.sync(AnalysisStore.cached(FileBasedStore(cacheDirectory))))
}
private object AggressiveCompile
{
	import collection.mutable
	import java.lang.ref.{Reference,SoftReference}
	/**
	 * Process-wide cache of one AnalysisStore per cache file, held through
	 * SoftReferences so the stores can be reclaimed under memory pressure.
	 */
	private[this] val cache = new collection.mutable.HashMap[File, Reference[AnalysisStore]]
	/**
	 * Returns the cached store for `file`, or forces `backing`, records it under a
	 * fresh SoftReference, and returns it. A cleared reference counts as a miss.
	 */
	private def staticCache(file: File, backing: => AnalysisStore): AnalysisStore =
		synchronized {
			val live = cache.get(file).flatMap(ref => Option(ref.get))
			live match {
				case Some(store) =>
					store
				case None =>
					val fresh = backing
					cache.put(file, new SoftReference(fresh))
					fresh
			}
		}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import scala.util.Random
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.spark.SparkContext
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
/**
 * :: DeveloperApi ::
 * Generate sample data used for SVM. This class generates uniform random values
 * for the features and adds Gaussian noise with weight 0.1 to generate labels.
 */
@DeveloperApi
@Since("0.8.0")
object SVMDataGenerator {
  @Since("0.8.0")
  def main(args: Array[String]) {
    if (args.length < 2) {
      // scalastyle:off println
      println("Usage: SVMGenerator " +
        "<master> <output_dir> [num_examples] [num_features] [num_partitions]")
      // scalastyle:on println
      System.exit(1)
    }
    // Required arguments.
    val sparkMaster = args(0)
    val outputPath = args(1)
    // Optional arguments; absent entries fall back to defaults via lift.
    val nexamples = args.lift(2).map(_.toInt).getOrElse(1000)
    val nfeatures = args.lift(3).map(_.toInt).getOrElse(2)
    val parts = args.lift(4).map(_.toInt).getOrElse(2)
    val sc = new SparkContext(sparkMaster, "SVMGenerator")
    // Fixed seed so the "true" separating hyperplane is reproducible across runs.
    val globalRnd = new Random(94720)
    val trueWeights = Array.fill[Double](nfeatures)(globalRnd.nextGaussian())
    // Each example gets its own deterministic RNG (seeded by its index) so the
    // generated data is independent of partitioning.
    val data: RDD[LabeledPoint] = sc.parallelize(0 until nexamples, parts).map { idx =>
      val rnd = new Random(42 + idx)
      // Features uniform in [-1, 1); label is the sign of the noisy dot product.
      val x = Array.fill[Double](nfeatures)(rnd.nextDouble() * 2.0 - 1.0)
      val yD = blas.ddot(trueWeights.length, x, 1, trueWeights, 1) + rnd.nextGaussian() * 0.1
      val y = if (yD < 0) 0.0 else 1.0
      LabeledPoint(y, Vectors.dense(x))
    }
    data.saveAsTextFile(outputPath)
    sc.stop()
  }
}
| bOOm-X/spark | mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala | Scala | apache-2.0 | 2,595 |
package reactivemongo.api
// TODO: Remove after release 1.0
// Intentionally empty: presumably a cross-Scala-version compatibility shim with
// nothing to add for this Scala version — verify against the other version-specific
// source trees before removing.
private[api] trait PackageCompat {}
| ornicar/ReactiveMongo | driver/src/main/scala-2.13/api/PackageCompat.scala | Scala | apache-2.0 | 97 |
package edu.illinois.wala.ssa
import com.ibm.wala.ipa.cha.IClassHierarchy
import com.ibm.wala.ssa.SSAFieldAccessInstruction
import edu.illinois.wala.Facade._
import edu.illinois.wala.classLoader.ArrayContents
import com.ibm.wala.ssa.SSAGetInstruction
import com.ibm.wala.types.TypeReference
import com.ibm.wala.ssa.SSAInvokeInstruction
import com.ibm.wala.ssa.SSAArrayLoadInstruction
import com.ibm.wala.ssa.SSAArrayLengthInstruction
/** Zero-allocation enrichment for `PutI` (field-write instructions). */
class RichPutI(val i: PutI) extends AnyVal {
  // The value number being stored by this put instruction.
  def v = V(i.getVal())
}
/** Zero-allocation enrichment for `GetI` (field-read instructions). */
class RichGetI(val i: GetI) extends AnyVal {
  // The value number defined by this get instruction (the loaded value).
  def d = V(i.getDef())
}
/** Zero-allocation enrichment for `InvokeI` (call instructions). */
class RichInvokeI(val i: InvokeI) extends AnyVal {
  // Resolves the declared call target against the class hierarchy.
  // NOTE(review): WALA's resolveMethod may yield null when unresolvable — confirm callers handle that.
  def m(implicit cha: IClassHierarchy) = cha.resolveMethod(i.getDeclaredTarget())
}
/** Common interface for instruction wrappers that reference a field. */
trait IWithField extends Any {
  // The referenced field, if the class hierarchy can resolve it.
  def f(implicit cha: IClassHierarchy): Option[F]
}
/** Zero-allocation enrichment for field-access instructions (both get and put). */
class RichAccessI(val i: AccessI) extends AnyVal with IWithField {
  /**
   * Returns None when the cha cannot resolve the field.
   * (Option(...) maps a null result from resolveField to None.)
   */
  def f(implicit cha: IClassHierarchy) = Option(cha.resolveField(i.getDeclaredField()))
}
/** Enrichment for array load/store instructions: the accessed "field" is the ArrayContents marker. */
class RichArrayReferenceI(val i: ArrayReferenceI) extends AnyVal with IWithField {
  // Always resolvable: array element access is modeled by the synthetic ArrayContents field.
  def f(implicit cha: IClassHierarchy): Some[F] = Some(ArrayContents)
}
/** Zero-allocation enrichment shared by all SSA instructions. */
class RichI(val i: I) extends AnyVal {
  /** All operand value numbers of this instruction, in use order. */
  def uses: List[V] = (0 until i.getNumberOfUses()).map(index => V(i.getUse(index))).toList
  /** The value number this instruction defines. Only meaningful when `i.hasDef()`. */
  def theDef = V(i.getDef())
  /** The field referenced by this instruction, if it is a field or array access. */
  def f(implicit cha: IClassHierarchy): Option[F] = i match {
    case i: AccessI => i.f
    case i: ArrayReferenceI => i.f
    case _ => None
  }
  /**
   * The static type of the value this instruction defines, when known.
   * Fix: the original wrapped a non-exhaustive match in Some(...), so any other
   * def-bearing instruction (e.g. arithmetic, phi, new) threw a MatchError at
   * runtime; such instructions now safely yield None.
   */
  def defedType: Option[TypeReference] =
    if (i.hasDef()) i match {
      case i: SSAGetInstruction => Some(i.getDeclaredFieldType())
      case i: SSAInvokeInstruction => Some(i.getDeclaredResultType())
      case i: SSAArrayLoadInstruction => Some(i.getElementType())
      case i: SSAArrayLengthInstruction => Some(TypeReference.Int)
      case _ => None
    }
    else None
}
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lenses.streamreactor.connect.aws.s3.formats
import java.io.{DataInputStream, InputStream}
import java.util.concurrent.atomic.AtomicLong
import io.lenses.streamreactor.connect.aws.s3.model.{BucketAndPath, ByteArraySourceData, BytesWriteMode}
import scala.util.Try
/**
 * Streams ByteArraySourceData records out of an S3 object written in a
 * size-prefixed bytes format. Iteration terminates once the number of bytes
 * consumed reaches the object's size as reported by `fileSizeFn`.
 */
class BytesFormatWithSizesStreamReader(inputStreamFn: () => InputStream, fileSizeFn: () => Long, bucketAndPath: BucketAndPath, bytesWriteMode: BytesWriteMode) extends S3FormatStreamReader[ByteArraySourceData] {
  // Wraps the raw stream so bytesWriteMode can read length-prefixed fields.
  private val inputStream = new DataInputStream(inputStreamFn())
  // Starts at -1 so the first record is numbered 0 (incremented before use in next()).
  private var recordNumber: Long = -1
  // Remaining unread bytes; decremented by each record's size in next().
  private val fileSizeCounter = new AtomicLong(fileSizeFn())
  // More records are assumed to remain while any unread bytes remain.
  override def hasNext: Boolean = fileSizeCounter.get() > 0
  override def next(): ByteArraySourceData = {
    recordNumber += 1
    val ret = ByteArraySourceData(bytesWriteMode.read(inputStream), recordNumber)
    // NOTE(review): bytesRead.get throws if the mode did not record a byte count —
    // presumably every size-prefixed mode populates it; confirm. If a record ever
    // reports 0 bytes the counter would stop decreasing and hasNext could loop.
    fileSizeCounter.addAndGet(- ret.data.bytesRead.get)
    ret
  }
  // Record index of the most recently returned record (-1 before the first read).
  override def getLineNumber: Long = recordNumber
  override def close(): Unit = {
    // Best-effort close: failures are deliberately swallowed.
    Try {
      inputStream.close()
    }
  }
  override def getBucketAndPath: BucketAndPath = bucketAndPath
}
| datamountaineer/stream-reactor | kafka-connect-aws-s3/src/main/scala/io/lenses/streamreactor/connect/aws/s3/formats/BytesFormatWithSizesStreamReader.scala | Scala | apache-2.0 | 1,729 |
package com.github.pedrovgs.kuronometer.free.interpreter.csv
import java.io.File
import com.github.pedrovgs.kuronometer.free.domain.{
BuildExecution,
SummaryBuildStageExecution,
SummaryBuildStagesExecution
}
import com.github.pedrovgs.kuronometer.generators.BuildExecutionGenerators.{
buildExecution,
_
}
import org.scalacheck.Gen
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}
/**
 * Property-based tests for CsvReporter. Each property iteration writes to and
 * reads from the real CSV file on disk, so the file is deleted before each test
 * (and inside each forAll iteration) to keep iterations independent.
 */
class CsvReporterSpec
    extends FlatSpec
    with Matchers
    with PropertyChecks
    with BeforeAndAfterEach {
  private val reporter = new CsvReporter()
  override protected def beforeEach(): Unit = {
    super.beforeEach()
    clearReporterFile
  }
  // With no CSV file present, the reporter yields an empty (but successful) summary.
  "CsvReporter" should "return an empty summary build stages execution if there are no previous builds persisted" in {
    val stages = reporter.getTotalBuildExecutionStages
    stages shouldBe Right(SummaryBuildStagesExecution())
  }
  // Round-trip: every reported stage must come back as a summary stage with the
  // same name, duration and timestamp.
  it should "return as many stages as previously persisted getting the total build execution stages" in {
    forAll { (buildExecution: BuildExecution) =>
      clearReporterFile
      reporter.report(buildExecution)
      val reportedStages = buildExecution.buildStagesExecution.stages
      val stages = reporter.getTotalBuildExecutionStages.map(summary =>
        summary.buildStages)
      stages shouldBe Right(
        reportedStages.map(
          reportedStage =>
            SummaryBuildStageExecution(
              reportedStage.info.name,
              reportedStage.execution.executionTimeInNanoseconds,
              reportedStage.execution.timestamp)))
    }
  }
  // Stages are generated with timestamps in [0, 1000]; filtering at 500 must keep
  // exactly those with timestamp >= 500.
  it should "filter build stage executions by execution timestamp getting build execution stages by timestamp" in {
    forAll(buildExecution(Gen.choose(0L, 1000L))) {
      (buildExecution: BuildExecution) =>
        clearReporterFile
        reporter.report(buildExecution)
        val timestamp = 500
        val filteredBuildExecutions =
          reporter.getBuildExecutionStagesSinceTimestamp(timestamp)
        val expectedBuildExecutions =
          buildExecution.buildStagesExecution.stages
            .filter(timestamp <= _.execution.timestamp)
            .map(
              reportedStage =>
                SummaryBuildStageExecution(
                  reportedStage.info.name,
                  reportedStage.execution.executionTimeInNanoseconds,
                  reportedStage.execution.timestamp))
        filteredBuildExecutions shouldBe Right(
          SummaryBuildStagesExecution(expectedBuildExecutions))
    }
  }
  // Boundary case: "since timestamp" is inclusive, so stages whose timestamp
  // equals the filter value must all be returned.
  it should "count build stages with the exact timestamp as part of the filtered build stages" in {
    val filterTimestamp = 1000
    forAll(buildExecution(Gen.const(filterTimestamp))) {
      (buildExecution: BuildExecution) =>
        clearReporterFile
        reporter.report(buildExecution)
        val timestamp = filterTimestamp
        val filteredBuildExecutions =
          reporter.getBuildExecutionStagesSinceTimestamp(timestamp)
        val expectedBuildExecutions =
          buildExecution.buildStagesExecution.stages.map(
            reportedStage =>
              SummaryBuildStageExecution(
                reportedStage.info.name,
                reportedStage.execution.executionTimeInNanoseconds,
                reportedStage.execution.timestamp))
        filteredBuildExecutions shouldBe Right(
          SummaryBuildStagesExecution(expectedBuildExecutions))
    }
  }
  // Removes the reporter's CSV backing file so each test starts from a clean slate.
  private def clearReporterFile = {
    val reportsFile = new File(CsvReporterConfig.executionTasksCsvFile)
    if (reportsFile.exists()) {
      reportsFile.delete()
    }
  }
}
| pedrovgs/Kuronometer | kuronometer-core/src/test/scala/com/github/pedrovgs/kuronometer/free/interpreter/csv/CsvReporterSpec.scala | Scala | apache-2.0 | 3,632 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger, NotInPdf}
import uk.gov.hmrc.ct.computations.calculations.NetProfitsChargeableToCtWithoutDonationsCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/**
 * CATO13: net profits chargeable to Corporation Tax without charitable donations,
 * derived from the CP293/CP294 computation boxes (see companion). Excluded from
 * the rendered PDF (NotInPdf).
 */
case class CATO13(value: Int) extends CtBoxIdentifier(name = "Net Profits Chargeable to CT without Charitable Donations") with CtInteger with NotInPdf
/** Calculated box: derives CATO13 from the CP293 and CP294 boxes via the mixed-in calculator. */
object CATO13 extends Calculated[CATO13, ComputationsBoxRetriever] with NetProfitsChargeableToCtWithoutDonationsCalculator {
  override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CATO13 = {
    val cp293 = fieldValueRetriever.cp293()
    val cp294 = fieldValueRetriever.cp294()
    calculateNetProfitsChargeableToCtWithoutDonations(cp293, cp294)
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/CATO13.scala | Scala | apache-2.0 | 1,350 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Optional
import util.Arrays.asList
import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataBroker, UpdateMetadataEndpoint, UpdateMetadataPartitionState}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.UpdateMetadataRequest
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.junit.Test
import org.junit.Assert._
import org.scalatest.Assertions
import scala.collection.JavaConverters._
/**
 * Unit tests for MetadataCache: each test builds an UpdateMetadataRequest by hand,
 * feeds it into a fresh cache, and asserts on the topic/partition metadata the
 * cache then serves for a given listener.
 */
class MetadataCacheTest {
  // Broker epoch used for every UpdateMetadataRequest built in these tests.
  val brokerEpoch = 0L
  // A topic the cache has never seen yields no metadata entries at all.
  @Test
  def getTopicMetadataNonExistingTopics(): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))
    assertTrue(topicMetadata.isEmpty)
  }
  // Happy path: two topics, three partitions, five brokers with two listeners each.
  // Verifies leader, leader epoch, ISR, replicas and per-listener endpoint resolution.
  @Test
  def getTopicMetadata(): Unit = {
    val topic0 = "topic-0"
    val topic1 = "topic-1"
    val cache = new MetadataCache(1)
    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    // Each broker exposes a PLAINTEXT (9092) and an SSL (9093) endpoint on host "foo-<id>".
    def endpoints(brokerId: Int): Seq[UpdateMetadataEndpoint] = {
      val host = s"foo-$brokerId"
      Seq(
        new UpdateMetadataEndpoint()
          .setHost(host)
          .setPort(9092)
          .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
          .setListener(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT).value),
        new UpdateMetadataEndpoint()
          .setHost(host)
          .setPort(9093)
          .setSecurityProtocol(SecurityProtocol.SSL.id)
          .setListener(ListenerName.forSecurityProtocol(SecurityProtocol.SSL).value)
      )
    }
    val brokers = (0 to 4).map { brokerId =>
      new UpdateMetadataBroker()
        .setId(brokerId)
        .setEndpoints(endpoints(brokerId).asJava)
        .setRack("rack1")
    }
    val partitionStates = Seq(
      new UpdateMetadataPartitionState()
        .setTopicName(topic0)
        .setPartitionIndex(0)
        .setControllerEpoch(controllerEpoch)
        .setLeader(0)
        .setLeaderEpoch(0)
        .setIsr(asList(0, 1, 3))
        .setZkVersion(zkVersion)
        .setReplicas(asList(0, 1, 3)),
      new UpdateMetadataPartitionState()
        .setTopicName(topic0)
        .setPartitionIndex(1)
        .setControllerEpoch(controllerEpoch)
        .setLeader(1)
        .setLeaderEpoch(1)
        .setIsr(asList(1, 0))
        .setZkVersion(zkVersion)
        .setReplicas(asList(1, 2, 0, 4)),
      new UpdateMetadataPartitionState()
        .setTopicName(topic1)
        .setPartitionIndex(0)
        .setControllerEpoch(controllerEpoch)
        .setLeader(2)
        .setLeaderEpoch(2)
        .setIsr(asList(2, 1))
        .setZkVersion(zkVersion)
        .setReplicas(asList(2, 1, 3)))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateMetadata(15, updateMetadataRequest)
    // The same assertions must hold regardless of which listener is queried.
    for (securityProtocol <- Seq(SecurityProtocol.PLAINTEXT, SecurityProtocol.SSL)) {
      val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
      def checkTopicMetadata(topic: String): Unit = {
        val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName)
        assertEquals(1, topicMetadatas.size)
        val topicMetadata = topicMetadatas.head
        assertEquals(Errors.NONE, topicMetadata.error)
        assertEquals(topic, topicMetadata.topic)
        val topicPartitionStates = partitionStates.filter { ps => ps.topicName == topic }
        val partitionMetadatas = topicMetadata.partitionMetadata.asScala.sortBy(_.partition)
        assertEquals(s"Unexpected partition count for topic $topic", topicPartitionStates.size, partitionMetadatas.size)
        partitionMetadatas.zipWithIndex.foreach { case (partitionMetadata, partitionId) =>
          assertEquals(Errors.NONE, partitionMetadata.error)
          assertEquals(partitionId, partitionMetadata.partition)
          val leader = partitionMetadata.leader
          val partitionState = topicPartitionStates.find(_.partitionIndex == partitionId).getOrElse(
            Assertions.fail(s"Unable to find partition state for partition $partitionId"))
          assertEquals(partitionState.leader, leader.id)
          assertEquals(Optional.of(partitionState.leaderEpoch), partitionMetadata.leaderEpoch)
          assertEquals(partitionState.isr, partitionMetadata.isr.asScala.map(_.id).asJava)
          assertEquals(partitionState.replicas, partitionMetadata.replicas.asScala.map(_.id).asJava)
          // The leader's advertised host/port must match the endpoint registered for this listener.
          val endpoint = endpoints(partitionMetadata.leader.id).find(_.listener == listenerName.value).get
          assertEquals(endpoint.host, leader.host)
          assertEquals(endpoint.port, leader.port)
        }
      }
      checkTopicMetadata(topic0)
      checkTopicMetadata(topic1)
    }
  }
  // Leader broker id 1 is not in the alive-broker set, so the partition reports
  // LEADER_NOT_AVAILABLE regardless of the errorUnavailableListeners flag.
  @Test
  def getTopicMetadataPartitionLeaderNotAvailable(): Unit = {
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Seq(new UpdateMetadataBroker()
      .setId(0)
      .setEndpoints(Seq(new UpdateMetadataEndpoint()
        .setHost("foo")
        .setPort(9092)
        .setSecurityProtocol(securityProtocol.id)
        .setListener(listenerName.value)).asJava))
    verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(brokers, listenerName,
      leader = 1, Errors.LEADER_NOT_AVAILABLE, errorUnavailableListeners = false)
    verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(brokers, listenerName,
      leader = 1, Errors.LISTENER_NOT_FOUND, errorUnavailableListeners = true)
  }
  // Leader (broker 1) is alive but has no SSL endpoint; with errorUnavailableListeners
  // enabled the cache surfaces LISTENER_NOT_FOUND for the SSL listener.
  @Test
  def getTopicMetadataPartitionListenerNotAvailableOnLeader(): Unit = {
    val plaintextListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
    val sslListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.SSL)
    val broker0Endpoints = Seq(
      new UpdateMetadataEndpoint()
        .setHost("host0")
        .setPort(9092)
        .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
        .setListener(plaintextListenerName.value),
      new UpdateMetadataEndpoint()
        .setHost("host0")
        .setPort(9093)
        .setSecurityProtocol(SecurityProtocol.SSL.id)
        .setListener(sslListenerName.value))
    val broker1Endpoints = Seq(new UpdateMetadataEndpoint()
      .setHost("host1")
      .setPort(9092)
      .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
      .setListener(plaintextListenerName.value))
    val brokers = Seq(
      new UpdateMetadataBroker()
        .setId(0)
        .setEndpoints(broker0Endpoints.asJava),
      new UpdateMetadataBroker()
        .setId(1)
        .setEndpoints(broker1Endpoints.asJava))
    verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(brokers, sslListenerName,
      leader = 1, Errors.LISTENER_NOT_FOUND, errorUnavailableListeners = true)
  }
  // Same setup as above, but with errorUnavailableListeners disabled the cache falls
  // back to the legacy LEADER_NOT_AVAILABLE error.
  // NOTE(review): despite the "OldMetadataVersion" name, this builds the request with
  // ApiKeys.UPDATE_METADATA.latestVersion — confirm whether an older version was intended.
  @Test
  def getTopicMetadataPartitionListenerNotAvailableOnLeaderOldMetadataVersion(): Unit = {
    val plaintextListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
    val sslListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.SSL)
    val broker0Endpoints = Seq(
      new UpdateMetadataEndpoint()
        .setHost("host0")
        .setPort(9092)
        .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
        .setListener(plaintextListenerName.value),
      new UpdateMetadataEndpoint()
        .setHost("host0")
        .setPort(9093)
        .setSecurityProtocol(SecurityProtocol.SSL.id)
        .setListener(sslListenerName.value))
    val broker1Endpoints = Seq(new UpdateMetadataEndpoint()
      .setHost("host1")
      .setPort(9092)
      .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
      .setListener(plaintextListenerName.value))
    val brokers = Seq(
      new UpdateMetadataBroker()
        .setId(0)
        .setEndpoints(broker0Endpoints.asJava),
      new UpdateMetadataBroker()
        .setId(1)
        .setEndpoints(broker1Endpoints.asJava))
    verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(brokers, sslListenerName,
      leader = 1, Errors.LEADER_NOT_AVAILABLE, errorUnavailableListeners = false)
  }
  // Shared driver: publishes a single-partition topic with the given leader and
  // asserts the partition reports `expectedError` while ISR/replicas stay intact.
  private def verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(brokers: Seq[UpdateMetadataBroker],
                                                                      listenerName: ListenerName,
                                                                      leader: Int,
                                                                      expectedError: Errors,
                                                                      errorUnavailableListeners: Boolean): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val leaderEpoch = 1
    val partitionStates = Seq(new UpdateMetadataPartitionState()
      .setTopicName(topic)
      .setPartitionIndex(0)
      .setControllerEpoch(controllerEpoch)
      .setLeader(leader)
      .setLeaderEpoch(leaderEpoch)
      .setIsr(asList(0))
      .setZkVersion(zkVersion)
      .setReplicas(asList(0)))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateMetadata(15, updateMetadataRequest)
    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableListeners = errorUnavailableListeners)
    assertEquals(1, topicMetadatas.size)
    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)
    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)
    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(expectedError, partitionMetadata.error)
    assertFalse(partitionMetadata.isr.isEmpty)
    assertEquals(1, partitionMetadata.replicas.size)
    assertEquals(0, partitionMetadata.replicas.get(0).id)
  }
  // Replica 1 is assigned but not alive: without errorUnavailableEndpoints the full
  // replica list is returned with no error; with it, REPLICA_NOT_AVAILABLE is raised
  // and the dead replica is filtered out.
  @Test
  def getTopicMetadataReplicaNotAvailable(): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Seq(new UpdateMetadataBroker()
      .setId(0)
      .setEndpoints(Seq(new UpdateMetadataEndpoint()
        .setHost("foo")
        .setPort(9092)
        .setSecurityProtocol(securityProtocol.id)
        .setListener(listenerName.value)).asJava))
    // replica 1 is not available
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0, 1)
    val isr = asList[Integer](0)
    val partitionStates = Seq(
      new UpdateMetadataPartitionState()
        .setTopicName(topic)
        .setPartitionIndex(0)
        .setControllerEpoch(controllerEpoch)
        .setLeader(leader)
        .setLeaderEpoch(leaderEpoch)
        .setIsr(isr)
        .setZkVersion(zkVersion)
        .setReplicas(replicas))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateMetadata(15, updateMetadataRequest)
    // Validate errorUnavailableEndpoints = false
    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
    assertEquals(1, topicMetadatas.size)
    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)
    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)
    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(Errors.NONE, partitionMetadata.error)
    assertEquals(Set(0, 1), partitionMetadata.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadata.isr.asScala.map(_.id).toSet)
    // Validate errorUnavailableEndpoints = true
    val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
    assertEquals(1, topicMetadatasWithError.size)
    val topicMetadataWithError = topicMetadatasWithError.head
    assertEquals(Errors.NONE, topicMetadataWithError.error)
    val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
    assertEquals(1, partitionMetadatasWithError.size)
    val partitionMetadataWithError = partitionMetadatasWithError.get(0)
    assertEquals(0, partitionMetadataWithError.partition)
    assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
    assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
  }
  // Mirror of the previous test for the ISR: broker 1 is in the ISR but dead; with
  // errorUnavailableEndpoints the dead entry is dropped and the error surfaced.
  @Test
  def getTopicMetadataIsrNotAvailable(): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Seq(new UpdateMetadataBroker()
      .setId(0)
      .setRack("rack1")
      .setEndpoints(Seq(new UpdateMetadataEndpoint()
        .setHost("foo")
        .setPort(9092)
        .setSecurityProtocol(securityProtocol.id)
        .setListener(listenerName.value)).asJava))
    // replica 1 is not available
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0)
    val isr = asList[Integer](0, 1)
    val partitionStates = Seq(new UpdateMetadataPartitionState()
      .setTopicName(topic)
      .setPartitionIndex(0)
      .setControllerEpoch(controllerEpoch)
      .setLeader(leader)
      .setLeaderEpoch(leaderEpoch)
      .setIsr(isr)
      .setZkVersion(zkVersion)
      .setReplicas(replicas))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateMetadata(15, updateMetadataRequest)
    // Validate errorUnavailableEndpoints = false
    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
    assertEquals(1, topicMetadatas.size)
    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)
    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)
    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(Errors.NONE, partitionMetadata.error)
    assertEquals(Set(0), partitionMetadata.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0, 1), partitionMetadata.isr.asScala.map(_.id).toSet)
    // Validate errorUnavailableEndpoints = true
    val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
    assertEquals(1, topicMetadatasWithError.size)
    val topicMetadataWithError = topicMetadatasWithError.head
    assertEquals(Errors.NONE, topicMetadataWithError.error)
    val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
    assertEquals(1, partitionMetadatasWithError.size)
    val partitionMetadataWithError = partitionMetadatasWithError.get(0)
    assertEquals(0, partitionMetadataWithError.partition)
    assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
    assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
  }
  // Querying a listener (SSL) that no broker advertises: the topic is returned but
  // the partition has no resolvable leader (leaderId == -1).
  @Test
  def getTopicMetadataWithNonSupportedSecurityProtocol(): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val brokers = Seq(new UpdateMetadataBroker()
      .setId(0)
      .setRack("")
      .setEndpoints(Seq(new UpdateMetadataEndpoint()
        .setHost("foo")
        .setPort(9092)
        .setSecurityProtocol(securityProtocol.id)
        .setListener(ListenerName.forSecurityProtocol(securityProtocol).value)).asJava))
    val controllerEpoch = 1
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0)
    val isr = asList[Integer](0, 1)
    val partitionStates = Seq(new UpdateMetadataPartitionState()
      .setTopicName(topic)
      .setPartitionIndex(0)
      .setControllerEpoch(controllerEpoch)
      .setLeader(leader)
      .setLeaderEpoch(leaderEpoch)
      .setIsr(isr)
      .setZkVersion(3)
      .setReplicas(replicas))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, brokerEpoch, partitionStates.asJava,
      brokers.asJava).build()
    cache.updateMetadata(15, updateMetadataRequest)
    val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL))
    assertEquals(1, topicMetadata.size)
    assertEquals(1, topicMetadata.head.partitionMetadata.size)
    assertEquals(-1, topicMetadata.head.partitionMetadata.get(0).leaderId)
  }
  // The broker list returned by getAliveBrokers must be a snapshot: a later
  // updateMetadata call must not mutate a previously obtained result.
  @Test
  def getAliveBrokersShouldNotBeMutatedByUpdateCache(): Unit = {
    val topic = "topic"
    val cache = new MetadataCache(1)
    // Publishes a single-partition topic hosted on the given set of broker ids.
    def updateCache(brokerIds: Seq[Int]): Unit = {
      val brokers = brokerIds.map { brokerId =>
        val securityProtocol = SecurityProtocol.PLAINTEXT
        new UpdateMetadataBroker()
          .setId(brokerId)
          .setRack("")
          .setEndpoints(Seq(new UpdateMetadataEndpoint()
            .setHost("foo")
            .setPort(9092)
            .setSecurityProtocol(securityProtocol.id)
            .setListener(ListenerName.forSecurityProtocol(securityProtocol).value)).asJava)
      }
      val controllerEpoch = 1
      val leader = 0
      val leaderEpoch = 0
      val replicas = asList[Integer](0)
      val isr = asList[Integer](0, 1)
      val partitionStates = Seq(new UpdateMetadataPartitionState()
        .setTopicName(topic)
        .setPartitionIndex(0)
        .setControllerEpoch(controllerEpoch)
        .setLeader(leader)
        .setLeaderEpoch(leaderEpoch)
        .setIsr(isr)
        .setZkVersion(3)
        .setReplicas(replicas))
      val version = ApiKeys.UPDATE_METADATA.latestVersion
      val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, brokerEpoch, partitionStates.asJava,
        brokers.asJava).build()
      cache.updateMetadata(15, updateMetadataRequest)
    }
    val initialBrokerIds = (0 to 2)
    updateCache(initialBrokerIds)
    val aliveBrokersFromCache = cache.getAliveBrokers
    // This should not change `aliveBrokersFromCache`
    updateCache((0 to 3))
    assertEquals(initialBrokerIds.toSet, aliveBrokersFromCache.map(_.id).toSet)
  }
}
| noslowerdna/kafka | core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala | Scala | apache-2.0 | 20,226 |
/*
Copyright 2013 Ilya Lakhin (Илья Александрович Лахин)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package name.lakhin.eliah.projects
package papacarlo.syntax
import name.lakhin.eliah.projects.papacarlo.lexis.TokenReference
import name.lakhin.eliah.projects.papacarlo.utils.{
Difference,
Registry,
Bounds,
Signal
}
/**
 * A node of the incrementally maintained syntax tree.
 *
 * A node carries its syntactic `kind`, the token range it covers
 * (`begin`..`end`), named child branches, named token references and optional
 * constant values captured by the parser. Nodes are mutable because the
 * incremental parser patches existing trees in place via [[merge]].
 */
final class Node(private[syntax] var kind: String,
                 private[syntax] var begin: TokenReference,
                 private[syntax] var end: TokenReference) {
  // Registry id of this node; Node.Unbound (-1) until registered.
  private[papacarlo] var id = Node.Unbound
  // Child nodes grouped by branch tag.
  private[syntax] var branches = Map.empty[String, List[Node]]
  // Captured token references grouped by tag.
  private[syntax] var references = Map.empty[String, List[TokenReference]]
  // Constant values that override the token-derived value for a tag.
  private[syntax] var constants = Map.empty[String, String]
  // Whether the parser may reuse (cache) this node between parses.
  private[syntax] var cachable = false
  private[syntax] var parent = Option.empty[Node]
  // The rule that produced this node, if any.
  private[syntax] var producer = Option.empty[Rule]

  /** Fired when this node changes (see [[update]]). */
  val onChange = new Signal[Node]
  /** Fired right before this node is unregistered. */
  val onRemove = new Signal[Node]
  /** Fired when a newly registered descendant is attached under this node. */
  val onAddBranch = new Signal[Node]

  // Token-update listener that re-triggers update() for this node.
  private val reflection = (reference: TokenReference) => update()

  /** True if the node is registered (has a valid registry id). */
  def bound = id != Node.Unbound

  def getId = id

  def getKind = kind

  def getBegin = begin

  def getEnd = end

  /** Token index range covered by this node, end-exclusive. */
  def getRange = Bounds(begin.index, end.index + 1)

  def getBranches = branches

  /** Tag -> textual values; a constant overrides the referenced tokens' values. */
  def getValues = references.map {
    case (tag, tokens) =>
      tag -> constants
        .get(tag)
        .map(constant => List(constant))
        .getOrElse(tokens.filter(_.exists).map(_.token.value))
  }

  def getParent = parent

  /** Name of the branch this node occupies in its `deep`-th ancestor. */
  def getBranchName(deep: Int = 0): Option[String] = parent.flatMap { parent =>
    if (deep > 0) parent.getBranchName(deep - 1)
    else parent.branches.find(entry => entry._2.exists(_.id == id)).map(_._1)
  }

  def getProducer = producer

  // Same value as getRange; kept as a separate accessor.
  def range = Bounds(begin.index, end.index + 1)

  def getCachable = cachable

  /** Propagates a change notification: triggered locally when someone listens
    * to onChange (and this is not an ancestor-only pass), otherwise forwarded
    * to the parent. */
  def update(ancestor: Boolean = false): Unit = {
    if (onChange.nonEmpty && !ancestor) onChange.trigger(this)
    else for (parent <- parent) parent.update(ancestor)
  }

  // All direct children across every branch tag.
  private def getChildren = branches.map(_._2).flatten

  /** Unregisters this node: fires onRemove, detaches token listeners and
    * frees the registry slot. */
  private[syntax] def remove(registry: Registry[Node]): Unit = {
    if (bound) {
      onRemove.trigger(this)
      releaseReflection()
      registry.remove(id)
      id = Node.Unbound
    }
  }

  /**
   * Replaces this node's content with `replacement`, reusing unchanged
   * descendants where possible.
   *
   * For each branch tag the old and new child lists are diffed; old children
   * outside `invalidationRange` with identical source code are spliced back
   * in. Removed descendants are unregistered, new ones registered, and the
   * newly registered nodes are returned in registration order.
   */
  private[syntax] def merge(registry: Registry[Node],
                            replacement: Node,
                            invalidationRange: Bounds) = {
    // Splice unchanged old children into the replacement's branches.
    for ((tag, oldBranches) <- this.branches;
         newBranches <- replacement.branches.get(tag)) {
      // (head, tail) counts of matching children in both lists.
      val difference = Difference.double[Node](
        oldBranches,
        newBranches,
        (pair: Tuple2[Node, Node]) => {
          pair._1.bound &&
            !pair._2.range.intersects(invalidationRange) &&
            pair._1.sourceCode == pair._2.sourceCode
        }
      )
      if (difference != (0, 0))
        replacement.branches += tag ->
          Bounds(difference._1, oldBranches.size - difference._2).replace(
            oldBranches,
            Bounds(difference._1, newBranches.size - difference._2)
              .slice(newBranches)
          )
    }

    // Adopt the replacement's identity and re-subscribe token listeners.
    kind = replacement.kind
    releaseReflection()
    begin = replacement.begin
    end = replacement.end
    references = replacement.references
    initializeReflection()

    // Partition the replacement's descendants into reused (already bound)
    // nodes and nodes that still need registration; reparent all of them.
    var unregistered = List.empty[Node]
    var registered = Set.empty[Int]
    replacement.visitBranches(this, (parent, newDescendant) => {
      if (newDescendant.bound) registered += newDescendant.id
      else unregistered ::= newDescendant

      newDescendant.parent = Some(parent)
    })

    // Unregister old descendants that were not reused.
    reverseVisitBranches(oldDescendant => {
      if (!registered.contains(oldDescendant.id))
        oldDescendant.remove(registry)
    })

    branches = replacement.branches

    // Register new descendants in tree order, then announce them.
    val reversedUnregistered = unregistered.reverse

    for (descendant <- reversedUnregistered)
      registry.add { id =>
        descendant.id = id
        descendant
      }

    for (descendant <- reversedUnregistered;
         parent <- descendant.parent)
      parent.onAddBranch.trigger(descendant)

    reversedUnregistered
  }

  /** Depth-first traversal invoking `enter` before and `leave` after each
    * node's children. */
  def visit(enter: Node => Any, leave: Node => Any): Unit = {
    enter(this)
    for (branch <- branches.map(_._2).flatten) branch.visit(enter, leave)
    leave(this)
  }

  // Pre-order traversal passing each visited child's parent to `enter`.
  private def visitBranches(current: Node, enter: (Node, Node) => Any): Unit = {
    for (branch <- getChildren) {
      enter(current, branch)
      branch.visitBranches(branch, enter)
    }
  }

  // Post-order traversal of all descendants.
  private def reverseVisitBranches(leave: Node => Any): Unit = {
    for (branch <- getChildren) {
      branch.reverseVisitBranches(leave)
      leave(branch)
    }
  }

  // Token references this node must observe: references of non-constant tags
  // that are mutable (or currently missing).
  private def subscribableReferences =
    references
      .filter(pair => !constants.contains(pair._1))
      .map(_._2.filter(reference =>
        !reference.exists ||
          reference.token.isMutable))
      .flatten

  // Subscribes this node's update hook to its tokens' change signals.
  private def initializeReflection(): Unit = {
    for (reference <- subscribableReferences)
      reference.onUpdate.bind(reflection)
  }

  // Removes the subscriptions installed by initializeReflection().
  private def releaseReflection(): Unit = {
    for (reference <- subscribableReferences)
      reference.onUpdate.unbind(reflection)
  }

  override def toString = kind + ":" + id + (if (cachable) " cachable" else "")

  /** Source text covered by this node, or "" if its bounds no longer exist. */
  def sourceCode =
    if (begin.exists && end.exists)
      begin.collection.descriptions
        .slice(begin.index, end.index + 1)
        .map(_.value)
        .mkString
    else
      ""

  /** Multi-line, indented dump of the subtree for debugging. */
  def prettyPrint(prefix: String = ""): String = {
    val result = new StringBuilder

    result ++= kind + " " + id

    if (cachable) result ++= " cachable"

    result ++= parent.map(" >> " + _.id).getOrElse("")

    if (references.nonEmpty || branches.nonEmpty) {
      result ++= " {"
      // NOTE(review): getValues is keyed by `references`; a constants-only tag
      // reaching the lookup below would throw — verify constants always come
      // with a matching reference entry.
      for (reference <- references.keys ++ constants.keys
        .filter(constant => !references.contains(constant))) {
        result ++= "\\n" + prefix + " " + reference + ": " +
          getValues(reference).mkString("")
      }
      for ((name, subnodes) <- branches; branch <- subnodes) {
        result ++= "\\n" + prefix + " " + name + ": "
        result ++= branch.prettyPrint(prefix + " ")
      }
      result ++= "\\n" + prefix + "}"
    }

    result.toString()
  }

  /** Accessor wrapper used by the public API. */
  def accessor = new NodeAccessor(this)
}
object Node {
  /** Id value marking a node that is not registered in a node registry. */
  val Unbound = -1

  // Groups (tag, value) pairs into a map from tag to the values carrying that
  // tag, keeping the original pair order within each group.
  private def groupPairs[A](pairs: List[(String, A)]): Map[String, List[A]] =
    pairs.groupBy(_._1).map { case (tag, tagged) => tag -> tagged.map(_._2) }

  /** Builds an unbound node with the given kind, bounds, branches, references
    * and constants. */
  def apply(kind: String,
            begin: TokenReference,
            end: TokenReference,
            branches: List[Tuple2[String, Node]] = Nil,
            references: List[Tuple2[String, TokenReference]] = Nil,
            constants: Map[String, String] = Map.empty) = {
    val node = new Node(kind, begin, end)
    node.branches = groupPairs(branches)
    node.references = groupPairs(references)
    node.constants = constants
    node
  }
}
| Eliah-Lakhin/papa-carlo | src/main/scala/name.lakhin.eliah.projects/papacarlo/syntax/Node.scala | Scala | apache-2.0 | 7,315 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.recipes
//scalastyle:off underscore.import
import com.castlebravostudios.rayguns.api.items._
import com.castlebravostudios.rayguns.items.RaygunsBlocks
import com.castlebravostudios.rayguns.items.accessories._
import com.castlebravostudios.rayguns.items.barrels._
import com.castlebravostudios.rayguns.items.batteries._
import com.castlebravostudios.rayguns.items.chambers._
import com.castlebravostudios.rayguns.items.emitters.Emitters
import com.castlebravostudios.rayguns.items.frames._
import com.castlebravostudios.rayguns.items.lenses._
import com.castlebravostudios.rayguns.items.misc._
import com.castlebravostudios.rayguns.utils.Extensions.BlockExtensions
import com.castlebravostudios.rayguns.utils.Extensions.ItemExtensions
import net.minecraft.init.Blocks
import net.minecraft.init.Items
import net.minecraft.item.Item
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagCompound
import net.minecraftforge.fluids.FluidRegistry
import net.minecraftforge.fluids.FluidStack
import cpw.mods.fml.common.event.FMLInterModComms
import cpw.mods.fml.common.registry.GameRegistry
//scalastyle:on
/**
 * Recipe registrations used when the Thermal Expansion mod is present: gun
 * modules are crafted from TE materials, and TE machines (induction smelter,
 * fluid transposer) are registered as producers via FML inter-mod comms.
 */
object ThermalExpansionRecipeLibrary extends RecipeLibrary {

  // Looks up an item registered by the Thermal Expansion mod by name.
  private def getTEItem( item : String, count : Int = 1 ) : ItemStack =
    GameRegistry.findItemStack("ThermalExpansion", item, count)

  // Fetches a registered fluid as a stack of the given amount (in mB).
  private def getFluidStack( fluid : String, amount : Int ) : FluidStack =
    FluidRegistry.getFluidStack( fluid, amount )

  /** Registers a Fluid Transposer fill recipe with Thermal Expansion by
    * sending a "TransposerFillRecipe" IMC message built from NBT. */
  private def addFluidTransposerRecipe( energyCost : Int, input : ItemStack,
      output : ItemStack, fluid : FluidStack, reversible : Boolean = false ) : Unit = {
    val toSend = new NBTTagCompound();
    toSend.setInteger( "energy", energyCost );
    toSend.setTag( "input", new NBTTagCompound( ) );
    toSend.setTag( "output", new NBTTagCompound( ) );
    toSend.setTag( "fluid", new NBTTagCompound( ) );
    input.writeToNBT( toSend.getCompoundTag( "input" ) );
    output.writeToNBT( toSend.getCompoundTag( "output" ) );
    toSend.setBoolean( "reversible", reversible );
    fluid.writeToNBT( toSend.getCompoundTag( "fluid" ) );
    FMLInterModComms.sendMessage( "ThermalExpansion", "TransposerFillRecipe", toSend );
  }

  /** Registers an Induction Smelter recipe with Thermal Expansion by sending
    * a "SmelterRecipe" IMC message built from NBT. */
  private def addInductionSmelterRecipe( energyCost : Int, primaryInput : ItemStack, secondaryInput : ItemStack, output : ItemStack ) : Unit = {
    val toSend = new NBTTagCompound();
    toSend.setInteger("energy", energyCost);
    toSend.setTag("primaryInput", new NBTTagCompound());
    toSend.setTag("secondaryInput", new NBTTagCompound());
    toSend.setTag("primaryOutput", new NBTTagCompound());
    primaryInput.writeToNBT(toSend.getCompoundTag("primaryInput"));
    secondaryInput.writeToNBT(toSend.getCompoundTag("secondaryInput"));
    output.writeToNBT(toSend.getCompoundTag("primaryOutput"));
    FMLInterModComms.sendMessage("ThermalExpansion", "SmelterRecipe", toSend);
  }

  /** Entry point: registers every recipe group of this library. */
  def registerRecipes() : Unit = {
    registerAccessories()
    registerBatteries()
    registerFrames()
    registerChambers()
    registerEmitters()
    registerLenses()
    registerBarrels()
    registerGainMedia()
    registerDopedGlass()
    registerDiodes()
    registerHeatSinks()
    registerCasings()
    registerMisc()
  }

  /** Ingredient items exposed by this library, paired with their names. */
  def getIngredientItems : Seq[(Item, String)] = Seq(
    ( OpticalGlass, "opticalGlass" ),
    ( RedstoneDopedGlass, "redstoneGlass" ),
    ( GlowstoneDopedGlass, "glowstoneGlass" ),
    ( Tier1Diode, "tier1Diode" ),
    ( Tier2Diode, "tier2Diode" ),
    ( Tier3Diode, "tier3Diode" ),
    ( Tier1HeatSink, "tier1HeatSink" ),
    ( Tier2HeatSink, "tier2HeatSink" ),
    ( Tier3HeatSink, "tier3HeatSink" ),
    ( Tier1ChamberCasing, "tier1Casing" ),
    ( Tier2ChamberCasing, "tier2Casing" ),
    // NOTE(review): "Tier3Casing" is capitalized unlike "tier1Casing" and
    // "tier2Casing"; verify against lookups elsewhere before renaming it.
    ( Tier3ChamberCasing, "Tier3Casing" ),
    ( RadiantDust, "radiantDust" ),
    ( RadiantDopedGlass, "radiantGlass" ),
    ( Shutter, "shutter" ),
    ( Tier1GainMedium, "tier1GainMedium" ),
    ( Tier2GainMedium, "tier2GainMedium" ),
    ( Tier3GainMedium, "tier3GainMedium" ),
    ( Tier1EmptyChamber, "tier1EmptyChamber" ),
    ( Tier2EmptyChamber, "tier2EmptyChamber" ),
    ( Tier3EmptyChamber, "tier3EmptyChamber" )
  )

  // Shaped recipes for the accessory modules (wiring, capacitors, solar panel).
  private def registerAccessories() = {
    addModuleShapedOre( HighEfficiencyWiring,
      "CCC",
      "III",
      "CCC",
      ( 'C' -> getTEItem( "dustBlizz" ) ),
      ( 'I' -> "ingotElectrum" ) )
    addModuleShapedOre( RefireCapacitor,
      "LPL",
      "LPL",
      " C ",
      ( 'C' -> getTEItem( "powerCoilElectrum" ) ),
      ( 'L' -> getTEItem( "conduitEnergyBasic" ) ),
      ( 'P' -> Items.paper ) )
    addModuleShapedOre( SolarPanel,
      "GGG",
      "III",
      "RCR",
      ( 'I' -> "ingotIron" ),
      ( 'R' -> getTEItem( "conduitEnergyBasic" ) ),
      ( 'G' -> "blockGlass" ),
      ( 'C' -> getTEItem( "powerCoilSilver" ) ) )
    addModuleShapedOre( ChargeCapacitor,
      "GLG",
      "GLG",
      "BRB",
      ( 'G' -> "ingotGold" ),
      ( 'L' -> "blockGlass" ),
      ( 'B' -> BasicBattery ),
      ( 'R' -> getTEItem( "powerCoilElectrum" ) ) )
  }

  // One shared shaped recipe per battery tier, varying only the TE capacitor core.
  private def registerBatteries() = {
    def addBatteryRecipe( battery : RaygunBattery, core : Any ) : Unit = {
      addModuleShapedOre( battery,
        "IGI",
        "IRI",
        "IRI",
        ( 'G' -> getTEItem( "powerCoilElectrum" ) ),
        ( 'I' -> "ingotIron" ),
        ( 'R' -> core ) )
    }
    addBatteryRecipe( BasicBattery, getTEItem( "capacitorBasic" ) )
    addBatteryRecipe( AdvancedBattery, getTEItem( "capacitorHardened" ) )
    addBatteryRecipe( UltimateBattery, getTEItem( "capacitorReinforced" ) )
  }

  // Gun frames differ only in the dye used as the core ingredient.
  private def registerFrames() = {
    def addFrameRecipe( frame : RaygunFrame, core : Any ) : Unit = {
      addModuleShapedOre( frame,
        "IR ",
        " IR",
        " LI",
        ( 'L' -> Blocks.lever ),
        ( 'R' -> core ),
        ( 'I' -> "ingotIron" ) )
    }
    addFrameRecipe( FireflyFrame, "dyeRed" )
    addFrameRecipe( MantisFrame, "dyeGreen" )
  }

  // Chambers: each tier shares the same shaped layout (casing/diode/medium +
  // emitter) and additionally a shapeless emitter + empty-chamber combination.
  private def registerChambers() = {
    def registerChamber( chamber : RaygunChamber, emitter : Item, medium : Item, diode : Item, casing : Item, emptyChamber : Item ) : Unit = {
      addModuleShapedOre( chamber,
        "CDC",
        "MME",
        "CDC",
        ( 'D' -> diode ),
        ( 'C' -> casing ),
        ( 'M' -> medium ),
        ( 'E' -> emitter ) )
      addModuleShapelessOre( chamber, emitter, emptyChamber )
    }
    def registerT1Chamber( chamber : RaygunChamber, emitter : Item ) : Unit =
      registerChamber( chamber, emitter, Tier1GainMedium, Tier1Diode, Tier1ChamberCasing, Tier1EmptyChamber )
    def registerT2Chamber( chamber : RaygunChamber, emitter : Item ) : Unit =
      registerChamber( chamber, emitter, Tier2GainMedium, Tier2Diode, Tier2ChamberCasing, Tier2EmptyChamber )
    def registerT3Chamber( chamber : RaygunChamber, emitter : Item ) : Unit =
      registerChamber( chamber, emitter, Tier3GainMedium, Tier3Diode, Tier3ChamberCasing, Tier3EmptyChamber )

    registerT1Chamber( Tier1CuttingChamber, Emitters.tier1CuttingEmitter)
    registerT1Chamber( HeatRayChamber, Emitters.heatRayEmitter)
    registerT1Chamber( LaserChamber, Emitters.laserEmitter)
    registerT1Chamber( LightningChamber, Emitters.lightningEmitter)

    registerT2Chamber( Tier2CuttingChamber, Emitters.tier2CuttingEmitter)
    registerT2Chamber( FortifiedSunlightChamber, Emitters.fortifiedSunlightEmitter)
    registerT2Chamber( FrostRayChamber, Emitters.frostRayEmitter)
    registerT2Chamber( ImpulseChamber, Emitters.impulseEmitter)
    registerT2Chamber( LifeForceChamber, Emitters.lifeForceEmitter)
    registerT2Chamber( TractorChamber, Emitters.tractorEmitter)
    registerT2Chamber( MatterTransporterChamber, Emitters.matterTransporterEmitter )
    registerT2Chamber( EnderChamber, Emitters.enderEmitter )

    registerT3Chamber( Tier3CuttingChamber, Emitters.tier3CuttingEmitter)
    registerT3Chamber( DeathRayChamber, Emitters.deathRayEmitter)
    registerT3Chamber( ExplosiveChamber, Emitters.explosiveEmitter)
  }

  // Emitters: iron ring around a tier diode core with four themed ingredients
  // at the compass positions.
  private def registerEmitters() = {
    def registerEmitter( emitter : Item, core : AnyRef, top : AnyRef, right : AnyRef, bottom : AnyRef, left : AnyRef ) : Unit = {
      addShapedOre( emitter.asStack,
        "ITI",
        "LDR",
        "IBI",
        'I' -> "ingotIron",
        'D' -> core,
        'T' -> top,
        'R' -> right,
        'B' -> bottom,
        'L' -> left )
    }
    def registerT1Emitter( emitter : Item, top : AnyRef, right : AnyRef, bottom : AnyRef, left : AnyRef ) : Unit =
      registerEmitter( emitter, Tier1Diode, top, right, bottom, left )
    def registerT2Emitter( emitter : Item, top : AnyRef, right : AnyRef, bottom : AnyRef, left : AnyRef ) : Unit =
      registerEmitter( emitter, Tier2Diode, top, right, bottom, left )
    def registerT3Emitter( emitter : Item, top : AnyRef, right : AnyRef, bottom : AnyRef, left : AnyRef ) : Unit =
      registerEmitter( emitter, Tier3Diode, top, right, bottom, left )

    registerT1Emitter( Emitters.laserEmitter, "dustRedstone", "dustRedstone", "dustRedstone", "dustRedstone" )
    registerT1Emitter( Emitters.heatRayEmitter, Items.coal, Items.lava_bucket, Items.coal, Items.lava_bucket )
    registerT1Emitter( Emitters.lightningEmitter, "blockIron", "blockRedstone", "blockIron", "blockRedstone" )
    registerT1Emitter( Emitters.tier1CuttingEmitter, Items.stone_pickaxe, Items.stone_shovel, Items.stone_pickaxe, Items.stone_shovel )

    registerT2Emitter( Emitters.frostRayEmitter, Blocks.ice, Blocks.snow, Blocks.ice, Blocks.snow )
    registerT2Emitter( Emitters.lifeForceEmitter, Items.speckled_melon, Items.ghast_tear, Items.speckled_melon, Items.ghast_tear )
    registerT2Emitter( Emitters.fortifiedSunlightEmitter, "woodLog", "woodLog", "woodLog", "woodLog" )
    registerT2Emitter( Emitters.enderEmitter, Items.ender_pearl, Items.ender_pearl, Items.ender_pearl, Items.ender_pearl )
    registerT2Emitter( Emitters.impulseEmitter, Blocks.piston, Blocks.piston, Blocks.piston, Blocks.piston )
    registerT2Emitter( Emitters.tractorEmitter, Blocks.sticky_piston, Blocks.sticky_piston, Blocks.sticky_piston, Blocks.sticky_piston )
    registerT2Emitter( Emitters.matterTransporterEmitter, Items.ender_pearl, Blocks.piston, Items.ender_pearl, Blocks.piston )
    registerT2Emitter( Emitters.tier2CuttingEmitter, Items.iron_pickaxe, Items.iron_shovel, Items.iron_pickaxe, Items.iron_shovel )

    // Item damage 1 on the skull selects the wither skeleton skull variant.
    val witherSkull = Items.skull.asStack( 1, 1 )
    registerT3Emitter( Emitters.deathRayEmitter, witherSkull, witherSkull, witherSkull, witherSkull )
    registerT3Emitter( Emitters.explosiveEmitter, Blocks.tnt, Blocks.tnt, Blocks.tnt, Blocks.tnt )
    registerT3Emitter( Emitters.tier3CuttingEmitter, Items.diamond_pickaxe, Items.diamond_shovel, Items.diamond_pickaxe, Items.diamond_shovel )
  }

  // Lenses are ground in the lens grinder; first argument is the grind time.
  private def registerLenses() = {
    addModuleLensGrinder( 600, PreciseLens,
      "IGI",
      "GGG",
      "IGI",
      ( 'G' -> OpticalGlass ),
      ( 'I' -> "ingotIron" ) )
    addModuleLensGrinder( 1200, WideLens,
      "IGI",
      "GEG",
      "IGI",
      ( 'G' -> OpticalGlass ),
      ( 'I' -> "ingotIron" ),
      ( 'E' -> "gemEmerald" ) )
  }

  // Barrels: beam barrel uses a tier-2 diode core, blaster barrel a shutter.
  private def registerBarrels() = {
    addModuleShapedOre( BeamBarrel,
      "GI ",
      "IDI",
      " IG",
      ( 'G' -> "blockGlass" ),
      ( 'I' -> "ingotIron" ),
      ( 'D' -> Tier2Diode ) )
    addModuleShapedOre( BlasterBarrel,
      "GI ",
      "ISI",
      " IG",
      ( 'G' -> "blockGlass" ),
      ( 'I' -> "ingotIron" ),
      ( 'S' -> Shutter ) )
  }

  // Workstations, intermediate parts and empty chambers.
  private def registerMisc() = {
    addShapedOre( RaygunsBlocks.gunBench.asStack,
      "II",
      "BB",
      'I' -> "ingotIron",
      'B' -> Blocks.crafting_table )
    addShapedOre( RaygunsBlocks.lensGrinder.asStack,
      "SSS",
      "FMF",
      "CRC",
      ( 'S' -> Blocks.sand ),
      ( 'F' -> Items.flint ),
      ( 'M' -> getTEItem( "machineFrame" ) ),
      ( 'C' -> "ingotCopper" ),
      ( 'R' -> getTEItem( "powerCoilGold" ) ) )
    // Optical glass: smelt lead dust with glass in the induction smelter.
    addInductionSmelterRecipe(800, getTEItem( "dustLead" ),
      Blocks.glass.asStack, OpticalGlass.asStack( 3 ) )
    // Radiant dust: fill redstone with 500 mB of glowstone in the transposer.
    addFluidTransposerRecipe( 800, Items.redstone.asStack,
      RadiantDust.asStack, getFluidStack( "glowstone", 500 ), false );
    addShapedOre( Shutter.asStack,
      "PT",
      "I ",
      'P' -> getTEItem( "pneumaticServo" ),
      'T' -> getTEItem( "gearTin" ),
      'I' -> "ingotIron" )

    // Empty chambers share the chamber layout minus the emitter slot.
    def registerEmptyChamber( chamber : ItemStack, medium : Item, diode : Item, casing : Item ) : Unit = {
      addShapedOre( chamber,
        "CDC",
        "MM ",
        "CDC",
        ( 'D' -> diode ),
        ( 'C' -> casing ),
        ( 'M' -> medium ) )
    }
    registerEmptyChamber( Tier1EmptyChamber.asStack, Tier1GainMedium, Tier1Diode, Tier1ChamberCasing )
    registerEmptyChamber( Tier2EmptyChamber.asStack, Tier2GainMedium, Tier2Diode, Tier2ChamberCasing )
    registerEmptyChamber( Tier3EmptyChamber.asStack, Tier3GainMedium, Tier3Diode, Tier3ChamberCasing )
  }

  // Chamber casings: metal (or hardened glass) around a tier heat sink.
  private def registerCasings() : Unit = {
    def addCasing( casing : Item, metal : Any, heatSink : Item ) : Unit = {
      addShapedOre( casing.asStack,
        "MSM",
        'M' -> metal,
        'S' -> heatSink )
    }
    addCasing( Tier1ChamberCasing, "ingotTin", Tier1HeatSink )
    addCasing( Tier2ChamberCasing, "ingotInvar", Tier2HeatSink )
    addCasing( Tier3ChamberCasing, getTEItem( "glassHardened" ), Tier3HeatSink )
  }

  // Heat sinks: invar frame around a tier coolant core.
  private def registerHeatSinks() : Unit = {
    def addHeatSink( heatSink : Item, core : Any ) : Unit = {
      addShapedOre( heatSink.asStack,
        "ICI",
        "ICI",
        "ICI",
        'I' -> "ingotInvar",
        'C' -> core )
    }
    addHeatSink( Tier1HeatSink, Items.water_bucket )
    addHeatSink( Tier2HeatSink, getTEItem( "dustBlizz" ) )
    addHeatSink( Tier3HeatSink, getTEItem( "bucketCryotheum" ) )
  }

  // Diodes are ground in the lens grinder with a tier-specific core.
  private def registerDiodes() : Unit = {
    def addDiode( time : Short, diode : Item, wire : Any, core : Any ) : Unit = {
      addLensGrinder( time, diode.asStack,
        "GGG",
        "WCW",
        "GGG",
        'W' -> wire,
        'G' -> "paneGlassColorless",
        'C' -> core )
    }
    addDiode( 300, Tier1Diode, "ingotElectrum", "blockRedstone" )
    addDiode( 450, Tier2Diode, "ingotElectrum", Blocks.glowstone )
    addDiode( 600, Tier3Diode, "ingotElectrum", Items.nether_star )
  }

  // Doped glasses: smelt the dopant dust with optical glass.
  private def registerDopedGlass() : Unit = {
    addInductionSmelterRecipe(800, Items.redstone.asStack, OpticalGlass.asStack, RedstoneDopedGlass.asStack )
    addInductionSmelterRecipe(800, Items.glowstone_dust.asStack, OpticalGlass.asStack, GlowstoneDopedGlass.asStack )
    addInductionSmelterRecipe(800, RadiantDust.asStack, OpticalGlass.asStack, RadiantDopedGlass.asStack )
  }

  // Gain media: gold-framed doped glass ground for a tier-specific duration.
  private def registerGainMedia(): Unit = {
    def addGainMediumRecipe( medium : Item, ticks : Short, glass : Item ) : Unit = {
      addLensGrinder( ticks, medium.asStack,
        "GGG",
        "MGM",
        "GGG",
        ('M' -> "ingotGold" ),
        ('G' -> glass ) )
    }
    addGainMediumRecipe( Tier3GainMedium, 1200, RadiantDopedGlass )
    addGainMediumRecipe( Tier2GainMedium, 900, GlowstoneDopedGlass )
    addGainMediumRecipe( Tier1GainMedium, 600, RedstoneDopedGlass )
  }
}
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc
import io.rdbc.sapi._
import scala.concurrent.duration._
/** Verifies the `timeout` enrichment of durations and the `Timeout.Inf`
  * constant of the rdbc API. */
class TimeoutSpec extends RdbcSpec {

  "Duration to timeout converter" should {
    "convert finite duration to timeout" in {
      val finite = 10.seconds
      finite.timeout.value shouldBe finite
    }

    "convert infinite duration to timeout" in {
      val infinite = Duration.Inf
      infinite.timeout.value shouldBe infinite
    }
  }

  "Timeout.Inf" should {
    "be an infinite timeout" in {
      Timeout.Inf shouldBe Timeout(Duration.Inf)
    }
  }
}
| rdbc-io/rdbc | rdbc-api-scala/src/test/scala/io/rdbc/TimeoutSpec.scala | Scala | apache-2.0 | 1,208 |
// Compiler-test fixture (scala/test/files/neg/t10700-message): expands the
// macro `scala.testing.Macros.m` so the expected diagnostic can be checked.
// NOTE(review): this source is *expected* to fail compilation as part of the
// negative test — do not "fix" it.
object usage extends App {
  println(scala.testing.Macros.m[Int])
}
| scala/scala | test/files/neg/t10700-message/usage_2.scala | Scala | apache-2.0 | 69 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.gnocchi.cli
import java.io.{ FileInputStream, ObjectInputStream }

import net.fnothaft.gnocchi.models.{ GnocchiModelMetaData, QualityControlVariant }
import net.fnothaft.gnocchi.models.linear.{ AdditiveLinearGnocchiModel, DominantLinearGnocchiModel }
import net.fnothaft.gnocchi.models.logistic.{ AdditiveLogisticGnocchiModel, DominantLogisticGnocchiModel }
import net.fnothaft.gnocchi.models.variant.linear.{ AdditiveLinearVariantModel, DominantLinearVariantModel }
import net.fnothaft.gnocchi.models.variant.logistic.{ AdditiveLogisticVariantModel, DominantLogisticVariantModel }
import net.fnothaft.gnocchi.sql.GnocchiContext._
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.bdgenomics.utils.cli._
import org.kohsuke.args4j.{ Option => Args4jOption }
/** Companion/factory for the model-update CLI command. */
object UpdateGnocchiModel extends BDGCommandCompanion {
  val commandName = "UpdateGnocchiModel"
  val commandDescription = "Updates saved GnocchiModel with new batch of data"

  /** Parses the raw command line and constructs the command. */
  def apply(cmdLine: Array[String]) = {
    val parsedArgs = Args4j[UpdateGnocchiModelArgs](cmdLine)
    new UpdateGnocchiModel(parsedArgs)
  }
}
/** CLI arguments for [[UpdateGnocchiModel]]; inherits the genotype/phenotype
  * options from [[RegressPhenotypesArgs]]. */
class UpdateGnocchiModelArgs extends RegressPhenotypesArgs {
  // Path of the previously saved GnocchiModel to load and update.
  @Args4jOption(required = true, name = "-modelLocation", usage = "The location of the model to load.")
  var modelLocation: String = _

  // Destination path for the updated model.
  @Args4jOption(required = true, name = "-saveModelTo", usage = "The location to save model to.")
  var saveTo: String = _
}
/**
 * Command that loads a previously persisted GnocchiModel, updates it with a
 * new batch of genotype/phenotype observations, and saves the result to the
 * location given by `-saveModelTo`.
 */
class UpdateGnocchiModel(protected val args: UpdateGnocchiModelArgs) extends BDGSparkCommand[UpdateGnocchiModelArgs] {
  override val companion = UpdateGnocchiModel

  override def run(sc: SparkContext) {

    // Load genotype data, filtering out any SNPs not provided on the command line.
    val batchGenotypeStates = sc.loadAndFilterGenotypes(args.genotypes, args.associations,
      args.ploidy, args.mind, args.maf, args.geno, args.overwrite)

    // Load phenotype data (optionally with covariates).
    val batchPhenotypes = sc.loadPhenotypes(args.phenotypes, args.phenoName, args.oneTwo,
      args.includeCovariates, args.covarFile, args.covarNames)

    // Spark session used to read the persisted variant/QC model parquet files.
    val sparkSession = SparkSession.builder().getOrCreate()
    import sparkSession.implicits._

    // Layout of the persisted model directory.
    val gnocchiModelPath = args.modelLocation
    val vmLocation = gnocchiModelPath + "/variantModels"
    val qcModelsLocation = gnocchiModelPath + "/qcModels"
    val metaDataLocation = gnocchiModelPath + "/metaData"

    // BUGFIX: the metadata must be read back with Java deserialization. The
    // previous code called InputStream.read (which returns a single byte as
    // an Int) and cast that Int to GnocchiModelMetaData, which always fails
    // at runtime with a ClassCastException. The stream is also closed now.
    val metaDataIn = new ObjectInputStream(new FileInputStream(metaDataLocation))
    val metaData = try {
      metaDataIn.readObject.asInstanceOf[GnocchiModelMetaData]
    } finally {
      metaDataIn.close()
    }

    // Rebuild the concrete GnocchiModel for the requested association type.
    val model = args.associationType match {
      case "ADDITIVE_LINEAR" => {
        val variantModels = sparkSession.read.parquet(vmLocation).as[AdditiveLinearVariantModel].rdd
        val qcModels = sparkSession.read.parquet(qcModelsLocation).as[QualityControlVariant[AdditiveLinearVariantModel]].rdd
          .map(qcv => {
            (qcv.variantModel, qcv.observations)
          })
        AdditiveLinearGnocchiModel(metaData, variantModels, qcModels)
      }
      case "DOMINANT_LINEAR" => {
        val variantModels = sparkSession.read.parquet(vmLocation).as[DominantLinearVariantModel].rdd
        val qcModels = sparkSession.read.parquet(qcModelsLocation).as[QualityControlVariant[DominantLinearVariantModel]].rdd
          .map(qcv => {
            (qcv.variantModel, qcv.observations)
          })
        DominantLinearGnocchiModel(metaData, variantModels, qcModels)
      }
      case "ADDITIVE_LOGISTIC" => {
        val variantModels = sparkSession.read.parquet(vmLocation).as[AdditiveLogisticVariantModel].rdd
        val qcModels = sparkSession.read.parquet(qcModelsLocation).as[QualityControlVariant[AdditiveLogisticVariantModel]].rdd
          .map(qcv => {
            (qcv.variantModel, qcv.observations)
          })
        AdditiveLogisticGnocchiModel(metaData, variantModels, qcModels)
      }
      case "DOMINANT_LOGISTIC" => {
        val variantModels = sparkSession.read.parquet(vmLocation).as[DominantLogisticVariantModel].rdd
        val qcModels = sparkSession.read.parquet(qcModelsLocation).as[QualityControlVariant[DominantLogisticVariantModel]].rdd
          .map(qcv => {
            (qcv.variantModel, qcv.observations)
          })
        DominantLogisticGnocchiModel(metaData, variantModels, qcModels)
      }
      // Robustness: fail with a descriptive error instead of a bare MatchError.
      case other =>
        throw new IllegalArgumentException(s"Unsupported association type: $other")
    }

    // Pair up the new genotype and phenotype observations.
    val batchObservations = sc.generateObservations(batchGenotypeStates, batchPhenotypes)

    // Update the model with the new batch of data.
    val updatedModel = model.update(batchObservations)

    // Persist the updated model.
    updatedModel.save(args.saveTo)
  }
}
| tkdagdelen/gnocchi | gnocchi-cli/src/main/scala/net/fnothaft/gnocchi/cli/UpdateGnocchiModel.scala | Scala | apache-2.0 | 5,343 |
package org.teavm.samples.scala
import org.teavm.samples.scala.Grammar._
/**
 * A small arithmetic-expression interpreter built from [[Grammar]] parser
 * combinators: evaluation, fully parenthesized printing and parsing of
 * expressions with +, -, *, /, unary minus, parentheses and integers.
 */
object Calculator {
  /** Recursively evaluates a parsed expression to its numeric value. */
  def eval(expr : Expr) : BigInt = expr match {
    case Add(a, b) => eval(a) + eval(b)
    case Subtract(a, b) => eval(a) - eval(b)
    case Multiply(a, b) => eval(a) * eval(b)
    case Divide(a, b) => eval(a) / eval(b)
    case Negate(n) => -eval(n)
    case Number(v) => v
  }

  /** Renders an expression back to fully parenthesized text. */
  def print(expr : Expr) : String = expr match {
    case Add(a, b) => "(" + print(a) + " + " + print(b) + ")"
    case Subtract(a, b) => "(" + print(a) + " - " + print(b) + ")"
    case Multiply(a, b) => "(" + print(a) + " * " + print(b) + ")"
    case Divide(a, b) => "(" + print(a) + " / " + print(b) + ")"
    case Negate(n) => "-" + print(n)
    case Number(v) => v.toString()
  }

  /** Parses a character sequence into an expression tree. */
  def parse(str : Seq[Char]) = additive.parse(str)

  // additive := multiplicative (("+" | "-") multiplicative)*, left-associative.
  def additive = multiplicative ~ ((keyword("+") | keyword("-")) ~ multiplicative).* >> {
    case (h, t) => t.foldLeft(h) {
      case (left, ("+", right)) => Add(left, right)
      case (left, ("-", right)) => Subtract(left, right)
    }
  }

  // multiplicative := unary (("*" | "/") unary)*, left-associative.
  def multiplicative = unary ~ ((keyword("*") | keyword("/")) ~ unary).* >> {
    case (h, t) => t.foldLeft(h) {
      case (left, ("*", right)) => Multiply(left, right)
      case (left, ("/", right)) => Divide(left, right)
    }
  }

  // unary := "-"? primitive — an optional leading minus negates the operand.
  def unary : Rule[Expr] = keyword("-").? ~ primitive >> {
    case (Some(_), v) => Negate(v)
    case (None, v) => v
  }

  // primitive := number | "(" additive ")"
  def primitive : Rule[Expr] = Rule.firstOf(number >> Number, group)

  def group : Rule[Expr] = keyword("(") ~ additive ~ keyword(")") >> { case ((_, result), _) => result }

  // Non-negative integer without a leading zero, followed by optional spaces.
  def number : Rule[Int] = range('1', '9') ~ range('0', '9').* ~ ws >> {
    case ((h, t), _) => t.foldLeft(h - '0')((n, c) => n * 10 + (c - '0'))
  }

  // A literal keyword followed by optional whitespace; yields the keyword text.
  def keyword(str : String) = s(str) ~ ws >> { case (word, _) => word }

  // Consumes any run of spaces and yields the unit value. BUGFIX: previously
  // this yielded `Unit` — the companion *object*, not the unit value `()`.
  // The result is discarded by every caller, so behavior is unchanged, but
  // the rule now produces `()` as intended.
  def ws = s(" ").* >> { case _ => () }
}
/** AST for arithmetic expressions produced by [[Calculator.parse]].
  * Sealed so that matches over it are checked for exhaustiveness. */
sealed abstract class Expr
/** Integer literal. */
case class Number(value : Int) extends Expr
/** left + right */
case class Add(left : Expr, right : Expr) extends Expr
/** left - right */
case class Subtract(left : Expr, right : Expr) extends Expr
/** left * right */
case class Multiply(left : Expr, right : Expr) extends Expr
/** left / right (integer division) */
case class Divide(left : Expr, right : Expr) extends Expr
/** Unary minus. */
case class Negate(argument : Expr) extends Expr
package debop4s.core.utils
import java.util.UUID
import debop4s.core.AbstractCoreFunSuite
/**
* debop4s.core.tests.LocalFunSuite
* @author 배성혁 sunghyouk.bae@gmail.com
* @since 2013. 12. 14. 오후 7:11
*/
/** Exercises the thread/context-local storage API `Local`: storing plain
  * values, storing references, and lazy creation via getOrCreate. */
class LocalFunSuite extends AbstractCoreFunSuite {

  // Start every test from an empty local storage.
  before { Local.clearAll() }

  test("put/get value kind") {
    val key = "Local.Value.Key"
    val value = UUID.randomUUID().toString
    Local.put(key, value)

    if (Strings.isWhitespace(key)) fail("key is whitespace")

    // A stored value must be retrievable unchanged under the same key.
    val stored = Local.get[String](key).orNull
    stored should not be null
    stored shouldEqual value
  }

  test("put/get reference kind") {
    val key = "Local.Reference.Key"
    val user = new User("user", "P" + Thread.currentThread().getId, 1)
    Local.put(key, user)

    Thread.sleep(5)

    val storedUser = Local.get[User](key).orNull
    // NOTE(review): an explicit null-check assertion was left commented out
    // here in the original; the shouldEqual below subsumes it.
    storedUser shouldEqual user
    user.name shouldEqual storedUser.name
    user.password shouldEqual storedUser.password
    user.age shouldEqual storedUser.age
  }

  test("get or create if not exists") {
    val key = "Local.GetOrCreate.Key"
    // getOrCreate must materialize the value on first access...
    val user = Local.getOrCreate(key, {
      new User("user", "P" + Thread.currentThread().getId, 2)
    }).orNull

    Thread.sleep(5)

    // ...and subsequent gets must return that same stored value.
    val storedUser = Local.get[User](key).orNull
    storedUser should not be null
    storedUser shouldEqual user
    storedUser.name shouldEqual user.name
    storedUser.password shouldEqual user.password
    storedUser.age shouldEqual user.age
  }
}
/** Simple immutable fixture stored and retrieved through `Local` in the tests. */
case class User(name: String, password: String, age: Int)
package com.ignition.script
import org.apache.spark.sql.Row
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import com.ignition.types._
@RunWith(classOf[JUnitRunner])
class XPathExpressionSpec extends Specification {

  // Schema with a single string column "payload" that carries the XML text.
  val schema = string("payload").schema
  // Row whose payload is a two-item XML document (serialized to a string).
  val row = Row(
    <items>
      <item>
        <name>John</name>
      </item>
      <item>
        <name>Jack</name>
      </item>
    </items>.toString)

  "xpath expressions" should {
    "evaluate against string data" in {
      // Selecting "name" against the payload column yields every <name>
      // element, concatenated in document order.
      val proc = "name".xpath("payload")
      proc.evaluate(schema)(row) === "<name>John</name><name>Jack</name>"
    }
  }
} | uralian/ignition | src/test/scala/com/ignition/script/XPathExpressionSpec.scala | Scala | apache-2.0 | 678 |
/* ___ _ ___ _ _ *\
** / __| |/ (_) | | The SKilL Generator **
** \__ \ ' <| | | |__ (c) 2013-18 University of Stuttgart **
** |___/_|\_\_|_|____| see LICENSE **
\* */
package de.ust.skill.generator.csharp.api
import de.ust.skill.generator.csharp.GeneralOutputMaker
/**
 * Emits `api/Visitor.cs`: an abstract C# visitor base class with one abstract
 * `visit` overload per type listed in `visitors`. Part of a stackable
 * generator chain, hence the `abstract override`.
 */
trait VisitorsMaker extends GeneralOutputMaker {
  abstract override def make {
    // Let the rest of the generator chain run first.
    super.make

    // Only generate the file when at least one visited type was requested.
    if (visitors.length > 0) {
      val out = files.open(s"api/Visitor.cs")

      // NOTE(review): the generated doc comments use `<param id =_R>`;
      // standard C# XML docs would be `<typeparam name="_R">` -- confirm
      // intent before touching the template (it is emitted verbatim).
      //package & imports
      out.write(s"""
using System;
namespace ${this.packageName}
{
    namespace api
    {
        /// <summary>
        /// Base class of a distributed dispatching function ranging over specified types
        /// implemented by the visitor pattern.
        ///
        /// @author Simon Glaub, Timm Felden
        /// </summary>
        /// <param id =_R> the result type </param>
        /// <param id =_A> the argument type </param>
        /// <param id =_E> the type of throws exception; use RuntimeException for nothrow </param>
        public abstract class Visitor<_R, _A, _E> where _E : Exception{${
      (for (t ← visitors) yield s"""
            public abstract _R visit(${mapType(t)} self, _A arg);""").mkString
    }
        }
    }
}
""")

      out.close()
    }
  }
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/csharp/api/VisitorsMaker.scala | Scala | bsd-3-clause | 1,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit
import com.google.common.collect.Iterables
import io.fabric8.kubernetes.api.model.{ContainerBuilder, DoneablePod, HasMetadata, Pod, PodBuilder, PodList, Secret, SecretBuilder}
import io.fabric8.kubernetes.client.{KubernetesClient, Watch}
import io.fabric8.kubernetes.client.dsl.{MixedOperation, NamespaceListVisitFromServerGetDeleteRecreateWaitApplicable, NamespaceVisitFromServerGetWatchDeleteRecreateWaitApplicable, PodResource, Resource}
import org.mockito.{ArgumentCaptor, Mock, MockitoAnnotations}
import org.mockito.Mockito.{doReturn, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.BeforeAndAfter
import org.scalatest.mock.MockitoSugar._
import scala.collection.JavaConverters._
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.constants._
import org.apache.spark.deploy.k8s.submit.submitsteps.{DriverConfigurationStep, KubernetesDriverSpec}
class ClientSuite extends SparkFunSuite with BeforeAndAfter {

  // Identity fields stamped onto the pod echoed back by the mocked create()
  // call, so tests can verify the pod's server-assigned owner reference.
  private val DRIVER_POD_UID = "pod-id"
  private val DRIVER_POD_API_VERSION = "v1"
  private val DRIVER_POD_KIND = "pod"

  // Aliases for the verbose fabric8 client types used by the mocks below.
  private type ResourceList = NamespaceListVisitFromServerGetDeleteRecreateWaitApplicable[
    HasMetadata, Boolean]
  private type Pods = MixedOperation[Pod, PodList, DoneablePod, PodResource[Pod, DoneablePod]]

  @Mock
  private var kubernetesClient: KubernetesClient = _

  @Mock
  private var podOperations: Pods = _

  @Mock
  private var namedPods: PodResource[Pod, DoneablePod] = _

  @Mock
  private var loggingPodStatusWatcher: LoggingPodStatusWatcher = _

  @Mock
  private var resourceList: ResourceList = _

  // Two toy configuration steps (defined at the bottom of this file) that the
  // client under test is expected to apply in order.
  private val submissionSteps = Seq(FirstTestConfigurationStep, SecondTestConfigurationStep)

  // Captors populated by the mocked client; inspected by the assertions.
  private var createdPodArgumentCaptor: ArgumentCaptor[Pod] = _
  private var createdResourcesArgumentCaptor: ArgumentCaptor[HasMetadata] = _
  // Wires the Mockito fixture: the mocked Kubernetes client captures the pod
  // passed to create() and echoes it back augmented with fixed
  // uid/apiVersion/kind; additional resources handed to resourceList() are
  // captured as well.
  before {
    MockitoAnnotations.initMocks(this)
    when(kubernetesClient.pods()).thenReturn(podOperations)
    when(podOperations.withName(FirstTestConfigurationStep.podName)).thenReturn(namedPods)
    createdPodArgumentCaptor = ArgumentCaptor.forClass(classOf[Pod])
    createdResourcesArgumentCaptor = ArgumentCaptor.forClass(classOf[HasMetadata])
    when(podOperations.create(createdPodArgumentCaptor.capture())).thenAnswer(new Answer[Pod] {
      override def answer(invocation: InvocationOnMock): Pod = {
        // Simulate the API server assigning identity to the submitted pod.
        new PodBuilder(invocation.getArgumentAt(0, classOf[Pod]))
          .editMetadata()
            .withUid(DRIVER_POD_UID)
          .endMetadata()
          .withApiVersion(DRIVER_POD_API_VERSION)
          .withKind(DRIVER_POD_KIND)
          .build()
      }
    })
    when(podOperations.withName(FirstTestConfigurationStep.podName)).thenReturn(namedPods)
    when(namedPods.watch(loggingPodStatusWatcher)).thenReturn(mock[Watch])
    doReturn(resourceList)
      .when(kubernetesClient)
      .resourceList(createdResourcesArgumentCaptor.capture())
  }
  // The created pod must carry the mutations from both steps: the name and
  // label from step 1, the annotation and single renamed container from step 2.
  test("The client should configure the pod with the submission steps.") {
    val submissionClient = new Client(
      submissionSteps,
      new SparkConf(false),
      kubernetesClient,
      false,
      "spark",
      loggingPodStatusWatcher)
    submissionClient.run()
    val createdPod = createdPodArgumentCaptor.getValue
    assert(createdPod.getMetadata.getName === FirstTestConfigurationStep.podName)
    assert(createdPod.getMetadata.getLabels.asScala ===
      Map(FirstTestConfigurationStep.labelKey -> FirstTestConfigurationStep.labelValue))
    assert(createdPod.getMetadata.getAnnotations.asScala ===
      Map(SecondTestConfigurationStep.annotationKey ->
        SecondTestConfigurationStep.annotationValue))
    assert(createdPod.getSpec.getContainers.size() === 1)
    assert(createdPod.getSpec.getContainers.get(0).getName ===
      SecondTestConfigurationStep.containerName)
  }
  // The Secret contributed by step 1 must be created alongside the pod and be
  // owner-referenced to the driver pod (so it is garbage-collected with it).
  test("The client should create the secondary Kubernetes resources.") {
    val submissionClient = new Client(
      submissionSteps,
      new SparkConf(false),
      kubernetesClient,
      false,
      "spark",
      loggingPodStatusWatcher)
    submissionClient.run()
    val createdPod = createdPodArgumentCaptor.getValue
    val otherCreatedResources = createdResourcesArgumentCaptor.getAllValues
    assert(otherCreatedResources.size === 1)
    val createdResource = Iterables.getOnlyElement(otherCreatedResources).asInstanceOf[Secret]
    assert(createdResource.getMetadata.getName === FirstTestConfigurationStep.secretName)
    assert(createdResource.getData.asScala ===
      Map(FirstTestConfigurationStep.secretKey -> FirstTestConfigurationStep.secretData))
    val ownerReference = Iterables.getOnlyElement(createdResource.getMetadata.getOwnerReferences)
    assert(ownerReference.getName === createdPod.getMetadata.getName)
    assert(ownerReference.getKind === DRIVER_POD_KIND)
    assert(ownerReference.getUid === DRIVER_POD_UID)
    assert(ownerReference.getApiVersion === DRIVER_POD_API_VERSION)
  }
  // Spark conf entries and driver JVM options must be exposed on the driver
  // container as indexed ENV_JAVA_OPT_PREFIX environment variables.
  test("The client should attach the driver container with the appropriate JVM options.") {
    val sparkConf = new SparkConf(false)
      .set("spark.logConf", "true")
      .set(
        org.apache.spark.internal.config.DRIVER_JAVA_OPTIONS,
        "-XX:+HeapDumpOnOutOfMemoryError -XX:+PrintGCDetails")
    val submissionClient = new Client(
      submissionSteps,
      sparkConf,
      kubernetesClient,
      false,
      "spark",
      loggingPodStatusWatcher)
    submissionClient.run()
    val createdPod = createdPodArgumentCaptor.getValue
    val driverContainer = Iterables.getOnlyElement(createdPod.getSpec.getContainers)
    assert(driverContainer.getName === SecondTestConfigurationStep.containerName)
    // Only the java-opt env vars are relevant; sort by name so the index
    // suffix check below is deterministic.
    val driverJvmOptsEnvs = driverContainer.getEnv.asScala.filter { env =>
      env.getName.startsWith(ENV_JAVA_OPT_PREFIX)
    }.sortBy(_.getName)
    assert(driverJvmOptsEnvs.size === 4)
    val expectedJvmOptsValues = Seq(
      "-Dspark.logConf=true",
      s"-D${SecondTestConfigurationStep.sparkConfKey}=" +
        s"${SecondTestConfigurationStep.sparkConfValue}",
      s"-XX:+HeapDumpOnOutOfMemoryError",
      s"-XX:+PrintGCDetails")
    driverJvmOptsEnvs.zip(expectedJvmOptsValues).zipWithIndex.foreach {
      case ((resolvedEnv, expectedJvmOpt), index) =>
        assert(resolvedEnv.getName === s"$ENV_JAVA_OPT_PREFIX$index")
        assert(resolvedEnv.getValue === expectedJvmOpt)
    }
  }
  // With waitForAppCompletion = true the client must block on the status
  // watcher rather than returning immediately after submission.
  test("Waiting for app completion should stall on the watcher") {
    val submissionClient = new Client(
      submissionSteps,
      new SparkConf(false),
      kubernetesClient,
      true,
      "spark",
      loggingPodStatusWatcher)
    submissionClient.run()
    verify(loggingPodStatusWatcher).awaitCompletion()
  }
}
// Minimal configuration step: names the driver pod, adds one label, and
// contributes one Secret as an additional Kubernetes resource.
private object FirstTestConfigurationStep extends DriverConfigurationStep {
  val podName = "test-pod"
  val secretName = "test-secret"
  val labelKey = "first-submit"
  val labelValue = "true"
  val secretKey = "secretKey"
  val secretData = "secretData"

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    // Rename the pod and attach the identifying label.
    val modifiedPod = new PodBuilder(driverSpec.driverPod)
      .editMetadata()
      .withName(podName)
      .addToLabels(labelKey, labelValue)
      .endMetadata()
      .build()
    // Contribute a Secret that the client must create next to the pod.
    val additionalResource = new SecretBuilder()
      .withNewMetadata()
      .withName(secretName)
      .endMetadata()
      .addToData(secretKey, secretData)
      .build()
    driverSpec.copy(
      driverPod = modifiedPod,
      otherKubernetesResources = driverSpec.otherKubernetesResources ++ Seq(additionalResource))
  }
}
// Second step: annotates the pod, adds a custom Spark conf entry, and renames
// the driver container -- used to prove that steps compose in order.
private object SecondTestConfigurationStep extends DriverConfigurationStep {
  val annotationKey = "second-submit"
  val annotationValue = "submitted"
  val sparkConfKey = "spark.custom-conf"
  val sparkConfValue = "custom-conf-value"
  val containerName = "driverContainer"

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    val modifiedPod = new PodBuilder(driverSpec.driverPod)
      .editMetadata()
      .addToAnnotations(annotationKey, annotationValue)
      .endMetadata()
      .build()
    val resolvedSparkConf = driverSpec.driverSparkConf.clone().set(sparkConfKey, sparkConfValue)
    val modifiedContainer = new ContainerBuilder(driverSpec.driverContainer)
      .withName(containerName)
      .build()
    driverSpec.copy(
      driverPod = modifiedPod,
      driverSparkConf = resolvedSparkConf,
      driverContainer = modifiedContainer)
  }
}
| publicRoman/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala | Scala | apache-2.0 | 9,450 |
package reactive.hide
import reactive.Configuration
import reactive.api.ApplicationJsonFormats
import akka.actor.{ ActorRef, ActorSystem }
import spray.http.StatusCodes
import spray.routing.Directives
/**
 * HTTP routes for the "hide" UI:
 *  - GET /resource/<name> serves a single-segment classpath resource by name;
 *  - /hide serves index.html and static assets from the "hide/" resource dir;
 *  - /hide/ws permanently redirects to the same URI on the WebSocket port.
 *
 * NOTE(review): the injected `hide: ActorRef` is never referenced in this
 * route -- confirm whether it is dead or consumed elsewhere.
 */
class HideService(hide : ActorRef)(implicit system : ActorSystem) extends Directives with ApplicationJsonFormats {
  lazy val route =
    pathPrefix("resource" / Segment) { str =>
      get {
        // Segment cannot contain '/', so only single-segment resource names
        // are reachable here.
        getFromResource(str)
      }
    } ~
      pathPrefix("hide") {
        val dir = "hide/"
        pathEndOrSingleSlash {
          get {
            getFromResource(dir + "index.html")
          }
        } ~
          path("ws") {
            requestUri { uri =>
              // WebSocket traffic is served from a different port; point the
              // client there permanently.
              val wsUri = uri.withPort(Configuration.portWs)
              redirect(wsUri, StatusCodes.PermanentRedirect)
            }
          } ~
          getFromResourceDirectory(dir)
      }
}
| PiotrTrzpil/vitrace | backend/src/main/scala/reactive/hide/HideService.scala | Scala | mit | 834 |
package com.tajpure.scheme.compiler.ast
class Argument(val elements: List[Node]) {
val positional: List[Node] = elements.map {
node => {
node
}
}
override
def toString(): String = {
elements.foldLeft(" ( ")((node, str) => node + str + " ") + ")"
}
} | tajpure/SoScheme | src/main/scala/com/tajpure/scheme/compiler/ast/Argument.scala | Scala | gpl-3.0 | 287 |
package org.eknet.spray.openid.model
import org.scalatest.{Matchers, FunSuite}
class CryptTest extends FunSuite with Matchers {

  // Signing with an HMAC-SHA1 key must verify under the same key and must be
  // rejected under a different, freshly generated key.
  test("verify signature") {
    val signingKey = Crypt.HmacSha1.generateKey.get
    val payload = "abcdefg".getBytes
    val signature = Crypt.sign(signingKey, payload).get

    // Same key: accepted.
    Crypt.verifySig(signingKey)(signature, payload).get should be (true)

    // Unrelated key: rejected.
    val otherKey = Crypt.HmacSha1.generateKey.get
    Crypt.verifySig(otherKey)(signature, payload).get should be (false)
  }
}
| eikek/spray-openid | src/test/scala/org/eknet/spray/openid/model/CryptTest.scala | Scala | apache-2.0 | 460 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import com.intellij.psi.{PsiClass, PsiElement}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.stubs.ScTypeAliasStub
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.{TypeParameterType, TypeSystem}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.macroAnnotations.{CachedInsidePsiElement, ModCount}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScTypeAliasDefinition extends ScTypeAlias {
  // A definition (`type T = Rhs`) as opposed to a bare declaration (`type T`).
  override def isDefinition: Boolean = true

  // The RHS type element. Prefers the stub tree (avoids parsing the AST)
  // when a stub is available.
  def aliasedTypeElement: ScTypeElement = {
    val stub = this.asInstanceOf[ScalaStubBasedElementImpl[_ <: PsiElement]].getStub
    if (stub != null) {
      return stub.asInstanceOf[ScTypeAliasStub].getTypeElement
    }
    findChildByClassScala(classOf[ScTypeElement])
  }
  // Resolves the aliased type. If this alias is already on the typing
  // context's visited path we are in a cycle (`type A = A`-style), so return
  // a Failure flagged as cyclic instead of recursing forever.
  def aliasedType(ctx: TypingContext): TypeResult[ScType] = {
    if (ctx.visited.contains(this)) {
      new Failure(ScalaBundle.message("circular.dependency.detected", name), Some(this)) {override def isCyclic = true}
    } else {
      aliasedTypeElement.getType(ctx(this))
    }
  }

  // Cached convenience overload, invalidated on block-level modifications.
  @CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
  def aliasedType: TypeResult[ScType] = aliasedType(TypingContext.empty)

  // For a definition, both bounds coincide with the aliased type itself.
  def lowerBound: TypeResult[ScType] = aliasedType(TypingContext.empty)
  def upperBound: TypeResult[ScType] = aliasedType(TypingContext.empty)
  // True when this alias is a pure rename of `cls` AND is declared inside a
  // static (top-level-reachable) object, so it can stand in for the class.
  def isExactAliasFor(cls: PsiClass)(implicit typeSystem: TypeSystem): Boolean = {
    val isDefinedInObject = containingClass match {
      case obj: ScObject if obj.isStatic => true
      case _ => false
    }
    isDefinedInObject && isAliasFor(cls)
  }
  // Structural test: the alias is a pure rename of `cls`, i.e. it has the
  // same type-parameter arity, applies its own parameters to `cls` in order,
  // and neither side adds variance, bounds, or context/view bounds that the
  // other lacks.
  def isAliasFor(cls: PsiClass)(implicit typeSystem: TypeSystem): Boolean = {
    if (cls.getTypeParameters.length != typeParameters.length) false
    else if (cls.hasTypeParameters) {
      // The RHS must be exactly `cls[T1, ..., Tn]` using this alias's own
      // type parameters in declaration order.
      val typeParamsAreAppliedInOrderToCorrectClass = aliasedType.getOrAny match {
        case pte: ScParameterizedType =>
          val refersToClass = pte.designator.equiv(ScalaType.designator(cls))
          val typeParamsAppliedInOrder = (pte.typeArguments corresponds typeParameters) {
            case (tpt: TypeParameterType, tp) if tpt.typeParameter == tp => true
            case _ => false
          }
          refersToClass && typeParamsAppliedInOrder
        case _ => false
      }
      // Scala classes/traits: variance and all bounds must match pairwise.
      val varianceAndBoundsMatch = cls match {
        case sc0@(_: ScClass | _: ScTrait) =>
          val sc = sc0.asInstanceOf[ScTypeParametersOwner]
          (typeParameters corresponds sc.typeParameters) {
            case (tp1, tp2) => tp1.variance == tp2.variance && tp1.upperBound == tp2.upperBound && tp1.lowerBound == tp2.lowerBound &&
              tp1.contextBound.isEmpty && tp2.contextBound.isEmpty && tp1.viewBound.isEmpty && tp2.viewBound.isEmpty
          }
        case _ => // Java class
          (typeParameters corresponds cls.getTypeParameters) {
            case (tp1, tp2) => tp1.variance == ScTypeParam.Invariant && tp1.upperTypeElement.isEmpty && tp2.getExtendsListTypes.isEmpty &&
              tp1.lowerTypeElement.isEmpty && tp1.contextBound.isEmpty && tp1.viewBound.isEmpty
          }
      }
      typeParamsAreAppliedInOrderToCorrectClass && varianceAndBoundsMatch
    }
    else {
      // No type parameters on either side: the RHS must simply be `cls`.
      val clsType = ScalaType.designator(cls)
      typeParameters.isEmpty && aliasedType.getOrElse(return false).equiv(clsType)
    }
  }
} | katejim/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScTypeAliasDefinition.scala | Scala | apache-2.0 | 3,866 |
/*
* Copyright (c) 2012, TU Berlin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the TU Berlin nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL TU Berlin BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package de.tuberlin.uebb.sl2.modules
/**
* Check function definitions for well-formedness and correctness.
*/
trait FDChecker {
  this: Syntax with Context with Type with Errors =>

  /**
   * Aggregated result of the function-definition check. (A named case class
   * instead of a tuple, for readable field access.)
   *
   * @param sigs declared signature per top-level function name
   * @param defs defining equations per top-level function name
   * @param ctxt context resulting from the check
   */
  sealed case class FDCheckResult(sigs : Map[Var, FunctionSig], defs :
    Map[Var, List[FunctionDef]], ctxt : Context)

  /**
   * Check a program's top-level function definitions.
   *
   * @return Signatures and definitions of all top-level functions
   */
  def checkFunctions(in: AST): Either[Error, FDCheckResult]
}
| mzuber/simple-language | src/main/scala/modules/FDChecker.scala | Scala | bsd-3-clause | 2,174 |
package mgoeminne.scalaggplot.stat
/**
 * Quantile regression statistic (analogue of ggplot2's `stat_quantile`).
 *
 * Currently a parameterless stub: no quantile levels, formula, or method
 * options are implemented yet.
 */
case class quantile() extends Statistic
{
}
| mgoeminne/scala-ggplot | src/main/scala/mgoeminne/scalaggplot/stat/quantile.scala | Scala | lgpl-3.0 | 113 |
package net.dinkla.lbnn.utils
/**
 * Three-way partition of elements around a pivot value. The field names
 * follow the usual quick-select convention -- `ls`: below the pivot, `es`:
 * equal to it, `hs`: above it -- though the class itself does not enforce
 * that invariant.
 *
 * @param median the pivot the elements were split around
 * @param ls     the "low" group
 * @param es     the "equal" group
 * @param hs     the "high" group
 * @tparam T     element type
 */
class Partition[T](val median: T,
                   val ls: List[T],
                   val es: List[T],
                   val hs: List[T]) {
  override def toString: String = s"($median, $ls, $es, $hs)"

  // Applies f to the pivot and to every element of all three groups.
  def map[S](f: T => S): Partition[S]
  = new Partition(f(median), ls map f, es map f, hs map f)

  // True iff each of the three groups has fewer than n elements.
  def checkSizes(n: Int): Boolean
  = ls.size < n && es.size < n && hs.size < n
} | jdinkla/location-based-nearest-neighbours | src/main/scala/net/dinkla/lbnn/utils/Partition.scala | Scala | apache-2.0 | 538 |
package org.crudible.lift.binding.markup
import org.crudible.core.binding.model.TextAreaComponent
import org.crudible.lift.binding.model.LiftMarkup
import org.crudible.lift.util.DefaultMessageHandler
import org.crudible.lift.util.PropertyHelper
import scala.xml.Text
import org.crudible.lift.util.EasyJS
import org.crudible.lift.binding.util.LiftMarkupHelpers
import net.liftweb.util.Helpers
// Renders a Bootstrap-styled <textarea> form group for a TextAreaComponent.
class LiftTextAreaMarkup(base: TextAreaComponent) extends LiftMarkup {

  // Adds readonlyOption: Some(Text("readonly")) when the component is
  // read-only, None otherwise -- suitable for an optional XML attribute.
  implicit class TextAreaComponentExt(component: TextAreaComponent) {
    def readonlyOption = { if (component.readonly()) { Some(Text("readonly")) } else { None } }
  }

  // Supplies the ids and help-text handling for validation messages.
  protected val messageHandler = new DefaultMessageHandler
  // Full form-group row: label (3 cols), the textarea itself (8 cols), and a
  // help-block span that the message handler fills with validation text.
  def render(realValue: String, callbackName: String) = {
    <div class="row">
      <div class="form-group" id={ messageHandler.groupID }>
        <label for={ callbackName } class="col-sm-3 control-label">{ PropertyHelper.labelOf(base) }</label>
        <div class="col-sm-8">
          { renderTextArea(realValue, callbackName) }
          <span id={ messageHandler.helpID } class="help-block"></span>
        </div>
      </div>
    </div>
  }
  // The <textarea> itself. Optionally registers autosize JS and, when a max
  // length is configured, appends an input-group addon that displays the
  // remaining-characters hint (kept in sync by injected JS).
  def renderTextArea(realValue: String, callbackName: String) = {
    // Fresh element id for the max-length hint.
    val hintID = Helpers.nextFuncName
    if (base.autosize()) {
      EasyJS.appendJSString(LiftMarkupHelpers.enableAutosize(callbackName))
    }
    <div class="input-group">
      <textarea class="form-control" id={ callbackName } name={ callbackName } placeholder={ base.placeholder() } readonly={ base.readonlyOption }>{ realValue }</textarea>
      {
        base.max().toXML { max =>
          EasyJS.appendJSString(LiftMarkupHelpers.enableMaxLengthHint(callbackName, hintID, max))
          <div class="input-group-addon">
            <div id={ hintID } style="width: 65px;">max. { max }</div>
          </div>
        }
      }
    </div>
  }
  // Shows (Some) or clears (None) the validation message for this field.
  def setValidationMessage(message: Option[String]) = {
    messageHandler.handle(message)
  }
} | rehei/crudible | crudible-lift/src/main/scala/org/crudible/lift/binding/markup/LiftTextAreaMarkup.scala | Scala | apache-2.0 | 1,967 |
package com.stefansavev.randomprojections.datarepr.dense.store
import java.io.{BufferedInputStream, File, FileInputStream}
import com.stefansavev.core.serialization.DoubleArraySerializer
import com.stefansavev.randomprojections.actors.Application
import com.stefansavev.randomprojections.buffers._
import com.stefansavev.randomprojections.datarepr.sparse.SparseVector
import com.stefansavev.randomprojections.implementation.HadamardUtils
import com.stefansavev.randomprojections.serialization.ValueStoreSerializationExt
import com.stefansavev.randomprojections.utils.Utils
import scala.reflect.ClassTag
// Abstraction over the storage of dense per-row values; implementations vary
// in how the values are compressed (see the serialization tags below).
trait ValuesStore {
  // Fills `output` with row `rowId`. The exact meaning of `isPos` is
  // implementation-defined (sign/orientation toggle) -- confirm in the impls.
  def fillRow(rowId: Int, output: Array[Double], isPos: Boolean): Unit
  // Overwrites row `rowId` with `input`.
  def setRow(rowId: Int, input: Array[Double]): Unit
  // Fills `output` from row `rowId`, restricted to the given columns
  // (presumably aligned with `columnIds`) -- confirm in the impls.
  def fillRow(rowId: Int, columnIds: Array[Int], output: Array[Double]): Unit
  // Component-wise product of row `rowId` with the sparse vector, into `output`.
  def multiplyRowComponentWiseBySparseVector(rowId: Int, sv: SparseVector, output: Array[Double]): Unit
  // Cosine similarity between `query` and row `id`; the name implies both
  // sides are already length-normalized.
  def cosineForNormalizedData(query: Array[Double], id: Int): Double
  // Descriptor of the builder that produces this kind of store.
  def getBuilderType: StoreBuilderType
}
// Empty companion; no factory helpers are defined here (yet).
object ValuesStore {
}
// Incremental builder for a ValuesStore: rows are appended one at a time and
// the finished store is produced by build().
trait ValuesStoreBuilder {
  // Index of the next row to be written.
  def getCurrentRowIndex: Int
  // True when the builder cannot accept further rows.
  def isFull: Boolean
  // Serialized representation of what has been built so far.
  def getContents(): Array[Byte]
  // Appends one row of values.
  def addValues(values: Array[Double]): Unit
  def build(): ValuesStore
  // Combines several stores into a single store covering `numTotalRows` rows.
  def merge(numTotalRows: Int, valueStores: Iterator[ValuesStore]): ValuesStore
}
// Tag values written into the serialized form to identify which concrete
// ValuesStore implementation follows. NOTE(review): renumbering these would
// break previously serialized stores.
object ValuesStoreAsDoubleSerializationTags {
  val valuesStoreAsDouble = 1
  val valuesStoreAsBytes = 2
  val valuesStoreAsSingleByte = 3
  val lazyLoadValuesStore = 4
  val asyncLoadValuesStore = 5
}
// Placeholder for a float-backed builder; no implementation in this file.
class ValuesStoreBuilderAsFloat{
}
/**
 * Lossy 16-bit compression of Float values: keeps only the upper half of the
 * IEEE-754 bit pattern (sign, 8 exponent bits, and the top 7 mantissa bits --
 * the bfloat16 layout). Decoding zero-fills the discarded low bits.
 */
object FloatToByteEncoder{
  /** Truncates the value's float bit pattern to its 16 most significant bits. */
  def encodeValue(value: Double): Short =
    (java.lang.Float.floatToRawIntBits(value.toFloat) >>> 16).toShort

  /** Re-expands the stored bits into the upper half of a float bit pattern. */
  def decodeValue(s: Short): Double = {
    val upperBits = (s & 0xFFFF) << 16
    java.lang.Float.intBitsToFloat(upperBits).toDouble
  }
}
// Quantizes Float values down to a single byte. Two codec families live here:
// a linear min/max quantizer (encodeValue/decodeValue, used by the bulk
// encodeValues/decodeValues helpers) and an exponent-packing variant (the
// underscore-suffixed methods, which retain commented-out debug output).
object FloatToSingleByteEncoder{
  // Smallest positive denormal float (bit pattern ...0001), kept as a
  // stand-in for zero per the original note.
  val almostZero = java.lang.Float.intBitsToFloat(1) //use this as a representation of zero: 00000000000000000000000000000001
  // Extracts the 8-bit biased IEEE-754 exponent field (0..255) of the value's
  // Float representation; the sign bit is masked away.
  def getExponent(value:Double): Int = {
    val intRepr: Int = java.lang.Float.floatToRawIntBits(value.toFloat)
    val exp = (intRepr >>> (32 - 9)) & 0xFF //treat it as int from 0 to 255
    exp
  }
  // Linearly quantizes `value` from [minValue, maxValue] to one byte
  // (0..255, stored signed): round to nearest, clamp the top end at 255, and
  // fail fast via Utils.internalError if the result escapes the byte range or
  // does not round-trip through the signed cast.
  def encodeValue(minValue: Float, maxValue: Float, value: Double): Byte = {
    val normalizedValue = (value - minValue)/(maxValue - minValue) //from 0 to 1
    //if (normalizedValue < 0.0 || normalizedValue >= 1.0){
    //  println("normalizedValue: " + normalizedValue)
    //  Utils.internalError()
    //}
    //check if randomized rounding works better for similarity
    val rounded: Int = Math.min(255, (normalizedValue*255.0).round.toInt)
    if (rounded < 0 || rounded > 255){
      Utils.internalError()
    }
    val asByte = rounded.toByte
    val backToInt = asByte & 0xFF
    if (backToInt != rounded){
      Utils.internalError()
    }
    //println(value + " normalized: " + normalizedValue + " " + rounded + " " + asByte)
    asByte
  }
  // Inverse of encodeValue: maps the unsigned byte back to
  // minValue + (b / 255) * (maxValue - minValue).
  def decodeValue(minValue: Float, maxValue: Float, input: Byte): Double = {
    val asInt = (input & 0xFF).toFloat
    val decoded: Float = (maxValue - minValue)*(asInt)/255.0f + minValue
    //println("decoded: " + decoded)
    decoded.toDouble
  }
  //need to take special care of zero
  // Experimental bit-packing encoder: stores the sign bit, the exponent's
  // offset from minExp (in however many bits the [minExp, maxExp] range
  // needs), and whatever top mantissa bits still fit -- all inside one byte.
  // Lossy; the author's commented-out trace output is retained.
  def encodeValue_(minExp: Int, maxExp: Int, value: Double): Byte = {
    //println("input: " + value)
    //println("original exp: " + getExponent(value))
    val adjustedExp = getExponent(value) - minExp //normalized exp
    //println("exp: " + adjustedExp)
    val range = maxExp - minExp
    //println("range: " + range)
    val numBitsToUse = 32 - Integer.numberOfLeadingZeros(range)
    //println("#bits to use " + numBitsToUse)
    //val freeBits = 8 - numBitsToUse
    //println("normalized exp " + exp + " range: " + (maxExp - minExp) + " " + numBitsToUse)
    val intRepr: Int = java.lang.Float.floatToRawIntBits(value.toFloat)
    //println("intRepr " + allBits(intRepr))
    val signBit = (intRepr >>> 31)
    //println("signBit " + allBits(signBit))
    //println("diff exp: " + adjustedExp + " " + allBits(adjustedExp))
    val expRepr = ((adjustedExp) << 1)
    //println("exp " + allBits(expRepr))
    val availableBits = 8 - numBitsToUse - 1
    val rest = ((intRepr << 9) >>> (32 - availableBits)) << (1 + numBitsToUse)
    //println("rest " + allBits(rest))
    // Byte layout (low to high): sign | exponent offset | mantissa bits.
    val output = rest | expRepr | signBit
    //println("output " + allBits(output))
    output.toByte
  }
  // Inverse of encodeValue_: re-expands the sign bit, the exponent (adding
  // minExp back), and the preserved mantissa bits into a Float bit pattern.
  def decodeValue_(minExp: Int, maxExp: Int, input: Byte): Double = {
    //rest | exp bits | sign
    val intRepr = input & 0xFF
    //println("intRe " + allBits(intRepr))
    val signBit = (intRepr & 0x1) << 31
    //println("signbit: " + allBits(signBit))
    val range = maxExp - minExp
    val numBitsToUse = 32 - Integer.numberOfLeadingZeros(range)
    val availableBits = 8 - numBitsToUse - 1
    // Mask selecting the exponent-offset bits (just above the sign bit).
    val s = 1 << (numBitsToUse - 1)
    val mask = (s | (s - 1)) << 1
    //1 left (sign bit in compressed), 24 right , 1 left (again because of sign bit)
    //24 - 2
    val expRepr = ((intRepr & mask) >> 1)
    //println("mask: " + allBits(mask))
    //println("expBits: " + allBits(expRepr))
    val exp = expRepr + minExp
    //println("exp: " + exp)
    val shiftedExp = exp << 23
    //println("shifted exp: " + allBits(shiftedExp))
    val rest = (intRepr >>> (numBitsToUse + 1)) << (32 - 9 - availableBits)
    //println("rest " + allBits(rest))
    val rawBits = signBit | shiftedExp | rest
    //println("rawbits out: " + allBits(rawBits))
    val finalResult = java.lang.Float.intBitsToFloat(rawBits)
    //println("finalRes: " + finalResult)
    //throw new IllegalStateException()
    finalResult
  }
  // Bulk variant keyed on the per-record exponent range: records min/max
  // exponents, then encodes each value.
  // NOTE(review): the inner call resolves to the *linear* quantizer
  // encodeValue(Float, Float, Double), with the integer exponents widened to
  // Float -- if the exponent-packing encodeValue_ was intended, this looks
  // like a latent bug; confirm before use.
  def encodeValues_(values: Array[Double], minExponentPerRecord: IntArrayBuffer, maxExponentPerRecord: IntArrayBuffer, valuesBuffer: ByteArrayBuffer): Unit = {
    //if we have zeros or values too close to zero this will increase the range of the exponent
    //need to check for the usual case (what is the range of the exponents)
    //but because of the normalization by the length, all numbers are smaller than 1
    var i = 0
    var minExp = 255
    var maxExp = 0
    while (i < values.length){
      val exp = getExponent(values(i))
      minExp = Math.min(exp, minExp)
      maxExp = Math.max(exp, maxExp)
      i += 1
    }
    minExponentPerRecord += minExp //.toByte
    maxExponentPerRecord += maxExp //.toByte
    i = 0
    while(i < values.length){
      valuesBuffer += encodeValue(minExp, maxExp, values(i))
      i += 1
    }
  }
  // Bulk linear quantization: appends the record's min/max to the side
  // buffers and one encoded byte per value. Assumes `values` is non-empty
  // (reads values(0)).
  def encodeValues(values: Array[Double], minValues: FloatArrayBuffer, maxValues: FloatArrayBuffer, valuesBuffer: ByteArrayBuffer): Unit = {
    //if we have zeros or values too close to zero this will increase the range of the exponent
    //need to check for the usual case (what is the range of the exponents)
    //but because of the normalization by the length, all numbers are smaller than 1
    var minValue = values(0).toFloat
    var maxValue = minValue
    var i = 1
    while (i < values.length){
      val value = values(i).toFloat
      minValue = Math.min(minValue, value)
      maxValue = Math.max(maxValue, value)
      i += 1
    }
    minValues += minValue
    maxValues += maxValue
    i = 0
    while(i < values.length){
      valuesBuffer += encodeValue(minValue, maxValue, values(i))
      i += 1
    }
  }
  // Same quantization as above, but writing into pre-allocated arrays at the
  // given record/value offsets instead of growing buffers. Assumes `values`
  // is non-empty.
  def encodeValues(offsetPerRecord: Int, minValues: Array[Float], maxValues: Array[Float], offsetValues: Int, valuesBuffer: Array[Byte], values: Array[Double]): Unit = {
    var minValue = values(0).toFloat
    var maxValue = minValue
    var i = 1
    while (i < values.length){
      val value = values(i).toFloat
      minValue = Math.min(minValue, value)
      maxValue = Math.max(maxValue, value)
      i += 1
    }
    minValues(offsetPerRecord) = minValue
    maxValues(offsetPerRecord) = maxValue
    i = 0
    var k = offsetValues
    while(i < values.length){
      valuesBuffer(k) = encodeValue(minValue, maxValue, values(i))
      i += 1
      k += 1
    }
  }
  // Decodes `output.length` bytes starting at `offsetValues`, using the
  // min/max recorded for record `offsetPerRecord`. (`j` mirrors `i` but is
  // otherwise unused.)
  def decodeValues(offsetPerRecord: Int, minValues: Array[Float], maxValues: Array[Float], offsetValues: Int, valuesBuffer: Array[Byte], output: Array[Double]): Unit = {
    val minValue = minValues(offsetPerRecord)
    val maxValue = maxValues(offsetPerRecord)
    var k = offsetValues
    var j = 0
    var i = 0
    while(i < output.length){
      val encoded = valuesBuffer(k)
      val decoded = decodeValue(minValue, maxValue, encoded)
      output(i) = decoded
      i += 1
      j += 1
      k += 1
    }
  }
}
/**
 * A pre-sized append buffer: values are added in bulk via `++=` and the
 * backing array is exposed (shared, not copied) through `array`. No bounds
 * checks beyond those of the underlying array copy, so callers must not
 * append more than `size` elements in total.
 */
class FixedLengthBuffer[T : ClassTag](val size: Int){
  // Backing storage, allocated eagerly at the fixed capacity.
  val buffer = Array.ofDim[T](size)
  // Index of the next free slot; equals the number of elements written.
  var offset = 0

  /** Appends all of `values` at the current write position. */
  def ++= (values: Array[T]): Unit = {
    val count = values.length
    Array.copy(values, 0, buffer, offset, count)
    offset += count
  }

  /** The underlying array, including any slots not yet written. */
  def array = buffer
}
| stefansavev/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/randomprojections/datarepr/dense/store/ValuesStore.scala | Scala | apache-2.0 | 8,864 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.