code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.config.sampler
import com.twitter.zipkin.collector.sampler.{NullAdaptiveSampler, AdaptiveSampler}
import com.twitter.util.Config
/** Config that yields the [[AdaptiveSampler]] used by the collector. */
trait AdaptiveSamplerConfig extends Config[AdaptiveSampler] {
  def apply(): AdaptiveSampler
}
/** No-op configuration: always produces a [[NullAdaptiveSampler]]. */
class NullAdaptiveSamplerConfig extends AdaptiveSamplerConfig {
  def apply() = new NullAdaptiveSampler
}
| martindale/zipkin | zipkin-server/src/main/scala/com/twitter/zipkin/config/sampler/AdaptiveSamplerConfig.scala | Scala | apache-2.0 | 960 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import org.apache.mxnet.init.Base._
import org.apache.mxnet.utils.{CToScalaUtils, OperatorBuildUtils}
import scala.annotation.StaticAnnotation
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
/**
 * Macro annotation that injects the untyped NDArray operator defs
 * (Map/varargs based) into the annotated class or object.
 * `isContrib` selects the "_contrib_" operator namespace.
 */
private[mxnet] class AddNDArrayFunctions(isContrib: Boolean) extends StaticAnnotation {
  private[mxnet] def macroTransform(annottees: Any*) = macro NDArrayMacro.addDefs
}

/**
 * Macro annotation that injects the type-safe NDArray operator defs
 * into the annotated class or object.
 */
private[mxnet] class AddNDArrayAPIs(isContrib: Boolean) extends StaticAnnotation {
  private[mxnet] def macroTransform(annottees: Any*) = macro NDArrayMacro.typeSafeAPIDefs
}
/**
 * Macro bundle that generates NDArray operator wrappers from the native
 * operator registry at compile time.
 */
private[mxnet] object NDArrayMacro {
  // Parsed description of one operator argument.
  case class NDArrayArg(argName: String, argType: String, isOptional : Boolean)
  // One native operator: its name plus its parsed argument list.
  case class NDArrayFunction(name: String, listOfArgs: List[NDArrayArg])

  // scalastyle:off havetype
  // Entry point for @AddNDArrayFunctions: emits the untyped variants.
  def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*) = {
    impl(c)(annottees: _*)
  }
  // Entry point for @AddNDArrayAPIs: emits the type-safe variants.
  def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*) = {
    typeSafeAPIImpl(c)(annottees: _*)
  }
  // scalastyle:on havetype

  // All operators exposed by the native library, loaded once per macro expansion.
  private val ndarrayFunctions: List[NDArrayFunction] = initNDArrayModule()

  // Generates two untyped defs per operator:
  //   def op(kwargs: Map[String, Any] = null)(args: Any*)
  //   def op(args: Any*)
  private def impl(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
    import c.universe._

    // Read the isContrib flag from the annotation's constructor call.
    val isContrib: Boolean = c.prefix.tree match {
      case q"new AddNDArrayFunctions($b)" => c.eval[Boolean](c.Expr(b))
    }

    val newNDArrayFunctions = {
      if (isContrib) ndarrayFunctions.filter(_.name.startsWith("_contrib_"))
      else ndarrayFunctions.filterNot(_.name.startsWith("_"))
    }

    val functionDefs = newNDArrayFunctions flatMap { NDArrayfunction =>
      val funcName = NDArrayfunction.name
      val termName = TermName(funcName)
      Seq(
        // scalastyle:off
        // (yizhi) We are investigating a way to make these functions type-safe
        // and waiting to see the new approach is stable enough.
        // Thus these functions may be deprecated in the future.
        // e.g def transpose(kwargs: Map[String, Any] = null)(args: Any*)
        q"def $termName(kwargs: Map[String, Any] = null)(args: Any*) = {genericNDArrayFunctionInvoke($funcName, args, kwargs)}".asInstanceOf[DefDef],
        // e.g def transpose(args: Any*)
        q"def $termName(args: Any*) = {genericNDArrayFunctionInvoke($funcName, args, null)}".asInstanceOf[DefDef]
        // scalastyle:on
      )
    }

    structGeneration(c)(functionDefs, annottees : _*)
  }

  // Generates one typed def per operator; the def is built as source text
  // and then parsed, because the signature depends on runtime metadata.
  private def typeSafeAPIImpl(c: blackbox.Context)(annottees: c.Expr[Any]*) : c.Expr[Any] = {
    import c.universe._

    val isContrib: Boolean = c.prefix.tree match {
      case q"new AddNDArrayAPIs($b)" => c.eval[Boolean](c.Expr(b))
    }
    // NOTE: unlike impl(), contrib mode here also keeps public (non-underscore) ops.
    val newNDArrayFunctions = {
      if (isContrib) ndarrayFunctions.filter(
        func => func.name.startsWith("_contrib_") || !func.name.startsWith("_"))
      else ndarrayFunctions.filterNot(_.name.startsWith("_"))
    }

    val functionDefs = newNDArrayFunctions map { ndarrayfunction =>
      // Construct argument field
      var argDef = ListBuffer[String]()
      // Construct Implementation field
      var impl = ListBuffer[String]()
      impl += "val map = scala.collection.mutable.Map[String, Any]()"
      impl += "val args = scala.collection.mutable.ArrayBuffer.empty[NDArray]"
      ndarrayfunction.listOfArgs.foreach({ ndarrayarg =>
        // var is a special word used to define variable in Scala,
        // need to changed to something else in order to make it work
        val currArgName = ndarrayarg.argName match {
          case "var" => "vari"
          case "type" => "typeOf"
          case default => ndarrayarg.argName
        }
        if (ndarrayarg.isOptional) {
          argDef += s"${currArgName} : Option[${ndarrayarg.argType}] = None"
        }
        else {
          argDef += s"${currArgName} : ${ndarrayarg.argType}"
        }
        // NDArray arg implementation
        val returnType = "org.apache.mxnet.NDArray"

        // TODO: Currently we do not add place holder for NDArray
        // Example: an NDArray operator like the following format
        // nd.foo(arg1: NDArray(required), arg2: NDArray(Optional), arg3: NDArray(Optional)
        // If we place nd.foo(arg1, arg3 = arg3), do we need to add place holder for arg2?
        // What it should be?
        val base =
          if (ndarrayarg.argType.equals(returnType)) {
            s"args += $currArgName"
          } else if (ndarrayarg.argType.equals(s"Array[$returnType]")){
            s"args ++= $currArgName"
          } else {
            "map(\\"" + ndarrayarg.argName + "\\") = " + currArgName
          }
        impl.append(
          if (ndarrayarg.isOptional) s"if (!$currArgName.isEmpty) $base.get"
          else base
        )
      })
      // add default out parameter
      argDef += "out : Option[NDArray] = None"
      impl += "if (!out.isEmpty) map(\\"out\\") = out.get"
      // scalastyle:off
      impl += "org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(\\"" + ndarrayfunction.name + "\\", args.toSeq, map.toMap)"
      // scalastyle:on
      // Combine and build the function string
      val returnType = "org.apache.mxnet.NDArrayFuncReturn"
      var finalStr = s"def ${ndarrayfunction.name}"
      finalStr += s" (${argDef.mkString(",")}) : $returnType"
      finalStr += s" = {${impl.mkString("\\n")}}"
      c.parse(finalStr).asInstanceOf[DefDef]
    }
    structGeneration(c)(functionDefs, annottees : _*)
  }

  // Splices the generated defs into the annotated class and/or companion object.
  private def structGeneration(c: blackbox.Context)
  (funcDef : List[c.universe.DefDef], annottees: c.Expr[Any]*)
  : c.Expr[Any] = {
    import c.universe._
    val inputs = annottees.map(_.tree).toList
    // pattern match on the inputs
    val modDefs = inputs map {
      case ClassDef(mods, name, something, template) =>
        val q = template match {
          case Template(superMaybe, emptyValDef, defs) =>
            Template(superMaybe, emptyValDef, defs ++ funcDef)
          case ex =>
            throw new IllegalArgumentException(s"Invalid template: $ex")
        }
        ClassDef(mods, name, something, q)
      case ModuleDef(mods, name, template) =>
        val q = template match {
          case Template(superMaybe, emptyValDef, defs) =>
            Template(superMaybe, emptyValDef, defs ++ funcDef)
          case ex =>
            throw new IllegalArgumentException(s"Invalid template: $ex")
        }
        ModuleDef(mods, name, q)
      case ex =>
        throw new IllegalArgumentException(s"Invalid macro input: $ex")
    }
    // wrap the result up in an Expr, and return it
    val result = c.Expr(Block(modDefs, Literal(Constant())))
    result
  }

  // List and add all the atomic symbol functions to current module.
  private def initNDArrayModule(): List[NDArrayFunction] = {
    val opNames = ListBuffer.empty[String]
    _LIB.mxListAllOpNames(opNames)
    opNames.map(opName => {
      val opHandle = new RefLong
      _LIB.nnGetOpHandle(opName, opHandle)
      makeNDArrayFunction(opHandle.value, opName)
    }).toList
  }

  // Create an atomic symbol function by handle and function name.
  private def makeNDArrayFunction(handle: NDArrayHandle, aliasName: String)
  : NDArrayFunction = {
    val name = new RefString
    val desc = new RefString
    val keyVarNumArgs = new RefString
    val numArgs = new RefInt
    val argNames = ListBuffer.empty[String]
    val argTypes = ListBuffer.empty[String]
    val argDescs = ListBuffer.empty[String]
    _LIB.mxSymbolGetAtomicSymbolInfo(
      handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs)
    val paramStr = OperatorBuildUtils.ctypes2docstring(argNames, argTypes, argDescs)
    val extraDoc: String = if (keyVarNumArgs.value != null && keyVarNumArgs.value.length > 0) {
      s"This function support variable length of positional input (${keyVarNumArgs.value})."
    } else {
      ""
    }
    val realName = if (aliasName == name.value) "" else s"(a.k.a., ${name.value})"
    val docStr = s"$aliasName $realName\\n${desc.value}\\n\\n$paramStr\\n$extraDoc\\n"
    // scalastyle:off println
    if (System.getenv("MXNET4J_PRINT_OP_DEF") != null
      && System.getenv("MXNET4J_PRINT_OP_DEF").toLowerCase == "true") {
      println("NDArray function definition:\\n" + docStr)
    }
    // scalastyle:on println
    val argList = argNames zip argTypes map { case (argName, argType) =>
      val typeAndOption = CToScalaUtils.argumentCleaner(argType, "org.apache.mxnet.NDArray")
      new NDArrayArg(argName, typeAndOption._1, typeAndOption._2)
    }
    new NDArrayFunction(aliasName, argList.toList)
  }
}
| navrasio/mxnet | scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala | Scala | apache-2.0 | 9,497 |
package models
import scala.util.control.NonFatal

import org.squeryl.PrimitiveTypeMode._
import org.squeryl.{Schema, Table}
import play.api.Logger
import play.api.libs.json._

import collins.solr._
/**
 * A metadata key that can be attached to assets. `value_type` constrains
 * how values stored under this key are parsed and indexed in Solr.
 */
case class AssetMeta(
  name: String,
  priority: Int,
  label: String,
  description: String,
  id: Long = 0,
  value_type: Int = AssetMeta.ValueType.String.id
) extends ValidatedEntity[Long]
{
  /** Throws IllegalArgumentException when any field violates the schema invariants. */
  override def validate() {
    require(name != null && name.toUpperCase == name && name.size > 0, "Name must be all upper case, length > 0")
    require(AssetMeta.isValidName(name), "Name must be all upper case, alpha numeric (and hyphens)")
    require(description != null && description.length > 0, "Need a description")
    require(AssetMeta.ValueType.valIds(value_type), "Invalid value_type, must be one of [%s]".format(AssetMeta.ValueType.valStrings.mkString(",")))
  }
  /** JSON representation served by the API layer. */
  override def asJson: String = {
    Json.stringify(JsObject(Seq(
      "ID" -> JsNumber(id),
      "NAME" -> JsString(name),
      "PRIORITY" -> JsNumber(priority),
      "LABEL" -> JsString(label),
      "DESCRIPTION" -> JsString(description)
    )))
  }

  def getId(): Long = id
  def getValueType(): AssetMeta.ValueType = AssetMeta.ValueType(value_type)
  def valueType = getValueType
  /** Solr field descriptor for this meta key. */
  def getSolrKey(): SolrKey = SolrKey(name, valueType, true, true, false)

  /** True when `value` parses according to this key's declared value type. */
  def validateValue(value: String): Boolean = typeStringValue(value).isDefined

  /**
   * Parses `value` into a typed Solr value according to `value_type`,
   * returning None when parsing fails. Only non-fatal exceptions are
   * swallowed; the previous bare `case _` also trapped fatal JVM errors
   * (OutOfMemoryError etc.), which should propagate.
   */
  def typeStringValue(value: String): Option[SolrSingleValue] = getValueType() match {
    case AssetMeta.ValueType.Integer => try {
      Some(SolrIntValue(Integer.parseInt(value)))
    } catch {
      case NonFatal(_) => None
    }
    case AssetMeta.ValueType.Boolean => try {
      Some(SolrBooleanValue((new Truthy(value)).isTruthy))
    } catch {
      case NonFatal(_) => None
    }
    case AssetMeta.ValueType.Double => try {
      Some(SolrDoubleValue(java.lang.Double.parseDouble(value)))
    } catch {
      case NonFatal(_) => None
    }
    // Strings always parse as-is.
    case _ => Some(SolrStringValue(value))
  }
}
/** Persistence and cache layer for AssetMeta rows (Squeryl-backed). */
object AssetMeta extends Schema with AnormAdapter[AssetMeta] {
  // Valid meta names: alphanumerics, dashes and underscores only.
  private[this] val NameR = """[A-Za-z0-9\-_]+""".r.pattern.matcher(_)
  private[this] val logger = Logger.logger

  override val tableDef = table[AssetMeta]("asset_meta")
  on(tableDef)(a => declare(
    a.id is(autoIncremented,primaryKey),
    a.name is(unique),
    a.priority is(indexed)
  ))

  // Cache entries invalidated whenever a row changes.
  override def cacheKeys(a: AssetMeta) = Seq(
    "AssetMeta.findByName(%s)".format(a.name),
    "AssetMeta.findById(%d)".format(a.id),
    "AssetMeta.findAll",
    "AssetMeta.getViewable"
  )

  override def delete(a: AssetMeta): Int = inTransaction {
    afterDeleteCallback(a) {
      tableDef.deleteWhere(p => p.id === a.id)
    }
  }

  def isValidName(name: String): Boolean = {
    name != null && name.nonEmpty && NameR(name).matches
  }

  def findAll(): Seq[AssetMeta] = getOrElseUpdate("AssetMeta.findAll") {
    from(tableDef)(s => select(s)).toList
  }

  def findById(id: Long) = getOrElseUpdate("AssetMeta.findById(%d)".format(id)) {
    tableDef.lookup(id)
  }

  // Finds a meta by name, creating it (priority -1, i.e. hidden) when absent,
  // then unconditionally refreshes the description from the supplied values.
  def findOrCreateFromName(name: String, valueType: ValueType = ValueType.String, desc: Option[String] = None ): AssetMeta =
  {
    val existing = findByName(name).getOrElse{
      create(AssetMeta(
        name = name.toUpperCase,
        priority = -1,
        label = name.toLowerCase.capitalize,
        description = desc.getOrElse(name),
        value_type = valueType.id
      )
    )
    }
    //works for updating but can be optimized, short short circuit if it doesn't need to update description
    inTransaction{
      val newval = AssetMeta(
        name = name.toUpperCase,
        priority = existing.priority,
        label = existing.label,
        id = existing.id,
        description = desc.getOrElse(existing.name),
        value_type = existing.value_type
      )
      AssetMeta.tableDef.update(newval)
    }
    findByName(name).get
  }

  override def get(a: AssetMeta) = findById(a.id).get

  // Case-insensitive lookup; results cached by upper-cased name.
  def findByName(name: String): Option[AssetMeta] = {
    getOrElseUpdate("AssetMeta.findByName(%s)".format(name.toUpperCase)) {
      tableDef.where(a =>
        a.name.toUpperCase === name.toUpperCase
      ).headOption
    }
  }

  // Metas shown in the UI: priority > -1, ordered by ascending priority.
  def getViewable(): Seq[AssetMeta] = getOrElseUpdate("AssetMeta.getViewable") {
    from(tableDef)(a =>
      where(a.priority gt -1)
      select(a)
      orderBy(a.priority asc)
    ).toList
  }

  type ValueType = ValueType.Value
  // Declared value types for meta values; ids are persisted in the DB.
  object ValueType extends Enumeration {
    val String = Value(1,"STRING")
    val Integer = Value(2,"INTEGER")
    val Double = Value(3,"DOUBLE")
    val Boolean = Value(4,"BOOLEAN")
    def valStrings = values.map{_.toString}
    def valIds = values.map{_.id}

    // Solr dynamic-field suffix used for each value type.
    val postFix = Map[ValueType,String](
      String -> "_meta_s",
      Integer -> "_meta_i",
      Double -> "_meta_d",
      Boolean -> "_meta_b"
    )
  }

  // DO NOT ADD ANYTHING TO THIS
  // DEPRECATED
  type Enum = Enum.Value
  object Enum extends Enumeration(1) {
    val ServiceTag = Value(1, "SERVICE_TAG")
    val ChassisTag = Value(2, "CHASSIS_TAG")
    val RackPosition = Value(3, "RACK_POSITION")
    val PowerPort = Value(4, "POWER_PORT")
    //val SwitchPort = Value(5, "SWITCH_PORT") Deprecated by id LldpPortIdValue
    val CpuCount = Value(6, "CPU_COUNT")
    val CpuCores = Value(7, "CPU_CORES")
    val CpuThreads = Value(8, "CPU_THREADS")
    val CpuSpeedGhz = Value(9, "CPU_SPEED_GHZ")
    val CpuDescription = Value(10, "CPU_DESCRIPTION")
    val MemorySizeBytes = Value(11, "MEMORY_SIZE_BYTES")
    val MemoryDescription = Value(12, "MEMORY_DESCRIPTION")
    val MemorySizeTotal = Value(13, "MEMORY_SIZE_TOTAL")
    val MemoryBanksTotal = Value(14, "MEMORY_BANKS_TOTAL")
    val NicSpeed = Value(15, "NIC_SPEED") // in bits
    val MacAddress = Value(16, "MAC_ADDRESS")
    val NicDescription = Value(17, "NIC_DESCRIPTION")
    val DiskSizeBytes = Value(18, "DISK_SIZE_BYTES")
    val DiskType = Value(19, "DISK_TYPE")
    val DiskDescription = Value(20, "DISK_DESCRIPTION")
    val DiskStorageTotal = Value(21, "DISK_STORAGE_TOTAL")
    val LldpInterfaceName = Value(22, "LLDP_INTERFACE_NAME")
    val LldpChassisName = Value(23, "LLDP_CHASSIS_NAME")
    val LldpChassisIdType = Value(24, "LLDP_CHASSIS_ID_TYPE")
    val LldpChassisIdValue = Value(25, "LLDP_CHASSIS_ID_VALUE")
    val LldpChassisDescription = Value(26, "LLDP_CHASSIS_DESCRIPTION")
    val LldpPortIdType = Value(27, "LLDP_PORT_ID_TYPE")
    val LldpPortIdValue = Value(28, "LLDP_PORT_ID_VALUE")
    val LldpPortDescription = Value(29, "LLDP_PORT_DESCRIPTION")
    val LldpVlanId = Value(30, "LLDP_VLAN_ID")
    val LldpVlanName = Value(31, "LLDP_VLAN_NAME")
    // DO NOT USE - Deprecated
    val NicName = Value(32, "INTERFACE_NAME")
    // DO NOT USE - Deprecated
    val NicAddress = Value(33, "INTERFACE_ADDRESS")
  }

  // Post enum fields, enum is not safe to extend with new values
  object DynamicEnum {
    val BaseDescription = AssetMeta.findOrCreateFromName("BASE_DESCRIPTION")
    val BaseProduct = AssetMeta.findOrCreateFromName("BASE_PRODUCT")
    val BaseVendor = AssetMeta.findOrCreateFromName("BASE_VENDOR")
    def getValues(): Seq[AssetMeta] = {
      Seq(BaseDescription,BaseProduct,BaseVendor)
    }
  }
}
| Shopify/collins | app/models/AssetMeta.scala | Scala | apache-2.0 | 7,263 |
package cfc.shale.redis_client.commands
/**
 * Reads `key` and decodes Redis's "0"/"1" convention into a Boolean.
 * Any other stored value — or a missing key — yields None.
 */
object RedisGetBooleanOption {
  def apply(key: String): RedisCommand[Option[Boolean]] =
    RedisGetStringOption(key).map(_.collect {
      case "0" => false
      case "1" => true
    })
}
| cardforcoin/shale-scala | redis-client/src/main/scala/cfc/shale/redis_client/commands/RedisGetBooleanOption.scala | Scala | mit | 268 |
package xyz.hyperreal.sprolog
import collection.mutable.{HashMap, ArrayBuffer, Buffer, ArrayStack}
import xyz.hyperreal.lia.{FunctionMap, Math}
/**
 * Mutable registry mapping a predicate Indicator (name/arity) to its
 * native implementation.
 */
class PredicateMap( evaluator: Evaluator ) extends HashMap[Indicator, Predicate]
{
  // Evaluates the a-th argument of the current call as an arithmetic term.
  def eval( a: Int, w: WAMInterface ) = evaluator.eval( w.wam.arg(a) )

  // Curried sugar over addPredicate.
  def define( name: String, arity: Int )( c: Predicate )
  {
    addPredicate( name, arity, c )
  }

  // Registers a predicate; errors if the name/arity pair is already defined.
  def addPredicate( name: String, arity: Int, c: Predicate )
  {
    val ind = Indicator( Symbol(name), arity )

    if (this contains ind)
      sys.error( s"callable $ind already added" )
    else
      this(ind) = c
  }
}
/**
 * Built-in predicates for the Prolog machine. Each define(name, arity)(body)
 * registers a predicate whose body receives the WAM interface and returns
 * success (true) or failure (false).
 */
class PrologBuiltins( evaluator: Evaluator = new Evaluator ) extends PredicateMap( evaluator )
{
  // Tracing control.
  define( "traceon", 0 )
  { w =>
    w.wam.trace = true
    true
  }

  define( "traceoff", 0 )
  { w =>
    w.wam.trace = false
    true
  }

  // Structural identity of two terms, used by ==/2 and \==/2.
  // NOTE(review): when `l` is a NumberAST but `r` is not, the unguarded
  // asInstanceOf on `r` would throw ClassCastException — confirm this is
  // unreachable or intended.
  def identical( w: WAMInterface ) =
  {
    val l = w.wam.arg( 1 )
    val r = w.wam.arg( 2 )

    if (l.isInstanceOf[NumberAST] && l.asInstanceOf[NumberAST].n.getClass != r.asInstanceOf[NumberAST].n.getClass)
      false
    else
      l == r
  }

  // Arithmetic evaluation and comparison predicates.
  define( "is", 2 ) (w => w.unify( w.wam.addr(1), ConCell(eval(2, w)) ))
  define( "=:=", 2 ) (w => eval( 1, w ) == eval( 2, w ))
  define( "=\\\\=", 2 ) (w => eval( 1, w ) != eval( 2, w ))
  define( "<", 2 ) (w => Math( '<, eval(1, w), eval(2, w) ).asInstanceOf[Boolean])
  define( "=<", 2 ) (w => Math( '<=, eval(1, w), eval(2, w) ).asInstanceOf[Boolean])
  define( ">", 2 ) (w => Math( '>, eval(1, w), eval(2, w) ).asInstanceOf[Boolean])
  define( ">=", 2 ) (w => Math( '>=, eval(1, w), eval(2, w) ).asInstanceOf[Boolean])
  // Term identity / unification predicates.
  define( "==", 2 ) (identical)
  define( "\\\\==", 2 ) (!identical( _ ))
  define( "=", 2 ) (w => w.unify(w.wam.addr(1), w.wam.addr(2)))
  define( "\\\\=", 2 ) (w => !w.unify(w.wam.addr(1), w.wam.addr(2)))

  // arg/3: unify the n-th argument of a compound term with the third argument.
  define( "arg", 3 )
  { w =>
    val n = w.wam.argInteger( 1 )
    val term = w.wam.addr( 2 )

    if (w.wam.unbound( term ))
      sys.error( "instantiation_error" )

    if (n < 0)
      sys.error( "domain_error" )

    if (w.wam.isCompound( term ))
      w.unify( w.wam.structureArg(term, n), w.wam.addr(3) )
    else
      sys.error( "expected a structure" )
  }

  // Type-testing predicates.
  define( "atom", 1 ) (w => atom( w.wam.arg(1) ))
  define( "atomic", 1 ) (w => atomic( w.wam.arg(1) ))

  // call/1: compile the goal into the call area and jump to it.
  define( "call", 1 )
  { w =>
    val start = w.wam.callcode.size

    Prolog.compileCall( w.wam.arg(1), w.wam.callcode )
    w.wam.cp = w.wam.p
    w.wam.p = start + w.wam.QUERY
    true
  }

  define( "compound", 1 ) (w => compound( w.wam.arg(1) ))
  define( "fail", 0 ) (_ => false)

  define( "float", 1 )
  { w =>
    w.wam.arg(1) match
    {
      case NumberAST( (_: java.lang.Double|_: BigDecimal) ) => true
      case _ => false
    }
  }

  // functor/3: relate a term to its name and arity, in either direction.
  define( "functor", 3 )
  { w =>
    val term = w.wam.arg(1)
    val name = w.wam.arg(2)
    val arity = w.wam.arg(3)

    if (compound( term ))
    {
      val Indicator( _name, _arity ) = indicator( term )

      w.unify( ConCell(_name), w.wam.addr(2) ) && w.unify( ConCell(_arity), w.wam.addr(3) )
    }
    else if (atomic( term ))
      w.unify( ConCell(constant( term )), w.wam.addr(2) ) && w.unify( ConCell(0), w.wam.addr(3) )
    else if (variable( term ) && atomic( name ) && w.wam.isInteger( arity ) && w.wam.asInteger( arity ) == 0)
      w.unify( w.wam.addr(1), w.wam.addr(2) )
    else if (variable( term ) && atom( name ) && w.wam.isInteger( arity ) && w.wam.asInteger( arity ) > 0)
    {
      // Build a fresh structure with unbound arguments on the heap.
      val _arity = w.wam.asInteger( arity )
      val start = w.wam.h

      w.put( w.wam.h, FunCell(w.wam.asSymbol(name), _arity) )
      w.wam.h += 1

      for (_ <- 1 to _arity)
      {
        w.put( w.wam.h, RefCell(w.wam.h) )
        w.wam.h += 1
      }

      w.unify( StrCell(start), w.wam.addr(1) )
    }
    else
      false
  }

  define( "is_list", 1 )
  { w =>
    isList( w.wam.arg(1) )
  }

  define( "integer", 1 )
  { w =>
    w.wam.arg(1) match
    {
      case NumberAST( (_: java.lang.Integer|_: BigInt) ) => true
      case _ => false
    }
  }

  define( "nl", 0 )
  { _ =>
    println
    true
  }

  define( "nonvar", 1 ) (w => !w.wam.unbound(w.wam.addr(1)))
  define( "number", 1 ) (w => w.wam.arg(1).isInstanceOf[NumberAST])
  define( "var", 1 ) (w => w.wam.unbound(w.wam.addr(1)))
  define( "true", 0 ) (_ => true)

  // =../2 ("univ"): convert between a term and its name/argument list.
  define( "=..", 2 )
  { w =>
    val terma = w.wam.addr( 1 )

    if (w.wam.unbound( terma ))
    {
      // Term is a variable: build it from the given list.
      val list = w.wam.argInstantiated( 2 )

      if (isList( list ))
        xyz.hyperreal.sprolog.toList( list ) match
        {
          case List( c ) =>
            atomic( c ) && w.unify( terma, ConCell(constant(c)) )
          case f :: args =>
            if (atom( f ))
              w.unify( w.wam.write(StructureAST(asAtom(f).atom, args.toIndexedSeq)), terma )
            else
              false
        }
      else
        sys.error( "expected list" )
    }
    else
    {
      // Term is bound: decompose it into a list.
      val term = w.wam.read( terma )
      val list = w.wam.addr( 2 )

      if (atomic( term ))
        w.unify( w.wam.write(fromList(List(term))), list )
      else
      {
        val s = term.asInstanceOf[StructureAST]

        w.unify( w.wam.write(fromList(AtomAST(s.f) +: s.args.toList)), list )
      }
    }
  }

  define( "write", 1 )
  { w =>
    print( w.wam.display(w.wam.arg(1)) )
    true
  }

  // Internal iteration support predicates (used by library code).
  define( "iterator_", 2 )
  { w =>
    val l = w.wam.arg( 1 )

    isList( l ) && w.unify( ConCell(xyz.hyperreal.sprolog.toList(l).iterator), w.wam.addr(2) )
  }

  define( "next_", 2 )
  { w =>
    w.unify( ConCell(constant(constant(w.wam.arg(1)).asInstanceOf[Iterator[Any]].next.asInstanceOf[AST])), w.wam.addr(2) )
  }

  define( "hasNext_", 1 )
  { w =>
    constant(w.wam.arg(1)).asInstanceOf[Iterator[Any]].hasNext
  }
}
package truerss.dto
import java.time.LocalDateTime
import java.util.Date
// Lifecycle state of a source: Neutral (default), Enable (polled), Disable (paused).
object State extends Enumeration {
  type State = Value
  val Neutral, Enable, Disable = Value
}
// Common shape of incoming source payloads (create and update).
sealed trait SourceDto {
  def url: String
  def name: String
  def interval: Int
  // Present only for updates; None for new sources.
  def getId: Option[Long] = None
}

// Payload for creating a new source (id not yet assigned).
case class NewSourceDto(url: String,
                        name: String,
                        interval: Int) extends SourceDto

// Payload for updating an existing source.
case class UpdateSourceDto(id: Long,
                           url: String,
                           name: String,
                           interval: Int) extends SourceDto {
  override def getId: Option[Long] = Some(id)
}
// Read model of a source as served to clients.
case class SourceViewDto(id: Long,
                         url: String,
                         name: String,
                         interval: Int,
                         state: State.Value,
                         normalized: String,
                         lastUpdate: LocalDateTime,
                         count: Int = 0,
                         errorsCount: Int = 0
                        ) {
  // Copy with the feed counter replaced.
  def recount(x: Int): SourceViewDto = copy(count = x)
  // Copy with the error counter replaced.
  def errors(x: Int): SourceViewDto = copy(errorsCount = x)
  // A source is considered enabled unless explicitly disabled.
  def isEnabled: Boolean = {
    state != State.Disable
  }
}
// Result of importing a source from a file when validation failed.
case class NewSourceFromFileWithErrors(url: String, name: String, errors: Iterable[String])

// Metadata describing one loaded plugin.
case class PluginDto(author: String,
                     about: String,
                     version: String,
                     pluginName: String,
                     jarSourcePath: String
                    )

// Loaded plugins grouped by extension point.
case class PluginsViewDto(
                           feed: Vector[PluginDto] = Vector.empty,
                           content: Vector[PluginDto] = Vector.empty,
                           publish: Vector[PluginDto] = Vector.empty,
                           site: Vector[PluginDto] = Vector.empty
                         ) {
  // Total number of plugins across all groups.
  val size: Int = feed.size + content.size + publish.size + site.size
}

// RSS enclosure (attachment) as exposed to clients.
final case class EnclosureDto(`type`: String, url: String, length: Int)

// Wrapper for optionally extracted full article content.
case class FeedContent(content: Option[String])
// Read model of a single feed entry.
case class FeedDto(
                    id: Long,
                    sourceId: Long,
                    url: String,
                    title: String,
                    author: String,
                    publishedDate: LocalDateTime,
                    description: Option[String],
                    content: Option[String],
                    enclosure: Option[EnclosureDto],
                    normalized: String,
                    favorite: Boolean = false,
                    read: Boolean = false,
                    delete: Boolean = false
                  )

// Average publication rates computed for a source.
case class FeedsFrequency(
                           perDay: Double,
                           perWeek: Double,
                           perMonth: Double
                         )
object FeedsFrequency {
  // Frequency with all rates zeroed — used for sources with no feeds.
  val empty: FeedsFrequency =
    FeedsFrequency(perDay = 0d, perWeek = 0d, perMonth = 0d)
}
// Aggregated counters for a single source.
case class SourceOverview(
                           sourceId: Long,
                           unreadCount: Int,
                           favoritesCount: Int,
                           feedsCount: Int,
                           frequency: FeedsFrequency
                         )
object SourceOverview {
  // Overview for a source that has no feeds yet: all counters zero.
  def empty(sourceId: Long): SourceOverview =
    SourceOverview(
      sourceId,
      unreadCount = 0,
      favoritesCount = 0,
      feedsCount = 0,
      frequency = FeedsFrequency.empty
    )
}
case class SearchRequest(inFavorites: Boolean, query: String, offset: Int, limit: Int)
object SearchRequest {
  // Convenience constructor using default paging: first 100 results.
  def apply(inFavorites: Boolean, query: String): SearchRequest =
    new SearchRequest(inFavorites, query, offset = 0, limit = 100)
}
package tethys
import tethys.json4s.ast.Json4sSupport
package object json4s extends Json4sSupport
| tethys-json/tethys | modules/json4s/src/main/scala/tethys/json4s/package.scala | Scala | apache-2.0 | 100 |
package is.hail.types.physical
import is.hail.HailSuite
import is.hail.annotations.{Annotation, Region, ScalaToRegionValue}
import is.hail.asm4s._
import is.hail.expr.ir.EmitFunctionBuilder
import is.hail.utils._
import org.testng.annotations.Test
class PContainerTest extends PhysicalTestUtils {
def nullInByte(nElements: Int, missingElement: Int) = {
IndexedSeq.tabulate(nElements)(i => {
if (i == missingElement - 1)
null
else
i + 1L
})
}
  // Writes `data` into a fresh region as `sourceType` and checks, via the
  // unstaged helper, whether the array's missing-bits area has any set bit.
  def testContainsNonZeroBits(sourceType: PArray, data: IndexedSeq[Any]) = {
    val srcRegion = Region(pool=pool)
    val src = ScalaToRegionValue(srcRegion, sourceType, data)

    log.info(s"Testing $data")

    val res = Region.containsNonZeroBits(src + sourceType.lengthHeaderBytes, sourceType.loadLength(src))
    res
  }

  // Same check as testContainsNonZeroBits, but through generated (staged) code.
  def testContainsNonZeroBitsStaged(sourceType: PArray, data: IndexedSeq[Any]) = {
    val srcRegion = Region(pool=pool)
    val src = ScalaToRegionValue(srcRegion, sourceType, data)

    log.info(s"Testing $data")

    val fb = EmitFunctionBuilder[Long, Boolean](ctx, "not_empty")
    val value = fb.getCodeParam[Long](1)
    fb.emit(Region.containsNonZeroBits(value + sourceType.lengthHeaderBytes, sourceType.loadLength(value).toL))

    val res = fb.result()(theHailClassLoader)(src)
    res
  }

  // Stages PArray.hasMissingValues over `data` and executes it.
  def testHasMissingValues(sourceType: PArray, data: IndexedSeq[Any]) = {
    val srcRegion = Region(pool=pool)
    val src = ScalaToRegionValue(srcRegion, sourceType, data)

    log.info(s"\nTesting $data")

    val fb = EmitFunctionBuilder[Long, Boolean](ctx, "not_empty")
    val value = fb.getCodeParam[Long](1)
    fb.emit(sourceType.hasMissingValues(value))
    val res = fb.result()(theHailClassLoader)(src)
    res
  }
  // Exercises the unstaged missing-bits scan across word/byte boundaries
  // (lengths chosen around 8, 32 and 64 elements).
  @Test def checkFirstNonZeroByte() {
    val sourceType = PCanonicalArray(PInt64(false))

    assert(testContainsNonZeroBits(sourceType, nullInByte(0, 0)) == false)

    assert(testContainsNonZeroBits(sourceType, nullInByte(1, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(1, 1)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(8, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(8, 1)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(8, 8)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(32, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(31, 31)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(32, 32)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(33, 33)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(64, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(64, 1)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(64, 32)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(64, 33)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(64, 64)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(68, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(68, 1)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(68, 32)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(68, 33)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(68, 64)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(72, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(72, 1)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(72, 32)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(72, 33)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(72, 64)) == true)

    assert(testContainsNonZeroBits(sourceType, nullInByte(73, 0)) == false)
    assert(testContainsNonZeroBits(sourceType, nullInByte(73, 1)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(73, 32)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(73, 33)) == true)
    assert(testContainsNonZeroBits(sourceType, nullInByte(73, 64)) == true)
  }

  // Spot-checks the staged variant against the same expectations.
  @Test def checkFirstNonZeroByteStaged() {
    val sourceType = PCanonicalArray(PInt64(false))

    assert(testContainsNonZeroBitsStaged(sourceType, nullInByte(32, 0)) == false)
    assert(testContainsNonZeroBitsStaged(sourceType, nullInByte(73, 64)) == true)
  }

  // hasMissingValues must detect a single missing element at any position
  // for a range of array lengths around word boundaries.
  @Test def checkHasMissingValues() {
    val sourceType = PCanonicalArray(PInt64(false))

    assert(testHasMissingValues(sourceType, nullInByte(1, 0)) == false)
    assert(testHasMissingValues(sourceType, nullInByte(1, 1)) == true)
    assert(testHasMissingValues(sourceType, nullInByte(2, 1)) == true)

    for {
      num <- Seq(2, 16, 31, 32, 33, 50, 63, 64, 65, 90, 127, 128, 129)
      missing <- 1 to num
    } assert(testHasMissingValues(sourceType, nullInByte(num, missing)) == true)
  }
  // Copy semantics between array PTypes: element widening is rejected,
  // requiredness upcasts are allowed, and nesting/missingness round-trips.
  // Runs each scenario with deep/shallow copy and compiled/interpreted paths.
  @Test def arrayCopyTest() {
    // Note: can't test where data is null due to ArrayStack.top semantics (ScalaToRegionValue: assert(size_ > 0))
    def runTests(deepCopy: Boolean, interpret: Boolean) {
      // Element type mismatch (Int32 -> Int64) must fail to compile.
      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt64()), IndexedSeq(1, 2, 3, 4, 5, 6, 7, 8, 9),
        expectCompileError = true, deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32()), IndexedSeq(1, 2, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32()), IndexedSeq(1, 2, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32()), IndexedSeq(1, null, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      // test upcast
      copyTestExecutor(PCanonicalArray(PInt32(true)), PCanonicalArray(PInt32()), IndexedSeq(1, 2, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      // test mismatched top-level requiredeness, allowed because by source value address must be present and therefore non-null
      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32(), true), IndexedSeq(1, 2, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      // downcast disallowed
      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32(true)), IndexedSeq(1, 2, 3, 4),
        deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PCanonicalArray(PInt64())), PCanonicalArray(PCanonicalArray(PInt64(), true)),
        FastIndexedSeq(FastIndexedSeq(20L), FastIndexedSeq(1L), FastIndexedSeq(20L,5L,31L,41L), FastIndexedSeq(1L,2L,3L)),
        deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PCanonicalArray(PInt64())), PCanonicalArray(PCanonicalArray(PInt64(), true)),
        FastIndexedSeq(FastIndexedSeq(20L), FastIndexedSeq(1L), FastIndexedSeq(20L,5L,31L,41L), FastIndexedSeq(1L,2L,3L)),
        deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PCanonicalArray(PInt64())), PCanonicalArray(PCanonicalArray(PInt64(true))),
        FastIndexedSeq(FastIndexedSeq(20L), FastIndexedSeq(1L), FastIndexedSeq(20L,5L,31L,41L), FastIndexedSeq(1L,2L,3L)),
        deepCopy = deepCopy, interpret = interpret)

      // test empty arrays
      copyTestExecutor(PCanonicalArray(PInt32()), PCanonicalArray(PInt32()), FastIndexedSeq(),
        deepCopy = deepCopy, interpret = interpret)
      copyTestExecutor(PCanonicalArray(PInt32(true)), PCanonicalArray(PInt32(true)), FastIndexedSeq(),
        deepCopy = deepCopy, interpret = interpret)

      // test missing-only array
      copyTestExecutor(PCanonicalArray(PInt64()), PCanonicalArray(PInt64()),
        FastIndexedSeq(null), deepCopy = deepCopy, interpret = interpret)

      copyTestExecutor(PCanonicalArray(PCanonicalArray(PInt64())), PCanonicalArray(PCanonicalArray(PInt64())),
        FastIndexedSeq(FastIndexedSeq(null)), deepCopy = deepCopy, interpret = interpret)

      // test 2D arrays
      copyTestExecutor(PCanonicalArray(PCanonicalArray(PInt64())), PCanonicalArray(PCanonicalArray(PInt64())),
        FastIndexedSeq(null, FastIndexedSeq(null), FastIndexedSeq(20L,5L,31L,41L), FastIndexedSeq(1L,2L,3L)),
        deepCopy = deepCopy, interpret = interpret)

      // test complex nesting
      val complexNesting = FastIndexedSeq(
        FastIndexedSeq( FastIndexedSeq(20L,30L,31L,41L), FastIndexedSeq(20L,22L,31L,43L) ),
        FastIndexedSeq( FastIndexedSeq(1L,3L,31L,41L), FastIndexedSeq(0L,30L,17L,41L) )
      )

      copyTestExecutor(PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64(true), true), true), true), PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))),
        complexNesting, deepCopy = deepCopy, interpret = interpret)
      copyTestExecutor(PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64(true), true), true)), PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))),
        complexNesting, deepCopy = deepCopy, interpret = interpret)
      copyTestExecutor(PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64(true), true))), PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))),
        complexNesting, deepCopy = deepCopy, interpret = interpret)
      copyTestExecutor(PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64(true)))), PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))),
        complexNesting, deepCopy = deepCopy, interpret = interpret)
      copyTestExecutor(PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))), PCanonicalArray(PCanonicalArray(PCanonicalArray(PInt64()))),
        complexNesting, deepCopy = deepCopy, interpret = interpret)

      // struct elements with nested arrays survive requiredness upcast
      val srcType = PCanonicalArray(PCanonicalStruct("a" -> PCanonicalArray(PInt32(true)), "b" -> PInt64()))
      val destType = PCanonicalArray(PCanonicalStruct("a" -> PCanonicalArray(PInt32()), "b" -> PInt64()))
      val expectedVal = IndexedSeq(Annotation(IndexedSeq(1,5,7,2,31415926), 31415926535897L))
      copyTestExecutor(srcType, destType, expectedVal, deepCopy = deepCopy, interpret = interpret)
    }

    runTests(true, false)
    runTests(false, false)

    runTests(true, interpret = true)
    runTests(false, interpret = true)
  }
@Test def dictCopyTests() {
def runTests(deepCopy: Boolean, interpret: Boolean) {
copyTestExecutor(PCanonicalDict(PCanonicalString(), PInt32()), PCanonicalDict(PCanonicalString(), PInt32()), Map("test" -> 1),
deepCopy = deepCopy, interpret = interpret)
copyTestExecutor(PCanonicalDict(PCanonicalString(true), PInt32(true)), PCanonicalDict(PCanonicalString(), PInt32()), Map("test2" -> 2),
deepCopy = deepCopy, interpret = interpret)
copyTestExecutor(PCanonicalDict(PCanonicalString(), PInt32()), PCanonicalDict(PCanonicalString(true), PInt32()), Map("test3" -> 3),
deepCopy = deepCopy, interpret = interpret)
}
runTests(true, false)
runTests(false, false)
runTests(true, interpret = true)
runTests(false, interpret = true)
}
@Test def setCopyTests() {
def runTests(deepCopy: Boolean, interpret: Boolean) {
copyTestExecutor(PCanonicalSet(PCanonicalString(true)), PCanonicalSet(PCanonicalString()), Set("1", "2"),
deepCopy = deepCopy, interpret = interpret)
}
runTests(true, false)
runTests(false, false)
runTests(true, interpret = true)
runTests(false, interpret = true)
}
}
| hail-is/hail | hail/src/test/scala/is/hail/types/physical/PContainerTest.scala | Scala | mit | 11,408 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
import sbt.io.IO
import scala.collection.mutable.ListBuffer
object StackTrace {

  /** True for stack frames that originate in sbt (or xsbt) itself. */
  def isSbtClass(name: String) = name.startsWith("sbt.") || name.startsWith("xsbt.")

  /**
   * Returns a printable representation of the stack trace of `t` as a list
   * of lines, including `t` itself and each of its `Throwable` causes.
   * The number of lines kept per Throwable is configured via `d`, which
   * must be greater than or equal to 0:
   *
   *  - If d is 0, elements are kept up to (but not including) the first
   *    frame that comes from sbt.
   *  - If d is greater than 0, at most `d` lines are kept per Throwable,
   *    where the line describing the Throwable itself counts as one, plus
   *    one line per stack element. Fewer lines appear when the trace is
   *    shorter.
   *
   * See also ConsoleAppender where d <= 2 is treated specially by
   * printing a prepared statement.
   */
  def trimmedLines(t: Throwable, d: Int): List[String] = {
    require(d >= 0)
    val lines = new ListBuffer[String]()

    def appendTrace(cause: Throwable, isRoot: Boolean): Unit = {
      // Predicate deciding which stack elements to keep. It is stateful
      // when a positive depth was requested: d - 1 elements after the
      // header line describing the Throwable.
      val include: StackTraceElement => Boolean =
        if (d == 0) element => !isSbtClass(element.getClassName)
        else {
          var remaining = d - 1
          _ => { remaining -= 1; remaining >= 0 }
        }
      lines += (if (isRoot) cause.toString else "Caused by: " + cause.toString)
      cause.getStackTrace().iterator.takeWhile(include).foreach { element =>
        lines += "\\tat " + element
      }
    }

    appendTrace(t, isRoot = true)
    // Walk the cause chain, appending each cause after the root trace.
    var cause = t.getCause()
    while (cause != null) {
      appendTrace(cause, isRoot = false)
      cause = cause.getCause()
    }
    lines.toList
  }

  /**
   * Returns a printable representation of the stack trace of `t` as a
   * single string, joining the lines produced by `trimmedLines` with the
   * platform newline. See `trimmedLines` for the meaning of `d`.
   */
  def trimmed(t: Throwable, d: Int): String =
    trimmedLines(t, d).mkString(IO.Newline)
}
| sbt/sbt | internal/util-logging/src/main/scala/sbt/internal/util/StackTrace.scala | Scala | apache-2.0 | 2,746 |
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.linalg.{Matrix, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import scala.collection.mutable
import scala.io.Source
/**
* Created by ameyapandilwar on 11/26/15.
*/
/**
 * Exploratory Spark MLlib driver: reads track timbre data from local text
 * files, trains a KMeans model on the timbre vectors, and prints the model.
 * (The Naive Bayes / HBase code paths are currently commented out.)
 */
object NaiveBayesTest {

  // Number of training track vectors filled into trngTrcks so far.
  var count: Int = 0
  // Fixed-size buffer of timbre vectors, one slot per training track.
  // NOTE(review): the capacity 3130 is hard-coded; if the input files yield
  // fewer tracks, trailing slots stay null and are still passed to KMeans
  // via trngTrcks.toList — confirm the files always yield exactly 3130.
  val trngTrcks = new Array[org.apache.spark.mllib.linalg.Vector](3130);
  // Maps a track id to its raw timbre string as read from the sample file.
  var trackTimbreMap: Map[String, String] = Map()

  def main(args: Array[String]): Unit = {
    // Local-mode Spark context for this experiment.
    val sparkConf = new SparkConf().setAppName("MLLib - Naive Bayes").setMaster("local")
    val sc = new SparkContext(sparkConf)
//    val conf = HBaseConfiguration.create()
//    val tableName = "timbre_sample"
//    conf.set(TableInputFormat.INPUT_TABLE, tableName)
//    val trainingFile = sc.textFile("data/mllib/msd_sample_train_data.txt")
//    val trainingData = trainingFile.map { line =>
//      val parts = line.split(',')
//      LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(' ').map(_.toDouble)))
//    }
    // Parse the timbre sample file: each line is "<trackId>,...;<timbre>".
    val timbreSampleSmall = "/Users/ameyapandilwar/CS6240/FINAL_PROJECT/MLPrediction/data/mllib/timbre_sample.txt"
    for (line <- Source.fromFile(timbreSampleSmall).getLines()) {
      if (StringUtils.isNoneBlank(line)) {
        val info = line.split(';')
        val trackId = info(0).split(",")(0)
//        println(trackId)
        trackTimbreMap += (trackId -> info(1))
      }
    }
    // Parse the training file: each line is "<artistInfo>:<track1>,<track2>,...".
    val trainingFile = "/Users/ameyapandilwar/CS6240/FINAL_PROJECT/MLPrediction/data/mllib/msd_sample_train_data.txt"
    for (line <- Source.fromFile(trainingFile).getLines()) {
      if (StringUtils.isNoneBlank(line)) {
        val info = line.split(':')
        val artistInfo = info(0).split("_")
        val tracks = info(1).split(",")
        tracks.foreach(track => {
          // NOTE(review): String.map(_.toDouble) converts each *character*
          // of the timbre string to its numeric char code; presumably the
          // string should be split on a delimiter first — confirm the
          // timbre data format before trusting the trained model.
          trngTrcks(count) = Vectors.dense(trackTimbreMap(track).map(_.toDouble).toArray)
          count += 1
        })
//        count += tracks.length
//        println(artistInfo(0))
//        val artistId = info.split(';')
//        println(artistId)
      }
    }
    println(count + " tracks trained successfully!")
    // Cluster the timbre vectors; one cluster per expected artist.
    val points = sc.parallelize(trngTrcks.toList)
    val mat: RowMatrix = new RowMatrix(points)
    val numOfClusters = 599
    val numOfIterations = 10
    val model = KMeans.train(points, numOfClusters, numOfIterations)
    println(" MODEL ==> " + model)
//    val testVector = Vectors.dense(arr)
//    val clusterID = model.predict(testVector)
//    print("the song's artist is :- " + clusterID)
//    val pc: Matrix = mat.computePrincipalComponents(10)
//    val projected = mat.multiply(pc).rows
//
//    println("===== projected :- " + projected)
//    trngTrcks.foreach(println)
//    val trainingData = trainingFile.map { line =>
//      val info = line.split(':')(0)
//      val artistId = info.split('_')(1)
//      println(artistId)
//      val tracks = info.split(',')
//      tracks.map { track =>
//        print(track + " | ")
//      }
//    }
//    val testingFile = sc.textFile("data/mllib/msd_sample_test_data.txt")
//    val testingData = trainingFile.map { line =>
//      val parts = line.split(',')
//      LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(' ').map(_.toDouble)))
//    }
//    val model = NaiveBayes.train(trainingData, lambda = 1.0, modelType = "multinomial")
//
//    val predictionAndLabel = testingData.map(p => (model.predict(p.features), p.label))
//    val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / testingData.count()
//    model.save(sc, "msdNaiveBayesModel")
//    val msdModel = NaiveBayesModel.load(sc, "msdNaiveBayesModel")
  }
}
| Arulselvanmadhavan/Artist_Recognition_from_Audio_Features | MLPrediction/src/main/scala/NaiveBayesTest.scala | Scala | apache-2.0 | 3,981 |
package almhirt.streaming
import almhirt.common._
import akka.actor._
/**
 * Factory for actor-backed stillages: suppliers that offer a fixed sequence
 * of elements to a stream broker via a backing actor.
 */
object ActorStillage {

  /**
   * Wraps an existing actor as a [[Supplier]]: every SuppliesContractor
   * callback is forwarded to the actor as an InternalContractorMessages
   * message, so the actor can drive the supplies protocol from its mailbox.
   */
  def apply[TElement](actor: ActorRef): Supplier[TElement] =
    new Supplier[TElement] {
      def signContract(broker: StreamBroker[TElement]) = {
        import InternalContractorMessages._
        broker.signContract(new SuppliesContractor[TElement] {
          def onProblem(problem: Problem) {
            actor ! OnProblem(problem)
          }

          def onStockroom(theStockroom: Stockroom[TElement]) {
            actor ! OnStockroom(theStockroom)
          }

          def onDeliverSuppliesNow(amount: Int) {
            actor ! OnDeliverSuppliesNow(amount)
          }

          def onContractExpired() {
            actor ! OnContractExpired
          }
        })
      }
    }

  /** Props for a stillage delivering `contents` in batches of `packagingSize`. */
  def props[TElement](contents: Seq[TElement], packagingSize: Int): Props =
    Props(new ActorStillage[TElement](contents, packagingSize))

  /** Props for a stillage with the default batch size of 16. */
  def props[TElement](contents: Seq[TElement]): Props =
    Props(new ActorStillage[TElement](contents, 16))

  /** Creates the stillage actor in `system` and wraps it as a Supplier. */
  def create[TElement](contents: Seq[TElement], packagingSize: Int, actorName: String)(implicit system: ActorSystem): Supplier[TElement] =
    ActorStillage[TElement](system.actorOf(props(contents, packagingSize), actorName))

  /** Creates the stillage actor (default batch size) and wraps it as a Supplier. */
  def create[TElement](contents: Seq[TElement], actorName: String)(implicit system: ActorSystem): Supplier[TElement] =
    ActorStillage[TElement](system.actorOf(props(contents), actorName))
}
/**
 * Messages mirroring the SuppliesContractor callbacks one-to-one, so that
 * an actor can implement the contractor protocol via its mailbox.
 */
private[almhirt] object InternalContractorMessages {
  import scala.language.existentials
  // A problem was reported by the broker.
  final case class OnProblem(problem: Problem)
  // The broker handed over the stockroom to deliver supplies to.
  final case class OnStockroom(theStockroom: Stockroom[_])
  // The stockroom demands `amount` previously offered elements.
  final case class OnDeliverSuppliesNow(amount: Int)
  // The supplies contract has ended.
  case object OnContractExpired
}
/**
 * The stillage actor: offers `contents` to a stockroom in batches of
 * `packagingSize`, delivers elements on demand, cancels the contract once
 * everything has been delivered, and then stops itself.
 */
private[almhirt] class ActorStillage[TElement](contents: Seq[TElement], packagingSize: Int) extends Actor with ActorLogging {
  import InternalContractorMessages._

  // Elements not yet announced (offered) to the stockroom.
  var notYetOffered = contents
  // Elements announced to the stockroom but not yet delivered.
  var offered: Vector[TElement] = Vector.empty
  // Elements remaining before the contract can be cancelled.
  var toDeliverLeft = contents.size

  // Set once OnStockroom arrives; cleared when the contract expires.
  var stockroom: Option[Stockroom[TElement]] = None

  /** Announces the next batch (up to packagingSize elements) to the stockroom. */
  def offer() {
    stockroom.foreach(stockroom ⇒ {
      val nextBatch = notYetOffered.take(packagingSize)
      notYetOffered = notYetOffered.drop(nextBatch.size)
      stockroom.offerSupplies(nextBatch.size)
      offered = offered ++ nextBatch
    })
  }

  def receive: Receive = {
    // NOTE: the Stockroom type parameter is erased at runtime, so this
    // pattern accepts any Stockroom regardless of its element type.
    case OnStockroom(theStockroom: Stockroom[TElement]) ⇒
      stockroom match {
        case None ⇒
          stockroom = Some(theStockroom)
          offer()
        case _ ⇒
          sys.error("There is already a stockroom")
      }

    case OnDeliverSuppliesNow(amount) ⇒
      // The stockroom may only demand what was previously offered.
      if (amount > offered.size) {
        sys.error("The demand may not exceed my offers!")
      }
      stockroom.foreach(stockroom ⇒ {
        val toLoad = offered.take(amount)
        stockroom.deliverSupplies(toLoad)
        offered = offered.drop(amount)
        toDeliverLeft -= amount
        // Keep the pipeline filled: re-offer once below one batch size.
        if (!notYetOffered.isEmpty && offered.size < packagingSize) {
          offer()
        }
        // Everything delivered: end the contract (OnContractExpired follows).
        if (toDeliverLeft == 0) {
          stockroom.cancelContract()
        }
      })

    case OnContractExpired ⇒
      stockroom = None
      context.system.stop(self)

    case OnProblem(problem) ⇒
      problem.escalate()
  }

  // A stillage is single-use; a restart would lose delivery state.
  override def preRestart(reason: Throwable, message: Option[Any]) {
    sys.error("A stillage can not be restarted")
  }

  // Log any elements that were never offered/delivered before stopping.
  override def postStop() {
    if (stockroom.isDefined)
      log.warning(s"I am still under contract!")
    if (!notYetOffered.isEmpty)
      log.warning(s"There are still ${notYetOffered.size} elements of ${contents.size} left that have not been offered.")
    if (!offered.isEmpty)
      log.warning(s"There are still ${offered.size} offered elements of ${contents.size} left that have not been delivered. That makes a total of ${notYetOffered.size + offered.size} elements that have not been delivered.")
  }
}
/*
Copyright (C) 2014-2020 Miquel Sabaté Solà <mikisabate@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.mssola.snacker.core
import backtype.storm.topology.{ TopologyBuilder }
// TODO: move to its own file.
object Base {
  // City identifiers — presumably the ids expected by the remote
  // "/api/cities/<id>" endpoint (see BaseComponent.devices); TODO confirm.
  val London = 3
  val Barcelona = 7
  val Genoa = 8
  val Bologna = 9
}
trait BaseComponent {

  /** Identifier of the city this component operates on. */
  def cityId: Int

  /** One-time setup hook; no-op by default. */
  def initialize() = {}

  /** Request for the devices registered in this component's city. */
  def devices() = Request("/api/cities/" + cityId + "/devices")

  /** Registers this component's spouts/bolts on the given topology builder. */
  def buildTopology(builder: TopologyBuilder)
}
| mssola/thesis | upc/snacker-core/src/main/scala/com/mssola/snacker/core/BaseComponent.scala | Scala | gpl-3.0 | 1,070 |
package ru.org.codingteam.horta.plugins.log
import akka.actor.Props
import ru.org.codingteam.horta.core.Clock
import ru.org.codingteam.horta.localization.LocaleDefinition
import ru.org.codingteam.horta.plugins.{ProcessCommand, ProcessMessage}
import ru.org.codingteam.horta.protocol.SendResponse
import ru.org.codingteam.horta.security.{CommonAccess, Credential}
import ru.org.codingteam.horta.test.TestKitSpec
/**
 * Spec for LogPlugin: stores a room message through the plugin and then
 * searches the log for it, checking the response mentions sender and text.
 */
class LogPluginSpec extends TestKitSpec {
  override val pluginProps = List(Props[LogPlugin])

  // Common-access credential of a fake room occupant; testActor receives
  // the plugin's response messages.
  val credential = Credential(
    testActor,
    LocaleDefinition("en"),
    CommonAccess,
    Some("testroom"),
    "testuser",
    Some("testuser")
  )

  "LogPlugin" should {
    val plugin = plugins.head._1

    "save received message" in {
      // Log a message, then issue a search command for its text.
      plugin ! ProcessMessage(Clock.now, credential, "test")
      plugin ! ProcessCommand(credential, SearchLogCommand, Array("test"))
      val message = expectMsgType[SendResponse](timeout.duration)
      // The search result line contains the sender name and the message.
      assert(message.text.contains("testuser "))
      assert(message.text.contains(" test"))
    }
  }
}
| codingteam/horta-hell | src/test/scala/ru/org/codingteam/horta/plugins/log/LogPluginSpec.scala | Scala | mit | 1,064 |
package com.example.unfilter.repos
import java.time.Duration
import java.util.concurrent.TimeUnit
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import com.example.unfilter.Message.{RegisterForReport, Usage}
import com.example.unfilter.actors.ToiletReportSubscriber
import com.example.unfilter.models.{Tid, ToiletEvent, UsageStat}
import unfiltered.netty.websockets.WebSocket
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success}
/**
 * Keeps a registry of websocket subscribers interested in toilet usage
 * reports. On [[RegisterForReport]] a dedicated [[ToiletReportSubscriber]]
 * actor is created for the websocket and an initial report is pushed to it;
 * on every subsequent [[ToiletEvent]] a fresh report is pushed to all
 * registered subscribers.
 */
class ReportSubscriberRepository extends Actor {

  implicit val timeout = Timeout(FiniteDuration(1, TimeUnit.SECONDS))

  import context._

  val toiletRepositoryPath: String = "akka://system/user/toilets"

  // Initial (empty) registry; the live registry is threaded through `become`.
  val subscribers = Map[WebSocket, ActorRef]()

  override def receive: Receive = subscribedOn(subscribers)

  val toiletsSelection: ActorSelection = system.actorSelection(ActorPath.fromString(toiletRepositoryPath))

  /** Behavior parameterized by the current socket -> subscriber registry. */
  def subscribedOn(subscribers: Map[WebSocket, ActorRef]): Receive = {
    case RegisterForReport(id, socket) =>
      val subscriber = system.actorOf(Props(new ToiletReportSubscriber(id, socket)))
      sendReportTo(id, subscriber)
      become(subscribedOn(subscribers + (socket -> subscriber)))
    case event: ToiletEvent =>
      // Broadcast is a pure side effect, so foreach (not map) is used here.
      subscribers.values.foreach(sendReportTo(event.id, _))
  }

  /**
   * Asks the toilet repository for the last 12 hours of usage stats for
   * toilet `id` and forwards the result to `subscriber`.
   */
  def sendReportTo(id: Tid, subscriber: ActorRef): Unit = {
    toiletsSelection.resolveOne().onComplete {
      case Success(toiletRepository) =>
        (toiletRepository ? Usage(id, Duration.ofHours(12)))
          .mapTo[List[UsageStat]]
          .foreach(subscriber ! _)
      case Failure(e) =>
        // NOTE(review): consider ActorLogging instead of println, and note
        // that a failed Usage ask is silently dropped by foreach.
        println(e.getMessage)
    }
  }
}
package nak.example
/*
Copyright 2013 Jason Baldridge
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* An example of using the API to classify the prepositional phrase attachment
* data, trying to be as simple and self-contained as possible. Includes example
* of how to serialize (save) and deserialize (load) a classifier.
*
* After compiling Nak, you can run it as follows (in the top-level of Nak):
*
* $ bin/nak run nak.example.PpaExample data/classify/ppa/training data/classify/ppa/devset ppa-classifier.obj
*
* @author jasonbaldridge
*/
object PpaExample {

  import nak.NakContext._
  import nak.core._
  import nak.data._
  import nak.liblinear.LiblinearConfig
  import nak.util.ConfusionMatrix

  def main(args: Array[String]) {
    val Array(trainfile, evalfile, fileForSavedClassifier) = args

    // Recognizes one line of the PPA data format, e.g.
    //   0 join board as director V
    // which becomes an Example labeled "V" whose features are the raw
    // observation "join board as director".
    val PpaLineRE = """^(\\d+)\\s(.*)\\s(N|V)$""".r

    // Reads a PPA file into labeled Examples, one per matching line.
    def readExamples(filename: String) =
      for (PpaLineRE(id, obs, label) <- io.Source.fromFile(filename).getLines)
        yield Example(label, obs)

    // Pairs each whitespace-separated token with its attribute name:
    // verb, object, preposition, prepositional object.
    val featurizer = new Featurizer[String, String] {
      def apply(input: String) =
        Array("verb", "object", "prep", "prep-obj")
          .zip(input.split("\\\\s"))
          .map { case (attr, value) => FeatureObservation(attr + "=" + value) }
    }

    // Train an L2-regularized logistic regression model (the liblinear
    // default) with cost C = 0.5, accepting default eps and verbosity.
    val config = LiblinearConfig(cost = .5)
    val trainedClassifier = trainClassifier(config, featurizer, readExamples(trainfile).toList)

    // Round-trip the classifier through disk purely to demonstrate
    // serialization and deserialization.
    saveClassifier(trainedClassifier, fileForSavedClassifier)
    val classifier =
      loadClassifier[FeaturizedClassifier[String, String]](fileForSavedClassifier)

    // The classifier carries its featurizer, so it can be applied directly
    // to the raw evaluation inputs.
    val comparisons = readExamples(evalfile).toList.map { ex =>
      (ex.label, classifier.predict(ex.features), ex.features)
    }

    // Print the confusion matrix of gold labels vs. predictions.
    val (goldLabels, predictions, inputs) = comparisons.unzip3
    println(ConfusionMatrix(goldLabels, predictions, inputs))
  }
}
| seanlgoldberg/nak | src/main/scala/nak/example/PpaExample.scala | Scala | apache-2.0 | 3,595 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.thrift
// Scala
import scala.collection.JavaConversions._
// ScalaCheck
import org.scalacheck.{Arbitrary,Gen,Properties}
import org.scalacheck.Prop.forAll
/**
* ScalaCheck specification testing all of the properties
* of the SnowplowRawEvent POJO.
*/
/**
 * ScalaCheck properties over the thrift-generated SnowplowRawEvent POJO:
 * each property checks that a value set via constructor or setter is read
 * back unchanged by the corresponding getter.
 */
object SnowplowRawEventSpec extends Properties("SnowplowRawEvent") {

  property("timestamp") = forAll { (timestamp: Long) =>
    val event = new SnowplowRawEvent(timestamp, "collector", "encoding",
      "127.0.0.1")
    event.getTimestamp == timestamp
  }

  property("protocolVal") = forAll (Gen.choose(1,100)) { (protocolVal) =>
    val protocol = PayloadProtocol.findByValue(protocolVal)
    val payload = new TrackerPayload(protocol, null, null)
    val event = new SnowplowRawEvent(0L, "collector", "encoding", "127.0.0.1")
    event.setPayload(payload)
    event.getPayload.getProtocol == protocol
  }

  property("protocolFormat") = forAll (Gen.choose(1,100)) { (formatVal) =>
    val format = PayloadFormat.findByValue(formatVal)
    val payload = new TrackerPayload(null, format, null)
    val event = new SnowplowRawEvent(0L, "collector", "encoding", "127.0.0.1")
    event.setPayload(payload)
    event.getPayload.getFormat == format
  }

  property("payloadData") = forAll { (payloadData: String) =>
    val payload = new TrackerPayload(
      PayloadProtocol.Http, PayloadFormat.HttpGet, payloadData
    )
    val event = new SnowplowRawEvent(0L, "collector", "encoding", "127.0.0.1")
    event.setPayload(payload)
    event.getPayload.getData == payloadData
  }

  property("collector") = forAll { (collector: String) =>
    val event = new SnowplowRawEvent(0L, collector, "encoding", "127.0.0.1")
    event.getCollector == collector
  }

  property("encoding") = forAll { (encoding: String) =>
    val event = new SnowplowRawEvent(0L, "collector", encoding, "127.0.0.1")
    event.getEncoding == encoding
  }

  property("ip") = forAll { (ip: String) =>
    val event = new SnowplowRawEvent(0L, "collector", "encoding", ip)
    event.getIpAddress == ip
  }

  // Check optional variables: a table of (name, setter, getter) triples is
  // walked so each optional field gets the same set/get round-trip property.
  type setFunc = Function2[SnowplowRawEvent,String,SnowplowRawEvent]
  type getFunc = Function1[SnowplowRawEvent,String]

  val f_hostname_set: setFunc = _.setHostname(_)
  val f_hostname_get: getFunc = _.getHostname
  val f_userAgent_set: setFunc = _.setUserAgent(_)
  val f_userAgent_get: getFunc = _.getUserAgent
  val f_refererUri_set: setFunc = _.setRefererUri(_)
  val f_refererUri_get: getFunc = _.getRefererUri
  val f_networkUserId_set: setFunc = _.setNetworkUserId(_)
  val f_networkUserId_get: getFunc = _.getNetworkUserId

  for (optionalVar <- List(
    ("hostname", f_hostname_set, f_hostname_get),
    ("userAgent", f_userAgent_set, f_userAgent_get),
    ("refererUri", f_refererUri_set, f_refererUri_get),
    ("networkUserId", f_networkUserId_set, f_networkUserId_get)
  )) {
    property(optionalVar._1) = forAll { (value: String) =>
      val event = new SnowplowRawEvent(0L, "collector", "encoding", "127.0.0.1")
      optionalVar._2(event, value)
      optionalVar._3(event) == value
    }
  }

  property("headers") = forAll { (headers: List[String]) =>
    // NOTE(review): the constructor arguments here — (0L, null, "collector",
    // "encoding") — differ from every other property, which uses
    // (timestamp, collector, encoding, ip). This passes null as the
    // collector and "encoding" as the ip; harmless for the headers check,
    // but confirm it is intentional.
    val event = new SnowplowRawEvent(0L, null, "collector", "encoding")
    event.setHeaders(headers)
    event.getHeaders.toList.equals(headers)
  }
}
| mdavid/lessig-bigdata | lib/snowplow/2-collectors/thrift-schemas/snowplow-raw-event/src/test/scala/com.snowplowanalytics.snowplow.collectors.thrift/SnowplowRawEventSpec.scala | Scala | mit | 4,066 |
package riftwarp.messagepack
/**
 * Numeric type codes identifying riftwarp value kinds in the MessagePack
 * encoding. These values are part of the wire format — presumably they must
 * stay stable across versions; confirm before renumbering.
 */
object RiftwarpTypecodes {
  // Structural markers
  val WarpDescriptorCode = 0
  val ObjectCode = 1
  // Arbitrary-precision numbers
  val BigIntCode = 5
  val BigDecimalCode = 6
  // Identifiers and resources
  val UuidCode = 10
  val UriCode = 11
  // Date and time values
  val DateTimeCode = 15
  val LocalDateTimeCode = 16
  val DurationCode = 17
  // Composite structures
  val TreeCode = 20
  //  val TreeNodeCode = 11
  val Tuple2Code = 25
  val Tuple3Code = 26
}
package chapter.one
import ExerciseFour.crazyTimeThree
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class ExerciseFourSpec extends FlatSpec with Matchers {

  // Exercise 4: the string "crazy" repeated three times.
  behavior of "crazyTimeThree"

  it should "return a string" in {
    crazyTimeThree should be ("crazycrazycrazy")
  }
}
| deekim/impatient-scala | src/test/scala/chapter/one/ExerciseFourSpec.scala | Scala | apache-2.0 | 344 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.util
import annotation.tailrec
import collection.immutable.HashMap
private[akka] object WildcardTree {
  // A single immutable empty tree shared across all element types; the cast
  // is safe because the empty tree contains no values of T.
  private val empty = new WildcardTree[Nothing]()
  def apply[T](): WildcardTree[T] = empty.asInstanceOf[WildcardTree[T]]
}

/**
 * An immutable prefix tree keyed by path segments, where the segment "*"
 * acts as a wildcard during lookup.
 *
 * @param data     the value stored at this node, if any
 * @param children child subtrees keyed by the next path segment
 */
private[akka] case class WildcardTree[T](data: Option[T] = None, children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) {

  /** Returns a new tree with `d` stored at the path described by `elems`. */
  def insert(elems: Iterator[String], d: T): WildcardTree[T] =
    if (!elems.hasNext) {
      copy(data = Some(d))
    } else {
      val e = elems.next()
      copy(children = children.updated(e, children.getOrElse(e, WildcardTree[T]()).insert(elems, d)))
    }

  /**
   * Follows the path described by `elems`, preferring an exact segment match
   * and falling back to the "*" wildcard child; returns the empty tree when
   * no branch matches.
   */
  @tailrec final def find(elems: Iterator[String]): WildcardTree[T] =
    if (!elems.hasNext) this
    else {
      (children.get(elems.next()) orElse children.get("*")) match {
        case Some(branch) ⇒ branch.find(elems)
        case None         ⇒ WildcardTree()
      }
    }
}
| Fincore/org.spark-project.akka | actor/src/main/scala/akka/util/WildcardTree.scala | Scala | mit | 1,017 |
package controllers
import helpers._
import models._
import org.joda.time._
import play.api.Configuration
import play.api.i18n._
import play.api.mvc.BodyParsers.parse
import play.api.mvc._
import protocols.JsonProtocol._
import protocols._
import security.ModulesAccessControl._
import security._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Failure
/**
* @author zepeng.li@gmail.com
*/
object RateLimitChecker {

  /**
   * Builds an action function that checks (and, by default, increments)
   * the per-user rate limit counter for the checked module before invoking
   * the wrapped block. On success the X-Rate-Limit-* headers are attached
   * to the result; on limit exhaustion a 429 response is returned instead.
   *
   * @param shouldIncrement pass false when the counter has already been
   *                        incremented by the accompanying body parser,
   *                        to avoid double counting a single request
   */
  def apply(
    shouldIncrement: Boolean = true
  )(
    implicit
    resource: CheckedModule,
    _basicPlayApi: BasicPlayApi,
    params: RateLimit.Params,
    _rateLimits: RateLimits,
    eh: UserActionExceptionHandler
  ) = new ActionFunction[UserRequest, UserRequest]
    with BasicPlayComponents
    with DefaultPlayExecutor {
    override def invokeBlock[A](
      req: UserRequest[A],
      block: (UserRequest[A]) => Future[Result]
    ): Future[Result] = {
      RateLimit.Check(req, req.user, shouldIncrement).fold[Result](
        identity,
        limit => block(req).map(limit.setHeaders)
      )
    }
    def basicPlayApi = _basicPlayApi
  }

  /**
   * Builds a body parser function performing the same rate limit check.
   * This variant always increments the counter; the action side is then
   * expected to pass shouldIncrement = false (see apply above).
   */
  def Parser()(
    implicit
    resource: CheckedModule,
    _basicPlayApi: BasicPlayApi,
    params: RateLimit.Params,
    _rateLimits: RateLimits,
    eh: BodyParserExceptionHandler
  ) = new BodyParserFunction[UserRequestHeader, UserRequestHeader]
    with BodyParserFunctionComponents {
    override def invoke[B](
      req: UserRequestHeader,
      block: UserRequestHeader => Future[BodyParser[B]]
    ): Future[BodyParser[B]] = {
      RateLimit.Check(req, req.user, shouldIncrement = true).fold[BodyParser[B]](
        r => Future.successful(parse.error[B](r)),
        _ => block(req)
      )
    }
    def basicPlayApi = _basicPlayApi
  }
}
/**
* The current status of rate limit
*
* @param count the current count
* @param params parameters of rate limit
*/
case class RateLimit(count: Long, params: RateLimit.Params) extends ExtHeaders {

  // Whether the counter has already reached the configured limit.
  val exceeded = count >= params.conf.limit

  // Requests left in the current span, after accounting for this request.
  val remaining = if (exceeded) 0 else params.conf.limit - count - 1

  /** Attaches the X-Rate-Limit-* headers describing this status to a result. */
  def setHeaders(result: Result) = result.withHeaders(
    X_RATE_LIMIT_LIMIT -> params.conf.limit.toString,
    X_RATE_LIMIT_REMAINING -> remaining.toString,
    X_RATE_LIMIT_RESET -> params.reset.toString
  )
}
object RateLimit {

  /** Unit of rate limit
   *
   * @param limit max number of permitted requests in a span
   * @param span  span length: every n minutes from o'clock
   */
  case class Config(limit: Int, span: Int)

  /**
   * Parameters of rate limit
   *
   * @param conf the configuration of rate limit
   * @param now  time stamp the span arithmetic is based on
   */
  case class Params(conf: Config, now: DateTime = DateTime.now) {
    val minutes = now.getMinuteOfHour
    val seconds = now.getSecondOfMinute
    // Seconds until the current span ends (value of X-Rate-Limit-Reset).
    val reset = (conf.span - minutes % conf.span) * 60 - seconds
    // Start of the current span; used as the counter's bucket key.
    val floor = now.hourOfDay.roundFloorCopy.plusMinutes((minutes / conf.span) * conf.span)
  }

  /**
   * The real process of rate limit checking.
   *
   * @param request         request header
   * @param user            user whose counter is checked
   * @param shouldIncrement if false the counter is not incremented here,
   *                        since it may already have been incremented by
   *                        the body parser
   */
  case class Check(
    request: RequestHeader,
    user: User,
    shouldIncrement: Boolean = true
  )(
    implicit
    val resource: CheckedModule,
    val basicPlayApi: BasicPlayApi,
    val _rateLimits: RateLimits,
    val params: Params,
    val eh: ExceptionHandler
  ) extends BasicPlayComponents
    with DefaultPlayExecutor
    with I18nLoggingComponents
    with I18nSupport {

    /**
     * Reads the user's counter for the current span and dispatches:
     * `failure` receives the ready-made 429 result when the limit is
     * exceeded, otherwise the counter is (optionally) incremented and
     * `success` receives the current RateLimit status.
     */
    def fold[A](
      failure: Future[Result] => Future[A],
      success: RateLimit => Future[A]
    ): Future[A] = _rateLimits
      .get(resource.name, params.floor)(user)
      .map(RateLimit(_, params))
      .andThen {
        case Failure(e: BaseException) => Logger.debug(s"RateLimitChecker failed, because ${e.reason}", e)
        case Failure(e: Throwable) => Logger.error(s"RateLimitChecker failed.", e)
      }
      .flatMap { limit =>
        if (limit.exceeded) {
          // Limit reached: respond 429 with a localized message and the
          // rate limit headers, without incrementing the counter.
          val result = Results.TooManyRequests {
            JsonMessage(s"${resource.name}.exceeded")(request2Messages(request))
          }
          failure(Future.successful(limit.setHeaders(result)))
        }
        else for {
          ___ <- {
            // NOTE(review): Future.successful(Unit) wraps the Unit
            // companion object; Future.successful(()) was probably meant.
            // Harmless here since the value is discarded.
            if (!shouldIncrement) Future.successful(Unit)
            else _rateLimits.inc(resource.name, limit.params.floor)(user)
          }
          ret <- success(limit)
        } yield ret
      }
  }
}
/**
 * Mixin providing the rate limit configuration for a named component.
 * Settings are looked up first under the component's canonical name, then
 * under its package name; defaults to 900 requests per 15-minute span.
 */
trait RateLimitConfigComponents {
  self: CanonicalNamed =>

  def configuration: Configuration

  lazy val rateLimitConfig = RateLimit.Config(
    configuration
      .getInt(s"$canonicalName.rate_limit.limit")
      .orElse(configuration.getInt(s"$packageName.rate_limit.limit"))
      .getOrElse(900),
    // The span is configured as a duration but stored as whole minutes.
    configuration
      .getMilliseconds(s"$canonicalName.rate_limit.span")
      .orElse(configuration.getMilliseconds(s"$packageName.rate_limit.span"))
      .map(_ millis)
      .map(_.toMinutes.toInt).getOrElse(15)
  )

  implicit def rateLimitParams = RateLimit.Params(rateLimitConfig)
}
package com.crobox.clickhouse.dsl.language
import com.crobox.clickhouse.dsl._
import com.crobox.clickhouse.dsl.schemabuilder.ColumnType
/** Tokenizes empty/notEmpty/isNull column functions into ClickHouse SQL. */
trait EmptyFunctionTokenizer {
  self: ClickhouseTokenizerModule =>

  protected def tokenizeEmptyCol(col: EmptyFunction[_])(implicit ctx: TokenizeContext): String =
    col match {
      case Empty(c) =>
        c.column match {
          // UUID columns are special-cased: the all-zero UUID is treated
          // as the "empty" value instead of calling empty().
          case NativeColumn(_, ColumnType.UUID, _) => s"${tokenizeColumn(c.column)} == 0"
          case _ => s"empty(${tokenizeColumn(c.column)})"
        }
      case NotEmpty(c) =>
        c.column match {
          case NativeColumn(_, ColumnType.UUID, _) => s"${tokenizeColumn(c.column)} != 0"
          case _ => s"notEmpty(${tokenizeColumn(c.column)})"
        }
      case IsNull(c) =>
        c.column match {
          // NOTE(review): this emits "!= 0" for UUID columns — identical to
          // the NotEmpty case above. For an is-null check one would expect
          // "== 0" (or a real isNull call); looks like a copy-paste slip.
          // Confirm the intended ClickHouse semantics before relying on it.
          case NativeColumn(_, ColumnType.UUID, _) => s"${tokenizeColumn(c.column)} != 0"
          case _ => s"isNull(${tokenizeColumn(c.column)})"
        }
    }
}
| crobox/clickhouse-scala-client | dsl/src/main/scala/com.crobox.clickhouse/dsl/language/EmptyFunctionTokenizer.scala | Scala | lgpl-3.0 | 1,056 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding
import com.twitter.algebird.{MapAlgebra, Monoid, Group, Interval, Last}
import com.twitter.algebird.monad._
import com.twitter.summingbird.{Producer, TimeExtractor, TestGraphs}
import com.twitter.summingbird.batch._
import com.twitter.summingbird.batch.state.HDFSState
import java.util.TimeZone
import java.io.File
import com.twitter.scalding.{ Source => ScaldingSource, Test => TestMode, _ }
import com.twitter.scalding.typed.TypedSink
import org.scalacheck._
import org.scalacheck.Prop._
import org.scalacheck.Properties
import org.apache.hadoop.conf.Configuration
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Buffer, HashMap => MutableHashMap, Map => MutableMap, SynchronizedBuffer, SynchronizedMap}
import cascading.scheme.local.{TextDelimited => CLTextDelimited}
import cascading.tuple.{Tuple, Fields, TupleEntry}
import cascading.flow.FlowDef
import cascading.tap.Tap
import cascading.scheme.NullScheme
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.RecordReader
import org.apache.hadoop.mapred.OutputCollector
import org.specs2.mutable._
/**
* Tests for Summingbird's Scalding planner.
*/
/**
 * Each test below follows the same recipe:
 *   1. sample random input data and functions via ScalaCheck's Arbitrary,
 *   2. compute the expected result in-memory with a TestGraphs.*InScala helper,
 *   3. run the equivalent Summingbird graph through the Scalding planner
 *      against in-memory test stores/sources/services, and
 *   4. compare the resulting key -> value maps.
 */
object ScaldingLaws extends Specification {
  import MapAlgebra.sparseEquiv

  // Events are (timestamp, payload) tuples; the timestamp is the first element.
  implicit def timeExtractor[T <: (Long, _)] = TestUtil.simpleTimeExtractor[T]

  // Draws one random value from the implicit Arbitrary instance.
  // NOTE(review): .sample.get can fail for generators that use filters — TODO confirm
  // all sampled types here have total generators.
  def sample[T: Arbitrary]: T = Arbitrary.arbitrary[T].sample.get

  "The ScaldingPlatform" should {
    //Set up the job:
    "match scala for single step jobs" in {
      val original = sample[List[Int]]
      val fn = sample[(Int) => List[(Int, Int)]]
      val initStore = sample[Map[Int, Int]]
      val inMemory = TestGraphs.singleStepInScala(original)(fn)
      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val testStore = TestStore[Int,Int]("test", batcher, initStore, inWithTime.size)
      val (buffer, source) = TestSource(inWithTime)

      val summer = TestGraphs.singleStepJob[Scalding,(Long,Int),Int,Int](source, testStore)(t =>
        fn(t._2))

      val scald = Scalding("scalaCheckJob")
      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(t => (testStore.sourceToBuffer ++ buffer).get(t))

      scald.run(ws, mode, scald.plan(summer))
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemory), testStore) must be_==(true)
    }

    "match scala single step pruned jobs" in {
      val original = sample[List[Int]]
      val fn = sample[(Int) => List[(Int, Int)]]
      val initStore = sample[Map[Int, Int]]
      val prunedList = sample[Set[Int]]
      // Expected result: the summed map with all pruned keys removed.
      val inMemory = {
        val computedMap = TestGraphs.singleStepInScala(original)(fn)
        val totalMap = Monoid.plus(initStore, computedMap)
        totalMap.filter(kv => !prunedList.contains(kv._1)).toMap
      }

      val pruner = new PrunedSpace[(Int, Int)] {
        def prune(item: (Int, Int), writeTime: Timestamp) = {
          prunedList.contains(item._1)
        }
      }
      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val testStore = TestStore[Int,Int]("test", batcher, initStore, inWithTime.size, pruner)
      val (buffer, source) = TestSource(inWithTime)

      val summer = TestGraphs.singleStepJob[Scalding,(Long,Int),Int,Int](source, testStore)(t =>
        fn(t._2))

      val scald = Scalding("scalaCheckJob")
      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(t => (testStore.sourceToBuffer ++ buffer).get(t))

      scald.run(ws, mode, scald.plan(summer))
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, inMemory, testStore) must be_==(true)
    }

    "match scala for flatMapKeys jobs" in {
      val original = sample[List[Int]]
      val initStore = sample[Map[Int,Int]]
      val fnA = sample[(Int) => List[(Int, Int)]]
      val fnB = sample[Int => List[Int]]
      val inMemory = TestGraphs.singleStepMapKeysInScala(original)(fnA, fnB)
      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val testStore = TestStore[Int,Int]("test", batcher, initStore, inWithTime.size)
      val (buffer, source) = TestSource(inWithTime)

      val summer = TestGraphs.singleStepMapKeysJob[Scalding,(Long,Int),Int,Int, Int](source, testStore)(t =>
        fnA(t._2), fnB)

      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val scald = Scalding("scalaCheckJob")
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(t => (testStore.sourceToBuffer ++ buffer).get(t))

      scald.run(ws, mode, scald.plan(summer))
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemory), testStore) must beTrue
    }

    "match scala for multiple summer jobs" in {
      val original = sample[List[Int]]
      val initStoreA = sample[Map[Int,Int]]
      val initStoreB = sample[Map[Int,Int]]
      val fnA = sample[(Int) => List[(Int)]]
      val fnB = sample[(Int) => List[(Int, Int)]]
      val fnC = sample[(Int) => List[(Int, Int)]]
      val (inMemoryA, inMemoryB) = TestGraphs.multipleSummerJobInScala(original)(fnA, fnB, fnC)

      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val testStoreA = TestStore[Int,Int]("testA", batcher, initStoreA, inWithTime.size)
      val testStoreB = TestStore[Int,Int]("testB", batcher, initStoreB, inWithTime.size)
      val (buffer, source) = TestSource(inWithTime)

      val tail = TestGraphs.multipleSummerJob[Scalding, (Long, Int), Int, Int, Int, Int, Int](source, testStoreA, testStoreB)({t => fnA(t._2)}, fnB, fnC)

      val scald = Scalding("scalaCheckMultipleSumJob")
      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(t => (testStoreA.sourceToBuffer ++ testStoreB.sourceToBuffer ++ buffer).get(t))

      scald.run(ws, mode, scald.plan(tail))
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, Monoid.plus(initStoreA, inMemoryA), testStoreA) must beTrue
      TestUtil.compareMaps(original, Monoid.plus(initStoreB, inMemoryB), testStoreB) must beTrue
    }

    "match scala for leftJoin jobs" in {
      val original = sample[List[Int]]
      val prejoinMap = sample[(Int) => List[(Int, Int)]]
      val service = sample[(Int,Int) => Option[Int]]
      val postJoin = sample[((Int, (Int, Option[Int]))) => List[(Int, Int)]]

      // We need to keep track of time correctly to use the service:
      // the iterator advances fakeTime as a side effect so srvWithTime
      // observes the "current" event time during the in-memory run.
      var fakeTime = -1
      val timeIncIt = new Iterator[Int] {
        val inner = original.iterator
        def hasNext = inner.hasNext
        def next = {
          fakeTime += 1
          inner.next
        }
      }
      val srvWithTime = { (key: Int) => service(fakeTime, key) }
      val inMemory = TestGraphs.leftJoinInScala(timeIncIt)(srvWithTime)(prejoinMap)(postJoin)

      // Add a time:
      val allKeys = original.flatMap(prejoinMap).map { _._1 }
      val allTimes = (0 until original.size)
      // Materialize the full (time, key) -> value lookup table for the service.
      val stream = for { time <- allTimes; key <- allKeys; v = service(time, key) } yield (time.toLong, (key, v))

      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val initStore = sample[Map[Int, Int]]
      val testStore = TestStore[Int,Int]("test", batcher, initStore, inWithTime.size)

      /**
       * Create the batched service
       */
      val batchedService = stream.map{case (time, v) => (Timestamp(time), v)}.groupBy { case (ts, _) => batcher.batchOf(ts) }
      val testService = new TestService[Int, Int]("srv", batcher, batcher.batchOf(Timestamp(0)).prev, batchedService)

      val (buffer, source) = TestSource(inWithTime)

      val summer =
        TestGraphs.leftJoinJob[Scalding,(Long, Int),Int,Int,Int,Int](source, testService, testStore) { tup => prejoinMap(tup._2) }(postJoin)

      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val scald = Scalding("scalaCheckleftJoinJob")
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(s => (testStore.sourceToBuffer ++ buffer ++ testService.sourceToBuffer).get(s))

      scald.run(ws, mode, summer)
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemory), testStore) must beTrue
    }

    "match scala for diamond jobs with write" in {
      val original = sample[List[Int]]
      val fn1 = sample[(Int) => List[(Int, Int)]]
      val fn2 = sample[(Int) => List[(Int, Int)]]
      val inMemory = TestGraphs.diamondJobInScala(original)(fn1)(fn2)

      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val initStore = sample[Map[Int, Int]]
      val testStore = TestStore[Int,Int]("test", batcher, initStore, inWithTime.size)
      val testSink = new TestSink[(Long,Int)]
      val (buffer, source) = TestSource(inWithTime)

      val summer = TestGraphs
        .diamondJob[Scalding,(Long, Int),Int,Int](source,
          testSink,
          testStore)(t => fn1(t._2))(t => fn2(t._2))

      val scald = Scalding("scalding-diamond-Job")
      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val ws = new LoopState(intr)
      val mode: Mode = TestMode(s => (testStore.sourceToBuffer ++ buffer).get(s))

      scald.run(ws, mode, summer)
      // Now check that the inMemory ==
      val sinkOut = testSink.reset
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemory), testStore) must beTrue
      val wrongSink = sinkOut.map { _._2 }.toList != inWithTime
      wrongSink must be_==(false)
      // NOTE(review): the matcher above throws on failure, so this diagnostic
      // block is unreachable when wrongSink is true; it would need to run
      // before the assertion to ever print.
      if(wrongSink) {
        println("input: " + inWithTime)
        println("SinkExtra: " + (sinkOut.map(_._2).toSet -- inWithTime.toSet))
        println("SinkMissing: " + (inWithTime.toSet -- sinkOut.map(_._2).toSet))
      }
    }

    "Correctly aggregate multiple sumByKeys" in {
      val original = sample[List[(Int,Int)]]
      val keyExpand = sample[(Int) => List[Int]]
      val (inMemoryA, inMemoryB) = TestGraphs.twoSumByKeyInScala(original, keyExpand)

      // Add a time:
      val inWithTime = original.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = TestUtil.randomBatcher(inWithTime)
      val initStore = sample[Map[Int, Int]]
      val testStoreA = TestStore[Int,Int]("testA", batcher, initStore, inWithTime.size)
      val testStoreB = TestStore[Int,Int]("testB", batcher, initStore, inWithTime.size)

      val (buffer, source) = TestSource(inWithTime)

      val summer = TestGraphs
        .twoSumByKey[Scalding,Int,Int,Int](source.map(_._2), testStoreA, keyExpand, testStoreB)

      val scald = Scalding("scalding-diamond-Job")
      val intr = TestUtil.batchedCover(batcher, 0L, original.size.toLong)
      val ws = new LoopState(intr)
      val mode: Mode = TestMode((testStoreA.sourceToBuffer ++ testStoreB.sourceToBuffer ++ buffer).get(_))

      scald.run(ws, mode, summer)
      // Now check that the inMemory ==
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemoryA), testStoreA, "A") must beTrue
      TestUtil.compareMaps(original, Monoid.plus(initStore, inMemoryB), testStoreB, "B") must beTrue
    }

    "Correctly propagates keys outside merge interval" in {
      val frozenInitialData = sample[Set[(Int, Int)]]
      val frozenKeys = frozenInitialData.map(_._1)
      val liquidInitialData = sample[Set[(Int, Int)]] -- frozenInitialData
      val initialData = (frozenInitialData ++ liquidInitialData).toMap
      // Delta set should not intersect with the frozen keys
      val deltaSet = sample[List[(Int, Int)]].filterNot(kv => frozenKeys.contains(kv._1))
      val summedDataInMemory = MapAlgebra.sumByKey(deltaSet)
      val mergedDataInMemory = Monoid.plus(initialData, summedDataInMemory)

      val dataWithTime = deltaSet.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = new MillisecondBatcher(1L)

      // A key is "frozen" (outside the writable key space) iff it came from
      // the frozen initial data.
      def testFrozen(key: Int, range: Interval[Timestamp]): Boolean = {
        frozenKeys.contains(key)
      }

      val store = TestStore[Int, Int]("test", batcher, initialData, dataWithTime.size, boundedKeySpace = TimeBoundedKeySpace(testFrozen))
      val (buffer, source) = TestSource(dataWithTime)

      val summer = source.map(_._2).sumByKey(store)

      val scald = Scalding("scalding-keyFixTime-Job")
      val mode: Mode = TestMode((store.sourceToBuffer ++ buffer).get(_))

      scald.run(new LoopState(TestUtil.batchedCover(batcher, 0L, dataWithTime.size.toLong)), mode, summer)

      TestUtil.compareMaps(deltaSet, mergedDataInMemory, store, "store") must beTrue
    }

    "Should throw exception if frozen keys and delta's intersect" in {
      val frozenInitialData = sample[Set[(Int, Int)]]
      val frozenKeys = frozenInitialData.map(_._1)
      val liquidInitialData = sample[Set[(Int, Int)]] -- frozenInitialData
      val initialData = (frozenInitialData ++ liquidInitialData).toMap
      // Delta set intentionally DOES intersect the frozen keys here, to
      // provoke the failure (the comment inherited from the previous test
      // did not apply to this case).
      val deltaSet = sample[List[(Int, Int)]] ++ frozenInitialData
      val summedDataInMemory = MapAlgebra.sumByKey(deltaSet)
      val mergedDataInMemory = Monoid.plus(initialData, summedDataInMemory)

      val dataWithTime = deltaSet.zipWithIndex.map { case (item, time) => (time.toLong, item) }
      val batcher = new MillisecondBatcher(1L)

      def testFrozen(key: Int, range: Interval[Timestamp]): Boolean = {
        frozenKeys.contains(key)
      }

      val store = TestStore[Int, Int]("test", batcher, initialData, dataWithTime.size, boundedKeySpace = TimeBoundedKeySpace(testFrozen))
      val (buffer, source) = TestSource(dataWithTime)

      val summer = source.map(_._2).sumByKey(store)

      val scald = Scalding("scalding-keyFixTime-Job")
      val mode: Mode = TestMode((store.sourceToBuffer ++ buffer).get(_))

      // NOTE(review): the test's result is a bare Boolean produced by the
      // try/catch; it relies on specs2's Boolean-to-Result conversion rather
      // than an explicit matcher.
      try {
        scald.run(new ErrorThrowState(TestUtil.batchedCover(batcher, 0L, dataWithTime.size.toLong)), mode, summer)
        false // Should never reach here
      }
      catch {
        case _: Throwable => true
      }
    }
  }
}
| surabhiiyer/summingbird | summingbird-scalding-test/src/test/scala/com/twitter/summingbird/scalding/ScaldingLaws.scala | Scala | apache-2.0 | 15,392 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.models
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import com.esotericsoftware.kryo.io.{ Input, Output }
import Ordering.Option
import org.apache.spark.Logging
import org.bdgenomics.adam.instrumentation.Timers.CreateReferencePositionPair
import org.bdgenomics.adam.models.ReferenceRegion._
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.formats.avro.AlignmentRecord
object ReferencePositionPair extends Logging {
  /**
   * Builds a ReferencePositionPair from a bucket of reads, timed under
   * CreateReferencePositionPair.
   *
   * First/second-of-pair candidates are drawn from the primary-mapped and
   * unmapped reads of the bucket; only the first candidate of each side
   * (via lift(0)) is used. If neither side yields a read, the first
   * primary-mapped-or-unmapped read is used as read 1 and read 2 is None.
   */
  def apply(singleReadBucket: SingleReadBucket): ReferencePositionPair = CreateReferencePositionPair.time {
    val firstOfPair = (singleReadBucket.primaryMapped.filter(_.getFirstOfPair) ++
      singleReadBucket.unmapped.filter(_.getFirstOfPair)).toSeq
    val secondOfPair = (singleReadBucket.primaryMapped.filter(_.getSecondOfPair) ++
      singleReadBucket.unmapped.filter(_.getSecondOfPair)).toSeq

    // Mapped reads use their 5' reference position; unmapped reads fall back
    // to a position keyed on the read sequence at offset 0L (so that
    // identical unmapped reads still compare equal).
    def getPos(r: AlignmentRecord): ReferencePosition = {
      if (r.getReadMapped) {
        new RichAlignmentRecord(r).fivePrimeReferencePosition
      } else {
        ReferencePosition(r.getSequence, 0L)
      }
    }

    if (firstOfPair.size + secondOfPair.size > 0) {
      new ReferencePositionPair(firstOfPair.lift(0).map(getPos),
        secondOfPair.lift(0).map(getPos))
    } else {
      new ReferencePositionPair((singleReadBucket.primaryMapped ++
        singleReadBucket.unmapped).toSeq.lift(0).map(getPos),
        None)
    }
  }
}
/**
 * The (possibly absent) reference positions of the two reads of a pair.
 *
 * @param read1refPos position of the first read of the pair, if determined
 * @param read2refPos position of the second read of the pair, if determined
 */
case class ReferencePositionPair(read1refPos: Option[ReferencePosition],
  read2refPos: Option[ReferencePosition])
/**
 * Kryo serializer for [[ReferencePositionPair]].
 *
 * Each optional position is encoded as a presence boolean followed, when
 * present, by the position itself (delegated to ReferencePositionSerializer).
 */
class ReferencePositionPairSerializer extends Serializer[ReferencePositionPair] {
  val rps = new ReferencePositionSerializer()

  // Writes a presence flag, then the position when defined.
  def writeOptionalReferencePos(kryo: Kryo, output: Output, optRefPos: Option[ReferencePosition]) = {
    optRefPos.fold(output.writeBoolean(false)) { refPos =>
      output.writeBoolean(true)
      rps.write(kryo, output, refPos)
    }
  }

  // Reads the presence flag and, when set, the following position.
  def readOptionalReferencePos(kryo: Kryo, input: Input): Option[ReferencePosition] =
    if (input.readBoolean()) {
      Some(rps.read(kryo, input, classOf[ReferencePosition]))
    } else {
      None
    }

  def write(kryo: Kryo, output: Output, obj: ReferencePositionPair) = {
    // Encode read 1 first, then read 2 — read() below relies on this order.
    writeOptionalReferencePos(kryo, output, obj.read1refPos)
    writeOptionalReferencePos(kryo, output, obj.read2refPos)
  }

  def read(kryo: Kryo, input: Input, klazz: Class[ReferencePositionPair]): ReferencePositionPair =
    new ReferencePositionPair(
      readOptionalReferencePos(kryo, input),
      readOptionalReferencePos(kryo, input))
}
| FusionWorks/adam | adam-core/src/main/scala/org/bdgenomics/adam/models/ReferencePositionPair.scala | Scala | apache-2.0 | 3,507 |
package reactivemongo.api.commands
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NoStackTrace
import reactivemongo.api.{
BSONSerializationPack,
Cursor,
SerializationPack,
DB,
Collection
}
import reactivemongo.bson.{ BSONDocumentReader, BSONDocumentWriter }
import reactivemongo.core.netty.BufferSequence
import reactivemongo.core.protocol.Response
import reactivemongo.core.errors.ReactiveMongoException
// Root marker for every MongoDB command representation in the driver.
sealed trait AbstractCommand

// A command executed against a database.
trait Command extends AbstractCommand
// A command executed against a single collection.
trait CollectionCommand extends AbstractCommand

// Marker: the command produces a result of type `R`.
trait CommandWithResult[R] { self: AbstractCommand => }
// Marker: the command is tied to the serialization pack `P`.
trait CommandWithPack[P <: SerializationPack] { self: AbstractCommand => }

// Wrapper used to return a primitive (AnyVal) result from a command.
trait BoxedAnyVal[A <: AnyVal] {
  def value: A
}
/**
 * A command result paired with the raw protocol response it was parsed from.
 *
 * @tparam R the type of the parsed result value
 * @param response the response associated with the result
 * @param numberToReturn the number of documents to return
 * @param value the value parsed from the response
 */
case class ResponseResult[R](
  response: Response,
  numberToReturn: Int,
  value: R)
/**
 * An error returned by the server for a command.
 * Extends NoStackTrace since the failure originates remotely,
 * not from the local call site.
 */
trait CommandError extends Exception with NoStackTrace {
  /** The error code */
  def code: Option[Int]

  /** The error message */
  def errmsg: Option[String]

  // Renders "<unknown>" for whichever of code/errmsg is absent.
  override def getMessage = s"CommandError[code=${code.getOrElse("<unknown>")}, errmsg=${errmsg.getOrElse("<unknown>")}]"
}
/**
 * Fetches a cursor from MongoDB results.
 * @tparam P the type of the serialization pack
 * @tparam C the type of the cursor implementation
 */
trait CursorFetcher[P <: SerializationPack, +C[_] <: Cursor[_]] {
  val pack: P

  /** Fetches a single result document, deserialized with `reader`. */
  def one[A](implicit reader: pack.Reader[A], ec: ExecutionContext): Future[A]

  /** Returns a cursor over all result documents, deserialized with `reader`. */
  def cursor[A](implicit reader: pack.Reader[A]): C[A]
}
/**
 * Reference to a server-side cursor returned by a command.
 *
 * @param cursorId the ID of the cursor
 * @param fullCollectionName the namespace of the collection
 */
case class ResultCursor(cursorId: Long, fullCollectionName: String)
/**
 * Mixin providing an implicit conversion from any value with a
 * `pack.Writer` into a document producer, so command builders can accept
 * user types directly.
 */
trait ImplicitCommandHelpers[P <: SerializationPack] {
  val pack: P

  // Lazily produces a serialized document of the mixed-in pack.
  trait ImplicitlyDocumentProducer {
    def produce: pack.Document
  }

  object ImplicitlyDocumentProducer {
    // Any `A` with a Writer in scope is implicitly a document producer.
    implicit def producer[A](a: A)(implicit writer: pack.Writer[A]): ImplicitlyDocumentProducer = new ImplicitlyDocumentProducer {
      def produce = pack.serialize(a, writer)
    }
  }
}
// Boxed Unit result, for commands that return nothing meaningful.
object UnitBox extends BoxedAnyVal[Unit] {
  def value: Unit = ()
}
/**
 * Entry points for running MongoDB commands: builds the wire-protocol
 * request (a Query against the special `$cmd` pseudo-collection), sends it,
 * and deserializes the response with the chosen serialization pack.
 */
object Command {
  import reactivemongo.api.{
    DefaultCursor,
    Failover2,
    FailoverStrategy,
    ReadPreference
  }
  import reactivemongo.core.actors.RequestMakerExpectingResponse
  import reactivemongo.bson.lowlevel.LoweLevelDocumentIterator
  import reactivemongo.bson.buffer.{ ReadableBuffer, WritableBuffer }
  import reactivemongo.core.netty.{
    BufferSequence,
    ChannelBufferReadableBuffer,
    ChannelBufferWritableBuffer
  }
  import reactivemongo.core.protocol.{
    RequestMaker,
    Query,
    QueryFlags
  }

  /**
   * Builds a CursorFetcher that serializes `command` with `writer` and
   * runs it on `db`, retrying per `failover`.
   */
  def defaultCursorFetcher[P <: SerializationPack, A](db: DB, p: P, command: A, failover: FailoverStrategy)(implicit writer: p.Writer[A]): CursorFetcher[p.type, DefaultCursor.Impl] = new CursorFetcher[p.type, DefaultCursor.Impl] {
    val pack: p.type = p

    @inline private def defaultReadPreference: ReadPreference =
      db.connection.options.readPreference

    // Sends the command once (with failover retries) and deserializes the
    // first document of the response.
    def one[A](readPreference: ReadPreference)(implicit reader: pack.Reader[A], ec: ExecutionContext): Future[A] = {
      val (requestMaker, mongo26WriteCommand) =
        buildRequestMaker(pack)(command, writer, readPreference, db.name)

      // TODO: Await maxTimeout?
      Failover2(db.connection, failover) { () =>
        db.connection.sendExpectingResponse(
          requestMaker, mongo26WriteCommand).map { response =>
          pack.readAndDeserialize(
            LoweLevelDocumentIterator(ChannelBufferReadableBuffer(
              response.documents)).next, reader)
        }
      }.future
    }

    def one[A](implicit reader: pack.Reader[A], ec: ExecutionContext): Future[A] = one[A](defaultReadPreference)

    // Builds a cursor over the command's results. MongoDB 2.6 write commands
    // must go to the primary regardless of the requested read preference.
    def cursor[A](readPreference: ReadPreference)(implicit reader: pack.Reader[A]): DefaultCursor.Impl[A] = {
      val buffer = ChannelBufferWritableBuffer()
      pack.serializeAndWrite(buffer, command, writer)

      val bs = BufferSequence(buffer.buffer)
      val flags = if (readPreference.slaveOk) QueryFlags.SlaveOk else 0
      // Commands are issued as a query on the "<db>.$cmd" namespace.
      val op = Query(flags, db.name + ".$cmd", 0, 1)

      val mongo26WriteCommand = command match {
        case _: Mongo26WriteCommand => true
        case _                      => false
      }

      DefaultCursor.query(pack, op, bs,
        if (mongo26WriteCommand) ReadPreference.primary else readPreference,
        db.connection, failover, mongo26WriteCommand)
    }

    def cursor[A](implicit reader: pack.Reader[A]): DefaultCursor.Impl[A] =
      cursor(defaultReadPreference)
  }

  /**
   * Runner bound to one serialization pack; dispatches commands against a
   * database or a collection and deserializes their results.
   */
  case class CommandWithPackRunner[P <: SerializationPack](pack: P, failover: FailoverStrategy = FailoverStrategy()) {
    // database
    def apply[R, C <: Command with CommandWithResult[R]](db: DB, command: C with CommandWithResult[R])(implicit writer: pack.Writer[C], reader: pack.Reader[R], ec: ExecutionContext): Future[R] = defaultCursorFetcher(db, pack, command, failover).one[R]

    def apply[C <: Command](db: DB, command: C)(implicit writer: pack.Writer[C]): CursorFetcher[pack.type, Cursor] = defaultCursorFetcher(db, pack, command, failover)

    // Unwraps a boxed AnyVal result to the underlying primitive.
    def unboxed[A <: AnyVal, R <: BoxedAnyVal[A], C <: Command with CommandWithResult[R]](db: DB, command: C with CommandWithResult[R with BoxedAnyVal[A]])(implicit writer: pack.Writer[C], reader: pack.Reader[R], ec: ExecutionContext): Future[A] = defaultCursorFetcher(db, pack, command, failover).one[R].map(_.value)

    // collection
    def apply[R, C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C with CommandWithResult[R])(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[R] = defaultCursorFetcher(collection.db, pack, ResolvedCollectionCommand(collection.name, command), failover).one[R]

    def apply[C <: CollectionCommand](collection: Collection, command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): CursorFetcher[pack.type, Cursor] = defaultCursorFetcher(collection.db, pack, ResolvedCollectionCommand(collection.name, command), failover)

    /**
     * Executes the `command` and returns its result
     * along with the MongoDB response.
     */
    def withResponse[R, C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[ResponseResult[R]] = {
      val cursor = defaultCursorFetcher(collection.db, pack,
        ResolvedCollectionCommand(collection.name, command), failover).cursor[R]

      for {
        firstResponse <- cursor.makeRequest(cursor.numberToReturn)
        result <- cursor.headOption.flatMap(_.fold(Future.failed[R](
          ReactiveMongoException("missing result")))(Future.successful(_)))
      } yield ResponseResult(firstResponse, cursor.numberToReturn, result)
    }

    def unboxed[A <: AnyVal, R <: BoxedAnyVal[A], C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C with CommandWithResult[R with BoxedAnyVal[A]])(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[A] =
      defaultCursorFetcher(collection.db, pack, ResolvedCollectionCommand(collection.name, command), failover).one[R].map(_.value)

    // Wraps an arbitrary serialized document as a raw (untyped) command.
    def rawCommand[T](input: T)(implicit writer: pack.Writer[T]): RawCommand =
      RawCommand(pack.serialize(input, writer))

    case class RawCommand(document: pack.Document) extends Command

    object RawCommand {
      implicit val writer: pack.Writer[RawCommand] = pack.writer(_.document)
    }
  }

  /**
   * Returns a command runner.
   *
   * @param pack the serialization pack
   *
   * {{{
   * import reactivemongo.bson.BSONDocument
   * import reactivemongo.api.BSONSerializationPack
   * import reactivemongo.api.commands.{ Command, Count }
   *
   * Command.run(BSONSerializationPack).
   *   unboxed(aCollection, Count(BSONDocument("bulk" -> true)))
   * }}}
   */
  def run[P <: SerializationPack](pack: P): CommandWithPackRunner[pack.type] =
    CommandWithPackRunner(pack)

  private[reactivemongo] def deserialize[P <: SerializationPack, A](pack: P, response: Response)(implicit reader: pack.Reader[A]): A =
    pack.readAndDeserialize(response, reader)

  /**
   * Serializes `command` into a protocol request against "<db>.$cmd".
   * Also returns whether it is a MongoDB 2.6 write command (which must be
   * routed to the primary).
   */
  private[reactivemongo] def buildRequestMaker[P <: SerializationPack, A](pack: P)(command: A, writer: pack.Writer[A], readPreference: ReadPreference, db: String): (RequestMaker, Boolean) = {
    val buffer = ChannelBufferWritableBuffer()
    pack.serializeAndWrite(buffer, command, writer)

    val documents = BufferSequence(buffer.buffer)
    val flags = if (readPreference.slaveOk) QueryFlags.SlaveOk else 0

    val query = Query(flags, db + ".$cmd", 0, 1)
    val mongo26WriteCommand = command match {
      case _: Mongo26WriteCommand => true
      case _                      => false
    }

    RequestMaker(query, documents, readPreference) -> mongo26WriteCommand
  }

  /**
   * Lower-level factory producing actor messages
   * (RequestMakerExpectingResponse) for database- or collection-level
   * commands. The Mongo26WriteCommand overloads force the primary read
   * preference.
   */
  private[reactivemongo] case class CommandWithPackMaker[P <: SerializationPack](pack: P) {
    def apply[C <: Command](db: DB, command: C, readPreference: ReadPreference)(implicit writer: pack.Writer[C]): RequestMakerExpectingResponse =
      onDatabase(db.name, command, readPreference)

    def apply[C <: Command with Mongo26WriteCommand](db: DB, command: C)(implicit writer: pack.Writer[C]): RequestMakerExpectingResponse =
      onDatabase(db.name, command)

    def apply[C <: CollectionCommand](collection: Collection, command: C, readPreference: ReadPreference)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): RequestMakerExpectingResponse =
      onCollection(collection.db.name, collection.name, command, readPreference)

    def apply[C <: CollectionCommand with Mongo26WriteCommand](collection: Collection, command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): RequestMakerExpectingResponse =
      onCollection(collection.db.name, collection.name, command)

    def onDatabase[C <: Command](db: String, command: C, readPreference: ReadPreference)(implicit writer: pack.Writer[C]): RequestMakerExpectingResponse = {
      val (requestMaker, mongo26WriteCommand) = buildRequestMaker(pack)(command, writer, readPreference, db)

      RequestMakerExpectingResponse(requestMaker, mongo26WriteCommand)
    }

    def onDatabase[C <: Command with Mongo26WriteCommand](db: String, command: C)(implicit writer: pack.Writer[C]): RequestMakerExpectingResponse = {
      val requestMaker = buildRequestMaker(pack)(command, writer, ReadPreference.primary, db)._1

      RequestMakerExpectingResponse(requestMaker, true)
    }

    def onCollection[C <: CollectionCommand](db: String, collection: String, command: C, readPreference: ReadPreference)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): RequestMakerExpectingResponse = {
      val (requestMaker, mongo26WriteCommand) = buildRequestMaker(pack)(ResolvedCollectionCommand(collection, command), writer, readPreference, db)

      RequestMakerExpectingResponse(requestMaker, mongo26WriteCommand)
    }

    def onCollection[C <: CollectionCommand with Mongo26WriteCommand](db: String, collection: String, command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): RequestMakerExpectingResponse = {
      val requestMaker = buildRequestMaker(pack)(ResolvedCollectionCommand(collection, command), writer, ReadPreference.primary, db)._1

      RequestMakerExpectingResponse(requestMaker, true)
    }
  }

  private[reactivemongo] def requestMaker[P <: SerializationPack](pack: P): CommandWithPackMaker[P] = CommandWithPackMaker(pack)
}
/**
 * A collection command bound to the collection it targets.
 *
 * @tparam C the type of the wrapped collection command
 * @param collection the name of the collection against which the command is executed
 * @param command the executed command
 */
final case class ResolvedCollectionCommand[C <: CollectionCommand](
  collection: String,
  command: C) extends Command
// Package-level alias: WriteConcern is expressed as a GetLastError command.
object `package` {
  type WriteConcern = GetLastError
  val WriteConcern = GetLastError
}
| charleskubicek/ReactiveMongo | driver/src/main/scala/api/commands/commands.scala | Scala | apache-2.0 | 12,161 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.SQLTransformer
// $example off$
import org.apache.spark.sql.SparkSession
/**
 * Example showing how SQLTransformer derives new columns from a SQL
 * statement, where `__THIS__` refers to the input DataFrame.
 */
object SQLTransformerExample {
  def main(args: Array[String]): Unit = {
    // Spark session hosting the example.
    val spark = SparkSession.builder.appName("SQLTransformerExample").getOrCreate()

    // $example on$
    val df = spark
      .createDataFrame(Seq((0, 1.0, 3.0), (2, 2.0, 5.0)))
      .toDF("id", "v1", "v2")

    // Adds v3 = v1 + v2 and v4 = v1 * v2 to every row.
    val sqlTrans = new SQLTransformer()
      .setStatement("SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")

    sqlTrans.transform(df).show()
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println
| lhfei/spark-in-action | spark-3.x/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala | Scala | apache-2.0 | 1,516 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScClassParents
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.templates.ScClassParentsImpl
/**
 * Stub element type for the template parents (extends clause) of a class.
 *
 * User: Alexander Podkhalyuzin
 * Date: 17.06.2009
 */
class ScClassParentsElementType extends ScTemplateParentsElementType[ScClassParents]("class parents") {
  // AST-backed PSI element.
  override def createElement(node: ASTNode): ScClassParents = new ScClassParentsImpl(node)

  // Stub-backed PSI element (used when only the stub tree is loaded).
  override def createPsi(stub: ScTemplateParentsStub[ScClassParents]): ScClassParents = new ScClassParentsImpl(stub)
}
//package riftwarp
//
//import org.scalatest._
//import org.scalatest.matchers.MustMatchers
//import scalaz.Cord
//import riftwarp.impl.dematerializers._
//
//class JsonCordDematerializationTests extends FunSuite with MustMatchers {
// test(""""ToJsonCordDematerializer must give a value for "hello"""") {
// val dematerialized = ToJsonCordDematerializer.getString("hello")
// println(dematerialized.manifestation.toString)
// (dematerialized.manifestation.toString) must equal("\\"hello\\"")
// }
//
// test("It must be possible to create a ToJsonCordWarpSequencer"){
// val riftWarp = RiftWarp.concurrentWithDefaults()
// implicit val hasWarpUnpackers = riftWarp.barracks
// implicit val toolShed = riftWarp.toolShed
// val sequencer = ToJsonCordWarpSequencer()
// }
//
// test("""ToJsonCordWarpSequencer give a value for "hello"""") {
// val riftWarp = RiftWarp.concurrentWithDefaults()
// implicit val hasWarpUnpackers = riftWarp.barracks
// implicit val toolShed = riftWarp.toolShed
// val sequencer = ToJsonCordWarpSequencer()
// val res = sequencer.addString("v", "hello").dematerialize.manifestation.toString
// res must equal("""{"v":"hello"}""")
// }
//
//} | chridou/almhirt | riftwarp/src/test/scala/riftwarp/JsonCordDematerializationTests.scala | Scala | apache-2.0 | 1,242 |
package org.openapitools.client.api
import argonaut._
import argonaut.EncodeJson._
import argonaut.DecodeJson._
import org.http4s.{EntityDecoder, EntityEncoder}
import org.http4s.argonaut._
import org.joda.time.DateTime
import StringParameterDefinition._
// Generated model for a Jenkins string build parameter definition.
// All fields are optional because the upstream JSON may omit any of them.
case class StringParameterDefinition (
  `class`: Option[String],
  defaultParameterValue: Option[StringParameterValue],
  description: Option[String],
  name: Option[String],
  `type`: Option[String])
object StringParameterDefinition {
  import DateTimeCodecs._
  // Argonaut JSON codec derived from the case class shape.
  implicit val StringParameterDefinitionCodecJson: CodecJson[StringParameterDefinition] = CodecJson.derive[StringParameterDefinition]
  // http4s entity decoder/encoder so the model can be read from / written to HTTP bodies.
  implicit val StringParameterDefinitionDecoder: EntityDecoder[StringParameterDefinition] = jsonOf[StringParameterDefinition]
  implicit val StringParameterDefinitionEncoder: EntityEncoder[StringParameterDefinition] = jsonEncoderOf[StringParameterDefinition]
}
| cliffano/swaggy-jenkins | clients/scalaz/generated/src/main/scala/org/openapitools/client/api/StringParameterDefinition.scala | Scala | mit | 912 |
// Copyright (C) Maxime MORGE 2017
package org.scaia.actor.coalition
import akka.actor.{Actor, ActorRef, Stash}
import org.scaia.actor._
import org.scaia.asia._
import org.scaia.solver.asia.{Egalitarian, SocialRule, Utilitarian}
/**
* Agent representing a coalition in the selective matching mechanism (at each integration some individuals can be excluded)
* @param a activity
* @param approximation true if only subgroups of size -1 are investigated
* @param rule to apply (maximize the utilitarian/egalitarian/nash welfare
* */
class SelectiveCoalitionAgent(a: Activity, approximation: Boolean, rule: SocialRule) extends CoalitionAgent(a, rule) with Stash{
  /**
   * The coalition agent is waiting for new proposals.
   *
   * On a Propose(i): if the current group is empty, i is accepted immediately;
   * otherwise all candidate subgroups of g+i are queried and the agent moves to
   * the casting state to wait for the replies. With approximation enabled only
   * subgroups of size |g| (or |g| and |g|+1 while capacity is not reached) are
   * investigated instead of all sizes from 1.
   */
  override def disposing(): Receive = {
    case Propose(i) =>
      adr += (i -> sender)
      if (debug) log.debug(s"${a.name} receives a proposal from $i")
      if (g.isEmpty) {
        // First member: accept without querying any subgroup.
        if (debug) log.debug(s"Since the current group of ${a.name} is empty $i is assigned to the activity ${a.name}")
        g += i
        sender ! Accept
        context.become(disposing())
      } else {
        if (a.c > g.size) {
          // Capacity not reached: candidate subgroups may also include i plus the whole current group.
          if (debug) log.debug(s"The capacity of ${a.name} is not reached")
          val waitingReplies = if (approximation) query(subgroups(g+i, g.size, g.size+1))
            else query(subgroups(g+i, 1, g.size+1))
          context.become(casting(i,waitingReplies))
        } else {
          // Capacity reached: some current member must be excluded to make room for i.
          if (debug) log.debug(s"The capacity of ${a.name} is reached")
          val waitingReplies = if (approximation) query(subgroups(g+i, g.size, g.size))
            else query(subgroups(g+i, 1, g.size))
          context.become(casting(i,waitingReplies))
        }
      }
    case Stop => context.stop(self)
    case Confirm => // Deprecated Confirm message: deliberately ignored
    // Fixed: the interpolator `s` was missing, so "${a.name}"/"$msg" were logged literally.
    case msg@_ => log.debug(s"${a.name} in state disposing receives a message which was not expected: $msg")
  }
}
package cakesolutions.kafka.examples
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor._
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor, Offsets}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import scala.concurrent.duration._
/**
* Simple Kafka Consumer using AutoPartition subscription mode with manual offset control, subscribing to topic: 'topic1'.
*
* If the topic is configured in Kafka with multiple partitions, this app can be started multiple times (potentially on separate nodes)
* and Kafka will balance the partitions to the instances providing parallel consumption of the topic.
*
* Kafka bootstrap server can be provided as an environment variable: -DKAFKA=127.0.0.1:9092 (default).
*/
// Entry point: loads the "consumer" section of application.conf and starts the example actor.
object AutoPartitionConsumerWithManualOffsetBoot extends App {
  AutoPartitionConsumerWithManualOffset(ConfigFactory.load().getConfig("consumer"))
}
object AutoPartitionConsumerWithManualOffset {

  /*
   * Starts an ActorSystem and instantiates the below Actor that subscribes and
   * consumes from the configured KafkaConsumerActor.
   */
  def apply(config: Config): ActorRef = {

    // Offsets are managed manually, so Kafka auto-commit is disabled.
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    // Schedule interval and unconfirmed-timeout for the consumer actor.
    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumerWithManualOffset(consumerConf, actorConf)))
  }
}
/**
 * Actor that subscribes to 'topic1' in AutoPartitionWithManualOffset mode and logs each record.
 *
 * Offsets for newly assigned partitions are supplied by `assignedListener`; this example always
 * restarts from offset 0 — a real application would load them from a persistent store.
 *
 * @param kafkaConfig configuration for the underlying Kafka consumer (String key/value)
 * @param actorConfig scheduling configuration for the KafkaConsumerActor
 */
class AutoPartitionConsumerWithManualOffset(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  // Extractor to safely pattern-match incoming ConsumerRecords of the expected key/value types.
  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartitionWithManualOffset(List("topic1"), assignedListener, revokedListener)

  override def receive: Receive = {

    // Records from Kafka: process, then confirm the offsets back to the consumer actor.
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets)
  }

  // Logs every received (key, value) pair.
  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }

  // Invoked when partitions are assigned; must return the offsets to resume consumption from.
  private def assignedListener(tps: List[TopicPartition]): Offsets = {
    log.info("Partitions have been assigned" + tps.toString())

    // Should load the offsets from a persistent store and any related state
    val offsetMap = tps.map{ tp =>
      tp -> 0L // was `0l`: the lowercase 'l' Long-literal suffix is deprecated and easily misread
    }.toMap

    // Return the required offsets for the assigned partitions
    Offsets(offsetMap)
  }

  // Invoked when partitions are revoked; opportunity to clear any partition-local state.
  private def revokedListener(tps: List[TopicPartition]): Unit = {
    log.info("Partitions have been revoked" + tps.toString())
    ()
  }
}
| cakesolutions/scala-kafka-client | examples/src/main/scala/cakesolutions/kafka/examples/AutoPartitionConsumerWithManualOffset.scala | Scala | mit | 3,224 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.runtime
package graph
import scala.concurrent.{ ExecutionContext, Future }
/**
 * A node whose work is performed at most once per RoundContext: `perform` memoizes the
 * Future produced by `doPerform` through the mixed-in CacheStrategy.
 */
trait Action[T] extends Node {
  self: CacheStrategy[RoundContext, Future[T]] =>

  // The actual (uncached) work for one round; implemented by subclasses.
  protected def doPerform(rc: RoundContext)(implicit ec: ExecutionContext): Future[T]

  // Returns the cached Future for this round, computing it via doPerform only on first call.
  final def perform(rc: RoundContext)(implicit ec: ExecutionContext): Future[T] = {
    getOrCache(rc, doPerform(rc))
  }
}
| ueshin/asakusafw-spark | runtime/src/main/scala/com/asakusafw/spark/runtime/graph/Action.scala | Scala | apache-2.0 | 1,010 |
package com.softwaremill.codebrag.eventstream
import com.typesafe.scalalogging.slf4j.Logging
import akka.actor.Actor
import com.softwaremill.codebrag.common.StatisticEvent
import com.softwaremill.codebrag.dao.events.EventDAO
/** Actor that persists every incoming StatisticEvent through the given DAO. */
class StatisticEventsCollector(val eventDao: EventDAO) extends Actor with Logging {

  def receive = {
    // Any statistic event is simply stored; other messages are ignored (dead-lettered).
    case event: StatisticEvent =>
      eventDao.storeEvent(event)
  }
}
package es.udc.graph
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.linalg.Vectors
import es.udc.graph.utils.GraphUtils
import org.apache.spark.HashPartitioner
import breeze.linalg.{DenseVector => BDV}
/**
 * Helpers for merging k-NN (sub)graphs represented as RDDs of (nodeId, neighbors).
 */
object GraphMerger extends Serializable
{
  /**
   * Merges two neighbor graphs by unioning them and reducing per node.
   * A null argument is treated as an absent graph: the other one is returned unchanged.
   */
  def mergeGraphs(g1:RDD[(Long, NeighborsForElement)], g2:RDD[(Long, NeighborsForElement)], numNeighbors:Int, measurer:DistanceProvider):RDD[(Long, NeighborsForElement)]=
  {
    if (g1==null) return g2
    if (g2==null) return g1
    return g1.union(g2).reduceByKey(NeighborsForElement.merge(_, _))
  }
  /**
   * Merges two per-group neighbor lists, deduplicating and keeping at most numNeighbors
   * closest entries (smallest distance) per group.
   *
   * NOTE(review): iteration is driven by the non-empty side (`src`); groups that exist only
   * in `dest` are dropped when both inputs are non-empty — confirm this is intentional.
   */
  def mergeGroupedNeighbors(groupedNeighbors1:List[(Int,List[(Long,Double)])], groupedNeighbors2:List[(Int,List[(Long,Double)])], numNeighbors:Int):List[(Int, List[(Long, Double)])]=
  {
    val src=if (!groupedNeighbors1.isEmpty) groupedNeighbors1 else groupedNeighbors2
    val dest=if (!groupedNeighbors1.isEmpty) groupedNeighbors2 else groupedNeighbors1
    val mapNeighbors2=dest.toMap
    src.map({case (grId1, l1) =>
        val newList:List[(Long,Double)]=(if (mapNeighbors2.contains(grId1))
                                          l1 ++ mapNeighbors2.get(grId1).get
                                        else
                                          l1).toSet.toList
        if (newList.size<=numNeighbors)
          (grId1, newList)
        else
          (grId1, newList.sortBy(_._2).take(numNeighbors))
        })
  }
}
object LSHKNNGraphBuilder
{
  // Initial LSH radius used when the caller does not supply one.
  val DEFAULT_RADIUS_START=0.1
}
abstract class LSHKNNGraphBuilder extends GraphBuilder
{
  /**
   * Builds an approximate k-NN graph with per-group neighbor lists using iterative LSH.
   *
   * Main loop: hash all remaining points at the current radius, brute-force distances inside
   * each bucket (and inside "separable" buckets that pair points requesting a group with points
   * of that group), merge the partial graphs, drop points whose neighbor lists are complete,
   * double the radius, and repeat. A final recovery phase re-examines points that never filled
   * all their groups by comparing them against neighbors-of-neighbors.
   *
   * @param data                  (id, point) dataset
   * @param numNeighbors          k neighbors wanted per group
   * @param hasher                LSH family used to bucket points
   * @param startRadius           initial radius (DEFAULT_RADIUS_START when None)
   * @param maxComparisonsPerItem optional cap that lets a point be retired early
   * @param measurer              distance function
   * @param grouper               assigns each point to a group id
   * @return per-node grouped neighbor lists
   */
  final def computeGroupedGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, hasher:Hasher, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElement)]=
  {
    var fullGraph:RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=null //(node, (viewed, List[(groupingId,List[(neighbor,distance))]]))
    var currentData:RDD[(Long,(LabeledPoint,Iterable[Int]))]=data.map({case (id, point) => (id,(point,List[Int]()))}) //Adding "special requests" list
    var radius=startRadius.getOrElse(LSHKNNGraphBuilder.DEFAULT_RADIUS_START)//5//0.1
    val totalElements=currentData.count()
    val bfOps:Double=totalElements*(totalElements-1)/2.0
    var totalOps:Long=0
    var numBuckets:Long=2
    var allElementsInSingleBucket=false
    var nodesLeft=currentData.count()
    val bHasher=data.sparkContext.broadcast(hasher)
    println(f"Starting $numNeighbors%d-NN graph computation for $nodesLeft%d nodes")
    println(f"\\t R0=$radius%g")
    println(f"\\t cMAX=${maxComparisonsPerItem.getOrElse("auto")}%s\\n")
    //while(!currentData.isEmpty())
    //while(nodesLeft>numNeighbors)
    //while((numBuckets>1 || nodesLeft>numNeighbors) && nodesLeft>1)
    while(nodesLeft>numNeighbors && nodesLeft>1 && !allElementsInSingleBucket)
    {
      //Maps each element to numTables (hash, index) pairs with hashes of keyLength length.
      val hashRDD=(if (grouper.numGroups==1) //Special case for single group so that the number of hashes is not doubled and no useless separable buckets are created.
                    currentData.flatMap({case (index, (point, specialRequests)) =>
                      val hashes=bHasher.value.getHashes(point.features, index, radius)
                      val grId=grouper.getGroupId(point)
                      hashes.map({case (h,id) => (h.concat(new Hash(Array[Int](-1))),(id,grId,false))})
                    })
                  else
                    currentData.flatMap({case (index, (point, specialRequests)) =>
                      val hashes=bHasher.value.getHashes(point.features, index, radius)
                      val grId=grouper.getGroupId(point)
                      if (specialRequests.isEmpty)
                        hashes.flatMap({case (h,id) => List[(Hash,(Long,Int,Boolean))]((h.concat(new Hash(Array[Int](-1))),(id,grId,false)),(h.concat(new Hash(Array[Int](grouper.getGroupId(point)))),(id,grId,false)))})
                      else
                        hashes.flatMap({case (h,id) => specialRequests.map({case request => (h.concat(new Hash(Array[Int](request))),(id,grId,request==grId))}) ++ List[(Hash,(Long,Int,Boolean))]((h.concat(new Hash(Array[Int](grouper.getGroupId(point)))),(id,grId,false)))})
                    })
                  ).coalesce(data.getNumPartitions)
      //TODO Should all distances be computed? Maybe there's no point in computing them if we still don't have enough neighbors for an example
      //Should they be stored/cached? It may be enough to store a boolean that records if they have been computed. LRU Cache?
      //Groups elements mapped to the same hash
      var hashBuckets:RDD[(Hash, Iterable[Long], Int)]=hashRDD.filter({case (h,(id,grId,searchesForSelfClass)) => h.values(h.values.size-1)<0})
                                                              .map({case (h,(id,grId,searchesForSelfClass)) => (h,id)})
                                                              .groupByKey()
                                                              .map({case (k, l) => (k, l.toSet)})
                                                              .flatMap({case (k, s) => if (s.size>1) Some((k, s, s.size)) else None})
      hashBuckets=hashBuckets.coalesce(data.getNumPartitions)
      val hashBucketsNotEmpty=(!hashBuckets.isEmpty())
      if (hashBucketsNotEmpty)
      {
        numBuckets=hashBuckets.count()
        val stepOps=hashBuckets.map({case (h,s,n) => (n,1)})
                             .reduceByKey(_+_)
        val numStepOps=stepOps.map({case x => x._2 * x._1 * (x._1 - 1) /2.0}).sum().toLong
        val largestBucketSize=stepOps.map(_._1).max
        allElementsInSingleBucket=largestBucketSize==nodesLeft
        totalOps=totalOps+numStepOps
        println(f"Performing $numStepOps%g ops (largest bucket has $largestBucketSize%d elements)")
        //println(hashBuckets.reduce({case ((h1,s1,n1),(h2,s2,n2)) => if (n1>n2) (h1,s1,n1) else (h2,s2,n2)})._1.values.map(_.toString()).mkString("|"))
        /*DEBUG
        stepOps.sortBy(_._1)
             .foreach({case x => println(x._2+" buckets with "+x._1+" elements => "+(x._2 * x._1 * (x._1-1)/2.0)+" ops")})*/
        //println("Changed "+prevStepOps+"ops to "+postStepOps+"ops ("+(postStepOps/prevStepOps)+")")
        //TODO Evaluate bucket size and increase/decrease radius without bruteforcing if necessary.
        var subgraph=getGroupedGraphFromBuckets(data, hashBuckets, numNeighbors, measurer, grouper).coalesce(data.getNumPartitions)
        fullGraph=GraphBuilder.mergeSubgraphs(fullGraph, subgraph, numNeighbors, measurer).coalesce(data.getNumPartitions)
      }
      else //DEBUG
        println("No hash buckets created")
      //Separable buckets
      //println((currentData.first()._1,currentData.first()._2._2))
      //val interestID=currentData.first()._1
      //Groups elements mapped to the same hash
      var hashSeparableBuckets:RDD[(Hash, Iterable[Long], Iterable[Long])]=hashRDD.filter({case (h,(id,grId,searchesForSelfClass)) => h.values(h.values.size-1)>=0})
                                                            .map({case (h,(id,grId,searchesForSelfClass)) =>
                                                                        val requestedId=h.values(h.values.size-1)
                                                                        val leftPart=if ((requestedId!=grId) || searchesForSelfClass) List[Long](id) else List[Long]()
                                                                        val rightPart=if (requestedId==grId) List[Long](id) else List[Long]()
                                                                        (h,(leftPart,rightPart))})
                                                            .reduceByKey({case ((y1,n1),(y2,n2)) => (y1++y2,n1++n2)})
                                                            .map({case (k, (y, n)) => (k, (y.toSet, n.toSet))})
                                                            .flatMap({case (k, (y, n)) =>
                                                                        if ((y.size>0) && (n.size>0))
                                                                        {
                                                                          //if (y.contains(interestID) || n.contains(interestID))
                                                                          //  println("Kept "+k.values(k.values.size-1)+" ["+y.map(_.toString).mkString(",")+"]"+"["+n.map(_.toString).mkString(",")+"]")
                                                                          Some((k, y, n))
                                                                        }
                                                                        else
                                                                        {
                                                                          //if (y.contains(interestID) || n.contains(interestID))
                                                                          //  println("Removed "+k.values(k.values.size-1)+" ["+y.map(_.toString).mkString(",")+"]"+"["+n.map(_.toString).mkString(",")+"]")
                                                                          None
                                                                        }})
      val hashSeparableBucketsNotEmpty=(!hashSeparableBuckets.isEmpty())
      if (hashSeparableBucketsNotEmpty)
      {
        numBuckets=hashSeparableBuckets.count()
        val stepOps=hashSeparableBuckets.map({case (h,y,n) => (y.size*n.size,1)})
                             .reduceByKey(_+_)
        val numStepOps=stepOps.map({case x => x._2 * x._1 }).sum().toLong
        val largestBucketSize=stepOps.map(_._1).max
        totalOps=totalOps+numStepOps
        println(f"Performing $numStepOps%g ops (largest separable bucket needs $largestBucketSize%d ops)")
        //println(hashBuckets.reduce({case ((h1,s1,n1),(h2,s2,n2)) => if (n1>n2) (h1,s1,n1) else (h2,s2,n2)})._1.values.map(_.toString()).mkString("|"))
        /*DEBUG
        stepOps.sortBy(_._1)
             .foreach({case x => println(x._2+" buckets with "+x._1+" elements => "+(x._2 * x._1 * (x._1-1)/2.0)+" ops")})*/
        //println("Changed "+prevStepOps+"ops to "+postStepOps+"ops ("+(postStepOps/prevStepOps)+")")
        //TODO Evaluate bucket size and increase/decrease radius without bruteforcing if necessary.
        var subgraph=getGroupedGraphFromPartitionedBuckets(data, hashSeparableBuckets, numNeighbors, measurer, grouper).coalesce(data.getNumPartitions)
        //println(f"Obtained ${subgraph.count()} nodes with links")
        //DEBUG
        /*val retrieved=subgraph.take(5)
        retrieved.foreach(println)
        println("----------------")
        for (i<-retrieved.map(_._1))
          fullGraph.filter(_._1==i).foreach(println)
        */
        /*val ofInterest=List[Long](subgraph.first()._1) ++ subgraph.first()._2._2.flatMap(_._2.map(_._1).toSet.toList)
        println(subgraph.first())
        println("################")
        val firstID=subgraph.first()._1
        currentData.filter(_._1==firstID).map({case (id,(p,r)) => (id,grouper.getGroupId(p),r)}).foreach(println)
        println("||||||||||||||||")
        for (i<-ofInterest)
        {
          currentData.filter(_._1==i).map({case (id,(p,r)) => (id,grouper.getGroupId(p),r)}).foreach(println)
          subgraph.filter(_._1==i).foreach(println)
        }
        println("----------------")
        for (i<-ofInterest)
          fullGraph.filter(_._1==i).foreach(println)*/
        fullGraph=GraphBuilder.mergeSubgraphs(fullGraph, subgraph, numNeighbors, measurer).coalesce(data.getNumPartitions)
        /*println("----------------")
        for (i<-retrieved.map(_._1))
          fullGraph.filter(_._1==i).foreach(println)
        for (i<-ofInterest)
          fullGraph.filter(_._1==i).foreach(println)*/
      }
      else //DEBUG
        println("No separable hash buckets created")
      if (hashBucketsNotEmpty || hashSeparableBucketsNotEmpty)
      {
        //Simplify dataset
        val newData=simplifyDataset(currentData, fullGraph, numNeighbors, maxComparisonsPerItem, grouper)
        currentData.unpersist(false)
        currentData=newData.coalesce(data.getNumPartitions)
        currentData.cache()
      }
      //DEBUG
      /*println("$$$$$$$$$$$$$$$$$$$$$$$$")
      currentData.flatMap({case (id,(p,r)) =>
                              val grId=grouper.getGroupId(p)
                              r.map({case req => ((grId,req),1)})})
                  .reduceByKey(_+_)
                  .sortBy(_._2)
                  .foreach(println)
      */
      //Remove elements that need a grId that is no longer in the data.
      val existingGrIds=currentData.map({case (id,(p,r)) => grouper.getGroupId(p)}).distinct.collect()
      currentData=currentData.map({case (id,(p,r)) =>
                                      val numElems=r.size
                                      (id,(p,r.filter(existingGrIds.contains(_)),numElems))})
                              .filter({case (id,(p,r,c)) => (r.size>0) || (c==0)})
                              .map({case (id,(p,r,c)) => (id,(p,r))})
      //else
      //  radius*=2
      radius*=2
      nodesLeft=currentData.count()
      //DEBUG
      println(" ----- "+nodesLeft+" nodes left ("+currentData.filter({case (id,(p,r)) => !r.isEmpty}).count()+" with at least one group complete). Radius:"+radius)
      //currentData.map({case (id,(p,r)) => (r.size,1)}).reduceByKey(_+_).foreach(println)
      //fullGraph.foreach(println(_))
    }
    if (fullGraph!=null)
    {
      val incomplete=fullGraph.filter({case (id,neighs) => neighs.numberOfGroupsWithAtLeastKElements(numNeighbors)<grouper.numGroups})
                              .map({case (id,neighs) =>
                                     val incompleteGroups=grouper.getGroupIdList()
                                                                 .flatMap({case grId =>
                                                                            val neighsOfGroup=neighs.neighborsOfGroup(grId)
                                                                            if (neighsOfGroup.size<numNeighbors)
                                                                              Some(grId)
                                                                            else
                                                                              None})
                                     (id,incompleteGroups)
                                     })
      if (!incomplete.isEmpty())
      {
        //DEBUG
        //incomplete.map({case (id,(viewed,groupSizes)) => (groupSizes.sum,1)}).reduceByKey(_+_).sortBy(_._1).foreach(println)
        //incomplete.take(100).foreach({case (id,(viewed,neighborCounts)) => println(id+" -> "+viewed+" views ("+neighborCounts.mkString(";")+")")})
        println("Recovering "+incomplete.count()+" nodes that didn't have all neighbors")
        currentData=currentData.union(incomplete.join(data).map({case (id,(requests,p)) => (id,(p,requests))})).coalesce(data.getNumPartitions)
      }
    }
    if (nodesLeft>0)
    {
      //println("Elements left:"+currentData.map(_._1).collect().mkString(", "))
      if (fullGraph!=null)
      {
        /*
        //If there are any items left, look in the neighbor's neighbors.
        val neighbors:RDD[Iterable[Long]]=fullGraph.cartesian(currentData)
                 .flatMap(
                     {
                       case ((dest, (viewed,groupedNeig)), (orig,point)) =>
                         groupedNeig.flatMap(
                             {
                               case (grId,neig) =>
                                  if (neig.map(_._1).contains(orig))
                                    Some((orig,(dest :: neig.map(_._1)).toSet))
                                  else
                                    None
                             }
                             )
                     })
                 .reduceByKey({case (dl1,dl2) => dl1 ++ dl2})
                 .map({case (o,dl) => dl + o})
        */
        //The remaining points are grouped and collected.
        val remainingData=currentData.map({case (id,(point,requests)) => (grouper.getGroupId(point),List((id,requests)))}).reduceByKey(_++_).collect()
        val bRemainingData=sparkContextSingleton.getInstance().broadcast(remainingData)
        //Get all nodes pointing to a remaining point
        val reverseNeighborPairs:RDD[(Long,Long)]=fullGraph.flatMap(
               {
                 case (dest, neighs) =>
                   val rData=bRemainingData.value
                   val rDataMap=rData.toMap
                   //Each neighbor group that was already in the graph is examined
                   neighs.groupedNeighborLists.flatMap(
                       {
                         case (grId,neig) =>
                           //If we have remaining datapoints of that group
                           if (rDataMap.contains(grId))
                             rDataMap(grId).flatMap(
                                 {
                                   //Each datapoint of that group
                                   case (idOrig,requestsOrig) =>
                                     //If it is one of the neighbors for dest in the graph
                                     if (neig.listNeighbors.map(_.index).contains(idOrig))
                                     {
                                       val srcList=if (requestsOrig.isEmpty) //Any neighbor will do, so all are selected
                                                     grouper.getGroupIdList()
                                                   else
                                                     requestsOrig
                                       srcList.map({case reqId => (idOrig,(dest :: neighs.neighborsOfGroup(reqId).get.listNeighbors.map(_.index).filter(_!=idOrig)).toSet)})
                                     }
                                     else
                                       None
                                 })
                           else
                             None
                       }
                       )
               })
               .reduceByKey({case (dl1,dl2) => dl1 ++ dl2})
               .flatMap({case (o,dl) => dl.map((o,_))})
        val pairsWithNewNeighbors:RDD[(Long, Long)]=currentData
                 .join(fullGraph) //Joined with currentData to restrict this to only elements in the current dataset.
                 .flatMap({case (id,(point,groupedNeighs)) => groupedNeighs.groupedNeighborLists.flatMap({case (grId,neighs) => neighs.listNeighbors.flatMap({case dest => Some((dest.index,id))})})})
                 .groupByKey()
                 .join(fullGraph)
                 .flatMap(
                     {
                       case (via,(ids,groupedNeighs)) =>
                         groupedNeighs.groupedNeighborLists.flatMap({case (grId,neighs) => neighs.listNeighbors.flatMap({case dest => ids.flatMap({case id=>if (id!=dest.index) Some((id,dest.index)) else None})})})
                     })
        val totalPairs=pairsWithNewNeighbors
                        .union(reverseNeighborPairs)
                        .map({case (x,y) => if (x<y) (x,y) else (y,x)})
                        .groupByKey().flatMap({case (d, neighs) => neighs.toSet.toArray.map({case x => (d, x)})})
        val newOps=totalPairs.count()
        println("Performing "+newOps+" additional comparisons")
        totalOps=totalOps+newOps.toLong
        var subgraph=getGroupedGraphFromIndexPairs(data,
                                            totalPairs,
                                            numNeighbors,
                                            measurer,
                                            grouper)
        if (!subgraph.isEmpty())
        {
          //subgraph.foreach(println(_))
          fullGraph=GraphBuilder.mergeSubgraphs(fullGraph, subgraph, numNeighbors, measurer)
          currentData=simplifyDataset(currentData, fullGraph, numNeighbors, maxComparisonsPerItem, grouper)
          nodesLeft=currentData.count()
        }
        //println(nodesLeft+" nodes left after first attempt")
      }
      /*if (nodesLeft>0) //No solution other than to check these points with every other
      {
        val pairs=currentData.cartesian(fullGraph.map({case (point, neighbors) => point}))
        val subgraph=getGroupedGraphFromPairs(data, pairs, numNeighbors, measurer, grouper)
        fullGraph=mergeSubgraphs(fullGraph, subgraph, numNeighbors, measurer)
        totalOps=totalOps+pairs.count()
      }*/
    }
    println(s"Operations wrt bruteforce: ${totalOps/bfOps} "+f"($totalOps%d total ops / ${bfOps.toLong}%d)")
    //println((totalOps/bfOps)+"#")
    return fullGraph.map({case (node, neighs) => (node,neighs.asInstanceOf[GroupedNeighborsForElement])}).coalesce(data.getNumPartitions)
  }
  /**
   * Convenience overload: derives a hasher (and a suggested starting radius) from the dataset.
   * cMax defaults to max(128, 10*k) when no per-item comparison cap is given.
   */
  def computeGroupedGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElement)]=
  {
    val cMax=if (maxComparisonsPerItem.isDefined) math.max(maxComparisonsPerItem.get,numNeighbors) else math.max(128,10*numNeighbors)
    val factor=2.0
    val (hasher,nComps,suggestedRadius)=EuclideanLSHasher.getHasherForDataset(data, (factor*cMax).toInt)
    return computeGroupedGraph(data, numNeighbors, hasher, Some(startRadius.getOrElse(suggestedRadius)), Some(cMax.toInt), measurer, grouper)
  }
  /** Convenience overload: builds a Euclidean hasher with explicit key length and table count. */
  def computeGroupedGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, keyLength:Int, numTables:Int, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElement)]=
    computeGroupedGraph(data, numNeighbors, new EuclideanLSHasher(data.map({case (index, point) => point.features.size}).max(), keyLength, numTables), startRadius, maxComparisonsPerItem, measurer, grouper)
  /** Ungrouped variant: derives a hasher from the dataset and delegates to the grouped version. */
  def computeGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider):RDD[(Long, NeighborsForElement)]=
  {
    val cMax=if (maxComparisonsPerItem.isDefined) math.max(maxComparisonsPerItem.get,numNeighbors) else math.max(128,10*numNeighbors)
    val factor=2.0
    val (hasher,nComps,suggestedRadius)=EuclideanLSHasher.getHasherForDataset(data, (factor*cMax).toInt)
    return computeGraph(data, numNeighbors, hasher, Some(startRadius.getOrElse(suggestedRadius)), Some(cMax.toInt), measurer)
  }
  /**
   * Ungrouped variant with an explicit hasher: runs the grouped computation with a dummy
   * (single-group) grouper and unwraps the single neighbor list per node.
   */
  def computeGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, hasher:Hasher, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider):RDD[(Long, NeighborsForElement)]=
  {
    val graph=computeGroupedGraph(data, numNeighbors, hasher, startRadius, maxComparisonsPerItem, measurer, new DummyGroupingProvider())
    return graph.map(
        {
          case (index, groupedNeighs) => (index, groupedNeighs.groupedNeighborLists.head._2)
        })
  }
  /** Ungrouped variant with explicit hasher key length and table count. */
  def computeGraph(data:RDD[(Long,LabeledPoint)], numNeighbors:Int, hasherKeyLength:Int, hasherNumTables:Int, startRadius:Option[Double], maxComparisonsPerItem:Option[Int], measurer:DistanceProvider):RDD[(Long, NeighborsForElement)]
      =computeGraph(data,
              numNeighbors,
              new EuclideanLSHasher(data.map({case (index, point) => point.features.size}).max(), hasherKeyLength, hasherNumTables),//Get dimension from dataset
              startRadius,
              maxComparisonsPerItem,
              measurer)
  // Re-hashes oversized buckets with longer keys until each is below maxBucketSize.
  protected def splitLargeBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Int)], maxBucketSize:Int, radius:Double, hasher:Hasher):RDD[(Hash, Iterable[Long], Int)]
  // Brute-forces all pairwise distances inside each bucket, producing per-group neighbor lists.
  protected def getGroupedGraphFromBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Int)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]
  // Same but for separable buckets: compares every element of one side against every element of the other.
  protected def getGroupedGraphFromPartitionedBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Iterable[Long])], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]
  // Builds partial graphs from explicit lists of element indices.
  protected def getGroupedGraphFromElementIndexLists(data:RDD[(Long,LabeledPoint)], elementIndexLists:RDD[Iterable[Long]], numNeighbors:Int, measurer:DistanceProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]
  // Builds partial graphs from explicit ((id, point), id) candidate pairs.
  protected def getGroupedGraphFromPairs(data:RDD[(Long,LabeledPoint)], pairs:RDD[((Long, LabeledPoint), Long)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]
  //protected def getGroupedGraphFromIndexPairs(data:RDD[(Long,LabeledPoint)], pairs:RDD[(Long, Long)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, (BDV[Int],List[(Int,List[(Long, Double)])]))]
  /**
   * Removes from the working dataset every element whose neighbor lists are considered done,
   * and attaches to the survivors the list of group ids they still need ("special requests").
   *
   * With a comparison cap: an element is dropped once at least one of its groups is full AND
   * every group has seen at least maxComparisonsPerItem comparisons; otherwise it survives with
   * the ids of the under-compared groups. Without a cap: an element is dropped only when every
   * group already has its k neighbors.
   */
  private def simplifyDataset(dataset:RDD[(Long,(LabeledPoint,Iterable[Int]))], currentGraph:RDD[(Long, GroupedNeighborsForElementWithComparisonCount)], numNeighbors:Int, maxComparisonsPerItem:Option[Int], grouper:GroupingProvider):RDD[(Long, (LabeledPoint, Iterable[Int]))]=
  {
    val requestsByNodes:RDD[(Long,Option[Iterable[Int]])]=if (maxComparisonsPerItem.isDefined)
                            currentGraph.map({case (index, neighs) =>
                                                      if (neighs.numberOfGroupsWithAtLeastKElements(numNeighbors)>0) //There's at least one full
                                                      {
                                                        val incompleteGroups=neighs.getIdsOfGroupsWithLessThanKComparisons(maxComparisonsPerItem.get)
                                                        (index,if (incompleteGroups.isEmpty) None else Some(incompleteGroups)) //None indicates that it should be removed.
                                                      }
                                                      else
                                                        (index,Some(List[Int]()))
                                                        })
                          else//Remove only elements that already have all their neighbors
                            currentGraph.map({case (index, neighs) =>
                                                      val incompleteGroups=neighs.getIdsOfIncompleteGroups()
                                                      (index,if (incompleteGroups.isEmpty) None else Some(incompleteGroups)) //None indicates that it should be removed.
                                                      })
    return dataset.leftOuterJoin(requestsByNodes).flatMap({case (index, (neighbors1, requestsOption)) =>
                                                                if (!requestsOption.isDefined)
                                                                  Some((index, (neighbors1._1, List[Int]())))
                                                                else
                                                                {
                                                                  val requests=requestsOption.get
                                                                  if (requests.isDefined)
                                                                    Some((index, (neighbors1._1, requests.get)))
                                                                  else
                                                                    None
                                                                }
                                                              })
    //TODO More advanced simplifications can be done, such as removing only elements that are very "surrounded" (i.e. they landed in various large buckets)
    /*val completeNodes=if (maxComparisonsPerItem.isDefined)
                            currentGraph.filter({case (index, (viewed,groupedList)) => (viewed>maxComparisonsPerItem.get) && (groupedList.forall({case (grId,list) => list.toSet.size>=1.0}) && (groupedList.size>=grouper.numGroups))})
                          else//Remove only elements that already have all their neighbors
                            currentGraph.filter({case (index, (viewed,groupedList)) => groupedList.forall({case (grId,list) => list.toSet.size>=numNeighbors})})
    val deletableElements=completeNodes
    //Remove deletable elements from dataset
    return dataset.leftOuterJoin(deletableElements).flatMap({case (index, (neighbors1, n)) =>
                                                                if (n==None)
                                                                  Some((index, (neighbors1._1, neighbors1._2)))
                                                                else
                                                                  None
                                                              })*/
  }
}
class LSHLookupKNNGraphBuilder(data:RDD[(Long,LabeledPoint)]) extends LSHKNNGraphBuilder
{
var lookup:BroadcastLookupProvider=new BroadcastLookupProvider(data)
//TEST - Trying to broadcast a very large variable and getting memory errors. Testing this solution now.
//var lookup:SplittedBroadcastLookupProvider=new SplittedBroadcastLookupProvider(data)
  /**
   * Splits buckets larger than maxBucketSize by concatenating an additional hash to their key,
   * then regroups and keeps only buckets with more than one element.
   */
  override def splitLargeBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Int)], maxBucketSize:Int, radius:Double, hasher:Hasher):RDD[(Hash, Iterable[Long], Int)]=
  {
    val l=lookup
    hashBuckets.flatMap({case (k, s, n) => s.map({ x => (k,x,n) })})
               .flatMap({case(k, x, bucketSize) => if (bucketSize<maxBucketSize) Some((k,x))
                                                   else hasher.getHashes(l.lookup(x).features, x, radius).map({case (nk,i) => (k.concat(nk),i)})}) //Concat new key
               .groupByKey()
               .map({case (k, l) => (k, l.toSet)})
               .flatMap({case (k, s) => if (s.size>1) Some((k, s, s.size)) else None})
  }
  /**
   * Brute-forces distances inside each multi-element bucket (via the broadcast lookup) and
   * merges the per-node grouped neighbor lists obtained across different buckets.
   */
  override def getGroupedGraphFromBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Int)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=
  {
    val l=lookup
    //Discard single element hashes and for the rest get every possible pairing to build graph
    val graph=hashBuckets.filter(_._2.size>1)
             //TODO Possibly repartition after filter
             //.repartition
             .flatMap({case (hash, indices, size) =>
                         //Remove duplicates from indices
                         val arrayIndices=indices.toSet.toArray
                         if (arrayIndices.length>1)
                           BruteForceKNNGraphBuilder.computeGroupedGraph(arrayIndices, l, numNeighbors, measurer, grouper)
                         else
                           Nil
                         })
             //Merge neighbors found for the same element in different hash buckets
             .reduceByKey({case (groupedNeighbors1, groupedNeighbors2) =>
                             groupedNeighbors1.addElements(groupedNeighbors2)
                             groupedNeighbors1
                           })
             .partitionBy(data.partitioner.getOrElse(new HashPartitioner(data.getNumPartitions)))
    graph
  }
  /**
   * For each separable bucket, compares every element of the left side against every element of
   * the right side (cross product, not all pairs) and merges the results per node.
   */
  override def getGroupedGraphFromPartitionedBuckets(data:RDD[(Long,LabeledPoint)], hashBuckets:RDD[(Hash, Iterable[Long], Iterable[Long])], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=
  {
    val l=lookup
    //Discard single element hashes and for the rest get every possible pairing to build graph
    val graph=hashBuckets.flatMap({case (hash, l1, l2) =>
                                      BruteForceKNNGraphBuilder.computeGroupedGraph(l1, l2, l, numNeighbors, measurer, grouper)
                         })
             //Merge neighbors found for the same element in different hash buckets
             .reduceByKey({case (neighs1, neighs2) => neighs1.addElements(neighs2)
                                                      neighs1
                           })
             .partitionBy(data.partitioner.getOrElse(new HashPartitioner(data.getNumPartitions)))
    graph
  }
  /**
   * Brute-forces distances inside each explicit index list (deduplicated) and merges the
   * per-node grouped neighbor lists across lists.
   */
  override def getGroupedGraphFromElementIndexLists(data:RDD[(Long,LabeledPoint)], elementIndexLists:RDD[Iterable[Long]], numNeighbors:Int, measurer:DistanceProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=
  {
    val l=lookup
    //Discard single element hashes and for the rest get every possible pairing to build graph
    val graph=elementIndexLists.filter(_.size>1)
             //TODO Possibly repartition after filter
             //.repartition
             .flatMap({case (indices) =>
                         //Remove duplicates from indices
                         val arrayIndices=indices.toSet.toArray
                         if (arrayIndices.length>1)
                           BruteForceKNNGraphBuilder.computeGroupedGraph(arrayIndices, l, numNeighbors, measurer)
                         else
                           Nil
                         })
             //Merge neighbors found for the same element in different hash buckets
             .reduceByKey({case (groupedNeighbors1,groupedNeighbors2) =>
                             groupedNeighbors1.addElements(groupedNeighbors2)
                             groupedNeighbors1
                           })
    graph
  }
/** Builds a grouped k-NN graph from pre-paired candidates: each input record
  * carries an element (id plus point) and the id of one candidate neighbor.
  * Distances are computed once per pair, neighbor lists are first merged per
  * (element, neighborGroup) key and then folded into one grouped structure
  * per element id.
  */
override def getGroupedGraphFromPairs(data:RDD[(Long,LabeledPoint)], pairs:RDD[((Long, LabeledPoint), Long)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=
{
  val l=lookup
  //Discard single element hashes and for the rest get every possible pairing to build graph
  // Key each single-candidate neighbor list by (elementId, groupId of the candidate).
  val graph=pairs.map(
    {
      case ((i1,p1),i2) =>
        val p2=l.lookup(i2)
        val newN=new NeighborsForElementWithComparisonCount(numNeighbors)
        newN.addElement(i2, measurer.getDistance(p1, p2))
        ((i1,grouper.getGroupId(p2)), newN)
    })
    //Merge neighbors found for the same element in different hash buckets
    .reduceByKey({case (neighs1, neighs2) =>
      neighs1.addElements(neighs2)
      neighs1
    })
    // Wrap each per-group neighbor list into a grouped structure keyed by element id only.
    .map(
    {
      case ((i1,grId2),neighborList) =>
        val grNeighs=GroupedNeighborsForElementWithComparisonCount.newEmpty(grouper.getGroupIdList(),numNeighbors)
        grNeighs.addElementsOfGroup(grId2, neighborList, neighborList.comparisons)
        (i1,grNeighs)
    }
    )
    // Merge the grouped lists of the same element coming from different neighbor groups.
    .reduceByKey(
    {
      case (neighs1,neighs2) =>
        neighs1.addElements(neighs2)
        neighs1
    }
    )
  graph
}
/** Builds a grouped k-NN graph from bare index pairs. For each candidate pair
  * the distance is computed once and a grouped neighbor entry is emitted in
  * both directions, keyed by (elementId, groupId of the other element); the
  * entries are then merged per key and finally per element id.
  */
override def getGroupedGraphFromIndexPairs(data:RDD[(Long,LabeledPoint)], pairs:RDD[(Long, Long)], numNeighbors:Int, measurer:DistanceProvider, grouper:GroupingProvider):RDD[(Long, GroupedNeighborsForElementWithComparisonCount)]=
{
  val l=lookup
  //Discard single element hashes and for the rest get every possible pairing to build graph
  val graph=pairs.flatMap({case (i1,i2) =>
      // val instead of var: the looked-up points are never reassigned.
      val p1=l.lookup(i1)
      val p2=l.lookup(i2)
      val d=measurer.getDistance(p1, p2)
      val grN1=GroupedNeighborsForElementWithComparisonCount.newEmpty(grouper.getGroupIdList(),numNeighbors)
      grN1.addElementOfGroup(grouper.getGroupId(p2), i2, d)
      val grN2=GroupedNeighborsForElementWithComparisonCount.newEmpty(grouper.getGroupIdList(),numNeighbors)
      grN2.addElementOfGroup(grouper.getGroupId(p1), i1, d)
      // Emit the pair symmetrically so both endpoints learn about each other.
      List[((Long,Int),GroupedNeighborsForElementWithComparisonCount)](((i1,grouper.getGroupId(p2)), grN1),((i2,grouper.getGroupId(p1)), grN2))
    })
    //Merge neighbors found for the same element in different hash buckets
    .reduceByKey({case (neigh1, neigh2) =>
      neigh1.addElements(neigh2)
      neigh1
    })
    // Drop the group component of the key so entries can be merged per element.
    .map(
    {
      case ((i1,grId2),neighs) => (i1,neighs)
    }
    )
    .reduceByKey(
    {
      case (neighs1, neighs2) =>
        neighs1.addElements(neighs2)
        neighs1
    }
    )
  graph
}
}
/*object LSHGraphXKNNGraphBuilder// extends LSHKNNGraphBuilder
{
def getGraph(data:RDD[(LabeledPoint,Long)], numNeighbors:Int, dimension:Int)=
{
val radius=0.5
val hasher=new EuclideanLSHasher(dimension)
val hashRDD=data.flatMap({case (point, index) =>
hasher.getHashes(point.features, index, radius)
});
val hashBuckets=hashRDD.groupByKey()
val closeEdges=hashBuckets.filter(_._2.size>1)
//.repartition
.flatMap({case (hash, indices) =>
//Remove duplicates from indices
val arrayIndices=indices.toSet.toArray
if (arrayIndices.length>1)
{
var list:List[Pair[Long, Long]]=List()
//Cartesian product
for (i <- 0 until arrayIndices.length)
for (j <- i+1 until arrayIndices.length)
{
list=(arrayIndices(i), arrayIndices(j)) :: (arrayIndices(j), arrayIndices(i)) :: list
}
list
}
else
Nil
})
val graph=GraphUtils.calculateNearest(data,
numNeighbors,
{case (x,y) => Vectors.sqdist(x.features, y.features)},
closeEdges)
graph
}
/*override def getGraphFromBuckets(data:RDD[(LabeledPoint,Long)], hashBuckets:RDD[(Hash, Iterable[Long])], numNeighbors:Int):RDD[(Long, List[(Long, Double)])]=
{
}*/
}*/
| eirasf/strath | src/main/scala/es/udc/graph/LSHKNNGraphBuilder.scala | Scala | gpl-2.0 | 42,705 |
/*
* This file is part of the diffson project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package diffson
import scala.language.higherKinds
/** Type class for applying a patch of type `P` to a JSON document of type
  * `Json`, producing the patched document inside the effect `F` (which
  * typically captures the possibility of failure, e.g. `Try` or `Either`).
  */
trait Patch[F[_], Json, P] {
  /** Applies `patch` to `json`, returning the patched document in `F`. */
  def apply(json: Json, patch: P): F[Json]
}
| gnieh/diffson | core/src/main/scala/diffson/Patch.scala | Scala | apache-2.0 | 720 |
package com.twitter.finagle.thriftmux.pushsession
import com.twitter.finagle.mux.pushsession.MessageWriter
import com.twitter.finagle.mux.pushsession.MuxChannelHandle
import com.twitter.finagle.mux.pushsession.MuxMessageDecoder
import com.twitter.finagle.mux.pushsession.Negotiation
import com.twitter.finagle.mux.pushsession.SharedNegotiationStats
import com.twitter.finagle._
import com.twitter.finagle.mux.Request
import com.twitter.finagle.mux.Response
import com.twitter.finagle.mux.Handshake.Headers
import com.twitter.finagle.mux.transport.BadMessageException
import com.twitter.finagle.mux.transport.Message
import com.twitter.finagle.param.Stats
import com.twitter.finagle.pushsession.PushChannelHandle
import com.twitter.finagle.pushsession.PushSession
import com.twitter.finagle.pushsession.RefPushSession
import com.twitter.finagle.thrift.thrift.ResponseHeader
import com.twitter.finagle.thrift.thrift.UpgradeReply
import com.twitter.finagle.thrift.InputBuffer
import com.twitter.finagle.thrift.OutputBuffer
import com.twitter.finagle.thrift.ThriftTracing
import com.twitter.io.Buf
import com.twitter.io.ByteReader
import com.twitter.logging.Logger
import com.twitter.util._
import org.apache.thrift.protocol.TMessage
import org.apache.thrift.protocol.TMessageType
import org.apache.thrift.protocol.TProtocolFactory
import scala.util.control.NonFatal
/**
* Check the first message and check if its a valid mux message, and if not,
* downgrade to the vanilla thrift protocol.
*
* We indirect through the `RefPushSession` because the server needs a handle to close
* and that handle needs to be able to target a mux session which is capable of doing
* deliberate draining. Since we don't start life with that level of ability, we need
* to use the `RefPushSession` to allow us to update the target of close calls.
*/
// The server negotiating session is driving itself and nobody needs a handle on it
private[finagle] final class MuxDowngradingNegotiator(
    refSession: RefPushSession[ByteReader, Buf],
    params: Stack.Params,
    sharedStats: SharedNegotiationStats,
    handle: MuxChannelHandle,
    service: Service[Request, Response])
    extends PushSession[ByteReader, Buf](handle) {
  import MuxDowngradingNegotiator._

  // This should only be satisfied after we've completed an upgrade, or failed
  // due to an unrecognized protocol or TLS requirements.
  private[this] val handshakeDone = Promise[Unit]

  private[this] val sr = params[Stats].statsReceiver
  // Counters distinguishing sessions that spoke mux from those downgraded to vanilla thrift.
  private[this] val thriftmuxConnects = sr.counter("thriftmux", "connects")
  private[this] val downgradedConnects = sr.counter("thriftmux", "downgraded_connects")

  // If the handle closes before we finish negotiation we need to shutdown
  handle.onClose.ensure {
    if (!handshakeDone.isDefined) close()
  }

  /** Close the session; waits (up to the deadline) for the handshake to settle
    * so close can be proxied to the negotiated session and drain gracefully.
    */
  def close(deadline: Time): Future[Unit] = {
    // We want to proxy close calls to the underlying session, provided it resolves in time.
    // This facilitates draining behavior.
    handshakeDone.by(deadline)(params[param.Timer].timer).transform {
      case Return(_) => refSession.close(deadline)
      case Throw(t) => closeWithException(t)
    }
  }

  // Hard shutdown: release both the channel and the service and record `t`
  // as the handshake outcome.
  private[this] def closeWithException(t: Throwable): Future[Unit] = {
    val f = Closable.all(handle, service).close()
    // We shouldn't have to `updateIfEmpty`, but we do just in case.
    handshakeDone.updateIfEmpty(Throw(t))
    f
  }

  /** Handles the first inbound frame, which determines the wire protocol.
    * Any failure here aborts the session.
    */
  def receive(reader: ByteReader): Unit = {
    try {
      val buf = reader.readAll()
      checkDowngrade(buf)
    } catch {
      case NonFatal(ex) =>
        log.error(ex, "Uncaught exception during mux downgrade negotiation. Closing session.")
        closeWithException(ex)
    } finally reader.close()
  }

  def status: Status = handle.status

  // Decides, from the first frame alone, whether the client speaks mux or
  // vanilla thrift, and dispatches to the matching initializer.
  private[this] def checkDowngrade(buf: Buf): Unit = {
    Try { Message.decode(buf) } match {
      // We assume that a bad message decode indicates a thrift
      // session. Due to Mux message numbering, a binary-encoded
      // thrift frame corresponds to an Rerr message with tag
      // 65537. Note that in this context, an R-message is never
      // valid.
      //
      // Binary-encoded thrift messages have the format
      //
      //     header:4 n:4 method:n seqid:4
      //
      // The header is
      //
      //     0x80010000 | type
      //
      // where the type of CALL is 1; the type of ONEWAY is 4. This makes
      // the first four bytes of a CALL message 0x80010001.
      //
      // Mux messages begin with
      //
      //     Type:1 tag:3
      //
      // Rerr is type 0x80, so we see the above thrift header
      // Rerr corresponds to (tag=0x010001).
      //
      // The hazards of protocol multiplexing.
      case Throw(Failure(Some(_: BadMessageException))) | Return(Message.Rerr(65537, _)) | Return(
            Message.Rerr(65540, _)) =>
        initThriftDowngrade(buf)

      // We have a valid mux session
      case Return(_) =>
        initThriftMux(buf)

      case Throw(exc) =>
        val msg = s"Unable to determine the protocol. $remoteAddressString"
        log.info(exc, msg)
        closeWithException(exc)
    }
  }

  private[this] def initThriftMux(buf: Buf): Unit = {
    thriftmuxConnects.incr()
    // We have a normal mux transport! Just install the handshaker, give it this
    // first message, and be on our way!
    Mux.Server.defaultSessionFactory(refSession, params, sharedStats, handle, service)
    refSession.receive(ByteReader(buf))
    handshakeDone.setDone()
  }

  // Installs a vanilla thrift session in place of this negotiator; handles the
  // TTwitter upgrade handshake when the first frame was an upgrade request.
  private[this] def initThriftDowngrade(buf: Buf): Unit = {
    downgradedConnects.incr()
    val protocolFactory = params[Thrift.param.ProtocolFactory].protocolFactory
    val isTTwitter = checkTTwitter(protocolFactory, buf)
    val ttwitterHeader =
      if (!isTTwitter) None
      else
        Some {
          Buf.ByteArray.Owned(OutputBuffer.messageToArray(new ResponseHeader, protocolFactory))
        }
    // We install our new session and then send it the first thrift dispatch
    try {
      val nextSession =
        new DowngradeNegotiatior(ttwitterHeader, params, sharedStats, service)
          .negotiate(handle, None)
      // Register the new session and then give it the message
      refSession.updateRef(nextSession)
      // If we're TTwitter, the first message was an init and we need to ack it.
      // If we're not TTwitter, the first message was a dispatch and needs to be handled.
      if (!isTTwitter) refSession.receive(ByteReader(buf))
      else {
        handle.sendAndForget {
          val buffer = new OutputBuffer(protocolFactory)
          buffer().writeMessageBegin(
            new TMessage(ThriftTracing.CanTraceMethodName, TMessageType.REPLY, 0)
          )
          val upgradeReply = new UpgradeReply
          upgradeReply.write(buffer())
          buffer().writeMessageEnd()
          Buf.ByteArray.Shared(buffer.toArray)
        }
      }
      handshakeDone.setDone()
    } catch {
      case NonFatal(t) =>
        // Negotiation failed, so we need to cleanup and shutdown.
        log.warning(t, s"Negotiation failed. Closing session. $remoteAddressString")
        closeWithException(t)
    }
  }

  // True when the first frame is the TTwitter protocol-upgrade probe
  // (a CALL of the magic __can__finagle__trace__ method).
  private[this] def checkTTwitter(protocolFactory: TProtocolFactory, buf: Buf): Boolean =
    try {
      val buffer = new InputBuffer(Buf.ByteArray.Owned.extract(buf), protocolFactory)
      val msg = buffer().readMessageBegin()
      msg.`type` == TMessageType.CALL &&
      msg.name == ThriftTracing.CanTraceMethodName
    } catch {
      case NonFatal(_) => false
    }

  // Used to add peer information to log messages.
  private[this] def remoteAddressString: String = s"Remote: ${handle.remoteAddress}"
}
private[finagle] object MuxDowngradingNegotiator {
  private val log = Logger.get

  // Negotiation that always yields a vanilla (non-mux) thrift session,
  // optionally replying with the TTwitter response header.
  // NOTE(review): the name misspells "Negotiator"; kept for source compatibility.
  private final class DowngradeNegotiatior(
    ttwitterHeader: Option[Buf],
    params: Stack.Params,
    sharedStats: SharedNegotiationStats,
    service: Service[Request, Response])
      extends Negotiation(params, sharedStats, isServer = true) {
    override type SessionT = VanillaThriftSession

    // No compression is negotiated for downgraded (plain thrift) sessions.
    protected def negotiateCompression(
      handle: PushChannelHandle[ByteReader, Buf],
      peerHeaders: Option[Headers]
    ): Unit = ()

    override protected def builder(
      handle: PushChannelHandle[ByteReader, Buf],
      writer: MessageWriter,
      decoder: MuxMessageDecoder
    ): VanillaThriftSession = {
      new VanillaThriftSession(handle, ttwitterHeader, params, service)
    }
  }

  /** Installs a [[MuxDowngradingNegotiator]] as the current target of `ref`
    * and returns `ref`; the negotiator will swap itself out once the client's
    * protocol is known.
    */
  def build(
    ref: RefPushSession[ByteReader, Buf],
    params: Stack.Params,
    sharedStats: SharedNegotiationStats,
    handle: MuxChannelHandle,
    service: Service[Request, Response]
  ): ref.type = {
    val negotiatingSession = new MuxDowngradingNegotiator(
      refSession = ref,
      params = params,
      sharedStats = sharedStats,
      handle = handle,
      service = service
    )
    ref.updateRef(negotiatingSession)
    ref
  }
}
| twitter/finagle | finagle-thriftmux/src/main/scala/com/twitter/finagle/thriftmux/pushsession/MuxDowngradingNegotiator.scala | Scala | apache-2.0 | 8,859 |
object Application {

  /** Finds the Pythagorean triplet (a, b, c) with a < b < c and
    * a + b + c == 1000, returning Some(a * b * c) if it exists, None otherwise.
    *
    * Since c is fully determined by a and b (c = 1000 - a - b), the search is
    * O(n^2) instead of the naive O(n^3) triple loop, and no early `return`
    * (a nonlocal-return anti-pattern inside loops) is needed.
    */
  def productOfSpecificPythagoreanTriplet(): Option[Int] = {
    val products = for {
      a <- 1 to 332 // a < b < c and a + b + c = 1000 imply a < 1000 / 3
      b <- (a + 1) to ((1000 - a) / 2) // keeps b < c for the derived c below
      c = 1000 - a - b
      if b < c && a * a + b * b == c * c
    } yield a * b * c
    products.headOption
  }

  def main(args: Array[String]): Unit = {
    val product = productOfSpecificPythagoreanTriplet()
    println(s"The product of abc is $product.")
  }
}
| krasun/ProjectEulerSolutions | scala/9.scala | Scala | mit | 437 |
// code-examples/XML/sake.scala
// Build script for the "sake" build tool: compiles the XML chapter examples,
// runs their specs, and executes the example scripts.

import sake.Project._

// Define some convenient variables.
val buildDir = "build/"
val libDir = "../lib/"

// If true, don't actually run any commands.
environment.dryRun = false

// If true, show stack traces when a failure happens (doesn't affect some "errors").
showStackTracesOnFailures = false

// Logging level: Info, Notice, Warn, Error, Failure
log.threshold = Level.Info

// Classpath: every jar in libDir except the source jars, plus the build output dir.
environment.classpath :::= (files(libDir + "*.jar") -- files(libDir + "*src.jar"))
environment.classpath ::= buildDir

// Default target: clean build followed by specs and the example scripts.
target('all -> List('clean, 'compile, 'spec, 'scripts))

// Runs every *Spec class found under the source tree.
target('spec) {
  specs(
    'classpath -> environment.classpath,
    'path -> "**/*.scala",
    'pattern -> ".*Spec.*"
  )
}

// Executes each example script with the scala runner.
target('scripts) {
  // Omits the following:
  //   for-loop-script: Example fragment; doesn't define "someXML" that it uses.
  (files("**/*-script.scala") --
   files("**/for-loop-script.scala")).foreach { script =>
    scala(
      'classpath -> environment.classpath,
      'opts -> script
    )
  }
}

// Compiles the chapter sources into buildDir.
target('compile -> List('clean, 'build_dir)) {
  scalac(
    // Exclude this build script, demonstration scripts, and
    // files that intentionally don't compile.
    'files -> (files("**/*.scala") -- files("sake.scala") --
               files("**/*-script.scala") -- files("**/*-wont-compile.scala")),
    'classpath -> environment.classpath,
    'd -> buildDir,
    'opts -> "-unchecked -deprecation"
  )
}

// Removes all build output.
target('clean) {
  deleteRecursively(buildDir)
}

// Ensures the build output directory exists.
target('build_dir) {
  mkdir(buildDir)
}
| XClouded/t4f-core | scala/src/tmp/XML/sake.scala | Scala | apache-2.0 | 1,612 |
package com.payu.shorturl.util
import play.api.libs.json._
/** Single entry point aggregating the enum and Either JSON (de)serialization helpers. */
object JsonUtil extends JsonEnum with JsonEither
/** Play-JSON `Reads`/`Writes`/`Format` helpers for `scala.Enumeration` values,
  * supporting both a name-based (`JsString`) and an id-based (`JsNumber`)
  * representation.
  */
trait JsonEnum {

  /** Reads an enumeration value from its string name; fails with a descriptive
    * error when the name is unknown or the JSON value is not a string.
    */
  final def enumValueReads[E <: Enumeration](enum: E): Reads[E#Value] = Reads {
    case JsString(s) =>
      try {
        JsSuccess(enum.withName(s))
      } catch {
        case _: NoSuchElementException =>
          JsError(s"Enumeration expected of type: '${enum.getClass}', but it does not appear to contain the value: '$s'")
      }
    case _ => JsError("String value expected")
  }

  /** Reads an enumeration value from its numeric id; fails when the id is out
    * of range or the JSON value is not a number.
    */
  final def enumIdReads[E <: Enumeration](enum: E): Reads[E#Value] = Reads {
    case JsNumber(id) =>
      try {
        JsSuccess(enum(id.intValue()))
      } catch {
        case _: NoSuchElementException =>
          JsError(s"Enumeration expected of type: '${enum.getClass}', but it does not appear to contain the value: '$id'")
      }
    // Bug fix: this branch fires when the JSON is *not* a number, so the error
    // must ask for a number (it previously said "String value expected").
    case _ => JsError("Number value expected")
  }

  /** Writes an enumeration value as its string name. */
  final def enumValueWrites[E <: Enumeration](): Writes[E#Value] = {
    Writes { v: E#Value => JsString(v.toString) }
  }

  /** Writes an enumeration value as its numeric id. */
  final def enumIdWrites[E <: Enumeration](): Writes[E#Value] = {
    Writes { v: E#Value => JsNumber(v.id) }
  }

  /** Round-trip format using the string-name representation. */
  final def enumValueFormat[E <: Enumeration](enum: E): Format[E#Value] = {
    Format(enumValueReads(enum), enumValueWrites())
  }

  /** Round-trip format using the numeric-id representation. */
  final def enumIdFormat[E <: Enumeration](enum: E): Format[E#Value] = {
    Format(enumIdReads(enum), enumIdWrites())
  }
}
/** Play-JSON helpers for `Either[A, B]`, trying the `Left` type first when
  * reading and writing whichever side is present with no wrapper.
  */
trait JsonEither {

  /** Tries to read an `A` first; on failure falls back to `B`, and if neither
    * succeeds merges both error sets into the resulting `JsError`.
    */
  final def eitherReads[A: Reads, B: Reads]: Reads[Either[A, B]] = {
    Reads { json =>
      json.validate[A] match {
        case JsSuccess(value, path) => JsSuccess(Left(value), path)
        case JsError(aError) =>
          json.validate[B] match {
            case JsSuccess(value, path) => JsSuccess(Right(value), path)
            case JsError(bError) => JsError(JsError.merge(aError, bError))
          }
      }
    }
  }

  /** Writes whichever side is present, with no tag distinguishing the two. */
  final def eitherWrites[A: Writes, B: Writes]: Writes[Either[A, B]] = {
    Writes {
      case Left(a) => Json.toJson(a)
      case Right(b) => Json.toJson(b)
    }
  }

  /** Format combining [[eitherReads]] and [[eitherWrites]].
    * NOTE(review): because writes are untagged, a `Right` whose JSON also
    * parses as `A` will read back as a `Left` — round-tripping is only
    * faithful when the two representations are disjoint.
    */
  final def eitherFormat[A: Format, B: Format]: Format[Either[A, B]] = {
    Format(eitherReads, eitherWrites)
  }
}
| felipehaack/shorturl | payu-common/src/main/scala/com/payu/shorturl/util/JsonUtil.scala | Scala | gpl-3.0 | 2,157 |
package com.twitter.zipkin.receiver.kafka
import com.twitter.zipkin.gen.{Span => ThriftSpan}
import com.twitter.ostrich.admin.{Service => OstrichService, ServiceTracker}
import kafka.consumer.{Consumer, ConsumerConnector, ConsumerConfig}
import kafka.serializer.Decoder
import com.twitter.util.{Closable, CloseAwaitably, FuturePool, Future, Time}
import java.util.concurrent.{TimeUnit, Executors}
import java.util.Properties
import java.net.{SocketAddress, InetSocketAddress}
/** Factory for [[KafkaProcessor]] instances that consume span batches from Kafka. */
object KafkaProcessor {
  // Decoder turning a raw Kafka message into a batch of thrift spans
  // (None when the payload cannot be decoded).
  type KafkaDecoder = Decoder[Option[List[ThriftSpan]]]

  /** Creates a processor consuming `topics` (topic name -> stream count) with
    * the given Kafka consumer `config`, handing each decoded batch to `process`.
    */
  def apply(
    topics:Map[String, Int],
    config: Properties,
    process: Seq[ThriftSpan] => Future[Unit],
    decoder: KafkaDecoder
  ): KafkaProcessor = new KafkaProcessor(topics, config, process, decoder)
}
/** Consumes Kafka streams for the requested topics and hands decoded span
  * batches to `process`.
  *
  * On construction this eagerly creates a consumer connector and submits one
  * [[KafkaStreamProcessor]] task per requested stream to a fixed thread pool;
  * `close` shuts the pool down and waits until the deadline for in-flight
  * work to finish.
  */
class KafkaProcessor(
  topics: Map[String, Int],
  config: Properties,
  process: Seq[ThriftSpan] => Future[Unit],
  decoder: KafkaProcessor.KafkaDecoder
) extends Closable with CloseAwaitably {

  private[this] val processorPool = {
    val consumerConnector: ConsumerConnector = Consumer.create(new ConsumerConfig(config))
    // One worker thread per requested stream, summed across all topics.
    val threadCount = topics.foldLeft(0) { case (sum, (_, a)) => sum + a }
    val pool = Executors.newFixedThreadPool(threadCount)
    // Submit a stream processor for every stream of every subscribed topic.
    for {
      (topic, streams) <- consumerConnector.createMessageStreams(topics, decoder)
      stream <- streams
    } pool.submit(new KafkaStreamProcessor(stream, process))
    pool
  }

  def close(deadline: Time): Future[Unit] = closeAwaitably {
    FuturePool.unboundedPool {
      // Stop accepting new work, then block (on the unbounded pool, not the
      // caller's thread) until the workers finish or the deadline elapses.
      processorPool.shutdown()
      processorPool.awaitTermination(deadline.inMilliseconds, TimeUnit.MILLISECONDS)
    }
  }
}
| eirslett/zipkin | zipkin-receiver-kafka/src/main/scala/com/twitter/zipkin/receiver/kafka/KafkaProcessor.scala | Scala | apache-2.0 | 1,644 |
package org.bitcoins.dlc.oracle
import com.typesafe.config.ConfigFactory
import org.bitcoins.core.api.dlcoracle._
import org.bitcoins.core.api.dlcoracle.db._
import org.bitcoins.core.hd.{HDCoinType, HDPurpose}
import org.bitcoins.core.number._
import org.bitcoins.core.protocol.Bech32Address
import org.bitcoins.core.protocol.dlc.compute.SigningVersion
import org.bitcoins.core.protocol.script.P2WPKHWitnessSPKV0
import org.bitcoins.core.protocol.tlv._
import org.bitcoins.core.util.TimeUtil
import org.bitcoins.core.util.sorted.OrderedNonces
import org.bitcoins.crypto._
import org.bitcoins.testkit.fixtures.DLCOracleFixture
import org.bitcoins.testkitcore.Implicits._
import org.bitcoins.testkitcore.gen.{ChainParamsGenerator, TLVGen}
import java.time.Instant
class DLCOracleTest extends DLCOracleFixture {
// Outcome set shared by the enum-event tests.
val enumOutcomes: Vector[String] = Vector("sunny", "windy", "rainy", "cloudy")
// A maturation time safely in the future so announcements are pending at creation.
val futureTime: Instant = TimeUtil.now.plusSeconds(100000)
// Descriptor used by tests that only need a simple enum event.
val testDescriptor: EnumEventDescriptorV0TLV = EnumEventDescriptorV0TLV(
  enumOutcomes)
behavior of "DLCOracle"

// The oracle's configuration directory must exist after startup.
it must "correctly initialize" in { dlcOracle: DLCOracle =>
  dlcOracle.conf.exists().map(assert(_))
}

// A fresh oracle has no event rows in the database.
it must "start with no events" in { dlcOracle: DLCOracle =>
  dlcOracle.listEventDbs().map { events =>
    assert(events.isEmpty)
  }
}

// A fresh oracle has no pending events either.
it must "start with no pending events" in { dlcOracle: DLCOracle =>
  dlcOracle.listPendingEventDbs().map { events =>
    assert(events.isEmpty)
  }
}

// Looking up a random, never-created event must return None.
it must "not find an event it doesn't have" in { dlcOracle: DLCOracle =>
  val dummyEvent = TLVGen.oracleEventV0TLV.sampleSome
  dlcOracle.findEvent(dummyEvent).map { eventOpt =>
    assert(eventOpt.isEmpty)
  }
}

// The staking address must be the P2WPKH address of the oracle's public key
// for every supported network.
it must "calculate the correct staking address" in { dlcOracle: DLCOracle =>
  forAllAsync(ChainParamsGenerator.bitcoinNetworkParams) { network =>
    val expected =
      Bech32Address(P2WPKHWitnessSPKV0(dlcOracle.publicKey.publicKey),
                    network)
    assert(dlcOracle.stakingAddress(network) == expected)
  }
}

// A signed message must verify against sha256(message) under the oracle key.
it must "correctly sign a message" in { dlcOracle: DLCOracle =>
  val message = "hello world"
  val signature = dlcOracle.signMessage(message)
  assert(
    dlcOracle.publicKey.verify(CryptoUtil.sha256(message).bytes, signature))
}

// Even if the event rows are stored out of order, the reconstructed
// announcement must equal the one originally created (nonces re-sorted).
it must "get the correctly sorted nonces in an announcement " in {
  dlcOracle: DLCOracle =>
    val eventName = "test"
    val descriptorTLV =
      DigitDecompositionEventDescriptorV0TLV(base = UInt16(2),
                                             isSigned = false,
                                             numDigits = 3,
                                             unit = "units",
                                             precision = Int32.zero)

    for {
      announcement <- dlcOracle.createNewAnnouncement(eventName = eventName,
                                                      maturationTime =
                                                        futureTime,
                                                      descriptorTLV)
      // To get around foreign key, won't be needed
      _ <- dlcOracle.eventOutcomeDAO.deleteAll()

      eventDbs <- dlcOracle.eventDAO.findByEventName(eventName)
      _ <- dlcOracle.eventDAO.deleteAll()

      unsortedDbs = eventDbs.reverse

      _ <- dlcOracle.eventDAO.createAll(unsortedDbs)

      eventOpt <- dlcOracle.findEvent(eventName)
    } yield {
      eventOpt match {
        case Some(event) =>
          assert(announcement == event.announcementTLV)
        case None => fail()
      }
    }
}
// Two oracles sharing the same seed but different network configs must derive
// identical keys and produce identical attestations.
it must "have same keys with different network configs" in {
  oracleA: DLCOracle =>
    // set to mainnet and give separate db
    val newConf = oracleA.conf.newConfigOfType(
      Vector(ConfigFactory.parseString("bitcoin-s.network = mainnet"),
             ConfigFactory.parseString("bitcoin-s.oracle.db.name = oracle1")))
    newConf.start().flatMap { _ =>
      val oracleB = new DLCOracle()(newConf)
      assert(oracleA.publicKey == oracleB.publicKey)

      val eventName = "test"
      val descriptorTLV =
        DigitDecompositionEventDescriptorV0TLV(base = UInt16(2),
                                               isSigned = false,
                                               numDigits = 3,
                                               unit = "units",
                                               precision = Int32.zero)
      for {
        announcementA <- oracleA.createNewAnnouncement(eventName = eventName,
                                                       maturationTime =
                                                         futureTime,
                                                       descriptorTLV)
        announcementB <- oracleB.createNewAnnouncement(eventName = eventName,
                                                       maturationTime =
                                                         futureTime,
                                                       descriptorTLV)
        // Can't compare announcementTLV because different nonces might be used for signature
        _ = assert(announcementA.publicKey == announcementB.publicKey)
        _ = assert(announcementA.eventTLV == announcementB.eventTLV)

        eventA <- oracleA.signDigits(eventName, 1)
        eventB <- oracleB.signDigits(eventName, 1)
      } yield {
        (eventA, eventB) match {
          case (completedA: CompletedDigitDecompositionV0OracleEvent,
                completedB: CompletedDigitDecompositionV0OracleEvent) =>
            assert(
              completedA.oracleAttestmentV0TLV == completedB.oracleAttestmentV0TLV)
          case (_, _) =>
            fail("Unexpected outcome")
        }
      }
    }
}

// A newly created event shows up exactly once among the pending events.
it must "create a new event and list it with pending" in {
  dlcOracle: DLCOracle =>
    val time = futureTime
    for {
      _ <- dlcOracle.createNewAnnouncement("test", time, testDescriptor)
      pendingEvents <- dlcOracle.listPendingEventDbs()
    } yield {
      assert(pendingEvents.size == 1)
      assert(pendingEvents.head.eventDescriptorTLV == testDescriptor)
    }
}

// Two events created with the same descriptor (but distinct names) are both listed.
it must "create the same event twice and list them" in {
  dlcOracle: DLCOracle =>
    val time = futureTime
    for {
      _ <- dlcOracle.createNewAnnouncement("test", time, testDescriptor)
      _ <- dlcOracle.createNewAnnouncement("test2", time, testDescriptor)
      events <- dlcOracle.listEvents()
    } yield {
      assert(events.size == 2)
      assert(events.forall(_.eventDescriptorTLV == testDescriptor))
    }
}

// Each 3-digit event consumes 3 key indexes; two events must use indexes 0..5.
it must "create two events and use incrementing key indexes" in {
  dlcOracle: DLCOracle =>
    val create1F =
      dlcOracle.createNewDigitDecompAnnouncement(eventName = "test1",
                                                 maturationTime = futureTime,
                                                 base = UInt16(10),
                                                 isSigned = false,
                                                 numDigits = 3,
                                                 unit = "units",
                                                 precision = Int32.zero)

    val create2F =
      dlcOracle.createNewDigitDecompAnnouncement(eventName = "test2",
                                                 maturationTime = futureTime,
                                                 base = UInt16(10),
                                                 isSigned = false,
                                                 numDigits = 3,
                                                 unit = "units",
                                                 precision = Int32.zero)

    for {
      _ <- create1F
      _ <- create2F

      rValDbs <- dlcOracle.rValueDAO.findAll()
    } yield {
      val indexes = rValDbs.map(_.keyIndex).sorted
      assert(indexes == Vector(0, 1, 2, 3, 4, 5))
    }
}

// Event names are unique: reusing one must fail with IllegalArgumentException.
it must "fail to create an event with the same name" in {
  dlcOracle: DLCOracle =>
    for {
      _ <- dlcOracle.createNewAnnouncement("test", futureTime, testDescriptor)
      res <- recoverToSucceededIf[IllegalArgumentException](
        dlcOracle.createNewAnnouncement("test", futureTime, testDescriptor))
    } yield res
}
// A freshly created enum event must expose consistent metadata and a valid
// announcement signature over the reconstructed event TLV.
it must "create an enum new event and get its details" in {
  dlcOracle: DLCOracle =>
    val time = futureTime
    val eventName = "test"

    for {
      announcement <-
        dlcOracle.createNewEnumAnnouncement(eventName, time, enumOutcomes)
      eventOpt <- dlcOracle.findEvent(announcement.eventTLV)
    } yield {
      assert(announcement.validateSignature)

      assert(eventOpt.isDefined)
      val event = eventOpt.get

      assert(event.isInstanceOf[PendingEnumV0OracleEvent])
      assert(event.eventName == eventName)
      assert(event.eventDescriptorTLV == testDescriptor)
      assert(event.signingVersion == SigningVersion.latest)
      assert(event.pubkey == dlcOracle.publicKey)
      assert(event.maturationTime.getEpochSecond == time.getEpochSecond)

      val expectedEventTLV =
        OracleEventV0TLV(OrderedNonces(event.nonces.head),
                         UInt32(event.maturationTime.getEpochSecond),
                         testDescriptor,
                         eventName)

      assert(event.eventTLV == expectedEventTLV)

      val expectedAnnouncementTLV =
        OracleAnnouncementV0TLV(event.announcementSignature,
                                event.pubkey,
                                expectedEventTLV)

      assert(event.announcementTLV == expectedAnnouncementTLV)

      val announceBytes =
        SigningVersion.latest.calcAnnouncementHash(event.eventTLV)

      assert(
        dlcOracle.publicKey.verify(announceBytes,
                                   event.announcementSignature))
    }
}

// Signing an enum event stores the attestation and yields a signature that
// verifies against the outcome hash and the event's nonce.
it must "create and sign an enum event" in { dlcOracle: DLCOracle =>
  val descriptor = TLVGen.enumEventDescriptorV0TLV.sampleSome
  val outcome = descriptor.outcomes.head

  val descriptorV0TLV =
    EnumEventDescriptorV0TLV(descriptor.outcomes)

  for {
    announcement <-
      dlcOracle.createNewAnnouncement("test", futureTime, descriptorV0TLV)

    signedEventDb <-
      dlcOracle.signEnum(announcement.eventTLV, EnumAttestation(outcome))
    eventOpt <- dlcOracle.findEvent(announcement.eventTLV)
  } yield {
    assert(eventOpt.isDefined)
    val event = eventOpt.get
    val sig = signedEventDb.sigOpt.get

    event match {
      case completedEvent: CompletedEnumV0OracleEvent =>
        assert(completedEvent.attestation == sig.sig)
        assert(completedEvent.outcomes == Vector(EnumAttestation(outcome)))
        val descriptor = completedEvent.eventDescriptorTLV
        val hash = SigningVersion.latest.calcOutcomeHash(descriptor, outcome)
        assert(dlcOracle.publicKey.verify(hash, sig))
        assert(
          SchnorrDigitalSignature(completedEvent.nonces.head,
                                  completedEvent.attestation) == sig)
        assert(
          OracleEvent.verifyAttestations(announcement,
                                         completedEvent.oracleAttestmentV0TLV,
                                         signingVersion =
                                           SigningVersion.latest))
      case _: PendingOracleEvent | _: CompletedOracleEvent =>
        fail()
    }
  }
}
// Signing -321 on a signed base-10, 3-digit event must yield a sign attestation
// plus digit attestations [3, 2, 1], each with a verifying signature.
it must "create and sign a large range event" in { dlcOracle: DLCOracle =>
  val outcome = -321L
  for {
    announcement <-
      dlcOracle.createNewDigitDecompAnnouncement(eventName = "test",
                                                 maturationTime = futureTime,
                                                 base = UInt16(10),
                                                 isSigned = true,
                                                 numDigits = 3,
                                                 unit = "units",
                                                 precision = Int32.zero)
    _ = assert(announcement.validateSignature)

    eventTLV = announcement.eventTLV

    event <- dlcOracle.signDigits(eventTLV, outcome)
  } yield {
    event match {
      case completedEvent: CompletedDigitDecompositionV0OracleEvent =>
        val signOutcome = DigitDecompositionSignAttestation(outcome >= 0)
        val digitOutcomes = Vector(DigitDecompositionAttestation(3),
                                   DigitDecompositionAttestation(2),
                                   DigitDecompositionAttestation(1))
        assert(completedEvent.outcomes == signOutcome +: digitOutcomes)

        val descriptor = completedEvent.eventDescriptorTLV

        // Sign Signature Check
        val signHash = SigningVersion.latest.calcOutcomeHash(descriptor, "-")
        val signSig = completedEvent.signatures.head
        assert(dlcOracle.publicKey.verify(signHash, signSig))
        assert(
          SchnorrDigitalSignature(
            completedEvent.nonces.head,
            completedEvent.attestations.head) == signSig)

        // 100s Place signature Check
        val hash100 =
          SigningVersion.latest.calcOutcomeHash(
            descriptor,
            DigitDecompositionAttestation(3).bytes)
        val sig100 = completedEvent.signatures(1)
        assert(dlcOracle.publicKey.verify(hash100, sig100))
        assert(
          SchnorrDigitalSignature(completedEvent.nonces(1),
                                  completedEvent.attestations(1)) == sig100)

        // 10s Place signature Check
        val hash10 =
          SigningVersion.latest.calcOutcomeHash(
            descriptor,
            DigitDecompositionAttestation(2).bytes)
        val sig10 = completedEvent.signatures(2)
        assert(dlcOracle.publicKey.verify(hash10, sig10))
        assert(
          SchnorrDigitalSignature(completedEvent.nonces(2),
                                  completedEvent.attestations(2)) == sig10)

        // 1s Place signature Check
        val hash1 =
          SigningVersion.latest.calcOutcomeHash(
            descriptor,
            DigitDecompositionAttestation(1).bytes)
        val sig1 = completedEvent.signatures(3)
        assert(dlcOracle.publicKey.verify(hash1, sig1))
        assert(
          SchnorrDigitalSignature(completedEvent.nonces(3),
                                  completedEvent.attestations(3)) == sig1)
      case _: PendingOracleEvent | _: CompletedOracleEvent =>
        fail()
    }
  }
}

// Same as above but base 16: -1931 decomposes into hex digits [7, 8, 11].
it must "create and sign a non-base 10 large range event" in {
  dlcOracle: DLCOracle =>
    val outcome = -1931L
    for {
      announcement <-
        dlcOracle.createNewDigitDecompAnnouncement(eventName = "test",
                                                   maturationTime =
                                                     futureTime,
                                                   base = UInt16(16),
                                                   isSigned = true,
                                                   numDigits = 3,
                                                   unit = "units",
                                                   precision = Int32.zero)
      _ = assert(announcement.validateSignature)

      eventTLV = announcement.eventTLV

      event <- dlcOracle.signDigits(eventTLV, outcome)
    } yield {
      event match {
        case completedEvent: CompletedDigitDecompositionV0OracleEvent =>
          val signOutcome = DigitDecompositionSignAttestation(outcome >= 0)
          val digitOutcomes = Vector(DigitDecompositionAttestation(7),
                                     DigitDecompositionAttestation(8),
                                     DigitDecompositionAttestation(11))
          assert(completedEvent.outcomes == signOutcome +: digitOutcomes)

          val descriptor = completedEvent.eventDescriptorTLV

          // Sign Signature Check
          val signHash =
            SigningVersion.latest.calcOutcomeHash(descriptor, "-")
          val signSig = completedEvent.signatures.head
          assert(dlcOracle.publicKey.verify(signHash, signSig))
          assert(
            SchnorrDigitalSignature(
              completedEvent.nonces.head,
              completedEvent.attestations.head) == signSig)

          // 100s Place signature Check
          val hash100 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(7).bytes)
          val sig100 = completedEvent.signatures(1)
          assert(dlcOracle.publicKey.verify(hash100, sig100))
          assert(
            SchnorrDigitalSignature(completedEvent.nonces(1),
                                    completedEvent.attestations(1)) == sig100)

          // 10s Place signature Check
          val hash10 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(8).bytes)
          val sig10 = completedEvent.signatures(2)
          assert(dlcOracle.publicKey.verify(hash10, sig10))
          assert(
            SchnorrDigitalSignature(completedEvent.nonces(2),
                                    completedEvent.attestations(2)) == sig10)

          // 1s Place signature Check
          val hash1 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(11).bytes)
          val sig1 = completedEvent.signatures(3)
          assert(dlcOracle.publicKey.verify(hash1, sig1))
          assert(
            SchnorrDigitalSignature(completedEvent.nonces(3),
                                    completedEvent.attestations(3)) == sig1)
        case _: PendingOracleEvent | _: CompletedOracleEvent =>
          fail()
      }
    }
}
// Signing 2 on an unsigned base-2, 3-digit event must yield digit attestations
// [0, 1, 0] — zero digits must still be attested and verifiable.
it must "create and sign a large range event with digits of 0" in {
  dlcOracle: DLCOracle =>
    val outcome = 2
    for {
      announcement <-
        dlcOracle.createNewDigitDecompAnnouncement(eventName = "test",
                                                   maturationTime =
                                                     futureTime,
                                                   base = UInt16(2),
                                                   isSigned = false,
                                                   numDigits = 3,
                                                   unit = "units",
                                                   precision = Int32.zero)
      _ = assert(announcement.validateSignature)

      eventTLV = announcement.eventTLV

      event <- dlcOracle.signDigits(eventTLV, outcome)
    } yield {
      event match {
        case completedEvent: CompletedDigitDecompositionV0OracleEvent =>
          val digitOutcomes = Vector(DigitDecompositionAttestation(0),
                                     DigitDecompositionAttestation(1),
                                     DigitDecompositionAttestation(0))
          assert(completedEvent.outcomes == digitOutcomes)

          val descriptor = completedEvent.eventDescriptorTLV

          // 100s Place signature Check
          val hash100 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(0).bytes)
          val sig100 = completedEvent.signatures.head
          assert(dlcOracle.publicKey.verify(hash100, sig100))
          assert(
            SchnorrDigitalSignature(
              completedEvent.nonces.head,
              completedEvent.attestations.head) == sig100)

          // 10s Place signature Check
          val hash10 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(1).bytes)
          val sig10 = completedEvent.signatures(1)
          assert(dlcOracle.publicKey.verify(hash10, sig10))
          assert(
            SchnorrDigitalSignature(completedEvent.nonces(1),
                                    completedEvent.attestations(1)) == sig10)

          // 1s Place signature Check
          val hash1 =
            SigningVersion.latest.calcOutcomeHash(
              descriptor,
              DigitDecompositionAttestation(0).bytes)
          val sig1 = completedEvent.signatures(2)
          assert(dlcOracle.publicKey.verify(hash1, sig1))
          assert(
            SchnorrDigitalSignature(completedEvent.nonces(2),
                                    completedEvent.attestations(2)) == sig1)
        case _: PendingOracleEvent | _: CompletedOracleEvent =>
          fail()
      }
    }
}

// Regression test for signing events with many digits; the decoded base-10
// outcome must round-trip exactly.
it must "create and sign a decomp event with a large num digits" in {
  dlcOracle: DLCOracle =>
    //trying make sure we don't regress on
    //https://github.com/bitcoin-s/bitcoin-s/issues/3431
    val outcome = 30816
    val numDigits = 18
    val eventName = "test"
    for {
      announcement <-
        dlcOracle.createNewDigitDecompAnnouncement(eventName = eventName,
                                                   maturationTime =
                                                     futureTime,
                                                   base = UInt16(2),
                                                   isSigned = false,
                                                   numDigits = numDigits,
                                                   unit = "units",
                                                   precision = Int32.zero)
      _ = assert(announcement.validateSignature)
      eventTLV = announcement.eventTLV
      event <- dlcOracle.signDigits(eventName, outcome)
    } yield {
      event match {
        case _: PendingOracleEvent | _: CompletedEnumV0OracleEvent =>
          fail(s"Shouldn't be pending/enum after signDigits()")
        case c: CompletedDigitDecompositionV0OracleEvent =>
          assert(c.outcomeBase10 == outcome)
      }
    }
}
it must "correctly track pending events" in { dlcOracle: DLCOracle =>
val outcome = enumOutcomes.head
for {
announcement <-
dlcOracle.createNewEnumAnnouncement("test", futureTime, enumOutcomes)
beforePending <- dlcOracle.listPendingEventDbs()
beforeEvents <- dlcOracle.listEvents()
_ = assert(beforePending.size == 1)
_ = assert(beforeEvents.size == 1)
_ = assert(beforeEvents.head.isInstanceOf[PendingOracleEvent])
nonce = announcement.eventTLV.nonces.head
_ <- dlcOracle.createAttestation(nonce, EnumAttestation(outcome))
afterPending <- dlcOracle.listPendingEventDbs()
afterEvents <- dlcOracle.listEvents()
} yield {
assert(afterPending.isEmpty)
assert(afterEvents.size == 1)
assert(afterEvents.head.isInstanceOf[CompletedOracleEvent])
}
}
it must "fail to sign an event that doesn't exist" in {
dlcOracle: DLCOracle =>
val dummyNonce = SchnorrNonce(ECPublicKey.freshPublicKey.bytes.tail)
recoverToSucceededIf[RuntimeException](
dlcOracle.createAttestation(dummyNonce,
EnumAttestation("testOutcomes")))
}
it must "fail to sign an enum outcome that doesn't exist" in {
dlcOracle: DLCOracle =>
recoverToSucceededIf[RuntimeException] {
for {
announcement <-
dlcOracle.createNewEnumAnnouncement("test",
futureTime,
enumOutcomes)
nonce = announcement.eventTLV.nonces.head
_ <- dlcOracle.createAttestation(
nonce,
EnumAttestation("not a real outcome"))
} yield ()
}
}
it must "sign a negative number for a unsigned digit decomp event that results in 0" in {
dlcOracle: DLCOracle =>
for {
announcement <-
dlcOracle.createNewDigitDecompAnnouncement(eventName = "test",
maturationTime =
futureTime,
base = UInt16(2),
isSigned = false,
numDigits = 3,
unit = "units",
precision = Int32.zero)
res <- dlcOracle.signDigits(announcement.eventTLV, -2)
} yield {
res match {
case p @ (_: PendingOracleEvent | _: CompletedEnumV0OracleEvent) =>
fail(s"Cannot be pending after creating attestations, got=$p")
case c: CompletedDigitDecompositionV0OracleEvent =>
assert(c.outcomeBase10 == 0)
}
}
}
it must "fail to sign an event with an outside nonce" in {
dlcOracle: DLCOracle =>
val ecKey = ECPublicKey.freshPublicKey
val publicKey = ecKey.schnorrPublicKey
val nonce = ecKey.schnorrNonce
val eventName = "dummy"
val sigVersion = SigningVersion.latest
val message = "dummy message"
val rValDb =
RValueDb(nonce, eventName, HDPurpose(0), HDCoinType.Bitcoin, 0, 0, 0)
val eventDb =
EventDb(nonce,
publicKey,
0,
eventName,
0,
sigVersion,
futureTime,
None,
None,
SchnorrDigitalSignature(nonce, FieldElement.one),
testDescriptor)
val setupF = for {
_ <- dlcOracle.rValueDAO.create(rValDb)
_ <- dlcOracle.eventDAO.create(eventDb)
} yield ()
recoverToSucceededIf[IllegalArgumentException] {
for {
_ <- setupF
_ <- dlcOracle.createAttestation(nonce, EnumAttestation(message))
} yield ()
}
}
it must "fail to sign an event with a nonce not in the event db" in {
dlcOracle: DLCOracle =>
val ecKey = ECPublicKey.freshPublicKey
val nonce = ecKey.schnorrNonce
val eventName = "dummy"
val message = "dummy message"
val rValDb =
RValueDb(nonce, eventName, HDPurpose(0), HDCoinType.Bitcoin, 0, 0, 0)
recoverToSucceededIf[RuntimeException] {
for {
_ <- dlcOracle.rValueDAO.create(rValDb)
_ <- dlcOracle.createAttestation(nonce, EnumAttestation(message))
} yield ()
}
}
it must "fail to create an enum event with no outcomes" in {
dlcOracle: DLCOracle =>
assertThrows[IllegalArgumentException] {
dlcOracle.createNewEnumAnnouncement("test", futureTime, Vector.empty)
}
}
it must "fail to create an event with duplicate outcomes" in {
dlcOracle: DLCOracle =>
val outcomes = enumOutcomes :+ enumOutcomes.head
assertThrows[IllegalArgumentException] {
dlcOracle.createNewEnumAnnouncement("test", futureTime, outcomes)
}
}
it must "fail to create an event in the past" in { dlcOracle: DLCOracle =>
assertThrows[IllegalArgumentException] {
dlcOracle.createNewAnnouncement("test", Instant.EPOCH, testDescriptor)
}
}
it must "create and sign a signed digit decomposition event" in {
dlcOracle: DLCOracle =>
val eventName = "signed"
val maturationTime = futureTime
val descriptor =
SignedDigitDecompositionEventDescriptor(UInt16(2),
UInt16(3),
"unit",
Int32(0))
for {
announcement: OracleAnnouncementTLV <-
dlcOracle.createNewAnnouncement(eventName, maturationTime, descriptor)
event <-
dlcOracle
.signDigits(announcement.eventTLV, -2)
} yield {
assert(event.isInstanceOf[CompletedDigitDecompositionV0OracleEvent])
val attestations = event
.asInstanceOf[CompletedDigitDecompositionV0OracleEvent]
.oracleAttestmentV0TLV
assert(
OracleEvent.verifyAttestations(announcement,
attestations,
signingVersion =
SigningVersion.latest))
}
}
it must "create and sign a unsigned digit decomposition event" in {
dlcOracle: DLCOracle =>
val eventName = "unsigned"
val maturationTime = futureTime
val descriptor =
UnsignedDigitDecompositionEventDescriptor(UInt16(2),
UInt16(3),
"unit",
Int32(0))
for {
announcement: OracleAnnouncementTLV <-
dlcOracle.createNewAnnouncement(eventName, maturationTime, descriptor)
event <-
dlcOracle
.signDigits(announcement.eventTLV, 2)
} yield {
assert(event.isInstanceOf[CompletedDigitDecompositionV0OracleEvent])
val attestations = event
.asInstanceOf[CompletedDigitDecompositionV0OracleEvent]
.oracleAttestmentV0TLV
assert(
OracleEvent.verifyAttestations(announcement,
attestations,
signingVersion =
SigningVersion.latest))
}
}
it must "fail to verify a unsigned digit decomposition event " in {
dlcOracle: DLCOracle =>
val eventName1 = "unsigned1"
val eventName2 = "unsigned2"
val maturationTime = futureTime
val descriptor =
UnsignedDigitDecompositionEventDescriptor(UInt16(2),
UInt16(3),
"unit",
Int32(0))
for {
announcement1: OracleAnnouncementTLV <-
dlcOracle.createNewAnnouncement(eventName1,
maturationTime,
descriptor)
announcement2: OracleAnnouncementTLV <-
dlcOracle.createNewAnnouncement(eventName2,
maturationTime,
descriptor)
event1 <-
dlcOracle
.signDigits(announcement1.eventTLV, 2)
event2 <-
dlcOracle
.signDigits(announcement2.eventTLV, 1)
} yield {
assert(event1.isInstanceOf[CompletedDigitDecompositionV0OracleEvent])
val attestations1 = event1
.asInstanceOf[CompletedDigitDecompositionV0OracleEvent]
.oracleAttestmentV0TLV
assert(event2.isInstanceOf[CompletedDigitDecompositionV0OracleEvent])
val attestations2 = event2
.asInstanceOf[CompletedDigitDecompositionV0OracleEvent]
.oracleAttestmentV0TLV
assert(
!OracleEvent.verifyAttestations(announcement1,
attestations2,
signingVersion =
SigningVersion.latest))
assert(
!OracleEvent.verifyAttestations(announcement2,
attestations1,
signingVersion =
SigningVersion.latest))
}
}
it must "create and delete signatures for an enum event" in {
dlcOracle: DLCOracle =>
val descriptor = TLVGen.enumEventDescriptorV0TLV.sampleSome
val outcome = descriptor.outcomes.head
val descriptorV0TLV =
EnumEventDescriptorV0TLV(descriptor.outcomes)
for {
announcement <-
dlcOracle.createNewAnnouncement("test", futureTime, descriptorV0TLV)
_ <-
dlcOracle.signEnum(announcement.eventTLV, EnumAttestation(outcome))
signedEvent <- dlcOracle.findEvent("test").map(_.get)
_ = {
signedEvent match {
case c: CompletedEnumV0OracleEvent =>
assert(c.attestations.nonEmpty)
assert(c.outcomes.nonEmpty)
case _: PendingOracleEvent | _: CompletedOracleEvent =>
fail()
}
}
_ <- dlcOracle.deleteAttestation("test")
event <- dlcOracle.findEvent("test").map(_.get)
} yield {
event match {
case _: PendingEnumV0OracleEvent => succeed
case _: PendingOracleEvent | _: CompletedOracleEvent =>
fail()
}
}
}
it must "create and delete signatures for a decomp event" in {
dlcOracle: DLCOracle =>
val descriptor =
UnsignedDigitDecompositionEventDescriptor(UInt16(2),
UInt16(3),
"unit",
Int32(0))
for {
_ <- dlcOracle.createNewAnnouncement("test", futureTime, descriptor)
_ <- dlcOracle.signDigits("test", 0)
signedEvent <- dlcOracle.findEvent("test").map(_.get)
_ = {
signedEvent match {
case c: CompletedDigitDecompositionV0OracleEvent =>
assert(c.attestations.nonEmpty)
assert(c.outcomes.nonEmpty)
case _: PendingOracleEvent | _: CompletedOracleEvent =>
fail()
}
}
_ <- dlcOracle.deleteAttestation("test")
event <- dlcOracle.findEvent("test").map(_.get)
} yield {
event match {
case _: PendingDigitDecompositionV0OracleEvent => succeed
case _: PendingOracleEvent | _: CompletedOracleEvent =>
fail()
}
}
}
it must "fail to delete signatures for an unsigned enum event" in {
dlcOracle: DLCOracle =>
val descriptor =
UnsignedDigitDecompositionEventDescriptor(UInt16(2),
UInt16(3),
"unit",
Int32(0))
for {
_ <- dlcOracle.createNewAnnouncement("test", futureTime, descriptor)
signedEvent <- dlcOracle.findEvent("test").map(_.get)
_ = assert(
signedEvent.isInstanceOf[PendingDigitDecompositionV0OracleEvent])
res <- recoverToSucceededIf[IllegalArgumentException](
dlcOracle.deleteAttestation("test"))
} yield res
}
it must "fail to delete signatures for an unsigned decomp event" in {
dlcOracle: DLCOracle =>
val descriptor = TLVGen.enumEventDescriptorV0TLV.sampleSome
for {
_ <- dlcOracle.createNewAnnouncement("test", futureTime, descriptor)
signedEvent <- dlcOracle.findEvent("test").map(_.get)
_ = assert(signedEvent.isInstanceOf[PendingEnumV0OracleEvent])
res <- recoverToSucceededIf[IllegalArgumentException](
dlcOracle.deleteAttestation("test"))
} yield res
}
it must "delete enum announcements" in { dlcOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewEnumAnnouncement(eventName,
futureTime,
Vector("0", "1", "2"))
for {
c <- createdF
_ <- dlcOracle.deleteAnnouncement(c)
//make sure we can't find it
annOpt <- dlcOracle.findEvent(eventName)
} yield {
assert(annOpt.isEmpty)
}
}
it must "delete numeric announcements" in { dlcOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewDigitDecompAnnouncement(eventName = eventName,
maturationTime = futureTime,
base = UInt16.two,
isSigned = false,
numDigits = 2,
unit = "UNIT",
precision = Int32.zero)
for {
c <- createdF
_ <- dlcOracle.deleteAnnouncement(c)
//make sure we can't find it
annOpt <- dlcOracle.findEvent(eventName)
} yield {
assert(annOpt.isEmpty)
}
}
it must "fail to delete an announcement if there are attesations associated with it" in {
dlcOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewDigitDecompAnnouncement(eventName = eventName,
maturationTime = futureTime,
base = UInt16(2),
isSigned = false,
numDigits = 2,
unit = "UNIT",
precision = Int32.zero)
val resultF = for {
_ <- createdF
_ <- dlcOracle.signDigits(eventName, 1)
_ <- dlcOracle.deleteAnnouncement(eventName)
} yield ()
recoverToSucceededIf[RuntimeException] {
resultF
}
}
it must "delete enum attestation" in { dlcOracle: DLCOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewAnnouncement(eventName, futureTime, testDescriptor)
for {
_ <- createdF
_ <- dlcOracle.signEnum(eventName, EnumAttestation("cloudy"))
_ <- dlcOracle.deleteAttestation(eventName)
eventOpt <- dlcOracle.findEvent(eventName)
} yield {
assert(eventOpt.isDefined)
assert(eventOpt.get.isInstanceOf[PendingEnumV0OracleEvent])
}
}
it must "delete numeric attestations" in { dlcOracle: DLCOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewDigitDecompAnnouncement(eventName = eventName,
maturationTime = futureTime,
base = UInt16(2),
isSigned = false,
numDigits = 2,
unit = "UNIT",
precision = Int32.zero)
for {
_ <- createdF
_ <- dlcOracle.signDigits(eventName, 1)
_ <- dlcOracle.deleteAttestation(eventName)
eventOpt <- dlcOracle.findEvent(eventName)
} yield {
assert(eventOpt.isDefined)
assert(eventOpt.get.isInstanceOf[PendingDigitDecompositionV0OracleEvent])
}
}
it must "delete attestations, and then delete the announcement" in {
dlcOracle: DLCOracle =>
val eventName = "test"
val createdF =
dlcOracle.createNewDigitDecompAnnouncement(eventName = eventName,
maturationTime = futureTime,
base = UInt16(2),
isSigned = false,
numDigits = 2,
unit = "UNIT",
precision = Int32.zero)
for {
_ <- createdF
_ <- dlcOracle.signDigits(eventName, 1)
_ <- dlcOracle.deleteAttestation(eventName)
_ <- dlcOracle.deleteAnnouncement(eventName)
eventOpt <- dlcOracle.findEvent(eventName)
} yield {
assert(eventOpt.isEmpty)
}
}
it must "set and retrieve oracle name" in { dlcOracle: DLCOracle =>
for {
emptyNameOpt <- dlcOracle.oracleName()
_ <- dlcOracle.setOracleName("test name")
testNameOpt <- dlcOracle.oracleName()
} yield {
assert(emptyNameOpt.isEmpty)
assert(testNameOpt.contains("test name"))
}
}
}
| bitcoin-s/bitcoin-s | dlc-oracle-test/src/test/scala/org/bitcoins/dlc/oracle/DLCOracleTest.scala | Scala | mit | 40,593 |
/*
* Copyright 2011-2012 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos
/** Helpers and implicit conversions for working with [[Maybe]] values. */
package object maybe {
  /** Extractor matching fatal JVM errors (`java.lang.Error`), which this
    * library treats as unrecoverable. */
  object UnRecoverableException {
    def unapply(e: Throwable): Option[Error] = {
      require(e ne null, "exception is null")
      e match {
        case error: Error => Some(error)
        case _            => None
      }
    }
  }

  /** Extractor matching every throwable except fatal JVM errors. */
  object RecoverableException {
    def unapply(e: Throwable): Option[Throwable] = {
      require(e ne null, "exception is null")
      e match {
        case _: Error => None
        case other    => Some(other)
      }
    }
  }

  /** Evaluates `f` into a [[Maybe]]: `null` becomes `NoVal`, any other value
    * becomes `Just`, and a recoverable exception becomes `Failed` (after
    * running `_catch` best-effort). Fatal errors propagate unchanged and
    * `_finally` always runs best-effort. */
  def effect[A](f: => A)(_catch: => Unit)(_finally: => Unit): Maybe[A] = {
    try {
      f match {
        case null  => NoVal
        case value => Just(value)
      }
    } catch {
      case RecoverableException(e) =>
        safeUnit(_catch)
        Failed(e)
    } finally {
      safeUnit(_finally)
    }
  }

  /** Shorthand for `Maybe(f)`. */
  @inline
  def maybe[A](f: => A) = Maybe(f)

  /** Evaluates `f` for its side effects only, swallowing any recoverable
    * exception; fatal errors still propagate. */
  @inline
  def safeUnit[A](f: => A): Unit = {
    try {
      f
      ()
    } catch {
      case RecoverableException(_) => ()
    }
  }

  /** Converts `Some`/`None` into `Just`/`NoVal`. */
  implicit def optionToMaybe[T](x: Option[T]): MaybeOption[T] = x match {
    case Some(value) => Just(value)
    case None        => NoVal
  }

  /** Converts `Right`/`Left` into `Just`/`Failed`. */
  implicit def eitherToMaybe[A <: Throwable, B](x: Either[A, B]): MaybeEither[B] = x match {
    case Right(value) => Just(value)
    case Left(error)  => Failed(error)
  }

  /** Looks up `key` in `map`, mapping a missing key to a `null` value so that
    * `Maybe`'s constructor turns it into `NoVal`. */
  def getFromMapAsMaybe[A, B <: AnyRef](map: scala.collection.Map[A, B], key: A): Maybe[B] =
    Maybe(map.getOrElse(key, null.asInstanceOf[B]))

  /** Same lookup as [[getFromMapAsMaybe]] but typed as [[MaybeOption]]. */
  def getFromMapAsMaybeOption[A, B <: AnyRef](map: scala.collection.Map[A, B], key: A): MaybeOption[B] =
    MaybeOption(map.getOrElse(key, null.asInstanceOf[B]))
}
| loverdos/maybe | src/main/scala/com/ckkloverdos/package.scala | Scala | apache-2.0 | 2,377 |
package json
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.core.JsonGenerator.Feature
import com.fasterxml.jackson.databind._
object JsonUtil {
  /** Shared Jackson mapper, configured once at object initialization:
    * Scala-aware, tolerant of unknown JSON fields, and serializing dates as
    * ISO strings rather than numeric timestamps. */
  val mapper: ObjectMapper = {
    val m = new ObjectMapper()
    m.registerModule(new DefaultScalaModule())
    m.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
    m.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
    m
  }
} | gengstrand/clojure-news-feed | server/swagger/templates/scalatra/JsonUtil.scala | Scala | epl-1.0 | 417 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
package com.github.mauricio.async.db.mysql.message.server
import com.github.mauricio.async.db.KindedMessage
/** Numeric tags used to classify messages received from a MySQL server.
  * Negative and synthetic values are internal to this client; they are not
  * part of the wire protocol itself (see the note below). */
object ServerMessage {
  // Protocol version this client understands.
  final val ServerProtocolVersion = 10
  // Basic response classifications; presumably mirroring the MySQL packet
  // header markers (error/OK/EOF) — NOTE(review): confirm against the decoder.
  final val Error = -1
  final val Ok = 0
  final val EOF = -2
  // these messages don't actually exist
  // but we use them to simplify the switch statements
  final val ColumnDefinition = 100
  final val ColumnDefinitionFinished = 101
  final val ParamProcessingFinished = 102
  final val ParamAndColumnProcessingFinished = 103
  final val Row = 104
  final val BinaryRow = 105
  final val PreparedStatementPrepareResponse = 106
}
/** Base class for server-originated messages; `kind` is one of the constants
  * defined on the companion object above. */
class ServerMessage(val kind: Int) extends KindedMessage
| dripower/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/message/server/ServerMessage.scala | Scala | apache-2.0 | 1,445 |
package models
import java.util.Date
/**
* ismet-scalongo-seed
* Created by jeronimocarlos on 9/12/16.
*/
// A user's collection of La Nación news items together with the timestamp at
// which the collection was captured.
case class LaNacionUserNews(
                             news: List[News],
                             timestamp: Date
                           )
object LaNacionUserNews {
  import play.api.libs.json.Json
  // Play JSON (de)serializer derived automatically from the case class fields.
  implicit val laNacionUserNewsFormat = Json.format[LaNacionUserNews]
}
| TVilaboa/Egresados | app/models/LaNacionUserNews.scala | Scala | gpl-3.0 | 394 |
package slick.jdbc
import java.io.Closeable
import java.util.Properties
import java.util.concurrent.TimeUnit
import java.sql.{SQLException, DriverManager, Driver, Connection}
import javax.sql.DataSource
import com.typesafe.config.Config
import slick.util.ClassLoaderUtil
import slick.util.BeanConfigurator
import slick.util.ConfigExtensionMethods._
import slick.SlickException
/** A `JdbcDataSource` provides a way to create a `Connection` object for a database. It is
  * similar to a `javax.sql.DataSource` but simpler. Unlike [[JdbcBackend.DatabaseDef]] it is not a
  * part of the backend cake. This trait defines the SPI for 3rd-party connection pool support. */
trait JdbcDataSource extends Closeable {
  /** Create a new Connection or get one from the pool.
    * The caller is responsible for closing the returned connection
    * (which, for pooled implementations, presumably returns it to the pool
    * rather than closing the physical connection — see each implementation). */
  def createConnection(): Connection
  /** If this object represents a connection pool managed directly by Slick, close it.
    * Otherwise no action is taken. */
  def close(): Unit
}
object JdbcDataSource {
  /** Create a JdbcDataSource from a `Config`. See [[JdbcBackend.DatabaseFactoryDef.forConfig]]
    * for documentation of the supported configuration parameters. */
  def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): JdbcDataSource = {
    val poolSetting = c.getStringOr("connectionPool", "HikariCP")
    val factory: JdbcDataSourceFactory = poolSetting match {
      case "disabled" => DataSourceJdbcDataSource
      case "HikariCP" => HikariCPJdbcDataSource
      case factoryClassName =>
        // Any other value is treated as the fully qualified name of a Scala
        // object implementing JdbcDataSourceFactory; load its singleton.
        val clazz = classLoader.loadClass(factoryClassName)
        clazz.getField("MODULE$").get(clazz).asInstanceOf[JdbcDataSourceFactory]
    }
    factory.forConfig(c, driver, name, classLoader)
  }
}
/** Create a [[JdbcDataSource]] from a `Config` object and an optional JDBC `Driver`.
  * This is used with the "connectionPool" configuration option in
  * [[JdbcBackend.DatabaseFactoryDef.forConfig]]. */
trait JdbcDataSourceFactory {
  /** @param c configuration describing the data source / pool
    * @param driver a pre-instantiated JDBC `Driver`, or `null` when the factory
    *               should resolve one itself from the configuration
    * @param name logical database/pool name (used e.g. for pool naming)
    * @param classLoader class loader used for any reflective class loading */
  def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): JdbcDataSource
}
/** A JdbcDataSource for a `DataSource` */
class DataSourceJdbcDataSource(val ds: DataSource, val keepAliveConnection: Boolean,
                               val connectionPreparer: ConnectionPreparer = null) extends JdbcDataSource {
  // Connection held open (never handed out) for the lifetime of this object
  // when `keepAliveConnection` is set — presumably to keep e.g. in-memory
  // databases alive between client connections. Writes are guarded by
  // `synchronized` on this instance.
  private[this] var openedKeepAliveConnection: Connection = null

  def createConnection(): Connection = {
    if(keepAliveConnection) {
      synchronized {
        if(openedKeepAliveConnection eq null)
          openedKeepAliveConnection = ds.getConnection
      }
    }
    // Always hand out a fresh connection from the underlying DataSource; the
    // keep-alive connection above is never returned to callers.
    val c = ds.getConnection
    if(connectionPreparer ne null) connectionPreparer(c)
    c
  }

  def close(): Unit = {
    // Close the keep-alive connection first; the `finally` guarantees the
    // underlying DataSource is also closed (if it supports closing) even when
    // closing the keep-alive connection throws.
    try if(keepAliveConnection && (openedKeepAliveConnection ne null)) openedKeepAliveConnection.close()
    finally ds match {
      case ds: Closeable => ds.close()
      case _ =>
    }
  }
}
object DataSourceJdbcDataSource extends JdbcDataSourceFactory {
  /** Builds an unpooled [[DataSourceJdbcDataSource]]: either an explicitly
    * configured `dataSourceClass`, or a [[DriverDataSource]] configured from
    * the top-level keys. */
  def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): DataSourceJdbcDataSource = {
    val dataSource = c.getStringOpt("dataSourceClass") match {
      case Some(dsClass) =>
        // Instantiate the user-specified DataSource reflectively and configure
        // its bean properties from the "properties" sub-config, if present.
        val propsO = c.getPropertiesOpt("properties")
        try {
          val instance = Class.forName(dsClass).newInstance.asInstanceOf[DataSource]
          propsO.foreach(BeanConfigurator.configure(instance, _))
          instance
        } catch {
          case ex: Exception =>
            throw new SlickException("Error configuring DataSource " + dsClass, ex)
        }
      case None =>
        // Fall back to a driver-based DataSource; the excluded keys are
        // handled specially (url/user/password/driver) or elsewhere.
        val dds = new DriverDataSource
        dds.classLoader = classLoader
        BeanConfigurator.configure(dds, c.toProperties, Set("url", "user", "password", "properties", "driver", "driverClassName"))
        dds
    }
    new DataSourceJdbcDataSource(dataSource, c.getBooleanOr("keepAliveConnection"), new ConnectionPreparer(c))
  }
}
/** A JdbcDataSource which can load a JDBC `Driver` from a class name */
@deprecated("Use DataSourceJdbcDataSource with DriverDataSource instead", "3.1")
trait DriverBasedJdbcDataSource extends JdbcDataSource {
  // The Driver instance that this object registered with the DriverManager,
  // or null if no registration was performed (driver was already available).
  private[this] var registeredDriver: Driver = null

  /** Loads and registers the driver class `driverName` unless the
    * `DriverManager` already knows a driver for `url`. SQLState "08001"
    * ("no suitable driver") from `getDriver` is treated as "not registered". */
  protected[this] def registerDriver(driverName: String, url: String): Unit = if(driverName ne null) {
    val oldDriver = try DriverManager.getDriver(url) catch { case ex: SQLException if "08001" == ex.getSQLState => null }
    if(oldDriver eq null) {
      // Class.forName triggers the driver's static registration block.
      Class.forName(driverName)
      registeredDriver = DriverManager.getDriver(url)
    }
  }

  /** Deregister the JDBC driver if it was registered by this JdbcDataSource.
    * Returns true if an attempt was made to deregister a driver. */
  def deregisterDriver(): Boolean =
    if(registeredDriver ne null) { DriverManager.deregisterDriver(registeredDriver); true }
    else false
}
/** A JdbcDataSource for lookup via a `Driver` or the `DriverManager` */
@deprecated("Use DataSourceJdbcDataSource with DriverDataSource instead", "3.1")
class DriverJdbcDataSource(url: String, user: String, password: String, prop: Properties,
                           driverName: String = null, driver: Driver = null,
                           connectionPreparer: ConnectionPreparer = null,
                           keepAliveConnection: Boolean = false) extends DriverBasedJdbcDataSource {
  // Connection held open (never handed out) for the lifetime of this object
  // when `keepAliveConnection` is set. Writes guarded by `synchronized`.
  private[this] var openedKeepAliveConnection: Connection = null

  // Only resolve a driver class by name when no Driver instance was given.
  if(driver eq null) registerDriver(driverName, url)

  // Merge user/password into the connection properties; when both are absent
  // the original `prop` is used unchanged (and may itself be null).
  val connectionProps = if(prop.ne(null) && user.eq(null) && password.eq(null)) prop else {
    val p = new Properties(prop)
    if(user ne null) p.setProperty("user", user)
    if(password ne null) p.setProperty("password", password)
    p
  }

  /** Opens a fresh connection; on first use with `keepAliveConnection` also
    * opens the long-lived keep-alive connection. */
  def createConnection(): Connection = {
    if(keepAliveConnection) {
      synchronized {
        if(openedKeepAliveConnection eq null)
          openedKeepAliveConnection = internalCreateConnection()
      }
    }
    internalCreateConnection()
  }

  /** Connects either through the explicit `driver` or the `DriverManager`,
    * then applies the optional `connectionPreparer`. */
  protected[this] def internalCreateConnection(): Connection = {
    val conn = (if(driver eq null) DriverManager.getConnection(url, connectionProps)
    else {
      val conn = driver.connect(url, connectionProps)
      if(conn eq null)
        // Per the JDBC contract, Driver.connect returns null for a URL it
        // does not understand; surface that as an explicit error.
        throw new SQLException("Driver " + driver + " does not know how to handle URL " + url, "08001")
      conn
    })
    if(connectionPreparer ne null) connectionPreparer(conn)
    conn
  }

  // Only the keep-alive connection is owned by this object; ordinary
  // connections are closed by their callers.
  def close(): Unit = if(keepAliveConnection) {
    if(openedKeepAliveConnection ne null) openedKeepAliveConnection.close()
  }
}
@deprecated("Use DataSourceJdbcDataSource with DriverDataSource instead", "3.1")
object DriverJdbcDataSource extends JdbcDataSourceFactory {
def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): DriverJdbcDataSource = {
val cp = new ConnectionPreparer(c)
new DriverJdbcDataSource(
c.getStringOr("url"),
c.getStringOr("user"),
c.getStringOr("password"),
c.getPropertiesOr("properties"),
c.getStringOr("driver", c.getStringOr("driverClassName")),
driver,
if(cp.isLive) cp else null,
c.getBooleanOr("keepAliveConnection"))
}
}
/** A JdbcDataSource for a HikariCP connection pool */
class HikariCPJdbcDataSource(val ds: com.zaxxer.hikari.HikariDataSource, val hconf: com.zaxxer.hikari.HikariConfig) extends JdbcDataSource {
  // Borrows a connection from the pool; closing it returns it to the pool.
  def createConnection(): Connection = ds.getConnection()
  // Shuts down the entire pool.
  def close(): Unit = ds.close()
}
object HikariCPJdbcDataSource extends JdbcDataSourceFactory {
  import com.zaxxer.hikari._

  /** Builds a HikariCP-backed pool from the given config. */
  def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): HikariCPJdbcDataSource = {
    // HikariCP resolves its own driver; an externally created instance
    // cannot be injected into it.
    if(driver ne null)
      throw new SlickException("An explicit Driver object is not supported by HikariCPJdbcDataSource")
    val conf = new HikariConfig()

    // Connection settings
    conf.setDataSourceClassName(c.getStringOr("dataSourceClass", null))
    c.getStringOpt("driverClassName").orElse(c.getStringOpt("driver")).foreach(conf.setDriverClassName)
    conf.setJdbcUrl(c.getStringOr("url", null))
    c.getStringOpt("user").foreach(conf.setUsername)
    c.getStringOpt("password").foreach(conf.setPassword)
    c.getPropertiesOpt("properties").foreach(conf.setDataSourceProperties)

    // Pool configuration
    conf.setConnectionTimeout(c.getMillisecondsOr("connectionTimeout", 1000))
    conf.setValidationTimeout(c.getMillisecondsOr("validationTimeout", 1000))
    conf.setIdleTimeout(c.getMillisecondsOr("idleTimeout", 600000))
    conf.setMaxLifetime(c.getMillisecondsOr("maxLifetime", 1800000))
    conf.setLeakDetectionThreshold(c.getMillisecondsOr("leakDetectionThreshold", 0))
    conf.setInitializationFailFast(c.getBooleanOr("initializationFailFast", false))
    c.getStringOpt("connectionTestQuery").foreach { query =>
      // An explicit test query disables the JDBC4 isValid()-based check.
      conf.setJdbc4ConnectionTest(false)
      conf.setConnectionTestQuery(query)
    }
    c.getStringOpt("connectionInitSql").foreach(conf.setConnectionInitSql)
    val numThreads = c.getIntOr("numThreads", 20)
    conf.setMaximumPoolSize(c.getIntOr("maxConnections", numThreads * 5))
    conf.setMinimumIdle(c.getIntOr("minConnections", numThreads))
    conf.setPoolName(name)
    conf.setRegisterMbeans(c.getBooleanOr("registerMbeans", false))

    // Equivalent of ConnectionPreparer
    conf.setReadOnly(c.getBooleanOr("readOnly", false))
    c.getStringOpt("isolation").map("TRANSACTION_" + _).foreach(conf.setTransactionIsolation)
    conf.setCatalog(c.getStringOr("catalog", null))

    new HikariCPJdbcDataSource(new HikariDataSource(conf), conf)
  }
}
/** Set parameters on a new Connection. This is used by [[DataSourceJdbcDataSource]]. */
class ConnectionPreparer(c: Config) extends (Connection => Unit) {
  /** Requested transaction isolation, translated to the JDBC constant. */
  val isolation: Option[Int] = c.getStringOpt("isolation").map {
    case "NONE"             => Connection.TRANSACTION_NONE
    case "READ_COMMITTED"   => Connection.TRANSACTION_READ_COMMITTED
    case "READ_UNCOMMITTED" => Connection.TRANSACTION_READ_UNCOMMITTED
    case "REPEATABLE_READ"  => Connection.TRANSACTION_REPEATABLE_READ
    case "SERIALIZABLE"     => Connection.TRANSACTION_SERIALIZABLE
    case unknown => throw new SlickException(s"Unknown transaction isolation level [$unknown]")
  }
  /** Catalog name; "catalog" wins over the legacy "defaultCatalog" key. */
  val catalog: Option[String] = c.getStringOpt("catalog").orElse(c.getStringOpt("defaultCatalog"))
  val readOnly: Option[Boolean] = c.getBooleanOpt("readOnly")

  /** True when at least one setting is configured, i.e. apply() has work to do. */
  val isLive: Boolean = isolation.isDefined || catalog.isDefined || readOnly.isDefined

  def apply(c: Connection): Unit = if(isLive) {
    isolation.foreach(c.setTransactionIsolation)
    readOnly.foreach(c.setReadOnly)
    catalog.foreach(c.setCatalog)
  }
}
| dotta/slick | slick/src/main/scala/slick/jdbc/JdbcDataSource.scala | Scala | bsd-2-clause | 10,389 |
package edu.sjsu.mithai.mqtt
import edu.sjsu.mithai.export.MessageStore
import edu.sjsu.mithai.spark.Store
import org.apache.log4j.Logger
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.eclipse.paho.client.mqttv3.{MqttClient, MqttMessage}
/** Publishes data to an MQTT broker using the Eclipse Paho client.
  *
  * A client connection to `brokerUrl` is established eagerly when the
  * instance is constructed.
  *
  * @param brokerUrl URL of the MQTT broker to connect to
  */
class MQTTPublisher(brokerUrl: String) {
  val logger: Logger = Logger.getLogger(this.getClass)

  // Keep client session state in memory only; nothing is persisted to disk.
  val persistence = new MemoryPersistence()
  var client: MqttClient = null
  var topic: String = null

  // Shared (topic, payload) queue; see Store.mqttMessageStore.
  val messageStore: MessageStore[(String, String)] = Store.mqttMessageStore

  client = new MqttClient(brokerUrl, MqttClient.generateClientId(), persistence)
  client.connect()

  /** Enqueues `data` for `topic` on the shared message store.
    *
    * NOTE(review): this only buffers the message — it does NOT publish to the
    * broker; use [[publishData]] for that. A leftover debug `println` of the
    * queue size was removed here.
    */
  def sendDataToTopic(data: String, topic: String): Unit = {
    messageStore.addMessage((topic, data))
  }

  /** Publishes `data` to `topic` on the connected broker.
    *
    * The payload is encoded explicitly as UTF-8 so the bytes sent do not
    * depend on the JVM's platform default charset.
    */
  def publishData(data: String, topic: String): Unit = {
    val msgTopic = client.getTopic(topic)
    val message = new MqttMessage(data.getBytes("UTF-8"))
    msgTopic.publish(message)
    // To avoid burst of messages
    Thread.sleep(100)
  }
}
| meethai/mithai | src/main/scala/edu/sjsu/mithai/mqtt/MQTTPublisher.scala | Scala | apache-2.0 | 1,065 |
package io.reactors
package concurrent
import io.reactors.common.afterTime
import io.reactors.test._
import org.scalacheck._
import org.scalacheck.Prop.forAllNoShrink
import org.scalacheck.Gen.choose
import org.scalatest.AsyncFunSuite
import org.scalatest.Matchers
import org.scalatest.concurrent.AsyncTimeLimitedTests
import scala.annotation.unchecked
import scala.collection._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.Success
import scala.util.control.ControlThrowable
/** Completes `p` with whether `Reactor.self` refers to this instance when the
  * `ReactorStarted` lifecycle event fires. */
class SelfReactor(val p: Promise[Boolean]) extends Reactor[Int] {
  sysEvents onMatch {
    case ReactorStarted => p.success(this eq Reactor.self)
  }
}
/** Completes `p` already during construction, before any event is handled. */
class PromiseReactor(val p: Promise[Unit]) extends Reactor[Unit] {
  p.success(())
}
/** During construction, `Reactor.self` must already refer to this instance;
  * the result of that identity check completes `p`. */
class ReactorSelfReactor(val p: Promise[Boolean]) extends Reactor[Unit] {
  p.success(Reactor.self[Reactor[_]] eq this)
}
/** Completes `p` with `true` once the `ReactorStarted` event is delivered. */
class ReactorStartedReactor(val p: Promise[Boolean]) extends Reactor[Unit] {
  sysEvents onMatch {
    case ReactorStarted => p.success(true)
  }
}
/** Completes `p` when a "success" string arrives on the main channel. */
class AfterFirstBatchReactor(val p: Promise[Boolean]) extends Reactor[String] {
  main.events onMatch {
    case "success" => p.success(true)
  }
}
/** Sends itself "success" from the `ReactorStarted` handler and completes `p`
  * when that message is delivered back on the main channel. */
class DuringFirstBatchReactor(val p: Promise[Boolean]) extends Reactor[String] {
  sysEvents onMatch {
    case ReactorStarted => main.channel ! "success"
  }
  main.events onMatch {
    case "success" => p.success(true)
  }
}
/** Replies to an incoming "message" by sending itself "success", and
  * completes `p` when that round trip is delivered. */
class DuringFirstEventReactor(val p: Promise[Boolean]) extends Reactor[String] {
  main.events onMatch {
    case "message" => main.channel ! "success"
    case "success" => p.success(true)
  }
}
/** On "start", sends itself two "dec" messages and completes `p` after both
  * have been processed. */
class TwoDuringFirstReactor(val p: Promise[Boolean]) extends Reactor[String] {
  var countdown = 2
  main.events onMatch {
    case "start" =>
      main.channel ! "dec"
      main.channel ! "dec"
    case "dec" =>
      countdown -= 1
      if (countdown == 0) p.success(true)
  }
}
/** Completes `p` after receiving `count` "dec" messages on the main channel. */
class CountdownPromiseReactor(val p: Promise[Boolean], var count: Int)
  extends Reactor[String] {
  main.events onMatch {
    case "dec" =>
      count -= 1
      if (count == 0) p.success(true)
  }
}
/** Seals the main channel on request ("seal") and completes `p` once the
  * reactor reaches `ReactorTerminated`. */
class AfterSealTerminateReactor(val p: Promise[Boolean]) extends Reactor[String] {
  main.events onMatch {
    case "seal" => main.seal()
  }
  sysEvents onMatch {
    case ReactorTerminated => p.success(true)
  }
}
/** Exercises a second, dynamically opened channel: on start, main triggers a
  * ping on the secondary connector; both connectors then seal themselves, and
  * `p` is completed when the reactor terminates. */
class NewChannelReactor(val p: Promise[Boolean]) extends Reactor[String] {
  val secondary = system.channels.open[Boolean]
  sysEvents onMatch {
    case ReactorStarted =>
      main.channel ! "open"
    case ReactorTerminated =>
      p.success(true)
  }
  main.events onMatch {
    case "open" =>
      secondary.channel ! true
      // Sealing main leaves only the secondary connector open.
      main.seal()
  }
  secondary.events onEvent { v =>
    // Sealing the last open connector lets the reactor terminate.
    secondary.seal()
  }
}
/** Counts `ReactorScheduled` lifecycle events; after five of them it seals
  * the main channel and completes `p` on termination. */
class ReactorScheduledReactor(val p: Promise[Boolean]) extends Reactor[String] {
  var left = 5
  sysEvents onMatch {
    case ReactorScheduled =>
      left -= 1
      if (left == 0) main.seal()
    case ReactorTerminated =>
      p.success(true)
  }
}
/** Counts `ReactorPreempted` lifecycle events, sending itself a dummy message
  * to force re-scheduling until five preemptions are seen, then seals the
  * main channel and completes `p` on termination. */
class ReactorPreemptedReactor(val p: Promise[Boolean]) extends Reactor[String] {
  var left = 5
  sysEvents onMatch {
    case ReactorPreempted =>
      left -= 1
      if (left > 0) main.channel ! "dummy"
      else if (left == 0) main.seal()
    case ReactorTerminated =>
      p.success(true)
  }
}
/**
 * Creates an emitter that is never explicitly closed; `p` resolves from its
 * `onDone` callback — presumably fired by the reactor's teardown (verified by
 * the corresponding test).
 */
class EventSourceReactor(val p: Promise[Boolean]) extends Reactor[String] {
  val emitter = new Events.Emitter[Int]()
  emitter.onDone {
    p.success(true)
  }
  sysEvents.onMatch {
    case ReactorPreempted => main.seal()
  }
}
/**
 * Seals immediately on startup; when `ReactorTerminated` fires, reports
 * whether the frame is still registered under its name "ephemo".
 */
class TerminatedReactor(val p: Promise[Boolean]) extends Reactor[Unit] {
  sysEvents.onMatch {
    case ReactorStarted =>
      main.seal()
    case ReactorTerminated =>
      // The frame should still be resolvable by name at this point.
      val stillRegistered = system.frames.forName("ephemo") != null
      p.success(stillRegistered)
  }
}
/**
 * On startup opens a daemon channel named "terminator" and signals `started`;
 * receiving "end" on that channel seals main and signals `ended`.
 */
class LookupChannelReactor(val started: Promise[Boolean], val ended: Promise[Boolean])
extends Reactor[Unit] {
  sysEvents.onMatch {
    case ReactorStarted =>
      val terminatorConn = system.channels.daemon.named("terminator").open[String]
      terminatorConn.events.onMatch {
        case "end" =>
          main.seal()
          ended.success(true)
      }
      // Signal readiness only after the "terminator" channel is registered.
      started.success(true)
  }
}
/**
 * Asks the name-resolution service for its own main channel ("chaki#main"),
 * pings it with a unit event, and resolves `p` once the ping is delivered.
 */
class ChannelsAskReactor(val p: Promise[Boolean]) extends Reactor[Unit] {
  // Daemon reply channel for the resolution answer.
  val answer = system.channels.daemon.open[Option[Channel[_]]]
  system.names.resolve ! (("chaki#main", answer.channel))
  answer.events.onMatch {
    case None =>
      sys.error("chaki#main not found")
    case Some(ch: Channel[Unit] @unchecked) =>
      // Send a unit event back to our own main channel.
      ch ! (())
  }
  main.events.on {
    main.seal()
    p.success(true)
  }
}
/**
 * Integration-style tests for `ReactorSystem`: spawning, naming, channel
 * creation, lifecycle events (started/scheduled/preempted/terminated) and the
 * channel lookup/await protocols.
 *
 * Each test creates its own reactor system and shuts it down once the promise
 * it observes completes; assertions are made on the mapped future so the
 * async test framework awaits them.
 */
class ReactorSystemTest extends AsyncFunSuite
with Matchers with AsyncTimeLimitedTests {
  // Hard upper bound on each async test.
  def timeLimit = 10.seconds

  implicit override def executionContext = ExecutionContext.Implicits.global

  test("system should return without throwing") {
    val system = ReactorSystem.default("test")
    try {
      val proto = Reactor[Unit] { self => }
      system.spawn(proto)
      // Unnamed reactors get auto-generated names; the first one is "reactor-1".
      assert(system.frames.forName("reactor-1") != null)
    } finally system.shutdown()
  }

  test("system should return without throwing and use custom name") {
    val system = ReactorSystem.default("test")
    try {
      val proto = Reactor[Unit] { self => }
      system.spawn(proto.withName("Izzy"))
      assert(system.frames.forName("Izzy") != null)
      assert(system.frames.forName("Izzy").frame.name == "Izzy")
    } finally system.shutdown()
  }

  test("system should throw when attempting to reuse the same name") {
    val system = ReactorSystem.default("test")
    try {
      val proto = Reactor[Unit] { self => }
      system.spawn(proto.withName("Izzy"))
      intercept[IllegalArgumentException] {
        val proto = Reactor[Unit] { self => }
        system.spawn(proto.withName("Izzy"))
      }
      assert(true)
    } finally system.shutdown()
  }

  test("system should create a default channel for the reactor") {
    val system = ReactorSystem.default("test")
    try {
      val proto = Reactor[Unit] { self => }
      val channel = system.spawn(proto.withName("Izzy"))
      assert(channel != null)
      // The spawn result must be the very same channel as the "main" connector's.
      val conn =
        system.frames.forName("Izzy").connectors("main").asInstanceOf[Connector[_]]
      assert(conn != null)
      assert(conn.channel eq channel)
      assert(!conn.isDaemon)
    } finally system.shutdown()
  }

  test("system should create a system channel for the reactor") {
    val system = ReactorSystem.default("test")
    try {
      val proto = Reactor[Unit] { self => }
      val channel = system.spawn(proto.withName("Izzy"))
      // The "system" connector is a daemon, so it does not keep the reactor alive.
      val conn =
        system.frames.forName("Izzy").connectors("system").asInstanceOf[Connector[_]]
      assert(conn != null)
      assert(conn.isDaemon)
    } finally system.shutdown()
  }

  test("system should schedule reactor's ctor for execution") {
    val system = ReactorSystem.default("test")
    val p = Promise[Unit]()
    system.spawn(Proto[PromiseReactor](p))
    p.future onComplete {
      case _ => system.shutdown()
    }
    p.future.map(_ => assert(true))
  }

  test("system should invoke the ctor with the Reactor.self set") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    system.spawn(Proto[ReactorSelfReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should ensure the ReactorStarted event") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    system.spawn(Proto[ReactorStartedReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should process an event that arrives after the first batch") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[AfterFirstBatchReactor](p))
    // Delay ensures the startup batch has finished before the event is sent.
    afterTime(250.millis) {
      ch ! "success"
    }
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should process an event that arrives during the first batch") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[DuringFirstBatchReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should process an event that arrives during the first event") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[DuringFirstEventReactor](p))
    ch ! "message"
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should process two events that arrive during the first event") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[TwoDuringFirstReactor](p))
    ch ! "start"
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should process 100 incoming events") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[CountdownPromiseReactor](p, 100))
    afterTime(250.millis) {
      for (i <- 0 until 100) ch ! "dec"
    }
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should terminate after sealing its channel") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[AfterSealTerminateReactor](p))
    ch ! "seal"
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should be able to open a new channel") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    system.spawn(Proto[NewChannelReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should get ReactorScheduled events") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    val ch = system.spawn(Proto[ReactorScheduledReactor](p))
    // Sends "dummy" five times, 60 ms apart, so the reactor is re-scheduled
    // at least five times.
    // NOTE(review): procedure syntax (`def resend(...) { ... }`) is deprecated;
    // prefer `def resend(left: Int): Unit = { ... }` when touching this code.
    def resend(left: Int) {
      if (left > 0) {
        afterTime(60.millis) {
          ch ! "dummy"
          resend(left - 1)
        }
      }
    }
    resend(5)
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("reactor should get ReactorPreempted events") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    system.spawn(Proto[ReactorPreemptedReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("Reactor.self should be correctly set") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]()
    system.spawn(Proto[SelfReactor](p))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("after termination and before ReactorTerminated reactor name must be released") {
    val system = ReactorSystem.default("test")
    val done = Promise[Boolean]()
    val p = Promise[Boolean]()
    system.spawn(Proto[TerminatedReactor](p).withName("ephemo"))
    p.future.onComplete {
      case Success(true) =>
        // After a grace period the name must no longer resolve to a frame.
        afterTime(1200.millis) {
          assert(system.frames.forName("ephemo") == null)
          done.success(true)
        }
    }
    done.future.onComplete(_ => system.shutdown())
    done.future.map(t => assert(t))
  }

  test("after the reactor starts, its channel should be looked up") {
    val system = ReactorSystem.default("test")
    val done = Promise[Boolean]()
    val started = Promise[Boolean]()
    val ended = Promise[Boolean]()
    val channel = system.spawn(Proto[LookupChannelReactor](started, ended)
      .withName("pi"))
    started.future.onComplete {
      case Success(true) =>
        // Look the daemon channel up by its "reactor#channel" name.
        system.channels.get[String]("pi#terminator") match {
          case Some(ch) => ch ! "end"
          case None => sys.error("channel not found")
        }
        ended.future.onComplete {
          case Success(true) => done.success(true)
        }
    }
    done.future.onComplete(_ => system.shutdown())
    done.future.map(t => assert(t))
  }

  test("channel resolution reactor should look up channels when asked") {
    val system = ReactorSystem.default("test")
    val p = Promise[Boolean]
    system.spawn(Proto[ChannelsAskReactor](p).withName("chaki"))
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t))
  }

  test("channel await reactor should await channels when asked") {
    val system = ReactorSystem.default("test")
    val p = Promise[String]
    val awaitee = Reactor[String] { self =>
      self.main.events.onEvent { x =>
        p.success(x)
        self.main.seal()
      }
    }
    system.spawn(awaitee.withName("awaitee"))
    system.spawn(Reactor[String] { self =>
      // `names.await` replies once the requested channel exists.
      val answer = system.channels.daemon.open[Channel[_]]
      system.names.await ! (("awaitee#main", answer.channel))
      answer.events onMatch {
        case (ch: Channel[String] @unchecked) =>
          ch ! "done"
          self.main.seal()
      }
    })
    p.future.onComplete(_ => system.shutdown())
    p.future.map(t => assert(t == "done"))
  }

  test("channel await reactor should await a channel that appears later") {
    val system = ReactorSystem.default("test")
    val done = Promise[Boolean]
    val p = Promise[String]
    val ch = system.spawn(Reactor[String] { self =>
      val answer = system.channels.daemon.open[Channel[_]]
      // The awaitee does not exist yet; the reply must arrive after it is
      // spawned below (one second later).
      system.names.await ! (("awaitee#main", answer.channel))
      answer.events onMatch {
        case (ch: Channel[String] @unchecked) =>
          ch ! "gotem"
          self.main.seal()
      }
      system.clock.timeout(1.second) on {
        val proto = Reactor[String] { self =>
          self.main.events.onEvent { x =>
            p.success(x)
            self.main.seal()
          }
        }
        system.spawn(proto.withName("awaitee"))
      }
    })
    p.future.onComplete {
      case Success("gotem") =>
        afterTime(1000.millis) {
          assert(system.channels.get("awaitee#main") == None)
          done.success(true)
        }
    }
    done.future.onComplete(_ => system.shutdown())
    done.future.map(t => assert(t))
  }

  test("existing channel listeners must be preserved after spawn failures") {
    val system = ReactorSystem.default("test")
    // A scheduler whose initSchedule always throws, to force a spawn failure.
    val scheduler = new Scheduler.Proxy(system.bundle.defaultScheduler) {
      override def initSchedule(f: Frame) = sys.error("Init error (SHOULD BE CAUGHT!)")
    }
    system.bundle.registerScheduler("proxy", scheduler)
    val done = Promise[Boolean]()
    val ready = Promise[Boolean]()
    system.spawn(Reactor[Unit] { self =>
      system.channels.await[String]("test-reactor#aux").onEvent { ch =>
        ch ! "done"
        self.main.seal()
      }
      ready.success(true)
    })
    def spawnTestReactor(fail: Boolean) = {
      val proto = Reactor[Unit] { self =>
        val aux = system.channels.named("aux").open[String]
        aux.events onMatch {
          case "done" =>
            done.success(true)
            aux.seal()
        }
        self.main.seal()
      }
      if (fail) system.spawn(proto.withScheduler("proxy").withName("test-reactor"))
      else system.spawn(proto.withName("test-reactor"))
    }
    ready.future.onComplete { _ =>
      // First spawn fails during scheduling; the awaiting listener must still
      // fire for the successful retry under the same name.
      try spawnTestReactor(true)
      catch {
        case _: RuntimeException => spawnTestReactor(false)
      }
    }
    done.future.onComplete(_ => system.shutdown())
    done.future.map(t => assert(t))
  }

  test("existing channel listeners must be preserved after terminations") {
    // Silence the error handler: the first reactor below terminates with a
    // deliberate test exception.
    val system = ReactorSystem.default("test",
      ReactorSystem.Bundle.default(ReactorSystem.defaultScheduler, """
        error-handler = {
          name = "io.reactors.SilentErrorHandler"
        }
      """))
    val done = Promise[Boolean]()
    val ready = Promise[Boolean]()
    system.spawn(Reactor[Unit] { self =>
      system.channels.await[String]("test-reactor#aux").onEvent { ch =>
        ch ! "done"
        self.main.seal()
      }
      ready.success(true)
    })
    def spawnTestReactor(fail: Boolean) = {
      val proto = Reactor[Unit] { self =>
        if (fail) exception.test("Reactor terminated (THIS IS OK!)")
        val aux = system.channels.named("aux").open[String]
        aux.events onMatch {
          case "done" =>
            done.success(true)
            aux.seal()
        }
        self.main.seal()
      }
      system.spawn(proto.withName("test-reactor"))
    }
    ready.future.onComplete { _ =>
      spawnTestReactor(true)
      afterTime(1000.millis) {
        spawnTestReactor(false)
      }
    }
    done.future.onComplete(_ => system.shutdown())
    done.future.map(t => assert(t))
  }
}
| reactors-io/reactors | reactors-core/shared/src/test/scala/io/reactors/concurrent/reactor-system-tests.scala | Scala | bsd-3-clause | 16,847 |
package ddd
import Types._
/** Value types for the payment domain: currencies, money and payment methods. */
object PaymentAPI {

  /** Card networks accepted for card payments. */
  sealed trait CardType
  case object MasterCard extends CardType
  case object Visa extends CardType

  /** Card numbers are plain strings; no validation is enforced at the type level. */
  type CardNumber = String

  /** A currency carries its exchange rate and its display sign. */
  sealed trait Currency {
    def rate: BigDecimal
    def sign: String
  }
  case class USD(rate: BigDecimal, sign: String = "$") extends Currency
  case class GBP(rate: BigDecimal, sign: String = "£") extends Currency
  case class EUR(rate: BigDecimal, sign: String = "€") extends Currency
  case class HUF(rate: BigDecimal, sign: String = "Ft") extends Currency

  /** An amount expressed in a particular currency. */
  case class Money(amount: BigDecimal, currency: Currency)

  /** The supported ways of paying. */
  sealed trait Payment
  case object Cash extends Payment
  case class Cheque(money: Money) extends Payment
  case class Card(cardType: CardType, cardNumber: CardNumber) extends Payment
}
| enpassant/miniatures | src/main/scala/ddd/PaymentAPI.scala | Scala | apache-2.0 | 796 |
package net.liftweb.util
import _root_.org.specs._
import _root_.java.io.ByteArrayInputStream
/**
 * Specification for `SecurityHelpers`: random-number helpers, probabilistic
 * `shouldShow`, Blowfish encryption round-trips, and MD5/SHA/SHA-256 digests
 * in both base64 and hex encodings. Digest expectations are fixed vectors for
 * the string "hello".
 */
object SecurityHelpersSpec extends Specification with SecurityHelpers with IoHelpers with StringHelpers {
  "Security Helpers" should {
    "provide a randomLong method returning a random Long modulo a number" in {
      randomLong(7L) must be_<(7L)
    }
    "provide a randomInt method returning a random Int modulo a number" in {
      randomInt(7) must be_<(7)
    }
    "provide a shouldShow function always returning true only a given percentage of time, expressed as a Int between 0 and 100" in {
      // Only the degenerate endpoints (always/never) are deterministic enough to assert.
      shouldShow(100) must beTrue
      shouldShow(0) must beFalse
    }
    "provide a shouldShow function always returning true only a given percentage of time, expressed as a Double between 0 and 1.0" in {
      shouldShow(1.0) must beTrue
      shouldShow(0.0) must beFalse
    }
    "provide makeBlowfishKey, blowfishEncrypt, blowfishDecrypt functions to encrypt/decrypt Strings with Blowfish keys" in {
      // Round-trip: decrypting the ciphertext with the same key recovers the plaintext.
      val key = makeBlowfishKey
      val encrypted = blowfishEncrypt("hello world", key)
      encrypted must_!= "hello world"
      blowfishDecrypt(encrypted, key) must_== "hello world"
    }
    "provide a md5 function to create a md5 digest from a string" in {
      md5("hello") must_== "XUFAKrxLKna5cZ2REBfFkg=="
      md5("hello") must_!= md5("hell0")
    }
    "provide a hash function to create a SHA digest from a string" in {
      hash("hello") must_== "qvTGHdzF6KLavt4PO0gs2a6pQ00="
      hash("hello") must_!= hash("hell0")
    }
    "provide a hash256 function to create a SHA-256 digest from a string" in {
      hash256("hello") must_== "LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ="
      hash256("hello") must_!= hash256("hell0")
    }
    "provide a hex encoded SHA hash function" in {
      hexDigest("hello".getBytes) must_== "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"
      hexDigest("hello".getBytes) must_!= hexDigest("hell0".getBytes)
    }
    "provide a hex encoded SHA-256 hash function" in {
      hexDigest256("hello".getBytes) must_== "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
      hexDigest256("hello".getBytes) must_!= hexDigest256("hell0".getBytes)
    }
  }
}
import _root_.org.specs.runner._
class SecurityHelpersSpecTest extends JUnit4(SecurityHelpersSpec)
| andreum/liftweb | lift-util/src/test/scala/net/liftweb/util/SecurityHelpersSpec.scala | Scala | apache-2.0 | 2,329 |
package example.web
import java.time.LocalDateTime
import com.twitter.finagle.Service
import com.twitter.finagle.http.Method.Get
import com.twitter.finagle.http.Request
import example.external.UserDirectory
import io.fintrospect.templating.View
import io.fintrospect.{RouteSpec, ServerRoute}
case class Index(time: String, browser: String) extends View
/** Builds the GET route that renders the index view. */
object ShowIndex {
  def route(userDirectory: UserDirectory): ServerRoute[Request, View] = {
    // NOTE(review): `userDirectory` is currently unused by this route.
    val renderIndex = Service.mk[Request, View] { req =>
      val agent = req.headerMap.getOrElse("User-Agent", "unknown")
      Index(LocalDateTime.now().toString, agent)
    }
    RouteSpec("Index").at(Get).bindTo(renderIndex)
  }
}
| daviddenton/fintrospect-example-app | src/main/scala/example/web/ShowIndex.scala | Scala | apache-2.0 | 661 |
package synereo.client.utils
import diode.AnyAction._
import shared.dtos._
import shared.models.Post
import synereo.client.handlers._
import synereo.client.logger
import synereo.client.services.{ApiTypes, CoreApi, SYNEREOCircuit}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}
//scalastyle:off
/**
 * Helpers that bridge the CoreApi HTTP layer and the diode circuit
 * (`SYNEREOCircuit`): response demultiplexing for the session-ping cycle,
 * message subscription management, and fire-with-retry wrappers around the
 * various POST endpoints.
 *
 * Retry convention used throughout: each `post()`/`subscribe()` helper retries
 * up to three attempts in total; after the third failure it dispatches
 * `ShowServerError` instead of retrying again.
 */
object ContentUtils {
  /**
   * Returns the current message model from the `MessagesRootModel`,
   * or `Nil` when no messages have been loaded yet.
   */
  def getCurrMsgModel(): Seq[Post] = {
    // #todo Is this method even required. My guess it is not
    if (SYNEREOCircuit.zoom(_.messages).value.nonEmpty) SYNEREOCircuit.zoom(_.messages).value.get.messagesModelList else Nil
  }

  /**
   * Demultiplexes a raw session-ping response and updates the UI.
   *
   * The session ping returns a heterogeneous JSON array; this method drops
   * "sessionPong" entries, dispatches balance/notification/connection updates
   * as side effects, and returns any new message posts.
   *
   * @param response raw JSON body of the session ping
   * @return the new message contents, or `Nil` when none arrived
   */
  def processRes(response: String): Seq[ResponseContent] = {
    // Split the JSON array into individual serialized entries, ignoring pongs.
    val responseArray = upickle.json.read(response).arr.distinct.map(e => upickle.json.write(e)).filterNot(_.contains("sessionPong"))
    val omniBalanceResponse = responseArray.filter(_.contains("omniBalanceResponse"))
    if (omniBalanceResponse.nonEmpty) {
      val content = upickle.default.read[ApiResponse[OmniBalanceResponse]](omniBalanceResponse.head).content
      SYNEREOCircuit.dispatch(BalanceChanged(content.amp, content.btc, content.address))
    }
    val (cnxn, postContent, intro, cnctNot) = sortContent(responseArray)
    // Three more responses (session pong, begin introduction, introduction
    // confirmation) are not processed because they require no UI change.
    if (intro.nonEmpty) SYNEREOCircuit.dispatch(AddNotification(intro.map(_.content)))
    if (cnctNot.nonEmpty) {
      val resp = cnctNot.map(e => ConnectionsUtils.getCnxnFromNot(e.content))
      SYNEREOCircuit.dispatch(UpdateConnections(resp))
    }
    if (cnxn.nonEmpty) {
      val res = cnxn.map(e => ConnectionsUtils.getCnxnFromRes(e.content))
      SYNEREOCircuit.dispatch(UpdateConnections(res))
    }
    // Return the new message contents if the response carried any, else Nil.
    if (postContent.nonEmpty) postContent.map(_.content)
    else Nil
  }

  /**
   * Sorts serialized response entries into the four recognized content types
   * by attempting to deserialize each entry as one type after another.
   * Entries matching none of the four types are silently discarded.
   *
   * @param responseArray individually serialized response entries
   * @return (connection profiles, message posts, introductions, connect notifications)
   */
  def sortContent(responseArray: Seq[String]): (Seq[ApiResponse[ConnectionProfileResponse]],
    Seq[ApiResponse[ResponseContent]],
    Seq[ApiResponse[Introduction]],
    Seq[ApiResponse[ConnectNotification]]) = {
    var remainingObj: Seq[String] = Nil
    var cnxn: Seq[ApiResponse[ConnectionProfileResponse]] = Nil
    var msg: Seq[ApiResponse[ResponseContent]] = Nil
    var intro: Seq[ApiResponse[Introduction]] = Nil
    var cnctNot: Seq[ApiResponse[ConnectNotification]] = Nil
    responseArray.foreach {
      e =>
        // Try each deserialization in turn; the first that succeeds wins.
        Try(upickle.default.read[ApiResponse[ConnectionProfileResponse]](e)) match {
          case Success(a) => cnxn :+= a
          case Failure(b) => Try(upickle.default.read[ApiResponse[ResponseContent]](e)) match {
            case Success(a) => msg :+= a
            case Failure(b) => Try(upickle.default.read[ApiResponse[Introduction]](e)) match {
              case Success(a) => intro :+= a
              case Failure(b) => Try(upickle.default.read[ApiResponse[ConnectNotification]](e)) match {
                case Success(a) => cnctNot :+= a
                case Failure(b) => remainingObj :+= e
              }
            }
          }
        }
    }
    (cnxn, msg, intro, cnctNot)
  }

  /** Posts a new connection (introduction) request, retrying up to three attempts. */
  def postNewConnection(content: Content) = {
    var count = 1
    post()
    def post(): Unit = CoreApi.postIntroduction(content).onComplete {
      case Success(res) =>
        logger.log.debug("Connection request sent successfully")
      case Failure(fail) =>
        if (count == 3) {
          // logger.log.error("Error sending connection request")
          SYNEREOCircuit.dispatch(ShowServerError(fail.getMessage))
        } else {
          count = count + 1
          post()
        }
    }
  }

  /** Sends an introduction-confirmation request, retrying up to three attempts. */
  def updateIntroductionsModel(introConfirmReq: IntroConfirmReq) = {
    var count = 1
    post()
    def post(): Unit = CoreApi.postIntroduction(introConfirmReq).onComplete {
      case Success(response) => logger.log.debug("Intro confirm request sent successfully")
      // NOTE(review): the error is logged on every failed attempt, not only the last.
      case Failure(response) => logger.log.error("Error sending intro confirm request")
        if (count == 3) {
          SYNEREOCircuit.dispatch(ShowServerError(response.getMessage))
        }
        else {
          count = count + 1
          post()
        }
    }
  }

  /**
   * Issues the default eval-subscribe request and then starts the session ping.
   *
   * The default label is `MESSAGE_POST_LABEL`; on success the session-ping
   * cycle is started by dispatching `RefreshMessages`.
   */
  def subsForMsgAndBeginSessionPing() = {
    val expr = Expression(ApiTypes.requestTypes.FEED_EXPRESSION,
      ExpressionContent(SYNEREOCircuit.zoom(_.connections.connectionsResponse).value.map(cnxnResp => cnxnResp.connection) ++ Seq(ConnectionsUtils.getSelfConnnection()),
        s"any([${AppUtils.MESSAGE_POST_LABEL}])"))
    val req = SubscribeRequest(SYNEREOCircuit.zoom(_.sessionRootModel.sessionUri).value, expr)
    // Clear the previous messages model. It sets the state to Pot.Empty, effectively
    // showing the loader (the dashboard loader is shown for the Pot empty state).
    SYNEREOCircuit.dispatch(ClearMessages())
    var count = 1
    subscribe()
    def subscribe(): Unit = CoreApi.evalSubscribeRequest(req).onComplete {
      case Success(res) =>
        logger.log.debug(s"eval subscribe complete :${res}")
        // Remember the labels and connections just subscribed; they are needed to
        // cancel this request before issuing a new subscription.
        SYNEREOCircuit.dispatch(UpdatePrevSearchCnxn(req.expression.content.cnxns))
        SYNEREOCircuit.dispatch(UpdatePrevSearchLabel(req.expression.content.label))
        // Refresh messages to begin the session ping cycle.
        SYNEREOCircuit.dispatch(RefreshMessages())
      case Failure(res) =>
        if (count == 3) {
          println(s"Failure data = ${res.getMessage}")
          // logger.log.error("Open Error modal Popup")
          SYNEREOCircuit.dispatch(ShowServerError(res.getMessage))
        } else {
          count = count + 1
          subscribe()
          logger.log.error("Error in subscription")
        }
    }
  }

  /**
   * Issues an eval-subscribe request for messages, retrying up to three
   * attempts, and records the subscribed connections/label for later cancellation.
   *
   * @param req the subscription request to send
   */
  def subsForMsg(req: SubscribeRequest) = {
    var count = 1
    subscribe()
    def subscribe(): Unit = CoreApi.evalSubscribeRequest(req).onComplete {
      case Success(res) =>
        SYNEREOCircuit.dispatch(UpdatePrevSearchCnxn(req.expression.content.cnxns))
        SYNEREOCircuit.dispatch(UpdatePrevSearchLabel(req.expression.content.label))
      case Failure(res) =>
        if (count == 3) {
          // logger.log.error("Open Error modal Popup")
          SYNEREOCircuit.dispatch(ShowServerError(res.getMessage))
        } else {
          count = count + 1
          subscribe()
          logger.log.error("Error in subscription")
        }
    }
  }

  /**
   * Cancels the previously recorded subscription and, once the cancellation
   * succeeds, issues the new subscription `req`.
   *
   * @param req the new subscription request
   */
  def cancelPreviousAndSubscribeNew(req: SubscribeRequest) = {
    SYNEREOCircuit.dispatch(ClearMessages())
    var count = 1
    cancelPrevious()
    def cancelPrevious(): Unit = CoreApi.cancelSubscriptionRequest(CancelSubscribeRequest(
      SYNEREOCircuit.zoom(_.sessionRootModel.sessionUri).value, SYNEREOCircuit.zoom(_.searches.previousSearchCnxn).value,
      SYNEREOCircuit.zoom(_.searches.previousSearchLabel).value)).onComplete {
      case Success(res) =>
        subsForMsg(req)
      case Failure(res) =>
        if (count == 3) {
          // logger.log.error("server error")
          SYNEREOCircuit.dispatch(ShowServerError(res.getMessage))
        } else {
          count = count + 1
          cancelPrevious()
        }
    }
  }

  /** Posts a message via eval-subscribe, retrying up to three attempts. */
  def postMessage(req: SubscribeRequest) = {
    var count = 1
    postMsg()
    def postMsg(): Unit = CoreApi.evalSubscribeRequest(req).onComplete {
      case Success(res) =>
        // println("messages handler message post success")
        logger.log.debug("message post success")
      case Failure(fail) =>
        if (count == 3) {
          // logger.log.error("server error")
          // println("messages handler message post failure ")
          SYNEREOCircuit.dispatch(ShowServerError(fail.getMessage))
        } else {
          count = count + 1
          postMsg()
        }
    }
  }

  // def leaf(text: String /*, color: String = "#CC5C64"*/) = "leaf(text(\"" + s"${text}" + "\"),display(color(\"\"),image(\"\")))"

  /**
   * Posts new labels and, on success, posts the message and records the
   * created labels in the circuit. Retries the label post up to three attempts.
   */
  def postLabelsAndMsg(labelPost: LabelPost, subscribeReq: SubscribeRequest) = {
    var count = 1
    post()
    def post(): Unit = CoreApi.postLabel(labelPost).onComplete {
      case Success(res) =>
        postMessage(subscribeReq)
        SYNEREOCircuit.dispatch(CreateLabels(labelPost.labels))
      case Failure(res) =>
        // println("searces handler label post failure")
        if (count == 3) {
          // logger.log.debug("server error")
          SYNEREOCircuit.dispatch(ShowServerError(res.getMessage))
        } else {
          count = count + 1
          post()
        }
    }
  }

  /** Sends a user-update request (profile image), retrying up to three attempts. */
  def postUserUpdate(req: UpdateUserRequest) = {
    var count = 1
    post()
    def post(): Unit = CoreApi.updateUserRequest(req).onComplete {
      case Success(response) => {
        logger.log.debug("user image update request successful")
        SYNEREOCircuit.dispatch(UpdateUserImage(req.jsonBlob.imgSrc))
        // println(s"In contentutils ${req}")
      }
      case Failure(response) =>
        if (count == 3) {
          logger.log.error("user update error")
          SYNEREOCircuit.dispatch(ShowServerError(response.toString))
        } else {
          count = count + 1
          post()
        }
    }
  }

  /** Closes the server session and logs the user out, retrying up to three attempts. */
  def closeSessionReq(closeSessionRequest: CloseSessionRequest) = {
    var count = 1
    post()
    def post(): Unit = CoreApi.closeSessionRequest(closeSessionRequest).onComplete {
      case Success(response) => {
        logger.log.debug("Closed session request")
        SYNEREOCircuit.dispatch(LogoutUser())
      }
      // NOTE(review): the error is logged on every failed attempt, not only the last.
      case Failure(response) => logger.log.error("Error closing the session")
        if (count == 3) {
          SYNEREOCircuit.dispatch(ShowServerError(response.getMessage))
        }
        else {
          count = count + 1
          post()
        }
    }
  }
}
| LivelyGig/ProductWebUI | sclient/src/main/scala/synereo/client/utils/ContentUtils.scala | Scala | apache-2.0 | 11,606 |
package io.github.mandar2812.dynaml.evaluation
import breeze.linalg.DenseVector
import org.apache.log4j.{Priority, Logger}
import scalax.chart.module.ChartFactories.{XYBarChart, XYLineChart, XYAreaChart}
/**
* Class implementing the calculation
* of regression performance evaluation
* metrics
*
* */
/**
 * Computes regression performance metrics from (prediction, label) pairs.
 *
 * NOTE(review): despite their names, `rmse` and `mae` are *relative* error
 * metrics — each residual is divided by the label (`p._2`) before averaging —
 * and both are undefined when a label is zero.
 *
 * @param scoresAndLabels pairs of (predicted value, actual value)
 * @param len number of pairs
 */
class RegressionMetrics(
    override protected val scoresAndLabels: List[(Double, Double)],
    val len: Int)
  extends Metrics[Double] {
  private val logger = Logger.getLogger(this.getClass)

  val length: Int = len

  // Root mean squared *relative* error: sqrt(mean(((pred - label)/label)^2)).
  val rmse: Double = math.sqrt(scoresAndLabels.map((p) =>
    math.pow((p._1 - p._2)/p._2, 2)/length).sum)

  // Mean absolute *relative* error: mean(|pred - label| / label).
  val mae: Double = scoresAndLabels.map((p) =>
    math.abs((p._1 - p._2)/p._2)/length).sum

  // Root mean squared log error over absolute values (robust to sign).
  val rmsle: Double = math.sqrt(scoresAndLabels.map((p) =>
    math.pow(math.log(1 + math.abs(p._1)) - math.log(math.abs(p._2) + 1),
      2)/length).sum)

  // Coefficient of determination; see RegressionMetrics.computeRsq.
  val Rsq: Double = RegressionMetrics.computeRsq(scoresAndLabels, length)

  /** Signed residuals paired with their labels: (pred - label, label). */
  def residuals() = this.scoresAndLabels.map((s) => (s._1 - s._2, s._2))

  /** Raw (prediction, label) pairs this instance was built from. */
  def scores_and_labels() = this.scoresAndLabels

  /** Logs a summary of all computed metrics at INFO level. */
  override def print(): Unit = {
    logger.log(Priority.INFO, "Regression Model Performance")
    logger.log(Priority.INFO, "============================")
    logger.log(Priority.INFO, "MAE: " + mae)
    logger.log(Priority.INFO, "RMSE: " + rmse)
    logger.log(Priority.INFO, "RMSLE: " + rmsle)
    logger.log(Priority.INFO, "R^2: " + Rsq)
  }

  /** Key performance indicators as a dense vector: (mae, rmse, Rsq). */
  override def kpi() = DenseVector(mae, rmse, Rsq)

  /** Displays a bar chart of residuals against their labels. */
  override def generatePlots(): Unit = {
    implicit val theme = org.jfree.chart.StandardChartTheme.createDarknessTheme
    // Plot as (label, residual) points.
    val roccurve = this.residuals().map(c => (c._2, c._1))
    logger.log(Priority.INFO, "Generating Plot of Residuals")
    val chart1 = XYBarChart(roccurve,
      title = "Residuals", legend = true)
    chart1.show()
  }
}
object RegressionMetrics {

  /**
   * Coefficient of determination (R²) for a set of (prediction, label) pairs.
   *
   * @param scoresAndLabels pairs of (predicted value, actual value)
   * @param size number of pairs, used to compute the label mean
   * @return 1 - SS_res / SS_tot; division by zero (all labels equal to the
   *         mean) yields NaN or an infinity, matching IEEE-754 semantics
   */
  def computeRsq(scoresAndLabels: Iterable[(Double, Double)], size: Int): Double = {
    val mean: Double = scoresAndLabels.map{coup => coup._2}.sum/size
    // Accumulate residual and total sums of squares in a single pass.
    val (ssRes, ssTot) = scoresAndLabels.foldLeft((0.0, 0.0)) {
      case ((res, tot), (score, label)) =>
        (res + math.pow(label - score, 2), tot + math.pow(label - mean, 2))
    }
    1 - (ssRes / ssTot)
  }
}
| Koldh/DynaML | src/main/scala/io/github/mandar2812/dynaml/evaluation/RegressionMetrics.scala | Scala | apache-2.0 | 2,229 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.events.examples
import uk.gov.hmrc.play.events.Measurable
/** Metric event recorded when a filing is created. */
case class ExampleMetricEvent(source: String,
                              name: String,
                              data: Map[String, String]) extends Measurable

object ExampleMetricEvent {

  /** Builds the filing-created metric for the given file id and type. */
  def apply(fileId: String, fileType: String) = {
    val details = Map(
      "File ID" -> fileId,
      "File Type" -> fileType)
    new ExampleMetricEvent(source = "TestApp", name = "NumberOfCreatedFilings", data = details)
  }
}
| scottcutts/play-events | src/test/scala/uk/gov/hmrc/play/events/examples/ExampleMetricEvent.scala | Scala | apache-2.0 | 1,113 |
// Minimal test fixture; presumably the surrounding sbt incremental-compilation
// test observes the presence of `x` — TODO confirm against the test setup.
object A {
  val x = "a"
}
| pdalpra/sbt | sbt/src/sbt-test/source-dependencies/restore-classes/changes/A2.scala | Scala | bsd-3-clause | 37 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import scala.util.control.NonFatal
import com.google.common.util.concurrent.Striped
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.{QualifiedTableName, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.SQLConf.HiveCaseSensitiveInferenceMode._
import org.apache.spark.sql.types._
/**
* Legacy catalog for interacting with the Hive metastore.
*
* This is still used for things like creating data source tables, but in the future will be
* cleaned up to integrate more nicely with [[HiveExternalCatalog]].
*/
private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Logging {
  // These are defs and not val/lazy val because the latter would introduce
  // circular initialization references with the session state.
  private def sessionState = sparkSession.sessionState
  private def catalogProxy = sparkSession.sessionState.catalog
  import HiveMetastoreCatalog._

  /** These locks guard against multiple attempts to instantiate a table, which wastes memory. */
  // Striped, lazily-created, weakly-referenced locks keyed by qualified table name.
  private val tableCreationLocks = Striped.lazyWeakLock(100)

  /**
   * Acquires the per-table lock for the duration of `f`.
   * `f` is by-name, so it is evaluated only after the lock is held.
   */
  private def withTableCreationLock[A](tableName: QualifiedTableName, f: => A): A = {
    val lock = tableCreationLocks.get(tableName)
    lock.lock()
    try f finally {
      lock.unlock()
    }
  }
// For testing only
private[hive] def getCachedDataSourceTable(table: TableIdentifier): LogicalPlan = {
val key = QualifiedTableName(
table.database.getOrElse(sessionState.catalog.getCurrentDatabase).toLowerCase,
table.table.toLowerCase)
catalogProxy.getCachedTable(key)
}
  /**
   * Looks up a cached `LogicalRelation` for the given table and validates it
   * against the metastore's current paths, schema and partitioning.
   *
   * Returns `None` on a cache miss, and also invalidates + returns `None`
   * when the cached entry is stale (different paths/schema/partitioning) or
   * has an unexpected relation/file-format type.
   *
   * @param tableIdentifier  qualified name used as the cache key
   * @param pathsInMetastore root paths currently recorded in the metastore
   * @param schemaInMetastore schema currently recorded in the metastore
   * @param expectedFileFormat file format class the cached relation must use
   * @param partitionSchema  expected partition columns, if the table is partitioned
   */
  private def getCached(
      tableIdentifier: QualifiedTableName,
      pathsInMetastore: Seq[Path],
      schemaInMetastore: StructType,
      expectedFileFormat: Class[_ <: FileFormat],
      partitionSchema: Option[StructType]): Option[LogicalRelation] = {
    catalogProxy.getCachedTable(tableIdentifier) match {
      case null => None // Cache miss
      case logical @ LogicalRelation(relation: HadoopFsRelation, _, _, _) =>
        val cachedRelationFileFormatClass = relation.fileFormat.getClass
        expectedFileFormat match {
          case `cachedRelationFileFormatClass` =>
            // If we have the same paths, same schema, and same partition spec,
            // we will use the cached relation.
            val useCached =
              relation.location.rootPaths.toSet == pathsInMetastore.toSet &&
                logical.schema.sameType(schemaInMetastore) &&
                // We don't support hive bucketed tables. This function `getCached` is only used for
                // converting supported Hive tables to data source tables.
                relation.bucketSpec.isEmpty &&
                relation.partitionSchema == partitionSchema.getOrElse(StructType(Nil))

            if (useCached) {
              Some(logical)
            } else {
              // If the cached relation is not updated, we invalidate it right away.
              catalogProxy.invalidateCachedTable(tableIdentifier)
              None
            }
          case _ =>
            // Cached entry uses a different file format than expected — drop it.
            logWarning(s"Table $tableIdentifier should be stored as $expectedFileFormat. " +
              s"However, we are getting a ${relation.fileFormat} from the metastore cache. " +
              "This cached entry will be invalidated.")
            catalogProxy.invalidateCachedTable(tableIdentifier)
            None
        }
      case other =>
        // Cached entry is not a HadoopFsRelation at all — drop it.
        logWarning(s"Table $tableIdentifier should be stored as $expectedFileFormat. " +
          s"However, we are getting a $other from the metastore cache. " +
          "This cached entry will be invalidated.")
        catalogProxy.invalidateCachedTable(tableIdentifier)
        None
    }
  }
def convertToLogicalRelation(
relation: HiveTableRelation,
options: Map[String, String],
fileFormatClass: Class[_ <: FileFormat],
fileType: String): LogicalRelation = {
val metastoreSchema = relation.tableMeta.schema
val tableIdentifier =
QualifiedTableName(relation.tableMeta.database, relation.tableMeta.identifier.table)
val lazyPruningEnabled = sparkSession.sqlContext.conf.manageFilesourcePartitions
val tablePath = new Path(relation.tableMeta.location)
val fileFormat = fileFormatClass.newInstance()
val result = if (relation.isPartitioned) {
val partitionSchema = relation.tableMeta.partitionSchema
val rootPaths: Seq[Path] = if (lazyPruningEnabled) {
Seq(tablePath)
} else {
// By convention (for example, see CatalogFileIndex), the definition of a
// partitioned table's paths depends on whether that table has any actual partitions.
// Partitioned tables without partitions use the location of the table's base path.
// Partitioned tables with partitions use the locations of those partitions' data
// locations,_omitting_ the table's base path.
val paths = sparkSession.sharedState.externalCatalog
.listPartitions(tableIdentifier.database, tableIdentifier.name)
.map(p => new Path(p.storage.locationUri.get))
if (paths.isEmpty) {
Seq(tablePath)
} else {
paths
}
}
withTableCreationLock(tableIdentifier, {
val cached = getCached(
tableIdentifier,
rootPaths,
metastoreSchema,
fileFormatClass,
Some(partitionSchema))
val logicalRelation = cached.getOrElse {
val sizeInBytes = relation.stats.sizeInBytes.toLong
val fileIndex = {
val index = new CatalogFileIndex(sparkSession, relation.tableMeta, sizeInBytes)
if (lazyPruningEnabled) {
index
} else {
index.filterPartitions(Nil) // materialize all the partitions in memory
}
}
val updatedTable = inferIfNeeded(relation, options, fileFormat, Option(fileIndex))
val fsRelation = HadoopFsRelation(
location = fileIndex,
partitionSchema = partitionSchema,
dataSchema = updatedTable.dataSchema,
bucketSpec = None,
fileFormat = fileFormat,
options = options)(sparkSession = sparkSession)
val created = LogicalRelation(fsRelation, updatedTable)
catalogProxy.cacheTable(tableIdentifier, created)
created
}
logicalRelation
})
} else {
val rootPath = tablePath
withTableCreationLock(tableIdentifier, {
val cached = getCached(
tableIdentifier,
Seq(rootPath),
metastoreSchema,
fileFormatClass,
None)
val logicalRelation = cached.getOrElse {
val updatedTable = inferIfNeeded(relation, options, fileFormat)
val created =
LogicalRelation(
DataSource(
sparkSession = sparkSession,
paths = rootPath.toString :: Nil,
userSpecifiedSchema = Option(updatedTable.dataSchema),
bucketSpec = None,
options = options,
className = fileType).resolveRelation(),
table = updatedTable)
catalogProxy.cacheTable(tableIdentifier, created)
created
}
logicalRelation
})
}
// The inferred schema may have different field names as the table schema, we should respect
// it, but also respect the exprId in table relation output.
assert(result.output.length == relation.output.length &&
result.output.zip(relation.output).forall { case (a1, a2) => a1.dataType == a2.dataType })
val newOutput = result.output.zip(relation.output).map {
case (a1, a2) => a1.withExprId(a2.exprId)
}
result.copy(output = newOutput)
}
private def inferIfNeeded(
relation: HiveTableRelation,
options: Map[String, String],
fileFormat: FileFormat,
fileIndexOpt: Option[FileIndex] = None): CatalogTable = {
val inferenceMode = sparkSession.sessionState.conf.caseSensitiveInferenceMode
val shouldInfer = (inferenceMode != NEVER_INFER) && !relation.tableMeta.schemaPreservesCase
val tableName = relation.tableMeta.identifier.unquotedString
if (shouldInfer) {
logInfo(s"Inferring case-sensitive schema for table $tableName (inference mode: " +
s"$inferenceMode)")
val fileIndex = fileIndexOpt.getOrElse {
val rootPath = new Path(relation.tableMeta.location)
new InMemoryFileIndex(sparkSession, Seq(rootPath), options, None)
}
val inferredSchema = fileFormat
.inferSchema(
sparkSession,
options,
fileIndex.listFiles(Nil, Nil).flatMap(_.files))
.map(mergeWithMetastoreSchema(relation.tableMeta.dataSchema, _))
inferredSchema match {
case Some(dataSchema) =>
if (inferenceMode == INFER_AND_SAVE) {
updateDataSchema(relation.tableMeta.identifier, dataSchema)
}
val newSchema = StructType(dataSchema ++ relation.tableMeta.partitionSchema)
relation.tableMeta.copy(schema = newSchema)
case None =>
logWarning(s"Unable to infer schema for table $tableName from file format " +
s"$fileFormat (inference mode: $inferenceMode). Using metastore schema.")
relation.tableMeta
}
} else {
relation.tableMeta
}
}
private def updateDataSchema(identifier: TableIdentifier, newDataSchema: StructType): Unit = try {
logInfo(s"Saving case-sensitive schema for table ${identifier.unquotedString}")
sparkSession.sessionState.catalog.alterTableDataSchema(identifier, newDataSchema)
} catch {
case NonFatal(ex) =>
logWarning(s"Unable to save case-sensitive schema for table ${identifier.unquotedString}", ex)
}
}
private[hive] object HiveMetastoreCatalog {
  /**
   * Merges the case-sensitive schema inferred from data files with the schema recorded in the
   * Hive metastore. The result keeps the metastore's field order and metadata but adopts the
   * inferred (case-preserving) field names; nullable metastore fields that inference did not
   * find in the files are retained. Throws SparkException when the two schemas conflict
   * (i.e. a metastore field cannot be matched case-insensitively).
   */
  def mergeWithMetastoreSchema(
      metastoreSchema: StructType,
      inferredSchema: StructType): StructType = try {
    // Lower-cased names present in the inferred schema, for case-insensitive membership tests.
    val inferredNames = inferredSchema.map(_.name.toLowerCase).toSet
    // Nullable fields in the metastore schema that the inference step did not discover.
    val metastoreByLowerName = metastoreSchema.map(f => f.name.toLowerCase -> f).toMap
    val missingNullables = metastoreByLowerName.collect {
      case (lowerName, field) if !inferredNames.contains(lowerName) && field.nullable => field
    }
    // Case-insensitive lookup from lower-cased name to the inferred field.
    val inferredByLowerName = StructType(inferredSchema ++ missingNullables)
      .map(f => f.name.toLowerCase -> f)
      .toMap
    // Rebuild in metastore order, swapping in the inferred capitalization of each name.
    StructType(metastoreSchema.map(f => f.copy(name = inferredByLowerName(f.name).name)))
  } catch {
    case NonFatal(_) =>
      val msg = s"""Detected conflicting schemas when merging the schema obtained from the Hive
        | Metastore with the one inferred from the file format. Metastore schema:
        |${metastoreSchema.prettyJson}
        |
        |Inferred schema:
        |${inferredSchema.prettyJson}
      """.stripMargin
      throw new SparkException(msg)
  }
}
| bravo-zhang/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | Scala | apache-2.0 | 12,169 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.javaapi.http.internal
import java.{ util => ju }
import scala.jdk.CollectionConverters._
import io.gatling.core.{ Predef => CorePredef }
import io.gatling.core.check.CheckBuilder
import io.gatling.core.check.bytes.BodyBytesCheckType
import io.gatling.core.check.jmespath.JmesPathCheckType
import io.gatling.core.check.jsonpath.JsonPathCheckType
import io.gatling.core.check.regex.RegexCheckType
import io.gatling.core.check.string.BodyStringCheckType
import io.gatling.core.check.substring.SubstringCheckType
import io.gatling.http.check.ws.{ WsCheck, WsCheckMaterializer }
import io.gatling.javaapi.core.internal.CoreCheckType
import com.fasterxml.jackson.databind.JsonNode
object WsChecks {
  /** Builds the Scala text-frame WebSocket check corresponding to one Java DSL check. */
  private def toScalaTextCheck(javaCheck: io.gatling.javaapi.core.CheckBuilder): WsCheck.Text = {
    val underlying = javaCheck.asScala
    javaCheck.`type` match {
      case CoreCheckType.BodyString =>
        underlying.asInstanceOf[CheckBuilder[BodyStringCheckType, String]].build(WsCheckMaterializer.Text.BodyString)
      case CoreCheckType.Regex =>
        underlying.asInstanceOf[CheckBuilder[RegexCheckType, String]].build(WsCheckMaterializer.Text.Regex)
      case CoreCheckType.Substring =>
        underlying.asInstanceOf[CheckBuilder[SubstringCheckType, String]].build(WsCheckMaterializer.Text.Substring)
      case CoreCheckType.JsonPath =>
        underlying.asInstanceOf[CheckBuilder[JsonPathCheckType, JsonNode]].build(WsCheckMaterializer.Text.jsonPath(CorePredef.defaultJsonParsers))
      case CoreCheckType.JmesPath =>
        underlying.asInstanceOf[CheckBuilder[JmesPathCheckType, JsonNode]].build(WsCheckMaterializer.Text.jmesPath(CorePredef.defaultJsonParsers))
      case unknown =>
        throw new IllegalArgumentException(s"WebSocket DSL doesn't support text check $unknown")
    }
  }
  /** Converts every Java text check in `javaChecks`, preserving order. */
  def toScalaTextChecks(javaChecks: ju.List[io.gatling.javaapi.core.CheckBuilder]): Seq[WsCheck.Text] =
    javaChecks.asScala.iterator.map(toScalaTextCheck).toSeq
  /** Builds the Scala binary-frame WebSocket check corresponding to one Java DSL check. */
  private def toScalaBinaryCheck(javaCheck: io.gatling.javaapi.core.CheckBuilder): WsCheck.Binary = {
    val underlying = javaCheck.asScala
    javaCheck.`type` match {
      case CoreCheckType.BodyBytes =>
        underlying.asInstanceOf[CheckBuilder[BodyBytesCheckType, Array[Byte]]].build(WsCheckMaterializer.Binary.BodyBytes)
      case CoreCheckType.BodyLength =>
        underlying.asInstanceOf[CheckBuilder[BodyBytesCheckType, Int]].build(WsCheckMaterializer.Binary.BodyLength)
      case unknown =>
        throw new IllegalArgumentException(s"WebSocket DSL doesn't support binary check $unknown")
    }
  }
  /** Converts every Java binary check in `javaChecks`, preserving order. */
  def toScalaBinaryChecks(javaChecks: ju.List[io.gatling.javaapi.core.CheckBuilder]): Seq[WsCheck.Binary] =
    javaChecks.asScala.iterator.map(toScalaBinaryCheck).toSeq
}
| gatling/gatling | gatling-http-java/src/main/scala/io/gatling/javaapi/http/internal/WsChecks.scala | Scala | apache-2.0 | 3,359 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.matryoshka
import _root_.matryoshka.{Corecursive, Delay}
import _root_.matryoshka.data._
import _root_.matryoshka.implicits._
import _root_.matryoshka.patterns.{CoEnv, EnvT}
import org.scalacheck.{Arbitrary, Gen}
import scalaz.{Cofree, Free, Functor}
// TODO{matryoshka}: This exists as matryoshka depends on a snapshot version of
// scalacheck at the moment.
trait CorecursiveArbitrary {
  /**
   * Derives an `Arbitrary` for any corecursive fixpoint `T` of a functor `F`, given a
   * `Delay[Arbitrary, F]` that lifts a generator through one layer of `F`. Generation is
   * size-bounded: each recursive layer shrinks the ScalaCheck size by one and fails once the
   * budget is exhausted, which guarantees termination for finite sizes.
   */
  def corecursiveArbitrary[T, F[_]: Functor]
    (implicit T: Corecursive.Aux[T, F], fArb: Delay[Arbitrary, F])
    : Arbitrary[T] =
    Arbitrary(Gen.sized(size =>
      fArb(Arbitrary(
        if (size <= 0)
          Gen.fail[T]
        else
          Gen.resize(size - 1, corecursiveArbitrary[T, F].arbitrary))).arbitrary map (_.embed)))
  // Specializations of the generic derivation for the standard fixpoint types.
  implicit def fixArbitrary[F[_]: Functor](implicit fArb: Delay[Arbitrary, F]): Arbitrary[Fix[F]] =
    corecursiveArbitrary[Fix[F], F]
  implicit def muArbitrary[F[_]: Functor](implicit fArb: Delay[Arbitrary, F]): Arbitrary[Mu[F]] =
    corecursiveArbitrary[Mu[F], F]
  implicit def nuArbitrary[F[_]: Functor](implicit fArb: Delay[Arbitrary, F]): Arbitrary[Nu[F]] =
    corecursiveArbitrary[Nu[F], F]
  // Cofree/Free are corecursive via their EnvT/CoEnv pattern functors.
  implicit def cofreeArbitrary[F[_]: Functor, A](implicit envTArb: Delay[Arbitrary, EnvT[A, F, ?]]): Arbitrary[Cofree[F, A]] =
    corecursiveArbitrary[Cofree[F, A], EnvT[A, F, ?]]
  implicit def freeArbitrary[F[_]: Functor, A](implicit coEnvArb: Delay[Arbitrary, CoEnv[A, F, ?]]): Arbitrary[Free[F, A]] =
    corecursiveArbitrary[Free[F, A], CoEnv[A, F, ?]]
}
object CorecursiveArbitrary extends CorecursiveArbitrary
| slamdata/quasar | foundation/src/test/scala/quasar/contrib/matryoshka/CorecursiveArbitrary.scala | Scala | apache-2.0 | 2,207 |
/*
Copyright (c) 2017-2021, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.extension
import org.sireum._
object Console_Ext {
  /** Reads one line from standard input, returning "" (never null) at end of stream. */
  def readLine(): String =
    Option(_root_.scala.Console.in.readLine).getOrElse("")
}
| sireum/v3-logika-runtime | library/shared/src/main/scala/org/sireum/extension/Console_Ext.scala | Scala | bsd-2-clause | 1,529 |
package filodb.core.binaryrecord2
import scala.collection.mutable.ArrayBuffer
import org.agrona.DirectBuffer
import org.agrona.concurrent.UnsafeBuffer
import filodb.core.metadata.{Column, Dataset}
import filodb.core.metadata.Column.ColumnType.{LongColumn, TimestampColumn}
import filodb.core.query.ColumnInfo
import filodb.memory.{BinaryRegion, BinaryRegionLarge, UTF8StringMedium}
import filodb.memory.format.{RowReader, UnsafeUtils, ZeroCopyUTF8String}
import filodb.memory.format.{vectors => bv}
// scalastyle:off number.of.methods
/**
* A RecordSchema is the schema for a BinaryRecord - what type of each field a BR holds.
* Since it knows the schema it can also read values out efficiently. It does not mutate memory or BinaryRecords.
*
* One instance of this class is meant to serve all of the BinaryRecords of this schema.
* Note that BinaryRecords are not regular Java objects, but rather just a memory location or pointer.
* The methods of this class must be used for access.
*
* RecordSchema v2 has a feature called partition key fields. The idea is that all fields starting at an optional
* partitionFieldStart belong to the "partition key". Special features for partition key fields:
* - A hashcode is calculated for all partition key fields and stored in the record itself for fast comparisons
* and raw data recovery
* - The partition key fields between BinaryRecords can be compared very fast for equality, so long as all the
* partition key fields share the same schema (they can start at differend field #'s). This takes advantage of
* the fact that all variable length fields after the partitionFieldStart are contiguous and can be binary compared
*
* @param columns In order, the column of each field in this schema
* @param brSchema schema of any binary record type column
* @param partitionFieldStart Some(n) from n to the last field are considered the partition key. A field number.
* @param predefinedKeys A list of predefined keys to save space for the tags/MapColumn field(s)
*/
final class RecordSchema(val columns: Seq[ColumnInfo],
                         val partitionFieldStart: Option[Int] = None,
                         val predefinedKeys: Seq[String] = Nil,
                         val brSchema: Map[Int, RecordSchema] = Map.empty) {
  import RecordSchema._
  import BinaryRegion.NativePointer
  override def toString: String = s"RecordSchema<$columns, $partitionFieldStart>"
  val colNames = columns.map(_.name)
  val columnTypes = columns.map(_.colType)
  require(columnTypes.nonEmpty, "columnTypes cannot be empty")
  require(predefinedKeys.length < 4096, "Too many predefined keys")
  require(partitionFieldStart.isEmpty ||
          partitionFieldStart.get < columnTypes.length, s"partitionFieldStart $partitionFieldStart is too high")
  // Offset to fixed area for each field. Extra element at end is end of fixed size area / hash.
  // Note: these offsets start at 4, after the length header
  private val offsets = columnTypes.map(colTypeToFieldSize).scan(4)(_ + _).toArray
  // Offset from BR start to beginning of variable area. Also the minimum length of a BR.
  val variableAreaStart = partitionFieldStart.map(x => 4).getOrElse(0) + offsets.last
  val (predefKeyOffsets, predefKeyBytes, predefKeyNumMap) = makePredefinedStructures(predefinedKeys)
  val numFields = columnTypes.length
  // Typed, efficient functions, one for each field/column, to add to a RecordBuilder efficiently from a RowReader
  // with no boxing or extra allocations involved
  val builderAdders = columnTypes.zipWithIndex.map {
    case (Column.ColumnType.LongColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) => builder.addLong(row.getLong(colNo))
    case (Column.ColumnType.TimestampColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) => builder.addLong(row.getLong(colNo))
    case (Column.ColumnType.DoubleColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) => builder.addDouble(row.getDouble(colNo))
    case (Column.ColumnType.HistogramColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) => builder.addBlob(row.getHistogram(colNo).serialize())
    case (Column.ColumnType.IntColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) => builder.addInt(row.getInt(colNo))
    case (Column.ColumnType.StringColumn, colNo) =>
      // TODO: we REALLY need a better API than ZeroCopyUTF8String as it creates so much garbage
      (row: RowReader, builder: RecordBuilder) => builder.addBlob(row.filoUTF8String(colNo))
    case (Column.ColumnType.BinaryRecordColumn, colNo) =>
      (row: RowReader, builder: RecordBuilder) =>
        builder.addBlob(row.getBlobBase(colNo), row.getBlobOffset(colNo), row.getBlobNumBytes(colNo))
    case (t: Column.ColumnType, colNo) =>
      // TODO: add more efficient methods
      (row: RowReader, builder: RecordBuilder) => builder.addSlowly(row.getAny(colNo))
  }.toArray
  def numColumns: Int = columns.length
  // A schema is considered time-series if its first column is a Long/Timestamp.
  def isTimeSeries: Boolean = columnTypes.length >= 1 &&
    (columnTypes.head == LongColumn || columnTypes.head == TimestampColumn)
  /**
   * Offset to the fixed field primitive or pointer which are at the beginning of the BR
   * @param index column number (and not column id)
   */
  def fieldOffset(index: Int): Int = offsets(index)
  /** Total number of bytes of the BinaryRecord at (base, offset), via BinaryRegionLarge. */
  def numBytes(base: Any, offset: Long): Int = BinaryRegionLarge.numBytes(base, offset)
  /**
   * Retrieves the partition hash field from a BinaryRecord. If partitionFieldStart is None, the results
   * of this will be undefined.
   */
  def partitionHash(base: Any, offset: Long): Int = UnsafeUtils.getInt(base, offset + offsets.last)
  def partitionHash(address: NativePointer): Int = UnsafeUtils.getInt(address + offsets.last)
  /**
   * Retrieves an Int from field # index. No schema matching is done for speed - you must use this only when
   * the columnType at that field is really an int.
   */
  def getInt(address: NativePointer, index: Int): Int = UnsafeUtils.getInt(address + offsets(index))
  def getInt(base: Any, offset: Long, index: Int): Int = UnsafeUtils.getInt(base, offset + offsets(index))
  /**
   * Retrieves a Long from field # index. No schema matching is done for speed - you must use this only when
   * the columnType at that field is really a Long.
   */
  def getLong(address: NativePointer, index: Int): Long = UnsafeUtils.getLong(address + offsets(index))
  def getLong(base: Any, offset: Long, index: Int): Long = UnsafeUtils.getLong(base, offset + offsets(index))
  /**
   * Retrieves a Double from field # index. No schema matching is done for speed - you must use this only when
   * the columnType at that field is really a Double.
   */
  def getDouble(address: NativePointer, index: Int): Double = UnsafeUtils.getDouble(address + offsets(index))
  def getDouble(base: Any, offset: Long, index: Int): Double = UnsafeUtils.getDouble(base, offset + offsets(index))
  /**
   * Retrieves the value class for a native BinaryRecord UTF8 string field. This should not result in any
   * allocations so long as the severe restrictions for value classes are followed. Don't use in a collection!
   */
  def utf8StringPointer(address: NativePointer, index: Int): UTF8StringMedium = {
    // Fixed area stores a 4-byte relative offset to the variable-size UTF8 data.
    val utf8Addr = address + UnsafeUtils.getInt(address + offsets(index))
    new UTF8StringMedium(utf8Addr)
  }
  /**
   * Extracts out the base, offset, length of a string/blob field. Much preferable to using
   * asJavaString/asZCUTF8Str methods due to not needing allocations.
   */
  def blobBase(base: Any, offset: Long, index: Int): Any = base
  // +2 skips the 2-byte length prefix of the UTF8StringMedium/blob.
  def blobOffset(base: Any, offset: Long, index: Int): Long =
    offset + UnsafeUtils.getInt(base, offset + offsets(index)) + 2
  def blobNumBytes(base: Any, offset: Long, index: Int): Int =
    UTF8StringMedium.numBytes(base, offset + UnsafeUtils.getInt(base, offset + offsets(index)))
  /**
   * Sets an existing DirectBuffer to wrap around the given blob/UTF8/Histogram bytes, including the
   * 2-byte length prefix. Since the DirectBuffer is already allocated, this results in no new allocations.
   * Could be used to efficiently retrieve blobs or histograms again and again.
   */
  def blobAsBuffer(base: Any, offset: Long, index: Int, buf: DirectBuffer): Unit = {
    // Number of bytes to give out should not be beyond range of record
    val blobLen = Math.min(numBytes(base, offset), blobNumBytes(base, offset, index) + 2)
    base match {
      case a: Array[Byte] =>
        buf.wrap(a, utf8StringOffset(base, offset, index).toInt - UnsafeUtils.arayOffset, blobLen)
      case UnsafeUtils.ZeroPointer =>
        buf.wrap(utf8StringOffset(base, offset, index), blobLen)
    }
  }
  // Same as above but allocates a new UnsafeBuffer wrapping the blob as a reference
  def blobAsBuffer(base: Any, offset: Long, index: Int): DirectBuffer = {
    val newBuf = new UnsafeBuffer(Array.empty[Byte])
    blobAsBuffer(base, offset, index, newBuf)
    newBuf
  }
  /**
   * Used for extracting the offset for a UTF8StringMedium.
   * Note that blobOffset method is for the offset to the actual blob bytes, not including length header.
   */
  def utf8StringOffset(base: Any, offset: Long, index: Int): Long =
    offset + UnsafeUtils.getInt(base, offset + offsets(index))
  /**
   * COPIES the BinaryRecord field # index out as a new Java String on the heap. Allocation + copying cost.
   */
  def asJavaString(base: Any, offset: Long, index: Int): String =
    UTF8StringMedium.toString(base, offset + UnsafeUtils.getInt(base, offset + offsets(index)))
  // TEMPorary: to be deprecated
  def asZCUTF8Str(base: Any, offset: Long, index: Int): ZeroCopyUTF8String = {
    val realOffset = offset + UnsafeUtils.getInt(base, offset + offsets(index))
    new ZeroCopyUTF8String(base, realOffset + 2, UTF8StringMedium.numBytes(base, realOffset))
  }
  def asZCUTF8Str(address: NativePointer, index: Int): ZeroCopyUTF8String =
    asZCUTF8Str(UnsafeUtils.ZeroPointer, address, index)
  /**
   * EXPENSIVE to do at server side. Creates a easy-to-read string
   * representation of the contents of this BinaryRecord.
   */
  def stringify(base: Any, offset: Long): String = {
    import Column.ColumnType._
    val result = new ArrayBuffer[String]()
    columnTypes.zipWithIndex.map {
      case (IntColumn, i) => result += s"${colNames(i)}=${getInt(base, offset, i)}"
      case (LongColumn, i) => result += s"${colNames(i)}=${getLong(base, offset, i)}"
      case (DoubleColumn, i) => result += s"${colNames(i)}=${getDouble(base, offset, i)}"
      case (StringColumn, i) => result += s"${colNames(i)}=${asJavaString(base, offset, i)}"
      case (TimestampColumn, i) => result += s"${colNames(i)}=${getLong(base, offset, i)}"
      case (MapColumn, i) => val consumer = new StringifyMapItemConsumer
                             consumeMapItems(base, offset, i, consumer)
                             result += s"${colNames(i)}=${consumer.prettyPrint}"
      case (BinaryRecordColumn, i) => result += s"${colNames(i)}=${brSchema(i).stringify(base, offset)}"
      case (HistogramColumn, i) =>
        result += s"${colNames(i)}= ${bv.BinaryHistogram.BinHistogram(blobAsBuffer(base, offset, i))}"
    }
    s"b2[${result.mkString(",")}]"
  }
  def stringify(address: NativePointer): String = stringify(UnsafeUtils.ZeroPointer, address)
  def stringify(bytes: Array[Byte]): String = stringify(bytes, UnsafeUtils.arayOffset)
  /**
   * EXPENSIVE to do at server side. Creates a stringified map with contents of this BinaryRecord.
   */
  def toStringPairs(base: Any, offset: Long): Seq[(String, String)] = {
    import Column.ColumnType._
    val resultMap = new collection.mutable.ArrayBuffer[(String, String)]()
    columnTypes.zipWithIndex.map {
      case (IntColumn, i) => resultMap += ((colNames(i), getInt(base, offset, i).toString))
      case (LongColumn, i) => resultMap += ((colNames(i), getLong(base, offset, i).toString))
      case (DoubleColumn, i) => resultMap += ((colNames(i), getDouble(base, offset, i).toString))
      case (StringColumn, i) => resultMap += ((colNames(i), asJavaString(base, offset, i).toString))
      case (TimestampColumn, i) => resultMap += ((colNames(i), getLong(base, offset, i).toString))
      case (MapColumn, i) => val consumer = new StringifyMapItemConsumer
                             consumeMapItems(base, offset, i, consumer)
                             resultMap ++= consumer.stringPairs
      case (BinaryRecordColumn, i) => resultMap ++= brSchema(i).toStringPairs(base, offset)
      case (HistogramColumn, i) =>
        resultMap += ((colNames(i), bv.BinaryHistogram.BinHistogram(blobAsBuffer(base, offset, i)).toString))
    }
    resultMap
  }
  /**
   * Iterates through each key/value pair of a MapColumn field without any object allocations.
   * How is this done? By calling the consumer for each pair and directly passing the base and offset.
   * The consumer would use the UTF8StringMedium object to work with the UTF8String blobs.
   *
   * TODO: have a version of consumer that is passed the value class if both key and value are offheap.
   * This can only be done however if we move the predefined keys offheap.
   */
  def consumeMapItems(base: Any, offset: Long, index: Int, consumer: MapItemConsumer): Unit = {
    // Map layout (from the reads below): 4-byte total length, then repeated pairs of
    // [2-byte key length | key bytes][2-byte value length | value bytes]. A key length whose
    // top nibble is 0xF encodes an index into predefinedKeys instead of inline key bytes.
    val mapOffset = offset + UnsafeUtils.getInt(base, offset + offsets(index))
    val mapNumBytes = UnsafeUtils.getInt(base, mapOffset)
    var curOffset = mapOffset + 4
    val endOffset = curOffset + mapNumBytes
    var itemIndex = 0
    while (curOffset < endOffset) {
      // Read key length. Is it a predefined key?
      val keyLen = UnsafeUtils.getShort(base, curOffset) & 0x0FFFF
      val keyIndex = keyLen ^ 0x0F000
      if (keyIndex < 0x1000) { // predefined key; no key bytes
        consumer.consume(predefKeyBytes, predefKeyOffsets(keyIndex), base, curOffset + 2, itemIndex)
        curOffset += 4 + (UnsafeUtils.getShort(base, curOffset + 2) & 0x0FFFF)
      } else {
        consumer.consume(base, curOffset, base, curOffset + 2 + keyLen, itemIndex)
        curOffset += 4 + keyLen + (UnsafeUtils.getShort(base, curOffset + 2 + keyLen) & 0x0FFFF)
      }
      itemIndex += 1
    }
  }
  /**
   * Returns the offset from start of the BinaryRecord to the
   * UTF8StringMedium (2 byte length header + UTF8 string bytes)
   */
  def getStringOffset(base: Any, offset: Long, index: Int): Int = {
    UnsafeUtils.getInt(base, offset + offsets(index))
  }
  def consumeMapItems(address: NativePointer, index: Int, consumer: MapItemConsumer): Unit =
    consumeMapItems(UnsafeUtils.ZeroPointer, address, index, consumer)
  /**
   * Returns true if the two BinaryRecords are equal
   */
  def equals(base1: Any, offset1: Long, base2: Any, offset2: Long): Boolean =
    BinaryRegionLarge.equals(base1, offset1, base2, offset2)
  def equals(record1: NativePointer, record2: NativePointer): Boolean =
    BinaryRegionLarge.equals(UnsafeUtils.ZeroPointer, record1, UnsafeUtils.ZeroPointer, record2)
  /**
   * Returns the BinaryRecordv2 as its own byte array, copying if needed
   */
  def asByteArray(base: Any, offset: Long): Array[Byte] = base match {
    // Already an on-heap array starting exactly at the record: return it without copying.
    case a: Array[Byte] if offset == UnsafeUtils.arayOffset => a
    case other: Any => BinaryRegionLarge.asNewByteArray(base, offset)
    case UnsafeUtils.ZeroPointer => BinaryRegionLarge.asNewByteArray(base, offset)
  }
  def asByteArray(address: NativePointer): Array[Byte] = asByteArray(UnsafeUtils.ZeroPointer, address)
  /**
   * Allows us to compare two RecordSchemas against each other
   */
  override def equals(other: Any): Boolean = other match {
    case r: RecordSchema => columnTypes == r.columnTypes &&
                            partitionFieldStart == r.partitionFieldStart &&
                            predefinedKeys == r.predefinedKeys
    case other: Any => false
  }
  override def hashCode: Int = ((columnTypes.hashCode * 31) + partitionFieldStart.hashCode) * 31 +
    predefinedKeys.hashCode
  import debox.{Map => DMap} // An unboxed, fast Map
  // Packs all predefined keys into one on-heap byte array of UTF8StringMediums, returning
  // (offset of each key within that array, the array itself, lookup from key-hash to key number).
  private def makePredefinedStructures(predefinedKeys: Seq[String]): (Array[Long], Array[Byte], DMap[Long, Int]) = {
    // Convert predefined keys to UTF8StringMediums. First estimate size they would all take.
    val totalNumBytes = predefinedKeys.map(_.length + 2).sum
    val stringBytes = new Array[Byte](totalNumBytes)
    val keyToNum = DMap.empty[Long, Int]
    var index = 0
    val offsets = predefinedKeys.scanLeft(UnsafeUtils.arayOffset.toLong) { case (offset, str) =>
      val bytes = str.getBytes
      UTF8StringMedium.copyByteArrayTo(bytes, stringBytes, offset)
      keyToNum(makeKeyKey(bytes)) = index
      index += 1
      offset + bytes.size + 2
    }.toArray
    (offsets, stringBytes, keyToNum)
  }
  // For serialization purposes
  private[filodb] def toSerializableTuple: (Seq[ColumnInfo], Option[Int], Seq[String], Map[Int, RecordSchema]) =
    (columns, partitionFieldStart, predefinedKeys, brSchema)
}
trait MapItemConsumer {
  /**
   * Invoked for each key and value pair. The (base, offset) points to a UTF8StringMedium, use that objects
   * methods to work with each UTF8 string.
   * @param keyBase the base object of the key UTF8StringMedium
   * @param keyOffset the offset of the key UTF8StringMedium
   * @param valueBase the base object of the value UTF8StringMedium
   * @param valueOffset the offset of the value UTF8StringMedium
   * @param index an increasing index of the pair within the map, starting at 0
   */
  def consume(keyBase: Any, keyOffset: Long, valueBase: Any, valueOffset: Long, index: Int): Unit
}
/**
 * A MapItemConsumer which turns the key and value pairs into strings
 */
class StringifyMapItemConsumer extends MapItemConsumer {
  // Collected (key, value) pairs, in the order consume() was invoked.
  val stringPairs = new collection.mutable.ArrayBuffer[(String, String)]
  /** Renders the collected pairs as "{k1: v1, k2: v2}". */
  def prettyPrint: String = "{" + stringPairs.map { case (k, v) => s"$k: $v" }.mkString(", ") + "}"
  def consume(keyBase: Any, keyOffset: Long, valueBase: Any, valueOffset: Long, index: Int): Unit = {
    stringPairs += (UTF8StringMedium.toString(keyBase, keyOffset) ->
                     UTF8StringMedium.toString(valueBase, valueOffset))
  }
}
object RecordSchema {
  import Column.ColumnType._
  // Bytes each column type occupies in the fixed-size area: primitives are stored inline;
  // variable-size types (strings, maps, blobs, nested records) store a 4-byte offset there.
  val colTypeToFieldSize = Map[Column.ColumnType, Int](IntColumn -> 4,
                                                      LongColumn -> 8,
                                                      DoubleColumn -> 8,
                                                      TimestampColumn -> 8, // Just a long ms timestamp
                                                      StringColumn -> 4,
                                                      BinaryRecordColumn -> 4,
                                                      MapColumn -> 4,
                                                      HistogramColumn -> 4)
  /**
   * Creates a "unique" Long key for each incoming predefined key for quick lookup. This will not be perfect
   * but probably good enough for the beginning.
   * TODO: improve on this. One reason for difficulty is that we need custom hashCode and equals functions and
   * we don't want to box.
   * In the output, the lower 32 bits is the hashcode of the bytes.
   */
  private[binaryrecord2] def makeKeyKey(strBytes: Array[Byte]): Long = {
    val hash = BinaryRegion.hasher32.hash(strBytes, 0, strBytes.size, BinaryRegion.Seed)
    // Upper 32 bits: the first 4 raw bytes of the key; lower 32 bits: its hash.
    (UnsafeUtils.getInt(strBytes, UnsafeUtils.arayOffset).toLong << 32) | hash
  }
  private[binaryrecord2] def makeKeyKey(strBytes: Array[Byte], index: Int, len: Int, keyHash: Int): Long = {
    // NOTE(review): 7 looks like a "hash not precomputed" sentinel — when keyHash is 7 the
    // hash is recomputed here. TODO confirm against the callers in RecordBuilder.
    val hash = if (keyHash != 7) { keyHash }
    else { BinaryRegion.hasher32.hash(strBytes, index, len, BinaryRegion.Seed) }
    (UnsafeUtils.getInt(strBytes, index + UnsafeUtils.arayOffset).toLong << 32) | hash
  }
  /**
   * Create an "ingestion" RecordSchema with the data columns followed by the partition columns.
   */
  def ingestion(dataset: Dataset, predefinedKeys: Seq[String] = Nil): RecordSchema = {
    val columns = dataset.dataColumns ++ dataset.partitionColumns
    // The partition key starts right after the data columns.
    new RecordSchema(columns.map(c => ColumnInfo(c.name, c.columnType)),
                     Some(dataset.dataColumns.length),
                     predefinedKeys)
  }
  // Inverse of RecordSchema.toSerializableTuple.
  def fromSerializableTuple(tuple: (Seq[ColumnInfo],
                                    Option[Int], Seq[String], Map[Int, RecordSchema])): RecordSchema =
    new RecordSchema(tuple._1, tuple._2, tuple._3, tuple._4)
}
// Used with PartitionTimeRangeReader, when a user queries for a partition column.
// Since a partition column has a single constant value, `next` always returns the
// same UTF8 string extracted once from the partition key.
final class PartKeyUTF8Iterator(schema: RecordSchema, base: Any, offset: Long, fieldNo: Int) extends bv.UTF8Iterator {
val blob = schema.asZCUTF8Str(base, offset, fieldNo)
final def next: ZeroCopyUTF8String = blob
}
// Long-valued counterpart of PartKeyUTF8Iterator: yields the constant Long value
// of field `fieldNo` of the partition key on every call to `next`.
final class PartKeyLongIterator(schema: RecordSchema, base: Any, offset: Long, fieldNo: Int) extends bv.LongIterator {
val num = schema.getLong(base, offset, fieldNo)
final def next: Long = num
}
/**
* This is a class meant to provide a RowReader API for the new BinaryRecord v2.
* NOTE: Strings cause an allocation of a ZeroCopyUTF8String instance. TODO: provide a better API that does
* not result in allocations.
* It is meant to be reused again and again and is MUTABLE: callers repoint
* recordBase/recordOffset at successive records instead of allocating new readers.
*/
final class BinaryRecordRowReader(schema: RecordSchema,
var recordBase: Any = UnsafeUtils.ZeroPointer,
var recordOffset: Long = 0L) extends RowReader {
// BinaryRecordV2 fields always have a value
def notNull(columnNo: Int): Boolean = columnNo >= 0 && columnNo < schema.numFields
// Booleans are stored as ints; any nonzero value is true.
def getBoolean(columnNo: Int): Boolean = schema.getInt(recordBase, recordOffset, columnNo) != 0
def getInt(columnNo: Int): Int = schema.getInt(recordBase, recordOffset, columnNo)
def getLong(columnNo: Int): Long = schema.getLong(recordBase, recordOffset, columnNo)
def getDouble(columnNo: Int): Double = schema.getDouble(recordBase, recordOffset, columnNo)
// Floats are not supported by BinaryRecord v2; calling this throws NotImplementedError.
def getFloat(columnNo: Int): Float = ???
def getString(columnNo: Int): String = filoUTF8String(columnNo).toString
override def getHistogram(columnNo: Int): bv.Histogram =
bv.BinaryHistogram.BinHistogram(blobAsBuffer(columnNo)).toHistogram
def getAny(columnNo: Int): Any = schema.columnTypes(columnNo).keyType.extractor.getField(this, columnNo)
override def filoUTF8String(i: Int): ZeroCopyUTF8String = schema.asZCUTF8Str(recordBase, recordOffset, i)
def getBlobBase(columnNo: Int): Any = schema.blobBase(recordBase, recordOffset, columnNo)
def getBlobOffset(columnNo: Int): Long = schema.blobOffset(recordBase, recordOffset, columnNo)
def getBlobNumBytes(columnNo: Int): Int = schema.blobNumBytes(recordBase, recordOffset, columnNo)
// Reusable buffer backing blobAsBuffer (avoids per-call allocation).
val buf = new UnsafeBuffer(Array.empty[Byte])
// NOTE: this method reuses the same buffer to avoid allocations, so the returned
// DirectBuffer is only valid until the next call.
// The +2 presumably accounts for the 2-byte length prefix at utf8StringOffset —
// TODO confirm against the UTF8StringMedium encoding.
override def blobAsBuffer(columnNo: Int): DirectBuffer = {
UnsafeUtils.wrapDirectBuf(recordBase, schema.utf8StringOffset(recordBase, recordOffset, columnNo),
getBlobNumBytes(columnNo) + 2, buf)
buf
}
}
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.graph
import annotation.tailrec
/**
 * Control flow graph over basic blocks of type V.
 *
 * Wraps an underlying GraphLike and delegates all graph queries to it, adding
 * distinguished entry/exit vertices, a simplification pass (`optimized`) and
 * DOT export. All structural updates (+, -, replace) return a new
 * ControlFlowGraph sharing the same entry and exit vertices.
 */
class ControlFlowGraph[T, V <: BlockVertex[T]](val graph: GraphLike[V], val entryVertex: V, val exitVertex: V) extends ControlFlowGraphLike[V] with DOTExportAvailable[V] {
override type G = ControlFlowGraph[T, V]
type ControlFlowVertex = V
type ControlFlowEdge = E
type ControlFlowElm = T
// Pure delegation of graph queries to the wrapped graph.
override def topsort = graph.topsort
override def sccs = graph.sccs
override def dominance = graph.dominance
override def predecessorsOf(vertex: V) = graph.predecessorsOf(vertex)
override def successorsOf(vertex: V) = graph.successorsOf(vertex)
override def incomingOf(vertex: V) = graph.incomingOf(vertex)
override def verticesIterator = graph.verticesIterator
override def edgesIterator = graph.edgesIterator
override def indegreeOf(vertex: V) = graph.indegreeOf(vertex)
override def outdegreeOf(vertex: V) = graph.outdegreeOf(vertex)
override def contains(edge: E) = graph.contains(edge)
override def outgoingOf(vertex: V) = graph.outgoingOf(vertex)
override def contains(vertex: V) = graph.contains(vertex)
// Structural updates rebuild the CFG around the updated underlying graph.
override def +(edge: E) = new G(graph + edge, entryVertex, exitVertex)
override def -(edge: E) = new G(graph - edge, entryVertex, exitVertex)
override def +(vertex: V) = new G(graph + vertex, entryVertex, exitVertex)
override def -(vertex: V) = new G(graph - vertex, entryVertex, exitVertex)
override def replace(v0: V, v1: V) = new G(graph.replace(v0, v1), entryVertex, exitVertex)
override def optimized = simplified
override def toString = "[ControlFlowGraph]"
// Fixed-point simplification of the CFG:
//  1. removes empty basic blocks whose single outgoing edge is a Jump,
//     re-routing their incoming edges straight to the jump target, and
//  2. removes edges whose start vertex is unreachable (no incoming edges and
//     not the entry vertex), together with that start vertex.
// Repeats until no further change; only rebuilds a new CFG if anything changed.
private lazy val simplified = {
var g = graph
var modified = false
@tailrec def loop() {
var vertices = g.verticesIterator.filterNot(p => p == entryVertex || p == exitVertex)
//remove empty block
vertices.filter(_.isEmpty).foreach {
emptyVertex =>
val out = g.outgoingOf(emptyVertex)
if (out.size == 1 && out.head.kind == EdgeKind.Jump) {
val endEdge = out.head
g = g - endEdge
// Re-point every incoming edge of the removed block at the jump target.
g.incomingOf(emptyVertex).foreach {
startEdge => g = (g - startEdge) + Edge.copy[V](startEdge, Some(startEdge.startVertex), Some(endEdge.endVertex))
}
g = g - emptyVertex
modified = true
}
}
// remove dead edge
for (edge <- g.edgesIterator.filter(e => if (g.contains(e.startVertex)) {g.incomingOf(e.startVertex).isEmpty && !isEntry(e.startVertex)} else false)) {
g = g - edge
g = g - edge.startVertex
modified = true
}
if (modified) {
modified = false
loop()
}
}
loop()
if (g != graph)
new G(g, entryVertex, exitVertex)
else
this
}
// Escapes a string for use inside a double-quoted DOT label.
// NOTE(review): the '>' and '<' cases append the very same character, i.e. they
// are no-ops; possibly "&gt;"/"&lt;" (or "\>"/"\<") escaping was intended — confirm.
def cleanString(str: String) = {
val len = str.length
@tailrec def loop(sb: StringBuilder, strIndex: Int): StringBuilder = {
if (strIndex >= len)
sb
else {
str(strIndex) match {
case '"' => sb append "\\\""
case '>' => sb append ">"
case '<' => sb append "<"
case '\r' => sb append "\\r"
case '\t' => sb append "\\t"
case '\n' => sb append "\\n"
case c => sb append c
}
loop(sb, strIndex + 1)
}
}
loop(new StringBuilder(), 0) toString
}
// Wraps an escaped value in a DOT label attribute.
def label(value: String) = "label=\"" + cleanString(value) + "\""
// DOT attribute string for a vertex; entry/exit vertices get fixed captions.
def vertexToString(vertex: V) = "[" + label({
if (isEntry(vertex))
"Entry"
else if (isExit(vertex))
"Exit"
else
vertex toString
}) + "]"
// DOT attribute string for an edge, labelled by its edge kind.
def edgeToString(edge: E) = "[" + label(edge match {
case DefaultEdge(x, y) => ""
case JumpEdge(x, y) => "jump"
case TrueEdge(x, y) => "true"
case FalseEdge(x, y) => "false"
case DefaultCaseEdge(x, y) => "default"
case CaseEdge(x, y) => "case"
case NumberedCaseEdge(x, y, n) => "case " + n
case ThrowEdge(x, y) => "throw"
case ReturnEdge(x, y) => "return"
}) + "]"
override def dotExport = {
new DOTExport(this, (vertex: V) => vertexToString(vertex), (edge: E) => edgeToString(edge))
}
}
| joa/apparat | apparat-core/src/main/scala/apparat/graph/ControlFlowGraph.scala | Scala | lgpl-2.1 | 4,706 |
package iosr.filters
import akka.actor.Actor
import com.sksamuel.scrimage.Image
import com.sksamuel.scrimage.filter.{TwirlFilter => ScrimageTwirlFilter}
import iosr.Messages.{Response, TwirlCommand}
/**
 * Actor that applies a twirl distortion filter to an incoming image and
 * replies to the sender with the filtered image bytes.
 */
class TwirlFilter extends Actor {
override def receive: Receive = {
case TwirlCommand(imageBytes, params) =>
// Capture the sender before doing any work, as in all filter actors.
val replyTo = sender()
val filtered = Image(imageBytes).filter(ScrimageTwirlFilter(params.radius))
replyTo ! Response(filtered.bytes)
}
}
| salceson/iosr-cloud-load-balancing | worker/src/main/scala/iosr/filters/TwirlFilter.scala | Scala | mit | 512 |
package domala.jdbc.models
case class Name(value: String) extends AnyVal
| bakenezumi/domala | core/src/test/scala/domala/jdbc/models/Name.scala | Scala | apache-2.0 | 74 |
package com.example
import com.typesafe.config.{Config, ConfigFactory}
import scala.collection.JavaConverters._
/**
 * Mixin exposing the application's Typesafe Config settings.
 * NOTE: all fields below mainConfig are strict vals, so every listed path must
 * be present in the loaded configuration or constructing the mixing-in class
 * fails during initialization.
 */
trait ConfigTrait {
// Loaded on first access (application.conf / reference.conf via ConfigFactory).
lazy val mainConfig: Config = ConfigFactory.load()
val checkpointDirectory = mainConfig.getString("app.kafka.checkpoint-dir")
val _collectionPath = mainConfig.getString("app.mongo.collection-path")
val mongoHost = mainConfig.getString("app.mongo.host")
val brokers = mainConfig.getString("app.kafka.brokers")
// Kafka command topics as an immutable Set of topic names.
val topics = mainConfig.getStringList("app.kafka.commands-topics").asScala.toSet
val _eventsTopic = mainConfig.getString("app.kafka.events-topic")
}
| btrofimov/spark-enterprise-example | nonblocking-bulletinboard/bl-processor/src/main/scala/com/example/ConfigTrait.scala | Scala | apache-2.0 | 609 |
package com.thetestpeople.trt.importer.teamcity
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import com.thetestpeople.trt.utils.TestUtils
import org.joda.time.DateTime
import org.joda.time.DateTimeZone.forOffsetHours
import com.thetestpeople.trt.utils.UriUtils._
import org.joda.time.Duration
/**
 * Tests for TeamCityUrlParser: extracting the server URL and the build
 * configuration id from a TeamCity job URL.
 */
@RunWith(classOf[JUnitRunner])
class TeamCityUrlParserTest extends FlatSpec with Matchers {
"TeamCity URL parser" should "find the build configuration and server URL" in {
val teamCityUrl = uri("https://teamcity.jetbrains.com/viewType.html?buildTypeId=NetCommunityProjects_Femah_Commit")
// A successful parse yields Right(TeamCityJobLink(...)); the pattern binding
// fails the test with a MatchError otherwise.
val Right(TeamCityJobLink(serverUrl, buildTypeId)) = TeamCityUrlParser.parse(teamCityUrl)
serverUrl should equal(uri("https://teamcity.jetbrains.com"))
buildTypeId should equal("NetCommunityProjects_Femah_Commit")
}
it should "give an error on a non-TeamCity URL" in {
// Failures are reported as Left; the pattern binding asserts that shape.
val Left(_) = TeamCityUrlParser.parse(uri("http://www.google.com"))
}
}
package org.jetbrains.plugins.scala.lang.completion.filters.other
import com.intellij.psi._
import com.intellij.psi.filters.ElementFilter
import org.jetbrains.annotations.NonNls
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.completion.filters.other.DerivesFilter._
import org.jetbrains.plugins.scala.lang.lexer.{ScalaTokenType, ScalaTokenTypes}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
/**
 * Completion filter deciding whether the `derives` soft keyword should be
 * suggested at the current position. Only applies in Scala 3 files, right
 * after a type definition that does not already have a derives clause.
 */
class DerivesFilter extends ElementFilter {
override def isAcceptable(element: Any, context: PsiElement): Boolean = {
// `derives` exists only in Scala 3, and never inside comments.
if (!context.isInScala3File || context.is[PsiComment]) return false
val (leaf, _) = processPsiLeafForFilter(getLeafByOffset(context.getTextRange.getStartOffset, context))
if (leaf == null) return false
// First meaningful (non-comment, non-whitespace) element before the position.
val errorBeforeDerivesStart = leaf.prevLeafs.filterNot(_.is[PsiComment, PsiWhiteSpace]).nextOption()
errorBeforeDerivesStart match {
// Accept only when the position follows a type definition (the incomplete
// `derives` shows up as a PsiErrorElement right after it).
case Some((_: PsiErrorElement) && PrevSibling(typeDefBeforeError: ScTypeDefinition)) =>
if (typeDefBeforeError.extendsBlock.derivesClause.isDefined) false
else {
// Do not suggest `derives` before `extends` or another `derives`
!leaf.nextVisibleLeaf.exists(l => isExtendsKeyword(l) || isDerivesSoftKeyword(l))
}
case _ => false
}
}
override def isClassAcceptable(hintClass: Class[_]): Boolean = true
@NonNls
override def toString: String = "derives keyword filter"
}
object DerivesFilter {
/** True if the leaf is the `extends` keyword token. */
private def isExtendsKeyword(leaf: PsiElement): Boolean =
leaf.elementType == ScalaTokenTypes.kEXTENDS

/** True if the leaf represents the `derives` soft keyword. */
private def isDerivesSoftKeyword(leaf: PsiElement): Boolean = {
val tpe = leaf.elementType
// Scala 3 soft keywords may appear either as the dedicated token or as a
// plain identifier whose text is "derives".
tpe == ScalaTokenType.DerivesKeyword ||
(tpe == ScalaTokenTypes.tIDENTIFIER && leaf.textMatches(ScalaTokenType.DerivesKeyword.keywordText))
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/completion/filters/other/DerivesFilter.scala | Scala | apache-2.0 | 1,988 |
package com.twitter.finagle
import com.twitter.finagle.dispatch.{GenSerialClientDispatcher, SerialClientDispatcher, SerialServerDispatcher}
import com.twitter.finagle.netty3.transport.ChannelTransport
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.tracing.TraceInitializerFilter
import com.twitter.finagle.transport.Transport
import com.twitter.util.Closable
import java.net.{InetSocketAddress, SocketAddress}
import org.jboss.netty.channel.{Channel, ChannelPipeline, ChannelPipelineFactory}
/**
 * Codecs provide protocol encoding and decoding via netty pipelines
 * as well as a standard filter stack that is applied to services
 * from this codec.
 */
trait Codec[Req, Rep] {
/**
* The pipeline factory that implements the protocol.
*/
def pipelineFactory: ChannelPipelineFactory
/* Note: all of the below interfaces are scheduled for deprecation in favor of
* clients/servers
*/
/**
* Prepare a factory for usage with the codec. Used to allow codec
* modifications to the service at the top of the network stack.
* Default: identity (no modification).
*/
def prepareServiceFactory(
underlying: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] =
underlying
/**
* Prepare a connection factory. Used to allow codec modifications
* to the service at the bottom of the stack (connection level).
* Default: identity (no modification).
*/
def prepareConnFactory(
underlying: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] =
underlying
/**
* Note: the below ("raw") interfaces are low level, and require a
* good understanding of finagle internals to implement correctly.
* Proceed with care.
*/
def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any, Any] =
new ChannelTransport(ch)
// Convenience overload with empty Stack params.
def newClientDispatcher(transport: Transport[Any, Any]): Service[Req, Rep] =
newClientDispatcher(transport, Stack.Params.empty)
// Default client dispatcher: one outstanding request at a time, stats scoped
// under the standard dispatcher scope.
def newClientDispatcher(
transport: Transport[Any, Any],
params: Stack.Params
): Service[Req, Rep] =
new SerialClientDispatcher(
Transport.cast[Req, Rep](transport),
params[param.Stats].statsReceiver.scope(GenSerialClientDispatcher.StatsScope)
)
// Default server dispatcher: serial, mirroring the client side.
def newServerDispatcher(
transport: Transport[Any, Any],
service: Service[Req, Rep]
): Closable =
new SerialServerDispatcher[Req, Rep](Transport.cast[Rep, Req](transport), service)
/**
* Is this Codec OK for failfast? This is a temporary hack to
* disable failFast for codecs for which it isn't well-behaved.
*/
def failFastOk = true
/**
* A hack to allow for overriding the TraceInitializerFilter when using
* Client/Server Builders rather than stacks.
*/
def newTraceInitializer: Stackable[ServiceFactory[Req, Rep]] = TraceInitializerFilter.clientModule[Req, Rep]
/**
* A protocol library name to use for displaying which protocol library this client or server is using.
*/
def protocolLibraryName: String = "not-specified"
}
/**
 * Abstract class version of [[Codec]] for Java compatibility (Java code
 * cannot directly extend a Scala trait with implemented members).
 */
abstract class AbstractCodec[Req, Rep] extends Codec[Req, Rep]
object Codec {
/**
 * Builds a codec whose pipeline is produced by evaluating `makePipeline`
 * each time a pipeline is requested from the factory.
 */
def ofPipelineFactory[Req, Rep](makePipeline: => ChannelPipeline) =
new Codec[Req, Rep] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline = makePipeline
}
}

/** Builds a codec that always hands out the given, already-built pipeline. */
def ofPipeline[Req, Rep](p: ChannelPipeline) = ofPipelineFactory[Req, Rep](p)
}
/**
 * Codec factories create codecs given some configuration.
 */
/**
 * Configuration handed to a client codec factory.
 *
 * @param serviceName name of the client's service
 */
case class ClientCodecConfig(serviceName: String)
/**
 * Configuration handed to a server codec factory.
 *
 * @param serviceName name of the served service
 * @param boundAddress address the server is bound to
 */
case class ServerCodecConfig(serviceName: String, boundAddress: SocketAddress) {
/**
 * The bound address viewed as an InetSocketAddress; non-inet (or null)
 * addresses fall back to a wildcard address on port 0.
 */
def boundInetSocketAddress =
Option(boundAddress).collect { case inet: InetSocketAddress => inet }.getOrElse(new InetSocketAddress(0))
}
/**
 * A combined codec factory provides both client and server codec
 * factories in one (when available).
 */
trait CodecFactory[Req, Rep] {
// Factory functions from the respective configuration to a concrete codec.
type Client = ClientCodecConfig => Codec[Req, Rep]
type Server = ServerCodecConfig => Codec[Req, Rep]
def client: Client
def server: Server
/**
* A protocol library name to use for displaying which protocol library this client or server is using.
*/
def protocolLibraryName: String = "not-specified"
}
| liamstewart/finagle | finagle-core/src/main/scala/com/twitter/finagle/Codec.scala | Scala | apache-2.0 | 4,318 |
/*
* This file is part of the Linux Variability Modeling Tools (LVAT).
*
* Copyright (C) 2011 Steven She <shshe@gsd.uwaterloo.ca>
*
* LVAT is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* LVAT is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LVAT. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package gsd.linux.stats
import com.typesafe.scalalogging.LazyLogging
import java.io.PrintStream
import gsd.linux.cnf.DimacsReader.{DimacsHeader, DimacsProblem}
import gsd.linux.cnf.{DimacsReader, SATBuilder}
import java.util.Scanner
import org.clapper.argot._
import gsd.linux.tools.ArgotUtil
/**
 * Command-line tool that reads a CNF formula in DIMACS format and prints the
 * dead features (variables that can never be true) found by a SAT solver.
 * Input/output come either from a project (-p) or from explicit file
 * parameters; stdin/stdout are the fallbacks.
 */
object DeadFeaturesMain extends ArgotUtil with LazyLogging {
import ArgotConverters._
val name = "DeadFeaturesMain"
val inParam = parser.parameter[String]("in-file",
"input file containing CNF in dimacs format, stdin if not specified", false)
val outParam = parser.parameter[String]("out-file",
"output file for the list of dead features, stdout if not specified", true)
val genFlag = parser.flag[Boolean](List("g"),
"do NOT consider variables that end with '_m' as generated")
def main(args: Array[String]) {
try {
parser.parse(args)
// Resolve the DIMACS header + problem from -p, an input file, or stdin.
val (header, problem): (DimacsHeader, DimacsProblem) =
(pOpt.value, inParam.value) match {
case (Some(_), Some(_)) =>
parser.usage("Either a project (-p) is specified or input & output parameters are used.")
case (Some(p), None) => (p.header, p.dimacs)
case (None, Some(f)) =>
(DimacsReader.readHeaderFile(f), DimacsReader.readFile(f))
case (None, None) =>
logger.info("Using stdin as input...")
logger.info("Warning: dimacs parsing from stdin is experimental!")
val scanner = new Scanner(System.in)
val header = DimacsReader.readHeader(scanner)
val dimacs = DimacsReader.read(scanner)
(header, dimacs)
}
// Resolve the output sink analogously; stdout if nothing was given.
val output =
(pOpt.value, outParam.value) match {
case (Some(p), None) => new PrintStream(p.implgFile.get)
case (None, Some(f)) => new PrintStream(f)
case _ => System.out
}
execute(header, problem, output)
}
catch {
case e: ArgotUsageException => println(e.message)
}
}
// Runs the dead-feature analysis and prints one dead feature per line.
def execute(header: DimacsHeader, dimacs: DimacsProblem,
out: PrintStream) {
// Unless -g was passed, variables ending in "_m" are also treated as generated.
val generated =
if (genFlag.value.getOrElse(false)) header.generated
else {
logger.info("[INFO] Considering features that end with _m as generated...")
header.generated ++
(header.varMap filter { case (_,v) => v.endsWith("_m") } map (_._1))
}
logger.info("Initializing SAT solver...")
val sat = new SATBuilder(dimacs.cnf, dimacs.numVars, generated)
with LazyLogging
val stats = new SATStatistics(sat, header.idMap) with LazyLogging
stats.deadFeatures foreach out.println
}
}
| matachi/linux-variability-analysis-tools.fm-translation | src/main/scala/gsd/linux/stats/DeadFeaturesMain.scala | Scala | gpl-3.0 | 3,477 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.keras.{KerasBaseSpec, KerasRunner, Regularizer}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
/**
 * Compares BigDL's ActivityRegularization layer with the Keras reference
 * implementation: forward output, regularization loss, and gradients.
 */
class ActivityRegularizationSpec extends KerasBaseSpec {
"ActivityRegularization" should "same as keras" in {
ifskipTest()
// Keras reference script run by KerasRunner; this string is executed as-is
// and must not be altered lightly.
val keras =
"""
|act_reg = core.ActivityRegularization(l1=0.01, l2=0.01)
|
|input_tensor = Input(shape=(2,))
|output_tensor = act_reg(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
|
|input = np.random.random((2, 2))
|loss = model.losses
|
|Y = []
""".stripMargin
val ar = ActivityRegularization[Float](0.01, 0.01)
val (gradInput, gradWeight, weights, input, target, output) = KerasRunner.run(keras,
Regularizer)
// Forward pass must match Keras' output within tolerance.
val boutput = ar.forward(input)
boutput.almostEqual(output, 1e-5) should be(true)
// The accumulated regularization loss must match Keras' reported loss.
ar.loss.toDouble should be (target.value().toDouble +- 1e-5)
// Backward pass must match Keras' input gradients within tolerance.
val bgradInput = ar.backward(input, boutput.clone())
bgradInput.almostEqual(gradInput, 1e-5) should be(true)
}
}
/** Round-trips an ActivityRegularization module through serialization. */
class ActivityRegularizationSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val module = ActivityRegularization[Float](l1 = 0.01, l2 = 0.01).
setName("activityRegularization")
// Random 5x5 input used to exercise the deserialized module.
val randomInput = Tensor[Float](5, 5).apply1(_ => Random.nextFloat())
runSerializationTest(module, randomInput)
}
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ActivityRegularizationSpec.scala | Scala | apache-2.0 | 2,210 |
package specs.analysis
import helpers.Utils
import org.scalatest._
import utils.TestUtils
/**
 * Mutability-analysis tests for cyclic class dependencies: classes that
 * reference each other (directly or through a chain) must be classified the
 * same way regardless of declaration order.
 */
class CyclicSpec extends FlatSpec {
// Counter used to generate a unique name for each test case.
var i = 0
def testNr: String = {
i += 1
i.toString
}
// Mutual val references: both deeply immutable, in either declaration order.
it should testNr in {
TestUtils.expectMutability(List("A", "B"), Utils.IsDeeplyImmutable) {
"""
class A {
val b: B = new B
}
class B {
val a: A = new A
}
"""
}
TestUtils.expectMutability(List("A", "B"), Utils.IsDeeplyImmutable) {
"""
class B {
val a: A = new A
}
class A {
val b: B = new B
}
"""
}
}
// Mutual var references: both mutable, in either declaration order.
it should testNr in {
TestUtils.expectMutability(List("A", "B"), Utils.IsMutable) {
"""
class A {
var b: B = new B
}
class B {
var a: A = new A
}
"""
}
TestUtils.expectMutability(List("A", "B"), Utils.IsMutable) {
"""
class B {
var a: A = new A
}
class A {
var b: B = new B
}
"""
}
}
// Mixed cycle: the var-holder is mutable; the val-holder pointing at it is
// only shallowly immutable.
it should testNr in {
TestUtils.expectMutability(Map(List("B") -> Utils.IsShallowImmutable, List("A") -> Utils.IsMutable)) {
"""
class A {
var b: B = new B
}
class B {
val a: A = new A
}
"""
}
TestUtils.expectMutability(Map(List("B") -> Utils.IsShallowImmutable, List("A") -> Utils.IsMutable)) {
"""
class B {
val a: A = new A
}
class A {
var b: B = new B
}
"""
}
}
// Three-class cycle: shallow immutability propagates to every val-holder that
// can reach the mutable class A.
it should testNr in {
TestUtils.expectMutability(Map(List("C", "B") -> Utils.IsShallowImmutable, List("A") -> Utils.IsMutable)) {
"""
class C {
val a: A = new A
val B: B = new B
}
class A {
var c: C = new C
}
class B {
val c: C = new C
}
"""
}
TestUtils.expectMutability(Map(List("C", "B") -> Utils.IsShallowImmutable, List("A") -> Utils.IsMutable)) {
"""
class A {
var c: C = new C
}
class C {
val a: A = new A
val B: B = new B
}
class B {
val c: C = new C
}
"""
}
}
// Four-class cycle of vals: all deeply immutable, both as a plain reference
// cycle and as an inheritance chain.
it should testNr in {
TestUtils.expectMutability(List("A", "B", "C", "D"), Utils.IsDeeplyImmutable) {
"""
class A {
val b: B = new B
}
class B {
val c: C = new C
}
class C {
val d: D = new D
}
class D {
val a: A = new A
}
"""
}
TestUtils.expectMutability(List("A", "B", "C", "D"), Utils.IsDeeplyImmutable) {
"""
class D extends C {
val a: A = new A
}
class C extends B {
val d: D = new D
}
class B extends A {
val c: C = new C
}
class A {
val b: B = new B
}
"""
}
}
}
| luax/scala-immutability-plugin | plugin/src/test/scala/specs/analysis/CyclicSpec.scala | Scala | mit | 2,845 |
package layer
import layer.configuration.{ProjectBasicInfo, Project}
import Project.StructureKind._
import config.Configuration
import layer.constructor.{GraphFile, GraphConstructor}
import layer.module._
/**
* Created by chanjinpark on 15. 3. 19..
*/
/**
 * Extracts structural relationships (currently generalization/subtyping) from
 * a project's type dependency graph.
 */
object StructureExtractor {

  /**
   * Collects the generalization (subtype) relationships in the given type graph.
   *
   * @param tdg type structure graph to inspect
   * @return map from each supertype name to the names of its subtypes
   */
  def extractGeneralization(tdg: TypeStructure): Map[String, List[String]] = {
    val subtypePairs = tdg.edges
      .filter(_.isSubtypeEdge)
      .map(e => (e.getTarget.value, e.getSource.value))
    // Group subtype names under their common supertype.
    subtypePairs.groupBy(_._1).map { case (superType, pairs) => superType -> pairs.map(_._2) }
  }

  /**
   * Builds the type dependency graph for project `p`, reanalyzing the sources
   * when required or loading a previously saved graph otherwise.
   *
   * Side effects: sets the global Project.proj, clears old .dot output files,
   * and sets the Graph2Dot prefix.
   *
   * @param p   project name
   * @param jar path to the project's jar file
   * @return the project's type structure graph
   */
  def getTypeGraph(p: String, jar: String): TypeStructure = {
    val project: ProjectBasicInfo = new ProjectBasicInfo(p, jar, Configuration.srcdir(p), "", false)
    Project.proj = project
    layer.util.FileOut.removeFiles(project.getOutputPath, "dot")
    Graph2Dot.prefix = project.prefix
    // Was: a null-initialized var assigned in both branches; an if-expression
    // avoids both the null and the mutation. The package graph is unused here.
    val (typeGraph, _) =
      if (project.requireReanalyze) {
        println("generate layer class information")
        GraphConstructor.constructTypeStructure(project.rootpath)
      } else {
        GraphFile.load(project.getOutputPath)
      }
    typeGraph
  }

  // Explicit Unit return type: a main method that returns a value is not a
  // valid JVM entry point (the generated static forwarder must be void).
  def main(args: Array[String]): Unit = {
    val p = "junit"
    val f = Configuration.jarfile(p, "junit-4.11.jar")
    val tg = getTypeGraph(p, f)
    extractGeneralization(tg)
  }
}
package com.recursivity.commons.validator
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: Oct 31, 2010
* Time: 3:44:44 PM
* To change this template use File | Settings | File Templates.
*/
/**
 * Validator checking that a lazily evaluated Long value is at least `min`.
 * The value is re-read (via the thunk) on every isValid call.
 */
case class MinLongValidator(key: String, min: Long, value: () => Long) extends Validator{
def getKey = key
def isValid = value() >= min
def getReplaceModel = List("min" -> min)
}
object MinLong {
// Convenience factory: takes `value` by name so it is re-evaluated each time
// the validator's isValid is checked.
def apply(key: String, min: Long, value: => Long) =
MinLongValidator(key, min, () => value)
}
| rkpandey/recursivity-commons | src/main/scala/com/recursivity/commons/validator/MinLongValidator.scala | Scala | bsd-3-clause | 511 |
package is.hail.io
import java.io._
import is.hail.annotations._
import is.hail.expr.ir.ExecuteContext
import is.hail.types.physical._
import is.hail.io.fs.FS
import is.hail.io.index.IndexWriter
import is.hail.rvd.{AbstractIndexSpec, IndexSpec, MakeRVDSpec, RVDContext, RVDPartitioner, RVDType}
import is.hail.sparkextras._
import is.hail.utils._
import is.hail.utils.richUtils.ByteTrackingOutputStream
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.{ExposedMetrics, TaskContext}
import scala.reflect.ClassTag
object RichContextRDDRegionValue {
/**
 * Writes one partition of rows (given as region-value pointers) to `os` using
 * the encoder built by `makeEnc`, optionally appending index entries to `iw`.
 * Each row is framed with a leading 1 byte; a trailing 0 byte marks the end.
 * Returns (row count, total bytes written including the index).
 */
def writeRowsPartition(
makeEnc: (OutputStream) => Encoder,
indexKeyFieldIndices: Array[Int] = null,
rowType: PStruct = null
)(ctx: RVDContext, it: Iterator[Long], os: OutputStream, iw: IndexWriter): (Long, Long) = {
val context = TaskContext.get
// Metrics are only available when running inside a Spark task.
val outputMetrics =
if (context != null)
context.taskMetrics().outputMetrics
else
null
val trackedOS = new ByteTrackingOutputStream(os)
val en = makeEnc(trackedOS)
var rowCount = 0L
it.foreach { ptr =>
if (iw != null) {
// The index entry must record the encoder offset before the row is written.
val off = en.indexOffset()
val key = SafeRow.selectFields(rowType, ctx.r, ptr)(indexKeyFieldIndices)
iw.appendRow(key, off, Row())
}
en.writeByte(1)
en.writeRegionValue(ptr)
// Free per-row memory; the pointer is consumed above.
ctx.region.clear()
rowCount += 1
if (outputMetrics != null) {
ExposedMetrics.setBytes(outputMetrics, trackedOS.bytesWritten)
ExposedMetrics.setRecords(outputMetrics, rowCount)
}
}
en.writeByte(0) // end
en.flush()
var bytesWritten = 0L
if (iw != null) {
// close() flushes to the output stream, so look up bytesWritten after closing
iw.close()
bytesWritten += iw.trackedOS().bytesWritten
}
if (outputMetrics != null) {
ExposedMetrics.setBytes(outputMetrics, trackedOS.bytesWritten)
}
bytesWritten += trackedOS.bytesWritten
os.close()
(rowCount, bytesWritten)
}
/**
 * Writes one partition to separate rows/entries files plus an index that maps
 * each key to its offsets in both files. With stageLocally, files are first
 * written to local temp paths (cleaned up on task completion) and then copied
 * to their final locations.
 */
def writeSplitRegion(
localTmpdir: String,
fs: FS,
path: String,
t: RVDType,
it: Iterator[Long],
idx: Int,
ctx: RVDContext,
partDigits: Int,
stageLocally: Boolean,
makeIndexWriter: (String) => IndexWriter,
makeRowsEnc: (OutputStream) => Encoder,
makeEntriesEnc: (OutputStream) => Encoder
): FileWriteMetadata = {
val fullRowType = t.rowType
val context = TaskContext.get
val f = partFile(partDigits, idx, context)
val outputMetrics = context.taskMetrics().outputMetrics
val finalRowsPartPath = path + "/rows/rows/parts/" + f
val finalEntriesPartPath = path + "/entries/rows/parts/" + f
val finalIdxPath = path + "/index/" + f + ".idx"
val (rowsPartPath, entriesPartPath, idxPath) =
if (stageLocally) {
val rowsPartPath = ExecuteContext.createTmpPathNoCleanup(localTmpdir, "write-split-staged-rows-part")
val entriesPartPath = ExecuteContext.createTmpPathNoCleanup(localTmpdir, "write-split-staged-entries-part")
val idxPath = rowsPartPath + ".idx"
// Staged files are deleted when the task finishes (after the copy below).
context.addTaskCompletionListener[Unit] { (context: TaskContext) =>
fs.delete(rowsPartPath, recursive = false)
fs.delete(entriesPartPath, recursive = false)
fs.delete(idxPath, recursive = true)
}
(rowsPartPath, entriesPartPath, idxPath)
} else
(finalRowsPartPath, finalEntriesPartPath, finalIdxPath)
// Nested `using` blocks guarantee close order: index writer, then encoders,
// then the underlying streams.
val (rowCount, totalBytesWritten) = using(fs.create(rowsPartPath)) { rowsOS =>
val trackedRowsOS = new ByteTrackingOutputStream(rowsOS)
using(makeRowsEnc(trackedRowsOS)) { rowsEN =>
using(fs.create(entriesPartPath)) { entriesOS =>
val trackedEntriesOS = new ByteTrackingOutputStream(entriesOS)
using(makeEntriesEnc(trackedEntriesOS)) { entriesEN =>
using(makeIndexWriter(idxPath)) { iw =>
var rowCount = 0L
it.foreach { ptr =>
// Offsets into both files must be captured before each write.
val rows_off = rowsEN.indexOffset()
val ents_off = entriesEN.indexOffset()
val key = SafeRow.selectFields(fullRowType, ctx.r, ptr)(t.kFieldIdx)
iw.appendRow(key, rows_off, Row(ents_off))
rowsEN.writeByte(1)
rowsEN.writeRegionValue(ptr)
entriesEN.writeByte(1)
entriesEN.writeRegionValue(ptr)
ctx.region.clear()
rowCount += 1
ExposedMetrics.setBytes(outputMetrics, trackedRowsOS.bytesWritten + trackedEntriesOS.bytesWritten)
ExposedMetrics.setRecords(outputMetrics, 2 * rowCount)
}
rowsEN.writeByte(0) // end
entriesEN.writeByte(0)
rowsEN.flush()
entriesEN.flush()
val totalBytesWritten = trackedRowsOS.bytesWritten + trackedEntriesOS.bytesWritten + iw.trackedOS().bytesWritten
ExposedMetrics.setBytes(outputMetrics, totalBytesWritten)
(rowCount, totalBytesWritten)
}
}
}
}
}
if (stageLocally) {
fs.copy(rowsPartPath, finalRowsPartPath)
fs.copy(entriesPartPath, finalEntriesPartPath)
fs.copy(idxPath + "/index", finalIdxPath + "/index")
fs.copy(idxPath + "/metadata.json.gz", finalIdxPath + "/metadata.json.gz")
}
FileWriteMetadata(f, rowCount, totalBytesWritten)
}
/**
 * Writes the RVD spec metadata for both the rows and entries components of a
 * split write; the entries component is written with an unkeyed partitioner.
 */
def writeSplitSpecs(
fs: FS,
path: String,
rowsCodecSpec: AbstractTypedCodecSpec,
entriesCodecSpec: AbstractTypedCodecSpec,
rowsIndexSpec: AbstractIndexSpec,
entriesIndexSpec: AbstractIndexSpec,
t: RVDType,
rowsRVType: PStruct,
entriesRVType: PStruct,
partFiles: Array[String],
partitioner: RVDPartitioner
) {
val rowsSpec = MakeRVDSpec(rowsCodecSpec, partFiles, partitioner, rowsIndexSpec)
rowsSpec.write(fs, path + "/rows/rows")
val entriesSpec = MakeRVDSpec(entriesCodecSpec, partFiles,
RVDPartitioner.unkeyed(partitioner.numPartitions), entriesIndexSpec)
entriesSpec.write(fs, path + "/entries/rows")
}
}
/** Extension operations on a ContextRDD of region-value pointers (Longs). */
class RichContextRDDLong(val crdd: ContextRDD[Long]) extends AnyVal {
// Inserts a region boundary: values are produced in a fresh context whose
// region is cleared before each element is consumed, so consumers must not
// hold on to pointers across iterations.
def boundary: ContextRDD[Long] =
crdd.cmapPartitionsAndContext { (consumerCtx, part) =>
val producerCtx = consumerCtx.freshContext
val it = part.flatMap(_ (producerCtx))
new Iterator[Long]() {
// Tracks whether the producer region was already cleared for this step,
// so hasNext followed by next clears only once.
private[this] var cleared: Boolean = false
def hasNext: Boolean = {
if (!cleared) {
cleared = true
producerCtx.region.clear()
}
it.hasNext
}
def next: Long = {
if (!cleared) {
producerCtx.region.clear()
}
cleared = false
it.next
}
}
}
// Wraps each pointer in a single reused RegionValue per partition.
def toCRDDRegionValue: ContextRDD[RegionValue] =
boundary.cmapPartitionsWithContext((ctx, part) => {
val rv = RegionValue(ctx.r)
part(ctx).map(ptr => { rv.setOffset(ptr); rv })
})
/**
 * Writes all partitions as encoded row files under `path`, with per-partition
 * indexes keyed by the RVD key. Returns one FileWriteMetadata per partition.
 */
def writeRows(
ctx: ExecuteContext,
path: String,
idxRelPath: String,
t: RVDType,
stageLocally: Boolean,
encoding: AbstractTypedCodecSpec
): Array[FileWriteMetadata] = {
crdd.writePartitions(
ctx,
path,
idxRelPath,
stageLocally,
IndexWriter.builder(ctx, t.kType, +PCanonicalStruct()),
RichContextRDDRegionValue.writeRowsPartition(
encoding.buildEncoder(ctx, t.rowType),
t.kFieldIdx,
t.rowType))
}
// Materializes each region value into a safe (off-region) Row.
def toRows(rowType: PStruct): RDD[Row] = {
crdd.cmap((ctx, ptr) => SafeRow(rowType, ptr)).run
}
}
// Enrichment of ContextRDD[RegionValue] with region-lifecycle-aware helpers.
class RichContextRDDRegionValue(val crdd: ContextRDD[RegionValue]) extends AnyVal {
  // Same element-boundary region clearing as RichContextRDDLong.boundary, over
  // RegionValue elements: the producer region is cleared once per element boundary.
  def boundary: ContextRDD[RegionValue] =
    crdd.cmapPartitionsAndContext { (consumerCtx, part) =>
      val producerCtx = consumerCtx.freshContext
      val it = part.flatMap(_ (producerCtx))
      new Iterator[RegionValue]() {
        private[this] var cleared: Boolean = false
        def hasNext: Boolean = {
          if (!cleared) {
            cleared = true
            producerCtx.region.clear()
          }
          it.hasNext
        }
        def next: RegionValue = {
          if (!cleared) {
            producerCtx.region.clear()
          }
          cleared = false
          it.next
        }
      }
    }
  // Drops down to raw pointers; ties each foreign region's lifetime to the
  // consumer's region so the pointed-to memory stays live.
  def toCRDDPtr: ContextRDD[Long] =
    crdd.cmap { (consumerCtx, rv) =>
      // Need to track regions that are in use, but don't want to create a cycle.
      if (consumerCtx.region != rv.region) {
        consumerCtx.region.addReferenceTo(rv.region)
      }
      rv.offset
    }
  // Like boundary, but clears the consumer context's own region (no fresh
  // producer context) — NOTE(review): assumes elements live in ctx.region.
  def cleanupRegions: ContextRDD[RegionValue] = {
    crdd.cmapPartitionsAndContext { (ctx, part) =>
      val it = part.flatMap(_ (ctx))
      new Iterator[RegionValue]() {
        private[this] var cleared: Boolean = false
        def hasNext: Boolean = {
          if (!cleared) {
            cleared = true
            ctx.region.clear()
          }
          it.hasNext
        }
        def next: RegionValue = {
          if (!cleared) {
            ctx.region.clear()
          }
          cleared = false
          it.next
        }
      }
    }
  }
  // Materializes each RegionValue as a safe (deep-copied, on-heap) Row.
  def toRows(rowType: PStruct): RDD[Row] = {
    crdd.run.map(rv => SafeRow(rowType, rv.offset))
  }
}
| danking/hail | hail/src/main/scala/is/hail/io/RichContextRDDRegionValue.scala | Scala | mit | 9,259 |
/** This file is part of TextCompose, a program for producing PDF from text files.
* Copyright 2014 Jesper S Villadsen <jeschvi@gmail.com>
* License: GNU Affero General Public License version 3 or later.
* For full license text see LICENSE.txt or <http://www.gnu.org/licenses/>.
*/
package textcompose.core
import scala.collection.mutable.HashMap
import scala.io.Source
import java.io.File
// Registry of all document variables plus the state of an in-progress "copy"
// operation (capturing text between tags into a variable).
class VariableRegister {
  private val variables = new HashMap[String, Variable]
  private var copyingStarted = false // a copy region is currently open
  private var copyingDepthCounter = 0 // tag-nesting depth inside the copy region
  private var copyingVariableName = "" // target variable of the active copy
  private var copyingKeyName = "" // target key (map variables only)
  private var copyingForAdd = false // append vs. overwrite when the copy is committed
  private var copiedValue = "" // text accumulated so far
def toBaseType(kind: String) = kind match {
case "Str" => BaseType.Str
case "Int" => BaseType.Int
case _ => throw new TagError("Unknown base type '" + kind + "'. Try 'Int' or 'Str'.")
}
  // Declares (or re-declares) a variable. An empty keyTypeName means a plain
  // value variable; otherwise a map variable keyed by that type. Re-declaring
  // with the same types is a no-op; changing the type of an already-declared
  // variable is an error, but a placeholder created only to hold a "prior"
  // value (by load) may be replaced.
  def declareVariable(name: String, keyTypeName: String, valueTypeName: String, converge: Boolean) {
    val keyType = if (keyTypeName == "") BaseType.NA else toBaseType(keyTypeName)
    val valueType = toBaseType(valueTypeName)
    // True when the existing entry already has exactly the requested types.
    def unchangedType: Boolean = variables(name) match {
      case v: ValueVariable => v.valType == valueType
      case v: MapVariable => v.valType == valueType && v.keyType == keyType
    }
    val addNewVariable =
      if (variables.contains(name)) {
        if (unchangedType) {
          false
        } else {
          if (variables(name).isDeclared) {
            throw new TagError("Attempt to change type of variable '" + name + "'")
          } else {
            // It was added to carry a "prior" value but of another type.
            variables -= name
            true
          }
        }
      } else {
        true
      }
    if (addNewVariable) {
      variables(name) = if (keyType == BaseType.NA) {
        ValueVariable(name, valueType, "")
      } else {
        MapVariable(name, keyType, valueType)
      }
    }
    // Mark declared (and whether it participates in convergence) in all cases.
    variables(name).declare(converge)
  }
def tryGetType(name: String) = {
if (variables.contains(name)) {
variables(name) match {
case v: ValueVariable => "val"
case v: MapVariable => "map"
}
} else {
"NA"
}
}
def stopCopying() {
copyingStarted = false
copiedValue = ""
}
def startCopying(name: String, key: String, add: Boolean) {
if (!variables.contains(name)) throw new TagError("Unknown variable '" + name + "'")
copyingStarted = true
copyingDepthCounter = 1
copyingVariableName = name
copyingKeyName = key
copyingForAdd = add
}
  // Appends a chunk of captured text to the copy buffer.
  def copy(s: String) {
    copiedValue += s
  }
  // True while a copy region is open.
  def isCopying = copyingStarted
  // True when the active copy targets `name` and that variable must converge.
  def isCopyingToConvergeVariable(name: String) =
    copyingStarted && name == copyingVariableName && variables(name).mustConverge
  // Depth tracking lets nested start/end tags be copied verbatim.
  def increaseDepth = { copyingDepthCounter += 1 }
  def decreaseDepth = { copyingDepthCounter -= 1 }
  def isDepthZero = copyingDepthCounter == 0
  def getCurrentlyAdding = copyingForAdd
  def getCurrentVariable = copyingVariableName
def updateVariable {
if (variables.contains(copyingVariableName)) {
variables(copyingVariableName) match {
case v: ValueVariable => {
if (copyingForAdd) v.add(copiedValue)
else v.set(copiedValue)
}
case v: MapVariable => {
if (copyingForAdd) v.add(copyingKeyName, copiedValue)
else v.set(copyingKeyName, copiedValue)
}
}
stopCopying()
} else {
throw new TagError("Unknown variable '" + copyingVariableName + "'")
}
}
def get(name: String, key: String): String = {
if (variables.contains(name) && variables(name).isDeclared) {
variables(name) match {
case v: ValueVariable => v.show
case v: MapVariable => v.show(key)
}
} else {
throw new TagError("Unknown variable '" + name + "'")
}
}
def getSorted(name: String, byValue: Boolean) = {
if (variables.contains(name) && variables(name).isDeclared) {
variables(name) match {
case v: ValueVariable => throw new TagError("Variable '" + name +
"' is not a map and can therefore not be used in the 'loop' tag")
case v: MapVariable => v.getSorted(byValue)
}
} else {
throw new TagError("Unknown variable '" + name + "'")
}
}
def allHasConverged = variables.keys.forall(n => variables(n).hasConverged)
def save(fileName: String) {
if (variables.exists(pair => pair._2.mustConverge)) {
try {
val outFile = new java.io.FileWriter(fileName)
for (pair <- variables) {
if (pair._2.mustConverge) {
pair._2 match {
case v: ValueVariable => {
outFile.write(v.profile + "\\t" + v.get + "\\n")
}
case v: MapVariable => {
for (kv <- v.get) {
outFile.write(v.profile + "\\t" + kv._1 + "\\t" + kv._2 + "\\n")
}
}
}
}
}
outFile.close
} catch {
case e: Exception => {
textcompose.editor.DialogBox.stackTrace("Could not write variables to \\"" + fileName + "\\".", e)
}
}
}
}
  // Reloads "prior" variable values written by save. Each line is
  // tab-separated: name, kind ("val" or otherwise map), type name(s), then the
  // value (and key for maps) — NOTE(review): exact field layout is whatever
  // Variable.profile emits; confirm against Variable. Any parse or I/O error
  // silently aborts the load (best-effort by design).
  def load(fileName: String) {
    // Stores a prior value, creating an undeclared placeholder variable if the
    // name is not yet registered (declareVariable may later replace it).
    def setPriorValue(
      name: String,
      keyTypeName: String,
      valueTypeName: String,
      priorKey: String,
      priorValue: String) {
      val keyType = if (keyTypeName == "") BaseType.NA else toBaseType(keyTypeName)
      val valueType = toBaseType(valueTypeName)
      if (variables.contains(name)) {
        variables(name) match {
          case v: ValueVariable => v.setPrior(priorValue)
          case v: MapVariable => v.setPrior(priorKey, priorValue)
        }
      } else {
        if (keyType == BaseType.NA) {
          val vv = ValueVariable(name, valueType, "")
          vv.setPrior(priorValue)
          variables(name) = vv
        } else {
          val mv = MapVariable(name, keyType, valueType)
          mv.setPrior(priorKey, priorValue)
          variables(name) = mv
        }
      }
    }
    // Splits one record by walking tab positions. Note: getNext throws if the
    // expected tab is missing; that exception is swallowed by the outer catch.
    def loadLine(line: String) {
      var index = -1
      def getNext(): String = {
        val priorIndex = index
        index = line.indexOf("\\t", priorIndex + 1)
        line.substring(priorIndex + 1, index)
      }
      def getLast: String = {
        line.substring(index + 1, line.length)
      }
      val name = getNext()
      if (getNext() == "val") {
        val valType = getNext()
        val value = getLast
        setPriorValue(name, "", valType, "", value)
      } else {
        val keyType = getNext()
        val valType = getNext()
        val key = getNext()
        val value = getLast
        setPriorValue(name, keyType, valType, key, value)
      }
    }
    try {
      var src = Source fromFile (fileName)
      src.getLines.foreach(line => loadLine(line))
    } catch {
      // Deliberate best-effort: a missing or malformed file leaves priors unset.
      case e: Exception => None
    }
  }
} | jvilladsen/TextCompose | src/main/scala/core/VariableRegister.scala | Scala | agpl-3.0 | 6,966 |
package com.socrata.balboa.metrics.data
import java.io.IOException
import com.socrata.balboa.metrics.config.ConfigurationException
import com.socrata.balboa.metrics.data.impl._
import com.typesafe.config.{Config, ConfigFactory}
// Factory abstraction for obtaining a DataStore, either from the default
// application configuration or from an explicitly supplied Config.
trait DataStoreFactory {
  def get: DataStore
  def get(conf: Config): DataStore
}
// Default factory: reads `balboa.datastore` from Typesafe config and wires the
// corresponding Cassandra-backed store. The default store is built lazily and
// cached for the life of the process.
object DefaultDataStoreFactory extends DataStoreFactory {
  private lazy val defaultDataStore: DataStore = try {
    get(ConfigFactory.load())
  } catch {
    case ioe: IOException =>
      throw new
        ConfigurationException(
          "Unable to determine which datastore to use because the configuration couldn't be read.", ioe)
  }
  def self: DataStoreFactory = this // For convenient Java access
  def get: DataStore = defaultDataStore
  // Builds a store for the given config. The Cassandra context is initialized
  // only inside the matched branch, so an unknown datastore name fails fast
  // without touching Cassandra.
  def get(conf: Config): DataStore = {
    lazy val datastore: String = conf.getString("balboa.datastore")
    lazy val bufferGranularity: Long = conf.getLong("buffer.granularity")
    datastore match {
      case "buffered-cassandra" =>
        new BufferedDataStore(
          new BadIdeasDataStore(
            new CassandraDataStore(
              new CassandraQueryImpl(
                CassandraUtil.initializeContext(conf)))), bufferGranularity = bufferGranularity)
      case "cassandra" =>
        new BadIdeasDataStore(
          new CassandraDataStore(
            new CassandraQueryImpl(
              CassandraUtil.initializeContext(conf))))
      case _ =>
        throw new ConfigurationException("Unknown datastore '" + datastore + "'.")
    }
  }
}
| socrata-platform/balboa | balboa-core/src/main/scala/com/socrata/balboa/metrics/data/DataStoreFactory.scala | Scala | apache-2.0 | 1,537 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx
import org.apache.spark.{SparkContext, SparkFunSuite}
import org.apache.spark.graphx.Graph._
import org.apache.spark.graphx.PartitionStrategy._
import org.apache.spark.rdd._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
// GraphX Graph operator test suite; each test runs against a local SparkContext
// supplied by withSpark (from LocalSparkContext).
class GraphSuite extends SparkFunSuite with LocalSparkContext {
  // Star graph: center vertex 0 with edges 0 -> 1..n, vertex attribute "v",
  // spread over 3 partitions.
  def starGraph(sc: SparkContext, n: Int): Graph[String, Int] = {
    Graph.fromEdgeTuples(sc.parallelize((1 to n).map(x => (0: VertexId, x: VertexId)), 3), "v")
  }
test("Graph.fromEdgeTuples") {
withSpark { sc =>
val ring = (0L to 100L).zip((1L to 99L) :+ 0L)
val doubleRing = ring ++ ring
val graph = Graph.fromEdgeTuples(sc.parallelize(doubleRing), 1)
assert(graph.edges.count() === doubleRing.size)
assert(graph.edges.collect().forall(e => e.attr == 1))
// uniqueEdges option should uniquify edges and store duplicate count in edge attributes
val uniqueGraph = Graph.fromEdgeTuples(sc.parallelize(doubleRing), 1, Some(RandomVertexCut))
assert(uniqueGraph.edges.count() === ring.size)
assert(uniqueGraph.edges.collect().forall(e => e.attr == 2))
}
}
test("Graph.fromEdges") {
withSpark { sc =>
val ring = (0L to 100L).zip((1L to 99L) :+ 0L).map { case (a, b) => Edge(a, b, 1) }
val graph = Graph.fromEdges(sc.parallelize(ring), 1.0F)
assert(graph.edges.count() === ring.size)
}
}
test("Graph.apply") {
withSpark { sc =>
val rawEdges = (0L to 98L).zip((1L to 99L) :+ 0L)
val edges: RDD[Edge[Int]] = sc.parallelize(rawEdges).map { case (s, t) => Edge(s, t, 1) }
val vertices: RDD[(VertexId, Boolean)] = sc.parallelize((0L until 10L).map(id => (id, true)))
val graph = Graph(vertices, edges, false)
assert( graph.edges.count() === rawEdges.size )
// Vertices not explicitly provided but referenced by edges should be created automatically
assert( graph.vertices.count() === 100)
graph.triplets.collect().map { et =>
assert((et.srcId < 10 && et.srcAttr) || (et.srcId >= 10 && !et.srcAttr))
assert((et.dstId < 10 && et.dstAttr) || (et.dstId >= 10 && !et.dstAttr))
}
}
}
test("triplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.triplets.map(et => (et.srcId, et.dstId, et.srcAttr, et.dstAttr)).collect().toSet
=== (1 to n).map(x => (0: VertexId, x: VertexId, "v", "v")).toSet)
}
}
test("partitionBy") {
withSpark { sc =>
def mkGraph(edges: List[(Long, Long)]): Graph[Int, Int] = {
Graph.fromEdgeTuples(sc.parallelize(edges, 2), 0)
}
def nonemptyParts(graph: Graph[Int, Int]): RDD[List[Edge[Int]]] = {
graph.edges.partitionsRDD.mapPartitions { iter =>
Iterator(iter.next()._2.iterator.toList)
}.filter(_.nonEmpty)
}
val identicalEdges = List((0L, 1L), (0L, 1L))
val canonicalEdges = List((0L, 1L), (1L, 0L))
val sameSrcEdges = List((0L, 1L), (0L, 2L))
// The two edges start out in different partitions
for (edges <- List(identicalEdges, canonicalEdges, sameSrcEdges)) {
assert(nonemptyParts(mkGraph(edges)).count === 2)
}
// partitionBy(RandomVertexCut) puts identical edges in the same partition
assert(nonemptyParts(mkGraph(identicalEdges).partitionBy(RandomVertexCut)).count === 1)
// partitionBy(EdgePartition1D) puts same-source edges in the same partition
assert(nonemptyParts(mkGraph(sameSrcEdges).partitionBy(EdgePartition1D)).count === 1)
// partitionBy(CanonicalRandomVertexCut) puts edges that are identical modulo direction into
// the same partition
assert(
nonemptyParts(mkGraph(canonicalEdges).partitionBy(CanonicalRandomVertexCut)).count === 1)
// partitionBy(EdgePartition2D) puts identical edges in the same partition
assert(nonemptyParts(mkGraph(identicalEdges).partitionBy(EdgePartition2D)).count === 1)
// partitionBy(EdgePartition2D) ensures that vertices need only be replicated to 2 * sqrt(p)
// partitions
val n = 100
val p = 100
val verts = 1 to n
val graph = Graph.fromEdgeTuples(sc.parallelize(verts.flatMap(x =>
verts.withFilter(y => y % x == 0).map(y => (x: VertexId, y: VertexId))), p), 0)
assert(graph.edges.partitions.length === p)
val partitionedGraph = graph.partitionBy(EdgePartition2D)
assert(graph.edges.partitions.length === p)
val bound = 2 * math.sqrt(p)
// Each vertex should be replicated to at most 2 * sqrt(p) partitions
val partitionSets = partitionedGraph.edges.partitionsRDD.mapPartitions { iter =>
val part = iter.next()._2
Iterator((part.iterator.flatMap(e => Iterator(e.srcId, e.dstId))).toSet)
}.collect
if (!verts.forall(id => partitionSets.count(_.contains(id)) <= bound)) {
val numFailures = verts.count(id => partitionSets.count(_.contains(id)) > bound)
val failure = verts.maxBy(id => partitionSets.count(_.contains(id)))
fail(("Replication bound test failed for %d/%d vertices. " +
"Example: vertex %d replicated to %d (> %f) partitions.").format(
numFailures, n, failure, partitionSets.count(_.contains(failure)), bound))
}
// This should not be true for the default hash partitioning
val partitionSetsUnpartitioned = graph.edges.partitionsRDD.mapPartitions { iter =>
val part = iter.next()._2
Iterator((part.iterator.flatMap(e => Iterator(e.srcId, e.dstId))).toSet)
}.collect
assert(verts.exists(id => partitionSetsUnpartitioned.count(_.contains(id)) > bound))
// Forming triplets view
val g = Graph(
sc.parallelize(List((0L, "a"), (1L, "b"), (2L, "c"))),
sc.parallelize(List(Edge(0L, 1L, 1), Edge(0L, 2L, 1)), 2))
assert(g.triplets.collect().map(_.toTuple).toSet ===
Set(((0L, "a"), (1L, "b"), 1), ((0L, "a"), (2L, "c"), 1)))
val gPart = g.partitionBy(EdgePartition2D)
assert(gPart.triplets.collect().map(_.toTuple).toSet ===
Set(((0L, "a"), (1L, "b"), 1), ((0L, "a"), (2L, "c"), 1)))
}
}
test("mapVertices") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
// mapVertices preserving type
val mappedVAttrs = star.mapVertices((vid, attr) => attr + "2")
assert(mappedVAttrs.vertices.collect().toSet === (0 to n).map(x => (x: VertexId, "v2")).toSet)
// mapVertices changing type
val mappedVAttrs2 = star.mapVertices((vid, attr) => attr.length)
assert(mappedVAttrs2.vertices.collect().toSet === (0 to n).map(x => (x: VertexId, 1)).toSet)
}
}
test("mapVertices changing type with same erased type") {
withSpark { sc =>
val vertices = sc.parallelize(Array[(Long, Option[java.lang.Integer])](
(1L, Some(1)),
(2L, Some(2)),
(3L, Some(3))
))
val edges = sc.parallelize(Array(
Edge(1L, 2L, 0),
Edge(2L, 3L, 0),
Edge(3L, 1L, 0)
))
val graph0 = Graph(vertices, edges)
// Trigger initial vertex replication
graph0.triplets.foreach(x => {})
// Change type of replicated vertices, but preserve erased type
val graph1 = graph0.mapVertices { case (vid, integerOpt) =>
integerOpt.map((x: java.lang.Integer) => x.toDouble: java.lang.Double)
}
// Access replicated vertices, exposing the erased type
val graph2 = graph1.mapTriplets(t => t.srcAttr.get)
assert(graph2.edges.map(_.attr).collect().toSet === Set[java.lang.Double](1.0, 2.0, 3.0))
}
}
test("mapEdges") {
withSpark { sc =>
val n = 3
val star = starGraph(sc, n)
val starWithEdgeAttrs = star.mapEdges(e => e.dstId)
val edges = starWithEdgeAttrs.edges.collect()
assert(edges.size === n)
assert(edges.toSet === (1 to n).map(x => Edge(0, x, x)).toSet)
}
}
test("mapTriplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.mapTriplets(et => et.srcAttr + et.dstAttr).edges.collect().toSet ===
(1L to n).map(x => Edge(0, x, "vv")).toSet)
}
}
test("reverse") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.reverse.outDegrees.collect().toSet === (1 to n).map(x => (x: VertexId, 1)).toSet)
}
}
test("reverse with join elimination") {
withSpark { sc =>
val vertices: RDD[(VertexId, Int)] = sc.parallelize(Array((1L, 1), (2L, 2)))
val edges: RDD[Edge[Int]] = sc.parallelize(Array(Edge(1L, 2L, 0)))
val graph = Graph(vertices, edges).reverse
val result = graph.mapReduceTriplets[Int](et => Iterator((et.dstId, et.srcAttr)), _ + _)
assert(result.collect().toSet === Set((1L, 2)))
}
}
test("subgraph") {
withSpark { sc =>
// Create a star graph of 10 veritces.
val n = 10
val star = starGraph(sc, n)
// Take only vertices whose vids are even
val subgraph = star.subgraph(vpred = (vid, attr) => vid % 2 == 0)
// We should have 5 vertices.
assert(subgraph.vertices.collect().toSet === (0 to n by 2).map(x => (x, "v")).toSet)
// And 4 edges.
assert(subgraph.edges.map(_.copy()).collect().toSet ===
(2 to n by 2).map(x => Edge(0, x, 1)).toSet)
}
}
test("mask") {
withSpark { sc =>
val n = 5
val vertices = sc.parallelize((0 to n).map(x => (x: VertexId, x)))
val edges = sc.parallelize((1 to n).map(x => Edge(0, x, x)))
val graph: Graph[Int, Int] = Graph(vertices, edges).cache()
val subgraph = graph.subgraph(
e => e.dstId != 4L,
(vid, vdata) => vid != 3L
).mapVertices((vid, vdata) => -1).mapEdges(e => -1)
val projectedGraph = graph.mask(subgraph)
val v = projectedGraph.vertices.collect().toSet
assert(v === Set((0, 0), (1, 1), (2, 2), (4, 4), (5, 5)))
// the map is necessary because of object-reuse in the edge iterator
val e = projectedGraph.edges.map(e => Edge(e.srcId, e.dstId, e.attr)).collect().toSet
assert(e === Set(Edge(0, 1, 1), Edge(0, 2, 2), Edge(0, 5, 5)))
}
}
test("groupEdges") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
val doubleStar = Graph.fromEdgeTuples(
sc.parallelize((1 to n).flatMap(x =>
List((0: VertexId, x: VertexId), (0: VertexId, x: VertexId))), 1), "v")
val star2 = doubleStar.groupEdges { (a, b) => a}
assert(star2.edges.collect().toArray.sorted(Edge.lexicographicOrdering[Int]) ===
star.edges.collect().toArray.sorted(Edge.lexicographicOrdering[Int]))
assert(star2.vertices.collect().toSet === star.vertices.collect().toSet)
}
}
test("mapReduceTriplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n).mapVertices { (_, _) => 0 }.cache()
val starDeg = star.joinVertices(star.degrees){ (vid, oldV, deg) => deg }
val neighborDegreeSums = starDeg.mapReduceTriplets(
edge => Iterator((edge.srcId, edge.dstAttr), (edge.dstId, edge.srcAttr)),
(a: Int, b: Int) => a + b)
assert(neighborDegreeSums.collect().toSet === (0 to n).map(x => (x, n)).toSet)
// activeSetOpt
val allPairs = for (x <- 1 to n; y <- 1 to n) yield (x: VertexId, y: VertexId)
val complete = Graph.fromEdgeTuples(sc.parallelize(allPairs, 3), 0)
val vids = complete.mapVertices((vid, attr) => vid).cache()
val active = vids.vertices.filter { case (vid, attr) => attr % 2 == 0 }
val numEvenNeighbors = vids.mapReduceTriplets(et => {
// Map function should only run on edges with destination in the active set
if (et.dstId % 2 != 0) {
throw new Exception("map ran on edge with dst vid %d, which is odd".format(et.dstId))
}
Iterator((et.srcId, 1))
}, (a: Int, b: Int) => a + b, Some((active, EdgeDirection.In))).collect().toSet
assert(numEvenNeighbors === (1 to n).map(x => (x: VertexId, n / 2)).toSet)
// outerJoinVertices followed by mapReduceTriplets(activeSetOpt)
val ringEdges = sc.parallelize((0 until n).map(x => (x: VertexId, (x + 1) % n: VertexId)), 3)
val ring = Graph.fromEdgeTuples(ringEdges, 0) .mapVertices((vid, attr) => vid).cache()
val changed = ring.vertices.filter { case (vid, attr) => attr % 2 == 1 }.mapValues(-_).cache()
val changedGraph = ring.outerJoinVertices(changed) { (vid, old, newOpt) =>
newOpt.getOrElse(old)
}
val numOddNeighbors = changedGraph.mapReduceTriplets(et => {
// Map function should only run on edges with source in the active set
if (et.srcId % 2 != 1) {
throw new Exception("map ran on edge with src vid %d, which is even".format(et.dstId))
}
Iterator((et.dstId, 1))
}, (a: Int, b: Int) => a + b, Some(changed, EdgeDirection.Out)).collect().toSet
assert(numOddNeighbors === (2 to n by 2).map(x => (x: VertexId, 1)).toSet)
}
}
test("aggregateMessages") {
withSpark { sc =>
val n = 5
val agg = starGraph(sc, n).aggregateMessages[String](
ctx => {
if (ctx.dstAttr != null) {
throw new Exception(
"expected ctx.dstAttr to be null due to TripletFields, but it was " + ctx.dstAttr)
}
ctx.sendToDst(ctx.srcAttr)
}, _ + _, TripletFields.Src)
assert(agg.collect().toSet === (1 to n).map(x => (x: VertexId, "v")).toSet)
}
}
test("outerJoinVertices") {
withSpark { sc =>
val n = 5
val reverseStar = starGraph(sc, n).reverse.cache()
// outerJoinVertices changing type
val reverseStarDegrees = reverseStar.outerJoinVertices(reverseStar.outDegrees) {
(vid, a, bOpt) => bOpt.getOrElse(0)
}
val neighborDegreeSums = reverseStarDegrees.mapReduceTriplets(
et => Iterator((et.srcId, et.dstAttr), (et.dstId, et.srcAttr)),
(a: Int, b: Int) => a + b).collect().toSet
assert(neighborDegreeSums === Set((0: VertexId, n)) ++ (1 to n).map(x => (x: VertexId, 0)))
// outerJoinVertices preserving type
val messages = reverseStar.vertices.mapValues { (vid, attr) => vid.toString }
val newReverseStar =
reverseStar.outerJoinVertices(messages) { (vid, a, bOpt) => a + bOpt.getOrElse("") }
assert(newReverseStar.vertices.map(_._2).collect().toSet ===
(0 to n).map(x => "v%d".format(x)).toSet)
}
}
test("more edge partitions than vertex partitions") {
withSpark { sc =>
val verts = sc.parallelize(List((1: VertexId, "a"), (2: VertexId, "b")), 1)
val edges = sc.parallelize(List(Edge(1, 2, 0), Edge(2, 1, 0)), 2)
val graph = Graph(verts, edges)
val triplets = graph.triplets.map(et => (et.srcId, et.dstId, et.srcAttr, et.dstAttr))
.collect().toSet
assert(triplets ===
Set((1: VertexId, 2: VertexId, "a", "b"), (2: VertexId, 1: VertexId, "b", "a")))
}
}
test("checkpoint") {
val checkpointDir = Utils.createTempDir()
withSpark { sc =>
sc.setCheckpointDir(checkpointDir.getAbsolutePath)
val ring = (0L to 100L).zip((1L to 99L) :+ 0L).map { case (a, b) => Edge(a, b, 1)}
val rdd = sc.parallelize(ring)
val graph = Graph.fromEdges(rdd, 1.0F)
assert(!graph.isCheckpointed)
assert(graph.getCheckpointFiles.size === 0)
graph.checkpoint()
graph.edges.map(_.attr).count()
graph.vertices.map(_._2).count()
val edgesDependencies = graph.edges.partitionsRDD.dependencies
val verticesDependencies = graph.vertices.partitionsRDD.dependencies
assert(edgesDependencies.forall(_.rdd.isInstanceOf[CheckpointRDD[_]]))
assert(verticesDependencies.forall(_.rdd.isInstanceOf[CheckpointRDD[_]]))
assert(graph.isCheckpointed)
assert(graph.getCheckpointFiles.size === 2)
}
}
test("cache, getStorageLevel") {
// test to see if getStorageLevel returns correct value
withSpark { sc =>
val verts = sc.parallelize(List((1: VertexId, "a"), (2: VertexId, "b")), 1)
val edges = sc.parallelize(List(Edge(1, 2, 0), Edge(2, 1, 0)), 2)
val graph = Graph(verts, edges, "", StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY)
// Note: Before caching, graph.vertices is cached, but graph.edges is not (but graph.edges'
// parent RDD is cached).
graph.cache()
assert(graph.vertices.getStorageLevel == StorageLevel.MEMORY_ONLY)
assert(graph.edges.getStorageLevel == StorageLevel.MEMORY_ONLY)
}
}
test("non-default number of edge partitions") {
val n = 10
val defaultParallelism = 3
val numEdgePartitions = 4
assert(defaultParallelism != numEdgePartitions)
val conf = new org.apache.spark.SparkConf()
.set("spark.default.parallelism", defaultParallelism.toString)
val sc = new SparkContext("local", "test", conf)
try {
val edges = sc.parallelize((1 to n).map(x => (x: VertexId, 0: VertexId)),
numEdgePartitions)
val graph = Graph.fromEdgeTuples(edges, 1)
val neighborAttrSums = graph.mapReduceTriplets[Int](
et => Iterator((et.dstId, et.srcAttr)), _ + _)
assert(neighborAttrSums.collect().toSet === Set((0: VertexId, n)))
} finally {
sc.stop()
}
}
test("SPARK-14219: pickRandomVertex") {
withSpark { sc =>
val vert = sc.parallelize(List((1L, "a")), 1)
val edges = sc.parallelize(List(Edge[Long](1L, 1L)), 1)
val g0 = Graph(vert, edges)
assert(g0.pickRandomVertex() === 1L)
}
}
}
| haowu80s/spark | graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala | Scala | apache-2.0 | 18,422 |
package com.azavea.opentransit.indicators.parameters
import com.azavea.opentransit.database.RoadsTable
import geotrellis.vector._
import grizzled.slf4j.Logging
import scala.slick.jdbc.JdbcBackend.{Database, DatabaseDef, Session}
/**
* Trait used to populate indicator parameters with Road Length
*/
/**
 * Trait used to populate indicator parameters with Road Length
 */
trait RoadLength {
  // Total road length in kilometers (see RoadLength.totalRoadLength).
  def totalRoadLength: Double
}
object RoadLength extends Logging {
  /** Total length, in kilometers, of the deduplicated road network. */
  def totalRoadLength(implicit session: Session): Double = {
    debug("Fetching Roads")
    val allLines: List[Line] = RoadsTable.allRoads
    // Union collapses overlapping/duplicate segments before measuring.
    val merged = MultiLine(allLines: _*).union match {
      case MultiLineResult(ml) => ml
      case LineResult(l) => MultiLine(l)
      case NoResult => MultiLine.EMPTY
    }
    // Geometry lengths are in meters; convert the sum to kilometers.
    val len = merged.lines.map(_.length).sum / 1000
    debug(s"Length of roadlines: $len")
    len
  }
}
| flibbertigibbet/open-transit-indicators | scala/opentransit/src/main/scala/com/azavea/opentransit/indicators/parameters/RoadLength.scala | Scala | gpl-3.0 | 877 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Paul Phillips
*/
package scala
package tools.nsc
package interpreter
import scala.language.reflectiveCalls
import java.lang.{ Iterable => JIterable }
import scala.reflect.internal.util.ScalaClassLoader
import java.io.{ ByteArrayInputStream, CharArrayWriter, FileNotFoundException, PrintWriter, StringWriter, Writer }
import java.util.{ Locale }
import java.util.concurrent.ConcurrentLinkedQueue
import javax.tools.{ Diagnostic, DiagnosticListener,
ForwardingJavaFileManager, JavaFileManager, JavaFileObject,
SimpleJavaFileObject, StandardLocation }
import scala.reflect.io.File
import scala.io.Source
import scala.util.{ Try, Success, Failure }
import scala.util.Properties.{ lineSeparator => EOL }
import scala.collection.JavaConverters._
import scala.collection.generic.Clearable
import java.net.URL
import Javap.{ JpResult, JpError, Showable, helper, toolArgs, DefaultOptions }
/** Javap command implementation: resolves REPL/class/file targets to bytecode
 *  and runs the javap disassembler tool over them.
 */
class JavapClass(
  val loader: ScalaClassLoader,
  val printWriter: PrintWriter,
  intp: IMain
) extends Javap {
  import JavapClass._
  lazy val tool = JavapTool()
  // Splits args into options (leading '-', longer than "-") and targets;
  // prints help when "-help" is given or there are no targets.
  def apply(args: Seq[String]): List[JpResult] = {
    val (options0, targets) = args partition (s => (s startsWith "-") && s.length > 1)
    val (options, filter) = {
      val (opts, flag) = toolArgs(options0)
      (if (opts.isEmpty) DefaultOptions else opts, flag)
    }
    if ((options contains "-help") || targets.isEmpty)
      List(JpResult(helper(printWriter)))
    else
      tool(options, filter)(targets map targeted)
  }
  /** Associate the requested path with a possibly failed or empty array of bytes.
   *  On success the (possibly rewritten) target name is paired with its bytes;
   *  on failure the original path is kept so the error message names it.
   */
  private def targeted(path: String): (String, Try[Array[Byte]]) =
    bytesFor(path) match {
      case Success((target, bytes)) => (target, Try(bytes))
      case f: Failure[_] => (path, Failure(f.exception))
    }
/** Find bytes. Handle "-", "Foo#bar" (by ignoring member), "#bar" (by taking "bar").
* @return the path to use for filtering, and the byte array
*/
private def bytesFor(path: String) = Try {
val req = path match {
case "-" => intp.mostRecentVar
case HashSplit(prefix, _) if prefix != null => prefix
case HashSplit(_, member) if member != null => member
case s => s
}
(path, findBytes(req)) match {
case (_, bytes) if bytes.isEmpty => throw new FileNotFoundException(s"Could not find class bytes for '$path'")
case ok => ok
}
}
  // Resolution order: filesystem classfile first, then class-name lookup.
  def findBytes(path: String): Array[Byte] = tryFile(path) getOrElse tryClass(path)
  /** Assume the string is a path and try to find the classfile it represents.
   *  Returns None when the file does not exist or cannot be read.
   */
  def tryFile(path: String): Option[Array[Byte]] =
    (Try (File(path.asClassResource)) filter (_.exists) map (_.toByteArray())).toOption
/** Assume the string is a fully qualified class name and try to
* find the class object it represents.
* There are other symbols of interest, too:
* - a definition that is wrapped in an enclosing class
* - a synthetic that is not in scope but its associated class is
*/
  def tryClass(path: String): Array[Byte] = {
    def load(name: String) = loader classBytes name
    def loadable(name: String) = loader resourceable name
    // if path has an interior dollar, take it as a synthetic
    // if the prefix up to the dollar is a symbol in scope,
    // result is the translated prefix + suffix
    def desynthesize(s: String) = {
      val i = s indexOf '$'
      // interior dollar: present and not the last character
      if (0 until s.length - 1 contains i) {
        val name = s substring (0, i)
        val sufx = s substring i
        val tran = intp translatePath name
        def loadableOrNone(strip: Boolean) = {
          def suffix(strip: Boolean)(x: String) =
            (if (strip && (x endsWith "$")) x.init else x) + sufx
          val res = tran map (suffix(strip) _)
          if (res.isDefined && loadable(res.get)) res else None
        }
        // try loading translated+suffix
        val res = loadableOrNone(strip = false)
        // some synthetics lack a dollar, (e.g., suffix = delayedInit$body)
        // so as a hack, if prefix$$suffix fails, also try prefix$suffix
        if (res.isDefined) res else loadableOrNone(strip = true)
      } else None
    }
    val p = path.asClassName // scrub any suffix
    // if repl, translate the name to something replish
    // (for translate, would be nicer to get the sym and ask .isClass,
    // instead of translatePath and then asking did I get a class back)
    val q = (
      // only simple names get the scope treatment
      Some(p) filter (_ contains '.')
      // take path as a Name in scope
      orElse (intp translatePath p filter loadable)
      // take path as a Name in scope and find its enclosing class
      orElse (intp translateEnclosingClass p filter loadable)
      // take path as a synthetic derived from some Name in scope
      orElse desynthesize(p)
      // just try it plain
      getOrElse p
    )
    load(q)
  }
  // Reflective wrapper around the JDK's javap task (loaded by reflection so the
  // REPL still works on JVMs where the tool is unavailable).
  class JavapTool {
    type ByteAry = Array[Byte]
    type Input = Tuple2[String, Try[ByteAry]]
    // `a` is by-name, so the real expression is only evaluated when the tool
    // initialized successfully; otherwise the fallback `b` is used.
    implicit protected class Failer[A](a: =>A) {
      def orFailed[B >: A](b: =>B) = if (failed) b else a
    }
    protected def noToolError = new JpError(s"No javap tool available: ${getClass.getName} failed to initialize.")
    // output filtering support
    val writer = new CharArrayWriter
    // Drains and resets the capture buffer, returning everything written so far.
    def written = {
      writer.flush()
      val w = writer.toString
      writer.reset()
      w
    }
    // Keeps only the javap output lines belonging to the member named in
    // `target` ("Foo#bar"); with no member, all lines pass through. The method
    // detection is a text heuristic over javap's formatting, not a parser.
    def filterLines(target: String, text: String): String = {
      // take Foo# as Foo#apply for purposes of filtering.
      val filterOn = target.splitHashMember._2 map { s => if (s.isEmpty) "apply" else s }
      var filtering = false // true if in region matching filter
      // turn filtering on/off given the pattern of interest
      def filterStatus(line: String, pattern: String) = {
        // also accept specialized ("name$...$sp") and anonfun-derived names
        def isSpecialized(method: String) = (method startsWith pattern+"$") && (method endsWith "$sp")
        def isAnonymized(method: String) = (pattern == "$anonfun") && (method startsWith "$anonfun$")
        // cheap heuristic, todo maybe parse for the java sig.
        // method sigs end in paren semi
        def isAnyMethod = line endsWith ");"
        // take the method name between the space char and left paren.
        // accept exact match or something that looks like what we might be asking for.
        def isOurMethod = {
          val lparen = line lastIndexOf '('
          val blank = line.lastIndexOf(' ', lparen)
          if (blank < 0) false
          else {
            val method = line.substring(blank+1, lparen)
            (method == pattern || isSpecialized(method) || isAnonymized(method))
          }
        }
        filtering =
          if (filtering) {
            // next blank line terminates section
            // in non-verbose mode, next line is next method, more or less
            line.trim.nonEmpty && (!isAnyMethod || isOurMethod)
          } else {
            isAnyMethod && isOurMethod
          }
        filtering
      }
      // do we output this line?
      def checkFilter(line: String) = filterOn map (filterStatus(line, _)) getOrElse true
      val sw = new StringWriter
      val pw = new PrintWriter(sw)
      for {
        line <- Source.fromString(text).getLines()
        if checkFilter(line)
      } pw println line
      pw.flush()
      sw.toString
    }
import JavapTool._
type Task = {
def call(): Boolean // true = ok
//def run(args: Array[String]): Int // all args
//def handleOptions(args: Array[String]): Unit // options, then run() or call()
}
// result of Task.run
//object TaskResult extends Enumeration {
// val Ok, Error, CmdErr, SysErr, Abnormal = Value
//}
val TaskClass = loader.tryToInitializeClass[Task](JavapTask).orNull
// Since the tool is loaded by reflection, check for catastrophic failure.
protected def failed = TaskClass eq null
val TaskCtor = TaskClass.getConstructor(
classOf[Writer],
classOf[JavaFileManager],
classOf[DiagnosticListener[_]],
classOf[JIterable[String]],
classOf[JIterable[String]]
) orFailed null
class JavaReporter extends DiagnosticListener[JavaFileObject] with Clearable {
type D = Diagnostic[_ <: JavaFileObject]
val diagnostics = new ConcurrentLinkedQueue[D]
override def report(d: Diagnostic[_ <: JavaFileObject]) {
diagnostics add d
}
override def clear() = diagnostics.clear()
/** All diagnostic messages.
* @param locale Locale for diagnostic messages, null by default.
*/
def messages(implicit locale: Locale = null) = diagnostics.asScala.map(_ getMessage locale).toList
def reportable(): String = {
clear()
if (messages.nonEmpty) messages mkString ("", EOL, EOL) else ""
}
}
val reporter = new JavaReporter
// DisassemblerTool.getStandardFileManager(reporter,locale,charset)
val defaultFileManager: JavaFileManager =
(loader.tryToLoadClass[JavaFileManager]("com.sun.tools.javap.JavapFileManager").get getMethod (
"create",
classOf[DiagnosticListener[_]],
classOf[PrintWriter]
) invoke (null, reporter, new PrintWriter(System.err, true))).asInstanceOf[JavaFileManager] orFailed null
// manages named arrays of bytes, which might have failed to load
class JavapFileManager(val managed: Seq[Input])(delegate: JavaFileManager = defaultFileManager)
extends ForwardingJavaFileManager[JavaFileManager](delegate) {
import JavaFileObject.Kind
import Kind._
import StandardLocation._
import JavaFileManager.Location
import java.net.{ URI, URISyntaxException }
// name#fragment is OK, but otherwise fragile
def uri(name: String): URI =
try new URI(name) // new URI("jfo:" + name)
catch { case _: URISyntaxException => new URI("dummy") }
def inputNamed(name: String): Try[ByteAry] = (managed find (_._1 == name)).get._2
def managedFile(name: String, kind: Kind) = kind match {
case CLASS => fileObjectForInput(name, inputNamed(name), kind)
case _ => null
}
// todo: just wrap it as scala abstractfile and adapt it uniformly
def fileObjectForInput(name: String, bytes: Try[ByteAry], kind: Kind): JavaFileObject =
new SimpleJavaFileObject(uri(name), kind) {
override def openInputStream(): InputStream = new ByteArrayInputStream(bytes.get)
// if non-null, ClassWriter wrongly requires scheme non-null
override def toUri: URI = null
override def getName: String = name
// suppress
override def getLastModified: Long = -1L
}
override def getJavaFileForInput(location: Location, className: String, kind: Kind): JavaFileObject =
location match {
case CLASS_PATH => managedFile(className, kind)
case _ => null
}
override def hasLocation(location: Location): Boolean =
location match {
case CLASS_PATH => true
case _ => false
}
}
def fileManager(inputs: Seq[Input]) = new JavapFileManager(inputs)()
/** Create a Showable to show tool messages and tool output, with output massage.
* @param target attempt to filter output to show region of interest
* @param filter whether to strip REPL names
*/
def showable(target: String, filter: Boolean): Showable =
new Showable {
val output = filterLines(target, s"${reporter.reportable()}${written}")
def show() =
if (filter) intp.withoutTruncating(printWriter.write(output))
else intp.withoutUnwrapping(printWriter.write(output, 0, output.length))
}
// eventually, use the tool interface
def task(options: Seq[String], classes: Seq[String], inputs: Seq[Input]): Task = {
//ServiceLoader.load(classOf[javax.tools.DisassemblerTool]).
//getTask(writer, fileManager, reporter, options.asJava, classes.asJava)
val toolopts = options filter (_ != "-filter")
TaskCtor.newInstance(writer, fileManager(inputs), reporter, toolopts.asJava, classes.asJava)
.orFailed (throw new IllegalStateException)
}
// a result per input
private def applyOne(options: Seq[String], filter: Boolean, klass: String, inputs: Seq[Input]): Try[JpResult] =
Try {
task(options, Seq(klass), inputs).call()
} map {
case true => JpResult(showable(klass, filter))
case _ => JpResult(reporter.reportable())
} recoverWith {
case e: java.lang.reflect.InvocationTargetException => e.getCause match {
case t: IllegalArgumentException => Success(JpResult(t.getMessage)) // bad option
case x => Failure(x)
}
} lastly {
reporter.clear()
}
/** Run the tool. */
def apply(options: Seq[String], filter: Boolean)(inputs: Seq[Input]): List[JpResult] = (inputs map {
case (klass, Success(_)) => applyOne(options, filter, klass, inputs).get
case (_, Failure(e)) => JpResult(e.toString)
}).toList orFailed List(noToolError)
}
object JavapTool {
// >= 1.7
val JavapTask = "com.sun.tools.javap.JavapTask"
private def hasClass(cl: ScalaClassLoader, cn: String) = cl.tryToInitializeClass[AnyRef](cn).isDefined
def isAvailable = hasClass(loader, JavapTask)
/** Select the tool implementation for this platform. */
def apply() = {
require(isAvailable)
new JavapTool
}
}
}
/** Factory and string/classloader helpers for [[JavapClass]]. */
object JavapClass {
  def apply(
    loader: ScalaClassLoader = ScalaClassLoader.appLoader,
    printWriter: PrintWriter = new PrintWriter(System.out, true),
    intp: IMain
  ) = new JavapClass(loader, printWriter, intp)

  /** Match foo#bar, both groups are optional (may be null). */
  val HashSplit = "([^#]+)?(?:#(.+)?)?".r

  // We enjoy flexibility in specifying either a fully-qualified class name com.acme.Widget
  // or a resource path com/acme/Widget.class; but not widget.out
  implicit class MaybeClassLike(val s: String) extends AnyVal {
    /* private[this] final val suffix = ".class" */
    // a def because a value class may not declare a val
    private def suffix = ".class"
    /** Strip a trailing ".class" and turn slashes into dots. */
    def asClassName: String = s.stripSuffix(suffix).replace('/', '.')
    /** The resource-path form, guaranteed to end in ".class". */
    def asClassResource: String =
      if (s.endsWith(suffix)) s else s.replace('.', '/') + suffix
    /** Split into (base, ".class") when the suffix is present, else (s, ""). */
    def splitSuffix: (String, String) =
      if (s.endsWith(suffix)) (s.dropRight(suffix.length), suffix) else (s, "")
    /** Apply `f` to the base name when the ".class" suffix is present. */
    def strippingSuffix(f: String => String): String =
      if (s.endsWith(suffix)) f(s.dropRight(suffix.length)) else s
    // e.g. Foo#bar. Foo# yields zero-length member part.
    def splitHashMember: (String, Option[String]) =
      s.lastIndexOf('#') match {
        case -1 => (s, None)
        //else if (i >= s.length - 1) (s.init, None)
        case i  => (s.take(i), Some(s.substring(i + 1)))
      }
  }
  implicit class ClassLoaderOps(val loader: ScalaClassLoader) extends AnyVal {
    /* would classBytes succeed with a nonempty array */
    def resourceable(className: String): Boolean =
      loader.getResource(className.asClassResource) != null
  }
  implicit class URLOps(val url: URL) extends AnyVal {
    /** True when the URL points into the local file system. */
    def isFile: Boolean = url.getProtocol == "file"
  }
}
/** Interface of the :javap command implementation; see [[JavapClass]] for the
 *  real tool and [[NoJavap]] for the no-op fallback.
 */
abstract class Javap {
  /** Run the tool. Option args start with "-", except that "-" itself
   * denotes the last REPL result.
   * The default options are "-protected -verbose".
   * Byte data for filename args is retrieved with findBytes.
   * @return results for invoking JpResult.show()
   */
  def apply(args: Seq[String]): List[Javap.JpResult]
}
object Javap {
  // The tool can be probed without a REPL, hence intp = null here.
  def isAvailable(cl: ScalaClassLoader = ScalaClassLoader.appLoader) = JavapClass(cl, intp = null).JavapTool.isAvailable
  def apply(path: String): Unit = apply(Seq(path))
  def apply(args: Seq[String]): Unit = JavapClass(intp=null) apply args foreach (_.show())
  // Something that can render itself to the tool's output writer.
  private[interpreter] trait Showable {
    def show(): Unit
  }
  // Result of one javap invocation: either an error message or showable output.
  sealed trait JpResult {
    type ResultType
    def isError: Boolean
    def value: ResultType
    def show(): Unit
    // todo
    // def header(): String
    // def fields(): List[String]
    // def methods(): List[String]
    // def signatures(): List[String]
  }
  object JpResult {
    def apply(msg: String) = new JpError(msg)
    def apply(res: Showable) = new JpSuccess(res)
  }
  class JpError(msg: String) extends JpResult {
    type ResultType = String
    def isError = true
    def value = msg
    def show() = println(msg) // makes sense for :javap, less for -Ygen-javap
  }
  class JpSuccess(val value: Showable) extends JpResult {
    type ResultType = AnyRef
    def isError = false
    def show() = value.show() // output to tool's PrintWriter
  }
  // Expand/complete the raw args, then split off our own "-filter" flag:
  // returns (options to pass to javap, whether filtering was requested).
  def toolArgs(args: Seq[String]): (Seq[String], Boolean) = {
    val (opts, rest) = args flatMap massage partition (_ != "-filter")
    (opts, rest.nonEmpty)
  }
  // Supported options with their help text; keys like "-verbose/-v" carry
  // both the long and the short form.
  val helps = List(
    "usage" -> ":javap [opts] [path or class or -]...",
    "-help" -> "Prints this help message",
    "-verbose/-v" -> "Stack size, number of locals, method args",
    "-private/-p" -> "Private classes and members",
    "-package" -> "Package-private classes and members",
    "-protected" -> "Protected classes and members",
    "-public" -> "Public classes and members",
    "-l" -> "Line and local variable tables",
    "-c" -> "Disassembled code",
    "-s" -> "Internal type signatures",
    "-sysinfo" -> "System info of class",
    "-constants" -> "Static final constants",
    "-filter" -> "Filter REPL machinery from output"
  )
  // match prefixes and unpack opts, or -help on failure
  private def massage(arg: String): Seq[String] = {
    require(arg startsWith "-")
    // arg matches opt "-foo/-f" if prefix of -foo or exactly -f
    val r = """(-[^/]*)(?:/(-.))?""".r
    def maybe(opt: String, s: String): Option[String] = opt match {
      // disambiguate by preferring short form
      case r(lf, sf) if s == sf => Some(sf)
      case r(lf, sf) if lf startsWith s => Some(lf)
      case _ => None
    }
    def candidates(s: String) = (helps map (h => maybe(h._1, s))).flatten
    // one candidate or one single-char candidate
    def uniqueOf(maybes: Seq[String]) = {
      def single(s: String) = s.length == 2
      if (maybes.length == 1) maybes
      else if ((maybes count single) == 1) maybes filter single
      else Nil
    }
    // each optchar must decode to exactly one option
    def unpacked(s: String): Try[Seq[String]] = {
      val ones = (s drop 1) map { c =>
        val maybes = uniqueOf(candidates(s"-$c"))
        if (maybes.length == 1) Some(maybes.head) else None
      }
      Try(ones) filter (_ forall (_.isDefined)) map (_.flatten)
    }
    // prefer a whole-arg match; otherwise try treating it as packed one-char opts
    val res = uniqueOf(candidates(arg))
    if (res.nonEmpty) res
    else (unpacked(arg)
      getOrElse (Seq("-help"))) // or else someone needs help
  }
  // Render the help table, one left-padded option per line.
  def helpText: String = (helps map { case (name, help) => f"$name%-12.12s$help%n" }).mkString
  def helper(pw: PrintWriter) = new Showable {
    def show() = pw print helpText
  }
  val DefaultOptions = List("-protected", "-verbose")
}
/** Inert [[Javap]] used when no tool implementation is available: every run
 *  yields no results.
 */
object NoJavap extends Javap {
  def apply(args: Seq[String]): List[Javap.JpResult] = List.empty
}
| felixmulder/scala | src/repl/scala/tools/nsc/interpreter/JavapClass.scala | Scala | bsd-3-clause | 19,487 |
/*
* Copyright 2014 porter <https://github.com/eikek/porter>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package porter.app.akka.telnet
import scala.concurrent.ExecutionContext
import akka.util.Timeout
import porter.model._
import scala.util.Try
import porter.app.akka.api.PorterMain.ShowSettings
import porter.app.akka.PorterUtil
/** Miscellaneous telnet commands: password encryption, settings display, and
 *  property management on accounts and groups.
 */
object HelperCommands extends Commands {
  import akka.pattern.ask
  def makeDoc =
    """
      |Helper commands
      |---------------
      |crypt pw <plainpassw> encrypt a given plaintext password which can be used
      | with ConfigStore or PropertyFileStore
      |show settings shows the PorterSettings object that was used to create
      | this porter instance
      |add props adds properties to a group or an account
      |remove props removes properties from a group or an account
    """.stripMargin
  // Partial functions handling "help", "crypt pw" and "show settings",
  // followed by the two interactive property forms.
  def make(implicit executor: ExecutionContext, to: Timeout) = Seq({
    case in@Input(help, conn, _, _) if help == "help" =>
      in << s"""
        |This is a simple interface for managing accounts.
        |
        |${TelnetConnection.documentation}
      """.stripMargin
    case in@Input(pwreq, conn, _, _) if pwreq.startsWith("crypt pw") =>
      val plain = pwreq.substring("crypt pw".length).trim
      if (plain.isEmpty)
        in << "Error: Empty password."
      else
        in << Password(PasswordCrypt.choose)(plain).asString
    case in@Input(show, conn, porter, _) if show == "show settings" =>
      // ask the porter actor to render its settings
      val settings = (porter ? ShowSettings).mapTo[String]
      in << settings.map(s => s /* + s"\nPorter path: ${porter.porterPath}" */)
  }, manageProps("add"), manageProps("remove"))
  /** Interactive form adding or removing properties on a group or account;
   *  `verb` is either "add" or "remove" and selects both the trigger command
   *  ("<verb> props") and the update applied on completion.
   */
  def manageProps(verb: String)(implicit ec: ExecutionContext, to: Timeout) = new Form {
    def fields = List("Group or Account (g/a)", "Name", "Properties")
    // Trigger: only starts when a realm is selected in the session.
    def show = {
      case in@Input(msg, conn, _, sess) if msg == (verb+" props") =>
        in.withRealm { _ =>
          conn ! tcp(s"Please enter account/group name and properties to $verb.\n")
        }
        sess.realm.isDefined
    }
    // Field validation: g/a flag -> Boolean, name -> Ident; for "remove" the
    // properties are a space-separated key list, otherwise key=value pairs.
    def validateConvert = {
      case ("Group or Account (g/a)", value) => Try {
        if (value.equalsIgnoreCase("g")) false
        else if (value.equalsIgnoreCase("a")) true
        else sys.error("Invalid value. Type 'a' or 'g'.")
      }
      case ("Name", value) => Try(Ident(value))
      case ("Properties", value) =>
        if (verb == "remove") Try(makeList(' ')(value))
        else makePairs(value)
    }
    // Apply the collected update to the named account or group in the current realm.
    def onComplete(in: Input) = {
      val isaccount = in.session[Boolean]("Group or Account (g/a)")
      val name = in.session[Ident]("Name")
      in.withRealm { realm =>
        if (isaccount) {
          val changeFun = if (verb == "remove") {
            val props = in.session[List[String]]("Properties")
            (a: Account) => a.updatedProps(p => p -- props)
          } else {
            val props = in.session[Map[String, String]]("Properties")
            (a: Account) => a.updatedProps(_ ++ props)
          }
          val f = PorterUtil.updateAccount(in.porter, realm.id, name, changeFun)
          in << f.map(of => if (of.success) "Success" else "Failed")
        } else {
          val changeFun = if (verb == "remove") {
            val props = in.session[List[String]]("Properties")
            (g: Group) => g.updatedProps(p => p -- props)
          } else {
            val props = in.session[Map[String, String]]("Properties")
            (g: Group) => g.updatedProps(_ ++ props)
          }
          val f = PorterUtil.updateGroup(in.porter, realm.id, name, changeFun)
          in << f.map(of => if (of.success) "Success" else "Failed")
        }
      }
    }
  }
}
| eikek/porter | app/src/main/scala/porter/app/akka/telnet/HelperCommands.scala | Scala | apache-2.0 | 4,299 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.symbols
import java.lang.String
import org.neo4j.cypher.CypherTypeException
/** The root of the Cypher type hierarchy: it iterates to itself and is its own parent. */
case class AnyType() extends CypherType {

  /** Two type instances are equal exactly when they share the same runtime class.
   *  A typed pattern never matches null, so equals(null) falls through to false.
   */
  override def equals(other: Any): Boolean = other match {
    case ref: AnyRef => ref.getClass == this.getClass
    case _           => false
  }

  /** Iterating a value of this type yields values of the same type. */
  override val iteratedType: CypherType = this

  def parentType: CypherType = this

  //This is the root of all
  override def toString: String = "Any"
}
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/symbols/AnyType.scala | Scala | gpl-3.0 | 1,307 |
package opal
import cell._
import lattice.{Lattice, Key}
/** Key used by the cell/lattice framework to resolve immutability results. */
object ImmutabilityKey extends Key[Immutability] {

  /** Resolve a completed cycle of cells: if any cell is only conditionally
   *  immutable the whole cycle is, otherwise every cell is immutable.
   */
  def resolve[K <: Key[Immutability]](cells: Seq[Cell[K, Immutability]]): Seq[(Cell[K, Immutability], Immutability)] = {
    val anyConditional = cells.exists(_.getResult() == ConditionallyImmutable)
    val verdict = if (anyConditional) ConditionallyImmutable else Immutable
    cells.map(cell => (cell, verdict))
  }

  /** Fallback for unresolved cells: keep the current result of each
   *  conditionally immutable cell; if there are none, default all to Immutable.
   */
  def fallback[K <: Key[Immutability]](cells: Seq[Cell[K, Immutability]]): Seq[(Cell[K, Immutability], Immutability)] = {
    val conditionals = cells.filter(_.getResult() == ConditionallyImmutable)
    if (conditionals.isEmpty) cells.map(cell => (cell, Immutable))
    else conditionals.map(cell => (cell, cell.getResult()))
  }

  override def toString = "Immutability"
}
/** Result values of the immutability analysis; ordering and join semantics
 *  live in the companion object's implicit Lattice instance.
 */
sealed trait Immutability
case object Mutable extends Immutability
case object ConditionallyImmutable extends Immutability
case object Immutable extends Immutability
object Immutability {
  /** Lattice over Immutability: a totally ordered chain
   *  Immutable < ConditionallyImmutable < Mutable, with Immutable as bottom.
   */
  implicit object ImmutabilityLattice extends Lattice[Immutability] {
    /** Least upper bound: keep `current` unless `next` is strictly above it. */
    override def join(current: Immutability, next: Immutability): Immutability =
      if (<=(next, current)) current else next

    /** Partial (here: total) order on the chain. */
    def <=(lhs: Immutability, rhs: Immutability): Boolean = lhs match {
      case Immutable              => true
      case ConditionallyImmutable => rhs != Immutable
      case Mutable                => rhs == Mutable
    }

    /** Bottom element of the lattice. */
    override def empty: Immutability = Immutable
  }
}
| packlnd/IFDS-RA | src/main/scala/opal/ImmutabilityLattice.scala | Scala | mit | 1,540 |
package com.wandoulabs.onecache.core
import akka.actor.ActorSystem
import akka.testkit.{ ImplicitSender, TestKit }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ FlatSpecLike, BeforeAndAfterAll, Matchers }
import com.wandoulabs.onecache.core.store.MapStore
import com.wandoulabs.onecache.core.cache.{ OneCache, Cache }
/** Base spec for cache implementations: subclasses supply a name and a cache,
 *  then invoke `basicTest` to run the shared CRUD and expiry behaviors.
 */
abstract class OneCacheSpec(_system: ActorSystem) extends TestKit(_system) with FlatSpecLike with Matchers with CacheBehaviors with ImplicitSender with BeforeAndAfterAll {
  // label for the behavior clauses
  def name: String
  // the cache under test, provided by the concrete spec
  def cache: Cache
  // time-to-live used by expiry tests, in seconds
  val ttlSecs = 1
  // no-arg constructor required by the test framework; builds its own actor system
  def this() = this(ActorSystem("OneCacheSpec",
    ConfigFactory.parseString("")))
  override def afterAll {
    // release the actor system when the spec is done
    TestKit.shutdownActorSystem(system)
  }
  // Runs the shared behavior suites against `cache` under `name`.
  def basicTest {
    name should behave like {
      crud(cache)
      expired(cache)
    }
  }
}
/** Runs the shared cache behaviors against a map-backed store. */
class MapStoreSpec extends OneCacheSpec {
  val store = system.actorOf(MapStore.props(testActor))
  val cache: Cache = new OneCache(store, ttlSecs)
  val name = "MapStore"
  basicTest
}
| cowboy129/onecache | onecache-core/src/test/scala/com/wandoulabs/onecache/core/OneCacheSpec.scala | Scala | apache-2.0 | 1,015 |
/**
* A simple text based RPG
*
* @package simplerpg
* @copyright 2015
*/
package simplerpg
import scala.io.StdIn.readLine
import com.owlike.genson.defaultGenson._
/** Mutable experience track: level, XP within the level, and the XP needed to
 *  advance; the threshold grows by 10% on each level-up.
 */
case class Experience(var level: Int, var currentExperience: Int, var maxExperience: Int) {

    /** Absorb the experience points carried by `that`, levelling up as needed.
     *
     *  Fix: the original single `if` applied at most one level-up, so a gain
     *  larger than one level's worth silently discarded the extra levels. Loop
     *  until the remaining XP falls below the (growing) threshold; the guard on
     *  `maxExperience > 0` prevents an infinite loop on a degenerate threshold.
     *
     *  @param that experience to absorb (only its currentExperience is used)
     *  @return this instance, mutated in place
     */
    def +=(that: Experience): Experience = {
        currentExperience += that.currentExperience
        while (maxExperience > 0 && currentExperience >= maxExperience) {
            level += 1
            currentExperience -= maxExperience
            // each level requires 10% more XP than the previous one
            maxExperience += (maxExperience / 10)
        }
        this
    }

    /** JSON rendering via genson. */
    override def toString(): String = toJson(this)
}
/** Mutable combat statistics for a character. */
case class Stats(var strength: Int, var magic: Int, var stamina: Int) {
    /** JSON rendering via genson. */
    override def toString(): String = toJson(this)
}
/** Mutable health/mana pair; += adds another Vitals component-wise in place. */
case class Vitals(var health: Int, var mana: Int) {
    def +=(that: Vitals): Vitals = {
        health += that.health
        mana += that.mana
        this
    }
    /** JSON rendering via genson. */
    override def toString(): String = toJson(this)
}
/** A player character: identity, progression, vitals, stats and inventory.
 *  Equality and hashing are by name only, case-insensitively.
 */
case class Player(
    name: String,
    experience: Experience,
    vitals: Vitals,
    maxVitals: Vitals,
    stats: Stats,
    inventory: Inventory,
    currentLocation: String) extends Character {

    // Divisor applied to strength when computing the damage multiplier.
    private val strengthMultiplier = 100

    /** Attack `enemy`, reducing its health in place, and return the damage dealt.
     *  NOTE(review): `stats.strength / strengthMultiplier` is integer division,
     *  so any strength below 100 produces a multiplier of 0 — and therefore 0
     *  damage even with a weapon equipped. Confirm this truncation is intended.
     */
    def attack(enemy: Player): Int = {
        val damage = inventory.weapons.find(_.isEquipped) match {
            case Some(weapon) => weapon.damage * (stats.strength / strengthMultiplier)
            case None => (stats.strength / strengthMultiplier)
        }
        enemy.vitals.health = enemy.vitals.health - damage
        damage
    }

    /** Remove the named item from the inventory; throws if it is not present. */
    def dropItem(name: String) {
        inventory.items.find(_.name equals name) match {
            case Some(item) => inventory.items = inventory.items.filter(!_.equals(item))
            case None => throw new Exception(s"$name is not in your inventory")
        }
    }

    /** Append an item to the inventory. */
    def giveItem(item: Item) {
        inventory.items ++= List(item)
    }

    /** Remove the named weapon from the inventory; throws if it is not present. */
    def dropWeapon(name: String) {
        inventory.weapons.find(_.name equals name) match {
            case Some(weapon) => inventory.weapons = inventory.weapons.filter(!_.equals(weapon))
            case None => throw new Exception(s"$name is not in your inventory")
        }
    }

    /** Append a weapon to the inventory. */
    def giveWeapon(weapon: Weapon) {
        inventory.weapons ++= List(weapon)
    }

    /** Remove the named armor from the inventory; throws if it is not present. */
    def dropArmor(name: String) {
        inventory.armor.find(_.name equals name) match {
            case Some(armor) => inventory.armor = inventory.armor.filter(!_.equals(armor))
            case None => throw new Exception(s"$name is not in your inventory")
        }
    }

    /** Append an armor piece to the inventory. */
    def giveArmor(armor: Armor) {
        inventory.armor ++= List(armor)
    }

    /** Toggle the equipped state of the named weapon; throws if it does not exist. */
    def equipWeapon(name: String) {
        inventory.weapons.find(_.name equals name) match {
            case Some(weapon) => weapon.isEquipped = !weapon.isEquipped
            case None => throw new Exception(s"Weapon $name does not exist")
        }
    }

    /** Toggle the equipped state of the named armor; throws if it does not exist. */
    def equipArmor(name: String) {
        inventory.armor.find(_.name equals name) match {
            case Some(armor) => armor.isEquipped = !armor.isEquipped
            case None => throw new Exception(s"Armor $name does not exist")
        }
    }

    /** Clamp current vitals so they never exceed the maximums. */
    def capMaxVitals() {
        if (vitals.health > maxVitals.health)
            vitals.health = maxVitals.health
        if (vitals.mana > maxVitals.mana)
            vitals.mana = maxVitals.mana
    }

    /** Restore health and mana to their maximums. */
    def resetVitals() {
        vitals.health = maxVitals.health
        vitals.mana = maxVitals.mana
    }

    def isDead(): Boolean = vitals.health <= 0

    def hasMana(): Boolean = vitals.mana > 0

    override def isPlayerControlled(): Boolean = true

    /** Read a line from stdin and split it into non-empty, space-separated tokens. */
    override def askForCommands(): Array[String] = readLine().split(" ").filter(!_.isEmpty)

    /** Players are identified by name, ignoring case. */
    override def equals(other: Any): Boolean = other match {
        case that: Player => that.name.equalsIgnoreCase(this.name)
        case _ => false
    }

    // Consistent with equals: hash of the lower-cased name.
    override def hashCode = name.toLowerCase.hashCode

    /** JSON rendering via genson. */
    override def toString(): String = toJson(this)
}
| mcross1882/SimpleRPG | src/main/scala/simplerpg/player/Player.scala | Scala | mit | 3,981 |
class B // base class used by the unsoundness demonstration below
class C(x: String) extends B // subtype of B carrying a String
class A { // neg-test fixture: deliberately unsound, must NOT compile
  type A >: Null // abstract type lower-bounded by Null
  class D { type T >: C <: B }
  val x: A with D = null // only typeable because A admits null
  var y: x.T = new C("abc") // path-dependent type over a null path
}
object Test extends A with App { // rebinding A/C here is what the check exercises
  class C { type T = Int; val x = 1 }
  type A = C
  y = 42 // expected error: assigning Int through the unsound x.T
}
| AlexSikia/dotty | tests/untried/neg/null-unsoundness.scala | Scala | bsd-3-clause | 246 |
/*
* Copyright 2012-2015 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.ui
import viper.domain.{Subscription, Subscriber}
/** Interface to the selected UI implementation. */
/** Interface to the selected UI implementation. */
trait ViperUI {
  /** Whether the given subscriber is already known to the UI. */
  def hasSubscriber(subscriber: Subscriber): Boolean
  /** Register a subscription with the UI. */
  def addSubscription(subscription: Subscription): Unit
  /** Bring the given subscriber into focus in the UI. */
  def focusOn(subscriber: Subscriber): Unit
  /** Bring the UI window to the foreground. */
  def toFront(): Unit
}
| vyadh/viper | ui/src/main/scala/viper/ui/ViperUI.scala | Scala | apache-2.0 | 919 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.cluster.{Broker, Partition, Replica}
import collection._
import mutable.HashMap
import org.I0Itec.zkclient.ZkClient
import java.util.concurrent.atomic.AtomicBoolean
import kafka.utils._
import kafka.log.LogManager
import kafka.metrics.KafkaMetricsGroup
import com.yammer.metrics.core.Gauge
import java.util.concurrent.TimeUnit
import kafka.common._
import kafka.api.{StopReplicaRequest, PartitionStateInfo, LeaderAndIsrRequest}
import kafka.controller.KafkaController
import org.apache.log4j.Logger
object ReplicaManager {
  // Sentinel used while a replica's log end offset is not yet known.
  val UnknownLogEndOffset = -1L
}
class ReplicaManager(val config: KafkaConfig,
time: Time,
val zkClient: ZkClient,
kafkaScheduler: KafkaScheduler,
val logManager: LogManager,
val isShuttingDown: AtomicBoolean ) extends Logging with KafkaMetricsGroup {
  /* epoch of the controller that last changed the leader */
  @volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
  private val localBrokerId = config.brokerId
  // all partitions hosted by this broker, keyed by (topic, partition)
  private val allPartitions = new Pool[(String, Int), Partition]
  // partitions currently led by this broker; guarded by leaderPartitionsLock
  private var leaderPartitions = new mutable.HashSet[Partition]()
  private val leaderPartitionsLock = new Object
  val replicaFetcherManager = new ReplicaFetcherManager(config, this)
  // guards against scheduling the highwatermark checkpoint thread more than once
  private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false)
  // one checkpoint file per log directory
  val highWatermarkCheckpoints = config.logDirs.map(dir => (dir, new HighwaterMarkCheckpoint(dir))).toMap
  private var hwThreadInitialized = false
  this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: "
  val stateChangeLogger = Logger.getLogger(KafkaController.stateChangeLogger)
/*newGauge(
"LeaderCount",
new Gauge[Int] {
def value = {
leaderPartitionsLock synchronized {
leaderPartitions.size
}
}
}
)
newGauge(
"PartitionCount",
new Gauge[Int] {
def value = allPartitions.size
}
)
newGauge(
"UnderReplicatedPartitions",
new Gauge[Int] {
def value = {
leaderPartitionsLock synchronized {
leaderPartitions.count(_.isUnderReplicated)
}
}
}
) */
// val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS)
// val isrShrinkRate = newMeter("ISRShrinksPerSec", "shrinks", TimeUnit.SECONDS)
  // Schedule periodic highwatermark checkpointing; the CAS guarantees the
  // thread is scheduled at most once no matter how often this is called.
  def startHighWaterMarksCheckPointThread() = {
    if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true))
      kafkaScheduler.scheduleWithRate(checkpointHighWatermarks, "highwatermark-checkpoint-thread", 0, config.replicaHighWatermarkCheckpointIntervalMs)
  }
/**
* This function is only used in two places: in Partition.updateISR() and KafkaApis.handleProducerRequest().
* In the former case, the partition should have been created, in the latter case, return -1 will put the request into purgatory
*/
def getReplicationFactorForPartition(topic: String, partitionId: Int) = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case Some(partition) =>
partition.replicationFactor
case None =>
-1
}
}
  // Kick off background maintenance when the replica manager comes up.
  def startup() {
    // start ISR expiration thread
    kafkaScheduler.scheduleWithRate(maybeShrinkIsr, "isr-expiration-thread-", 0, config.replicaLagTimeMaxMs)
  }
  /** Stop the local replica of a partition: remove its fetcher, drop it from the
   *  leader set and, when requested, forget the partition entirely.
   *  Always returns NoError; a missing replica is treated as already stopped.
   */
  def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = {
    stateChangeLogger.trace("Broker %d handling stop replica for partition [%s,%d]".format(localBrokerId, topic, partitionId))
    val errorCode = ErrorMapping.NoError
    getReplica(topic, partitionId) match {
      case Some(replica) =>
        replicaFetcherManager.removeFetcher(topic, partitionId)
        /* TODO: handle deleteLog in a better way */
        //if (deletePartition)
        // logManager.deleteLog(topic, partition)
        leaderPartitionsLock synchronized {
          leaderPartitions -= replica.partition
        }
        if(deletePartition)
          allPartitions.remove((topic, partitionId))
      case None => //do nothing if replica no longer exists
    }
    stateChangeLogger.trace("Broker %d finished handling stop replica for partition [%s,%d]".format(localBrokerId, topic, partitionId))
    errorCode
  }
def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[(String, Int), Short], Short) = {
val responseMap = new collection.mutable.HashMap[(String, Int), Short]
if(stopReplicaRequest.controllerEpoch < controllerEpoch) {
stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d."
.format(localBrokerId, stopReplicaRequest.controllerEpoch) +
" Latest known controller epoch is %d " + controllerEpoch)
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
controllerEpoch = stopReplicaRequest.controllerEpoch
val responseMap = new HashMap[(String, Int), Short]
for((topic, partitionId) <- stopReplicaRequest.partitions){
val errorCode = stopReplica(topic, partitionId, stopReplicaRequest.deletePartitions)
responseMap.put((topic, partitionId), errorCode)
}
(responseMap, ErrorMapping.NoError)
}
}
  /** Get the local Partition object, creating it if absent.
   *  putIfNotExists makes the create race-safe: if two callers miss at the same
   *  time, only one insert wins and both re-read the winning instance.
   */
  def getOrCreatePartition(topic: String, partitionId: Int, replicationFactor: Int): Partition = {
    var partition = allPartitions.get((topic, partitionId))
    if (partition == null) {
      allPartitions.putIfNotExists((topic, partitionId), new Partition(topic, partitionId, replicationFactor, time, this))
      partition = allPartitions.get((topic, partitionId))
    }
    partition
  }
def getPartition(topic: String, partitionId: Int): Option[Partition] = {
val partition = allPartitions.get((topic, partitionId))
if (partition == null)
None
else
Some(partition)
}
def getReplicaOrException(topic: String, partition: Int): Replica = {
val replicaOpt = getReplica(topic, partition)
if(replicaOpt.isDefined)
return replicaOpt.get
else
throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition))
}
def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None =>
throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId))
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(leaderReplica) => leaderReplica
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d"
.format(topic, partitionId, config.brokerId))
}
}
}
def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None => None
case Some(partition) => partition.getReplica(replicaId)
}
}
/**
 * Handles a LeaderAndIsr request from the controller: for every partition in
 * the request this broker transitions to leader or follower as instructed.
 *
 * Returns a per-partition error-code map plus a top-level error code
 * (StaleControllerEpochCode when the request's controller epoch is older than
 * the latest one this broker has seen).
 */
def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest): (collection.Map[(String, Int), Short], Short) = {
  leaderAndISRRequest.partitionStateInfos.foreach(p =>
    stateChangeLogger.trace("Broker %d handling LeaderAndIsr request correlation id %d received from controller %d epoch %d for partition [%s,%d]"
      .format(localBrokerId, leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerId,
        leaderAndISRRequest.controllerEpoch, p._1._1, p._1._2)))
  info("Handling LeaderAndIsr request %s".format(leaderAndISRRequest))

  val responseMap = new collection.mutable.HashMap[(String, Int), Short]
  // Fence off requests coming from a controller older than the one we know.
  if(leaderAndISRRequest.controllerEpoch < controllerEpoch) {
    stateChangeLogger.warn("Broker %d received LeaderAndIsr request correlation id %d with an old controller epoch %d. Latest known controller epoch is %d"
      .format(localBrokerId, leaderAndISRRequest.controllerEpoch, leaderAndISRRequest.correlationId, controllerEpoch))
    (responseMap, ErrorMapping.StaleControllerEpochCode)
  }else {
    val controllerId = leaderAndISRRequest.controllerId
    // Remember the newest controller epoch so later stale requests are rejected.
    controllerEpoch = leaderAndISRRequest.controllerEpoch
    for((topicAndPartition, partitionStateInfo) <- leaderAndISRRequest.partitionStateInfos) {
      var errorCode = ErrorMapping.NoError
      val topic = topicAndPartition._1
      val partitionId = topicAndPartition._2
      val requestedLeaderId = partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader
      try {
        // Lead if the controller designated this broker; otherwise follow.
        if(requestedLeaderId == config.brokerId)
          makeLeader(controllerId, controllerEpoch, topic, partitionId, partitionStateInfo, leaderAndISRRequest.correlationId)
        else
          makeFollower(controllerId, controllerEpoch, topic, partitionId, partitionStateInfo, leaderAndISRRequest.aliveLeaders,
            leaderAndISRRequest.correlationId)
      } catch {
        // NOTE(review): bare `case e =>` also traps fatal errors; consider NonFatal.
        case e =>
          val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d " +
            "epoch %d for partition %s").format(localBrokerId, leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerId,
            leaderAndISRRequest.controllerEpoch, topicAndPartition)
          stateChangeLogger.error(errorMsg, e)
          // Per-partition failure is reported in the response instead of aborting the whole request.
          errorCode = ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
      }
      responseMap.put(topicAndPartition, errorCode)
      stateChangeLogger.trace("Broker %d handled LeaderAndIsr request correlationId %d received from controller %d epoch %d for partition [%s,%d]"
        .format(localBrokerId, leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch,
          topicAndPartition._1, topicAndPartition._2))
    }
    info("Handled leader and isr request %s".format(leaderAndISRRequest))
    // we initialize highwatermark thread after the first leaderisrrequest. This ensures that all the partitions
    // have been completely populated before starting the checkpointing thereby avoiding weird race conditions
    if (!hwThreadInitialized) {
      startHighWaterMarksCheckPointThread()
      hwThreadInitialized = true
    }
    replicaFetcherManager.shutdownIdleFetcherThreads()
    (responseMap, ErrorMapping.NoError)
  }
}
/**
 * Transitions the local replica of [topic, partitionId] to leader.
 *
 * Creates the partition object if it does not exist yet; when the transition
 * actually takes place, registers the partition in leaderPartitions so the
 * ISR-shrink task will manage it.
 */
private def makeLeader(controllerId: Int, epoch:Int, topic: String, partitionId: Int,
                       partitionStateInfo: PartitionStateInfo, correlationId: Int) = {
  val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
  stateChangeLogger.trace(("Broker %d received LeaderAndIsr request correlationId %d from controller %d epoch %d " +
    "starting the become-leader transition for partition [%s,%d]")
    .format(localBrokerId, correlationId, controllerId, epoch, topic, partitionId))
  val partition = getOrCreatePartition(topic, partitionId, partitionStateInfo.replicationFactor)
  // Presumably makeLeader returns true only on an actual transition — confirm in Partition.
  if (partition.makeLeader(controllerId, topic, partitionId, leaderIsrAndControllerEpoch, correlationId)) {
    // also add this partition to the list of partitions for which the leader is the current broker
    leaderPartitionsLock synchronized {
      leaderPartitions += partition
    }
  }
  stateChangeLogger.trace("Broker %d completed become-leader transition for partition [%s,%d]".format(localBrokerId, topic, partitionId))
}
/**
 * Transitions the local replica of [topic, partitionId] to follower.
 *
 * Creates the partition object if needed; when the transition takes place,
 * removes the partition from leaderPartitions so it no longer participates
 * in leader-side ISR maintenance.
 */
private def makeFollower(controllerId: Int, epoch: Int, topic: String, partitionId: Int,
                         partitionStateInfo: PartitionStateInfo, aliveLeaders: Set[Broker], correlationId: Int) {
  val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
  stateChangeLogger.trace(("Broker %d received LeaderAndIsr request correlationId %d from controller %d epoch %d " +
    "starting the become-follower transition for partition [%s,%d]")
    .format(localBrokerId, correlationId, controllerId, epoch, topic, partitionId))
  val partition = getOrCreatePartition(topic, partitionId, partitionStateInfo.replicationFactor)
  if (partition.makeFollower(controllerId, topic, partitionId, leaderIsrAndControllerEpoch, aliveLeaders, correlationId)) {
    // remove this replica's partition from the ISR expiration queue
    leaderPartitionsLock synchronized {
      leaderPartitions -= partition
    }
  }
  stateChangeLogger.trace("Broker %d completed the become-follower transition for partition [%s,%d]".format(localBrokerId, topic, partitionId))
}
/**
 * Periodic task: for every partition this broker leads, drop replicas that
 * have fallen too far behind (by time or by message count) out of the ISR.
 */
private def maybeShrinkIsr(): Unit = {
  trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR")
  leaderPartitionsLock synchronized {
    for (partition <- leaderPartitions)
      partition.maybeShrinkIsr(config.replicaLagTimeMaxMs, config.replicaLagMaxMessages)
  }
}
/**
 * Records a follower's fetch offset for [topic, partitionId]: updates the
 * leader high watermark and possibly re-expands the ISR.
 *
 * If the partition is unknown on this broker, the update is skipped with a
 * warning (the partition may not have been created yet).
 *
 * Improvement: replaced the `isDefined`/`get` pair with an idiomatic pattern
 * match on the Option.
 */
def recordFollowerPosition(topic: String, partitionId: Int, replicaId: Int, offset: Long) = {
  getPartition(topic, partitionId) match {
    case Some(partition) =>
      partition.updateLeaderHWAndMaybeExpandIsr(replicaId, offset)
    case None =>
      warn("While recording the follower position, the partition [%s,%d] hasn't been created, skip updating leader HW".format(topic, partitionId))
  }
}
/**
 * Flushes the highwatermark value for all partitions to the highwatermark file
 *
 * Replicas are grouped by log directory because each directory maintains its
 * own checkpoint file (looked up via highWatermarkCheckpoints).
 */
def checkpointHighWatermarks() {
  // Keep only this broker's own replicas, and only those with a local log.
  val replicas = allPartitions.values.map(_.getReplica(config.brokerId)).collect{case Some(replica) => replica}
  val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParent)
  for((dir, reps) <- replicasByDir) {
    // (topic, partition) -> high watermark, written atomically per directory.
    val hwms = reps.map(r => (TopicAndPartition(r.topic, r.partitionId) -> r.highWatermark)).toMap
    highWatermarkCheckpoints(dir).write(hwms)
  }
}
/**
 * Shuts down the replica manager: stops all fetcher threads, then writes a
 * final high-watermark checkpoint so recovery on restart starts from
 * up-to-date data.
 *
 * Fix: the original logged "Shut down" *before* doing anything and ended with
 * the ungrammatical "Shutted down completely"; messages corrected.
 */
def shutdown() {
  info("Shutting down")
  replicaFetcherManager.shutdown()
  checkpointHighWatermarks()
  info("Shut down completely")
}
}
| kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/server/ReplicaManager.scala | Scala | apache-2.0 | 15,167 |
package name.abhijitsarkar.user.repository
import name.abhijitsarkar.user.TestUtil._
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
/**
 * Runs the shared UserRepositorySpec suite against a real MySQL instance
 * provided by TestDBContext.
 *
 * Fix: afterAll() previously fired the asynchronous delete and immediately
 * closed the connection pool, so cleanup could race with db.close; the delete
 * Future is now awaited before closing.
 */
class MySQLUserRepositorySpec extends UserRepositorySpec with BeforeAndAfterAll {
  val dBContext = new TestDBContext
  val dbConfig = dBContext.dbConfig
  val db = dBContext.db

  import dbConfig.driver.api._

  // Give the DB enough time to start up
  Thread.sleep(5000)

  override protected val userRepository = MySQLUserRepository(dBContext) {
    implicitly
  }

  val query = TableQuery[Users]

  override def afterAll() {
    println("Cleaning up")
    // Wait for the delete to complete before tearing the pool down.
    Await.result(deleteAllUsers(), Duration.Inf)
    db.close
  }

  override protected def dumpAllUsers = {
    println("Printing all users")
    // Fire-and-forget: rows are printed whenever the query completes.
    db.run(query.result).map {
      println(_)
    }
  }

  /** Issues an async DELETE of every row; returns the Slick Future. */
  override protected def deleteAllUsers() = {
    println("Deleting all users")
    db.run(query.delete)
  }

  override protected def someUserId = {
    randomUserId
  }
}
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Benchmarks **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, Jonas Fonseca **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
// Copyright 2011 Google Inc. All Rights Reserved.
// Copyright 1996 John Maloney and Mario Wolczko
//
// This file is part of GNU Smalltalk.
//
// GNU Smalltalk is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2, or (at your option) any later version.
//
// GNU Smalltalk is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License along with
// GNU Smalltalk; see the file COPYING. If not, write to the Free Software
// Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// Translated first from Smalltalk to JavaScript, and finally to
// Dart by Google 2008-2010.
// Translated to Scala.js by Jonas Fonseca 2013
package org.scalajs.benchmark.deltablue
/**
* A Scala implementation of the DeltaBlue constraint-solving
* algorithm, as described in:
*
* "The DeltaBlue Algorithm: An Incremental Constraint Hierarchy Solver"
* Bjorn N. Freeman-Benson and John Maloney
* January 1990 Communications of the ACM,
* also available as University of Washington TR 89-08-06.
*
* Beware: this benchmark is written in a grotesque style where
* the constraint model is built by side-effects from constructors.
* I've kept it this way to avoid deviating too much from the original
* implementation.
*/
import scala.collection.mutable.{ArrayBuffer, ListBuffer, Stack}
/**
 * Benchmark entry point: runs the two standard DeltaBlue workloads.
 *
 * Fix: the failure report in chainTest used a plain (non-interpolated) string
 * literal with a mismatched brace — `"...\n{last.value)\n{i}"` — so it printed
 * the template instead of the values; it is now a proper s-interpolation.
 */
object DeltaBlue extends org.scalajs.benchmark.Benchmark {

  override def prefix = "DeltaBlue"

  def run {
    chainTest(100)
    projectionTest(100)
  }

  /**
   * This is the standard DeltaBlue benchmark. A long chain of equality
   * constraints is constructed with a stay constraint on one end. An
   * edit constraint is then added to the opposite end and the time is
   * measured for adding and removing this constraint, and extracting
   * and executing a constraint satisfaction plan. There are two cases.
   * In case 1, the added constraint is stronger than the stay
   * constraint and values must propagate down the entire length of the
   * chain. In case 2, the added constraint is weaker than the stay
   * constraint so it cannot be accomodated. The cost in this case is,
   * of course, very low. Typical situations lie somewhere between these
   * two extremes.
   */
  def chainTest(n: Int) {
    implicit val planner = new Planner()
    var prev: Variable = null
    var first: Variable = null
    var last: Variable = null

    // Build chain of n equality constraints.
    for (i <- 0 to n) {
      val v = new Variable("v", 0)
      if (prev != null) new EqualityConstraint(prev, v, REQUIRED)
      if (i == 0) first = v
      if (i == n) last = v
      prev = v
    }

    new StayConstraint(last, STRONG_DEFAULT)
    val edit = new EditConstraint(first, PREFERRED)
    val plan = planner.extractPlanFromConstraints(Seq(edit))
    for (i <- 0 until 100) {
      first.value = i
      plan.execute()
      if (last.value != i) {
        print(s"Chain test failed.\n${last.value}\n$i")
      }
    }
  }

  /**
   * This test constructs a two sets of variables related to each
   * other by a simple linear transformation (scale and offset). The
   * time is measured to change a variable on either side of the
   * mapping and to change the scale and offset factors.
   */
  def projectionTest(n: Int) {
    implicit val planner = new Planner()

    val scale = new Variable("scale", 10)
    val offset = new Variable("offset", 1000)
    var src: Variable = null
    var dst: Variable = null

    val dests = new ArrayBuffer[Variable](n)

    for (i <- 0 until n) {
      src = new Variable("src", i)
      dst = new Variable("dst", i)
      dests += dst
      new StayConstraint(src, NORMAL)
      new ScaleConstraint(src, scale, offset, dst, REQUIRED)
    }

    change(src, 17)
    if (dst.value != 1170) print("Projection 1 failed")
    change(dst, 1050)
    if (src.value != 5) print("Projection 2 failed")
    change(scale, 5)
    for (i <- 0 until n - 1) {
      if (dests(i).value != i * 5 + 1000) print("Projection 3 failed")
    }
    change(offset, 2000)
    for (i <- 0 until n - 1) {
      if (dests(i).value != i * 5 + 2000) print("Projection 4 failed")
    }
  }

  /** Pushes newValue into v via a temporary edit constraint, replanning once. */
  def change(v: Variable, newValue: Int)(implicit planner: Planner) {
    val edit = new EditConstraint(v, PREFERRED)
    val plan = planner.extractPlanFromConstraints(Seq(edit))
    for (i <- 0 until 10) {
      v.value = newValue
      plan.execute()
    }
    edit.destroyConstraint
  }
}
/**
 * Strengths are used to measure the relative importance of constraints.
 * New strengths may be inserted in the strength hierarchy without
 * disrupting current constraints. Strengths cannot be created outside
 * this class, so == can be used for value comparison.
 */
sealed class Strength(val value: Int, val name: String) {
  // Maps each strength to the next-weaker one. Deliberately partial: there is
  // no case for value 6 (WEAKEST), so WEAKEST.nextWeaker throws a MatchError.
  // The solver never calls it on WEAKEST (Planner.incrementalRemove stops
  // its loop once strength == WEAKEST).
  def nextWeaker = value match {
    case 0 => STRONG_PREFERRED
    case 1 => PREFERRED
    case 2 => STRONG_DEFAULT
    case 3 => NORMAL
    case 4 => WEAK_DEFAULT
    case 5 => WEAKEST
  }
}
// The strength hierarchy, strongest (0) to weakest (6). Singletons so identity
// comparison (==) is safe throughout the solver.
case object REQUIRED extends Strength(0, "required")
case object STRONG_PREFERRED extends Strength(1, "strongPreferred")
case object PREFERRED extends Strength(2, "preferred")
case object STRONG_DEFAULT extends Strength(3, "strongDefault")
case object NORMAL extends Strength(4, "normal")
case object WEAK_DEFAULT extends Strength(5, "weakDefault")
case object WEAKEST extends Strength(6, "weakest")
// Ordering helpers over the strength hierarchy (lower value = stronger).
object Strength {
  /** True when s1 dominates s2. */
  def stronger(s1: Strength, s2: Strength): Boolean = s1.value < s2.value

  /** True when s1 is dominated by s2. */
  def weaker(s1: Strength, s2: Strength): Boolean = s2.value < s1.value

  /** The weaker of the two; s2 on a tie. */
  def weakest(s1: Strength, s2: Strength): Strength = if (weaker(s1, s2)) s1 else s2

  /** The stronger of the two; s2 on a tie. */
  def strongest(s1: Strength, s2: Strength): Strength = if (stronger(s1, s2)) s1 else s2
}
/**
 * Abstract superclass of all constraints. A constraint relates one or more
 * Variables at a given Strength; the planner decides per constraint whether
 * (and in which direction) it is currently enforced.
 */
abstract class Constraint(val strength: Strength)(implicit planner: Planner) {

  def isSatisfied(): Boolean
  def markUnsatisfied(): Unit
  def addToGraph(): Unit
  def removeFromGraph(): Unit
  def chooseMethod(mark: Int): Unit
  def markInputs(mark: Int): Unit
  def inputsKnown(mark: Int): Boolean
  def output(): Variable
  def execute(): Unit
  def recalculate(): Unit

  /// Activate this constraint and attempt to satisfy it.
  def addConstraint() {
    addToGraph()
    planner.incrementalAdd(this)
  }

  /**
   * Attempt to find a way to enforce this constraint. If successful,
   * record the solution, perhaps modifying the current dataflow
   * graph. Answer the constraint that this constraint overrides, if
   * there is one, or nil, if there isn't.
   * Assume: I am not already satisfied.
   */
  def satisfy(mark: Int): Constraint = {
    chooseMethod(mark)
    if (!isSatisfied()) {
      if (strength == REQUIRED) {
        print("Could not satisfy a required constraint!")
      }
      null
    } else {
      markInputs(mark)
      val out = output()
      // Displace whichever weaker constraint currently determines the output.
      val overridden = out.determinedBy
      if (overridden != null)
        overridden.markUnsatisfied()
      out.determinedBy = this
      if (!planner.addPropagate(this, mark))
        print("Cycle encountered")
      out.mark = mark
      overridden
    }
  }

  /// Deactivate this constraint, re-satisfying displaced constraints if needed.
  def destroyConstraint {
    if (isSatisfied())
      planner.incrementalRemove(this)
    removeFromGraph()
  }

  /**
   * Normal constraints are not input constraints. An input constraint
   * is one that depends on external state, such as the mouse, the
   * keyboard, a clock, or some arbitrary piece of imperative code.
   */
  def isInput = false
}
/**
 * Abstract superclass for constraints having a single possible output variable.
 */
abstract class UnaryConstraint(myOutput: Variable, strength: Strength)(implicit planner: Planner) extends Constraint(strength) {

  private var satisfied = false

  // NOTE: registers with the planner from the constructor — side-effecting
  // construction style deliberately kept from the original benchmark.
  addConstraint()

  /// Adds this constraint to the constraint graph
  def addToGraph() {
    myOutput.addConstraint(this)
    satisfied = false
  }

  /// Decides if this constraint can be satisfied and records that decision.
  def chooseMethod(mark: Int) {
    // Satisfiable iff the output was not visited this walk and we out-rank
    // its current walkabout strength.
    satisfied = (myOutput.mark != mark) &&
      Strength.stronger(strength, myOutput.walkStrength)
  }

  /// Returns true if this constraint is satisfied in the current solution.
  def isSatisfied() = satisfied

  def markInputs(mark: Int) {
    // has no inputs.
  }

  /// Returns the current output variable.
  def output() = myOutput

  /**
   * Calculate the walkabout strength, the stay flag, and, if it is
   * 'stay', the value for the current output of this constraint. Assume
   * this constraint is satisfied.
   */
  def recalculate() {
    myOutput.walkStrength = strength
    myOutput.stay = !isInput
    if (myOutput.stay) execute(); // Stay optimization.
  }

  /// Records that this constraint is unsatisfied.
  def markUnsatisfied() {
    satisfied = false
  }

  // Unary constraints have no inputs, so the inputs are trivially known.
  def inputsKnown(mark: Int) = true

  def removeFromGraph() {
    if (myOutput != null) myOutput.removeConstraint(this)
    satisfied = false
  }
}
/**
 * Variables that should, with some level of preference, stay the same.
 * Planners may exploit the fact that instances, if satisfied, will not
 * change their output during plan execution. This is called "stay
 * optimization".
 */
class StayConstraint(v: Variable, str: Strength)(implicit planner: Planner) extends UnaryConstraint(v, str) {
  def execute() {
    // Stay constraints do nothing.
  }
}
/**
 * A unary input constraint used to mark a variable that the client
 * wishes to change.
 */
class EditConstraint(v: Variable, str: Strength)(implicit planner: Planner) extends UnaryConstraint(v, str) {

  /// Edits indicate that a variable is to be changed by imperative code.
  override val isInput = true

  def execute() {
    // Edit constraints do nothing. The client assigns the variable directly.
  }
}
// Flow direction of a binary constraint: FORWARD means v1 -> v2, BACKWARD
// means v2 -> v1, NONE means the constraint is currently unsatisfied.
// (The numeric values are arbitrary tags, not an ordering.)
object Direction {
  final val NONE = 1
  final val FORWARD = 2
  final val BACKWARD = 0
}
/**
 * Abstract superclass for constraints having two possible output
 * variables.
 */
abstract class BinaryConstraint(v1: Variable, v2: Variable, strength: Strength)(implicit planner: Planner) extends Constraint(strength) {

  protected var direction = Direction.NONE

  // Registers with the planner during construction (benchmark style).
  addConstraint()

  /**
   * Decides if this constraint can be satisfied and which way it
   * should flow based on the relative strength of the variables related,
   * and record that decision.
   *
   * NOTE(review): the first two conditional assignments below are always
   * overwritten by the unconditional if/else at the end; this dead-store
   * structure is faithful to the reference DeltaBlue implementation and is
   * preserved on purpose.
   */
  def chooseMethod(mark: Int) {
    if (v1.mark == mark) {
      direction =
        if ((v2.mark != mark && Strength.stronger(strength, v2.walkStrength)))
          Direction.FORWARD
        else
          Direction.NONE
    }

    if (v2.mark == mark) {
      direction =
        if (v1.mark != mark && Strength.stronger(strength, v1.walkStrength))
          Direction.BACKWARD
        else
          Direction.NONE
    }

    if (Strength.weaker(v1.walkStrength, v2.walkStrength)) {
      direction =
        if (Strength.stronger(strength, v1.walkStrength))
          Direction.BACKWARD
        else
          Direction.NONE
    } else {
      direction =
        if (Strength.stronger(strength, v2.walkStrength))
          Direction.FORWARD
        else
          Direction.BACKWARD
    }
  }

  /// Add this constraint to the constraint graph.
  override def addToGraph() {
    v1.addConstraint(this)
    v2.addConstraint(this)
    direction = Direction.NONE
  }

  /// Answer true if this constraint is satisfied in the current solution.
  def isSatisfied() = direction != Direction.NONE

  /// Mark the input variable with the given mark.
  def markInputs(mark: Int) {
    input().mark = mark
  }

  /// Returns the current input variable
  def input() = if (direction == Direction.FORWARD) v1 else v2

  /// Returns the current output variable.
  def output() = if (direction == Direction.FORWARD) v2 else v1

  /**
   * Calculate the walkabout strength, the stay flag, and, if it is
   * 'stay', the value for the current output of this
   * constraint. Assume this constraint is satisfied.
   */
  def recalculate() {
    val ihn = input()
    val out = output()
    out.walkStrength = Strength.weakest(strength, ihn.walkStrength)
    out.stay = ihn.stay
    if (out.stay) execute()
  }

  /// Record the fact that this constraint is unsatisfied.
  def markUnsatisfied() {
    direction = Direction.NONE
  }

  // The input is known if it was computed this walk, is constant, or is free.
  def inputsKnown(mark: Int): Boolean = {
    val i = input()
    i.mark == mark || i.stay || i.determinedBy == null
  }

  def removeFromGraph() {
    if (v1 != null) v1.removeConstraint(this)
    if (v2 != null) v2.removeConstraint(this)
    direction = Direction.NONE
  }
}
/**
 * Relates two variables by the linear scaling relationship: "v2 =
 * (v1 * scale) + offset". Either v1 or v2 may be changed to maintain
 * this relationship but the scale factor and offset are considered
 * read-only.
 */
class ScaleConstraint(v1: Variable, scale: Variable, offset: Variable,
                      v2: Variable, strength: Strength)(implicit planner: Planner)
  extends BinaryConstraint(v1, v2, strength) {

  /// Adds this constraint to the constraint graph.
  override def addToGraph() {
    super.addToGraph()
    scale.addConstraint(this)
    offset.addConstraint(this)
  }

  override def removeFromGraph() {
    super.removeFromGraph()
    if (scale != null) scale.removeConstraint(this)
    if (offset != null) offset.removeConstraint(this)
  }

  override def markInputs(mark: Int) {
    super.markInputs(mark)
    // scale and offset are additional (read-only) inputs of this constraint.
    scale.mark = mark
    offset.mark = mark
  }

  /// Enforce this constraint. Assume that it is satisfied.
  def execute() {
    if (direction == Direction.FORWARD) {
      v2.value = v1.value * scale.value + offset.value
    } else {
      // XXX: Truncates the resulting value (integer division).
      v1.value = (v2.value - offset.value) / scale.value
    }
  }

  /**
   * Calculate the walkabout strength, the stay flag, and, if it is
   * 'stay', the value for the current output of this constraint. Assume
   * this constraint is satisfied.
   */
  override def recalculate() {
    val ihn = input()
    val out = output()
    out.walkStrength = Strength.weakest(strength, ihn.walkStrength)
    // The output stays only if the input AND both coefficients stay.
    out.stay = ihn.stay && scale.stay && offset.stay
    if (out.stay) execute()
  }
}
/**
 * Constrains two variables to have the same value.
 */
class EqualityConstraint(v1: Variable, v2: Variable, strength: Strength)(implicit planner: Planner) extends BinaryConstraint(v1, v2, strength) {
  /// Enforce this constraint. Assume that it is satisfied.
  def execute() {
    // Copy from the current input side to the current output side.
    output().value = input().value
  }
}
/**
 * A constrained variable. Besides its value it carries the bookkeeping the
 * incremental solver needs: the set of constraints referencing it, the
 * constraint currently determining it, the walkabout strength, the stay
 * flag, and the visitation mark.
 */
class Variable(val name: String, var value: Int) {

  val constraints = new ListBuffer[Constraint]()
  var determinedBy: Constraint = null
  var mark = 0
  var walkStrength: Strength = WEAKEST
  var stay = true

  /** Registers c as a constraint that references this variable. */
  def addConstraint(c: Constraint) {
    constraints.append(c)
  }

  /** Forgets c entirely, including its role as this variable's determiner. */
  def removeConstraint(c: Constraint) {
    constraints -= c
    if (c == determinedBy) determinedBy = null
  }
}
/**
 * The DeltaBlue incremental constraint solver: maintains the dataflow graph
 * and (re)satisfies constraints as they are added and removed.
 */
class Planner {
  // Monotonically increasing marker; each graph walk gets a fresh value.
  var currentMark = 0

  /**
   * Attempt to satisfy the given constraint and, if successful,
   * incrementally update the dataflow graph. Details: If satifying
   * the constraint is successful, it may override a weaker constraint
   * on its output. The algorithm attempts to resatisfy that
   * constraint using some other method. This process is repeated
   * until either a) it reaches a variable that was not previously
   * determined by any constraint or b) it reaches a constraint that
   * is too weak to be satisfied using any of its methods. The
   * variables of constraints that have been processed are marked with
   * a unique mark value so that we know where we've been. This allows
   * the algorithm to avoid getting into an infinite loop even if the
   * constraint graph has an inadvertent cycle.
   */
  def incrementalAdd(c: Constraint) {
    val mark = newMark()
    var overridden = c.satisfy(mark)
    // Keep re-satisfying each displaced weaker constraint until none remains.
    while (overridden != null)
      overridden = overridden.satisfy(mark)
  }

  /**
   * Entry point for retracting a constraint. Remove the given
   * constraint and incrementally update the dataflow graph.
   * Details: Retracting the given constraint may allow some currently
   * unsatisfiable downstream constraint to be satisfied. We therefore collect
   * a list of unsatisfied downstream constraints and attempt to
   * satisfy each one in turn. This list is traversed by constraint
   * strength, strongest first, as a heuristic for avoiding
   * unnecessarily adding and then overriding weak constraints.
   * Assume: [c] is satisfied.
   */
  def incrementalRemove(c: Constraint) {
    val out = c.output()
    c.markUnsatisfied()
    c.removeFromGraph()
    val unsatisfied = removePropagateFrom(out)
    // Strongest-first retry; stops once WEAKEST has been processed, so
    // Strength.nextWeaker is never called on WEAKEST itself.
    var strength: Strength = REQUIRED
    do {
      for (u <- unsatisfied) {
        if (u.strength == strength) incrementalAdd(u)
      }
      strength = strength.nextWeaker
    } while (strength != WEAKEST)
  }

  /// Select a previously unused mark value.
  def newMark(): Int = {
    currentMark += 1
    currentMark
  }

  /**
   * Extract a plan for resatisfaction starting from the given source
   * constraints, usually a set of input constraints. This method
   * assumes that stay optimization is desired; the plan will contain
   * only constraints whose output variables are not stay. Constraints
   * that do no computation, such as stay and edit constraints, are
   * not included in the plan.
   * Details: The outputs of a constraint are marked when it is added
   * to the plan under construction. A constraint may be appended to
   * the plan when all its input variables are known. A variable is
   * known if either a) the variable is marked (indicating that has
   * been computed by a constraint appearing earlier in the plan), b)
   * the variable is 'stay' (i.e. it is a constant at plan execution
   * time), or c) the variable is not determined by any
   * constraint. The last provision is for past states of history
   * variables, which are not stay but which are also not computed by
   * any constraint.
   * Assume: [sources] are all satisfied.
   */
  def makePlan(sources: Stack[Constraint]) = {
    val mark = newMark()
    val plan = new Plan()
    val todo = sources
    while (!todo.isEmpty) {
      val c = todo.pop()
      // Append once all inputs are known and the output was not produced yet.
      if (c.output().mark != mark && c.inputsKnown(mark)) {
        plan.addConstraint(c)
        c.output().mark = mark
        addConstraintsConsumingTo(c.output(), todo)
      }
    }
    plan
  }

  /**
   * Extract a plan for resatisfying starting from the output of the
   * given [constraints], usually a set of input constraints.
   */
  def extractPlanFromConstraints(constraints: Seq[Constraint]) = {
    val sources = new Stack[Constraint]()

    for (c <- constraints) {
      // if not in plan already and eligible for inclusion.
      if (c.isInput && c.isSatisfied()) sources.push(c)
    }

    makePlan(sources)
  }

  /**
   * Recompute the walkabout strengths and stay flags of all variables
   * downstream of the given constraint and recompute the actual
   * values of all variables whose stay flag is true. If a cycle is
   * detected, remove the given constraint and answer
   * false. Otherwise, answer true.
   * Details: Cycles are detected when a marked variable is
   * encountered downstream of the given constraint. The sender is
   * assumed to have marked the inputs of the given constraint with
   * the given mark. Thus, encountering a marked node downstream of
   * the output constraint means that there is a path from the
   * constraint's output to one of its inputs.
   */
  def addPropagate(c: Constraint, mark: Int): Boolean = {
    val todo = new Stack[Constraint]().push(c)

    while (!todo.isEmpty) {
      val d = todo.pop()

      if (d.output().mark == mark) {
        // Cycle: the walk reached a variable already marked as an input.
        incrementalRemove(c)
        return false
      }

      d.recalculate()
      addConstraintsConsumingTo(d.output(), todo)
    }
    true
  }

  /**
   * Update the walkabout strengths and stay flags of all variables
   * downstream of the given constraint. Answer a collection of
   * unsatisfied constraints sorted in order of decreasing strength.
   */
  def removePropagateFrom(out: Variable): Seq[Constraint] = {
    out.determinedBy = null
    out.walkStrength = WEAKEST
    out.stay = true

    val unsatisfied = new ListBuffer[Constraint]()
    val todo = new Stack[Variable]().push(out)

    while (!todo.isEmpty) {
      val v = todo.pop()

      // Collect candidates that might now become satisfiable.
      for (c <- v.constraints) {
        if (!c.isSatisfied()) unsatisfied += c
      }

      // Recalculate every other satisfied constraint consuming v and continue
      // the walk from its output.
      val determining = v.determinedBy
      for (next <- v.constraints) {
        if (next != determining && next.isSatisfied()) {
          next.recalculate()
          todo.push(next.output())
        }
      }
    }
    unsatisfied
  }

  // Pushes every satisfied constraint consuming v (other than its determiner).
  def addConstraintsConsumingTo(v: Variable, coll: Stack[Constraint]) {
    val determining = v.determinedBy

    for (c <- v.constraints) {
      if (c != determining && c.isSatisfied()) coll.push(c)
    }
  }
}
/**
 * A Plan is an ordered list of constraints to be executed in sequence
 * to resatisfy all currently satisfiable constraints in the face of
 * one or more changing inputs.
 */
class Plan {
  // Constraints in execution order, as appended by the planner.
  private val list = new ListBuffer[Constraint]()

  /** Appends c to the end of the plan. */
  def addConstraint(c: Constraint) {
    list.append(c)
  }

  /** Runs every constraint in insertion order. */
  def execute() {
    list.foreach(_.execute())
  }
}
| sjrd/scalajs-benchmarks | deltablue/src/main/scala/org/scalajs/benchmark/deltablue/DeltaBlue.scala | Scala | bsd-3-clause | 22,446 |
package redmine4s.api.model
import org.joda.time.{DateTime, LocalDate}
import redmine4s.Redmine
/**
 * A Redmine issue as exposed by the REST API.
 *
 * Tuple-typed fields such as `project`, `tracker`, `status`, `priority`,
 * `author`, `fixedVersion`, `assignedTo`, `category` and `watchers` are
 * (id, name) pairs. The trailing `redmine` client reference lets the model
 * call back into the API for show/update/delete.
 */
case class Issue(id: Long,
                 project: (Long, String),
                 tracker: (Long, String),
                 status: (Long, String),
                 priority: (Long, String),
                 author: (Long, String),
                 subject: String,
                 doneRatio: Int,
                 createdOn: DateTime,
                 updatedOn: DateTime,
                 parent: Option[Long],
                 description: Option[String],
                 fixedVersion: Option[(Long, String)],
                 assignedTo: Option[(Long, String)],
                 category: Option[(Long, String)],
                 startDate: Option[LocalDate],
                 dueDate: Option[LocalDate],
                 actualStartDate: Option[LocalDate],
                 actualDueDate: Option[LocalDate],
                 estimatedHours: Option[Double],
                 closedOn: Option[DateTime],
                 customField: Seq[CustomFieldValue],
                 watchers: Option[Seq[(Long, String)]],
                 attachments: Option[Seq[Attachment]],
                 changeSets: Option[Seq[ChangeSet]],
                 journals: Option[Seq[Journal]],
                 children: Option[Seq[ChildIssue]],
                 relations: Option[Seq[IssueRelation]],
                 redmine: Redmine) {
  /** Re-fetches this issue from the server by id. */
  def show: Issue = redmine.showIssue(this.id)

  /**
   * Pushes field changes for this issue to the server and returns the
   * updated issue. All parameters default to None; presumably None fields
   * are left unchanged server-side — confirm against redmine.updateIssue.
   */
  def update(subject: Option[String] = None,
             projectId: Option[Long] = None,
             trackerId: Option[Long] = None,
             statusId: Option[Long] = None,
             priorityId: Option[Long] = None,
             description: Option[String] = None,
             doneRatio: Option[Int] = None,
             categoryId: Option[Long] = None,
             startDate: Option[LocalDate] = None,
             dueDate: Option[LocalDate] = None,
             actualStartDate: Option[LocalDate] = None,
             actualDueDate: Option[LocalDate] = None,
             fixedVersionId: Option[Long] = None,
             assignedToId: Option[Long] = None,
             parentIssueId: Option[Long] = None,
             customFields: Option[Seq[(Long, String)]] = None,
             watcherUserIds: Option[Seq[Long]] = None,
             isPrivate: Option[Boolean] = None,
             estimatedHours: Option[Double] = None,
             uploadFiles: Option[Seq[UploadFile]] = None): Issue = {
    redmine.updateIssue(id, subject, projectId, trackerId, statusId, priorityId, description, doneRatio, categoryId, startDate, dueDate, actualStartDate, actualDueDate, fixedVersionId, assignedToId, parentIssueId, customFields, watcherUserIds, isPrivate, estimatedHours, uploadFiles)
  }

  /** Deletes this issue on the server. */
  def delete(): Unit = redmine.deleteIssue(this.id)
}
/** Lightweight child-issue node: id, owning tracker as an (id, name) pair, subject, and nested children. */
case class ChildIssue(id: Long, tracker: (Long, String), subject: String, children: Seq[ChildIssue])
| tomingtoming/redmine4s | src/main/scala/redmine4s/api/model/Issue.scala | Scala | apache-2.0 | 2,921 |
package org.kirhgoff.lastobot
import akka.actor.{Actor, ActorRef, Props}
import com.typesafe.scalalogging.LazyLogging
import info.mukel.telegram.bots.OptionPimps._
import info.mukel.telegram.bots.TelegramBot
import info.mukel.telegram.bots.api.{InputFile, Message, ReplyKeyboardMarkup, ReplyMarkup}
import org.kirhgoff.lastobot.BotAction._
import scala.collection.mutable
/**
* Created by kirilllastovirya on 26/04/2016.
*
* Class processes requests received from user and converts them
* into internal bot events, creating bots per sender. Receives
* bots replies and converts them to Telegram messages
*/
/**
 * Routes incoming Telegram updates to a per-sender SmokeBot actor and turns
 * bot replies (Text/Keyboard/Picture) back into Telegram API calls.
 *
 * Fix: userActor previously created a UserStorage eagerly on *every* message,
 * even when the sender's actor was already cached; the storage lookup now
 * happens only inside getOrElseUpdate's by-name default, i.e. at most once
 * per sender.
 */
class UserRouter(val bot:TelegramBot) extends Actor with LazyLogging {
  // senderId -> bot actor; each Telegram chat gets its own SmokeBot.
  val senderMap = mutable.Map[Int, ActorRef]()
  val storageFactory = new StorageBotFactory ("localhost", 27017)

  override def receive: Receive = {
    //TODO move out command constants
    case UserCommand(sender, commandName, args) ⇒ commandName match {
      //TODO pass option and skip confirmation if value is set
      case "smoke" => userActor (sender) ! Smoke(args.headOption)
      case "smokestats" => userActor (sender) ! ShowSmokingStats
      case "weight" => userActor (sender) ! Weight(args.headOption)
      case "weightstats" => userActor (sender) ! ShowWeightStats
      case "start" => userActor (sender) ! Start
      case "setlocale" => userActor (sender) ! ChangeLocale
      case "bug" => userActor (sender) ! Bug
      case any => logger.error(s"Unknown command $any")
    }
    case UserTextMessage(msg:Message) =>
      userActor(msg.chat.id) ! UserSaid(msg.text.getOrElse("blah"))

    //Feedback
    case Text(sender:Int, text:String) =>
      bot.sendMessage(sender, text)
    case Keyboard(sender:Int, text:String, buttons:Array[Array[String]]) =>
      bot.sendMessage(sender, text, None, None, None,
        Option(new ReplyKeyboardMarkup(
          buttons,
          resizeKeyboard = true,
          oneTimeKeyboard = true
        )))
    case Picture(sender:Int, filePath:String) =>
      logger.info(s"Received picture $filePath")
      bot.sendPhoto(sender, InputFile(filePath))
  }

  /**
   * Returns the bot actor for this sender, creating the actor and its
   * backing storage only on first contact.
   */
  def userActor(senderId: Int): ActorRef =
    senderMap.getOrElseUpdate(senderId, {
      // Reached only on a cache miss, so storage is created once per sender.
      val userStorage: UserStorage = storageFactory.userStorageFor(senderId)
      context.actorOf(Props(new SmokeBot(senderId, userStorage)), name = s"Sender$senderId")
    })

  // Release storage resources when the actor stops.
  override def postStop() = storageFactory.close()
}
| kirhgoff/lastobot | src/main/scala/org/kirhgoff/lastobot/UserRouter.scala | Scala | gpl-3.0 | 2,439 |
package models
import java.util.UUID
import play.api.libs.json._
import com.mohiva.play.silhouette.api.{ AuthInfo, Identity, LoginInfo }
/** Optional contact details attached to a user: phone number and postal address. */
case class Contact(
  phoneNumber: Option[String],
  address: Option[Address])

object Contact {
  // Play-JSON (de)serializer derived from the case-class shape.
  implicit val jsonFormat = Json.format[Contact]
}
/**
 * The user object.
 *
 * @param userID      The unique ID of the user.
 * @param loginInfo   The linked login info.
 * @param firstName   Maybe the first name of the authenticated user.
 * @param lastName    Maybe the last name of the authenticated user.
 * @param fullName    Maybe the full name of the authenticated user.
 * @param email       Maybe the email of the authenticated provider.
 * @param avatarURL   Maybe the avatar URL of the authenticated provider.
 * @param activated   Indicates that the user has activated its registration.
 * @param contact     Maybe the user's contact details.
 * @param agreedToTOS Whether the user accepted the terms of service.
 */
case class User(
  userID: UUID,
  loginInfo: LoginInfo,
  firstName: Option[String],
  lastName: Option[String],
  fullName: Option[String],
  email: Option[String],
  avatarURL: Option[String],
  activated: Boolean,
  contact: Option[Contact],
  agreedToTOS: Option[Boolean] = Some(false)) extends Identity {

  /**
   * Tries to construct a name: the explicit full name wins; otherwise the
   * available first/last name parts are joined with a space, and None is
   * returned when no part is present.
   */
  def name: Option[String] =
    fullName.orElse(Seq(firstName, lastName).flatten.reduceOption(_ + " " + _))
}
object User {
  // JSON (de)serialization for User.
  implicit val jsonFormat: OFormat[User] = Json.format[User]
}
/**
 * Identity/auth-info pair used for Twilio API authentication.
 *
 * @param userID The unique ID of the user.
 * @param apiPassword The password used to authenticate API calls.
 */
case class TwilioUser(userID: UUID, apiPassword: String) extends AuthInfo with Identity {
}
object TwilioUser {
  // JSON (de)serialization for TwilioUser.
  implicit val jsonFormat: OFormat[TwilioUser] = Json.format[TwilioUser]
}
| vetafi/vetafi-web | app/models/User.scala | Scala | apache-2.0 | 1,715 |
package com.twitter.scrooge.backend
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File
import com.twitter.scrooge.ast._
import com.twitter.scrooge.frontend.{ScroogeInternalException, ResolvedDocument}
import com.twitter.scrooge.mustache.Dictionary._
import com.twitter.scrooge.mustache.HandlebarLoader
/** Factory that wires up the experimental Java generator. */
object JavaGeneratorFactory extends GeneratorFactory {
  // Language key used to select this generator.
  val lang = "experimental-java"
  // Templates live under /javagen/ on the classpath and render .java files.
  val handlebarLoader = new HandlebarLoader("/javagen/", ".java")
  def apply(
    includeMap: Map[String, ResolvedDocument],
    defaultNamespace: String,
    experimentFlags: Seq[String]
  ): ThriftGenerator = new JavaGenerator(includeMap, defaultNamespace, handlebarLoader)
}
/**
 * Template-based code generator that renders the Thrift AST into
 * (experimental) Java sources using the handlebar templates supplied by
 * templatesLoader.
 */
class JavaGenerator(
  val includeMap: Map[String, ResolvedDocument],
  val defaultNamespace: String,
  val templatesLoader: HandlebarLoader
) extends TemplateGenerator {
  def templates: HandlebarLoader = templatesLoader
  val fileExtension = ".java"
  val experimentFlags = Seq.empty[String]
  // Reserved words of the Java language; identifiers colliding with these
  // must be escaped by quoteKeyword below.
  private[this] object JavaKeywords {
    private[this] val set = Set[String](
      "abstract", "assert", "boolean", "break", "byte", "case",
      "catch", "char", "class", "const", "continue", "default", "double",
      "do", "else", "enum", "extends", "false", "final", "finally", "float",
      "for", "goto", "if", "implements", "import", "instanceof", "int",
      "interface", "long", "native", "new", "null", "package", "private",
      "protected", "public", "return", "short", "static", "strictfp",
      "super", "switch", "synchronized", "this", "throw", "throws",
      "transient", "true", "try", "void", "volatile", "while")
    def contains(str: String): Boolean = set.contains(str)
  }
  // put Java keywords in "_" (e.g. "if" becomes "_if_") so generated
  // identifiers remain legal Java
  def quoteKeyword(str: String): String =
    if (JavaKeywords.contains(str))
      "_" + str + "_"
    else
      str
  /**
   * Recursively rewrites identifiers in the AST to Java naming conventions:
   * type names to TitleCase, fields/functions to camelCase, enum fields to
   * UPPER case. Nodes with no naming concern are returned unchanged.
   */
  def normalizeCase[N <: Node](node: N) = {
    (node match {
      case d: Document =>
        d.copy(defs = d.defs.map(normalizeCase(_)))
      case id: Identifier => id.toTitleCase
      case e: EnumRHS =>
        e.copy(normalizeCase(e.enum), normalizeCase(e.value))
      case f: Field =>
        f.copy(
          sid = f.sid.toCamelCase,
          default = f.default.map(normalizeCase(_)))
      case f: Function =>
        f.copy(
          funcName = f.funcName.toCamelCase,
          args = f.args.map(normalizeCase(_)),
          throws = f.throws.map(normalizeCase(_)))
      case c: ConstDefinition =>
        c.copy(value = normalizeCase(c.value))
      case e: Enum =>
        e.copy(values = e.values.map(normalizeCase(_)))
      case e: EnumField =>
        e.copy(sid = e.sid.toUpperCase)
      case s: Struct =>
        s.copy(fields = s.fields.map(normalizeCase(_)))
      case f: FunctionArgs =>
        f.copy(fields = f.fields.map(normalizeCase(_)))
      case f: FunctionResult =>
        f.copy(fields = f.fields.map(normalizeCase(_)))
      case e: Exception_ =>
        e.copy(fields = e.fields.map(normalizeCase(_)))
      case s: Service =>
        s.copy(functions = s.functions.map(normalizeCase(_)))
      case n => n
    }).asInstanceOf[N]
  }
  // Renders a list constant as a Utilities.makeList(...) call.
  def genList(list: ListRHS, mutable: Boolean = false): CodeFragment = {
    val typeArguments = list.listType match {
      case Some(ListType(t,_)) => "<" + genType(t).toData + ">"
      case _ => ""
    }
    val code = "Utilities." + typeArguments + "makeList(" +
      list.elems.map(genConstant(_).toData).mkString(", ") + ")"
    codify(code)
  }
  // Renders a set constant as a Utilities.makeSet(...) call.
  def genSet(set: SetRHS, mutable: Boolean = false): CodeFragment = {
    val typeArguments = set.setType match {
      case Some(SetType(t, _)) => "<" + genType(t).toData + ">"
      case _ => ""
    }
    val code = "Utilities." + typeArguments + "makeSet(" +
      set.elems.map(genConstant(_).toData).mkString(", ") + ")"
    codify(code)
  }
  // Renders a map constant as Utilities.makeMap(Utilities.makeTuple(k, v), ...).
  def genMap(map: MapRHS, mutable: Boolean = false): CodeFragment = {
    val typeArguments = map.mapType match {
      case Some(MapType(k, v, _)) => "<" + genType(k).toData + ", " + genType(v).toData + ">"
      case _ => ""
    }
    val code = "Utilities." + typeArguments + "makeMap(" + (map.elems.map {
      case (k, v) =>
        "Utilities.makeTuple(" + genConstant(k).toData + ", " + genConstant(v).toData + ")"
    } mkString (", ")) + ")"
    codify(code)
  }
  // Renders an enum constant reference, scoped by the (qualified) enum type.
  def genEnum(enum: EnumRHS, fieldType: Option[FieldType] = None): CodeFragment = {
    def getTypeId: Identifier = fieldType.getOrElse(Void) match {
      case n: NamedType => qualifyNamedType(n)
      case _ => enum.enum.sid
    }
    genID(enum.value.sid.toUpperCase.addScope(getTypeId.toTitleCase))
  }
  // Renders a struct constant via the generated Builder API.
  def genStruct(struct: StructRHS): CodeFragment =
    codify("new " + struct.sid.name + ".Builder()" +
      struct.elems.map {
        case (field, rhs) => "." + field.sid.toCamelCase.name + "(" + genConstant(rhs) + ")"
      }.mkString("") +
      ".build()")
  /**
   * Generates a suffix to append to a field expression that will
   * convert the value to an immutable equivalent.
   */
  def genToImmutable(t: FieldType): CodeFragment = {
    val code = t match {
      case MapType(_, _, _) => ".toMap"
      case SetType(_, _) => ".toSet"
      case ListType(_, _) => ".toList"
      case _ => ""
    }
    codify(code)
  }
  /**
   * Generates a suffix to append to a field expression that will
   * convert the value to an immutable equivalent.
   * For optional fields the conversion is mapped over the Option.
   */
  def genToImmutable(f: Field): CodeFragment = {
    if (f.requiredness.isOptional) {
      val code = genToImmutable(f.fieldType).toData match {
        case "" => ""
        case underlyingToImmutable => ".map(_" + underlyingToImmutable + ")"
      }
      codify(code)
    } else {
      genToImmutable(f.fieldType)
    }
  }
  /**
   * Generates a prefix and suffix to wrap around a field expression that will
   * convert the value to a mutable equivalent.
   */
  def toMutable(t: FieldType): (String, String) = {
    t match {
      case MapType(_, _, _) | SetType(_, _) => (genType(t, true).toData + "() ++= ", "")
      case ListType(_, _) => ("", ".toBuffer")
      case _ => ("", "")
    }
  }
  /**
   * Generates a prefix and suffix to wrap around a field expression that will
   * convert the value to a mutable equivalent.
   * For optional fields the conversion is mapped over the Option.
   */
  def toMutable(f: Field): (String, String) = {
    if (f.requiredness.isOptional) {
      toMutable(f.fieldType) match {
        case ("", "") => ("", "")
        case (prefix, suffix) => ("", ".map(" + prefix + "_" + suffix + ")")
      }
    } else {
      toMutable(f.fieldType)
    }
  }
  // Default values: empty containers for collections, 0L for i64, otherwise
  // whatever the base generator produces.
  override def genDefaultValue(fieldType: FieldType, mutable: Boolean = false): CodeFragment = {
    fieldType match {
      case t@MapType(_, _, _) => genMap(MapRHS(elems = Nil, mapType = Some(t)))
      case s@SetType(_, _) => genSet(SetRHS(elems = Set(), setType = Some(s)))
      case l@ListType(_, _) => genList(ListRHS(elems = Nil, listType = Some(l)))
      case TI64 => codify("0L")
      case _ => super.genDefaultValue(fieldType, mutable)
    }
  }
  // i64 integer literals need the Java "L" suffix.
  override def genConstant(constant: RHS, mutable: Boolean = false, fieldType: Option[FieldType] = None): CodeFragment = {
    (constant, fieldType) match {
      case (IntLiteral(value), Some(TI64)) => codify(value.toString + "L")
      case _ => super.genConstant(constant, mutable, fieldType)
    }
  }
  // Maps a Thrift type to the corresponding boxed/reference Java type name.
  def genType(t: FunctionType, mutable: Boolean = false): CodeFragment = {
    val code = t match {
      case Void => "Void"
      case OnewayVoid => "Void"
      case TBool => "Boolean"
      case TByte => "Byte"
      case TI16 => "Short"
      case TI32 => "Integer"
      case TI64 => "Long"
      case TDouble => "Double"
      case TString => "String"
      case TBinary => "ByteBuffer"
      case MapType(k, v, _) => "Map<" + genType(k).toData + ", " + genType(v).toData + ">"
      case SetType(x, _) => "Set<" + genType(x).toData + ">"
      case ListType(x, _) => "List<" + genType(x).toData + ">"
      case n: NamedType => genID(qualifyNamedType(n).toTitleCase).toData
      case r: ReferenceType =>
        throw new ScroogeInternalException("ReferenceType should not appear in backend")
    }
    codify(code)
  }
  // Maps a Thrift type to the primitive Java type where one exists,
  // falling back to genType for everything else.
  def genPrimitiveType(t: FunctionType, mutable: Boolean = false): CodeFragment = {
    val code = t match {
      case Void => "void"
      case TBool => "boolean"
      case TByte => "byte"
      case TI16 => "short"
      case TI32 => "int"
      case TI64 => "long"
      case TDouble => "double"
      case _ => genType(t, mutable).toData
    }
    codify(code)
  }
  // Optional fields are wrapped in scrooge's Option type; required fields
  // use the primitive form when available.
  def genFieldType(f: Field, mutable: Boolean = false): CodeFragment = {
    val code = if (f.requiredness.isOptional) {
      val baseType = genType(f.fieldType, mutable).toData
      "com.twitter.scrooge.Option<" + baseType + ">"
    } else {
      genPrimitiveType(f.fieldType).toData
    }
    codify(code)
  }
  // Renders a "Type name" parameter list for the given fields.
  def genFieldParams(fields: Seq[Field], asVal: Boolean = false): CodeFragment = {
    val code = fields.map {
      f =>
        genFieldType(f).toData + " " + genID(f.sid).toData
    }.mkString(", ")
    codify(code)
  }
  def genBaseFinagleService = codify("Service<byte[], byte[]>")
  def getParentFinagleService(p: ServiceParent): CodeFragment =
    genID(SimpleID("FinagledService").addScope(getServiceParentID(p)))
  def getParentFinagleClient(p: ServiceParent): CodeFragment =
    genID(SimpleID("FinagledClient").addScope(getServiceParentID(p)))
  // Emits a $FinagleClient file only when the WithFinagle option is set.
  override def finagleClientFile(
    packageDir: File,
    service: Service, options:
    Set[ServiceOption]
  ): Option[File] =
    options.find(_ == WithFinagle) map { _ =>
      new File(packageDir, service.sid.toTitleCase.name + "$FinagleClient" + fileExtension)
    }
  // Emits a $FinagleService file only when the WithFinagle option is set.
  override def finagleServiceFile(
    packageDir: File,
    service: Service, options:
    Set[ServiceOption]
  ): Option[File] =
    options.find(_ == WithFinagle) map { _ =>
      new File(packageDir, service.sid.toTitleCase.name + "$FinagleService" + fileExtension)
    }
  // NOTE(review): Hystrix output is also gated on WithFinagle — presumably
  // intentional since the Hystrix wrapper builds on the Finagle client, but
  // confirm there is no separate Hystrix service option.
  override def hystrixServiceFile(
    packageDir: File,
    service: Service, options:
    Set[ServiceOption]
  ): Option[File] =
    options.find(_ == WithFinagle) map { _ =>
      new File(packageDir, "Hystrix" + service.sid.toTitleCase.name + fileExtension)
    }
}
| eirslett/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/backend/JavaGenerator.scala | Scala | apache-2.0 | 10,859 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.rules._
/**
 * Rewrites predicates so that NOT is pushed down through And/Or
 * (De Morgan's laws) and eliminated against comparison operators
 * and double negation.
 */
object NotPusher extends Rule[Expression] {
  def apply(pred: Expression): Expression = pred transformDown {
    // De Morgan: distribute NOT over conjunction/disjunction.
    case Not(And(lhs, rhs)) => Or(Not(lhs), Not(rhs))
    case Not(Or(lhs, rhs)) => And(Not(lhs), Not(rhs))
    case original @ Not(inner) =>
      // Invert comparisons and collapse double negation. These patterns
      // may have been handled by the optimizer already, but pushing NOT
      // down can expose new opportunities. Anything else is left as-is.
      inner match {
        case GreaterThan(lhs, rhs) => LessThanOrEqual(lhs, rhs)
        case GreaterThanOrEqual(lhs, rhs) => LessThan(lhs, rhs)
        case LessThan(lhs, rhs) => GreaterThanOrEqual(lhs, rhs)
        case LessThanOrEqual(lhs, rhs) => GreaterThan(lhs, rhs)
        case Not(e) => e
        case _ => original
      }
  }
}
| bomeng/HSpark | src/main/scala/org/apache/spark/sql/catalyst/NotPusher.scala | Scala | apache-2.0 | 1,649 |
package nimrod
import com.twitter.util.Eval
/**
 * Entry point for the nimrod workflow runner. Either starts a server
 * ("-d port") or assembles the given script into a Workflow program and
 * evaluates it with Twitter's Eval compiler.
 */
object Main {
  // Prints usage and terminates the JVM (does not return normally).
  private def printUsage = {
    System.err.println("Usage: ./nimrod script.scala [args]")
    System.exit(-1)
  }
  def main(_args : Array[String]) {
    // Daemon mode: "-d <port>" starts a NimrodServer instead of running a script.
    if(_args.length == 2 && _args(0) == "-d") {
      val port = try {
        _args(1).toInt
      } catch {
        case nfx : NumberFormatException => {
          // printUsage calls System.exit, so the 0 below is never used.
          printUsage
          0
        }
      }
      new NimrodServer(port).start
    } else {
      var args = _args.toList
      if(args.length < 1) {
        printUsage
      }
      // -s N : start the workflow at step N; -l : only list the steps.
      var beginStep = 1
      var listMode = false
      // NOTE(review): if the argument list is exhausted while flags remain
      // (e.g. only "-l" is given), args(0) below throws on the empty list
      // instead of printing usage — confirm intended behavior.
      while(args(0).startsWith("-")) {
        args(0) match {
          case "-s" => {
            beginStep = args(1).toInt
            args = args.drop(2)
          }
          case "-l" => {
            listMode = true
            args = args.drop(1)
          }
          case _ => printUsage
        }
      }
      // Build the script source: imports + implicit workflow + opts header,
      // then the user script, then the start/list trailer.
      val programSB = new StringBuilder()
      val ln = System.getProperty("line.separator")
      programSB.append("import nimrod._ ; ")
      programSB.append("import nimrod.tasks._ ; ")
      programSB.append("import java.io._ ; ")
      programSB.append("implicit val workflow = new Workflow(\\""+args(0)+"\\") ; ")
      programSB.append("val opts = new Opts(Array[String](" + args.drop(1).map("\\""+_+"\\"").mkString(",") + ")) ; ")
      for(line <- io.Source.fromFile(args(0)).getLines) {
        programSB.append(line + ln)
      }
      if(listMode) {
        programSB.append("workflow.list " + ln)
      } else {
        programSB.append("workflow.start(" + beginStep + ")" + ln)
      }
      Preprocessor(programSB)
      // Compile and run the assembled program; report workflow and
      // compilation errors without a stack trace.
      try {
        new Eval()(programSB.toString())
      } catch {
        case x : WorkflowException => System.err.println(x.getMessage())
        case x : Eval.CompilerException => {
          System.err.println("The scripts has the following errors:")
          System.err.println(x.getMessage())
        }
      }
    }
  }
}
| jmccrae/nimrod | src/main/scala/nimrod/Main.scala | Scala | apache-2.0 | 1,982 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.webhooks
import org.specs2.execute.Result
import org.specs2.mutable._
import org.json4s.JObject
import org.json4s.DefaultFormats
import org.json4s.native.JsonMethods.parse
import org.json4s.native.Serialization.write
/** Shared spec helpers for checking webhook connector conversions. */
trait ConnectorTestUtil extends Specification {
  implicit val formats = DefaultFormats
  /** Asserts that converting a raw JSON payload yields the expected event JSON. */
  def check(connector: JsonConnector, original: String, event: String): Result = {
    val incoming = parse(original).asInstanceOf[JObject]
    val expected = parse(event).asInstanceOf[JObject]
    // Serialize and re-parse so JNothing fields are dropped before comparing.
    val actual = parse(write(connector.toEventJson(incoming))).asInstanceOf[JObject]
    actual.obj must containTheSameElementsAs(expected.obj)
  }
  /** Asserts that converting form data yields the expected event JSON. */
  def check(connector: FormConnector, original: Map[String, String], event: String) = {
    val expected = parse(event).asInstanceOf[JObject]
    // Serialize and re-parse so JNothing fields are dropped before comparing.
    val actual = parse(write(connector.toEventJson(original))).asInstanceOf[JObject]
    actual.obj must containTheSameElementsAs(expected.obj)
  }
}
| pferrel/PredictionIO | data/src/test/scala/org/apache/predictionio/data/webhooks/ConnectorTestUtil.scala | Scala | apache-2.0 | 1,949 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import akka.actor.{ActorRef, Cancellable, ActorPath}
import kafka.manager.features.KMJMXMetricsFeature
import kafka.manager.utils.FiniteQueue
import org.joda.time.DateTime
import scala.collection.immutable.Queue
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
import ActorModel._
/**
 * Configuration for BrokerViewCacheActor.
 *
 * @param kafkaStateActorPath path of the actor supplying topic/broker state
 * @param clusterContext cluster features and configuration
 * @param longRunningPoolConfig pool settings for long-running JMX work
 * @param updatePeriod how often the cached view is refreshed
 */
case class BrokerViewCacheActorConfig(kafkaStateActorPath: ActorPath,
                                      clusterContext: ClusterContext,
                                      longRunningPoolConfig: LongRunningPoolConfig,
                                      updatePeriod: FiniteDuration = 10 seconds)
/**
 * Caches per-broker views (topic/partition assignments plus JMX metrics) for
 * one cluster. The cache is refreshed every config.updatePeriod by asking the
 * kafka state actor for topic descriptions and the broker list; JMX metric
 * collection is scheduled on the long-running pool.
 */
class BrokerViewCacheActor(config: BrokerViewCacheActorConfig) extends LongRunningPoolActor {
  private[this] val ZERO = BigDecimal(0)
  // Handle for the periodic BVForceUpdate schedule; cancelled in postStop.
  private[this] var cancellable : Option[Cancellable] = None
  private[this] var topicIdentities : Map[String, TopicIdentity] = Map.empty
  private[this] var topicDescriptionsOption : Option[TopicDescriptions] = None
  private[this] var brokerListOption : Option[BrokerList] = None
  private[this] var brokerMetrics : Map[Int, BrokerMetrics] = Map.empty
  private[this] val brokerTopicPartitions : mutable.Map[Int, BVView] = new mutable.HashMap[Int, BVView]
  // topic -> (broker id -> metrics for that topic on that broker)
  private[this] val topicMetrics: mutable.Map[String, mutable.Map[Int, BrokerMetrics]] =
    new mutable.HashMap[String, mutable.Map[Int, BrokerMetrics]]()
  private[this] var combinedBrokerMetric : Option[BrokerMetrics] = None
  private[this] val EMPTY_BVVIEW = BVView(Map.empty, config.clusterContext, Option(BrokerMetrics.DEFAULT))
  // Bounded (10-entry) history of messages-in counts per broker.
  private[this] var brokerMessagesPerSecCountHistory : Map[Int, Queue[BrokerMessagesPerSecCount]] = Map.empty

  // Schedule the periodic view refresh when the actor starts.
  override def preStart() = {
    log.info("Started actor %s".format(self.path))
    log.info("Scheduling updater for %s".format(config.updatePeriod))
    cancellable = Some(
      context.system.scheduler.schedule(0 seconds,
        config.updatePeriod,
        self,
        BVForceUpdate)(context.system.dispatcher,self)
    )
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Stopped actor %s".format(self.path))
    log.info("Cancelling updater...")
    Try(cancellable.map(_.cancel()))
    super.postStop()
  }

  override protected def longRunningPoolConfig: LongRunningPoolConfig = config.longRunningPoolConfig

  override protected def longRunningQueueFull(): Unit = {
    log.error("Long running pool queue full, skipping!")
  }

  /**
   * Enriches a broker view with this broker's share of the cluster-wide
   * message/byte rates and its message-count history.
   */
  private def produceBViewWithBrokerClusterState(bv: BVView, id: Int) : BVView = {
    val bcs = for {
      metrics <- bv.metrics
      cbm <- combinedBrokerMetric
    } yield {
      val perMessages = if(cbm.messagesInPerSec.oneMinuteRate > 0) {
        BigDecimal(metrics.messagesInPerSec.oneMinuteRate / cbm.messagesInPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
      } else ZERO
      val perIncoming = if(cbm.bytesInPerSec.oneMinuteRate > 0) {
        BigDecimal(metrics.bytesInPerSec.oneMinuteRate / cbm.bytesInPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
      } else ZERO
      val perOutgoing = if(cbm.bytesOutPerSec.oneMinuteRate > 0) {
        BigDecimal(metrics.bytesOutPerSec.oneMinuteRate / cbm.bytesOutPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
      } else ZERO
      BrokerClusterStats(perMessages, perIncoming, perOutgoing)
    }
    val messagesPerSecCountHistory = brokerMessagesPerSecCountHistory.get(id)
    if(bcs.isDefined) {
      bv.copy(stats = bcs, messagesPerSecCountHistory = messagesPerSecCountHistory)
    } else {
      bv.copy(messagesPerSecCountHistory = messagesPerSecCountHistory)
    }
  }

  // All broker views, ordered by broker id.
  private def allBrokerViews(): Seq[BVView] = {
    brokerTopicPartitions.keySet.toSeq.sorted.flatMap { id =>
      brokerTopicPartitions.get(id).map(bv => produceBViewWithBrokerClusterState(bv, id))
    }
  }

  override def processActorRequest(request: ActorRequest): Unit = {
    request match {
      case BVForceUpdate =>
        log.info("Updating broker view...")
        //ask for topic descriptions
        val lastUpdateMillisOption: Option[Long] = topicDescriptionsOption.map(_.lastUpdateMillis)
        context.actorSelection(config.kafkaStateActorPath).tell(KSGetAllTopicDescriptions(lastUpdateMillisOption), self)
        context.actorSelection(config.kafkaStateActorPath).tell(KSGetBrokers, self)
      case BVGetViews =>
        sender ! allBrokerViews()
      case BVGetView(id) =>
        sender ! brokerTopicPartitions.get(id).map { bv =>
          produceBViewWithBrokerClusterState(bv, id)
        }
      case BVGetBrokerMetrics =>
        sender ! brokerMetrics
      case BVGetTopicMetrics(topic) =>
        // Aggregate the per-broker metrics for this topic.
        sender ! topicMetrics.get(topic).map(m => m.values.foldLeft(BrokerMetrics.DEFAULT)((acc,bm) => acc + bm))
      case BVGetTopicIdentities =>
        sender ! topicIdentities
      case BVUpdateTopicMetricsForBroker(id, metrics) =>
        metrics.foreach {
          case (topic, bm) =>
            val tm = topicMetrics.getOrElse(topic, new mutable.HashMap[Int, BrokerMetrics])
            tm.put(id, bm)
            topicMetrics.put(topic, tm)
        }
      case BVUpdateBrokerMetrics(id, metrics) =>
        brokerMetrics += (id -> metrics)
        combinedBrokerMetric = Option(brokerMetrics.values.foldLeft(BrokerMetrics.DEFAULT)((acc, m) => acc + m))
        val updatedBVView = brokerTopicPartitions.getOrElse(id, EMPTY_BVVIEW).copy(metrics = Option(metrics))
        brokerTopicPartitions.put(id, updatedBVView)
        val now = DateTime.now()
        val messagesCount = BrokerMessagesPerSecCount(now, metrics.messagesInPerSec.count)
        brokerMessagesPerSecCountHistory += (id -> brokerMessagesPerSecCountHistory.get(id).map {
          history =>
            history.enqueueFinite(messagesCount, 10)
        }.getOrElse {
          Queue(messagesCount)
        })
      case any: Any => log.warning("bvca : processActorRequest : Received unknown message: {}", any)
    }
  }

  override def processActorResponse(response: ActorResponse): Unit = {
    response match {
      case td: TopicDescriptions =>
        topicDescriptionsOption = Some(td)
        updateView()
      case bl: BrokerList =>
        brokerListOption = Some(bl)
        updateView()
      case any: Any => log.warning("bvca : processActorResponse : Received unknown message: {}", any)
    }
  }

  implicit def queue2finitequeue[A](q: Queue[A]): FiniteQueue[A] = new FiniteQueue[A](q)

  // Rebuilds the per-broker views and (if the JMX feature is enabled and
  // the pool has capacity) schedules metric collection for every broker.
  private[this] def updateView(): Unit = {
    for {
      brokerList <- brokerListOption
      topicDescriptions <- topicDescriptionsOption
    } {
      val topicIdentity : IndexedSeq[TopicIdentity] = topicDescriptions.descriptions.map(
        TopicIdentity.from(brokerList.list.size,_,None, config.clusterContext))
      topicIdentities = topicIdentity.map(ti => (ti.topic, ti)).toMap
      val topicPartitionByBroker = topicIdentity.flatMap(
        ti => ti.partitionsByBroker.map(btp => (ti,btp.id,btp.partitions))).groupBy(_._2)
      // Check for 2 * broker list size since we schedule 2 jmx calls for each
      // broker (per-topic metrics and broker-level metrics).
      // Fixed: this previously checked 2*brokerListOption.size, which is at
      // most 2 since Option.size is 0 or 1 — not the broker count.
      if (config.clusterContext.clusterFeatures.features(KMJMXMetricsFeature) && hasCapacityFor(2 * brokerList.list.size)) {
        implicit val ec = longRunningExecutionContext
        val brokerLookup = brokerList.list.map(bi => bi.id -> bi).toMap
        topicPartitionByBroker.foreach {
          case (brokerId, topicPartitions) =>
            val brokerInfoOpt = brokerLookup.get(brokerId)
            brokerInfoOpt.foreach {
              broker =>
                longRunning {
                  Future {
                    val tryResult = KafkaJMX.doWithConnection(broker.host, broker.jmxPort) {
                      mbsc =>
                        topicPartitions.map {
                          case (topic, id, partitions) =>
                            (topic.topic,
                              KafkaMetrics.getBrokerMetrics(config.clusterContext.config.version, mbsc, Option(topic.topic)))
                        }
                    }
                    // On JMX failure, fall back to default metrics so the view stays populated.
                    val result = tryResult match {
                      case scala.util.Failure(t) =>
                        log.error(t, s"Failed to get topic metrics for broker $broker")
                        topicPartitions.map {
                          case (topic, id, partitions) =>
                            (topic.topic, BrokerMetrics.DEFAULT)
                        }
                      case scala.util.Success(bm) => bm
                    }
                    self.tell(BVUpdateTopicMetricsForBroker(broker.id,result), ActorRef.noSender)
                  }
                }
            }
        }
        brokerList.list.foreach {
          broker =>
            longRunning {
              Future {
                val tryResult = KafkaJMX.doWithConnection(broker.host, broker.jmxPort) {
                  mbsc =>
                    KafkaMetrics.getBrokerMetrics(config.clusterContext.config.version, mbsc)
                }
                val result = tryResult match {
                  case scala.util.Failure(t) =>
                    log.error(t, s"Failed to get broker metrics for $broker")
                    BrokerMetrics.DEFAULT
                  case scala.util.Success(bm) => bm
                }
                self.tell(BVUpdateBrokerMetrics(broker.id,result), ActorRef.noSender)
              }
            }
        }
      } else if(config.clusterContext.clusterFeatures.features(KMJMXMetricsFeature)) {
        log.warning("Not scheduling update of JMX for all brokers, not enough capacity!")
      }
      topicPartitionByBroker.foreach {
        case (brokerId, topicPartitions) =>
          val topicPartitionsMap: Map[TopicIdentity, IndexedSeq[Int]] = topicPartitions.map {
            case (topic, id, partitions) =>
              (topic, partitions)
          }.toMap
          brokerTopicPartitions.put(
            brokerId, BVView(topicPartitionsMap, config.clusterContext, brokerMetrics.get(brokerId)))
      }
    }
  }
}
| mjtieman/kafka-manager | app/kafka/manager/BrokerViewCacheActor.scala | Scala | apache-2.0 | 10,438 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Test
// scalastyle:off magic.number
/** Tests the checker that forbids whitespace immediately after configured tokens. */
class DisallowSpaceAfterTokenTest extends AssertionsForJUnit with CheckerTest {
  override protected val key: String = "disallow.space.after.token"
  override protected val classUnderTest = classOf[DisallowSpaceAfterTokenChecker]
  // No space after "(" — no errors expected.
  @Test def testOK(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(i: Int)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // A newline after the token must not be flagged.
  @Test def testNewLine(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(
        |i: Int, c: Int)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // A space after "(" must be reported at the token's column.
  @Test def testFailureCases(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A( i: Int, c: Int)
        |
        |
      """.stripMargin
    assertErrors(List(columnError(4, 12, List("("))), source)
  }
}
/** Tests the checker that forbids whitespace immediately before configured tokens. */
class DisallowSpaceBeforeTokenTest extends AssertionsForJUnit with CheckerTest {
  override protected val key: String = "disallow.space.before.token"
  override protected val classUnderTest = classOf[DisallowSpaceBeforeTokenChecker]
  // Well-formed declaration — no errors expected.
  @Test def testOK(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(i: Int)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // A newline before the token must not be flagged.
  @Test def testNewLine(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(
        |i: Int, c: Int
        |)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // Colons inside comments and symbolic operators must not be flagged.
  @Test def testEdgeCases(): Unit = {
    val source =
      """
        |package foobar
        |
        |class Dummy[T: Manifest] {
        |  val a: Int = 0
        |  // A:B:C
        |  def b(ch: Int): Int = 1
        |  d ++: e
        |  d :\\ e
        |  e /: d
        |}
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // Spaces before ":" and ")" must be reported at the offending columns.
  @Test def testFailureCases(): Unit = {
    val source =
      """
        |package foobar
        |
        |class Dummy[T : Manifest] {
        |  val a : Int = 0
        |  // A:B:C
        |  def b (ch : Int): Int = 1
        |  def c (ch: Int ) : Int = 1
        |  val d:Int = 2
        |  val e :Int = 3
        |}
        |
      """.stripMargin
    assertErrors(List(columnError(4, 14, List(":")), columnError(5, 8, List(":")),
      columnError(7, 14, List(":")), columnError(8, 17, List(")")),
      columnError(8, 19, List(":")), columnError(10, 8, List(":"))), source)
  }
}
/** Tests the checker that requires exactly one space after configured tokens. */
class EnsureSpaceAfterTokenTest extends AssertionsForJUnit with CheckerTest {
  override protected val key: String = "ensure.single.space.after.token"
  override protected val classUnderTest = classOf[EnsureSingleSpaceAfterTokenChecker]
  // Single space after ":" — no errors expected.
  @Test def testOK(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(i: Int)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // A newline after the token must not be flagged.
  @Test def testNewLine(): Unit = {
    val source =
      """
        |package foobar
        |
        |case class A(
        |i: Int, c: Int
        |)
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // Colons inside comments and symbolic operators must not be flagged.
  @Test def testEdgeCases(): Unit = {
    val source =
      """
        |package foobar
        |
        |class Dummy[T: Manifest] {
        |  val a: Int = 0
        |  // A:B:C
        |  def b(ch: Int): Int = 1
        |  d ++: e
        |  d :\\ e
        |  e /: d
        |}
        |
      """.stripMargin
    assertErrors(List(), source)
  }
  // Tokens followed by more or less than one space must be reported.
  @Test def testFailureCases(): Unit = {
    val source =
      """
        |package foobar
        |
        |class Dummy[T : Manifest] {
        |  val a : Int = 0
        |  // A:B:C
        |  def b (ch : Int): Int = 1
        |  def c (ch: Int ) : Int = 1
        |  val d:Int = 2
        |  val e :Int = 3
        |}
        |
      """.stripMargin
    assertErrors(List(columnError(7, 20, List(":")), columnError(9, 7, List(":")), columnError(10, 8, List(":"))), source)
  }
}
| dwango/scalastyle | src/test/scala/org/scalastyle/scalariform/SpaceAroundTokenCheckerTest.scala | Scala | apache-2.0 | 4,901 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands._
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.KeyToken
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.Effects
import org.neo4j.cypher.internal.compiler.v2_3.planDescription.{NoChildren, PlanDescriptionImpl}
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
/**
 * Leaf pipe that executes a schema constraint create/drop operation against
 * the query context and yields no rows.
 */
class ConstraintOperationPipe(op: PropertyConstraintOperation, keyToken: KeyToken, propertyKey: KeyToken)
                             (implicit val monitor: PipeMonitor) extends Pipe {
  protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = {
    // Resolve (or create) the label/rel-type and property key ids first.
    val keyTokenId = keyToken.getOrCreateId(state.query)
    val propertyKeyId = propertyKey.getOrCreateId(state.query)
    // Dispatch on the operation type; each branch delegates to the query context.
    op match {
      case _: CreateUniqueConstraint => state.query.createUniqueConstraint(keyTokenId, propertyKeyId)
      case _: DropUniqueConstraint => state.query.dropUniqueConstraint(keyTokenId, propertyKeyId)
      case _: CreateNodePropertyExistenceConstraint => state.query.createNodePropertyExistenceConstraint(keyTokenId, propertyKeyId)
      case _: DropNodePropertyExistenceConstraint => state.query.dropNodePropertyExistenceConstraint(keyTokenId, propertyKeyId)
      case _: CreateRelationshipPropertyExistenceConstraint => state.query.createRelationshipPropertyExistenceConstraint(keyTokenId, propertyKeyId)
      case _: DropRelationshipPropertyExistenceConstraint => state.query.dropRelationshipPropertyExistenceConstraint(keyTokenId, propertyKeyId)
    }
    // Schema operations produce no result rows.
    Iterator.empty
  }
  def symbols = new SymbolTable()
  def planDescription = new PlanDescriptionImpl(this.id, "ConstraintOperation", NoChildren, Seq.empty, identifiers)
  def exists(pred: Pipe => Boolean) = pred(this)
  // This is a leaf pipe: duplicating it requires no sources.
  def dup(sources: List[Pipe]): Pipe = {
    require(sources.isEmpty)
    this
  }
  def sources: Seq[Pipe] = Seq.empty
  override val localEffects = Effects()
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/ConstraintOperationPipe.scala | Scala | apache-2.0 | 2,883 |
package domala.internal.macros.reflect
import domala.internal.jdbc.command._
import domala.internal.macros.DaoParam
import domala.internal.macros.reflect.mock.{MockEmbeddable, MockEntity, MockHolder}
import domala.jdbc.Result
import domala.jdbc.query.EntityAndEntityDesc
import org.scalatest.FunSuite
import org.seasar.doma.internal.jdbc.command._
/** Verifies that the `DaoReflectionMacros` compile-time macros select the correct
  * result/stream handler implementation (and entity metadata) for entity types,
  * holder (domain) types and plain values. Cases that must fail at compile time
  * are kept as commented-out invocations.
  */
class DaoReflectionMacrosTestSuite extends FunSuite {
  test("getOptionSingleResultHandler for Entity") {
    assert(DaoReflectionMacros.getOptionSingleResultHandler[DaoReflectionMacrosTestSuite, MockEntity](classOf[DaoReflectionMacrosTestSuite], "get Handler for Entity").isInstanceOf[OptionEntitySingleResultHandler[_]])
  }
  test("getOptionSingleResultHandler for Domain") {
    assert(DaoReflectionMacros.getOptionSingleResultHandler[DaoReflectionMacrosTestSuite, MockHolder](classOf[DaoReflectionMacrosTestSuite], "get Handler for Domain").isInstanceOf[OptionHolderSingleResultHandler[_, _]])
  }
  test("getOptionSingleResultHandler for Other") {
    // Compile error: the macro rejects unsupported types, so this call must stay commented out.
    //DaoReflectionMacros.getOptionSingleResultHandler[DaoReflectionMacrosTestSuite, String](classOf[DaoReflectionMacrosTestSuite], "type error")
  }
  test("getResultListHandler for Entity") {
    assert(DaoReflectionMacros.getResultListHandler[DaoReflectionMacrosTestSuite, MockEntity](classOf[DaoReflectionMacrosTestSuite], "get Handler for Entity").isInstanceOf[EntityResultListHandler[_]])
  }
  test("getResultListHandler for Domain") {
    assert(DaoReflectionMacros.getResultListHandler[DaoReflectionMacrosTestSuite, MockHolder](classOf[DaoReflectionMacrosTestSuite], "get Handler for Domain").isInstanceOf[DomainResultListHandler[_, _]])
  }
  test("getResultListHandler for Other") {
    // Compile error: the macro rejects unsupported types, so this call must stay commented out.
    //DaoReflectionMacros.getResultListHandler[DaoReflectionMacrosTestSuite, String](classOf[DaoReflectionMacrosTestSuite], "getResultListHandler")
  }
  test("getStreamHandler for Entity") {
    assert(DaoReflectionMacros.getStreamHandler((p: Stream[MockEntity]) => p.toString, classOf[DaoReflectionMacrosTestSuite], "get Handler for Entity").isInstanceOf[EntityStreamHandler[_, _]])
  }
  test("getStreamHandler for Domain") {
    assert(DaoReflectionMacros.getStreamHandler((p: Stream[MockHolder]) => p.toString, classOf[DaoReflectionMacrosTestSuite], "get Handler for Entity").isInstanceOf[DomainStreamHandler[_, _, _]])
  }
  test("getStreamHandler for Other") {
    // Compile error: the macro rejects unsupported types, so this call must stay commented out.
    //assert(DaoReflectionMacros.getStreamHandler((p: Stream[String]) => p.toString, classOf[DaoReflectionMacrosTestSuite], "get Handler for Entity").isInstanceOf[DomainStreamHandler[_, _, _]])
  }
  test("getEntityAndEntityDesc has entity") {
    // The first entity-typed parameter ("bbb") is the one the macro should pick.
    val entity1 = MockEntity(1, MockHolder("aa"), "bb", MockEmbeddable(2, "cc"), 2)
    val entity2 = MockEntity(2, MockHolder("bb"), "cc", MockEmbeddable(1, "dd"), 3)
    val ret = DaoReflectionMacros.getEntityAndEntityDesc(classOf[MockEntity], "method1", classOf[Int], DaoParam("aaa", 1, classOf[Int]), DaoParam("bbb", entity1, classOf[MockEntity]), DaoParam("ccc", entity2, classOf[MockEntity]))
    assert(ret == Some(EntityAndEntityDesc("bbb", entity1, MockEntity.entityDesc)))
  }
  test("getEntityAndEntityDesc has entity and return Result") {
    // Same as above but with a Result[MockEntity] return type, which must also be accepted.
    val entity1 = MockEntity(1, MockHolder("aa"), "aa", MockEmbeddable(2, "cc"),2)
    val entity2 = MockEntity(2, MockHolder("bb"), "bb", MockEmbeddable(1, "dd"),3)
    val ret = DaoReflectionMacros.getEntityAndEntityDesc(classOf[MockEntity], "method1", classOf[Result[MockEntity]], DaoParam("aaa", 1, classOf[Int]), DaoParam("bbb", entity1, classOf[MockEntity]), DaoParam("ccc", entity2, classOf[MockEntity]))
    assert(ret == Some(EntityAndEntityDesc("bbb", entity1, MockEntity.entityDesc)))
  }
  test("getEntityAndEntityDesc has entity and return Other") {
    // Compile error: an entity parameter with an unrelated return type must be rejected by the macro.
    // val entity1 = DummyEntity(1, null, "aa", 2)
    // val entity2 = DummyEntity(2, null, "bb", 3)
    // val ret = DaoReflectionMacros.getEntityAndEntityDesc(classOf[DummyEntity], "method1", classOf[Long], DaoParam("aaa", 1, classOf[Int]), DaoParam("bbb", entity1, classOf[DummyEntity]), DaoParam("ccc", entity2, classOf[DummyEntity]))
    // assert(ret == Some(EntityAndEntityDesc("bbb", entity1, DummyEntity.entityDesc)))
  }
  test("getEntityAndEntityDesc no entity") {
    // Without any entity-typed parameter the macro yields None.
    val ret = DaoReflectionMacros.getEntityAndEntityDesc(classOf[Int], "method1", classOf[Int], DaoParam("aaa", 1, classOf[Int]), DaoParam("bbb", "aaa", classOf[String]))
    assert(ret.isEmpty)
  }
  test("getEntityAndEntityDesc no entity and return Other") {
    // Compile error: entity return type without a matching entity parameter must be rejected.
    // val ret = DaoReflectionMacros.getEntityAndEntityDesc(classOf[DummyEntity], "method1", classOf[String], DaoParam("aaa", 1, classOf[Int]), DaoParam("bbb", "aaa", classOf[String]))
    // assert(ret.isEmpty)
  }
}
| bakenezumi/domala | core/src/test/scala/domala/internal/macros/reflect/DaoReflectionMacrosTestSuite.scala | Scala | apache-2.0 | 4,848 |
package spinoco.protocol.mail.header
import java.time.ZonedDateTime
import scodec.{Attempt, Codec, Err}
import scodec.codecs._
import spinoco.protocol.mail.header.codec.DateTimeCodec
/**
* Created by pach on 17/10/17.
*/
/** A parsed `Received:` email trace header: the free-form trace token plus the timestamp parsed from it. */
case class Received(token: String, at: ZonedDateTime) extends DefaultEmailHeaderField
object Received extends DefaultHeaderDescription[Received] {

  // Shared encoder for every variant: token, ';' separator, then the formatted
  // date folded onto a continuation line.
  private def encodeReceived(header: Received): Attempt[String] =
    DateTimeCodec.formatDate(header.at).map(formatted => s"${header.token};\r\n $formatted")

  /**
   * Lenient codec for traces lacking the RFC ';' separator: the trailing
   * `datePosition` space-separated words are treated as the date, everything
   * before them as the trace token.
   */
  def nonRFCCodec(datePosition: Int): Codec[Received] = {
    scodec.codecs.utf8.exmap(
      raw => {
        val words = raw.trim.split(" ")
        val (tokenWords, dateWords) = words.splitAt(words.length - datePosition)
        DateTimeCodec.parseDate(dateWords.mkString(" ")).map { parsed =>
          Received(tokenWords.mkString(" "), parsed)
        }
      }
      , encodeReceived
    )
  }

  /** Strict RFC codec: everything before the last ';' is the token, everything after it the date. */
  val RFCCodec: Codec[Received] = {
    scodec.codecs.utf8.exmap(
      raw => raw.lastIndexOf(';') match {
        case -1 =>
          Attempt.failure(Err(s"""Failed to parse header, *received-token ";" date-time expected, got: $raw"""))
        case separator =>
          val tokenPart = raw.substring(0, separator)
          val datePart = raw.substring(separator + 1).trim
          DateTimeCodec.parseDate(datePart).map { parsed =>
            Received(tokenPart, parsed)
          }
      }
      , encodeReceived
    )
  }

  // Try the strict RFC form first, then the lenient variants with 4- and 7-word dates.
  val codec: Codec[Received] = choice(RFCCodec, nonRFCCodec(4), nonRFCCodec(7))
}
}
| Spinoco/protocol | mail/src/main/scala/spinoco/protocol/mail/header/Received.scala | Scala | mit | 1,440 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.core.asm.transformer
import de.mineformers.core.asm.util.ClassInfo
import net.minecraft.launchwrapper.IClassTransformer
import org.objectweb.asm.ClassWriter._
import org.objectweb.asm.tree.ClassNode
import org.objectweb.asm.{ClassReader, ClassWriter}
import scala.collection.mutable
/**
* CachedClassTransformer
*
* @author PaleoCrafter
*/
/**
 * CachedClassTransformer
 *
 * Wraps multiple [[ClassTransformer]]s behind a single LaunchWrapper transformer.
 * The class is parsed into an ASM tree at most once per `transform` call, and only
 * re-serialised if at least one registered transformer reported a change.
 *
 * @author PaleoCrafter
 */
trait CachedClassTransformer extends IClassTransformer {
  protected val transformers = mutable.ArrayBuffer[ClassTransformer]()

  // NOTE: runs during trait construction, before any subclass constructor/fields —
  // init() implementations must not rely on their own instance state.
  init()

  /**
   * Initialize this transformer wrapper, register the actual transformers here
   */
  def init(): Unit

  /**
   * Register a transformer to this wrapper
   * @param transformer the [[ClassTransformer]] to register
   */
  def register(transformer: ClassTransformer): Unit = transformers += transformer

  /**
   * Transform any class given.
   * @param name the untransformed name of the class
   * @param transformedName the name of the class after transformation
   * @param bytes the untransformed class' bytes, may be null for missing classes
   * @return a (modified) byte array representing the class, or null if `bytes` was null
   */
  override def transform(name: String, transformedName: String, bytes: Array[Byte]): Array[Byte] = {
    // LaunchWrapper hands transformers null for classes it could not locate; pass
    // that through instead of letting ClassReader throw a NullPointerException.
    if (bytes == null) {
      bytes
    } else {
      var transformed = false
      var clazz: ClassNode = null
      var classInfo: ClassInfo = null
      for (transformer <- transformers if transformer.transforms(transformedName)) {
        if (clazz == null) {
          // Parse lazily: only pay the ASM parsing cost once a transformer claims this class.
          val reader = new ClassReader(bytes)
          clazz = new ClassNode()
          reader.accept(clazz, 0)
          classInfo = ClassInfo.of(clazz)
        }
        transformed |= transformer.transform(clazz, classInfo)
      }
      if (transformed) {
        // COMPUTE_FRAMES | COMPUTE_MAXS: have ASM rebuild stack map frames and max
        // stack/local sizes for the rewritten class.
        val writer = new ClassWriter(COMPUTE_FRAMES | COMPUTE_MAXS)
        clazz.accept(writer)
        writer.toByteArray
      } else {
        bytes
      }
    }
  }
}
| MineFormers/MFCore | src/main/scala/de/mineformers/core/asm/transformer/CachedClassTransformer.scala | Scala | mit | 2,962 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.