code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.twitter.finagle.mysql
import com.twitter.finagle.mysql.transport.{MysqlBuf, MysqlBufWriter}
import java.{lang => jl}
/**
 * Evidence that values of type `A` can be encoded as MySQL prepared-statement
 * parameters. Contravariant in `A` so an instance for a supertype applies to
 * its subtypes (see the `java.util.Date` instance for a resulting caveat).
 */
trait CanBeParameter[-A] {

  /**
   * Returns the size of the given parameter in its MySQL binary representation.
   */
  def sizeOf(param: A): Int

  /**
   * Returns the MySQL type code for the given parameter.
   */
  def typeCode(param: A): Short

  /**
   * Writes `param` into `writer` using the MySQL binary protocol encoding
   * corresponding to `typeCode(param)`.
   */
  def write(writer: MysqlBufWriter, param: A): Unit
}
/**
* When a new implicit [[CanBeParameter]] is added here, it should also be
* explicitly added to [[Parameter.unsafeWrap]].
*/
/**
 * Implicit [[CanBeParameter]] instances for the types accepted as MySQL
 * prepared-statement parameters.
 *
 * When a new implicit [[CanBeParameter]] is added here, it should also be
 * explicitly added to [[Parameter.unsafeWrap]].
 */
object CanBeParameter {

  /** Size of a length-coded byte sequence: the length prefix plus the payload. */
  private[this] def arrayLength(bytes: Array[Byte]): Int =
    MysqlBuf.sizeOfLen(bytes.length) + bytes.length

  implicit val stringCanBeParameter: CanBeParameter[String] = {
    new CanBeParameter[String] {
      def sizeOf(param: String): Int = {
        val bytes = param.getBytes(MysqlCharset.defaultCharset)
        arrayLength(bytes)
      }
      def typeCode(param: String): Short = Type.VarChar
      def write(writer: MysqlBufWriter, param: String): Unit =
        writer.writeLengthCodedString(param, MysqlCharset.defaultCharset)
    }
  }

  implicit val booleanCanBeParameter: CanBeParameter[Boolean] = {
    new CanBeParameter[Boolean] {
      def sizeOf(param: Boolean): Int = 1
      def typeCode(param: Boolean): Short = Type.Tiny
      def write(writer: MysqlBufWriter, param: Boolean): Unit =
        writer.writeByte(if (param) 1 else 0)
    }
  }

  // The boxed java.lang.* instances delegate to their primitive counterparts.
  implicit val javaLangBooleanCanBeParameter: CanBeParameter[jl.Boolean] = {
    new CanBeParameter[jl.Boolean] {
      def sizeOf(param: jl.Boolean): Int = booleanCanBeParameter.sizeOf(param.booleanValue())
      def typeCode(param: jl.Boolean): Short = booleanCanBeParameter.typeCode(param.booleanValue())
      def write(writer: MysqlBufWriter, param: jl.Boolean): Unit =
        booleanCanBeParameter.write(writer, param.booleanValue())
    }
  }

  implicit val byteCanBeParameter: CanBeParameter[Byte] = {
    new CanBeParameter[Byte] {
      def sizeOf(param: Byte): Int = 1
      def typeCode(param: Byte): Short = Type.Tiny
      def write(writer: MysqlBufWriter, param: Byte): Unit =
        writer.writeByte(param)
    }
  }

  implicit val javaLangByteCanBeParameter: CanBeParameter[jl.Byte] = {
    new CanBeParameter[jl.Byte] {
      def sizeOf(param: jl.Byte): Int = byteCanBeParameter.sizeOf(param.byteValue())
      def typeCode(param: jl.Byte): Short = byteCanBeParameter.typeCode(param.byteValue())
      def write(writer: MysqlBufWriter, param: jl.Byte): Unit =
        byteCanBeParameter.write(writer, param.byteValue())
    }
  }

  implicit val shortCanBeParameter: CanBeParameter[Short] = {
    new CanBeParameter[Short] {
      def sizeOf(param: Short): Int = 2
      def typeCode(param: Short): Short = Type.Short
      def write(writer: MysqlBufWriter, param: Short): Unit =
        writer.writeShortLE(param)
    }
  }

  implicit val javaLangShortCanBeParameter: CanBeParameter[jl.Short] = {
    new CanBeParameter[jl.Short] {
      def sizeOf(param: jl.Short): Int = shortCanBeParameter.sizeOf(param.shortValue())
      def typeCode(param: jl.Short): Short = shortCanBeParameter.typeCode(param.shortValue())
      def write(writer: MysqlBufWriter, param: jl.Short): Unit =
        shortCanBeParameter.write(writer, param.shortValue())
    }
  }

  implicit val intCanBeParameter: CanBeParameter[Int] = {
    new CanBeParameter[Int] {
      def sizeOf(param: Int): Int = 4
      def typeCode(param: Int): Short = Type.Long
      def write(writer: MysqlBufWriter, param: Int): Unit =
        writer.writeIntLE(param)
    }
  }

  implicit val javaLangIntCanBeParameter: CanBeParameter[jl.Integer] = {
    new CanBeParameter[jl.Integer] {
      def sizeOf(param: jl.Integer): Int = intCanBeParameter.sizeOf(param.intValue())
      def typeCode(param: jl.Integer): Short = intCanBeParameter.typeCode(param.intValue())
      def write(writer: MysqlBufWriter, param: jl.Integer): Unit =
        intCanBeParameter.write(writer, param.intValue())
    }
  }

  implicit val longCanBeParameter: CanBeParameter[Long] = {
    new CanBeParameter[Long] {
      def sizeOf(param: Long): Int = 8
      def typeCode(param: Long): Short = Type.LongLong
      def write(writer: MysqlBufWriter, param: Long): Unit =
        writer.writeLongLE(param)
    }
  }

  implicit val javaLangLongCanBeParameter: CanBeParameter[jl.Long] = {
    new CanBeParameter[jl.Long] {
      def sizeOf(param: jl.Long): Int = longCanBeParameter.sizeOf(param.longValue())
      def typeCode(param: jl.Long): Short = longCanBeParameter.typeCode(param.longValue())
      def write(writer: MysqlBufWriter, param: jl.Long): Unit =
        longCanBeParameter.write(writer, param.longValue())
    }
  }

  // BigInt is stored as an 8-byte (unsigned) LONGLONG, written little-endian.
  implicit val bigIntCanBeParameter: CanBeParameter[BigInt] = {
    new CanBeParameter[BigInt] {
      def sizeOf(param: BigInt): Int = 8
      def typeCode(param: BigInt): Short = Type.LongLong
      def write(writer: MysqlBufWriter, param: BigInt): Unit = {
        // BigInt.toByteArray is big-endian two's complement. A non-negative value
        // in [2^63, 2^64) carries a leading 0x00 sign byte (9 bytes total) even
        // though it fits in an unsigned LONGLONG, so strip that sign byte before
        // the length check instead of rejecting the value.
        val raw: Array[Byte] = param.toByteArray
        val byteArray: Array[Byte] =
          if (raw.length > 8 && raw.head == 0) raw.tail else raw
        val lengthOfByteArray: Int = byteArray.length
        if (lengthOfByteArray > 8) {
          throw new BigIntTooLongException(size = lengthOfByteArray)
        }
        // Emit the significant bytes least-significant first (little-endian)...
        for (i <- (lengthOfByteArray - 1) to 0 by -1) {
          writer.writeByte(byteArray(i))
        }
        // ...then zero-pad to the fixed 8-byte width. NOTE: zero padding assumes a
        // non-negative value; negative BigInts are not sign-extended here because
        // the column is treated as unsigned.
        for (i <- lengthOfByteArray until 8) {
          writer.writeByte(0x0)
        }
      }
    }
  }

  implicit val floatCanBeParameter: CanBeParameter[Float] = {
    new CanBeParameter[Float] {
      def sizeOf(param: Float): Int = 4
      def typeCode(param: Float): Short = Type.Float
      def write(writer: MysqlBufWriter, param: Float): Unit =
        writer.writeFloatLE(param)
    }
  }

  implicit val javaLangFloatCanBeParameter: CanBeParameter[jl.Float] = {
    new CanBeParameter[jl.Float] {
      def sizeOf(param: jl.Float): Int = floatCanBeParameter.sizeOf(param.floatValue())
      def typeCode(param: jl.Float): Short = floatCanBeParameter.typeCode(param.floatValue())
      def write(writer: MysqlBufWriter, param: jl.Float): Unit =
        floatCanBeParameter.write(writer, param.floatValue())
    }
  }

  implicit val doubleCanBeParameter: CanBeParameter[Double] = {
    new CanBeParameter[Double] {
      def sizeOf(param: Double): Int = 8
      def typeCode(param: Double): Short = Type.Double
      def write(writer: MysqlBufWriter, param: Double): Unit =
        writer.writeDoubleLE(param)
    }
  }

  implicit val javaLangDoubleCanBeParameter: CanBeParameter[jl.Double] = {
    new CanBeParameter[jl.Double] {
      def sizeOf(param: jl.Double): Int = doubleCanBeParameter.sizeOf(param.doubleValue())
      def typeCode(param: jl.Double): Short = doubleCanBeParameter.typeCode(param.doubleValue())
      def write(writer: MysqlBufWriter, param: jl.Double): Unit =
        doubleCanBeParameter.write(writer, param.doubleValue())
    }
  }

  // the format: varlen followed by the value as a string
  // https://dev.mysql.com/doc/internals/en/binary-protocol-value.html#packet-ProtocolBinary::MYSQL_TYPE_NEWDECIMAL
  implicit val bigDecimalCanBeParameter: CanBeParameter[BigDecimal] =
    new CanBeParameter[BigDecimal] {
      private[this] val binaryCharset = MysqlCharset(MysqlCharset.Binary)
      private[this] def asBytes(bd: BigDecimal): Array[Byte] =
        bd.toString.getBytes(binaryCharset)
      def sizeOf(param: BigDecimal): Int =
        arrayLength(asBytes(param))
      def typeCode(param: BigDecimal): Short =
        Type.NewDecimal
      def write(writer: MysqlBufWriter, param: BigDecimal): Unit =
        writer.writeLengthCodedBytes(asBytes(param))
    }

  implicit val byteArrayCanBeParameter: CanBeParameter[Array[Byte]] = {
    new CanBeParameter[Array[Byte]] {
      def sizeOf(param: Array[Byte]): Int =
        arrayLength(param)
      // Pick the smallest blob type that fits the payload.
      // NOTE(review): arrays larger than 16 MiB fall through to -1; Type.LongBlob
      // may be intended for that case — confirm before relying on it.
      def typeCode(param: Array[Byte]): Short = {
        if (param.length <= 255) Type.TinyBlob
        else if (param.length <= 65535) Type.Blob
        else if (param.length <= 16777215) Type.MediumBlob
        else -1
      }
      def write(writer: MysqlBufWriter, param: Array[Byte]): Unit =
        writer.writeLengthCodedBytes(param)
    }
  }

  implicit val valueCanBeParameter: CanBeParameter[Value] = {
    new CanBeParameter[Value] {
      // EmptyValue intentionally has no size and reaches the catch-all throw,
      // mirroring the unwritable `-1` type code below.
      def sizeOf(param: Value): Int = param match {
        case RawValue(_, _, true, b) => arrayLength(b)
        case StringValue(s) =>
          val bytes = s.getBytes(MysqlCharset.defaultCharset)
          arrayLength(bytes)
        case ByteValue(_) => 1
        case ShortValue(_) => 2
        case IntValue(_) => 4
        case LongValue(_) => 8
        case BigIntValue(_) => 8
        case FloatValue(_) => 4
        case DoubleValue(_) => 8
        case NullValue => 0
        case _ => throw new IllegalArgumentException(s"Cannot determine size of $param.")
      }
      def typeCode(param: Value): Short = param match {
        case RawValue(typ, _, _, _) => typ
        case StringValue(_) => Type.VarChar
        case ByteValue(_) => Type.Tiny
        case ShortValue(_) => Type.Short
        case IntValue(_) => Type.Long
        case LongValue(_) => Type.LongLong
        case BigIntValue(_) => Type.LongLong
        case FloatValue(_) => Type.Float
        case DoubleValue(_) => Type.Double
        case EmptyValue => -1
        case NullValue => Type.Null
      }
      def write(writer: MysqlBufWriter, param: Value): Unit = param match {
        // allows for generic binary values as params to a prepared statement.
        case RawValue(_, _, true, bytes) => writer.writeLengthCodedBytes(bytes)
        // allows for Value types as params to prepared statements
        case ByteValue(b) => writer.writeByte(b)
        case ShortValue(s) => writer.writeShortLE(s)
        case IntValue(i) => writer.writeIntLE(i)
        case LongValue(l) => writer.writeLongLE(l)
        case BigIntValue(b) => bigIntCanBeParameter.write(writer, b)
        case FloatValue(f) => writer.writeFloatLE(f)
        case DoubleValue(d) => writer.writeDoubleLE(d)
        case StringValue(s) => writer.writeLengthCodedString(s, MysqlCharset.defaultCharset)
        case _ =>
          throw new IllegalArgumentException(s"Type $param is not supported, cannot write value.")
      }
    }
  }

  /**
   * Because java.sql.Date and java.sql.Timestamp extend java.util.Date and
   * because CanBeParameter's type parameter is contravariant, having separate
   * implicits for these types results in the one for the supertype being used
   * when the one for the subtype should be used. To work around this we use
   * just one implicit and pattern match within it.
   */
  implicit val dateCanBeParameter: CanBeParameter[java.util.Date] = {
    new CanBeParameter[java.util.Date] {
      def sizeOf(param: java.util.Date): Int = param match {
        case _: java.sql.Date => 5
        case _: java.sql.Timestamp => 12
        case _ => 12
      }
      def typeCode(param: java.util.Date): Short = param match {
        case _: java.sql.Date => Type.Date
        case _: java.sql.Timestamp => Type.Timestamp
        case _ => Type.DateTime
      }
      def write(writer: MysqlBufWriter, param: java.util.Date): Unit = param match {
        case sqlDate: java.sql.Date => valueCanBeParameter.write(writer, DateValue(sqlDate))
        case sqlTimestamp: java.sql.Timestamp =>
          valueCanBeParameter.write(
            writer,
            TimestampValue(sqlTimestamp)
          )
        // A plain java.util.Date is converted to a Timestamp via its epoch millis.
        case javaDate =>
          valueCanBeParameter.write(
            writer,
            TimestampValue(new java.sql.Timestamp(javaDate.getTime))
          )
      }
    }
  }

  // Note that Timestamp is UTC only and includes both Date and Time parts.
  // See https://dev.mysql.com/doc/refman/8.0/en/datetime.html.
  implicit val ctuTimeCanBeParameter: CanBeParameter[com.twitter.util.Time] = {
    new CanBeParameter[com.twitter.util.Time] {
      def sizeOf(param: com.twitter.util.Time): Int = 12
      def typeCode(param: com.twitter.util.Time): Short = Type.Timestamp
      def write(writer: MysqlBufWriter, param: com.twitter.util.Time): Unit = {
        valueCanBeParameter.write(writer, TimestampValue(new java.sql.Timestamp(param.inMillis)))
      }
    }
  }

  implicit val nullCanBeParameter: CanBeParameter[Null] = {
    new CanBeParameter[Null] {
      def sizeOf(param: Null): Int = 0
      def typeCode(param: Null): Short = Type.Null
      // NULL parameters contribute no bytes; they are signalled via the null bitmap.
      def write(writer: MysqlBufWriter, param: Null): Unit = ()
    }
  }
}
/**
 * Thrown when a [[scala.math.BigInt]] parameter has more than 8 significant
 * bytes and therefore cannot be encoded as a MySQL (unsigned) LONGLONG.
 *
 * @param size the number of bytes in the rejected value's representation
 */
class BigIntTooLongException(size: Int)
  extends Exception(
    s"BigInt is stored as Unsigned Long, thus it cannot be longer than 8 bytes. Size = $size"
  )
| twitter/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/CanBeParameter.scala | Scala | apache-2.0 | 12,682 |
package com.twitter.finagle.exp.zookeeper.data
import com.twitter.finagle.exp.zookeeper.data.ACL.Perms
import com.twitter.finagle.exp.zookeeper.transport.{BufArray, BufString}
import com.twitter.io.Buf
case class Id(scheme: String, data: String) extends Data {
  /** Serialized form: the scheme string followed directly by the data string. */
  def buf: Buf =
    BufString(scheme).concat(BufString(data))
}
case class Auth(scheme: String, data: Array[Byte]) extends Data {
  /** Serialized form: the scheme string followed directly by the raw auth bytes. */
  def buf: Buf =
    BufString(scheme).concat(BufArray(data))
}
object Id extends DataDecoder[Id] {
  /**
   * Decodes an [[Id]] from the head of `buf`: two consecutive length-prefixed
   * strings (scheme, then id). Returns the decoded value together with the
   * remaining bytes, or None if the buffer does not match.
   */
  def unapply(buf: Buf): Option[(Id, Buf)] =
    buf match {
      case BufString(scheme, BufString(identifier, remainder)) =>
        Some((Id(scheme, identifier), remainder))
      case _ =>
        None
    }
}
/**
* A set of basic ids
*/
/**
 * A set of basic ids
 */
object Ids {

  /**
   * This Id represents anyone.
   */
  val ANYONE_ID_UNSAFE: Id = Id("world", "anyone")

  /**
   * This Id is only usable to set ACLs. It will get substituted with the
   * Id's the client authenticated with.
   */
  val AUTH_IDS: Id = Id("auth", "")

  /**
   * This is a completely open ACL .
   */
  val OPEN_ACL_UNSAFE: Seq[ACL] = Seq(ACL(Perms.ALL, ANYONE_ID_UNSAFE))

  /**
   * This ACL gives the creators authentication id's all permissions.
   */
  val CREATOR_ALL_ACL: Seq[ACL] = Seq(ACL(Perms.ALL, AUTH_IDS))

  /**
   * This ACL gives the world the ability to read.
   */
  val READ_ACL_UNSAFE: Seq[ACL] = Seq(ACL(Perms.READ, ANYONE_ID_UNSAFE))
}
} | yonglehou/finagle-zookeeper | core/src/main/scala/com/twitter/finagle/exp/zookeeper/data/Id.scala | Scala | apache-2.0 | 1,396 |
package fabricator.entities
import fabricator.enums.DateFormat
import org.joda.time.{DateTime, IllegalFieldValueException}
/**
 * A mutable builder for a (partially) random date. Each field starts at a
 * random value drawn from [[fabricator.Calendar]] and can be pinned with the
 * `inYear`/`inMonth`/`inDay`/`inHour`/`inMinute`/`inTime` setters; the result
 * is produced by `asDate` or `asString`.
 */
class RandomDate {
  private val cal = fabricator.Calendar()
  private var year: Int = cal.year.toInt
  private var month: Int = cal.month(asNumber = true).toInt
  private var day: Int = cal.day(year, month).toInt
  private var hour: Int = cal.hour24h.toInt
  private var minute: Int = cal.minute.toInt
  private var date: DateTime = new DateTime(year, month, day, hour, minute)

  /** Pins the year; the day is re-randomized so it remains valid for the new year. */
  def inYear(year: Int): this.type = {
    this.year = year
    this.day = cal.day(year, this.month).toInt
    this
  }

  /** Pins the month (1-12); the day is re-randomized so it remains valid for the new month. */
  def inMonth(month: Int): this.type = {
    this.month = month
    this.day = cal.day(this.year, month).toInt
    this
  }

  /** Pins the day of month, clamping it down to the last valid day of the current year/month. */
  def inDay(day: Int): this.type = {
    this.day = validateDay(day)
    this
  }

  /** Returns true iff `year`/`month`/`day` denote a real calendar date. */
  private def isValidDay(year: Int, month: Int, day: Int): Boolean = {
    try {
      new DateTime(year, month, day, 0, 0)
      true
    } catch {
      case e: IllegalFieldValueException => false
    }
  }

  /**
   * Clamps `day` to a valid day for the current year/month by decrementing
   * (e.g. 31 -> 28 in a non-leap February).
   */
  private def validateDay(day: Int): Int = {
    // Guard against non-positive input: without the lower bound the decrement
    // loop would never terminate, since no day <= 0 is ever valid. Day 1 is
    // valid in every month, so the loop is guaranteed to stop.
    var dayOfMonth = math.max(day, 1)
    while (dayOfMonth > 1 && !isValidDay(year, month, dayOfMonth)) {
      dayOfMonth = dayOfMonth - 1
    }
    dayOfMonth
  }

  /** Pins the hour (24h clock). Range is not validated here — DateTime rejects bad values. */
  def inHour(hour: Int): this.type = {
    this.hour = hour
    this
  }

  /** Pins the minute. Range is not validated here — DateTime rejects bad values. */
  def inMinute(minute: Int): this.type = {
    this.minute = minute
    this
  }

  /** Pins both hour and minute at once. */
  def inTime(hour: Int, minute: Int): this.type = {
    this.hour = hour
    this.minute = minute
    this
  }

  // Materializes the current field values into a DateTime and caches it.
  private def makeDate: DateTime = {
    date = new DateTime(year, month, day, hour, minute)
    date
  }

  /** Returns the configured date as a joda [[DateTime]]. */
  def asDate: DateTime = {
    makeDate
  }

  /** Returns the configured date formatted as dd-MM-yyyy (the default format). */
  def asString: String = {
    asString(DateFormat.dd_MM_yyyy)
  }

  /** Returns the configured date formatted with the given [[DateFormat]]. */
  def asString(format: DateFormat): String = {
    val date = makeDate
    date.toString(format.getFormat)
  }
}
| edombowsky/fabricator | src/main/scala/fabricator/entities/RandomDate.scala | Scala | apache-2.0 | 1,835 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.Date
import org.apache.hadoop.mapreduce.{TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.{DynamicPartitionDataWriter, SingleDirectoryDataWriter, WriteJobDescription}
import org.apache.spark.sql.sources.v2.writer.{DataWriter, DataWriterFactory}
import org.apache.spark.util.SerializableConfiguration
/**
 * A [[DataWriterFactory]] for file-based data sources: creates one
 * [[DataWriter]] per task, choosing single-directory or dynamic-partition
 * output depending on whether the write has partition columns.
 *
 * @param description serialized description of the write job (schema, paths, Hadoop conf)
 * @param committer   the commit protocol coordinating task/job commits
 */
case class FileWriterFactory (
    description: WriteJobDescription,
    committer: FileCommitProtocol) extends DataWriterFactory {

  // Capture the job timestamp once, when the factory is created on the driver,
  // so that every task attempt derives the same MapReduce job ID. Creating a
  // fresh `new Date` per task could yield different job IDs for tasks starting
  // in different seconds, confusing committers that key on the job ID.
  // (java.util.Date is Serializable, so it travels with this case class.)
  private val jobDate = new Date

  override def createWriter(partitionId: Int, realTaskId: Long): DataWriter[InternalRow] = {
    val taskAttemptContext = createTaskAttemptContext(partitionId)
    committer.setupTask(taskAttemptContext)
    if (description.partitionColumns.isEmpty) {
      new SingleDirectoryDataWriter(description, taskAttemptContext, committer)
    } else {
      new DynamicPartitionDataWriter(description, taskAttemptContext, committer)
    }
  }

  /**
   * Builds a Hadoop [[TaskAttemptContextImpl]] for the given partition,
   * wiring the (shared) job ID and per-task IDs into the Hadoop configuration.
   */
  private def createTaskAttemptContext(partitionId: Int): TaskAttemptContextImpl = {
    val jobId = SparkHadoopWriterUtils.createJobID(jobDate, 0)
    val taskId = new TaskID(jobId, TaskType.MAP, partitionId)
    val taskAttemptId = new TaskAttemptID(taskId, 0)
    // Set up the configuration object
    val hadoopConf = description.serializableHadoopConf.value
    hadoopConf.set("mapreduce.job.id", jobId.toString)
    hadoopConf.set("mapreduce.task.id", taskId.toString)
    hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
    hadoopConf.setBoolean("mapreduce.task.ismap", true)
    hadoopConf.setInt("mapreduce.task.partition", 0)
    new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
  }
}
| Aegeaner/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileWriterFactory.scala | Scala | apache-2.0 | 2,695 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.classification
import org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.param.ParamsSuite
import org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.tree.impl.TreeTests
import org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.tree.{ CategoricalSplit, InternalNode, LeafNode }
import org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.util.{ MLTestingUtils, DefaultReadWriteTest, MLlibTestSparkContext, SparkFunSuite }
import org.apache.spark.mllib.linalg.{ Vector, Vectors }
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.org.trustedanalytics.sparktk.deeptrees.tree.{ DecisionTree => OldDecisionTree, DecisionTreeSuite => OldDecisionTreeSuite }
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{ DataFrame, Row, SQLContext }
/**
 * Test suite for the ML [[DecisionTreeClassifier]]. Most tests train a tree via
 * both the old (mllib) and new (ml) APIs on the same data and assert the models
 * are identical (see the companion object's `compareAPIs`); the rest cover
 * prediction columns, feature importances, metadata handling, and save/load.
 */
class DecisionTreeClassifierSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  import DecisionTreeClassifierSuite.compareAPIs
  //import testImplicits._

  // Shared fixture RDDs, initialized once in beforeAll from the old suite's generators.
  private var categoricalDataPointsRDD: RDD[LabeledPoint] = _
  private var orderedLabeledPointsWithLabel0RDD: RDD[LabeledPoint] = _
  private var orderedLabeledPointsWithLabel1RDD: RDD[LabeledPoint] = _
  private var categoricalDataPointsForMulticlassRDD: RDD[LabeledPoint] = _
  private var continuousDataPointsForMulticlassRDD: RDD[LabeledPoint] = _
  private var categoricalDataPointsForMulticlassForOrderedFeaturesRDD: RDD[LabeledPoint] = _

  override def beforeAll() {
    super.beforeAll()
    categoricalDataPointsRDD =
      sc.parallelize(OldDecisionTreeSuite.generateCategoricalDataPoints())
    orderedLabeledPointsWithLabel0RDD =
      sc.parallelize(OldDecisionTreeSuite.generateOrderedLabeledPointsWithLabel0())
    orderedLabeledPointsWithLabel1RDD =
      sc.parallelize(OldDecisionTreeSuite.generateOrderedLabeledPointsWithLabel1())
    categoricalDataPointsForMulticlassRDD =
      sc.parallelize(OldDecisionTreeSuite.generateCategoricalDataPointsForMulticlass())
    continuousDataPointsForMulticlassRDD =
      sc.parallelize(OldDecisionTreeSuite.generateContinuousDataPointsForMulticlass())
    categoricalDataPointsForMulticlassForOrderedFeaturesRDD = sc.parallelize(
      OldDecisionTreeSuite.generateCategoricalDataPointsForMulticlassForOrderedFeatures())
  }

  test("params") {
    ParamsSuite.checkParams(new DecisionTreeClassifier)
    val model = new DecisionTreeClassificationModel("dtc", new LeafNode(0.0, 0.0, null), 1, 2)
    ParamsSuite.checkParams(model)
  }

  /////////////////////////////////////////////////////////////////////////////
  // Tests calling train()
  /////////////////////////////////////////////////////////////////////////////

  test("Binary classification stump with ordered categorical features") {
    val dt = new DecisionTreeClassifier()
      .setImpurity("gini")
      .setMaxDepth(2)
      .setMaxBins(100)
      .setSeed(1)
    val categoricalFeatures = Map(0 -> 3, 1 -> 3)
    val numClasses = 2
    compareAPIs(categoricalDataPointsRDD, dt, categoricalFeatures, numClasses)
  }

  test("Binary classification stump with fixed labels 0,1 for Entropy,Gini") {
    val dt = new DecisionTreeClassifier()
      .setMaxDepth(3)
      .setMaxBins(100)
    val numClasses = 2
    // Exercise every supported impurity on both label-0 and label-1 datasets.
    Array(orderedLabeledPointsWithLabel0RDD, orderedLabeledPointsWithLabel1RDD).foreach { rdd =>
      DecisionTreeClassifier.supportedImpurities.foreach { impurity =>
        dt.setImpurity(impurity)
        compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
      }
    }
  }

  test("Multiclass classification stump with 3-ary (unordered) categorical features") {
    val rdd = categoricalDataPointsForMulticlassRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
    val numClasses = 3
    val categoricalFeatures = Map(0 -> 3, 1 -> 3)
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("Binary classification stump with 1 continuous feature, to check off-by-1 error") {
    val arr = Array(
      LabeledPoint(0.0, Vectors.dense(0.0)),
      LabeledPoint(1.0, Vectors.dense(1.0)),
      LabeledPoint(1.0, Vectors.dense(2.0)),
      LabeledPoint(1.0, Vectors.dense(3.0)))
    val rdd = sc.parallelize(arr)
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
    val numClasses = 2
    compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
  }

  test("Binary classification stump with 2 continuous features") {
    val arr = Array(
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
      LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
      LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 2.0)))))
    val rdd = sc.parallelize(arr)
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
    val numClasses = 2
    compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
  }

  test("Multiclass classification stump with unordered categorical features," +
    " with just enough bins") {
    val maxBins = 2 * (math.pow(2, 3 - 1).toInt - 1) // just enough bins to allow unordered features
    val rdd = categoricalDataPointsForMulticlassRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(maxBins)
    val categoricalFeatures = Map(0 -> 3, 1 -> 3)
    val numClasses = 3
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("Multiclass classification stump with continuous features") {
    val rdd = continuousDataPointsForMulticlassRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(100)
    val numClasses = 3
    compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
  }

  test("Multiclass classification stump with continuous + unordered categorical features") {
    val rdd = continuousDataPointsForMulticlassRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(100)
    val categoricalFeatures = Map(0 -> 3)
    val numClasses = 3
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("Multiclass classification stump with 10-ary (ordered) categorical features") {
    val rdd = categoricalDataPointsForMulticlassForOrderedFeaturesRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(100)
    val categoricalFeatures = Map(0 -> 10, 1 -> 10)
    val numClasses = 3
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("Multiclass classification tree with 10-ary (ordered) categorical features," +
    " with just enough bins") {
    val rdd = categoricalDataPointsForMulticlassForOrderedFeaturesRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(10)
    val categoricalFeatures = Map(0 -> 10, 1 -> 10)
    val numClasses = 3
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("split must satisfy min instances per node requirements") {
    val arr = Array(
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
      LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 1.0)))))
    val rdd = sc.parallelize(arr)
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(2)
      .setMinInstancesPerNode(2)
    val numClasses = 2
    compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
  }

  test("do not choose split that does not satisfy min instance per node requirements") {
    // if a split does not satisfy min instances per node requirements,
    // this split is invalid, even though the information gain of split is large.
    val arr = Array(
      LabeledPoint(0.0, Vectors.dense(0.0, 1.0)),
      LabeledPoint(1.0, Vectors.dense(1.0, 1.0)),
      LabeledPoint(0.0, Vectors.dense(0.0, 0.0)),
      LabeledPoint(0.0, Vectors.dense(0.0, 0.0)))
    val rdd = sc.parallelize(arr)
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxBins(2)
      .setMaxDepth(2)
      .setMinInstancesPerNode(2)
    val categoricalFeatures = Map(0 -> 2, 1 -> 2)
    val numClasses = 2
    compareAPIs(rdd, dt, categoricalFeatures, numClasses)
  }

  test("split must satisfy min info gain requirements") {
    val arr = Array(
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
      LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
      LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 1.0)))))
    val rdd = sc.parallelize(arr)
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(2)
      .setMinInfoGain(1.0)
    val numClasses = 2
    compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
  }

  test("predictRaw and predictProbability") {
    val rdd = continuousDataPointsForMulticlassRDD
    val dt = new DecisionTreeClassifier()
      .setImpurity("Gini")
      .setMaxDepth(4)
      .setMaxBins(100)
    val categoricalFeatures = Map(0 -> 3)
    val numClasses = 3
    val newData: DataFrame = TreeTests.setMetadata(rdd, categoricalFeatures, numClasses)
    val newTree = dt.fit(newData)
    // copied model must have the same parent.
    MLTestingUtils.checkCopy(newTree)
    val predictions = newTree.transform(newData)
      .select(newTree.getPredictionCol, newTree.getRawPredictionCol, newTree.getProbabilityCol)
      .collect()
    // Each prediction must be the argmax of rawPrediction, and probability must
    // be rawPrediction normalized to sum to 1.
    predictions.foreach {
      case Row(pred: Double, rawPred: Vector, probPred: Vector) =>
        assert(pred === rawPred.argmax,
          s"Expected prediction $pred but calculated ${rawPred.argmax} from rawPrediction.")
        val sum = rawPred.toArray.sum
        assert(Vectors.dense(rawPred.toArray.map(_ / sum)) === probPred,
          "probability prediction mismatch")
    }
  }

  test("training with 1-category categorical feature") {
    val data = sc.parallelize(Seq(
      LabeledPoint(0, Vectors.dense(0, 2, 3)),
      LabeledPoint(1, Vectors.dense(0, 3, 1)),
      LabeledPoint(0, Vectors.dense(0, 2, 2)),
      LabeledPoint(1, Vectors.dense(0, 3, 9)),
      LabeledPoint(0, Vectors.dense(0, 2, 6))
    ))
    val df = TreeTests.setMetadata(data, Map(0 -> 1), 2)
    val dt = new DecisionTreeClassifier().setMaxDepth(3)
    dt.fit(df)
  }

  test("Use soft prediction for binary classification with ordered categorical features") {
    // The following dataset is set up such that the best split is {1} vs. {0, 2}.
    // If the hard prediction is used to order the categories, then {0} vs. {1, 2} is chosen.
    val arr = Array(
      LabeledPoint(0.0, Vectors.dense(0.0)),
      LabeledPoint(0.0, Vectors.dense(0.0)),
      LabeledPoint(0.0, Vectors.dense(0.0)),
      LabeledPoint(1.0, Vectors.dense(0.0)),
      LabeledPoint(0.0, Vectors.dense(1.0)),
      LabeledPoint(0.0, Vectors.dense(1.0)),
      LabeledPoint(0.0, Vectors.dense(1.0)),
      LabeledPoint(0.0, Vectors.dense(1.0)),
      LabeledPoint(0.0, Vectors.dense(2.0)),
      LabeledPoint(0.0, Vectors.dense(2.0)),
      LabeledPoint(0.0, Vectors.dense(2.0)),
      LabeledPoint(1.0, Vectors.dense(2.0)))
    val data = sc.parallelize(arr)
    val df = TreeTests.setMetadata(data, Map(0 -> 3), 2)
    // Must set maxBins s.t. the feature will be treated as an ordered categorical feature.
    val dt = new DecisionTreeClassifier()
      .setImpurity("gini")
      .setMaxDepth(1)
      .setMaxBins(3)
    val model = dt.fit(df)
    model.rootNode match {
      case n: InternalNode =>
        n.split match {
          case s: CategoricalSplit =>
            assert(s.leftCategories === Array(1.0))
          case other =>
            fail(s"All splits should be categorical, but got ${other.getClass.getName}: $other.")
        }
      case other =>
        fail(s"Root node should be an internal node, but got ${other.getClass.getName}: $other.")
    }
  }

  test("Feature importance with toy data") {
    val dt = new DecisionTreeClassifier()
      .setImpurity("gini")
      .setMaxDepth(3)
      .setSeed(123)
    // In this data, feature 1 is very important.
    val data: RDD[LabeledPoint] = TreeTests.featureImportanceData(sc)
    val numFeatures = data.first().features.size
    val categoricalFeatures = (0 to numFeatures).map(i => (i, 2)).toMap
    val df = TreeTests.setMetadata(data, categoricalFeatures, 2)
    val model = dt.fit(df)
    // Importances must form a probability distribution peaked at feature 1.
    val importances = model.featureImportances
    val mostImportantFeature = importances.argmax
    assert(mostImportantFeature === 1)
    assert(importances.toArray.sum === 1.0)
    assert(importances.toArray.forall(_ >= 0.0))
  }

  test("should support all NumericType labels and not support other types") {
    val dt = new DecisionTreeClassifier().setMaxDepth(1)
    MLTestingUtils.checkNumericTypes[DecisionTreeClassificationModel, DecisionTreeClassifier](
      dt, sqlContext) { (expected, actual) =>
        TreeTests.checkEqual(expected, actual)
      }
  }

  test("Fitting without numClasses in metadata") {
    val sqlContext = SQLContext.getOrCreate(sc)
    import sqlContext.implicits._
    val df: DataFrame = TreeTests.featureImportanceData(sc).toDF()
    val dt = new DecisionTreeClassifier().setMaxDepth(1)
    dt.fit(df)
  }

  /////////////////////////////////////////////////////////////////////////////
  // Tests of model save/load
  /////////////////////////////////////////////////////////////////////////////

  test("read/write") {
    def checkModelData(
      model: DecisionTreeClassificationModel,
      model2: DecisionTreeClassificationModel): Unit = {
      TreeTests.checkEqual(model, model2)
      assert(model.numFeatures === model2.numFeatures)
      assert(model.numClasses === model2.numClasses)
    }
    val dt = new DecisionTreeClassifier()
    val rdd = TreeTests.getTreeReadWriteData(sc)
    val allParamSettings = TreeTests.allParamSettings ++ Map("impurity" -> "entropy")
    // Categorical splits with tree depth 2
    val categoricalData: DataFrame =
      TreeTests.setMetadata(rdd, Map(0 -> 2, 1 -> 3), numClasses = 2)
    testEstimatorAndModelReadWrite(dt, categoricalData, allParamSettings, checkModelData)
    // Continuous splits with tree depth 2
    val continuousData: DataFrame =
      TreeTests.setMetadata(rdd, Map.empty[Int, Int], numClasses = 2)
    testEstimatorAndModelReadWrite(dt, continuousData, allParamSettings, checkModelData)
    // Continuous splits with tree depth 0
    testEstimatorAndModelReadWrite(dt, continuousData, allParamSettings ++ Map("maxDepth" -> 0),
      checkModelData)
  }
}
private[ml] object DecisionTreeClassifierSuite extends SparkFunSuite {

  /**
   * Train 2 decision trees on the given dataset, one using the old API and one using the new API.
   * Convert the old tree to the new format, compare them, and fail if they are not exactly equal.
   *
   * @param data                training data
   * @param dt                  configured new-API estimator whose params drive both trainings
   * @param categoricalFeatures map from feature index to its arity (empty = all continuous)
   * @param numClasses          number of label classes
   */
  def compareAPIs(
    data: RDD[LabeledPoint],
    dt: DecisionTreeClassifier,
    categoricalFeatures: Map[Int, Int],
    numClasses: Int): Unit = {
    val numFeatures = data.first().features.size
    val oldStrategy = dt.getOldStrategy(categoricalFeatures, numClasses)
    val oldTree = OldDecisionTree.train(data, oldStrategy)
    val newData: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, numClasses)
    val newTree = dt.fit(newData)
    // Use parent from newTree since this is not checked anyways.
    val oldTreeAsNew = DecisionTreeClassificationModel.fromOld(
      oldTree, newTree.parent.asInstanceOf[DecisionTreeClassifier], categoricalFeatures)
    TreeTests.checkEqual(oldTreeAsNew, newTree)
    assert(newTree.numFeatures === numFeatures)
  }
}
| trustedanalytics/spark-tk | sparktk-core/src/test/scala/org/apache/spark/ml/org/trustedanalytics/sparktk/deeptrees/classification/DecisionTreeClassifierSuite.scala | Scala | apache-2.0 | 16,498 |
package com.gl.barbell.rules
import com.gl.barbell.core.{NumberBasedLottery, Rule}
class RangeRule(ranges: Map[Range, Int]) extends Rule {

  /** A lottery satisfies this rule when, for every (range, minCount) entry,
    * at least `minCount` of the drawn numbers fall inside `range`.
    */
  override def satisfied(lottery: NumberBasedLottery): Boolean =
    ranges.forall { case (range, minCount) =>
      lottery.numbers.count(n => range.contains(n)) >= minCount
    }
}
| ajaxchelsea/barbell | src/main/scala/com/gl/barbell/rules/RangeRule.scala | Scala | gpl-2.0 | 297 |
package org.bitcoins.node.networking.peer
import akka.actor.ActorRef
import org.bitcoins.core.config.RegTest
import org.bitcoins.core.number.Int32
import org.bitcoins.core.p2p.{InetAddress, VerAckMessage, VersionMessage}
import org.bitcoins.node.constant.NodeConstants
import org.bitcoins.node.models.Peer
import org.bitcoins.node.networking.P2PClient
import org.bitcoins.server.BitcoinSAppConfig
import org.bitcoins.testkit.BitcoinSTestAppConfig
import org.bitcoins.testkit.node.NodeTestWithCachedBitcoindPair
import org.bitcoins.testkit.node.fixture.NeutrinoNodeConnectedWithBitcoinds
import org.bitcoins.testkit.util.TorUtil
import org.scalatest.{FutureOutcome, Outcome}
import java.net.InetSocketAddress
import scala.concurrent.{Future, Promise}
class PeerMessageReceiverTest extends NodeTestWithCachedBitcoindPair {

  /** Wallet config with data directory set to user temp directory */
  override protected def getFreshConfig: BitcoinSAppConfig =
    BitcoinSTestAppConfig.getNeutrinoWithEmbeddedDbTestConfig(pgUrl)

  override type FixtureParam = NeutrinoNodeConnectedWithBitcoinds

  override def withFixture(test: OneArgAsyncTest): FutureOutcome = {
    // Only wait for a tor client when tor is enabled for this test run.
    val torClientF = if (TorUtil.torEnabled) torF else Future.unit

    val outcomeF: Future[Outcome] = for {
      _ <- torClientF
      bitcoinds <- clientsF
      outcome = withNeutrinoNodeConnectedToBitcoinds(test, bitcoinds.toVector)(
        system,
        getFreshConfig)
      f <- outcome.toFuture
    } yield f
    new FutureOutcome(outcomeF)
  }

  behavior of "PeerMessageReceiverTest"

  /** Builds a [[PeerMessageReceiver]] in the [[PeerMessageReceiverState.Normal]]
    * state with the version/verack handshake already completed and a dummy
    * (unresolved) peer client. Extracted because both tests below need an
    * identically constructed receiver.
    */
  private def buildNormalPeerMessageReceiver(
      nodeConnectedWithBitcoind: NeutrinoNodeConnectedWithBitcoinds): PeerMessageReceiver = {
    val node = nodeConnectedWithBitcoind.node

    // The socket is never actually connected to; it only identifies the peer.
    val socket = InetSocketAddress.createUnresolved("google.com", 12345)
    val client = P2PClient(ActorRef.noSender, Peer(socket, None, None))
    val clientP = Promise[P2PClient]()
    clientP.success(client)

    val versionMsgP = Promise[VersionMessage]()
    val localhost = java.net.InetAddress.getLocalHost
    val versionMsg = VersionMessage(RegTest,
                                    NodeConstants.userAgent,
                                    Int32.one,
                                    InetAddress(localhost.getAddress),
                                    InetAddress(localhost.getAddress),
                                    false)
    versionMsgP.success(versionMsg)

    val verackMsgP = Promise[VerAckMessage.type]()
    verackMsgP.success(VerAckMessage)

    val normal = PeerMessageReceiverState.Normal(clientConnectP = clientP,
                                                 clientDisconnectP =
                                                   Promise[Unit](),
                                                 versionMsgP = versionMsgP,
                                                 verackMsgP = verackMsgP)

    PeerMessageReceiver(normal, node, node.peerManager.peers.head)(
      system,
      node.nodeAppConfig)
  }

  it must "change a peer message receiver to be disconnected" in {
    nodeConnectedWithBitcoind: NeutrinoNodeConnectedWithBitcoinds =>
      val peerMsgReceiver =
        buildNormalPeerMessageReceiver(nodeConnectedWithBitcoind)

      val newMsgReceiver = peerMsgReceiver.disconnect()

      assert(
        newMsgReceiver.state
          .isInstanceOf[PeerMessageReceiverState.Disconnected])
      assert(newMsgReceiver.isDisconnected)
  }

  it must "change a peer message receiver to be initializing disconnect" in {
    nodeConnectedWithBitcoind: NeutrinoNodeConnectedWithBitcoinds =>
      val peerMsgReceiver =
        buildNormalPeerMessageReceiver(nodeConnectedWithBitcoind)

      // First transition: we initiate the disconnect...
      val newMsgReceiver = peerMsgReceiver.initializeDisconnect()

      assert(
        newMsgReceiver.state
          .isInstanceOf[PeerMessageReceiverState.InitializedDisconnect])
      assert(!newMsgReceiver.isDisconnected)

      // ...second transition: the disconnect actually happens.
      val disconnectRecv = newMsgReceiver.disconnect()

      assert(
        disconnectRecv.state
          .isInstanceOf[PeerMessageReceiverState.InitializedDisconnectDone])
      assert(disconnectRecv.isDisconnected)
      assert(disconnectRecv.state.clientDisconnectP.isCompleted)
  }
}
| bitcoin-s/bitcoin-s | node-test/src/test/scala/org/bitcoins/node/networking/peer/PeerMessageReceiverTest.scala | Scala | mit | 5,338 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Thu May 12 11:56:13 EDT 2011
* @see LICENSE (MIT style license file).
*/
package scalation.stat
import math.{abs, cos}
import scalation.linalgebra.VectorD
import scalation.random.Random
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BatchVector` class contains a `VectorD` for maintaining experimental
* data in multiple batches and methods for computing statistics on these data.
* Ex: It can be used to support the Method of Batch Means (MBM).
* @param name name of the batch statistic
* @param _bSize size of each batch
* @param _nBatches number of batches
*/
class BatchVector (name: String, private var _bSize: Int = 10, private var _nBatches: Int = 10)
{
    /** The vector containing all the elements from all the batches.
     *  FIX: with more clever coding the 'y' vector would not be necessary
     */
    private var y = new VectorD (bSize * nBatches)

    /** The vector containing the means from all the batches.
     */
    private var yb = new VectorD (nBatches)

    /** The index in the y vector of the next element to add.
     */
    private var next = 0

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get the total length of the batched vector.
     */
    def len = _nBatches * _bSize

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get the batch size.
     */
    def bSize = _bSize

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get the number of batches.
     */
    def nBatches = _nBatches

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Allocate additional batches for this batched vector.
     *  @param more  the number of additional batches to allocate
     */
    def allocBatches (more: Int = 1) { y.expand (more * _bSize); _nBatches += more }

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute means for each batch and store them in the 'yb' stat vector.
     */
    def computeMeans ()
    {
        // Batch i occupies indices [i * _bSize, (i+1) * _bSize).  Scala's 'until'
        // is already exclusive, so no -1 adjustment is applied (the previous -1
        // dropped the last element of every batch while still dividing by bSize,
        // biasing every batch mean low).
        for (i <- 0 until nBatches) yb(i) = y(i * _bSize until (i+1) * _bSize).sum / bSize.toDouble
    } // computeMeans

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add the given value in the next index position in the batched vector.
     *  @param value  the given value to add
     */
    def tally (value: Double)
    {
        // When full, double the backing storage; doubling _bSize at the same time
        // keeps the invariant len == nBatches * bSize consistent with y's capacity.
        if (next == y.dim) { y = y.expand (); _bSize += _bSize }   // double bSize
        y(next) = value
        next += 1
    } // tally

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the lag 1 autocorrelation of the batch means.  Be sure to run
     *  'computeMeans' first.
     */
    def acorr: Double = yb.acorr ()

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether the batches are sufficiently uncorrelated.
     *  @param threshold  the cut-off value to be considered uncorrelated
     */
    def uncorrelated (threshold: Double = .2) = abs (acorr) <= threshold

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the relative precision, i.e., the ratio of the confidence interval
     *  half-width and the mean.
     *  @param p  the confidence level
     */
    def precision (p: Double = .95): Double = yb.precision (p)
    // NOTE(review): 'p' was previously ignored (yb.precision () was called with no
    // argument); it is now forwarded, mirroring how 'precise' below forwards it.

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether the Confidence Interval (CI) on the mean is tight enough.
     *  @param threshold  the cut-off value for CI to be considered tight
     *  @param p          the confidence level
     */
    def precise (threshold: Double = .2, p: Double = .95) = yb.precise (threshold, p)

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert the batched vector into a string showing each of the batches.
     */
    override def toString =
    {
        var s = name
        for (i <- 0 until nBatches) {
            // Show the complete batch (same exclusive-bound slice as computeMeans).
            s += "\nBatch_" + i + ": \t" + y(i * _bSize until (i+1) * _bSize)
        } // for
        s
    } // toString

} // BatchVector class
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BatchVectorTest` object provides an example of how to use the `BatchVector`
* class to implement the Method of Batch Means (MBM).
*/
object BatchVectorTest extends App
{
    val bat = new BatchVector ("ResponseTime")
    val rng = Random ()
    var from = 0
    var till = bat.len

    /** Synthetic response-time observation: uniform noise plus a slow cosine drift. */
    def observation (i: Int): Double = 75.0 * rng.gen + 12.5 * (1.0 + cos (i/10.0))

    // Phase 1: keep doubling the batch size until successive batch means are
    // sufficiently uncorrelated.
    var correlated = true
    while (correlated) {
        for (i <- from until till) bat.tally (observation (i))
        from = till
        till += till
        bat.computeMeans ()
        println ("acorr for " + bat.nBatches + " batches of size " + bat.bSize + " = " + bat.acorr)
        correlated = ! bat.uncorrelated ()
    } // while

    // Phase 2: keep adding batches until the confidence interval on the mean is
    // tight enough.
    while (! bat.precise ()) {
        from = till
        till += bat.bSize
        bat.allocBatches ()
        for (i <- from until till) bat.tally (observation (i))
        bat.computeMeans ()
        println ("precision for " + bat.nBatches + " batches of size " + bat.bSize + " = " + bat.precision ())
    } // while

    println ("final precision for " + bat.nBatches + " batches of size " + bat.bSize + " = " + bat.precision ())

} // BatchVectorTest object
| mvnural/scalation | src/main/scala/scalation/stat/BatchVector.scala | Scala | mit | 5,697 |
package com.sageserpent.plutonium
import java.time.Instant
import java.util
import java.util.{Optional, UUID}
import com.lambdaworks.redis.{RedisClient, RedisURI}
import com.sageserpent.americium.randomEnrichment._
import com.sageserpent.americium.{PositiveInfinity, Unbounded}
import com.sageserpent.plutonium.World.Revision
import org.scalacheck.Prop.BooleanOperators
import org.scalacheck.{Gen, Prop, Test}
import org.scalatest.prop.Checkers
import org.scalatest.{FlatSpec, Matchers}
import resource._
import scala.collection.mutable.Set
import scala.util.Random
trait WorldStateSharingBehaviours
extends FlatSpec
with Matchers
with Checkers
with WorldSpecSupport {
  // Generator of a managed factory that yields World instances sharing common state.
  val worldSharingCommonStateFactoryResourceGenerator: Gen[
    ManagedResource[() => World]]

  // ScalaCheck parameters used by all property checks in this behaviours trait.
  val testParameters: Test.Parameters

  // How many query threads run concurrently alongside each revision thread.
  val numberOfConcurrentQueriesPerRevision: Revision
def multipleInstancesRepresentingTheSameWorldBehaviour = {
    they should "yield the same results to scope queries regardless of which instance is used to define a revision" in {
      // Facade that randomly creates, discards and reuses World instances sharing
      // common state, so successive calls may be served by different instances.
      class DemultiplexingWorld(worldFactory: () => World, seed: Long)
          extends World {
        val random = new scala.util.Random(seed)

        // Pool of live instances; guarded by its own monitor in 'world' below.
        val worlds: Set[World] = Set.empty

        def world: World = {
          worlds.synchronized {
            // Randomly evict an instance to exercise state sharing across instances.
            if (worlds.nonEmpty && random.nextBoolean()) {
              worlds -= random.chooseOneOf(worlds)
            }
            if (worlds.nonEmpty && random.nextBoolean())
              random.chooseOneOf(worlds)
            else {
              val newWorldSharingCommonState = worldFactory()
              worlds += newWorldSharingCommonState
              newWorldSharingCommonState
            }
          }
        }

        // All World operations delegate to whichever instance 'world' picks.
        override def nextRevision: Revision = world.nextRevision

        override def revise(events: Map[_ <: EventId, Option[Event]],
                            asOf: Instant): Revision =
          world.revise(events, asOf)

        override def revise(events: util.Map[_ <: EventId, Optional[Event]],
                            asOf: Instant): Revision =
          world.revise(events, asOf)

        override def scopeFor(when: Unbounded[Instant],
                              nextRevision: Revision): Scope =
          world.scopeFor(when, nextRevision)

        override def scopeFor(when: Unbounded[Instant], asOf: Instant): Scope =
          world.scopeFor(when, asOf)

        override def forkExperimentalWorld(scope: javaApi.Scope): World =
          world.forkExperimentalWorld(scope)

        override def revisionAsOfs: Array[Instant] = world.revisionAsOfs

        override def revise(eventId: EventId,
                            event: Event,
                            asOf: Instant): Revision =
          world.revise(eventId, event, asOf)

        override def annul(eventId: EventId, asOf: Instant): Revision =
          world.annul(eventId, asOf)
      }

      // Generates shuffled histories (with interspersed obsolete events), sorted
      // revision times and a query time for each test case.
      val testCaseGenerator = for {
        worldSharingCommonStateFactoryResource <- worldSharingCommonStateFactoryResourceGenerator
        recordingsGroupedById <- recordingsGroupedByIdGenerator(
          forbidAnnihilations = false)
        obsoleteRecordingsGroupedById <- nonConflictingRecordingsGroupedByIdGenerator
        seed <- seedGenerator
        random = new Random(seed)
        shuffledRecordings = shuffleRecordingsPreservingRelativeOrderOfEventsAtTheSameWhen(
          random,
          recordingsGroupedById)
        shuffledObsoleteRecordings = shuffleRecordingsPreservingRelativeOrderOfEventsAtTheSameWhen(
          random,
          obsoleteRecordingsGroupedById)
        bigShuffledHistoryOverLotsOfThings = intersperseObsoleteEvents(
          random,
          shuffledRecordings,
          shuffledObsoleteRecordings)
        asOfs <- Gen.listOfN(bigShuffledHistoryOverLotsOfThings.length,
                             instantGenerator) map (_.sorted)
        queryWhen <- unboundedInstantGenerator
      } yield
        (worldSharingCommonStateFactoryResource,
         recordingsGroupedById,
         bigShuffledHistoryOverLotsOfThings,
         asOfs,
         queryWhen,
         seed)

      // Property: after recording the same events through randomly-chosen
      // instances, a scope query must reproduce the expected per-id histories.
      check(
        Prop.forAllNoShrink(testCaseGenerator) {
          case (worldSharingCommonStateFactoryResource,
                recordingsGroupedById,
                bigShuffledHistoryOverLotsOfThings,
                asOfs,
                queryWhen,
                seed) =>
            worldSharingCommonStateFactoryResource acquireAndGet {
              worldFactory =>
                val demultiplexingWorld =
                  new DemultiplexingWorld(worldFactory, seed)

                recordEventsInWorld(bigShuffledHistoryOverLotsOfThings,
                                    asOfs,
                                    demultiplexingWorld)

                val scope =
                  demultiplexingWorld.scopeFor(queryWhen,
                                               demultiplexingWorld.nextRevision)

                val checks = for {
                  RecordingsNoLaterThan(
                    historyId,
                    historiesFrom,
                    pertinentRecordings,
                    _,
                    _) <- recordingsGroupedById flatMap (_.thePartNoLaterThan(
                    queryWhen))
                  Seq(history) = historiesFrom(scope)
                } yield
                  (historyId, history.datums, pertinentRecordings.map(_._1))

                checks.nonEmpty ==>
                  Prop.all(checks.map {
                    case (historyId, actualHistory, expectedHistory) =>
                      ((actualHistory.length == expectedHistory.length) :| s"${actualHistory.length} == expectedHistory.length") &&
                        Prop.all(
                          (actualHistory zip expectedHistory zipWithIndex) map {
                            case ((actual, expected), step) =>
                              (actual == expected) :| s"For ${historyId}, @step ${step}, ${actual} == ${expected}"
                          }: _*)
                  }: _*)
            }
        },
        testParameters
      )
    }
    // Facade that gives each calling thread its own World instance (sharing
    // common state via the factory), so concurrent callers exercise distinct
    // instances. All World operations delegate to the per-thread instance.
    class DemultiplexingWorld(worldFactory: () => World) extends World {
      val worldThreadLocal: ThreadLocal[World] =
        ThreadLocal.withInitial[World](() => worldFactory())

      // The World bound to the current thread (created lazily on first use).
      def world: World = worldThreadLocal.get

      override def nextRevision: Revision = world.nextRevision

      override def revise(events: Map[_ <: EventId, Option[Event]],
                          asOf: Instant): Revision = world.revise(events, asOf)

      override def revise(events: util.Map[_ <: EventId, Optional[Event]],
                          asOf: Instant): Revision = world.revise(events, asOf)

      override def scopeFor(when: Unbounded[Instant],
                            nextRevision: Revision): Scope =
        world.scopeFor(when, nextRevision)

      override def scopeFor(when: Unbounded[Instant], asOf: Instant): Scope =
        world.scopeFor(when, asOf)

      override def forkExperimentalWorld(scope: javaApi.Scope): World =
        world.forkExperimentalWorld(scope)

      override def revisionAsOfs: Array[Instant] = world.revisionAsOfs

      override def revise(eventId: EventId,
                          event: Event,
                          asOf: Instant): Revision =
        world.revise(eventId, event, asOf)

      override def annul(eventId: EventId, asOf: Instant): Revision =
        world.annul(eventId, asOf)
    }
    // Integer-history recordings with annihilations forbidden, so that concurrent
    // revision attempts cannot conflict on item lifecycles.
    val integerHistoryRecordingsGroupedByIdThatAreRobustAgainstConcurrencyGenerator =
      recordingsGroupedByIdGenerator_(integerDataSamplesForAnIdGenerator,
                                      forbidAnnihilations = true)
    they should "allow concurrent revisions to be attempted on distinct instances" in {
      val testCaseGenerator = for {
        worldSharingCommonStateFactoryResource <- worldSharingCommonStateFactoryResourceGenerator
        recordingsGroupedById <- integerHistoryRecordingsGroupedByIdThatAreRobustAgainstConcurrencyGenerator
        obsoleteRecordingsGroupedById <- nonConflictingRecordingsGroupedByIdGenerator
        seed <- seedGenerator
        random = new Random(seed)
        shuffledRecordings = shuffleRecordingsPreservingRelativeOrderOfEventsAtTheSameWhen(
          random,
          recordingsGroupedById)
        shuffledObsoleteRecordings = shuffleRecordingsPreservingRelativeOrderOfEventsAtTheSameWhen(
          random,
          obsoleteRecordingsGroupedById)
        bigShuffledHistoryOverLotsOfThings = intersperseObsoleteEvents(
          random,
          shuffledRecordings,
          shuffledObsoleteRecordings)
        asOfs <- Gen.listOfN(bigShuffledHistoryOverLotsOfThings.length,
                             instantGenerator) map (_.sorted)
      } yield
        (worldSharingCommonStateFactoryResource,
         recordingsGroupedById,
         bigShuffledHistoryOverLotsOfThings,
         asOfs)
      check(
        Prop.forAllNoShrink(testCaseGenerator) {
          case (worldSharingCommonStateFactoryResource,
                recordingsGroupedById,
                bigShuffledHistoryOverLotsOfThings,
                asOfs) =>
            worldSharingCommonStateFactoryResource acquireAndGet {
              worldFactory =>
                val demultiplexingWorld = new DemultiplexingWorld(worldFactory)
                // Execute revision actions in parallel; each worker thread gets
                // its own World instance via the thread-local facade. Either no
                // race is detected (undecided) or the implementation fails fast
                // with a recognised message (proved) - anything else fails.
                try {
                  revisionActions(
                    bigShuffledHistoryOverLotsOfThings,
                    asOfs.iterator,
                    demultiplexingWorld).toParArray foreach (_.apply)
                  Prop.collect(
                    "No concurrent revision attempt detected in revision.")(
                    Prop.undecided)
                } catch {
                  case exception: RuntimeException
                      if exception.getMessage.startsWith(
                        "Concurrent revision attempt detected in revision") =>
                    Prop.collect(
                      "Concurrent revision attempt detected in revision.")(
                      Prop.proved)
                  case exception: RuntimeException
                      if exception.getMessage.contains(
                        "should be no earlier than") =>
                    Prop.collect(
                      "Asofs were presented out of order due to racing.")(
                      Prop.undecided)
                }
            }
        },
        testParameters
      )
    }
    they should "allow queries to be attempted on instances while one other is being revised" in {
      // PLAN: book events that define objects that all have an integer property, such that sorting
      // the property values by the associated id of their host instances yields either a monotonic
      // increasing or decreasing sequence.
      // By switching between increasing and decreasing from one revision to another, we hope to provoke
      // an inconsistent mixture of property values due to data racing between the N query threads and the
      // one revising thread. This should not be allowed to happen in a successful test case. We also expect
      // the test to report the detection of queries that would have produced such mixing - IOW, we expect
      // queries to fail fast, *not* to acquire locks.
      val universalSetOfIds = scala.collection.immutable.Set(0 until 20: _*)

      val testCaseGenerator = for {
        worldSharingCommonStateFactoryResource <- worldSharingCommonStateFactoryResourceGenerator
        asOfs <- Gen.nonEmptyListOf(instantGenerator) map (_.sorted)
        numberOfRevisions = asOfs.size
        idSetsForEachRevision <- Gen.listOfN(numberOfRevisions,
                                             Gen.someOf(universalSetOfIds))
      } yield
        (worldSharingCommonStateFactoryResource,
         asOfs,
         numberOfRevisions,
         idSetsForEachRevision)
      check(
        Prop.forAllNoShrink(testCaseGenerator) {
          case (worldSharingCommonStateFactoryResource,
                asOfs,
                numberOfRevisions,
                idSetsForEachRevision) =>
            worldSharingCommonStateFactoryResource acquireAndGet {
              worldFactory =>
                val demultiplexingWorld = new DemultiplexingWorld(worldFactory)

                val finalAsOf = asOfs.last

                // Query closures: each checks that the wildcard query sees a
                // consistent (all-ascending or all-descending) property sequence,
                // or that the implementation failed fast with a recognised message.
                val queries = for {
                  _ <- 1 to numberOfConcurrentQueriesPerRevision * numberOfRevisions
                } yield
                  () => {
                    try {
                      val scope = demultiplexingWorld
                        .scopeFor(PositiveInfinity[Instant](), finalAsOf)
                      val itemInstancesSortedById = scope
                        .render(Bitemporal.wildcard[Item])
                        .toList
                        .sortBy(_.id)
                      Prop.collect(
                        "No concurrent revision attempt detected in query.")(
                        Prop.undecided && (itemInstancesSortedById.isEmpty || (itemInstancesSortedById zip itemInstancesSortedById.tail forall {
                          case (first, second) =>
                            first.property < second.property
                        }) || (itemInstancesSortedById zip itemInstancesSortedById.tail forall {
                          case (first, second) =>
                            first.property > second.property
                        })))
                    } catch {
                      case exception: RuntimeException
                          if exception.getMessage.startsWith(
                            "Concurrent revision attempt detected in query") =>
                        Prop.collect(
                          "Concurrent revision attempt detected in query.")(
                          Prop.proved)
                    }
                  }

                // Alternates ascending/descending property assignments per revision.
                def toggledChoices(firstChoice: Boolean): Stream[Boolean] =
                  firstChoice #:: toggledChoices(!firstChoice)

                val revisionCommandSequence = () => {
                  for {
                    ((idSet, asOf), ascending) <- idSetsForEachRevision zip asOfs zip toggledChoices(
                      true)
                  } {
                    demultiplexingWorld.revise(
                      universalSetOfIds map (id =>
                        id -> (if (idSet.contains(id))
                                 Some(Change.forOneItem[Item](id, {
                                   (item: Item) =>
                                     if (ascending)
                                       item.property = id
                                     else
                                       item.property = -id
                                 }))
                               else None)) toMap,
                      asOf
                    )
                  }
                  Prop.undecided
                }

                // Run the single revising thread alongside all query threads.
                val checks = (revisionCommandSequence +: queries).toParArray map (_.apply)

                checks.reduce(_ ++ _)
            }
        },
        testParameters
      )
    }
    they should "allow lots of concurrent revisions to be attempted on distinct instances" in {
      // PLAN: book events that define objects that all have an integer property; each revision
      // confines its events to dealing with one of several sequences of item ids, such that all
      // the sequences taken together covers all of the items that can exist, and none of the
      // sequences overlap. That way, by comparing items queried from pairs of successive
      // revisions, we expect to show that each new revision only shows changes for item whose ids
      // belong to a single sequence.
      // By mixing lots of concurrent revisions, we hope to provoke a mixing of events due to data racing
      // between the N revising threads. This should not be allowed to happen in a successful test case. We
      // also expect the test to report the detection of revision attempts that would have produced such
      // mixing - IOW, we expect revisions to fail fast, *not* to acquire locks.
      val numberOfDistinctIdSequences = 10

      val idSequenceLength = 10

      val universalSetOfIds = scala.collection.immutable
        .Set(0 until (idSequenceLength * numberOfDistinctIdSequences): _*)

      val testCaseGenerator = for {
        worldSharingCommonStateFactoryResource <- worldSharingCommonStateFactoryResourceGenerator
        asOfs <- Gen.nonEmptyListOf(instantGenerator) map (_.sorted)
      } yield (worldSharingCommonStateFactoryResource, asOfs)
      check(
        Prop.forAllNoShrink(testCaseGenerator) {
          case (worldSharingCommonStateFactoryResource, asOfs) =>
            worldSharingCommonStateFactoryResource acquireAndGet {
              worldFactory =>
                val demultiplexingWorld = new DemultiplexingWorld(worldFactory)

                val asOfsIterator = asOfs.iterator

                // One revision command per asOf; revision 'index' only touches the
                // id sequence congruent to index mod numberOfDistinctIdSequences.
                val revisionCommands = for {
                  index <- asOfs.indices
                } yield
                  () => {
                    try {
                      demultiplexingWorld.revise(
                        0 until idSequenceLength map (index % numberOfDistinctIdSequences + numberOfDistinctIdSequences * _) map (
                          id =>
                            id ->
                              Some(Change.forOneItem[Item](id, {
                                (item: Item) =>
                                  item.property = index
                              }))) toMap,
                        asOfsIterator.next()
                      )
                      Prop.collect("No concurrent revision attempt detected.")(
                        Prop.undecided)
                    } catch {
                      case exception: RuntimeException
                          if exception.getMessage.startsWith(
                            "Concurrent revision attempt detected in revision") =>
                        Prop.collect(
                          "Concurrent revision attempt detected in revision.")(
                          Prop.proved)
                      case exception: RuntimeException
                          if exception.getMessage.contains(
                            "should be no earlier than") =>
                        Prop.collect(
                          "Asofs were presented out of order due to racing.")(
                          Prop.undecided)
                    }
                  }

                val revisionChecks = revisionCommands.toParArray map (_.apply)

                // Each pair of successive revisions must differ only in items
                // drawn from a single id sequence - otherwise revisions mixed.
                val revisionRange = World.initialRevision until demultiplexingWorld.nextRevision

                val queryChecks = for {
                  (previousNextRevision, nextRevision) <- revisionRange zip revisionRange.tail
                } yield {
                  val previousScope =
                    demultiplexingWorld.scopeFor(PositiveInfinity[Instant](),
                                                 previousNextRevision)
                  val scope =
                    demultiplexingWorld.scopeFor(PositiveInfinity[Instant](),
                                                 nextRevision)

                  val itemsFromPreviousScope =
                    (previousScope.render(Bitemporal.wildcard[Item]) map (
                      item => item.id -> item.property)).toSet
                  val itemsFromScope =
                    (scope.render(Bitemporal.wildcard[Item]) map (item =>
                      item.id -> item.property)).toSet

                  val itemsThatHaveChanged = itemsFromScope diff itemsFromPreviousScope

                  val sequenceIndicesOfChangedItems = itemsThatHaveChanged map (_._1 % numberOfDistinctIdSequences)

                  (1 == (sequenceIndicesOfChangedItems groupBy identity).size) :| "Detected changes contributed by another revision."
                }

                revisionChecks.reduce(_ ++ _) && queryChecks
                  .reduceOption(_ && _)
                  .getOrElse(Prop.undecided)
            }
        },
        testParameters
      )
    }
}
}
// Test-only bitemporal item: the concurrency properties above read and write
// 'property' keyed by 'id' to detect mixed-up revisions.
abstract class Item {
  // Identifier used to order item instances in queries.
  val id: Int

  // Mutable payload; per-revision assignments encode a monotone sequence over ids.
  var property: Int = 0
}
// Runs the shared behaviours against the in-memory reference implementation:
// all instances share one MutableState.
class WorldStateSharingSpecUsingWorldReferenceImplementation
    extends WorldStateSharingBehaviours {
  val testParameters: Test.Parameters =
    Test.Parameters.defaultVerbose.withMaxSize(50).withMinSuccessfulTests(50)

  val numberOfConcurrentQueriesPerRevision: Revision = 100

  val worldSharingCommonStateFactoryResourceGenerator
    : Gen[ManagedResource[() => World]] =
    Gen.const(
      for (sharedMutableState <- makeManagedResource(new MutableState)(_ => {})(
             List.empty))
        yield
          () =>
            new WorldReferenceImplementation(mutableState = sharedMutableState))

  "multiple world instances representing the same world (using the world reference implementation)" should behave like multipleInstancesRepresentingTheSameWorldBehaviour
}
// Runs the shared behaviours against the Redis-backed implementation: instances
// share state via a common Redis key prefix (the shared GUID); Redis clients are
// tracked so they can be shut down when the managed resource is released.
class WorldStateSharingSpecUsingWorldRedisBasedImplementation
    extends WorldStateSharingBehaviours
    with RedisServerFixture {
  val redisServerPort: Int = 6451

  val testParameters: Test.Parameters =
    Test.Parameters.defaultVerbose.withMaxSize(30).withMinSuccessfulTests(50)

  val numberOfConcurrentQueriesPerRevision: Revision = 20

  val worldSharingCommonStateFactoryResourceGenerator
    : Gen[ManagedResource[() => World]] =
    Gen.const(for {
      sharedGuid <- makeManagedResource(UUID.randomUUID().toString)(_ => {})(
        List.empty)
      redisClientSet <- makeManagedResource(Set.empty[RedisClient])(
        redisClientSet => redisClientSet.foreach(_.shutdown()))(List.empty)
    } yield {
      val redisClient = RedisClient.create(
        RedisURI.Builder.redis("localhost", redisServerPort).build())
      redisClientSet += redisClient
      () =>
        new WorldRedisBasedImplementation(redisClient, sharedGuid)
    })

  "multiple world instances representing the same world (using the world Redis-based implementation)" should behave like multipleInstancesRepresentingTheSameWorldBehaviour
}
| sageserpent-open/open-plutonium | src/test/scala/com/sageserpent/plutonium/WorldStateSharingBehaviours.scala | Scala | mit | 22,332 |
package com.sksamuel.elastic4s.searches.aggs
import com.sksamuel.elastic4s.ScriptBuilder
import org.elasticsearch.search.aggregations.AggregationBuilders
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder
import scala.collection.JavaConverters._
object HistogramAggregationBuilder {

  /** Converts an elastic4s [[HistogramAggregation]] definition into the native
    * Elasticsearch [[HistogramAggregationBuilder]], copying only the options
    * that were explicitly set (each option is an Option, hence the foreach calls).
    */
  def apply(agg: HistogramAggregation): HistogramAggregationBuilder = {
    val builder = AggregationBuilders.histogram(agg.name)
    agg.field.foreach(builder.field)
    agg.missing.foreach(builder.missing)
    agg.format.foreach(builder.format)
    agg.order.foreach(builder.order)
    agg.keyed.foreach(builder.keyed)
    agg.interval.foreach(builder.interval)
    agg.minDocCount.foreach(builder.minDocCount)
    agg.offset.foreach(builder.offset)
    agg.extendedBounds.foreach { case (min, max) => builder.extendedBounds(min, max) }
    agg.script.map(ScriptBuilder.apply).foreach(builder.script)
    // Attach any sub-aggregations and metadata last.
    SubAggsFn(builder, agg.subaggs)
    if (agg.metadata.nonEmpty) builder.setMetaData(agg.metadata.asJava)
    builder
  }
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/aggs/HistogramAggregationBuilder.scala | Scala | apache-2.0 | 1,055 |
package org.bitcoins.core.protocol.dlc.sign
import org.bitcoins.core.config.BitcoinNetwork
import org.bitcoins.core.crypto.{
TransactionSignatureCreator,
TransactionSignatureSerializer
}
import org.bitcoins.core.currency.CurrencyUnit
import org.bitcoins.core.number.UInt32
import org.bitcoins.core.protocol.dlc.build.DLCTxBuilder
import org.bitcoins.core.protocol.dlc.models._
import org.bitcoins.core.protocol.script._
import org.bitcoins.core.protocol.transaction._
import org.bitcoins.core.protocol.{Bech32Address, BitcoinAddress}
import org.bitcoins.core.psbt.InputPSBTRecord.PartialSignature
import org.bitcoins.core.psbt.PSBT
import org.bitcoins.core.script.crypto.HashType
import org.bitcoins.core.util.{FutureUtil, Indexed}
import org.bitcoins.core.wallet.signer.BitcoinSigner
import org.bitcoins.core.wallet.utxo._
import org.bitcoins.crypto._
import scodec.bits.ByteVector
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
/** Responsible for constructing all DLC signatures
* and signed transactions
*/
case class DLCTxSigner(
builder: DLCTxBuilder,
isInitiator: Boolean,
fundingKey: AdaptorSign,
finalAddress: BitcoinAddress,
fundingUtxos: Vector[ScriptSignatureParams[InputInfo]]) {
  private val offer = builder.offer
  private val accept = builder.accept

  // The counterparty's funding key: the accepter's if we are the offerer, and
  // vice versa.
  private val remoteFundingPubKey = if (isInitiator) {
    accept.pubKeys.fundingKey
  } else {
    offer.pubKeys.fundingKey
  }

  // Sanity-check that the supplied key, payout address and funding UTXOs are
  // consistent with this party's side of the negotiated DLC (offer or accept).
  if (isInitiator) {
    require(fundingKey.publicKey == offer.pubKeys.fundingKey &&
              finalAddress == offer.pubKeys.payoutAddress,
            "Given keys do not match public key and address in offer")
    // Pair UTXOs with the offer's funding inputs by outpoint, then compare in
    // serial-id order so ordering differences do not cause a spurious mismatch.
    val fundingUtxosAsInputs =
      fundingUtxos
        .sortBy(_.outPoint.bytes)
        .zip(offer.fundingInputs.sortBy(_.outPoint.bytes))
        .map { case (utxo, fund) =>
          DLCFundingInput.fromInputSigningInfo(utxo,
                                               fund.inputSerialId,
                                               fund.sequence)
        }
        .sortBy(_.inputSerialId)
    require(fundingUtxosAsInputs == offer.fundingInputs.sortBy(_.inputSerialId),
            "Funding ScriptSignatureParams did not match offer funding inputs")
  } else {
    require(
      fundingKey.publicKey == accept.pubKeys.fundingKey &&
        finalAddress == accept.pubKeys.payoutAddress,
      "Given keys do not match public key and address in accept"
    )
    // Same pairing/normalisation as above, against the accept's funding inputs.
    val fundingUtxosAsInputs =
      fundingUtxos
        .sortBy(_.outPoint.bytes)
        .zip(accept.fundingInputs.sortBy(_.outPoint.bytes))
        .map { case (utxo, fund) =>
          DLCFundingInput.fromInputSigningInfo(utxo,
                                               fund.inputSerialId,
                                               fund.sequence)
        }
        .sortBy(_.inputSerialId)
    require(
      fundingUtxosAsInputs == accept.fundingInputs.sortBy(_.inputSerialId),
      "Funding ScriptSignatureParams did not match accept funding inputs"
    )
  }
/** Return's this party's payout for a given oracle signature */
def getPayout(sigs: Vector[OracleSignatures]): CurrencyUnit = {
val (offerPayout, acceptPayout) = builder.getPayouts(sigs)
if (isInitiator) {
offerPayout
} else {
acceptPayout
}
}
/** Creates this party's FundingSignatures */
def signFundingTx(): Try[FundingSignatures] = {
val fundingInputs =
if (isInitiator) builder.offerFundingInputs
else builder.acceptFundingInputs
val utxos =
fundingUtxos
.zip(fundingInputs)
.map { case (utxo, fundingInput) =>
SpendingInfoWithSerialId(utxo, fundingInput.inputSerialId)
}
.sortBy(_.serialId)
DLCTxSigner.signFundingTx(builder.buildFundingTx, utxos)
}
/** Constructs the signed DLC funding transaction given remote FundingSignatures */
def completeFundingTx(remoteSigs: FundingSignatures): Try[Transaction] = {
signFundingTx().flatMap { localSigs =>
DLCTxSigner.completeFundingTx(localSigs,
remoteSigs,
offer.fundingInputs,
accept.fundingInputs,
builder.buildFundingTx)
}
}
private var _cetSigningInfo: Option[ECSignatureParams[P2WSHV0InputInfo]] =
None
private def cetSigningInfo: ECSignatureParams[P2WSHV0InputInfo] = {
_cetSigningInfo match {
case Some(info) => info
case None =>
val signingInfo =
DLCTxSigner.buildCETSigningInfo(builder.fundOutputIndex,
builder.buildFundingTx,
builder.fundingMultiSig,
fundingKey)
_cetSigningInfo = Some(signingInfo)
signingInfo
}
}
/** Signs remote's Contract Execution Transaction (CET) for a given outcome */
def signCET(adaptorPoint: ECPublicKey, index: Int): ECAdaptorSignature = {
signCETs(Vector(Indexed(adaptorPoint, index))).head._2
}
/** Builds and signs remote's Contract Execution Transactions (CETs) for the
  * given outcomes, returning (sig point, CET, adaptor signature) triples.
  */
def buildAndSignCETs(adaptorPoints: Vector[Indexed[ECPublicKey]]): Vector[
    (ECPublicKey, WitnessTransaction, ECAdaptorSignature)] = {
  DLCTxSigner.buildAndSignCETs(builder.buildCETsMap(adaptorPoints),
                               cetSigningInfo,
                               fundingKey)
}
/** Signs remote's Contract Execution Transactions (CETs) for the given
  * outcomes, returning (sig point, adaptor signature) pairs.
  */
def signCETs(adaptorPoints: Vector[Indexed[ECPublicKey]]): Vector[
    (ECPublicKey, ECAdaptorSignature)] = {
  // Drop the CET from each triple; callers only need point -> signature.
  buildAndSignCETs(adaptorPoints).map(triple => triple._1 -> triple._3)
}
/** Signs remote's Contract Execution Transactions (CETs) when the caller
  * already has the unsigned CETs paired with their adaptor points (so the
  * CETs do not need to be rebuilt here).
  */
def signGivenCETs(outcomesAndCETs: Vector[AdaptorPointCETPair]): Vector[
    (ECPublicKey, ECAdaptorSignature)] = {
  DLCTxSigner.signCETs(outcomesAndCETs, cetSigningInfo, fundingKey)
}
/** Fully signs a CET for the given outcome by combining our signature with
  * the remote adaptor signature completed using the oracle signatures.
  *
  * Looks up the CET's index from the contract info so the correct unsigned
  * CET can be rebuilt before delegating to [[DLCTxSigner.completeCET]].
  */
def completeCET(
    outcome: OracleOutcome,
    remoteAdaptorSig: ECAdaptorSignature,
    oracleSigs: Vector[OracleSignatures]): WitnessTransaction = {
  val index = builder.contractInfo.allOutcomes.indexOf(outcome)
  DLCTxSigner.completeCET(
    outcome,
    cetSigningInfo,
    builder.fundingMultiSig,
    builder.buildFundingTx,
    builder.buildCET(outcome.sigPoint, index),
    remoteAdaptorSig,
    remoteFundingPubKey,
    oracleSigs
  )
}
/** Creates this party's signature of the refund transaction.
  *
  * A `lazy val` rather than a `def`: the refund transaction is fixed for the
  * life of this signer, so the signature only needs to be computed once.
  */
lazy val signRefundTx: PartialSignature = {
  DLCTxSigner.signRefundTx(cetSigningInfo, builder.buildRefundTx)
}
/** Constructs the fully-signed refund transaction from remote's signature,
  * pairing it with our own (cached) refund signature.
  */
def completeRefundTx(remoteSig: PartialSignature): WitnessTransaction =
  DLCTxSigner.completeRefundTx(signRefundTx,
                               remoteSig,
                               builder.fundingMultiSig,
                               builder.buildFundingTx,
                               builder.buildRefundTx)
/** Creates all of this party's CETSignatures (one adaptor signature per
  * possible outcome of the contract).
  */
def createCETSigs(): CETSignatures =
  CETSignatures(signCETs(builder.contractInfo.adaptorPointsIndexed))
/** Creates all of this party's CETSignatures asynchronously.
  *
  * Splits the adaptor points into per-core batches and signs the batches in
  * parallel, then flattens the results into a single CETSignatures.
  */
def createCETSigsAsync()(implicit
    ec: ExecutionContext): Future[CETSignatures] = {
  val adaptorPoints = builder.contractInfo.adaptorPointsIndexed
  // One batch per available core, with a minimum batch size of 1.
  val batchSize =
    Math.max(adaptorPoints.length / Runtime.getRuntime.availableProcessors(),
             1)

  val signBatch: Vector[Indexed[ECPublicKey]] => Future[
    Vector[(ECPublicKey, ECAdaptorSignature)]] = { batch =>
    FutureUtil.makeAsync(() => signCETs(batch))
  }

  FutureUtil
    .batchAndParallelExecute(elements = adaptorPoints,
                             f = signBatch,
                             batchSize = batchSize)
    .map(batches => CETSignatures(batches.flatten))
}
/** Creates all of this party's CETSignatures along with the CETs themselves. */
def createCETsAndCETSigs(): (CETSignatures, Vector[WitnessTransaction]) = {
  val cetsAndSigs = buildAndSignCETs(builder.contractInfo.adaptorPointsIndexed)
  val sigPoints = cetsAndSigs.map(_._1)
  val cets = cetsAndSigs.map(_._2)
  val adaptorSigs = cetsAndSigs.map(_._3)
  (CETSignatures(sigPoints.zip(adaptorSigs)), cets)
}
/** The equivalent of [[createCETsAndCETSigs()]] but async.
  *
  * Fix for consistency with [[createCETSigsAsync]]: that sibling computes a
  * per-core batch size, while this overload previously fell back to
  * `batchAndParallelExecute`'s default batch size. Both now batch the same
  * way. Results are unchanged; only the work partitioning is aligned.
  */
def createCETsAndCETSigsAsync()(implicit
    ec: ExecutionContext): Future[(CETSignatures, Vector[WitnessTransaction])] = {
  val adaptorPoints = builder.contractInfo.adaptorPointsIndexed
  // One batch per available core, with a minimum batch size of 1.
  val batchSize =
    Math.max(adaptorPoints.length / Runtime.getRuntime.availableProcessors(),
             1)
  val fn: Vector[Indexed[ECPublicKey]] => Future[
    Vector[(ECPublicKey, WitnessTransaction, ECAdaptorSignature)]] = {
    batch => FutureUtil.makeAsync(() => buildAndSignCETs(batch))
  }
  val cetsAndSigsF: Future[
    Vector[Vector[(ECPublicKey, WitnessTransaction, ECAdaptorSignature)]]] = {
    FutureUtil.batchAndParallelExecute(elements = adaptorPoints,
                                       f = fn,
                                       batchSize = batchSize)
  }
  for {
    cetsAndSigsNested <- cetsAndSigsF
    cetsAndSigs = cetsAndSigsNested.flatten
    (msgs, cets, sigs) = cetsAndSigs.unzip3
  } yield (CETSignatures(msgs.zip(sigs)), cets)
}
/** Creates this party's CETSignatures from caller-supplied outcomes paired
  * with their (already built) unsigned CETs.
  */
def createCETSigs(
    outcomesAndCETs: Vector[AdaptorPointCETPair]): CETSignatures =
  CETSignatures(signGivenCETs(outcomesAndCETs))
}
/** Stateless signing helpers shared by [[DLCTxSigner]] instances: building
  * funding-output signing info, adaptor-signing CETs, signing/completing the
  * refund transaction, and signing/completing the funding transaction.
  */
object DLCTxSigner {

  /** Convenience constructor that derives the payout address as a P2WPKH
    * address of the payout key on the given network.
    */
  def apply(
      builder: DLCTxBuilder,
      isInitiator: Boolean,
      fundingKey: AdaptorSign,
      payoutPrivKey: AdaptorSign,
      network: BitcoinNetwork,
      fundingUtxos: Vector[ScriptSignatureParams[InputInfo]]): DLCTxSigner = {
    val payoutAddr =
      Bech32Address(P2WPKHWitnessSPKV0(payoutPrivKey.publicKey), network)
    DLCTxSigner(builder, isInitiator, fundingKey, payoutAddr, fundingUtxos)
  }

  /** Builds the signing parameters for spending the funding output: a P2WSH
    * input wrapping the 2-of-2 funding multisig, signed with SIGHASH_ALL.
    */
  def buildCETSigningInfo(
      fundOutputIndex: Int,
      fundingTx: Transaction,
      fundingMultiSig: MultiSignatureScriptPubKey,
      fundingKey: Sign): ECSignatureParams[P2WSHV0InputInfo] = {
    val fundingOutPoint =
      TransactionOutPoint(fundingTx.txId, UInt32(fundOutputIndex))

    ECSignatureParams(
      P2WSHV0InputInfo(
        outPoint = fundingOutPoint,
        amount = fundingTx.outputs(fundOutputIndex).value,
        scriptWitness = P2WSHWitnessV0(fundingMultiSig),
        conditionalPath = ConditionalPath.NoCondition
      ),
      fundingTx,
      fundingKey,
      HashType.sigHashAll
    )
  }

  /** Adaptor-signs a single CET with respect to the given oracle sig point. */
  def signCET(
      sigPoint: ECPublicKey,
      cet: WitnessTransaction,
      cetSigningInfo: ECSignatureParams[P2WSHV0InputInfo],
      fundingKey: AdaptorSign): ECAdaptorSignature = {
    signCETs(Vector(AdaptorPointCETPair(sigPoint, cet)),
             cetSigningInfo,
             fundingKey).head._2
  }

  /** Adaptor-signs the given CETs, returning (sig point, adaptor sig) pairs. */
  def signCETs(
      outcomesAndCETs: Vector[AdaptorPointCETPair],
      cetSigningInfo: ECSignatureParams[P2WSHV0InputInfo],
      fundingKey: AdaptorSign): Vector[(ECPublicKey, ECAdaptorSignature)] = {
    buildAndSignCETs(outcomesAndCETs, cetSigningInfo, fundingKey).map {
      case (outcome, _, sig) => outcome -> sig
    }
  }

  /** Adaptor-signs the given CETs, returning each CET alongside its
    * (sig point, adaptor signature) pair.
    */
  def buildAndSignCETs(
      outcomesAndCETs: Vector[AdaptorPointCETPair],
      cetSigningInfo: ECSignatureParams[P2WSHV0InputInfo],
      fundingKey: AdaptorSign): Vector[
    (ECPublicKey, WitnessTransaction, ECAdaptorSignature)] = {
    outcomesAndCETs.map { case AdaptorPointCETPair(sigPoint, cet) =>
      // Hash the CET for SIGHASH_ALL and produce an adaptor signature that
      // can only be completed with the oracle signature for sigPoint.
      val hashToSign =
        TransactionSignatureSerializer.hashForSignature(cet,
                                                        cetSigningInfo,
                                                        HashType.sigHashAll)

      val adaptorSig = fundingKey.adaptorSign(sigPoint, hashToSign.bytes)
      (sigPoint, cet, adaptorSig)
    }
  }

  // TODO: Without PSBTs
  /** Fully signs a CET: creates our ECDSA signature, completes remote's
    * adaptor signature using the aggregated oracle signature, and finalizes
    * both through a PSBT on input 0 (the funding input).
    *
    * @throws Throwable if PSBT finalization or validation fails.
    */
  def completeCET(
      outcome: OracleOutcome,
      cetSigningInfo: ECSignatureParams[P2WSHV0InputInfo],
      fundingMultiSig: MultiSignatureScriptPubKey,
      fundingTx: Transaction,
      ucet: WitnessTransaction,
      remoteAdaptorSig: ECAdaptorSignature,
      remoteFundingPubKey: ECPublicKey,
      oracleSigs: Vector[OracleSignatures]): WitnessTransaction = {
    val signLowR: ByteVector => ECDigitalSignature =
      cetSigningInfo.signer.signLowR(_: ByteVector)
    val localSig = TransactionSignatureCreator.createSig(ucet,
                                                         cetSigningInfo,
                                                         signLowR,
                                                         HashType.sigHashAll)
    // Aggregate the oracle signatures, then use the aggregate to complete
    // remote's adaptor signature into a regular ECDSA signature.
    val oracleSigSum =
      OracleSignatures.computeAggregateSignature(outcome, oracleSigs)
    val remoteSig =
      oracleSigSum
        .completeAdaptorSignature(remoteAdaptorSig, HashType.sigHashAll.byte)

    val localParitalSig =
      PartialSignature(cetSigningInfo.signer.publicKey, localSig)
    val remotePartialSig = PartialSignature(remoteFundingPubKey, remoteSig)

    // The CET spends only the funding output, which is always input 0.
    val psbt =
      PSBT
        .fromUnsignedTx(ucet)
        .addUTXOToInput(fundingTx, index = 0)
        .addScriptWitnessToInput(P2WSHWitnessV0(fundingMultiSig), index = 0)
        .addSignature(localParitalSig, inputIndex = 0)
        .addSignature(remotePartialSig, inputIndex = 0)

    val cetT = psbt.finalizePSBT
      .flatMap(_.extractTransactionAndValidate)
      .map(_.asInstanceOf[WitnessTransaction])

    cetT match {
      case Success(cet) => cet
      case Failure(err) => throw err
    }
  }

  /** Creates a partial signature of the refund transaction with SIGHASH_ALL. */
  def signRefundTx(
      refundSigningInfo: ECSignatureParams[P2WSHV0InputInfo],
      refundTx: WitnessTransaction
  ): PartialSignature = {
    val fundingPubKey = refundSigningInfo.signer.publicKey
    val signLowR: ByteVector => ECDigitalSignature =
      refundSigningInfo.signer.signLowR(_: ByteVector)
    val sig = TransactionSignatureCreator.createSig(refundTx,
                                                    refundSigningInfo,
                                                    signLowR,
                                                    HashType.sigHashAll)
    PartialSignature(fundingPubKey, sig)
  }

  // TODO: Without PSBTs
  /** Combines both parties' refund signatures into a finalized refund
    * transaction via a PSBT on input 0 (the funding input).
    *
    * @throws Throwable if PSBT finalization or validation fails.
    */
  def completeRefundTx(
      localSig: PartialSignature,
      remoteSig: PartialSignature,
      fundingMultiSig: MultiSignatureScriptPubKey,
      fundingTx: Transaction,
      uRefundTx: WitnessTransaction): WitnessTransaction = {
    val psbt = PSBT
      .fromUnsignedTx(uRefundTx)
      .addUTXOToInput(fundingTx, index = 0)
      .addScriptWitnessToInput(P2WSHWitnessV0(fundingMultiSig), index = 0)
      .addSignature(localSig, inputIndex = 0)
      .addSignature(remoteSig, inputIndex = 0)

    val refundTxT = psbt.finalizePSBT
      .flatMap(_.extractTransactionAndValidate)
      .map(_.asInstanceOf[WitnessTransaction])

    refundTxT match {
      case Success(refundTx) => refundTx
      case Failure(err) => throw err
    }
  }

  /** Signs every funding input, collecting its SegWit witness.
    *
    * Fails if any input is not SegWit (the resulting transaction would not be
    * a WitnessTransaction) or if any witness is not a recognized v0 witness.
    * Signatures are returned in the order of `fundingUtxos`.
    */
  def signFundingTx(
      fundingTx: Transaction,
      fundingUtxos: Vector[SpendingInfoWithSerialId]
  ): Try[FundingSignatures] = {
    val sigsT = fundingUtxos
      .foldLeft[Try[Vector[(TransactionOutPoint, ScriptWitnessV0)]]](
        Success(Vector.empty)) {
        case (sigsT, SpendingInfoWithSerialId(utxo, _)) =>
          sigsT.flatMap { sigs =>
            val sigComponent =
              BitcoinSigner.sign(utxo, fundingTx, isDummySignature = false)
            val witnessT =
              sigComponent.transaction match {
                case wtx: WitnessTransaction =>
                  val witness = wtx.witness(sigComponent.inputIndex.toInt)
                  if (witness == EmptyScriptWitness) {
                    Failure(
                      new RuntimeException(
                        s"Funding Inputs must be SegWit: $utxo"))
                  } else {
                    Success(witness)
                  }
                case _: NonWitnessTransaction =>
                  Failure(
                    new RuntimeException(
                      s"Funding Inputs must be SegWit: $utxo"))
              }

            witnessT.flatMap {
              case witness: ScriptWitnessV0 =>
                Success(sigs.:+((utxo.outPoint, witness)))
              case witness: ScriptWitness =>
                Failure(
                  new RuntimeException(
                    s"Unrecognized script witness: $witness"))
            }
          }
      }

    sigsT.map { sigs =>
      // Re-key the signatures by outpoint, preserving the input order of
      // fundingUtxos in the resulting FundingSignatures.
      val sigsMap = sigs.toMap

      val sigsVec = fundingUtxos.map {
        case SpendingInfoWithSerialId(input, _) =>
          input.outPoint -> sigsMap(input.outPoint)
      }

      FundingSignatures(sigsVec)
    }
  }

  /** Assembles the fully-signed funding transaction from both parties'
    * FundingSignatures, matching signatures to inputs sorted by serial id.
    */
  def completeFundingTx(
      localSigs: FundingSignatures,
      remoteSigs: FundingSignatures,
      offerFundingInputs: Vector[DLCFundingInput],
      acceptFundingInputs: Vector[DLCFundingInput],
      fundingTx: Transaction): Try[Transaction] = {
    // Inputs appear in the funding tx in serial-id order across both parties.
    val fundingInputs =
      (offerFundingInputs ++ acceptFundingInputs).sortBy(_.inputSerialId)
    val allSigs = localSigs.merge(remoteSigs)

    val psbt = fundingInputs.zipWithIndex.foldLeft(
      PSBT.fromUnsignedTxWithP2SHScript(fundingTx)) {
      case (psbt, (fundingInput, index)) =>
        val witness = allSigs(fundingInput.outPoint)

        psbt
          .addUTXOToInput(fundingInput.prevTx, index)
          .addFinalizedScriptWitnessToInput(fundingInput.scriptSignature,
                                            witness,
                                            index)
    }

    val finalizedT = if (psbt.isFinalized) {
      Success(psbt)
    } else {
      psbt.finalizePSBT
    }

    finalizedT.flatMap(_.extractTransactionAndValidate)
  }
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/protocol/dlc/sign/DLCTxSigner.scala | Scala | mit | 18,290 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.streams.examples.flink.twitter.test
import java.io.File
import java.nio.file.Files
import java.nio.file.Paths
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigParseOptions
import org.apache.streams.config.StreamsConfigurator
import org.apache.streams.examples.flink.twitter.TwitterUserInformationPipelineConfiguration
import org.apache.streams.examples.flink.twitter.collection.FlinkTwitterUserInformationPipeline
import org.scalatest.Assertions._
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.testng.annotations.Test
import scala.io.Source
/**
  * FlinkTwitterUserInformationPipelineIT is an integration test for FlinkTwitterUserInformationPipeline.
  *
  * Loads the pipeline configuration from
  * target/test-classes/FlinkTwitterUserInformationPipelineIT.conf, runs the
  * pipeline to completion on its own thread, and then asserts on the written
  * destination output.
  */
class FlinkTwitterUserInformationPipelineIT {

  private val LOGGER: Logger = LoggerFactory.getLogger(classOf[FlinkTwitterUserInformationPipelineIT])

  import FlinkTwitterUserInformationPipeline._

  @Test
  def flinkTwitterUserInformationPipelineIT = {

    // Layer the test-resource config over the reference configuration; the
    // parse fails fast (setAllowMissing(false)) if the file is absent.
    val reference: Config = ConfigFactory.load()
    val conf_file: File = new File("target/test-classes/FlinkTwitterUserInformationPipelineIT.conf")
    assert(conf_file.exists())
    val testResourceConfig = ConfigFactory.parseFileAnySyntax(conf_file, ConfigParseOptions.defaults.setAllowMissing(false))
    StreamsConfigurator.addConfig(testResourceConfig)
    val testConfig = new StreamsConfigurator(classOf[TwitterUserInformationPipelineConfiguration]).detectCustomConfiguration()
    setup(testConfig)

    // Run the pipeline synchronously: start the job thread and wait for it.
    val job = new FlinkTwitterUserInformationPipeline(config = testConfig)
    val jobThread = new Thread(job)
    jobThread.start
    jobThread.join

    // Poll (up to 30s) until the destination file exists, then require more
    // than 500 lines, each containing a "created_at" field.
    eventually (timeout(30 seconds), interval(1 seconds)) {
      assert(Files.exists(Paths.get(testConfig.getDestination.getPath + "/" + testConfig.getDestination.getWriterPath)))
      val lines = Source.fromFile(testConfig.getDestination.getPath + "/" + testConfig.getDestination.getWriterPath, "UTF-8").getLines.toList
      assert(lines.size > 500)
      lines foreach {
        line => assert( line.contains("created_at") )
      }
    }
  }
}
| jfrazee/incubator-streams | streams-examples/streams-examples-flink/flink-twitter-collection/src/test/scala/org/apache/streams/examples/flink/twitter/test/FlinkTwitterUserInformationPipelineIT.scala | Scala | apache-2.0 | 2,960 |
package mesosphere.marathon
package api.v2.validation
import com.wix.accord.{ Failure, Result, Validator }
import mesosphere.marathon.raml.{ Constraint, ConstraintOperator, DockerPullConfig, Endpoint, EnvVarSecret, Image, ImageType, Network, NetworkMode, PersistentVolumeInfo, Pod, PodContainer, PodEphemeralVolume, PodPersistentVolume, PodSchedulingPolicy, PodSecretVolume, PodUpgradeStrategy, Resources, SecretDef, UnreachableDisabled, UnreachableEnabled, VolumeMount }
import mesosphere.marathon.state.PersistentVolume
import mesosphere.marathon.util.SemanticVersion
import mesosphere.{ UnitTest, ValidationTestLike }
/** Unit tests for pod (raml [[Pod]]) validation: id/user/secret/container
  * checks, constraint rules, persistent-volume rules (including resident-pod
  * update restrictions), and network/endpoint validation.
  */
class PodsValidationTest extends UnitTest with ValidationTestLike with PodsValidation with SchedulingValidation {

  "A pod definition" should {

    "be rejected if the id is empty" in new Fixture() {
      validator(validPod.copy(id = "/")) should haveViolations("/id" -> "Path must contain at least one path element")
    }

    "be rejected if the id is not absolute" in new Fixture() {
      validator(validPod.copy(id = "some/foo")) should haveViolations("/id" -> "Path needs to be absolute")
    }

    "be rejected if a defined user is empty" in new Fixture() {
      validator(validPod.copy(user = Some(""))) should haveViolations("/user" -> "must not be empty")
    }

    "be accepted if secrets defined" in new Fixture(validateSecrets = true) {
      private val valid = validPod.copy(secrets = Map("secret1" -> SecretDef(source = "/foo")), environment = Map("TEST" -> EnvVarSecret("secret1")))
      validator(valid) should be(aSuccess)
    }

    "be rejected if no container is defined" in new Fixture() {
      validator(validPod.copy(containers = Seq.empty)) should haveViolations("/containers" -> "must not be empty")
    }

    "be rejected if container names are not unique" in new Fixture() {
      validator(validPod.copy(containers = Seq(validContainer, validContainer))) should haveViolations(
        "/containers" -> PodsValidationMessages.ContainerNamesMustBeUnique)
    }

    "be rejected if endpoint names are not unique" in new Fixture() {
      val endpoint1 = Endpoint("endpoint", hostPort = Some(123))
      val endpoint2 = Endpoint("endpoint", hostPort = Some(124))
      private val invalid = validPod.copy(containers = Seq(validContainer.copy(endpoints = Seq(endpoint1, endpoint2))))
      validator(invalid) should haveViolations(
        "/" -> PodsValidationMessages.EndpointNamesMustBeUnique)
    }

    "be rejected if endpoint host ports are not unique" in new Fixture() {
      val endpoint1 = Endpoint("endpoint1", hostPort = Some(123))
      val endpoint2 = Endpoint("endpoint2", hostPort = Some(123))
      private val invalid = validPod.copy(containers = Seq(validContainer.copy(endpoints = Seq(endpoint1, endpoint2))))
      validator(invalid) should haveViolations(
        "/" -> PodsValidationMessages.HostPortsMustBeUnique)
    }

    "be rejected if endpoint container ports are not unique" in new Fixture() {
      val endpoint1 = Endpoint("endpoint1", containerPort = Some(123))
      val endpoint2 = Endpoint("endpoint2", containerPort = Some(123))
      // Container ports are only meaningful with container-mode networking.
      private val invalid = validPod.copy(
        networks = Seq(Network(mode = NetworkMode.Container, name = Some("default-network-name"))),
        containers = Seq(validContainer.copy(endpoints = Seq(endpoint1, endpoint2)))
      )
      validator(invalid) should haveViolations(
        "/" -> PodsValidationMessages.ContainerPortsMustBeUnique)
    }

    "be rejected if volume names are not unique" in new Fixture() {
      val volume = PodEphemeralVolume("volume")
      val volumeMount = VolumeMount(volume.name, "/bla")
      private val invalid = validPod.copy(
        volumes = Seq(volume, volume),
        containers = Seq(validContainer.copy(volumeMounts = Seq(volumeMount)))
      )
      validator(invalid) should haveViolations(
        "/volumes" -> PodsValidationMessages.VolumeNamesMustBeUnique)
    }

    "be rejected if a secret volume is defined without a corresponding secret" in new Fixture(validateSecrets = true) {
      val volume = PodSecretVolume("volume", "foo")
      val volumeMount = VolumeMount(volume.name, "/bla")
      private val invalid = validPod.copy(
        volumes = Seq(volume),
        containers = Seq(validContainer.copy(volumeMounts = Seq(volumeMount)))
      )
      // Here and below: stringifying validation is admittedly not the best way but it's a nested Set(GroupViolation...) and not easy to test.
      validator(invalid).toString should include(PodsValidationMessages.SecretVolumeMustReferenceSecret)
    }

    "be accepted if it is a valid pod with a Docker pull config" in new Fixture(validateSecrets = true) {
      validator(pullConfigPod) should be(aSuccess)
    }

    "be rejected if a pull config pod is provided when secrets features is disabled" in new Fixture(validateSecrets = false) {
      val validationResult: Result = validator(pullConfigPod)
      validationResult shouldBe a[Failure]
      val validationResultString: String = validationResult.toString
      validationResultString should include("must be empty")
      validationResultString should include("Feature secrets is not enabled. Enable with --enable_features secrets)")
    }

    "be rejected if a pull config pod doesn't have secrets" in new Fixture(validateSecrets = true) {
      private val invalid = pullConfigPod.copy(secrets = Map.empty)
      validator(invalid) should haveViolations(
        "/containers(0)/image/pullConfig" -> "pullConfig.secret must refer to an existing secret")
    }

    "be rejected if a pull config image is not Docker" in new Fixture() {
      private val invalid = pullConfigPod.copy(
        containers = Seq(pullConfigContainer.copy(
          image = Some(pullConfigContainer.image.get.copy(kind = ImageType.Appc))
        )))
      validator(invalid) should haveViolations(
        "/containers(0)/image/pullConfig" -> "pullConfig is supported only with Docker images")
    }
  }

  "A constraint definition" should {

    "MaxPer is accepted with an integer value" in {
      complyWithConstraintRules(Constraint("foo", ConstraintOperator.MaxPer, Some("3"))).isSuccess shouldBe true
    }

    "MaxPer is rejected with no value" in {
      complyWithConstraintRules(Constraint("foo", ConstraintOperator.MaxPer)).isSuccess shouldBe false
    }
  }

  // Resident pods (persistent volumes) have extra restrictions, both on the
  // pod definition itself and on what may change during an update.
  "with persistent volumes" should {

    "be valid" in new Fixture {
      val pod = validResidentPod
      validator(pod) should be(aSuccess)
    }

    "be valid if no unreachable strategy is provided" in new Fixture {
      val pod = validResidentPod.copy(scheduling = validResidentPod.scheduling.map(_.copy(
        unreachableStrategy = None)))
      validator(pod) should be(aSuccess)
    }

    "be valid if no upgrade strategy is provided" in new Fixture {
      val pod = validResidentPod.copy(scheduling = validResidentPod.scheduling.map(_.copy(
        upgrade = None)))
      validator(pod) should be(aSuccess)
    }

    "be invalid if unreachable strategy is enabled" in new Fixture {
      val pod = validResidentPod.copy(scheduling = validResidentPod.scheduling.map(_.copy(
        unreachableStrategy = Some(UnreachableEnabled()))))
      validator(pod) should haveViolations(
        "/" -> "unreachableStrategy must be disabled for pods with persistent volumes")
    }

    "be invalid if upgrade strategy has maximumOverCapacity set to non-zero" in new Fixture {
      val pod = validResidentPod.copy(scheduling = validResidentPod.scheduling.map(_.copy(
        upgrade = Some(PodUpgradeStrategy(maximumOverCapacity = 0.1)))))
      validator(pod) should haveViolations(
        "/upgrade/maximumOverCapacity" -> "got 0.1, expected 0.0")
    }

    "be invalid if cpu changes" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(containers = pod.containers.map(ct => ct.copy(resources = ct.resources.copy(cpus = 3))))
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/" -> PodsValidationMessages.CpusPersistentVolumes)
    }

    "be invalid if mem changes" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(containers = pod.containers.map(ct => ct.copy(resources = ct.resources.copy(mem = 3))))
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/" -> PodsValidationMessages.MemPersistentVolumes)
    }

    "be invalid if disk changes" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(containers = pod.containers.map(ct => ct.copy(resources = ct.resources.copy(disk = 3))))
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/" -> PodsValidationMessages.DiskPersistentVolumes)
    }

    "be invalid if gpus change" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(containers = pod.containers.map(ct => ct.copy(resources = ct.resources.copy(gpus = 3))))
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/" -> PodsValidationMessages.GpusPersistentVolumes)
    }

    "be invalid with default upgrade strategy" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(upgradeStrategy = state.UpgradeStrategy.empty)
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/upgradeStrategy/maximumOverCapacity" -> "got 1.0, expected 0.0")
    }

    "be invalid if persistent volumes change" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to = pod.copy(volumes = pod.volumes.map {
        case vol: PersistentVolume => vol.copy(persistent = vol.persistent.copy(size = 2))
        case vol => vol
      })
      residentUpdateIsValid(pod)(to) should haveViolations(
        "/" -> "persistent volumes cannot be updated")
    }

    "be invalid if ports change" in new Fixture {
      val pod = validResidentPod.fromRaml
      val to1 = pod.copy(containers = pod.containers.map(ct => ct.copy(
        endpoints = ct.endpoints.map(ep => ep.copy(hostPort = None)))))
      val to2 = pod.copy(containers = pod.containers.map(ct => ct.copy(
        endpoints = ct.endpoints.map(ep => ep.copy(hostPort = Some(2))))))
      residentUpdateIsValid(pod)(to1) should haveViolations(
        "/" -> PodsValidationMessages.HostPortsPersistentVolumes)
      residentUpdateIsValid(pod)(to2) should haveViolations(
        "/" -> PodsValidationMessages.HostPortsPersistentVolumes)
    }
  }

  // Shared fixtures: a minimal valid pod, a pod with a Docker pull config,
  // and a resident pod with a persistent volume. `validateSecrets` toggles
  // the secrets feature flag for the validator under test.
  class Fixture(validateSecrets: Boolean = false) {
    val validContainer = PodContainer(
      name = "ct1",
      resources = Resources()
    )
    val validPod = Pod(
      id = "/some/pod",
      containers = Seq(validContainer),
      networks = Seq(Network(mode = NetworkMode.Host))
    )

    val pullConfigContainer = PodContainer(
      name = "pull-config-container",
      resources = Resources(),
      image = Some(Image(kind = ImageType.Docker, id = "some/image", pullConfig = Some(DockerPullConfig("aSecret"))))
    )
    val pullConfigPod = Pod(
      id = "/pull/config/pod",
      containers = Seq(pullConfigContainer),
      secrets = Map("aSecret" -> SecretDef("/pull/config"))
    )

    def validResidentPod = validPod.copy(
      containers = Seq(validContainer.copy(
        endpoints = Seq(Endpoint("ep1", hostPort = Some(1))),
        volumeMounts = Seq(VolumeMount("vol1", "vol1-mount", Some(false))))),
      volumes = Seq(PodPersistentVolume("vol1", PersistentVolumeInfo(size = 1))),
      scheduling = Some(PodSchedulingPolicy(
        upgrade = Some(PodUpgradeStrategy(minimumHealthCapacity = 0, maximumOverCapacity = 0)),
        unreachableStrategy = Some(UnreachableDisabled()))))

    val features: Set[String] = if (validateSecrets) Set(Features.SECRETS) else Set.empty
    implicit val validator: Validator[Pod] = podValidator(features, SemanticVersion.zero, None)
  }

  "network validation" when {
    implicit val validator: Validator[Pod] = podValidator(Set.empty, SemanticVersion.zero, Some("default-network-name"))

    def podContainer(name: String = "ct1", resources: Resources = Resources(), endpoints: Seq[Endpoint]) =
      PodContainer(
        name = name,
        resources = resources,
        endpoints = endpoints)

    def networks(networkCount: Int = 1): Seq[Network] =
      1.to(networkCount).map(i => Network(mode = NetworkMode.Container, name = Some(i.toString)))

    def bridgeNetwork: Seq[Network] = Seq(Network(mode = NetworkMode.ContainerBridge))

    def hostNetwork: Seq[Network] = Seq(Network(mode = NetworkMode.Host))

    def networkedPod(containers: Seq[PodContainer], nets: => Seq[Network] = networks()) =
      Pod(
        id = "/foo",
        networks = nets,
        containers = containers)

    "multiple container networks are specified for a pod" should {

      "require networkNames for containerPort to hostPort mapping" in {
        val badApp = networkedPod(
          Seq(podContainer(endpoints = Seq(Endpoint("endpoint", containerPort = Some(80), hostPort = Option(0))))),
          networks(2))

        validator(badApp).isFailure shouldBe true
      }

      "allow endpoints that don't declare hostPort nor networkNames" in {
        val app = networkedPod(
          Seq(podContainer(endpoints = Seq(Endpoint("endpoint", containerPort = Some(80))))),
          networks(2))
        validator(app) should be(aSuccess)
      }

      "allow endpoints for pods with bridge networking" in {
        val pod = Pod(
          id = "/bridge",
          networks = Seq(Network(mode = NetworkMode.ContainerBridge)),
          containers = Seq(podContainer(endpoints = Seq(Endpoint("endpoint", hostPort = Some(0), containerPort = Some(80)))))
        )

        validator(pod) should be(aSuccess)
      }

      "allow endpoints that both declare a hostPort and a networkNames" in {
        val app = networkedPod(
          Seq(podContainer(endpoints = Seq(
            Endpoint(
              "endpoint",
              hostPort = Option(0),
              containerPort = Some(80),
              networkNames = List("1"))))),
          networks(2))
        validator(app) should be(aSuccess)
      }
    }

    "bridge or single container network" should {

      // Shared behaviors that must hold for both container-mode (one network)
      // and bridge-mode pods.
      def containerAndBridgeMode(subtitle: String, networks: => Seq[Network]): Unit = {
        s"${subtitle} allow endpoint with no networkNames" in {
          validator(
            networkedPod(Seq(
              podContainer(endpoints = Seq(
                Endpoint(
                  "endpoint",
                  hostPort = Some(80),
                  containerPort = Some(80),
                  networkNames = Nil)))), networks)) should be(aSuccess)
        }

        s"${subtitle} allow endpoint without hostport" in {
          validator(
            networkedPod(Seq(
              podContainer(endpoints = Seq(
                Endpoint(
                  "endpoint",
                  hostPort = None,
                  containerPort = Some(80),
                  networkNames = Nil)))), networks)) should be(aSuccess)
        }

        s"${subtitle} allow endpoint with zero hostport" in {
          validator(
            networkedPod(Seq(
              podContainer(endpoints = Seq(
                Endpoint(
                  "endpoint",
                  containerPort = Some(80),
                  hostPort = Some(0))))), networks)) should be(aSuccess)
        }

        s"${subtitle} allows containerPort of zero" in {
          validator(
            networkedPod(Seq(
              podContainer(endpoints = Seq(
                Endpoint("name1", containerPort = Some(0)),
                Endpoint("name2", containerPort = Some(0))
              ))), networks)) should be(aSuccess)
        }

        s"${subtitle} require that hostPort is unique" in {
          val pod = networkedPod(Seq(
            podContainer(endpoints = Seq(
              Endpoint(
                "name1",
                hostPort = Some(123)),
              Endpoint(
                "name2",
                hostPort = Some(123))))), networks)
          validator(pod) should haveViolations("/" -> PodsValidationMessages.HostPortsMustBeUnique)
        }
      }

      behave like containerAndBridgeMode("container-mode:", networks(1))
      behave like containerAndBridgeMode("bridge-mode:", bridgeNetwork)
    }

    "container-mode: requires containerPort" in {
      val pod = networkedPod(Seq(
        podContainer(endpoints = Seq(
          Endpoint(
            "name1",
            hostPort = Some(123))))))
      validator(pod) should haveViolations(
        "/containers(0)/endpoints(0)/containerPort" -> "is required when using container-mode networking")
    }

    "allow endpoint with a networkNames" in {
      validator(
        networkedPod(Seq(
          podContainer(endpoints = Seq(
            Endpoint(
              "endpoint",
              hostPort = Some(80),
              containerPort = Some(80),
              networkNames = List("1"))))))) should be(aSuccess)
    }

    "disallow endpoint with a host port and two valid networkNames" in {
      validator(
        networkedPod(Seq(
          podContainer(endpoints = Seq(
            Endpoint(
              "endpoint",
              hostPort = Some(80),
              containerPort = Some(80),
              networkNames = List("1", "2"))))))) shouldNot be(aSuccess)
    }

    "disallow endpoint with a non-matching network name" in {
      validator(
        networkedPod(Seq(
          podContainer(endpoints = Seq(
            Endpoint(
              "endpoint",
              containerPort = Some(80),
              hostPort = Some(80),
              networkNames = List("invalid-network-name"))))))) shouldNot be(aSuccess)
    }

    "allow endpoint without hostPort for host networking" in {
      validator(networkedPod(Seq(
        podContainer(endpoints = Seq(Endpoint("ep")))
      ), hostNetwork)) should be(aSuccess)
    }
  }
}
| guenter/marathon | src/test/scala/mesosphere/marathon/api/v2/validation/PodsValidationTest.scala | Scala | apache-2.0 | 17,893 |
package com.jeff.chaser.models.systems.common
// Placeholder: intentionally empty companion-style object, presumably a stub
// for a detector view system — TODO confirm intended contents or remove.
object DetectorViewSystem {
}
| jregistr/Academia | CSC455-Game-Programming/Chaser/core/src/com/jeff/chaser/models/systems/common/DetectorViewSystem.scala | Scala | mit | 81 |
/*
* Copyright 2021 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair.wire.protocol
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.eclair.crypto.Sphinx
import fr.acinq.eclair.{ShortChannelId, UInt64}
import scodec.bits.ByteVector
import scala.util.Try
/**
* Created by t-bast on 19/10/2021.
*/
sealed trait RouteBlindingEncryptedDataTlv extends Tlv
/** Concrete TLV records usable inside route blinding encrypted data
  * (tag numbers are assigned in [[RouteBlindingEncryptedDataCodecs]]).
  */
object RouteBlindingEncryptedDataTlv {

  /** Some padding can be added to ensure all payloads are the same size to improve privacy. */
  case class Padding(dummy: ByteVector) extends RouteBlindingEncryptedDataTlv

  /** Id of the outgoing channel, used to identify the next node. */
  case class OutgoingChannelId(shortChannelId: ShortChannelId) extends RouteBlindingEncryptedDataTlv

  /** Id of the next node. */
  case class OutgoingNodeId(nodeId: PublicKey) extends RouteBlindingEncryptedDataTlv

  /**
   * The final recipient may store some data in the encrypted payload for itself to avoid storing it locally.
   * It can for example put a payment_hash to verify that the route is used for the correct invoice.
   * It should use that field to detect when blinded routes are used outside of their intended use (malicious probing)
   * and react accordingly (ignore the message or send an error depending on the use-case).
   */
  case class PathId(data: ByteVector) extends RouteBlindingEncryptedDataTlv

  /** Blinding override for the rest of the route. */
  case class NextBlinding(blinding: PublicKey) extends RouteBlindingEncryptedDataTlv

}
/** scodec codecs for route blinding encrypted data, plus a decrypt-and-decode
  * helper. TLV tag assignments below are part of the wire format and must not
  * change.
  */
object RouteBlindingEncryptedDataCodecs {

  import RouteBlindingEncryptedDataTlv._
  import fr.acinq.eclair.wire.protocol.CommonCodecs.{publicKey, shortchannelid, varint, varintoverflow}
  import scodec.Codec
  import scodec.bits.HexStringSyntax
  import scodec.codecs._

  private val padding: Codec[Padding] = variableSizeBytesLong(varintoverflow, "padding" | bytes).as[Padding]
  private val outgoingChannelId: Codec[OutgoingChannelId] = variableSizeBytesLong(varintoverflow, "short_channel_id" | shortchannelid).as[OutgoingChannelId]
  // 0x21 = 33: the fixed length of a compressed public key.
  private val outgoingNodeId: Codec[OutgoingNodeId] = (("length" | constant(hex"21")) :: ("node_id" | publicKey)).as[OutgoingNodeId]
  private val pathId: Codec[PathId] = variableSizeBytesLong(varintoverflow, "path_id" | bytes).as[PathId]
  private val nextBlinding: Codec[NextBlinding] = (("length" | constant(hex"21")) :: ("blinding" | publicKey)).as[NextBlinding]

  // TLV tag -> record mapping for the encrypted data stream.
  private val encryptedDataTlvCodec = discriminated[RouteBlindingEncryptedDataTlv].by(varint)
    .typecase(UInt64(1), padding)
    .typecase(UInt64(2), outgoingChannelId)
    .typecase(UInt64(4), outgoingNodeId)
    .typecase(UInt64(6), pathId)
    .typecase(UInt64(8), nextBlinding)

  val encryptedDataCodec: Codec[TlvStream[RouteBlindingEncryptedDataTlv]] = TlvCodecs.tlvStream[RouteBlindingEncryptedDataTlv](encryptedDataTlvCodec).complete

  /**
   * Decrypt and decode the contents of an encrypted_recipient_data TLV field.
   *
   * @param nodePrivKey this node's private key.
   * @param blindingKey blinding point (usually provided in the lightning message).
   * @param encryptedData encrypted route blinding data (usually provided inside an onion).
   * @return decrypted contents of the encrypted recipient data, which usually contain information about the next node,
   *         and the blinding point that should be sent to the next node.
   */
  def decode(nodePrivKey: PrivateKey, blindingKey: PublicKey, encryptedData: ByteVector): Try[(TlvStream[RouteBlindingEncryptedDataTlv], PublicKey)] = {
    Sphinx.RouteBlinding.decryptPayload(nodePrivKey, blindingKey, encryptedData).flatMap {
      case (payload, nextBlindingKey) => encryptedDataCodec.decode(payload.bits).map(r => (r.value, nextBlindingKey)).toTry
    }
  }
}
| ACINQ/eclair | eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/RouteBlinding.scala | Scala | apache-2.0 | 4,337 |
package io.opencensus.scala.http
import io.opencensus.scala.trace.AttributeValueOps._
import io.opencensus.trace.Span
/** Helper for attaching service metadata to trace spans. */
object ServiceAttributes {

  /**
   * Writes the optional service name and version from `serviceData`
   * onto `span` as the "service.name" / "service.version" attributes.
   * Absent fields are simply skipped.
   */
  def setAttributesForService(span: Span, serviceData: ServiceData): Unit = {
    for (name <- serviceData.name) span.putAttribute("service.name", name)
    for (version <- serviceData.version) span.putAttribute("service.version", version)
  }
}
| census-ecosystem/opencensus-scala | http/src/main/scala/io/opencensus/scala/http/ServiceAttributes.scala | Scala | apache-2.0 | 371 |
package edu.gemini.phase2.template.factory.impl.visitor
import edu.gemini.phase2.template.factory.impl._
import edu.gemini.spModel.gemini.visitor.blueprint.SpVisitorBlueprint
import edu.gemini.spModel.gemini.visitor.VisitorInstrument
import edu.gemini.pot.sp.{ISPGroup, ISPObservation}
/**
 * Shared initialization support for visitor-instrument template groups:
 * a [[GroupInitializer]] specialized to [[SpVisitorBlueprint]] with the template DSL mixed in.
 */
trait VisitorBase extends GroupInitializer[SpVisitorBlueprint] with TemplateDsl {

  // Name of the phase I/II mapping program backing these templates.
  val program = "VISITOR INSTRUMENT PHASE I/II MAPPING BPS"
  val seqConfigCompType = VisitorInstrument.SP_TYPE

  // Enriches an observation with an editor able to rename its visitor instrument.
  implicit def pimpInst(obs: ISPObservation) = new {
    val ed = StaticObservationEditor[edu.gemini.spModel.gemini.visitor.VisitorInstrument](obs, instrumentType)

    def setName(n: String): Either[String, Unit] =
      ed.updateInstrument(_.setName(n))
  }

  // HACK: override superclass initialize to hang onto db reference.
  // The reference is only valid for the duration of the initialize call;
  // it is always cleared in the finally block below.
  var db: Option[TemplateDb] = None

  override def initialize(db: TemplateDb): Maybe[ISPGroup] =
    try {
      this.db = Some(db)
      super.initialize(db)
    } finally {
      this.db = None
    }

  // Runs `a`, folding any thrown exception into its message via tryFold
  // (NOTE(review): exact tryFold semantics come from TemplateDsl — confirm there).
  def attempt[A](a: => A) = tryFold(a) {
    e =>
      e.printStackTrace()
      e.getMessage
  }

  // DSL Setters
  def setName = Setter[String](blueprint.name)(_.setName(_))
}
| arturog8m/ocs | bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/template/factory/impl/visitor/VisitorBase.scala | Scala | bsd-3-clause | 1,197 |
package breeze.optimize
import breeze.linalg._
import breeze.numerics._
import breeze.math.MutableCoordinateSpace
/**
 * Implements the L2^2 and L1 updates from
 * Duchi et al 2010 Adaptive Subgradient Methods for Online Learning and Stochastic Optimization.
 *
 * Basically, we use "forward regularization" and an adaptive step size based
 * on the previous gradients.
 *
 * @author dlwh
 */
object AdaptiveGradientDescent {

  /**
   * Implements the L2 regularization update.
   *
   * Each step is:
   *
   * x_{t+1}i = (s_{ti} * x_{ti} - \\eta * g_ti) / (eta * regularization + delta + s_ti)
   *
   * where g_ti is the gradient and s_ti = \\sqrt(\\sum_t'^{t} g_ti^2)
   */
  class L2Regularization[T](val regularizationConstant: Double = 1.0,
                            stepSize: Double, maxIter: Int,
                            tolerance: Double = 1E-5,
                            improvementTolerance: Double = 1E-4,
                            minImprovementWindow: Int = 50)(implicit vspace: MutableCoordinateSpace[T, Double])
    extends StochasticGradientDescent[T](stepSize, maxIter, tolerance, improvementTolerance, minImprovementWindow) {

    // Small constant added to the adaptive denominator for numerical stability.
    val delta = 1E-4
    import vspace._

    // Running (age-truncated) sum of element-wise squared gradients.
    case class History(sumOfSquaredGradients: T)
    override def initialHistory(f: StochasticDiffFunction[T], init: T) = History(zeros(init))

    override def updateHistory(newX: T, newGrad: T, newValue: Double, f: StochasticDiffFunction[T], oldState: State) = {
      val oldHistory = oldState.history
      val newG = (oldState.grad :* oldState.grad)
      // Past maxAge iterations, decay the history so it behaves like a sliding
      // average rather than an ever-growing sum.
      val maxAge = 1000.0
      if (oldState.iter > maxAge) {
        newG *= 1 / maxAge
        axpy((maxAge - 1) / maxAge, oldHistory.sumOfSquaredGradients, newG)
      } else {
        newG += oldHistory.sumOfSquaredGradients
      }
      new History(newG)
    }

    override protected def takeStep(state: State, dir: T, stepSize: Double) = {
      import state._
      // s_ti = sqrt(accumulated squared gradients, including the current one).
      val s = sqrt(state.history.sumOfSquaredGradients :+ (state.grad :* state.grad))
      val newx = x :* s
      axpy(stepSize, dir, newx)
      s += (delta + regularizationConstant * stepSize)
      newx :/= s
      newx
    }

    override def determineStepSize(state: State, f: StochasticDiffFunction[T], dir: T) = {
      defaultStepSize
    }

    // Reported value/gradient include the L2 penalty term.
    override protected def adjust(newX: T, newGrad: T, newVal: Double) = {
      val av = newVal + (newX dot newX) * regularizationConstant / 2.0
      val ag = newGrad + newX * regularizationConstant
      (av -> ag)
    }
  }

  /**
   * Implements the L1 regularization update.
   *
   * Each step is:
   *
   * x_{t+1}i = sign(x_{t,i} - eta/s_i * g_ti) * (abs(x_ti - eta/s_ti * g_ti) - lambda * eta /s_ti))_+
   *
   * where g_ti is the gradient and s_ti = \\sqrt(\\sum_t'^{t} g_ti^2)
   */
  class L1Regularization[T](val lambda: Double = 1.0,
                            delta: Double = 1E-5,
                            eta: Double = 4,
                            maxIter: Int = 100)(implicit vspace: MutableCoordinateSpace[T, Double]) extends StochasticGradientDescent[T](eta, maxIter) {

    import vspace._

    // Running (age-truncated) sum of element-wise squared gradients.
    case class History(sumOfSquaredGradients: T)
    def initialHistory(f: StochasticDiffFunction[T], init: T) = History(zeros(init))

    /*
    override def updateHistory(newX: T, newGrad: T, newValue: Double, oldState: State) = {
      val oldHistory = oldState.history
      val newG = oldHistory.sumOfSquaredGradients :+ (oldState.grad :* oldState.grad)
      new History(newG)
    }
    */

    override def updateHistory(newX: T, newGrad: T, newValue: Double, f: StochasticDiffFunction[T], oldState: State) = {
      val oldHistory = oldState.history
      val newG = (oldState.grad :* oldState.grad)
      // Note: a shorter window (200) than the L2 variant above.
      val maxAge = 200.0
      if (oldState.iter > maxAge) {
        newG *= 1 / maxAge
        axpy((maxAge - 1) / maxAge, oldHistory.sumOfSquaredGradients, newG)
      } else {
        newG += oldHistory.sumOfSquaredGradients
      }
      new History(newG)
    }

    override protected def takeStep(state: State, dir: T, stepSize: Double) = {
      import state._
      val s: T = sqrt(state.history.sumOfSquaredGradients :+ (grad :* grad) :+ delta)
      val res: T = x + (dir * stepSize :/ s)
      val tlambda = lambda * stepSize
      // Soft-thresholding: shrink each coordinate toward zero by tlambda / s_i,
      // clamping to exactly zero when the shrinkage exceeds the magnitude.
      vspace.zipMapValues.map(res, s, { case (x_half, s_i) =>
        if (x_half.abs < tlambda / s_i) {
          0.0
        } else {
          (x_half - math.signum(x_half) * tlambda / s_i)
        }
      })
    }

    override def determineStepSize(state: State, f: StochasticDiffFunction[T], dir: T) = {
      defaultStepSize
    }

    // Reported value/gradient include the L1 penalty term.
    override protected def adjust(newX: T, newGrad: T, newVal: Double) = {
      val av = newVal + norm(newX, 1) * lambda
      val ag = newGrad + signum(newX) * lambda
      (av -> ag)
    }
  }
}
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package std
import java.io.{ File => _, _ }
import java.util.concurrent.ConcurrentHashMap
import sbt.internal.io.DeferredWriter
import sbt.internal.util.ManagedLogger
import sbt.internal.util.Util.nil
import sbt.io.IO
import sbt.io.syntax._
import sbt.util._
// no longer specific to Tasks, so 'TaskStreams' should be renamed
/**
 * Represents a set of streams associated with a context.
 * In sbt, this is a named set of streams for a particular scoped key.
 * For example, logging for test:compile is by default sent to the "out" stream in the test:compile context.
 */
sealed trait TaskStreams[Key] {

  /** The default stream ID, used when an ID is not provided. */
  def default = outID
  def outID = "out"
  def errorID = "err"

  /** Provides an [[Input]] for reading from the stream `sid` for `key`. */
  def getInput(key: Key, sid: String = default): Input

  /** Provides an [[Output]] for writing to the stream `sid` in this context. */
  def getOutput(sid: String = default): Output

  /**
   * Provides a reader to read text from the stream `sid` for `key`.
   * It is the caller's responsibility to coordinate writing to the stream.
   * That is, no synchronization or ordering is provided and so this method should only be called when writing is complete.
   */
  def readText(key: Key, sid: String = default): BufferedReader

  /**
   * Provides an output stream to read from the stream `sid` for `key`.
   * It is the caller's responsibility to coordinate writing to the stream.
   * That is, no synchronization or ordering is provided and so this method should only be called when writing is complete.
   */
  def readBinary(a: Key, sid: String = default): BufferedInputStream

  final def readText(a: Key, sid: Option[String]): BufferedReader = readText(a, getID(sid))

  final def readBinary(a: Key, sid: Option[String]): BufferedInputStream =
    readBinary(a, getID(sid))

  /** The key identifying the context of this streams instance. */
  def key: Key

  /** Provides a writer for writing text to the stream with the given ID. */
  def text(sid: String = default): PrintWriter

  /** Provides an output stream for writing to the stream with the given ID. */
  def binary(sid: String = default): BufferedOutputStream

  /** A cache directory that is unique to the context of this streams instance.*/
  def cacheDirectory: File

  /** Factory for cache stores rooted in this context's cache directory. */
  def cacheStoreFactory: CacheStoreFactory

  // default logger
  /** Obtains the default logger. */
  final lazy val log: ManagedLogger = log(default)

  /** Creates a Logger that logs to stream with ID `sid`.*/
  def log(sid: String): ManagedLogger

  private[this] def getID(s: Option[String]) = s getOrElse default
}
/**
 * A [[TaskStreams]] with an explicit lifecycle: call [[open]] before use and
 * [[close]] when finished (see [[Streams.use]] for the managed pattern).
 */
sealed trait ManagedStreams[Key] extends TaskStreams[Key] {
  def open(): Unit
  def close(): Unit

  /** True once [[close]] has been called. */
  def isClosed: Boolean
}
/** Provides per-key [[ManagedStreams]] instances. */
trait Streams[Key] {

  def apply(a: Key): ManagedStreams[Key]

  /** Opens the streams for `key`, applies `f`, and always closes them afterwards. */
  def use[T](key: Key)(f: TaskStreams[Key] => T): T = {
    val streams = apply(key)
    streams.open()
    try f(streams)
    finally streams.close()
  }
}
trait CloseableStreams[Key] extends Streams[Key] with java.io.Closeable
object Streams {
  // Closes a Closeable, suppressing any IOException.
  private[this] val closeQuietly = (c: Closeable) =>
    try {
      c.close()
    } catch { case _: IOException => () }

  // Per-directory locks used to serialize on-disk stream-file creation (see `make`).
  private[this] val streamLocks = new ConcurrentHashMap[File, AnyRef]()

  /**
   * Wraps `delegate` so that streams are cached per key and every open
   * instance can be closed at once via the returned Closeable.
   */
  def closeable[Key](delegate: Streams[Key]): CloseableStreams[Key] = new CloseableStreams[Key] {
    private[this] val streams = new collection.mutable.HashMap[Key, ManagedStreams[Key]]

    // Returns a cached, still-open instance for `key`, or creates and caches a new one.
    def apply(key: Key): ManagedStreams[Key] =
      synchronized {
        streams.get(key) match {
          case Some(s) if !s.isClosed => s
          case _ =>
            val newS = delegate(key)
            streams.put(key, newS)
            newS
        }
      }

    def close(): Unit =
      synchronized { streams.values.foreach(_.close()); streams.clear() }
  }

  @deprecated("Use constructor without converter", "1.4")
  def apply[Key, J: sjsonnew.IsoString](
      taskDirectory: Key => File,
      name: Key => String,
      mkLogger: (Key, PrintWriter) => ManagedLogger,
      converter: sjsonnew.SupportConverter[J],
  ): Streams[Key] = apply[Key](taskDirectory, name, mkLogger)

  @deprecated("Use constructor without converter", "1.4")
  private[sbt] def apply[Key, J: sjsonnew.IsoString](
      taskDirectory: Key => File,
      name: Key => String,
      mkLogger: (Key, PrintWriter) => ManagedLogger,
      converter: sjsonnew.SupportConverter[J],
      mkFactory: (File, sjsonnew.SupportConverter[J]) => CacheStoreFactory
  ): Streams[Key] = apply[Key](taskDirectory, name, mkLogger, mkFactory(_, converter))

  /** Creates a [[Streams]] whose files live under the per-key directory from `taskDirectory`. */
  def apply[Key](
      taskDirectory: Key => File,
      name: Key => String,
      mkLogger: (Key, PrintWriter) => ManagedLogger,
  ): Streams[Key] =
    apply(
      taskDirectory,
      name,
      mkLogger,
      file => new DirectoryStoreFactory(file)
    )

  private[sbt] def apply[Key](
      taskDirectory: Key => File,
      name: Key => String,
      mkLogger: (Key, PrintWriter) => ManagedLogger,
      mkFactory: File => CacheStoreFactory
  ): Streams[Key] = new Streams[Key] {
    def apply(a: Key): ManagedStreams[Key] = new ManagedStreams[Key] {
      // Everything opened through `make`; closed together in `close()`.
      private[this] var opened: List[Closeable] = nil
      private[this] var closed = false

      def getInput(a: Key, sid: String = default): Input =
        make(a, sid)(f => new FileInput(f))

      def getOutput(sid: String = default): Output =
        make(a, sid)(f => new FileOutput(f))

      def readText(a: Key, sid: String = default): BufferedReader =
        make(a, sid)(
          f => new BufferedReader(new InputStreamReader(new FileInputStream(f), IO.defaultCharset))
        )

      def readBinary(a: Key, sid: String = default): BufferedInputStream =
        make(a, sid)(f => new BufferedInputStream(new FileInputStream(f)))

      def text(sid: String = default): PrintWriter =
        make(a, sid)(
          f =>
            new PrintWriter(
              new DeferredWriter(
                new BufferedWriter(
                  new OutputStreamWriter(new FileOutputStream(f), IO.defaultCharset)
                )
              )
            )
        )

      def binary(sid: String = default): BufferedOutputStream =
        make(a, sid)(f => new BufferedOutputStream(new FileOutputStream(f)))

      lazy val cacheDirectory: File = {
        val dir = taskDirectory(a)
        IO.createDirectory(dir)
        dir
      }

      lazy val cacheStoreFactory: CacheStoreFactory = mkFactory(cacheDirectory)

      def log(sid: String): ManagedLogger = mkLogger(a, text(sid))

      // Resolves the stream file for (a, sid), creating it on disk if necessary
      // under a per-directory lock, wraps it with `f`, and records the result
      // so close() can release it.
      def make[T <: Closeable](a: Key, sid: String)(f: File => T): T = synchronized {
        checkOpen()
        val file = taskDirectory(a) / sid
        val parent = file.getParentFile
        // Install-or-reuse a lock object for the parent directory so concurrent
        // tasks don't race while touching files in the same directory.
        val newLock = new AnyRef
        val lock = streamLocks.putIfAbsent(parent, newLock) match {
          case null => newLock
          case l    => l
        }
        try lock.synchronized {
          if (!file.exists) IO.touch(file, setModified = false)
        } finally {
          streamLocks.remove(parent)
          ()
        }
        val t = f(file)
        opened ::= (t: Closeable)
        t
      }

      def key: Key = a
      def open(): Unit = ()
      def isClosed: Boolean = synchronized { closed }

      def close(): Unit = synchronized {
        if (!closed) {
          closed = true
          opened foreach closeQuietly
        }
      }

      def checkOpen(): Unit = synchronized {
        if (closed) sys.error("Streams for '" + name(a) + "' have been closed.")
      }
    }
  }
}
| sbt/sbt | tasks-standard/src/main/scala/sbt/std/Streams.scala | Scala | apache-2.0 | 7,527 |
package scadla.utils
import scadla._
import InlineOps._
import scala.math._
import squants.space.Length
import squants.space.Radians
object Hexagon {

  /** Circumradius (center-to-vertex) of a regular hexagon given its inradius (apothem). */
  def maxRadius(minRadius: Length) = minRadius / math.sin(math.Pi / 3)

  /** Inradius (apothem) of a regular hexagon given its circumradius. */
  def minRadius(maxRadius: Length) = maxRadius * math.sin(math.Pi / 3)

  /* Extrude vertically an hexagon (centered at 0,0 with z from 0 to height)
   * @param minRadius the radius of the circle inscribed in the hexagon
   * @param height
   */
  def apply(minRadius: Length, _height: Length) = {
    val unit = minRadius.unit
    val height = _height.in(unit)
    if (minRadius.value <= 0.0 || height.value <= 0.0) {
      Empty
    } else {
      import scala.math._
      // Circumradius: distance from center to each corner.
      val rd0 = minRadius / sin(Pi / 3)
      // 12 vertices: 6 corners, each at both z = 0 (j = 0) and z = height (j = 1);
      // index 2*i is the bottom copy of corner i, 2*i+1 the top copy.
      val pts = for (i <- 0 until 6; j <- 0 to 1) yield
        Point(rd0 * cos(i * Pi / 3), rd0 * sin(i * Pi / 3), height * j) //linter:ignore ZeroDivideBy
      def face(a: Int, b: Int, c: Int) = Face(pts(a % 12), pts(b % 12), pts(c % 12))
      // Two triangles per rectangular side face.
      val side1 = for (i <- 0 until 6) yield face(2 * i, 2 * i + 2, 2 * i + 3) //linter:ignore ZeroDivideBy
      val side2 = for (i <- 0 until 6) yield face(2 * i + 1, 2 * i, 2 * i + 3) //linter:ignore ZeroDivideBy
      // Bottom and top caps, each fanned into four triangles.
      val bottom = Array(
        face(0, 4, 2),
        face(4, 8, 6),
        face(8, 0, 10),
        face(0, 8, 4)
      )
      val top = Array(
        face(1, 3, 5),
        face(5, 7, 9),
        face(9, 11, 1),
        face(1, 5, 9)
      )
      val faces = side1 ++ side2 ++ bottom ++ top
      Polyhedron(faces)
    }
  }

  /* Extrude vertically a semi-regular hexagon (centered at 0,0 with z from 0 to height)
   * @param radius1 the radius of the circle inscribed in the hexagon even faces
   * @param radius2 the radius of the circle inscribed in the hexagon odd faces
   * @param height
   */
  def semiRegular(radius1: Length, radius2: Length, height: Length) = {
    // Start from a cylinder large enough to contain either radius,
    // then subtract six rotated slabs to cut the flats.
    val r = maxRadius(radius1) max maxRadius(radius2)
    val base = Cylinder(r, height)
    val chop = Cube(r, 2 * r, height).moveY(-r)
    val neg1 = for (i <- 0 until 6 if i % 2 == 0) yield chop.moveX(radius1).rotateZ(Radians(i * Pi / 3))
    val neg2 = for (i <- 0 until 6 if i % 2 == 1) yield chop.moveX(radius2).rotateZ(Radians(i * Pi / 3))
    base -- neg1 -- neg2
  }
}
| dzufferey/scadla | src/main/scala/scadla/utils/Hexagon.scala | Scala | apache-2.0 | 2,222 |
/*
Scala-Paxos
Copyright (C) 2013 Alex Tomic
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.usi.tomica.paxos
import scala.actors.Actor
//abstract class AbstractProposer(val pId: Int, val acceptors: Array[AbstractAcceptor]) extends Actor {
/**
 * Base state shared by Paxos proposer implementations.
 * NOTE(review): field meanings below are inferred from conventional Paxos
 * naming (c-rnd / c-val) — confirm against the concrete proposer subclasses.
 */
abstract class AbstractProposer extends AbstractPaxosProcess {

  // Fixed capacity of the per-instance arrays below.
  val BUFF_SIZE = 1000

  // Current round number (c-rnd) and a separate counter for pre-executed rounds.
  var cRnd: Int = 0
  var cRndPreExec: Int = 0
  // Proposed values per instance slot (c-val).
  var cVal = new Array[String](BUFF_SIZE)

  // Bookkeeping for throughput / latency statistics.
  var valuesDecided = 0
  var avgLatency: Double = 0.0
  var latency: Long = 0

  // Vote counters for phase 1 and phase 2, per instance slot.
  var votesP1 = new Array[Int](BUFF_SIZE)
  var votesP2 = new Array[Int](BUFF_SIZE)
  var vRound: Int = 0
  var pCmd: String = ""
}
| atomic77/scala-paxos | src/ch/usi/tomica/paxos/AbstractProposer.scala | Scala | gpl-3.0 | 1,245 |
package spire.math
import org.scalatest.Matchers
import org.scalacheck.Arbitrary._
import org.scalatest._
import prop._
import BigDecimal.RoundingMode._
/**
 * Property-based checks of [[Rational]] arithmetic identities.
 * Numerators/denominators are random Longs; a zero denominator is mapped to 1.
 */
class RationalCheck extends PropSpec with Matchers with GeneratorDrivenPropertyChecks {
  type Q = Rational

  // Property quantified over one random rational.
  def rat1(name: String)(f: Q => Unit) =
    property(name) {
      forAll { (nx: Long, _dx: Long) =>
        val dx = if (_dx == 0) 1 else _dx
        f(Rational(nx, dx))
      }
    }

  // Property quantified over two random rationals.
  def rat2(name: String)(f: (Q, Q) => Unit) =
    property(name) {
      forAll { (nx: Long, _dx: Long, ny: Long, _dy: Long) =>
        val dx = if (_dx == 0) 1 else _dx
        val dy = if (_dy == 0) 1 else _dy
        f(Rational(nx, dx), Rational(ny, dy))
      }
    }

  // Property quantified over three random rationals.
  def rat3(name: String)(f: (Q, Q, Q) => Unit) =
    property(name) {
      forAll { (nx: Long, _dx: Long, ny: Long, _dy: Long, nz: Long, _dz: Long) =>
        val dx = if (_dx == 0) 1 else _dx
        val dy = if (_dy == 0) 1 else _dy
        val dz = if (_dz == 0) 1 else _dz
        f(Rational(nx, dx), Rational(ny, dy), Rational(nz, dz))
      }
    }

  // Identity and inverse laws.
  rat1("x + 0 == x") { x: Q => x + Rational(0) shouldBe x }
  rat1("x * 1 == x") { x: Q => x * Rational(1) shouldBe x }
  rat1("x * 0 == 0") { x: Q => x * Rational(0) shouldBe Rational(0) }

  rat1("x.floor <= x.round <= x.ceil") { x: Q =>
    x.floor should be <= x.round
    x.round should be <= x.ceil
  }

  rat1("x + x == 2x") { x: Q => (x + x) shouldBe 2 * x }
  rat1("x - x == 0") { x: Q => x - x shouldBe Rational(0) }
  rat1("x * x == x^2") { x: Q => (x * x) shouldBe x.pow(2) }
  rat1("(x^-1)^3 == x^-3") { x: Q => if (x != 0) x.reciprocal.pow(3) shouldBe x.pow(-3) }
  rat1("x / x == 1") { x: Q => if (x != 0) x / x shouldBe Rational(1) }

  // Commutativity / cancellation laws.
  rat2("x + y == y + x") { (x: Q, y: Q) => x + y shouldBe y + x }
  rat2("x - y == -y + x") { (x: Q, y: Q) => x - y shouldBe -y + x }
  rat2("x + y - x == y") { (x: Q, y: Q) => (x + y) - x shouldBe y }
  rat2("x / y == x * (y^-1)") { (x: Q, y: Q) => if (y != 0) x / y shouldBe x * y.reciprocal }

  // Distributivity.
  rat3("(x + y) * z == x * z + y * z") { (x: Q, y: Q, z: Q) => (x + y) * z shouldBe x * z + y * z }
}
| lrytz/spire | tests/src/test/scala/spire/math/RationalCheck.scala | Scala | mit | 2,132 |
package org.sofi.deadman.messages
import org.sofi.deadman.messages.event.{ Task, TaskTermination }
package object command {

  /** Converts a [[ScheduleTask]] command into its [[Task]] event; a missing timestamp defaults to "now". */
  implicit class ScheduleTaskOps(val st: ScheduleTask) extends AnyVal {
    def event: Task = {
      val timestamp = st.ts.getOrElse(System.currentTimeMillis())
      Task(st.key, st.aggregate, st.entity, timestamp, st.ttl, st.ttw, st.tags)
    }
  }

  /** Converts a [[CompleteTask]] command into its [[TaskTermination]] event. */
  implicit class CompleteTaskOps(val ct: CompleteTask) extends AnyVal {
    def event: TaskTermination = TaskTermination(ct.key, ct.aggregate, ct.entity)
  }
}
| SocialFinance/deadman-switch | domain/src/main/scala/org/sofi/deadman/messages/command/package.scala | Scala | bsd-3-clause | 471 |
package ru.org.codingteam.cttalk.services
import java.util.Date
import javax.inject.Inject
import com.google.inject.ImplementedBy
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.functional.syntax._
import play.api.libs.json.{JsPath, Json}
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json.ImplicitBSONHandlers._
import play.modules.reactivemongo.json.collection.JSONCollection
import ru.org.codingteam.cttalk.model.Handle._
import ru.org.codingteam.cttalk.model.{Handle, Message}
import scala.concurrent.Future
/**
* Created by hgn on 25.10.2015.
*/
/** Persistence operations for chat messages. */
@ImplementedBy(classOf[MessagesRepositoryImpl])
trait MessagesRepository {

  /** Persists a message, yielding it back once the insert completes. */
  def save(message: Message): Future[Message]

  /** All messages addressed to `handle` that have not yet been marked as read. */
  def getUnread(handle: Handle): Future[Seq[Message]]

  /** The most recent messages from `sender` to `receiver` (newest first), limited to `upTo`. */
  def getLast(sender: Handle, receiver: Handle, upTo: Int = Int.MaxValue): Future[Seq[Message]]

  /** Marks the given messages as read, yielding them back when done. */
  def markRead(messageSeq: Seq[Message]): Future[Seq[Message]]
}
/**
 * MongoDB-backed [[MessagesRepository]] using ReactiveMongo JSON collections.
 * Message ids are stored as the document's `_id`.
 */
class MessagesRepositoryImpl @Inject()(mongo: ReactiveMongoApi, tokens: TokensRepository) extends MessagesRepository {

  // JSON (de)serialization of Message documents.
  implicit val writes = ((JsPath \\ "_id").write[String] and
    (JsPath \\ "sender").write[Handle] and
    (JsPath \\ "receiver").write[Handle] and
    (JsPath \\ "wasRead").write[Boolean] and
    (JsPath \\ "moment").write[Date] and
    (JsPath \\ "text").write[String]) { m: Message => (m.id, m.sender, m.receiver, m.wasRead, m.moment, m.text) }

  implicit val reads = ((JsPath \\ "sender").read[Handle] and
    (JsPath \\ "receiver").read[Handle] and
    (JsPath \\ "wasRead").read[Boolean] and
    (JsPath \\ "moment").read[Date] and
    (JsPath \\ "text").read[String]) (Message.apply _)

  // Resolved per call so it always targets the current connection's database.
  def messages = mongo.db.collection[JSONCollection]("messages")

  override def save(message: Message): Future[Message] = {
    messages.insert(message) map { _ => message }
  }

  override def getUnread(handle: Handle): Future[Seq[Message]] = {
    messages.find(Json.obj(
      "receiver" -> Json.toJson(handle),
      "wasRead" -> false))
      .cursor[Message]()
      .collect[Seq]()
  }

  override def getLast(sender: Handle, receiver: Handle, upTo: Int = Int.MaxValue): Future[Seq[Message]] = {
    messages.find(Json.obj(
      "receiver" -> Json.toJson(receiver),
      "sender" -> Json.toJson(sender)))
      .sort(Json.obj("moment" -> -1))
      .cursor[Message]()
      .collect[Seq](upTo)
  }

  override def markRead(messageSeq: Seq[Message]): Future[Seq[Message]] = {
    // Fix: select the ids with $in — the previous query compared _id for
    // equality against a JSON *array*, which never matches a scalar _id —
    // and pass multi = true so every matched message is updated instead of
    // only the first document.
    val selector = Json.obj("_id" -> Json.obj("$in" -> Json.toJson(messageSeq map { message => message.id })))
    val modifier = Json.obj("$set" -> Json.obj("wasRead" -> Json.toJson(true)))
    messages.update(selector, modifier, multi = true) map { _ => messageSeq }
  }
}
| hagane/cttalk | server/app/ru/org/codingteam/cttalk/services/MessagesRepository.scala | Scala | mit | 2,678 |
package com.twitter.finagle.ssl
/**
 * CipherSuites represent the collection of prioritized cipher suites that should be
 * enabled for a TLS [[Engine]]. A cipher suite, for protocols prior to TLSv1.3, is a
 * combination of various algorithms for items such as key exchange, authentication
 * type, bulk encryption algorithm, and message authentication code.
 *
 * @note Java users: See [[CipherSuitesConfig]].
 */
private[finagle] sealed trait CipherSuites

private[finagle] object CipherSuites {

  /**
   * Delegates the choice of cipher suites for a particular engine
   * to the engine factory.
   */
  case object Unspecified extends CipherSuites

  /**
   * The cipher suites to enable for a particular engine, in priority order.
   * Each suite must be supported by the underlying engine.
   *
   * {{{
   * val suites = Seq(
   *   "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
   *   "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384")
   * val cipherSuites = CipherSuites.Enabled(suites)
   * }}}
   */
  case class Enabled(ciphers: Seq[String]) extends CipherSuites

  /**
   * Parses a colon-separated list of cipher suite names into a
   * [[CipherSuites]]. A string containing no suite names (empty, or
   * separators only) yields [[Unspecified]].
   */
  def fromString(ciphers: String): CipherSuites =
    ciphers.split(":").toSeq.filter(_.nonEmpty) match {
      case s if s.isEmpty => Unspecified
      case s              => Enabled(s)
    }
}
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/CipherSuites.scala | Scala | apache-2.0 | 1,607 |
package com.outr.arango.api.model
import io.circe.Json
/**
 * Response model for a graph-creation call that returned HTTP 201
 * (generated from the ArangoDB HTTP API — confirm against the API spec).
 *
 * @param error whether the request resulted in an error.
 * @param code  the HTTP response code, when present.
 * @param graph the created graph's representation, when present.
 */
case class GeneralGraphCreateHttpExamplesRc201(error: Boolean,
                                               code: Option[Int] = None,
                                               graph: Option[GraphRepresentation] = None)
package org.helianto.core.repository
import java.util.Date
import org.helianto.core.domain.Context
import org.springframework.data.jpa.repository.JpaRepository
/** Spring Data JPA repository for [[Context]] entities (queries derived from method names). */
trait ContextRepository extends JpaRepository[Context, Integer] {

  /** Finds the context with the given name. */
  def findByContextName(contextName: String): Context

  /** Lists contexts whose install date is on or before the given date. */
  def findByInstallDateIsLessThanEqual(installDate: Date): java.util.List[Context]
}
| iservport/helianto-spring | src/main/scala/org/helianto/core/repository/ContextRepository.scala | Scala | apache-2.0 | 371 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 rainysoft
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.rainysoft.marketsimulator.agent
import com.rainysoft.marketsimulator.market._
/** Base class for all messages sent to market-simulator agents. */
abstract class AgentMessage

/**
 * Message used to force an agent to place an order with the given
 * symbol, order type, side, quantity, and price.
 */
case class ForceNewOrderSingle(symbol: String, orderType: OrderType, isBuy: Boolean, quantity: Int,
                               price: Double) extends AgentMessage

/** Message sent when a new pulse (with the given sequence number) is emitted. */
case class Pulse(pulse: Int) extends AgentMessage()
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.exceptions.{ ProfileRetrievalException, UnexpectedResponseException }
import com.mohiva.play.silhouette.impl.providers.OAuth2Provider._
import com.mohiva.play.silhouette.impl.providers.SocialProfileBuilder._
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.FoursquareProvider._
import play.api.libs.json.Json
import play.api.libs.ws.{ WSRequest, WSResponse }
import play.api.test.{ FakeRequest, WithApplication }
import test.Helper
import scala.concurrent.Future
/**
* Test case for the [[FoursquareProvider]] class.
*/
class FoursquareProviderSpec extends OAuth2ProviderSpec {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val s = provider.withSettings { s =>
s.copy(accessTokenURL = "new-access-token-url")
}
s.settings.accessTokenURL must be equalTo "new-access-token-url"
}
}
"The `authenticate` method" should {
"fail with UnexpectedResponseException if OAuth2Info can be build because of an unexpected response" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
response.json returns Json.obj()
requestHolder.withHeaders(any) returns requestHolder
requestHolder.post[Map[String, Seq[String]]](any)(any) returns Future.successful(response)
httpLayer.url(oAuthSettings.accessTokenURL) returns requestHolder
stateProvider.validate(any) returns Future.successful(state)
failed[UnexpectedResponseException](provider.authenticate()) {
case e => e.getMessage must startWith(InvalidInfoFormat.format(provider.id, ""))
}
}
"return the auth info" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
response.json returns oAuthInfo
requestHolder.withHeaders(any) returns requestHolder
requestHolder.post[Map[String, Seq[String]]](any)(any) returns Future.successful(response)
httpLayer.url(oAuthSettings.accessTokenURL) returns requestHolder
stateProvider.validate(any) returns Future.successful(state)
authInfo(provider.authenticate()) {
case authInfo => authInfo must be equalTo oAuthInfo.as[OAuth2Info]
}
}
}
"The `retrieveProfile` method" should {
"fail with ProfileRetrievalException if API returns error" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.error.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(SpecifiedProfileError.format(
provider.id,
400,
Some("param_error"),
Some("Must provide a valid user ID or 'self.'")))
}
}
"fail with ProfileRetrievalException if an unexpected error occurred" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json throws new RuntimeException("")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(UnspecifiedProfileError.format(provider.id))
}
}
"return the social profile" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("apollonia.vanova@watchmen.com"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"return the social profile if API is deprecated" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.deprecated.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("apollonia.vanova@watchmen.com"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"handle the custom API version property" in new WithApplication with Context {
val customProperties = Map(APIVersion -> "20120101")
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", "20120101")) returns requestHolder
profile(provider.withSettings(_.copy(customProperties = customProperties))
.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("apollonia.vanova@watchmen.com"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"handle the custom avatar resolution property" in new WithApplication with Context {
val customProperties = Map(AvatarResolution -> "150x150")
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.withSettings(_.copy(customProperties = customProperties))
.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("apollonia.vanova@watchmen.com"),
avatarURL = Some("https://irs0.4sqi.net/img/user/150x150/blank_girl.png")
)
}
}
}
/**
* Defines the context for the abstract OAuth2 provider spec.
*
* @return The Context to use for the abstract OAuth2 provider spec.
*/
override protected def context: OAuth2ProviderSpecContext = new Context {}
/**
* The context.
*/
  trait Context extends OAuth2ProviderSpecContext {
    /**
     * The OAuth2 settings.
     *
     * Wrapped in a spy so individual tests can derive modified copies via
     * `provider.withSettings(_.copy(...))` — presumably Mockito's `spy`; confirm against imports.
     */
    lazy val oAuthSettings = spy(OAuth2Settings(
      authorizationURL = Some("https://foursquare.com/oauth2/authenticate"),
      accessTokenURL = "https://foursquare.com/oauth2/access_token",
      redirectURL = "https://www.mohiva.com",
      clientID = "my.client.id",
      clientSecret = "my.client.secret"))
    /**
     * The OAuth2 info returned by Foursquare.
     *
     * @see https://developer.foursquare.com/overview/auth
     */
    override lazy val oAuthInfo = Helper.loadJson("providers/oauth2/foursquare.access.token.json")
    /**
     * The provider to test.
     */
    lazy val provider = new FoursquareProvider(httpLayer, stateProvider, oAuthSettings)
  }
}
| rfranco/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/oauth2/FoursquareProviderSpec.scala | Scala | apache-2.0 | 9,368 |
/**
* Copyright 2009 Latterfrosken Software Development Limited
*
* This file is part of Lafros GUI-Alerts.
*
* Lafros GUI-Alerts is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* Lafros GUI-Alerts is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with Lafros GUI-Alerts. If not, see <http://www.gnu.org/licenses/>. */
package com.lafros.gui.alerts
/**
* an implementation may be passed to <tt>TheAlertsController</tt>.
* @author Rob Dickens */
abstract class AlertsApp {
  /**
   * called when the number of alerts raised changes from zero to one, or from one
   * to zero.
   * @param b <tt>true</tt> when the first alert is raised, <tt>false</tt> when
   * the last one is cleared */
  // Explicit `: Unit` result type: the procedure syntax `def f(...)` without a
  // result type is deprecated in modern Scala; behaviour is unchanged.
  def setAlertsRaised(b: Boolean): Unit
}
| robcd/lafros-gui | lafros-gui-alerts/src/main/scala/com/lafros/gui/alerts/AlertsApp.scala | Scala | gpl-3.0 | 1,077 |
package com.datawizards.sparklocal.rdd
import com.datawizards.sparklocal.SparkLocalBaseTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TakeTest extends SparkLocalBaseTest {

  test("Take(n) result") {
    // take(2) keeps the first two elements in encounter order.
    val taken = RDDAPI(Seq(1, 2, 3)).take(2)
    assert(taken sameElements Array(1, 2))
  }

  test("TakeOrdered(n) result") {
    // takeOrdered(2) returns the two smallest elements.
    val smallest = RDDAPI(Seq(5, 6, 3, 5, 2, 3, 5, 6, 3, 1)).takeOrdered(2)
    assert(smallest sameElements Array(1, 2))
  }

  test("Take(n) equal") {
    // Same operation must behave identically on the local and Spark-backed RDD APIs.
    val takeTwo: RDDAPI[Int] => Array[Int] = _.take(2)
    assertRDDOperationReturnsSameResultWithEqual(Seq(1, 2, 3), takeTwo) {
      case (left, right) => left.sameElements(right)
    }
  }

  test("TakeOrdered(n) equal") {
    val takeOrderedTwo: RDDAPI[Int] => Array[Int] = _.takeOrdered(2)
    assertRDDOperationReturnsSameResultWithEqual(Seq(5, 6, 3, 5, 2, 3, 5, 6, 3, 1), takeOrderedTwo) {
      case (left, right) => left.sameElements(right)
    }
  }
} | piotr-kalanski/spark-local | src/test/scala/com/datawizards/sparklocal/rdd/TakeTest.scala | Scala | apache-2.0 | 924 |
object Test extends dotty.runtime.LegacyApp {
case class A()
class B extends A() { override def toString() = "B()" }
println(A())
println(new B())
}
| yusuke2255/dotty | tests/run/t0607.scala | Scala | bsd-3-clause | 158 |
package edu.jhu.hlt.probe
/**
* @author Tongfei Chen
* @since 0.6.0
*/
trait Priority0Implicits extends ProjectionOps {
  /** Lets a [[Featurizer]] be applied like a plain function: `f(x)` delegates to `f.extract(x)`. */
  implicit class PureFeaturizerOps[A, B](val f: Featurizer[A, B]) {
    def apply(x: A) = f.extract(x)
  }
  /** Lets a [[FeatureExtractor]] be applied like a plain function: `f(x)` delegates to `f.extract(x)`. */
  implicit class PureFeatureExtractorOps[A, B](val f: FeatureExtractor[A, B]) {
    def apply(x: A) = f.extract(x)
  }
}
| ctongfei/probe | core/src/main/scala/edu/jhu/hlt/probe/ops.scala | Scala | mit | 355 |
package jigg.pipeline
/*
Copyright 2013-2017 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Properties
import org.scalatest._
import scala.xml._
class EasyCCGAnnotatorSpec extends BaseAnnotatorSpec {
  // Minimal Properties the annotator requires: a (dummy) model path and the
  // number of parses to produce per sentence.
  def mkProps(kBest: Int): Properties = {
    val dummyP = new Properties
    dummyP.setProperty("easyccg.model", "a")
    dummyP.setProperty("easyccg.kBest", kBest+"")
    dummyP
  }
  // Stub that bypasses the real EasyCCG parser and always returns `output`,
  // so these tests exercise only the parser-output-to-XML conversion.
  class AnnotatorStub(output: String, kBest: Int = 1)
      extends EasyCCGAnnotator("easyccg", mkProps(kBest)) {
    override def mkLocalAnnotator = new LocalEasyCCGAnnotator {
      override def buildParser() = new WrappedParser {
        def parse(line: String) = output
      }
    }
    override def nThreads = 1
  }
  // Reset the shared id generators so the span/ccg ids asserted below are deterministic.
  Annotation.CCGSpan.idGen.reset()
  Annotation.CCG.idGen.reset()
  "Annotator" should "add a CCG annotation" in {
    val doc =
      <document id="d1">
        <sentences>
          <sentence id="s1" characterOffsetBegin="0" characterOffsetEnd="14">
            He ate pizza .
            <tokens annotators="corenlp">
              <token characterOffsetEnd="2" characterOffsetBegin="0" id="t4" form="He"/>
              <token characterOffsetEnd="6" characterOffsetBegin="3" id="t5" form="ate"/>
              <token characterOffsetEnd="12" characterOffsetBegin="7" id="t6" form="pizza"/>
              <token characterOffsetEnd="14" characterOffsetBegin="13" id="t7" form="."/>
            </tokens>
          </sentence>
        </sentences>
      </document>
    val output ="""ID=1
(<T S[dcl] ba 1 2> (<L NP He He x x O NP>) (<T S[dcl]\\NP fa 0 2> (<L (S[dcl]\\NP)/NP ate ate x x O (S[dcl]\\NP)/NP>) (<T NP rp 0 2> (<T NP lex 0 1> (<L N pizza pizza x x O N>) ) (<L . . . x x O .>) ) ) )"""
    // (<T S[dcl] ba 1 2>
    //   (<L NP He He x x O NP>)
    //   (<T S[dcl]\\NP fa 0 2>
    //     (<L (S[dcl]\\NP)/NP ate ate x x O (S[dcl]\\NP)/NP>)
    //     (<T NP rp 0 2>
    //       (<T NP lex 0 1>
    //         (<L N pizza pizza x x O N>) )
    //       (<L . . . x x O .>) ) ) )
    val ann = new AnnotatorStub(output)
    val annotation = ann.annotate(doc)
    val s = annotation \\\\ "sentence"
    // Each <span> mirrors one node of the derivation above; leaves point at token ids.
    (s \\ "ccg").head should equal(
      <ccg annotators="easyccg" root="ccgsp0" id="ccg0">
        <span id="ccgsp0" begin="0" end="4" symbol="S[dcl]" rule="ba" children="ccgsp1 ccgsp2"/>
        <span id="ccgsp1" begin="0" end="1" symbol="NP" children="t4"/>
        <span id="ccgsp2" begin="1" end="4" symbol="S[dcl]\\NP" rule="fa" children="ccgsp3 ccgsp4"/>
        <span id="ccgsp3" begin="1" end="2" symbol="(S[dcl]\\NP)/NP" children="t5"/>
        <span id="ccgsp4" begin="2" end="4" symbol="NP" rule="rp" children="ccgsp5 ccgsp7"/>
        <span id="ccgsp5" begin="2" end="3" symbol="NP" rule="lex" children="ccgsp6"/>
        <span id="ccgsp6" begin="2" end="3" symbol="N" children="t6"/>
        <span id="ccgsp7" begin="3" end="4" symbol="." children="t7"/>
      </ccg>) (decided by sameElem)
  }
  it should "add two trees when kBest=2" in {
    val doc =
      <document id="d1">
        <sentences>
          <sentence id="s1" characterOffsetBegin="0" characterOffsetEnd="1">
            A
            <tokens annotators="corenlp">
              <token characterOffsetEnd="1" characterOffsetBegin="0" id="t8" form="A"/>
            </tokens>
          </sentence>
        </sentences>
      </document>
    // Two derivations for the same sentence id — one <ccg> element expected per parse.
    val output ="""ID=1
(<T S[dcl] tr 0 1> (<L NP A A x x O NP>) )
ID=1
(<T S[wq] tr 0 1> (<L NP A A x x O NP>) )"""
    val ann = new AnnotatorStub(output, 2)
    val annotation = ann.annotate(doc)
    val s = annotation \\\\ "sentence"
    val ccgs = s \\ "ccg"
    ccgs(0) should equal(
      <ccg annotators="easyccg" root="ccgsp8" id="ccg1">
        <span id="ccgsp8" begin="0" end="1" symbol="S[dcl]" rule="tr" children="ccgsp9"/>
        <span id="ccgsp9" begin="0" end="1" symbol="NP" children="t8"/>
      </ccg>
    ) (decided by sameElem)
    ccgs(1) should equal(
      <ccg annotators="easyccg" root="ccgsp10" id="ccg2">
        <span id="ccgsp10" begin="0" end="1" symbol="S[wq]" rule="tr" children="ccgsp11"/>
        <span id="ccgsp11" begin="0" end="1" symbol="NP" children="t8"/>
      </ccg>
    ) (decided by sameElem)
  }
}
| mynlp/jigg | src/test/scala/jigg/pipeline/EasyCCGAnnotatorSpec.scala | Scala | apache-2.0 | 4,684 |
package com.twitter.finatra.http.integration.doeverything.test
import com.twitter.finagle.http.Status._
import com.twitter.finatra.http.integration.doeverything.main.DoEverythingServer
import com.twitter.finatra.http.test.EmbeddedHttpServer
import com.twitter.finatra.test.LocalFilesystemTestUtils._
import com.twitter.inject.server.FeatureTest
import java.io.File
import org.apache.commons.io.FileUtils
class OverlappingRootsDoEverythingServerFeatureTest
  extends FeatureTest
  with DocRootLocalFilesystemTestUtility {
  // Lays out a doc root where the mustache templates directory lives *inside*
  // the webapp directory (the "overlapping roots" scenario under test).
  override protected def beforeAll() = {
    super.beforeAll()
    // create src/main/webapp directory and add files
    val webapp = createFile(s"${BaseDirectory}src/main/webapp")
    FileUtils.writeStringToFile(createFile(webapp, "testfile.txt"), testFileText)
    FileUtils.writeStringToFile(createFile(webapp, "testindex.html"), testIndexHtml)
    // create /templates directory *under* webapp and add files
    val templates = createFile(webapp, "templates")
    FileUtils.writeStringToFile(createFile(templates, "testuser.mustache"), testUserMustacheString)
    FileUtils.writeStringToFile(createFile(templates, "testuser2.mustache"), testUser2MustacheString)
    FileUtils.writeStringToFile(createFile(templates, "testHtml.mustache"), testHtmlMustacheString)
  }
  override protected def afterAll() = {
    // try to help clean up
    new File(s"${BaseDirectory}src").delete
    super.afterAll()
  }
  // Note: mustache.templates.dir is relative ("/templates") so it resolves
  // against the local.doc.root configured above.
  override val server = new EmbeddedHttpServer(
    clientFlags = Map(
      "local.doc.root" -> s"${BaseDirectory}src/main/webapp",
      "mustache.templates.dir" -> s"/templates"),
    extraArgs = Array("-magicNum=1", "-moduleMagicNum=2"),
    twitterServer = new DoEverythingServer)
  "DoEverythingServer" should {
    "getView" in {
      server.httpGet(
        "/getView?age=18&name=bob",
        andExpect = Ok,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
    }
    "formPostViewFromBuilderViewWithDiffTemplateThanAnnotation" in {
      server.httpFormPost(
        "/formPostViewFromBuilderView",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "age2:18\nname2:bob\nuser1\nuser2\n")
    }
    "formPostViewFromBuilderHtml" in {
      server.httpFormPost(
        "/formPostViewFromBuilderHtml",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
    }
    "formPostViewFromBuilderCreatedView" in {
      val response = server.httpFormPost(
        "/formPostViewFromBuilderCreatedView",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Created,
        withBody = "age2:18\nname2:bob\nuser1\nuser2\n")
      // 201 Created responses must also carry the resource Location header.
      response.location should equal(Some("/foo/1"))
    }
    "formPostViewFromBuilderCreatedHtml" in {
      val response = server.httpFormPost(
        "/formPostViewFromBuilderCreatedHtml",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Created,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
      response.location should equal(Some("/foo/1"))
    }
    "testfile" in {
      server.httpGet(
        "/testfile",
        andExpect = Ok,
        withBody = "testfile123")
    }
    "testfile when not found" in {
      server.httpGet(
        "/testfileWhenNotfound",
        andExpect = NotFound,
        withBody = "/doesntexist.txt not found")
    }
    "index root" in {
      server.httpGet(
        "/index/",
        andExpect = Ok,
        withBody = "testindex")
    }
    // Unknown paths under /index fall back to the index page…
    "index file without extension" in {
      server.httpGet(
        "/index/testfile",
        andExpect = Ok,
        withBody = "testindex")
    }
    // …but a path with an extension resolves to the actual static file.
    "index file with extension" in {
      server.httpGet(
        "/index/testfile.txt",
        andExpect = Ok,
        withBody = "testfile123")
    }
    "TestCaseClassWithHtml" in {
      server.httpGet(
        "/testClassWithHtml",
        andExpect = Ok,
        withJsonBody =
          """
          |{
          | "address" : "123 Main St. Anywhere, CA US 90210",
          | "phone" : "+12221234567",
          | "rendered_html" : "<div class="nav">\n <table cellpadding="0" cellspacing="0">\n <tr>\n <th>Name</th>\n <th>Age</th>\n <th>Friends</th>\n </tr>\n <tr>\n <td>age2:28</td>\n <td>name:Bob Smith</td>\n <td>\n user1\n user2\n </td>\n </tr>\n </table>\n</div>"
          |}
        """.
            stripMargin)
    }
  }
}
| joecwu/finatra | http/src/test/scala/com/twitter/finatra/http/integration/doeverything/test/OverlappingRootsDoEverythingServerFeatureTest.scala | Scala | apache-2.0 | 4,676 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.directdictionary
import java.io.File
import java.sql.Timestamp

import scala.util.control.NonFatal

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
/**
* Test Class for detailed query on timestamp datatypes
*
*
*/
class TimestampDataTypeNullDataTest extends QueryTest with BeforeAndAfterAll {
  // NOTE(review): never assigned or read within this suite — looks like dead
  // state; confirm with other suites before removing (it is a public var).
  var hiveContext: HiveContext = _

  /** Creates the test table and loads the fixture containing null timestamps. */
  override def beforeAll {
    try {
      // Direct-dictionary timestamp configuration must be in place before the table is created.
      CarbonProperties.getInstance()
        .addProperty(TimeStampGranularityConstants.CARBON_CUTOFF_TIMESTAMP, "2000-12-13 02:10.00.0")
      CarbonProperties.getInstance()
        .addProperty(TimeStampGranularityConstants.CARBON_TIME_GRANULARITY,
          TimeStampGranularityConstants.TIME_GRAN_SEC.toString
        )
      sql(
        """CREATE TABLE IF NOT EXISTS timestampTyeNullData
                     (ID Int, dateField Timestamp, country String,
                     name String, phonetype String, serialname String, salary Int)
                    STORED BY 'org.apache.carbondata.format'"""
      )
      // The fixture file encodes dates as yyyy/MM/dd.
      CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
      val csvFilePath = s"$resourcesPath/datasamplenull.csv"
      sql("LOAD DATA LOCAL INPATH '" + csvFilePath + "' INTO TABLE timestampTyeNullData").collect();
    } catch {
      // Recover only from non-fatal failures: the previous blanket `Throwable`
      // catch would also swallow fatal JVM errors (OutOfMemoryError, linkage
      // errors) and silently hide them.
      case NonFatal(x) => CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
    }
  }

  // max() over the non-null rows must ignore rows whose dateField is null.
  test("SELECT max(dateField) FROM timestampTyeNullData where dateField is not null") {
    checkAnswer(
      sql("SELECT max(dateField) FROM timestampTyeNullData where dateField is not null"),
      Seq(Row(Timestamp.valueOf("2015-07-23 00:00:00.0"))
      )
    )
  }

  // Unparseable/missing timestamps must surface as SQL NULL.
  test("SELECT * FROM timestampTyeNullData where dateField is null") {
    checkAnswer(
      sql("SELECT dateField FROM timestampTyeNullData where dateField is null"),
      Seq(Row(null)
      ))
  }

  /** Drops the table and restores the globally shared timestamp format. */
  override def afterAll {
    sql("drop table timestampTyeNullData")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
    CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "false")
  }
} | shivangi1015/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala | Scala | apache-2.0 | 3,342 |
/*
* Copyright 2013 Filippo De Luca - http://filippodeluca.com
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.github.filosganga.play.cassandra
import play.api.{Logger, Configuration}
import com.datastax.driver.core.{LatencyTracker, Session, Cluster}
import com.google.common.cache.{RemovalNotification, RemovalListener, CacheBuilder, Cache}
import java.util.concurrent.{Callable, TimeUnit}
import com.datastax.driver.core.Host.StateListener
/**
* The Cassandra API layer.
*
* @param cfg The play configuration containing cassandra settings (usually cassandra.*),
* @param clusterFactory The ClusterFactory needed to build the Cassandra Cluster.
*/
class CassandraApi(cfg: Configuration, clusterFactory: ClusterFactory) {
  // One configured database name per sub-key of the cassandra.* configuration block.
  private val names = cfg.subKeys
  // One Cluster per configured database, built eagerly at construction time.
  protected[cassandra] val clusters: Map[String, Cluster] = names.map{dbName=>
    dbName->clusterFactory.initCluster(dbName, cfg)
  }.toMap
  // Session cache keyed by "dbName" or "dbName:keyspace"; idle sessions are
  // evicted (and shut down via removalListener) after 5 minutes without access.
  protected[cassandra] val sessions: Cache[String, Session] = CacheBuilder
    .newBuilder()
    .expireAfterAccess(5, TimeUnit.MINUTES)
    .removalListener(removalListener)
    .build()
  // Shuts down a Session when the cache evicts it, so connections are not leaked.
  private def removalListener: RemovalListener[String, Session] = {
    new RemovalListener[String, Session] {
      def onRemoval(notification: RemovalNotification[String, Session]) {
        Logger.info("Shutting down Cassandra Session...")
        notification.getValue.shutdown()
      }
    }
  }
  // Runs `block` against the named cluster; throws IllegalArgumentException for unknown names.
  private def withCluster[A](dbName: String)(block: Cluster => A): A = clusters.get(dbName).map{c=>
    block(c)
  }.getOrElse {
    throw new IllegalArgumentException("Cassandra database[" + dbName + "] not available")
  }
  /** Runs `block` with a (cached) session bound to no particular keyspace. */
  def withSession[A](dbName: String)(block: Session => A): A = withCluster(dbName){c=>
    val session = sessions.get(dbName, new Callable[Session]{
      def call() = {
        c.connect()
      }
    })
    block(session)
  }
  /** Runs `block` with a (cached) session connected to the given keyspace. */
  def withSession[A](dbName: String, keyspace: String)(block: Session => A): A = withCluster(dbName){c=>
    val session = sessions.get(dbName + ":" + keyspace, new Callable[Session]{
      def call() = {
        c.connect(keyspace)
      }
    })
    block(session)
  }
  /**
   * Registers a LatencyTracker or Host.StateListener with the named cluster.
   * The Registrable context bound restricts T to those two types, which keeps
   * the match below exhaustive in practice.
   */
  def register[T: Registrable](clusterName: String = "default")(toRegister: T) {
    withCluster(clusterName){c=> toRegister match {
      case x: LatencyTracker => c.register(x)
      case x: StateListener => c.register(x)
    }}
  }
  /** Reverse of [[register]]: detaches the tracker/listener from the named cluster. */
  def unregister[T: Registrable](clusterName: String = "default")(toUnregister: T) {
    withCluster(clusterName){c=> toUnregister match {
      case x: LatencyTracker => c.unregister(x)
      case x: StateListener => c.unregister(x)
    }}
  }
}
// Type-class marker limiting CassandraApi.register/unregister to the driver
// types that Cluster actually accepts; sealed so no other instances can be added.
sealed class Registrable[T]
object Registrable {
  implicit object LatencyTrackerRegistrable extends Registrable[LatencyTracker]
  implicit object StateListenerRegistrable extends Registrable[StateListener]
}
| filosganga/play-cassandra | src/main/scala/com/github/filosganga/play/cassandra/CassandraApi.scala | Scala | apache-2.0 | 3,599 |
package com.twitter.finagle.thrift
import com.twitter.finagle.{Path, Dtab}
import com.twitter.finagle.tracing.{Flags, SpanId, TraceId}
import java.util.ArrayList
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * Verifies that RichRequestHeader exposes safe, defaulted views
 * (clientId, dest, dtab, traceId) over a raw thrift RequestHeader,
 * both for unset and fully populated headers.
 */
class RichRequestHeaderTest extends FunSuite {
  test("None if clientId is not set") {
    val header = new thrift.RequestHeader
    val richHeader = new RichRequestHeader(header)
    assert(None === richHeader.clientId)
  }

  test("None if clientId.name is not set") {
    // A ClientId struct with no name must still map to None, not Some(ClientId(null)).
    val header = (new thrift.RequestHeader)
      .setClient_id(new thrift.ClientId)
    val richHeader = new RichRequestHeader(header)
    assert(None === richHeader.clientId)
  }

  test("Some(clientId)") {
    val header = (new thrift.RequestHeader)
      .setClient_id(new thrift.ClientId("foo"))
    val richHeader = new RichRequestHeader(header)
    assert(Some(ClientId("foo")) === richHeader.clientId)
  }

  // Renamed from "empth path" — fixes the typo in the test description.
  test("empty path if dest is null") {
    val header = new thrift.RequestHeader
    val richHeader = new RichRequestHeader(header)
    assert(Path.empty === richHeader.dest)
  }

  test("path if dest is non-null") {
    val header = (new thrift.RequestHeader)
      .setDest("/foo")
    val richHeader = new RichRequestHeader(header)
    assert(Path.read("/foo") === richHeader.dest)
  }

  test("null dtab") {
    val header = new thrift.RequestHeader
    val richHeader = new RichRequestHeader(header)
    assert(Dtab.empty === richHeader.dtab)
  }

  test("non-null dtab") {
    val delegations = new ArrayList[thrift.Delegation]
    delegations.add(new thrift.Delegation("/foo", "/bar"))
    val header = (new thrift.RequestHeader)
      .setDelegations(delegations)
    val richHeader = new RichRequestHeader(header)
    assert(Dtab.read("/foo=>/bar") === richHeader.dtab)
  }

  test("default traceId") {
    // With nothing set, trace and span ids default to 0 and sampling is undecided.
    val header = new thrift.RequestHeader
    val richHeader = new RichRequestHeader(header)
    assert(TraceId(Some(SpanId(0)), None, SpanId(0), None, Flags()) === richHeader.traceId)
  }

  test("non-default traceId") {
    val header = (new thrift.RequestHeader)
      .setTrace_id(0)
      .setParent_span_id(1)
      .setSpan_id(2)
      .setSampled(true)
      .setFlags(4)
    val richHeader = new RichRequestHeader(header)
    val expected = TraceId(Some(SpanId(0)), Some(SpanId(1)), SpanId(2), Some(true), Flags(4))
    assert(expected === richHeader.traceId)
  }
}
| travisbrown/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/RichRequestHeaderTest.scala | Scala | apache-2.0 | 2,475 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.moneyservicebusiness
import org.scalatestplus.play.PlaySpec
import jto.validation.{Invalid, Path, Valid}
import jto.validation.ValidationError
import play.api.libs.json.{JsPath, JsSuccess}
/**
 * Form and JSON (de)serialisation rules for FXTransactionsInNext12Months:
 * the field is mandatory, numeric-only, and limited in length.
 */
class FXTransactionInNext12MonthsSpec extends PlaySpec {

  "FXTransactionInNext12Months" should {

    "Form Validation" must {

      "Successfully read form data for option yes" in {
        val map = Map("fxTransaction" -> Seq("12345678963"))
        FXTransactionsInNext12Months.formRule.validate(map) must be(Valid(FXTransactionsInNext12Months("12345678963")))
      }

      "fail validation on missing field" in {
        val map = Map("fxTransaction" -> Seq(""))
        FXTransactionsInNext12Months.formRule.validate(map) must be(Invalid(
          Seq( Path \\ "fxTransaction" -> Seq(ValidationError("error.required.msb.fx.transactions.in.12months")))))
      }

      "fail validation on invalid field" in {
        // Non-numeric input is rejected with the "number" error.
        val map = Map("fxTransaction" -> Seq("asas"))
        FXTransactionsInNext12Months.formRule.validate(map) must be(Invalid(
          Seq( Path \\ "fxTransaction" -> Seq(ValidationError("error.invalid.msb.fx.transactions.in.12months.number")))))
      }

      "fail validation on invalid field when it exceeds the max length" in {
        val map = Map("fxTransaction" -> Seq("123"*10))
        FXTransactionsInNext12Months.formRule.validate(map) must be(Invalid(
          Seq( Path \\ "fxTransaction" -> Seq(ValidationError("error.invalid.msb.fx.transactions.in.12months")))))
      }

      // Renamed from "fail validation on invalid field1": the body asserts a
      // *successful* validation, so the old description was misleading.
      "successfully validate a shorter numeric value" in {
        val map = Map("fxTransaction" -> Seq("123456"))
        FXTransactionsInNext12Months.formRule.validate(map) must be(Valid(FXTransactionsInNext12Months("123456")))
      }

      "successfully write form data" in {
        FXTransactionsInNext12Months.formWrites.writes(FXTransactionsInNext12Months("12345678963")) must be(Map("fxTransaction" -> Seq("12345678963")))
      }
    }

    "Json Validation" must {

      "Successfully read/write Json data" in {
        // Round-trip: writes then reads must reproduce the original value.
        FXTransactionsInNext12Months.format.reads(FXTransactionsInNext12Months.format.writes(
          FXTransactionsInNext12Months("12345678963"))) must be(JsSuccess(FXTransactionsInNext12Months("12345678963"), JsPath))
      }
    }
  }
}
| hmrc/amls-frontend | test/models/moneyservicebusiness/FXTransactionInNext12MonthsSpec.scala | Scala | apache-2.0 | 3,130 |
package mesosphere.marathon.event.http
import javax.inject.Inject
import akka.actor._
import akka.pattern.ask
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.event._
import mesosphere.marathon.event.http.HttpEventActor._
import mesosphere.marathon.event.http.SubscribersKeeperActor.GetSubscribers
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import spray.client.pipelining.{ sendReceive, _ }
import spray.http.{ HttpRequest, HttpResponse }
import spray.httpx.PlayJsonSupport
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
import scala.util.control.NonFatal
import scala.util.{ Failure, Success }
/**
* This actor subscribes to the event bus and distributes every event to all http callback listener.
* The list of active subscriptions is handled in the subscribersKeeper.
* If a callback handler can not be reached or is slow, an exponential backoff is applied.
*/
object HttpEventActor {
  /** Sent to the actor when a callback POST failed or was slower than the configured timeout. */
  case class NotificationFailed(url: String)
  /** Sent to the actor when a callback POST succeeded within the configured timeout. */
  case class NotificationSuccess(url: String)
  /**
   * Backoff state for one callback URL: after n consecutive failures the URL
   * is suspended for 2^n seconds (see nextFailed).
   */
  case class EventNotificationLimit(failedCount: Long, backoffUntil: Option[Deadline]) {
    def nextFailed: EventNotificationLimit = {
      val next = failedCount + 1
      EventNotificationLimit(next, Some(math.pow(2, next.toDouble).seconds.fromNow))
    }
    // Allowed again when no deadline is set or the deadline has already passed.
    def notLimited: Boolean = backoffUntil.fold(true)(_.isOverdue())
    def limited: Boolean = !notLimited
  }
  /** Initial state: no failures, no backoff. */
  val NoLimit = EventNotificationLimit(0, None)
  /** Internal message pairing an event with the resolved subscriber set. */
  private case class Broadcast(event: MarathonEvent, subscribers: EventSubscribers)
  class HttpEventActorMetrics @Inject() (metrics: Metrics) {
    private val pre = MetricPrefixes.SERVICE
    private val clazz = classOf[HttpEventActor]
    // the number of requests that are open without response
    val outstandingCallbacks = metrics.counter(metrics.name(pre, clazz, "outstanding-callbacks"))
    // the number of events that are broadcast
    val eventMeter = metrics.meter(metrics.name(pre, clazz, "events"))
    // the number of events that are not sent to callback listeners due to backoff
    val skippedCallbacks = metrics.meter(metrics.name(pre, clazz, "skipped-callbacks"))
    // the number of callbacks that have failed during delivery
    val failedCallbacks = metrics.meter(metrics.name(pre, clazz, "failed-callbacks"))
    // the response time of the callback listeners
    val callbackResponseTime = metrics.timer(metrics.name(pre, clazz, "callback-response-time"))
  }
}
class HttpEventActor(conf: HttpEventConfiguration,
                     subscribersKeeper: ActorRef,
                     metrics: HttpEventActorMetrics,
                     clock: Clock)
    extends Actor with ActorLogging with PlayJsonSupport {

  implicit val timeout = HttpEventModule.timeout

  /** Spray request pipeline used for the callback POSTs; asks for JSON responses. */
  def pipeline(implicit ec: ExecutionContext): HttpRequest => Future[HttpResponse] = {
    addHeader("Accept", "application/json") ~> sendReceive
  }

  // Per-URL backoff state; URLs without an entry are unlimited.
  var limiter = Map.empty[String, EventNotificationLimit].withDefaultValue(NoLimit)

  def receive: Receive = {
    case event: MarathonEvent          => resolveSubscribersForEventAndBroadcast(event)
    case Broadcast(event, subscribers) => broadcast(event, subscribers)
    case NotificationSuccess(url)      => limiter += url -> NoLimit
    case NotificationFailed(url)       => limiter += url -> limiter(url).nextFailed
    case _                             => log.warning("Message not understood!")
  }

  /** Asks the subscribersKeeper for the current subscriber set, then self-sends a Broadcast. */
  def resolveSubscribersForEventAndBroadcast(event: MarathonEvent): Unit = {
    metrics.eventMeter.mark()
    log.info("POSTing to all endpoints.")
    val me = self
    import context.dispatcher
    (subscribersKeeper ? GetSubscribers).mapTo[EventSubscribers].map { subscribers =>
      me ! Broadcast(event, subscribers)
    }.onFailure {
      // Fix: pass the exception to log.error so the actual cause is recorded,
      // instead of only logging the event it happened for.
      case NonFatal(e) => log.error(e, "While trying to resolve subscribers for event {}", event)
    }
  }

  /** POSTs the event to all subscribers that are not currently in backoff. */
  def broadcast(event: MarathonEvent, subscribers: EventSubscribers): Unit = {
    val (active, limited) = subscribers.urls.partition(limiter(_).notLimited)
    if (limited.nonEmpty) {
      log.info(s"""Will not send event ${event.eventType} to unresponsive hosts: ${limited.mkString(" ")}""")
    }
    //remove all unsubscribed callback listener
    limiter = limiter.filterKeys(subscribers.urls).iterator.toMap.withDefaultValue(NoLimit)
    metrics.skippedCallbacks.mark(limited.size)
    active.foreach(post(_, event, self))
  }

  /**
   * POSTs a single event to one callback URL and reports the outcome back to
   * `eventActor` as NotificationSuccess/NotificationFailed (a slow success
   * counts as a failure for backoff purposes).
   */
  def post(url: String, event: MarathonEvent, eventActor: ActorRef): Unit = {
    log.info("Sending POST to:" + url)

    metrics.outstandingCallbacks.inc()
    val start = clock.now()
    val request = Post(url, eventToJson(event))
    val response = pipeline(context.dispatcher)(request)

    import context.dispatcher
    // Always settle the metrics, regardless of success or failure.
    response.onComplete {
      case _ =>
        metrics.outstandingCallbacks.dec()
        metrics.callbackResponseTime.update(start.until(clock.now()))
    }
    response.onComplete {
      case Success(res) if res.status.isSuccess =>
        val inTime = start.until(clock.now()) < conf.slowConsumerTimeout
        eventActor ! (if (inTime) NotificationSuccess(url) else NotificationFailed(url))
      case Success(res) =>
        log.warning(s"No success response for post $event to $url")
        metrics.failedCallbacks.mark()
        eventActor ! NotificationFailed(url)
      case Failure(ex) =>
        log.warning(s"Failed to post $event to $url because ${ex.getClass.getSimpleName}: ${ex.getMessage}")
        metrics.failedCallbacks.mark()
        eventActor ! NotificationFailed(url)
    }
  }
}
| vivekjuneja/marathon | src/main/scala/mesosphere/marathon/event/http/HttpEventActor.scala | Scala | apache-2.0 | 5,610 |
package com.bstek.designer.core
import _root_.icons.DoradoIcons
import com.bstek.designer.core.surface.DoradoDesignerEditorPanel
import com.intellij.designer._
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.fileEditor.FileEditorManager
import com.intellij.openapi.project.Project
import com.intellij.openapi.wm.ex.ToolWindowEx
import com.intellij.openapi.wm.impl.content.ToolWindowContentUi
import com.intellij.openapi.wm.{ToolWindowAnchor, ToolWindowManager}
import com.intellij.ui.content.{Content, ContentManager}
import org.jetbrains.annotations.Nullable
import scala.beans.BeanProperty
/**
* Dorado7属性编辑子窗口管理抽象父类
* Created by robin on 14-6-280.
*
* @author robin
*
*/
abstract class DoradoComponentTreeToolWindowManager(project: Project, fileEditorManager: FileEditorManager) extends AbstractDoradoToolWindowManager(project, fileEditorManager) {
  // Content panel shown inside the tool window; created via the abstract factory below.
  val doradoToolWindowContent: DoradoComponentTreeToolWindowContent = createDoradoToolWindowContent
  //-- build the tool window
  protected def initToolWindow {
    //-- register the tool window programmatically
    doradoToolWindow = ToolWindowManager.getInstance(project).registerToolWindow("Dorado7 ComponentTree", false, getAnchor, project, true)
    doradoToolWindow.setIcon(DoradoIcons.DORADO7_PROPERTY_SHEET)
    //-- skip the id-label tweak when running in a headless environment
    if (!ApplicationManager.getApplication.isHeadlessEnvironment) {
      doradoToolWindow.getComponent.putClientProperty(ToolWindowContentUi.HIDE_ID_LABEL, "true")
    }
    //-- install the toolbar (title) action set
    (doradoToolWindow.asInstanceOf[ToolWindowEx]).setTitleActions(doradoToolWindowContent.createActions: _*)
    //-- extend the gear ("settings") action set
    initGearActions
    val contentManager: ContentManager = doradoToolWindow.getContentManager
    val content: Content = contentManager.getFactory.createContent(doradoToolWindowContent.getToolWindowPanel, "Dorado7 Properties", false)
    content.setCloseable(false)
    //content.setPreferredFocusableComponent(doradoToolWindowContent.getComponentTree)
    contentManager.addContent(content)
    contentManager.setSelectedContent(content, true)
    // Hidden until a designer becomes active (see updateToolWindow).
    doradoToolWindow.setAvailable(false, null)
  }
  // Anchor from the designer customizations when present, LEFT otherwise.
  protected def getAnchor: ToolWindowAnchor = {
    val customization: DesignerCustomizations = getCustomizations
    return if (customization != null) customization.getStructureAnchor else ToolWindowAnchor.LEFT
  }
  // Shows the tool window while a designer is active; hides it when none is.
  protected def updateToolWindow(@Nullable designer: DoradoDesignerEditorPanel) {
    doradoToolWindowContent.update(designer)
    if (designer == null) {
      doradoToolWindow.setAvailable(false, null)
    }
    else {
      doradoToolWindow.setAvailable(true, null)
      doradoToolWindow.show(null)
    }
  }
  override def disposeComponent {
    doradoToolWindowContent.dispose
  }
  def getComponentName: String = {
    return "DoradoDesignerToolWindowManager"
  }
  protected def createContent(designer: DoradoDesignerEditorPanel): DoradoToolWindow = {
    val toolWindowContent: DoradoComponentTreeToolWindowContent = createDoradoToolWindowContent
    toolWindowContent.update(designer)
    return createContent(designer, toolWindowContent, "Dorado7 ComponentTree", DoradoIcons.DORADO7_PROPERTY_SHEET, toolWindowContent.getToolWindowPanel, null, 320, toolWindowContent.createActions)
  }
  /** Factory for the tool-window content panel; implemented by concrete subclasses. */
  def createDoradoToolWindowContent:DoradoComponentTreeToolWindowContent
}
| OuYuBin/IDEADorado | dorado-core/src/com/bstek/designer/core/DoradoComponentTreeToolWindowManager.scala | Scala | apache-2.0 | 3,412 |
package gh.test.gh2011
import gh2011.models.Actor
import org.scalatest.{Matchers, FlatSpec}
import net.liftweb.json._
/** Verifies that [[Actor]] successfully parses a well-formed GitHub actor JSON payload. */
class ActorTest extends FlatSpec with Matchers
{
  // Fixed typo in the test name: "correclty" -> "correctly".
  "A valid Avatar" must "be correctly parsed" in {
    val json = parse(
      """
        | {
        |
        | "gravatar_id":"887fa2fcd0cf1cbdc6dc43e5524f33f6",
        | "id":65608,
        | "url":"https://api.github.dev/users/bess",
        | "avatar_url":"https://secure.gravatar.com/avatar/887fa2fcd0cf1cbdc6dc43e5524f33f6?d=http://github.dev%2Fimages%2Fgravatars%2Fgravatar-user-420.png",
        | "login":"bess"
        |
        |}
      """.stripMargin)
    Actor(json).isDefined shouldBe true
  }
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2011/ActorTest.scala | Scala | mit | 740 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.frontend.filters
import javax.inject.Inject
import play.api.{Configuration, Logger, Play}
import uk.gov.hmrc.crypto.{ApplicationCrypto, Crypted, PlainText}
class SessionCookieCryptoFilter(applicationCrypto: ApplicationCrypto)
    extends CookieCryptoFilter
    with MicroserviceFilterSupport {

  // Deliberately lazy: this filter is instantiated before the application
  // configuration has been loaded, so the crypto must not be resolved eagerly.
  private lazy val crypto = applicationCrypto.SessionCookieCrypto

  /** Encrypts a plain session cookie value. */
  def encrypt(plainCookie: String): String =
    crypto.encrypt(PlainText(plainCookie)).value

  /** Decrypts an encrypted session cookie value back to plain text. */
  def decrypt(encryptedCookie: String): String =
    crypto.decrypt(Crypted(encryptedCookie)).value

  // Eta-expand the methods above into the function values the parent filter expects.
  override protected val encrypter = encrypt _
  override protected val decrypter = decrypt _
}
| hmrc/frontend-bootstrap | src/main/scala/uk/gov/hmrc/play/frontend/filters/SessionCookieCryptoFilter.scala | Scala | apache-2.0 | 1,351 |
package com.cpuheater.ml
import com.cpuheater.util.Loader
import org.nd4j.linalg.api.buffer.DataBuffer
import org.nd4j.linalg.api.buffer.util.DataTypeUtil
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.indexing.BooleanIndexing
import org.nd4j.linalg.indexing.conditions.Conditions
import org.nd4j.linalg.ops.transforms.Transforms._
import org.nd4s.Implicits._
import org.nd4s.Evidences.float
object Ex3 extends App with Ex3Util{

  // Use single-precision floats for all ND4J buffers in this context.
  DataTypeUtil.setDTypeForContext(DataBuffer.Type.FLOAT)

  val numLinesToSkip = 0
  val delimiter = ","

  /**
   * Multi-class (one-vs-all) logistic regression on the ex3 digit data.
   * Thetas are found with plain batch gradient descent rather than an
   * off-the-shelf optimization procedure.
   */
  def logisticRegressionMultiClass(): Unit = {
    val alpha = 4f
    val iters = 100
    val content = Loader.load("ex3/ex3data1.mat")
    val features: INDArray = Nd4j.create(content("X"))
    val labels:INDArray = Nd4j.create(content("y"))
    // Prepend a column of ones for the bias/intercept term.
    val ones = Nd4j.ones(features.rows(), 1)
    val featuresWithBias = Nd4j.concat(1, ones, features)
    val allThetas = computeThetasForEachClass(featuresWithBias, labels, alpha, iters)
    // Picks the class whose one-vs-all classifier outputs the highest
    // probability; index 0 encodes digit 10 (MATLAB-style 1-based labels).
    def classPrediction(features: INDArray, thetas: INDArray): Float = {
      val predictions = (0 until 10).map{
        index =>
          val pred = hypothesis(features, thetas(index, ->))
          pred.getFloat(0)
      }
      val max = Nd4j.argMax(Nd4j.create(predictions.toArray)).getFloat(0)
      if(max == 0) 10.0f else max
    }
    // Count correct predictions over the whole training set.
    val (total, correct) = (0 until featuresWithBias.rows()).foldLeft((0, 0)){
      case ((total, correct), index) =>
        val pred = classPrediction(featuresWithBias.getRow(index), allThetas)
        if(classPrediction(featuresWithBias.getRow(index), allThetas) == labels.getRow(index).getFloat(0))
          (total+1, correct+1)
        else
          (total+1, correct)
    }
    println(s"Logistic Regression Multi Class Accuracy ${correct.toDouble/total}")
  }

  /**
   * Forward pass through the pre-trained 3-layer network (ex3weights.mat) and
   * accuracy measurement over the whole data set.
   */
  def neuralNetwork() = {
    val content = Loader.load("ex3/ex3data1.mat")
    val features: INDArray = Nd4j.create(content("X"))
    val labels:INDArray = Nd4j.create(content("y").flatten)
    val ones = Nd4j.ones(features.rows(), 1)
    val featuresWithBias = Nd4j.concat(1, ones, features)
    val thetas = Loader.load("ex3/ex3weights.mat")
    val (theta1, theta2) = (Nd4j.create(thetas("Theta1")), Nd4j.create(thetas("Theta2")))
    // input -> hidden (sigmoid, bias unit prepended) -> output (sigmoid)
    def forwardPropagate(features: INDArray, theta1: INDArray, theta2: INDArray) : INDArray= {
      val z2 = features.mmul(theta1.T)
      val ones = Nd4j.ones(1)
      val a2 = Nd4j.concat(1, ones, sigmoid(z2))
      val z3 = a2.mmul(theta2.T)
      val output = sigmoid(z3)
      output
    }
    val correct = (0 until featuresWithBias.rows()).foldLeft(0){
      case (accu, rowIndex) =>
        val dataRow = featuresWithBias.getRow(rowIndex)
        val output = forwardPropagate(dataRow, theta1, theta2)
        // argMax is 0-based while labels run 1..10, hence the +1.
        val argMax = Nd4j.argMax(output, 1).getInt(0) +1
        val label = labels.getDouble(rowIndex).toInt
        if(argMax == label)
          accu + 1
        else
          accu
    }
    println(s"Neural network accuracy: ${correct/(featuresWithBias.rows()toDouble)}")
  }

  neuralNetwork()
  logisticRegressionMultiClass()
}
trait Ex3Util {

  /** Logistic-regression hypothesis: sigmoid(X * theta^T). */
  def hypothesis(features: INDArray, thetas: INDArray) ={
    sigmoid(features.mmul(thetas.T))
  }

  /**
   * Regularized cross-entropy (log-loss) cost for logistic regression.
   *
   * @param lambda regularization strength; 0 (the default) disables regularization
   */
  def computeCost(features: INDArray, labels: INDArray, thetas: INDArray, lambda: Float = 0.0f): Float = {
    val output = hypothesis(features, thetas)
    val term1 = log(output).mul(-labels)
    val term2 = log(output.rsub(1)).mul(labels.rsub(1))
    // 0 * log(0) yields NaN; treat those terms as 0.
    Nd4j.clearNans(term2)
    val nbOfTrainingExamples = features.rows()
    // BUG FIX: the regularization factor is lambda / (2 * m). The previous
    // expression `lambda/2*nbOfTrainingExamples` parsed as (lambda/2)*m,
    // inflating the penalty by m^2 for any lambda > 0. Current callers use the
    // default lambda = 0, so their results are unchanged.
    // NOTE(review): slicing thetas with `1 to term1.rows()` looks suspicious
    // (thetas is a single row); presumably the intent is to exclude the bias
    // weight from regularization -- confirm.
    val regularization = (thetas(1 to term1.rows(), ->).mmul(thetas(1 to term1.rows(), ->).T) * (lambda / (2 * nbOfTrainingExamples))).getFloat(0)
    val crossEntropy = term1.sub(term2).sumNumber().floatValue()/nbOfTrainingExamples + regularization
    crossEntropy
  }

  /**
   * Batch gradient descent for logistic regression; returns the learned
   * 1 x n theta row vector. Prints the cost after every iteration.
   */
  def computeGradient(features: INDArray, labels: INDArray, alpha: Float, iters: Int, lambda: Float = 0.0f): INDArray ={
    val thetas = Nd4j.zeros(features.columns(), 1).T
    val nbOfTrainingExamples = features.rows()
    val updatedTheta = (0 to iters).foldLeft(thetas)({
      case (thetas, i) =>
        val error = sigmoid(features.mmul(thetas.T)) - labels
        // Regularization applies to all weights except the bias (column 0).
        // NOTE(review): `regu` is not scaled by alpha while `grad` is; the
        // standard update scales both by alpha/m. Current callers pass
        // lambda = 0, so behavior is unchanged -- confirm before enabling.
        val regu = thetas(->, 1->) * lambda/nbOfTrainingExamples
        val grad = error.T.dot(features) * alpha/nbOfTrainingExamples
        grad(->, 1->) = grad(->, 1->) + regu
        val updatedThetas = thetas - grad
        println(s"Cost: ${computeCost(features, labels, updatedThetas)}")
        updatedThetas
    })
    updatedTheta
  }

  /**
   * One-vs-all training: learns one binary classifier per digit class
   * (1..9 plus 10, stored at row 0) and stacks them into a 10 x n matrix.
   */
  def computeThetasForEachClass(features: INDArray, labels: INDArray, alpha: Float, iters: Int): INDArray = {
    val thetas = (0 until 10).foldLeft(Nd4j.zeros(10, features.columns())){
      case (allThetas, index) =>
        val labelsDuplicate = labels.dup()
        val `class` = if(index==0) 10 else index
        // Binarize labels: 1.0 for the current class, 0.0 for everything else.
        // The sentinel 100 works around the broken Conditions.notEquals.
        //TODO Waiting when Conditions.notEquals will be fixed
        BooleanIndexing.applyWhere(labelsDuplicate, Conditions.equals(`class`), 100)
        BooleanIndexing.applyWhere(labelsDuplicate, Conditions.lessThan(100), 0.0)
        BooleanIndexing.applyWhere(labelsDuplicate, Conditions.equals(100), 1.0)
        val currentThetas = computeGradient(features, labelsDuplicate, alpha, iters)
        allThetas(index, ->) = currentThetas
    }
    thetas
  }
}
| cpuheater/ml-coursera-scala | src/main/scala/com/cpuheater/ml/Ex3.scala | Scala | apache-2.0 | 5,504 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2005-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package mutable
import generic._
/** This class implements mutable sets using a hashtable.
* The iterator and all traversal methods of this class visit elements in the order they were inserted.
*
* @author Matthias Zenger
* @author Martin Odersky
* @author Pavel Pavlov
* @version 2.0, 31/12/2006
* @since 1
*
* @tparam A the type of the elements contained in this set.
*
* @define Coll `LinkedHashSet`
* @define coll linked hash set
* @define thatinfo the class of the returned collection. In the standard library configuration,
* `That` is always `LinkedHashSet[B]` because an implicit of type `CanBuildFrom[LinkedHashSet, B, LinkedHashSet[B]]`
* is defined in object `LinkedHashSet`.
* @define bfinfo an implicit value of class `CanBuildFrom` which determines the
* result class `That` from the current representation type `Repr`
* and the new element type `B`. This is usually the `canBuildFrom` value
* defined in object `LinkedHashSet`.
* @define mayNotTerminateInf
* @define willNotTerminateInf
* @define orderDependent
* @define orderDependentFold
*/
@SerialVersionUID(1L)
class LinkedHashSet[A] extends AbstractSet[A]
                        with Set[A]
                        with GenericSetTemplate[A, LinkedHashSet]
                        with SetLike[A, LinkedHashSet[A]]
                        with HashTable[A, LinkedHashSet.Entry[A]]
                        with Serializable
{
  override def companion: GenericCompanion[LinkedHashSet] = LinkedHashSet

  type Entry = LinkedHashSet.Entry[A]

  // Head and tail of the insertion-order doubly-linked list threaded through
  // the hash-table entries. Transient: rebuilt from scratch in readObject.
  @transient protected var firstEntry: Entry = null
  @transient protected var lastEntry: Entry = null

  override def size: Int = tableSize

  def contains(elem: A): Boolean = findEntry(elem) ne null

  @deprecatedOverriding("+= should not be overridden so it stays consistent with add.", "2.11.0")
  def += (elem: A): this.type = { add(elem); this }

  @deprecatedOverriding("-= should not be overridden so it stays consistent with remove.", "2.11.0")
  def -= (elem: A): this.type = { remove(elem); this }

  // findOrAddEntry returns null exactly when the element was newly inserted.
  override def add(elem: A): Boolean = findOrAddEntry(elem, null) eq null

  override def remove(elem: A): Boolean = {
    val e = removeEntry(elem)
    if (e eq null) false
    else {
      // Unlink e from the insertion-order list, patching head/tail as needed.
      if (e.earlier eq null) firstEntry = e.later
      else e.earlier.later = e.later
      if (e.later eq null) lastEntry = e.earlier
      else e.later.earlier = e.earlier
      e.earlier = null // Null references to prevent nepotism
      e.later = null
      true
    }
  }

  // Iterates in insertion order by walking the linked list, not the table.
  def iterator: Iterator[A] = new AbstractIterator[A] {
    private var cur = firstEntry
    def hasNext = cur ne null
    def next =
      if (hasNext) { val res = cur.key; cur = cur.later; res }
      else Iterator.empty.next()
  }

  override def foreach[U](f: A => U) {
    var cur = firstEntry
    while (cur ne null) {
      f(cur.key)
      cur = cur.later
    }
  }

  protected override def foreachEntry[U](f: Entry => U) {
    var cur = firstEntry
    while (cur ne null) {
      f(cur)
      cur = cur.later
    }
  }

  // Called by the hash table on insertion: appends the new entry to the tail
  // of the insertion-order list.
  protected def createNewEntry[B](key: A, dummy: B): Entry = {
    val e = new Entry(key)
    if (firstEntry eq null) firstEntry = e
    else { lastEntry.later = e; e.earlier = lastEntry }
    lastEntry = e
    e
  }

  override def clear() {
    clearTable()
    firstEntry = null
    lastEntry = null
  }

  // Serializes only the keys, in insertion order.
  private def writeObject(out: java.io.ObjectOutputStream) {
    serializeTo(out, { e => out.writeObject(e.key) })
  }

  // Rebuilds both the hash table and the insertion-order list from the stream.
  private def readObject(in: java.io.ObjectInputStream) {
    firstEntry = null
    lastEntry = null
    init(in, createNewEntry(in.readObject().asInstanceOf[A], null))
  }
}
/** $factoryInfo
* @define Coll `LinkedHashSet`
* @define coll linked hash set
*/
object LinkedHashSet extends MutableSetFactory[LinkedHashSet] {
  implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, LinkedHashSet[A]] = setCanBuildFrom[A]
  override def empty[A]: LinkedHashSet[A] = new LinkedHashSet[A]

  /** Class for the linked hash set entry, used internally.
   *  `earlier` points toward the head (older insertions) and `later` toward
   *  the tail of the insertion-order list.
   *  @since 2.10
   */
  private[scala] final class Entry[A](val key: A) extends HashEntry[A, Entry[A]] with Serializable {
    var earlier: Entry[A] = null
    var later: Entry[A] = null
  }
}
| felixmulder/scala | src/library/scala/collection/mutable/LinkedHashSet.scala | Scala | bsd-3-clause | 4,821 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.typeutils
import org.apache.flink.annotation.Internal
import org.apache.flink.api.common.typeutils.TypeSerializer
import org.apache.flink.core.memory.{DataOutputView, DataInputView}
/**
* Serializer for cases where no serializer is required but the system still expects one. This
* happens for OptionTypeInfo when None is used, or for Either when one of the type parameters
* is Nothing.
*/
@Internal
class NothingSerializer extends TypeSerializer[Any] {

  // Every (de)serialization path fails loudly: this serializer only exists to
  // satisfy APIs that demand a TypeSerializer where no data can ever flow
  // (e.g. OptionTypeInfo with None, or Either with a Nothing type parameter).
  private def unsupported: Nothing =
    throw new RuntimeException("This must not be used. You encountered a bug.")

  override def duplicate: NothingSerializer = this

  override def createInstance: Any = Integer.valueOf(-1)

  override def isImmutableType: Boolean = true

  override def getLength: Int = -1

  override def copy(from: Any): Any = unsupported

  override def copy(from: Any, reuse: Any): Any = copy(from)

  override def copy(source: DataInputView, target: DataOutputView): Unit = unsupported

  override def serialize(any: Any, target: DataOutputView): Unit = unsupported

  override def deserialize(source: DataInputView): Any = unsupported

  override def deserialize(reuse: Any, source: DataInputView): Any = unsupported

  override def equals(obj: Any): Boolean = obj match {
    case other: NothingSerializer => other.canEqual(this)
    case _ => false
  }

  override def canEqual(obj: scala.Any): Boolean = obj.isInstanceOf[NothingSerializer]

  override def hashCode(): Int = classOf[NothingSerializer].hashCode()
}
| DieBauer/flink | flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/NothingSerializer.scala | Scala | apache-2.0 | 2,640 |
package patmat
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import patmat.Huffman._
@RunWith(classOf[JUnitRunner])
class HuffmanSuite extends FunSuite {

  // Shared fixtures: a two-leaf fork and a nested fork over {a, b, d}.
  trait TestTrees {
    val t1 = Fork(Leaf('a', 2), Leaf('b', 3), List('a', 'b'), 5)
    val t2 = Fork(Fork(Leaf('a', 2), Leaf('b', 3), List('a', 'b'), 5), Leaf('d', 4), List('a', 'b', 'd'), 9)
  }

  test("weight of a larger tree") {
    new TestTrees {
      assert(weight(t1) === 5)
    }
  }

  test("chars of a larger tree") {
    new TestTrees {
      assert(chars(t2) === List('a', 'b', 'd'))
    }
  }

  test("times(List('a', 'b', 'a'))") {
    assert(times(List('a', 'b', 'a')) === List(('a', 2), ('b', 1)));
  }

  test("string2chars(\\"hello, world\\")") {
    assert(string2Chars("hello, world") === List('h', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd'))
  }

  test("makeOrderedLeafList for some frequency table") {
    assert(makeOrderedLeafList(List(('t', 2), ('e', 1), ('x', 3))) === List(Leaf('e', 1), Leaf('t', 2), Leaf('x', 3)))
  }

  test("singleton sampleTree") {
    val sampleTree = makeCodeTree(
      makeCodeTree(Leaf('x', 1), Leaf('e', 1)),
      Leaf('t', 2))
    assert(singleton(List(sampleTree)), true)
  }

  test("singleton sampleLeaf") {
    val sampleLeaf = Leaf('t', 2)
    assert(singleton(List(sampleLeaf)), true)
  }

  // test("singleton sampleTree and sampleLeaf") {
  //    val sampleTree = makeCodeTree(
  //      makeCodeTree(Leaf('x', 1), Leaf('e', 1)),
  //      Leaf('t', 2))
  //    val sampleLeaf = makeCodeTree(Leaf('t', 2))
  //
  //    var combined = List(sampleTree, sampleLeaf);
  //
  //    println("combined length: " + combined.length)
  //
  //    assert(singleton(combined), false)
  //  }

  test("combine of some leaf list") {
    val leaflist = List(Leaf('e', 1), Leaf('t', 2), Leaf('x', 4))
    assert(combine(leaflist) === List(Fork(Leaf('e', 1), Leaf('t', 2), List('e', 't'), 3), Leaf('x', 4)))
  }

  test("combine of some tree list") {
    val sampleTree = makeCodeTree(
      makeCodeTree(Leaf('x', 1), Leaf('e', 1)),
      Leaf('t', 2))
    val leaflist = List(sampleTree, Leaf('a', 3), Leaf('b', 4))
    assert(combine(leaflist) === List(Leaf('b', 4), Fork(sampleTree, Leaf('a', 3), List('x','e', 't','a'), 7)))
  }

  // Round-trip: decoding an encoding must reproduce the original text.
  test("decode and encode a very short text should be identity") {
    new TestTrees {
      assert(decode(t1, encode(t1)("ab".toList)) === "ab".toList)
    }
  }
}
| relyah/CourseraFunctionalProgramming | assignments/04patmap/patmat/src/test/scala/patmat/HuffmanSuite.scala | Scala | gpl-2.0 | 2,487 |
package ghpages.examples
import ghpages.GhPagesMacros
import ghpages.examples.util.SingleSide
import japgolly.scalajs.react._, vdom.prefix_<^._
object CallbackOptionExample {

  def content = SingleSide.Content(source, Main2())

  val source = GhPagesMacros.exampleSource

  // Wrapper component: explanatory text plus the interactive demo (Main) below.
  def Main2 = ReactComponentB[Unit]("CallbackOption example")
    .render(_ =>
      <.div(
        <.p(
          <.code("CallbackOption"), " is a ", <.code("Callback"), " that you can compose so that steps can abort the rest of the process.",
          <.br,
          "It makes it easy to work with conditions."),
        <.br,
        <.p(
          "Press ←↑↓→ to move the box. Hold ctrl to move to the edges.",
          <.br,
          "Notice that PageDown still scrolls the page but ↓ doesn't? That's because", <.code("preventDefault"), "is only called when a key is matched."),
        Main()))
    .buildU

  // EXAMPLE:START
  import org.scalajs.dom.ext.KeyCode

  // Arena dimensions, movable-box size, and per-keypress step, all in pixels.
  val OuterX = 600
  val OuterY = 240
  val InnerSize = 24
  val MoveDist = 24

  // Top-left position of the inner box within the outer arena.
  case class State(x: Int, y: Int)

  // Start with the box centred.
  def initState = State((OuterX - InnerSize) / 2, (OuterY - InnerSize) / 2)

  val OuterRef = Ref("o")

  // tabIndex := 0 makes the div focusable so it can receive key events.
  val OuterDiv =
    <.div(
      ^.ref := OuterRef,
      ^.tabIndex := 0,
      ^.width := OuterX,
      ^.height := OuterY,
      ^.border := "solid 1px #333",
      ^.background := "#ddd")

  val InnerDiv =
    <.div(
      ^.position.relative,
      ^.width := InnerSize,
      ^.height := InnerSize,
      ^.background := "#800")

  // Moves one coordinate by `steps` increments of MoveDist, clamped so the box
  // stays fully inside [0, max - InnerSize].
  def moveOneAxis(pos: Int, steps: Int, max: Int): Int =
    (pos + steps * MoveDist) min (max - InnerSize) max 0

  class Backend($: BackendScope[Unit, State]) {

    // Focus the arena on mount so arrow keys work without an initial click.
    def init: Callback =
      OuterRef($).tryFocus

    def move(dx: Int, dy: Int): Callback =
      $.modState(s => s.copy(
        x = moveOneAxis(s.x, dx, OuterX),
        y = moveOneAxis(s.y, dy, OuterY)))

    def handleKey(e: ReactKeyboardEvent): Callback = {
      def plainKey: CallbackOption[Unit] = // CallbackOption will stop if a key isn't matched
        CallbackOption.keyCodeSwitch(e) {
          case KeyCode.Up    => move(0, -1)
          case KeyCode.Down  => move(0, 1)
          case KeyCode.Left  => move(-1, 0)
          case KeyCode.Right => move( 1, 0)
        }

      def ctrlKey: CallbackOption[Unit] =  // Like above but if ctrlKey is pressed
        CallbackOption.keyCodeSwitch(e, ctrlKey = true) {
          case KeyCode.Up    => move(0, -OuterY)
          case KeyCode.Down  => move(0,  OuterY)
          case KeyCode.Left  => move(-OuterX, 0)
          case KeyCode.Right => move( OuterX, 0)
        }

      (plainKey orElse ctrlKey) >> e.preventDefaultCB // This is the interesting part.
                                                      //
                                                      // orElse joins CallbackOptions so if one fails, it tries the other.
                                                      //
                                                      // The >> means "and then run" but only if the left side passes.
                                                      // This means preventDefault only runs if a valid key is pressed.
    }

    def render(s: State) =
      OuterDiv(
        ^.onKeyDown ==> handleKey,
        InnerDiv(^.left := s.x, ^.top := s.y))
  }

  val Main = ReactComponentB[Unit]("CallbackOption example")
    .initialState(initState)
    .renderBackend[Backend]
    .componentDidMount(_.backend.init)
    .buildU
  // EXAMPLE:END
}
| zlangbert/scalajs-react | gh-pages/src/main/scala/ghpages/examples/CallbackOptionExample.scala | Scala | apache-2.0 | 3,578 |
package com.harrys.hyppo.worker.scheduling
import org.scalacheck.Gen
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{Matchers, PropSpec}
/**
* Created by jpetty on 2/22/16.
*/
class GompertzFunctionTests extends PropSpec with Matchers with GeneratorDrivenPropertyChecks {

  // The backoff factor must always be a usable multiplier in (0, 1].
  property("should never produce values outside of 0 < x <= 1.0") {
    forAll(
      Gen.choose(0, Int.MaxValue).label("seconds"),
      Gen.choose(0.0, Double.MaxValue).label("scaleFactor"),
      Gen.choose(0.0, Double.MaxValue).label("delayFactor")) {
      (seconds: Int, scale: Double, delay: Double) =>
        val factor = Sigmoid.gompertzCurveBackoffFactor(seconds, scale, delay)
        factor should be > 0.0
        factor should be <= 1.0
    }
  }

  // Each argument outside its valid (non-negative) domain must be rejected,
  // one argument at a time so the failing parameter is unambiguous.
  property("should throw IllegalArgumentExceptions for invalid input values") {
    forAll(
      Gen.choose(Int.MinValue, 0).label("seconds"),
      Gen.choose(0.0, Double.MaxValue).label("scaleFactor"),
      Gen.choose(0.0, Double.MaxValue).label("delayFactor")) {
      (seconds: Int, scale: Double, delay: Double) =>
        an [IllegalArgumentException] shouldBe thrownBy { Sigmoid.gompertzCurveBackoffFactor(seconds, scale, delay) }
    }
    info("negative value for seconds is recognized")
    forAll(
      Gen.choose(0, Int.MaxValue).label("seconds"),
      Gen.choose(Double.MinValue, 0.0).label("scaleFactor"),
      Gen.choose(0.0, Double.MaxValue).label("delayFactor")) {
      (seconds: Int, scale: Double, delay: Double) =>
        an [IllegalArgumentException] shouldBe thrownBy { Sigmoid.gompertzCurveBackoffFactor(seconds, scale, delay) }
    }
    info("negative value for scaleFactor is recognized")
    forAll(
      Gen.choose(0, Int.MaxValue).label("seconds"),
      Gen.choose(0.0, Double.MaxValue).label("scaleFactor"),
      Gen.choose(Double.MinValue, 0.0).label("delayFactor")) {
      (seconds: Int, scale: Double, delay: Double) =>
        an [IllegalArgumentException] shouldBe thrownBy { Sigmoid.gompertzCurveBackoffFactor(seconds, scale, delay) }
    }
    info("negative value for delayFactor is recognized")
  }
}
| harrystech/hyppo-worker | worker/src/test/scala/com/harrys/hyppo/worker/scheduling/GompertzFunctionTests.scala | Scala | mit | 2,125 |
package sledtr.util
import scala.io.Source
import org.mozilla.universalchardet.UniversalDetector
import java.net._
import java.io.{InputStream}
import sledtr.MyPreDef._
import scala.collection.mutable._
object HttpReader {

  /** Fetches the resource at `url` and decodes it using the detected charset. */
  def fromURL(url: String): String = {
    val bhtml: Array[Byte] = getByteArray(url)
    new String(bhtml, getEnc(bhtml))
  }

  /** Reads a local file as a string using the configured default encoding. */
  def fromFile(file: File): String = {
    val path = file.getAbsolutePath()
    Source.fromFile(path, FileUtil.CEncoding).mkString
  }

  /**
   * Detects the character encoding of `ba` with juniversalchardet, falling
   * back to FileUtil.CEncoding when detection fails.
   *
   * BUG FIX: the previous implementation copied each chunk from source offset
   * `i` instead of `i * buf.size`, so every chunk after the first re-read
   * (almost) the same leading bytes; it also always fed a full 4096-byte
   * buffer to the detector, including stale bytes on the final partial chunk.
   * handleData accepts an (array, offset, length) triple, so no intermediate
   * copy is needed at all.
   */
  def getEnc(ba: Array[Byte]): String = {
    val ud: UniversalDetector = new UniversalDetector(null)
    val chunkSize = 4096
    var offset = 0
    // Feed the detector in chunks so it can stop as soon as it is confident.
    while (offset < ba.length && !ud.isDone) {
      val len = math.min(chunkSize, ba.length - offset)
      ud.handleData(ba, offset, len)
      offset += len
    }
    ud.dataEnd()
    Option(ud.getDetectedCharset).getOrElse(FileUtil.CEncoding)
  }

  /** Downloads `url` into `file` unless the file already exists. */
  def getImage(url: String, file: File): Unit =
    if (!file.exists) FileUtil.write(getByteArray(url), file)

  /**
   * Downloads the resource at `url` as raw bytes.
   * Streams through a 4 KB buffer instead of the previous byte-at-a-time read.
   * The stream is always closed, even on failure.
   */
  def getByteArray(url: String): Array[Byte] = {
    val is: InputStream = new URL(url).openConnection().getInputStream()
    try {
      val out = new java.io.ByteArrayOutputStream()
      val buf = new Array[Byte](4096)
      var n = is.read(buf)
      while (n != -1) {
        out.write(buf, 0, n)
        n = is.read(buf)
      }
      out.toByteArray
    } finally is.close()
  }
}
package com.twitter.finagle.oauth2
import com.twitter.util.Future
sealed abstract class GrantHandler {

  /** Processes an OAuth2 token request and produces a token grant. */
  def handle[U](
    request: Request.Authorization,
    dataHandler: DataHandler[U]
  ): Future[GrantResult]

  /**
   * Issues an access token for `authInfo`: reuses a stored token when one
   * exists and has not expired; refreshes an expired token when it carries a
   * refresh token; otherwise creates a brand-new one.
   */
  protected def issueAccessToken[U](
    dataHandler: DataHandler[U],
    authInfo: AuthInfo[U]
  ): Future[GrantResult] = for {
    tokenOption <- dataHandler.getStoredAccessToken(authInfo)
    token <- tokenOption match {
      case Some(t) if dataHandler.isAccessTokenExpired(t) =>
        val refreshToken = t.refreshToken map { dataHandler.refreshAccessToken(authInfo, _) }
        refreshToken.getOrElse(dataHandler.createAccessToken(authInfo))
      case Some(t) => Future.value(t)
      case None => dataHandler.createAccessToken(authInfo)
    }
  } yield GrantResult(
    "Bearer",
    token.token,
    token.expiresIn,
    token.refreshToken,
    token.scope
  )
}
object GrantHandler {

  /** Maps the OAuth2 `grant_type` parameter to its handler; None if unsupported. */
  def fromGrantType(grantType: String): Option[GrantHandler] = grantType match {
    case "authorization_code" => Some(AuthorizationCode)
    case "refresh_token" => Some(RefreshToken)
    case "client_credentials" => Some(ClientCredentials)
    case "password" => Some(Password)
    case _ => None
  }

  /** RFC 6749 §6: exchanges a refresh token for a new access token. */
  object RefreshToken extends GrantHandler {
    def handle[U](
      request: Request.Authorization,
      dataHandler: DataHandler[U]
    ): Future[GrantResult] = {
      val clientCredential = request.clientCredential match {
        case Some(c) => Future.value(c)
        case None => Future.exception(new InvalidRequest("BadRequest"))
      }
      val refreshToken = request.requireRefreshToken
      for {
        credential <- clientCredential
        infoOption <- dataHandler.findAuthInfoByRefreshToken(refreshToken)
        // The refresh token must belong to the authenticated client.
        info <- infoOption match {
          case Some(i) =>
            if (i.clientId != credential.clientId) Future.exception(new InvalidClient())
            else Future.value(i)
          case None => Future.exception(new InvalidGrant("NotFound"))
        }
        token <- dataHandler.refreshAccessToken(info, refreshToken)
      } yield GrantResult(
        "Bearer",
        token.token,
        token.expiresIn,
        token.refreshToken,
        token.scope
      )
    }
  }

  /** RFC 6749 §4.3: resource-owner password credentials grant. */
  object Password extends GrantHandler {
    def handle[U](
      request: Request.Authorization,
      dataHandler: DataHandler[U]
    ): Future[GrantResult] = {
      val clientCredential = request.clientCredential match {
        case Some(c) => Future.value(c)
        case None => Future.exception(new InvalidRequest("BadRequest"))
      }
      val username = request.requireUsername
      val password = request.requirePassword
      val scope = request.scope
      for {
        credential <- clientCredential
        userOption <- dataHandler.findUser(username, password)
        user <- userOption match {
          case Some(u) => Future.value(u)
          case None => Future.exception(new InvalidGrant())
        }
        token <- issueAccessToken(dataHandler, AuthInfo(user, credential.clientId, scope, None))
      } yield token
    }
  }

  /** RFC 6749 §4.4: client-credentials grant (no resource owner involved). */
  object ClientCredentials extends GrantHandler {
    def handle[U](
      request: Request.Authorization,
      dataHandler: DataHandler[U]
    ): Future[GrantResult] = {
      val clientCredential = request.clientCredential match {
        case Some(c) => Future.value(c)
        case None => Future.exception(new InvalidRequest("BadRequest"))
      }
      val scope = request.scope
      for {
        credential <- clientCredential
        userOption <- dataHandler.findClientUser(credential.clientId, credential.clientSecret, scope)
        user <- userOption match {
          case Some(u) => Future.value(u)
          case None => Future.exception(new InvalidGrant())
        }
        token <- issueAccessToken(dataHandler, AuthInfo(user, credential.clientId, scope, None))
      } yield token
    }
  }

  /** RFC 6749 §4.1: authorization-code grant. */
  object AuthorizationCode extends GrantHandler {
    def handle[U](
      request: Request.Authorization,
      dataHandler: DataHandler[U]
    ): Future[GrantResult] = {
      val clientCredential = request.clientCredential match {
        case Some(c) => Future.value(c)
        case None => Future.exception(new InvalidRequest("BadRequest"))
      }
      val code = request.requireCode
      val redirectUri = request.redirectUri
      for {
        credential <- clientCredential
        infoOption <- dataHandler.findAuthInfoByCode(code)
        // The code must belong to this client and, when a redirect URI was
        // bound to the code, the request must present the same URI.
        info <- infoOption match {
          case Some(i) =>
            if (i.clientId != credential.clientId)
              Future.exception(new InvalidClient())
            else if (i.redirectUri.isDefined && i.redirectUri != redirectUri)
              Future.exception(new RedirectUriMismatch())
            else Future.value(i)
          case None => Future.exception(new InvalidGrant())
        }
        token <- issueAccessToken(dataHandler, info)
      } yield token
    }
  }
}
| finagle/finagle-oauth2 | src/main/scala/com/twitter/finagle/oauth2/GrantHandler.scala | Scala | apache-2.0 | 4,950 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.metric
import java.io.File
import scala.reflect.{classTag, ClassTag}
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.aggregate.{Final, Partial}
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.execution.{FilterExec, RangeExec, SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.{AccumulatorContext, JsonProtocol}
class SQLMetricsSuite extends SparkFunSuite with SQLMetricsTestUtils with SharedSQLContext {
import testImplicits._
/**
 * Builds a `DataFrame` of `numRows` randomly generated byte arrays (column "a")
 * paired with random ints (column "b"), used to provoke hash collisions.
 */
private def generateRandomBytesDF(numRows: Int = 65535): DataFrame = {
  val rng = new Random()
  val rows = Seq.tabulate(numRows) { _ =>
    val bytes = new Array[Byte](rng.nextInt(100))
    rng.nextBytes(bytes)
    (bytes, rng.nextInt(100))
  }
  rows.toDF("a", "b")
}
// Metrics must be populated for both full collects and limited collects.
test("LocalTableScanExec computes metrics in collect and take") {
  val df1 = spark.createDataset(Seq(1, 2, 3))
  val logical = df1.queryExecution.logical
  require(logical.isInstanceOf[LocalRelation])
  df1.collect()
  val metrics1 = df1.queryExecution.executedPlan.collectLeaves().head.metrics
  assert(metrics1.contains("numOutputRows"))
  assert(metrics1("numOutputRows").value === 3)
  val df2 = spark.createDataset(Seq(1, 2, 3)).limit(2)
  df2.collect()
  val metrics2 = df2.queryExecution.executedPlan.collectLeaves().head.metrics
  assert(metrics2.contains("numOutputRows"))
  assert(metrics2("numOutputRows").value === 2)
}

test("Filter metrics") {
  // Assume the execution plan is
  // PhysicalRDD(nodeId = 1) -> Filter(nodeId = 0)
  val df = person.filter('age < 25)
  testSparkPlanMetrics(df, 1, Map(
    0L -> (("Filter", Map(
      "number of output rows" -> 1L))))
  )
}

test("WholeStageCodegen metrics") {
  // Assume the execution plan is
  // WholeStageCodegen(nodeId = 0, Range(nodeId = 2) -> Filter(nodeId = 1))
  // TODO: update metrics in generated operators
  val ds = spark.range(10).filter('id < 5)
  testSparkPlanMetrics(ds.toDF(), 1, Map.empty)
}

test("Aggregate metrics") {
  // Assume the execution plan is
  // ... -> HashAggregate(nodeId = 2) -> Exchange(nodeId = 1)
  // -> HashAggregate(nodeId = 0)
  val df = testData2.groupBy().count() // 2 partitions
  val expected1 = Seq(
    Map("number of output rows" -> 2L,
      "avg hash probe (min, med, max)" -> "\\n(1, 1, 1)"),
    Map("number of output rows" -> 1L,
      "avg hash probe (min, med, max)" -> "\\n(1, 1, 1)"))
  val shuffleExpected1 = Map(
    "records read" -> 2L,
    "local blocks read" -> 2L,
    "remote blocks read" -> 0L,
    "shuffle records written" -> 2L)
  testSparkPlanMetrics(df, 1, Map(
    2L -> (("HashAggregate", expected1(0))),
    1L -> (("Exchange", shuffleExpected1)),
    0L -> (("HashAggregate", expected1(1))))
  )
  // 2 partitions and each partition contains 2 keys
  val df2 = testData2.groupBy('a).count()
  val expected2 = Seq(
    Map("number of output rows" -> 4L,
      "avg hash probe (min, med, max)" -> "\\n(1, 1, 1)"),
    Map("number of output rows" -> 3L,
      "avg hash probe (min, med, max)" -> "\\n(1, 1, 1)"))
  val shuffleExpected2 = Map(
    "records read" -> 4L,
    "local blocks read" -> 4L,
    "remote blocks read" -> 0L,
    "shuffle records written" -> 4L)
  testSparkPlanMetrics(df2, 1, Map(
    2L -> (("HashAggregate", expected2(0))),
    1L -> (("Exchange", shuffleExpected2)),
    0L -> (("HashAggregate", expected2(1))))
  )
}
test("Aggregate metrics: track avg probe") {
// The executed plan looks like:
// HashAggregate(keys=[a#61], functions=[count(1)], output=[a#61, count#71L])
// +- Exchange hashpartitioning(a#61, 5)
// +- HashAggregate(keys=[a#61], functions=[partial_count(1)], output=[a#61, count#76L])
// +- Exchange RoundRobinPartitioning(1)
// +- LocalTableScan [a#61]
//
// Assume the execution plan with node id is:
// Wholestage disabled:
// HashAggregate(nodeId = 0)
// Exchange(nodeId = 1)
// HashAggregate(nodeId = 2)
// Exchange (nodeId = 3)
// LocalTableScan(nodeId = 4)
//
// Wholestage enabled:
// WholeStageCodegen(nodeId = 0)
// HashAggregate(nodeId = 1)
// Exchange(nodeId = 2)
// WholeStageCodegen(nodeId = 3)
// HashAggregate(nodeId = 4)
// Exchange(nodeId = 5)
// LocalTableScan(nodeId = 6)
Seq(true, false).foreach { enableWholeStage =>
val df = generateRandomBytesDF().repartition(1).groupBy('a).count()
val nodeIds = if (enableWholeStage) {
Set(4L, 1L)
} else {
Set(2L, 0L)
}
val metrics = getSparkPlanMetrics(df, 1, nodeIds, enableWholeStage).get
nodeIds.foreach { nodeId =>
val probes = metrics(nodeId)._2("avg hash probe (min, med, max)")
probes.toString.stripPrefix("\\n(").stripSuffix(")").split(", ").foreach { probe =>
assert(probe.toDouble > 1.0)
}
}
}
}
// Same shape as the HashAggregate test, but for the object-based aggregate used by
// collect_set; checks row counts per aggregate node and the shuffle metrics between them.
test("ObjectHashAggregate metrics") {
// Assume the execution plan is
// ... -> ObjectHashAggregate(nodeId = 2) -> Exchange(nodeId = 1)
// -> ObjectHashAggregate(nodeId = 0)
val df = testData2.groupBy().agg(collect_set('a)) // 2 partitions
testSparkPlanMetrics(df, 1, Map(
2L -> (("ObjectHashAggregate", Map("number of output rows" -> 2L))),
1L -> (("Exchange", Map(
"shuffle records written" -> 2L,
"records read" -> 2L,
"local blocks read" -> 2L,
"remote blocks read" -> 0L))),
0L -> (("ObjectHashAggregate", Map("number of output rows" -> 1L))))
)
// 2 partitions and each partition contains 2 keys
val df2 = testData2.groupBy('a).agg(collect_set('a))
testSparkPlanMetrics(df2, 1, Map(
2L -> (("ObjectHashAggregate", Map("number of output rows" -> 4L))),
1L -> (("Exchange", Map(
"shuffle records written" -> 4L,
"records read" -> 4L,
"local blocks read" -> 4L,
"remote blocks read" -> 0L))),
0L -> (("ObjectHashAggregate", Map("number of output rows" -> 3L))))
)
}
// Smoke test: a sorted range should execute cleanly and expose its plan metrics
// (no specific metric values are asserted here).
test("Sort metrics") {
// Assume the execution plan is
// WholeStageCodegen(nodeId = 0, Range(nodeId = 2) -> Sort(nodeId = 1))
val ds = spark.range(10).sort('id)
testSparkPlanMetrics(ds.toDF(), 2, Map.empty)
}
// Inner sort-merge join: checks the join's output-row count and the shuffle metrics
// of the Exchange feeding it.
test("SortMergeJoin metrics") {
// Because SortMergeJoin may skip different rows if the number of partitions is different, this
// test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 4 because we only read 3 rows in the first partition and 1 row in the second one
"number of output rows" -> 4L))),
2L -> (("Exchange", Map(
"records read" -> 4L,
"local blocks read" -> 2L,
"remote blocks read" -> 0L,
"shuffle records written" -> 2L))))
)
}
}
// Outer sort-merge joins (both left and right): only the join's output-row count is checked.
test("SortMergeJoin(outer) metrics") {
// Because SortMergeJoin may skip different rows if the number of partitions is different,
// this test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 8 because we read 6 rows in the left and 2 rows in the right one
"number of output rows" -> 8L))))
)
val df2 = spark.sql(
"SELECT * FROM testDataForJoin right JOIN testData2 ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df2, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 8 because we read 6 rows in the left and 2 rows in the right one
"number of output rows" -> 8L))))
)
}
}
// Inner broadcast hash join: the smaller side is broadcast, so only 2 keys match.
test("BroadcastHashJoin metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = df1.join(broadcast(df2), "key")
testSparkPlanMetrics(df, 2, Map(
1L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 2L))))
)
}
// Shuffled hash join: forces the strategy via a tiny broadcast threshold and
// preferSortMergeJoin=false, then checks join and per-side Exchange metrics.
test("ShuffledHashJoin metrics") {
withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "40",
"spark.sql.shuffle.partitions" -> "2",
"spark.sql.join.preferSortMergeJoin" -> "false") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = (1 to 10).map(i => (i, i.toString)).toSeq.toDF("key", "value")
// Assume the execution plan is
// Project(nodeId = 0)
// +- ShuffledHashJoin(nodeId = 1)
// :- Exchange(nodeId = 2)
// : +- Project(nodeId = 3)
// : +- LocalTableScan(nodeId = 4)
// +- Exchange(nodeId = 5)
// +- Project(nodeId = 6)
// +- LocalTableScan(nodeId = 7)
val df = df1.join(df2, "key")
testSparkPlanMetrics(df, 1, Map(
1L -> (("ShuffledHashJoin", Map(
"number of output rows" -> 2L))),
2L -> (("Exchange", Map(
"shuffle records written" -> 2L,
"records read" -> 2L))),
5L -> (("Exchange", Map(
"shuffle records written" -> 10L,
"records read" -> 10L))))
)
}
}
// Outer broadcast hash joins: left outer keeps all 3 left rows (row with key 4 is
// padded with nulls -> 5 output rows), right outer keeps all 4 right rows -> 6.
test("BroadcastHashJoin(outer) metrics") {
val df1 = Seq((1, "a"), (1, "b"), (4, "c")).toDF("key", "value")
val df2 = Seq((1, "a"), (1, "b"), (2, "c"), (3, "d")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 0)
val df = df1.join(broadcast(df2), $"key" === $"key2", "left_outer")
testSparkPlanMetrics(df, 2, Map(
0L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 5L))))
)
val df3 = df1.join(broadcast(df2), $"key" === $"key2", "right_outer")
testSparkPlanMetrics(df3, 2, Map(
0L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 6L))))
)
}
// Broadcast nested-loop join on a non-equi condition; cross joins must be enabled.
test("BroadcastNestedLoopJoin metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> BroadcastNestedLoopJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON " +
"testData2.a * testDataForJoin.a != testData2.a + testDataForJoin.a")
testSparkPlanMetrics(df, 3, Map(
1L -> (("BroadcastNestedLoopJoin", Map(
"number of output rows" -> 12L))))
)
}
}
}
// Left-semi broadcast hash join: each left row matching at least one right key is
// emitted exactly once.
test("BroadcastLeftSemiJoinHash metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 0)
val df = df1.join(broadcast(df2), $"key" === $"key2", "leftsemi")
testSparkPlanMetrics(df, 2, Map(
0L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 2L))))
)
}
// Cartesian product: 6 rows x 2 rows = 12 output rows.
test("CartesianProduct metrics") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> CartesianProduct(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 JOIN testDataForJoin")
testSparkPlanMetrics(df, 1, Map(
0L -> (("CartesianProduct", Map("number of output rows" -> 12L))))
)
}
}
}
// Left-anti sort-merge join: rows of testData2 whose key has no match in antiData (a <= 2).
test("SortMergeJoin(left-anti) metrics") {
val anti = testData2.filter("a > 2")
withTempView("antiData") {
anti.createOrReplaceTempView("antiData")
val df = spark.sql(
"SELECT * FROM testData2 ANTI JOIN antiData ON testData2.a = antiData.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map("number of output rows" -> 4L))))
)
}
}
// Verifies that metrics produced by a write action ("save") are reported through the
// SQL status store, identified via the execution id created by the write.
test("save metrics") {
withTempPath { file =>
// person creates a temporary view. get the DF before listing previous execution IDs
val data = person.select('name)
val previousExecutionIds = currentExecutionIds()
// Assume the execution plan is
// PhysicalRDD(nodeId = 0)
data.write.format("json").save(file.getAbsolutePath)
sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = currentExecutionIds().diff(previousExecutionIds)
assert(executionIds.size === 1)
val executionId = executionIds.head
val jobs = statusStore.execution(executionId).get.jobs
// Use "<=" because there is a race condition that we may miss some jobs
// TODO Change "<=" to "=" once we fix the race condition that missing the JobStarted event.
assert(jobs.size <= 1)
val metricValues = statusStore.executionMetrics(executionId)
// Because "save" will create a new DataFrame internally, we cannot get the real metric id.
// However, we still can check the value.
assert(metricValues.values.toSeq.exists(_ === "2"))
}
}
// Round-trips a SQLMetric's AccumulableInfo through the JSON protocol used by the
// history server: the Long value survives as a String, and the SQL metadata marker is kept.
test("metrics can be loaded by history server") {
val metric = SQLMetrics.createMetric(sparkContext, "zanzibar")
metric += 10L
val metricInfo = metric.toInfo(Some(metric.value), None)
metricInfo.update match {
case Some(v: Long) => assert(v === 10L)
case Some(v) => fail(s"metric value was not a Long: ${v.getClass.getName}")
case _ => fail("metric update is missing")
}
assert(metricInfo.metadata === Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER))
// After serializing to JSON, the original value type is lost, but we can still
// identify that it's a SQL metric from the metadata
val metricInfoJson = JsonProtocol.accumulableInfoToJson(metricInfo)
val metricInfoDeser = JsonProtocol.accumulableInfoFromJson(metricInfoJson)
metricInfoDeser.update match {
case Some(v: String) => assert(v.toLong === 10L)
case Some(v) => fail(s"deserialized metric value was not a string: ${v.getClass.getName}")
case _ => fail("deserialized metric update is missing")
}
assert(metricInfoDeser.metadata === Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER))
}
// Verifies input/output record metrics per stage: each triple is
// (records read, shuffle records read, records written/output) for one stage.
test("range metrics") {
val res1 = InputOutputMetricsHelper.run(
spark.range(30).filter(x => x % 3 == 0).toDF()
)
assert(res1 === (30L, 0L, 30L) :: Nil)
val res2 = InputOutputMetricsHelper.run(
spark.range(150).repartition(4).filter(x => x < 10).toDF()
)
assert(res2 === (150L, 0L, 150L) :: (0L, 150L, 10L) :: Nil)
withTempDir { tempDir =>
val dir = new File(tempDir, "pqS").getCanonicalPath
spark.range(10).write.parquet(dir)
spark.read.parquet(dir).createOrReplaceTempView("pqS")
// The executed plan looks like:
// Exchange RoundRobinPartitioning(2)
// +- BroadcastNestedLoopJoin BuildLeft, Cross
// :- BroadcastExchange IdentityBroadcastMode
// : +- Exchange RoundRobinPartitioning(3)
// : +- *Range (0, 30, step=1, splits=2)
// +- *FileScan parquet [id#465L] Batched: true, Format: Parquet, Location: ...(ignored)
val res3 = InputOutputMetricsHelper.run(
spark.range(30).repartition(3).crossJoin(sql("select * from pqS")).repartition(2).toDF()
)
// The query above is executed in the following stages:
// 1. range(30) => (30, 0, 30)
// 2. sql("select * from pqS") => (0, 30, 0)
// 3. crossJoin(...) of 1. and 2. => (10, 0, 300)
// 4. shuffle & return results => (0, 300, 0)
assert(res3 === (30L, 0L, 30L) :: (0L, 30L, 0L) :: (10L, 0L, 300L) :: (0L, 300L, 0L) :: Nil)
}
}
// Regression test: a plan reused twice (view unioned with itself) must report the
// metrics of each occurrence separately instead of double-counting one node.
test("SPARK-25278: output metrics are wrong for plans repeated in the query") {
val name = "demo_view"
withView(name) {
sql(s"CREATE OR REPLACE VIEW $name AS VALUES 1,2")
val view = spark.table(name)
val union = view.union(view)
testSparkPlanMetrics(union, 1, Map(
0L -> ("Union" -> Map()),
1L -> ("LocalTableScan" -> Map("number of output rows" -> 2L)),
2L -> ("LocalTableScan" -> Map("number of output rows" -> 2L))))
}
}
// Write-path metrics for a non-partitioned parquet table.
test("writing data out metrics: parquet") {
testMetricsNonDynamicPartition("parquet", "t1")
}
// Write-path metrics for a dynamically partitioned parquet table.
test("writing data out metrics with dynamic partition: parquet") {
testMetricsDynamicPartition("parquet", "parquet", "t1")
}
/**
 * Collects every node of runtime class `T` that lives inside the plan's single
 * whole-stage-codegen subtree. Fails the test if the plan does not contain exactly
 * one `WholeStageCodegenExec`.
 */
private def collectNodeWithinWholeStage[T <: SparkPlan : ClassTag](plan: SparkPlan): Seq[T] = {
  val wholeStages = plan.collect { case stage: WholeStageCodegenExec => stage }
  assert(wholeStages.length == 1, "The query plan should have one and only one whole-stage.")
  // Match on the exact runtime class (not isInstance) so subclasses are excluded,
  // exactly like the original behavior.
  val targetClass = classTag[T].runtimeClass
  wholeStages.head.collect {
    case node if node.getClass == targetClass => node.asInstanceOf[T]
  }
}
// Regression test: getByteArrayRdd must stop pulling rows from its input once the
// consumer stops, so upstream operators only produce what was actually needed.
test("SPARK-25602: SparkPlan.getByteArrayRdd should not consume the input when not necessary") {
// Helper asserting the numOutputRows metric of the single Filter and single Range
// inside the (single) whole-stage subtree of df's executed plan.
def checkFilterAndRangeMetrics(
df: DataFrame,
filterNumOutputs: Int,
rangeNumOutputs: Int): Unit = {
val plan = df.queryExecution.executedPlan
val filters = collectNodeWithinWholeStage[FilterExec](plan)
assert(filters.length == 1, "The query plan should have one and only one Filter")
assert(filters.head.metrics("numOutputRows").value == filterNumOutputs)
val ranges = collectNodeWithinWholeStage[RangeExec](plan)
assert(ranges.length == 1, "The query plan should have one and only one Range")
assert(ranges.head.metrics("numOutputRows").value == rangeNumOutputs)
}
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
val df = spark.range(0, 3000, 1, 2).toDF().filter('id % 3 === 0)
df.collect()
checkFilterAndRangeMetrics(df, filterNumOutputs = 1000, rangeNumOutputs = 3000)
// Metrics are reset so the next action starts counting from zero.
df.queryExecution.executedPlan.foreach(_.resetMetrics())
// For each partition, we get 2 rows. Then the Filter should produce 2 rows per-partition,
// and Range should produce 4 rows per-partition ([0, 1, 2, 3] and [15, 16, 17, 18]). Totally
// Filter produces 4 rows, and Range produces 8 rows.
df.queryExecution.toRdd.mapPartitions(_.take(2)).collect()
checkFilterAndRangeMetrics(df, filterNumOutputs = 4, rangeNumOutputs = 8)
// Top-most limit will call `CollectLimitExec.executeCollect`, which will only run the first
// task, so totally the Filter produces 2 rows, and Range produces 4 rows ([0, 1, 2, 3]).
val df2 = df.limit(2)
df2.collect()
checkFilterAndRangeMetrics(df2, filterNumOutputs = 2, rangeNumOutputs = 4)
}
}
// Regression test: a LIMIT compiled into a whole-stage pipeline must short-circuit
// upstream operators instead of consuming the entire input.
test("SPARK-25497: LIMIT within whole stage codegen should not consume all the inputs") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
// A special query that only has one partition, so there is no shuffle and the entire query
// can be whole-stage-codegened.
val df = spark.range(0, 1500, 1, 1).limit(10).groupBy('id).count().limit(1).filter('id >= 0)
df.collect()
val plan = df.queryExecution.executedPlan
val ranges = collectNodeWithinWholeStage[RangeExec](plan)
assert(ranges.length == 1, "The query plan should have one and only one Range")
// The Range should only produce the first batch, i.e. 1000 rows.
assert(ranges.head.metrics("numOutputRows").value == 1000)
val aggs = collectNodeWithinWholeStage[HashAggregateExec](plan)
assert(aggs.length == 2, "The query plan should have two and only two Aggregate")
val partialAgg = aggs.filter(_.aggregateExpressions.head.mode == Partial).head
// The partial aggregate should output 10 rows, because its input is 10 rows.
assert(partialAgg.metrics("numOutputRows").value == 10)
val finalAgg = aggs.filter(_.aggregateExpressions.head.mode == Final).head
// The final aggregate should only produce 1 row, because the upstream limit only needs 1 row.
assert(finalAgg.metrics("numOutputRows").value == 1)
val filters = collectNodeWithinWholeStage[FilterExec](plan)
assert(filters.length == 1, "The query plan should have one and only one Filter")
// The final Filter should produce 1 rows, because the input is just one row.
assert(filters.head.metrics("numOutputRows").value == 1)
}
}
// Regression test: FileSourceScanExec must report both row and file counts; the
// partition filter (p = 1) prunes to one partition that holds 2 files / 3 rows.
test("SPARK-26327: FileSourceScanExec metrics") {
withTable("testDataForScan") {
spark.range(10).selectExpr("id", "id % 3 as p")
.write.partitionBy("p").saveAsTable("testDataForScan")
// The execution plan only has 1 FileScan node.
val df = spark.sql(
"SELECT * FROM testDataForScan WHERE p = 1")
testSparkPlanMetrics(df, 1, Map(
0L -> (("Scan parquet default.testdataforscan", Map(
"number of output rows" -> 3L,
"number of files" -> 2L))))
)
}
}
}
| mdespriee/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala | Scala | apache-2.0 | 23,299 |
package com.cloudray.scalapress.account.controller
import com.cloudray.scalapress.account.AccountLink
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
/** Renders the account links shown on a user's profile page as Bootstrap rows. */
object AccountRenderer {

  /**
   * Instantiates each link class, keeps only the links that are enabled for the
   * given context, and renders them in ascending priority order.
   *
   * @param links   account-link implementations to render; each must have a
   *                public no-arg constructor (instantiated via `newInstance`)
   * @param context application context used to decide whether a link is enabled
   * @return the concatenated HTML markup, one row per enabled link, separated by `<br/>`
   */
  def links(links: Seq[Class[_ <: AccountLink]], context: ScalapressContext): String = {
    links.map(klass => klass.newInstance)
      // Filter before sorting so disabled links never pay the sort; sortBy is
      // stable, so the rendered order is unchanged.
      .filter(_.accountLinkEnabled(context))
      .sortBy(_.accountLinkPriority)
      .map(link => {
        <div class="row-fluid">
          <div class="span9">
            {link.accountLinkText}
          </div>
          <div class="span3">
            <a href={link.profilePageLinkUrl} class="btn btn-block" id={link.profilePageLinkId}>
              {link.profilePageLinkText}
            </a>
          </div>
        </div>
      }).mkString("<br/>")
  }
}
// Account link that logs the user out through Spring Security's logout endpoint.
// Always enabled; MAX_VALUE priority sorts it last among the rendered links.
class LogoutAccountLink extends AccountLink {
def accountLinkEnabled(context: ScalapressContext): Boolean = true
def profilePageLinkId: String = "accountlink-logout"
def profilePageLinkUrl: String = "/j_spring_security_logout"
def profilePageLinkText: String = "Logout"
def accountLinkText: String = "Logout of your account"
override def accountLinkPriority: Int = Integer.MAX_VALUE
} | vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/account/controller/AccountRenderer.scala | Scala | apache-2.0 | 1,193
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.conversion.converter
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import java.util
// Verifies that the converter factories turn Java collections into the requested
// Scala collection kinds (mutable/immutable Seq, mutable Map).
class IterableConverterTest extends AnyFunSpec with Matchers {
describe("Iterable Converter") {
it("Convert java iterable to scala") {
val c = IterableConverterFactory
// java.util.ArrayList -> collection.Seq and collection.immutable.Seq
val seq = c.convert(new util.ArrayList[Integer], classOf[collection.Seq[Integer]])
seq.isInstanceOf[collection.Seq[_]] should be(true)
val iseq = c.convert(new util.ArrayList[Integer], classOf[collection.immutable.Seq[Integer]])
iseq.isInstanceOf[collection.immutable.Seq[_]] should be(true)
// java.util.HashMap -> collection.mutable.Map
val mc = MapConverterFactory
val map = mc.convert(new util.HashMap[String, String], classOf[collection.mutable.Map[String, String]])
assert(map != null)
}
}
}
| beangle/commons | core/src/test/scala/org/beangle/commons/conversion/converter/IterableConverterTest.scala | Scala | lgpl-3.0 | 1,557 |
/**
* Copyright 2015 Zaradai
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zaradai.lattrac.analytics
import com.espertech.esper.client._
/** Pairs an Esper event type alias with the fully-qualified class name that backs it. */
case class RegisteredType(eventTypeName: String, eventClassName: String)
/**
 * Mixin that owns a lazily created Esper engine instance, keyed by the concrete
 * class name of the mixer. Subclasses override `registeredTypes` to declare the
 * event types the engine should know about before any statement is created.
 */
trait EsperService {
// Lazy so the engine (and its type registrations) is only built on first use.
private[this] lazy val esperService: EPServiceProvider = createEsperService
// Event types to register with the engine; empty by default.
def registeredTypes: List[RegisteredType] = List()
// Pushes an event into the engine for evaluation against all active statements.
def sendEvent(event: AnyRef) =
esperService.getEPRuntime.sendEvent(event)
// Registers an EPL statement without attaching a listener.
def addStatement(statement: String): Unit =
esperService.getEPAdministrator.createEPL(statement)
// Registers an EPL statement and invokes `fn(newEvents, oldEvents)` on each update.
def addStatementWithHandler(statement: String, fn: (Array[EventBean], Array[EventBean]) => Unit): Unit = {
val esperStatement = esperService.getEPAdministrator.createEPL(statement)
esperStatement.addListener(new UpdateListener {
override def update(newEvents: Array[EventBean], oldEvents: Array[EventBean]): Unit =
fn(newEvents, oldEvents)
})
}
// Builds and initializes a provider scoped to this class's name, registering
// all declared event types first.
private def createEsperService: EPServiceProvider = {
val config = new Configuration
registeredTypes.foreach(re => config.addEventType(re.eventTypeName, re.eventClassName))
val epService = EPServiceProviderManager.getProvider(this.getClass.getName, config)
epService.initialize()
epService
}
}
| zaradai/lattrac | src/main/scala/com/zaradai/lattrac/analytics/EsperService.scala | Scala | apache-2.0 | 1,772 |
package controllers
import org.specs2.execute.Result
import org.specs2.mutable
import play.api.libs.json.{JsValue, Writes, Reads, Json}
import play.api.mvc
import play.api.test.Helpers._
import scala.reflect.runtime.universe._
/**
 * specs2 helpers for asserting on JSON bodies of Play results: parse the body into
 * a typed value (optionally after checking status/content-type) and run a test on it.
 */
trait RestHelpers {
this: mutable.Specification =>
// Parses the body as T and runs `test` on it; does NOT check status or headers.
def withJsonResult[T](result: mvc.Result)(test: T => Result)
(implicit reader: Reads[T], writer: Writes[T], typeTag: TypeTag[T]): Result = {
test(parseJsonContent(result))
}
// Asserts 200/JSON/utf-8 first, then parses the body as T; a parse failure is a
// test failure (with the raw JSON in the message) rather than an exception.
def withValidJsonResult[T](result: mvc.Result)(test: T => Result)
(implicit reader: Reads[T], writer: Writes[T], typeTag: TypeTag[T]): Result = {
status(result) must beEqualTo(OK)
contentType(result) must beSome("application/json")
charset(result) must beSome("utf-8")
test(parseJsonContentOrElse(result, jsonResult =>
failure("Could not parse " + typeOf[T] + " from:\n" + jsonResult))
)
}
// Parses the body as T, throwing NoSuchElementException when it does not conform.
def parseJsonContent[T](result: mvc.Result)
(implicit reader: Reads[T], writer: Writes[T], typeTag: TypeTag[T]): T = {
parseJsonContentOrElse(result, jsonResult =>
throw new NoSuchElementException("Could not parse " + typeOf[T] + " from:\n" + jsonResult)
)
}
// Shared parse path: reads the body as JSON, then maps it to T or delegates to `orElse`.
private def parseJsonContentOrElse[T](result: mvc.Result, orElse: JsValue => T)
(implicit reader: Reads[T], writer: Writes[T], typeTag: TypeTag[T]): T = {
val jsonResult = Json.parse(contentAsString(result))
Json.fromJson[T](jsonResult) getOrElse orElse(jsonResult)
}
}
| jeffmay/angular-play-multimodule-seed | api/test/controllers/RestHelpers.scala | Scala | apache-2.0 | 1,464 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.memory.cqengine.datastore
import java.util
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.Query
import org.geotools.data.store.{ContentDataStore, ContentEntry, ContentFeatureSource}
import org.geotools.feature.NameImpl
import org.locationtech.geomesa.memory.cqengine.GeoCQEngine
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.JavaConversions._
/**
 * In-memory GeoTools DataStore backed by CQEngine, holding one [[GeoCQEngine]]
 * per feature type name.
 *
 * @param useGeoIndex whether created engines should maintain a geometry index
 */
class GeoCQEngineDataStore(useGeoIndex: java.lang.Boolean
) extends ContentDataStore with LazyLogging {
logger.info(s"useGeoIndex=$useGeoIndex")
// Type name -> engine; populated by createSchema, read by createFeatureSource.
val namesToEngine = new java.util.concurrent.ConcurrentHashMap[String, GeoCQEngine]()
override def createFeatureSource(entry: ContentEntry): ContentFeatureSource = {
val engine = namesToEngine.get(entry.getTypeName)
if (engine != null) {
new GeoCQEngineFeatureStore(engine, entry, Query.ALL)
} else {
// NOTE(review): returns null for an unknown type name — callers must guard;
// consider throwing IOException per the DataStore contract. TODO confirm.
null
}
}
override def createTypeNames(): util.List[Name] = { namesToEngine.keys().toList.map { new NameImpl(_) } }
// Idempotent: putIfAbsent keeps an existing engine if the schema was already created.
override def createSchema(featureType: SimpleFeatureType): Unit = {
namesToEngine.putIfAbsent(
featureType.getTypeName,
new GeoCQEngine(featureType, enableGeomIndex=useGeoIndex))
}
}
// Shared singleton stores (one with, one without geometry indexing); lazy so no
// engine state is built until first use.
object GeoCQEngineDataStore {
lazy val engine = new GeoCQEngineDataStore(useGeoIndex=true)
lazy val engineNoGeoIndex = new GeoCQEngineDataStore(useGeoIndex=false)
}
| ronq/geomesa | geomesa-memory/geomesa-cqengine-datastore/src/main/scala/org/locationtech/geomesa/memory/cqengine/datastore/GeoCQEngineDataStore.scala | Scala | apache-2.0 | 1,939 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.concurrent
/** A simple monitor-based mutual-exclusion lock.
 *
 *  `acquire` blocks (via `Object.wait`) until the lock is free, then takes it;
 *  `release` frees it and wakes one waiting thread. The `available` flag is the
 *  single piece of guarded state; it is only safe to read/write while holding
 *  this object's monitor.
 *
 *  @author Martin Odersky
 *  @version 1.0, 10/03/2003
 */
@deprecated("use java.util.concurrent.locks.Lock", "2.11.2")
class Lock {
var available = true
def acquire() = synchronized {
// Loop (not `if`) so spurious wakeups re-check the condition before proceeding.
while (!available) wait()
available = false
}
def release() = synchronized {
available = true
// Wake a single waiter; only one thread could take the lock anyway.
notify()
}
}
| felixmulder/scala | src/library/scala/concurrent/Lock.scala | Scala | bsd-3-clause | 898 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* Stores information about an executor to pass from the scheduler to SparkListeners.
* 存储有关执行程序从调度程序传递到SparkListeners的信息
*/
@DeveloperApi
class ExecutorInfo(
    val executorHost: String,
    val totalCores: Int,
    val logUrlMap: Map[String, String]) {

  /** Equality is only defined against other `ExecutorInfo` instances. */
  def canEqual(other: Any): Boolean = other.isInstanceOf[ExecutorInfo]

  /** Two instances are equal when every field matches. */
  override def equals(other: Any): Boolean = other match {
    case that: ExecutorInfo if that.canEqual(this) =>
      this.executorHost == that.executorHost &&
        this.totalCores == that.totalCores &&
        this.logUrlMap == that.logUrlMap
    case _ => false
  }

  /** Field-based hash (31 * acc + field.hashCode), consistent with `equals`. */
  override def hashCode(): Int = {
    val fields = Seq(executorHost, totalCores, logUrlMap)
    fields.foldLeft(0)((acc, field) => 31 * acc + field.hashCode())
  }
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/scheduler/cluster/ExecutorInfo.scala | Scala | apache-2.0 | 1,714 |
package com.szadowsz.gospel.core.db.theory.clause
import com.szadowsz.gospel.core.engine.clause.ClauseInfo
import com.szadowsz.gospel.core.data.{Struct, Term, Var, numeric}
import java.{util => ju}
import scala.collection.JavaConverters._
/**
* <code>FamilyClausesList</code> is a common <code>LinkedList</code>
* which stores {@link ClauseInfo} objects. Internally it indexes stored data
* in such a way that, knowing what type of clauses are required, only
* goal compatible clauses are returned
*
* @author Paolo Contessi
* @since 2.2
*
* @see LinkedList
*/
@SerialVersionUID(1L)
class FamilyClausesList extends ju.LinkedList[ClauseInfo] {
private val numCompClausesIndex = new FamilyClausesIndex[numeric.Number]
private val constantCompClausesIndex = new FamilyClausesIndex[String]
private val structCompClausesIndex = new FamilyClausesIndex[String]
private val listCompClausesList = new ju.LinkedList[ClauseInfo]
/**
* Adds the given clause as first of the family
*
* @param ci The clause to be added (with related informations)
*/
override def addFirst(ci: ClauseInfo) {
super.addFirst(ci)
register(ci, true)
}
/**
* Adds the given clause as last of the family
*
* @param ci The clause to be added (with related informations)
*/
override def addLast(ci: ClauseInfo) {
super.addLast(ci)
register(ci, false)
}
override def add(o: ClauseInfo): Boolean = {
addLast(o)
return true
}
@deprecated
override def addAll(index: scala.Int, c: ju.Collection[_ <: ClauseInfo]): Boolean = {
throw new UnsupportedOperationException("Not supported.")
}
@deprecated
override def add(index: scala.Int, element: ClauseInfo) {
throw new UnsupportedOperationException("Not supported.")
}
@deprecated
override def set(index: scala.Int, element: ClauseInfo): ClauseInfo = {
throw new UnsupportedOperationException("Not supported.")
}
override def removeFirst: ClauseInfo = {
val ci: ClauseInfo = getFirst
if (remove(ci)) {
ci
} else {
null
}
}
override def removeLast: ClauseInfo = {
val ci: ClauseInfo = getLast
if (remove(ci)) {
ci
} else {
null
}
}
override def remove: ClauseInfo = {
removeFirst
}
override def remove(index: scala.Int): ClauseInfo = {
val ci: ClauseInfo = super.get(index)
if (remove(ci)) {
ci
} else {
null
}
}
override def remove(ci: AnyRef): Boolean = {
if (super.remove(ci.asInstanceOf[ClauseInfo])) {
unregister(ci.asInstanceOf[ClauseInfo])
true
} else {
false
}
}
override def clear() {
while (size > 0) {
removeFirst
}
}
/**
* Retrieves a sublist of all the clauses of the same family as the goal
* and which, in all probability, could match with the given goal
*
* @param goal The goal to be resolved
* @return The list of goal-compatible predicates
*/
def get(goal: Term): List[ClauseInfo] = {
// Gets the correct list and encapsulates it in ReadOnlyLinkedList
if (goal.isInstanceOf[Struct]) {
val g: Struct = goal.getTerm.asInstanceOf[Struct]
/*
* If no arguments no optimization can be applied
* (and probably no optimization is needed)
*/
if (g.getArity == 0) {
return this.asScala.toList ::: List[ClauseInfo]()
}
/* Retrieves first argument and checks type */
val t = g.getArg(0).getTerm
if (t.isInstanceOf[Var]) {
/*
* if first argument is an unbounded variable,
* no reasoning is possible, all family must be returned
*/
this.asScala.toList ::: List[ClauseInfo]()
} else if (t.isAtomic) {
if (t.isInstanceOf[numeric.Number]) {
/* retrieves clauses whose first argument is numeric (or Var)
* and same as goal's first argument, if no clauses
* are retrieved, all clauses with a variable
* as first argument
*/
numCompClausesIndex.get(t.asInstanceOf[numeric.Number]).asScala.toList ::: List[ClauseInfo]()
} else {
/* retrieves clauses whose first argument is a constant (or Var)
* and same as goal's first argument, if no clauses
* are retrieved, all clauses with a variable
* as first argument
*/
constantCompClausesIndex.get(t.asInstanceOf[Struct].getName).asScala.toList ::: List[ClauseInfo]()
}
} else if (t.isInstanceOf[Struct]) {
if (isAList(t.asInstanceOf[Struct])) {
/* retrieves clauses which has a list (or Var) as first argument */
listCompClausesList.asScala.toList ::: List[ClauseInfo]()
} else {
/* retrieves clauses whose first argument is a struct (or Var)
* and same as goal's first argument, if no clauses
* are retrieved, all clauses with a variable
* as first argument
*/
structCompClausesIndex.get(t.asInstanceOf[Struct].getPredicateIndicator).asScala.toList ::: List[ClauseInfo]()
}
} else {
/* Default behaviour: no optimization done */
this.asScala.toList ::: List[ClauseInfo]()
}
} else {
/* Default behaviour: no optimization done */
this.asScala.toList ::: List[ClauseInfo]()
}
}
override def iterator: ju.Iterator[ClauseInfo] = {
listIterator(0)
}
override def listIterator: ju.ListIterator[ClauseInfo] = {
new ListItr(this, 0)
}
private def superListIterator(index: scala.Int): ju.ListIterator[ClauseInfo] = {
super.listIterator(index)
}
override def listIterator(index: scala.Int): ju.ListIterator[ClauseInfo] = {
new ListItr(this, index)
}
/*
* Checks if a Struct is also a list.
* A list can be an empty list, or a Struct with name equals to "."
* and arity equals to 2.
*/
private def isAList(t: Struct): Boolean = {
t.isEmptyList || ((t.getName == ".") && t.getArity == 2)
}
// Updates indexes, storing informations about the last added clause
private def register(ci: ClauseInfo, first: Boolean) {
// See FamilyClausesList.get(Term): same concept
val clause = ci.getHead
if (clause.isInstanceOf[Struct]) {
val g: Struct = clause.getTerm.asInstanceOf[Struct]
if (g.getArity == 0) {
return
}
val t: Term = g.getArg(0).getTerm
if (t.isInstanceOf[Var]) {
numCompClausesIndex.insertAsShared(ci, first)
constantCompClausesIndex.insertAsShared(ci, first)
structCompClausesIndex.insertAsShared(ci, first)
if (first) {
listCompClausesList.addFirst(ci)
}
else {
listCompClausesList.addLast(ci)
}
}
else if (t.isAtomic) {
if (t.isInstanceOf[numeric.Number]) {
numCompClausesIndex.insert(t.asInstanceOf[numeric.Number], ci, first)
}
else if (t.isInstanceOf[Struct]) {
constantCompClausesIndex.insert(t.asInstanceOf[Struct].getName, ci, first)
}
}
else if (t.isInstanceOf[Struct]) {
if (isAList(t.asInstanceOf[Struct])) {
if (first) {
listCompClausesList.addFirst(ci)
}
else {
listCompClausesList.addLast(ci)
}
}
else {
structCompClausesIndex.insert(t.asInstanceOf[Struct].getPredicateIndicator, ci, first)
}
}
}
}
// Updates indexes, deleting information about the last removed clause.
// Mirrors register(): the same first-argument dispatch decides which index
// the clause is deleted from.
//
// NOTE(review): register() classifies lists via isAList(t) (empty list or
// "."/2), while this method uses t.isList — presumably equivalent, but verify
// so that a clause registered as a list is also unregistered from the list index.
def unregister(ci: ClauseInfo) {
  val clause: Term = ci.getHead
  if (clause.isInstanceOf[Struct]) {
    val g: Struct = clause.getTerm.asInstanceOf[Struct]
    if (g.getArity == 0) {
      // 0-arity head was never registered, so there is nothing to delete.
      return
    }
    val t: Term = g.getArg(0).getTerm
    if (t.isInstanceOf[Var]) {
      // Shared across all indexes (see register), so remove from all of them.
      numCompClausesIndex.removeShared(ci)
      constantCompClausesIndex.removeShared(ci)
      structCompClausesIndex.removeShared(ci)
      listCompClausesList.remove(ci)
    }
    else if (t.isAtomic) {
      if (t.isInstanceOf[numeric.Number]) {
        numCompClausesIndex.delete(t.asInstanceOf[numeric.Number], ci)
      }
      else if (t.isInstanceOf[Struct]) {
        constantCompClausesIndex.delete(t.asInstanceOf[Struct].getName, ci)
      }
    }
    else if (t.isInstanceOf[Struct]) {
      if (t.isList) {
        listCompClausesList.remove(ci)
      }
      else {
        structCompClausesIndex.delete(t.asInstanceOf[Struct].getPredicateIndicator, ci)
      }
    }
  }
}
/**
 * ListIterator wrapper that keeps the FamilyClausesList's derived clause
 * indexes consistent with the backing LinkedList: remove() also unregisters
 * the removed clause from the first-argument indexes.
 *
 * NOTE(review): add(o) appends to the end of the backing list via addLast,
 * ignoring the cursor position required by the ju.ListIterator.add contract,
 * and does not register(o) in the derived indexes — confirm this is intentional.
 * NOTE(review): set(o) replaces the element without updating the derived indexes.
 * NOTE(review): currentIndex is initialized to 0, so a remove() issued before
 * any next()/previous() call would unregister the first element.
 */
private class ListItr(list: FamilyClausesList, index: scala.Int) extends ju.ListIterator[ClauseInfo] {
  // Raw LinkedList iterator: obtained through superListIterator to avoid
  // recursing back into this wrapper.
  private val it: ju.ListIterator[ClauseInfo] = list.superListIterator(index)
  private val l: ju.LinkedList[ClauseInfo] = list
  // Index of the element last returned by next()/previous(); consumed by remove().
  private var currentIndex: scala.Int = 0
  override def hasNext: Boolean = {
    it.hasNext
  }
  override def next: ClauseInfo = {
    // Alessandro Montanari - alessandro.montanar5@studio.unibo.it
    // Remember the cursor BEFORE advancing: nextIndex is the index of the
    // element that next() is about to return.
    currentIndex = it.nextIndex
    it.next
  }
  override def hasPrevious: Boolean = {
    it.hasPrevious
  }
  override def previous: ClauseInfo = {
    // Alessandro Montanari - alessandro.montanar5@studio.unibo.it
    currentIndex = it.previousIndex
    it.previous
  }
  override def nextIndex: scala.Int = {
    it.nextIndex
  }
  override def previousIndex: scala.Int = {
    it.previousIndex
  }
  override def remove() {
    // Alessandro Montanari - alessandro.montanar5@studio.unibo.it
    // Fetch the element being removed so it can be unregistered from the
    // derived indexes after the structural removal.
    val ci: ClauseInfo = l.get(currentIndex)
    it.remove()
    unregister(ci)
  }
  override def set(o: ClauseInfo) {
    it.set(o)
  }
  override def add(o: ClauseInfo) {
    l.addLast(o)
  }
}
} | zakski/project-soisceal | scala-core/src/main/scala/com/szadowsz/gospel/core/db/theory/clause/FamilyClausesList.scala | Scala | lgpl-3.0 | 9,136 |
package com.enkidu.lignum.parsers.ast.expression.discardable.binary
import com.enkidu.lignum.parsers.ast.expression.discardable.DiscardableExpression
/**
 * AST node for a qualified `this` expression (e.g. `Outer.this`).
 * `name` holds the qualifier's identifier parts — presumably the dot-separated
 * segments of the qualifying type name; confirm against the parser that builds it.
 */
case class QualifiedThisReference(name: Seq[String]) extends DiscardableExpression
| marek1840/java-parser | src/main/scala/com/enkidu/lignum/parsers/ast/expression/discardable/binary/QualifiedThisReference.scala | Scala | mit | 235 |
package io.scalac.seed
import java.nio.ByteBuffer
import java.nio.charset.Charset
import akka.actor.{ActorRef, ExtendedActorSystem}
import akka.persistence.eventstore.EventStoreSerializer
import akka.persistence.eventstore.snapshot.EventStoreSnapshotStore.SnapshotEvent
import akka.persistence.eventstore.snapshot.EventStoreSnapshotStore.SnapshotEvent.Snapshot
import akka.persistence.{PersistentRepr, SnapshotMetadata}
import akka.util.ByteString
import eventstore.{Content, ContentType, Event, EventData}
import io.scalac.seed.domain.VehicleAggregate.{KeeperAssigned, VehicleInitialized, VrnAssigned}
import org.json4s.Extraction.decompose
import org.json4s._
import org.json4s.native.Serialization.{read, write}
/**
 * Akka serializer that writes persistent events and snapshots to EventStore
 * as UTF-8 JSON via json4s, using the custom serializers defined in the
 * companion object.
 */
class Json4sSerializer(val system: ExtendedActorSystem) extends EventStoreSerializer {
  import Json4sSerializer._
  // json4s formats with the custom serializers for snapshots, PersistentRepr
  // and ActorRef registered.
  implicit val formats = DefaultFormats + SnapshotSerializer + PersistentReprSerializer + ActorRefSerializer
  def identifier = Identifier
  def includeManifest = true
  // Deserializes JSON bytes; the optional manifest class narrows the json4s
  // target type (AnyRef when absent).
  def fromBinary(bytes: Array[Byte], manifestOpt: Option[Class[_]]) = {
    implicit val manifest = manifestOpt match {
      case Some(x) => Manifest.classType(x)
      case None => Manifest.AnyRef
    }
    read(new String(bytes, UTF8))
  }
  def toBinary(o: AnyRef) = write(o).getBytes(UTF8)
  // Wraps a PersistentRepr or SnapshotEvent into an EventStore EventData with
  // JSON content. Other inputs fail fast.
  def toEvent(x: AnyRef) = x match {
    case x: PersistentRepr =>
      EventData(
        eventType = eventType(x.payload),
        data = Content(ByteString(toBinary(x)), ContentType.Json))
    case x: SnapshotEvent => EventData(
      eventType = classFor(x).getName,
      data = Content(ByteString(toBinary(x)), ContentType.Json))
    case _ => sys.error(s"Cannot serialize $x, SnapshotEvent expected")
  }
  // Reads an EventStore event back as a PersistentRepr, then checks it
  // against the requested manifest class.
  def fromEvent(event: Event, manifest: Class[_]) = {
    val clazz = classOf[PersistentRepr]
    val result = fromBinary(event.data.data.value.toArray, clazz)
    if (manifest.isInstance(result)) result
    else sys.error(s"Cannot deserialize event as $manifest, event: $event")
  }
  def classFor(x: AnyRef) = x match {
    case x: PersistentRepr => classOf[PersistentRepr]
    case _ => x.getClass
  }
  // Maps a persistent payload to its EventStore event-type string.
  // NOTE(review): this match is non-exhaustive — any payload other than the
  // three Vehicle events throws a MatchError at serialization time.
  def eventType(x: Any) = x match {
    case x: VehicleInitialized => classOf[VehicleInitialized].getName
    case x: VrnAssigned => classOf[VrnAssigned].getName
    case x: KeeperAssigned => classOf[KeeperAssigned].getName
  }
  // Serializes ActorRefs as their full serialization-format path string and
  // resolves them back through the actor-ref provider.
  object ActorRefSerializer extends Serializer[ActorRef] {
    val Clazz = classOf[ActorRef]
    def deserialize(implicit format: Formats) = {
      case (TypeInfo(Clazz, _), JString(x)) => system.provider.resolveActorRef(x)
    }
    def serialize(implicit format: Formats) = {
      case x: ActorRef => JString(x.path.toSerializationFormat)
    }
  }
}
object Json4sSerializer {
  val UTF8: Charset = Charset.forName("UTF-8")
  // Serializer id derived from the first four bytes of "json4s".
  val Identifier: Int = ByteBuffer.wrap("json4s".getBytes(UTF8)).getInt
  /**
   * json4s serializer for snapshot events.
   * NOTE(review): serialize stores `data.toString`, and deserialize rebuilds
   * the Snapshot from that string — this round-trips faithfully only when the
   * snapshot data is itself a String; confirm that is the case here.
   */
  object SnapshotSerializer extends Serializer[Snapshot] {
    val Clazz = classOf[Snapshot]
    def deserialize(implicit format: Formats) = {
      // Field order is significant for this List pattern: "data" then "metadata".
      case (TypeInfo(Clazz, _), JObject(List(
        JField("data", JString(x)),
        JField("metadata", metadata)))) => Snapshot(x, metadata.extract[SnapshotMetadata])
    }
    def serialize(implicit format: Formats) = {
      case Snapshot(data, metadata) => JObject("data" -> JString(data.toString), "metadata" -> decompose(metadata))
    }
  }
  /**
   * json4s serializer for PersistentRepr. Serialization flattens the repr plus
   * its payload into one of the *Mapping case classes; deserialization first
   * extracts the generic Mapping to read `eventType`, then re-extracts with
   * the matching concrete mapping class.
   * NOTE(review): dispatch relies on hard-coded fully-qualified class-name
   * strings — these must stay in sync with the Vehicle event classes (they
   * equal `classOf[...].getName` for the nested case classes).
   */
  object PersistentReprSerializer extends Serializer[PersistentRepr] {
    val Clazz = classOf[PersistentRepr]
    def deserialize(implicit format: Formats) = {
      case (TypeInfo(Clazz, _), json) =>
        val x = json.extract[Mapping]
        x.eventType match {
          case "io.scalac.seed.domain.VehicleAggregate$VehicleInitialized" =>
            val y = json.extract[VehicleInitialisedMapping]
            PersistentRepr(y.payload, y.sequenceNr, y.persistenceId, y.deleted, sender = y.sender)
          case "io.scalac.seed.domain.VehicleAggregate$VrnAssigned" =>
            val y = json.extract[VrnAssignedMapping]
            PersistentRepr(y.payload, y.sequenceNr, y.persistenceId, y.deleted, sender = y.sender)
          case "io.scalac.seed.domain.VehicleAggregate$KeeperAssigned" =>
            val y = json.extract[KeeperAssignedMapping]
            PersistentRepr(y.payload, y.sequenceNr, y.persistenceId, y.deleted, sender = y.sender)
        }
    }
    def serialize(implicit format: Formats) = {
      case x: PersistentRepr =>
        x.payload match {
          case y: VehicleInitialized =>
            val mapping = VehicleInitialisedMapping(y, x.sequenceNr, x.persistenceId, x.deleted, x.sender, x.payload.getClass.getName)
            decompose(mapping)
          case y: VrnAssigned =>
            val mapping = VrnAssignedMapping(y, x.sequenceNr, x.persistenceId, x.deleted, x.sender, x.payload.getClass.getName)
            decompose(mapping)
          case y: KeeperAssigned =>
            val mapping = KeeperAssignedMapping(y, x.sequenceNr, x.persistenceId, x.deleted, x.sender, x.payload.getClass.getName)
            decompose(mapping)
        }
    }
    // JSON shapes for a PersistentRepr with each supported payload type.
    case class Mapping(payload: Any, sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, eventType: String)
    case class VehicleInitialisedMapping(payload: VehicleInitialized, sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, eventType: String)
    case class VrnAssignedMapping(payload: VrnAssigned, sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, eventType: String)
    case class KeeperAssignedMapping(payload: KeeperAssigned, sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, eventType: String)
  }
} | petervdm/akka-persistence-eventsourcing | src/main/scala/io/scalac/seed/Json4sSerializer.scala | Scala | apache-2.0 | 5,686 |
package edu.rice.habanero.benchmarks.apsp
import edu.rice.habanero.actors.HabaneroActor
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import edu.rice.hj.Module0._
import edu.rice.hj.api.HjSuspendable
import scala.collection.mutable.ListBuffer
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
/**
 * All-Pairs Shortest Path (blocked Floyd-Warshall) benchmark on Habanero
 * actors. The adjacency matrix is split into blockSize x blockSize tiles, one
 * actor per tile; each actor advances k in lockstep, exchanging its tile with
 * every actor in its row and column after each step.
 */
object ApspHabaneroActorBenchmark {
  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new ApspHabaneroActorBenchmark)
  }
  private final class ApspHabaneroActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      ApspConfig.parseArgs(args)
      ApspUtils.generateGraph()
    }
    def printArgInfo() {
      ApspConfig.printArgs()
    }
    // Runs one benchmark iteration inside a finish scope so it blocks until
    // every actor has exited.
    def runIteration() {
      finish(new HjSuspendable {
        override def run() = {
          val graphData = ApspUtils.graphData
          val numNodes = ApspConfig.N
          val blockSize = ApspConfig.B
          val numBlocksInSingleDim: Int = numNodes / blockSize
          // create and automatically start the actors, one per matrix tile
          val blockActors = Array.tabulate[HabaneroActor[AnyRef]](numBlocksInSingleDim, numBlocksInSingleDim) {
            (i, j) =>
              val myBlockId = (i * numBlocksInSingleDim) + j
              val apspActor = new ApspFloydWarshallActor(myBlockId, blockSize, numNodes, graphData)
              apspActor.start()
              apspActor
          }
          // create the links to the neighbors: every actor in the same row or
          // column is a neighbor (2 * (dim - 1) in total)
          for (bi <- 0 until numBlocksInSingleDim) {
            for (bj <- 0 until numBlocksInSingleDim) {
              val neighbors = new ListBuffer[HabaneroActor[AnyRef]]()
              // add neighbors in same column
              for (r <- 0 until numBlocksInSingleDim) {
                if (r != bi) {
                  neighbors.append(blockActors(r)(bj))
                }
              }
              // add neighbors in same row
              for (c <- 0 until numBlocksInSingleDim) {
                if (c != bj) {
                  neighbors.append(blockActors(bi)(c))
                }
              }
              blockActors(bi)(bj).send(ApspNeighborMessage(neighbors))
            }
          }
          // start the computation
          for (bi <- 0 until numBlocksInSingleDim) {
            for (bj <- 0 until numBlocksInSingleDim) {
              blockActors(bi)(bj).send(ApspInitialMessage)
            }
          }
        }
      })
    }
    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
      // Regenerate the graph so the next iteration starts from fresh data.
      ApspUtils.generateGraph()
    }
  }
  sealed abstract class ApspMessage
  // Kick-off message: makes an actor broadcast its initial (k = -1) tile.
  private case object ApspInitialMessage extends ApspMessage
  // A neighbor's tile contents after its step k.
  private case class ApspResultMessage(k: Int, myBlockId: Int, initData: Array[Array[Long]]) extends ApspMessage
  // Delivers the actor's row/column neighbor references before the run starts.
  private case class ApspNeighborMessage(neighbors: ListBuffer[HabaneroActor[AnyRef]]) extends ApspMessage
  /**
   * Actor owning one blockSize x blockSize tile of the distance matrix.
   * It waits for the tiles of all row/column neighbors, performs the
   * Floyd-Warshall update for the next k, broadcasts its new tile, and exits
   * once k reaches graphSize - 1.
   */
  private class ApspFloydWarshallActor(myBlockId: Int, blockSize: Int, graphSize: Int, initGraphData: Array[Array[Long]]) extends HabaneroActor[AnyRef] {
    // NOTE(review): `self` is never referenced in this class body — dead field?
    private val self = this
    private val numBlocksInSingleDim: Int = graphSize / blockSize
    // One neighbor per other tile in the same row plus same column.
    private val numNeighbors: Int = 2 * (numBlocksInSingleDim - 1)
    // Global matrix coordinates of this tile's top-left corner.
    final val rowOffset: Int = (myBlockId / numBlocksInSingleDim) * blockSize
    final val colOffset: Int = (myBlockId % numBlocksInSingleDim) * blockSize
    private val neighbors = new ListBuffer[HabaneroActor[AnyRef]]()
    // Current Floyd-Warshall step; -1 until the first round of tiles arrives.
    private var k = -1
    // Latest tile received from each neighbor, keyed by block id.
    private val neighborDataPerIteration = new java.util.HashMap[Int, Array[Array[Long]]]()
    private var receivedNeighbors = false
    private var currentIterData = ApspUtils.getBlock(initGraphData, myBlockId)
    override def process(msg: AnyRef) {
      msg match {
        case message: ApspResultMessage =>
          if (!receivedNeighbors) {
            val msg = "Block-" + myBlockId + " hasn't received neighbors yet!"
            println("ERROR: " + msg)
            throw new Exception(msg)
          }
          val haveAllData = storeIterationData(message.k, message.myBlockId, message.initData)
          if (haveAllData) {
            // received enough data from neighbors, can proceed to do computation for next k
            k += 1
            performComputation()
            notifyNeighbors()
            neighborDataPerIteration.clear()
            if (k == graphSize - 1) {
              // we've completed the computation
              exit()
            }
          }
        case ApspInitialMessage =>
          // Broadcast the initial tile so neighbors can start step 0.
          notifyNeighbors()
        case ApspNeighborMessage(msgNeighbors) =>
          receivedNeighbors = true
          msgNeighbors.foreach {
            loopNeighbor => neighbors.append(loopNeighbor)
          }
      }
    }
    // Records one neighbor tile; returns true once all neighbors reported.
    // NOTE(review): the `iteration` parameter is ignored and the map is keyed
    // only by sourceId — TODO confirm the lockstep protocol guarantees a
    // neighbor can never deliver tiles from two different k values before
    // this actor consumes the map.
    private def storeIterationData(iteration: Int, sourceId: Int, dataArray: Array[Array[Long]]): Boolean = {
      neighborDataPerIteration.put(sourceId, dataArray)
      neighborDataPerIteration.size() == numNeighbors
    }
    // dist_k(i, j) = min(dist_{k-1}(i, j), dist_{k-1}(i, k) + dist_{k-1}(k, j))
    private def performComputation(): Unit = {
      val prevIterData = currentIterData
      // make modifications on a fresh local data array for this iteration
      currentIterData = Array.tabulate[Long](blockSize, blockSize)((i, j) => 0)
      for (i <- 0 until blockSize) {
        for (j <- 0 until blockSize) {
          val gi = rowOffset + i
          val gj = colOffset + j
          val newIterData = elementAt(gi, k, k - 1, prevIterData) + elementAt(k, gj, k - 1, prevIterData)
          currentIterData(i)(j) = scala.math.min(prevIterData(i)(j), newIterData)
        }
      }
    }
    // Looks up the value at global (row, col): from the local previous tile if
    // it falls in this block, otherwise from the cached neighbor tile.
    // NOTE(review): the `srcIter` parameter is unused.
    private def elementAt(row: Int, col: Int, srcIter: Int, prevIterData: Array[Array[Long]]): Long = {
      val destBlockId = ((row / blockSize) * numBlocksInSingleDim) + (col / blockSize)
      val localRow = row % blockSize
      val localCol = col % blockSize
      // println("Accessing block-" + destBlockId + " from block-" + selfActor.myBlockId + " for " + (row, col))
      if (destBlockId == myBlockId) {
        prevIterData(localRow)(localCol)
      } else {
        val blockData = neighborDataPerIteration.get(destBlockId)
        blockData(localRow)(localCol)
      }
    }
    private def notifyNeighbors(): Unit = {
      // send the current result to all other blocks who might need it
      // note: this is inefficient version where data is sent to neighbors
      // who might not need it for the current value of k
      val resultMessage = ApspResultMessage(k, myBlockId, currentIterData)
      neighbors.foreach {
        loopNeighbor =>
          loopNeighbor.send(resultMessage)
      }
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/apsp/ApspHabaneroActorBenchmark.scala | Scala | gpl-2.0 | 6,599 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.configuration
import org.apache.spark.annotation.Experimental
/**
 * :: Experimental ::
 * Enum to select the algorithm for the decision tree
 */
@Experimental
object Algo extends Enumeration {
  type Algo = Value

  val Classification, Regression = Value

  /**
   * Parses a name into an [[Algo]] value. Only the exact lowercase and
   * capitalized spellings are accepted; anything else is rejected.
   */
  private[mllib] def fromString(name: String): Algo =
    if (name == "classification" || name == "Classification") {
      Classification
    } else if (name == "regression" || name == "Regression") {
      Regression
    } else {
      throw new IllegalArgumentException(s"Did not recognize Algo name: $name")
    }
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Algo.scala | Scala | apache-2.0 | 1,371 |
package templemore.liftjson.provider
import fixture.AddressInputTransformer
import org.specs2.Specification
/**
 * specs2 specification for the new-instance transformer factory: it must
 * create a transformer instance whenever one is requested.
 */
class NewInstanceTransformerFactorySpec extends Specification { def is =
  "Specification for the new instance transformer factory" ^
                                                           endp^
  "A new instance transformer factory should" ^
    "create a transformer instance when requested" ! createTransformer^
                                                   end

  // Asks the factory for an AddressInputTransformer and expects a non-null instance.
  def createTransformer = {
    val factory = NewInstanceTransformerFactory
    factory.transformer(classOf[AddressInputTransformer]) must_!= null
  }
} | skipoleschris/lift-json-jsr311-provider | provider/src/test/scala/templemore/liftjson/provider/NewInstanceTransformerFactorySpec.scala | Scala | apache-2.0 | 718 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InputStream, IOException, OutputStream}
import java.nio.charset.StandardCharsets.UTF_8
import scala.io.{Source => IOSource}
import scala.reflect.ClassTag
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.spark.sql.SparkSession
/**
* An abstract class for compactible metadata logs. It will write one log file for each batch.
* The first line of the log file is the version number, and there are multiple serialized
* metadata lines following.
*
* As reading from many small files is usually pretty slow, also too many
* small files in one folder will mess the FS, [[CompactibleFileStreamLog]] will
* compact log files every 10 batches by default into a big file. When
* doing a compaction, it will read all old log files and merge them with the new batch.
*/
abstract class CompactibleFileStreamLog[T: ClassTag](
    metadataLogVersion: String,
    sparkSession: SparkSession,
    path: String)
  extends HDFSMetadataLog[Array[T]](sparkSession, path) {

  import CompactibleFileStreamLog._

  /**
   * If we delete the old files after compaction at once, there is a race condition in S3: other
   * processes may see the old files are deleted but still cannot see the compaction file using
   * "list". The `allFiles` handles this by looking for the next compaction file directly, however,
   * a live lock may happen if the compaction happens too frequently: one processing keeps deleting
   * old files while another one keeps retrying. Setting a reasonable cleanup delay could avoid it.
   */
  protected def fileCleanupDelayMs: Long

  protected def isDeletingExpiredLog: Boolean

  protected def compactInterval: Int

  /**
   * Serialize the data into encoded string.
   */
  protected def serializeData(t: T): String

  /**
   * Deserialize the string into data object.
   */
  protected def deserializeData(encodedString: String): T

  /**
   * Filter out the obsolete logs.
   */
  def compactLogs(logs: Seq[T]): Seq[T]

  // Compaction batches get a ".compact" suffix so they can be located directly.
  override def batchIdToPath(batchId: Long): Path = {
    if (isCompactionBatch(batchId, compactInterval)) {
      new Path(metadataPath, s"$batchId$COMPACT_FILE_SUFFIX")
    } else {
      new Path(metadataPath, batchId.toString)
    }
  }

  override def pathToBatchId(path: Path): Long = {
    getBatchIdFromFileName(path.getName)
  }

  // A file is a batch file iff its name parses as a batch id (with or without
  // the compact suffix).
  override def isBatchFile(path: Path): Boolean = {
    try {
      getBatchIdFromFileName(path.getName)
      true
    } catch {
      case _: NumberFormatException => false
    }
  }

  // File layout: version line first, then one serialized entry per line.
  override def serialize(logData: Array[T], out: OutputStream): Unit = {
    // called inside a try-finally where the underlying stream is closed in the caller
    out.write(metadataLogVersion.getBytes(UTF_8))
    logData.foreach { data =>
      out.write('\n')
      out.write(serializeData(data).getBytes(UTF_8))
    }
  }

  override def deserialize(in: InputStream): Array[T] = {
    val lines = IOSource.fromInputStream(in, UTF_8.name()).getLines()
    if (!lines.hasNext) {
      throw new IllegalStateException("Incomplete log file")
    }
    val version = lines.next()
    if (version != metadataLogVersion) {
      throw new IllegalStateException(s"Unknown log version: ${version}")
    }
    lines.map(deserializeData).toArray
  }

  // Compaction batches fold all previous entries into one file; normal batches
  // are written as-is.
  override def add(batchId: Long, logs: Array[T]): Boolean = {
    if (isCompactionBatch(batchId, compactInterval)) {
      compact(batchId, logs)
    } else {
      super.add(batchId, logs)
    }
  }

  /**
   * Compacts all logs before `batchId` plus the provided `logs`, and writes them into the
   * corresponding `batchId` file. It will delete expired files as well if enabled.
   */
  private def compact(batchId: Long, logs: Array[T]): Boolean = {
    val validBatches = getValidBatchesBeforeCompactionBatch(batchId, compactInterval)
    val allLogs = validBatches.flatMap(batchId => super.get(batchId)).flatten ++ logs
    if (super.add(batchId, compactLogs(allLogs).toArray)) {
      if (isDeletingExpiredLog) {
        deleteExpiredLog(batchId)
      }
      true
    } else {
      // Return false as there is another writer.
      false
    }
  }

  /**
   * Returns all files except the deleted ones.
   */
  def allFiles(): Array[T] = {
    var latestId = getLatest().map(_._1).getOrElse(-1L)
    // There is a race condition when `FileStreamSink` is deleting old files and `StreamFileCatalog`
    // is calling this method. This loop will retry the reading to deal with the
    // race condition.
    while (true) {
      if (latestId >= 0) {
        try {
          val logs =
            getAllValidBatches(latestId, compactInterval).flatMap(id => super.get(id)).flatten
          return compactLogs(logs).toArray
        } catch {
          case e: IOException =>
            // Another process using `CompactibleFileStreamLog` may delete the batch files when
            // `StreamFileCatalog` are reading. However, it only happens when a compaction is
            // deleting old files. If so, let's try the next compaction batch and we should find it.
            // Otherwise, this is a real IO issue and we should throw it.
            latestId = nextCompactionBatchId(latestId, compactInterval)
            super.get(latestId).getOrElse {
              throw e
            }
        }
      } else {
        return Array.empty
      }
    }
    // Unreachable: the while(true) loop always returns; this satisfies the type checker.
    Array.empty
  }

  /**
   * Since all logs before `compactionBatchId` are compacted and written into the
   * `compactionBatchId` log file, they can be removed. However, due to the eventual consistency of
   * S3, the compaction file may not be seen by other processes at once. So we only delete files
   * created `fileCleanupDelayMs` milliseconds ago.
   */
  private def deleteExpiredLog(compactionBatchId: Long): Unit = {
    val expiredTime = System.currentTimeMillis() - fileCleanupDelayMs
    fileManager.list(metadataPath, new PathFilter {
      override def accept(path: Path): Boolean = {
        try {
          val batchId = getBatchIdFromFileName(path.getName)
          batchId < compactionBatchId
        } catch {
          case _: NumberFormatException =>
            false
        }
      }
    }).foreach { f =>
      if (f.getModificationTime <= expiredTime) {
        fileManager.delete(f.getPath)
      }
    }
  }
}
object CompactibleFileStreamLog {
  val COMPACT_FILE_SUFFIX = ".compact"

  /** Extracts the numeric batch id from a log file name, ignoring any ".compact" suffix. */
  def getBatchIdFromFileName(fileName: String): Long =
    fileName.stripSuffix(COMPACT_FILE_SUFFIX).toLong

  /**
   * Whether `batchId` is a compaction batch. Old logs are compacted every
   * `compactInterval` commits, so with an interval of 3 the compaction batches
   * are 2, 5, 8, ...
   */
  def isCompactionBatch(batchId: Long, compactInterval: Int): Boolean =
    (batchId + 1) % compactInterval == 0

  /**
   * All valid batch ids strictly before `compactionBatchId`; together they hold
   * everything a new compaction needs. E.g. with interval 3 and compaction
   * batch 5, this yields Seq(2, 3, 4) — note it includes the previous
   * compaction batch 2.
   */
  def getValidBatchesBeforeCompactionBatch(
      compactionBatchId: Long,
      compactInterval: Int): Seq[Long] = {
    assert(isCompactionBatch(compactionBatchId, compactInterval),
      s"$compactionBatchId is not a compaction batch")
    val firstBatch = math.max(0, compactionBatchId - compactInterval)
    firstBatch until compactionBatchId
  }

  /**
   * All batch ids needed up to `batchId` (inclusive): from the most recent
   * compaction batch at or before `batchId` through `batchId` itself.
   */
  def getAllValidBatches(batchId: Long, compactInterval: Long): Seq[Long] = {
    assert(batchId >= 0)
    val previousCompaction = math.max(0, (batchId + 1) / compactInterval * compactInterval - 1)
    previousCompaction to batchId
  }

  /** The id of the first compaction batch strictly after `batchId`. */
  def nextCompactionBatchId(batchId: Long, compactInterval: Long): Long =
    (batchId + compactInterval + 1) / compactInterval * compactInterval - 1
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala | Scala | apache-2.0 | 8,952 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.lang
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec
/** Unit tests for [[BitStrings]] binary-string parsing. */
class BitStringsTest extends AnyFunSpec with Matchers {
  describe("BitStrings") {
    it("Calculate binary value of bit string") {
      // a run of 20 trailing ones => 2^20 - 1 = 1048575
      BitStrings.binValueOf("00000000000000000000000000000000011111111111111111111") should be(1048575)
      // "111" followed by 14 zeros => 2^16 + 2^15 + 2^14 = 114688
      BitStrings.binValueOf("00000000000000000000000000000000000011100000000000000") should be(114688)
    }
  }
}
| beangle/commons | core/src/test/scala/org/beangle/commons/lang/BitStringsTest.scala | Scala | lgpl-3.0 | 1,194 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | Your SKilL scala Binding **
** \\__ \\ ' <| | | |__ generated: 01.02.2019 **
** |___/_|\\_\\_|_|____| by: feldentm **
\\* */
package de.ust.skill.sir.api.internal
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.WrappedArray
import scala.reflect.Manifest
import de.ust.skill.common.jvm.streams.InStream
import de.ust.skill.common.scala.SkillID
import de.ust.skill.common.scala.api.SkillObject
import de.ust.skill.common.scala.api.TypeMissmatchError
import de.ust.skill.common.scala.internal.BasePool
import de.ust.skill.common.scala.internal.FieldDeclaration
import de.ust.skill.common.scala.internal.SkillState
import de.ust.skill.common.scala.internal.SingletonStoragePool
import de.ust.skill.common.scala.internal.StoragePool
import de.ust.skill.common.scala.internal.SubPool
import de.ust.skill.common.scala.internal.fieldTypes._
import de.ust.skill.common.scala.internal.restrictions.FieldRestriction
import _root_.de.ust.skill.sir.api._
/**
 * Storage pool for `userdefinedtype` instances.
 * NOTE: this file is generated by the SKilL compiler (see the file header) —
 * regenerate it instead of editing by hand.
 */
final class UserdefinedTypePool(poolIndex : Int,
superPool: TypePool)
  extends SubPool[_root_.de.ust.skill.sir.UserdefinedType, de.ust.skill.sir.Type](
    poolIndex,
    "userdefinedtype",
    superPool
  ) {
  override def getInstanceClass: Class[_root_.de.ust.skill.sir.UserdefinedType] = classOf[_root_.de.ust.skill.sir.UserdefinedType]

  // Creates the field declaration matching `name`, falling back to the
  // superclass for unknown fields, and verifies the declared field type.
  override def addField[T : Manifest](ID : Int, t : FieldType[T], name : String,
      restrictions : HashSet[FieldRestriction]) : FieldDeclaration[T, _root_.de.ust.skill.sir.UserdefinedType] = {
    val f = (name match {
      case "comment" ⇒ new F_UserdefinedType_comment(ID, this, t.asInstanceOf[FieldType[_root_.de.ust.skill.sir.Comment]])
      case "hints" ⇒ new F_UserdefinedType_hints(ID, this, t.asInstanceOf[FieldType[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]]])
      case "restrictions" ⇒ new F_UserdefinedType_restrictions(ID, this, t.asInstanceOf[FieldType[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Restriction]]])
      case "name" ⇒ new F_UserdefinedType_name(ID, this, t.asInstanceOf[FieldType[_root_.de.ust.skill.sir.Identifier]])
      case _ ⇒ return super.addField(ID, t, name, restrictions)
    }).asInstanceOf[FieldDeclaration[T, _root_.de.ust.skill.sir.UserdefinedType]]

    //check type
    if (t != f.t)
      throw new TypeMissmatchError(t, f.t.toString, f.name, name)

    val rs = restrictions.iterator
    while(rs.hasNext)
      f.addRestriction(rs.next())

    dataFields += f
    return f
  }

  // Ensures every statically known field exists in dataFields, adding any that
  // a read file did not contain, then creates known restrictions on all fields.
  override def ensureKnownFields(st : SkillState) {
    val state = st.asInstanceOf[SkillFile]
    // data fields
    val Clscomment = classOf[F_UserdefinedType_comment]
    val Clshints = classOf[F_UserdefinedType_hints]
    val Clsrestrictions = classOf[F_UserdefinedType_restrictions]
    val Clsname = classOf[F_UserdefinedType_name]

    val fields = HashSet[Class[_ <: FieldDeclaration[_, _root_.de.ust.skill.sir.UserdefinedType]]](Clscomment,Clshints,Clsrestrictions,Clsname)
    var dfi = dataFields.size
    while (dfi != 0) {
      dfi -= 1
      fields.remove(dataFields(dfi).getClass)
    }
    if(fields.contains(Clscomment))
      dataFields += new F_UserdefinedType_comment(dataFields.size + 1, this, state.Comment)
    if(fields.contains(Clshints))
      dataFields += new F_UserdefinedType_hints(dataFields.size + 1, this, VariableLengthArray(state.Hint))
    if(fields.contains(Clsrestrictions))
      dataFields += new F_UserdefinedType_restrictions(dataFields.size + 1, this, VariableLengthArray(state.Restriction))
    if(fields.contains(Clsname))
      dataFields += new F_UserdefinedType_name(dataFields.size + 1, this, state.Identifier)
    // no auto fields

    val fs = (dataFields ++ autoFields).iterator
    while (fs.hasNext)
      fs.next().createKnownRestrictions
  }

  override def makeSubPool(name : String, poolIndex : Int) = new UserdefinedTypeSubPool(poolIndex, name, this)

  // Reflectively creates an instance with an invalid id; it is assigned on write.
  override def reflectiveAllocateInstance: _root_.de.ust.skill.sir.UserdefinedType = {
    val r = new _root_.de.ust.skill.sir.UserdefinedType(-1)
    this.newObjects.append(r)
    r
  }

  // Allocates deserialized instances for every block, in parallel over blocks.
  override def allocateInstances {
    for (b ← blocks.par) {
      var i : SkillID = b.bpo
      val last = i + b.staticCount
      while (i < last) {
        data(i) = new _root_.de.ust.skill.sir.UserdefinedType(i + 1)
        i += 1
      }
    }
  }

  // Creates a new instance with the given field values; negative ids mark
  // objects created after the last read/write.
  def make(comment : _root_.de.ust.skill.sir.Comment = null, hints : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint] = scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint](), restrictions : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Restriction] = scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Restriction](), name : _root_.de.ust.skill.sir.Identifier = null) = {
    val r = new _root_.de.ust.skill.sir.UserdefinedType(-1 - newObjects.size, comment : _root_.de.ust.skill.sir.Comment, hints : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint], restrictions : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Restriction], name : _root_.de.ust.skill.sir.Identifier)
    newObjects.append(r)
    r
  }
}
/**
 * Pool for unknown (file-defined) subtypes of userdefinedtype.
 * NOTE: generated by the SKilL compiler — regenerate rather than hand-edit.
 */
final class UserdefinedTypeSubPool(poolIndex : Int, name : String, superPool : StoragePool[_ >: _root_.de.ust.skill.sir.UserdefinedType.UnknownSubType <: _root_.de.ust.skill.sir.UserdefinedType, _root_.de.ust.skill.sir.Type])
  extends SubPool[_root_.de.ust.skill.sir.UserdefinedType.UnknownSubType, _root_.de.ust.skill.sir.Type](
    poolIndex,
    name,
    superPool
  ) {
  override def getInstanceClass : Class[_root_.de.ust.skill.sir.UserdefinedType.UnknownSubType] = classOf[_root_.de.ust.skill.sir.UserdefinedType.UnknownSubType]

  override def makeSubPool(name : String, poolIndex : Int) = new UserdefinedTypeSubPool(poolIndex, name, this)

  // Unknown types carry no statically known fields.
  override def ensureKnownFields(st : SkillState) {}

  // Allocates deserialized instances for every block, in parallel over blocks.
  override def allocateInstances {
    for (b ← blocks.par) {
      var i : SkillID = b.bpo
      val last = i + b.staticCount
      while (i < last) {
        data(i) = new _root_.de.ust.skill.sir.UserdefinedType.UnknownSubType(i + 1, this)
        i += 1
      }
    }
  }

  // Reflectively creates an instance with an invalid id; it is assigned on write.
  def reflectiveAllocateInstance : _root_.de.ust.skill.sir.UserdefinedType.UnknownSubType = {
    val r = new _root_.de.ust.skill.sir.UserdefinedType.UnknownSubType(-1, this)
    this.newObjects.append(r)
    r
  }
}
| skill-lang/skill | src/main/scala/de/ust/skill/sir/api/internal/PoolUserdefinedType.scala | Scala | bsd-3-clause | 7,035 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.livy
// Message type discriminators for the Livy REPL message protocol. The
// lower-case, snake_case object names presumably match the serialized wire
// representation verbatim — TODO confirm against the codec.
sealed trait MsgType

object MsgType {
  case object execute_request extends MsgType
  case object execute_reply extends MsgType
}

// Envelope pairing a message type tag with its typed payload.
case class Msg[T <: Content](msg_type: MsgType, content: T)

// Marker trait for all message payloads.
sealed trait Content

// Request to execute `code`; `kind` optionally selects an interpreter kind.
case class ExecuteRequest(code: String, kind: Option[String]) extends Content {
  val msg_type = MsgType.execute_request
}

// Outcome of an execution: ok, error, or aborted.
sealed trait ExecutionStatus

object ExecutionStatus {
  case object ok extends ExecutionStatus
  case object error extends ExecutionStatus
  case object abort extends ExecutionStatus
}

// Common shape of execution replies; concrete subclasses fix the `status`.
sealed trait ExecuteReply extends Content {
  val msg_type = MsgType.execute_reply
  val status: ExecutionStatus
  val execution_count: Int
}

// Successful execution. `payload` is a string-to-string map — presumably
// mime-type keyed output; verify against the producing interpreter.
case class ExecuteReplyOk(execution_count: Int,
                          payload: Map[String, String]) extends ExecuteReply {
  val status = ExecutionStatus.ok
}

// Failed execution with exception name, value and traceback lines.
case class ExecuteReplyError(execution_count: Int,
                             ename: String,
                             evalue: String,
                             traceback: List[String]) extends ExecuteReply {
  val status = ExecutionStatus.error
}

// Input/output lines associated with statement `id`.
case class ExecuteResponse(id: Int, input: Seq[String], output: Seq[String])

// Code-completion request at offset `cursor` within `code`.
case class CompletionRequest(code: String, kind: String, cursor: Int) extends Content

case class CompletionResponse(candidates: List[String]) extends Content

// Request to shut down the session; carries no data.
case class ShutdownRequest() extends Content
| ajbozarth/incubator-livy | core/src/main/scala/org/apache/livy/msgs.scala | Scala | apache-2.0 | 2,200 |
/**
* Copyright (c) 2013-2016 Extended Mind Technologies Oy
*
* This file is part of Extended Mind.
*
* Extended Mind is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.extendedmind.email
import scala.util.{ Success, Failure }
import org.extendedmind.domain._
import org.extendedmind._
import org.extendedmind.Response._
import scaldi._
import akka.actor.ActorSystem
import akka.io.IO
import akka.pattern.ask
import spray.http._
import MediaTypes._
import spray.client.pipelining._
import spray.json._
import spray.httpx.SprayJsonSupport._
import spray.httpx.marshalling._
import spray.httpx.marshalling.Marshaller._
import spray.json.DefaultJsonProtocol._
import spray.util.LoggingContext
import scala.concurrent.Future
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import akka.actor.ActorRefFactory
import java.util.UUID
/** Spray (un)marshalling support for the Mailgun HTTP API. */
object MailgunProtocol extends DefaultJsonProtocol {

  // Marshals a SendEmailRequest as application/x-www-form-urlencoded form
  // fields — one field per declared member of the request class (reflection).
  implicit val sendEmailRequestMarshaller =
    Marshaller.delegate[SendEmailRequest, FormData](`application/x-www-form-urlencoded`) { (sendEmailRequest, contentType) =>
      new FormData(getCCParams(sendEmailRequest).map { case (k, v) => (k, v) } toList)
    }

  implicit val sendEmailResponseFormat = jsonFormat2(SendEmailResponse)

  // Reflectively folds a case class's declared fields into a name -> value map.
  // NOTE(review): assumes every declared field holds a String — the
  // asInstanceOf[String] fails at runtime otherwise; verify SendEmailRequest.
  def getCCParams(cc: AnyRef): Map[String, String] =
    (Map[String, String]() /: cc.getClass.getDeclaredFields) { (a, f) =>
      f.setAccessible(true)
      a + (f.getName -> f.get(cc).asInstanceOf[String])
    }
}
/** MailClient implementation that sends mail through the Mailgun v2 REST API. */
trait MailgunClient extends MailClient {

  import MailgunProtocol._

  // HTTP pipeline: send the request, unmarshal the JSON body into SendEmailResponse.
  val sendEmailPipeline = sendReceive ~> unmarshal[SendEmailResponse]

  // POSTs the email as form data to the domain's /messages endpoint, using
  // HTTP basic auth with user "api" and the configured API key.
  override def sendEmail(sendEmailRequest: SendEmailRequest): Future[SendEmailResponse] = {
    implicit val timeout = Timeout(10 seconds)
    val address = "https://api.mailgun.net/v2/" + settings.mailgunDomain + "/messages"
    sendEmailPipeline {
      // NOTE(review): marshal(...).right.get throws synchronously if
      // marshalling fails; consider surfacing that as a failed Future instead.
      Post(address,
        marshal(sendEmailRequest).right.get
      ) ~> addCredentials(BasicHttpCredentials("api", settings.mailgunApiKey))
    }
  }
}
// Concrete client: wires Settings and an ActorRefFactory in via implicit
// constructor parameters (scaldi dependency injection).
class MailgunClientImpl(implicit val implSettings: Settings, implicit val implActorRefFactory: ActorRefFactory,
                        implicit val inj: Injector)
    extends MailgunClient with Injectable {
  override def actorRefFactory = implActorRefFactory
  override def settings = implSettings
}
| ttiurani/extendedmind | backend/src/main/scala/org/extendedmind/email/MailgunClient.scala | Scala | agpl-3.0 | 2,973 |
package no.digipost.labs.util
import javax.servlet._
import org.slf4j.LoggerFactory
import javax.servlet.http.HttpServletRequest
/**
 * Servlet filter that logs (at DEBUG) how long each request took to pass
 * through the rest of the filter chain.
 */
class ResponseTimeFilter extends Filter {

  val log = LoggerFactory.getLogger(getClass)

  def destroy() = {
  }

  def doFilter(request: ServletRequest, response: ServletResponse, chain: FilterChain) = {
    val start = System.currentTimeMillis()
    try {
      chain.doFilter(request, response)
    } finally {
      // Fixed: measure and log in a finally block so the elapsed time is
      // recorded even when a downstream filter/servlet throws.
      val elapsed = System.currentTimeMillis() - start
      // Guard so the interpolation is only evaluated when DEBUG is enabled.
      if (log.isDebugEnabled) {
        val req = request.asInstanceOf[HttpServletRequest]
        val method = req.getMethod
        // getPathInfo may be null depending on the servlet mapping; fall back
        // to the request URI instead of logging the literal "null".
        val path = Option(req.getPathInfo).getOrElse(req.getRequestURI)
        log.debug(s"$method $path took $elapsed ms")
      }
    }
  }

  def init(filterConfig: FilterConfig) = {
  }
}
| digipost/labs | backend/src/main/scala/no/digipost/labs/util/ResponseTimeFilter.scala | Scala | apache-2.0 | 687 |
package breeze.linalg.operators
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.generic.{UFunc, MMRegistry2}
import breeze.generic.UFunc.InPlaceImpl2
import scala.reflect.ClassTag
/**
* This is a special kind of BinaryUpdateOp that supports registration
* of specialized implementations for a given operation.
* @author dlwh
*/
// This trait could reuse code from Multimethod2, but not doing so allows us to reduce code size a lot
// because we don't need BinaryOp's to inherit from Function2, which has a lot of @specialzied cruft.
trait BinaryUpdateRegistry[A <: AnyRef, B, Op <: OpType]
    extends UFunc.InPlaceImpl2[Op, A, B]
    with MMRegistry2[UFunc.InPlaceImpl2[Op, _ <: A, _ <: B]] {

  // Invoked when no registered implementation matches the runtime types of (a, b).
  protected def bindingMissing(a: A, b: B): Unit =
    throw new UnsupportedOperationException("Types not found!" + a + b + " " + ops)

  // Invoked when several registered implementations remain after attempting
  // to select the most specific one.
  protected def multipleOptions(
      a: A,
      b: B,
      m: Map[(Class[_], Class[_]), UFunc.InPlaceImpl2[Op, _ <: A, _ <: B]]): Unit = {
    throw new RuntimeException("Multiple bindings for method: " + m)
  }

  /**
   * Dispatches to the most specific registered in-place implementation for the
   * runtime classes of `a` and `b`, memoizing the resolution result.
   */
  def apply(a: A, b: B): Unit = {
    val ac = a.asInstanceOf[AnyRef].getClass
    val bc = b.asInstanceOf[AnyRef].getClass
    // `cache` distinguishes three states for a class pair: `null` = never
    // resolved, Some(impl) = resolved, None = known to have no binding.
    val cached = cache.get(ac -> bc)
    if (cached != null) {
      cached match {
        case None => bindingMissing(a, b)
        case Some(m) =>
          m.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
      }
    } else {
      val options = resolve(ac, bc.asInstanceOf[Class[_ <: B]])
      options.size match {
        case 0 =>
          // Negative results are cached too, so repeated misses stay cheap.
          cache.put(ac -> bc, None)
          bindingMissing(a, b)
        case 1 =>
          val method = options.values.head
          cache.put(ac -> bc, Some(method))
          method.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
        case _ =>
          // Several candidates: try to narrow them down to a single most
          // specific implementation before giving up.
          val selected = selectBestOption(options)
          if (selected.size != 1)
            multipleOptions(a, b, options)
          else {
            val method = selected.values.head
            cache.put(ac -> bc, Some(method))
            method.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
          }
      }
    }
  }

  /** Registers `op` under the runtime classes of AA and BB and returns it. */
  def register[AA <: A, BB <: B](op: InPlaceImpl2[Op, AA, BB])(implicit cA: ClassTag[AA], cB: ClassTag[BB]) = {
    super.register(cA.runtimeClass, cB.runtimeClass, op)
    op
  }
}
| scalanlp/breeze | math/src/main/scala/breeze/linalg/operators/BinaryUpdateRegistry.scala | Scala | apache-2.0 | 2,822 |
package pew.entity
import pew._
/**
 * Single pixel of an explosion. While decelerating it scales its velocity by
 * `step` every tick; once both velocity components are near zero it switches
 * to a falling phase where a constant downward pull (0.01f per tick) is
 * applied. The particle removes itself after `age` ticks.
 *
 * @param px    initial x position (passed to Entity)
 * @param py    initial y position (passed to Entity)
 * @param color pixel colour written to the bitmap when drawn
 * @param pdx   initial x velocity
 * @param pdy   initial y velocity
 * @param age   lifetime in ticks
 * @param step  per-tick velocity damping factor during the deceleration phase
 */
class ExplosionParticle(
    px: Float,
    py: Float,
    color: Int,
    pdx: Float,
    pdy: Float,
    age: Ticks,
    step: Float
) extends Entity(px, py, pdx, pdy) {

  // True once the deceleration phase has ended and gravity takes over.
  private var falling = false

  override def tick(): Unit = {
    super.tick()
    x += dx
    y += dy
    if (!falling) {
      // Damp the velocity; once it is near zero, start falling.
      dx *= step
      dy *= step
      falling = doesMove()
    } else {
      dy += 0.01f // gravity
    }
    // Expire after `age` ticks. (Fixed: dropped the redundant trailing
    // `return ()` — `return` is discouraged in Scala and was a no-op here.)
    if (ticks > age) {
      remove()
    }
  }

  // Tolerance handed to `near` when testing velocity against zero.
  // (Fixed typo: was misspelt `moveThereshold`; private, so safe to rename.)
  private val moveThreshold = 0.9f

  // True when both velocity components are near zero.
  // NOTE(review): the name is misleading — it reports that the particle has
  // effectively *stopped* moving; consider renaming in a follow-up.
  private def doesMove() =
    near(moveThreshold, dx, 0f) &&
      near(moveThreshold, dy, 0f)

  // Plots the particle as a single pixel at its rounded position.
  override def draw(bm: Bitmap): Unit = {
    val xx = Math.round(x)
    val yy = Math.round(y)
    bm(xx, yy) = color
  }
}
| keddelzz/pixel-firework | src/main/scala/pew/entity/ExplosionParticle.scala | Scala | mit | 755 |
package com.github.gdefacci.briscola.presentation.competition
import org.obl.raz.Path
/** Input (wire-side) representations used when creating competitions. */
object Input {

  import com.github.gdefacci.briscola.competition.MatchKind
  import com.github.gdefacci.briscola.competition.CompetitionStartDeadline

  // A competition to create: invited players, the match kind, and the
  // deadline before which the competition must start.
  final case class Competition(players: GamePlayers, kind: MatchKind, deadline: CompetitionStartDeadline)

  // Players are either a flat set (Players) or grouped into teams (TeamPlayers).
  sealed trait GamePlayers
  // Paths presumably reference player resources — TODO confirm routing scheme.
  final case class Players(players: Set[Path]) extends GamePlayers
  final case class TeamPlayers(players: Set[TeamPlayer], teams:Set[TeamInfo]) extends GamePlayers
  // Associates one player path with the name of the team it belongs to.
  final case class TeamPlayer(player:Path, teamName:String)
  final case class TeamInfo(name:String)
  final case class Team(name:String, players:Set[Path])
  final case class Teams(teams:Seq[Team])
}
| gdefacci/briscola | ddd-briscola-web/src/main/scala/com/github/gdefacci/briscola/presentation/competition/input.scala | Scala | bsd-3-clause | 751 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe
import scala.collection._
import scala.util.hashing._
/**
 * Immutable buffer of IntBanks, keyed by bank index. Arithmetic is
 * element-wise; the partition helpers distribute (segment -> weight) pairs
 * over buckets for load balancing.
 */
final class IntBuffer(override val banks: SortedMap[Int, IntBank])
    extends BufferEx[IntBuffer, IntBank, Int] {

  override def toString
  : String = s"IntBuffer[${banks.size}]"

  override def hashCode()
  : Int = MurmurHash3.mix(super.hashCode(), banks.hashCode())

  override def canEqual(that: Any)
  : Boolean = that.isInstanceOf[IntBuffer]

  override protected def doEquals(other: Equatable)
  : Boolean = super.doEquals(other) && (other match {
    case other: IntBuffer =>
      banks == other.banks
    case _ =>
      false
  })

  // Sum over all banks, accumulated as Long to avoid Int overflow.
  def sum
  : Long = {
    MapEx.foldLeftValues(
      0L,
      banks
    )(_ + _.sum)
  }

  // Adds a scalar to every bank, yielding a new buffer.
  def +(value: Int)
  : IntBuffer = {
    val result = mapBanks(
      _ + value
    )
    IntBuffer(result)
  }

  // Element-wise addition; banks present in only one buffer are kept as-is
  // (the two identity projections passed to zipBanksEx).
  def +(other: IntBuffer)
  : IntBuffer = {
    val result = zipBanksEx(
      other
    )((b0, b1) => b0 + b1, b0 => b0, b1 => b1)
    IntBuffer(result)
  }

  // Subtracts a scalar from every bank, yielding a new buffer.
  def -(value: Int)
  : IntBuffer = {
    val result = mapBanks(
      _ - value
    )
    IntBuffer(result)
  }

  // Element-wise subtraction; unmatched banks are kept as-is (note: banks only
  // present in `other` are carried over unnegated — TODO confirm intended).
  def -(other: IntBuffer)
  : IntBuffer = {
    val result = zipBanksEx(
      other
    )((b0, b1) => b0 - b1, b0 => b0, b1 => b1)
    IntBuffer(result)
  }

  /**
   * Greedily distributes segments over at most `noBucketsMax` buckets:
   * segments are taken heaviest-first and, once all buckets exist, each goes
   * to the currently lightest bucket (longest-processing-time scheduling).
   *
   * NOTE(review): `buckets.minBy(_.map(_._2).sum)` recomputes every bucket's
   * weight on each insertion — O(n^2) overall; fine for small segment counts.
   */
  def partitionSegments(noBucketsMax: Int)
  : Seq[Seq[((Int, Int), Int)]] = {
    val buckets = mutable.Buffer.empty[mutable.Buffer[((Int, Int), Int)]]
    val segmentList = segments.toSeq.sortWith(_._2 > _._2)
    for(segment <- segmentList) {
      // If have not used all buckets yet.
      if (buckets.length < noBucketsMax) {
        val bucket = mutable.Buffer(segment)
        buckets += bucket
      }
      else {
        // Find bucket with lowest amount and insert there.
        val bucket = buckets.minBy(_.map(_._2).sum)
        bucket += segment
      }
    }
    buckets
  }

  /**
   * First-fit-decreasing bin packing: segments are taken heaviest-first and
   * placed into the first existing bucket whose total stays within
   * `maxBucketSize`; a new bucket is opened when none fits.
   *
   * NOTE(review): bucket sums are recomputed per probe (O(n^2) worst case).
   */
  def partitionSegmentsIntoLimitedSizeBuckets(maxBucketSize: Int)
  : Seq[Seq[((Int, Int), Int)]] = {
    val buckets = mutable.Buffer.empty[mutable.Buffer[((Int, Int), Int)]]
    val segmentList = segments.toSeq.sortWith(_._2 > _._2)
    for(segment <- segmentList) {
      // Try to insert into existing bucket.
      var inserted = false
      val iter = buckets.iterator
      while(iter.hasNext && !inserted) {
        val bucket = iter.next()
        val size = bucket.map(_._2).sum
        if (size + segment._2 <= maxBucketSize) {
          bucket += segment
          inserted = true
        }
      }
      // No Bucket fits. Let's create a new bucket.
      if (!inserted) {
        val bucket = mutable.Buffer(segment)
        buckets += bucket
      }
    }
    buckets
  }

  // ---------------------------------------------------------------------------
  //    Conversion
  // ---------------------------------------------------------------------------
  override protected def doCreateView(banks: SortedMap[Int, IntBank])
  : IntBuffer = IntBuffer(banks)
}
object IntBuffer {

  final def apply(banks: SortedMap[Int, IntBank])
  : IntBuffer = new IntBuffer(banks)

  // A buffer without any banks.
  final val empty
  : IntBuffer = apply(SortedMap.empty)

  // Creates an IntBuffer with the same bank layout as `buffer`, with every
  // bank filled with `value`.
  final def fillLike(buffer: BufferLike,
                     value: Int)
  : IntBuffer = {
    val result = MapEx.mapValues(
      buffer.banks
    )(IntBank.fillLike(_, value))
    apply(result)
  }

  // Zero-filled buffer with the same layout as `buffer`.
  final def zeroLike(buffer: BufferLike)
  : IntBuffer = fillLike(buffer, 0)
}
| bashimao/ltudl | base/src/main/scala/edu/latrobe/IntBuffer.scala | Scala | apache-2.0 | 4,066 |
/**
* error.scala
*
* @author <a href="mailto:jim@corruptmemory.com">Jim Powers</a>
*
* Copyright 2011 Jim Powers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.corruptmemory.herding_cats
// ADT of errors surfaced by herding-cats. Variant names mirror ZooKeeper-style
// outcomes (NoNode, NodeExists, version conflicts, disconnects).
trait Error
// A plain textual error.
case class Message(message:String) extends Error
// An exception that was caught, with an explanatory message.
case class Caught(message:String,throwable:Throwable) extends Error
// An exception that escaped without an accompanying message.
case class Uncaught(throwable:Throwable) extends Error
case object Disconnected extends Error
case class NoNode(path:String) extends Error
case class NodeExists(path:String) extends Error
case object VersionMismatch extends Error
case object Shutdown extends Error

// Factory methods returning the widened Error type; handy where the compiler
// would otherwise infer the concrete case class/object type.
trait Errors {
  def message(m:String):Error = Message(m)
  def caught(m:String,t:Throwable):Error = Caught(m,t)
  def uncaught(t:Throwable):Error = Uncaught(t)
  def disconnected:Error = Disconnected
  def noNode(path:String):Error = NoNode(path)
  def nodeExists(path:String):Error = NodeExists(path)
  def versionMismatch:Error = VersionMismatch
  def shutdown:Error = Shutdown
}
| corruptmemory/herding-cats | library/src/main/scala/herding_cats/error.scala | Scala | apache-2.0 | 1,500 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.implbase
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import io.rdbc.ImmutSeq
import io.rdbc.sapi.exceptions.{ColumnIndexOutOfBoundsException, NoKeysReturnedException}
import io.rdbc.sapi.{Row, RowMetadata, RowPublisher, Timeout, Warning}
import org.reactivestreams.Subscriber
import org.scalamock.scalatest.MockFactory
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
/**
 * Behavioural spec for ExecutableStatementPartialImpl. Each test drives the
 * implementation through TestStmt, whose stream() replays a fixed vector of
 * mocked rows and can be scripted to fail at a given element index.
 */
class ExecutableStatementPartialImplSpec
    extends RdbcImplbaseSpec
    with MockFactory {

  private implicit val actorSystem: ActorSystem = ActorSystem()
  private implicit val materializer: ActorMaterializer = ActorMaterializer()
  private implicit val timeout: Timeout = Timeout.Inf

  // Sentinel exception injected by TestRowPublisher; tests assert that this
  // very instance reaches the caller.
  private val publisherFailure = new RuntimeException

  "ExecutableStatementPartialImpl" when {
    "executed for the first row" should {
      "return first row if there are rows returned" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        new TestStmt(rows).executeForFirstRow().get shouldBe Some(rows.head)
      }

      "return None if there are no rows returned" in {
        new TestStmt(Vector.empty).executeForFirstRow().get shouldBe empty
      }

      "fail if source fails" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).executeForFirstRow().get
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }
    }

    "executed for value should" should {
      "return first row's value if there are rows returned" in {
        val firstRow = mock[Row]
        val firstRowVal = "val"
        val valIndex = 0
        (firstRow.str(_: Int)).expects(valIndex).returning(firstRowVal)
        val rows = Vector(firstRow, mock[Row], mock[Row])
        new TestStmt(rows).executeForValue(_.str(valIndex)).get shouldBe Some(firstRowVal)
      }

      "return None if there are no rows returned" in {
        new TestStmt(Vector.empty).executeForValue(_.str(0)).get shouldBe empty
      }

      "fail if source fails before the first element" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).executeForValue(_.str(0)).get
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }
    }

    "executed for key" should {
      "return first row's value if there are rows returned" in {
        val firstRow = mock[Row]
        val firstRowVal = 0
        (firstRow.colOpt[Int](_: Int)(_: ClassTag[Int])).expects(0, ClassTag.Int).returning(Some(firstRowVal))
        val rows = Vector(firstRow, mock[Row], mock[Row])
        new TestStmt(rows).executeForKey[Int]().get shouldBe firstRowVal
      }

      "fail with NoKeysReturnedException if there are no rows" in {
        assertThrows[NoKeysReturnedException] {
          new TestStmt(Vector.empty).executeForKey[String]().get
        }
      }

      // Key extraction probing column 0 fails when the row has no columns.
      "fail with NoKeysReturnedException if row has no values" in {
        val firstRow = mock[Row]
        (firstRow.colOpt[Int](_: Int)(_: ClassTag[Int]))
          .expects(0, ClassTag.Int)
          .throwing(new ColumnIndexOutOfBoundsException(0, 0))
        val rows = Vector(firstRow, mock[Row], mock[Row])
        assertThrows[NoKeysReturnedException] {
          new TestStmt(rows).executeForKey[Int]().get
        }
      }

      "fail with NoKeysReturnedException if row has a null value" in {
        val firstRow = mock[Row]
        (firstRow.colOpt[Int](_: Int)(_: ClassTag[Int]))
          .expects(0, ClassTag.Int)
          .returning(None)
        val rows = Vector(firstRow, mock[Row], mock[Row])
        assertThrows[NoKeysReturnedException] {
          new TestStmt(rows).executeForKey[Int]().get
        }
      }

      "fail if source fails before the first element" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).executeForKey[String]().get
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }
    }

    "executed for set" should {
      "return all rows" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        val set = new TestStmt(rows).executeForSet().get
        set.rows shouldBe rows
      }

      "fail if source fails" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).executeForSet().get
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }

      "return rows affected" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        new TestStmt(rows).executeForSet().get.rowsAffected shouldBe rows.size.toLong
      }

      "return source's metadata" in {
        val metadata = mock[RowMetadata]
        new TestStmt(
          rows = Vector(mock[Row], mock[Row], mock[Row]),
          metadata = metadata
        ).executeForSet().get.metadata shouldBe theSameInstanceAs(metadata)
      }

      "return source's warnings" in {
        val warnings = Vector(Warning("msg1", "code1"), Warning("msg2", "code2"))
        new TestStmt(
          rows = Vector(mock[Row], mock[Row], mock[Row]),
          warnings = warnings
        ).executeForSet().get.warnings shouldBe warnings
      }
    }

    "executed for rows affected" should {
      "return number of affected rows" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        new TestStmt(rows).executeForRowsAffected().get shouldBe rows.size
      }

      "fail if source fails" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).executeForRowsAffected().get
          new AnyRef //to satisfy scalatest
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }
    }

    "executed ignoring result" should {
      "succeed if source succeeds" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        noException should be thrownBy new TestStmt(rows).execute().get
      }

      "fail if source fails" in {
        val rows = Vector(mock[Row], mock[Row], mock[Row])
        the[RuntimeException] thrownBy {
          new TestStmt(rows, failOn = Some(0)).execute().get
          new AnyRef //to satisfy scalatest
        }.shouldBe(theSameInstanceAs(publisherFailure))
      }
    }
  }

  // Minimal statement under test; stream() replays `rows`, optionally
  // failing at index `failOn`, and reports the given warnings/metadata.
  class TestStmt(rows: Vector[Row],
                 failOn: Option[Int] = None,
                 warnings: Vector[Warning] = Vector.empty,
                 metadata: RowMetadata = RowMetadata(Vector.empty)) extends ExecutableStatementPartialImpl {
    implicit protected val ec: ExecutionContext = ExecutionContext.global

    def stream()(implicit timeout: Timeout): RowPublisher = {
      new TestRowPublisher(rows, failOn, warnings, metadata)
    }
  }

  // Akka-streams backed RowPublisher fixture: emits `rows` in order and
  // throws `publisherFailure` when the element index matches `failOn`.
  class TestRowPublisher(rows: Vector[Row],
                         failOn: Option[Int],
                         warns: Vector[Warning],
                         mdata: RowMetadata) extends RowPublisher {
    private val publisher = {
      Source(rows).zipWithIndex.map { case (row, idx) =>
        failOn match {
          case Some(`idx`) => throw publisherFailure
          case _ => row
        }
      }.runWith(Sink.asPublisher(fanout = false))
    }

    val rowsAffected: Future[Long] = Future.successful(rows.length.toLong)
    val warnings: Future[ImmutSeq[Warning]] = Future.successful(warns)
    val metadata: Future[RowMetadata] = Future.successful(mdata)
    // Not exercised by these tests.
    def done: Future[Unit] = ???

    def subscribe(s: Subscriber[_ >: Row]): Unit = {
      publisher.subscribe(s)
    }
  }
}
| rdbc-io/rdbc | rdbc-implbase/src/test/scala/io/rdbc/implbase/ExecutableStatementPartialImplSpec.scala | Scala | apache-2.0 | 8,375 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.ingest
package service
import akka.dispatch.{ExecutionContext, Future, Promise}
import blueeyes.core.data.ByteChunk
import blueeyes.core.http.HttpRequest
import blueeyes.json._
import IngestProcessing._
import AsyncParser._
import com.precog.common.Path
import com.precog.common.ingest._
import com.precog.common.jobs.JobId
import com.precog.common.security.{APIKey, Authorities, WriteMode}
import com.weiglewilczek.slf4s.Logging
import java.nio.ByteBuffer
import scalaz._
import scalaz.syntax.monad._
import scalaz.syntax.std.boolean._
// How records are delimited in the incoming JSON (see expandArraysAtRoot):
// JSONValueStyle treats a top-level array as a batch of records, while
// JSONStreamStyle treats every top-level value (arrays included) as a record.
sealed trait JSONRecordStyle
case object JSONValueStyle extends JSONRecordStyle
case object JSONStreamStyle extends JSONRecordStyle
/**
 * Ingest pipeline for JSON payloads: incrementally parses a byte-chunk stream
 * with AsyncParser and stores the resulting JValues under (apiKey, path,
 * authorities). Records with more than `maxFields` primitive fields are
 * rejected; error-handling strategy (stop/continue/all-or-nothing) is chosen
 * per request.
 */
final class JSONIngestProcessing(apiKey: APIKey, path: Path, authorities: Authorities, recordStyle: JSONRecordStyle, maxFields: Int, storage: IngestStore)(implicit M: Monad[Future]) extends IngestProcessing with Logging {

  def forRequest(request: HttpRequest[_]): ValidationNel[String, IngestProcessor] = {
    Success(new IngestProcessor)
  }

  // Running totals: number of records stored plus (record index, message) errors.
  case class IngestReport(ingested: Int, errors: Seq[(Int, String)])

  object IngestReport {
    val Empty = IngestReport(0, Vector())
  }

  // Parser state threaded through the incremental parse loop.
  case class JSONParseState(parser: AsyncParser, report: IngestReport) {
    def update(newParser: AsyncParser, newIngested: Int, newErrors: Seq[(Int, String)] = Seq.empty) = {
      JSONParseState(newParser, IngestReport(report.ingested + newIngested, report.errors ++ newErrors))
    }
  }

  object JSONParseState {
    // NOTE(review): the stopOnFirstError parameter is ignored here.
    def empty(stopOnFirstError: Boolean) = JSONParseState(AsyncParser.stream(), IngestReport.Empty)
  }

  final class IngestProcessor extends IngestProcessorLike {

    def ingestJSONChunk(errorHandling: ErrorHandling, storeMode: WriteMode, jobId: Option[JobId], stream: StreamT[Future, Array[Byte]]): Future[IngestReport] = {
      val overLargeMsg = "Cannot ingest values with more than %d primitive fields. This limitiation may be lifted in a future release. Thank you for your patience.".format(maxFields)

      // Under JSONValueStyle a root-level array is a batch of records; under
      // JSONStreamStyle every parsed value is already one record.
      @inline def expandArraysAtRoot(values: Seq[JValue]) = recordStyle match {
        case JSONValueStyle =>
          values flatMap {
            case JArray(elements) => elements
            case value => Seq(value)
          }

        case JSONStreamStyle =>
          values
      }

      // Buffers the whole stream in memory and stores it in a single call only
      // if no parse/size error occurred anywhere; otherwise stores nothing.
      def ingestAllOrNothing(state: JSONParseState, stream: StreamT[Future, Array[Byte]], streamRef: StreamRef): Future[IngestReport] = {
        def accumulate(state: JSONParseState, records: Vector[JValue], stream: StreamT[Future, Array[Byte]]): Future[IngestReport] = {
          stream.uncons.flatMap {
            case Some((bytes, rest)) =>
              val (parsed, updatedParser) = state.parser(More(ByteBuffer.wrap(bytes)))
              val ingestSize = parsed.values.size
              val overLargeIdx = parsed.values.indexWhere(_.flattenWithPath.size > maxFields)
              val errors = parsed.errors.map(pe => (pe.line, pe.msg)) ++
                (overLargeIdx >= 0).option(overLargeIdx + state.report.ingested -> overLargeMsg)

              if (errors.isEmpty) {
                accumulate(state.update(updatedParser, ingestSize), records ++ parsed.values, rest)
              } else {
                IngestReport(0, errors).point[Future]
              }

            case None =>
              // End of stream: flush the parser, then store everything at once.
              val (parsed, finalParser) = state.parser(Done)
              val overLargeIdx = parsed.values.indexWhere(_.flattenWithPath.size > maxFields)
              val errors = parsed.errors.map(pe => (pe.line, pe.msg)) ++
                (overLargeIdx >= 0).option(overLargeIdx + state.report.ingested -> overLargeMsg)

              if (errors.isEmpty) {
                val completedRecords = records ++ parsed.values
                storage.store(apiKey, path, authorities, completedRecords, jobId, streamRef.terminate) map {
                  _.fold(
                    storeFailure => IngestReport(0, (0, storeFailure.message) :: Nil),
                    _ => IngestReport(completedRecords.size, Nil)
                  )
                }
              } else {
                IngestReport(0, errors).point[Future]
              }
          }
        }

        accumulate(state, Vector.empty[JValue], stream)
      }

      // Streams records to storage block by block, without buffering the
      // whole payload; the last block is stored with a terminating streamRef.
      def ingestUnbuffered(state: JSONParseState, stream: StreamT[Future, Array[Byte]], streamRef: StreamRef): Future[JSONParseState] = {
        stream.uncons.flatMap {
          case Some((bytes, rest)) =>
            // Dup and rewind to ensure we have something to parse
            val (parsed, updatedParser) = state.parser(More(ByteBuffer.wrap(bytes)))
            rest.isEmpty flatMap {
              case false => ingestBlock(parsed, updatedParser, state, streamRef) { ingestUnbuffered(_, rest, streamRef) }
              case true => ingestFinalBlock(parsed, updatedParser, state, streamRef)
            }

          case None =>
            val (parsed, finalParser) = state.parser(Done)
            ingestFinalBlock(parsed, finalParser, state, streamRef)
        }
      }

      def ingestFinalBlock(parsed: AsyncParse, updatedParser: AsyncParser, state: JSONParseState, streamRef: StreamRef) = {
        ingestBlock(parsed, updatedParser, state, streamRef.terminate) { (_: JSONParseState).point[Future] }
      }

      // Splits `as` into (elements failing f, indices of elements matching f),
      // preserving order.
      def partitionIndexed[A](as: Seq[A])(f: A => Boolean): (Seq[A], Seq[Int]) = {
        var ok: Vector[A] = Vector()
        var ko: Vector[Int] = Vector()
        var i = 0
        as foreach { a =>
          if (f(a)) ko = ko :+ i else ok = ok :+ a
          i += 1
        }
        (ok, ko)
      }

      // Stores one parsed block according to the error-handling strategy.
      // NOTE(review): the sys.error("Do something useful with ...") branches
      // are placeholders for store-failure handling (one message is even
      // missing a space: "with%s"); they will crash the ingest on failure.
      def ingestBlock(parsed: AsyncParse, updatedParser: AsyncParser, state: JSONParseState, streamRef: StreamRef)(continue: => JSONParseState => Future[JSONParseState]): Future[JSONParseState] = {
        (errorHandling: @unchecked) match {
          case IngestAllPossible =>
            // Store everything that fits; report over-large records by index.
            val (toIngest, overLarge) = partitionIndexed(expandArraysAtRoot(parsed.values)) { _.flattenWithPath.size > maxFields }
            val ingestSize = toIngest.size

            storage.store(apiKey, path, authorities, toIngest, jobId, streamRef) flatMap {
              _.fold(
                storeFailure => sys.error("Do something useful with %s" format storeFailure.message),
                _ => {
                  val errors = parsed.errors.map(pe => (pe.line, pe.msg)) ++ overLarge.map(i => (i, overLargeMsg))
                  continue(state.update(updatedParser, ingestSize, errors))
                }
              )
            }

          case StopOnFirstError =>
            // Store the prefix of records up to (not including) the first
            // over-large one; terminate the stream if anything went wrong.
            val (toIngest, overLarge) = expandArraysAtRoot(parsed.values) span { _.flattenWithPath.size <= maxFields }
            val ingestSize = toIngest.size

            if (overLarge.isEmpty && parsed.errors.isEmpty) {
              storage.store(apiKey, path, authorities, toIngest, jobId, streamRef) flatMap {
                _.fold(
                  storeFailure => sys.error("Do something useful with %s" format storeFailure.message),
                  _ => continue(state.update(updatedParser, ingestSize, Nil))
                )
              }
            } else {
              storage.store(apiKey, path, authorities, toIngest, jobId, streamRef.terminate) map {
                _.fold(
                  storeFailure => sys.error("Do something useful with%s" format storeFailure.message),
                  _ => {
                    val errors = parsed.errors.map(pe => (pe.line, pe.msg)) ++
                      (overLarge.nonEmpty).option(state.report.ingested + toIngest.size -> overLargeMsg)
                    state.update(updatedParser, ingestSize, errors)
                  }
                )
              }
            }
        }
      }

      errorHandling match {
        case StopOnFirstError =>
          ingestUnbuffered(JSONParseState.empty(true), stream, StreamRef.forWriteMode(storeMode, false)) map { _.report }

        case IngestAllPossible =>
          ingestUnbuffered(JSONParseState.empty(false), stream, StreamRef.forWriteMode(storeMode, false)) map { _.report }

        case AllOrNothing =>
          ingestAllOrNothing(JSONParseState.empty(true), stream, StreamRef.forWriteMode(storeMode, false))
      }
    }

    // Entry point: runs the chunked parse and shapes the report depending on
    // whether the ingest is locally durable or tracked by a global job.
    def ingest(durability: Durability, errorHandling: ErrorHandling, storeMode: WriteMode, data: ByteChunk): Future[IngestResult] = {
      val dataStream = data.fold(_ :: StreamT.empty[Future, Array[Byte]], identity)

      durability match {
        case LocalDurability =>
          ingestJSONChunk(errorHandling, storeMode, None, dataStream) map {
            case IngestReport(ingested, errors) =>
              errorHandling match {
                case StopOnFirstError | AllOrNothing =>
                  StreamingResult(ingested, errors.headOption.map(_._2))
                case IngestAllPossible =>
                  BatchResult(ingested + errors.size, ingested, Vector(errors: _*))
              }
          }

        case GlobalDurability(jobId) =>
          ingestJSONChunk(errorHandling, storeMode, Some(jobId), dataStream) map {
            case IngestReport(ingested, errors) =>
              BatchResult(ingested + errors.size, ingested, Vector(errors: _*))
          }
      }
    }
  }
}
| precog/platform | ingest/src/main/scala/com/precog/ingest/service/JSONIngestProcessing.scala | Scala | agpl-3.0 | 10,346 |
/*
* Sentries
* Copyright (c) 2012-2015 Erik van Oosten All rights reserved.
*
* The primary distribution site is https://github.com/erikvanoosten/sentries
*
* This software is released under the terms of the BSD 2-Clause License.
* There is NO WARRANTY. See the file LICENSE for the full text.
*/
package nl.grons.sentries.support
import com.yammer.metrics.core.MetricName
import java.lang.management.ManagementFactory
import javax.management.{MBeanRegistrationException, InstanceNotFoundException, ObjectName, MBeanServer}
import nl.grons.sentries
import nl.grons.sentries.SentrySupport
import org.slf4j.LoggerFactory
import scala.collection.concurrent.{Map => CMap}
import scala.collection.concurrent.TrieMap.{empty => emptyCMap}
/**
* A reporter which exposes sentries as JMX MBeans.
*/
class JmxReporter(
  private[this] val sentryRegistry: SentriesRegistry = SentrySupport.defaultRegistry,
  private[this] val server: MBeanServer = ManagementFactory.getPlatformMBeanServer
) extends SentriesRegistryListener {

  // True while this reporter is subscribed to `sentryRegistry` as a listener.
  private[this] var listening = false

  // Maps each sentry name to the JMX object name under which its MBean was registered,
  // so it can be unregistered again on removal/shutdown.
  private[this] val registeredBeans: CMap[MetricName, ObjectName] = newRegisteredBeansMap()

  private[this] val logger = LoggerFactory.getLogger(getClass)

  /**
   * Called when a sentry has been added to the [[nl.grons.sentries.support.SentriesRegistry]].
   *
   * @param name the name of the sentry
   * @param sentry the sentry
   */
  def onSentryAdded(name: MetricName, sentry: NamedSentry): Unit = {
    registerBean(name, createMBean(sentry), new ObjectName(name.getMBeanName))
  }

  /** Wraps the sentry in the most specific MBean type available for it. */
  private def createMBean(sentry: NamedSentry): JmxReporter.SentryMBean = {
    sentry match {
      case s: sentries.core.CircuitBreakerSentry => new JmxReporter.CircuitBreakerSentry(s)
      case s: sentries.core.AdaptiveThroughputSentry => new JmxReporter.AdaptiveThroughputSentry(s)
      case s => new JmxReporter.Sentry(s)
    }
  }

  /**
   * Called when a sentry has been removed from the [[nl.grons.sentries.support.SentriesRegistry]].
   *
   * @param name the name of the sentry
   */
  def onSentryRemoved(name: MetricName): Unit = {
    unregisterBean(new ObjectName(name.getMBeanName))
  }

  /**
   * Returns a new concurrent map implementation. Subclass this to do weird things with
   * your own [[nl.grons.sentries.support.JmxReporter]] implementation.
   *
   * @return a new [[scala.collection.concurrent.Map]]
   */
  protected def newRegisteredBeansMap(): CMap[MetricName, ObjectName] = emptyCMap

  /**
   * Stops the reporter: unsubscribes from the registry and unregisters every
   * MBean that was registered through this reporter.
   */
  def shutdown(): Unit = {
    sentryRegistry.removeListener(this)
    registeredBeans.values.foreach(unregisterBean)
    registeredBeans.clear()
    listening = false
  }

  /**
   * Starts the reporter. Safe to call more than once; the listener is only added
   * when it is not already listening.
   */
  def start(): Unit = {
    if (!listening) sentryRegistry.addListener(this)
    listening = true
  }

  private def registerBean(name: MetricName, bean: JmxReporter.SentryMBean, objectName: ObjectName): Unit = {
    server.registerMBean(bean, objectName)
    registeredBeans.put(name, objectName)
  }

  private def unregisterBean(objectName: ObjectName): Unit = {
    try {
      server.unregisterMBean(objectName)
    } catch {
      case e: InstanceNotFoundException =>
        // This is often thrown when the process is shutting down. An application with lots of
        // sentries will often begin unregistering sentries *after* JMX itself has cleared,
        // resulting in a huge dump of exceptions as the process is exiting.
        // Bug fix: the previous code passed `Array(objectName, e)` as a single vararg value
        // (without `: _*`), which rendered the array into the placeholder and dropped the
        // stack trace. Use the (String, Throwable) overload so the exception is logged properly.
        logger.trace(s"Error unregistering $objectName", e)
      case e: MBeanRegistrationException =>
        logger.debug(s"Error unregistering $objectName", e)
    }
  }
}
object JmxReporter {

  /** JMX management interface shared by every exported sentry. */
  trait SentryMBean {
    /** Invokes the underlying sentry's reset operation. */
    def reset(): Unit
  }

  /** Default MBean wrapper; exposes only the `reset` operation. */
  class Sentry(val sentry: nl.grons.sentries.support.Sentry) extends SentryMBean {
    def reset(): Unit = sentry.reset()
  }

  /** Management interface for circuit breaker sentries, adding `trip`. */
  trait CircuitBreakerSentryMBean extends SentryMBean {
    /** Invokes the underlying sentry's trip operation. */
    def trip(): Unit
  }

  /** MBean wrapper for [[nl.grons.sentries.core.CircuitBreakerSentry]]. */
  class CircuitBreakerSentry(sentry: nl.grons.sentries.core.CircuitBreakerSentry)
    extends Sentry(sentry) with CircuitBreakerSentryMBean {
    def trip(): Unit = sentry.trip()
  }

  /** Management interface for adaptive throughput sentries, adding `trip`. */
  trait AdaptiveThroughputSentryMBean extends SentryMBean {
    /** Invokes the underlying sentry's trip operation. */
    def trip(): Unit
  }

  /** MBean wrapper for [[nl.grons.sentries.core.AdaptiveThroughputSentry]]. */
  class AdaptiveThroughputSentry(sentry: nl.grons.sentries.core.AdaptiveThroughputSentry)
    extends Sentry(sentry) with AdaptiveThroughputSentryMBean {
    def trip(): Unit = sentry.trip()
  }
}
| erikvanoosten/sentries | src/main/scala/nl/grons/sentries/support/JmxReporter.scala | Scala | bsd-2-clause | 4,288 |
package skinny.micro.multipart
/**
 * Multi params in a file.
*/
class FileMultiParams(
  wrapped: Map[String, Seq[FileItem]] = Map.empty)
  extends Map[String, Seq[FileItem]] {

  /** Looks up `key`, falling back to the same key suffixed with "[]". */
  def get(key: String): Option[Seq[FileItem]] = wrapped.get(key) match {
    case found @ Some(_) => found
    case None => wrapped.get(key + "[]")
  }

  /** Symbol-keyed variant of [[get]]. */
  def get(key: Symbol): Option[Seq[FileItem]] = get(key.name)

  // Note: the cast is unchecked — callers are expected to supply Seq[FileItem] values.
  def +[B1 >: Seq[FileItem]](kv: (String, B1)): FileMultiParams = {
    val entry = kv.asInstanceOf[(String, Seq[FileItem])]
    new FileMultiParams(wrapped + entry)
  }

  def -(key: String): FileMultiParams = new FileMultiParams(wrapped - key)

  def iterator: Iterator[(String, Seq[FileItem])] = wrapped.iterator

  // Delegates to the wrapped map's default (throws for plain Map.empty).
  override def default(a: String): Seq[FileItem] = wrapped.default(a)
}
object FileMultiParams {

  /** Creates an empty FileMultiParams. */
  def apply(): FileMultiParams = new FileMultiParams()

  /** Wraps an existing map of file items. */
  def apply[SeqType <: Seq[FileItem]](wrapped: Map[String, Seq[FileItem]]): FileMultiParams =
    new FileMultiParams(wrapped)
}
| xerial/skinny-micro | micro/src/main/scala/skinny/micro/multipart/FileMultiParams.scala | Scala | bsd-2-clause | 933 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.test
import java.io.Closeable
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.actor.Terminated
import akka.stream.Materializer
import com.typesafe.sslconfig.ssl.SSLConfigSettings
import com.typesafe.sslconfig.ssl.SSLLooseConfig
import org.specs2.execute.AsResult
import org.specs2.specification.core.Fragment
import play.api.Configuration
import play.api.libs.ws.ahc.AhcWSClient
import play.api.libs.ws.ahc.AhcWSClientConfig
import play.api.libs.ws.WSClient
import play.api.libs.ws.WSClientConfig
import play.api.libs.ws.WSRequest
import play.api.libs.ws.WSResponse
import play.api.test.ApplicationFactory
import play.api.test.DefaultAwaitTimeout
import play.api.test.FutureAwaits
import play.core.server.ServerEndpoint
import scala.annotation.implicitNotFound
import scala.concurrent.duration.Duration
import scala.concurrent.Await
import scala.concurrent.Future
/**
* Provides a similar interface to [[play.api.test.WsTestClient]], but
* connects to an integration test's [[ServerEndpoint]] instead of an
* arbitrary scheme and port.
*/
trait WSEndpointSupport {
  self: EndpointIntegrationSpecification with FutureAwaits with DefaultAwaitTimeout =>

  /** Describes a [[WSClient]] that is bound to a particular [[ServerEndpoint]]. */
  @implicitNotFound("Use withAllWSEndpoints { implicit wsEndpoint: WSEndpoint => ... } to get a value")
  trait WSEndpoint {

    /** The endpoint to connect to. */
    def endpoint: ServerEndpoint

    /** The client to connect with. */
    def client: WSClient

    /**
     * Build a request to the endpoint using the given path.
     */
    def buildRequest(path: String): WSRequest = {
      // The endpoint determines scheme and port; the host is always localhost.
      client.url(s"${endpoint.scheme}://localhost:" + endpoint.port + path)
    }

    /**
     * Make a request to the endpoint using the given path.
     */
    def makeRequest(path: String): WSResponse = {
      // Blocks until the response arrives (timeout supplied by DefaultAwaitTimeout).
      await(buildRequest(path).get())
    }
  }

  /**
   * Takes a [[ServerEndpoint]], creates a matching [[WSEndpoint]], calls
   * a block of code on the client and then closes the client afterwards.
   *
   * Most users should use [[WSApplicationFactory.withAllWSEndpoints()]]
   * instead of this method.
   */
  def withWSEndpoint[A](endpoint: ServerEndpoint)(block: WSEndpoint => A): A = {
    val e = endpoint // Avoid a name clash

    // The client owns a private actor system so it can be torn down completely in close().
    val serverClient = new WSEndpoint with Closeable {
      override val endpoint = e

      private val actorSystem: ActorSystem = {
        // Raise the log level so test output isn't flooded by Akka.
        val actorConfig = Configuration(
          "akka.loglevel" -> "WARNING"
        )
        ActorSystem("WSEndpointSupport", actorConfig.underlying)
      }

      override val client: WSClient = {
        // Set up custom config to trust any SSL certificate. Unfortunately
        // even though we have the certificate information already loaded
        // we can't easily get it to our WSClient due to limitations in
        // the ssl-config library.
        val sslLooseConfig: SSLLooseConfig = SSLLooseConfig().withAcceptAnyCertificate(true)
        val sslConfig: SSLConfigSettings = SSLConfigSettings().withLoose(sslLooseConfig)
        val wsClientConfig: WSClientConfig = WSClientConfig(ssl = sslConfig)
        // Automatic request retries are disabled (maxRequestRetry = 0).
        val ahcWsClientConfig = AhcWSClientConfig(wsClientConfig = wsClientConfig, maxRequestRetry = 0)
        implicit val materializer = Materializer.matFromSystem(actorSystem)
        AhcWSClient(ahcWsClientConfig)
      }

      override def close(): Unit = {
        client.close()
        // Wait (up to 20s) for the private actor system to terminate before returning,
        // so resources don't leak across test fragments.
        val terminated: Future[Terminated] = actorSystem.terminate()
        Await.ready(terminated, Duration(20, TimeUnit.SECONDS))
      }
    }

    // Ensure the client and its actor system are released even if the block throws.
    try block(serverClient)
    finally serverClient.close()
  }

  /**
   * Implicit class that enhances [[ApplicationFactory]] with the [[withAllWSEndpoints()]] method.
   */
  implicit class WSApplicationFactory(appFactory: ApplicationFactory) {

    /**
     * Helper that creates a specs2 fragment for the server endpoints given in
     * [[allEndpointRecipes]]. Each fragment creates an application, starts a server,
     * starts a [[WSClient]] and runs the given block of code.
     *
     * {{{
     * withResult(Results.Ok("Hello")) withAllWSEndpoints {
     *   wsEndpoint: WSEndpoint =>
     *     val response = wsEndpoint.makeRequest("/")
     *     response.body must_== "Hello"
     * }
     * }}}
     */
    def withAllWSEndpoints[A: AsResult](block: WSEndpoint => A): Fragment =
      appFactory.withAllEndpoints { endpoint: ServerEndpoint =>
        withWSEndpoint(endpoint)(block)
      }
  }
}
| benmccann/playframework | core/play-integration-test/src/it/scala/play/it/test/WSEndpointSupport.scala | Scala | apache-2.0 | 4,620 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe
import it.unimi.dsi.util._
import scala.collection._
import scala.util.hashing._
/**
* Use XorShift128PlusRandomGenerator if 1024 star is too slow.
*/
final class PseudoRNG
  //extends MersenneTwister
  //extends XorShift128PlusRandomGenerator
  extends XorShift1024StarRandomGenerator
  with Serializable
  with Equatable
  with CopyableEx[PseudoRNG] {

  // NOTE(review): if ArrayEx.serialize returns Array[Byte] (as the usage in doEquals
  // suggests), then .hashCode() on the array is identity-based on the JVM, so two
  // equal generators will generally produce different hash codes even though doEquals
  // compares the serialized bytes. Consider MurmurHash3.bytesHash(...) — TODO confirm.
  override def hashCode()
  : Int = MurmurHash3.mix(super.hashCode(), ArrayEx.serialize(this).hashCode())

  override def canEqual(that: Any)
  : Boolean = that.isInstanceOf[PseudoRNG]

  // Equality compares the serialized generator state byte arrays.
  override protected def doEquals(other: Equatable)
  : Boolean = super.doEquals(other) && (other match {
    case other: PseudoRNG =>
      val bytes0 = ArrayEx.serialize(this)
      val bytes1 = ArrayEx.serialize(other)
      ArrayEx.compare(bytes0, bytes1)
    case _ =>
      false
  })

  // Deep copy via a serialization round-trip.
  override def copy
  : PseudoRNG = ArrayEx.deserialize(ArrayEx.serialize(this))

  // --------------------------------------------------------------------------
  // REAL SWITCH DOUBLE
  // ---------------------------------------------------------------------------
  /*
  @inline
  def nextReal(): Real = nextDouble

  @inline
  def nextGaussianReal(): Real = nextGaussian
  */

  // -------------------------------------------------------------------------
  // REAL SWITCH FLOAT
  // -------------------------------------------------------------------------
  ///*
  @inline
  def nextReal()
  : Real = nextFloat()

  @inline
  def nextGaussianReal()
  : Real = Real(nextGaussian())
  //*/

  // -------------------------------------------------------------------------
  // REAL SWITCH END
  // -------------------------------------------------------------------------

  /** Picks one element of the array, uniformly; the array must be non-empty. */
  @inline
  def next[T](values: Array[T])
  : T = {
    val i = nextInt(values.length)
    values(i)
  }

  /** Picks one element of the sequence, uniformly; the sequence must be non-empty. */
  @inline
  def next[T](values: Seq[T])
  : T = {
    val i = nextInt(values.length)
    values(i)
  }

  /** Bernoulli draw: true with probability `p`; requires 0 <= p <= 1. */
  @inline
  def nextBoolean(p: Real)
  : Boolean = {
    require(p >= Real.zero && p <= Real.one)
    nextReal() < p
  }

  /** Uniform draw scaled and shifted into [min, max); requires min <= max. */
  @inline
  def nextReal(min: Real, max: Real)
  : Real = {
    require(min <= max)
    nextReal() * (max - min) + min
  }

  // Range variant of the above; the range is not validated here.
  @inline
  def nextReal(range: RealRange)
  : Real = nextReal() * range.length + range.min

  /** Gaussian draw with mean `mu` and standard deviation `sigma`. */
  @inline
  def nextGaussianReal(mu: Real, sigma: Real)
  : Real = nextGaussianReal() * sigma + mu

  // TODO: Do we still need this with the new generators?
  /*
  /**
   * Uniformly samples a long integer in [0,MAX_INT]
   */
  // TODO: Already reported to main project. https://github.com/scalanlp/breeze/issues/438
  val randPositiveInt: Rand[Int] = new Rand[Int] {
    override def draw(): Int = {
      var value = rb.generator.nextInt
      if (value < 0) {
        value -= Int.MinValue
      }
      value
    }
  }

  // TODO: I already commited this to breeze. Check and remove when we upgrade. https://github.com/scalanlp/breeze/pull/427 & https://github.com/scalanlp/breeze/pull/429
  /**
   * Uniformly samples a long integer in [0,MAX_LONG]
   */
  val randLong: Rand[Long] = new Rand[Long] {
    override def draw(): Long = {
      var value = rb.generator.nextLong
      if (value < 0L) {
        value -= Long.MinValue
      }
      value
    }
  }

  /**
   * Uniformly samples a long integer in [0,n)
   */
  def randLong(n: Long): Rand[Long] = new Rand[Long] {
    override def draw(): Long = {
      var value = rb.generator.nextLong
      if (value < 0L) {
        value -= Long.MinValue
      }
      value % n
    }
  }

  /**
   * Uniformly samples a long integer in [n,m)
   */
  def randLong(n: Long, m: Long): Rand[Long] = new Rand[Long] {
    override def draw(): Long = {
      var value = rb.generator.nextLong
      if (value < 0L) {
        value -= Long.MinValue
      }
      value % (m - n) + n
    }
  }
  */

  /**
   * Bernoulli distribution view over this generator. Sampling advances this RNG,
   * hence isThreadSafe = false. p == 0.5 takes a fast path using the raw nextBoolean().
   */
  @inline
  def bernoulliDistribution(p: Real)
  : Distribution[Boolean] = {
    if (p == Real.pointFive) {
      new Distribution[Boolean] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Boolean = nextBoolean()
      }
    }
    else {
      require(p >= Real.zero && p <= Real.one)
      new Distribution[Boolean] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Boolean = nextReal() < p
      }
    }
  }

  /**
   * Bernoulli distribution that maps true/false draws to the two given values.
   * Same fast path for p == 0.5 as above.
   */
  @inline
  def bernoulliDistribution[T](p: Real, trueValue: T, falseValue: T)
  : Distribution[T] = {
    if (p == Real.pointFive) {
      new Distribution[T] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : T = if (nextBoolean()) trueValue else falseValue
      }
    }
    else {
      require(p >= Real.zero && p <= Real.one)
      new Distribution[T] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : T = if (nextReal() < p) trueValue else falseValue
      }
    }
  }

  /** Uniform distribution over the generator's native real range. */
  @inline
  def uniformDistribution()
  : Distribution[Real] = {
    new Distribution[Real] {
      override val isThreadSafe
      : Boolean = false
      override def sample()
      : Real = nextReal()
    }
  }

  @inline
  def uniformDistribution(min: Real, max: Real)
  : Distribution[Real] = uniformDistribution(RealRange(min, max))

  /**
   * Uniform distribution scaled into `range`. The [0, 1) case skips the
   * scale/offset arithmetic entirely.
   */
  @inline
  def uniformDistribution(range: RealRange)
  : Distribution[Real] = {
    if (range.min == Real.zero && range.max == Real.one) {
      new Distribution[Real] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Real = nextReal()
      }
    }
    else {
      // Hoist the affine transform parameters out of the sampling loop.
      val offset = range.min
      val scale = range.length
      new Distribution[Real] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Real = nextReal() * scale + offset
      }
    }
  }

  /** Standard Gaussian distribution view over this generator. */
  @inline
  def gaussianDistribution()
  : Distribution[Real] = {
    new Distribution[Real] {
      override val isThreadSafe
      : Boolean = false
      override def sample()
      : Real = nextGaussianReal()
    }
  }

  /**
   * Gaussian distribution with the given mean and standard deviation.
   * The (0, 1) case skips the scale/shift arithmetic.
   */
  @inline
  def gaussianDistribution(mu: Real, sigma: Real)
  : Distribution[Real] = {
    if (mu == Real.zero && sigma == Real.one) {
      new Distribution[Real] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Real = nextGaussianReal()
      }
    }
    else {
      new Distribution[Real] {
        override val isThreadSafe
        : Boolean = false
        override def sample()
        : Real = nextGaussianReal(mu, sigma)
      }
    }
  }
}
/**
* Enhanced version of the Rand object that supports seeding.
*/
object PseudoRNG {

  /** Shared default generator instance. */
  final val default
  : PseudoRNG = apply()

  /** Creates a new generator. */
  final def apply()
  : PseudoRNG = new PseudoRNG

  /** Creates a new generator and seeds it with `seed`. */
  final def apply(seed: Long)
  : PseudoRNG = {
    val generator = new PseudoRNG
    generator.setSeed(seed)
    generator
  }
}
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter.index
import org.locationtech.geomesa.filter.FilterHelper
import org.locationtech.geomesa.utils.index.SpatialIndex
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
trait SpatialIndexSupport {

  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  def sft: SimpleFeatureType

  def index: SpatialIndex[SimpleFeature]

  /**
   * Query based on a geotools filter
   *
   * @param filter filter
   * @return
   */
  def query(filter: Filter): Iterator[SimpleFeature] = {
    if (filter == Filter.INCLUDE) {
      // Trivially-true filter: return everything in the index.
      index.query()
    } else {
      val geometries = FilterHelper.extractGeometries(filter, sft.getGeomField, intersect = false)
      if (geometries.isEmpty) {
        // No spatial predicate could be extracted: full scan with a post-filter.
        index.query().filter(filter.evaluate)
      } else {
        // Union the bounding boxes of all extracted geometries, then evaluate the full
        // filter against the candidates. Note: JTS's getEnvelopeInternal returns a copy
        // of the cached envelope (per its javadoc), so expanding the first one in place
        // does not mutate any geometry.
        val bounds = geometries.values
          .map(_.getEnvelopeInternal)
          .reduceLeft { (acc, envelope) => acc.expandToInclude(envelope); acc }
        index.query(bounds.getMinX, bounds.getMinY, bounds.getMaxX, bounds.getMaxY).filter(filter.evaluate)
      }
    }
  }
}
| aheyne/geomesa | geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/index/SpatialIndexSupport.scala | Scala | apache-2.0 | 1,577 |
/**
* Copyright 2013-2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.cascade.common.tests.properties
import org.specs2._
import com.paypal.cascade.common.properties.BuildProperties
import com.paypal.cascade.common.tests.util.CommonImmutableSpecificationContext
/**
* Tests [[com.paypal.cascade.common.properties.BuildProperties]]
*/
class BuildPropertiesSpecs extends Specification { override def is = s2"""
  BuildProperties loads the build.properties files.
  Get should:
    get the value when the it's in the file ${GetValue().ok}
    return None when the value isn't in the file ${GetValue().notInFile}
    return None when the file wasn't loaded ${GetValue().fileNotLoaded}
  """

  // Examples for BuildProperties.get; each loads a classpath resource.
  // The spec assumes /test_build.properties exists and contains test=foo.
  case class GetValue() extends CommonImmutableSpecificationContext {

    // The key exists in the loaded file.
    def ok = apply {
      val bp = new BuildProperties("/test_build.properties")
      bp.get("test") must beSome("foo")
    }

    // The file loads, but the key is absent.
    def notInFile = apply {
      val bp = new BuildProperties("/test_build.properties")
      bp.get("not.in.file") must beNone
    }

    // The classpath resource does not exist at all.
    def fileNotLoaded = apply {
      val bp = new BuildProperties("/not.a.file")
      bp.get("test") must beNone
    }
  }
}
| 2rs2ts/cascade | common/src/test/scala/com/paypal/cascade/common/tests/properties/BuildPropertiesSpecs.scala | Scala | apache-2.0 | 1,737 |
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api
import com.mohiva.play.silhouette.api.exceptions.{ NotAuthenticatedException, NotAuthorizedException }
import com.mohiva.play.silhouette.api.services.AuthenticatorResult
import com.mohiva.play.silhouette.api.util.DefaultEndpointHandler
import play.api.Play
import play.api.i18n.{ MessagesApi, I18nSupport }
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc._
import scala.concurrent.Future
import scala.language.higherKinds
/**
* Provides the mechanisms that can be used to protect endpoints and retrieve the current user
* if available.
*
* {{{
* class MyController(env: Environment[User, CookieAuthenticator])
* extends Silhouette[User, CookieAuthenticator] {
*
* def protectedAction = SecuredAction { implicit request =>
* Ok("Hello %s".format(request.identity.fullName))
* }
* }
* }}}
*
* @tparam I The type of the identity.
* @tparam A The type of the authenticator.
*/
trait Silhouette[I <: Identity, A <: Authenticator] extends Controller with Logger with I18nSupport {
/**
* Defines the messages API.
*/
val messagesApi: MessagesApi = env.messagesApi
/**
* Provides an `extract` method on an `Either` which contains the same types.
*/
private implicit class ExtractEither[T](r: Either[T, T]) {
  /** Returns the wrapped value regardless of which side of the Either holds it. */
  def extract: T = r match {
    case Left(value) => value
    case Right(value) => value
  }
}
/**
* Gets the environment needed to instantiate a Silhouette controller.
*
* @return The environment needed to instantiate a Silhouette controller.
*/
protected def env: Environment[I, A]
/**
* Implement this to return a result when the user is not authenticated.
*
* As defined by RFC 2616, the status code of the response should be 401 Unauthorized.
*
* @param request The request header.
* @return The result to send to the client.
*/
protected def onNotAuthenticated(request: RequestHeader): Option[Future[Result]] = None
/**
* Implement this to return a result when the user is authenticated but not authorized.
*
* As defined by RFC 2616, the status code of the response should be 403 Forbidden.
*
* @param request The request header.
* @return The result to send to the client.
*/
protected def onNotAuthorized(request: RequestHeader): Option[Future[Result]] = None
/**
* Default exception handler for silhouette exceptions which translates an exception into
* the appropriate result.
*
* Translates an ForbiddenException into a 403 Forbidden result and an UnauthorizedException
* into a 401 Unauthorized result.
*
* @param request The request header.
* @return The result to send to the client based on the exception.
*/
protected def exceptionHandler(implicit request: RequestHeader): PartialFunction[Throwable, Future[Result]] = {
  // Authentication failure -> 401 handling (see handleNotAuthenticated).
  case e: NotAuthenticatedException =>
    logger.info(e.getMessage, e)
    handleNotAuthenticated
  // Authorization failure -> 403 handling (see handleNotAuthorized).
  case e: NotAuthorizedException =>
    logger.info(e.getMessage, e)
    handleNotAuthorized
}
/**
* Produces a result indicating that the user must provide authentication before
* the requested endpoint can be accessed.
*
* This should be called when the user is not authenticated.
* This indicates a temporary condition. The user can authenticate and repeat the request.
*
* As defined by RFC 2616, the status code of the response will be 401 Unauthorized.
*
* @param request The request header.
* @return The result to send to the client if the user isn't authenticated.
*/
private def handleNotAuthenticated(implicit request: RequestHeader): Future[Result] = {
  logger.debug("[Silhouette] Unauthenticated user trying to access '%s'".format(request.uri))
  // Precedence: the controller-level onNotAuthenticated override, then a global
  // SecuredSettings handler (if the app's Global implements it), then the library default.
  onNotAuthenticated(request).orElse {
    Play.current.global match {
      case s: SecuredSettings => s.onNotAuthenticated(request, request2Messages)
      case _ => None
    }
  }.getOrElse(DefaultEndpointHandler.handleNotAuthenticated)
}
/**
* Produces a result indicating that the request will be forbidden because the authenticated
* user is not authorized to access the requested endpoint.
*
* This should be called when the user is authenticated but authorization failed.
* This indicates a permanent situation. Repeating the request with the same authenticated
* user will produce the same response.
*
* As defined by RFC 2616, the status code of the response will be 403 Forbidden.
*
* @param request The request header.
* @return The result to send to the client if the user isn't authorized.
*/
private def handleNotAuthorized(implicit request: RequestHeader): Future[Result] = {
  logger.debug("[Silhouette] Unauthorized user trying to access '%s'".format(request.uri))
  // Precedence: the controller-level onNotAuthorized override, then a global
  // SecuredSettings handler (if the app's Global implements it), then the library default.
  onNotAuthorized(request).orElse {
    Play.current.global match {
      case s: SecuredSettings => s.onNotAuthorized(request, request2Messages)
      case _ => None
    }
  }.getOrElse(DefaultEndpointHandler.handleNotAuthorized)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Base implementations for request handlers
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* A result which can transport a result as also additional data through the request handler process.
*
* @param result A Play Framework result.
* @param data Additional data to transport in the result.
* @tparam T The type of the data.
*/
case class HandlerResult[+T](result: Result, data: Option[T] = None)
/**
* A builder for building request handlers.
*/
trait RequestHandlerBuilder[+R[_]] {

  /**
   * Constructs a request handler with default content.
   *
   * @param block The block of code to invoke.
   * @param request The current request.
   * @tparam T The type of the data included in the handler result.
   * @return A handler result.
   */
  final def apply[T](block: R[AnyContent] => Future[HandlerResult[T]])(implicit request: Request[AnyContent]): Future[HandlerResult[T]] = {
    invokeBlock(block)
  }

  /**
   * Constructs a request handler with the content of the given request.
   *
   * @param request The current request.
   * @param block The block of code to invoke.
   * @tparam B The type of the request body.
   * @tparam T The type of the data included in the handler result.
   * @return A handler result.
   */
  final def apply[B, T](request: Request[B])(block: R[B] => Future[HandlerResult[T]]): Future[HandlerResult[T]] = {
    invokeBlock(block)(request)
  }

  /**
   * Invoke the block.
   *
   * This is the main method that a request handler has to implement.
   *
   * @param request The current request.
   * @param block The block of code to invoke.
   * @tparam B The type of the request body.
   * @tparam T The type of the data included in the handler result.
   * @return A handler result.
   */
  protected def invokeBlock[B, T](block: R[B] => Future[HandlerResult[T]])(implicit request: Request[B]): Future[HandlerResult[T]]

  /**
   * Handles a block for an authenticator.
   *
   * Invokes the block with the authenticator and handles the result. See the
   * `handleInitializedAuthenticator` and `handleUninitializedAuthenticator` methods
   * to see how the different authenticator types will be handled.
   *
   * @param authenticator An already initialized authenticator on the left and a new authenticator on the right.
   * @param block The block to handle with the authenticator.
   * @param request The current request header.
   * @return A handler result.
   */
  protected def handleBlock[T](authenticator: Either[A, A], block: A => Future[HandlerResult[T]])(implicit request: RequestHeader) = {
    authenticator match {
      case Left(a) => handleInitializedAuthenticator(a, block)
      case Right(a) => handleUninitializedAuthenticator(a, block)
    }
  }

  /**
   * Handles the authentication of an identity.
   *
   * As first it checks for authenticators in requests, then it tries to authenticate against a request provider.
   * This method marks the returned authenticators by returning already initialized authenticators on the
   * left and new authenticators on the right. All new authenticators must be initialized later in the flow,
   * with the result returned from the invoked block.
   *
   * @param request The current request.
   * @tparam B The type of the request body.
   * @return A tuple which consists of (maybe the existing authenticator on the left or a
   *         new authenticator on the right -> maybe the identity).
   */
  protected def handleAuthentication[B](implicit request: Request[B]): Future[(Option[Either[A, A]], Option[I])] = {
    env.authenticatorService.retrieve.flatMap {
      // A valid authenticator was found so we retrieve also the identity
      case Some(a) if a.isValid => env.identityService.retrieve(a.loginInfo).map(i => Some(Left(a)) -> i)
      // An invalid authenticator was found so we needn't retrieve the identity
      case Some(a) if !a.isValid => Future.successful(Some(Left(a)) -> None)
      // No authenticator was found so we try to authenticate with a request provider
      case None => handleRequestProviderAuthentication.flatMap {
        // Authentication was successful, so we retrieve the identity and create a new authenticator for it
        case Some(loginInfo) => env.identityService.retrieve(loginInfo).flatMap { i =>
          env.authenticatorService.create(loginInfo).map(a => Some(Right(a)) -> i)
        }
        // No identity and no authenticator was found
        case None => Future.successful(None -> None)
      }
    }
  }

  /**
   * Handles already initialized authenticators.
   *
   * The authenticator handled by this method was found in the current request. So it was initialized on
   * a previous request and must now be updated if it was touched and no authenticator result was found.
   *
   * @param authenticator The authenticator to handle.
   * @param block The block to handle with the authenticator.
   * @param request The current request header.
   * @return A handler result.
   */
  private def handleInitializedAuthenticator[T](authenticator: A, block: A => Future[HandlerResult[T]])(implicit request: RequestHeader) = {
    val auth = env.authenticatorService.touch(authenticator)
    block(auth.extract).flatMap {
      // The block already produced an authenticator result; pass it through untouched
      case hr @ HandlerResult(_: AuthenticatorResult, _) => Future.successful(hr)
      case hr @ HandlerResult(pr, _) => auth match {
        // Authenticator was touched so we update the authenticator and maybe the result
        case Left(a) => env.authenticatorService.update(a, pr).map(r => hr.copy(r))
        // Authenticator was not touched so we return the original result
        case Right(_) => Future.successful(hr)
      }
    }
  }

  /**
   * Handles not initialized authenticators.
   *
   * The authenticator handled by this method was newly created after authentication with a request provider.
   * So it must be initialized with the result of the invoked block if no authenticator result was found.
   *
   * @param authenticator The authenticator to handle.
   * @param block The block to handle with the authenticator.
   * @param request The current request header.
   * @return A handler result.
   */
  private def handleUninitializedAuthenticator[T](authenticator: A, block: A => Future[HandlerResult[T]])(implicit request: RequestHeader) = {
    block(authenticator).flatMap {
      // The block already produced an authenticator result; pass it through untouched
      case hr @ HandlerResult(_: AuthenticatorResult, _) => Future.successful(hr)
      case hr @ HandlerResult(pr, _) =>
        // Initialize the new authenticator and embed its value into the result
        env.authenticatorService.init(authenticator).flatMap { value =>
          env.authenticatorService.embed(value, pr)
        }.map(r => hr.copy(r))
    }
  }

  /**
   * Handles the authentication with the request providers.
   *
   * Silhouette supports chaining of request providers. So if more than one request provider is defined
   * it tries to authenticate until one provider returns an identity. The order of the providers
   * isn't guaranteed.
   *
   * @param request The current request.
   * @tparam B The type of the request body.
   * @return Some identity or None if authentication was not successful.
   */
  private def handleRequestProviderAuthentication[B](implicit request: Request[B]): Future[Option[LoginInfo]] = {
    // Try each provider in turn until one yields a login info.
    // Bug fix: the previous implementation matched `case h :: t`, which only matches
    // scala.List; any other Seq implementation of env.requestProviders (e.g. Vector)
    // would throw a MatchError. headOption/tail work for every Seq.
    def auth(providers: Seq[RequestProvider]): Future[Option[LoginInfo]] = {
      providers.headOption match {
        case None => Future.successful(None)
        case Some(h) => h.authenticate(request).flatMap {
          case Some(i) => Future.successful(Some(i))
          // auth(empty) yields None, so no explicit isEmpty check is needed
          case None => auth(providers.tail)
        }
      }
    }
    auth(env.requestProviders)
  }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Implementations for secured actions and requests
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* A request that only allows access if an identity is authorized.
*
* @param identity The identity implementation.
* @param authenticator The authenticator implementation.
* @param request The current request.
* @tparam B The type of the request body.
*/
case class SecuredRequest[B](identity: I, authenticator: A, request: Request[B]) extends WrappedRequest(request)
/**
* Handles secured requests.
*
* @param authorize An Authorize object that checks if the user is authorized to invoke the handler.
*/
class SecuredRequestHandlerBuilder(authorize: Option[Authorization[I]] = None) extends RequestHandlerBuilder[SecuredRequest] {

  /**
   * Invokes the block.
   *
   * Publishes an event for each outcome (authenticated / not authorized / not
   * authenticated) before producing the result.
   *
   * @param request The current request.
   * @param block The block of code to invoke.
   * @tparam B The type of the request body.
   * @tparam T The type of the data included in the handler result.
   * @return A handler result.
   */
  protected def invokeBlock[B, T](block: SecuredRequest[B] => Future[HandlerResult[T]])(implicit request: Request[B]): Future[HandlerResult[T]] = {
    withAuthorization(handleAuthentication).flatMap {
      // A user is both authenticated and authorized. The request will be granted
      case (Some(authenticator), Some(identity), Some(authorized)) if authorized =>
        env.eventBus.publish(AuthenticatedEvent(identity, request, request2Messages))
        handleBlock(authenticator, a => block(SecuredRequest(identity, a, request)))
      // A user is authenticated but not authorized. The request will be forbidden
      case (Some(authenticator), Some(identity), _) =>
        env.eventBus.publish(NotAuthorizedEvent(identity, request, request2Messages))
        handleBlock(authenticator, _ => handleNotAuthorized(request).map(r => HandlerResult(r)))
      // An authenticator but no user was found. The request will ask for authentication and the authenticator will be discarded
      case (Some(authenticator), None, _) =>
        env.eventBus.publish(NotAuthenticatedEvent(request, request2Messages))
        for {
          result <- handleNotAuthenticated(request)
          discardedResult <- env.authenticatorService.discard(authenticator.extract, result)
        } yield HandlerResult(discardedResult)
      // No authenticator and no user was found. The request will ask for authentication
      case _ =>
        env.eventBus.publish(NotAuthenticatedEvent(request, request2Messages))
        handleNotAuthenticated(request).map(r => HandlerResult(r))
    }
  }

  /**
   * Adds the authorization status to the authentication result.
   *
   * @param result The authentication result.
   * @param request The current request header.
   * @return The authentication result with the additional authorization status.
   */
  private def withAuthorization(result: Future[(Option[Either[A, A]], Option[I])])(implicit request: RequestHeader) = {
    result.flatMap {
      // Identity present: evaluate authorization (no `authorize` given => authorized).
      case (a, Some(i)) =>
        authorize.map(_.isAuthorized(i)).getOrElse(Future.successful(true)).map(b => (a, Some(i), Some(b)))
      // No identity: authorization status is left undecided (None).
      case (a, i) =>
        Future.successful((a, i, None))
    }
  }
}
/**
* A secured request handler.
*/
object SecuredRequestHandler extends SecuredRequestHandlerBuilder {

  /**
   * Creates a secured action handler.
   *
   * The object itself handles the no-authorization case (it extends the builder
   * with `authorize = None`); this apply builds one with an authorization check.
   *
   * @param authorize An Authorize object that checks if the user is authorized to invoke the action.
   * @return A secured action handler.
   */
  def apply(authorize: Authorization[I]) = new SecuredRequestHandlerBuilder(Some(authorize))
}
/**
* A builder for secured actions.
*
* Requests are subject to authentication logic and, optionally, authorization.
* HTTP status codes 401 (Unauthorized) and 403 (Forbidden) will be returned when appropriate.
*
* For reference see:
* [[http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html RFC 2616]],
* [[http://danielirvine.com/blog/2011/07/18/understanding-403-forbidden/ Understanding 403 Forbidden]],
* [[http://stackoverflow.com/questions/3297048/403-forbidden-vs-401-unauthorized-http-responses/6937030#6937030 403 Forbidden vs 401 Unauthorized HTTP responses]].
*
 * @param authorize An Authorize object that checks if the user is authorized to invoke the action.
*/
class SecuredActionBuilder(authorize: Option[Authorization[I]] = None) extends ActionBuilder[SecuredRequest] {

  /**
   * Invokes the block.
   *
   * Delegates to [[SecuredRequestHandler]], wrapping the action's result in a
   * [[HandlerResult]] and unwrapping it again afterwards.
   *
   * @param request The current request.
   * @param block The block of code to invoke.
   * @tparam B The type of the request body.
   * @return A handler result.
   */
  def invokeBlock[B](request: Request[B], block: SecuredRequest[B] => Future[Result]) = {
    val handlerBlock = (securedRequest: SecuredRequest[B]) => block(securedRequest).map(result => HandlerResult(result))
    val handled = authorize.fold(SecuredRequestHandler(request)(handlerBlock)) { authorization =>
      SecuredRequestHandler(authorization)(request)(handlerBlock)
    }
    handled.map(_.result).recoverWith(exceptionHandler(request))
  }
}
/**
* A secured action.
*
* If the user is not authenticated or not authorized, the request is forwarded to
* the [[com.mohiva.play.silhouette.api.Silhouette.onNotAuthenticated]] or
* the [[com.mohiva.play.silhouette.api.Silhouette.onNotAuthorized]] methods.
*
* If these methods are not implemented, then
* the [[com.mohiva.play.silhouette.api.SecuredSettings.onNotAuthenticated]] or
* the [[com.mohiva.play.silhouette.api.SecuredSettings.onNotAuthorized]] methods
* will be called as fallback.
*
* If the [[com.mohiva.play.silhouette.api.SecuredSettings]] trait isn't implemented,
* a default message will be displayed.
*/
object SecuredAction extends SecuredActionBuilder {

  /**
   * Creates a secured action.
   *
   * The object itself handles the no-authorization case (it extends the builder
   * with `authorize = None`); this apply builds one with an authorization check.
   *
   * @param authorize An Authorize object that checks if the user is authorized to invoke the action.
   * @return A secured action builder.
   */
  def apply(authorize: Authorization[I]) = new SecuredActionBuilder(Some(authorize))
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Implementations for user aware actions and requests
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * A request that adds the identity and the authenticator for the current call.
 *
 * Unlike [[SecuredRequest]], both values are optional: the wrapped block is
 * invoked whether or not the caller is authenticated.
 *
 * @param identity Some identity implementation if authentication was successful, None otherwise.
 * @param authenticator Some authenticator implementation if authentication was successful, None otherwise.
 * @param request The current request.
 * @tparam B The type of the request body.
 */
case class UserAwareRequest[B](identity: Option[I], authenticator: Option[A], request: Request[B]) extends WrappedRequest(request)
/**
 * A handler that adds the current user to the request if it's available.
 */
object UserAwareRequestHandler extends RequestHandlerBuilder[UserAwareRequest] {

  /**
   * Invokes the block.
   *
   * @param block The block of code to invoke.
   * @param request The current request.
   * @tparam B The type of the request body.
   * @tparam T The type of the data included in the handler result.
   * @return A handler result.
   */
  protected def invokeBlock[B, T](block: UserAwareRequest[B] => Future[HandlerResult[T]])(implicit request: Request[B]) = {
    handleAuthentication.flatMap {
      // A valid authenticator was found; an identity may or may not exist
      case (Some(authenticator), identity) if authenticator.extract.isValid =>
        handleBlock(authenticator, a => block(UserAwareRequest(identity, Some(a), request)))
      // An invalid authenticator was found. The authenticator will be discarded
      case (Some(authenticator), identity) if !authenticator.extract.isValid =>
        block(UserAwareRequest(None, None, request)).flatMap {
          case hr @ HandlerResult(pr, _) =>
            // Propagate the result returned by the discard operation (it may carry
            // e.g. a discarded cookie). The original `hr.copy(pr)` ignored `r`,
            // silently dropping whatever the authenticator service did to the result.
            env.authenticatorService.discard(authenticator.extract, pr).map(r => hr.copy(r))
        }
      // No authenticator and no user was found
      case _ =>
        block(UserAwareRequest(None, None, request))
    }
  }
}
/**
 * An action that adds the current user to the request if it's available.
 */
object UserAwareAction extends ActionBuilder[UserAwareRequest] {

  /**
   * Invokes the block.
   *
   * @param request The current request.
   * @param block The block of code to invoke.
   * @tparam B The type of the request body.
   * @return The result to send to the client.
   */
  def invokeBlock[B](request: Request[B], block: UserAwareRequest[B] => Future[Result]) = {
    val handled = UserAwareRequestHandler(request) { userAwareRequest =>
      block(userAwareRequest).map(result => HandlerResult(result))
    }
    handled.map(_.result).recoverWith(exceptionHandler(request))
  }
}
}
| rfranco/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/Silhouette.scala | Scala | apache-2.0 | 23,266 |
package controllers
import game.Game
import play.api._
import play.api.mvc._
import security.Secured
import user.User
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object Application extends Controller with Secured {

  /** Lists the games of the authenticated user. */
  def index = withUserFuture { (user, token) => implicit request =>
    Game.getGamesForUser(user).map(games => Ok(views.html.index(user, games)))
  }

  /** Renders the login form. */
  def login = Action {
    Ok(views.html.login())
  }

  /**
   * Handles the login form submission: looks up the user by the submitted
   * "name" field and, on success, stores the user id in the session.
   */
  def loginPost = Action.async { implicit request =>
    // Extract the first submitted "name" value, if any. `headOption` avoids the
    // NoSuchElementException the original `seq.head` would throw on an empty
    // value list, and the for-comprehension flattens the nested matches.
    val submittedName = for {
      form <- request.body.asFormUrlEncoded
      values <- form.get("name")
      name <- values.headOption
    } yield name

    submittedName match {
      case Some(name) =>
        User.getUserByName(name).map {
          case Some(user) =>
            Redirect(routes.Application.index)
              .withSession(Security.username -> user.id)
          case _ =>
            Redirect(routes.Application.login())
        }
      case None =>
        // No form body or no "name" field: back to the login page.
        // Future.successful avoids scheduling work for an already-computed value.
        Future.successful(Redirect(routes.Application.login()))
    }
  }
} | tomasharkema/Drie-en.Scala | app/controllers/Application.scala | Scala | mit | 1,106 |
package org.beaucatcher.mongo.jdriver
import org.beaucatcher.bson._
import org.beaucatcher.mongo._
import org.junit.Assert._
import org.junit._
class CollectionTest
    extends AbstractCollectionTest
    with JavaDriverTestContextProvider {

  /** Sanity check that the implicit context is backed by the Java driver. */
  @Test
  def usingExpectedDriver(): Unit = {
    val driverClassName = implicitly[Context].driver.getClass.getSimpleName
    assertTrue("expecting to use Java driver", driverClassName.contains("JavaDriver"))
  }
}
| havocp/beaucatcher | mongo-test/src/test/scala/org/beaucatcher/mongo/jdriver/CollectionTest.scala | Scala | apache-2.0 | 439 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.iterators
import java.nio.ByteBuffer
import java.util
import java.util.{Collection => jCollection, Map => jMap}
import com.typesafe.scalalogging.slf4j.{Logger, Logging}
import org.apache.accumulo.core.client.IteratorSetting
import org.apache.accumulo.core.data.{ByteSequence, Key, Range, Value}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
import org.apache.commons.vfs2.impl.VFSClassLoader
import org.geotools.factory.GeoTools
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.features.kryo.{KryoBufferSimpleFeature, KryoFeatureSerializer}
import org.locationtech.geomesa.features.nio.{AttributeAccessor, LazySimpleFeature}
import org.locationtech.geomesa.filter.factory.FastFilterFactory
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import scala.reflect.ClassTag
trait LazyFilterTransformIterator extends SortedKeyValueIterator[Key, Value] with Logging {

  import LazyFilterTransformIterator._

  // Mutable state: Accumulo instantiates iterators reflectively and then
  // configures them via init(), so these cannot be constructor parameters.
  var sft: SimpleFeatureType = null
  var source: SortedKeyValueIterator[Key, Value] = null
  var filter: Filter = null
  var transform: String = null
  var transformSchema: SimpleFeatureType = null
  // Reused buffer for transformed values, to avoid per-row allocation.
  var topValue: Value = new Value()

  override def init(src: SortedKeyValueIterator[Key, Value],
                    options: jMap[String, String],
                    env: IteratorEnvironment): Unit = {
    LazyFilterTransformIterator.initClassLoader(logger)
    this.source = src.deepCopy(env)
    sft = SimpleFeatureTypes.createType("test", options.get(SFT_OPT))
    // Filter and transform are both optional; absent options leave null fields.
    filter = Option(options.get(CQL_OPT)).map(FastFilterFactory.toFilter).orNull
    transform = Option(options.get(TRANSFORM_DEFINITIONS_OPT)).orNull
    transformSchema = Option(options.get(TRANSFORM_SCHEMA_OPT)).map(SimpleFeatureTypes.createType("", _)).orNull
  }

  // Hooks implemented by the concrete (Kryo/NIO) subclasses:
  // the reusable feature backing filter evaluation, (re)binding it to raw bytes,
  // and materializing the transformed value into topValue.
  def sf: SimpleFeature
  def initReusableFeature(buf: Array[Byte]): Unit
  def setTransform(): Unit

  override def next(): Unit = {
    source.next()
    findTop()
  }

  // Advances the source until the current value passes the filter (or the
  // source is exhausted). A null filter accepts everything.
  def findTop(): Unit = {
    var found = false
    while (!found && source.hasTop) {
      initReusableFeature(source.getTopValue.get())
      if (filter == null || filter.evaluate(sf)) {
        found = true
      } else {
        source.next()
      }
    }
  }

  override def hasTop: Boolean = source.hasTop
  override def getTopKey: Key = source.getTopKey
  override def getTopValue: Value =
    if (transform == null) {
      source.getTopValue
    } else {
      setTransform()
      topValue
    }

  override def seek(range: Range, columnFamilies: jCollection[ByteSequence], inclusive: Boolean): Unit = {
    source.seek(range, columnFamilies, inclusive)
    findTop()
  }

  // NOTE(review): deliberately unimplemented (throws NotImplementedError) —
  // confirm Accumulo never deep-copies this iterator in the deployments used.
  override def deepCopy(env: IteratorEnvironment): SortedKeyValueIterator[Key, Value] = ???
}
class KryoLazyFilterTransformIterator extends LazyFilterTransformIterator {

  // Serializer and the single feature instance reused for every row.
  private var serializer: KryoFeatureSerializer = null
  private var feature: KryoBufferSimpleFeature = null

  override def sf: SimpleFeature = feature

  override def init(source: SortedKeyValueIterator[Key, Value],
                    options: jMap[String, String],
                    env: IteratorEnvironment): Unit = {
    super.init(source, options, env)
    serializer = new KryoFeatureSerializer(sft)
    feature = serializer.getReusableFeature
    // Only wire up transforms when both the definitions and the target schema exist.
    for (defs <- Option(transform); schema <- Option(transformSchema)) {
      feature.setTransforms(defs, schema)
    }
  }

  override def initReusableFeature(buf: Array[Byte]): Unit = feature.setBuffer(buf)

  override def setTransform(): Unit = topValue.set(feature.transform())
}
class NIOLazyFilterTransformIterator extends LazyFilterTransformIterator {

  private var attributeAccessors: IndexedSeq[AttributeAccessor[_ <: AnyRef]] = null
  private var feature: LazySimpleFeature = null

  override def sf: SimpleFeature = feature

  override def init(source: SortedKeyValueIterator[Key, Value],
                    options: util.Map[String, String],
                    env: IteratorEnvironment): Unit = {
    super.init(source, options, env)
    attributeAccessors = AttributeAccessor.buildSimpleFeatureTypeAttributeAccessors(sft)
    feature = new LazySimpleFeature("", sft, attributeAccessors, null)
  }

  override def initReusableFeature(buf: Array[Byte]): Unit = feature.setBuf(ByteBuffer.wrap(buf))

  // NOTE(review): transforms are not applied in the NIO path yet — the raw
  // source value is passed through unchanged (TODO carried over from original).
  override def setTransform(): Unit = topValue.set(source.getTopValue.get()) // TODO
}
object LazyFilterTransformIterator {

  // Option keys used to pass configuration to the iterator through its IteratorSetting.
  val SFT_OPT = "sft"
  val CQL_OPT = "cql"
  val TRANSFORM_SCHEMA_OPT = "tsft"
  val TRANSFORM_DEFINITIONS_OPT = "tdefs"

  /**
   * Builds the Accumulo IteratorSetting for a concrete iterator implementation T.
   * At least one of a filter or a transform must be supplied.
   */
  def configure[T <: LazyFilterTransformIterator](sft: SimpleFeatureType,
                                                  filter: Option[Filter],
                                                  transform: Option[(String, SimpleFeatureType)],
                                                  priority: Int)(implicit ct: ClassTag[T]) = {
    assert(filter.isDefined || transform.isDefined, "No options configured")
    val is = new IteratorSetting(priority, "featurefilter", ct.runtimeClass.getCanonicalName)
    is.addOption(SFT_OPT, SimpleFeatureTypes.encodeType(sft))
    filter.foreach(f => is.addOption(CQL_OPT, ECQL.toCQL(f)))
    transform.foreach { case (tdef, tsft) =>
      is.addOption(TRANSFORM_DEFINITIONS_OPT, tdef)
      is.addOption(TRANSFORM_SCHEMA_OPT, SimpleFeatureTypes.encodeType(tsft))
    }
    is
  }

  // Guards the one-time classloader setup below; initClassLoader is synchronized.
  private var initialized = false

  // Registers the geomesa jars with GeoTools' classloader when running inside
  // Accumulo's VFS classloader; a no-op for any other classloader.
  def initClassLoader(log: Logger) = synchronized {
    if (!initialized) {
      try {
        // NOTE(review): log is used unguarded here but null-checked below — a
        // null Logger would NPE on this line before reaching those checks.
        log.trace("Initializing classLoader")
        // locate the geomesa-distributed-runtime jar
        val cl = this.getClass.getClassLoader
        cl match {
          case vfsCl: VFSClassLoader =>
            var url = vfsCl.getFileObjects.map(_.getURL).filter {
              _.toString.contains("geomesa-distributed-runtime")
            }.head
            if (log != null) log.debug(s"Found geomesa-distributed-runtime at $url")
            var u = java.net.URLClassLoader.newInstance(Array(url), vfsCl)
            GeoTools.addClassLoader(u)
            url = vfsCl.getFileObjects.map(_.getURL).filter {
              _.toString.contains("geomesa-feature")
            }.head
            if (log != null) log.debug(s"Found geomesa-feature at $url")
            u = java.net.URLClassLoader.newInstance(Array(url), vfsCl)
            GeoTools.addClassLoader(u)
          case _ =>
        }
      } catch {
        case t: Throwable =>
          if(log != null) log.error("Failed to initialize GeoTools' ClassLoader ", t)
      } finally {
        // Marked initialized even on failure so the (failing) setup is not retried.
        initialized = true
      }
    }
  }
} | mcharles/geomesa | geomesa-accumulo/geomesa-iterators/geomesa-iterators-feature-filter/src/main/scala/org/locationtech/geomesa/iterators/LazySimpleFeatureFilteringIterator.scala | Scala | apache-2.0 | 7,238 |
package models
import play.api.mvc.{Request, WrappedRequest}
case class LoggedRequest[A](
  email: String,
  request: Request[A]
) extends WrappedRequest[A](request) {
  // The local part of the email (everything before '@'). When no '@' is present,
  // indexOf yields -1 and take(-1) returns "" — same as the original slice(0, -1).
  def trigram = email.take(email.indexOf('@'))
}
| denis-mludek/zencroissants | server/app/models/LoggedRequest.scala | Scala | mit | 223 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import scala.collection.JavaConversions._
import org.I0Itec.zkclient._
import joptsimple._
import java.util.Properties
import java.util.Random
import java.io.PrintStream
import kafka.message._
import kafka.serializer._
import kafka.utils._
import kafka.metrics.KafkaMetricsReporter
/**
* Consumer that dumps messages out to standard out.
*
*/
object ConsoleConsumer extends Logging {
def main(args: Array[String]) {
  // ---- Command line option definitions -------------------------------------
  val parser = new OptionParser
  val topicIdOpt = parser.accepts("topic", "The topic id to consume on.")
    .withRequiredArg
    .describedAs("topic")
    .ofType(classOf[String])
  val whitelistOpt = parser.accepts("whitelist", "Whitelist of topics to include for consumption.")
    .withRequiredArg
    .describedAs("whitelist")
    .ofType(classOf[String])
  val blacklistOpt = parser.accepts("blacklist", "Blacklist of topics to exclude from consumption.")
    .withRequiredArg
    .describedAs("blacklist")
    .ofType(classOf[String])
  val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
    "Multiple URLS can be given to allow fail-over.")
    .withRequiredArg
    .describedAs("urls")
    .ofType(classOf[String])
  // A random group id is generated when none is given; see the shutdown hook
  // below, which cleans up zookeeper state for such ad-hoc groups.
  val groupIdOpt = parser.accepts("group", "The group id to consume on.")
    .withRequiredArg
    .describedAs("gid")
    .defaultsTo("console-consumer-" + new Random().nextInt(100000))
    .ofType(classOf[String])
  val fetchSizeOpt = parser.accepts("fetch-size", "The amount of data to fetch in a single request.")
    .withRequiredArg
    .describedAs("size")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(1024 * 1024)
  val minFetchBytesOpt = parser.accepts("min-fetch-bytes", "The min number of bytes each fetch request waits for.")
    .withRequiredArg
    .describedAs("bytes")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(1)
  val maxWaitMsOpt = parser.accepts("max-wait-ms", "The max amount of time each fetch request waits.")
    .withRequiredArg
    .describedAs("ms")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(100)
  val socketBufferSizeOpt = parser.accepts("socket-buffer-size", "The size of the tcp RECV size.")
    .withRequiredArg
    .describedAs("size")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(2 * 1024 * 1024)
  val socketTimeoutMsOpt = parser.accepts("socket-timeout-ms", "The socket timeout used for the connection to the broker")
    .withRequiredArg
    .describedAs("ms")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(ConsumerConfig.SocketTimeout)
  // NOTE(review): describedAs("prop") looks like a copy/paste slip (expected
  // "ms") — it is user-visible help text, so fix it in a behavior-affecting change.
  val consumerTimeoutMsOpt = parser.accepts("consumer-timeout-ms", "consumer throws timeout exception after waiting this much " +
    "of time without incoming messages")
    .withRequiredArg
    .describedAs("prop")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(-1)
  val messageFormatterOpt = parser.accepts("formatter", "The name of a class to use for formatting kafka messages for display.")
    .withRequiredArg
    .describedAs("class")
    .ofType(classOf[String])
    .defaultsTo(classOf[DefaultMessageFormatter].getName)
  // Repeatable key=value pairs forwarded to the formatter's init().
  val messageFormatterArgOpt = parser.accepts("property")
    .withRequiredArg
    .describedAs("prop")
    .ofType(classOf[String])
  val resetBeginningOpt = parser.accepts("from-beginning", "If the consumer does not already have an established offset to consume from, " +
    "start with the earliest message present in the log rather than the latest message.")
  val autoCommitIntervalOpt = parser.accepts("autocommit.interval.ms", "The time interval at which to save the current offset in ms")
    .withRequiredArg
    .describedAs("ms")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(10*1000)
  val maxMessagesOpt = parser.accepts("max-messages", "The maximum number of messages to consume before exiting. If not set, consumption is continual.")
    .withRequiredArg
    .describedAs("num_messages")
    .ofType(classOf[java.lang.Integer])
  val skipMessageOnErrorOpt = parser.accepts("skip-message-on-error", "If there is an error when processing a message, " +
    "skip it instead of halt.")
  val csvMetricsReporterEnabledOpt = parser.accepts("csv-reporter-enabled", "If set, the CSV metrics reporter will be enabled")
  // NOTE(review): the help strings below contain user-visible typos
  // ("outputed", "metrics dictory") — fix them in a behavior-affecting change.
  val metricsDirectoryOpt = parser.accepts("metrics-dir", "If csv-reporter-enable is set, and this parameter is" +
    "set, the csv metrics will be outputed here")
    .withRequiredArg
    .describedAs("metrics dictory")
    .ofType(classOf[java.lang.String])

  // ---- Option validation ---------------------------------------------------
  val options: OptionSet = tryParse(parser, args)
  CommandLineUtils.checkRequiredArgs(parser, options, zkConnectOpt)

  // Exactly one of --topic / --whitelist / --blacklist must be supplied.
  val topicOrFilterOpt = List(topicIdOpt, whitelistOpt, blacklistOpt).filter(options.has)
  if (topicOrFilterOpt.size != 1) {
    error("Exactly one of whitelist/blacklist/topic is required.")
    parser.printHelpOn(System.err)
    System.exit(1)
  }
  val topicArg = options.valueOf(topicOrFilterOpt.head)
  val filterSpec = if (options.has(blacklistOpt))
    new Blacklist(topicArg)
  else
    new Whitelist(topicArg)

  // ---- Optional CSV metrics reporter ---------------------------------------
  val csvMetricsReporterEnabled = options.has(csvMetricsReporterEnabledOpt)
  if (csvMetricsReporterEnabled) {
    val csvReporterProps = new Properties()
    csvReporterProps.put("kafka.metrics.polling.interval.secs", "5")
    csvReporterProps.put("kafka.metrics.reporters", "kafka.metrics.KafkaCSVMetricsReporter")
    if (options.has(metricsDirectoryOpt))
      csvReporterProps.put("kafka.csv.metrics.dir", options.valueOf(metricsDirectoryOpt))
    else
      csvReporterProps.put("kafka.csv.metrics.dir", "kafka_metrics")
    csvReporterProps.put("kafka.csv.metrics.reporter.enabled", "true")
    val verifiableProps = new VerifiableProperties(csvReporterProps)
    KafkaMetricsReporter.startReporters(verifiableProps)
  }

  // ---- Translate command line options into the consumer configuration ------
  val props = new Properties()
  props.put("group.id", options.valueOf(groupIdOpt))
  props.put("socket.receive.buffer.bytes", options.valueOf(socketBufferSizeOpt).toString)
  props.put("socket.timeout.ms", options.valueOf(socketTimeoutMsOpt).toString)
  props.put("fetch.message.max.bytes", options.valueOf(fetchSizeOpt).toString)
  props.put("fetch.min.bytes", options.valueOf(minFetchBytesOpt).toString)
  props.put("fetch.wait.max.ms", options.valueOf(maxWaitMsOpt).toString)
  props.put("auto.commit.enable", "true")
  props.put("auto.commit.interval.ms", options.valueOf(autoCommitIntervalOpt).toString)
  props.put("auto.offset.reset", if(options.has(resetBeginningOpt)) "smallest" else "largest")
  props.put("zk.connect", options.valueOf(zkConnectOpt))
  props.put("consumer.timeout.ms", options.valueOf(consumerTimeoutMsOpt).toString)
  val config = new ConsumerConfig(props)

  val skipMessageOnError = if (options.has(skipMessageOnErrorOpt)) true else false
  val messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt))
  val formatterArgs = MessageFormatter.tryParseFormatterArgs(options.valuesOf(messageFormatterArgOpt))
  val maxMessages = if(options.has(maxMessagesOpt)) options.valueOf(maxMessagesOpt).intValue else -1
  val connector = Consumer.create(config)

  // --from-beginning: drop any previously committed offsets for this group.
  if(options.has(resetBeginningOpt))
    ZkUtils.maybeDeletePath(options.valueOf(zkConnectOpt), "/consumers/" + options.valueOf(groupIdOpt))

  Runtime.getRuntime.addShutdownHook(new Thread() {
    override def run() {
      connector.shutdown()
      // if there is no group specified then avoid polluting zookeeper with persistent group data, this is a hack
      if(!options.has(groupIdOpt))
        ZkUtils.maybeDeletePath(options.valueOf(zkConnectOpt), "/consumers/" + options.valueOf(groupIdOpt))
    }
  })

  var numMessages = 0L
  val formatter: MessageFormatter = messageFormatterClass.newInstance().asInstanceOf[MessageFormatter]
  formatter.init(formatterArgs)

  // ---- Main consume loop ---------------------------------------------------
  try {
    val stream = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), new DefaultDecoder()).get(0)
    // --max-messages bounds the stream; otherwise consume forever.
    val iter = if(maxMessages >= 0)
      stream.slice(0, maxMessages)
    else
      stream
    for(messageAndTopic <- iter) {
      try {
        formatter.writeTo(messageAndTopic.key, messageAndTopic.message, System.out)
        numMessages += 1
      } catch {
        // NOTE(review): bare `case e =>` catches Throwable (pre-2.10 style);
        // fatal errors are also caught/rethrown through this path.
        case e =>
          if (skipMessageOnError)
            error("Error processing message, skipping this message: ", e)
          else
            throw e
      }
      if(System.out.checkError()) {
        // This means no one is listening to our output stream any more, time to shutdown
        System.err.println("Unable to write to standard out, closing consumer.")
        System.err.println("Consumed %d messages".format(numMessages))
        formatter.close()
        connector.shutdown()
        System.exit(1)
      }
    }
  } catch {
    case e => error("Error processing message, stopping consumer: ", e)
  }
  System.err.println("Consumed %d messages".format(numMessages))
  System.out.flush()
  formatter.close()
  connector.shutdown()
}
def tryParse(parser: OptionParser, args: Array[String]) = {
  try {
    parser.parse(args : _*)
  } catch {
    case e: OptionException =>
      // Utils.croak reports the error (and presumably terminates the JVM —
      // confirm), so the null below should never be observed by callers.
      Utils.croak(e.getMessage)
      null
  }
}
/**
 * Best-effort removal of the consumer group's temporary zookeeper data.
 * Failures are logged and otherwise ignored so cleanup never aborts shutdown.
 */
def tryCleanupZookeeper(zkUrl: String, groupId: String) {
  try {
    val dir = "/consumers/" + groupId
    info("Cleaning up temporary zookeeper data under " + dir + ".")
    val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
    zk.deleteRecursive(dir)
    zk.close()
  } catch {
    // The original `case _ =>` swallowed every failure silently; keep the
    // best-effort semantics but leave a trace of what went wrong.
    case e: Throwable =>
      info("Swallowing exception during zookeeper cleanup: " + e)
  }
}
}
object MessageFormatter {

  /**
   * Parses formatter arguments of the form "key=value" into a Properties
   * instance. Exits the JVM when any argument is not a single key=value pair.
   */
  def tryParseFormatterArgs(args: Iterable[String]): Properties = {
    val pairs = args.map(_ split "=").filterNot(_ == null).filterNot(_.length == 0)
    if (!pairs.forall(_.length == 2)) {
      System.err.println("Invalid parser arguments: " + args.mkString(" "))
      System.exit(1)
    }
    val props = new Properties
    pairs.foreach(pair => props.put(pair(0), pair(1)))
    props
  }
}
/**
 * Pluggable formatter used by the console consumer to render each message.
 * Lifecycle: init(props) once with the parsed --property arguments, writeTo
 * per message, close() on shutdown. init and close are optional no-ops.
 */
trait MessageFormatter {
  def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream)
  def init(props: Properties) {}
  def close() {}
}
class DefaultMessageFormatter extends MessageFormatter {
  var printKey = false
  // NOTE(review): as written these are the two-character sequences
  // backslash+t / backslash+n, not tab/newline — confirm this is intended.
  var keySeparator = "\\t".getBytes
  var lineSeparator = "\\n".getBytes

  // Each setting is only overwritten when explicitly configured.
  override def init(props: Properties) {
    if (props.containsKey("print.key"))
      printKey = props.getProperty("print.key").trim.toLowerCase.equals("true")
    if (props.containsKey("key.separator"))
      keySeparator = props.getProperty("key.separator").getBytes
    if (props.containsKey("line.separator"))
      lineSeparator = props.getProperty("line.separator").getBytes
  }

  def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
    // Render missing payloads as the literal text "null".
    def orNullBytes(bytes: Array[Byte]): Array[Byte] =
      if (bytes == null) "null".getBytes() else bytes
    if (printKey) {
      output.write(orNullBytes(key))
      output.write(keySeparator)
    }
    output.write(orNullBytes(value))
    output.write(lineSeparator)
  }
}
class NoOpMessageFormatter extends MessageFormatter {
  // Discards every message without producing any output.
  override def init(props: Properties): Unit = ()
  def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream): Unit = ()
}
class ChecksumMessageFormatter extends MessageFormatter {
  // "<topic>:" when a topic was configured, empty string otherwise.
  private var topicStr: String = _

  override def init(props: Properties) {
    topicStr = Option(props.getProperty("topic")).map(_ + ":").getOrElse("")
  }

  def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
    output.println(topicStr + "checksum:" + new Message(value, key).checksum)
  }
}
| akosiaris/kafka | core/src/main/scala/kafka/consumer/ConsoleConsumer.scala | Scala | apache-2.0 | 13,045 |
package at.iem.point.eh.sketches
object Pitch {
  // cf. http://www.sengpielaudio.com/Rechner-notennamen.htm
  implicit val ordering = Ordering.by[Pitch, Int](_.midi)

  /** Renders a MIDI note number as a pitch name in the requested language. */
  def toString(midi: Int, lang: Language = Language.default): String = {
    val pitchClass = midi % 12
    val register = midi / 12
    lang match {
      case Language.English =>
        PitchClass.toString(pitchClass, lang) + register
      case Language.German =>
        // German (Helmholtz-style) notation: capital letters in the low
        // registers, leading commas below register 3, trailing ticks above 4.
        val caps = register <= 3
        val name = PitchClass.toString(pitchClass, lang, caps)
        if (register <= 2)
          ("," * (3 - register)) + name
        else if (register >= 5)
          name + ("'" * (register - 4))
        else
          name
    }
  }
}
// Value class wrapping a MIDI note number.
final class Pitch(val midi: Int) extends AnyVal {
  override def toString = Pitch.toString(midi)
  // Pairs the two pitches; presumably an implicit conversion elsewhere turns
  // the (Pitch, Pitch) tuple into a DirectedInterval — confirm (the direct
  // construction is kept as the trailing comment).
  def interval(that: Pitch): DirectedInterval = (this, that) // new DirectedInterval(math.abs(this.midi - that.midi))
  def map(fun: Int => Int): Pitch = new Pitch(fun(midi))
  // The pitch class (chromatic step 0-11), available as an implicit view.
  implicit def `class`: PitchClass = new PitchClass(midi % 12)
}
object PitchClass {
  // Note names indexed by chromatic step 0 (C) .. 11 (B/h).
  private final val englishNames = Array("C","C#","D","D#","E","F","F#","G","G#","A","A#","B")
  private final val germanNames = Array("c","cis","d","dis","e","f","fis","g","gis","a","ais","h")

  /** Renders a chromatic step as a note name; `caps` capitalizes German names. */
  def toString(step: Int, lang: Language = Language.default, caps: Boolean = false): String =
    lang match {
      case Language.English =>
        englishNames(step)
      case Language.German =>
        val name = germanNames(step)
        if (caps) name.capitalize else name
    }
}
// Value class wrapping a chromatic step, 0 (C) through 11 (B).
final class PitchClass(val step: Int) extends AnyVal {
  override def toString = PitchClass.toString(step)
}
| iem-projects/PointLib | src/main/scala/at/iem/point/eh/sketches/Pitch.scala | Scala | gpl-2.0 | 1,713 |
package com.aesthetikx.android.canopy
import android.graphics.Color
import android.util.TypedValue
import android.view.{LayoutInflater, View, ViewGroup}
import android.widget.{FrameLayout, RelativeLayout}
import com.aesthetikx.android.canopy.view.{CanopyRowView, ColorProvider, DefaultColorProvider}
import java.util.List
import scala.collection.JavaConversions._
abstract class BaseCanopyItem(
    private var expanded: Boolean,
    private var visible: Boolean,
    private val depth: Int,
    private val children: List[CanopyItem])
  extends CanopyItem {

  // Auxiliary constructor: items start out expanded and visible.
  def this(depth: Int, children: List[CanopyItem]) = this(true, true, depth, children)

  // Pushes the current expansion/visibility state down to every direct child.
  private def notifyChildren(): Unit =
    children.toList.foreach(child => child.parentToggled(isExpanded, isVisible))

  // Expansion

  override def setExpanded(expanded: Boolean): Unit = {
    this.expanded = expanded
    notifyChildren()
  }

  override def toggleExpanded(): Unit = setExpanded(!expanded)

  override def isExpanded(): Boolean = expanded

  override def parentToggled(parentExpanded: Boolean, parentVisible: Boolean): Unit = {
    // A node is visible only when its parent is both expanded and visible.
    setVisible(parentExpanded && parentVisible)
    notifyChildren()
  }

  // Visibility

  override def setVisible(visible: Boolean): Unit = {
    this.visible = visible
  }

  override def isVisible(): Boolean = visible

  // Tree

  override def getDepth(): Integer = depth

  // Recursive count of all descendants (each child counts itself plus its subtree).
  override def getChildCount(): Integer =
    children.toList.foldLeft(0) { (count, child) => count + (child.getChildCount + 1) }

  override def getChildren(): List[CanopyItem] = children

  // Views

  private def getDefaultCanopyRowView(inflater: LayoutInflater, parent: ViewGroup): View = {
    val view: CanopyRowView = inflater.inflate(R.layout.canopy_row, parent, false).asInstanceOf[CanopyRowView]
    // Indent each row by 5dp per level of tree depth.
    val fiveDpInPx: Integer = TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 5, inflater.getContext.getResources.getDisplayMetrics).toInt
    view.getSpacer.getLayoutParams.width = getDepth * fiveDpInPx
    view.getColorBar.setBackgroundColor(getColorProvider.getColor(depth))
    view
  }

  override def getExpandedView(inflater: LayoutInflater, parent: ViewGroup): View = getDefaultCanopyRowView(inflater, parent)

  override def getCollapsedView(inflater: LayoutInflater, parent: ViewGroup): View = getDefaultCanopyRowView(inflater, parent)

  // Override to customize the depth-based row coloring.
  def getColorProvider: ColorProvider = DefaultColorProvider
}
| Aesthetikx/canopy | library/src/main/scala/com/aesthetikx/android/canopy/BaseCanopyItem.scala | Scala | gpl-2.0 | 2,464 |
/**
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2011- SINTEF ICT
* Contact: SINTEF ICT <nicolas.ferry@sintef.no>
*
* Module: net.modelbased.sensapp
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
*/
package net.modelbased.sensapp.restful
import akka.actor._
import akka.http._
import akka.config.Supervision._
/**
 * Boots a SensApp RESTful service: supervises Akka's HTTP RootEndpoint
 * together with the service container actor `T` under a common supervisor.
 *
 * The supervisor is instantiated and started eagerly, as a side effect of
 * constructing a concrete subclass.
 *
 * @tparam T the Container actor hosting the service endpoints; the Manifest
 *           is required so `Actor.actorOf[T]` can instantiate it reflectively
 */
abstract class AbstractBoot[T <: Container](implicit val m: scala.reflect.Manifest[T]) {
// One-for-one supervision: a crashed child is restarted individually on any
// Exception (bounded retries within a time window — see the 3 / 100 arguments).
val factory = SupervisorFactory(SupervisorConfig(OneForOneStrategy(List(classOf[Exception]), 3, 100),
Supervise(Actor.actorOf[RootEndpoint], Permanent) ::
Supervise(Actor.actorOf[T], Permanent) :: Nil))
// Build and start the supervisor (and thus both actors) immediately.
factory.newInstance.start
} | SINTEF-9012/sensapp | _attic/net.modelbased.sensapp.restful/src/main/scala/net/modelbased/sensapp/restful/AbstractBoot.scala | Scala | lgpl-3.0 | 1,392 |
package com.yuzhouwan.bigdata.spark
/**
* Copyright @ 2019 yuzhouwan.com
* All right reserved.
* Function:Unit Test Style
*
* @author Benedict Jin
* @since 2015/9/7
*/
// Common base class for test suites: mixes the FlatSpec style together with
// Matchers, OptionValues, Inside and Inspectors so concrete suites share a
// single testing surface.
// NOTE(review): the ScalaTest imports appear to have been stripped from this
// excerpt (FlatSpec etc. are unresolved as shown) — confirm against the repo.
abstract class UnitTestStyle extends FlatSpec
with Matchers with OptionValues with Inside with Inspectors
| asdf2014/yuzhouwan | yuzhouwan-bigdata/yuzhouwan-bigdata-spark/src/test/scala/com/yuzhouwan/bigdata/spark/UnitTestStyle.scala | Scala | apache-2.0 | 293 |
/* sbt -- Simple Build Tool
* Copyright 2009, 2010 Mark Harrah
*/
package sbt
import org.scalacheck._
import Prop._
import TaskGen._
import Task._
import sbt.internal.util.Types._
/**
 * ScalaCheck property: evaluating a task-based quicksort through the task
 * engine yields the same result as `java.util.Arrays.sort`, for arbitrary
 * input lists and worker counts.
 */
object TaskRunnerSortTest extends Properties("TaskRunnerSort") {
  property("sort") = forAll(TaskListGen, MaxWorkersGen) { (list: List[Int], workers: Int) =>
    val input = list.toArray
    val expected = input.toArray
    java.util.Arrays.sort(expected)
    // Label the property with its inputs so failures are reproducible.
    ("Workers: " + workers) |: ("Array: " + input.toList) |:
      {
        // Non-positive worker counts are clamped to a single worker.
        def result = tryRun(sort(input.toSeq), false, math.max(workers, 1))
        checkResult(result.toList, expected.toList)
      }
  }

  /** Sequential quicksort used once the input drops below the parallel cutoff. */
  final def sortDirect(a: Seq[Int]): Seq[Int] =
    if (a.length < 2) a
    else {
      val pivot = a(0)
      val (below, rest) = a.view.drop(1).partition(_ < pivot)
      sortDirect(below) ++ List(pivot) ++ sortDirect(rest)
    }

  /** Task-based quicksort: inputs of 200+ elements fork two parallel subtasks. */
  final def sort(a: Seq[Int]): Task[Seq[Int]] =
    if (a.length < 200) task(sortDirect(a))
    else
      task(a) flatMap { xs =>
        val pivot = xs(0)
        val (below, rest) = xs.view.drop(1).partition(_ < pivot)
        Test.t2(sort(below), sort(rest)) map {
          case (lo, hi) => lo ++ List(pivot) ++ hi
        }
      }
}
| mdedetrich/sbt | tasks/standard/src/test/scala/TestRunnerSort.scala | Scala | bsd-3-clause | 1,268 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
 * Performance test for the Actor entity.
 *
 * 100 concurrent virtual users each authenticate as "admin" (capturing the
 * CSRF token from the Set-Cookie header), then bulk-create actors from the
 * actors.csv feeder via the REST API.
 */
class AddActorGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
// Target base URL; override on the command line via -DbaseURL=<url>.
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
// HTTP protocol configuration shared by every request in the simulation.
val httpConf = http
.baseURL(baseURL)
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
// Authenticated requests must echo back the CSRF token captured at login.
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
val csvFeeder = csv("actors.csv").queue // 359712 entries
val scn = scenario("Add Actor")
// Unauthenticated probe: expect 401 and harvest the CSRF token cookie.
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*);[\\\\s]?[P,p]ath=/").saveAs("csrf_token"))).exitHereIfFailed
// Form-based login with the default admin credentials.
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login")).exitHereIfFailed
// Verify the session works and refresh the CSRF token post-login.
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*);[\\\\s]?[P,p]ath=/").saveAs("csrf_token")))
// Each of the 100 users creates 3600 actors, consuming the shared feeder.
.repeat(3600) { // = 359712 / 100 rounded
feed(csvFeeder)
.exec(http("Create new actor")
.post("/api/actors")
.headers(headers_http_authenticated)
.body(StringBody("{\\"firstName\\":\\"${firstName}\\", \\"lastName\\":\\"${lastName}\\", \\"birthDate\\":\\"${birthDate}\\", \\"birthLocation\\":\\"${birthLocation}\\"}")).asJSON
.check(status.is(201)))
.exitHereIfFailed
}
val users = scenario("Users").exec(scn)
// All 100 virtual users start at once against the shared protocol config.
setUp(
users.inject(atOnceUsers(100))
).protocols(httpConf)
}
| anthonydahanne/ehcache3-samples | fullstack/src/test/gatling/simulations/AddActorGatlingTest.scala | Scala | apache-2.0 | 2,875 |
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.classdump.luna.test.fragments
import org.classdump.luna.runtime.{IllegalOperationAttemptException, LuaFunction}
import org.classdump.luna.test.{FragmentBundle, FragmentExpectations, OneLiners}
import org.classdump.luna.{ConversionException, LuaRuntimeException, Table}
//noinspection TypeAnnotation
object BasicFragments extends FragmentBundle with FragmentExpectations with OneLiners {
val JustX = fragment("JustX") {
"""return x
"""
}
JustX in EmptyContext succeedsWith (null)
val NotX = fragment("NotX") {
"""return not x
"""
}
NotX in EmptyContext succeedsWith (true)
val NotTrue = fragment("NotTrue") {
"""return not true
"""
}
NotTrue in EmptyContext succeedsWith (false)
val NotNotX = fragment("NotNotX") {
"""return not not x
"""
}
NotNotX in EmptyContext succeedsWith (false)
val EnvResolution = fragment("EnvResolution") {
"""return _ENV
"""
}
EnvResolution in EmptyContext succeedsWith (classOf[Table])
val StringParsing1 = fragment("StringParsing1") {
"""return "\\\\","\\\\"
"""
}
StringParsing1 in EmptyContext succeedsWith("\\\\", "\\\\")
val StringParsing2 = fragment("StringParsing2") {
"""return '\\\\','\\\\'
"""
}
StringParsing2 in EmptyContext succeedsWith("\\\\", "\\\\")
val StringParsing3 = fragment("StringParsing3") {
"""return '\\\\',"\\\\",'\\\\',"\\\\"
"""
}
StringParsing3 in EmptyContext succeedsWith("\\\\", "\\\\", "\\\\", "\\\\")
val FloatParsing = fragment("FloatParsing") {
"""return 0e12 == 0 and .0 == 0 and 0. == 0 and .2e2 == 20 and 2.E-1 == 0.2
"""
}
FloatParsing in EmptyContext succeedsWith (true)
val LocalEnvResolution = fragment("LocalEnvResolution") {
"""local _ENV
|return x
"""
}
LocalEnvResolution in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to index a nil value")
val LocalReassignResolve = fragment("LocalReassignResolve") {
"""local f = function (i) return "f1" end
|local a = f()
|function f(b) return "f2" end
|local b = f()
|local f = function (i) return "f3" end
|local c = f()
|return a, b, c
"""
}
LocalReassignResolve in EmptyContext succeedsWith("f1", "f2", "f3")
val JustAdd = fragment("JustAdd") {
"""return x + 1
"""
}
JustAdd in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to perform arithmetic on a nil value")
val AddNumbers = fragment("AddNumbers") {
"""local a = 39
|local b = 3.0
|return a + b
"""
}
AddNumbers in EmptyContext succeedsWith (42.0)
val AddHexString = fragment("AddHexString") {
"""return "0x10" + 1
"""
}
AddHexString in EmptyContext succeedsWith (17.0)
val IfThenElse = fragment("IfThenElse") {
"""if x >= 0 and x <= 10 then print(x) end
"""
}
IfThenElse in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to compare number with nil")
val Or1 = fragment("Or1") {
"""local assert = assert or function() return end
|return assert
"""
}
Or1 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
Or1 in BasicContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val Or2 = fragment("Or2") {
"""local assert = assert or function() return end
|return not not assert
"""
}
Or2 in EmptyContext succeedsWith (true)
Or2 in BasicContext succeedsWith (true)
val Or3 = fragment("Or3") {
"""local x = true or false
|return x or 10
"""
}
Or3 in EmptyContext succeedsWith (true)
Or3 in BasicContext succeedsWith (true)
val IfOr1 = fragment("IfOr1") {
"""local x
|if assert then x = assert else x = function() return end end
|return x
"""
}
IfOr1 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
IfOr1 in BasicContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val IfOr2 = fragment("IfOr2") {
"""local x
|if assert then x = assert else x = function() return end end
|return not not x
"""
}
IfOr2 in EmptyContext succeedsWith (true)
IfOr2 in BasicContext succeedsWith (true)
val IntegerCmp = fragment("IntegerCmp") {
"""local min = 0x8000000000000000
|local max = 0x7fffffffffffffff
|return min < max, max + 1 == min, min < 0, max > 0
"""
}
IntegerCmp in EmptyContext succeedsWith(true, true, true, true)
val NumericCmp = fragment("NumericCmp") {
"""return 1 < 1.0, 1 > 1.0, 1 <= 1.0, 1 >= 1.0, 1 == 1.0
"""
}
NumericCmp in EmptyContext succeedsWith(false, false, true, true, true)
val NaNCmp = fragment("NaNCmp") {
"""local nan = 0/0
|return 0 < nan, 0 > nan, nan ~= nan, nan == nan, nan < nan, nan > nan, nan >= nan, nan <= nan
"""
}
NaNCmp in EmptyContext succeedsWith(false, false, true, false, false, false, false, false)
val StringCmp = fragment("StringCmp") {
"""return "hello" < "there", "1" < "1.0", "1" > "1.0", "1" == "1.0"
"""
}
StringCmp in EmptyContext succeedsWith(true, true, false, false)
val MixedEq = fragment("MixedEq") {
"""return 1 == "1", "1" == 1.0, 1 == 1.0
"""
}
MixedEq in EmptyContext succeedsWith(false, false, true)
val MixedCmp = fragment("MixedCmp") {
"""return 1 < "1"
"""
}
MixedCmp in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to compare number with string")
val MixedCmpReverse = fragment("MixedCmpReverse") {
"""return 1 > "1"
"""
}
MixedCmpReverse in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to compare string with number")
val MultiReturn = fragment("MultiReturn") {
"""local function f() end
|return f(), f(), f()
"""
}
MultiReturn in EmptyContext succeedsWith(null, null)
val LocalMultiAssign = fragment("LocalMultiAssign") {
"""local a, b = (function() return 1, 2 end)()
|return a, b
"""
}
LocalMultiAssign in EmptyContext succeedsWith(1, 2)
val LocalMultiAssignWithPrefix = fragment("LocalMultiAssignWithPrefix") {
"""local a, b, c = 3, (function() return 1, 2 end)()
|return a, b, c
"""
}
LocalMultiAssignWithPrefix in EmptyContext succeedsWith(3, 1, 2)
val NonLocalMultiAssign = fragment("NonLocalMultiAssign") {
"""a, b = (function() return 1, 2 end)()
|return a, b
"""
}
NonLocalMultiAssign in EmptyContext succeedsWith(1, 2)
val NonLocalMultiAssignWithPrefix = fragment("NonLocalMultiAssignWithPrefix") {
"""a, b, c = 3, (function() return 1, 2 end)()
|return a, b, c
"""
}
NonLocalMultiAssignWithPrefix in EmptyContext succeedsWith(3, 1, 2)
val SplitLocalMultiAssign = fragment("SplitLocalMultiAssign") {
"""local a, b
|a, b = (function() return 1, 2 end)()
|return a, b
"""
}
SplitLocalMultiAssign in EmptyContext succeedsWith(1, 2)
val SplitLocalMultiAssignWithPrefix = fragment("SplitLocalMultiAssignWithPrefix") {
"""local a, b, c
|a, b, c = 3, (function() return 1, 2 end)()
|return a, b, c
"""
}
SplitLocalMultiAssignWithPrefix in EmptyContext succeedsWith(3, 1, 2)
val SimpleForLoop = fragment("SimpleForLoop") {
"""local sum = 0
|for i = 1, 10 do
| sum = sum + i
|end
|return sum
"""
}
SimpleForLoop in EmptyContext succeedsWith (55)
val FloatForLoop = fragment("FloatForLoop") {
"""local sum = 0
|for i = 1.0, 9.9, 0.4 do
| sum = sum + i
|end
|return sum
"""
}
expect {
var sum = 0.0
for (d <- 1.0 to 9.9 by 0.4) {
sum += d
}
FloatForLoop in EmptyContext succeedsWith (sum)
}
val MixedNumericForLoop = fragment("MixedNumericForLoop") {
"""local sum = 0
|for i = 1, 10.0 do
| sum = sum + i
|end
|return sum
"""
}
MixedNumericForLoop in EmptyContext succeedsWith (55)
val RuntimeDeterminedForLoop = fragment("RuntimeDeterminedForLoop") {
"""local sum = 0
|for i = 1, "10" do
| sum = sum + i
|end
|return sum
"""
}
RuntimeDeterminedForLoop in EmptyContext succeedsWith (55)
val IllegalForLoop1 = fragment("IllegalForLoop1") {
"""for i = "a", "b", "c" do end
"""
}
IllegalForLoop1 in EmptyContext failsWith(classOf[ConversionException], "" << "'for' limit must be a number")
val IllegalForLoop2 = fragment("IllegalForLoop2") {
"""for i = "a", 0, "c" do end
"""
}
IllegalForLoop2 in EmptyContext failsWith(classOf[ConversionException], "" << "'for' step must be a number")
val IllegalForLoop3 = fragment("IllegalForLoop3") {
"""for i = "a", 0, 0 do end
"""
}
IllegalForLoop3 in EmptyContext failsWith(classOf[ConversionException], "" << "'for' initial value must be a number")
val IllegalForLoop4 = fragment("IllegalForLoop4") {
"""for i = 1, "x" do end
"""
}
IllegalForLoop4 in EmptyContext failsWith(classOf[ConversionException], "" << "'for' limit must be a number")
val NaNForLoop = fragment("NaNForLoop") {
"""local n = 0
|for i = 0, (0/0) do
| n = n + 1.0
|end
|return n
"""
}
NaNForLoop in EmptyContext succeedsWith (0)
val ZeroStepForLoop = fragment("ZeroStepForLoop") {
"""for i = 1, 10, 0 do assert(false) end
"""
}
ZeroStepForLoop in EmptyContext succeedsWith()
ZeroStepForLoop in BasicContext succeedsWith()
val ZeroStepFloatForLoop = fragment("ZeroStepFloatForLoop") {
"""for i = 1, 10, 0.0 do assert(false) end
"""
}
ZeroStepFloatForLoop in EmptyContext succeedsWith()
ZeroStepFloatForLoop in BasicContext succeedsWith()
val NegativeStepForLoop = fragment("NegativeStepForLoop") {
"""for i = 1, 10, -1 do assert(false) end
"""
}
NegativeStepForLoop in EmptyContext succeedsWith()
NegativeStepForLoop in BasicContext succeedsWith()
val NegativeStepFloatForLoop = fragment("NegativeStepFloatForLoop") {
"""for i = 0, 1, -1.0 do assert(false) end
"""
}
NegativeStepFloatForLoop in EmptyContext succeedsWith()
NegativeStepFloatForLoop in BasicContext succeedsWith()
val SimplifiableFloatForLoop = fragment("SimplifiableFloatForLoop") {
"""local step = -1.0
|for i = 0, 5, step do -- loop type (negative, non-NaN) can be determined at compile time
| assert(false)
|end
"""
}
SimplifiableFloatForLoop in EmptyContext succeedsWith()
SimplifiableFloatForLoop in BasicContext succeedsWith()
val DynamicIntegerForLoop = fragment("DynamicIntegerForLoop") {
"""local function forloop(init, limit, step, f)
| for i = init, limit, step do
| f(i)
| end
|end
|
|local sum = 0
|forloop(1, 10, 1, function(i) sum = sum + i end)
|return sum
"""
}
DynamicIntegerForLoop in EmptyContext succeedsWith (55)
val ForLoopMtAttempt = fragment("ForLoopMtAttempt") {
"""local function nt(v)
| local t = {}
| local f = function(a, b) return v end
| setmetatable(t, { __add = f, __sub = f })
| return t
|end
|
|for i = nt(0), 10 do assert(false) end
"""
}
ForLoopMtAttempt in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to call a nil value")
ForLoopMtAttempt in BasicContext failsWith(classOf[ConversionException], "" << "'for' initial value must be a number")
val ForLoopVarNameResolution = fragment("ForLoopVarNameResolution") {
"""local count = 0
|local i = 2
|for i = 1, 10 do count = count + 1 end
|for j = 1, 5 do count = count + i end
|return count
"""
}
ForLoopVarNameResolution in EmptyContext succeedsWith (20)
val RepeatUntil = fragment("RepeatUntil") {
"""local sum = 0
|repeat
| sum = sum + 1
| local dbl = sum * 2
|until dbl > 10
|return sum
"""
}
RepeatUntil in EmptyContext succeedsWith (6)
val InfiniteWhileLoop1 = fragment("InfiniteWhileLoop1") {
"""return function() while true do local a = -1 end end
"""
}
InfiniteWhileLoop1 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val InfiniteWhileLoop2 = fragment("InfiniteWhileLoop2") {
"""return function() while 1 do local a = -1 end end
"""
}
InfiniteWhileLoop2 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val InfiniteWhileLoop3 = fragment("InfiniteWhileLoop3") {
"""return function () repeat local x = 1 until true end
"""
}
InfiniteWhileLoop3 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val BitwiseOps = fragment("BitwiseOps") {
"""local x = 3
|local y = 10
|
|return x & y, x | y, x ~ y, ~x, ~y, x << y, x >> y
"""
}
BitwiseOps in EmptyContext succeedsWith(2, 11, 9, -4, -11, 3072, 0)
val BitwiseCoercedOps = fragment("BitwiseCoercedOps") {
"""local x = 3.0
|local y = 10.0
|return x & y, x | y
"""
}
BitwiseCoercedOps in EmptyContext succeedsWith(2, 11)
val BitwiseStringCoercedOps = fragment("BitwiseStringCoercedOps") {
"""local x = "3"
|local y = "10.0"
|return x & y, x | y, x ~ y, ~x, ~y, x << y, x >> y
"""
}
BitwiseStringCoercedOps in EmptyContext succeedsWith(2, 11, 9, -4, -11, 3072, 0)
val BitwiseAttemptError = fragment("BitwiseAttemptError") {
"""return x & y
"""
}
BitwiseAttemptError in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to perform bitwise operation on a nil value")
val BitwiseRepresentationError = fragment("BitwiseRepresentationError") {
"""local function int(x)
| return x & -1
|end
|int(3.1)
"""
}
BitwiseRepresentationError in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "number has no integer representation")
val BitwiseError = fragment("BitwiseError") {
"""local x = print or 1.2
|return 10 & x
"""
}
BitwiseError in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "number has no integer representation")
BitwiseError in BasicContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to perform bitwise operation on a function value")
val UnmOnNumbers = fragment("UnmOnNumbers") {
"""local i = 42
|local f = 42.6
|local function fun()
| if assert then return 314 else return 0.314 end
|end
|local n = fun()
|
|return i, -i, f, -f, n, -n
"""
}
UnmOnNumbers in EmptyContext succeedsWith(42, -42, 42.6, -42.6, 0.314, -0.314)
UnmOnNumbers in BasicContext succeedsWith(42, -42, 42.6, -42.6, 314, -314)
val UnmOnNumericString = fragment("UnmOnNumericString") {
"""local i = "42"
|local f = "42.6"
|
|return i, -i, f, -f
"""
}
UnmOnNumericString in EmptyContext succeedsWith("42", -42.0, "42.6", -42.6)
val UnmOnNonNumericString = fragment("UnmOnNonNumericString") {
"""local s = "hello"
|return -s
"""
}
UnmOnNonNumericString in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to perform arithmetic on a string value")
val UnmOnNil = fragment("UnmOnNil") {
"""return -x
"""
}
UnmOnNil in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to perform arithmetic on a nil value")
val StringLength = fragment("StringLength") {
"""local s = "hello"
|return #s
"""
}
StringLength in EmptyContext succeedsWith (5)
val SeqTableLength = fragment("SeqTableLength") {
"""local t = {}
|t[1] = "hi"
|t[2] = "there"
|return #t
"""
}
SeqTableLength in EmptyContext succeedsWith (2)
val SeqTableLengthMultiAssign = fragment("SeqTableLengthMultiAssign") {
"""local t = {}
|t[1], t[2] = #t, #t
|return #t, t[1], t[2]
"""
}
SeqTableLengthMultiAssign in EmptyContext succeedsWith(2, 0, 0)
val TableMultiAssign1 = fragment("TableMultiAssign1") {
"""local t = {}
|t["hi"], t['there'] = 1, 2
|return t.hi, t.there
"""
}
TableMultiAssign1 in EmptyContext succeedsWith(1, 2)
val TableMultiAssign2 = fragment("TableMultiAssign2") {
"""local t = {}
|t.hi, t.there = 1, 2
|return t.hi, t.there
"""
}
TableMultiAssign2 in EmptyContext succeedsWith(1, 2)
val NilTableLength = fragment("NilTableLength") {
"""return #t
"""
}
NilTableLength in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to get length of a nil value")
val TableFloatKeys = fragment("TableFloatKeys") {
"""local x = -1
|local mz = 0/x -- minus zero
|local t = {[0] = 10, 20, 30, 40, 50}
|return t[mz], t[0], t[mz] == t[0], t[-0], t[0], t[-0] == t[0]
"""
}
TableFloatKeys in EmptyContext succeedsWith(10, 10, true, 10, 10, true)
val ConcatStrings = fragment("ConcatStrings") {
"""return "hello".." ".."world"
"""
}
ConcatStrings in EmptyContext succeedsWith ("hello world")
val ConcatStringsAndNumbers = fragment("ConcatStringsAndNumbers") {
"""return (4 .. 1 + 1.0) .. " = " .. 42
"""
}
ConcatStringsAndNumbers in EmptyContext succeedsWith ("42.0 = 42")
val ConcatDynamic = fragment("ConcatDynamic") {
"""local function c(a, b, c)
| return a..b..c
|end
|
|local s = c(1, "2", c(0, 0, 0))
|return s
"""
}
ConcatDynamic in EmptyContext succeedsWith ("12000")
val ConcatNil = fragment("ConcatNil") {
"""return "x = "..x
"""
}
ConcatNil in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to concatenate a nil value")
val ConcatNumeric = fragment("ConcatNumeric") {
"""local i = 1
|local f = 1.0
|local n
|if assert then n = i else n = f end
|return ":"..n
"""
}
ConcatNumeric in EmptyContext succeedsWith (":1.0")
ConcatNumeric in BasicContext succeedsWith (":1")
val Upvalues1 = fragment("Upvalues1") {
"""local x = {}
|for i = 0, 10 do
| if i % 2 == 0 then x[i // 2] = function() return i, x end end
|end
"""
}
Upvalues1 in EmptyContext succeedsWith()
val Upvalues2 = fragment("Upvalues2") {
"""local x
|x = 1
|
|local function sqr()
| return x * x
|end
|
|x = 3
|return sqr()
"""
}
Upvalues2 in EmptyContext succeedsWith (9)
val Upvalues3 = fragment("Upvalues3") {
"""local x, y
|if g then
| y = function() return x end
|else
| x = function() return y end
|end
|return x or y
"""
}
Upvalues3 in EmptyContext succeedsWith (classOf[LuaFunction[_, _, _, _, _]])
val Upvalues4 = fragment("Upvalues4") {
"""local n = 0
|local function f() n = n + 1 end
|f()
|f()
|local m = n
|n = n - 1
|local l = n
|return m, l
"""
}
Upvalues4 in EmptyContext succeedsWith(2, 1)
val SetUpvalue = fragment("SetUpvalue") {
"""local x = 1
|
|local function f()
| x = 123
|end
|
|f()
|return x
"""
}
SetUpvalue in EmptyContext succeedsWith (123)
val SetTabUp = fragment("SetTabUp") {
"""x = 1
|return x
"""
}
SetTabUp in EmptyContext succeedsWith (1)
val Tables = fragment("Tables") {
"""local t = {}
|t.self = t
|return t.self
"""
}
Tables in EmptyContext succeedsWith (classOf[Table])
val Self = fragment("Self") {
"""local function GET(tab, k) return tab[k] end
|local function SET(tab, k, v) tab[k] = v end
|
|local t = {}
|t.get = GET
|t.set = SET
|
|local before = t:get(1)
|t:set(1, "hello")
|local after = t:get(1)
|
|return before, after
"""
}
Self in EmptyContext succeedsWith(null, "hello")
val BlockLocals = fragment("BlockLocals") {
"""do
| local a = 0
| local b = 1
|end
|
|do
| local a = 2
|end
"""
}
BlockLocals in EmptyContext succeedsWith()
val BlockLocalShadowing = fragment("BlockLocalShadowing") {
"""local a, b = 1, 2
|local x, y
|do
| local a, b = "x", "y"
| x, y = a, b
|end
|return x, y
"""
}
BlockLocalShadowing in EmptyContext succeedsWith("x", "y")
val Tailcalls = fragment("Tailcalls") {
"""function f(x)
| print(x)
| if x > 0 then
| return f(x - 1)
| else
| if x < 0 then
| return f(x + 1)
| else
| return 0
| end
| end
|end
|
|return f(3),f(-2)
"""
}
Tailcalls in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to call a nil value")
val YesTailcall = fragment("YesTailcall") {
"""return (function () return 1, 2 end)()
"""
}
YesTailcall in EmptyContext succeedsWith(1, 2)
val NoTailcall = fragment("NoTailcall") {
"""return ((function () return 1, 2 end)())
"""
}
NoTailcall in EmptyContext succeedsWith (1)
val FuncWith2Params = fragment("FuncWith2Params") {
"""local f = function (x, y)
| return x + y
|end
|return -1 + f(1, 3) + 39
"""
}
FuncWith2Params in EmptyContext succeedsWith (42)
val FuncWith3Params = fragment("FuncWith3Params") {
"""local f = function (x, y, z)
| return x + y + z
|end
|return -1 + f(1, 1, 2) + 39
"""
}
FuncWith3Params in EmptyContext succeedsWith (42)
val DeterminateVarargs = fragment("DeterminateVarargs") {
"""local a, b = ...
|if a > 0 then
| return b, a
|else
| return a, b
|end
"""
}
DeterminateVarargs in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to compare number with nil")
val ReturnVarargs = fragment("ReturnVarargs") {
"""return ...
"""
}
ReturnVarargs in EmptyContext succeedsWith()
val IndeterminateVarargs = fragment("IndeterminateVarargs") {
"""local a = ...
|if a then
| return ...
|else
| return false, ...
|end
"""
}
IndeterminateVarargs in EmptyContext succeedsWith (false)
val MultiTableConstructor = fragment("MultiTableConstructor") {
"""return #({(function() return 3, 2, 1 end)()})
"""
}
MultiTableConstructor in EmptyContext succeedsWith (3)
val NoMultiTableConstructor = fragment("NoMultiTableConstructor") {
"""return #({((function() return 3, 2, 1 end)())})
"""
}
NoMultiTableConstructor in EmptyContext succeedsWith (1)
val NilTestInlining = fragment("NilTestInlining") {
"""local a
|if a then
| return true
|else
| return false
|end
"""
}
NilTestInlining in EmptyContext succeedsWith (false)
val VarargFunctionCalls = fragment("VarargFunctionCalls") {
"""local f = function(...) return ... end
|return true, f(...)
"""
}
VarargFunctionCalls in EmptyContext succeedsWith (true)
val VarargFunctionCalls2 = fragment("VarargFunctionCalls2") {
"""local x = ...
|local f = function(...) return ... end
|if x then
| return f(...)
|else
| return true, f(...)
|end
"""
}
VarargFunctionCalls2 in EmptyContext succeedsWith (true)
val VarargDecomposition = fragment("VarargDecomposition") {
"""local a, b = ...
|local c, d, e = a(b, ...)
|return d(e, ...)
"""
}
VarargDecomposition in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to call a nil value")
val VarargCallWithFixedPrefix = fragment("VarargCallWithFixedPrefix") {
"""x = 10
|local function f(g, ...) return x, g(...) end
|return f(function (a, b) return x + a, x + b end, 1, 2.3, "456")
"""
}
VarargCallWithFixedPrefix in EmptyContext succeedsWith(10, 11, 12.3)
val BigParamListFunctionCall = fragment("BigParamListFunctionCall") {
"""local function f(a,b,c,d,e,f,g,h)
| return h or g or f or e or d or c or b or a or z
|end
|
|return f(1,2,3,4), f(1,2,3), f(1,2,3,4,5,6)
"""
}
BigParamListFunctionCall in EmptyContext succeedsWith(4, 3, 6)
val FunctionCalls = fragment("FunctionCalls") {
"""local function f(x, y)
| return x + y
|end
|
|return f(1, 2)
"""
}
FunctionCalls in EmptyContext succeedsWith (3)
val FunctionCalls2 = fragment("FunctionCalls2") {
"""local function abs(x)
| local function f(x, acc)
| if x > 0 then
| return f(x - 1, acc + 1)
| elseif x < 0 then
| return f(x + 1, acc + 1)
| else
| return acc
| end
| end
| local v = f(x, 0)
| return v
|end
|
|return abs(20)
"""
}
FunctionCalls2 in EmptyContext succeedsWith (20)
val FunctionCalls3 = fragment("FunctionCalls3") {
"""local function abs(x)
| local function f(g, x, acc)
| if x > 0 then
| return g(g, x - 1, acc + 1)
| elseif x < 0 then
| return g(g, x + 1, acc + 1)
| else
| return acc
| end
| end
| local v = f(f, x, 0)
| return v
|end
|
|return abs(20)
"""
}
FunctionCalls3 in EmptyContext succeedsWith (20)
val LocalUpvalue = fragment("LocalUpvalue") {
"""local function f()
| local x = 1
| local function g()
| return x * x
| end
| return g()
|end
|
|return f() -- equivalent to return 1
"""
}
LocalUpvalue in EmptyContext succeedsWith (1)
val ReturningAFunction = fragment("ReturningAFunction") {
"""local function f()
| return function(x) return not not x, x end
|end
|
|return f()()
"""
}
ReturningAFunction in EmptyContext succeedsWith(false, null)
val IncompatibleFunctions = fragment("IncompatibleFunctions") {
"""local f
|if x then
| f = function(x, y)
| local z = x or y
| return not not z, z
| end
|else
| f = function()
| return x
| end
|end
|
|return f(42)
"""
}
IncompatibleFunctions in EmptyContext succeedsWith (null)
val TailcallWith21Args = fragment("TailcallWith21Args") {
"""local function f(...) return true, ... end
|return f("hello", "there", "this", "is", "a", "result", "of", "a", "tail", "call",
| "with", "many", "arguments", "and", "it", "still", "appears", "to", "work", "quite",
| "well")
"""
}
TailcallWith21Args in EmptyContext succeedsWith(true,
"hello", "there", "this", "is", "a", "result", "of", "a", "tail", "call",
"with", "many", "arguments", "and", "it", "still", "appears", "to", "work", "quite",
"well")
val TailcallWith21ArgsAndVarargs = fragment("TailcallWith21ArgsAndVarargs") {
"""local function f(...) return true, ... end
|return f("hello", "there", "this", "is", "a", "result", "of", "a", "tail", "call",
| "with", "many", "arguments", "and", "it", "still", "appears", "to", "work", "quite",
| "well", ...)
"""
}
TailcallWith21ArgsAndVarargs in EmptyContext succeedsWith(true,
"hello", "there", "this", "is", "a", "result", "of", "a", "tail", "call",
"with", "many", "arguments", "and", "it", "still", "appears", "to", "work", "quite",
"well")
val NumIterator = fragment("NumIterator") {
"""local called = 0
|local looped = 0
|
|local function iter(limit, n)
| called = called + 1
| if n < limit then
| return n + 1
| end
|end
|
|for i in iter,10,0 do
| looped = looped + 1
|end
|
|return called, looped
"""
}
NumIterator in EmptyContext succeedsWith(11, 10)
val BasicSetList = fragment("BasicSetList") {
"""local a = { 1, 2, 3, 4, 5 }
|return #a, a
"""
}
BasicSetList in EmptyContext succeedsWith(5, classOf[Table])
val VarLengthSetList = fragment("VarLengthSetList") {
"""local function f() return 1, 2, 3 end
|local a = { f(), f() } -- should return 1, 1, 2, 3
|return #a, a
"""
}
VarLengthSetList in EmptyContext succeedsWith(4, classOf[Table])
val VarargSetList = fragment("VarargSetList") {
"""local a = { ... }
|return #a, a
"""
}
VarargSetList in EmptyContext succeedsWith(0, classOf[Table])
// test should fail
val GotoLocalSlot_withX = fragment("GotoLocalSlot_withX") {
"""do
| local k = 0
| local x
| ::foo::
| local y
| assert(not y)
| y = true
| k = k + 1
| if k < 2 then goto foo end
|end
"""
}
GotoLocalSlot_withX in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to call a nil value")
// Expected to fail here (EmptyContext defines no 'assert', so the call hits "attempt to call a nil value"); reported as succeeding in Lua 5.2 and 5.3
val GotoLocalSlot_withoutX = fragment("GotoLocalSlot_withoutX") {
"""do
| local k = 0
| ::foo::
| local y
| assert(not y)
| y = true
| k = k + 1
| if k < 2 then goto foo end
|end
"""
}
GotoLocalSlot_withoutX in EmptyContext failsWith(classOf[IllegalOperationAttemptException], "" << "attempt to call a nil value")
val GotoLastStatementWithLocals = fragment("GotoLastStatementWithLocals") {
"""goto l
|local x
|::l::
"""
}
GotoLastStatementWithLocals in EmptyContext succeedsWith()
val IfsAndGotos = fragment("IfsAndGotos") {
"""local function f(a)
| if a == 1 then goto l1
| elseif a == 2 then goto l2
| elseif a == 3 then goto l2
| else if a == 4 then goto l3
| else goto l3
| end
| end
| ::l1:: ::l2:: ::l3:: ::l4::
|end
|return f(0)
"""
}
IfsAndGotos in EmptyContext succeedsWith()
val PureFunctionsAreReused = fragment("PureFunctionsAreReused") {
"""function pure(x)
| return function()
| return 1234
| end
|end
|return pure(1) == pure(2)
"""
}
PureFunctionsAreReused in EmptyContext succeedsWith (true)
val ClosuresWithoutOpenUpvaluesAreReused = fragment("ClosuresWithoutOpenUpvaluesAreReused") {
"""function noopen(x)
| a = x
| return function()
| return a
| end
|end
|return noopen(1) == noopen(2)
"""
}
ClosuresWithoutOpenUpvaluesAreReused in EmptyContext succeedsWith (true)
val ClosuresWithOpenUpvaluesAreNotReused = fragment("ClosuresWithOpenUpvaluesAreNotReused") {
"""function withopen(x)
| local a = x
| return function()
| return a
| end
|end
|return withopen(1) == withopen(2)
"""
}
ClosuresWithOpenUpvaluesAreNotReused in EmptyContext succeedsWith (false)
val BigForLoop = fragment("BigForLoop") {
"""local sum = 0
|
|for i = 1, 1000000 do
| sum = sum + i
|end
|
|return sum
"""
}
BigForLoop in EmptyContext succeedsWith (500000500000L)
val ToStringMetamethod = fragment("ToStringMetamethod") {
"""local t = setmetatable({}, { __tostring = function () return end})
|local ts = tostring(t)
|return ts, type(ts)
"""
}
ToStringMetamethod in BasicContext succeedsWith(null, "nil")
val SimpleToNumber = fragment("SimpleToNumber") {
"""local a = tonumber("123.5")
|local b = tonumber(123.5)
|return a, type(a), b, type(b)
"""
}
SimpleToNumber in BasicContext succeedsWith(123.5, "number", 123.5, "number")
val ToNumberWithBase = fragment("ToNumberWithBase") {
"""local a = tonumber("123.5", 10)
|local b = tonumber("123", 9)
|local c = tonumber("helloThere", 36)
|return a, type(a), b, type(b), c, type(c)
"""
}
ToNumberWithBase in BasicContext succeedsWith(null, "nil", 102, "number", 1767707662651898L, "number")
val GetSetMetatableWithMetatable = fragment("GetSetMetatableWithMetatable") {
"""local mt = {}
|local t = setmetatable({}, mt)
|local a = getmetatable(t) -- a == mt
|mt.__metatable = 42
|local b = getmetatable(t) -- b == 42
|return a, a == mt, b, b == mt
"""
}
GetSetMetatableWithMetatable in BasicContext succeedsWith(classOf[Table], true, 42, false)
val SetMetatableRefusesMetatableField = fragment("SetMetatableRefusesMetatableField") {
"""local t = setmetatable({}, { __metatable = 123 })
|setmetatable(t, {})
"""
}
SetMetatableRefusesMetatableField in BasicContext failsWith(classOf[IllegalOperationAttemptException], "" << "cannot change a protected metatable")
val TypesOfValues = fragment("TypesOfValues") {
"""return type(x), type(true), type(false), type(42), type(42.0), type("hello"), type({})
"""
}
TypesOfValues in BasicContext succeedsWith("nil", "boolean", "boolean", "number", "number", "string", "table")
val BasicPCall = fragment("BasicPCall") {
"""return pcall(something, "with argument", 123)
"""
}
BasicPCall in BasicContext succeedsWith(false, "attempt to call a nil value")
val AssertReturnsItsArguments = fragment("AssertReturnsItsArguments") {
"""return assert(true, "hello", "there", 5)
"""
}
AssertReturnsItsArguments in BasicContext succeedsWith(true, "hello", "there", 5)
val AssertWithDefaultErrorObject = fragment("AssertWithDefaultErrorObject") {
"""local a, b = pcall(assert, false)
|return b
"""
}
AssertWithDefaultErrorObject in BasicContext succeedsWith ("assertion failed!")
val AssertWithNilErrorObject = fragment("AssertWithNilErrorObject") {
"""local a, b = pcall(assert, false, nil)
|return type(b)
"""
}
AssertWithNilErrorObject in BasicContext succeedsWith ("nil")
val AssertWithBooleanErrorObject = fragment("AssertWithBooleanErrorObject") {
"""local a, b = pcall(assert, false, true)
|local c, d = pcall(assert, false, false)
|return type(b), type(d)
"""
}
AssertWithBooleanErrorObject in BasicContext succeedsWith("boolean", "boolean")
val AssertWithNumberErrorObjectIsCastToString = fragment("AssertWithNumberErrorObjectIsCastToString") {
"""local a, b = pcall(assert, false, 1)
|local c, d = pcall(assert, false, 1.2)
|return type(b), type(d)
"""
}
AssertWithNumberErrorObjectIsCastToString in BasicContext succeedsWith("string", "string")
val AssertWithTableErrorObject = fragment("AssertWithTableErrorObject") {
"""local a, b = pcall(assert, false, {})
|return type(b)
"""
}
AssertWithTableErrorObject in BasicContext succeedsWith ("table")
val AssertWithFunctionErrorObject = fragment("AssertWithFunctionErrorObject") {
"""local a, b = pcall(assert, false, assert)
|return type(b)
"""
}
AssertWithFunctionErrorObject in BasicContext succeedsWith ("function")
val AssertWithCoroutineErrorObject = fragment("AssertWithCoroutineErrorObject") {
"""local a, b = pcall(assert, false, coroutine.create(function() end))
|return type(b)
"""
}
AssertWithCoroutineErrorObject in CoroContext succeedsWith ("thread")
val ErrorThrowsAnError = fragment("ErrorThrowsAnError") {
"""error("boom!")
"""
}
ErrorThrowsAnError in BasicContext failsWith(classOf[LuaRuntimeException], "" << "boom!")
val ErrorWithoutArguments = fragment("ErrorWithoutArguments") {
"""local a, b = pcall(error)
|return a, b, type(b)
"""
}
ErrorWithoutArguments in BasicContext succeedsWith(false, null, "nil")
val XPCallAndError = fragment("XPCallAndError") {
"""local a, b, c = xpcall(error, function(e) return type(e), e end)
|return a, b, type(b), c, type(c)
"""
}
XPCallAndError in BasicContext succeedsWith(false, "nil", "string", null, "nil")
val XPCallWithEmptyHandler = fragment("XPCallWithEmptyHandler") {
"""return xpcall(error, function() end)
"""
}
XPCallWithEmptyHandler in BasicContext succeedsWith(false, null)
val XPCallWithErroneousHandler = fragment("XPCallWithErroneousHandler") {
"""return xpcall(error, function() error() end)
"""
}
XPCallWithErroneousHandler in BasicContext succeedsWith(false, "error in error handling")
val XPCallMaxDepth = fragment("XPCallMaxDepth") {
"""local count = 0
|
|local function handler(e)
| count = count + 1
| error(e)
|end
|
|local a, b = xpcall(error, handler)
|return a, b, count
"""
}
XPCallMaxDepth in BasicContext succeedsWith(false, "error in error handling", 220) // 220 in PUC-Lua 5.3
val RawEqualWithNoArgs = fragment("RawEqualWithNoArgs") {
"""return rawequal()
"""
}
RawEqualWithNoArgs in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawequal' (value expected)")
val RawEqualWithOneArg = fragment("RawEqualWithOneArg") {
"""return rawequal(42)
"""
}
RawEqualWithOneArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #2 to 'rawequal' (value expected)")
val BasicRawGet = fragment("BasicRawGet") {
"""local t = {}
|t.hello = "world"
|return rawget(t, hi), rawget(t, "hello")
"""
}
BasicRawGet in BasicContext succeedsWith(null, "world")
val BasicRawGetFail = fragment("BasicRawGetFail") {
"""return rawget(42, "something")
"""
}
BasicRawGetFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawget' (table expected, got number)")
val BasicRawGetFail2 = fragment("BasicRawGetFail2") {
"""return rawget(42)
"""
}
BasicRawGetFail2 in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawget' (table expected, got number)")
val BasicRawGetArgCountFail = fragment("BasicRawGetArgCountFail") {
"""return rawget({})
"""
}
BasicRawGetArgCountFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #2 to 'rawget' (value expected)")
val RawGetNoArg = fragment("RawGetNoArg") {
"""return rawget()
"""
}
RawGetNoArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawget' (table expected, got no value)")
val BasicRawSet = fragment("BasicRawSet") {
"""local t = {}
|rawset(t, "hello", "world")
|local a = t.hello
|rawset(t, "hello", uu)
|local b = t.hello
|return a, b
"""
}
BasicRawSet in BasicContext succeedsWith("world", null)
val RawSetNilFail = fragment("RawSetNilFail") {
"""rawset({}, uu, uu)
"""
}
RawSetNilFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "table index is nil")
val RawSetNaNFail = fragment("RawSetNaNFail") {
"""rawset({}, 0/0, uu)
"""
}
RawSetNaNFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "table index is NaN")
val RawSetArgCountFail1 = fragment("RawSetArgCountFail1") {
"""rawset({})
"""
}
RawSetArgCountFail1 in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #2 to 'rawset' (value expected)")
val RawSetArgCountFail2 = fragment("RawSetArgCountFail2") {
"""rawset({}, 0/0)
"""
}
RawSetArgCountFail2 in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #3 to 'rawset' (value expected)")
val RawSetNoArg = fragment("RawSetNoArg") {
"""rawset()
"""
}
RawSetNoArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawset' (table expected, got no value)")
val BasicRawLen = fragment("BasicRawLen") {
"""return rawlen({3, 2, 1}), rawlen("hello")
"""
}
BasicRawLen in BasicContext succeedsWith(3, 5)
val RawLenArgCountFail = fragment("RawLenArgCountFail") {
"""rawlen()
"""
}
RawLenArgCountFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawlen' (table or string expected)")
val RawLenBadArgFail = fragment("RawLenBadArgFail") {
"""rawlen(42)
"""
}
RawLenBadArgFail in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'rawlen' (table or string expected)")
val NextOnEmptyTable = fragment("NextOnEmptyTable") {
"""return next({})
"""
}
NextOnEmptyTable in BasicContext succeedsWith (null)
val NextTraversesEverything = fragment("NextTraversesEverything") {
"""local t = {}
|t.hello = "world"
|t[42] = true
|t[1/0] = 0/0
|
|local u = next(t)
|local count = 0
|while u do
| count = count + 1
| u = next(t, u)
|end
|
|return count
"""
}
NextTraversesEverything in BasicContext succeedsWith (3)
val NextArgMustBeTable = fragment("NextArgMustBeTable") {
"""next(uu)
"""
}
NextArgMustBeTable in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'next' (table expected, got nil)")
val NextNoArg = fragment("NextNoArg") {
"""next()
"""
}
NextNoArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'next' (table expected, got no value)")
val NextNonexistentKey = fragment("NextNonexistentKey") {
"""next({}, "boom")
"""
}
NextNonexistentKey in BasicContext failsWith(classOf[IllegalArgumentException], "" << "invalid key to 'next'")
val NextNaNKey = fragment("NextNaNKey") {
"""next({}, "0/0")
"""
}
NextNaNKey in BasicContext failsWith(classOf[IllegalArgumentException], "" << "invalid key to 'next'")
val PairsOnTable = fragment("PairsOnTable") {
"""local t = {u = "hu"}
|t[42] = {}
|t.hello = 22/7
|
|local count = 0
|
|for k, v in pairs(t) do
| count = count + 1
|end
|
|return count
"""
}
PairsOnTable in BasicContext succeedsWith (3)
val PairsWithMetatable = fragment("PairsWithMetatable") {
"""local t = {}
|local mt = { __pairs = function(x) return 1, 2, 3 end }
|setmetatable(t, mt)
|return pairs(t)
"""
}
PairsWithMetatable in BasicContext succeedsWith(1, 2, 3)
val PairsNoTable = fragment("PairsNoTable") {
"""pairs(42)
"""
}
PairsNoTable in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'pairs' (table expected, got number)")
val PairsNoArg = fragment("PairsNoArg") {
"""pairs()
"""
}
PairsNoArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'pairs' (table expected, got no value)")
val IPairsOnList = fragment("IPairsOnList") {
"""local l = {5, 4, 3, 2}
|local count = 0
|local a = 1
|local s = ""
|
|for i, v in ipairs(l) do
| count = count + 1
| a = a * v + i
| s = s..i..v
|end
|
|return count, a, s
"""
}
IPairsOnList in BasicContext succeedsWith(4, 166, "15243342")
val IPairsRespectsIndexMetatable = fragment("") {
"""local l = {5, 4, 3, 2}
|local count = 0
|local mt_count = 0
|local a = 1
|local s = ""
|
|local proxy = setmetatable({}, {
| __index = function(t, k)
| mt_count = mt_count + 1
| return rawget(l, k)
| end})
|
|for i, v in ipairs(proxy) do
| count = count + 1
| a = a * v + i
| s = s..i..v
|end
|
|return count, mt_count, a, s
"""
}
IPairsRespectsIndexMetatable in BasicContext succeedsWith(4, 5, 166, "15243342")
val IPairsWithPairsMetatable = fragment("IPairsWithPairsMetatable") {
"""local t = {}
|local mt = { __pairs = function(x) error() end }
|setmetatable(t, mt)
|return ipairs(t)
"""
}
IPairsWithPairsMetatable in BasicContext succeedsWith(classOf[LuaFunction[_, _, _, _, _]], classOf[Table], 0)
val IPairsNoTable = fragment("IPairsNoTable") {
"""ipairs(42)
"""
}
IPairsNoTable in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'ipairs' (table expected, got number)")
val IPairsNoArg = fragment("IPairsNoArg") {
"""ipairs()
"""
}
IPairsNoArg in BasicContext failsWith(classOf[IllegalArgumentException], "" << "bad argument #1 to 'ipairs' (table expected, got no value)")
val SelectCount = fragment("SelectCount") {
"""return select('#', 3, 2, x)
"""
}
SelectCount in BasicContext succeedsWith (3)
val SelectPositiveIndex = fragment("SelectPositiveIndex") {
"""return select(2, 1, 2, 3, 4)
"""
}
SelectPositiveIndex in BasicContext succeedsWith(2, 3, 4)
val SelectNegativeIndex = fragment("SelectNegativeIndex") {
"""return select(-2, 1, 2, 3, 4)
"""
}
SelectNegativeIndex in BasicContext succeedsWith(3, 4)
val VersionSniff = fragment("VersionSniff") {
"""local f, t = function()return function()end end, {nil,
| [false] = 'Lua 5.1',
| [true] = 'Lua 5.2',
| [1/'-0'] = 'Lua 5.3',
| [1] = 'LuaJIT' }
|local version = t[1] or t[1/0] or t[f()==f()]
|return version
"""
}
VersionSniff in EmptyContext succeedsWith ("Lua 5.3")
val SniffIntegerTrick = fragment("SniffIntegerTrick") {
"""return 1/'-0'
"""
}
SniffIntegerTrick in EmptyContext succeedsWith (Double.PositiveInfinity)
val NameMetaFieldIsUsedInLibErrorMessages = fragment("NameMetaFieldIsUsedInLibErrorMessages") {
"""local t = setmetatable({}, { __name = "elbaT" })
|select(t)
"""
}
NameMetaFieldIsUsedInLibErrorMessages in BasicContext failsWith "bad argument #1 to 'select' (number expected, got elbaT)"
about("coercions") {
in(EmptyContext) {
program("""return -("0")""") succeedsWith (-0.0)
program("""return -("-0")""") succeedsWith (-0.0)
program("""return -("-0.0")""") succeedsWith (0.0)
program("""return 1 + 2""") succeedsWith (3)
program("""return "1" + 2""") succeedsWith (3.0)
program("""return "1" + "2"""") succeedsWith (3.0)
}
}
}
| kroepke/luna | luna-tests/src/test/scala/org/classdump/luna/test/fragments/BasicFragments.scala | Scala | apache-2.0 | 46,796 |
package net.fluffy8x.thsch.entity
import net.fluffy8x.thsch.base._
import org.lwjgl.opengl._
import net.fluffy8x.thsch.resource._
import scala.collection.mutable.Set
/**
* Describes an entity that can be rendered.
*/
trait Renderable extends Entity with Child[Renderable, EntityManager] {
var position: Point3D
var angle: Angle
var center: Point3D
def render() {
if (isVisible) _render()
}
protected def _render(): Unit
protected def _register(m: EntityManager) {
val r = m.renderables
try {
val theSet = r(renderPriority)
theSet += this
} catch {
case e: NoSuchElementException => {
m.renderables = r.updated(renderPriority, Set(this))
}
}
}
var isVisible = false
def basis: CoordinateBasis
private var rp = 0.0
def renderPriority: Double = rp
def renderPriority_=(_rp: Double) = {
val r = manager.renderables
val theSet = r(rp)
theSet -= this
rp = _rp
try {
val theSet = r(rp)
theSet += this
} catch {
case e: NoSuchElementException => {
manager.renderables = r.updated(rp, Set(this))
}
}
}
/**
* Returns the upper left corner based on the current {@link CoordinateBasis}
* and render priority.
*/
def upperLeft = basis match {
case CoordinateBasis.Window => Point2D(0, 0)
case CoordinateBasis.Frame => {
val window = parent.parent.parent
Point2D(window.stageX, window.stageY)
}
case CoordinateBasis.Auto => {
if (renderPriority >= 0.2 && renderPriority <= 0.8) {
val window = parent.parent.parent
Point2D(window.stageX, window.stageY)
} else Point2D(0, 0)
}
}
abstract override def tick() {
super.tick()
render()
}
override def delete() {
super.delete()
val r = manager.renderables
val theSet = r(renderPriority)
theSet -= this
}
}
/**
* A primitive drawing mode.
*/
case class PrimType(t: Int) extends AnyVal {
/**
* Calls <code>GL11.glBegin</code> using the current object's t.
*/
def glBegin() = GL11.glBegin(t)
}
object PrimType {
val Points = PrimType(GL11.GL_POINTS)
val Lines = PrimType(GL11.GL_LINES)
val LineStrip = PrimType(GL11.GL_LINE_STRIP)
val LineLoop = PrimType(GL11.GL_LINE_LOOP)
val Triangles = PrimType(GL11.GL_TRIANGLES)
val TriangleStrip = PrimType(GL11.GL_TRIANGLE_STRIP)
val TriangleFan = PrimType(GL11.GL_TRIANGLE_FAN)
val Quads = PrimType(GL11.GL_QUADS)
val QuadStrip = PrimType(GL11.GL_QUAD_STRIP)
val Polygon = PrimType(GL11.GL_POLYGON)
}
case class BlendMode(
rgbEquation: Int, rgbSfactor: Int, rgbDfactor: Int,
aEquation: Int, aSfactor: Int, aDfactor: Int) {
def use(): Unit = {
GL20.glBlendEquationSeparate(rgbEquation, aEquation)
GL14.glBlendFuncSeparate(rgbSfactor, rgbDfactor, aSfactor, aDfactor)
}
}
object BlendMode {
// I have no idea
val Alpha =
BlendMode(
GL14.GL_FUNC_ADD, GL11.GL_SRC_ALPHA, GL11.GL_ONE_MINUS_SRC_ALPHA,
GL14.GL_FUNC_ADD, GL11.GL_SRC_ALPHA, GL11.GL_ONE_MINUS_SRC_ALPHA)
val Add =
BlendMode(
GL14.GL_FUNC_ADD, GL11.GL_ONE_MINUS_DST_COLOR, GL11.GL_ONE,
GL14.GL_FUNC_ADD, GL11.GL_SRC_ALPHA, GL11.GL_ONE_MINUS_SRC_ALPHA)
}
// Refer to later: glBegin glEnd glVertex* glTexCoord*
trait TPrimitive2D extends Renderable {
def primtype: PrimType
def texture: SCHTexture
def vertices: Array[(Color, Point2D, Point2D)]
def isAbsolute: Boolean = false
def rotatable: Boolean = false
def blendMode: BlendMode = BlendMode.Alpha
protected def _render {
if (isAbsolute) _renderAbs()
else if (!rotatable) _renderRel()
else _renderRot()
}
protected def _renderAbs() {
primtype.glBegin()
texture.glSet()
blendMode.use()
val len = vertices.length
var i = 0
while (i < len) {
val (col, texCoords, coords) = vertices(i)
col.set()
GL11.glTexCoord2d(texCoords.x, texCoords.y)
GL11.glVertex2d(coords.x, coords.y)
i += 1
}
GL11.glEnd()
}
protected def _renderRel() {
primtype.glBegin()
texture.glSet()
blendMode.use()
val len = vertices.length
var i = 0
while (i < len) {
val (col, texCoords, c) = vertices(i)
val coords = c +
(upperLeft - Point2D(0, 0)) +
(position.to2 - Point2D(0, 0))
col.set()
GL11.glTexCoord2d(texCoords.x, texCoords.y)
GL11.glVertex2d(coords.x, coords.y)
i += 1
}
GL11.glEnd()
}
protected def _renderRot() {
primtype.glBegin()
texture.glSet()
blendMode.use()
val len = vertices.length
var i = 0
while (i < len) {
val (col, texCoords, c) = vertices(i)
val rawcoords = c +
(upperLeft - Point2D(0, 0)) +
(position.to2 - Point2D(0, 0))
val cr = rawcoords - center.to2
val r = cr.r
val theta = cr.theta
val coords = center.to2 + Vector2D.fromRt(r, theta + angle)
col.set()
GL11.glTexCoord2d(texCoords.x, texCoords.y)
GL11.glVertex2d(coords.x, coords.y)
i += 1
}
GL11.glEnd()
}
}
/**
* A 2D primitive object.
*/
class Primitive2D(
var _primtype: PrimType,
var _texture: SCHTexture,
var _vertices: Array[(Color, Point2D, Point2D)],
var _isAbsolute: Boolean = false,
var _rotatable: Boolean = false,
var _blendMode: BlendMode = BlendMode.Alpha) extends TPrimitive2D {
def primtype = _primtype
def texture = _texture
def vertices = _vertices
override def isAbsolute = _isAbsolute
override def rotatable = _rotatable
override def blendMode = _blendMode
}
class Sprite2D(
var _texture: SCHTexture,
var _source: BoundsRect,
var _dest: BoundsRect,
var _rotatable: Boolean = true,
var _blendMode: BlendMode = BlendMode.Alpha) extends TPrimitive2D {
def primtype = PrimType.Quads
val s1 = _source.p1
val s2 = _source.p2
val ss1 =
Point2D(s1.x.toDouble / texture.width, s1.y.toDouble / texture.height)
val ss2 =
Point2D(s2.x.toDouble / texture.width, s2.y.toDouble / texture.height)
def texture = _texture
def vertices = Array(
(Color(0xFFFFFFFF), ss1, _dest.p1),
(Color(0xFFFFFFFF), Point2D(ss2.x, ss1.y), Point2D(_dest.p2.x, _dest.p1.y)),
(Color(0xFFFFFFFF), ss2, _dest.p2),
(Color(0xFFFFFFFF), Point2D(ss1.x, ss2.y), Point2D(_dest.p1.x, _dest.p2.y)))
override def isAbsolute = false
override def rotatable = _rotatable
override def blendMode = _blendMode
}
object Primitive2D {
def sprite(texture: SCHTexture, source: BoundsRect, dest: BoundsRect) = {
val s1 = source.p1
val s2 = source.p2
val ss1 =
Point2D(s1.x.toDouble / texture.width, s1.y.toDouble / texture.height)
val ss2 =
Point2D(s2.x.toDouble / texture.width, s2.y.toDouble / texture.height)
new Primitive2D(
PrimType.Quads,
texture,
Array(
(Color(0xFFFFFFFF), ss1, dest.p1),
(Color(0xFFFFFFFF), Point2D(ss2.x, ss1.y), Point2D(dest.p2.x, dest.p1.y)),
(Color(0xFFFFFFFF), ss2, dest.p2),
(Color(0xFFFFFFFF), Point2D(ss1.x, ss2.y), Point2D(dest.p1.x, dest.p2.y))))
}
}
/*
trait Primitive3D extends Renderable {
var vertices: Array[(Color, Point3D, Point3D)]
*/
| bluebear94/proj-youmu | src/main/scala/net/fluffy8x/thsch/entity/Renderable.scala | Scala | apache-2.0 | 7,194 |
package org.jetbrains.plugins.scala.testingSupport.specs2.specs2_2_10_2_4_6
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.specs2.Specs2FileStructureViewTest
import org.junit.experimental.categories.Category
/**
* @author Roman.Shein
* @since 20.04.2015.
*/
@Category(Array(classOf[SlowTests]))
class Specs2_2_10_2_4_6_FileStructureViewTest extends Specs2FileStructureViewTest with Specs2_2_10_2_4_6_Base
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/specs2/specs2_2_10_2_4_6/Specs2_2_10_2_4_6_FileStructureViewTest.scala | Scala | apache-2.0 | 458 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io
import java.io.{IOException, InputStream, OutputStream}
import java.util.Locale

import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream}
import net.jpountz.lz4.{LZ4BlockInputStream, LZ4BlockOutputStream}
import org.xerial.snappy.{Snappy, SnappyInputStream, SnappyOutputStream}

import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils
/**
* :: DeveloperApi ::
* CompressionCodec allows the customization of choosing different compression implementations
* to be used in block storage.
* CompressionCodec允许定制在块存储中使用不同的压缩实现
*
* Note: The wire protocol for a codec is not guaranteed compatible across versions of Spark.
* This is intended for use as an internal compression utility within a single
* Spark application.
* 注意:编码解码器的有线协议在Spark版本之间不能保证兼容,此功能旨在用作单个Spark应用程序中的内部压缩实用程序
*/
@DeveloperApi
trait CompressionCodec {
def compressedOutputStream(s: OutputStream): OutputStream
def compressedInputStream(s: InputStream): InputStream
}
/**
* 为了节省磁盘存储空间,有些情况下需要对Block进行压缩
*/
private[spark] object CompressionCodec {
//用于压缩内部数据如 RDD分区和shuffle输出的编码解码器
private val configKey = "spark.io.compression.codec"
private val shortCompressionCodecNames = Map(
"lz4" -> classOf[LZ4CompressionCodec].getName,
"lzf" -> classOf[LZFCompressionCodec].getName,
"snappy" -> classOf[SnappyCompressionCodec].getName)
//默认压缩算法snappy,此压缩算法在牺牲少量压缩比例条件下,却极大地提高压缩速度
def getCodecName(conf: SparkConf): String = {
conf.get(configKey, DEFAULT_COMPRESSION_CODEC)
}
def createCodec(conf: SparkConf): CompressionCodec = {
createCodec(conf, getCodecName(conf))
}
def createCodec(conf: SparkConf, codecName: String): CompressionCodec = {
val codecClass = shortCompressionCodecNames.getOrElse(codecName.toLowerCase, codecName)
val codec = try {
val ctor = Utils.classForName(codecClass).getConstructor(classOf[SparkConf])
Some(ctor.newInstance(conf).asInstanceOf[CompressionCodec])
} catch {
case e: ClassNotFoundException => None
case e: IllegalArgumentException => None
}
codec.getOrElse(throw new IllegalArgumentException(s"Codec [$codecName] is not available. " +
s"Consider setting $configKey=$FALLBACK_COMPRESSION_CODEC"))
}
/**
* Return the short version of the given codec name.
* If it is already a short name, just return it.
* 返回给定编解码器名称的短版本,如果它已经是一个简短的名字,只要返回,
*/
def getShortName(codecName: String): String = {
if (shortCompressionCodecNames.contains(codecName)) {
codecName
} else {
shortCompressionCodecNames
.collectFirst { case (k, v) if v == codecName => k }
.getOrElse { throw new IllegalArgumentException(s"No short name for codec $codecName.") }
}
}
val FALLBACK_COMPRESSION_CODEC = "lzf"
val DEFAULT_COMPRESSION_CODEC = "snappy"
val ALL_COMPRESSION_CODECS = shortCompressionCodecNames.values.toSeq
}
/**
* :: DeveloperApi ::
* LZ4 implementation of [[org.apache.spark.io.CompressionCodec]].
* Block size can be configured by `spark.io.compression.lz4.blockSize`.
*
* Note: The wire protocol for this codec is not guaranteed to be compatible across versions
* of Spark. This is intended for use as an internal compression utility within a single Spark
* application.
* 注意:这种编解码器的有线协议不能保证在版本的Spark之间兼容。 这适用于单个Spark应用程序中的内部压缩实用程序。
*/
@DeveloperApi
class LZ4CompressionCodec(conf: SparkConf) extends CompressionCodec {
override def compressedOutputStream(s: OutputStream): OutputStream = {
val blockSize = conf.getSizeAsBytes("spark.io.compression.lz4.blockSize", "32k").toInt
new LZ4BlockOutputStream(s, blockSize)
}
override def compressedInputStream(s: InputStream): InputStream = new LZ4BlockInputStream(s)
}
/**
* :: DeveloperApi ::
* LZF implementation of [[org.apache.spark.io.CompressionCodec]].
*
* Note: The wire protocol for this codec is not guaranteed to be compatible across versions
* of Spark. This is intended for use as an internal compression utility within a single Spark
* application.
* 注意:这种编解码器的有线协议不能保证在版本的Spark之间兼容,这适用于单个Spark应用程序中的内部压缩实用程序。
*/
@DeveloperApi
class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {
override def compressedOutputStream(s: OutputStream): OutputStream = {
new LZFOutputStream(s).setFinishBlockOnFlush(true)
}
override def compressedInputStream(s: InputStream): InputStream = new LZFInputStream(s)
}
/**
* :: DeveloperApi ::
* Snappy implementation of [[org.apache.spark.io.CompressionCodec]].
* Block size can be configured by `spark.io.compression.snappy.blockSize`.
* [[org.apache.spark.io.CompressionCodec]]的Snappy实现]块大小可以由`spark.io.compression.snappy.blockSize`配置
*
* Note: The wire protocol for this codec is not guaranteed to be compatible across versions
* of Spark. This is intended for use as an internal compression utility within a single Spark
* application.
* 注意:这种编解码器的有线协议不能保证在版本的Spark之间兼容,这适用于单个Spark应用程序中的内部压缩实用程序,
*/
@DeveloperApi
class SnappyCompressionCodec(conf: SparkConf) extends CompressionCodec {
try {
Snappy.getNativeLibraryVersion
} catch {
case e: Error => throw new IllegalArgumentException(e)
}
override def compressedOutputStream(s: OutputStream): OutputStream = {
val blockSize = conf.getSizeAsBytes("spark.io.compression.snappy.blockSize", "32k").toInt
new SnappyOutputStreamWrapper(new SnappyOutputStream(s, blockSize))
}
override def compressedInputStream(s: InputStream): InputStream = new SnappyInputStream(s)
}
/**
* Wrapper over [[SnappyOutputStream]] which guards against write-after-close and double-close
* issues. See SPARK-7660 for more details. This wrapping can be removed if we upgrade to a version
* of snappy-java that contains the fix for https://github.com/xerial/snappy-java/issues/107.
*/
private final class SnappyOutputStreamWrapper(os: SnappyOutputStream) extends OutputStream {
private[this] var closed: Boolean = false
override def write(b: Int): Unit = {
if (closed) {
throw new IOException("Stream is closed")
}
os.write(b)
}
override def write(b: Array[Byte]): Unit = {
if (closed) {
throw new IOException("Stream is closed")
}
os.write(b)
}
override def write(b: Array[Byte], off: Int, len: Int): Unit = {
if (closed) {
throw new IOException("Stream is closed")
}
os.write(b, off, len)
}
override def flush(): Unit = {
if (closed) {
throw new IOException("Stream is closed")
}
os.flush()
}
override def close(): Unit = {
if (!closed) {
closed = true
os.close()
}
}
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/io/CompressionCodec.scala | Scala | apache-2.0 | 8,173 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.