| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package sbt
package plugins
import sbt.librarymanagement.{ Configuration, Configurations }
import Def.Setting
/** A plugin representing the ability to build a JVM project.
*
* Core tasks/keys:
* - `run`
* - `test`
* - `compile`
* - `fullClasspath`
* Core configurations
* - `Test`
* - `Compile`
*/
object JvmPlugin extends AutoPlugin {
// We are automatically enabled for any IvyModule project. We also require its settings
// for ours to work.
override def requires = IvyPlugin
override def trigger = allRequirements
override lazy val projectSettings: Seq[Setting[_]] =
Defaults.runnerSettings ++
Defaults.paths ++
Classpaths.jvmPublishSettings ++
Classpaths.jvmBaseSettings ++
Defaults.projectTasks ++
Defaults.packageBase ++
Defaults.compileBase ++
Defaults.defaultConfigs
override lazy val globalSettings: Seq[Setting[_]] =
Defaults.globalJvmCore
override def projectConfigurations: Seq[Configuration] =
Configurations.default
}
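For context, a minimal sketch of how a third-party AutoPlugin typically builds on JvmPlugin, mirroring the `requires`/`trigger` pattern above; the plugin name and the added setting are illustrative assumptions, not part of sbt.
import sbt._
import sbt.Keys._
import sbt.plugins.JvmPlugin

// Hypothetical user plugin: requiring JvmPlugin ensures the compile/test/run settings
// are loaded before ours, just as JvmPlugin itself requires IvyPlugin above.
object MyBuildPlugin extends AutoPlugin {
  override def requires = JvmPlugin        // load after JvmPlugin's projectSettings
  override def trigger  = allRequirements  // auto-enable wherever JvmPlugin is enabled
  override lazy val projectSettings: Seq[Def.Setting[_]] = Seq(
    scalacOptions in Compile += "-deprecation" // example setting layered on the JVM defaults
  )
}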
| dansanduleac/sbt | main/src/main/scala/sbt/plugins/JvmPlugin.scala | Scala | bsd-3-clause | 1,016 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.Locale
import scala.collection.JavaConverters._
import org.apache.spark.annotation.{Experimental, InterfaceStability}
import org.apache.spark.sql.{AnalysisException, Dataset, ForeachWriter}
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.streaming.{ForeachSink, MemoryPlan, MemorySink}
/**
* :: Experimental ::
* Interface used to write a streaming `Dataset` to external storage systems (e.g. file systems,
* key-value stores, etc). Use `Dataset.writeStream` to access this.
*
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
private val df = ds.toDF()
/**
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `OutputMode.Append()`: only the new rows in the streaming DataFrame/Dataset will be
* written to the sink
* - `OutputMode.Complete()`: all the rows in the streaming DataFrame/Dataset will be written
* to the sink every time there are some updates
* - `OutputMode.Update()`: only the rows that were updated in the streaming DataFrame/Dataset
* will be written to the sink every time there are some updates. If
* the query doesn't contain aggregations, it will be equivalent to
* `OutputMode.Append()` mode.
*
* @since 2.0.0
*/
def outputMode(outputMode: OutputMode): DataStreamWriter[T] = {
this.outputMode = outputMode
this
}
/**
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `append`: only the new rows in the streaming DataFrame/Dataset will be written to
* the sink
* - `complete`: all the rows in the streaming DataFrame/Dataset will be written to the sink
* every time there are some updates
* - `update`: only the rows that were updated in the streaming DataFrame/Dataset will
* be written to the sink every time there are some updates. If the query doesn't
* contain aggregations, it will be equivalent to `append` mode.
* @since 2.0.0
*/
def outputMode(outputMode: String): DataStreamWriter[T] = {
this.outputMode = InternalOutputModes(outputMode)
this
}
/**
* Set the trigger for the stream query. The default value is `ProcessingTime(0)` and it will run
* the query as fast as possible.
*
* Scala Example:
* {{{
* df.writeStream.trigger(ProcessingTime("10 seconds"))
*
* import scala.concurrent.duration._
* df.writeStream.trigger(ProcessingTime(10.seconds))
* }}}
*
* Java Example:
* {{{
* df.writeStream().trigger(ProcessingTime.create("10 seconds"))
*
* import java.util.concurrent.TimeUnit
* df.writeStream().trigger(ProcessingTime.create(10, TimeUnit.SECONDS))
* }}}
*
* @since 2.0.0
*/
def trigger(trigger: Trigger): DataStreamWriter[T] = {
this.trigger = trigger
this
}
/**
* Specifies the name of the [[StreamingQuery]] that can be started with `start()`.
* This name must be unique among all the currently active queries in the associated SQLContext.
*
* @since 2.0.0
*/
def queryName(queryName: String): DataStreamWriter[T] = {
this.extraOptions += ("queryName" -> queryName)
this
}
/**
* Specifies the underlying output data source.
*
* @since 2.0.0
*/
def format(source: String): DataStreamWriter[T] = {
this.source = source
this
}
/**
* Partitions the output by the given columns on the file system. If specified, the output is
* laid out on the file system similar to Hive's partitioning scheme. As an example, when we
* partition a dataset by year and then month, the directory layout would look like:
*
* - year=2016/month=01/
* - year=2016/month=02/
*
* Partitioning is one of the most widely used techniques to optimize physical data layout.
* It provides a coarse-grained index for skipping unnecessary data reads when queries have
* predicates on the partitioned columns. In order for partitioning to work well, the number
* of distinct values in each column should typically be less than tens of thousands.
*
* @since 2.0.0
*/
@scala.annotation.varargs
def partitionBy(colNames: String*): DataStreamWriter[T] = {
this.partitioningColumns = Option(colNames)
this
}
/**
* Adds an output option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to format timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 2.0.0
*/
def option(key: String, value: String): DataStreamWriter[T] = {
this.extraOptions += (key -> value)
this
}
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataStreamWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataStreamWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataStreamWriter[T] = option(key, value.toString)
/**
* (Scala-specific) Adds output options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to format timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 2.0.0
*/
def options(options: scala.collection.Map[String, String]): DataStreamWriter[T] = {
this.extraOptions ++= options
this
}
/**
* Adds output options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to format timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 2.0.0
*/
def options(options: java.util.Map[String, String]): DataStreamWriter[T] = {
this.options(options.asScala)
this
}
/**
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
def start(path: String): StreamingQuery = {
option("path", path).start()
}
/**
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
def start(): StreamingQuery = {
if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw new AnalysisException("Hive data source can only be used with tables, you can not " +
"write files of Hive data source directly.")
}
if (source == "memory") {
assertNotPartitioned("memory")
if (extraOptions.get("queryName").isEmpty) {
throw new AnalysisException("queryName must be specified for memory sink")
}
val sink = new MemorySink(df.schema, outputMode)
val resultDf = Dataset.ofRows(df.sparkSession, new MemoryPlan(sink))
val chkpointLoc = extraOptions.get("checkpointLocation")
val recoverFromChkpoint = outputMode == OutputMode.Complete()
val query = df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
chkpointLoc,
df,
sink,
outputMode,
useTempCheckpointLocation = true,
recoverFromCheckpointLocation = recoverFromChkpoint,
trigger = trigger)
resultDf.createOrReplaceTempView(query.name)
query
} else if (source == "foreach") {
assertNotPartitioned("foreach")
val sink = new ForeachSink[T](foreachWriter)(ds.exprEnc)
df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
extraOptions.get("checkpointLocation"),
df,
sink,
outputMode,
useTempCheckpointLocation = true,
trigger = trigger)
} else {
val (useTempCheckpointLocation, recoverFromCheckpointLocation) =
if (source == "console") {
(true, false)
} else {
(false, true)
}
val dataSource =
DataSource(
df.sparkSession,
className = source,
options = extraOptions.toMap,
partitionColumns = normalizedParCols.getOrElse(Nil))
df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
extraOptions.get("checkpointLocation"),
df,
dataSource.createSink(outputMode),
outputMode,
useTempCheckpointLocation = useTempCheckpointLocation,
recoverFromCheckpointLocation = recoverFromCheckpointLocation,
trigger = trigger)
}
}
/**
* Starts the execution of the streaming query, which will continually send results to the given
* `ForeachWriter` as new data arrives. The `ForeachWriter` can be used to send the data
* generated by the `DataFrame`/`Dataset` to an external system.
*
* Scala example:
* {{{
* datasetOfString.writeStream.foreach(new ForeachWriter[String] {
*
* def open(partitionId: Long, version: Long): Boolean = {
* // open connection
* }
*
* def process(record: String) = {
* // write string to connection
* }
*
* def close(errorOrNull: Throwable): Unit = {
* // close the connection
* }
* }).start()
* }}}
*
* Java example:
* {{{
* datasetOfString.writeStream().foreach(new ForeachWriter<String>() {
*
* @Override
* public boolean open(long partitionId, long version) {
* // open connection
* }
*
* @Override
* public void process(String value) {
* // write string to connection
* }
*
* @Override
* public void close(Throwable errorOrNull) {
* // close the connection
* }
* }).start();
* }}}
*
* @since 2.0.0
*/
def foreach(writer: ForeachWriter[T]): DataStreamWriter[T] = {
this.source = "foreach"
this.foreachWriter = if (writer != null) {
ds.sparkSession.sparkContext.clean(writer)
} else {
throw new IllegalArgumentException("foreach writer cannot be null")
}
this
}
private def normalizedParCols: Option[Seq[String]] = partitioningColumns.map { cols =>
cols.map(normalize(_, "Partition"))
}
/**
* The given column name may not be equal to any of the existing column names in a
* case-insensitive context. Normalize the given column name to the real one so that we don't
* need to care about case sensitivity afterwards.
*/
private def normalize(columnName: String, columnType: String): String = {
val validColumnNames = df.logicalPlan.output.map(_.name)
validColumnNames.find(df.sparkSession.sessionState.analyzer.resolver(_, columnName))
.getOrElse(throw new AnalysisException(s"$columnType column $columnName not found in " +
s"existing columns (${validColumnNames.mkString(", ")})"))
}
private def assertNotPartitioned(operation: String): Unit = {
if (partitioningColumns.isDefined) {
throw new AnalysisException(s"'$operation' does not support partitioning")
}
}
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
private var source: String = df.sparkSession.sessionState.conf.defaultDataSourceName
private var outputMode: OutputMode = OutputMode.Append
private var trigger: Trigger = Trigger.ProcessingTime(0L)
private var extraOptions = new scala.collection.mutable.HashMap[String, String]
private var foreachWriter: ForeachWriter[T] = null
private var partitioningColumns: Option[Seq[String]] = None
}
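A minimal end-to-end sketch of the builder documented above; the SparkSession, the input schema, and the file-system paths are assumptions for illustration only, not part of DataStreamWriter.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

// Hypothetical driver code exercising the builder defined above.
object DataStreamWriterUsage {
  def run(spark: SparkSession): Unit = {
    val schema = StructType(Seq(
      StructField("year", IntegerType),
      StructField("month", IntegerType),
      StructField("payload", StringType)))
    val events = spark.readStream.schema(schema).json("/data/in") // assumed input directory
    val query = events.writeStream
      .format("parquet")                              // underlying sink, see format()
      .outputMode("append")                           // see outputMode(String)
      .option("checkpointLocation", "/data/chk")      // forwarded to the sink
      .partitionBy("year", "month")                   // layout year=.../month=..., see partitionBy()
      .trigger(Trigger.ProcessingTime("10 seconds"))  // see trigger()
      .start("/data/out")                             // returns a StreamingQuery
    query.awaitTermination()
  }
}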
| MLnick/spark | sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala | Scala | apache-2.0 | 13,567 |
class SCL5869(private[this] var param:Int) {
def <ref>param():Int = this.param
def param_= (param:Int) {this.param = param}
}
| katejim/intellij-scala | testdata/resolve/failed/scope/SCL5869.scala | Scala | apache-2.0 | 129 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables
import org.mockito.Mockito._
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import io.deepsense.deeplang.inference.{InferContext, InferenceWarnings}
import io.deepsense.deeplang.{DKnowledge, ExecutionContext, UnitSpec}
class EvaluatorSpec extends UnitSpec {
private def evaluator = {
val e = mock[Evaluator]
when(e.evaluate) thenCallRealMethod()
e
}
val dataFrame = mock[DataFrame]
val metricValue = mock[MetricValue]
val execCtx = mock[ExecutionContext]
val inferCtx = mock[InferContext]
val emptyWarnings = InferenceWarnings.empty
"Evaluator" should {
"evaluate DataFrame" in {
val e = evaluator
when(e._evaluate(execCtx, dataFrame)) thenReturn metricValue
e.evaluate(execCtx)(())(dataFrame) shouldBe metricValue
}
"infer knowledge" in {
val e = evaluator
when(e._infer(DKnowledge(dataFrame))) thenReturn metricValue
val (knowledge, warnings) = e.evaluate.infer(inferCtx)(())(DKnowledge(dataFrame))
knowledge.single shouldBe metricValue
warnings shouldBe emptyWarnings
}
}
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/test/scala/io/deepsense/deeplang/doperables/EvaluatorSpec.scala | Scala | apache-2.0 | 1,736 |
/*
Copyright (c) 2012, The Children's Hospital of Philadelphia All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package edu.chop.cbmi.dataExpress.dataWriters.sql
import edu.chop.cbmi.dataExpress.backends.SqlBackend
import edu.chop.cbmi.dataExpress.exceptions.TableDoesNotExist
import collection.mutable.ListBuffer
import edu.chop.cbmi.dataExpress.dataModels.{DataType, DataTable, DataRow}
import edu.chop.cbmi.dataExpress.dataWriters.{Updater, DataWriter}
/**
* Created by IntelliJ IDEA.
* User: masinoa
* Date: 12/16/11
* Time: 12:37 PM
* To change this template use File | Settings | File Templates.
*/
object SqlTableWriter{
val OVERWRITE_OPTION_DROP = 0
val OVERWRITE_OPTION_TRUNCATE = 1
val OVERWRITE_OPTION_APPEND = 2
}
case class SqlTableWriter(val backend : SqlBackend, val schema : Option[String] = None, val catalog : String = null)
extends DataWriter with Updater{
private def column_names(table_name : String) = {
val rs = backend.connection.getMetaData.getColumns(catalog, schema.getOrElse(null), table_name, null)
var names = scala.collection.mutable.HashMap.empty[Int,String]
if(rs.next){
do{
names += (rs.getInt(17)->rs.getString(4))
}while(rs.next)
names.toList.sortBy(_._1).map(_._2)
}else throw TableDoesNotExist(table_name)
}
/**
* @param row A DataRow whose column names match columns in the target table; not all columns are required
* @return SqlOperationStatus containing the status and the primary key of the newly inserted row
*/
override def insert_row[T](table_name:String, row: DataRow[T]) =
SqlOperationStatus(true, backend.insertReturningKeys(table_name, row, schema))
/**
* @param table A DataTable whose column names match columns in the target table; not all columns are required
* @return SqlOperationStatus containing the status and the primary keys for each row
*/
override def insert_rows[T](table_name: String, table: DataTable[T]) = {
//TODO for logging it would help to know how many rows were inserted
val result = backend.batchInsert(table_name, table, schema)
SqlOperationStatus(true)
}
override def insert_rows[T](table_name: String, rows: Iterable[DataRow[T]]) = {
val result = backend.batchInsertRows(table_name, rows.iterator, column_names(table_name), schema)
if(result == -1)SqlOperationStatus(false) else SqlOperationStatus(true)
}
/**
* @param f A function that takes a column_name : String and returns the value for that column
*/
override def insert_row[T](table_name:String, f : (String)=>Option[T]) =
SqlOperationStatus(true, backend.insertReturningKeys(table_name, DataRow(apply_f_for_cols(table_name,f): _*), schema))
override def update_row[T](table_name : String, updated_row : DataRow[T], filter : (String,_)*) =
SqlOperationStatus(backend.updateRow(table_name, updated_row, filter.toList, schema))
def update_row[T](table_name : String, filter : (String,_)*)(f:(String)=>Option[T]) =
SqlOperationStatus(backend.updateRow(table_name, DataRow(apply_f_for_cols(table_name, f):_*), filter.toList, schema))
private def apply_f_for_cols[T](table_name:String, f:(String)=>T) = {
val new_row = ListBuffer.empty[(String,T)]
column_names(table_name).foreach((name:String)=>{
(f(name) : @unchecked) match{
case Some(t) => new_row += name->t.asInstanceOf[T]
case None => {} //row was filtered
}
})
new_row
}
override def insert_table[T,G<:DataType](table_name: String, data_types: Seq[G] = Seq.empty[DataType], table: DataTable[T],
overwrite_option: Int = SqlTableWriter.OVERWRITE_OPTION_APPEND) = {
if(data_types.isEmpty)throw new Exception("data_types list is empty in insert table")
else {
overwrite_option match {
case SqlTableWriter.OVERWRITE_OPTION_DROP =>
backend.createTable(table_name, table.column_names.toList, data_types.toList, schema)
case SqlTableWriter.OVERWRITE_OPTION_TRUNCATE => backend.truncateTable(table_name)
case SqlTableWriter.OVERWRITE_OPTION_APPEND => {} // nothing to do here
case _ => throw new Exception("Unsupported option : " + overwrite_option)
}
if(table != DataTable.empty) insert_rows(table_name, table)
else SqlOperationStatus(true)
}
}
}
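A hypothetical usage sketch of the writer above; the SqlBackend instance, schema, table, and column names are assumptions for illustration, and the tuple-varargs DataRow constructor is inferred from its use in this file.
import edu.chop.cbmi.dataExpress.backends.SqlBackend
import edu.chop.cbmi.dataExpress.dataModels.DataRow

// Hypothetical caller: insert one row whose column names match the target table,
// then update it using a filter on the "name" column.
object SqlTableWriterUsage {
  def example(backend: SqlBackend): Unit = {
    val writer = SqlTableWriter(backend, schema = Some("public"))
    writer.insert_row("people", DataRow("name" -> "Ada", "age" -> 36))
    writer.update_row("people", DataRow("age" -> 37), "name" -> "Ada")
  }
}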
| chop-dbhi/dataexpress | src/main/scala/edu/chop/cbmi/dataExpress/dataWriters/sql/SqlTableWriter.scala | Scala | bsd-2-clause | 5,553 |
package net.stsmedia.akka.http.models
import reactivemongo.bson.{BSONDocument, BSONDocumentReader, BSONDocumentWriter, BSONObjectID}
import spray.json._
/**
* Created by sschmidt on 14/11/14.
*/
case class User(id: BSONObjectID, name: String)
object User extends DefaultJsonProtocol {
implicit object UserReader extends BSONDocumentReader[User] {
def read(doc: BSONDocument): User = {
val id = doc.getAs[BSONObjectID]("_id").get
val name = doc.getAs[String]("name").get
User(id, name)
}
}
implicit object UserWriter extends BSONDocumentWriter[User] {
override def write(u: User): BSONDocument = BSONDocument {
"name" -> u.name
}
}
implicit object UserJsonFormat extends RootJsonFormat[User] {
def write(u: User) = JsObject("name" -> JsString(u.name))
def read(value: JsValue) = value match {
case JsArray(Vector(JsString(name))) =>
new User(null, name)
case _ => deserializationError("User expected")
}
}
}
| stsmedia/akka-http-server | src/main/scala/net/stsmedia/akka/http/models/User.scala | Scala | mit | 999 |
package io.udash.demos.jquery.views.functions
import io.udash.demos.jquery.views.FunctionView
import io.udash.wrappers.jquery._
import scalatags.JsDom.tags2
/** Based on examples from: <a href="http://api.jquery.com/add/">jQuery Docs</a>. */
object AddView extends FunctionView {
import scalatags.JsDom.all._
override protected val content = div(cls := "addview")(
h3(".add() & .css()"),
tags2.style(
""".addview div {
| width: 60px;
| height: 60px;
| margin: 10px;
| float: left;
|}
|.addview p {
| clear: left;
| font-weight: bold;
| font-size: 16px;
| color: blue;
| margin: 0 10px;
| padding: 2px;
|}""".stripMargin
),
div(),
div(),
div(),
div(),
div(),
div(),
p("Added this... (notice no border)")
).render
override protected def script = () => {
jQ("div", content).css("border", "2px solid red")
.add("p", content)
.css("background", "yellow")
}
}
| UdashFramework/scala-js-jquery | example/src/main/scala/io/udash/demos/jquery/views/functions/AddView.scala | Scala | apache-2.0 | 1,044 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.words
import org.scalatest.matchers._
import org.scalatest.enablers._
import org.scalautils._
import org.scalatest.FailureMessages
import org.scalatest.UnquotedString
import org.scalatest.Resources
import scala.collection.GenTraversable
import scala.collection.GenSeq
import org.scalatest.Matchers.newTestFailedException
import org.scalatest.Helper.accessProperty
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="../Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class HaveWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have length (9)
* ^
* </pre>
*/
def length(expectedLength: Long): MatcherFactory1[Any, Length] =
new MatcherFactory1[Any, Length] {
def matcher[T <: Any : Length]: Matcher[T] = {
val length = implicitly[Length[T]]
new Matcher[T] {
def apply(left: T): MatchResult = {
val lengthOfLeft = length.extentOf(left)
MatchResult(
lengthOfLeft == expectedLength,
// FailureMessages("hadLengthInsteadOfExpectedLength", left, lengthOfLeft, expectedLength),
FailureMessages("didNotHaveExpectedLength", left, expectedLength),
FailureMessages("hadExpectedLength", left, expectedLength)
)
}
}
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have size (9)
* ^
* </pre>
*
* <p>
* Currently, this method will produce a <code>Matcher[AnyRef]</code>, and if the
* <code>AnyRef</code> passed to that matcher's <code>apply</code> method does not have the appropriate <code>size</code> property
* structure, all will compile but a <code>TestFailedException</code> will result at runtime explaining the problem.
* In a future ScalaTest release, this may be tightened so that all is statically checked at compile time.
* </p>
*/
def size(expectedSize: Long): MatcherFactory1[Any, Size] =
new MatcherFactory1[Any, Size] {
def matcher[T <: Any : Size]: Matcher[T] = {
val length = implicitly[Size[T]]
new Matcher[T] {
def apply(left: T): MatchResult = {
val lengthOfLeft = length.extentOf(left)
MatchResult(
lengthOfLeft == expectedSize,
// FailureMessages("hadSizeInsteadOfExpectedSize", left, lengthOfLeft, expectedSize),
FailureMessages("didNotHaveExpectedSize", left, expectedSize),
FailureMessages("hadExpectedSize", left, expectedSize)
)
}
}
}
}
/*
new Matcher[AnyRef] {
def apply(left: AnyRef): MatchResult =
left match {
case leftArray: Array[_] =>
MatchResult(
leftArray.length == expectedSize,
FailureMessages("didNotHaveExpectedSize", left, expectedSize),
FailureMessages("hadExpectedSize", left, expectedSize)
)
case leftTrav: GenTraversable[_] =>
MatchResult(
leftTrav.size == expectedSize,
FailureMessages("didNotHaveExpectedSize", left, expectedSize),
FailureMessages("hadExpectedSize", left, expectedSize)
)
case leftJavaList: java.util.List[_] =>
MatchResult(
leftJavaList.size == expectedSize,
FailureMessages("didNotHaveExpectedSize", left, expectedSize),
FailureMessages("hadExpectedSize", left, expectedSize)
)
case _ =>
accessProperty(left, 'size, false) match {
case None =>
throw newTestFailedException(Resources("noSizeStructure", expectedSize.toString))
case Some(result) =>
MatchResult(
result == expectedSize,
FailureMessages("didNotHaveExpectedSize", left, expectedSize),
FailureMessages("hadExpectedSize", left, expectedSize)
)
}
}
}
*/
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have (title ("A Tale of Two Cities"))
* ^
* </pre>
*/
def apply[T](firstPropertyMatcher: HavePropertyMatcher[T, _], propertyMatchers: HavePropertyMatcher[T, _]*): Matcher[T] =
new Matcher[T] {
def apply(left: T): MatchResult = {
val results =
for (propertyVerifier <- firstPropertyMatcher :: propertyMatchers.toList) yield
propertyVerifier(left)
val firstFailureOption = results.find(pv => !pv.matches)
val justOneProperty = propertyMatchers.length == 0
firstFailureOption match {
case Some(firstFailure) =>
val failedVerification = firstFailure
val failureMessage =
FailureMessages(
"propertyDidNotHaveExpectedValue",
UnquotedString(failedVerification.propertyName),
failedVerification.expectedValue,
failedVerification.actualValue,
left
)
val midSentenceFailureMessage =
FailureMessages(
"midSentencePropertyDidNotHaveExpectedValue",
UnquotedString(failedVerification.propertyName),
failedVerification.expectedValue,
failedVerification.actualValue,
left
)
MatchResult(false, failureMessage, failureMessage, midSentenceFailureMessage, midSentenceFailureMessage)
case None =>
val failureMessage =
if (justOneProperty) {
val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
FailureMessages(
"propertyHadExpectedValue",
UnquotedString(firstPropertyResult.propertyName),
firstPropertyResult.expectedValue,
left
)
}
else FailureMessages("allPropertiesHadExpectedValues", left)
val midSentenceFailureMessage =
if (justOneProperty) {
val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
FailureMessages(
"midSentencePropertyHadExpectedValue",
UnquotedString(firstPropertyResult.propertyName),
firstPropertyResult.expectedValue,
left
)
}
else FailureMessages("midSentenceAllPropertiesHadExpectedValues", left)
MatchResult(true, failureMessage, failureMessage, midSentenceFailureMessage, midSentenceFailureMessage)
}
}
}
}
| svn2github/scalatest | src/main/scala/org/scalatest/words/HaveWord.scala | Scala | apache-2.0 | 7,653 |
package com.seanshubin.scala.training.sample.data
import com.seanshubin.scala.training.core.Item
trait ItemFormatter {
def format(item: Item): String
}
| SeanShubin/scala-training | sample-data/src/main/scala/com/seanshubin/scala/training/sample/data/ItemFormatter.scala | Scala | unlicense | 156 |
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.{ DefaultServlet, ServletContextHandler }
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
object JettyLauncher {
def main(args: Array[String]) {
val port = if(System.getenv("PORT") != null) System.getenv("PORT").toInt else 8080
val server = new Server(port)
val context = new WebAppContext()
context.setContextPath("/")
context.setResourceBase("src/main/webapp")
context.addServlet(classOf[edu.depauw.sjss.SJSS], "/*")
context.setEventListeners(Array(new ScalatraListener))
server.setHandler(context)
server.start
server.join
}
}
| DePauwREU2013/sjs-server | src/main/scala/JettyLauncher.scala | Scala | mit | 694 |
package jp.opap.material.data
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.SerializerProvider
import com.fasterxml.jackson.databind.module.SimpleModule
import com.fasterxml.jackson.databind.ser.std.StdSerializer
object JsonSerializers {
object AppSerializerModule extends SimpleModule {
this.addSerializer(classOf[Seq[_]], new SeqSerializer())
this.addSerializer(classOf[Map[_, _]], new MapSerializer())
this.addSerializer(classOf[LocalDateTime], new LocalDateTimeSerializer())
this.addSerializer(classOf[Option[_]], new OptionSerializer())
// this.addDeserializer(classOf[Option[_]], OptionDeserializer)
}
class SeqSerializer() extends StdSerializer[Seq[_]](classOf[Seq[_]]) {
override def serialize(value: Seq[_], gen: JsonGenerator, provider: SerializerProvider): Unit = {
gen.writeStartArray()
value.foreach(gen.writeObject)
gen.writeEndArray()
}
}
class MapSerializer() extends StdSerializer[Map[_, _]](classOf[Map[_, _]]) {
override def serialize(value: Map[_, _], gen: JsonGenerator, provider: SerializerProvider): Unit = {
gen.writeStartObject()
value.foreach(entry => gen.writeObjectField(entry._1.toString, entry._2))
gen.writeEndObject()
}
}
class LocalDateTimeSerializer() extends StdSerializer[LocalDateTime](classOf[LocalDateTime]) {
override def serialize(value: LocalDateTime, gen: JsonGenerator, provider: SerializerProvider): Unit = {
gen.writeString(value.format(DateTimeFormatter.ISO_DATE_TIME))
}
}
class OptionSerializer() extends StdSerializer[Option[_]](classOf[Option[_]]) {
override def serialize(value: Option[_], gen: JsonGenerator, provider: SerializerProvider): Unit = {
if (value.isDefined)
gen.writeString(value.get.toString)
else
gen.writeNull()
}
}
// object OptionDeserializer extends StdDeserializer[Option[_]](classOf[Option[_]]) {
// override def deserialize(p: JsonParser, ctxt: DeserializationContext): Option[_] = ???
// }
}
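For context, a short sketch (not part of this file) of how AppSerializerModule would typically be registered on a Jackson ObjectMapper; the wiring object here is an assumption for illustration.
import com.fasterxml.jackson.databind.ObjectMapper
import jp.opap.material.data.JsonSerializers.AppSerializerModule

// Hypothetical wiring: once the module is registered, Scala Seq, Map, Option and
// LocalDateTime values are serialized by the custom serializers defined above.
object JsonSerializersUsage {
  def newMapper(): ObjectMapper = {
    val mapper = new ObjectMapper()
    mapper.registerModule(AppSerializerModule)
    mapper
  }
}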
| opap-jp/material-explorer | rest/src/main/scala/jp/opap/material/data/JsonSerializers.scala | Scala | mit | 2,135 |
/*                                                                      *\
** Squants                                                              **
**                                                                      **
** Scala Quantities and Units of Measure Library and DSL                **
** (c) 2013-2015, Gary Keorkunian                                       **
**                                                                      **
\*                                                                      */
package squants.energy
import squants.electro.{Coulombs, Volts}
import squants.mass.{Kilograms, Moles}
import squants.motion.{MetersPerSecond, NewtonSeconds, Newtons}
import squants.space.{CubicMeters, Meters}
import squants.thermal.{JoulesPerKelvin, Kelvin}
import squants.time.Hours
import squants.{MetricSystem, QuantityParseException}
import squants.radio.{WattsPerSquareMeter, BecquerelsPerSquareMeterSecond}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
* @author garyKeorkunian
* @since 0.1
*
*/
class EnergySpec extends AnyFlatSpec with Matchers {
behavior of "Energy and its Units of Measure"
it should "create values using UOM factories" in {
WattHours(1).toWattHours should be(1)
MilliwattHours(1).toMilliwattHours should be(1)
KilowattHours(1).toKilowattHours should be(1)
MegawattHours(1).toMegawattHours should be(1)
GigawattHours(1).toGigawattHours should be(1)
Joules(1).toJoules should be(1)
Picojoules(1).toPicojoules should be(1)
Nanojoules(1).toNanojoules should be(1)
Microjoules(1).toMicrojoules should be(1)
Millijoules(1).toMillijoules should be(1)
Kilojoules(1).toKilojoules should be(1)
Megajoules(1).toMegajoules should be(1)
Gigajoules(1).toGigajoules should be(1)
Terajoules(1).toTerajoules should be(1)
BritishThermalUnits(1).toBtus should be(1)
MBtus(1).toMBtus should be(1)
MMBtus(1).toMMBtus should be(1)
Ergs(1).toErgs should be(1)
ElectronVolt(1).toeV should be(1)
MilliElectronVolt(1).tomeV should be(1)
KiloElectronVolt(1).tokeV should be(1)
MegaElectronVolt(1).toMeV should be(1)
GigaElectronVolt(1).toGeV should be(1)
TeraElectronVolt(1).toTeV should be(1)
PetaElectronVolt(1).toPeV should be(1)
ExaElectronVolt(1).toEeV should be(1)
}
it should "create values from properly formatted Strings" in {
Energy("10.22 J").get should be(Joules(10.22))
Energy("10.22 Wh").get should be(WattHours(10.22))
Energy("10.22 mWh").get should be(MilliwattHours(10.22))
Energy("10.22 kWh").get should be(KilowattHours(10.22))
Energy("10.22 MWh").get should be(MegawattHours(10.22))
Energy("10.22 GWh").get should be(GigawattHours(10.22))
Energy("10.22 Btu").get should be(BritishThermalUnits(10.22))
Energy("10.22 MBtu").get should be(MBtus(10.22))
Energy("10.22 MMBtu").get should be(MMBtus(10.22))
Energy("10.22 erg").get should be(Ergs(10.22))
Energy("10.22 eV").get should be(ElectronVolt(10.22))
Energy("10.22 meV").get should be(MilliElectronVolt(10.22))
Energy("10.22 keV").get should be(KiloElectronVolt(10.22))
Energy("10.22 MeV").get should be(MegaElectronVolt(10.22))
Energy("10.22 GeV").get should be(GigaElectronVolt(10.22))
Energy("10.22 TeV").get should be(TeraElectronVolt(10.22))
Energy("10.22 PeV").get should be(PetaElectronVolt(10.22))
Energy("10.22 EeV").get should be(ExaElectronVolt(10.22))
Energy("10.22 zz").failed.get should be(QuantityParseException("Unable to parse Energy", "10.22 zz"))
Energy("ZZ J").failed.get should be(QuantityParseException("Unable to parse Energy", "ZZ J"))
}
it should "properly convert to all supported Units of Measure" in {
val x = WattHours(1)
x.toWattHours should be(1)
x.toMilliwattHours should be(1 / MetricSystem.Milli)
x.toKilowattHours should be(1 / MetricSystem.Kilo)
x.toMegawattHours should be(1 / MetricSystem.Mega)
x.toGigawattHours should be(1 / MetricSystem.Giga)
x.toJoules should be(1 / Joules.conversionFactor)
x.toPicojoules should be(1 / Picojoules.conversionFactor)
x.toNanojoules should be(1 / Nanojoules.conversionFactor)
x.toMicrojoules should be(1 / Microjoules.conversionFactor)
x.toMillijoules should be(1 / Millijoules.conversionFactor)
x.toKilojoules should be(1 / Kilojoules.conversionFactor)
x.toMegajoules should be(1 / Megajoules.conversionFactor)
x.toGigajoules should be(1 / Gigajoules.conversionFactor)
x.toBtus should be(1 / BritishThermalUnits.conversionFactor)
x.toMBtus should be(1 / MBtus.conversionFactor)
x.toMMBtus should be(1 / MMBtus.conversionFactor)
x.toErgs should be(1 / Ergs.conversionFactor)
x.toeV should be(1 / ElectronVolt.conversionFactor)
x.tomeV should be(1 / MilliElectronVolt.conversionFactor)
x.tokeV should be(1 / KiloElectronVolt.conversionFactor)
x.toMeV should be(1 / MegaElectronVolt.conversionFactor)
x.toGeV should be(1 / GigaElectronVolt.conversionFactor)
x.toTeV should be(1 / TeraElectronVolt.conversionFactor)
x.toPeV should be(1 / PetaElectronVolt.conversionFactor)
x.toEeV should be(1 / ExaElectronVolt.conversionFactor)
ElectronVolt(1).toJoules should be(1.602176565e-19)
}
it should "return properly formatted strings for all supported Units of Measure" in {
WattHours(1).toString(WattHours) should be("1.0 Wh")
MilliwattHours(1).toString(MilliwattHours) should be("1.0 mWh")
KilowattHours(1).toString(KilowattHours) should be("1.0 kWh")
MegawattHours(1).toString(MegawattHours) should be("1.0 MWh")
GigawattHours(1).toString(GigawattHours) should be("1.0 GWh")
Joules(1).toString(Joules) should be("1.0 J")
Picojoules(1).toString(Picojoules) should be("1.0 pJ")
Nanojoules(1).toString(Nanojoules) should be("1.0 nJ")
Microjoules(1).toString(Microjoules) should be("1.0 µJ")
Millijoules(1).toString(Millijoules) should be("1.0 mJ")
Kilojoules(1).toString(Kilojoules) should be("1.0 kJ")
Megajoules(1).toString(Megajoules) should be("1.0 MJ")
Gigajoules(1).toString(Gigajoules) should be("1.0 GJ")
Terajoules(1).toString(Terajoules) should be("1.0 TJ")
BritishThermalUnits(1).toString(BritishThermalUnits) should be("1.0 Btu")
MBtus(1).toString(MBtus) should be("1.0 MBtu")
MMBtus(1).toString(MMBtus) should be("1.0 MMBtu")
Ergs(1).toString(Ergs) should be ("1.0 erg")
ElectronVolt(1).toString(ElectronVolt) should be("1.0 eV")
MilliElectronVolt(1).toString(MilliElectronVolt) should be("1.0 meV")
KiloElectronVolt(1).toString(KiloElectronVolt) should be("1.0 keV")
MegaElectronVolt(1).toString(MegaElectronVolt) should be("1.0 MeV")
GigaElectronVolt(1).toString(GigaElectronVolt) should be("1.0 GeV")
TeraElectronVolt(1).toString(TeraElectronVolt) should be("1.0 TeV")
PetaElectronVolt(1).toString(PetaElectronVolt) should be("1.0 PeV")
ExaElectronVolt(1).toString(ExaElectronVolt) should be("1.0 EeV")
}
it should "return Irradiance when multiplied by ParticleFlux" in {
WattHours(1) * BecquerelsPerSquareMeterSecond(1) should be(WattsPerSquareMeter(Hours(1).toSeconds))
}
it should "return Power when divided by Time" in {
WattHours(1) / Hours(1) should be(Watts(1))
}
it should "return Time when divided by Power" in {
WattHours(1) / Watts(1) should be(Hours(1))
}
it should "return ElectricalPotential when divided by ElectricalCharge" in {
Joules(1) / Coulombs(1) should be(Volts(1))
}
it should "return Force when divided by Length" in {
Joules(1) / Meters(1) should be(Newtons(1))
}
it should "return Mass when divided by SpecificEnergy" in {
Joules(1) / Grays(1) should be(Kilograms(1))
}
it should "return SpecificEnergy when divided by Mass" in {
Joules(1) / Kilograms(1) should be(Grays(1))
}
it should "return Volume when divided by EnergyDensity" in {
Joules(1) / JoulesPerCubicMeter(1) should be(CubicMeters(1))
}
it should "return EnergyDensity when divided by Volume" in {
Joules(1) / CubicMeters(1) should be(JoulesPerCubicMeter(1))
}
it should "return ThermalCapacity when divided by Temperature" in {
Joules(10) / JoulesPerKelvin(4) should be(Kelvin(2.5))
}
it should "return Temperature when divided by ThermalCapacity" in {
Joules(10) / Kelvin(2) should be(JoulesPerKelvin(5))
}
it should "return MolarEnergy when divided by ChemicalAmount" in {
Joules(10) / Moles(2) should be(JoulesPerMole(5))
}
behavior of "KineticEnergyCalculations"
it should "calculate Kinetic Energy from Mass and Velocity" in {
KineticEnergy(Kilograms(10), MetersPerSecond(5)) should be(Joules(125))
KineticEnergy(Kilograms(5), MetersPerSecond(10)) should be(Joules(250))
}
it should "calculate Kinetic Energy from Mass and Momentum" in {
KineticEnergy(Kilograms(10), NewtonSeconds(5)) should be(Joules(.25))
KineticEnergy(Kilograms(5), NewtonSeconds(10)) should be(Joules(1))
}
behavior of "EnergyConversions"
it should "provide aliases for single unit values" in {
import EnergyConversions._
wattHour should be(WattHours(1))
Wh should be(WattHours(1))
milliwattHour should be(MilliwattHours(1))
mWh should be(MilliwattHours(1))
kilowattHour should be(KilowattHours(1))
kWh should be(KilowattHours(1))
megawattHour should be(MegawattHours(1))
MWh should be(MegawattHours(1))
gigawattHour should be(GigawattHours(1))
GWh should be(GigawattHours(1))
joule should be(Joules(1))
picojoule should be(Picojoules(1))
nanojoule should be(Nanojoules(1))
microjoule should be(Microjoules(1))
millijoule should be(Millijoules(1))
kilojoule should be(Kilojoules(1))
megajoule should be(Megajoules(1))
gigajoule should be(Gigajoules(1))
terajoule should be(Terajoules(1))
btu should be(BritishThermalUnits(1))
btuMultiplier should be(0.2930710701722222)
eV should be(ElectronVolt(1))
meV should be(MilliElectronVolt(1))
keV should be(KiloElectronVolt(1))
MeV should be(MegaElectronVolt(1))
GeV should be(GigaElectronVolt(1))
TeV should be(TeraElectronVolt(1))
PeV should be(PetaElectronVolt(1))
EeV should be(ExaElectronVolt(1))
}
it should "provide implicit conversion from Double" in {
import EnergyConversions._
val d = 10D
d.Wh should be(WattHours(d))
d.mWh should be(MilliwattHours(d))
d.kWh should be(KilowattHours(d))
d.MWh should be(MegawattHours(d))
d.GWh should be(GigawattHours(d))
d.wattHours should be(WattHours(d))
d.kilowattHours should be(KilowattHours(d))
d.megawattHours should be(MegawattHours(d))
d.gigawattHours should be(GigawattHours(d))
d.J should be(Joules(d))
d.joules should be(Joules(d))
d.picojoules should be(Picojoules(d))
d.nanojoules should be(Nanojoules(d))
d.microjoules should be(Microjoules(d))
d.milljoules should be(Millijoules(d))
d.kilojoules should be(Kilojoules(d))
d.megajoules should be(Megajoules(d))
d.gigajoules should be(Gigajoules(d))
d.Btu should be(BritishThermalUnits(d))
d.MBtu should be(MBtus(d))
d.MMBtu should be(MMBtus(d))
d.ergs should be(Ergs(d))
d.eV should be(ElectronVolt(d))
d.meV should be(MilliElectronVolt(d))
d.keV should be(KiloElectronVolt(d))
d.MeV should be(MegaElectronVolt(d))
d.GeV should be(GigaElectronVolt(d))
d.TeV should be(TeraElectronVolt(d))
d.PeV should be(PetaElectronVolt(d))
d.EeV should be(ExaElectronVolt(d))
}
it should "provide implicit conversions from String" in {
import EnergyConversions._
"10.22 J".toEnergy.get should be(Joules(10.22))
"10.22 Wh".toEnergy.get should be(WattHours(10.22))
"10.22 mWh".toEnergy.get should be(MilliwattHours(10.22))
"10.22 kWh".toEnergy.get should be(KilowattHours(10.22))
"10.22 MWh".toEnergy.get should be(MegawattHours(10.22))
"10.22 GWh".toEnergy.get should be(GigawattHours(10.22))
"10.22 Btu".toEnergy.get should be(BritishThermalUnits(10.22))
"10.22 MBtu".toEnergy.get should be(MBtus(10.22))
"10.22 MMBtu".toEnergy.get should be(MMBtus(10.22))
"10.22 erg".toEnergy.get should be(Ergs(10.22))
"10.22 eV".toEnergy.get should be(ElectronVolt(10.22))
"10.22 meV".toEnergy.get should be(MilliElectronVolt(10.22))
"10.22 keV".toEnergy.get should be(KiloElectronVolt(10.22))
"10.22 MeV".toEnergy.get should be(MegaElectronVolt(10.22))
"10.22 GeV".toEnergy.get should be(GigaElectronVolt(10.22))
"10.22 TeV".toEnergy.get should be(TeraElectronVolt(10.22))
"10.22 PeV".toEnergy.get should be(PetaElectronVolt(10.22))
"10.22 EeV".toEnergy.get should be(ExaElectronVolt(10.22))
"10.22 zz".toEnergy.failed.get should be(QuantityParseException("Unable to parse Energy", "10.22 zz"))
"ZZ J".toEnergy.failed.get should be(QuantityParseException("Unable to parse Energy", "ZZ J"))
}
it should "provide Numeric support" in {
import EnergyConversions.EnergyNumeric
val es = List(WattHours(100), KilowattHours(1))
es.sum should be(KilowattHours(1.1))
}
}
| typelevel/squants | shared/src/test/scala/squants/energy/EnergySpec.scala | Scala | apache-2.0 | 13,235 |
/**
* Copyright 2016 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.identity.components
import java.io.ByteArrayOutputStream
import java.io.File
import javax.xml.xpath.XPathExpression
import javax.xml.xpath.XPathConstants
import javax.xml.xpath.XPathException
import javax.xml.transform.Source
import javax.xml.transform.stream.StreamSource
import javax.xml.transform.dom.DOMSource
import com.rackspace.com.papi.components.checker.util.XMLParserPool._
import com.rackspace.com.papi.components.checker.util.ImmutableNamespaceContext
import com.rackspace.com.papi.components.checker.util.XPathExpressionPool._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import net.sf.saxon.s9api._
import net.sf.saxon.Configuration.LicenseFeature._
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.ObjectMapper
import org.scalatest.exceptions.TestFailedException
import org.w3c.dom.Document
@RunWith(classOf[JUnitRunner])
class ExtractExtensionSuite extends AttributeMapperBase with XPathAssertions {
val testDir = new File("src/test/resources/tests/extract-extn-tests")
val tests : List[File] = testDir.listFiles.toList.filter(f => {
f.toString.endsWith("xml")
})
val authXMLSample = new File("src/test/resources/samples/AuthenticateResponseSAML.xml")
val authJSONSample = new File("src/test/resources/samples/AuthenticateResponseSAML.json")
type ExtractTestXML = (File /* assertion */, String /* validation engine */) => Source /* Resulting extensions */
def runTestsXML(description : String, extractTest : ExtractTestXML) : Unit = {
tests.foreach( assertFile => {
val asserterExec = getAsserterExec(new StreamSource(assertFile))
validators.foreach (v => {
test (s"$description ($assertFile validated with $v)") {
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val outDoc = docBuilder.newDocument
val domDest = new DOMDestination(outDoc)
val newExt = extractTest(assertFile, v)
val asserter = AttributeMapper.getXsltTransformer(asserterExec)
asserter.setSource(newExt)
asserter.setDestination(domDest)
asserter.transform()
assert(outDoc)
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
}
})
})
}
type ExtractTestJSON = (File /* assertion */, String /* validation engine */) => String /* Resulting JSON as string */
def runTestsJSON(description : String, extractTest : ExtractTestJSON) : Unit = {
tests.foreach( assertFile => {
val asserterExec = getAsserterJsonExec(new StreamSource(assertFile))
validators.foreach (v => {
test (s"$description ($assertFile validated with $v)") {
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val outDoc = docBuilder.newDocument
val domDest = new DOMDestination(outDoc)
val newExt = extractTest(assertFile, v)
val asserter = AttributeMapper.getXQueryEvaluator(asserterExec, Map[QName,XdmValue](new QName("__JSON__") ->
new XdmAtomicValue(newExt)))
asserter.setDestination(domDest)
asserter.run()
assert(outDoc)
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
}
})
})
}
//
// Register namespaces for xpath asserts
//
register("ks","http://docs.openstack.org/identity/api/v2.0")
register("rax-auth","http://docs.rackspace.com/identity/api/ext/RAX-AUTH/v1.0")
runTestsXML("XML Extended Attributes", (assertFile : File, v : String) => {
println (s"Getting extended attributes in XML from $assertFile") // scalastyle:ignore
val dest = new XdmDestination
AttributeMapper.extractExtendedAttributes (new StreamSource(assertFile), dest, false,
true, v)
dest.getXdmNode.asSource
})
//
// Some simple spot assertions to make sure we didn't muck with the
// auth response after we modified it.
//
def accessXMLAssertions(doc : Document) : Unit = {
assert(doc, "/ks:access")
// spot check token
assert(doc, "/ks:access/ks:token[@id='aaaaa-bbbbb-ccccc-dddd']")
assert(doc, "/ks:access/ks:token/ks:tenant[@id='12345']")
assert(doc, "/ks:access/ks:token/rax-auth:authenticatedBy/rax-auth:credential = 'FEDERATED'")
// spot check user
assert(doc, "/ks:access/ks:user[@id='161418']")
assert(doc, "/ks:access/ks:user[@id='161418']/ks:roles/ks:role[1]/@id = '3'")
assert(doc, "/ks:access/ks:user[@id='161418']/ks:roles/ks:role[1]/@name = 'identity:default'")
assert(doc, "/ks:access/ks:user[@id='161418']/ks:roles/ks:role[2]/@id = '208'")
assert(doc, "/ks:access/ks:user[@id='161418']/ks:roles/ks:role[2]/@name = 'nova:admin'")
// spot check catalog
assert(doc, """/ks:access/ks:serviceCatalog/ks:service[@type='rax:database']/ks:endpoint[@region='DFW']/@publicURL
= 'https://dfw.databases.api.rackspacecloud.com/v1.0/12345' """)
assert(doc, """/ks:access/ks:serviceCatalog/ks:service[@type='rax:monitor']/ks:endpoint/@publicURL
= 'https://monitoring.api.rackspacecloud.com/v1.0/12345' """)
assert(doc, """/ks:access/ks:serviceCatalog/ks:service[@type='compute' and @name='cloudServers']/ks:endpoint/@publicURL
= 'https://servers.api.rackspacecloud.com/v1.0/12345' """)
assert(doc, """/ks:access/ks:serviceCatalog/ks:service[@type='compute' and @name='cloudServersOpenStack']/ks:endpoint[@region='DFW']/@publicURL
= 'https://dfw.servers.api.rackspacecloud.com/v2/12345' """)
}
//
// Some simple spot assertions to make sure we didn't muck with the
// auth response after we modified it.
//
def accessJSONAssertions(node : JsonNode) : Unit = {
assert (node, "exists($_?access)")
//
// spot check token
//
assert (node, "$_?access?token?id = 'aaaaa-bbbbb-ccccc-dddd'")
assert (node, "$_?access?token?tenant?id = '12345'")
assert (node, "$_?access?token?('RAX-AUTH:authenticatedBy')?*[1] = 'FEDERATED'")
// spot check user
assert (node, "$_?access?user?id = '161418'")
assert (node, "$_?access?user?roles?1?id = '3'")
assert (node, "$_?access?user?roles?1?name = 'identity:default'")
assert (node, "$_?access?user?roles?2?id = '208'")
assert (node, "$_?access?user?roles?2?name = 'nova:admin'")
// spot check catalog
assert (node, """$_?access?serviceCatalog?*[?type='rax:database']?endpoints?*[?region='DFW']?publicURL =
'https://dfw.databases.api.rackspacecloud.com/v1.0/12345'""")
assert (node, """$_?access?serviceCatalog?*[?type='rax:monitor']?endpoints?1?publicURL =
'https://monitoring.api.rackspacecloud.com/v1.0/12345'""")
assert (node, """$_?access?serviceCatalog?*[?type='compute' and ?name='cloudServers']?endpoints?1?publicURL =
'https://servers.api.rackspacecloud.com/v1.0/12345'""")
assert (node, """$_?access?serviceCatalog?*[?type='compute' and ?name='cloudServersOpenStack']?endpoints?*[?region='DFW']?publicURL =
'https://dfw.servers.api.rackspacecloud.com/v2/12345'""")
}
def shouldBeEmptyExtensions(assert : Source) : Boolean = {
val nsContext = ImmutableNamespaceContext(Map[String,String]())
val xpathString = "//processing-instruction()[name() ='noExt']"
val XPATH_VERSION = 31
var exp : XPathExpression = null
try {
exp = borrowExpression(xpathString,nsContext, XPATH_VERSION)
exp.evaluate (assert, XPathConstants.BOOLEAN).asInstanceOf[Boolean]
} catch {
case xpe : XPathException => throw new TestFailedException (s"Error in XPath $xpathString", xpe, 4) // scalastyle:ignore
case tf : TestFailedException => throw tf
case unknown : Throwable => throw new TestFailedException(s"Unknown error in XPath $xpathString", 4) // scalastyle:ignore
} finally {
if (exp != null) returnExpression (xpathString, nsContext, XPATH_VERSION, exp)
}
}
//
// Validates an auth result with a possible auth extension and
// returns the extension (if one exists) or the original if one
// doesn't.
//
def validateAuthExtensions (authExt : Document, assertion : Source) : Source = {
var docBuilder : javax.xml.parsers.DocumentBuilder = null
var outDoc2 : Document = null
try {
docBuilder = borrowParser
outDoc2 = docBuilder.newDocument
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
//
// Asserts on destination
//
accessXMLAssertions(authExt)
//
// Empty assert
//
if (shouldBeEmptyExtensions(assertion)) {
assert(authExt,"empty(/ks:access/rax-auth:extendedAttributes)")
} else {
assert(authExt,"not(empty(/ks:access/rax-auth:extendedAttributes))")
assert(authExt,"count(/ks:access/rax-auth:extendedAttributes) = 1")
}
//
// Extract and return extensions
//
val extnNode = authExt.getElementsByTagNameNS("http://docs.rackspace.com/identity/api/ext/RAX-AUTH/v1.0",
"extendedAttributes").item(0)
if (extnNode != null) {
val newExtn = outDoc2.importNode (extnNode, true)
outDoc2.appendChild(newExtn)
new DOMSource(outDoc2)
} else {
new DOMSource(authExt)
}
}
//
// Validates an auth result with a possible auth extension and
// returns the extension (if one exists) or the original if one
// doesn't.
//
def validateAuthExtensions (authExt : JsonNode, assertion : Source) : String = {
val om = new ObjectMapper
//
// Asserts on destination
//
accessJSONAssertions(authExt)
//
// Empty assert
//
if (shouldBeEmptyExtensions(assertion)) {
assert(authExt,"empty($_?access?('RAX-AUTH:extendedAttributes'))")
} else {
assert(authExt,"exists($_?access?('RAX-AUTH:extendedAttributes'))")
}
om.writeValueAsString(authExt.get("access"))
}
runTestsXML("XML Extended Attributes -- built into request (combine call)", (assertFile : File, v : String) => {
println (s"Adding extended attributes in XML from $assertFile") // scalastyle:ignore
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val outDoc = docBuilder.newDocument
val accessDest = new DOMDestination(outDoc)
AttributeMapper.addExtendedAttributes (new StreamSource(authXMLSample), new StreamSource(assertFile),
accessDest, false, true, v)
validateAuthExtensions (outDoc, new StreamSource(assertFile))
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
})
runTestsXML("XML Extended Attributes -- built into request (doc assert, doc result)", (assertFile : File, v : String) => {
println (s"Adding extended attributes in XML from $assertFile") // scalastyle:ignore
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val assert = docBuilder.parse(assertFile)
val outDoc = AttributeMapper.addExtendedAttributes (new StreamSource(authXMLSample), assert,
true, v)
validateAuthExtensions (outDoc, new DOMSource(assert))
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
})
runTestsXML("XML Extended Attributes -- built into request (doc auth, doc assert, doc result)", (assertFile : File, v : String) => {
println (s"Adding extended attributes in XML from $assertFile") // scalastyle:ignore
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val assert = docBuilder.parse(assertFile)
val authResp = docBuilder.parse(authXMLSample)
val outDoc = AttributeMapper.addExtendedAttributes (authResp, assert, true, v)
validateAuthExtensions (outDoc, new DOMSource(assert))
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
})
runTestsXML("XML Extended Attributes -- built into request (xml source call)", (assertFile : File, v : String) => {
println (s"Adding extended attributes in XML from $assertFile") // scalastyle:ignore
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val outDoc = docBuilder.newDocument
val accessDest = new DOMDestination(outDoc)
AttributeMapper.addExtendedAttributes (new StreamSource(authXMLSample), new StreamSource(assertFile),
accessDest, true, v)
validateAuthExtensions (outDoc, new StreamSource(assertFile))
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
})
runTestsJSON("JSON Extended Attributes", (assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
val bout = new ByteArrayOutputStream
val dest = AttributeMapper.processor.newSerializer(bout)
AttributeMapper.extractExtendedAttributes(new StreamSource(assertFile), dest, true,
true, v)
bout.toString("UTF-8")
})
runTestsJSON("JSON Extended Attributes (JsonNode)", (assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
val om = new ObjectMapper
val node = AttributeMapper.extractExtendedAttributes(new StreamSource(assertFile), true, v)
om.writeValueAsString(node)
})
runTestsJSON("JSON Extended Attributes -- built into request (combine call)", (assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
val om = new ObjectMapper
val bout = new ByteArrayOutputStream
val dest = AttributeMapper.processor.newSerializer(bout)
AttributeMapper.addExtendedAttributes(new StreamSource (authJSONSample), new StreamSource(assertFile), dest,
true, true, v)
val node = om.readTree (bout.toString("UTF-8"))
validateAuthExtensions(node, new StreamSource(assertFile))
})
runTestsJSON("JSON Extended Attributes -- built into request (authResp as JsonNode, return JsonNode)", (assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
val om = new ObjectMapper
val node = AttributeMapper.addExtendedAttributes(om.readTree (authJSONSample), new StreamSource(assertFile),
true, v)
validateAuthExtensions(node, new StreamSource(assertFile))
})
runTestsJSON("JSON Extended Attributes -- built into request (authResp as streamSource, return JsonNode)", (assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
val node = AttributeMapper.addExtendedAttributes(new StreamSource (authJSONSample), new StreamSource(assertFile),
true, v)
validateAuthExtensions(node, new StreamSource(assertFile))
})
runTestsJSON("JSON Extended Attributes -- built into request (authResp as JsonNode, assert as Doc, return JsonNode)",
(assertFile : File, v : String) => {
println (s"Getting extended attributes in JSON from $assertFile") // scalastyle:ignore
var docBuilder : javax.xml.parsers.DocumentBuilder = null
try {
docBuilder = borrowParser
val om = new ObjectMapper
val assert = docBuilder.parse(assertFile)
val node = AttributeMapper.addExtendedAttributes(om.readTree (authJSONSample), assert, true, v)
validateAuthExtensions(node, new StreamSource(assertFile))
} finally {
if (docBuilder != null) returnParser(docBuilder)
}
})
}
|
RackerWilliams/attributeMapping
|
core/src/test/scala/com/rackspace/identity/components/ExtractExtensionSuite.scala
|
Scala
|
apache-2.0
| 16,903
|
package com.twitter.finatra.http.tests.integration.doeverything.main.services
import com.google.inject.assistedinject.Assisted
import com.twitter.inject.annotations.Flag
import javax.inject.{Inject, Named}
import com.twitter.util.Duration
class ComplexService @Inject() (
exampleService: DoEverythingService,
defaultString: String,
@Named("str1") string1: String,
@Named("str2") string2: String,
defaultInt: Int,
@Flag("moduleDuration") duration1: Duration,
@Assisted name: String) {
assert(defaultString == "" || defaultString == "default string")
assert(string1 != null)
assert(string2 != null)
assert(defaultInt == 0 || defaultInt == 11)
def execute: String = exampleService.doit + " " + name + " " + duration1.inMillis
}
|
twitter/finatra
|
http-server/src/test/scala/com/twitter/finatra/http/tests/integration/doeverything/main/services/ComplexService.scala
|
Scala
|
apache-2.0
| 753
|
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.xdata.analytics.spark.micropathing
import java.lang.{Double => JavaDouble}
import java.awt.geom.Point2D
import java.util.Properties
import java.util.ArrayList;
import scala.annotation.serializable
import scala.collection.mutable.ListBuffer
import org.joda.time.DateTime
import com.oculusinfo.binning.TileData
import com.oculusinfo.binning.TileIndex
import com.oculusinfo.binning.impl.WebMercatorTilePyramid
import com.oculusinfo.tilegen.tiling.StandardDoubleBinDescriptor
import com.oculusinfo.tilegen.tiling.StandardDoubleArrayBinDescriptor
import com.oculusinfo.tilegen.tiling.HBaseTileIO
import com.oculusinfo.tilegen.tiling.LocalTileIO
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/**
 * Executes all stages of the micro-pathing analytic.
*
* Extend this trait and override the readInput method (or use an available class)
*
*/
trait MicroPathEngine {
var inputPath = ""
var outputPath = ""
var idCol = -1
var latCol = -1
var lonCol = -1
var dateTimeCol = -1
var dateCol = -1
var timeCol = -1
var columnSeperator = ""
def run(config:Properties) = {
checkConf(config)
readStandardConfProperties(config)
val sc = setup(config)
val triplineConf = getMicroPathConf(config)
val data = readInput(sc,config,triplineConf)
val paths = processPaths(sc,config,triplineConf,data)
execute(sc,paths,config,triplineConf)
}
/**
* enforce required conf file properties
*/
def checkConf(conf:Properties){
}
def readStandardConfProperties(conf:Properties) = {
inputPath = conf.getProperty("input.path")
outputPath = conf.getProperty("output.path")
idCol = conf.getProperty("col.id","0").toInt
latCol = conf.getProperty("col.lat","1").toInt
lonCol = conf.getProperty("col.lon","2").toInt
dateTimeCol = conf.getProperty("col.datetime","-1").toInt
dateCol = conf.getProperty("col.date","-1").toInt
timeCol = conf.getProperty("col.time","-1").toInt
columnSeperator = conf.getProperty("col.seperator",0x1.toChar.toString) // default hive field terminator
}
/**
* Create the trip line analytic configuration from the given properties file
*/
def getMicroPathConf(conf:Properties) = {
val timeFilter = conf.getProperty("time.filter",Int.MaxValue.toString).toInt
val distanceFilter = conf.getProperty("distance.filter",Int.MaxValue.toString).toInt
val lowerLat = conf.getProperty("lower.lat","-90.0").toDouble
val lowerLon = conf.getProperty("lower.lon","-180.0").toDouble
val upperLat = conf.getProperty("upper.lat","90.0").toDouble
val upperLon = conf.getProperty("upper.lon","179.999999").toDouble
val regionWidth = conf.getProperty("tripline.region.width","100000.0").toDouble
val regionHeight = conf.getProperty("tripline.region.height","100000.0").toDouble
val velocityFilter = conf.getProperty("velocity.filter","-1.0").toDouble
new MicroPathConfig(timeFilter, distanceFilter, velocityFilter, lowerLat, lowerLon, upperLat, upperLon, regionWidth, regionHeight)
}
/**
* Create the spark context from the properties file.
*/
def setup(config:Properties): SparkContext = {
val default_parallelism = config.getProperty("default_parallelism","8").toInt
val frameSize = config.getProperty("spark.akka.frameSize","200")
val master_uri = config.getProperty("master_uri","local")
val spark_home = config.getProperty("SPARK_HOME","")
val deploymentCodePaths = config.getProperty("deployment_path","").split(":")
val jobName = config.getProperty("job.name","SparkMicroPathing")
// System.setProperty("spark.executor.memory", "6G")
System.setProperty("spark.default.parallelism",default_parallelism.toString)
println("spark.default.parallelism "+default_parallelism.toString)
System.setProperty("spark.akka.frameSize",frameSize)
println("spark.akka.frameSize "+frameSize)
System.setProperty("spark.storage.memoryFraction", "0.5")
println("spark.storage.memoryFraction 0.5")
System.setProperty("spark.worker.timeout", "30000")
System.setProperty("spark.akka.timeout", "30000")
System.setProperty("spark.storage.blockManagerHeartBeatMs", "30000")
val checkPointDir = config.getProperty("checkpoint.dir","/tmp/checkpoints")
println("check point dir: "+checkPointDir)
System.setProperty("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//System.setProperty("spark.kryo.registrator", "org.xdata.analytics.spark.micropathing.TriplineDataRegistrator")
val sc:SparkContext =
if (master_uri.indexOf("local") == 0)
new SparkContext( master_uri, jobName)
else
new SparkContext( master_uri, jobName, spark_home, deploymentCodePaths)
sc.setCheckpointDir(checkPointDir, true)
return sc
}
/**
 * Read input data into an RDD[(String, DateTime, Double, Double)]
 *
 * OVERRIDE this method to read your input data. Ensure you filter out data that is not inside the test region.
*
*/
def readInput(sc:SparkContext,config:Properties,triplineConf:MicroPathConfig) :RDD[(String,DateTime,Double,Double)]
/**
* Convert the raw input data into paths, and cache for future use.
*/
def processPaths(sc:SparkContext,config:Properties,triplineConf:MicroPathConfig,data:RDD[(String,DateTime,Double,Double)]) : RDD[(Point2D.Double,Point2D.Double,Double)] = {
val pathRDD = data.groupBy(_._1)
.flatMap( {case (id,dataSeq) =>
val buffer = new ListBuffer[(Point2D.Double,Point2D.Double,Double)]
val sorted = dataSeq.sortWith({case(row1,row2) => row1._2.compareTo(row2._2) < 0 })
var prevPoint : Point2D.Double = null
var prevTime :DateTime = null
for (row <- sorted){
val currentPoint = new Point2D.Double(row._3,row._4)
val currentTime = row._2
if (prevPoint != null){
val distance = MicroPathUtilities.getDistance(prevPoint,currentPoint)
val timeDelta = MicroPathUtilities.getTimeDelta(currentTime,prevTime)
val velocity = MicroPathUtilities.getVelocity(distance,timeDelta)
if ( distance > 0 && distance < triplineConf.distanceFilter &&
timeDelta < triplineConf.timeFilter &&
(velocity < triplineConf.velocityFilter || triplineConf.velocityFilter < 0)
){
buffer += ((prevPoint,currentPoint,velocity))
}
}
prevPoint = currentPoint
prevTime = currentTime
}
buffer
}).cache // cache the paths as they will be used multiple times (once for each level)
//pathRDD.checkpoint
return pathRDD
}
/**
* Execute the analytic and return weighted coordinates.
*/
def execute(sc:SparkContext,paths:RDD[(Point2D.Double,Point2D.Double,Double)],config:Properties,triplineConf:MicroPathConfig) = {
val maxLevel = config.getProperty("mercator.level","1").toInt
    var name = config.getProperty("avro.output.name","unknown")
var desc = config.getProperty("avro.output.desc","unknown")
val datastore = config.getProperty("avro.data.store","local")
println("paths: " + paths.count())
for (level <- 1 to maxLevel){
val t1 = System.currentTimeMillis()
/*
val decay = config.getProperty("velocity.filter.decay","-1.0").toDouble
val filteredPaths = if (decay >0 && decay < 1 && level > 0) paths.filter({case (p1,p2,v) =>
v < (math.pow(decay,level-1)*triplineConf.velocityFilter) || triplineConf.velocityFilter < 0
}) else paths
*/
val tileRDD = paths.flatMap( {case(p1,p2,v)=> MicroPathUtilities.findPath(p1,p2,level)})
.groupBy({case ((tileX,tileY,binX,binY)) => (tileX,tileY) })
.map({case (tilePoint,dataSeq) =>
val freqs = new Array[Double](256*256)
for ((tileX,tileY,binX,binY) <- dataSeq){
val index = binX+(binY*256)
freqs(index) += 1
}
var tileData = new ArrayList[JavaDouble](256*256)
for (freq <- freqs) {
tileData.add(freq)
}
// TODO - do an additional aggregation step on bins here. check to see if visual is better
// END
val tileIndex = new TileIndex(level,tilePoint._1,tilePoint._2)
new TileData[JavaDouble](tileIndex, tileData)
})
// tileRDD.cache // cache before writing to ensure computation of RDD is not repeated.
val pyramider = new WebMercatorTilePyramid()
var io = if (datastore =="hbase") new HBaseTileIO(config.getProperty("hbase.zookeeper.quorum"),
config.getProperty("hbase.zookeeper.port"),
config.getProperty("hbase.master"))
else new LocalTileIO("avro")
var binDesc = new StandardDoubleBinDescriptor()
// if using hdfs output path should be the path in hdfs, if hbase outputPath is the table name
io.writeTileSet[Double, JavaDouble](pyramider,
outputPath,
tileRDD,
binDesc,
name,
desc)
//io.writeTiles(outputPath, pyramider, serializer, tileRDD) // config.getProperty("avro.output.name","unkown"), config.getProperty("avro.output.desc","unknown"))
//tileRDD.unpersist
val time = System.currentTimeMillis() - t1
println("Computed level "+level+" in "+time+" msec.")
}
}
}
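// Editor's sketch (not part of the original file): one possible concrete engine that
// overrides readInput for delimited text input, using the column indices populated by
// readStandardConfProperties above. It assumes the timestamp column holds ISO-8601
// strings parsable by Joda's DateTime(Object) constructor; the class name is hypothetical.
class DelimitedTextMicroPathEngine extends MicroPathEngine {
  override def readInput(sc: SparkContext, config: Properties,
                         triplineConf: MicroPathConfig): RDD[(String, DateTime, Double, Double)] = {
    // NOTE: per the trait contract, rows outside the test region should also be
    // filtered out here; that step is omitted in this illustration.
    sc.textFile(inputPath)
      .map(_.split(columnSeperator))
      .map(cols => (cols(idCol), new DateTime(cols(dateTimeCol)),
                    cols(latCol).toDouble, cols(lonCol).toDouble))
  }
}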
|
Sotera/aggregate-micro-paths
|
spark/src/main/scala/org/xdata/analytics/spark/micropathing/MicroPathEngine.scala
|
Scala
|
apache-2.0
| 10,549
|
/*
Copyright 2017-2020 Erik Erlandson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package coulomb.pureconfig
import scala.reflect.runtime.universe.WeakTypeTag
import _root_.pureconfig.{ConfigReader, ConfigWriter, ConfigCursor}
import _root_.pureconfig.error.{CannotConvert, ConfigReaderFailures, ConvertFailure}
import com.typesafe.config.ConfigValue
import eu.timepit.refined.refineV
import eu.timepit.refined.api.{ Refined, Validate }
import coulomb._
package object refined {
import coulomb.pureconfig.infra.CoulombPureconfigOverride
// prevent the "main" pureconfig rule from ambiguity on `Refined[V, P]`
implicit def overridePureconfigRefined[V, P]: CoulombPureconfigOverride[Refined[V, P]] =
new CoulombPureconfigOverride[Refined[V, P]] {}
/** Manifest a ConfigWriter for `Quantity[Refined[P, V], U]` */
implicit def coulombRefinedConfigWriter[V, P, U](implicit
qcw: ConfigWriter[Quantity[V, U]]
): ConfigWriter[Quantity[Refined[V, P], U]] = new ConfigWriter[Quantity[Refined[V, P], U]] {
def to(q: Quantity[Refined[V, P], U]): ConfigValue =
qcw.to(Quantity[V, U](q.value.value))
}
/** Manifest a ConfigReader for `Quantity[Refined[P, V], U]` */
implicit def coulombRefinedConfigReader[V, P, U](implicit
qcr: ConfigReader[Quantity[V, U]],
qpv: Validate[V, P],
qtt: WeakTypeTag[Quantity[Refined[V, P], U]]
): ConfigReader[Quantity[Refined[V, P], U]] = new ConfigReader[Quantity[Refined[V, P], U]] {
def from(cur: ConfigCursor): Either[ConfigReaderFailures, Quantity[Refined[V, P], U]] = {
qcr.from(cur) match {
case Left(readFailure) => Left(readFailure)
case Right(q) => {
refineV[P](q.value) match {
case Right(rval) => Right(rval.withUnit[U])
case Left(because) => Left(
ConfigReaderFailures(
ConvertFailure(
reason = CannotConvert(
value = s"$q",
toType = qtt.tpe.toString,
because = because
),
cur = cur)))
}
}
}
}
}
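  // Editor's note (not in the original file): the reader above composes two stages --
  // the underlying ConfigReader[Quantity[V, U]] parses the unit-aware value first, and
  // refineV[P] then applies the refinement predicate; a failed predicate is surfaced as
  // a ConvertFailure(CannotConvert(...)) at the same config cursor, so the error carries
  // both the offending quantity and the target refined type.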
}
|
erikerlandson/coulomb
|
coulomb-pureconfig-refined/src/main/scala/coulomb/pureconfig/refined/package.scala
|
Scala
|
apache-2.0
| 2,603
|
/*
* The MIT License
*
* Copyright 2014 Kamnev Georgiy (nt.gocha@gmail.com).
*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/
package xyz.cofe.odtexport.xtest
/**
 * Visitor over XML nodes that delegates calls to node patterns
 * @param patterns node patterns
*/
class NodePatterns extends XMLPathVisitor with NodePatternsVisitor
{
private var _patterns = List[NodePattern]();
def patterns = _patterns;
def this(patterns:NodePattern*) = {
this();
for( p <- patterns )this._patterns = p :: this.patterns;
this._patterns = this._patterns.reverse;
}
def this(patterns:Iterable[NodePattern]) = {
this();
for( p <- patterns )this._patterns = p :: this._patterns;
this._patterns = this.patterns.reverse;
}
}
/**
 * Mixin for an XMLPathVisitor that delegates calls to node patterns
*/
trait NodePatternsVisitor extends XMLPathVisitor {
import org.w3c.dom.{Node => XMLNode};
/**
   * The patterns in use
   * @return the XML node patterns
*/
def patterns : List[NodePattern];
/**
   * Called when entering an XML node.
   * Delegates the call to the patterns.
   * Control is handed to the first pattern whose NodePattern.test(node) returns true.
   * @param node the XML node
   * @return the result of the delegation (if a pattern matched), or true by default.
*/
override def enter(node:XMLNode):Boolean = {
super.enter(node);
var res = true;
var stop = false;
for( pattern <- patterns ){
      if( !stop && pattern.test(node) ){
res = pattern.enter(node);
stop = true;
}
}
res;
}
/**
   * Called when leaving an XML node.
   * Delegates the call to the patterns.
   * Control is handed to the first pattern whose NodePattern.test(node) returns true.
   * @param node the XML node
*/
override def exit(node:XMLNode):Unit = {
var stop = false;
for( pattern <- patterns ){
      if( !stop && pattern.test(node) ){
pattern.exit(node);
stop = true;
}
}
super.exit(node);
}
/**
   * Called before processing of the tree begins.
   * Delegates the calls to the patterns.
*/
override def begin():Unit = {
for( ptr <- patterns )
ptr.begin();
}
/**
   * Called after the tree has been processed.
   * Delegates the calls to the patterns.
*/
override def end():Unit = {
for( ptr <- patterns.reverse )
ptr.end();
}
}
|
gochaorg/odt-export
|
src/main/scala/xyz/cofe/odtexport/xtest/NodePatternsVisitor.scala
|
Scala
|
mit
| 5,131
|
package me.yingrui.segment.dict
import org.junit.Assert
import org.junit.Test
class POSArrayTest {
@Test
def one_Word_Could_has_Multi_POSes() {
val posArray = new POSArray()
val posV = POS("V", 1)
val posT = POS("T", 1)
val posN = POS("N", 3451)
posArray.add(posV)
posArray.add(posT)
posArray.add(posN)
Assert.assertEquals(posArray.getOccurredCount("V"), 1)
Assert.assertEquals(posArray.getOccurredCount("N"), 3451)
Assert.assertEquals(posArray.getOccurredSum(), 3453)
val posArray2 = new POSArray()
posArray2.add(posArray)
Assert.assertEquals(posArray2.getOccurredCount("V"), 1)
Assert.assertEquals(posArray2.getOccurredCount("N"), 3451)
Assert.assertEquals(posArray2.getOccurredSum(), 3453)
val pa = posArray.getWordPOSTable()
Assert.assertEquals(pa(0)(1) + pa(1)(1) + pa(2)(1), 3453)
}
}
|
yingrui/mahjong
|
lib-segment/src/test/scala/me/yingrui/segment/dict/POSArrayTest.scala
|
Scala
|
gpl-3.0
| 944
|
package mesosphere.marathon
package api
import javax.servlet._
import javax.servlet.http.HttpServletResponse
class CacheDisablingFilter extends Filter {
override def init(filterConfig: FilterConfig): Unit = {}
override def doFilter(request: ServletRequest, response: ServletResponse, chain: FilterChain): Unit = {
response match {
case httpResponse: HttpServletResponse =>
httpResponse.setHeader("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1
httpResponse.setHeader("Pragma", "no-cache") // HTTP 1.0
httpResponse.setHeader("Expires", "0") // Proxies
case _ => // ignore other responses
}
chain.doFilter(request, response)
}
override def destroy(): Unit = {}
}
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/api/CacheDisablingFilter.scala
|
Scala
|
apache-2.0
| 742
|
package main.scala.overlapping.timeSeriesOld
import breeze.linalg.{DenseMatrix, DenseVector}
import main.scala.overlapping.containers._
import scala.reflect.ClassTag
/**
* Created by Francois Belletti on 9/23/15.
*/
object SecondMomentEstimator{
/**
* Compute the second moment of a Time Series RDD.
*
* @param timeSeries Input data.
* @tparam IndexT Timestamp type.
* @return Second moment matrix.
*/
def apply[IndexT : ClassTag](
timeSeries: VectTimeSeries[IndexT]): DenseMatrix[Double] ={
val estimator = new SecondMomentEstimator[IndexT](timeSeries.config)
estimator.estimate(timeSeries)
}
}
class SecondMomentEstimator[IndexT : ClassTag](config: VectTSConfig[IndexT])
extends FirstOrderEssStat[IndexT, (DenseMatrix[Double], Long)]
with Estimator[IndexT, DenseMatrix[Double]]{
override def zero = (DenseMatrix.zeros[Double](config.dim, config.dim), 0L)
override def kernel(t: TSInstant[IndexT], v: DenseVector[Double]): (DenseMatrix[Double], Long) = {
(v * v.t, 1L)
}
override def reducer(r1: (DenseMatrix[Double], Long), r2: (DenseMatrix[Double], Long)): (DenseMatrix[Double], Long) = {
(r1._1 + r2._1, r1._2 + r2._2)
}
def normalize(x: (DenseMatrix[Double], Long)): DenseMatrix[Double] = {
x._1 / x._2.toDouble
}
override def estimate(timeSeries: VectTimeSeries[IndexT]): DenseMatrix[Double] = {
normalize(timeSeriesStats(timeSeries))
}
}
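// Editor's note (not in the original file): in matrix form the estimator computes
//   M = (1 / N) * sum_{t = 1..N} v_t * v_t^T
// where the v_t are the observation vectors. `kernel` contributes one rank-one term
// v * v.t together with a count of 1, `reducer` sums the partial matrices and counts,
// and `normalize` divides the accumulated matrix by the total count N.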
|
bellettif/sparkGeoTS
|
sparkTS/src/main/scala/overlapping/timeSeriesOld/SecondMomentEstimator.scala
|
Scala
|
bsd-3-clause
| 1,440
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{InputStream, NotSerializableException}
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.collection.Map
import scala.collection.mutable.Queue
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import akka.actor.{Props, SupervisorStrategy}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark._
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.input.FixedLengthBinaryInputFormat
import org.apache.spark.rdd.{RDD, RDDOperationScope}
import org.apache.spark.serializer.SerializationDebugger
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContextState._
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.{ActorReceiver, ActorSupervisorStrategy, Receiver}
import org.apache.spark.streaming.scheduler.{JobScheduler, StreamingListener}
import org.apache.spark.streaming.ui.{StreamingJobProgressListener, StreamingTab}
import org.apache.spark.util.{AsynchronousListenerBus, CallSite, ShutdownHookManager, ThreadUtils, Utils}
/**
* Main entry point for Spark Streaming functionality. It provides methods used to create
* [[org.apache.spark.streaming.dstream.DStream]]s from various input sources. It can be either
* created by providing a Spark master URL and an appName, or from a org.apache.spark.SparkConf
* configuration (see core Spark documentation), or from an existing org.apache.spark.SparkContext.
* The associated SparkContext can be accessed using `context.sparkContext`. After
* creating and transforming DStreams, the streaming computation can be started and stopped
* using `context.start()` and `context.stop()`, respectively.
* `context.awaitTermination()` allows the current thread to wait for the termination
* of the context by `stop()` or by an exception.
*/
class StreamingContext private[streaming] (
sc_ : SparkContext,
cp_ : Checkpoint,
batchDur_ : Duration
) extends Logging {
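  // A minimal usage sketch (editor's illustration, not part of the original file):
  //
  //   val conf = new SparkConf().setAppName("NetworkWordCount").setMaster("local[2]")
  //   val ssc = new StreamingContext(conf, Seconds(1))
  //   val words = ssc.socketTextStream("localhost", 9999).flatMap(_.split(" "))
  //   words.map(word => (word, 1)).reduceByKey(_ + _).print()
  //   ssc.start()
  //   ssc.awaitTermination()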
/**
* Create a StreamingContext using an existing SparkContext.
* @param sparkContext existing SparkContext
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(sparkContext: SparkContext, batchDuration: Duration) = {
this(sparkContext, null, batchDuration)
}
/**
* Create a StreamingContext by providing the configuration necessary for a new SparkContext.
* @param conf a org.apache.spark.SparkConf object specifying Spark parameters
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(conf: SparkConf, batchDuration: Duration) = {
this(StreamingContext.createNewSparkContext(conf), null, batchDuration)
}
/**
* Create a StreamingContext by providing the details necessary for creating a new SparkContext.
* @param master cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName a name for your job, to display on the cluster web UI
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(
master: String,
appName: String,
batchDuration: Duration,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(StreamingContext.createNewSparkContext(master, appName, sparkHome, jars, environment),
null, batchDuration)
}
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
* @param hadoopConf Optional, configuration object if necessary for reading from
* HDFS compatible filesystems
*/
def this(path: String, hadoopConf: Configuration) =
this(null, CheckpointReader.read(path, new SparkConf(), hadoopConf).get, null)
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
*/
def this(path: String) = this(path, SparkHadoopUtil.get.conf)
/**
* Recreate a StreamingContext from a checkpoint file using an existing SparkContext.
* @param path Path to the directory that was specified as the checkpoint directory
* @param sparkContext Existing SparkContext
*/
def this(path: String, sparkContext: SparkContext) = {
this(
sparkContext,
CheckpointReader.read(path, sparkContext.conf, sparkContext.hadoopConfiguration).get,
null)
}
if (sc_ == null && cp_ == null) {
throw new Exception("Spark Streaming cannot be initialized with " +
"both SparkContext and checkpoint as null")
}
private[streaming] val isCheckpointPresent = (cp_ != null)
private[streaming] val sc: SparkContext = {
if (sc_ != null) {
sc_
} else if (isCheckpointPresent) {
SparkContext.getOrCreate(cp_.createSparkConf())
} else {
throw new SparkException("Cannot create StreamingContext without a SparkContext")
}
}
if (sc.conf.get("spark.master") == "local" || sc.conf.get("spark.master") == "local[1]") {
logWarning("spark.master should be set as local[n], n > 1 in local mode if you have receivers" +
" to get data, otherwise Spark jobs will not get resources to process the received data.")
}
private[streaming] val conf = sc.conf
private[streaming] val env = sc.env
private[streaming] val graph: DStreamGraph = {
if (isCheckpointPresent) {
cp_.graph.setContext(this)
cp_.graph.restoreCheckpointData()
cp_.graph
} else {
require(batchDur_ != null, "Batch duration for StreamingContext cannot be null")
val newGraph = new DStreamGraph()
newGraph.setBatchDuration(batchDur_)
newGraph
}
}
private val nextInputStreamId = new AtomicInteger(0)
private[streaming] var checkpointDir: String = {
if (isCheckpointPresent) {
sc.setCheckpointDir(cp_.checkpointDir)
cp_.checkpointDir
} else {
null
}
}
private[streaming] val checkpointDuration: Duration = {
if (isCheckpointPresent) cp_.checkpointDuration else graph.batchDuration
}
private[streaming] val scheduler = new JobScheduler(this)
private[streaming] val waiter = new ContextWaiter
private[streaming] val progressListener = new StreamingJobProgressListener(this)
private[streaming] val uiTab: Option[StreamingTab] =
if (conf.getBoolean("spark.ui.enabled", true)) {
Some(new StreamingTab(this))
} else {
None
}
/* Initializing a streamingSource to register metrics */
private val streamingSource = new StreamingSource(this)
private var state: StreamingContextState = INITIALIZED
private val startSite = new AtomicReference[CallSite](null)
private[streaming] def getStartSite(): CallSite = startSite.get()
private var shutdownHookRef: AnyRef = _
conf.getOption("spark.streaming.checkpoint.directory").foreach(checkpoint)
/**
* Return the associated Spark context
*/
def sparkContext: SparkContext = sc
/**
* Set each DStreams in this context to remember RDDs it generated in the last given duration.
* DStreams remember RDDs only for a limited duration of time and releases them for garbage
* collection. This method allows the developer to specify how long to remember the RDDs (
* if the developer wishes to query old data outside the DStream computation).
* @param duration Minimum duration that each DStream should remember its RDDs
*/
def remember(duration: Duration) {
graph.remember(duration)
}
/**
* Set the context to periodically checkpoint the DStream operations for driver
* fault-tolerance.
* @param directory HDFS-compatible directory where the checkpoint data will be reliably stored.
   * Note that this must be a fault-tolerant file system like HDFS for the checkpoint data to survive driver failures.
*/
def checkpoint(directory: String) {
if (directory != null) {
val path = new Path(directory)
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
fs.mkdirs(path)
val fullPath = fs.getFileStatus(path).getPath().toString
sc.setCheckpointDir(fullPath)
checkpointDir = fullPath
} else {
checkpointDir = null
}
}
private[streaming] def isCheckpointingEnabled: Boolean = {
checkpointDir != null
}
private[streaming] def initialCheckpoint: Checkpoint = {
if (isCheckpointPresent) cp_ else null
}
private[streaming] def getNewInputStreamId() = nextInputStreamId.getAndIncrement()
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withScope[U](body: => U): U = sparkContext.withScope(body)
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withNamedScope[U](name: String)(body: => U): U = {
RDDOperationScope.withScope(sc, name, allowNesting = false, ignoreParent = false)(body)
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param receiver Custom implementation of Receiver
*
   * @deprecated As of 1.0.0, replaced by `receiverStream`.
*/
@deprecated("Use receiverStream", "1.0.0")
def networkStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("network stream") {
receiverStream(receiver)
}
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param receiver Custom implementation of Receiver
*/
def receiverStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("receiver stream") {
new PluggableInputDStream[T](this, receiver)
}
}
/**
* Create an input stream with any arbitrary user implemented actor receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param props Props object defining creation of the actor
* @param name Name of the actor
* @param storageLevel RDD storage level (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*
* @note An important point to note:
   * Since actors may exist outside the Spark framework, it is the user's responsibility
   * to ensure type safety, i.e. the parametrized type of the data received and that of
   * the actorStream should be the same.
*/
def actorStream[T: ClassTag](
props: Props,
name: String,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2,
supervisorStrategy: SupervisorStrategy = ActorSupervisorStrategy.defaultStrategy
): ReceiverInputDStream[T] = withNamedScope("actor stream") {
receiverStream(new ActorReceiver[T](props, name, storageLevel, supervisorStrategy))
}
/**
   * Create an input stream from TCP source hostname:port. Data is received using
   * a TCP socket and the received bytes are interpreted as UTF8 encoded `\\n` delimited
* lines.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*/
def socketTextStream(
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = withNamedScope("socket text stream") {
socketStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel)
}
/**
   * Create an input stream from TCP source hostname:port. Data is received using
   * a TCP socket and the received bytes are interpreted as objects using the given
* converter.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param converter Function to convert the byte stream to objects
* @param storageLevel Storage level to use for storing the received objects
* @tparam T Type of the objects received (after converting bytes to objects)
*/
def socketStream[T: ClassTag](
hostname: String,
port: Int,
converter: (InputStream) => Iterator[T],
storageLevel: StorageLevel
): ReceiverInputDStream[T] = {
new SocketInputDStream[T](this, hostname, port, converter, storageLevel)
}
/**
   * Create an input stream from network source hostname:port, where data is received
* as serialized blocks (serialized using the Spark's serializer) that can be directly
* pushed into the block manager without deserializing them. This is the most efficient
* way to receive data.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @tparam T Type of the objects in the received blocks
*/
def rawSocketStream[T: ClassTag](
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[T] = withNamedScope("raw socket stream") {
new RawInputDStream[T](this, hostname, port, storageLevel)
}
/**
   * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory)
}
/**
   * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String, filter: Path => Boolean, newFilesOnly: Boolean): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly)
}
/**
   * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @param conf Hadoop configuration
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String,
filter: Path => Boolean,
newFilesOnly: Boolean,
conf: Configuration): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly, Option(conf))
}
/**
   * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as text files (using key as LongWritable, value
* as Text and input format as TextInputFormat). Files must be written to the
* monitored directory by "moving" them from another location within the same
* file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
*/
def textFileStream(directory: String): DStream[String] = withNamedScope("text file stream") {
fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString)
}
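  // Editor's sketch (not in the original file): the simplest file-based source -- new
  // text files atomically moved into the monitored directory become lines in later batches.
  //
  //   val logs = ssc.textFileStream("hdfs:///data/incoming")
  //   logs.filter(_.contains("ERROR")).print()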
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as flat binary files, assuming a fixed length per record,
* generating one byte array per record. Files must be written to the monitored directory
* by "moving" them from another location within the same file system. File names
* starting with . are ignored.
*
* '''Note:''' We ensure that the byte array for each record in the
* resulting RDDs of the DStream has the provided record length.
*
* @param directory HDFS directory to monitor for new file
* @param recordLength length of each record in bytes
*/
def binaryRecordsStream(
directory: String,
recordLength: Int): DStream[Array[Byte]] = withNamedScope("binary records stream") {
val conf = sc_.hadoopConfiguration
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = fileStream[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](
directory, FileInputDStream.defaultFilter: Path => Boolean, newFilesOnly = true, conf)
val data = br.map { case (k, v) =>
val bytes = v.getBytes
require(bytes.length == recordLength, "Byte array does not have correct length. " +
s"${bytes.length} did not equal recordLength: $recordLength")
bytes
}
data
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
   * NOTE: Arbitrary RDDs can be added to `queueStream`; there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @tparam T Type of objects in the RDD
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean = true
): InputDStream[T] = {
queueStream(queue, oneAtATime, sc.makeRDD(Seq[T](), 1))
}
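  // Typical test usage (editor's sketch, not in the original file): RDDs pushed into the
  // mutable Queue after start() are consumed by subsequent batches.
  //
  //   val rddQueue = new Queue[RDD[Int]]()
  //   val inputStream = ssc.queueStream(rddQueue)
  //   inputStream.map(x => (x % 10, 1)).reduceByKey(_ + _).print()
  //   ssc.start()
  //   for (_ <- 1 to 30) { rddQueue += ssc.sparkContext.makeRDD(1 to 1000, 10) }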
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
   * NOTE: Arbitrary RDDs can be added to `queueStream`; there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @param defaultRDD Default RDD is returned by the DStream when the queue is empty.
* Set as null if no RDD should be returned when empty
* @tparam T Type of objects in the RDD
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean,
defaultRDD: RDD[T]
): InputDStream[T] = {
new QueueInputDStream(this, queue, oneAtATime, defaultRDD)
}
/**
* Create a unified DStream from multiple DStreams of the same type and same slide duration.
*/
def union[T: ClassTag](streams: Seq[DStream[T]]): DStream[T] = withScope {
new UnionDStream[T](streams.toArray)
}
/**
* Create a new DStream in which each RDD is generated by applying a function on RDDs of
* the DStreams.
*/
def transform[T: ClassTag](
dstreams: Seq[DStream[_]],
transformFunc: (Seq[RDD[_]], Time) => RDD[T]
): DStream[T] = withScope {
new TransformedDStream[T](dstreams, sparkContext.clean(transformFunc))
}
/** Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
* receiving system events related to streaming.
*/
def addStreamingListener(streamingListener: StreamingListener) {
scheduler.listenerBus.addListener(streamingListener)
}
private def validate() {
assert(graph != null, "Graph is null")
graph.validate()
require(
!isCheckpointingEnabled || checkpointDuration != null,
"Checkpoint directory has been set, but the graph checkpointing interval has " +
"not been set. Please use StreamingContext.checkpoint() to set the interval."
)
// Verify whether the DStream checkpoint is serializable
if (isCheckpointingEnabled) {
val checkpoint = new Checkpoint(this, Time.apply(0))
try {
Checkpoint.serialize(checkpoint, conf)
} catch {
case e: NotSerializableException =>
throw new NotSerializableException(
"DStream checkpointing has been enabled but the DStreams with their functions " +
"are not serializable\\n" +
SerializationDebugger.improveException(checkpoint, e).getMessage()
)
}
}
if (Utils.isDynamicAllocationEnabled(sc.conf)) {
logWarning("Dynamic Allocation is enabled for this application. " +
"Enabling Dynamic allocation for Spark Streaming applications can cause data loss if " +
"Write Ahead Log is not enabled for non-replayable sources like Flume. " +
"See the programming guide for details on how to enable the Write Ahead Log")
}
}
/**
* :: DeveloperApi ::
*
* Return the current state of the context. The context can be in three possible states -
*
   * - StreamingContextState.INITIALIZED - The context has been created, but not started yet.
   * Input DStreams, transformations and output operations can be created on the context.
   * - StreamingContextState.ACTIVE - The context has been started, and has not been stopped.
* Input DStreams, transformations and output operations cannot be created on the context.
* - StreamingContextState.STOPPED - The context has been stopped and cannot be used any more.
*/
@DeveloperApi
def getState(): StreamingContextState = synchronized {
state
}
/**
* Start the execution of the streams.
*
* @throws IllegalStateException if the StreamingContext is already stopped.
*/
def start(): Unit = synchronized {
state match {
case INITIALIZED =>
startSite.set(DStream.getCreationSite())
StreamingContext.ACTIVATION_LOCK.synchronized {
StreamingContext.assertNoOtherContextIsActive()
try {
validate()
// Start the streaming scheduler in a new thread, so that thread local properties
// like call sites and job groups can be reset without affecting those of the
// current thread.
ThreadUtils.runInNewThread("streaming-start") {
sparkContext.setCallSite(startSite.get)
sparkContext.clearJobGroup()
sparkContext.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false")
scheduler.start()
}
state = StreamingContextState.ACTIVE
} catch {
case NonFatal(e) =>
logError("Error starting the context, marking it as stopped", e)
scheduler.stop(false)
state = StreamingContextState.STOPPED
throw e
}
StreamingContext.setActiveContext(this)
}
shutdownHookRef = ShutdownHookManager.addShutdownHook(
StreamingContext.SHUTDOWN_HOOK_PRIORITY)(stopOnShutdown)
// Registering Streaming Metrics at the start of the StreamingContext
assert(env.metricsSystem != null)
env.metricsSystem.registerSource(streamingSource)
uiTab.foreach(_.attach())
logInfo("StreamingContext started")
case ACTIVE =>
logWarning("StreamingContext has already been started")
case STOPPED =>
throw new IllegalStateException("StreamingContext has already been stopped")
}
}
/**
   * Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*/
def awaitTermination() {
waiter.waitForStopOrError()
}
/**
   * Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
* @param timeout time to wait in milliseconds
*
* @deprecated As of 1.3.0, replaced by `awaitTerminationOrTimeout(Long)`.
*/
@deprecated("Use awaitTerminationOrTimeout(Long) instead", "1.3.0")
def awaitTermination(timeout: Long) {
waiter.waitForStopOrError(timeout)
}
/**
   * Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*
* @param timeout time to wait in milliseconds
* @return `true` if it's stopped; or throw the reported error during the execution; or `false`
* if the waiting time elapsed before returning from the method.
*/
def awaitTerminationOrTimeout(timeout: Long): Boolean = {
waiter.waitForStopOrError(timeout)
}
/**
* Stop the execution of the streams immediately (does not wait for all received data
* to be processed). By default, if `stopSparkContext` is not specified, the underlying
* SparkContext will also be stopped. This implicit behavior can be configured using the
* SparkConf configuration spark.streaming.stopSparkContextByDefault.
*
* @param stopSparkContext If true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
*/
def stop(
stopSparkContext: Boolean = conf.getBoolean("spark.streaming.stopSparkContextByDefault", true)
): Unit = synchronized {
stop(stopSparkContext, false)
}
/**
* Stop the execution of the streams, with option of ensuring all received data
* has been processed.
*
* @param stopSparkContext if true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
* @param stopGracefully if true, stops gracefully by waiting for the processing of all
* received data to be completed
*/
def stop(stopSparkContext: Boolean, stopGracefully: Boolean): Unit = {
var shutdownHookRefToRemove: AnyRef = null
if (AsynchronousListenerBus.withinListenerThread.value) {
throw new SparkException("Cannot stop StreamingContext within listener thread of" +
" AsynchronousListenerBus")
}
synchronized {
try {
state match {
case INITIALIZED =>
logWarning("StreamingContext has not been started yet")
case STOPPED =>
logWarning("StreamingContext has already been stopped")
case ACTIVE =>
scheduler.stop(stopGracefully)
// Removing the streamingSource to de-register the metrics on stop()
env.metricsSystem.removeSource(streamingSource)
uiTab.foreach(_.detach())
StreamingContext.setActiveContext(null)
waiter.notifyStop()
if (shutdownHookRef != null) {
shutdownHookRefToRemove = shutdownHookRef
shutdownHookRef = null
}
logInfo("StreamingContext stopped successfully")
}
} finally {
// The state should always be Stopped after calling `stop()`, even if we haven't started yet
state = STOPPED
}
}
if (shutdownHookRefToRemove != null) {
ShutdownHookManager.removeShutdownHook(shutdownHookRefToRemove)
}
// Even if we have already stopped, we still need to attempt to stop the SparkContext because
// a user might stop(stopSparkContext = false) and then call stop(stopSparkContext = true).
if (stopSparkContext) sc.stop()
}
private def stopOnShutdown(): Unit = {
val stopGracefully = conf.getBoolean("spark.streaming.stopGracefullyOnShutdown", false)
logInfo(s"Invoking stop(stopGracefully=$stopGracefully) from shutdown hook")
// Do not stop SparkContext, let its own shutdown hook stop it
stop(stopSparkContext = false, stopGracefully = stopGracefully)
}
}
/**
* StreamingContext object contains a number of utility functions related to the
* StreamingContext class.
*/
object StreamingContext extends Logging {
/**
* Lock that guards activation of a StreamingContext as well as access to the singleton active
* StreamingContext in getActiveOrCreate().
*/
private val ACTIVATION_LOCK = new Object()
private val SHUTDOWN_HOOK_PRIORITY = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY + 1
private val activeContext = new AtomicReference[StreamingContext](null)
private def assertNoOtherContextIsActive(): Unit = {
ACTIVATION_LOCK.synchronized {
if (activeContext.get() != null) {
throw new IllegalStateException(
"Only one StreamingContext may be started in this JVM. " +
"Currently running StreamingContext was started at" +
activeContext.get.getStartSite().longForm)
}
}
}
private def setActiveContext(ssc: StreamingContext): Unit = {
ACTIVATION_LOCK.synchronized {
activeContext.set(ssc)
}
}
/**
* :: Experimental ::
*
* Get the currently active context, if there is one. Active means started but not stopped.
*/
@Experimental
def getActive(): Option[StreamingContext] = {
ACTIVATION_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* @deprecated As of 1.3.0, replaced by implicit functions in the DStream companion object.
* This is kept here only for backward compatibility.
*/
@deprecated("Replaced by implicit functions in the DStream companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def toPairDStreamFunctions[K, V](stream: DStream[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
: PairDStreamFunctions[K, V] = {
DStream.toPairDStreamFunctions(stream)(kt, vt, ord)
}
/**
* :: Experimental ::
*
* Either return the "active" StreamingContext (that is, started but not stopped), or create a
   * new StreamingContext by calling the provided `creatingFunc`.
* @param creatingFunc Function to create a new StreamingContext
*/
@Experimental
def getActiveOrCreate(creatingFunc: () => StreamingContext): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { creatingFunc() }
}
}
/**
* :: Experimental ::
*
* Either get the currently active StreamingContext (that is, started but not stopped),
* OR recreate a StreamingContext from checkpoint data in the given path. If checkpoint data
   * does not exist in the provided `checkpointPath`, then create a new StreamingContext by calling the provided
* `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
@Experimental
def getActiveOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { getOrCreate(checkpointPath, creatingFunc, hadoopConf, createOnError) }
}
}
/**
* Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
* If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
* recreated from the checkpoint data. If the data does not exist, then the StreamingContext
   * will be created by calling the provided `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
def getOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
val checkpointOption = CheckpointReader.read(
checkpointPath, new SparkConf(), hadoopConf, createOnError)
checkpointOption.map(new StreamingContext(null, _, null)).getOrElse(creatingFunc())
}
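  // Typical driver pattern (editor's sketch, not in the original file); `sparkConf` and
  // `checkpointDirectory` are assumed to be defined by the caller.
  //
  //   def createContext(): StreamingContext = {
  //     val ssc = new StreamingContext(sparkConf, Seconds(1))
  //     ssc.checkpoint(checkpointDirectory)
  //     // ... define DStreams and output operations here ...
  //     ssc
  //   }
  //   val ssc = StreamingContext.getOrCreate(checkpointDirectory, createContext _)
  //   ssc.start()
  //   ssc.awaitTermination()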
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to StreamingContext.
*/
def jarOfClass(cls: Class[_]): Option[String] = SparkContext.jarOfClass(cls)
private[streaming] def createNewSparkContext(conf: SparkConf): SparkContext = {
new SparkContext(conf)
}
private[streaming] def createNewSparkContext(
master: String,
appName: String,
sparkHome: String,
jars: Seq[String],
environment: Map[String, String]
): SparkContext = {
val conf = SparkContext.updatedConf(
new SparkConf(), master, appName, sparkHome, jars, environment)
new SparkContext(conf)
}
private[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = {
var result = time.milliseconds.toString
if (prefix != null && prefix.length > 0) {
result = s"$prefix-$result"
}
if (suffix != null && suffix.length > 0) {
result = s"$result.$suffix"
}
result
}
}
private class StreamingContextPythonHelper {
/**
* This is a private method only for Python to implement `getOrCreate`.
*/
def tryRecoverFromCheckpoint(checkpointPath: String): Option[StreamingContext] = {
val checkpointOption = CheckpointReader.read(
checkpointPath, new SparkConf(), SparkHadoopUtil.get.conf, false)
checkpointOption.map(new StreamingContext(null, _, null))
}
}
|
chenc10/Spark-PAF
|
streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
|
Scala
|
apache-2.0
| 36,642
|
/**
* ====
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2011- SINTEF ICT
* Contact: SINTEF ICT <nicolas.ferry@sintef.no>
*
* Module: net.modelbased.sensapp
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
* ====
*
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2012- SINTEF ICT
* Contact: SINTEF ICT <nicolas.ferry@sintef.no>
*
* Module: net.modelbased.sensapp.service.registry
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
*/
package net.modelbased.sensapp.service.registry.data
import cc.spray.json._
import net.modelbased.sensapp.library.datastore._
import ElementJsonProtocol._
/**
 * Persistence layer associated to the SensorDescription class
*
* @author Sebastien Mosser
*/
class SensorDescriptionRegistry extends DataStore[SensorDescription] {
override val databaseName = "sensapp_db"
override val collectionName = "registry.sensors"
override val key = "id"
override def getIdentifier(e: SensorDescription) = e.id
override def deserialize(json: String): SensorDescription = { json.asJson.convertTo[SensorDescription] }
override def serialize(e: SensorDescription): String = { e.toJson.toString }
}
class CompositeSensorDescriptionRegistry extends DataStore[CompositeSensorDescription] {
override val databaseName = "sensapp_db"
override val collectionName = "registry.sensors.composite"
override val key = "id"
override def getIdentifier(e: CompositeSensorDescription) = e.id
override def deserialize(json: String): CompositeSensorDescription = { json.asJson.convertTo[CompositeSensorDescription] }
override def serialize(e: CompositeSensorDescription): String = { e.toJson.toString }
}
| SINTEF-9012/sensapp | net.modelbased.sensapp.service.registry/src/main/scala/net/modelbased/sensapp/service/registry/data/SensorDataStructureRegistry.scala | Scala | lgpl-3.0 | 3,025 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
/**
 * The Args class does simple command line parsing. The rules are:
* keys start with one or more "-". Each key has zero or more values
* following.
*/
object Args {
/**
* Split on whitespace and then parse.
*/
  def apply(argString : String) : Args = Args(argString.split("\\s+"))
/**
* parses keys as starting with a dash, except single dashed digits.
* Also parses key value pairs that are separated with an equal sign.
* All following non-dashed args are a list of values.
* If the list starts with non-dashed args, these are associated with the
* empty string: ""
**/
def apply(args : Iterable[String]) : Args = {
def startingDashes(word : String) = word.takeWhile { _ == '-' }.length
new Args(
//Fold into a list of (arg -> List[values])
args
      .filter{ a => !a.matches("\\s*") }
.foldLeft(List("" -> List[String]())) { (acc, arg) =>
val noDashes = arg.dropWhile{ _ == '-'}
if (arg.contains("=")) {
val splitArg = arg.split("=")
(splitArg(0) -> List(splitArg(1))) :: acc
} else if(arg == noDashes || isNumber(arg))
(acc.head._1 -> (arg :: acc.head._2)) :: acc.tail
else
(noDashes -> List()) :: acc
}
//Now reverse the values to keep the same order
.map {case (key, value) => key -> value.reverse}.toMap
)
}
def isNumber(arg : String) : Boolean = {
try {
arg.toDouble
true
}
catch {
case e : NumberFormatException => false
}
}
}
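// Hedged usage sketch (not part of the original scalding source): a made-up command line
// illustrating the parsing rules documented on `Args.apply` above. The object name, the
// argument string, and the expected values are all hypothetical.
object ArgsUsageExample {
  def main(argv: Array[String]): Unit = {
    val args = Args("input.txt --verbose --input a.txt b.txt --n 5")
    assert(args.positional == List("input.txt"))          // leading non-dashed args are keyed by ""
    assert(args.boolean("verbose"))                       // key present with no values
    assert(args.list("input") == List("a.txt", "b.txt"))  // all non-dashed values following the key
    assert(args("n") == "5")                              // apply is a synonym for required
    assert(args.optional("missing").isEmpty)              // absent keys yield None
  }
}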
class Args(val m : Map[String,List[String]]) extends java.io.Serializable {
//Replace or add a given key+args pair:
def +(keyvals : (String,Iterable[String])) = {
new Args(m + (keyvals._1 -> keyvals._2.toList))
}
/**
* Does this Args contain a given key?
*/
def boolean(key : String) = m.contains(key)
/**
* Get the list of values associated with a given key.
* if the key is absent, return the empty list. NOTE: empty
* does not mean the key is absent, it could be a key without
* a value. Use boolean() to check existence.
*/
def list(key : String) = m.get(key).getOrElse(List())
/**
* This is a synonym for required
*/
def apply(key : String) = required(key)
/**
* Gets the list of positional arguments
*/
def positional : List[String] = list("")
override def equals(other : Any) = {
if( other.isInstanceOf[Args] ) {
other.asInstanceOf[Args].m.equals(m)
}
else {
false
}
}
/**
* Equivalent to .optional(key).getOrElse(default)
*/
def getOrElse(key : String, default : String) = optional(key).getOrElse(default)
/**
* return exactly one value for a given key.
* If there is more than one value, you get an exception
*/
def required(key : String) = list(key) match {
case List() => sys.error("Please provide a value for --" + key)
case List(a) => a
case _ => sys.error("Please only provide a single value for --" + key)
}
def toList : List[String] = {
m.foldLeft(List[String]()) { (args, kvlist) =>
val k = kvlist._1
val values = kvlist._2
if( k != "") {
//Make sure positional args are first
args ++ ((("--" + k) :: values))
}
else {
// These are positional args (no key), put them first:
values ++ args
}
}
}
// TODO: if there are spaces in the keys or values, this will not round-trip
override def toString : String = toList.mkString(" ")
/**
* If there is zero or one element, return it as an Option.
* If there is a list of more than one item, you get an error
*/
def optional(key : String) : Option[String] = list(key) match {
case List() => None
case List(a) => Some(a)
case _ => sys.error("Please provide at most one value for --" + key)
}
}
| AoJ/scalding | src/main/scala/com/twitter/scalding/Args.scala | Scala | apache-2.0 | 4,424 |
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization
import org.scalacheck.Arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen
import org.scalacheck.Prop._
import JavaStreamEnrichments._
import java.io._
import scala.collection.generic.CanBuildFrom
object JavaStreamEnrichmentsProperties extends Properties("JavaStreamEnrichmentsProperties") {
def output = new ByteArrayOutputStream
// The default Array[Equiv] is reference. WAT!?
implicit def aeq[T: Equiv]: Equiv[Array[T]] = new Equiv[Array[T]] {
def equiv(a: Array[T], b: Array[T]): Boolean = {
val teq = Equiv[T]
@annotation.tailrec
def go(pos: Int): Boolean =
if (pos == a.length) true
else {
teq.equiv(a(pos), b(pos)) && go(pos + 1)
}
(a.length == b.length) && go(0)
}
}
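  // Hedged aside (not in the original source): the default equality on JVM arrays is
  // reference equality, e.g. Array(1, 2) == Array(1, 2) evaluates to false, which is why
  // the element-wise Equiv above is needed for the round-trip properties defined below.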
implicit def teq[T1: Equiv, T2: Equiv]: Equiv[(T1, T2)] = new Equiv[(T1, T2)] {
def equiv(a: (T1, T2), b: (T1, T2)) = {
Equiv[T1].equiv(a._1, b._1) &&
Equiv[T2].equiv(a._2, b._2)
}
}
def writeRead[T: Equiv](g: Gen[T], w: (T, OutputStream) => Unit, r: InputStream => T): Prop =
forAll(g) { t =>
val test = output
w(t, test)
Equiv[T].equiv(r(test.toInputStream), t)
}
def writeRead[T: Equiv: Arbitrary](w: (T, OutputStream) => Unit, r: InputStream => T): Prop =
writeRead(implicitly[Arbitrary[T]].arbitrary, w, r)
property("Can (read/write)Size") = writeRead(Gen.chooseNum(0, Int.MaxValue),
{ (i: Int, os) => os.writePosVarInt(i) }, { _.readPosVarInt })
property("Can (read/write)Float") = writeRead(
{ (i: Float, os) => os.writeFloat(i) }, { _.readFloat })
property("Can (read/write)Array[Byte]") = writeRead(
// Use list because Array has a shitty toString
{ (b: List[Byte], os) => os.writePosVarInt(b.size); os.writeBytes(b.toArray) },
{ is =>
val bytes = new Array[Byte](is.readPosVarInt)
is.readFully(bytes)
bytes.toList
})
property("Can (read/write)Boolean") = writeRead(
{ (i: Boolean, os) => os.writeBoolean(i) }, { _.readBoolean })
property("Can (read/write)Double") = writeRead(
{ (i: Double, os) => os.writeDouble(i) }, { _.readDouble })
property("Can (read/write)Int") = writeRead(Gen.chooseNum(Int.MinValue, Int.MaxValue),
{ (i: Int, os) => os.writeInt(i) }, { _.readInt })
property("Can (read/write)Long") = writeRead(Gen.chooseNum(Long.MinValue, Long.MaxValue),
{ (i: Long, os) => os.writeLong(i) }, { _.readLong })
property("Can (read/write)Short") = writeRead(Gen.chooseNum(Short.MinValue, Short.MaxValue),
{ (i: Short, os) => os.writeShort(i) }, { _.readShort })
property("Can (read/write)UnsignedByte") = writeRead(Gen.chooseNum(0, (1 << 8) - 1),
{ (i: Int, os) => os.write(i.toByte) }, { _.readUnsignedByte })
property("Can (read/write)UnsignedShort") = writeRead(Gen.chooseNum(0, (1 << 16) - 1),
{ (i: Int, os) => os.writeShort(i.toShort) }, { _.readUnsignedShort })
}
| sriramkrishnan/scalding | scalding-serialization/src/test/scala/com/twitter/scalding/serialization/JavaStreamEnrichmentsProperties.scala | Scala | apache-2.0 | 3,581 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.returns
import org.jsoup.Jsoup
import play.api.data.Form
import play.api.data.Forms.{boolean, single}
import views.VatRegViewSpec
import views.html.returns.claim_refunds_view
class ClaimRefundsViewSpec extends VatRegViewSpec {
val form = Form(single("value" -> boolean))
val view = app.injector.instanceOf[claim_refunds_view]
implicit val doc = Jsoup.parse(view(form).body)
object ExpectedContent {
val heading = "Does the business expect to regularly claim VAT refunds from HMRC?"
val title = "Does the business expect to regularly claim VAT refunds from HMRC?"
val para1 = "Most businesses do not claim VAT refunds. It is only possible when the VAT a business pays on " +
"business-related purchases is more than the VAT it charges customers."
val detailsSummary = "Show me an example"
val detailsContent = "If a business sells mainly zero-rated items (the VAT on them is 0%), it may pay more VAT to " +
"run its business than it can charge. For example, most books are zero-rated, so a bookshop may find itself in this situation."
val label = "Select yes if you expect the business to regularly claim VAT refunds from HMRC"
val continue = "Save and continue"
val yes = "Yes"
val no = "No"
}
"The charge expectancy (regularly claim refunds) page" must {
"have a back link in new Setup" in new ViewSetup {
doc.hasBackLink mustBe true
}
"have the correct heading" in new ViewSetup {
doc.heading mustBe Some(ExpectedContent.heading)
}
"have a progressive disclosure" in new ViewSetup {
doc.details mustBe Some(Details(ExpectedContent.detailsSummary, ExpectedContent.detailsContent))
}
"have yes/no radio options" in new ViewSetup {
doc.radio("true") mustBe Some(ExpectedContent.yes)
doc.radio("false") mustBe Some(ExpectedContent.no)
}
"have a primary action" in new ViewSetup {
doc.submitButton mustBe Some(ExpectedContent.continue)
}
}
}
| hmrc/vat-registration-frontend | test/views/returns/ClaimRefundsViewSpec.scala | Scala | apache-2.0 | 2,606 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import scala.reflect.ClassTag
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaSparkContext._
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{BLAS, Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.api.java.{JavaDStream, JavaPairDStream}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.util.Utils
import org.apache.spark.util.random.XORShiftRandom
/**
* StreamingKMeansModel extends MLlib's KMeansModel for streaming
* algorithms, so it can keep track of a continuously updated weight
* associated with each cluster, and also update the model by
* doing a single iteration of the standard k-means algorithm.
*
* The update algorithm uses the "mini-batch" KMeans rule,
* generalized to incorporate forgetfulness (i.e. decay).
* The update rule (for each cluster) is:
*
* <blockquote>
* $$
* \begin{align}
* c_{t+1} &= [(c_t * n_t * a) + (x_t * m_t)] / [n_t + m_t] \\
* n_{t+1} &= n_t * a + m_t
* \end{align}
* $$
* </blockquote>
*
* Where c_t is the previously estimated centroid for that cluster,
* n_t is the number of points assigned to it thus far, x_t is the centroid
* estimated on the current batch, and m_t is the number of points assigned
* to that centroid in the current batch.
*
* The decay factor 'a' scales the contribution of the clusters as estimated thus far,
* by applying a as a discount weighting on the current point when evaluating
* new incoming data. If a=1, all batches are weighted equally. If a=0, new centroids
* are determined entirely by recent data. Lower values correspond to
* more forgetting.
*
* Decay can optionally be specified by a half life and associated
* time unit. The time unit can either be a batch of data or a single
* data point. Considering data arrived at time t, the half life h is defined
* such that at time t + h the discount applied to the data from t is 0.5.
* The definition remains the same whether the time unit is given
* as batches or points.
*/
@Since("1.2.0")
class StreamingKMeansModel @Since("1.2.0") (
@Since("1.2.0") override val clusterCenters: Array[Vector],
@Since("1.2.0") val clusterWeights: Array[Double])
extends KMeansModel(clusterCenters) with Logging {
/**
* Perform a k-means update on a batch of data.
*/
@Since("1.2.0")
def update(data: RDD[Vector], decayFactor: Double, timeUnit: String): StreamingKMeansModel = {
// find nearest cluster to each point
val closest = data.map(point => (this.predict(point), (point, 1L)))
// get sums and counts for updating each cluster
def mergeContribs(p1: (Vector, Long), p2: (Vector, Long)): (Vector, Long) = {
val sum =
if (p1._1 == null) {
p2._1
} else if (p2._1 == null) {
p1._1
} else {
BLAS.axpy(1.0, p2._1, p1._1)
p1._1
}
(sum, p1._2 + p2._2)
}
val dim = clusterCenters(0).size
val pointStats: Array[(Int, (Vector, Long))] = closest
.aggregateByKey((null.asInstanceOf[Vector], 0L))(mergeContribs, mergeContribs)
.collect()
val discount = timeUnit match {
case StreamingKMeans.BATCHES => decayFactor
case StreamingKMeans.POINTS =>
val numNewPoints = pointStats.iterator.map { case (_, (_, n)) =>
n
}.sum
math.pow(decayFactor, numNewPoints)
}
// apply discount to weights
BLAS.scal(discount, Vectors.dense(clusterWeights))
// implement update rule
pointStats.foreach { case (label, (sum, count)) =>
val centroid = clusterCenters(label)
val updatedWeight = clusterWeights(label) + count
val lambda = count / math.max(updatedWeight, 1e-16)
clusterWeights(label) = updatedWeight
BLAS.scal(1.0 - lambda, centroid)
BLAS.axpy(lambda / count, sum, centroid)
// display the updated cluster centers
val display = clusterCenters(label).size match {
case x if x > 100 => centroid.toArray.take(100).mkString("[", ",", "...")
case _ => centroid.toArray.mkString("[", ",", "]")
}
logInfo(s"Cluster $label updated with weight $updatedWeight and centroid: $display")
}
// Check whether the smallest cluster is dying. If so, split the largest cluster.
val (maxWeight, largest) = clusterWeights.iterator.zipWithIndex.maxBy(_._1)
val (minWeight, smallest) = clusterWeights.iterator.zipWithIndex.minBy(_._1)
if (minWeight < 1e-8 * maxWeight) {
logInfo(s"Cluster $smallest is dying. Split the largest cluster $largest into two.")
val weight = (maxWeight + minWeight) / 2.0
clusterWeights(largest) = weight
clusterWeights(smallest) = weight
val largestClusterCenter = clusterCenters(largest)
val smallestClusterCenter = clusterCenters(smallest)
var j = 0
while (j < dim) {
val x = largestClusterCenter(j)
val p = 1e-14 * math.max(math.abs(x), 1.0)
largestClusterCenter.asBreeze(j) = x + p
smallestClusterCenter.asBreeze(j) = x - p
j += 1
}
}
new StreamingKMeansModel(clusterCenters, clusterWeights)
}
}
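// Hedged worked example (not part of the original Spark source): traces the single-cluster
// update arithmetic implemented in `update` above, using made-up numbers. Note that the
// previous weight is discounted by the decay factor before the batch is merged in.
private[clustering] object StreamingKMeansUpdateExample {
  /** Returns (c_{t+1}, n_{t+1}) for one cluster, mirroring the rule applied by `update`. */
  def singleClusterUpdate(
      cPrev: Double,   // c_t: previous centroid (one-dimensional for illustration)
      nPrev: Double,   // n_t: previous weight (point count)
      decay: Double,   // a: decay factor
      xBatch: Double,  // x_t: centroid of the current batch
      mBatch: Double   // m_t: number of points in the current batch
  ): (Double, Double) = {
    val discounted = nPrev * decay                      // weight after applying the discount
    val nNext = discounted + mBatch                     // n_{t+1} = n_t * a + m_t
    val lambda = mBatch / nNext
    val cNext = (1 - lambda) * cPrev + lambda * xBatch  // c_{t+1}
    (cNext, nNext)
  }
  // Example: singleClusterUpdate(4.0, 10.0, 0.9, 8.0, 5.0) == (~5.43, 14.0), since
  //   c_{t+1} = (4.0 * 10 * 0.9 + 8.0 * 5) / (10 * 0.9 + 5) = 76.0 / 14.0 ≈ 5.43
}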
/**
* StreamingKMeans provides methods for configuring a
 * streaming k-means analysis, training the model on streaming data,
 * and using the model to make predictions on streaming data.
* See KMeansModel for details on algorithm and update rules.
*
* Use a builder pattern to construct a streaming k-means analysis
* in an application, like:
*
* {{{
* val model = new StreamingKMeans()
* .setDecayFactor(0.5)
* .setK(3)
* .setRandomCenters(5, 100.0)
* .trainOn(DStream)
* }}}
*/
@Since("1.2.0")
class StreamingKMeans @Since("1.2.0") (
@Since("1.2.0") var k: Int,
@Since("1.2.0") var decayFactor: Double,
@Since("1.2.0") var timeUnit: String) extends Logging with Serializable {
@Since("1.2.0")
def this() = this(2, 1.0, StreamingKMeans.BATCHES)
protected var model: StreamingKMeansModel = new StreamingKMeansModel(null, null)
/**
* Set the number of clusters.
*/
@Since("1.2.0")
def setK(k: Int): this.type = {
require(k > 0,
s"Number of clusters must be positive but got ${k}")
this.k = k
this
}
/**
* Set the forgetfulness of the previous centroids.
*/
@Since("1.2.0")
def setDecayFactor(a: Double): this.type = {
require(a >= 0,
s"Decay factor must be nonnegative but got ${a}")
this.decayFactor = a
this
}
/**
* Set the half life and time unit ("batches" or "points"). If points, then the decay factor
* is raised to the power of number of new points and if batches, then decay factor will be
* used as is.
*/
@Since("1.2.0")
def setHalfLife(halfLife: Double, timeUnit: String): this.type = {
require(halfLife > 0,
s"Half life must be positive but got ${halfLife}")
if (timeUnit != StreamingKMeans.BATCHES && timeUnit != StreamingKMeans.POINTS) {
throw new IllegalArgumentException("Invalid time unit for decay: " + timeUnit)
}
this.decayFactor = math.exp(math.log(0.5) / halfLife)
logInfo("Setting decay factor to: %g ".format (this.decayFactor))
this.timeUnit = timeUnit
this
}
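  // Hedged worked example (not in the original source): with halfLife = 10 and
  // timeUnit = "batches", decayFactor = exp(log(0.5) / 10) ≈ 0.933, so after 10 batches
  // the cumulative discount is 0.933^10 ≈ 0.5; the contribution of the old data has
  // halved, matching the half-life definition in the scaladoc above.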
/**
* Specify initial centers directly.
*/
@Since("1.2.0")
def setInitialCenters(centers: Array[Vector], weights: Array[Double]): this.type = {
require(centers.size == weights.size,
"Number of initial centers must be equal to number of weights")
require(centers.size == k,
s"Number of initial centers must be ${k} but got ${centers.size}")
require(weights.forall(_ >= 0),
s"Weight for each initial center must be nonnegative but got [${weights.mkString(" ")}]")
model = new StreamingKMeansModel(centers, weights)
this
}
/**
* Initialize random centers, requiring only the number of dimensions.
*
* @param dim Number of dimensions
* @param weight Weight for each center
* @param seed Random seed
*/
@Since("1.2.0")
def setRandomCenters(dim: Int, weight: Double, seed: Long = Utils.random.nextLong): this.type = {
require(dim > 0,
s"Number of dimensions must be positive but got ${dim}")
require(weight >= 0,
s"Weight for each center must be nonnegative but got ${weight}")
val random = new XORShiftRandom(seed)
val centers = Array.fill(k)(Vectors.dense(Array.fill(dim)(random.nextGaussian())))
val weights = Array.fill(k)(weight)
model = new StreamingKMeansModel(centers, weights)
this
}
/**
* Return the latest model.
*/
@Since("1.2.0")
def latestModel(): StreamingKMeansModel = {
model
}
/**
* Update the clustering model by training on batches of data from a DStream.
* This operation registers a DStream for training the model,
* checks whether the cluster centers have been initialized,
* and updates the model using each batch of data from the stream.
*
* @param data DStream containing vector data
*/
@Since("1.2.0")
def trainOn(data: DStream[Vector]): Unit = {
assertInitialized()
data.foreachRDD { (rdd, time) =>
model = model.update(rdd, decayFactor, timeUnit)
}
}
/**
* Java-friendly version of `trainOn`.
*/
@Since("1.4.0")
def trainOn(data: JavaDStream[Vector]): Unit = trainOn(data.dstream)
/**
* Use the clustering model to make predictions on batches of data from a DStream.
*
* @param data DStream containing vector data
* @return DStream containing predictions
*/
@Since("1.2.0")
def predictOn(data: DStream[Vector]): DStream[Int] = {
assertInitialized()
data.map(model.predict)
}
/**
* Java-friendly version of `predictOn`.
*/
@Since("1.4.0")
def predictOn(data: JavaDStream[Vector]): JavaDStream[java.lang.Integer] = {
JavaDStream.fromDStream(predictOn(data.dstream).asInstanceOf[DStream[java.lang.Integer]])
}
/**
* Use the model to make predictions on the values of a DStream and carry over its keys.
*
* @param data DStream containing (key, feature vector) pairs
* @tparam K key type
* @return DStream containing the input keys and the predictions as values
*/
@Since("1.2.0")
def predictOnValues[K: ClassTag](data: DStream[(K, Vector)]): DStream[(K, Int)] = {
assertInitialized()
data.mapValues(model.predict)
}
/**
* Java-friendly version of `predictOnValues`.
*/
@Since("1.4.0")
def predictOnValues[K](
data: JavaPairDStream[K, Vector]): JavaPairDStream[K, java.lang.Integer] = {
implicit val tag = fakeClassTag[K]
JavaPairDStream.fromPairDStream(
predictOnValues(data.dstream).asInstanceOf[DStream[(K, java.lang.Integer)]])
}
/** Check whether cluster centers have been initialized. */
private[this] def assertInitialized(): Unit = {
if (model.clusterCenters == null) {
throw new IllegalStateException(
"Initial cluster centers must be set before starting predictions")
}
}
}
private[clustering] object StreamingKMeans {
final val BATCHES = "batches"
final val POINTS = "points"
}
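// Hedged end-to-end usage sketch (not part of the original Spark source). The application
// name, directories, and stream contents are hypothetical; it only illustrates wiring
// trainOn / predictOnValues as described in the scaladocs above.
private[clustering] object StreamingKMeansUsageExample {
  import org.apache.spark.SparkConf
  import org.apache.spark.mllib.linalg.Vectors
  import org.apache.spark.mllib.regression.LabeledPoint
  import org.apache.spark.streaming.{Seconds, StreamingContext}

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("StreamingKMeansUsageExample")
    val ssc = new StreamingContext(conf, Seconds(10))

    // Training stream: one dense vector per line, e.g. "[0.1,0.2,0.3]"
    val trainingData = ssc.textFileStream("/hypothetical/training/dir").map(Vectors.parse)
    // Test stream: labeled points, e.g. "(1.0,[0.1,0.2,0.3])"
    val testData = ssc.textFileStream("/hypothetical/test/dir").map(LabeledPoint.parse)

    val model = new StreamingKMeans()
      .setK(3)
      .setDecayFactor(1.0)
      .setRandomCenters(dim = 3, weight = 0.0)

    model.trainOn(trainingData)
    model.predictOnValues(testData.map(lp => (lp.label, lp.features))).print()

    ssc.start()
    ssc.awaitTermination()
  }
}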
| ueshin/apache-spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/StreamingKMeans.scala | Scala | apache-2.0 | 11,970 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import com.google.inject.Inject
import config.MyErrorHandler
class AssetsController @Inject() (errorHandler: MyErrorHandler, metadata: AssetsMetadata) extends AssetsBuilder(errorHandler, metadata)
| hmrc/gmp-frontend | app/controllers/AssetsController.scala | Scala | apache-2.0 | 821 |
package com.blackboxsociety.net
import scalaz.{Reader => _, _}
import scalaz.syntax.bind._
import com.blackboxsociety._
import scalaz.concurrent._
import scalaz.concurrent.Task._
import scalaz.stream.{async => streamAsync, _}
import java.nio.channels._
import java.nio._
import java.io.IOException
import scodec.bits.ByteVector
import com.blackboxsociety.util._
trait TcpClient {
def reader(): Process[Task, ByteVector]
def stringReader(): Process[Task, String]
def write(b: Array[Byte]): Task[Unit]
def write(s: String): Task[Unit]
def write(b: ByteBuffer): Task[Unit]
def write(s: ByteVector): Task[Unit]
def write(c: FileChannel, o: Long = 0): Task[Unit]
def end(b: Array[Byte]): Task[Unit]
def end(s: String): Task[Unit]
def end(c: FileChannel): Task[Unit]
def close(): Task[Unit]
def close(n: Unit): Task[Unit]
}
object TcpClient {
def apply(socket: SocketChannel): TcpClient = {
socket.configureBlocking(false)
TcpClientImpl(socket)
}
private case class TcpClientImpl(s: SocketChannel) extends TcpClient {
def reader(): Process[Task, ByteVector] = {
def read(): Task[Finishable[ByteVector]] = async { next =>
EventLoop.addSocketRead(s, { () =>
val buffer = ByteBuffer.allocate(32768)
try {
s.read(buffer) match {
            case -1 => next(\/-(Done(ByteVector.view(buffer))))
            case _ => next(\/-(More(ByteVector.view(buffer))))
}
} catch {
          case e: IOException => next(-\/(e))
}
})
}
def go(): Process[Task, ByteVector] =
Process.await[Task, Finishable[ByteVector], ByteVector](read()) {
case Done(b) => Process.emit(b)
case More(b) => Process.Emit(Seq(b), go())
}
go()
}
def stringReader(): Process[Task, String] = reader().pipe(text.utf8Decode)
def writer(): Sink[Task, ByteVector] = {
def go(): Sink[Task, ByteVector] =
Process.await[Task, ByteVector => Task[Unit], ByteVector => Task[Unit]](Task.now(write _)) { f =>
Process.Emit(Seq(f), go())
}
go()
}
def write(b: Array[Byte]): Task[Unit] = async { next =>
val buffer = ByteBuffer.allocate(b.length)
buffer.clear()
buffer.put(b)
buffer.flip()
write(buffer).runAsync(next)
}
def write(b: ByteVector): Task[Unit] = write(b.toByteBuffer)
def write(b: ByteBuffer): Task[Unit] = async { next =>
EventLoop.addSocketWrite(s, { () =>
try {
s.write(b)
if (b.hasRemaining) {
write(b).runAsync(next)
} else {
            next(\/-(Unit))
}
} catch {
          case e: IOException => close().runAsync({ _ => next(-\/(e))})
}
})
}
def write(s: String): Task[Unit] = {
write(s.getBytes)
}
def write(c: FileChannel, o: Long = 0): Task[Unit] = async { next =>
EventLoop.addSocketWrite(s, { () =>
val size = c.size()
try {
val sent = c.transferTo(o, size, s)
val rem = o + sent
if (rem < c.size()) {
write(c, rem).runAsync(next)
} else {
            next(\/-(Unit))
}
} catch {
          case e: IOException => close().runAsync({ _ => next(-\/(e))})
}
})
}
def end(b: Array[Byte]): Task[Unit] = write(b) >>= close
def end(s: String): Task[Unit] = write(s) >>= close
def end(c: FileChannel): Task[Unit] = write(c) >>= close
def close(): Task[Unit] = async { next =>
EventLoop.closeChannel(s)
s.close()
      next(\/-(Unit))
}
def close(n: Unit): Task[Unit] = close()
}
}
| blackboxsociety/blackbox-core | src/main/scala/com/blackboxsociety/net/TcpClient.scala | Scala | mit | 3,703 |
package TAPLcomp.untyped
import scala.text.Document
// outer means that the term is the top-level term
object UntypedPrinter {
import TAPLcomp.Print._
def ptmTerm(outer: Boolean, t: Term): Document = t match {
case TmAbs(x, t2) =>
      val abs = g0("\\" :: x :: ".")
val body = ptmTerm(outer, t2)
g2(abs :/: body)
case t => ptmAppTerm(outer, t)
}
def ptmAppTerm(outer: Boolean, t: Term): Document = t match {
case TmApp(t1, t2) =>
g2(ptmAppTerm(false, t1) :/: ptmATerm(false, t2))
case t =>
ptmATerm(outer, t)
}
def ptm(t: Term) = ptmTerm(true, t)
def ptmATerm(outer: Boolean, t: Term): Document = t match {
case TmVar(x) => x
case t => "(" :: ptmTerm(outer, t) :: ")"
}
}
| hy-zhang/parser | Scala/Parser/src/TAPLcomp/untyped/syntax.scala | Scala | bsd-3-clause | 746 |
package dtc.instances.moment
import java.time.{DayOfWeek, Duration, LocalDate, LocalTime}
import dtc.js.MomentZonedDateTime
import dtc.{Offset, TimeZoneId, Zoned}
trait MomentZonedDateTimeInstanceWithoutOrder extends Zoned[MomentZonedDateTime] {
def capture(date: LocalDate, time: LocalTime, zone: TimeZoneId): MomentZonedDateTime =
MomentZonedDateTime.of(date, time, zone)
def withZoneSameInstant(x: MomentZonedDateTime, zone: TimeZoneId): MomentZonedDateTime =
x.withZoneSameInstant(zone)
def withZoneSameLocal(x: MomentZonedDateTime, zone: TimeZoneId): MomentZonedDateTime = x.withZoneSameLocal(zone)
def zone(x: MomentZonedDateTime): TimeZoneId = x.zone
def date(x: MomentZonedDateTime): LocalDate = x.toLocalDate
def time(x: MomentZonedDateTime): LocalTime = x.toLocalTime
def plus(x: MomentZonedDateTime, d: Duration): MomentZonedDateTime = x.plus(d)
def minus(x: MomentZonedDateTime, d: Duration): MomentZonedDateTime = x.minus(d)
def plusDays(x: MomentZonedDateTime, days: Int): MomentZonedDateTime = x.plusDays(days)
def plusMonths(x: MomentZonedDateTime, months: Int): MomentZonedDateTime = x.plusMonths(months)
def plusYears(x: MomentZonedDateTime, years: Int): MomentZonedDateTime = x.plusYears(years)
def offset(x: MomentZonedDateTime): Offset = x.offset
def withYear(x: MomentZonedDateTime, year: Int): MomentZonedDateTime = x.withYear(year)
def withMonth(x: MomentZonedDateTime, month: Int): MomentZonedDateTime = x.withMonth(month)
def withDayOfMonth(x: MomentZonedDateTime, dayOfMonth: Int): MomentZonedDateTime = x.withDayOfMonth(dayOfMonth)
def withHour(x: MomentZonedDateTime, hour: Int): MomentZonedDateTime = x.withHour(hour)
def withMinute(x: MomentZonedDateTime, minute: Int): MomentZonedDateTime = x.withMinute(minute)
def withSecond(x: MomentZonedDateTime, second: Int): MomentZonedDateTime = x.withSecond(second)
def withMillisecond(x: MomentZonedDateTime, millisecond: Int): MomentZonedDateTime =
x.withMillisecond(millisecond)
def withTime(x: MomentZonedDateTime, time: LocalTime): MomentZonedDateTime = x.withTime(time)
def withDate(x: MomentZonedDateTime, date: LocalDate): MomentZonedDateTime = x.withDate(date)
def dayOfWeek(x: MomentZonedDateTime): DayOfWeek = x.dayOfWeek
def dayOfMonth(x: MomentZonedDateTime): Int = x.dayOfMonth
def month(x: MomentZonedDateTime): Int = x.month
def year(x: MomentZonedDateTime): Int = x.year
def millisecond(x: MomentZonedDateTime): Int = x.millisecond
def second(x: MomentZonedDateTime): Int = x.second
def minute(x: MomentZonedDateTime): Int = x.minute
def hour(x: MomentZonedDateTime): Int = x.hour
def yearsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.yearsUntil(until)
def monthsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.monthsUntil(until)
def daysUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.daysUntil(until)
def hoursUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.hoursUntil(until)
def minutesUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.minutesUntil(until)
def secondsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.secondsUntil(until)
def millisecondsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.millisecondsUntil(until)
def utc(x: MomentZonedDateTime): (LocalDate, LocalTime) = {
val utcTime = x.withZoneSameInstant(TimeZoneId.UTC)
utcTime.toLocalDate -> utcTime.toLocalTime
}
}
| vpavkin/dtc | moment/src/main/scala/dtc/instances/moment/MomentZonedDateTimeInstanceWithoutOrder.scala | Scala | apache-2.0 | 3,525 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
import org.scalactic.anyvals._
import org.scalatest.FunSpec
import org.scalatest.Matchers
import org.scalatest.exceptions.TestFailedException
import scala.collection.immutable.SortedSet
import scala.collection.immutable.SortedMap
class GeneratorSpec extends FunSpec with Matchers {
describe("A Generator") {
it("should offer a map and flatMap method that composes the next methods") {
import Generator._
def pairGen(): Generator[(Int, Double)] =
// doubleGen().flatMap(d => intGen().map(i => (i, d)))
for {
d <- doubleGenerator
i <- intGenerator
} yield (i, d)
val aGen = pairGen()
val bGen = pairGen()
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
a1._1 should not equal a2._1
a1._2 should not equal a2._2
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
a1 shouldEqual b1
a2 shouldEqual b2
a3 shouldEqual b3
}
it("should offer a map method that composes canonicals methods and offers a shrink that uses the canonicals methods") {
import Generator._
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val expectedTupCanonicals = intCanonicalsIt.map(i => ('A', i)).toList
val tupGen = for (i <- intGenerator) yield ('A', i)
val (tupShrinkIt, _) = tupGen.shrink(('A', 100), Randomizer.default)
val (tupCanonicalsIt, _) = tupGen.canonicals(Randomizer.default)
val tupShrink = tupShrinkIt.toList
val tupCanonicals = tupCanonicalsIt.toList
tupShrink shouldBe expectedTupCanonicals
tupCanonicals shouldBe expectedTupCanonicals
}
it("should offer a flatMap method that composes canonicals methods and offers a shrink that uses the canonicals methods") {
import Generator._
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
val (doubleCanonicalsIt, _) = doubleGenerator.canonicals(Randomizer.default)
val doubleCanonicals = doubleCanonicalsIt.toList
val expectedTupCanonicals: List[(Int, Double)] =
for {
i <- intCanonicals
d <- doubleCanonicals
} yield (i, d)
val tupGen =
for {
i <- intGenerator
d <- doubleGenerator
} yield (i, d)
val (tupShrinkIt, _) = tupGen.shrink((100, 100.0), Randomizer.default)
val (tupCanonicalsIt, _) = tupGen.canonicals(Randomizer.default)
val tupShrink = tupShrinkIt.toList
val tupCanonicals = tupCanonicalsIt.toList
tupShrink shouldBe expectedTupCanonicals
tupCanonicals shouldBe expectedTupCanonicals
}
it("should offer a filter method so that pattern matching can be used in for expressions with Generator generators") {
"""for ((a, b) <- CommonGenerators.tuple2s[String, Int]) yield (b, a)""" should compile
case class Person(name: String, age: Int)
val persons = CommonGenerators.instancesOf(Person) { p => (p.name, p.age) }
"""for (Person(a, b) <- persons) yield (b, a)""" should compile
}
it("should offer a filter method that throws an exception if too many objects are filtered out") {
val doNotDoThisAtHome = CommonGenerators.ints.filter(i => i == 0) // Only keep zero
a [IllegalStateException] should be thrownBy {
doNotDoThisAtHome.next(SizeParam(PosZInt(0), 100, 100), Nil, Randomizer.default)
}
val okToDoThisAtHome = CommonGenerators.ints.filter(i => i != 0) // Only keep non-zeros
noException should be thrownBy {
okToDoThisAtHome.next(SizeParam(PosZInt(0), 100, 100), Nil, Randomizer.default)
}
}
it("should mix up both i and d when used in a for expression") {
import Generator._
def pairGen(): Generator[(Int, Double)] =
// doubleGen().flatMap(d => intGen().map(i => (i, d)))
for {
i <- intGenerator
d <- doubleGenerator
} yield (i, d)
val aGen = pairGen()
val bGen = pairGen()
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
a1._1 should not equal a2._1
a1._2 should not equal a2._2
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
a1 shouldEqual b1
a2 shouldEqual b2
a3 shouldEqual b3
}
it("should be usable in a forAll") {
import GeneratorDrivenPropertyChecks._
forAll { (i: Int) =>
i + i shouldEqual i * 2
}
a [TestFailedException] should be thrownBy {
forAll { (i: Int) =>
i + i shouldEqual i * 3
}
}
}
it("should be used at least minSuccessful times in a forAll") {
import GeneratorDrivenPropertyChecks._
var count = 0
forAll { (i: Int) =>
count += 1
i + i shouldEqual i * 2
}
count shouldEqual generatorDrivenConfig.minSuccessful.value
{
implicit val generatorDrivenConfig = PropertyCheckConfiguration(minSuccessful = 10)
count = 0
forAll { (i: Int) =>
count += 1
i + i shouldEqual i * 2
}
count shouldEqual generatorDrivenConfig.minSuccessful.value
}
}
it("should be used at least maxDiscarded times in a forAll") {
import GeneratorDrivenPropertyChecks._
var count = 0
a [TestFailedException] should be thrownBy {
forAll { (i: Int) =>
count += 1
whenever(false) {
i + i shouldEqual i * 3
}
}
}
val maxDiscarded = PropertyCheckConfiguration.calculateMaxDiscarded(generatorDrivenConfig.maxDiscardedFactor, generatorDrivenConfig.minSuccessful)
count shouldEqual maxDiscarded
{
val expectedTestDiscarded = 49
val maxDiscardedFactor = PosZDouble.ensuringValid(PropertyCheckConfiguration.calculateMaxDiscardedFactor(10, expectedTestDiscarded))
implicit val generatorDrivenConfig = PropertyCheckConfiguration(maxDiscardedFactor = maxDiscardedFactor)
info(s"What is this one: ${generatorDrivenConfig.maxDiscardedFactor}")
count = 0
a [TestFailedException] should be thrownBy {
forAll { (i: Int) =>
count += 1
whenever(false) {
i + i shouldEqual i * 3
}
}
}
count shouldEqual expectedTestDiscarded
}
}
it("mapping and flatMapping a Generator should compose the edges") {
// import prop._
import Generator._
val intGenerator1 = intGenerator
val intGenerator2 = intGenerator
val (initEdges1, ir1) = intGenerator1.initEdges(100, Randomizer.default)
val (initEdges2, ir2) = intGenerator2.initEdges(100, ir1)
initEdges1 should contain theSameElementsAs initEdges2
def pairGen(): Generator[(Int, Int)] =
for {
i <- intGenerator1
j <- intGenerator2
} yield (i, j)
val gen = pairGen()
val (initEdges, ier) = gen.initEdges(100, ir2)
initEdges.length should equal (initEdges1.length * initEdges2.length)
val comboLists: List[List[Int]] = initEdges1.combinations(2).toList
val comboPairs: List[(Int, Int)] = comboLists.map(xs => (xs(0), xs(1)))
val plusReversedPairs: List[(Int, Int)] = comboPairs flatMap { case (x, y) => List((x, y), (y, x)) }
val sameValuePairs: List[(Int, Int)] = initEdges1.map(i => (i, i))
val expectedInitEdges: List[(Int, Int)] = plusReversedPairs ++ sameValuePairs
initEdges should contain theSameElementsAs expectedInitEdges
val (tup1, e1, r1) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (tup2, e2, r2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e1, rnd = r1)
val (tup3, e3, r3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e2, rnd = r2)
val (tup4, e4, r4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e3, rnd = r3)
val (tup5, e5, r5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e4, rnd = r4)
val (tup6, e6, r6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e5, rnd = r5)
val (tup7, e7, r7) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e6, rnd = r6)
val (tup8, e8, r8) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e7, rnd = r7)
val (tup9, e9, r9) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e8, rnd = r8)
val (tup10, e10, r10) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e9, rnd = r9)
val (tup11, e11, r11) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e10, rnd = r10)
val (tup12, e12, r12) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e11, rnd = r11)
val (tup13, e13, r13) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e12, rnd = r12)
val (tup14, e14, r14) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e13, rnd = r13)
val (tup15, e15, r15) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e14, rnd = r14)
val (tup16, e16, r16) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e15, rnd = r15)
val (tup17, e17, r17) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e16, rnd = r16)
val (tup18, e18, r18) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e17, rnd = r17)
val (tup19, e19, r19) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e18, rnd = r18)
val (tup20, e20, r20) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e19, rnd = r19)
val (tup21, e21, r21) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e20, rnd = r20)
val (tup22, e22, r22) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e21, rnd = r21)
val (tup23, e23, r23) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e22, rnd = r22)
val (tup24, e24, r24) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e23, rnd = r23)
val (tup25, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = e24, rnd = r24)
val values = List(tup1, tup2, tup3, tup4, tup5, tup6, tup7, tup8, tup9, tup10,
tup11, tup12, tup13, tup14, tup15, tup16, tup17, tup18, tup19, tup20,
tup21, tup22, tup23, tup24, tup25)
values should contain theSameElementsAs expectedInitEdges
}
describe("for Booleans") {
it("should produce true and false more or less equally") {
import Generator._
val classification = CommonGenerators.classify(100000, booleanGenerator) {
case x if x => "true"
case _ => "false"
}
classification.portions("true") should be (0.5 +- 0.01)
}
it("should produce the same Boolean values in the same order given the same Randomizer") {
import Generator._
@scala.annotation.tailrec
def loop(n: Int, rnd: Randomizer, results: List[Boolean]): List[Boolean] = {
if (n == 0)
results
else {
val (bool, _, nextRnd) = booleanGenerator.next(SizeParam(0, 0, 0), Nil, rnd)
loop(n - 1, nextRnd, bool :: results)
}
}
val rnd = Randomizer.default
val firstRound = loop(100, rnd, Nil)
val secondRound = loop(100, rnd, Nil)
firstRound should contain theSameElementsAs(secondRound)
}
}
describe("for Bytes") {
it("should produce the same Byte values in the same order given the same Randomizer") {
import Generator._
val aGen = byteGenerator
val bGen = byteGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Byte edge values first in random order") {
import Generator._
val gen = byteGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Byte, ae1: List[Byte], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (0)
edges should contain (1)
edges should contain (-1)
edges should contain (Byte.MaxValue)
edges should contain (Byte.MinValue)
}
it("should produce Byte canonical values") {
import Generator._
val gen = byteGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0, 1, -1, 2, -2, 3, -3).map(_.toByte)
}
it("should shrink Bytes by repeatedly halving and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (b: Byte) =>
val generator = implicitly[Generator[Byte]]
val (shrinkIt, _) = generator.shrink(b, Randomizer.default)
val shrinks: List[Byte] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (b == 0)
shrinks shouldBe empty
else {
if (b > 1.toByte)
shrinks.last should be > 0.toByte
else if (b < -1.toByte)
shrinks.last should be < 0.toByte
import org.scalatest.Inspectors._
val pairs: List[(Byte, Byte)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == 0 || x == -y || x.abs == y.abs / 2)
}
}
}
}
}
describe("for Shorts") {
it("should produce the same Short values in the same order given the same Randomizer") {
import Generator._
val aGen= shortGenerator
val bGen = shortGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Short edge values first in random order") {
import Generator._
val gen = shortGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Short, ae1: List[Short], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (0)
edges should contain (1)
edges should contain (-1)
edges should contain (Short.MaxValue)
edges should contain (Short.MinValue)
}
it("should produce Short canonical values") {
import Generator._
val gen = shortGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0, 1, -1, 2, -2, 3, -3).map(_.toShort)
}
it("should shrink Shorts by repeatedly halving and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (n: Short) =>
val generator = implicitly[Generator[Short]]
val (shrinkIt, _) = generator.shrink(n, Randomizer.default)
val shrinks: List[Short] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (n == 0)
shrinks shouldBe empty
else {
if (n > 1.toShort)
shrinks.last should be > 0.toShort
else if (n < -1.toShort)
shrinks.last should be < 0.toShort
import org.scalatest.Inspectors._
val pairs: List[(Short, Short)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == 0 || x == -y || x.abs == y.abs / 2)
}
}
}
}
}
describe("for Ints") {
it("should produce the same Int values in the same order given the same Randomizer") {
import Generator._
val aGen= intGenerator
val bGen = intGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Int edge values first in random order") {
import Generator._
val gen = intGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Int, ae1: List[Int], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (0)
edges should contain (1)
edges should contain (-1)
edges should contain (Int.MaxValue)
edges should contain (Int.MinValue)
}
it("should produce Int canonical values") {
import Generator._
val gen = intGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0, 1, -1, 2, -2, 3, -3)
}
it("should shrink Ints by repeatedly halving and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (i: Int) =>
val generator = implicitly[Generator[Int]]
val (shrinkIt, _) = generator.shrink(i, Randomizer.default)
val shrinks: List[Int] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (i == 0)
shrinks shouldBe empty
else {
if (i > 1)
shrinks.last should be > 0
else if (i < -1)
shrinks.last should be < 0
import org.scalatest.Inspectors._
val pairs: List[(Int, Int)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == 0 || x == -y || x.abs == y.abs / 2)
}
}
}
}
}
describe("for Longs") {
it("should produce the same Long values in the same order given the same Randomizer") {
import Generator._
val aGen= longGenerator
val bGen = longGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Long edge values first in random order") {
import Generator._
val gen = longGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Long, ae1: List[Long], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (0)
edges should contain (1)
edges should contain (-1)
edges should contain (Long.MaxValue)
edges should contain (Long.MinValue)
}
it("should produce Long canonical values") {
import Generator._
val gen = longGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0L, 1L, -1L, 2L, -2L, 3L, -3L)
}
it("should shrink Longs by repeatedly halving and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (n: Long) =>
val generator = implicitly[Generator[Long]]
val (shrinkIt, _) = generator.shrink(n, Randomizer.default)
val shrinks: List[Long] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (n == 0)
shrinks shouldBe empty
else {
if (n > 1L)
shrinks.last should be > 0L
else if (n < -1L)
shrinks.last should be < 0L
import org.scalatest.Inspectors._
val pairs: List[(Long, Long)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == 0 || x == -y || x.abs == y.abs / 2)
}
/*
all (pairs) should satisfy { case (x, y) =>
y == 0 || y == -x || y.abs == x.abs / 2
}
*/
}
}
}
}
describe("for Chars") {
it("should produce the same Char values in the same order given the same Randomizer") {
import Generator._
val aGen= charGenerator
val bGen = charGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Char edge values first in random order") {
import Generator._
val gen = charGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Char, ae1: List[Char], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val edges = List(a1, a2)
edges should contain (Char.MinValue)
edges should contain (Char.MaxValue)
}
it("should produce Char canonical values") {
import Generator._
val gen = charGenerator
val (canonicalsIt, _) = gen.canonicals(Randomizer.default)
val canonicals = canonicalsIt.toList
canonicals(0) should (be >= 'a' and be <= 'z')
canonicals(1) should (be >= 'A' and be <= 'Z')
canonicals(2) should (be >= '0' and be <= '9')
}
it("should shrink Chars by trying selected printable characters") {
import GeneratorDrivenPropertyChecks._
val expectedChars = "abcdefghikjlmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789".toList
val generator = implicitly[Generator[Char]]
forAll { (c: Char) =>
val (shrinkIt, _) = generator.shrink(c, Randomizer.default)
val shrinks: List[Char] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z')
shrinks shouldBe empty
else
shrinks shouldEqual expectedChars
}
import org.scalatest.Inspectors
Inspectors.forAll (expectedChars) { (c: Char) =>
val (shrinkIt, _) = generator.shrink(c, Randomizer.default)
val shrinks: List[Char] = shrinkIt.toList
shrinks shouldBe empty
}
}
}
describe("for Floats") {
it("should produce the same Float values in the same order given the same Randomizer") {
import Generator._
val aGen = floatGenerator
val bGen = floatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
a1 shouldEqual b1
a2 shouldEqual b2
a3 shouldEqual b3
}
it("should produce the Float edge value first") {
import Generator._
val gen = floatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Float, ae1: List[Float], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, ae7, ar7) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val (a8, ae8, ar8) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae7, rnd = ar7)
val (a9, ae9, ar9) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae8, rnd = ar8)
val (a10, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae9, rnd = ar9)
val edges = List(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
edges should contain (Float.NegativeInfinity)
edges should contain (Float.MinValue)
edges should contain (-1.0F)
edges should contain (-Float.MinPositiveValue)
edges should contain (-0.0F)
edges should contain (0.0F)
edges should contain (Float.MinPositiveValue)
edges should contain (1.0F)
edges should contain (Float.MaxValue)
edges should contain (Float.PositiveInfinity)
}
it("should produce Float canonical values") {
import Generator._
val gen = floatGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f)
}
it("should shrink Floats by dropping the fraction part then repeatedly 'square-rooting' and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (f: Float) =>
val generator = implicitly[Generator[Float]]
val (shrinkIt, _) = generator.shrink(f, Randomizer.default)
val shrinks: List[Float] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (f == 0.0f) {
shrinks shouldBe empty
} else {
val n =
              if (f == Float.PositiveInfinity || f.isNaN)
Float.MaxValue
else if (f == Float.NegativeInfinity)
Float.MinValue
else f
if (n > 1.0f)
shrinks.last should be > 0.0f
else if (n < -1.0f)
shrinks.last should be < 0.0f
import org.scalatest.Inspectors._
if (!n.isWhole) {
shrinks.last shouldEqual (if (n > 0.0f) n.floor else n.ceil)
}
val pairs: List[(Float, Float)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == 0.0f || x == -y || x.abs < y.abs)
}
}
}
}
}
describe("for Doubles") {
it("should produce the same Double values in the same order given the same Randomizer") {
import Generator._
val aGen = doubleGenerator
val bGen = doubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
a1 shouldEqual b1
a2 shouldEqual b2
a3 shouldEqual b3
}
it("should produce the Double edge value first") {
import Generator._
val gen = doubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: Double, ae1: List[Double], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, ae7, ar7) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val (a8, ae8, ar8) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae7, rnd = ar7)
val (a9, ae9, ar9) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae8, rnd = ar8)
val (a10, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae9, rnd = ar9)
val edges = List(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
edges should contain (Double.NegativeInfinity)
edges should contain (Double.MinValue)
edges should contain (-1.0)
edges should contain (-Double.MinPositiveValue)
edges should contain (-0.0)
edges should contain (0.0)
edges should contain (Double.MinPositiveValue)
edges should contain (1.0)
edges should contain (Double.MaxValue)
edges should contain (Double.PositiveInfinity)
}
it("should produce Double canonical values") {
import Generator._
val gen = doubleGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(0.0, 1.0, -1.0, 2.0, -2.0, 3.0, -3.0)
}
it("should shrink Doubles by dropping the fraction part then repeatedly 'square-rooting' and negating") {
import GeneratorDrivenPropertyChecks._
// try with -173126.1489439121
forAll { (d: Double) =>
val generator = implicitly[Generator[Double]]
val (shrinkIt, _) = generator.shrink(d, Randomizer.default)
val shrinks: List[Double] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (d == 0.0) {
shrinks shouldBe empty
          } else {
val n =
              if (d == Double.PositiveInfinity || d.isNaN)
Double.MaxValue
else if (d == Double.NegativeInfinity)
Double.MinValue
else d
if (n > 1.0)
shrinks.last should be > 0.0
else if (n < -1.0)
shrinks.last should be < 0.0
if (!n.isWhole) {
shrinks.last shouldEqual (if (n > 0.0) n.floor else n.ceil)
}
val pairs: List[(Double, Double)] = shrinks.zip(shrinks.tail)
import org.scalatest.Inspectors._
forAll (pairs) { case (x, y) =>
assert(x == 0.0 || x == -y || x.abs < y.abs)
}
}
}
}
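      // Illustration only (approximate, assumed values rather than the generator's exact
      // output): for d = 100.5, a shrink list consistent with the assertions above could
      // run -1.8, 1.8, -3.2, 3.2, -10.0, 10.0, 100.0, with magnitudes growing roughly as
      // repeated squares and the list finishing at the whole-number truncation of the
      // original value.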
}
/**
* Boilerplate reduction for those `(Iterator[T], Randomizer)` pairs returned
* from `canonicals()` and `shrink()`
*
* @param pair the returned values from the Generator method
* @tparam T the type of the Generator
*/
implicit class GeneratorIteratorPairOps[T](pair: (Iterator[T], Randomizer)) {
/**
* Helper method for testing canonicals and shrinks, which should always be
* "growing".
*
* The definition of "growing" means, essentially, "moving further from zero".
* Sometimes that's in the positive direction (eg, PosInt), sometimes negative
* (NegFloat), sometimes both (NonZeroInt).
*
* This returns Unit, because it's all about the assertion.
*
* This is a bit loose and approximate, but sufficient for the various
* Scalactic types.
*
     * @param conv a conversion function from the Scalactic type T to an ordinary Numeric
     * @tparam N the underlying ordered numeric type
*/
def shouldGrowWith[N: Ordering](conv: T => N)(implicit nOps: Numeric[N]): Unit = {
val iter: Iterator[T] = pair._1
iter.reduce { (last, cur) =>
// Duplicates not allowed:
last should not equal cur
val nLast = nOps.abs(conv(last))
val nCur = nOps.abs(conv(cur))
nLast should be <= nCur
cur
}
}
}
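  // Example usage of the extension above, mirroring the "legitimate canonicals and
  // shrink" tests that follow; both calls assert that successive values never move
  // closer to zero and that no adjacent duplicates appear:
  //
  //   import Generator._
  //   val rnd = Randomizer.default
  //   posIntGenerator.canonicals(rnd).shouldGrowWith(_.value)
  //   posIntGenerator.shrink(PosInt(10000), rnd).shouldGrowWith(_.value)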
describe("for PosInts") {
it("should produce the same PosInt values in the same order given the same Randomizer") {
import Generator._
        val aGen = posIntGenerator
val bGen = posIntGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosInt edge values first in random order") {
import Generator._
val gen = posIntGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosInt, ae1: List[PosInt], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val edges = List(a1, a2)
edges should contain (PosInt(1))
edges should contain (PosInt.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posIntGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosInt(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZInts") {
it("should produce the same PosZInt values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZIntGenerator
val bGen = posZIntGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZInt edge values first in random order") {
import Generator._
val gen = posZIntGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZInt, ae1: List[PosZInt], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (PosZInt(0))
edges should contain (PosZInt(1))
edges should contain (PosZInt.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZIntGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZInt(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosLongs") {
it("should produce the same PosLong values in the same order given the same Randomizer") {
import Generator._
        val aGen = posLongGenerator
val bGen = posLongGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosLong edge values first in random order") {
import Generator._
val gen = posLongGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosLong, ae1: List[PosLong], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val edges = List(a1, a2)
edges should contain (PosLong(1L))
edges should contain (PosLong.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posLongGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosLong(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZLongs") {
it("should produce the same PosZLong values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZLongGenerator
val bGen = posZLongGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZLong edge values first in random order") {
import Generator._
val gen = posZLongGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZLong, ae1: List[PosZLong], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (PosZLong(0L))
edges should contain (PosZLong(1L))
edges should contain (PosZLong.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZLongGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZLong(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosFloat") {
it("should produce the same PosFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = posFloatGenerator
val bGen = posFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosFloat edge values first in random order") {
import Generator._
val gen = posFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosFloat, ae1: List[PosFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (PosFloat.MinPositiveValue)
edges should contain (PosFloat(1.0f))
edges should contain (PosFloat.MaxValue)
edges should contain (PosFloat.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosFiniteFloat") {
it("should produce the same PosFiniteFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = posFiniteFloatGenerator
val bGen = posFiniteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosFiniteFloat edge values first in random order") {
import Generator._
val gen = posFiniteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosFiniteFloat, ae1: List[PosFiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (PosFiniteFloat.MinValue)
edges should contain (PosFiniteFloat(1.0f))
edges should contain (PosFiniteFloat.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posFiniteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosFiniteFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZFloat") {
it("should produce the same PosZFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZFloatGenerator
val bGen = posZFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZFloat edge values first in random order") {
import Generator._
val gen = posZFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZFloat, ae1: List[PosZFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
edges should contain (PosZFloat(-0.0f))
edges should contain (PosZFloat(0.0f))
edges should contain (PosZFloat.MinPositiveValue)
edges should contain (PosZFloat(1.0f))
edges should contain (PosZFloat.MaxValue)
edges should contain (PosZFloat.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZFiniteFloat") {
it("should produce the same PosZFiniteFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZFiniteFloatGenerator
val bGen = posZFiniteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZFiniteFloat edge values first in random order") {
import Generator._
val gen = posZFiniteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZFiniteFloat, ae1: List[PosZFiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (PosZFiniteFloat(-0.0f))
edges should contain (PosZFiniteFloat(0.0f))
edges should contain (PosZFiniteFloat.MinPositiveValue)
edges should contain (PosZFiniteFloat(1.0f))
edges should contain (PosZFiniteFloat.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZFiniteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZFiniteFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosDouble") {
it("should produce the same PosDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = posDoubleGenerator
val bGen = posDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosDouble edge values first in random order") {
import Generator._
val gen = posDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosDouble, ae1: List[PosDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (PosDouble.MinPositiveValue)
edges should contain (PosDouble(1.0))
edges should contain (PosDouble.MaxValue)
edges should contain (PosDouble.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosFiniteDouble") {
it("should produce the same PosFiniteDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = posFiniteDoubleGenerator
val bGen = posFiniteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosFiniteDouble edge values first in random order") {
import Generator._
val gen = posFiniteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosFiniteDouble, ae1: List[PosFiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (PosFiniteDouble.MinValue)
edges should contain (PosFiniteDouble(1.0))
edges should contain (PosFiniteDouble.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posFiniteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosFiniteDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZDouble") {
it("should produce the same PosZDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZDoubleGenerator
val bGen = posZDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZDouble edge values first in random order") {
import Generator._
val gen = posZDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZDouble, ae1: List[PosZDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
        edges should contain (PosZDouble(-0.0))
        edges should contain (PosZDouble(0.0))
        edges should contain (PosZDouble.MinPositiveValue)
        edges should contain (PosZDouble(1.0))
edges should contain (PosZDouble.MaxValue)
edges should contain (PosZDouble.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for PosZFiniteDouble") {
it("should produce the same PosZFiniteDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = posZFiniteDoubleGenerator
val bGen = posZFiniteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce PosZFiniteDouble edge values first in random order") {
import Generator._
val gen = posZFiniteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: PosZFiniteDouble, ae1: List[PosZFiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
        edges should contain (PosZFiniteDouble(-0.0))
        edges should contain (PosZFiniteDouble(0.0))
        edges should contain (PosZFiniteDouble.MinPositiveValue)
        edges should contain (PosZFiniteDouble(1.0))
edges should contain (PosZFiniteDouble.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = posZFiniteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(PosZFiniteDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegInts") {
it("should produce the same NegInt values in the same order given the same Randomizer") {
import Generator._
        val aGen = negIntGenerator
val bGen = negIntGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegInt edge values first in random order") {
import Generator._
val gen = negIntGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegInt, ae1: List[NegInt], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val edges = List(a1, a2)
edges should contain (NegInt(-1))
edges should contain (NegInt.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negIntGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegInt(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZInts") {
it("should produce the same NegZInt values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZIntGenerator
val bGen = negZIntGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZInt edge values first in random order") {
import Generator._
val gen = negZIntGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZInt, ae1: List[NegZInt], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (NegZInt(0))
edges should contain (NegZInt(-1))
edges should contain (NegZInt.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZIntGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZInt(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegLongs") {
it("should produce the same NegLong values in the same order given the same Randomizer") {
import Generator._
        val aGen = negLongGenerator
val bGen = negLongGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegLong edge values first in random order") {
import Generator._
val gen = negLongGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegLong, ae1: List[NegLong], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val edges = List(a1, a2)
edges should contain (NegLong(-1L))
edges should contain (NegLong.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negLongGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegLong(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZLongs") {
it("should produce the same NegZLong values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZLongGenerator
val bGen = negZLongGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZLong edge values first in random order") {
import Generator._
val gen = negZLongGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZLong, ae1: List[NegZLong], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (NegZLong(0L))
edges should contain (NegZLong(-1L))
edges should contain (NegZLong.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZLongGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZLong(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegFloat") {
it("should produce the same NegFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = negFloatGenerator
val bGen = negFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegFloat edge values first in random order") {
import Generator._
val gen = negFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegFloat, ae1: List[NegFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (NegFloat.MaxValue)
edges should contain (NegFloat(-1.0f))
edges should contain (NegFloat.MinValue)
edges should contain (NegFloat.NegativeInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegFloat(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegFiniteFloat") {
it("should produce the same NegFiniteFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = negFiniteFloatGenerator
val bGen = negFiniteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegFiniteFloat edge values first in random order") {
import Generator._
val gen = negFiniteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegFiniteFloat, ae1: List[NegFiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (NegFiniteFloat(-1.0f))
edges should contain (NegFiniteFloat.MaxValue)
edges should contain (NegFiniteFloat.MinValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negFiniteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegFiniteFloat(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZFloat") {
it("should produce the same NegZFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZFloatGenerator
val bGen = negZFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZFloat edge values first in random order") {
import Generator._
val gen = negZFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZFloat, ae1: List[NegZFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
edges should contain (NegZFloat(0.0f))
edges should contain (NegZFloat(-0.0f))
edges should contain (NegZFloat.ensuringValid(-Float.MinPositiveValue))
edges should contain (NegZFloat(-1.0f))
edges should contain (NegZFloat.MinValue)
edges should contain (NegZFloat.NegativeInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZFloat(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZFiniteFloat") {
it("should produce the same NegZFiniteFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZFiniteFloatGenerator
val bGen = negZFiniteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZFiniteFloat edge values first in random order") {
import Generator._
val gen = negZFiniteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZFiniteFloat, ae1: List[NegZFiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (NegZFiniteFloat(0.0f))
edges should contain (NegZFiniteFloat(-0.0f))
edges should contain (NegZFiniteFloat.ensuringValid(-Float.MinPositiveValue))
edges should contain (NegZFiniteFloat(-1.0f))
edges should contain (NegZFiniteFloat.MinValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZFiniteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZFiniteFloat(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegDouble") {
it("should produce the same NegDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = negDoubleGenerator
val bGen = negDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegDouble edge values first in random order") {
import Generator._
val gen = negDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegDouble, ae1: List[NegDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (NegDouble.MaxValue)
        edges should contain (NegDouble(-1.0))
edges should contain (NegDouble.MinValue)
edges should contain (NegDouble.NegativeInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegDouble(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegFiniteDouble") {
it("should produce the same NegFiniteDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = negFiniteDoubleGenerator
val bGen = negFiniteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegFiniteDouble edge values first in random order") {
import Generator._
val gen = negFiniteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegFiniteDouble, ae1: List[NegFiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (NegFiniteDouble(-1.0))
edges should contain (NegFiniteDouble.MinValue)
edges should contain (NegFiniteDouble.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negFiniteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegFiniteDouble(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZDouble") {
it("should produce the same NegZDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZDoubleGenerator
val bGen = negZDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZDouble edge values first in random order") {
import Generator._
val gen = negZDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZDouble, ae1: List[NegZDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
        edges should contain (NegZDouble(0.0))
        edges should contain (NegZDouble(-0.0))
        edges should contain (NegZDouble.ensuringValid(-Double.MinPositiveValue))
        edges should contain (NegZDouble(-1.0))
edges should contain (NegZDouble.MinValue)
edges should contain (NegZDouble.NegativeInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZDouble(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NegZFiniteDouble") {
it("should produce the same NegZFiniteDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = negZFiniteDoubleGenerator
val bGen = negZFiniteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NegZFiniteDouble edge values first in random order") {
import Generator._
val gen = negZFiniteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NegZFiniteDouble, ae1: List[NegZFiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val edges = List(a1, a2, a3, a4, a5)
edges should contain (NegZFiniteDouble(0.0))
edges should contain (NegZFiniteDouble(-0.0))
edges should contain (NegZFiniteDouble.ensuringValid(-Double.MinPositiveValue))
edges should contain (NegZFiniteDouble(-1.0))
edges should contain (NegZFiniteDouble.MinValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = negZFiniteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NegZFiniteDouble(-10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NonZeroInts") {
it("should produce the same NonZeroInt values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroIntGenerator
val bGen = nonZeroIntGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroInt edge values first in random order") {
import Generator._
val gen = nonZeroIntGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroInt, ae1: List[NonZeroInt], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (NonZeroInt(-1))
edges should contain (NonZeroInt.MaxValue)
edges should contain (NonZeroInt(1))
edges should contain (NonZeroInt.MinValue)
}
it("should produce NonZeroInt canonical values") {
import Generator._
val gen = nonZeroIntGenerator
val (canonicals, _) = gen.canonicals(Randomizer.default)
canonicals.toList shouldBe List(NonZeroInt(1), NonZeroInt(-1), NonZeroInt(2), NonZeroInt(-2), NonZeroInt(3), NonZeroInt(-3))
}
it("should shrink NonZeroInts by repeatedly halving and negating") {
import GeneratorDrivenPropertyChecks._
forAll { (i: NonZeroInt) =>
val generator = implicitly[Generator[NonZeroInt]]
val (shrinkIt, _) = generator.shrink(i, Randomizer.default)
val shrinks: List[NonZeroInt] = shrinkIt.toList
shrinks.distinct.length shouldEqual shrinks.length
if (i.value == 1 || i.value == -1)
shrinks shouldBe empty
else {
if (i > 1)
shrinks.last.value should be >= 1
else if (i < -1)
shrinks.last.value should be <= 1
import org.scalatest.Inspectors._
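            // Adjacent values in the shrink output should either be negations of each
            // other or differ by a factor of two in magnitude.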
val pairs: List[(NonZeroInt, NonZeroInt)] = shrinks.zip(shrinks.tail)
forAll (pairs) { case (x, y) =>
assert(x == -y || x.value.abs == y.value.abs / 2)
}
}
}
}
}
describe("for NonZeroLongs") {
it("should produce the same NonZeroLong values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroLongGenerator
val bGen = nonZeroLongGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroLong edge values first in random order") {
import Generator._
val gen = nonZeroLongGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroLong, ae1: List[NonZeroLong], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val edges = List(a1, a2, a3, a4)
edges should contain (NonZeroLong(-1L))
edges should contain (NonZeroLong.MaxValue)
edges should contain (NonZeroLong(1L))
edges should contain (NonZeroLong.MinValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = nonZeroLongGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NonZeroLong(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NonZeroFloat") {
it("should produce the same NonZeroFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroFloatGenerator
val bGen = nonZeroFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroFloat edge values first in random order") {
import Generator._
val gen = nonZeroFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroFloat, ae1: List[NonZeroFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, ae7, ar7) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val (a8, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae7, rnd = ar7)
val edges = List(a1, a2, a3, a4, a5, a6, a7, a8)
edges should contain (NonZeroFloat.NegativeInfinity)
edges should contain (NonZeroFloat.MinValue)
edges should contain (NonZeroFloat(-1.0f))
edges should contain (-NonZeroFloat.MinPositiveValue)
edges should contain (NonZeroFloat.MinPositiveValue)
edges should contain (NonZeroFloat(1.0f))
edges should contain (NonZeroFloat.MaxValue)
edges should contain (NonZeroFloat.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = nonZeroFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NonZeroFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NonZeroFiniteFloat") {
it("should produce the same NonZeroFiniteFloat values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroFiniteFloatGenerator
val bGen = nonZeroFiniteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroFiniteFloat edge values first in random order") {
import Generator._
val gen = nonZeroFiniteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroFiniteFloat, ae1: List[NonZeroFiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
edges should contain (NonZeroFiniteFloat.MinValue)
edges should contain (NonZeroFiniteFloat(-1.0f))
edges should contain (NonZeroFiniteFloat.ensuringValid(-NonZeroFiniteFloat.MinPositiveValue))
edges should contain (NonZeroFiniteFloat.MinPositiveValue)
edges should contain (NonZeroFiniteFloat(1.0f))
edges should contain (NonZeroFiniteFloat.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = nonZeroFiniteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NonZeroFiniteFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NonZeroDouble") {
it("should produce the same NonZeroDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroDoubleGenerator
val bGen = nonZeroDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroDouble edge values first in random order") {
import Generator._
val gen = nonZeroDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroDouble, ae1: List[NonZeroDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, ae7, ar7) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val (a8, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae7, rnd = ar7)
val edges = List(a1, a2, a3, a4, a5, a6, a7, a8)
edges should contain (NonZeroDouble.NegativeInfinity)
edges should contain (NonZeroDouble.MinValue)
        edges should contain (NonZeroDouble(-1.0))
        edges should contain (-NonZeroDouble.MinPositiveValue)
        edges should contain (NonZeroDouble.MinPositiveValue)
        edges should contain (NonZeroDouble(1.0))
edges should contain (NonZeroDouble.MaxValue)
edges should contain (NonZeroDouble.PositiveInfinity)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = nonZeroDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NonZeroDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NonZeroFiniteDouble") {
it("should produce the same NonZeroFiniteDouble values in the same order given the same Randomizer") {
import Generator._
        val aGen = nonZeroFiniteDoubleGenerator
val bGen = nonZeroFiniteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NonZeroFiniteDouble edge values first in random order") {
import Generator._
val gen = nonZeroFiniteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: NonZeroFiniteDouble, ae1: List[NonZeroFiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val edges = List(a1, a2, a3, a4, a5, a6)
edges should contain (NonZeroFiniteDouble.MinValue)
edges should contain (NonZeroFiniteDouble(-1.0))
edges should contain (NonZeroFiniteDouble.ensuringValid(-NonZeroFiniteDouble.MinPositiveValue))
edges should contain (NonZeroFiniteDouble.MinPositiveValue)
edges should contain (NonZeroFiniteDouble(1.0))
edges should contain (NonZeroFiniteDouble.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = nonZeroFiniteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(NonZeroFiniteDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for FiniteFloat") {
it("should produce the same FiniteFloat values in the same order given the same Randomizer") {
import Generator._
val aGen = finiteFloatGenerator
val bGen = finiteFloatGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce FiniteFloat edge values first in random order") {
import Generator._
val gen = finiteFloatGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: FiniteFloat, ae1: List[FiniteFloat], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val edges = List(a1, a2, a3, a4, a5, a6, a7)
edges should contain (FiniteFloat.MinValue)
edges should contain (FiniteFloat(-1.0f))
edges should contain (FiniteFloat.ensuringValid(-FiniteFloat.MinPositiveValue))
edges should contain (FiniteFloat(0.0f))
edges should contain (FiniteFloat.MinPositiveValue)
edges should contain (FiniteFloat(1.0f))
edges should contain (FiniteFloat.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = finiteFloatGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(FiniteFloat(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for FiniteDouble") {
it("should produce the same FiniteDouble values in the same order given the same Randomizer") {
import Generator._
val aGen = finiteDoubleGenerator
val bGen = finiteDoubleGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce FiniteDouble edge values first in random order") {
import Generator._
val gen = finiteDoubleGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1: FiniteDouble, ae1: List[FiniteDouble], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, ae3, ar3) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val (a4, ae4, ar4) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae3, rnd = ar3)
val (a5, ae5, ar5) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae4, rnd = ar4)
val (a6, ae6, ar6) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae5, rnd = ar5)
val (a7, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae6, rnd = ar6)
val edges = List(a1, a2, a3, a4, a5, a6, a7)
edges should contain (FiniteDouble.MinValue)
edges should contain (FiniteDouble(-1.0))
edges should contain (FiniteDouble.ensuringValid(-FiniteDouble.MinPositiveValue))
edges should contain (FiniteDouble(0.0))
edges should contain (FiniteDouble.MinPositiveValue)
edges should contain (FiniteDouble(1.0))
edges should contain (FiniteDouble.MaxValue)
}
it("should have legitimate canonicals and shrink") {
import Generator._
val gen = finiteDoubleGenerator
val rnd = Randomizer.default
gen.canonicals(rnd).shouldGrowWith(_.value)
gen.shrink(FiniteDouble(10000), rnd).shouldGrowWith(_.value)
}
}
describe("for NumericChar") {
it("should produce the same NumericChar values in the same order given the same Randomizer") {
import Generator._
val aGen = numericCharGenerator
val bGen = numericCharGenerator
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce NumericChar edge values first in random order") {
import Generator._
val gen = numericCharGenerator
val (initEdges, ier) = gen.initEdges(10, Randomizer.default)
val (a1, ae1, ar1) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = initEdges, rnd = ier)
val (a2, _, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
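        // The test expects the NumericChar edges to be just '0' and '9', so two draws
        // should surface both, in either order.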
List(a1, a2) should contain theSameElementsAs List(NumericChar('0'), NumericChar('9'))
}
}
describe("for Strings") {
it("should offer a String generator that returns a string whose length equals the passed size") {
import Generator._
val gen = stringGenerator
val (s1, _, r1) = gen.next(szp = SizeParam(PosZInt(0), 100, 0), edges = Nil, rnd = Randomizer(100))
s1.length shouldBe 0
val (s2, _, r2) = gen.next(szp = SizeParam(PosZInt(0), 100, 3), edges = Nil, rnd = r1)
s2.length shouldBe 3
val (s3, _, r3) = gen.next(szp = SizeParam(PosZInt(0), 100, 38), edges = Nil, rnd = r2)
s3.length shouldBe 38
val (s4, _, r4) = gen.next(szp = SizeParam(PosZInt(0), 100, 88), edges = Nil, rnd = r3)
s4.length shouldBe 88
val (s5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = r4)
s5.length shouldBe 100
}
it("should shrink Strings using strategery") {
import GeneratorDrivenPropertyChecks._
forAll { (s: String) =>
val generator = implicitly[Generator[String]]
val (shrinkIt, _) = generator.shrink(s, Randomizer.default)
val shrinks: List[String] = shrinkIt.toList
if (s.isEmpty)
shrinks shouldBe empty
else {
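            // Expected shrink order for a non-empty String: the empty String first, then
            // one-character Strings drawn from the Char canonicals (a lowercase letter, an
            // uppercase letter, a digit), then one-character Strings for each distinct
            // character of s, then a run of Strings whose lengths strictly increase.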
shrinks(0) shouldBe ""
shrinks(1) should have length 1
shrinks(1).head should (be >= 'a' and be <= 'z')
shrinks(2) should have length 1
shrinks(2).head should (be >= 'A' and be <= 'Z')
shrinks(3) should have length 1
shrinks(3).head should (be >= '0' and be <= '9')
val theChars = shrinks.drop(4)
val distincts: List[String] = s.distinct.toList.map(_.toString)
theChars.take(distincts.length).toList shouldEqual distincts
val theHalves = shrinks.drop(4 + distincts.length)
if (theHalves.length > 1) {
import org.scalatest.Inspectors
val zipped = theHalves.zip(theHalves.tail)
Inspectors.forAll (zipped) { case (s, t) =>
s.length should be < t.length
}
} else succeed
}
}
}
it("should offer a String generator that offers cononicals based on Char canonicals") {
import Generator._
val gen = stringGenerator
val (canonicalsIt, _) = gen.canonicals(Randomizer.default)
val canonicals = canonicalsIt.toList
canonicals(0) shouldBe empty
canonicals(1) should have length 1
canonicals(1).head should (be >= 'a' and be <= 'z')
canonicals(2) should have length 1
canonicals(2).head should (be >= 'A' and be <= 'Z')
canonicals(3) should have length 1
canonicals(3).head should (be >= '0' and be <= '9')
}
}
describe("for Options") {
it("should produce Nones with a reasonable frequency") {
import Generator._
val gen = optionGenerator[Int]
val classified = CommonGenerators.classify(1000, gen) {
case Some(_) => "Some"
case None => "None"
}
classified.portions("None") should be (0.1 +- 0.03)
}
it("should use the base type for edges") {
import Generator._
val baseGen = intGenerator
val gen = optionGenerator[Int]
val rnd = Randomizer.default
val (intEdges, _) = baseGen.initEdges(100, rnd)
val (optEdges, _) = gen.initEdges(100, rnd)
optEdges should contain (None)
optEdges.filter(_.isDefined).map(_.get) should contain theSameElementsAs intEdges
}
it("should use the base type for canonicals") {
import Generator._
val baseGen = intGenerator
val gen = optionGenerator[Int]
val rnd = Randomizer.default
val (intCanon, _) = baseGen.canonicals(rnd)
val (optCanonIter, _) = gen.canonicals(rnd)
val optCanon = optCanonIter.toList
optCanon should contain (None)
optCanon.filter(_.isDefined).map(_.get) should contain theSameElementsAs intCanon.toList
}
it("should use the base type for shrinking") {
import Generator._
val baseGen = intGenerator
val gen = optionGenerator[Int]
val rnd = Randomizer.default
val (intShrinkIter, _) = baseGen.shrink(10000, rnd)
val (optShrinkIter, _) = gen.shrink(Some(10000), rnd)
val intShrink = intShrinkIter.toList
val optShrink = optShrinkIter.toList
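        // Shrinking Some(10000) should yield None plus Some of every shrink of the underlying Int.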
optShrink should contain (None)
optShrink.filter(_.isDefined).map(_.get) should contain theSameElementsAs(intShrink)
}
it("should not try to shrink None") {
import Generator._
val gen = optionGenerator[Int]
val rnd = Randomizer.default
val (optShrink, _) = gen.shrink(None, rnd)
assert(optShrink.isEmpty)
}
}
describe("for Ors") {
it("should use the base types for edges") {
import Generator._
import org.scalactic._
val gGen = intGenerator
val bGen = stringGenerator
val gen = orGenerator[Int, String]
val rnd = Randomizer.default
val (gEdges, _) = gGen.initEdges(100, rnd)
val (bEdges, _) = bGen.initEdges(100, rnd)
val (orEdges, _) = gen.initEdges(100, rnd)
orEdges should contain theSameElementsAs(gEdges.map(Good(_)) ++ bEdges.map(Bad(_)))
}
it("should use the base types for canonicals") {
import Generator._
import org.scalactic._
val gGen = intGenerator
val bGen = stringGenerator
val gen = orGenerator[Int, String]
val rnd = Randomizer.default
val (gCanon, _) = gGen.canonicals(rnd)
val (bCanon, _) = bGen.canonicals(rnd)
val (orCanon, _) = gen.canonicals(rnd)
orCanon.toList should contain theSameElementsAs((gCanon.map(Good(_)) ++ bCanon.map(Bad(_))).toList)
}
it("should produce an appropriate mix of Good and Bad") {
import Generator._
import org.scalactic._
val gen = orGenerator[Int, String]
val classification = CommonGenerators.classify(1000, gen) {
case Good(_) => "Good"
case Bad(_) => "Bad"
}
// It's arbitrary, but we know that it produces Bad about a quarter of the time:
classification.percentages("Bad").value should be (25 +- 2)
}
it("should use the base types to shrink") {
import Generator._
import org.scalactic._
val gGen = intGenerator
val bGen = stringGenerator
val gen = orGenerator[Int, String]
val rnd = Randomizer.default
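        // Shrinking a Good should produce the Int shrinks wrapped in Good, and shrinking
        // a Bad the String shrinks wrapped in Bad.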
val (gShrink, _) = gGen.shrink(1000, rnd)
val (bShrink, _) = bGen.shrink("hello world!", rnd)
val (orGoodShrink, _) = gen.shrink(Good(1000), rnd)
val (orBadShrink, _) = gen.shrink(Bad("hello world!"), rnd)
orGoodShrink.toList should contain theSameElementsAs(gShrink.map(Good(_)).toList)
orBadShrink.toList should contain theSameElementsAs(bShrink.map(Bad(_)).toList)
}
}
describe("for Eithers") {
it("should use the base types for edges") {
import Generator._
val rGen = intGenerator
val lGen = stringGenerator
val gen = eitherGenerator[String, Int]
val rnd = Randomizer.default
val (rEdges, _) = rGen.initEdges(100, rnd)
val (lEdges, _) = lGen.initEdges(100, rnd)
val (eitherEdges, _) = gen.initEdges(100, rnd)
eitherEdges should contain theSameElementsAs(rEdges.map(Right(_)) ++ lEdges.map(Left(_)))
}
it("should use the base types for canonicals") {
import Generator._
val rGen = intGenerator
val lGen = stringGenerator
val gen = eitherGenerator[String, Int]
val rnd = Randomizer.default
val (rCanon, _) = rGen.canonicals(rnd)
val (lCanon, _) = lGen.canonicals(rnd)
val (eitherCanon, _) = gen.canonicals(rnd)
eitherCanon.toList should contain theSameElementsAs((rCanon.map(Right(_)) ++ lCanon.map(Left(_))).toList)
}
it("should produce an appropriate mix of Right and Left") {
import Generator._
val gen = eitherGenerator[String, Int]
val classification = CommonGenerators.classify(1000, gen) {
case Right(_) => "Right"
case Left(_) => "Left"
}
// It's arbitrary, but we know that it produces Left about a quarter of the time:
classification.percentages("Left").value should be (25 +- 2)
}
it("should use the base types to shrink") {
import Generator._
val rGen = intGenerator
val lGen = stringGenerator
val gen = eitherGenerator[String, Int]
val rnd = Randomizer.default
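        // As with Or, shrinking a Right should produce the Int shrinks wrapped in Right,
        // and shrinking a Left the String shrinks wrapped in Left.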
val (rShrink, _) = rGen.shrink(1000, rnd)
val (lShrink, _) = lGen.shrink("hello world!", rnd)
val (eitherRightShrink, _) = gen.shrink(Right(1000), rnd)
val (eitherLeftShrink, _) = gen.shrink(Left("hello world!"), rnd)
eitherRightShrink.toList should contain theSameElementsAs(rShrink.map(Right(_)).toList)
eitherLeftShrink.toList should contain theSameElementsAs(lShrink.map(Left(_)).toList)
}
}
import scala.collection.GenTraversable
import org.scalactic.ColCompatHelper
/**
* A common test for how we do shrinking in the collection Generators.
*
* Since we generally try to deal with shrink() the same way for collections, we use
* this common test to make sure it's consistent. Not every collection type manages
* to use this (because Scala 2.12 collections just aren't that consistent), but
* it generally works.
*
* @param factory the companion object for this collection type
* @param generator the Generator for this collection type
* @tparam F the collection type we are testing
*/
def shrinkByStrategery[F[Int] <: GenTraversable[Int]](factory: ColCompatHelper.Factory[Int, F[Int]])(implicit generator: Generator[F[Int]]): Unit = {
import GeneratorDrivenPropertyChecks._
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
forAll { (xs: F[Int]) =>
val (shrinkIt, _) = generator.shrink(xs, Randomizer.default)
val shrinks: List[F[Int]] = shrinkIt.toList
if (xs.isEmpty)
shrinks shouldBe empty
else {
// First one should be the empty list
shrinks(0) shouldBe empty
// Then should come one-element Lists of the canonicals of the type
val phase2 = shrinks.drop(1).take(intCanonicals.length)
phase2 shouldEqual (intCanonicals.map(i => ColCompatHelper.newBuilder(factory).+=(i).result))
// Phase 3 should be one-element lists of all distinct values in the value passed to shrink
// If xs already is a one-element list, then we don't do this, because then xs would appear in the output.
val xsList = xs.toList
val xsDistincts = if (xsList.length > 1) xsList.distinct else Nil
val phase3 = shrinks.drop(1 + intCanonicals.length).take(xsDistincts.length)
phase3 shouldEqual (xsDistincts.map(i => ColCompatHelper.newBuilder(factory).+=(i).result))
// Phase 4 should be n-element lists that are prefixes cut in half
val theHalves = shrinks.drop(1 + intCanonicals.length + xsDistincts.length)
theHalves should not contain xs // This was a bug I noticed
if (theHalves.length > 1) {
import org.scalatest.Inspectors
val zipped = theHalves.zip(theHalves.tail)
Inspectors.forAll (zipped) { case (s, t) =>
s.size should be < t.size
}
} else succeed
}
}
}
import org.scalactic.ColCompatHelper.Factory._
describe("for Lists") {
it("should offer a List[T] generator that returns a List[T] whose length equals the passed size") {
import Generator._
val gen = listGenerator[Int]
val (l1, _, r1) = gen.next(szp = SizeParam(PosZInt(0), 100, 0), edges = Nil, rnd = Randomizer(100))
l1.length shouldBe 0
val (l2, _, r2) = gen.next(szp = SizeParam(PosZInt(0), 100, 3), edges = Nil, rnd = r1)
l2.length shouldBe 3
val (l3, _, r3) = gen.next(szp = SizeParam(PosZInt(0), 100, 38), edges = Nil, rnd = r2)
l3.length shouldBe 38
val (l4, _, r4) = gen.next(szp = SizeParam(PosZInt(0), 100, 88), edges = Nil, rnd = r3)
l4.length shouldBe 88
val (l5, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = r4)
l5.length shouldBe 100
}
it("should not exhibit this bug in List shrinking") {
val lstGen = implicitly[Generator[List[List[Int]]]]
val xss = List(List(100, 200, 300, 400, 300))
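        // Regression check: the shrink output must never include the value being shrunk.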
lstGen.shrink(xss, Randomizer.default)._1.toList should not contain xss
}
it("should shrink Lists using strategery") {
shrinkByStrategery[List](List)
}
it("should return an empty Iterator when asked to shrink a List of size 0") {
val lstGen = implicitly[Generator[List[Int]]]
val xs = List.empty[Int]
lstGen.shrink(xs, Randomizer.default)._1.toList shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a List of size 1") {
val lstGen = implicitly[Generator[List[Int]]]
val canonicalLists = List(0, 1, -1, 2, -2, 3, -3).map(i => List(i))
val expectedLists = List(List.empty[Int]) ++ canonicalLists
val nonCanonical = List(99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toList should contain theSameElementsAs expectedLists
val canonical = List(3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toList should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a List of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[List[Int]]]
val shrinkees = lstGen.shrink(List(3, 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed list-to-shink even if that list has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[List[Int]]]
val listToShrink = List.fill(16)(99)
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a list generator whose canonical method uses the canonical method of the underlying T") {
import GeneratorDrivenPropertyChecks._
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
val listOfIntGenerator = Generator.listGenerator[Int]
val (listOfIntCanonicalsIt, _) = listOfIntGenerator.canonicals(Randomizer.default)
val listOfIntCanonicals = listOfIntCanonicalsIt.toList
listOfIntCanonicals shouldEqual intCanonicals.map(i => List(i))
}
}
describe("for Function0s") {
it("should offer an implicit provider for constant function0's with a pretty toString") {
val function0s = Generator.function0Generator[Int]
import GeneratorDrivenPropertyChecks._
forAll (function0s) { (f: () => Int) =>
val constantResult = f()
import org.scalactic.TimesOnInt._
10 times { f() shouldEqual constantResult }
f.toString shouldBe s"() => $constantResult"
}
}
it("should offer an implicit provider for constant function0's that returns the edges of the result type") {
val ints = Generator.intGenerator
val function0s = Generator.function0Generator[Int]
val (intEdgesIt, rnd1) = ints.initEdges(100, Randomizer.default)
val (function0EdgesIt, _) = function0s.initEdges(100, rnd1)
val intEdges = intEdgesIt.toList
val function0Edges = function0EdgesIt.toList
function0Edges.map(f => f()) should contain theSameElementsAs intEdges
}
it("should offer an implicit provider for constant function0's that returns the canonicals of the result type") {
val ints = Generator.intGenerator
val function0s = Generator.function0Generator[Int]
val (intCanonicalsIt, rnd1) = ints.canonicals(Randomizer.default)
val (function0CanonicalsIt, _) = function0s.canonicals(rnd1)
val intCanonicals = intCanonicalsIt.toList
val function0Canonicals = function0CanonicalsIt.toList
function0Canonicals.map(f => f()) should contain theSameElementsAs intCanonicals
}
it("should offer an implicit provider for constant function0's that returns the shrinks of the result type") {
val ints = Generator.intGenerator
val function0s = Generator.function0Generator[Int]
import GeneratorDrivenPropertyChecks._
forAll (ints) { (i: Int) =>
val (intShrinksIt, rnd1) = ints.shrink(i, Randomizer.default)
val (function0ShrinksIt, _) = function0s.shrink(() => i, rnd1)
val intShrinks = intShrinksIt.toList
val function0Shrinks = function0ShrinksIt.toList
function0Shrinks.map(f => f()) should contain theSameElementsAs intShrinks
}
}
}
describe("for arbitrary Function1s") {
it("should offer an implicit provider that uses hashCode to tweak a seed and has a pretty toString") {
val gen = implicitly[Generator[Option[Int] => List[Int]]]
val sample = gen.sample
sample.toString should include ("Option[Int]")
sample.toString should include ("List[Int]")
}
}
describe("for Tuple2s") {
it("should offer a tuple2 generator") {
val gen = implicitly[Generator[(Int, Int)]]
val intGen = implicitly[Generator[Int]]
val (it8, rnd1) = intGen.shrink(8, Randomizer.default)
      val (it18, rnd2) = intGen.shrink(18, rnd1)
val list8 = it8.toList
val list18 = it18.toList
val listTup =
for {
x <- list8
y <- list18
} yield (x, y)
gen.shrink((8, 18), rnd2)._1.toList shouldEqual listTup
}
it("should be able to transform a tuple generator to a case class generator") {
val tupGen: Generator[(String, Int)] = Generator.tuple2Generator[String, Int]
case class Person(name: String, age: Int)
val persons = for (tup <- tupGen) yield Person(tup._1, tup._2)
val (it, _) = persons.shrink(Person("Harry Potter", 32), Randomizer.default)
it.toList should not be empty
}
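      // A hedged aside (illustrative only, not part of the original spec; the name personsViaMap is made up):
      // the same transformation can be written with map instead of a for expression, e.g.
      //   val personsViaMap: Generator[Person] = tupGen.map { case (name, age) => Person(name, age) }
      // which relies on the same Generator.map that powers the for comprehension above.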
}
describe("for Int => Ints") {
it("should have toString and simpleName that doesn't include org.scalatest.prop.valueOf") {
import GeneratorDrivenPropertyChecks._
forAll { (f: Int => Int) =>
f.toString should startWith ("(i: Int) => ")
f.toString should not include "org.scalatest.prop.valueOf"
}
}
}
describe("for Vector[T]s") {
it("should produce the same Vector[T] values in the same order given the same Randomizer") {
val aGen= Generator.vectorGenerator[Int]
val bGen = Generator.vectorGenerator[Int]
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Vector[T] edge values first in random order") {
val gen = Generator.vectorGenerator[Int]
val (a1: Vector[Int], ae1: List[Vector[Int]], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = List(Vector.empty[Int], Vector(1, 2), Vector(3, 4, 5)), rnd = Randomizer.default)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (Vector.empty[Int])
edges should contain (Vector(1, 2))
edges should contain (Vector(3, 4, 5))
}
it("should produce Vector[T] following size determined by havingSize method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingSize(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.size shouldBe 3
}
}
it("should produce Vector[T] following length determined by havingLength method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingLength(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.length shouldBe 3
}
}
it("should produce Vector[T] following sizes determined by havingSizeBetween method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.size should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingSizesBetween is called with invalid from and to pair") {
val aGen= Generator.vectorGenerator[Int]
aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce Vector[T] following lengths determined by havingLengthBetween method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingLengthsBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.length should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingLengthBetween is called with invalid from and to pair") {
val aGen= Generator.vectorGenerator[Int]
aGen.havingLengthsBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingLengthsBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingLengthsBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce Vector[T] following sizes determined by havingSizesDeterminedBy method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingSizesDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.size shouldBe 5
}
}
it("should produce Vector[T] following sizes determined by havingLengthsDeterminedBy method") {
val aGen= Generator.vectorGenerator[Int]
implicit val sGen = aGen.havingLengthsDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { v: Vector[Int] =>
v.length shouldBe 5
}
}
it("should shrink Vectors using strategery") {
shrinkByStrategery[Vector](Vector)
}
it("should return an empty Iterator when asked to shrink a Vector of size 0") {
val lstGen = implicitly[Generator[Vector[Int]]]
val xs = Vector.empty[Int]
lstGen.shrink(xs, Randomizer.default)._1.toVector shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a Vector of size 1") {
val lstGen = implicitly[Generator[Vector[Int]]]
val canonicalLists = Vector(0, 1, -1, 2, -2, 3, -3).map(i => Vector(i))
val expectedLists = Vector(Vector.empty[Int]) ++ canonicalLists
val nonCanonical = Vector(99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
val canonical = Vector(3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a Vector of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[Vector[Int]]]
val shrinkees = lstGen.shrink(Vector(3, 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed list-to-shink even if that list has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[Vector[Int]]]
val listToShrink = Vector.fill(16)(99)
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a Vector generator whose canonical method uses the canonical method of the underlying T") {
import GeneratorDrivenPropertyChecks._
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toVector
val listOfIntGenerator = Generator.vectorGenerator[Int]
val (listOfIntCanonicalsIt, _) = listOfIntGenerator.canonicals(Randomizer.default)
val listOfIntCanonicals = listOfIntCanonicalsIt.toList
listOfIntCanonicals shouldEqual intCanonicals.map(i => List(i))
}
}
describe("for Set[T]s") {
it("should produce the same Set[T] values in the same order given the same Randomizer") {
val aGen= Generator.setGenerator[Int]
val bGen = Generator.setGenerator[Int]
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Set[T] edge values first in random order") {
val gen = Generator.setGenerator[Int]
val (a1: Set[Int], ae1: List[Set[Int]], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = List(Set.empty[Int], Set(1, 2), Set(3, 4, 5)), rnd = Randomizer.default)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (Set.empty[Int])
edges should contain (Set(1, 2))
edges should contain (Set(3, 4, 5))
}
it("should produce Set[T] following size determined by havingSize method") {
val aGen= Generator.setGenerator[Int]
implicit val sGen = aGen.havingSize(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { s: Set[Int] =>
s.size shouldBe 3
}
}
it("should produce Set[T] following sizes determined by havingSizeBetween method") {
val aGen= Generator.setGenerator[Int]
implicit val sGen = aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { s: Set[Int] =>
s.size should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingSizesBetween is called with invalid from and to pair") {
val aGen= Generator.setGenerator[Int]
aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce Set[T] following sizes determined by havingSizesDeterminedBy method") {
val aGen= Generator.setGenerator[Int]
implicit val sGen = aGen.havingSizesDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { s: Set[Int] =>
s.size shouldBe 5
}
}
it("should shrink Sets using strategery") {
shrinkByStrategery[Set](Set)
}
it("should return an empty Iterator when asked to shrink a Set of size 0") {
val lstGen = implicitly[Generator[Set[Int]]]
val xs = Set.empty[Int]
lstGen.shrink(xs, Randomizer.default)._1.toSet shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a Set of size 1") {
val lstGen = implicitly[Generator[Set[Int]]]
val canonicalLists = Vector(0, 1, -1, 2, -2, 3, -3).map(i => Set(i))
val expectedLists = Vector(Set.empty[Int]) ++ canonicalLists
val nonCanonical = Set(99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
val canonical = Set(3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a Set of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[Set[Int]]]
val shrinkees = lstGen.shrink(Set(3, 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed set-to-shink even if that set has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[Set[Int]]]
val listToShrink: Set[Int] = (Set.empty[Int] /: (1 to 16)) { (set, n) =>
set + n
}
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a Set generator whose canonical method uses the canonical method of the underlying T") {
import GeneratorDrivenPropertyChecks._
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
val listOfIntGenerator = Generator.setGenerator[Int]
val (listOfIntCanonicalsIt, _) = listOfIntGenerator.canonicals(Randomizer.default)
val listOfIntCanonicals = listOfIntCanonicalsIt.toList
listOfIntCanonicals shouldEqual intCanonicals.map(i => Set(i))
}
}
describe("for SortedSet[T]s") {
it("should produce the same SortedSet[T] values in the same order given the same Randomizer") {
val aGen= Generator.sortedSetGenerator[Int]
val bGen = Generator.sortedSetGenerator[Int]
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce SortedSet[T] edge values first in random order") {
val gen = Generator.sortedSetGenerator[Int]
val (a1: SortedSet[Int], ae1: List[SortedSet[Int]], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = List(SortedSet.empty[Int], SortedSet(1, 2), SortedSet(3, 4, 5)), rnd = Randomizer.default)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (SortedSet.empty[Int])
edges should contain (SortedSet(1, 2))
edges should contain (SortedSet(3, 4, 5))
}
it("should produce Set[T] following size determined by havingSize method") {
val aGen= Generator.sortedSetGenerator[Int]
implicit val sGen = aGen.havingSize(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedSet[Int] =>
s.size shouldBe 3
}
}
it("should produce Set[T] following sizes determined by havingSizeBetween method") {
val aGen= Generator.sortedSetGenerator[Int]
implicit val sGen = aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedSet[Int] =>
s.size should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingSizesBetween is called with invalid from and to pair") {
val aGen= Generator.sortedSetGenerator[Int]
aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce Set[T] following sizes determined by havingSizesDeterminedBy method") {
val aGen= Generator.sortedSetGenerator[Int]
implicit val sGen = aGen.havingSizesDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedSet[Int] =>
s.size shouldBe 5
}
}
it("should shrink SortedSets using strategery") {
// Due to what I can only assume is an oversight in the standard library, SortedSet's
// companion object is not a GenericCompanion, so we can't use the common function here:
import GeneratorDrivenPropertyChecks._
val generator = implicitly[Generator[SortedSet[Int]]]
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
forAll { (xs: SortedSet[Int]) =>
val (shrinkIt, _) = generator.shrink(xs, Randomizer.default)
val shrinks: List[SortedSet[Int]] = shrinkIt.toList
if (xs.isEmpty)
shrinks shouldBe empty
else {
// First one should be the empty list
shrinks(0) shouldBe empty
// Then should come one-element Lists of the canonicals of the type
val phase2 = shrinks.drop(1).take(intCanonicals.length)
phase2 shouldEqual (intCanonicals.map(i => SortedSet(i)))
// Phase 3 should be one-element lists of all distinct values in the value passed to shrink
// If xs already is a one-element list, then we don't do this, because then xs would appear in the output.
val xsList = xs.toList
val xsDistincts = if (xsList.length > 1) xsList.distinct else Nil
val phase3 = shrinks.drop(1 + intCanonicals.length).take(xsDistincts.length)
phase3 shouldEqual (xsDistincts.map(i => SortedSet(i)))
// Phase 4 should be n-element lists that are prefixes cut in half
val theHalves = shrinks.drop(1 + intCanonicals.length + xsDistincts.length)
theHalves should not contain xs // This was a bug I noticed
if (theHalves.length > 1) {
import org.scalatest.Inspectors
val zipped = theHalves.zip(theHalves.tail)
Inspectors.forAll (zipped) { case (s, t) =>
s.size should be < t.size
}
} else succeed
}
}
}
it("should return an empty Iterator when asked to shrink a SortedSet of size 0") {
val lstGen = implicitly[Generator[SortedSet[Int]]]
val xs = SortedSet.empty[Int]
lstGen.shrink(xs, Randomizer.default)._1.toSet shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a Set of size 1") {
val lstGen = implicitly[Generator[SortedSet[Int]]]
val canonicalLists = Vector(0, 1, -1, 2, -2, 3, -3).map(i => SortedSet(i))
val expectedLists = Vector(SortedSet.empty[Int]) ++ canonicalLists
val nonCanonical = SortedSet(99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
val canonical = SortedSet(3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a SortedSet of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[SortedSet[Int]]]
val shrinkees = lstGen.shrink(SortedSet(3, 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed set-to-shink even if that set has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[SortedSet[Int]]]
val listToShrink: SortedSet[Int] = (SortedSet.empty[Int] /: (1 to 16)) { (set, n) =>
set + n
}
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a Set generator whose canonical method uses the canonical method of the underlying T") {
import GeneratorDrivenPropertyChecks._
val intGenerator = Generator.intGenerator
val (intCanonicalsIt, _) = intGenerator.canonicals(Randomizer.default)
val intCanonicals = intCanonicalsIt.toList
val listOfIntGenerator = Generator.sortedSetGenerator[Int]
val (listOfIntCanonicalsIt, _) = listOfIntGenerator.canonicals(Randomizer.default)
val listOfIntCanonicals = listOfIntCanonicalsIt.toList
listOfIntCanonicals shouldEqual intCanonicals.map(i => SortedSet(i))
}
}
describe("for Map[K, V]s") {
it("should produce the same Map[K, V] values in the same order given the same Randomizer") {
val aGen= Generator.mapGenerator[Int, String]
val bGen = Generator.mapGenerator[Int, String]
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce Map[K, V] edge values first in random order") {
val gen = Generator.mapGenerator[Int, String]
val (a1: Map[Int, String], ae1: List[Map[Int, String]], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = List(Map.empty[Int, String], Map(1 -> "one", 2 -> "two"), Map(3 -> "three", 4 -> "four", 5 -> "five")), rnd = Randomizer.default)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (Map.empty[Int, String])
edges should contain (Map(1 -> "one", 2 -> "two"))
edges should contain (Map(3 -> "three", 4 -> "four", 5 -> "five"))
}
it("should produce Map[K, V] following size determined by havingSize method") {
val aGen= Generator.mapGenerator[Int, String]
implicit val sGen = aGen.havingSize(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { s: Map[Int, String] =>
s.size shouldBe 3
}
}
it("should produce Map[K, V] following sizes determined by havingSizeBetween method") {
val aGen= Generator.mapGenerator[Int, String]
implicit val sGen = aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { s: Map[Int, String] =>
s.size should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingSizesBetween is called with invalid from and to pair") {
val aGen= Generator.mapGenerator[Int, String]
aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce Map[K, V] following sizes determined by havingSizesDeterminedBy method") {
val aGen= Generator.mapGenerator[Int, String]
implicit val sGen = aGen.havingSizesDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { s: Map[Int, String] =>
s.size shouldBe 5
}
}
it("should shrink Maps using strategery") {
import GeneratorDrivenPropertyChecks._
val generator = implicitly[Generator[Map[PosInt, Int]]]
val tupleGenerator = Generator.tuple2Generator[PosInt, Int]
val (tupleCanonicalsIt, _) = tupleGenerator.canonicals(Randomizer.default)
val tupleCanonicals = tupleCanonicalsIt.toList
forAll { (xs: Map[PosInt, Int]) =>
val (shrinkIt, _) = generator.shrink(xs, Randomizer.default)
val shrinks: List[Map[PosInt, Int]] = shrinkIt.toList
if (xs.isEmpty)
shrinks shouldBe empty
else {
// First one should be the empty list
shrinks(0) shouldBe empty
// Then should come one-element Lists of the canonicals of the type
val phase2 = shrinks.drop(1).take(tupleCanonicals.length)
phase2 shouldEqual (tupleCanonicals.map(i => Map(i)))
// Phase 3 should be one-element lists of all distinct values in the value passed to shrink
// If xs already is a one-element list, then we don't do this, because then xs would appear in the output.
val xsList = xs.toList
val xsDistincts = if (xsList.length > 1) xsList.distinct else Nil
val phase3 = shrinks.drop(1 + tupleCanonicals.length).take(xsDistincts.length)
phase3 shouldEqual (xsDistincts.map(i => Map(i)))
// Phase 4 should be n-element lists that are prefixes cut in half
val theHalves = shrinks.drop(1 + tupleCanonicals.length + xsDistincts.length)
theHalves should not contain xs // This was a bug I noticed
if (theHalves.length > 1) {
import org.scalatest.Inspectors
val zipped = theHalves.zip(theHalves.tail)
Inspectors.forAll (zipped) { case (s, t) =>
s.size should be < t.size
}
} else succeed
}
}
}
it("should return an empty Iterator when asked to shrink a Map of size 0") {
val lstGen = implicitly[Generator[Map[PosInt, Int]]]
val xs = Map.empty[PosInt, Int]
lstGen.shrink(xs, Randomizer.default)._1.toSet shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a Map of size 1") {
val lstGen = implicitly[Generator[Map[PosInt, Int]]]
val canonicalLists =
for {
k <- Vector(1, 2, 3)
v <- Vector(0, 1, -1, 2, -2, 3, -3)
}
yield Map(PosInt.ensuringValid(k) -> v)
val expectedLists = Vector(Map.empty[PosInt, Int]) ++ canonicalLists
val nonCanonical = Map(PosInt(99) -> 99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
val canonical = Map(PosInt(3) -> 3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a Map of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[Map[PosInt, Int]]]
val shrinkees = lstGen.shrink(Map(PosInt(3) -> 3, PosInt(2) -> 2, PosInt(99) -> 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed map-to-shink even if that set has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[Map[PosInt, Int]]]
val listToShrink: Map[PosInt, Int] = (Map.empty[PosInt, Int] /: (1 to 16)) { (map, n) =>
map + (PosInt.ensuringValid(n) -> n)
}
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a Map generator whose canonical method uses the canonical method of the underlying types") {
import GeneratorDrivenPropertyChecks._
val tupleGenerator = Generator.tuple2Generator[PosInt, Int]
val (tupleCanonicalsIt, _) = tupleGenerator.canonicals(Randomizer.default)
val tupleCanonicals = tupleCanonicalsIt.toList
val mapGenerator = Generator.mapGenerator[PosInt, Int]
val (mapCanonicalsIt, _) = mapGenerator.canonicals(Randomizer.default)
val mapCanonicals = mapCanonicalsIt.toList
mapCanonicals shouldEqual tupleCanonicals.map(i => Map(i))
}
}
describe("for SortedMaps") {
it("should produce the same SortedMap[K, V] values in the same order given the same Randomizer") {
val aGen= Generator.sortedMapGenerator[Int, String]
val bGen = Generator.sortedMapGenerator[Int, String]
val (a1, _, ar1) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (a2, _, ar2) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar1)
val (a3, _, ar3) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar2)
val (a4, _, ar4) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar3)
val (a5, _, ar5) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar4)
val (a6, _, ar6) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar5)
val (a7, _, _) = aGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = ar6)
val (b1, _, br1) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = Randomizer(100))
val (b2, _, br2) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br1)
val (b3, _, br3) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br2)
val (b4, _, br4) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br3)
val (b5, _, br5) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br4)
val (b6, _, br6) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br5)
val (b7, _, _) = bGen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = Nil, rnd = br6)
List(a1, a2, a3, a4, a5) should contain theSameElementsAs List(b1, b2, b3, b4, b5)
a6 shouldEqual b6
a7 shouldEqual b7
}
it("should produce SortedMap[K, V] edge values first in random order") {
val gen = Generator.sortedMapGenerator[Int, String]
val (a1: SortedMap[Int, String], ae1: List[SortedMap[Int, String]], ar1: Randomizer) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = List(SortedMap.empty[Int, String], SortedMap(1 -> "one", 2 -> "two"), SortedMap(3 -> "three", 4 -> "four", 5 -> "five")), rnd = Randomizer.default)
val (a2, ae2, ar2) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae1, rnd = ar1)
val (a3, _, _) = gen.next(szp = SizeParam(PosZInt(0), 100, 100), edges = ae2, rnd = ar2)
val edges = List(a1, a2, a3)
edges should contain (SortedMap.empty[Int, String])
edges should contain (SortedMap(1 -> "one", 2 -> "two"))
edges should contain (SortedMap(3 -> "three", 4 -> "four", 5 -> "five"))
}
it("should produce SortedMap[K, V] following size determined by havingSize method") {
val aGen= Generator.sortedMapGenerator[Int, String]
implicit val sGen = aGen.havingSize(PosZInt(3))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedMap[Int, String] =>
s.size shouldBe 3
}
}
it("should produce SortedMap[K, V] following sizes determined by havingSizeBetween method") {
val aGen= Generator.sortedMapGenerator[Int, String]
implicit val sGen = aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedMap[Int, String] =>
s.size should (be >= 3 and be <= 5)
}
}
it("should produce IllegalArgumentException when havingSizesBetween is called with invalid from and to pair") {
val aGen= Generator.sortedMapGenerator[Int, String]
aGen.havingSizesBetween(PosZInt(3), PosZInt(5))
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(3))
}
assertThrows[IllegalArgumentException] {
aGen.havingSizesBetween(PosZInt(3), PosZInt(2))
}
}
it("should produce SortedMap[K, V] following sizes determined by havingSizesDeterminedBy method") {
val aGen= Generator.sortedMapGenerator[Int, String]
implicit val sGen = aGen.havingSizesDeterminedBy(s => SizeParam(5, 0, 5))
import GeneratorDrivenPropertyChecks._
forAll { s: SortedMap[Int, String] =>
s.size shouldBe 5
}
}
it("should shrink SortedMaps using strategery") {
import GeneratorDrivenPropertyChecks._
val generator = implicitly[Generator[SortedMap[PosInt, Int]]]
val tupleGenerator = Generator.tuple2Generator[PosInt, Int]
val (tupleCanonicalsIt, _) = tupleGenerator.canonicals(Randomizer.default)
val tupleCanonicals = tupleCanonicalsIt.toList
forAll { (xs: SortedMap[PosInt, Int]) =>
val (shrinkIt, _) = generator.shrink(xs, Randomizer.default)
val shrinks: List[SortedMap[PosInt, Int]] = shrinkIt.toList
if (xs.isEmpty)
shrinks shouldBe empty
else {
// First one should be the empty list
shrinks(0) shouldBe empty
// Then should come one-element Lists of the canonicals of the type
val phase2 = shrinks.drop(1).take(tupleCanonicals.length)
phase2 shouldEqual (tupleCanonicals.map(i => SortedMap(i)))
// Phase 3 should be one-element lists of all distinct values in the value passed to shrink
// If xs already is a one-element list, then we don't do this, because then xs would appear in the output.
val xsList = xs.toList
val xsDistincts = if (xsList.length > 1) xsList.distinct else Nil
val phase3 = shrinks.drop(1 + tupleCanonicals.length).take(xsDistincts.length)
phase3 shouldEqual (xsDistincts.map(i => SortedMap(i)))
// Phase 4 should be n-element lists that are prefixes cut in half
val theHalves = shrinks.drop(1 + tupleCanonicals.length + xsDistincts.length)
theHalves should not contain xs // This was a bug I noticed
if (theHalves.length > 1) {
import org.scalatest.Inspectors
val zipped = theHalves.zip(theHalves.tail)
Inspectors.forAll (zipped) { case (s, t) =>
s.size should be < t.size
}
} else succeed
}
}
}
it("should return an empty Iterator when asked to shrink a SortedMap of size 0") {
val lstGen = implicitly[Generator[SortedMap[PosInt, Int]]]
val xs = SortedMap.empty[PosInt, Int]
lstGen.shrink(xs, Randomizer.default)._1.toSet shouldBe empty
}
it("should return an Iterator of the canonicals excluding the given values to shrink when asked to shrink a SortedMap of size 1") {
val lstGen = implicitly[Generator[SortedMap[PosInt, Int]]]
val canonicalLists =
for {
k <- Vector(1, 2, 3)
v <- Vector(0, 1, -1, 2, -2, 3, -3)
}
yield SortedMap(PosInt.ensuringValid(k) -> v)
val expectedLists = Vector(SortedMap.empty[PosInt, Int]) ++ canonicalLists
val nonCanonical = SortedMap(PosInt(99) -> 99)
lstGen.shrink(nonCanonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
val canonical = SortedMap(PosInt(3) -> 3)
// Ensure 3 (an Int canonical value) does not show up twice in the output
lstGen.shrink(canonical, Randomizer.default)._1.toVector should contain theSameElementsAs expectedLists
}
it("should return an Iterator that does not repeat canonicals when asked to shrink a SortedMap of size 2 that includes canonicals") {
val lstGen = implicitly[Generator[SortedMap[PosInt, Int]]]
val shrinkees = lstGen.shrink(SortedMap(PosInt(3) -> 3, PosInt(2) -> 2, PosInt(99) -> 99), Randomizer.default)._1.toList
shrinkees.distinct should contain theSameElementsAs shrinkees
}
it("should return an Iterator that does not repeat the passed SortedMap-to-shink even if that SortedMap has a power of 2 length") {
// Since the last batch of lists produced by the list shrinker start at length 2 and then double in size each time,
// they lengths will be powers of two: 2, 4, 8, 16, etc... So make sure that if the original length has length 16,
// for example, that that one doesn't show up in the shrinks output, because it would be the original list-to-shrink.
val lstGen = implicitly[Generator[SortedMap[PosInt, Int]]]
val listToShrink: SortedMap[PosInt, Int] = (SortedMap.empty[PosInt, Int] /: (1 to 16)) { (map, n) =>
map + (PosInt.ensuringValid(n) -> n)
}
val shrinkees = lstGen.shrink(listToShrink, Randomizer.default)._1.toList
shrinkees.distinct should not contain listToShrink
}
it("should offer a SortedMap generator whose canonical method uses the canonical method of the underlying types") {
import GeneratorDrivenPropertyChecks._
val tupleGenerator = Generator.tuple2Generator[PosInt, Int]
val (tupleCanonicalsIt, _) = tupleGenerator.canonicals(Randomizer.default)
val tupleCanonicals = tupleCanonicalsIt.toList
val mapGenerator = Generator.sortedMapGenerator[PosInt, Int]
val (mapCanonicalsIt, _) = mapGenerator.canonicals(Randomizer.default)
val mapCanonicals = mapCanonicalsIt.toList
mapCanonicals shouldEqual tupleCanonicals.map(i => Map(i))
}
}
it("should be creatable for recursive types") {
// Based on an example from ScalaCheck: The Definitive Guide
sealed trait Color extends Product with Serializable
case object Red extends Color
case object Green extends Color
sealed trait Shape extends Product with Serializable { def color: Color }
case class Line(val color: Color) extends Shape
case class Circle(val color: Color) extends Shape
case class Box(val color: Color, boxed: Shape) extends Shape
import CommonGenerators.{evenly, specificValues}
val genColor = specificValues(Red, Green)
val genLine = for { color <- genColor } yield Line(color)
val genCircle = for { color <- genColor } yield Circle(color)
"""
lazy val genShape = evenly(genLine, genCircle, genBox)
lazy val genBox: Generator[Box] = for {
color <- genColor
shape <- genShape
} yield Box(color, shape)
""" should compile
}
}
}
|
dotty-staging/scalatest
|
scalatest-test/src/test/scala/org/scalatest/prop/GeneratorSpec.scala
|
Scala
|
apache-2.0
| 209,065
|
package me.reminisce.stats.retrieving
import akka.actor._
import com.github.nscala_time.time.Imports._
import me.reminisce.stats.model.DatabaseCollection
import me.reminisce.stats.model.Messages._
import me.reminisce.stats.model.RetrievingMessages._
import me.reminisce.stats.retrieving.RetrievingService._
import me.reminisce.stats.statistics.Responses._
import me.reminisce.stats.statistics.Stats.StatsEntities
import reactivemongo.api.{Cursor, DefaultDB}
import reactivemongo.api.collections.bson._
import reactivemongo.bson.{BSONDateTime, BSONDocument}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
object RetrievingService {
def props(database: DefaultDB): Props =
Props(new RetrievingService(database))
def getStatistics(database: DefaultDB, userId: String, from: Option[DateTime], to: Option[DateTime], limit: Option[Int]): Future[List[StatsEntities]] = {
val query = getQuery(userId, from, to)
val collectionStats = database[BSONCollection](DatabaseCollection.statsCollection)
limit match {
case Some(max) =>
collectionStats.find(query).sort(BSONDocument("date" -> -1)).cursor[StatsEntities]().collect[List](max, Cursor.DoneOnError[List[StatsEntities]]())
case None =>
collectionStats.find(query).sort(BSONDocument("date" -> -1)).cursor[StatsEntities]().collect[List](maxDocs = -1, Cursor.DoneOnError[List[StatsEntities]]())
}
}
def getQuery(userId: String, from: Option[DateTime], to: Option[DateTime]): BSONDocument = {
(from, to) match {
case (Some(f), Some(t)) =>
BSONDocument(
"userId" -> userId,
"date" -> BSONDocument(
"$gte" -> BSONDateTime(f.getMillis),
"$lte" -> BSONDateTime(t.getMillis)
)
)
case (Some(f), None) =>
BSONDocument(
"userId" -> userId,
"date" -> BSONDocument(
"$gte" -> BSONDateTime(f.getMillis)
)
)
case (None, Some(t)) =>
BSONDocument(
"userId" -> userId,
"date" -> BSONDocument(
"$lte" -> BSONDateTime(t.getMillis)
)
)
case (None, None) =>
BSONDocument(
"userId" -> userId
)
}
}
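  // Illustrative sketch (not part of the original source; the user id and date below are made up):
  // a lower-bounded query such as
  //   getQuery("someUser", Some(new DateTime(2016, 1, 1, 0, 0)), None)
  // yields BSONDocument("userId" -> "someUser", "date" -> BSONDocument("$gte" -> BSONDateTime(...))),
  // matching the (Some(f), None) case above.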
}
class RetrievingService(database: DefaultDB) extends Actor with ActorLogging {
def receive: Receive = waitingForMessages
def waitingForMessages: Receive = {
case RetrieveStats(userId, from, to, limit) =>
val client = sender
val future = getStatistics(database, userId, from, to, limit)
future.onComplete {
case Success(stats) =>
if (stats.isEmpty) {
client ! UserNotFound(s"Statistics not found for $userId")
} else {
client ! StatsRetrieved(stats.map(responseFromStats))
}
        case Failure(_) =>
          // Reply via the captured `client` reference; referring to `sender` inside an asynchronous
          // callback is unsafe because the actor may be handling a different message by the time it runs.
          client ! Abort
}
case o => log.info(s"[RS] Unexpected message ($o) received in waitingForMessages state")
}
}
|
reminisceme/stats
|
src/main/scala/me/reminisce/stats/retrieving/RetrievingService.scala
|
Scala
|
apache-2.0
| 3,028
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.sql.{Date, Timestamp}
import java.text.{DateFormat, SimpleDateFormat}
import java.util.{TimeZone, Calendar}
import org.apache.spark.unsafe.types.UTF8String
/**
* Helper functions for converting between internal and external date and time representations.
* Dates are exposed externally as java.sql.Date and are represented internally as the number of
 * days since the Unix epoch (1970-01-01). Timestamps are exposed externally as java.sql.Timestamp
* and are stored internally as longs, which are capable of storing timestamps with 100 nanosecond
* precision.
*/
object DateTimeUtils {
// we use Int and Long internally to represent [[DateType]] and [[TimestampType]]
type SQLDate = Int
type SQLTimestamp = Long
// see http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian
  // it's 2440587.5, rounded up to be compatible with Hive
final val JULIAN_DAY_OF_EPOCH = 2440588
final val SECONDS_PER_DAY = 60 * 60 * 24L
final val MICROS_PER_SECOND = 1000L * 1000L
final val NANOS_PER_SECOND = MICROS_PER_SECOND * 1000L
final val MICROS_PER_DAY = MICROS_PER_SECOND * SECONDS_PER_DAY
final val MILLIS_PER_DAY = SECONDS_PER_DAY * 1000L
// number of days in 400 years
final val daysIn400Years: Int = 146097
// number of days between 1.1.1970 and 1.1.2001
final val to2001 = -11323
  // this is year -17999, calculation: 50 * daysIn400Years
final val YearZero = -17999
final val toYearZero = to2001 + 7304850
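  // Illustrative derivation (comment added for clarity, not in the original source):
  // 7304850 = 50 * daysIn400Years = 50 * 146097, the number of days in the 20000 years between
  // the artificial year zero (-17999) and 1.1.2001, so toYearZero is the number of days
  // from 1.1.-17999 to 1.1.1970.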
@transient lazy val defaultTimeZone = TimeZone.getDefault
// Java TimeZone has no mention of thread safety. Use thread local instance to be safe.
private val threadLocalLocalTimeZone = new ThreadLocal[TimeZone] {
override protected def initialValue: TimeZone = {
Calendar.getInstance.getTimeZone
}
}
// `SimpleDateFormat` is not thread-safe.
private val threadLocalTimestampFormat = new ThreadLocal[DateFormat] {
override def initialValue(): SimpleDateFormat = {
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
}
}
// `SimpleDateFormat` is not thread-safe.
private val threadLocalDateFormat = new ThreadLocal[DateFormat] {
override def initialValue(): SimpleDateFormat = {
new SimpleDateFormat("yyyy-MM-dd")
}
}
// we should use the exact day as Int, for example, (year, month, day) -> day
def millisToDays(millisUtc: Long): SQLDate = {
// SPARK-6785: use Math.floor so negative number of days (dates before 1970)
// will correctly work as input for function toJavaDate(Int)
val millisLocal = millisUtc + threadLocalLocalTimeZone.get().getOffset(millisUtc)
Math.floor(millisLocal.toDouble / MILLIS_PER_DAY).toInt
}
// reverse of millisToDays
def daysToMillis(days: SQLDate): Long = {
val millisUtc = days.toLong * MILLIS_PER_DAY
millisUtc - threadLocalLocalTimeZone.get().getOffset(millisUtc)
}
def dateToString(days: SQLDate): String =
threadLocalDateFormat.get.format(toJavaDate(days))
// Converts Timestamp to string according to Hive TimestampWritable convention.
def timestampToString(us: SQLTimestamp): String = {
val ts = toJavaTimestamp(us)
val timestampString = ts.toString
val formatted = threadLocalTimestampFormat.get.format(ts)
if (timestampString.length > 19 && timestampString.substring(19) != ".0") {
formatted + timestampString.substring(19)
} else {
formatted
}
}
def stringToTime(s: String): java.util.Date = {
if (!s.contains('T')) {
// JDBC escape string
if (s.contains(' ')) {
Timestamp.valueOf(s)
} else {
Date.valueOf(s)
}
} else if (s.endsWith("Z")) {
// this is zero timezone of ISO8601
stringToTime(s.substring(0, s.length - 1) + "GMT-00:00")
} else if (s.indexOf("GMT") == -1) {
// timezone with ISO8601
val inset = "+00.00".length
val s0 = s.substring(0, s.length - inset)
val s1 = s.substring(s.length - inset, s.length)
if (s0.substring(s0.lastIndexOf(':')).contains('.')) {
stringToTime(s0 + "GMT" + s1)
} else {
stringToTime(s0 + ".0GMT" + s1)
}
} else {
// ISO8601 with GMT insert
val ISO8601GMT: SimpleDateFormat = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSz" )
ISO8601GMT.parse(s)
}
}
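  // Hedged usage sketch (illustrative only; the literal dates below are made up):
  //   stringToTime("2015-08-01 12:30:00")       // JDBC timestamp escape, handled by Timestamp.valueOf
  //   stringToTime("2015-08-01")                // JDBC date escape, handled by Date.valueOf
  //   stringToTime("2015-08-01T12:30:00.000Z")  // ISO 8601 UTC, rewritten to a GMT-suffixed form first
  //   stringToTime("2015-08-01T12:30:00+02:00") // ISO 8601 with an offset, given a ".0GMT" insert first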
/**
   * Returns the number of days since epoch from java.sql.Date.
*/
def fromJavaDate(date: Date): SQLDate = {
millisToDays(date.getTime)
}
/**
* Returns a java.sql.Date from number of days since epoch.
*/
def toJavaDate(daysSinceEpoch: SQLDate): Date = {
new Date(daysToMillis(daysSinceEpoch))
}
/**
* Returns a java.sql.Timestamp from number of micros since epoch.
*/
def toJavaTimestamp(us: SQLTimestamp): Timestamp = {
// setNanos() will overwrite the millisecond part, so the milliseconds should be
// cut off at seconds
var seconds = us / MICROS_PER_SECOND
var micros = us % MICROS_PER_SECOND
// setNanos() can not accept negative value
if (micros < 0) {
micros += MICROS_PER_SECOND
seconds -= 1
}
val t = new Timestamp(seconds * 1000)
t.setNanos(micros.toInt * 1000)
t
}
/**
* Returns the number of micros since epoch from java.sql.Timestamp.
*/
def fromJavaTimestamp(t: Timestamp): SQLTimestamp = {
if (t != null) {
t.getTime() * 1000L + (t.getNanos().toLong / 1000) % 1000L
} else {
0L
}
}
/**
* Returns the number of microseconds since epoch from Julian day
* and nanoseconds in a day
*/
def fromJulianDay(day: Int, nanoseconds: Long): SQLTimestamp = {
// use Long to avoid rounding errors
val seconds = (day - JULIAN_DAY_OF_EPOCH).toLong * SECONDS_PER_DAY
seconds * MICROS_PER_SECOND + nanoseconds / 1000L
}
/**
* Returns Julian day and nanoseconds in a day from the number of microseconds
*
   * Note: supports timestamps since 4717 BC (without negative nanoseconds, compatible with Hive).
*/
def toJulianDay(us: SQLTimestamp): (Int, Long) = {
val julian_us = us + JULIAN_DAY_OF_EPOCH * MICROS_PER_DAY
val day = julian_us / MICROS_PER_DAY
val micros = julian_us % MICROS_PER_DAY
(day.toInt, micros * 1000L)
}
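  // Illustrative round trip (comment added for clarity, not in the original source):
  // toJulianDay(0L) yields (JULIAN_DAY_OF_EPOCH, 0L), and fromJulianDay(JULIAN_DAY_OF_EPOCH, 0L)
  // yields 0L again, i.e. the Unix epoch maps to Julian day 2440588 at midnight.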
/**
   * Parses a given UTF8 date string to a corresponding [[Long]] value.
* The return type is [[Option]] in order to distinguish between 0L and null. The following
* formats are allowed:
*
* `yyyy`
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
*/
def stringToTimestamp(s: UTF8String): Option[SQLTimestamp] = {
if (s == null) {
return None
}
var timeZone: Option[Byte] = None
val segments: Array[Int] = Array[Int](1, 1, 1, 0, 0, 0, 0, 0, 0)
var i = 0
var currentSegmentValue = 0
val bytes = s.getBytes
var j = 0
var digitsMilli = 0
var justTime = false
while (j < bytes.length) {
val b = bytes(j)
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
if (j == 0 && b == 'T') {
justTime = true
i += 3
} else if (i < 2) {
if (b == '-') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else if (i == 0 && b == ':') {
justTime = true
segments(3) = currentSegmentValue
currentSegmentValue = 0
i = 4
} else {
return None
}
} else if (i == 2) {
if (b == ' ' || b == 'T') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 3 || i == 4) {
if (b == ':') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 5 || i == 6) {
if (b == 'Z') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
timeZone = Some(43)
} else if (b == '-' || b == '+') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
timeZone = Some(b)
} else if (b == '.' && i == 5) {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
if (i == 6 && b != '.') {
i += 1
}
} else {
if (b == ':' || b == ' ') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
}
} else {
if (i == 6) {
digitsMilli += 1
}
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
j += 1
}
segments(i) = currentSegmentValue
while (digitsMilli < 6) {
segments(6) *= 10
digitsMilli += 1
}
if (!justTime && (segments(0) < 1000 || segments(0) > 9999 || segments(1) < 1 ||
segments(1) > 12 || segments(2) < 1 || segments(2) > 31)) {
return None
}
if (segments(3) < 0 || segments(3) > 23 || segments(4) < 0 || segments(4) > 59 ||
segments(5) < 0 || segments(5) > 59 || segments(6) < 0 || segments(6) > 999999 ||
segments(7) < 0 || segments(7) > 23 || segments(8) < 0 || segments(8) > 59) {
return None
}
val c = if (timeZone.isEmpty) {
Calendar.getInstance()
} else {
Calendar.getInstance(
TimeZone.getTimeZone(f"GMT${timeZone.get.toChar}${segments(7)}%02d:${segments(8)}%02d"))
}
c.set(Calendar.MILLISECOND, 0)
if (justTime) {
c.set(Calendar.HOUR_OF_DAY, segments(3))
c.set(Calendar.MINUTE, segments(4))
c.set(Calendar.SECOND, segments(5))
} else {
c.set(segments(0), segments(1) - 1, segments(2), segments(3), segments(4), segments(5))
}
Some(c.getTimeInMillis * 1000 + segments(6))
}
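  // Hedged usage sketch (illustrative only; the literal strings are made up):
  // stringToTimestamp(UTF8String.fromString("2015-08-01 10:11:12.123456")) is expected to yield
  // Some of the microseconds for that local wall-clock time (no explicit zone, so the local Calendar
  // is used), while stringToTimestamp(UTF8String.fromString("2015-13-01")) yields None because the
  // month segment fails the range check above.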
/**
   * Parses a given UTF8 date string to a corresponding [[Int]] value.
* The return type is [[Option]] in order to distinguish between 0 and null. The following
* formats are allowed:
*
* `yyyy`,
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d *`
* `yyyy-[m]m-[d]dT*`
*/
def stringToDate(s: UTF8String): Option[SQLDate] = {
if (s == null) {
return None
}
val segments: Array[Int] = Array[Int](1, 1, 1)
var i = 0
var currentSegmentValue = 0
val bytes = s.getBytes
var j = 0
while (j < bytes.length && (i < 3 && !(bytes(j) == ' ' || bytes(j) == 'T'))) {
val b = bytes(j)
if (i < 2 && b == '-') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
return None
} else {
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
}
j += 1
}
segments(i) = currentSegmentValue
if (segments(0) < 1000 || segments(0) > 9999 || segments(1) < 1 || segments(1) > 12 ||
segments(2) < 1 || segments(2) > 31) {
return None
}
val c = Calendar.getInstance(TimeZone.getTimeZone("GMT"))
c.set(segments(0), segments(1) - 1, segments(2), 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
Some((c.getTimeInMillis / MILLIS_PER_DAY).toInt)
}
/**
* Returns the hour value of a given timestamp value. The timestamp is expressed in microseconds.
*/
def getHours(timestamp: SQLTimestamp): Int = {
val localTs = (timestamp / 1000) + defaultTimeZone.getOffset(timestamp / 1000)
((localTs / 1000 / 3600) % 24).toInt
}
/**
* Returns the minute value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getMinutes(timestamp: SQLTimestamp): Int = {
val localTs = (timestamp / 1000) + defaultTimeZone.getOffset(timestamp / 1000)
((localTs / 1000 / 60) % 60).toInt
}
/**
* Returns the second value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getSeconds(timestamp: SQLTimestamp): Int = {
((timestamp / 1000 / 1000) % 60).toInt
}
private[this] def isLeapYear(year: Int): Boolean = {
(year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)
}
/**
   * Returns the number of days since the start of a 400 year period.
* The second year of a 400 year period (year 1) starts on day 365.
*/
private[this] def yearBoundary(year: Int): Int = {
year * 365 + ((year / 4 ) - (year / 100) + (year / 400))
}
/**
* Calculates the number of years for the given number of days. This depends
* on a 400 year period.
* @param days days since the beginning of the 400 year period
* @return (number of year, days in year)
*/
private[this] def numYears(days: Int): (Int, Int) = {
val year = days / 365
val boundary = yearBoundary(year)
if (days > boundary) (year, days - boundary) else (year - 1, days - yearBoundary(year - 1))
}
/**
   * Calculates the year and the number of the day in the year for the given
   * number of days. The given value is the number of days since 1.1.1970.
   *
   * The calculation uses the fact that the period 1.1.2001 until 31.12.2400 is
   * equal to the period 1.1.1601 until 31.12.2000.
*/
private[this] def getYearAndDayInYear(daysSince1970: SQLDate): (Int, Int) = {
// add the difference (in days) between 1.1.1970 and the artificial year 0 (-17999)
val daysNormalized = daysSince1970 + toYearZero
val numOfQuarterCenturies = daysNormalized / daysIn400Years
val daysInThis400 = daysNormalized % daysIn400Years + 1
val (years, dayInYear) = numYears(daysInThis400)
val year: Int = (2001 - 20000) + 400 * numOfQuarterCenturies + years
(year, dayInYear)
}
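  // Worked examples (illustrative, assuming day 0 is 1970-01-01):
  //   getYearAndDayInYear(0)  == (1970, 1)    // 1970-01-01 is day 1 of 1970
  //   getYearAndDayInYear(-1) == (1969, 365)  // 1969-12-31; 1969 is not a leap year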
/**
* Returns the 'day in year' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayInYear(date: SQLDate): Int = {
getYearAndDayInYear(date)._2
}
/**
* Returns the year value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getYear(date: SQLDate): Int = {
getYearAndDayInYear(date)._1
}
/**
* Returns the quarter for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getQuarter(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
dayInYear = dayInYear - 1
}
if (dayInYear <= 90) {
1
} else if (dayInYear <= 181) {
2
} else if (dayInYear <= 273) {
3
} else {
4
}
}
/**
* Split date (expressed in days since 1.1.1970) into four fields:
* year, month (Jan is Month 1), dayInMonth, daysToMonthEnd (0 if it's last day of month).
*/
def splitDate(date: SQLDate): (Int, Int, Int, Int) = {
var (year, dayInYear) = getYearAndDayInYear(date)
val isLeap = isLeapYear(year)
if (isLeap && dayInYear == 60) {
(year, 2, 29, 0)
} else {
if (isLeap && dayInYear > 60) dayInYear -= 1
if (dayInYear <= 181) {
if (dayInYear <= 31) {
(year, 1, dayInYear, 31 - dayInYear)
} else if (dayInYear <= 59) {
(year, 2, dayInYear - 31, if (isLeap) 60 - dayInYear else 59 - dayInYear)
} else if (dayInYear <= 90) {
(year, 3, dayInYear - 59, 90 - dayInYear)
} else if (dayInYear <= 120) {
(year, 4, dayInYear - 90, 120 - dayInYear)
} else if (dayInYear <= 151) {
(year, 5, dayInYear - 120, 151 - dayInYear)
} else {
(year, 6, dayInYear - 151, 181 - dayInYear)
}
} else {
if (dayInYear <= 212) {
(year, 7, dayInYear - 181, 212 - dayInYear)
} else if (dayInYear <= 243) {
(year, 8, dayInYear - 212, 243 - dayInYear)
} else if (dayInYear <= 273) {
(year, 9, dayInYear - 243, 273 - dayInYear)
} else if (dayInYear <= 304) {
(year, 10, dayInYear - 273, 304 - dayInYear)
} else if (dayInYear <= 334) {
(year, 11, dayInYear - 304, 334 - dayInYear)
} else {
(year, 12, dayInYear - 334, 365 - dayInYear)
}
}
}
}
/**
* Returns the month value for the given date. The date is expressed in days
* since 1.1.1970. January is month 1.
*/
def getMonth(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
if (dayInYear == 60) {
return 2
} else if (dayInYear > 60) {
dayInYear = dayInYear - 1
}
}
if (dayInYear <= 31) {
1
} else if (dayInYear <= 59) {
2
} else if (dayInYear <= 90) {
3
} else if (dayInYear <= 120) {
4
} else if (dayInYear <= 151) {
5
} else if (dayInYear <= 181) {
6
} else if (dayInYear <= 212) {
7
} else if (dayInYear <= 243) {
8
} else if (dayInYear <= 273) {
9
} else if (dayInYear <= 304) {
10
} else if (dayInYear <= 334) {
11
} else {
12
}
}
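  // Illustrative checks (sketch, day numbers assume 1970-01-01 == day 0):
  //   getMonth(0)   == 1   // 1970-01-01
  //   getMonth(789) == 2   // 1972-02-29; dayInYear == 60 in a leap year hits the special case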
/**
* Returns the 'day of month' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayOfMonth(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
if (dayInYear == 60) {
return 29
} else if (dayInYear > 60) {
dayInYear = dayInYear - 1
}
}
if (dayInYear <= 31) {
dayInYear
} else if (dayInYear <= 59) {
dayInYear - 31
} else if (dayInYear <= 90) {
dayInYear - 59
} else if (dayInYear <= 120) {
dayInYear - 90
} else if (dayInYear <= 151) {
dayInYear - 120
} else if (dayInYear <= 181) {
dayInYear - 151
} else if (dayInYear <= 212) {
dayInYear - 181
} else if (dayInYear <= 243) {
dayInYear - 212
} else if (dayInYear <= 273) {
dayInYear - 243
} else if (dayInYear <= 304) {
dayInYear - 273
} else if (dayInYear <= 334) {
dayInYear - 304
} else {
dayInYear - 334
}
}
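  // Illustrative check (sketch): getDayOfMonth(789) == 29 for 1972-02-29, since the leap day
  // maps to dayInYear == 60 and is handled by the leap-year branch above.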
/**
* The number of days for each month (not leap year)
*/
private val monthDays = Array(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
/**
* Returns the date value for the first day of the given month.
* The month is expressed in months since year zero (17999 BC), starting from 0.
*/
private def firstDayOfMonth(absoluteMonth: Int): SQLDate = {
val absoluteYear = absoluteMonth / 12
var monthInYear = absoluteMonth - absoluteYear * 12
var date = getDateFromYear(absoluteYear)
if (monthInYear >= 2 && isLeapYear(absoluteYear + YearZero)) {
date += 1
}
while (monthInYear > 0) {
date += monthDays(monthInYear - 1)
monthInYear -= 1
}
date
}
/**
* Returns the date value for January 1 of the given year.
* The year is expressed in years since year zero (17999 BC), starting from 0.
*/
private def getDateFromYear(absoluteYear: Int): SQLDate = {
val absoluteDays = (absoluteYear * 365 + absoluteYear / 400 - absoluteYear / 100
+ absoluteYear / 4)
absoluteDays - toYearZero
}
/**
* Add date and year-month interval.
* Returns a date value, expressed in days since 1.1.1970.
*/
def dateAddMonths(days: SQLDate, months: Int): SQLDate = {
val (year, monthInYear, dayOfMonth, daysToMonthEnd) = splitDate(days)
val absoluteMonth = (year - YearZero) * 12 + monthInYear - 1 + months
val nonNegativeMonth = if (absoluteMonth >= 0) absoluteMonth else 0
val currentMonthInYear = nonNegativeMonth % 12
val currentYear = nonNegativeMonth / 12
val leapDay = if (currentMonthInYear == 1 && isLeapYear(currentYear + YearZero)) 1 else 0
val lastDayOfMonth = monthDays(currentMonthInYear) + leapDay
val currentDayInMonth = if (daysToMonthEnd == 0 || dayOfMonth >= lastDayOfMonth) {
// last day of the month
lastDayOfMonth
} else {
dayOfMonth
}
firstDayOfMonth(nonNegativeMonth) + currentDayInMonth - 1
}
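  // Illustrative checks (sketch, day numbers assume 1970-01-01 == day 0):
  //   dateAddMonths(16466, 1) == 16494   // 2015-01-31 + 1 month == 2015-02-28 (clamped)
  //   dateAddMonths(16466, 2) == 16525   // 2015-01-31 + 2 months == 2015-03-31 (last day kept)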
/**
* Add timestamp and full interval.
* Returns a timestamp value, expressed in microseconds since 1.1.1970 00:00:00.
*/
def timestampAddInterval(start: SQLTimestamp, months: Int, microseconds: Long): SQLTimestamp = {
val days = millisToDays(start / 1000L)
val newDays = dateAddMonths(days, months)
daysToMillis(newDays) * 1000L + start - daysToMillis(days) * 1000L + microseconds
}
/**
* Returns number of months between time1 and time2. time1 and time2 are expressed in
* microseconds since 1.1.1970.
*
   * If time1 and time2 have the same day of the month, or both are the last day of their
   * months, it returns an integer (any time-of-day difference is ignored).
   *
   * Otherwise, the difference is calculated based on 31 days per month and rounded to
   * 8 digits.
*/
def monthsBetween(time1: SQLTimestamp, time2: SQLTimestamp): Double = {
val millis1 = time1 / 1000L
val millis2 = time2 / 1000L
val date1 = millisToDays(millis1)
val date2 = millisToDays(millis2)
val (year1, monthInYear1, dayInMonth1, daysToMonthEnd1) = splitDate(date1)
val (year2, monthInYear2, dayInMonth2, daysToMonthEnd2) = splitDate(date2)
val months1 = year1 * 12 + monthInYear1
val months2 = year2 * 12 + monthInYear2
if (dayInMonth1 == dayInMonth2 || ((daysToMonthEnd1 == 0) && (daysToMonthEnd2 == 0))) {
return (months1 - months2).toDouble
}
// milliseconds is enough for 8 digits precision on the right side
val timeInDay1 = millis1 - daysToMillis(date1)
val timeInDay2 = millis2 - daysToMillis(date2)
val timesBetween = (timeInDay1 - timeInDay2).toDouble / MILLIS_PER_DAY
val diff = (months1 - months2).toDouble + (dayInMonth1 - dayInMonth2 + timesBetween) / 31.0
// rounding to 8 digits
math.round(diff * 1e8) / 1e8
}
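  // Illustrative behaviour (sketch):
  //   2015-03-31 vs 2015-02-28: both are last days of their months, so the result is exactly
  //   1.0 and any time-of-day difference is ignored.
  //   2015-03-30 vs 2015-02-28: the result is 1 + (30 - 28) / 31.0 rounded to 8 digits,
  //   i.e. 1.06451613.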
  /**
   * Returns the day of week parsed from a String, counted from Thursday, which is marked as 0
   * (because 1970-01-01 was a Thursday). Returns -1 for an unrecognized name.
   */
def getDayOfWeekFromString(string: UTF8String): Int = {
val dowString = string.toString.toUpperCase
dowString match {
case "SU" | "SUN" | "SUNDAY" => 3
case "MO" | "MON" | "MONDAY" => 4
case "TU" | "TUE" | "TUESDAY" => 5
case "WE" | "WED" | "WEDNESDAY" => 6
case "TH" | "THU" | "THURSDAY" => 0
case "FR" | "FRI" | "FRIDAY" => 1
case "SA" | "SAT" | "SATURDAY" => 2
case _ => -1
}
}
/**
* Returns the first date which is later than startDate and is of the given dayOfWeek.
   * dayOfWeek is an integer in the range [0, 6], where 0 is Thu, 1 is Fri, etc.
*/
def getNextDateForDayOfWeek(startDate: SQLDate, dayOfWeek: Int): SQLDate = {
startDate + 1 + ((dayOfWeek - 1 - startDate) % 7 + 7) % 7
}
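  // Illustrative checks (sketch; day 0 is Thursday 1970-01-01):
  //   getNextDateForDayOfWeek(0, 4) == 4   // next Monday after 1970-01-01 is 1970-01-05
  //   getNextDateForDayOfWeek(0, 0) == 7   // "next Thursday" is strictly later: 1970-01-08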
/**
* Returns last day of the month for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getLastDayOfMonth(date: SQLDate): SQLDate = {
val (_, _, _, daysToMonthEnd) = splitDate(date)
date + daysToMonthEnd
}
private val TRUNC_TO_YEAR = 1
private val TRUNC_TO_MONTH = 2
private val TRUNC_INVALID = -1
/**
   * Returns the truncated date for the given date and trunc level.
   * The trunc level should be generated using `parseTruncLevel()` and should only be 1 or 2.
*/
def truncDate(d: SQLDate, level: Int): SQLDate = {
if (level == TRUNC_TO_YEAR) {
d - DateTimeUtils.getDayInYear(d) + 1
} else if (level == TRUNC_TO_MONTH) {
d - DateTimeUtils.getDayOfMonth(d) + 1
} else {
      // callers make sure this branch is never reached
sys.error(s"Invalid trunc level: $level")
}
}
/**
   * Returns the truncate level, which can be TRUNC_TO_YEAR, TRUNC_TO_MONTH, or TRUNC_INVALID;
   * TRUNC_INVALID means an unsupported truncate level.
*/
def parseTruncLevel(format: UTF8String): Int = {
if (format == null) {
TRUNC_INVALID
} else {
format.toString.toUpperCase match {
case "YEAR" | "YYYY" | "YY" => TRUNC_TO_YEAR
case "MON" | "MONTH" | "MM" => TRUNC_TO_MONTH
case _ => TRUNC_INVALID
}
}
}
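  // Illustrative usage (sketch): the two functions are meant to be used together, e.g.
  //   val level = parseTruncLevel(UTF8String.fromString("MM"))   // TRUNC_TO_MONTH
  //   if (level != TRUNC_INVALID) truncDate(d, level)            // first day of d's month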
/**
   * Returns a timestamp in the given timezone computed from a UTC timestamp, such that both
   * have the same string representation in their respective timezones.
*/
def fromUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
val tz = TimeZone.getTimeZone(timeZone)
val offset = tz.getOffset(time / 1000L)
time + offset * 1000L
}
/**
   * Returns a UTC timestamp computed from a timestamp in the given timezone, such that both
   * have the same string representation in their respective timezones.
*/
def toUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
val tz = TimeZone.getTimeZone(timeZone)
val offset = tz.getOffset(time / 1000L)
time - offset * 1000L
}
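  // Illustrative sketch of the intended semantics (mirrors from_utc_timestamp / to_utc_timestamp
  // in SQL): fromUTCTime(ts, "Asia/Seoul") renders 9 hours later than ts rendered in UTC, and
  // toUTCTime(fromUTCTime(ts, tz), tz) == ts as long as the zone offset does not change between
  // the two instants.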
}
|
ArvinDevel/onlineAggregationOnSparkV2
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala
|
Scala
|
apache-2.0
| 26,854
|
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.core
import java.util.UUID
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import akka.actor.{Actor, Props, Terminated}
import akka.event.Logging.Info
import akka.pattern.gracefulStop
import akka.testkit.{EventFilter, TestActorRef, TestProbe}
import akka.util.Timeout
import org.powerapi.core.MonitorChannel._
import org.powerapi.core.power._
import org.powerapi.core.target.{All, Application, Process, Target, intToProcess, stringToApplication}
import org.powerapi.core.TickChannel.{publishTick, tickTopic}
import org.powerapi.module.FormulaChannel.startFormula
import org.powerapi.module.PowerChannel.{AggregatePowerReport, RawPowerReport, publishRawPowerReport, subscribeAggPowerReport, unsubscribeAggPowerReport}
import org.powerapi.module.SensorChannel.startSensor
import org.powerapi.module.{Formula, Formulas, Sensor, Sensors}
import org.powerapi.reporter.Reporters
import org.powerapi.{PowerDisplay, UnitTest}
class EmptySensor(eventBus: MessageBus, muid: UUID, target: Target) extends Sensor(eventBus, muid, target) {
def init(): Unit = {}
def terminate(): Unit = {}
def handler: Actor.Receive = sensorDefault
}
class EmptyFormula(eventBus: MessageBus, muid: UUID, target: Target) extends Formula(eventBus, muid, target) {
def init(): Unit = {}
def terminate(): Unit = {}
def handler: Actor.Receive = formulaDefault
}
class MonitorSuite extends UnitTest {
val timeout = Timeout(1.seconds)
val threshold = 0.5
override def afterAll() = {
system.terminate()
}
trait Bus {
val eventBus = new MessageBus
}
"A MonitorChild actor" should "launch an exception when the messages received cannot be handled" in new Bus {
val muid = UUID.randomUUID()
val frequency = 50.milliseconds
val targets = Set[Target](1)
val monitor = TestActorRef(Props(classOf[MonitorChild], eventBus, muid, targets), "monitor")
EventFilter.warning(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStop("test", muid)
})
    // Not an exception, just an info log (switching to the running state).
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStart("test", muid, targets)
})
EventFilter.warning(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStart("test", muid, targets)
})
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorFrequency("test", muid, 1.second)
})
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorAggregator("test", muid, MAX)
})
EventFilter.warning(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorFrequency("test", UUID.randomUUID(), 1.second)
})
EventFilter.warning(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorAggregator("test", UUID.randomUUID(), MAX)
})
EventFilter.warning(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStop("test", UUID.randomUUID())
})
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStop("test", muid)
})
Await.result(gracefulStop(monitor, timeout.duration), timeout.duration)
}
it should "produce MonitorTick per muid/target when a Tick is received" in new Bus {
val muid1 = UUID.randomUUID()
val muid2 = UUID.randomUUID()
val targets = Set[Target](1, "java", All)
case class ExtendedTick(topic: String, traces: Seq[String], timestamp: Long) extends Tick
val clocks = TestActorRef(Props(classOf[Clocks], eventBus), "clocks")
val monitor1 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid1, targets), "monitor1")
val monitor2 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid2, targets), "monitor2")
val watcher = TestProbe()
watcher.watch(monitor1)
watcher.watch(monitor2)
targets.foreach(target => {
subscribeMonitorTick(muid1, target)(eventBus)(testActor)
subscribeMonitorTick(muid2, target)(eventBus)(testActor)
})
EventFilter.info(occurrences = 1, source = monitor1.path.toString).intercept({
monitor1 ! MonitorStart("test", muid1, targets)
})
EventFilter.info(occurrences = 1, source = monitor2.path.toString).intercept({
monitor2 ! MonitorStart("test", muid2, targets)
})
val tick1 = ExtendedTick(tickTopic(muid1), Seq("method", "a"), System.currentTimeMillis())
val tick2 = ExtendedTick(tickTopic(muid2), Seq("method", "b"), System.currentTimeMillis() + 1000)
publishTick(tick1)(eventBus)
var msgs = receiveN(targets.size).asInstanceOf[Seq[MonitorTick]]
msgs.map(_.target) should contain theSameElementsAs targets
msgs.map(_.muid).toSet should contain theSameElementsAs Seq(muid1)
msgs.map(_.tick).toSet should contain theSameElementsAs Seq(tick1)
publishTick(tick2)(eventBus)
msgs = receiveN(targets.size).asInstanceOf[Seq[MonitorTick]]
msgs.map(_.target) should contain theSameElementsAs targets
msgs.map(_.muid).toSet should contain theSameElementsAs Seq(muid2)
msgs.map(_.tick).toSet should contain theSameElementsAs Seq(tick2)
EventFilter.info(occurrences = 1, source = monitor1.path.toString).intercept({
monitor1 ! MonitorStop("test", muid1)
})
val filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 2)
filter.intercept({
monitor2 ! MonitorFrequency("test", muid2, 50.millis)
})
publishTick(tick2)(eventBus)
receiveWhile(5.seconds) {
case msg: MonitorTick if !msg.tick.isInstanceOf[ExtendedTick] => msg
}
EventFilter.info(occurrences = 1, source = monitor2.path.toString).intercept({
monitor2 ! MonitorStop("test", muid2)
})
watcher.receiveN(2).asInstanceOf[Seq[Terminated]].map(_.actor) should contain theSameElementsAs Seq(monitor1, monitor2)
Await.result(gracefulStop(clocks, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitor1, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitor2, timeout.duration), timeout.duration)
}
it should "handle a clock at a given frequency when needed, and produce MonitorTick per muid/target when a Tick is received" in new Bus {
val clocks = TestActorRef(Props(classOf[Clocks], eventBus), "clocks")
val frequency1 = 250.milliseconds
val frequency2 = 50.milliseconds
val frequency3 = 1.second
val muid1 = UUID.randomUUID()
val muid2 = UUID.randomUUID()
val targets = Set[Target](1, "java", All)
var messages = Seq[MonitorTick]()
val monitor1 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid1, targets), "monitor1")
val monitor2 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid2, targets), "monitor2")
val watcher = TestProbe()
watcher.watch(monitor1)
watcher.watch(monitor2)
targets.foreach(target => {
subscribeMonitorTick(muid1, target)(eventBus)(testActor)
subscribeMonitorTick(muid2, target)(eventBus)(testActor)
})
EventFilter.info(occurrences = 1, source = monitor1.path.toString).intercept({
monitor1 ! MonitorStart("test", muid1, targets)
})
EventFilter.info(occurrences = 1, source = monitor2.path.toString).intercept({
monitor2 ! MonitorStart("test", muid2, targets)
})
var filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 2)
filter.intercept({
monitor1 ! MonitorFrequency("test", muid1, frequency1)
})
messages = receiveWhile(10.seconds, messages = targets.size * ((10.seconds / frequency1) * threshold).toInt) {
case msg: MonitorTick => msg
}
messages.size should equal(targets.size * ((10.seconds / frequency1) * threshold).toInt)
messages.map(_.muid).toSet should contain theSameElementsAs Seq(muid1)
messages.map(_.target).toSet should contain theSameElementsAs targets
targets.foreach(target => unsubscribeMonitorTick(muid1, target)(eventBus)(testActor))
filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 2)
filter.intercept({
monitor2 ! MonitorFrequency("test", muid2, frequency2)
})
messages = receiveWhile(10.seconds, messages = targets.size * ((10.seconds / frequency2) * threshold).toInt) {
case msg: MonitorTick => msg
}
messages.size should equal(targets.size * ((10.seconds / frequency2) * threshold).toInt)
messages.map(_.muid).toSet should contain theSameElementsAs Seq(muid2)
messages.map(_.target).toSet should contain theSameElementsAs targets
filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 3)
filter.intercept({
monitor2 ! MonitorFrequency("test", muid2, frequency3)
})
messages = receiveWhile(10.seconds, messages = targets.size * ((10.seconds / frequency3) * threshold).toInt) {
case msg: MonitorTick => msg
}
messages.size should equal(targets.size * ((10.seconds / frequency3) * threshold).toInt)
messages.map(_.muid).toSet should contain theSameElementsAs Seq(muid2)
messages.map(_.target).toSet should contain theSameElementsAs targets
val children = clocks.children.seq
children.foreach(child => watcher.watch(child))
monitor1 ! MonitorStop("test", muid1)
monitor2 ! MonitorStopAll("test")
watcher.receiveN(2 + children.size).asInstanceOf[Seq[Terminated]].map(_.actor) should contain theSameElementsAs Seq(monitor1, monitor2) ++ children
Await.result(gracefulStop(clocks, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitor1, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitor2, timeout.duration), timeout.duration)
}
it should "aggregate its RawPowerReport into one AggregatePowerReport when all required RawPowerReport are stacked" in new Bus {
val muid = UUID.randomUUID()
val targets = Set[Target](1, "java", All)
val tick1 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis()
}
val tick2 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 1000
}
val tick3 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 2000
}
val tick4 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 3000
}
val monitor = TestActorRef(Props(classOf[MonitorChild], eventBus, muid, targets), "monitor")
val watcher = TestProbe()
watcher.watch(monitor)
subscribeAggPowerReport(muid)(eventBus)(testActor)
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorStart("test", muid, targets)
})
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorAggregator("test", muid, MEAN)
})
monitor ! RawPowerReport("test", muid, 1, 10.W, "cpu", tick1)
monitor ! RawPowerReport("test", muid, 1, 2.W, "disk", tick1)
monitor ! RawPowerReport("test", muid, "java", 5.W, "cpu", tick1)
monitor ! RawPowerReport("test", muid, "java", 1.W, "disk", tick1)
expectNoMsg()
monitor ! RawPowerReport("test", muid, 1, 15.W, "cpu", tick2)
monitor ! RawPowerReport("test", muid, 1, 6.W, "disk", tick2)
var msg = expectMsgClass(classOf[AggregatePowerReport])
msg.size should equal(4)
msg.targets should contain theSameElementsAs Seq[Target](1, "java")
msg.devices should contain theSameElementsAs Seq("cpu", "disk")
msg.ticks should contain theSameElementsAs Seq(tick1)
msg.power should equal(MEAN(Seq(10.W, 2.W, 5.W, 1.W)))
msg.powerPerDevice should contain theSameElementsAs Map("cpu" -> MEAN(Seq(10.W, 5.W)), "disk" -> MEAN(Seq(2.W, 1.W)))
msg.powerPerTarget should contain theSameElementsAs Map(Process(1) -> MEAN(Seq(10.W, 2.W)), Application("java") -> MEAN(Seq(5.W, 1.W)))
monitor ! RawPowerReport("test", muid, 1, 12.W, "cpu", tick3)
monitor ! RawPowerReport("test", muid, 1, 8.W, "disk", tick3)
msg = expectMsgClass(classOf[AggregatePowerReport])
msg.size should equal(2)
msg.targets should contain theSameElementsAs Seq[Target](1)
msg.devices should contain theSameElementsAs Seq("cpu", "disk")
msg.ticks should contain theSameElementsAs Seq(tick2)
msg.power should equal(MEAN(Seq(15.W, 6.W)))
msg.powerPerDevice should contain theSameElementsAs Map("cpu" -> MEAN(Seq(15.W)), "disk" -> MEAN(Seq(6.W)))
msg.powerPerTarget should contain theSameElementsAs Map(Process(1) -> MEAN(Seq(15.W, 6.W)))
EventFilter.info(occurrences = 1, source = monitor.path.toString).intercept({
monitor ! MonitorAggregator("test", muid, MAX)
})
monitor ! RawPowerReport("test", muid, "java", 3.W, "cpu", tick3)
monitor ! RawPowerReport("test", muid, "java", 1.W, "disk", tick3)
monitor ! RawPowerReport("test", muid, All, 15.W, "cpu", tick3)
monitor ! RawPowerReport("test", muid, All, 10.W, "disk", tick3)
monitor ! RawPowerReport("test", muid, 1, 15.W, "cpu", tick4)
monitor ! RawPowerReport("test", muid, 1, 4.W, "disk", tick4)
msg = expectMsgClass(classOf[AggregatePowerReport])
msg.size should equal(6)
msg.targets should contain theSameElementsAs Seq[Target](1, "java", All)
msg.devices should contain theSameElementsAs Seq("cpu", "disk")
msg.ticks should contain theSameElementsAs Seq(tick3)
msg.power should equal(MAX(Seq(12.W, 8.W, 3.W, 1.W, 15.W, 10.W)))
msg.powerPerDevice should contain theSameElementsAs Map("cpu" -> MAX(Seq(12.W, 3.W, 15.W)), "disk" -> MAX(Seq(8.W, 1.W, 10.W)))
msg.powerPerTarget should contain theSameElementsAs Map(Process(1) -> MAX(Seq(12.W, 8.W)), Application("java") -> MAX(Seq(3.W, 1.W)), All -> MAX(Seq(15.W, 10.W)))
Await.result(gracefulStop(monitor, timeout.duration), timeout.duration)
}
"A Monitors actor" should "handle MonitorChild actors" in new Bus {
val muid1 = UUID.randomUUID()
val frequency1 = 50.milliseconds
val muid2 = UUID.randomUUID()
val muid3 = UUID.randomUUID()
val clocks = TestActorRef(Props(classOf[Clocks], eventBus), "clocks")
val monitors = TestActorRef(Props(classOf[Monitors], eventBus), "monitors")
val watcher = TestProbe()
subscribeMonitorTick(muid1, 1)(eventBus)(testActor)
subscribeMonitorTick(muid1, "java")(eventBus)(testActor)
EventFilter.info(occurrences = 1, start = s"monitor is started, muid: $muid1").intercept({
startMonitor(muid1, Set(1, "java"))(eventBus)
})
EventFilter.info(occurrences = 1, start = s"monitor is started, muid: $muid2").intercept({
startMonitor(muid2, Set(2, "java", All))(eventBus)
})
EventFilter.info(occurrences = 1, start = s"monitor is started, muid: $muid3").intercept({
startMonitor(muid3, Set(All))(eventBus)
})
var filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 2)
filter.intercept({
setFrequency(muid1, frequency1)(eventBus)
})
receiveWhile(10.seconds, messages = (2 * (10.seconds / frequency1) * threshold).toInt) {
case msg: MonitorTick => msg
}.size should equal(2 * ((10.seconds / frequency1) * threshold).toInt)
val clockChildren = clocks.children.seq
val monitorChildren = monitors.children.seq
clockChildren foreach watcher.watch
monitorChildren foreach watcher.watch
unsubscribeMonitorTick(muid1, 1)(eventBus)(testActor)
unsubscribeMonitorTick(muid1, "java")(eventBus)(testActor)
expectNoMsg()
unsubscribeAggPowerReport(muid1)(eventBus)(testActor)
stopMonitor(muid1)(eventBus)
subscribeAggPowerReport(muid2)(eventBus)(testActor)
val tick1 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis()
}
val tick2 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 1000
}
publishRawPowerReport(muid2, 2, 1.W, "cpu", tick1)(eventBus)
publishRawPowerReport(muid2, "java", 5.W, "cpu", tick1)(eventBus)
publishRawPowerReport(muid2, All, 6.W, "cpu", tick1)(eventBus)
EventFilter.info(occurrences = 1, message = "aggregator is changed").intercept({
setAggregator(muid2, MEDIAN)(eventBus)
})
publishRawPowerReport(muid2, 1, 10.W, "cpu", tick2)(eventBus)
val msg = expectMsgClass(classOf[AggregatePowerReport])
msg.size should equal(3)
msg.devices should contain theSameElementsAs Seq("cpu")
msg.power should equal(MEDIAN(Seq(1.W, 5.W, 6.W)))
msg.targets should contain theSameElementsAs Seq[Target](2, "java", All)
msg.ticks should contain theSameElementsAs Seq(tick1)
stopAllMonitor(eventBus)
watcher.receiveN(4).asInstanceOf[Seq[Terminated]].map(_.actor) should contain theSameElementsAs clockChildren ++ monitorChildren
watcher.expectNoMsg()
Await.result(gracefulStop(clocks, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitors, timeout.duration), timeout.duration)
}
"A Monitor object" should "allow to interact directly with the Monitor supervisor" in new Bus {
val monitorO = new Monitor(eventBus)
val frequency = 50.milliseconds
val clocks = TestActorRef(Props(classOf[Clocks], eventBus), "clocks")
val monitors = TestActorRef(Props(classOf[Monitors], eventBus), "monitors")
val sensors = TestActorRef(Props(classOf[Sensors], eventBus), "sensors")
val formulas = TestActorRef(Props(classOf[Formulas], eventBus), "formulas")
val reporters = TestActorRef(Props(classOf[Reporters], eventBus), "reporters")
val watcher = TestProbe()
val tick1 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis()
}
val tick2 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 1000
}
val tick3 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 2000
}
val tick4 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 3000
}
val tick5 = new Tick {
val topic = "test"
val timestamp = System.currentTimeMillis() + 4000
}
val reporter = new DisplayMock
class DisplayMock extends PowerDisplay {
def display(aggregatePowerReport: AggregatePowerReport): Unit = testActor ! aggregatePowerReport.power
}
EventFilter.info(occurrences = 1, start = s"monitor is started, muid: ${monitorO.muid}").intercept({
startMonitor(monitorO.muid, Set(1, "java"))(eventBus)
})
EventFilter.info(occurrences = 2, start = s"sensor is started, class: ${classOf[EmptySensor].getName}").intercept({
startSensor(monitorO.muid, Process(1), classOf[EmptySensor], Seq(eventBus, monitorO.muid, Process(1)))(eventBus)
startSensor(monitorO.muid, Application("java"), classOf[EmptySensor], Seq(eventBus, monitorO.muid, Application("java")))(eventBus)
})
EventFilter.info(occurrences = 2, start = s"formula is started, class: ${classOf[EmptyFormula].getName}").intercept({
startFormula(monitorO.muid, Process(1), classOf[EmptyFormula], Seq(eventBus, monitorO.muid, Process(1)))(eventBus)
startFormula(monitorO.muid, Application("java"), classOf[EmptyFormula], Seq(eventBus, monitorO.muid, Application("java")))(eventBus)
})
EventFilter.info(occurrences = 1, message = "aggregator is changed").intercept({
monitorO(MAX)
})
EventFilter.info(occurrences = 1, start = s"reporter is started, class: ${reporter.getClass.getName}").intercept({
monitorO.to(reporter)
})
val reporterChildren = reporters.children.seq
reporterChildren foreach watcher.watch
publishRawPowerReport(monitorO.muid, 1, 1.W, "cpu", tick1)(eventBus)
publishRawPowerReport(monitorO.muid, "java", 5.W, "cpu", tick1)(eventBus)
publishRawPowerReport(monitorO.muid, "java", 10.W, "cpu", tick2)(eventBus)
expectMsgClass(classOf[RawPower]) should equal(MAX(Seq(1.W, 5.W)))
EventFilter.info(occurrences = 1, start = s"reporter is stopped, class: ${reporter.getClass.getName}").intercept({
monitorO.unto(reporter)
})
watcher.receiveN(1).asInstanceOf[Seq[Terminated]].map(_.actor) should contain theSameElementsAs reporterChildren
monitorO.to(testActor)
publishRawPowerReport(monitorO.muid, 1, 6.W, "cpu", tick2)(eventBus)
publishRawPowerReport(monitorO.muid, 1, 5.W, "cpu", tick3)(eventBus)
expectMsgClass(classOf[AggregatePowerReport]).power should equal(MAX(Seq(6.W, 10.W)))
expectNoMsg()
monitorO.unto(testActor)
monitorO.to(testActor, subscribeAggPowerReport(monitorO.muid))
publishRawPowerReport(monitorO.muid, "java", 1.W, "cpu", tick3)(eventBus)
publishRawPowerReport(monitorO.muid, "java", 4.W, "cpu", tick4)(eventBus)
expectMsgClass(classOf[AggregatePowerReport]).power should equal(MAX(Seq(5.W, 1.W)))
expectNoMsg()
monitorO.unto(testActor, unsubscribeAggPowerReport(monitorO.muid))
publishRawPowerReport(monitorO.muid, "java", 4.W, "cpu", tick5)(eventBus)
expectNoMsg()
subscribeMonitorTick(monitorO.muid, 1)(eventBus)(testActor)
subscribeMonitorTick(monitorO.muid, "java")(eventBus)(testActor)
val filter = EventFilter.custom({
case Info(_, claz, _) if claz == classOf[ClockChild] || claz == classOf[MonitorChild] => true
}, occurrences = 2)
filter.intercept({
monitorO.every(frequency)
})
receiveWhile(10.seconds, messages = (2 * (10.seconds / frequency) * threshold).toInt) {
case msg: MonitorTick => msg
}.size should equal(2 * ((10.seconds / frequency) * threshold).toInt)
val clockChildren = clocks.children.seq
val monitorChildren = monitors.children.seq
val sensorChildren = sensors.children.seq
val formulaChildren = formulas.children.seq
clockChildren foreach watcher.watch
monitorChildren foreach watcher.watch
sensorChildren foreach watcher.watch
formulaChildren foreach watcher.watch
monitorO.cancel()
watcher.receiveN(6).asInstanceOf[Seq[Terminated]].map(_.actor) should contain theSameElementsAs clockChildren ++ monitorChildren ++ sensorChildren ++ formulaChildren
Await.result(gracefulStop(clocks, timeout.duration), timeout.duration)
Await.result(gracefulStop(monitors, timeout.duration), timeout.duration)
Await.result(gracefulStop(sensors, timeout.duration), timeout.duration)
Await.result(gracefulStop(formulas, timeout.duration), timeout.duration)
Await.result(gracefulStop(reporters, timeout.duration), timeout.duration)
}
}
|
Spirals-Team/powerapi
|
powerapi-core/src/test/scala/org/powerapi/core/MonitorSuite.scala
|
Scala
|
agpl-3.0
| 23,926
|
package play
import sbt._
import sbt.Keys._
trait Keys {
val jdbc = "com.typesafe.play" %% "play-jdbc" % play.core.PlayVersion.current
val anorm = "com.typesafe.play" %% "anorm" % play.core.PlayVersion.current
val javaCore = "com.typesafe.play" %% "play-java" % play.core.PlayVersion.current
val javaJdbc = "com.typesafe.play" %% "play-java-jdbc" % play.core.PlayVersion.current
val javaEbean = "com.typesafe.play" %% "play-java-ebean" % play.core.PlayVersion.current
val javaJpa = "com.typesafe.play" %% "play-java-jpa" % play.core.PlayVersion.current
def component(id: String) = "com.typesafe.play" %% id % play.core.PlayVersion.current
val filters = "com.typesafe.play" %% "filters-helpers" % play.core.PlayVersion.current
val cache = "com.typesafe.play" %% "play-cache" % play.core.PlayVersion.current
val playVersion = SettingKey[String]("play-version")
val playDefaultPort = SettingKey[Int]("play-default-port")
val requireJs = SettingKey[Seq[String]]("play-require-js")
val requireJsFolder = SettingKey[String]("play-require-js-folder")
val requireJsShim = SettingKey[String]("play-require-js-shim")
val requireNativePath = SettingKey[Option[String]]("play-require-native-path")
/** Our means of hooking the run task with additional behavior. */
val playRunHooks = TaskKey[Seq[play.PlayRunHook]]("play-run-hooks")
@deprecated("2.2", "Please use playRunHooks setting instead.")
val playOnStarted = SettingKey[Seq[(java.net.InetSocketAddress) => Unit]]("play-onStarted")
@deprecated("2.2", "Please use playRunHooks setting instead.")
val playOnStopped = SettingKey[Seq[() => Unit]]("play-onStopped")
/** A hook to configure how play blocks on user input while running. */
val playInteractionMode = SettingKey[play.PlayInteractionMode]("play-interaction-mode")
val playAssetsDirectories = SettingKey[Seq[File]]("play-assets-directories")
val playExternalAssets = SettingKey[Seq[(File, File => PathFinder, String)]]("play-external-assets")
val confDirectory = SettingKey[File]("play-conf")
val templatesImport = SettingKey[Seq[String]]("play-templates-imports")
val routesImport = SettingKey[Seq[String]]("play-routes-imports")
val generateReverseRouter = SettingKey[Boolean]("play-generate-reverse-router",
"Whether the reverse router should be generated. Setting to false may reduce compile times if it's not needed.")
val namespaceReverseRouter = SettingKey[Boolean]("play-namespace-reverse-router",
"Whether the reverse router should be namespaced. Useful if you have many routers that use the same actions.")
val ebeanEnabled = SettingKey[Boolean]("play-ebean-enabled")
val templatesTypes = SettingKey[Map[String, String]]("play-templates-formats")
val closureCompilerOptions = SettingKey[Seq[String]]("play-closure-compiler-options")
val lessOptions = SettingKey[Seq[String]]("play-less-options")
val coffeescriptOptions = SettingKey[Seq[String]]("play-coffeescript-options")
val lessEntryPoints = SettingKey[PathFinder]("play-less-entry-points")
val coffeescriptEntryPoints = SettingKey[PathFinder]("play-coffeescript-entry-points")
val javascriptEntryPoints = SettingKey[PathFinder]("play-javascript-entry-points")
val playPlugin = SettingKey[Boolean]("play-plugin")
val devSettings = SettingKey[Seq[(String, String)]]("play-dev-settings")
val scalaIdePlay2Prefs = TaskKey[Unit]("scala-ide-play2-prefs")
// Constants that may be useful elsewhere
val defaultJavaTemplatesImport = Seq(
"models._",
"controllers._",
"java.lang._",
"java.util._",
"scala.collection.JavaConversions._",
"scala.collection.JavaConverters._",
"play.api.i18n._",
"play.core.j.PlayMagicForJava._",
"play.mvc._",
"play.data._",
"play.api.data.Field",
"play.mvc.Http.Context.Implicit._",
"views.%format%._")
val defaultScalaTemplatesImport = Seq(
"models._",
"controllers._",
"play.api.i18n._",
"play.api.mvc._",
"play.api.data._",
"views.%format%._")
val defaultTemplatesImport = Seq("play.api.templates._", "play.api.templates.PlayMagic._")
}
object Keys extends Keys
trait PlayInternalKeys {
type ClassLoaderCreator = (String, Array[URL], ClassLoader) => ClassLoader
val playDependencyClasspath = TaskKey[Classpath]("play-dependency-classpath")
val playReloaderClasspath = TaskKey[Classpath]("play-reloader-classpath")
val playCommonClassloader = TaskKey[ClassLoader]("play-common-classloader")
val playDependencyClassLoader = TaskKey[ClassLoaderCreator]("play-dependency-classloader")
val playReloaderClassLoader = TaskKey[ClassLoaderCreator]("play-reloader-classloader")
val playReload = TaskKey[sbt.inc.Analysis]("play-reload")
val buildRequire = TaskKey[Seq[(File, File)]]("play-build-require-assets")
val playCompileEverything = TaskKey[Seq[sbt.inc.Analysis]]("play-compile-everything")
}
|
michaelahlers/team-awesome-wedding
|
vendor/play-2.2.1/framework/src/sbt-plugin/src/main/scala/PlayKeys.scala
|
Scala
|
mit
| 4,925
|
package com.github.kmizu.mregex
import java.io.{PrintWriter, StringWriter}
class Dfa(val table: Array[Array[Int]], val start: Int, val finals: Set[Int]) {
import Nfa.Compiler.NumberOfAlphabets
def and(rhs: Dfa): Dfa = {
val newTable = Array.ofDim[Int](table.length * rhs.table.length, NumberOfAlphabets)
val newStart = rhs.table.length * start + rhs.start
var newFinals = Set[Int]()
for (a <- finals) {
for (b <- rhs.finals) {
newFinals += (rhs.table.length * a + b)
}
}
var a = 0
while (a < table.length) {
var b = 0
while (b < rhs.table.length) {
var input: Int = 0
while (input < NumberOfAlphabets) {
val nextA = table(a)(input)
val nextB = rhs.table(b)(input)
if (nextA == -1 || nextB == -1) {
newTable(rhs.table.length * a + b)(input) = -1
} else {
newTable(rhs.table.length * a + b)(input) = rhs.table.length * nextA + nextB
}
input += 1
}
b += 1
}
a += 1
}
new Dfa(newTable, newStart, newFinals)
}
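  // A reading of the product construction above (sketch, not from the original comments):
  // the state pair (a, b) is encoded as the single integer rhs.table.length * a + b, and a
  // pair is final only when both components are final, so the resulting DFA accepts exactly
  // the strings accepted by both operands. Hypothetical usage:
  //   val both = evenLengthDfa and containsDigitDfa   // assumed, pre-built Dfa instances
  //   both.matches("a1")                              // true only if both operands match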
def matches(input: String): Boolean = {
var current = start
var cursor = 0
    // stop as soon as the dead state (-1) is reached; indexing table(-1) would throw
    while(cursor < input.length && current != -1) {
val ch = input.charAt(cursor)
current = table(current)(ch)
cursor += 1
}
finals.contains(current)
}
def disjoint(dfa: Dfa): Boolean = this.and(dfa).isEmpty
def isEmpty: Boolean = {
val reachable = mark(Set(), start)
reachable.filter{finals contains _}.isEmpty
}
override def toString: String = {
var maxDigit: Int = String.valueOf(table.length).length
if (maxDigit % 2 == 0) maxDigit += 1
val ASCII_PRINTABLE_START = 32
val ASCII_PRINTABLE_FINAL = 126
val buff = new StringWriter
val w = new PrintWriter(buff)
w.printf("start: %d%n", new Integer(start))
w.printf("final: ")
for (f <- finals) {
w.printf("%0" + maxDigit + "d ", new Integer(f))
}
w.println()
for(i <- 0 until table.length) {
w.printf("%0" + maxDigit + "d: ", new Integer(i))
for(j <- ASCII_PRINTABLE_START to ASCII_PRINTABLE_FINAL) {
if (table(i)(j) != -1) {
w.printf("%c -> %0" + maxDigit + "d ", j.toChar:Character, new Integer(table(i)(j)))
w.flush()
}
w.flush()
}
w.println()
}
w.flush()
new String(buff.getBuffer)
}
private def mark(reachable: Set[Int], stateNum: Int): Set[Int] = {
var result = reachable
if (reachable.contains(stateNum)) return result
result += stateNum
var input = 0
while (input < NumberOfAlphabets) {
val next = table(stateNum)(input)
      // recurse with the accumulated set so states already visited (including via cycles) stop the recursion
      if (next != -1) result = mark(result, next)
input += 1
}
result
}
private def spacing(w: PrintWriter, n: Int): Unit = {
w.print(" " * n)
}
}
|
kmizu/minimal_regex_matcher
|
src/main/scala/com/github/kmizu/mregex/Dfa.scala
|
Scala
|
mit
| 2,897
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen.choose
object SummingIteratorTest extends Properties("SummingIterator") {
def sumEquiv[V:Semigroup:Equiv](it0: Iterator[V], it1: Iterator[V]): Boolean =
StatefulSummerLaws.zeroEquiv(Semigroup.sumOption(it0), Semigroup.sumOption(it1))
case class Capacity(c: Int)
implicit val capArb = Arbitrary { for(c <- choose(0, 10240)) yield Capacity(c) }
implicit def mapEquiv[K,V:Monoid:Equiv]: Equiv[Map[K,V]] = Equiv.fromFunction { (l, r) =>
val zl = MapAlgebra.removeZeros(l)
val zr = MapAlgebra.removeZeros(r)
zl.size == zr.size && {
zl.forall { case (k,v) =>
zr.get(k).map { rv => Equiv[V].equiv(rv, v) }.getOrElse(false)
}
}
}
property("With Maps is preserved[(Short,Int)]") = forAll { (cap: Capacity, items: List[(Short, Int)]) =>
val mitems = items.map { Map(_) }
val qit = SummingIterator[Map[Short, Int]](SummingQueue[Map[Short,Int]](cap.c), mitems.iterator)
val qitc = SummingIterator[Map[Short, Int]](SummingCache[Short,Int](cap.c), mitems.iterator)
sumEquiv(mitems.iterator, qit) && sumEquiv(mitems.iterator, qitc)
}
property("With Maps is preserved[(Short,String)]") = forAll { (cap: Capacity, items: List[(Short, String)]) =>
val mitems = items.map { Map(_) }
val qit = SummingIterator(SummingQueue[Map[Short,String]](cap.c), mitems.iterator)
val qitc = SummingIterator(SummingCache[Short,String](cap.c), mitems.iterator)
sumEquiv(mitems.iterator, qit) && sumEquiv(mitems.iterator, qitc)
}
}
|
snoble/algebird
|
algebird-test/src/test/scala/com/twitter/algebird/SummingIteratorTest.scala
|
Scala
|
apache-2.0
| 2,245
|
package com.dominikgruber.fpinscala.chapter03
import org.scalatest._
class Exercise20Spec extends FlatSpec with Matchers {
"flatMap" should "duplicate elements" in {
val l = List(1, 2, 3)
val f = (i: Int) => List(i, i)
List.flatMap(l)(f) should be (List(1, 1, 2, 2, 3, 3))
}
}
|
TheDom/functional-programming-in-scala
|
src/test/scala/com/dominikgruber/fpinscala/chapter03/Exercise20Spec.scala
|
Scala
|
mit
| 295
|
package rtmp.protocol
import akka.util.ByteString
/**
*/
abstract class BaseProtocol {
def handshake(input:Array[Byte]):Response
}
|
vimvim/AkkaTest
|
src/main/scala/rtmp/protocol/BaseProtocol.scala
|
Scala
|
agpl-3.0
| 136
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.metrics
import org.apache.samza.clustermanager.SamzaApplicationState
import org.apache.samza.config.{ClusterManagerConfig, Config, MetricsConfig}
import org.apache.samza.util.Logging
/**
* Responsible for wiring up Samza's metrics. Given that Samza has a metric
* registry, we might as well use it. This class takes Samza's application
* master state, and converts it to metrics.
*/
class ContainerProcessManagerMetrics(val config: Config,
val state: SamzaApplicationState,
val registry: ReadableMetricsRegistry) extends MetricsHelper with Logging {
val clusterManagerConfig = new ClusterManagerConfig(config)
val mRunningContainers = newGauge("running-containers", () => state.runningProcessors.size)
val mNeededContainers = newGauge("needed-containers", () => state.neededProcessors.get())
val mCompletedContainers = newGauge("completed-containers", () => state.completedProcessors.get())
val mFailedContainers = newGauge("failed-containers", () => state.failedContainers.get())
val mReleasedContainers = newGauge("released-containers", () => state.releasedContainers.get())
val mContainers = newGauge("container-count", () => state.processorCount.get())
val mRedundantNotifications = newGauge("redundant-notifications", () => state.redundantNotifications.get())
val mJobHealthy = newGauge("job-healthy", () => if (state.jobHealthy.get()) 1 else 0)
val mPreferredHostRequests = newGauge("preferred-host-requests", () => state.preferredHostRequests.get())
val mAnyHostRequests = newGauge("any-host-requests", () => state.anyHostRequests.get())
val mExpiredPreferredHostRequests = newGauge("expired-preferred-host-requests", () => state.expiredPreferredHostRequests.get())
val mExpiredAnyHostRequests = newGauge("expired-any-host-requests", () => state.expiredAnyHostRequests.get())
val mHostAffinityMatchPct = newGauge("host-affinity-match-pct", () => {
val numPreferredHostRequests = state.preferredHostRequests.get()
val numExpiredPreferredHostRequests = state.expiredPreferredHostRequests.get()
if (numPreferredHostRequests != 0) {
100.00 * (numPreferredHostRequests - numExpiredPreferredHostRequests) / numPreferredHostRequests
} else {
0L
}
})
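  // Illustrative arithmetic: with 40 preferred-host requests of which 10 expired, the gauge
  // reports 100.00 * (40 - 10) / 40 == 75.0; with no preferred-host requests it reports 0.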
val mFailedStandbyAllocations = newGauge("failed-standby-allocations", () => state.failedStandbyAllocations.get())
val mFailoversToAnyHost = newGauge("failovers-to-any-host", () => state.failoversToAnyHost.get())
val mFailoversToStandby = newGauge("failovers-to-standby", () => state.failoversToStandby.get())
val mFailedContainerPlacementActions = newGauge("failed-container-placements-actions", () => state.failedContainerPlacementActions.get())
val mContainerMemoryMb = newGauge("container-memory-mb", () => clusterManagerConfig.getContainerMemoryMb)
val mContainerCpuCores = newGauge("container-cpu-cores", () => clusterManagerConfig.getNumCores)
}
|
lhaiesp/samza
|
samza-core/src/main/scala/org/apache/samza/metrics/ContainerProcessManagerMetrics.scala
|
Scala
|
apache-2.0
| 3,730
|
package com.twitter.util
import java.util.Locale
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TwitterDateFormatTest extends WordSpec {
"TwitterDateFormat" should {
"disallow Y without w" in {
intercept[IllegalArgumentException] {
TwitterDateFormat("YYYYMMDD")
}
intercept[IllegalArgumentException] {
TwitterDateFormat("YMD", Locale.GERMAN)
}
}
"allow Y with w" in {
TwitterDateFormat("YYYYww")
}
"allow Y when quoted" in {
TwitterDateFormat("yyyy 'Year'")
}
"stripSingleQuoted" in {
import TwitterDateFormat._
assert(stripSingleQuoted("") == "")
assert(stripSingleQuoted("YYYY") == "YYYY")
assert(stripSingleQuoted("''") == "")
assert(stripSingleQuoted("'abc'") == "")
assert(stripSingleQuoted("x'abc'") == "x")
assert(stripSingleQuoted("'abc'x") == "x")
assert(stripSingleQuoted("'abc'def'ghi'") == "def")
intercept[IllegalArgumentException] {
stripSingleQuoted("'abc")
}
intercept[IllegalArgumentException] {
stripSingleQuoted("'abc'def'ghi")
}
}
}
}
|
BuoyantIO/twitter-util
|
util-core/src/test/scala/com/twitter/util/TwitterDateFormatTest.scala
|
Scala
|
apache-2.0
| 1,229
|
package io.skysail.core.app.resources
import akka.pattern.ask
import io.skysail.core.akka.RequestEvent
import io.skysail.core.app.SkysailApplication
import io.skysail.core.app.menus.MenuItem
import io.skysail.core.resources.AsyncListResource
import io.skysail.core.server.actors.ApplicationsActor
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
class MenusResource extends AsyncListResource[MenuItem] {
def get(requestEvent: RequestEvent): Unit = {
val appsActor = SkysailApplication.getApplicationsActor(this.actorContext.system)
(appsActor ? ApplicationsActor.GetMenus())
.mapTo[List[MenuItem]]
.onComplete {
case Success(menuItems) => requestEvent.controllerActor ! menuItems
case Failure(f) => println(s"failure ${f}")
}
}
}
|
evandor/skysail-core
|
skysail.core/src/io/skysail/core/app/resources/MenusResource.scala
|
Scala
|
apache-2.0
| 837
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.util.Locale
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.{AnalysisException, DataFrame, Row}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, SpecificInternalRow, UnsafeProjection}
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.types.{BinaryType, DataType}
import org.apache.spark.util.Utils
/**
* This is a temporary port of KafkaSinkSuite, since we do not yet have a V2 memory stream.
* Once we have one, this will be changed to a specialization of KafkaSinkSuite and we won't have
* to duplicate all the code.
*/
class KafkaContinuousSinkSuite extends KafkaContinuousTest {
import testImplicits._
override val streamingTimeout = 30.seconds
override val brokerProps = Map("auto.create.topics.enable" -> "false")
override def afterAll(): Unit = {
if (testUtils != null) {
testUtils.teardown()
testUtils = null
}
super.afterAll()
}
test("streaming - write to kafka with topic field") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
testUtils.createTopic(topic)
val writer = createKafkaWriter(
input.toDF(),
withTopic = None,
withOutputMode = Some(OutputMode.Append))(
withSelectExpr = s"'$topic' as topic", "value")
val reader = createKafkaReader(topic)
.selectExpr("CAST(key as STRING) key", "CAST(value as STRING) value")
.selectExpr("CAST(key as INT) key", "CAST(value as INT) value")
.as[(Option[Int], Int)]
.map(_._2)
try {
testUtils.sendMessages(inputTopic, Array("1", "2", "3", "4", "5"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5)
}
testUtils.sendMessages(inputTopic, Array("6", "7", "8", "9", "10"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
}
} finally {
writer.stop()
}
}
test("streaming - write w/o topic field, with topic option") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
testUtils.createTopic(topic)
val writer = createKafkaWriter(
input.toDF(),
withTopic = Some(topic),
withOutputMode = Some(OutputMode.Append()))()
val reader = createKafkaReader(topic)
.selectExpr("CAST(key as STRING) key", "CAST(value as STRING) value")
.selectExpr("CAST(key as INT) key", "CAST(value as INT) value")
.as[(Option[Int], Int)]
.map(_._2)
try {
testUtils.sendMessages(inputTopic, Array("1", "2", "3", "4", "5"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5)
}
testUtils.sendMessages(inputTopic, Array("6", "7", "8", "9", "10"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
}
} finally {
writer.stop()
}
}
test("streaming - topic field and topic option") {
/* The purpose of this test is to ensure that the topic option
* overrides the topic field. We begin by writing some data that
* includes a topic field and value (e.g., 'foo') along with a topic
     * option. Then when we read from the topic specified in the option
     * we should see the data, i.e., the data was written to the topic given by
     * the option, and not to the topic embedded in the data (e.g., 'foo').
*/
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
testUtils.createTopic(topic)
val writer = createKafkaWriter(
input.toDF(),
withTopic = Some(topic),
withOutputMode = Some(OutputMode.Append()))(
withSelectExpr = "'foo' as topic", "CAST(value as STRING) value")
val reader = createKafkaReader(topic)
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.selectExpr("CAST(key AS INT)", "CAST(value AS INT)")
.as[(Option[Int], Int)]
.map(_._2)
try {
testUtils.sendMessages(inputTopic, Array("1", "2", "3", "4", "5"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5)
}
testUtils.sendMessages(inputTopic, Array("6", "7", "8", "9", "10"))
eventually(timeout(streamingTimeout)) {
checkDatasetUnorderly(reader, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
}
} finally {
writer.stop()
}
}
test("null topic attribute") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
testUtils.createTopic(topic)
/* No topic field or topic option */
var writer: StreamingQuery = null
var ex: Exception = null
try {
writer = createKafkaWriter(input.toDF())(
withSelectExpr = "CAST(null as STRING) as topic", "value"
)
testUtils.sendMessages(inputTopic, Array("1", "2", "3", "4", "5"))
eventually(timeout(streamingTimeout)) {
assert(writer.exception.isDefined)
ex = writer.exception.get
}
} finally {
writer.stop()
}
assert(ex.getCause.getCause.getMessage
.toLowerCase(Locale.ROOT)
.contains("null topic present in the data."))
}
test("streaming - write data with bad schema") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
testUtils.createTopic(topic)
val ex = intercept[AnalysisException] {
/* No topic field or topic option */
createKafkaWriter(input.toDF())(
withSelectExpr = "value as key", "value"
)
}
assert(ex.getMessage
.toLowerCase(Locale.ROOT)
.contains("topic option required when no 'topic' attribute is present"))
val ex2 = intercept[AnalysisException] {
/* No value field */
createKafkaWriter(input.toDF())(
withSelectExpr = s"'$topic' as topic", "value as key"
)
}
assert(ex2.getMessage.toLowerCase(Locale.ROOT).contains(
"required attribute 'value' not found"))
}
test("streaming - write data with valid schema but wrong types") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
.selectExpr("CAST(value as STRING) value")
val topic = newTopic()
testUtils.createTopic(topic)
val ex = intercept[AnalysisException] {
/* topic field wrong type */
createKafkaWriter(input.toDF())(
withSelectExpr = s"CAST('1' as INT) as topic", "value"
)
}
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("topic type must be a string"))
val ex2 = intercept[AnalysisException] {
/* value field wrong type */
createKafkaWriter(input.toDF())(
withSelectExpr = s"'$topic' as topic", "CAST(value as INT) as value"
)
}
assert(ex2.getMessage.toLowerCase(Locale.ROOT).contains(
"value attribute type must be a string or binary"))
val ex3 = intercept[AnalysisException] {
/* key field wrong type */
createKafkaWriter(input.toDF())(
withSelectExpr = s"'$topic' as topic", "CAST(value as INT) as key", "value"
)
}
assert(ex3.getMessage.toLowerCase(Locale.ROOT).contains(
"key attribute type must be a string or binary"))
}
test("streaming - write to non-existing topic") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.option("startingOffsets", "earliest")
.load()
val topic = newTopic()
var writer: StreamingQuery = null
var ex: Exception = null
try {
ex = intercept[StreamingQueryException] {
writer = createKafkaWriter(input.toDF(), withTopic = Some(topic))()
testUtils.sendMessages(inputTopic, Array("1", "2", "3", "4", "5"))
eventually(timeout(streamingTimeout)) {
assert(writer.exception.isDefined)
}
throw writer.exception.get
}
} finally {
writer.stop()
}
assert(ex.getCause.getCause.getMessage.toLowerCase(Locale.ROOT).contains("job aborted"))
}
test("streaming - exception on config serializer") {
val inputTopic = newTopic()
testUtils.createTopic(inputTopic, partitions = 1)
testUtils.sendMessages(inputTopic, Array("0"))
val input = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", inputTopic)
.load()
val ex = intercept[IllegalArgumentException] {
createKafkaWriter(
input.toDF(),
withOptions = Map("kafka.key.serializer" -> "foo"))()
}
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
"kafka option 'key.serializer' is not supported"))
val ex2 = intercept[IllegalArgumentException] {
createKafkaWriter(
input.toDF(),
withOptions = Map("kafka.value.serializer" -> "foo"))()
}
assert(ex2.getMessage.toLowerCase(Locale.ROOT).contains(
"kafka option 'value.serializer' is not supported"))
}
test("generic - write big data with small producer buffer") {
/* This test ensures that we understand the semantics of Kafka when
     * it comes to blocking on a call to send when the send buffer is full.
* This test will configure the smallest possible producer buffer and
* indicate that we should block when it is full. Thus, no exception should
* be thrown in the case of a full buffer.
*/
val topic = newTopic()
testUtils.createTopic(topic, 1)
val options = new java.util.HashMap[String, Object]
options.put("bootstrap.servers", testUtils.brokerAddress)
options.put("buffer.memory", "16384") // min buffer size
options.put("block.on.buffer.full", "true")
options.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getName)
options.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getName)
val inputSchema = Seq(AttributeReference("value", BinaryType)())
val data = new Array[Byte](15000) // large value
val writeTask = new KafkaStreamDataWriter(Some(topic), options, inputSchema)
try {
val fieldTypes: Array[DataType] = Array(BinaryType)
val converter = UnsafeProjection.create(fieldTypes)
val row = new SpecificInternalRow(fieldTypes)
row.update(0, data)
val iter = Seq.fill(1000)(converter.apply(row)).iterator
iter.foreach(writeTask.write(_))
writeTask.commit()
} finally {
writeTask.close()
}
}
private def createKafkaReader(topic: String): DataFrame = {
spark.read
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("startingOffsets", "earliest")
.option("endingOffsets", "latest")
.option("subscribe", topic)
.load()
}
private def createKafkaWriter(
input: DataFrame,
withTopic: Option[String] = None,
withOutputMode: Option[OutputMode] = None,
withOptions: Map[String, String] = Map[String, String]())
(withSelectExpr: String*): StreamingQuery = {
var stream: DataStreamWriter[Row] = null
val checkpointDir = Utils.createTempDir()
var df = input.toDF()
if (withSelectExpr.length > 0) {
df = df.selectExpr(withSelectExpr: _*)
}
stream = df.writeStream
.format("kafka")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
// We need to reduce blocking time to efficiently test non-existent partition behavior.
.option("kafka.max.block.ms", "1000")
.trigger(Trigger.Continuous(1000))
.queryName("kafkaStream")
withTopic.foreach(stream.option("topic", _))
withOutputMode.foreach(stream.outputMode(_))
withOptions.foreach(opt => stream.option(opt._1, opt._2))
stream.start()
}
}
|
aosagie/spark
|
external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSinkSuite.scala
|
Scala
|
apache-2.0
| 14,435
|
/*
* This file is part of the "silex" library of helpers for Apache Spark.
*
* Copyright (c) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.redhat.et.silex.util
import org.scalatest._
class RegexImplicitsSpec extends FlatSpec with Matchers {
import OptionalArgProperties._
it should "use the r-interpolator to generate a regex" in {
import RegexImplicits._
val matches = "123::456" match {
case r"([0-9]+)$first::([0-9]+)$second" => (first, second)
case _ => ("", "")
}
assert(matches == ("123", "456"))
}
}
|
erikerlandson/silex
|
src/test/scala/com/redhat/et/silex/util/regexImplicits.scala
|
Scala
|
apache-2.0
| 1,106
|
/*
* Copyright 2012-2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.web
import org.scalatest.FunSuite
class FieldSelectorParserTest extends FunSuite {
test("ParseSingleKey") {
val expr = ":(a)"
expectResult(KeySelectExpr(Map("a" -> FixedExpr(matches = true)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseManyKeys") {
val expr = ":(a,b,c)"
expectResult(KeySelectExpr(Map(
"a" -> FixedExpr(matches = true),
"b" -> FixedExpr(matches = true),
"c" -> FixedExpr(matches = true)
))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseFlatten") {
val expr = "::(a,b,c)"
expectResult(FlattenExpr(KeySelectExpr(Map(
"a" -> FixedExpr(matches = true),
"b" -> FixedExpr(matches = true),
"c" -> FixedExpr(matches = true)
)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseSubExpr") {
val expr = ":(a,b:(d,e),c::(f,g,h))"
expectResult(KeySelectExpr(Map(
"a" -> FixedExpr(matches = true),
"b" -> KeySelectExpr(Map(
"d" -> FixedExpr(matches = true),
"e" -> FixedExpr(matches = true)
)),
"c" -> FlattenExpr(KeySelectExpr(Map(
"f" -> FixedExpr(matches = true),
"g" -> FixedExpr(matches = true),
"h" -> FixedExpr(matches = true)
)))
))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseEqualExpr") {
val expr = ":(a=42)"
expectResult(KeySelectExpr(Map("a" -> EqualExpr(42)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseNotEqualExpr") {
val expr = ":(a!=42)"
expectResult(KeySelectExpr(Map("a" -> NotEqualExpr(42)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseRegexExpr") {
val expr = ":(a~/^.*Id$/)"
expectResult(KeySelectExpr(Map("a" -> RegexExpr("^.*Id$", invert = false)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseInvRegexExpr") {
val expr = ":(a!~/^.*Id$/)"
expectResult(KeySelectExpr(Map("a" -> RegexExpr("^.*Id$", invert = true)))) {
FieldSelectorParser.parse(expr)
}
}
test("ParseStringLiteral") {
val expr = ":(a=\\"42\\")"
expectResult(KeySelectExpr(Map("a" -> EqualExpr("42")))) {
FieldSelectorParser.parse(expr)
}
}
}
|
wstrucke/edda
|
src/test/scala/com/netflix/edda/web/FieldSelectorParserTest.scala
|
Scala
|
apache-2.0
| 2,821
|
val p: Seq[Double] = Seq()
p.foldLeft("") {
(a, b) /*(B, Double) inferred*/ =>
/*start*/(a, b)/*end*/
0d
}
//(String, Double)
|
ilinum/intellij-scala
|
testdata/typeInference/bugs3/SCL2412.scala
|
Scala
|
apache-2.0
| 135
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.carbondata.restructure.vectorreader
import java.math.BigDecimal
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.spark.exception.ProcessMetaDataException
class ChangeDataTypeTestCases extends Spark2QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("DROP TABLE IF EXISTS changedatatypetest")
sql("DROP TABLE IF EXISTS hivetable")
}
test("test change datatype on existing column and load data, insert into hive table") {
def test_change_column_load_insert() = {
beforeAll
sql(
"CREATE TABLE changedatatypetest(intField INT,stringField STRING,charField STRING," +
"timestampField TIMESTAMP,decimalField DECIMAL(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
sql("ALTER TABLE changedatatypetest CHANGE intField intfield BIGINT")
sql(
"CREATE TABLE hivetable(intField BIGINT,stringField STRING,charField STRING,timestampField "
+ "TIMESTAMP,decimalField DECIMAL(6,2)) STORED AS PARQUET")
sql("INSERT INTO TABLE hivetable SELECT * FROM changedatatypetest")
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_column_load_insert()
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_column_load_insert()
}
test("test datatype change and filter") {
def test_change_datatype_and_filter() = {
beforeAll
sql(
"CREATE TABLE changedatatypetest(intField INT,stringField STRING,charField STRING," +
"timestampField TIMESTAMP,decimalField DECIMAL(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
sql("ALTER TABLE changedatatypetest CHANGE intField intfield BIGINT")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
checkAnswer(sql("SELECT charField FROM changedatatypetest WHERE intField > 99"),
Seq(Row("abc"), Row("abc")))
checkAnswer(sql("SELECT charField FROM changedatatypetest WHERE intField < 99"), Seq())
checkAnswer(sql("SELECT charField FROM changedatatypetest WHERE intField = 100"),
Seq(Row("abc"), Row("abc")))
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_datatype_and_filter
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_datatype_and_filter
}
test("test change int datatype and load data") {
def test_change_int_and_load() = {
beforeAll
sql(
"CREATE TABLE changedatatypetest(intField INT,stringField STRING,charField STRING," +
"timestampField TIMESTAMP,decimalField DECIMAL(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
sql("ALTER TABLE changedatatypetest CHANGE intField intfield BIGINT")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
checkAnswer(sql("SELECT SUM(intField) FROM changedatatypetest"), Row(200))
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_int_and_load()
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_int_and_load()
}
test("test change decimal datatype and compaction") {
def test_change_decimal_and_compaction() = {
beforeAll
sql(
"CREATE TABLE changedatatypetest(intField INT,stringField STRING,charField STRING," +
"timestampField TIMESTAMP,decimalField DECIMAL(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
sql("ALTER TABLE changedatatypetest CHANGE decimalField decimalField DECIMAL(9,5)")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
checkAnswer(sql("SELECT decimalField FROM changedatatypetest"),
Seq(Row(new BigDecimal("21.23").setScale(5)), Row(new BigDecimal("21.23").setScale(5))))
sql("ALTER TABLE changedatatypetest COMPACT 'major'")
checkExistence(sql("SHOW SEGMENTS FOR TABLE changedatatypetest"), true, "0 Compacted")
checkExistence(sql("SHOW SEGMENTS FOR TABLE changedatatypetest"), true, "1 Compacted")
checkExistence(sql("SHOW SEGMENTS FOR TABLE changedatatypetest"), true, "0.1 Success")
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_decimal_and_compaction()
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_decimal_and_compaction()
}
test("test to change int datatype to long") {
def test_change_int_to_long() = {
beforeAll
sql(
"CREATE TABLE changedatatypetest(intField INT,stringField STRING,charField STRING," +
"timestampField TIMESTAMP,decimalField DECIMAL(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE " +
s"changedatatypetest OPTIONS('FILEHEADER'='intField,stringField,charField,timestampField,"
+ s"decimalField')")
sql("ALTER TABLE changedatatypetest CHANGE intField intField LONG")
checkAnswer(sql("SELECT intField FROM changedatatypetest LIMIT 1"), Row(100))
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_int_to_long()
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_int_to_long()
}
test("test data type change for with pre-aggregate table should throw exception") {
sql("drop table if exists preaggMain")
sql("drop table if exists PreAggMain_preagg1")
sql("create table preaggMain (a int, b string, c string) stored by 'carbondata'")
sql(
"create datamap preagg1 on table PreAggMain using 'preaggregate' as select" +
" a,sum(b) from PreAggMain group by a")
assert(intercept[ProcessMetaDataException] {
sql("alter table preaggmain change a a long").show
}.getMessage.contains("exists in a pre-aggregate table"))
assert(intercept[ProcessMetaDataException] {
sql("alter table preaggmain_preagg1 change a a long").show
}.getMessage.contains("Cannot change data type or rename column for columns in pre-aggregate table"))
sql("drop table if exists preaggMain")
sql("drop table if exists PreAggMain_preagg1")
}
test("test data type change for dictionary exclude INT type column") {
def test_change_data_type() = {
beforeAll
sql("drop table if exists table_sort")
sql("CREATE TABLE table_sort (imei int,age int,mac string) STORED BY 'carbondata' TBLPROPERTIES('DICTIONARY_EXCLUDE'='imei,age','SORT_COLUMNS'='imei,age')")
sql("insert into table_sort select 32674,32794,'MAC1'")
sql("alter table table_sort change age age bigint")
sql("insert into table_sort select 32675,9223372036854775807,'MAC2'")
try {
sqlContext.setConf("carbon.enable.vector.reader", "true")
checkAnswer(sql("select * from table_sort"),
Seq(Row(32674, 32794, "MAC1"), Row(32675, Long.MaxValue, "MAC2")))
} finally {
sqlContext.setConf("carbon.enable.vector.reader", "true")
}
afterAll
}
sqlContext.setConf("carbon.enable.vector.reader", "true")
test_change_data_type()
sqlContext.setConf("carbon.enable.vector.reader", "false")
test_change_data_type()
}
override def afterAll {
sql("DROP TABLE IF EXISTS changedatatypetest")
sql("DROP TABLE IF EXISTS hivetable")
sqlContext.setConf("carbon.enable.vector.reader", "false")
}
}
|
manishgupta88/carbondata
|
integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/ChangeDataTypeTestCases.scala
|
Scala
|
apache-2.0
| 9,462
|
package dispatch.classic.tagsoup
import dispatch.classic.{HandlerVerbs, Request}
import xml.parsing.NoBindingFactoryAdapter
import java.io.InputStreamReader
import org.xml.sax.InputSource
import org.ccil.cowan.tagsoup.jaxp.SAXFactoryImpl
trait ImplicitTagSoupHandlers {
implicit def handlerToTagSoupHandlers(h: HandlerVerbs) = new TagSoupHandlers(h)
implicit def requestToTagSoupHandlers(req: Request) = new TagSoupHandlers(req);
implicit def stringToTagSoupHandlers(str: String) = new TagSoupHandlers(new Request(str));
}
object TagSoupHttp extends ImplicitTagSoupHandlers
class TagSoupHandlers(subject: HandlerVerbs) {
lazy val parserFactory = new SAXFactoryImpl
/** Process response with TagSoup html processor in block */
def tagsouped [T] (block: (xml.NodeSeq) => T) = subject >> { (stm, charset) =>
block( new NoBindingFactoryAdapter().loadXML(new InputSource(new InputStreamReader(stm, charset)), parserFactory.newSAXParser()) )
}
/** Alias for verb tagsouped */
def </> [T] (block: (xml.NodeSeq) => T) = tagsouped (block)
/** Conveniences handler for retrieving a NodeSeq */
def as_tagsouped = tagsouped {ns => ns}
}
|
dispatch/dispatch
|
tagsoup/src/main/scala/TagSoupHttp.scala
|
Scala
|
lgpl-2.1
| 1,158
|
package org.http4s
package server
package middleware
package authentication
import org.http4s.headers.Authorization
import scalaz._
import scalaz.concurrent.Task
/**
* Provides Basic Authentication from RFC 2617.
* @param realm The realm used for authentication purposes.
* @param store A partial function mapping (realm, user) to the
* appropriate password.
*/
class BasicAuthentication(realm: String, store: AuthenticationStore) extends Authentication {
private trait AuthReply
private sealed case class OK(user: String, realm: String) extends AuthReply
private case object NeedsAuth extends AuthReply
protected def getChallenge(req: Request) = checkAuth(req).map {
    case OK(user, realm) => \/-(addUserRealmAttributes(req, user, realm))
    case NeedsAuth => -\/(Challenge("Basic", realm, Nil.toMap))
}
private def checkAuth(req: Request): Task[AuthReply] = {
req.headers.get(Authorization) match {
case Some(Authorization(BasicCredentials(user, client_pass))) =>
store(realm, user).map {
case None => NeedsAuth
case Some(server_pass) =>
if (server_pass == client_pass) OK(user, realm)
else NeedsAuth
}
case Some(Authorization(_)) => Task.now(NeedsAuth)
case None => Task.now(NeedsAuth)
}
}
}
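// A hypothetical usage sketch (not part of the original file). It assumes
// AuthenticationStore has the shape (realm, user) => Task[Option[String]] implied by
// checkAuth above; the realm name and password lookup are invented for illustration:
//
//   val store: AuthenticationStore = (realm, user) =>
//     Task.now(if (user == "admin") Some("secret") else None)
//   val basicAuth = new BasicAuthentication("example-realm", store)
//   // basicAuth can then be applied to a service like any other Authentication middleware.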
|
hvesalai/http4s
|
server/src/main/scala/org/http4s/server/middleware/authentication/BasicAuthentication.scala
|
Scala
|
apache-2.0
| 1,329
|
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.web.bulk.workflow.operations
import com.google.inject.assistedinject.{Assisted, AssistedInject}
import com.tle.beans.item.HistoryEvent
import com.tle.common.i18n.CurrentLocale
import com.tle.common.security.Privilege
import com.tle.common.workflow.node.WorkflowNode
import com.tle.common.workflow.{WorkflowItemStatus, WorkflowNodeStatus}
import com.tle.core.item.standard.operations.workflow.TaskOperation
import com.tle.core.security.TLEAclManager
import com.tle.core.security.impl.SecureInModeration
import com.tle.exceptions.AccessDeniedException
import javax.inject.Inject
import scala.collection.JavaConverters._
@SecureInModeration
class WorkflowMoveOperation @AssistedInject()(@Assisted("msg") val msg: String,
@Assisted("toStep") val toStep: String)
extends TaskOperation {
@Inject var aclService: TLEAclManager = _
override def execute: Boolean = {
if (!aclService.hasPrivilege(getWorkflow, Privilege.MANAGE_WORKFLOW)) {
throw new AccessDeniedException(
CurrentLocale.get("com.tle.core.services.item.error.nopriv", "MANAGE_WORKFLOW", getItemId))
}
clearAllStatuses()
val nodeSeq = getWorkflow.getNodes.asScala.toSeq
def requiresSiblingCompletion(wn: WorkflowNode) = wn.getType != WorkflowNode.PARALLEL_TYPE
val parentMap = nodeSeq.groupBy(n => Option(n.getParent))
def newStatus(completed: Boolean)(n: WorkflowNode) = {
val ns = n.getType match {
case WorkflowNode.ITEM_TYPE =>
val wis = new WorkflowItemStatus(n, null)
wis.setStarted(params.getDateNow)
wis
case _ => new WorkflowNodeStatus(n)
}
ns.setStatus(if (completed) WorkflowNodeStatus.COMPLETE else WorkflowNodeStatus.INCOMPLETE)
ns
}
nodeSeq.find(_.getUuid == toStep).foreach { wn =>
def completePreviousSiblings(child: WorkflowNode): Seq[WorkflowNodeStatus] =
(for {
parent <- Option(child.getParent).filter(requiresSiblingCompletion)
children <- parentMap.get(Some(parent))
} yield {
children.filter(_.getChildIndex < child.getChildIndex).map(newStatus(true))
}).getOrElse(Seq.empty)
def addParentStatuses(child: WorkflowNode): Seq[WorkflowNodeStatus] = {
(completePreviousSiblings(child) :+ newStatus(false)(child)) ++
Option(child.getParent).toSeq.flatMap(addParentStatuses)
}
val newStatuses = addParentStatuses(wn)
initStatusMap(newStatuses.asJava)
newStatuses.foreach { wns =>
if (wns.getStatus == WorkflowNodeStatus.INCOMPLETE && !wns.getNode.isLeafNode) {
update(wns.getNode)
}
}
enter(wn)
val h = createHistory(HistoryEvent.Type.taskMove)
setToStepFromTask(h, toStep)
h.setComment(msg)
}
updateModeration()
true
}
}
|
equella/Equella
|
Source/Plugins/Core/com.equella.core/scalasrc/com/tle/web/bulk/workflow/operations/WorkflowMoveOperation.scala
|
Scala
|
apache-2.0
| 3,682
|
package org.jetbrains.plugins.scala.lang.scaladoc.generate
import javax.swing.JComponent
import javax.swing.event.DocumentEvent
import com.intellij.CommonBundle
import com.intellij.analysis.{AnalysisScope, BaseAnalysisAction, BaseAnalysisActionDialog}
import com.intellij.execution.configurations._
import com.intellij.execution.executors.DefaultRunExecutor
import com.intellij.execution.impl.{RunManagerImpl, RunnerAndConfigurationSettingsImpl}
import com.intellij.execution.runners.{ExecutionEnvironment, ExecutionEnvironmentBuilder, ProgramRunner}
import com.intellij.execution.util.ExecutionErrorDialog
import com.intellij.execution.{ExecutionException, Executor, RunnerRegistry}
import com.intellij.ide.util.PropertiesComponent
import com.intellij.openapi.options.SettingsEditor
import com.intellij.openapi.project.Project
import com.intellij.ui.DocumentAdapter
import org.jetbrains.plugins.scala.console.ScalaConsoleConfigurationType
import org.jetbrains.plugins.scala.lang.scaladoc.generate.ScaladocAction.ScaladocRunConfiguration
/**
* User: Dmitry Naidanov
* Date: 01.10.11
*/
class ScaladocAction extends BaseAnalysisAction("Generate Scaladoc", "Scaladoc") {
private var configurationDialog: ScaladocConsoleRunConfigurationForm = null
private def disposeForm() {
configurationDialog = null
}
def analyze(project: Project, scope: AnalysisScope) {
var config: ScaladocConfiguration = null
try {
configurationDialog.saveSettings()
config = new ScaladocConfiguration(configurationDialog, project, scope)
try {
val runConfig = new ScaladocRunConfiguration(project, configurationDialog, config)
val runner: ProgramRunner[_ <: RunnerSettings] =
RunnerRegistry.getInstance.getRunner(DefaultRunExecutor.EXECUTOR_ID, config)
val builder: ExecutionEnvironmentBuilder =
new ExecutionEnvironmentBuilder(project, DefaultRunExecutor.getRunExecutorInstance)
builder.runProfile(config)
builder.runnerAndSettings(runner,
new RunnerAndConfigurationSettingsImpl(new RunManagerImpl(project), runConfig, false))
runner.execute(builder.build())
} catch {
case e: ExecutionException => ExecutionErrorDialog.show(e, CommonBundle.getErrorTitle, project)
}
}
finally {
disposeForm()
}
}
override def canceled() {
super.canceled()
disposeForm()
}
override def getAdditionalActionSettings(project: Project, dialog: BaseAnalysisActionDialog): JComponent = {
configurationDialog = new ScaladocConsoleRunConfigurationForm(project)
configurationDialog.getOutputDirChooser.getDocument.addDocumentListener(new DocumentAdapter() {
def textChanged(e: DocumentEvent) {
updateAvailability(dialog)
}
})
updateAvailability(dialog)
configurationDialog.createCenterPanel()
}
private def updateAvailability(dialog: BaseAnalysisActionDialog) {
dialog.setOKActionEnabled(!configurationDialog.getOutputDir.isEmpty)
}
}
object ScaladocAction {
// just stub entities, will never be invoked
object ScaladocRunConfigurationFactory extends ConfigurationFactory(new ScalaConsoleConfigurationType) {
override def createTemplateConfiguration(project: Project): RunConfiguration = new ScaladocRunConfiguration(project, null, null)
}
class ScaladocRunConfiguration(project: Project,
dialog: ScaladocConsoleRunConfigurationForm,
config: ScaladocConfiguration)
extends RunConfigurationBase(project, ScaladocRunConfigurationFactory, "Generate Scaladoc") {
override def checkConfiguration() {}
override def getConfigurationEditor: SettingsEditor[_ <: ScaladocRunConfiguration] = new SettingsEditor[ScaladocRunConfiguration]() {
override def createEditor(): JComponent = dialog.createCenterPanel()
override def resetEditorFrom(s: ScaladocRunConfiguration) {}
override def applyEditorTo(s: ScaladocRunConfiguration) {}
}
override def getState(executor: Executor, env: ExecutionEnvironment): RunProfileState = config.getState(executor, env)
}
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/scaladoc/generate/ScaladocAction.scala
|
Scala
|
apache-2.0
| 4,148
|
package org.apache.mesos.chronos.scheduler.jobs.constraints
import java.util.regex.PatternSyntaxException
import org.specs2.mutable.SpecificationWithJUnit
class UnlikeConstraintSpec extends SpecificationWithJUnit
with ConstraintSpecHelper {
"matches attributes of type text" in {
val attributes = List(createTextAttribute("dc", "north"), createTextAttribute("rack", "rack-4"))
val constraint = UnlikeConstraint("rack", "rack-[1-3]")
constraint.matches(attributes) must_== true
val attributes2 = List(createTextAttribute("dc", "north"))
val constraint2 = UnlikeConstraint("dc", "north|south")
constraint2.matches(attributes2) must_== false
}
"matches attributes of type scalar" in {
val attributes = List(createScalarAttribute("number", 1))
val constraint = UnlikeConstraint("number", """\\d\\.\\d""")
constraint.matches(attributes) must_== false
val attributes2 = List(createScalarAttribute("number", 1))
val constraint2 = UnlikeConstraint("number", """100.\\d""")
    constraint2.matches(attributes2) must_== true
}
"matches attributes of type set" in {
val attributes = List(createSetAttribute("dc", Array("north")))
val constraint = UnlikeConstraint("dc", "^n.*")
constraint.matches(attributes) must_== false
val attributes2 = List(createSetAttribute("dc", Array("south")))
constraint.matches(attributes2) must_== true
}
"fails in case of an invalid regular expression" in {
UnlikeConstraint("invalid-regex", "[[[") must throwA[PatternSyntaxException]
}
}
|
lionelnicolas/chronos
|
src/test/scala/org/apache/mesos/chronos/scheduler/jobs/constraints/UnlikeConstraintSpec.scala
|
Scala
|
apache-2.0
| 1,555
|
package de.sciss.mellite
import de.sciss.file._
import de.sciss.lucre.store.BerkeleyDB
import de.sciss.lucre.{Copy, Txn}
import de.sciss.nuages.Nuages
import de.sciss.nuages.Nuages.Surface
import de.sciss.proc.{Durable, Workspace}
// quick hack to copy a nuages-only database into a regular mellite session
object ImportNuages extends App {
val fIn = args.headOption.map(file).getOrElse(
// userHome/"Documents"/"projects"/"Anemone"/"sessions"/"session_160527_164131"
userHome/"Music"/"renibday"/"sessions"/"session_160616_160647"
)
val fOut = userHome/"mellite"/"sessions"/fIn.replaceExt(".mllt").name
require(!fOut.exists())
Mellite.initTypes()
val factIn = BerkeleyDB.factory(fIn, createIfNecessary = false)
type In = Durable.Txn
type Out = Durable.Txn
val sysIn = Durable(factIn)
try {
val nInH = sysIn.root[Nuages[In]] { _ => sys.error("Expecting existing Nuages file") }
val dsc = BerkeleyDB.Config()
dsc.allowCreate = true
val ds = BerkeleyDB.factory(fOut, dsc)
val wOut = Workspace.Durable.empty(fOut.toURI, ds)
try {
Txn.copy[In, Out, Unit] { (txIn: In, tx: Out) =>
val cpy = Copy[In, Out]()(txIn, tx)
val nIn = nInH()(txIn)
val infoIn = nIn.surface match {
case Surface.Timeline(tl) => s"Timeline: ${tl.iterator(txIn).size}"
case Surface.Folder (f) => s"Folder: ${f.size(txIn)}"
}
println(s"IN: $infoIn")
val nOut = cpy(nIn)
cpy.finish()
val infoOut = nOut.surface match {
case Surface.Timeline(tl) => s"Timeline: ${tl.iterator(tx).size}"
case Surface.Folder (f) => s"Folder: ${f.size(tx)}"
}
println(s"OUT: $infoOut")
val foldOut = wOut.root(tx)
foldOut.addLast(nOut)(tx)
} (sysIn, wOut.cursor)
} finally {
wOut.cursor.step { implicit tx =>
wOut.dispose()
}
}
} finally {
sysIn.close()
}
println("Import done.")
sys.exit()
}
|
Sciss/Mellite
|
app/src/test/scala/de/sciss/mellite/ImportNuages.scala
|
Scala
|
agpl-3.0
| 2,025
|
package colossus.extensions.util.bson.reader
import java.nio.ByteBuffer
import colossus.extensions.util.bson.element.BsonObjectId
case class BsonObjectIdReader(buffer: ByteBuffer) extends Reader[BsonObjectId] {
override def read: Option[BsonObjectId] = {
val name = readCString()
val value = readBytes(12)
Some(BsonObjectId(name, value))
}
}
|
fehmicansaglam/colossus-extensions
|
mongo/src/main/scala/colossus/extensions/util/bson/reader/BsonObjectIdReader.scala
|
Scala
|
apache-2.0
| 361
|
// code-examples/AdvOOP/overrides/final-class-wont-compile.scala
// WON'T COMPILE.
final class Fixed {
def doSomething = "Fixed did something!"
}
class Changeable1 extends Fixed // ERROR
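// A sketch added for illustration (not part of the original example): a final class
// cannot be subclassed, but it can still be reused through composition.
class Wrapper1 {
  private val fixed = new Fixed
  def doSomethingElse = fixed.doSomething + " And Wrapper1 delegated to it!"
}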
|
XClouded/t4f-core
|
scala/src/tmp/AdvOOP/overrides/final-class-wont-compile.scala
|
Scala
|
apache-2.0
| 195
|
import org.specs2.mutable.Specification
class FizzBuzzTwoSpec extends Specification {
"n % 3 or contains 3 == Fizz and n % 5, contains 5 == Buzz, both == FizzBuzz" should {
"for 1" in {
FizzBuzzTwo(1).take(1).toList.map(_.value) must beEqualTo(
List("1")
)
}
"for 1..10" in {
FizzBuzzTwo(1).take(10).toList.map {
_.value
} must beEqualTo(
List(
"1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz"
)
)
}
"for 10..20" in {
FizzBuzzTwo(10).take(10).toList.map {
_.value
} must beEqualTo(
List(
"Buzz", "11", "Fizz", "Fizz", "14", "FizzBuzz", "16", "17", "Fizz", "19"
)
)
}
"for 50..60" in {
FizzBuzzTwo(50).take(10).toList.map {
_.value
} must beEqualTo(
List("Buzz", "Fizz", "Buzz", "FizzBuzz", "Fizz", "Buzz", "Buzz", "Fizz", "Buzz", "Buzz")
)
}
}
}
|
dkowis/CodingDojo
|
KataFizzBuzz/src/test/scala/FizzBuzzTwoSpec.scala
|
Scala
|
mit
| 960
|
package offGridOrcs
object ViewInspection {
def view(model: Model.Inspection): Seq[Sprite] = {
val titleAndDescription = getTitleAndDetails(model)
(viewBackground()
++ viewGrid(model)
++ viewTitle(titleAndDescription._1)
++ viewCloseButton()
++ viewDetails(titleAndDescription._2)
++ viewModeButton('A', 0, model.mode == Model.InspectionMode.Status())
++ viewModeButton('S', 1, model.mode == Model.InspectionMode.Stock())
++ viewCursor(model))
}
def getTitleAndDetails(model: Model.Inspection): (String, Seq[String]) = {
val world = model.mapModel.world
val tile = world(model.topLeft + model.selection)
val healthyAndGreen = Seq("HEALTHY", "GREEN")
val fullData = tile match {
case Tile(_, _, _, Some(_), _, _, _) =>
("DEMON",
Seq("FIERY", "ANGRY"),
getStockDetails(Stock.Zero))
case Tile(_, _, Some(orcID), _, _, _, _) =>
val orc = world(orcID)
("ORC",
healthyAndGreen,
getStockDetails(orc.stock))
case Tile(_, _, _, _, Some(buildingID), _, _) =>
val building = world(buildingID)
(building.blueprint.name,
Seq(
"STURDY",
s"${building.currentOrcs}/${building.blueprint.housingCapacity} ORCS"),
getStockDetails(building.stock))
case Tile(_, Tile.Trees(_), _, _, _, _, _) =>
("TREES",
healthyAndGreen,
getStockDetails(tile.stock))
case Tile(_, Tile.Grass(_), _, _, _, _, _) =>
("GRASS",
healthyAndGreen,
getStockDetails(tile.stock))
case Tile(_, Tile.Building(_), _, _, _, _, _) =>
("???",
Seq("UNDER", "CNSTRCTN"),
getStockDetails(tile.stock))
}
(fullData._1, model.mode match {
case Model.InspectionMode.Status() =>
fullData._2
case Model.InspectionMode.Stock() =>
fullData._3
})
}
def getStockDetails(stock: Stock): Seq[String] = {
if (stock.wood == 0) {
Seq("NO STOCK")
} else {
Seq(s"${stock.wood} WOOD")
}
}
def viewBackground(): Seq[Sprite] = {
Seq(Sprite(
Vec2.Zero,
BitmapLibrary.InspectScreen))
}
def viewGrid(model: Model.Inspection): Seq[Sprite] = {
val i = model.selection.x.toInt + model.selection.y.toInt * 3
Seq(Sprite(
Vec2(4, 5),
BitmapLibrary.InspectGrid(i)))
}
def viewTitle(text: String): Seq[Sprite] = {
GlyphBitmap.getSprites(Vec2(17, 4), text, _.boldLargeBitmap)
}
def viewCloseButton(): Seq[Sprite] = {
val topLeft = Vec2(
Dimensions.LowRez - 10, 1)
Seq(
Sprite(
topLeft,
BitmapLibrary.InspectCorner),
GlyphBitmap.getSprite(
topLeft + Vec2(2, 1),
'Q',
_.boldBitmap))
}
def viewModeButton(char: Char, index: Int, isActive: Boolean): Seq[Sprite] = {
val topLeft = Vec2(
4 + 12 * index.toDouble, Dimensions.LowRez - 14)
Seq(
Sprite(
topLeft,
if (isActive) {
BitmapLibrary.InspectButton
} else {
BitmapLibrary.InspectReverseButton
}),
GlyphBitmap.getSprite(
topLeft + Vec2(2, 2),
char,
if (isActive) {
_.boldBitmap
} else {
_.boldReverseBitmap
}))
}
def viewDetails(lines: Seq[String]): Seq[Sprite] = {
val topLeft = Vec2(4, 18)
lines
.zip(Stream.iterate(0)(_ + 1))
.map({ case (line, index) => GlyphBitmap.getSprites(
topLeft + Vec2(0, 8 * index.toDouble),
line,
_.boldBitmap)
})
.flatten
}
def viewCursor(model: Model.Inspection): Seq[Sprite] = {
ViewMap.viewCursor(model.cursor, Time.Zero)
}
}
|
dcecile/off-grid-orcs
|
src/ViewInspection.scala
|
Scala
|
mit
| 3,745
|
package ignition.core.utils
import java.sql.Timestamp
import org.joda.time.{DateTime, DateTimeZone, Period, Seconds}
import org.joda.time.format.ISODateTimeFormat
object DateUtils {
private val isoDateTimeFormatter = ISODateTimeFormat.dateTime().withZoneUTC()
implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_ isBefore _)
implicit def periodOrdering: Ordering[Period] = Ordering.fromLessThan(_.toStandardSeconds.getSeconds < _.toStandardSeconds.getSeconds)
implicit def timestampOrdering: Ordering[Timestamp] = new Ordering[Timestamp] {
def compare(x: Timestamp, y: Timestamp): Int = x compareTo y
}
implicit class DateTimeImprovements(val dateTime: DateTime) {
def toIsoString = isoDateTimeFormatter.print(dateTime)
def saneEqual(other: DateTime) =
dateTime.withZone(DateTimeZone.UTC).isEqual(other.withZone(DateTimeZone.UTC))
def isEqualOrAfter(other: DateTime) =
dateTime.isAfter(other) || dateTime.saneEqual(other)
def isEqualOrBefore(other: DateTime) =
dateTime.isBefore(other) || dateTime.saneEqual(other)
def isBetween(start: DateTime, end: DateTime) =
dateTime.isAfter(start) && dateTime.isEqualOrBefore(end)
}
implicit class SecondsImprovements(val seconds: Seconds) {
implicit def toScalaDuration: scala.concurrent.duration.FiniteDuration = {
scala.concurrent.duration.Duration(seconds.getSeconds, scala.concurrent.duration.SECONDS)
}
}
}
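// A hypothetical usage sketch (not part of the original file), relying only on the
// implicits defined above:
//
//   import ignition.core.utils.DateUtils._
//   val start = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC)
//   val end = start.plusDays(1)
//   List(end, start).sorted   // dateTimeOrdering sorts chronologically: List(start, end)
//   end.isBetween(start, end) // true: strictly after start, equal-or-before end
//   start.toIsoString         // "2015-01-01T00:00:00.000Z"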
|
chaordic/ignition-core
|
src/main/scala/ignition/core/utils/DateUtils.scala
|
Scala
|
mit
| 1,469
|
package jsky.app.ot.gemini.editor.targetComponent.details2
import java.awt.{GridBagConstraints, GridBagLayout, Insets}
import javax.swing.JPanel
import edu.gemini.pot.sp.ISPNode
import edu.gemini.shared.util.immutable.{Option => GOption}
import edu.gemini.shared.util.immutable.ScalaConverters.ScalaOptionOps
import edu.gemini.spModel.obs.context.ObsContext
import edu.gemini.spModel.target.SPTarget
import jsky.app.ot.gemini.editor.targetComponent.{TargetFeedbackEditor, TargetFeedbackEditor$, TelescopePosEditor}
import scala.collection.JavaConverters._
import scalaz.syntax.id._
final class TargetDetailPanel extends JPanel with TelescopePosEditor with ReentrancyHack {
private val nonsidereal = new NonSiderealDetailEditor
private val sidereal = new SiderealDetailEditor
private val too = new TooDetailEditor
val allEditors = List(nonsidereal, sidereal)
val allEditorsJava = allEditors.asJava
// This doodad will ensure that any change event coming from the SPTarget will get turned into
// a call to `edit`, so we don't have to worry about that case everywhere. Everything from here
// on down only needs to care about implementing `edit`.
val tpw = new ForwardingTelescopePosWatcher(this)
// Fields
private[this] var tde: TargetDetailEditor = null
def curDetailEditor: Option[TargetDetailEditor] = Option(tde)
def curDetailEditorJava: GOption[TargetDetailEditor] = curDetailEditor.asGeminiOpt
val source = new SourceDetailsEditor
val targetFeedbackEditor = new TargetFeedbackEditor
// Put it all together
setLayout(new GridBagLayout)
add(source.peer, new GridBagConstraints() <| { c =>
c.anchor = GridBagConstraints.NORTH
c.gridx = 1
c.gridy = 0
c.weightx = 1.0
c.weighty = 1.0
c.fill = GridBagConstraints.BOTH
})
add(targetFeedbackEditor.getComponent, new GridBagConstraints() <| { c =>
c.gridx = 0
c.gridy = 1
c.weightx = 1
c.gridwidth = 2
c.insets = new Insets(0,4,0,4)
c.fill = GridBagConstraints.HORIZONTAL
})
def edit(obsContext: GOption[ObsContext], spTarget: SPTarget, node: ISPNode): Unit = {
// Create or replace the existing detail editor, if needed
val newTde = spTarget.getTarget.fold(_ => too, _ => sidereal, _ => nonsidereal)
if (tde != newTde) {
if (tde != null) remove(tde)
tde = newTde
add(tde, new GridBagConstraints() <| { c =>
c.anchor = GridBagConstraints.NORTH
c.gridx = 0
c.gridy = 0
})
revalidate()
repaint()
}
// Forward the `edit` call.
tpw. edit(obsContext, spTarget, node)
tde. edit(obsContext, spTarget, node)
targetFeedbackEditor.edit(obsContext, spTarget, node)
source. edit(obsContext, spTarget, node)
}
}
|
spakzad/ocs
|
bundle/jsky.app.ot/src/main/scala/jsky/app/ot/gemini/editor/targetComponent/details2/TargetDetailPanel.scala
|
Scala
|
bsd-3-clause
| 2,870
|
package debop4s.rediscala.client
import java.lang.{Double => JDouble, Iterable => JIterable, Long => JLong}
import java.util.{List => JList, Map => JMap, Set => JSet}
import debop4s.rediscala._
import org.slf4j.{Logger, LoggerFactory}
import redis._
import redis.commands.Transactions
object JRedisClient {
def apply(): JRedisClient = apply(RedisClient())
def apply(host: String, port: Int): JRedisClient =
apply(RedisClient(host, port))
def apply(redisClient: RedisClient): JRedisClient = {
require(redisClient != null)
new JRedisClient(redisClient)
}
}
/**
 * A client for performing asynchronous communication with a Redis server.
 * Because [[RedisClient]] exposes Scala-specific types, it is wrapped here so that it can also be used easily from Java.
*
* @param _redis [[RedisClient]] instance.
* @author Sunghyouk Bae
*/
class JRedisClient(private[this] val _redis: RedisClient) extends JRedisSupport with JRedisTransactionalSupport {
def this() = this(RedisClient())
require(_redis != null)
protected lazy val log: Logger = LoggerFactory.getLogger(getClass)
override val redis: RedisCommands = {
_redis
// def server = RedisServer(_redis.host, _redis.port, db = _redis.db)
// RedisClientPool((0 until sys.runtime.availableProcessors()).map(i => server))
}
override lazy val redisBlocking: RedisBlockingClient =
RedisBlockingClient(
host = _redis.host,
port = _redis.port,
password = _redis.password,
db = _redis.db,
name = _redis.name
)
override lazy val transactionalRedis: Transactions = _redis
}
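// A hypothetical usage sketch (not part of the original file); the host and port are
// placeholders, and any implicit ActorSystem that rediscala's RedisClient needs is
// assumed to be in scope:
//
//   val client: JRedisClient = JRedisClient("localhost", 6379)
//   // Java callers get java.util collections and boxed types instead of the
//   // Scala-specific ones exposed by RedisClient.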
|
debop/debop4s
|
debop4s-rediscala/src/main/scala/debop4s/rediscala/client/JRedisClient.scala
|
Scala
|
apache-2.0
| 1,642
|
package nz.wicker.autoencoder.math.optimization
/**
* Differentiable `Double`-valued function suitable for line-search
* algorithms. The domain of the function is some vectorspace `V`,
* and the corresponding gradient is also from `V`.
*/
trait DifferentiableFunction[V] extends (V => Double) {
/**
* Returns the function value `f(x)` together with the gradient
* `(grad f)(x)`
*/
def apply(x: V): Double
def grad(x: V): V
def valueAndGrad(x: V): (Double, V) = (apply(x), grad(x))
}
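// A minimal example (not part of the original file) with V = Double: the quadratic
// f(x) = x^2 / 2, whose gradient is simply x.
class QuadraticFunction extends DifferentiableFunction[Double] {
  override def apply(x: Double): Double = 0.5 * x * x
  override def grad(x: Double): Double = x
}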
|
joergwicker/autoencoder
|
src/main/scala/nz/wicker/autoencoder/math/optimization/DifferentiableFunction.scala
|
Scala
|
gpl-3.0
| 507
|
package io.github.chikei.sbt.onelog
import java.io.PrintWriter
import com.zavakid.sbt.LogDepProcess.{ProcessContext, _}
import net.virtualvoid.sbt.graph.DependencyGraphKeys._
import net.virtualvoid.sbt.graph.{DependencyGraphPlugin, ModuleGraph, ModuleId}
import org.fusesource.scalate.TemplateEngine
import sbt.Keys._
import sbt.{AutoPlugin, Def, IO, Load, ModuleID, Plugins, Project, ProjectRef, Scoped, State, Task, _}
object OneLog extends AutoPlugin {
import OneLogKeys._
val autoImport = OneLogKeys
override def requires: Plugins = DependencyGraphPlugin
override def trigger = allRequirements
override def globalSettings: Seq[Def.Setting[_]] = {
onLoad := onLoad.value andThen doTask
}
var appended = false
val doTask: State => State = { state =>
if (OneLog.appended)
state
else {
import state.globalLogging.{full => log}
val buildStruct = Project.structure(state)
val extracted = Project.extract(state)
if (!extracted.getOpt(oneLogDisableDependencyProcess in Global).getOrElse(false)) {
log.info("sbt-one-log start process...")
def compute(graph: ModuleGraph, libraryDeps: Seq[sbt.ModuleID], p: ProjectRef): IndexedSeq[ModuleID] = {
val roots = graph.nodes.filter(n => !graph.edges.exists(_._2 == n.id)).sortBy(_.id.idString)
val directDeps = roots.flatMap(d => graph.dependencyMap(d.id))
.filter(_.evictedByVersion.isEmpty)
.filterNot(d => d.id.organisation.equals("org.scala-lang"))
            .flatMap { dep => // keep only deps declared in libraryDependencies (drops sub-project deps)
libraryDeps.find { libDep =>
libDep.organization.equals(dep.id.organisation) && libDep.name.equals(dep.id.name)
}.map(dep -> _)
}
directDeps.foldLeft(libraryDeps.toIndexedSeq) {
case (libs, (dep, libDep)) =>
val context = ProcessContext(dep.id, libDep, graph, libs, p, extracted)
processStrategies(context).libraryDeps
}
}
val (transformed, newState) = buildStruct.allProjectRefs.filter { p =>
//FIXME! .task is deprecated
val t = oneLogComputeModuleGraph in p
val s = Scoped.scopedSetting(t.scope, t.key)
extracted.getOpt(s).isDefined
//extracted.getOpt((computeModuleGraph in p).task).isDefined
}.foldLeft((extracted.session.mergeSettings, state)) { case ((allSettings, foldedState), p) =>
// need receive new state
val (newState, depGraph) = extracted.runTask(oneLogComputeModuleGraph in p, foldedState)
val newLibs = compute(depGraph, extracted.get(libraryDependencies in p), p)
(allSettings.map {
s =>
s.key.key match {
//case s if "libraryDependencies".equals(s.key.key.label) =>
case libraryDependencies.key =>
// ensure just modify this project's dependencies
s.key.scope.project.toOption.filter(p.equals(_)).fold(s.asInstanceOf[Setting[Seq[ModuleID]]]) { _ =>
s.asInstanceOf[Setting[Seq[ModuleID]]].mapInit((_, _) => newLibs)
}
case _ => s
}
}, newState)
}
OneLog.appended = true
//extracted.append(appendedSettings, state)
val newStructure = Load.reapply(transformed, extracted.structure)(extracted.showKey)
log.info("sbt-one-log finished process")
Project.setProject(extracted.session, newStructure, newState)
} else state
}
}
// def task: State => State = { state =>
// val extracted = Project.extract(state)
// extracted.structure.allProjectRefs.foldLeft(state) { (state, p) =>
// val ds: Seq[ModuleID] = extracted.get(libraryDependencies in p)
// println("=====" + p + " dep : ")
// ds.foreach(println)
// println("===========")
// if (p.project == "module1") {
// val (newState, _) = extracted.runTask(update.in(p).in(Compile), state)
// extracted.append(Seq[Setting[_]](
// libraryDependencies in p := Seq()
// ), newState)
// } else state
//
// }
// }
override def projectSettings: Seq[Setting[_]] = Seq[Setting[_]](
oneLogSlf4jVersion := "1.7.10",
oneLogLogbackVersion := "1.1.2",
oneLogScalaLoggingVersion := (scalaBinaryVersion.value match {
case "2.12" => "3.5.0"
case "2.11" => "3.5.0"
case "2.10" => "2.1.2"
}),
oneLogUseScalaLogging := true,
resolvers += "99-empty" at "http://version99.qos.ch/",
oneLogComputeModuleGraph := (moduleGraph in Compile).value
) ++ inConfig(Compile) {
Seq(
oneLogLogbackXmlTemplate := "/sbtonelog/templates/logback.xml.mustache",
oneLogLogbackFileName := "logback.xml",
oneLogGenerateLogbackXml := generateLogbackXMLImpl.value
)
} ++ inConfig(Test) {
Seq(
oneLogLogbackXmlTemplate := "/sbtonelog/templates/logback-test.xml.mustache",
oneLogLogbackFileName := "logback-test.xml",
oneLogGenerateLogbackXml := generateLogbackXMLImpl.value
)
} ++ Seq(Compile, Test, Runtime, Provided, Optional).flatMap{ config =>
inConfig(config)(Seq(
oneLogComputeChanges :=
Changes.generate(moduleGraph.value, libraryDependencies.value,
oneLogSlf4jVersion.value, oneLogLogbackVersion.value,
scalaBinaryVersion.value, oneLogScalaLoggingVersion.value),
oneLogRenderOverride := ChangesLayout.sbtRenderOverride,
oneLogRenderDependency := ChangesLayout.sbtRenderAdd,
ongLogChangesAscii := ChangesLayout.ascii(oneLogComputeChanges.value, oneLogRenderDependency.value, oneLogRenderOverride.value),
oneLogChanges := {
val s = streams.value
s.log.info(name.value)
s.log.info(ongLogChangesAscii.value)
},
oneLogWriteChangesFile := target.value / ("changes-%s.txt").format(config.toString),
oneLogWriteChange := {
val f = oneLogWriteChangesFile.value
val p = new PrintWriter(f, "UTF-8")
p.write(ChangesLayout.ascii(oneLogComputeChanges.value, oneLogRenderDependency.value, oneLogRenderOverride.value,
false))
p.close()
}
))
}
lazy val generateLogbackXMLImpl: Def.Initialize[Task[Unit]] = Def.task {
val out = streams.value
def generateContent(engine: TemplateEngine, context: Map[String, Any], templatePath: String, baseDir: File, file: File) {
val content = engine.layout(templatePath, context)
if (!baseDir.exists) baseDir.mkdirs()
file.createNewFile()
out.log.info(s"generate $file")
IO.write(file, content)
}
//val force = generateLogbackXMLParser.parsed
val force = false
val resourceDir = resourceDirectory.value
val logbackXML = resourceDir / oneLogLogbackFileName.value
val context = Map("projectName" -> name.value)
val engine = new TemplateEngine()
(force, logbackXML.exists()) match {
case (false, false) =>
generateContent(engine, context, oneLogLogbackXmlTemplate.value, resourceDir, logbackXML)
case (false, true) =>
out.log.info(s"${logbackXML.toString} is exist")
case (true, _) =>
out.log.warn(s"force generate is not support yes")
}
}
}
|
chikei/sbt-one-log
|
src/main/scala/io/github/chikei/sbt/onelog/OneLog.scala
|
Scala
|
apache-2.0
| 7,339
|
package pw.ian.sysadmincraft.world
import org.specs2.mutable.Specification
class PillarManagerSpec extends Specification {
"spiralIndex" >> {
"should be unique" >> {
val list = (0 to 10000).map(PillarManagerUtils.spiralIndex)
list.toSet.size must_== list.size
}
}
}
|
simplyianm/sysadmincraft
|
src/test/scala/pw/ian/sysadmincraft/world/PillarManagerSpec.scala
|
Scala
|
isc
| 297
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.sources
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.expressions.{Expression, PlannerExpression, ResolvedFieldReference}
/**
* The [[FieldComputer]] interface returns an expression to compute the field of the table schema
* of a [[TableSource]] from one or more fields of the [[TableSource]]'s return type.
*
* @tparam T The result type of the provided expression.
*/
abstract class FieldComputer[T] {
/**
* Returns the names of all fields that the expression of the field computer accesses.
*
* @return An array with the names of all accessed fields.
*/
def getArgumentFields: Array[String]
/**
* Returns the result type of the expression.
*
* @return The result type of the expression.
*/
def getReturnType: TypeInformation[T]
/**
* Validates that the fields that the expression references have the correct types.
*
* @param argumentFieldTypes The types of the physical input fields.
*/
@throws[ValidationException]
def validateArgumentFields(argumentFieldTypes: Array[TypeInformation[_]]): Unit
/**
* Returns the [[Expression]] that computes the value of the field.
*
* @param fieldAccesses Field access expressions for the argument fields.
* @return The expression to extract the timestamp from the [[TableSource]] return type.
*/
def getExpression(fieldAccesses: Array[ResolvedFieldReference]): Expression
}
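// A hypothetical sketch (not part of the original file): a FieldComputer that exposes a
// single Long input field unchanged. It assumes a ResolvedFieldReference can be returned
// where an Expression is expected in this planner version, and Types.LONG stands in for
// the appropriate TypeInformation; a real computer (e.g. a timestamp extractor) would
// usually build a cast or call expression instead.
//
//   class PassThroughLongField(fieldName: String) extends FieldComputer[Long] {
//     override def getArgumentFields: Array[String] = Array(fieldName)
//     override def getReturnType: TypeInformation[Long] =
//       org.apache.flink.api.common.typeinfo.Types.LONG.asInstanceOf[TypeInformation[Long]]
//     override def validateArgumentFields(argumentFieldTypes: Array[TypeInformation[_]]): Unit =
//       if (argumentFieldTypes(0) != org.apache.flink.api.common.typeinfo.Types.LONG)
//         throw new ValidationException(s"Field '$fieldName' must be of type LONG.")
//     override def getExpression(fieldAccesses: Array[ResolvedFieldReference]): Expression =
//       fieldAccesses(0) // assumed to be usable directly as an Expression here
//   }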
|
ueshin/apache-flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/sources/FieldComputer.scala
|
Scala
|
apache-2.0
| 2,353
|
package akkaviz.events
import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.Flow
import akkaviz.events.types.{ThroughputMeasurement, ReceivedWithId, BackendEvent}
import scala.concurrent.duration._
object ThroughputMeasurementFlow {
def apply(period: FiniteDuration): Flow[BackendEvent, ThroughputMeasurement, NotUsed] = {
Flow[BackendEvent]
.collect { case r: ReceivedWithId => r.actorRef }
.groupedWithin(Int.MaxValue, period)
.map { refs =>
refs.groupBy(identity).mapValues(_.length)
}
.scan(Map[ActorRef, Int]()) {
case (previous, current) =>
// produce zero for actors that have been measured previously but didn't receive any messages during `period`
current ++ (for { k <- previous.keySet.diff(current.keySet) } yield k -> 0)
}
.mapConcat { m =>
for {
(ref, count) <- m
} yield ThroughputMeasurement(ref, count / (period.toMillis.toDouble / 1.second.toMillis.toDouble))
}
}
}
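// A hypothetical wiring sketch (not part of the original file); `backendEvents` is a
// made-up Source[BackendEvent, NotUsed]:
//
//   val throughput = backendEvents.via(ThroughputMeasurementFlow(1.second))
//
// Each ThroughputMeasurement reports messages per second for one actor over the last
// period; an actor seen in an earlier period but idle in the current one is reported as 0.0.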
|
blstream/akka-viz
|
monitoring/src/main/scala/akkaviz/events/ThroughputMeasurementFlow.scala
|
Scala
|
mit
| 1,027
|
import akka.actor._
import akka.remote._
import akka.pattern.ask
import akka.pattern.pipe
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits._
import scalafx.application.Platform
class ParticipantActor(
val control: tictactoeClientFrontWindowController#Controller ,
val controller: tictactoeClientPlayWindowController#Controller) extends Actor {
import ParticipantActor._
import ServerActor._
var player1: Option[ActorRef] = None
var player2: Option[ActorRef] = None
def host = context.actorSelection(control.serverLocation)
override def preStart(): Unit = {
context.system.eventStream.subscribe(self, classOf[akka.remote.DisassociatedEvent])
context.system.eventStream.subscribe(self, classOf[akka.remote.AssociationErrorEvent])
}
def receive = {
case Join(name , ref) =>
implicit val timeout: Timeout = Timeout(5 seconds)
//println(host)
host ? Connect(name , ref) pipeTo sender
//var name1 = name
println(s"name: $name")
case JoinSuccess =>
tictactoeRunUI.player = "p1"
//tictactoeRunUI.player1Name = name1
context.become(join)
case AlreadyJoin =>
Platform.runLater({
control.showJoinedError()
})
case AssociationErrorEvent(_ , localAddress , remoteAddress , _ , _) =>
Platform.runLater({
control.showConnectionError()
})
case DisassociatedEvent(localAddress , remoteAddress , _) =>
Platform.runLater({
control.showConnectionError()
})
case Click =>
context.become(click)
case Done =>
context.unbecome()
    case `player2Join` => // backticks make this match the player2Join message, not bind a catch-all variable
//println("set player 2")
tictactoeRunUI.player = "p2"
context.become(p2)
}
// player1 become join
def join: Receive = {
case Join(name , ref) =>
sender ! AlreadyJoin
case p1(name , secondPlayer) =>
// knows 2nd player
// send start to p1 and 2nd player
player2 = Some(secondPlayer)
tictactoeRunUI.player2Name.value = name
case AssociationErrorEvent(_ , localAddress , remoteAddress , _ , _) =>
Platform.runLater({
control.showConnectionError()
})
case DisassociatedEvent(localAddress , remoteAddress , _) =>
Platform.runLater({
control.showConnectionError()
})
case Start =>
Platform.runLater({
controller.showStart()
controller.clickable.value = true
println("click true")
})
context.become(click)
}
// player2 become p2
def p2: Receive = {
case Join(name , ref) =>
sender ! AlreadyJoin
case AssociationErrorEvent(_ , localAddress , remoteAddress , _ , _) =>
Platform.runLater({
control.showConnectionError()
})
case DisassociatedEvent(localAddress , remoteAddress , _) =>
Platform.runLater({
control.showConnectionError()
})
case Start => // show stop
Platform.runLater({
controller.showStop()
})
//println("receivedstart")
context.become(unclick)
case p2(name , player1) =>
//println("setting playaer")
this.player1 = Some(player1)
tictactoeRunUI.player1Name.value = name
player1 ! Start
//println("sending start to "+player1)
self ! Start
}
def click: Receive = {
case AssociationErrorEvent(_ , localAddress , remoteAddress , _ , _) =>
Platform.runLater({
control.showConnectionError()
})
case DisassociatedEvent(localAddress , remoteAddress , _) =>
Platform.runLater({
control.showConnectionError()
})
case ClickCount(myCount) =>
println("clickcount")
Platform.runLater({
controller.clickable.value = false
controller.selfClickOnButton(myCount)
})
if (tictactoeRunUI.player == "p1") {
player2.get ! SelectCount(myCount)
//println("SelectCount(myCount):"+SelectCount(myCount))
context.become(unclick)
}
else if (tictactoeRunUI.player == "p2") {
player1.get ! SelectCount(myCount)
context.become(unclick)
}
sender ! Done
case Reset =>
reset()
case GameEnd =>
controller.clickable.value = false
}
def unclick: Receive = {
case AssociationErrorEvent(_ , localAddress , remoteAddress , _ , _) =>
Platform.runLater({
control.showConnectionError()
})
case DisassociatedEvent(localAddress , remoteAddress , _) =>
Platform.runLater({
control.showConnectionError()
})
case ClickCount(myCount) =>
Platform.runLater({
controller.showStop()
//controller.clickOnButtonX(myCount)
})
sender ! Done
case SelectCount(myCount) =>
context.become(click)
//println("selectCount execute")
Platform.runLater({
controller.clickable.value = true
controller.clickOnButton(myCount)
})
case Reset =>
reset()
case GameEnd =>
controller.clickable.value = false
}
def reset() {
player1 = None
player2 = None
context.unbecome()
}
}
object ParticipantActor {
// external message
case class Winner(name: String)
case class Join(name: String , ref: ActorRef)
case class JoinSuccess(name: String , ref: ActorRef)
case class p2(name: String , playerOwn: ActorRef)
case class p1(name: String , playerTwo: ActorRef)
case class SelectCount(myCount: Int)
case class ClickCount(myCount: Int)
// internal message
case object ConnectSuccess
case object ConnectFail
case object JoinFail
case object Click
case object AlreadyJoin
case object player2Join
case object Done
case object Start
case object Reset
case object GameEnd
}
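// Illustrative sketch only (not from the original project): one plausible way the second
// player's client could start the Join handshake that the receive blocks above handle.
// The remote actor path and the player name are assumptions made purely for illustration;
// the host presumably answers with JoinSuccess, or AlreadyJoin when the seat is taken,
// as the join state above shows.
object JoinHandshakeSketch {
  import akka.actor.{ActorRef, ActorSystem}
  import ParticipantActor.Join

  def joinRemoteGame(system: ActorSystem, hostPath: String, me: ActorRef): Unit = {
    // Resolve the hosting player's actor by path and ask to join the game.
    system.actorSelection(hostPath) ! Join("player two", me)
  }
}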
|
MagikarpBot/scala-akka-tictactoe
|
src/main/scala/ParticipantActor.scala
|
Scala
|
gpl-3.0
| 6,265
|
package com.gajdulewicz
import com.mongodb.casbah.WriteConcern
/**
* Created by gajduler on 09/12/14.
*/
object GeneratorApp {
def main(args: Array[String]) {
val writes: Int = 1 * 100 * 1000
val initWriter = new MongoWriter(bufferSize = 100000)
(1 to writes).foreach(id => {
val prof = ProfileGenerator.genProfile(id)
initWriter.scheduleForInsert(prof)
})
val defaultWriter = new MongoWriter(bufferSize = 100000)
time("Default concern") {
(1 to writes).foreach(id => {
val prof = ProfileGenerator.genProfile(id)
defaultWriter.scheduleForInsert(prof)
})
}
val journaledWriter = new MongoWriter(bufferSize = 100000, writeConcenrn = WriteConcern.Journaled)
time("Journaled concern") {
(1 to writes).foreach(id => {
val prof = ProfileGenerator.genProfile(id)
journaledWriter.scheduleForInsert(prof)
})
}
val fsynced = new MongoWriter(bufferSize = 100000, writeConcenrn = WriteConcern.Fsynced)
time("fsynced concern") {
(1 to writes).foreach(id => {
val prof = ProfileGenerator.genProfile(id)
fsynced.scheduleForInsert(prof)
})
}
}
def time[A](message: String = "")(a: => A) = {
val now = System.nanoTime
val result = a
    val millis = (System.nanoTime - now) / 1000 / 1000 // elapsed nanoseconds converted to milliseconds
    println(message + " %d ms".format(millis))
result
}
}
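// Illustrative sketch only: MongoWriter is defined elsewhere in this project, so the class
// below is a hypothetical stand-in showing one plausible shape for a writer with a
// scheduleForInsert-style API that flushes in batches. All names here are assumptions.
class BufferedWriterSketch[A](bufferSize: Int)(flush: Seq[A] => Unit) {
  private val buffer = scala.collection.mutable.ArrayBuffer.empty[A]

  def scheduleForInsert(a: A): Unit = {
    buffer += a
    if (buffer.size >= bufferSize) { // flush once a full batch has accumulated
      flush(buffer.toList)
      buffer.clear()
    }
  }
}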
|
rafax/dbperf
|
src/main/scala/com/gajdulewicz/Main.scala
|
Scala
|
mit
| 1,412
|
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package utils
// Java
import java.net.URI
// Scalaz
import scalaz._
import Scalaz._
// Specs2
import org.specs2.{Specification, ScalaCheck}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
import org.scalacheck._
import org.scalacheck.Arbitrary._
class StringToUriTest extends Specification with DataTables {
def is =
"Parsing Strings into URIs should work" ! e1
def e1 =
"SPEC NAME" || "URI" | "EXPECTED" |
"Empty URI" !! null ! None.success |
"Simple URI" !! "https://google.com" ! Some(URI.create("https://google.com")).success |
"Complex URI" !! "http://www.google.com/search?q=gateway+oracle+cards+denise+linn&hl=en&client=safari" ! Some(URI.create("http://www.google.com/search?q=gateway+oracle+cards+denise+linn&hl=en&client=safari")).success |
"Salvageable bad URI with raw spaces" !! "http://www.psychicbazaar.com/2-tarot-cards/genre/gothic/type/all/view/grid?n=24&utm_source=GoogleSearch&utm_medium=cpc&utm_campaign=uk-tarot--gothic-tarot&utm_term=bohemian gothic tarot&utm_content=33088202008&gclid=CN2LmteX2LkCFQKWtAodrSMASw" ! Some(URI.create("http://www.psychicbazaar.com/2-tarot-cards/genre/gothic/type/all/view/grid?n=24&utm_source=GoogleSearch&utm_medium=cpc&utm_campaign=uk-tarot--gothic-tarot&utm_term=bohemian%20gothic%20tarot&utm_content=33088202008&gclid=CN2LmteX2LkCFQKWtAodrSMASw")).success |
"Unsalvageable bad URI" !! "http://adserver.adtech.de/adlink|3.0" ! "Provided URI string [http://adserver.adtech.de/adlink|3.0] violates RFC 2396: [Illegal character in path at index 32: http://adserver.adtech.de/adlink|3.0]".fail |> {
(_, uri, expected) => {
ConversionUtils.stringToUri(uri) must_== expected
}
}
}
class ExplodeUriTest extends Specification with DataTables {
def is =
"Exploding URIs into their component pieces with explodeUri should work" ! e1
def e1 =
"SPEC NAME" || "URI" | "EXP. SCHEME" | "EXP. HOST" | "EXP. PORT" | "EXP. PATH" | "EXP. QUERY" | "EXP. FRAGMENT" |
"With path, qs & #" !! "http://www.psychicbazaar.com/oracles/119-psycards-deck.html?view=print#detail" ! "http" ! "www.psychicbazaar.com" ! 80 ! Some("/oracles/119-psycards-deck.html") ! Some("view=print") ! Some("detail") |
"With path & space in qs" !! "http://psy.bz/genre/all/type/all?utm_source=google&utm_medium=cpc&utm_term=buy%2Btarot&utm_campaign=spring_sale" ! "http" ! "psy.bz" ! 80 ! Some("/genre/all/type/all") ! Some("utm_source=google&utm_medium=cpc&utm_term=buy%2Btarot&utm_campaign=spring_sale") ! None |
"With path & no www" !! "http://snowplowanalytics.com/analytics/index.html" ! "http" ! "snowplowanalytics.com" ! 80 ! Some("/analytics/index.html") ! None ! None |
"Port specified" !! "http://www.nbnz.co.nz:440/login.asp" ! "http" ! "www.nbnz.co.nz" ! 440 ! Some("/login.asp") ! None ! None |
"HTTPS & #" !! "https://www.lancs.ac.uk#footer" ! "https" ! "www.lancs.ac.uk" ! 80 ! None ! None ! Some("footer") |
"www2 & trailing /" !! "https://www2.williamhill.com/" ! "https" ! "www2.williamhill.com" ! 80 ! Some("/") ! None ! None |
"Tab & newline in qs" !! "http://www.ebay.co.uk/sch/i.html?_from=R40&_trksid=m570.l2736&_nkw=%09+Clear+Quartz+Point+Rock+Crystal%0ADowsing+Pendulum" ! "http" ! "www.ebay.co.uk" ! 80 ! Some("/sch/i.html") ! Some("_from=R40&_trksid=m570.l2736&_nkw=%09+Clear+Quartz+Point+Rock+Crystal%0ADowsing+Pendulum") ! None |
"Tab & newline in path" !! "https://snowplowanalytics.com/analytic%0As/index%09nasty.html" ! "https" ! "snowplowanalytics.com" ! 80 ! Some("/analytic%0As/index%09nasty.html") ! None ! None |
"Tab & newline in #" !! "http://psy.bz/oracles/psycards.html?view=print#detail%09is%0Acorrupted" ! "http" ! "psy.bz" ! 80 ! Some("/oracles/psycards.html") ! Some("view=print") ! Some("detail%09is%0Acorrupted") |> {
(_, uri, scheme, host, port, path, query, fragment) => {
val actual = ConversionUtils.explodeUri(new URI(uri))
val expected = ConversionUtils.UriComponents(scheme, host, port, path, query, fragment)
actual must_== expected
}
}
}
class FixTabsNewlinesTest extends Specification with DataTables {
val SafeTab = " "
def is =
"Replacing tabs, newlines and control characters with fixTabsNewlines should work" ! e1
def e1 =
"SPEC NAME" || "INPUT STR" | "EXPECTED" |
"Empty string" !! "" ! None |
"String with true-tab" !! " " ! SafeTab.some |
"String with \\t" !! "\t" ! SafeTab.some |
"String with \\\\t" !! "\\\t" ! "\\%s".format(SafeTab).some |
"String with \\b" !! "\b" ! None |
"String ending in newline" !! "Hello\n" ! "Hello".some |
"String with control char" !! "\u0002" ! None |
"String with space" !! "\u0020" ! " ".some |
"String with black diamond" !! "�" ! "�".some |
"String with everything" !! "Hi \u0002�\u0020\bJo\t\u0002" ! "Hi%s� Jo%s".format(SafeTab, SafeTab).some |> {
(_, str, expected) =>
ConversionUtils.fixTabsNewlines(str) must_== expected
}
}
// TODO: note that we have some functionality tweaks planned.
// See comments on ConversionUtils.decodeBase64Url for details.
class DecodeBase64UrlTest extends Specification with DataTables with ValidationMatchers with ScalaCheck { def is =
"This is a specification to test the decodeBase64Url function" ^
p^
"decodeBase64Url should return failure if passed a null" ! e1^
"decodeBase64Url should not return failure on any other string" ! e2^
"decodeBase64Url should correctly decode valid Base64 (URL-safe) encoded strings" ! e3^
end
val FieldName = "e"
// Only way of getting a failure currently
def e1 =
ConversionUtils.decodeBase64Url(FieldName, null) must beFailing("Field [%s]: exception Base64-decoding [null] (URL-safe encoding): [null]".format(FieldName))
// No string creates a failure
def e2 =
check { (str: String) => ConversionUtils.decodeBase64Url(FieldName, str) must beSuccessful }
// Taken from:
// 1. Lua Tracker's base64_spec.lua
// 2. Manual tests of the JavaScript Tracker's trackUnstructEvent()
// 3. Misc edge cases worth checking
def e3 =
"SPEC NAME" || "ENCODED STRING" | "EXPECTED" |
"Lua Tracker String #1" !! "Sm9oblNtaXRo" ! "JohnSmith" |
"Lua Tracker String #2" !! "am9obitzbWl0aA" ! "john+smith" |
"Lua Tracker String #3" !! "Sm9obiBTbWl0aA" ! "John Smith" |
"Lua Tracker JSON #1" !! "eyJhZ2UiOjIzLCJuYW1lIjoiSm9obiJ9" ! """{"age":23,"name":"John"}""" |
"Lua Tracker JSON #2" !! "eyJteVRlbXAiOjIzLjMsIm15VW5pdCI6ImNlbHNpdXMifQ" ! """{"myTemp":23.3,"myUnit":"celsius"}""" |
"Lua Tracker JSON #3" !! "eyJldmVudCI6InBhZ2VfcGluZyIsIm1vYmlsZSI6dHJ1ZSwicHJvcGVydGllcyI6eyJtYXhfeCI6OTYwLCJtYXhfeSI6MTA4MCwibWluX3giOjAsIm1pbl95IjotMTJ9fQ" ! """{"event":"page_ping","mobile":true,"properties":{"max_x":960,"max_y":1080,"min_x":0,"min_y":-12}}""" |
"Lua Tracker JSON #4" !! "eyJldmVudCI6ImJhc2tldF9jaGFuZ2UiLCJwcmljZSI6MjMuMzksInByb2R1Y3RfaWQiOiJQQlowMDAzNDUiLCJxdWFudGl0eSI6LTIsInRzdGFtcCI6MTY3ODAyMzAwMH0" ! """{"event":"basket_change","price":23.39,"product_id":"PBZ000345","quantity":-2,"tstamp":1678023000}""" |
"JS Tracker JSON #1" !! "eyJwcm9kdWN0X2lkIjoiQVNPMDEwNDMiLCJjYXRlZ29yeSI6IkRyZXNzZXMiLCJicmFuZCI6IkFDTUUiLCJyZXR1cm5pbmciOnRydWUsInByaWNlIjo0OS45NSwic2l6ZXMiOlsieHMiLCJzIiwibCIsInhsIiwieHhsIl0sImF2YWlsYWJsZV9zaW5jZSRkdCI6MTU4MDF9" ! """{"product_id":"ASO01043","category":"Dresses","brand":"ACME","returning":true,"price":49.95,"sizes":["xs","s","l","xl","xxl"],"available_since$dt":15801}""" |
"Unescaped characters" !! "äöü - &" ! "" |
"Blank string" !! "" ! "" |> {
(_, str, expected) => {
ConversionUtils.decodeBase64Url(FieldName, str) must beSuccessful(expected)
}
}
}
class StringToDoublelikeTest extends Specification with DataTables with ValidationMatchers { def is =
"This is a specification to test the stringToDoublelike function" ^
p^
"stringToDoublelike should fail if the supplied String is not parseable as a number" ! e1^
"stringToDoublelike should convert numeric Strings to 'Double-like' Strings loadable by Redshift" ! e2^
"stringToDoublelike will alas *not* fail numbers having more significant digits than Redshift supports" ! e3^
end
val FieldName = "val"
def err: (String) => String = input => "Field [%s]: cannot convert [%s] to Double-like String".format(FieldName, input)
def e1 =
"SPEC NAME" || "INPUT STR" | "EXPECTED" |
"Empty string" !! "" ! err("") |
"Number with commas" !! "19,999.99" ! err("19,999.99") |
"Hexadecimal number" !! "0x54" ! err("0x54") |
"Bad sci. notation" !! "-7.51E^9" ! err("-7.51E^9") |
"German number" !! "1.000,3932" ! err("1.000,3932") |
"NaN" !! "NaN" ! err("NaN") |
"English string" !! "hi & bye" ! err("hi & bye") |
"Vietnamese name" !! "Trịnh Công Sơn" ! err("Trịnh Công Sơn") |> {
(_, str, expected) =>
ConversionUtils.stringToDoublelike(FieldName, str) must beFailing(expected)
}
def e2 =
"SPEC NAME" || "INPUT STR" | "EXPECTED" |
"Integer #1" !! "23" ! "23" |
"Integer #2" !! "23." ! "23" |
"Negative integer" !! "-2012103" ! "-2012103" |
"Arabic number" !! "٤٥٦٧.٦٧" ! "4567.67" |
"Floating point #1" !! "1999.99" ! "1999.99" |
"Floating point #2" !! "1999.00" ! "1999.00" |
"Floating point #3" !! "78694353.00001" ! "78694353.00001" |
"Floating point #4" !! "-78694353.00001" ! "-78694353.00001" |
"Sci. notation #1" !! "4.321768E3" ! "4321.768" |
"Sci. notation #2" !! "6.72E9" ! "6720000000" |
"Sci. notation #3" !! "7.51E-9" ! "0.00000000751" |> {
(_, str, expected) =>
ConversionUtils.stringToDoublelike(FieldName, str) must beSuccessful(expected)
}
val BigNumber = "78694235323.00000001" // Redshift only supports 15 significant digits for a Double
def e3 = ConversionUtils.stringToDoublelike(FieldName, BigNumber) must beSuccessful(BigNumber)
}
class StringToBooleanlikeJByte extends Specification with DataTables with ValidationMatchers { def is =
"This is a specification to test the stringToBooleanlikeJByte function" ^
p^
"stringToBooleanlikeJByte should fail if the supplied String is not parseable as a 1 or 0 JByte" ! e1^
"stringToBooleanlikeJByte should convert '1' or '0' Strings to 'Boolean-like' JBytes loadable by Redshift" ! e2^
end
val FieldName = "val"
def err: (String) => String = input => "Field [%s]: cannot convert [%s] to Boolean-like JByte".format(FieldName, input)
def e1 =
"SPEC NAME" || "INPUT STR" | "EXPECTED" |
"Empty string" !! "" ! err("") |
"Small number" !! "2" ! err("2") |
"Negative number" !! "-1" ! err("-1") |
"Floating point number" !! "0.0" ! err("0.0") |
"Large number" !! "19,999.99" ! err("19,999.99") |
"Text #1" !! "a" ! err("a") |
"Text #2" !! "0x54" ! err("0x54") |> {
(_, str, expected) =>
ConversionUtils.stringToBooleanlikeJByte(FieldName, str) must beFailing(expected)
}
def e2 =
"SPEC NAME" || "INPUT STR" | "EXPECTED" |
"True aka 1" !! "1" ! 1.toByte |
"False aka 0" !! "0" ! 0.toByte |> {
(_, str, expected) =>
ConversionUtils.stringToBooleanlikeJByte(FieldName, str) must beSuccessful(expected)
}
}
|
pkallos/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/utils/conversionUtilsTests.scala
|
Scala
|
apache-2.0
| 15,163
|
package redeyes.parser
import redeyes.doc._
import redeyes.util.Strings
import scalaz._
import scalaz.std.string._
import scalaz.std.vector._
import scalaz.syntax.monad._
import shapeless._
import shapeless.ops.hlist.{Comapped, ToList}
trait ParserModule { self =>
/**
* The abstract type representing the channels supported by this parser module.
*/
protected type Channel
protected implicit def ChannelEqual: Equal[Channel] // TODO: Move to functions
/**
* The abstract type representing an atom of information parsed from a channel.
* For example, in a text parser module, the atom might be a character.
*/
protected type Atom[C <: Channel, A]
/**
* Every parser module has to provide an atom algebra module.
*/
protected trait AtomAlgebra {
/**
* An atom representing the smallest unit of information that can be produced
* from the specified channel. This should not be "empty" but should actually
* represent consumption of some information.
*/
def anything[C <: Channel](channel: C): Atom[C, _]
/**
* Produces a parser which is the negation of the specified atom.
*/
def negate[C <: Channel, A](atom: Atom[C, A]): Parser[A]
/**
* Produces a parser which is the intersection of two atoms.
*/
def intersect[C <: Channel, A: Equal](a1: Atom[C, A], a2: Atom[C, A]): Parser[A]
}
protected def AtomAlgebra: AtomAlgebra
def generate[A](parser: Parser[A]): EphemeralStream[A]
def generate1[A](parser: Parser[A]): A = generate(parser).head()
/**
* The core parser abstraction, with a range of combinators.
*/
sealed trait Parser[A] {
import Parser.ParserEquivApplicative._
final def |> [B] (f: => A <=> B): Parser[B] = this <**> Pure(f)
final def <**> [B] (that: => Parser[A <=> B]): Parser[B] = Apply[A, B](Need(that), Need(this))
final def <~< [B] (that: => Parser[B]): Parser[A] = lift2[A, B, A](Equiv.id[A].leftfst(generate1(that)))(this, that)
final def >~> [B] (that: => Parser[B]): Parser[B] = lift2[A, B, B](Equiv.id[B].leftsnd(generate1(this)))(this, that)
final def <|> (that: => Parser[A]): Parser[A] = Or(Need(this), Need(that))
final def <&> (that: => Parser[A])(implicit equal: Equal[A]): Parser[A] = Intersect(Need(this), Need(that))
final def <~> [B](that: => Parser[B]): Parser[(A, B)] = Zip(Need(this), Need(that))
final def <+> (that: => Parser[A])(implicit m: Semigroup[A]): Parser[A] = Join(Need(this), Need(that))
final def const[B](k: B): Parser[B] = map(Equiv(to = a => k, from = b => generate1(this)))
final def maybeAs[B](implicit ev: A <:< B, A: scala.reflect.ClassTag[A]): Parser[Option[B]] = maybe.map(Equiv[Option[A], Option[B]](
to = oa => oa.map(ev),
from = ob => ob match {
case None => None
case Some(b) => if (A.runtimeClass.isAssignableFrom(b.getClass)) Some(b.asInstanceOf[A]) else None
}
))
final def as[B](implicit ev: A <:< B, A: scala.reflect.ClassTag[A]): Parser[B] = maybeAs[B].getOrFail
final def sepBy[B](sep: => Parser[B]): Parser[Vector[A]] = (this.one <+> (sep >~> this).many) <|>
this.maybe.map(Equiv[Option[A], Vector[A]](_.toVector, _.headOption))
final def sepBy1[B](sep: => Parser[B]): Parser[Vector[A]] = this.one <+> (sep >~> this).many
final def maybe : Parser[Option[A]] = atMost(1).map(Equiv[Vector[A], Option[A]](_.headOption, _.toVector))
final def many : Parser[Vector[A]] = Repeat(Need(this), None, None)
final def some : Parser[Vector[A]] = Repeat(Need(this), Some(1), None)
final def one: Parser[Vector[A]] = exactly(1)
final def exactly(count: Int) : Parser[Vector[A]] = Repeat(Need(this), Some(count), Some(count))
final def atLeast(min: Int) : Parser[Vector[A]] = Repeat(Need(this), Some(min), None)
final def atMost(max: Int) : Parser[Vector[A]] = Repeat(Need(this), None, Some(max))
final def between(min: Int, max: Int) : Parser[Vector[A]] = Repeat(Need(this), Some(min), Some(max))
final def filter(f: A => Boolean): Parser[A] = Filter(Need(this), f)
final def ^! [B: Documented, C: Documented](expected: B, unexpected: C): Parser[A] =
Described(Need(this), Documented[B].document(expected), Documented[C].document(unexpected))
final def ^! [B: Documented](expected: B): Parser[A] = Described(Need(this), Documented[B].document(expected), Doc.Empty)
final def unary_! : Parser[A] = Not(Need(negate), Need(this))
final def map[B](f: A <=> B): Parser[B] = Map(Need(this), f)
final def show(implicit s: Show[A]) = Cord(toString)
final def === (that: => Parser[A])(implicit equal: Equal[A]): Parser[Boolean] = Eq(Need(this), Need(that))
protected def negate: Parser[A]
}
implicit class ParserEquivSyntax[A, B](value: Parser[A <=> B]) {
@inline final def <*> (that: => Parser[A]): Parser[B] = that <**> value
}
implicit class ParserOptionSyntax[A](value: Parser[Option[A]]) {
def getOrFail: Parser[A] = self.getOrFail(value)
}
trait ParserInstances {
implicit def ParserShow[A: Show]: Show[Parser[A]] = Show.show(_.show)
def ParserAndSemigroup[A: Semigroup: Equal]: Semigroup[Parser[A]] = new Semigroup[Parser[A]] {
def append(a1: Parser[A], a2: => Parser[A]) = a1 <&> a2
}
def ParserOrSemigroup[A]: Semigroup[Parser[A]] = new Semigroup[Parser[A]] {
def append(a1: Parser[A], a2: => Parser[A]) = a1 <|> a2
}
implicit val ParserEquivApplicative = new EquivApplicative[Parser] {
def point[A](a: => A): Parser[A] = Pure(a)
def ap1[A, B](fa: => Parser[A])(f: => Parser[A <=> B]): Parser[B] = Apply(Need(f), Need(fa))
def map[A, B](fa: => Parser[A])(f: A <=> B): Parser[B] = Map(Need(fa), f)
      def zip2[A, B](fa: => Parser[A], fb: => Parser[B]): Parser[(A, B)] = self.zip2(fa, fb) // delegate to the module-level zip2; a bare zip2 call here would recurse forever
}
}
object Parser extends ParserInstances
protected def end(channel: Channel): Parser[Unit] = End(channel)
protected def atom[C <: Channel, A](channel: C, atom: Atom[C, A]): Parser[A] = AtomParser(channel, atom)
def zip2[A, B](pa: => Parser[A], pb: => Parser[B]): Parser[(A, B)] = Zip(Need(pa), Need(pb))
def constant[A](value: A): Parser[A] = Pure(value)
def lookAhead[A](parser: => Parser[A]): Parser[A] = LookAhead(Need(parser))
def fail[A, B: Documented](expected: B): Parser[A] = (Fail.as[A] ^! Documented[B].document(expected))
def fail[A, B: Documented, C: Documented](expected: B, unexpected: C): Parser[A] =
(Fail.as[A] ^! (Documented[B].document(expected), Documented[C].document(unexpected)))
def ifThenElse[A](predicate: => Parser[Boolean])(ifTrue: => Parser[A], ifFalse: => Parser[A]) = {
IfThenElse(Need(predicate), Need(ifTrue), Need(ifFalse))
}
def getOrFail[A](p: Parser[Option[A]]): Parser[A] = GetOrFail(Need(p))
def check[A, B](condition: => Parser[A])(then: => Parser[B], orElse: => Parser[B]): Parser[B] = (lookAhead(condition) >~> then) <|> orElse
def choice[A](p1: Parser[A], ps: Parser[A]*): Parser[A] = ps.foldLeft(p1)(_ <|> _)
def manyTill[A, B](parser: => Parser[A], limit: => Parser[B]): Parser[Vector[A]] = (lookAhead(!limit) >~> parser).many
def switch[A](catchAll: Parser[A])(cases: (Parser[Boolean], Parser[A])*): Parser[A] = {
if (cases.length == 0) catchAll
else {
val (predicate, ifTrue) = cases.head
ifThenElse(predicate)(ifTrue = ifTrue, ifFalse = switch(catchAll)(cases.tail: _*))
}
}
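  // For illustration (derived directly from the definition above, no new API assumed):
  //   switch(catchAll)(p1 -> a1, p2 -> a2)
  // unfolds into
  //   ifThenElse(p1)(ifTrue = a1, ifFalse = ifThenElse(p2)(ifTrue = a2, ifFalse = catchAll))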
protected final case class AtomParser[C <: Channel, A](channel: C, atom: Atom[C, A]) extends Parser[A] {
def negate : Parser[A] = AtomAlgebra.negate(atom)
override def toString = "AtomParser(" + atom + ")"
}
/**
* A parser that expects the end of input for the specified channel, and will
* fail if there is more input.
*/
protected final case class End(channel: Channel) extends Parser[Unit] {
def negate : Parser[Unit] = LookAhead(Need(AtomParser(channel, AtomAlgebra.anything(channel)))).const(Unit)
override def toString = "End(" + channel + ")"
}
/**
* A parser that consumes no input and always fails. Because this parser consumes
* no input, it is not necessary for it to be parameterized by the channel.
*/
protected final case object Fail extends Parser[Unit] {
def negate : Parser[Unit] = Pure(Unit)
def as[A]: Parser[A] = this.map(Equiv(to = unit => sys.error("impossible"), from = a => Unit)) // ???
override def toString = "Fail"
}
/**
* A parser that filters for elements that pass a specified predicate.
*/
protected final case class Filter[A](parser: Need[Parser[A]], f: A => Boolean) extends Parser[A] { self =>
def negate: Parser[A] = new Filter(parser, a => !f(a))
override def toString = "Filter(" + parser.value + ", " + f + ")"
}
protected final case class GetOrFail[A](parser: Need[Parser[Option[A]]]) extends Parser[A] { self =>
def negate: Parser[A] = GetOrFail[A](parser.map(p => !p))
override def toString = "GetOrFail(" + parser.value + ")"
}
/**
* A parser that consumes no input and always succeeds with the specified value.
*/
protected final case class Pure[A](value: A) extends Parser[A] {
def negate : Parser[A] = this
override def toString = "Pure(" + value + ")"
}
/**
   * A parser that runs a number of other parsers in some unspecified order.
*/
/* protected case class AllOf[L <: HList, O <: HList](parsers: L)(implicit val cm: Comapped.Aux[L, Parser, O], toList: ToList[L, Parser[_]]) extends Parser[O] {
def negate : Parser[O] = {
val parsers1: List[Parser[_]] = parsers.toList
???
}
override def toString = "AllOf(" + parsers + ")"
} */
/**
* A parser that consumes no input but produces the output of another parser.
*/
protected final case class LookAhead[A](parser: Need[Parser[A]]) extends Parser[A] {
def negate : Parser[A] = LookAhead(Need(!parser.value))
override def toString = "LookAhead(" + parser.value + ")"
}
/**
* A parser that determines the equivalence of two other parsers.
*
* Note: This parser is redundant and is included only for performance reasons.
*/
protected final case class Eq[A: Equal](left: Need[Parser[A]], right: Need[Parser[A]]) extends Parser[Boolean] {
def equal: Equal[A] = Equal[A]
def negate : Parser[Boolean] = Eq(left, right)(new Equal[A] {
def equal(a1: A, a2: A): Boolean = !Equal[A].equal(a1, a2)
})
override def toString = "Eq(" + left.value + ", " + right.value + ")"
}
/**
* A parser that joins together the output of two other parsers (of the same type)
* using a provided semigroup.
*
* FIXME: This parser is too powerful and needs restricting.
*/
protected final case class Join[A: Semigroup](left: Need[Parser[A]], right: Need[Parser[A]]) extends Parser[A] {
def semigroup: Semigroup[A] = Semigroup[A]
def flatten: Vector[Need[Parser[A]]] = {
def flatten0(v: Parser[A]): Need[Vector[Need[Parser[A]]]] = v match {
case x : Join[A] => for {
left <- x.left
right <- x.right
leftFlattened <- flatten0(left)
rightFlattened <- flatten0(right)
} yield leftFlattened ++ rightFlattened
case _ => Need(Vector(Need(v)))
}
flatten0(this).value
}
def negate : Parser[A] = {
val negatedLeft = left.map(p => !p)
val negatedRight = right.map(p => !p)
Join(negatedLeft, negatedRight) <|>
Join(left, negatedRight) <|>
Join(negatedLeft, right)
}
override def toString = "Join(" + left.value + ", " + right.value + ")"
}
protected final case class Intersect[A: Equal](left: Need[Parser[A]], right: Need[Parser[A]]) extends Parser[A] {
def equal: Equal[A] = Equal[A]
def flatten: Vector[Need[Parser[A]]] = {
def flatten0(v: Parser[A]): Need[Vector[Need[Parser[A]]]] = v match {
case x : Intersect[A] => for {
left <- x.left
right <- x.right
leftFlattened <- flatten0(left)
rightFlattened <- flatten0(right)
} yield leftFlattened ++ rightFlattened
case _ => Need(Vector(Need(v)))
}
flatten0(this).value
}
def negate : Parser[A] = {
val negatedLeft = left.map(p => !p)
val negatedRight = right.map(p => !p)
Intersect(negatedLeft, negatedRight) <|>
Intersect(left, negatedRight) <|>
Intersect(negatedLeft, right)
}
    override def toString = "Intersect(" + left.value + ", " + right.value + ")"
}
protected final case class Zip[A, B](left: Need[Parser[A]], right: Need[Parser[B]]) extends Parser[(A, B)] {
def negate: Parser[(A, B)] = {
val negatedLeft = left.map(p => !p)
val negatedRight = right.map(p => !p)
Zip(negatedLeft, negatedRight) <|>
Zip(left, negatedRight) <|>
Zip(negatedLeft, right)
}
override def toString = "Zip(" + left.value + ", " + right.value + ")"
}
/**
   * Parses a boolean, then, based on the value of that boolean, parses with one of two
* provided parsers. This parser provides the ability to do simple context-sensitive
* parsing. It is the only way of doing such parsing within this framework.
*/
protected final case class IfThenElse[A](predicate: Need[Parser[Boolean]], ifTrue: Need[Parser[A]], ifFalse: Need[Parser[A]]) extends Parser[A] {
def negate : Parser[A] = {
// TODO: Include other possibilities
IfThenElse(predicate, ifTrue.map(v => !v), ifFalse.map(v => !v))
}
override def toString = "IfThenElse(" + predicate.value + ", " + ifTrue.value + "," + ifFalse.value + ")"
}
/**
   * A parser that parses one of two alternatives. This parser is left-biased.
*
* TODO: Find some way to seal the hierarchy so we can delete GetOrFail
*/
protected final case class Or[A](left: Need[Parser[A]], right: Need[Parser[A]]) extends Parser[A] {
def flatten: Vector[Need[Parser[A]]] = {
def flatten0(v: Parser[A]): Need[Vector[Need[Parser[A]]]] = v match {
case x : Or[A] => for {
left <- x.left
leftMapped <- flatten0(left)
right <- x.right
rightMapped <- flatten0(right)
} yield leftMapped ++ rightMapped
case _ => Need(Vector(Need(v)))
}
flatten0(this).value
}
def negate : Parser[A] = ??? // TODO: !left.value <&> !right.value
override def toString = "Or(" + left.value + ", " + right.value + ")"
}
/**
* A parser that is the negation of some other parser.
*/
protected final case class Not[A](negated: Need[Parser[A]], original: Need[Parser[A]]) extends Parser[A] {
def negate : Parser[A] = original.value
override def toString = "Not(" + negated.value + ", " + original.value + ")"
}
/**
* A parser that repeats another parser within specified ranges.
*/
protected final case class Repeat[A](parser: Need[Parser[A]], min: Option[Int], max: Option[Int]) extends Parser[Vector[A]] {
def negate : Parser[Vector[A]] = ???
override def toString = "Repeat(" + parser.value + ", " + min + ", " + max + ")"
}
/**
* A parser that maps the value of one parser into another value using a provided function.
*/
protected final case class Map[A, B](parser: Need[Parser[A]], f: A <=> B) extends Parser[B] {
def negate : Parser[B] = Map(parser.map(v => !v), f)
override def toString = "Map(" + parser.value + ", " + f + ")"
}
/**
* A parser that applies a parser of a function to a parser of a value.
*/
protected final case class Apply[A, B](f: Need[Parser[A <=> B]], value: Need[Parser[A]]) extends Parser[B] {
def negate : Parser[B] = {
// Apply needs to parse f and then value to succeed. So we can produce
// elements not matched by this parser by negating f, value, or both.
Apply(f.map(p => !p), value.map(p => !p)) <|>
Apply(f, value.map(p => !p)) <|>
Apply(f.map(p => !p), value)
}
override def toString = "Apply(" + f + ", " + value.value + ")"
}
/**
* A parser that describes failure cases for another parser.
*/
protected final case class Described[A](parser: Need[Parser[A]], expected: Doc, unexpected: Doc) extends Parser[A] {
def negate : Parser[A] = Described(parser.map(p => !p), unexpected, expected)
override def toString = "Described(" + parser.value + ")"
}
}
|
redeyes/redeyes
|
src/main/scala/redeyes/parser/parser.scala
|
Scala
|
mit
| 16,643
|
package org.apache.spark.mllib.treelib.core
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._
import java.io._
import java.io.DataOutputStream
import java.io.FileOutputStream
abstract class TreeModel extends Serializable {
/**
* The index of target feature
*/
var yIndex : Int = -1;
/**
* The indexes of the features which were used to predict target feature
*/
var xIndexes : Set[Int] = Set[Int]()
/**
* all features information
*/
var fullFeatureSet : FeatureSet = new FeatureSet(List[Feature]())
/**
* The features which were used to build the tree, they were re-indexed from featureSet
*/
//var usefulFeatureSet : FeatureSet = new FeatureSet(List[Feature]())
var usefulFeatures : Set[Int] = null
/**
* the root node of the tree
*/
var tree : Node = null
var minsplit : Int = 10
var threshold : Double = 0.01
var maximumComplexity : Double = 0.01
var maxDepth : Int = 63
var yFeature: String = ""
var xFeatures : Set[Any] = Set[Any]()
/***
* Is the tree empty ?
*/
def isEmpty() = (tree == null)
/**
   * Whether the tree has been built completely
*/
var isComplete = false
/**
* The tree builder which created this model
*/
var treeBuilder : TreeBuilder = null
/******************************************************/
/* REGION FUNCTIONS */
/******************************************************/
/**
* Predict Y base on input features
*
     * @param record an array in which each element is the value of one input feature
     * @param ignoreBranchSet a set of branch IDs which won't be used for prediction (only their root nodes are used)
     * @return a predicted value
     * @throws Exception if the tree is empty
*/
def predict(record: Array[String], ignoreBranchSet: Set[BigInt] = Set[BigInt]()): String
/**
* Evaluate the accuracy of regression tree
     * @param input the input records (using the same delimiter as the training data set)
     * @param delimiter the delimiter of the training data
*/
def evaluate(input: RDD[String], delimiter : Char = ',')
/**
* Write the current tree model to file
*
* @param path where we want to write to
*/
def writeToFile(path: String) = {
val oos = new ObjectOutputStream(new FileOutputStream(path))
oos.writeObject(this)
oos.close
}
}
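/**
 * Illustrative counterpart sketch (not part of the original file): reading back a model
 * persisted by TreeModel.writeToFile, using the standard Java serialization API.
 * The object name is an assumption made purely for illustration.
 */
object TreeModelSketchIO {
  import java.io.{FileInputStream, ObjectInputStream}

  def readFromFile(path: String): TreeModel = {
    val ois = new ObjectInputStream(new FileInputStream(path))
    try ois.readObject().asInstanceOf[TreeModel]
    finally ois.close()
  }
}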
|
bigfootproject/spark-dectree
|
spark/mllib/src/main/scala/org/apache/spark/mllib/treelib/core/TreeModel.scala
|
Scala
|
apache-2.0
| 2,416
|
/** ********************************************************************************************
* Scaliapp
* Version 0.1
*
* The primary distribution site is
*
* http://scaliapp.alanrodas.com
*
* Copyright 2014 Alan Rodas Bonjour
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* *********************************************************************************************/
package com.alanrodas.scaliapp.core.runtime
/**
* Represents any value passed as an argument to a command.
*
 * This abstract class represents any form of value passed, whether
 * named or unnamed. It has two known subclasses,
* [[com.alanrodas.scaliapp.core.runtime.UnnamedValue UnnamedValue]] and
* [[com.alanrodas.scaliapp.core.runtime.NamedValue NamedValue]].
*
* @param value The value passed to the command
*/
abstract class AbstractValue(val value : String)
/**
* Represents an unnamed value passed to a command.
*
* This is a concrete implementation of
* [[com.alanrodas.scaliapp.core.runtime.AbstractValue AbstractValue]]
 * for unnamed values, that is, the values that are passed to a command
* that accepts multiple arguments.
*
* @param value The value passed to the command
*/
case class UnnamedValue(override val value : String) extends AbstractValue(value)
/**
* Represents a named value passed to a command.
*
* This is a concrete implementation of
* [[com.alanrodas.scaliapp.core.runtime.AbstractValue AbstractValue]]
 * for named values, that is, the values that are passed to a command
 * that accepts a fixed number of arguments.
*
* This class also provides accessors for the name of the value and
* a boolean stating if the value was passed as an argument or if it
* is the default value.
*
* @param name The name of the value passed to the command
* @param value The value passed to the command
* @param defined ''true'' if it was defined by the user, ''false'' otherwise
*/
case class NamedValue(name : String, override val value : String, defined : Boolean)
extends AbstractValue(value) {
/**
* Returns the value as an instance of the type ''T''
*
* @param converter The function to convert the String value to a ''T''
* @tparam T Any type argument that can be used
*/
def value[T](implicit converter : String => T) : T = value
}
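/**
 * Illustrative usage sketch (not part of the library): converting the raw String held by a
 * NamedValue into a typed value through the implicit-converter overload defined above.
 * The argument name and the converter are assumptions made purely for illustration.
 */
object NamedValueUsageSketch {
	implicit val stringToInt : String => Int = _.toInt // converter required by value[Int]

	val retries = NamedValue(name = "retries", value = "3", defined = true)
	val retriesAsInt : Int = retries.value[Int] // yields 3
}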
|
alanrodas/scaliapp
|
src/main/scala/com/alanrodas/scaliapp/core/runtime/values.scala
|
Scala
|
apache-2.0
| 2,813
|
/*
* Copyright 2008 Juha Komulainen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package solitarius.rules
import org.specs._
import solitarius.general._
import Utils.shuffled
import Rank._
object SpiderSpecification extends Specification {
"Pile" should {
"show only the topmost card" in {
val pile = new SpiderPile
val (card :: cards) = randomCards(6)
cards.reverse.foreach(pile.push)
pile.showOnlyTop()
pile.isEmpty must beFalse
pile.size mustEqual 5
pile.top mustEqual Some(cards.head)
pile.visibleCards mustEqual List(cards.head)
pile.visibleCount mustEqual 1
}
"be allowed to be empty" in {
val pile = new SpiderPile
pile.isEmpty must beTrue
pile.size mustEqual 0
pile.top mustEqual None
pile.visibleCards mustEqual Nil
pile.visibleCount mustEqual 0
}
"allow dropping cards on top" in {
val pile = new SpiderPile
val (card :: cards) = hearts(Five, Six, Eight, Jack, Ace, King)
cards.reverse.foreach(pile.push)
pile.showOnlyTop()
val seq = new DummySequence(card)
pile.canDrop(seq) must beTrue
pile.drop(seq)
pile.isEmpty must beFalse
pile.size mustEqual 6
pile.top mustEqual Some(card)
pile.visibleCards mustEqual List(card, cards.head)
pile.visibleCount mustEqual 2
}
"flip the new top card to be visible when top card is popped" in {
val pile = new SpiderPile
val cards = randomCards(6)
cards.reverse.foreach(pile.push)
pile.showOnlyTop()
val seq = pile.sequence(1)
seq must beSome[Sequence]
seq.get.toList mustEqual List(cards(0))
seq.get.removeFromOriginalPile()
pile.size mustEqual 5
pile.top mustEqual Some(cards(1))
pile.visibleCards mustEqual List(cards(1))
pile.visibleCount mustEqual 1
}
"support popping the last card" in {
val pile = new SpiderPile
val cards = randomCards(1)
pile.push(cards.head)
pile.visibleCards mustEqual cards
pile.visibleCount mustEqual 1
val seq = pile.sequence(1)
seq must beSome[Sequence]
seq.get.toList mustEqual List(cards.head)
seq.get.removeFromOriginalPile
pile.visibleCards must beEmpty
pile.visibleCount mustEqual 0
}
"support returning sequences of cards" in {
val pile = new SpiderPile
val cards = hearts(Five, Six, Seven, Nine, Jack, Ace, King)
cards.drop(3).reverse.foreach(pile.push)
pile.showOnlyTop()
cards.take(3).reverse.foreach(pile.push)
pile.longestDraggableSequence mustEqual 3
}
}
def hearts(ranks: Rank*) = ranks.map(new Card(_, Suit.Heart)).toList
def randomCards(count: Int) = Deck.shuffledCards.take(count)
class DummySequence(cards: Card*) extends Sequence(cards.toList) {
override def removeFromOriginalPile() { }
}
}
|
komu/solitarius
|
src/test/scala/solitarius/rules/SpiderSpecification.scala
|
Scala
|
apache-2.0
| 3,629
|
/**
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2011- SINTEF ICT
* Contact: SINTEF ICT <nicolas.ferry@sintef.no>
*
* Module: net.modelbased.sensapp
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
*/
package net.modelbased.sensapp.restful
import akka.actor.Actor
import akka.http._
import javax.ws.rs.core.MediaType
/**
* A resource handler
* @param pattern the URI schema that triggers this handler
* @param request the actual requested URI
*/
abstract class ResourceHandler(val pattern: URIPattern, val request: String)
extends Actor {
/**
* Contains the pass-by-uri parameters, declared in pattern
*/
protected val _params: Map[String, String] = pattern.extract(request)
/**
* The receive method is an indirection to Sensapp dispatcher
*/
def receive = {
case r: RequestMethod => dispatch(r)
case _ => throw new IllegalArgumentException
}
/**
* A handler is a function of RequestMethod to Boolean. It implements
   * the logic associated with a given method applied to a given resource
*/
protected type Handler = RequestMethod => Boolean
/**
* Bindings have to be provided by the developer to bind a HTTP verb
* to a given handler
*/
protected val _bindings: Map[String, Handler]
/**
   * The dispatch method retrieves the handler associated with a request
   * and applies it to the request.
*
* @param method the requestMethod received as invocation
*/
private def dispatch(method: RequestMethod): Boolean = {
val verb = method.request.getMethod()
_bindings.get(verb) match {
case Some(handler) => handleRequest(handler, method)
case None => handleNoHandler(verb, method)
}
}
/**
* Implements the behavior when no available handlers are found
*
* @param verb the unhandled HTTP verb
* @param method the request actually received
*/
private def handleNoHandler(verb: String, method: RequestMethod): Boolean = {
method.response.setContentType(MediaType.TEXT_PLAIN)
method NotImplemented ("[" + verb + "] is not supported yet!")
}
/**
* Apply a given handler to a given request.
*
   * Exceptions are forwarded to the user as a server error response
*
* @param lambda the handler to be applied
   * @param method the received request
*/
private def handleRequest(lambda: Handler, method: RequestMethod): Boolean = {
try { lambda(method) }
catch {
case e: Exception => {
method.response.setContentType(MediaType.TEXT_PLAIN)
val msg = e.getMessage()
method Error msg
}
}
}
}
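/**
 * Illustrative sketch only (not part of SensApp): one way a concrete resource could bind
 * HTTP verbs to handlers. The `OK` completion method is an assumption about the underlying
 * RequestMethod API; only NotImplemented and Error are visible in this file.
 */
class EchoResourceHandler(pattern: URIPattern, request: String)
  extends ResourceHandler(pattern, request) {

  protected val _bindings: Map[String, Handler] = Map(
    "GET" -> { method: RequestMethod =>
      method.response.setContentType(MediaType.TEXT_PLAIN)
      method OK ("params: " + _params.mkString(", "))
    }
  )
}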
|
SINTEF-9012/sensapp
|
_attic/net.modelbased.sensapp.restful/src/main/scala/net/modelbased/sensapp/restful/ResourceHandler.scala
|
Scala
|
lgpl-3.0
| 3,255
|
package org.mrgeo.utils
object Memory {
def used(): String = {
val bytes = Runtime.getRuntime.totalMemory() - Runtime.getRuntime.freeMemory()
format(bytes)
}
def free():String = {
format(Runtime.getRuntime.freeMemory)
}
def allocated():String = {
format(Runtime.getRuntime.totalMemory())
}
def total():String = {
format(Runtime.getRuntime.maxMemory())
}
def format(bytes: Long): String = {
val unit = 1024
if (bytes < unit) {
return bytes + "B"
}
val exp = (Math.log(bytes) / Math.log(unit)).toInt
val pre = "KMGTPE".charAt(exp - 1)
"%.1f%sB".format(bytes / Math.pow(unit, exp), pre)
}
}
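// Illustrative usage sketch (not part of MrGeo): the helpers above already return
// human-readable strings, e.g. Memory.format(500) == "500B" and Memory.format(1536) == "1.5KB".
object MemoryUsageSketch {
  def main(args: Array[String]): Unit = {
    println("used: " + Memory.used() + ", free: " + Memory.free() + ", max: " + Memory.total())
  }
}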
|
tjkrell/MrGEO
|
mrgeo-core/src/main/scala/org/mrgeo/utils/Memory.scala
|
Scala
|
apache-2.0
| 667
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
class BinaryComparisonSimplificationSuite extends PlanTest with PredicateHelper {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("AnalysisNodes", Once,
EliminateSubqueryAliases) ::
Batch("Infer Filters", Once,
InferFiltersFromConstraints) ::
Batch("Constant Folding", FixedPoint(50),
NullPropagation,
ConstantFolding,
BooleanSimplification,
SimplifyBinaryComparison,
PruneFilters) :: Nil
}
val nullableRelation = LocalRelation('a.int.withNullability(true))
val nonNullableRelation = LocalRelation('a.int.withNullability(false))
test("Preserve nullable exprs when constraintPropagation is false") {
withSQLConf(SQLConf.CONSTRAINT_PROPAGATION_ENABLED.key -> "false") {
val a = Symbol("a")
for (e <- Seq(a === a, a <= a, a >= a, a < a, a > a)) {
val plan = nullableRelation.where(e).analyze
val actual = Optimize.execute(plan)
val correctAnswer = plan
comparePlans(actual, correctAnswer)
}
}
}
test("Preserve non-deterministic exprs") {
val plan = nonNullableRelation
.where(Rand(0) === Rand(0) && Rand(1) <=> Rand(1)).analyze
val actual = Optimize.execute(plan)
val correctAnswer = plan
comparePlans(actual, correctAnswer)
}
test("Nullable Simplification Primitive: <=>") {
val plan = nullableRelation.select('a <=> 'a).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nullableRelation.select(Alias(TrueLiteral, "(a <=> a)")()).analyze
comparePlans(actual, correctAnswer)
}
test("Non-Nullable Simplification Primitive") {
val plan = nonNullableRelation
.select('a === 'a, 'a <=> 'a, 'a <= 'a, 'a >= 'a, 'a < 'a, 'a > 'a).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nonNullableRelation
.select(
Alias(TrueLiteral, "(a = a)")(),
Alias(TrueLiteral, "(a <=> a)")(),
Alias(TrueLiteral, "(a <= a)")(),
Alias(TrueLiteral, "(a >= a)")(),
Alias(FalseLiteral, "(a < a)")(),
Alias(FalseLiteral, "(a > a)")())
.analyze
comparePlans(actual, correctAnswer)
}
test("Expression Normalization") {
val plan = nonNullableRelation.where(
'a * Literal(100) + Pi() === Pi() + Literal(100) * 'a &&
DateAdd(CurrentDate(), 'a + Literal(2)) <= DateAdd(CurrentDate(), Literal(2) + 'a))
.analyze
val actual = Optimize.execute(plan)
val correctAnswer = nonNullableRelation.analyze
comparePlans(actual, correctAnswer)
}
test("SPARK-26402: accessing nested fields with different cases in case insensitive mode") {
val expId = NamedExpression.newExprId
val qualifier = Seq.empty[String]
val structType = StructType(
StructField("a", StructType(StructField("b", IntegerType, false) :: Nil), false) :: Nil)
val fieldA1 = GetStructField(
GetStructField(
AttributeReference("data1", structType, false)(expId, qualifier),
0, Some("a1")),
0, Some("b1"))
val fieldA2 = GetStructField(
GetStructField(
AttributeReference("data2", structType, false)(expId, qualifier),
0, Some("a2")),
0, Some("b2"))
// GetStructField with different names are semantically equal; thus, `EqualTo(fieldA1, fieldA2)`
// will be optimized to `TrueLiteral` by `SimplifyBinaryComparison`.
val originalQuery = nonNullableRelation.where(EqualTo(fieldA1, fieldA2))
val optimized = Optimize.execute(originalQuery)
val correctAnswer = nonNullableRelation.analyze
comparePlans(optimized, correctAnswer)
}
test("Simplify null and nonnull with filter constraints") {
val a = Symbol("a")
Seq(a === a, a <= a, a >= a, a < a, a > a).foreach { condition =>
val plan = nonNullableRelation.where(condition).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nonNullableRelation.analyze
comparePlans(actual, correctAnswer)
}
// infer filter constraints will add IsNotNull
Seq(a === a, a <= a, a >= a).foreach { condition =>
val plan = nullableRelation.where(condition).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nullableRelation.where('a.isNotNull).analyze
comparePlans(actual, correctAnswer)
}
Seq(a < a, a > a).foreach { condition =>
val plan = nullableRelation.where(condition).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nullableRelation.analyze
comparePlans(actual, correctAnswer)
}
}
test("Simplify nullable without constraints propagation") {
withSQLConf(SQLConf.CONSTRAINT_PROPAGATION_ENABLED.key -> "false") {
val a = Symbol("a")
Seq(And(a === a, a.isNotNull),
And(a <= a, a.isNotNull),
And(a >= a, a.isNotNull)).foreach { condition =>
val plan = nullableRelation.where(condition).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nullableRelation.where('a.isNotNull).analyze
comparePlans(actual, correctAnswer)
}
Seq(And(a < a, a.isNotNull), And(a > a, a.isNotNull))
.foreach { condition =>
val plan = nullableRelation.where(condition).analyze
val actual = Optimize.execute(plan)
val correctAnswer = nullableRelation.analyze
comparePlans(actual, correctAnswer)
}
}
}
}
|
maropu/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BinaryComparisonSimplificationSuite.scala
|
Scala
|
apache-2.0
| 6,819
|
package im.tox.antox.fragments
import java.io.{IOException, File}
import java.text.SimpleDateFormat
import java.util.Date
import android.app.{Dialog, Activity}
import android.content.{Context, Intent}
import android.graphics.{BitmapFactory, Bitmap}
import android.media.ThumbnailUtils
import android.net.Uri
import android.os.{Bundle, Environment}
import android.preference.PreferenceManager
import android.provider.MediaStore
import android.support.v4.app.DialogFragment
import android.support.v4.content.{CursorLoader, FileProvider}
import android.support.v7.app.AlertDialog
import android.util.Log
import android.view.View
import android.view.View.OnClickListener
import android.view.ViewGroup.LayoutParams
import android.widget.{Toast, Button, ImageView}
import im.tox.antox.data.{State, AntoxDB}
import im.tox.antox.tox.ToxSingleton
import im.tox.antox.transfer.FileUtils
import im.tox.antox.utils.Constants
import im.tox.antox.wrapper.BitmapUtils.RichBitmap
import im.tox.antox.wrapper.FileKind
import im.tox.antox.wrapper.FileKind.AVATAR
import im.tox.antoxnightly.R
// not a DialogFragment, because DialogFragments don't work with PreferenceActivity
class AvatarDialog(activity: Activity) {
val preferences = PreferenceManager.getDefaultSharedPreferences(activity)
def onActivityResult(requestCode: Int, resultCode: Int, data: Intent) {
if (resultCode == Activity.RESULT_OK) {
val name = preferences.getString("tox_id", "")
val avatarFile = new File(AVATAR.getStorageDir(activity), name)
if (requestCode == Constants.IMAGE_RESULT) {
val uri = data.getData
val filePathColumn = Array(MediaStore.MediaColumns.DATA, MediaStore.MediaColumns.DISPLAY_NAME)
val loader = new CursorLoader(activity, uri, filePathColumn, null, null, null)
val cursor = loader.loadInBackground()
if (cursor != null) {
if (cursor.moveToFirst()) {
val imageFile = new File(cursor.getString(cursor.getColumnIndexOrThrow(filePathColumn(0))))
if (!imageFile.exists()) return
FileUtils.copy(imageFile, avatarFile)
}
}
}
resizeAvatar(avatarFile) match {
case Some(bitmap) =>
FileUtils.writeBitmap(bitmap, Bitmap.CompressFormat.PNG, 0, avatarFile)
preferences.edit().putString("avatar", name).commit()
case None =>
avatarFile.delete()
        Toast.makeText(activity, activity.getResources.getString(R.string.avatar_too_large_error), Toast.LENGTH_SHORT).show()
}
val db = new AntoxDB(activity)
db.setAllFriendReceivedAvatar(false)
db.close()
State.transfers.updateSelfAvatar(activity)
}
}
def resizeAvatar(avatar: File): Option[Bitmap] = {
val rawBitmap = BitmapFactory.decodeFile(avatar.getPath)
val cropDimension =
if (rawBitmap.getWidth >= rawBitmap.getHeight) {
rawBitmap.getHeight
} else {
rawBitmap.getWidth
}
var bitmap = ThumbnailUtils.extractThumbnail(rawBitmap, cropDimension, cropDimension)
val MAX_DIMENSIONS = 256
val MIN_DIMENSIONS = 16
var currSize = MAX_DIMENSIONS
while (currSize >= MIN_DIMENSIONS && bitmap.getSizeInBytes > Constants.MAX_AVATAR_SIZE) {
bitmap = Bitmap.createScaledBitmap(bitmap, currSize, currSize, false)
currSize /= 2
}
if (bitmap.getSizeInBytes > Constants.MAX_AVATAR_SIZE) {
None
} else {
Some(bitmap)
}
}
def refreshAvatar(avatarView: ImageView): Unit = {
val avatar = AVATAR.getAvatarFile(preferences.getString("avatar", ""), activity)
if (avatar.isDefined && avatar.get.exists()) {
avatarView.setImageURI(Uri.fromFile(avatar.get))
} else {
avatarView.setImageResource(R.drawable.ic_action_contact)
}
}
var mDialog: Option[Dialog] = None
def show(): Unit = {
val inflator = activity.getLayoutInflater
val view = inflator.inflate(R.layout.dialog_avatar, null)
mDialog = Some(new AlertDialog.Builder(activity, R.style.AppCompatAlertDialogStyle)
.setView(view).create())
val photoButton = view.findViewById(R.id.avatar_takephoto).asInstanceOf[Button]
val fileButton = view.findViewById(R.id.avatar_pickfile).asInstanceOf[Button]
photoButton.setOnClickListener(new View.OnClickListener {
override def onClick(v: View): Unit = {
val cameraIntent = new Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE)
if (cameraIntent.resolveActivity(activity.getPackageManager) != null) {
val fileName = preferences.getString("tox_id", "")
try {
val file = new File(AVATAR.getStorageDir(activity), fileName)
cameraIntent.putExtra(MediaStore.EXTRA_OUTPUT, Uri.fromFile(file))
activity.startActivityForResult(cameraIntent, Constants.PHOTO_RESULT)
} catch {
case e: IOException => e.printStackTrace()
}
} else {
          Toast.makeText(activity, activity.getResources.getString(R.string.no_camera_intent_error), Toast.LENGTH_SHORT).show()
}
}
})
fileButton.setOnClickListener(new OnClickListener {
override def onClick(v: View): Unit = {
val intent = new Intent(Intent.ACTION_PICK, android.provider.MediaStore.Images.Media.EXTERNAL_CONTENT_URI)
activity.startActivityForResult(intent, Constants.IMAGE_RESULT)
}
})
refreshAvatar(view.findViewById(R.id.avatar_image).asInstanceOf[ImageView])
if (mDialog.get.isShowing) close()
mDialog.get.show()
}
def close(): Unit = {
mDialog.foreach(_.cancel())
}
}
|
Astonex/Antox
|
app/src/main/scala/im/tox/antox/fragments/AvatarDialog.scala
|
Scala
|
gpl-3.0
| 5,589
|
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Fri Jul 1 14:01:05 EDT 2016
* @see LICENSE (MIT style license file).
*/
package scalation.util
import scala.collection.mutable.{AbstractMap, Map, MapLike}
import scala.reflect.ClassTag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BpTreeMap` class provides B+Tree maps. B+Trees are used as multi-level
* index structures that provide efficient access for both point queries and range
* queries.
* @see docs.scala-lang.org/overviews/collections/concrete-mutable-collection-classes.html
* @see www.scala-lang.org/api/current/#scala.collection.mutable.MapLike
*------------------------------------------------------------------------------
 * @param half the number of keys per node ranges from 'half' to '2 * half'
*/
class BpTreeMap [K <% Ordered [K]: ClassTag, V: ClassTag] (half: Int = 2)
extends AbstractMap [K, V] with Map [K, V] with MapLike [K, V, BpTreeMap [K, V]] with Serializable
{
/** The debug flag for tracking node splits
*/
private val DEBUG = true
/** The maximum number of keys per node
*/
private val mx = 2 * half
/** The root of 'this' B+Tree
*/
private var root = new Node [K] (true, half)
/** The first leaf node in 'this' B+Tree map
*/
private val firstLeaf = root
/** The counter for the number of nodes accessed (for performance testing)
*/
private var _count = 0
/** The counter for the number of keys in 'this' B+Tree map
*/
private var keyCount = 0
// P U B L I C M E T H O D S -------------------------------------------
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Put the key-value pair 'kv' in 'this' B+Tree map, returning a reference to
* the updated tree.
* @param kv the key-value pair to insert
*/
def += (kv: (K, V)): BpTreeMap.this.type = { keyCount += 1; insert (kv._1, kv._2, root); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove the key-value pair with key 'key' from 'this' B+Tree map, returning
* a reference to the updated tree.
* @param key the key to remove
*/
def -= (key: K): BpTreeMap.this.type = throw new NoSuchMethodException ("-= method not yet supported")
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare two keys.
* @param key1 the first key
* @param key2 the second key
*/
def compare (key1: K, key2: K): Int = key1 compare key2
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the total number of nodes accessed.
*/
def count: Int = _count
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return an empty B+Tree map.
*/
override def empty: BpTreeMap [K, V] = new BpTreeMap [K, V] (half)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply function 'f' to each key-value pair in 'this' B+Tree map.
* @param f the function to be applied
*/
override def foreach [U] (f: ((K, V)) => U) { for (kv <- iterator) f(kv) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Optionally return the value in 'this' B+Tree map associated with the
* key 'key'. Use 'apply' method to remove optional.
* @param key the key used for look up
*/
def get (key: K): Option [V] = Option (find (key, root))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create an iterator over the key-value pairs in 'this' B+Tree map.
* The iteration proceeds over the leaf level of the tree.
*/
def iterator: Iterator [(K, V)] =
{
var nn = firstLeaf
var ii = 0
new Iterator [(K, V)] {
def hasNext: Boolean = nn != null
def next: (K, V) =
{
val kv = (nn.key(ii), nn.ref(ii).asInstanceOf [V])
if (ii < nn.nKeys - 1) ii += 1 else { ii = 0; nn = nn.ref(mx).asInstanceOf [Node [K]] }
kv
} // next
} // Iterator
} // iterator
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the size of 'this' B+Tree map.
*/
override def size: Int = keyCount
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Print the keys in 'this' B+Tree map.
*/
def printTree () { println ("BpTreeMap"); printT (root, 0); println ("-" * 50) }
// P R I V A T E M E T H O D S -----------------------------------------
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Print 'this' B+Tree map using a preorder traversal and indenting each level.
* @param n the current node to print
* @param level the current level in the B+Tree
*/
private def printT (n: Node [K], level: Int)
{
println ("-" * 50)
        print ("\t" * level + "[ . ")
for (i <- 0 until n.nKeys) print (n.key(i) + " . ")
println ("]")
if (! n.isLeaf) for (j <- 0 to n.nKeys) printT (n.asInstanceOf [Node [K]].ref(j).asInstanceOf [Node [K]], level + 1)
} // printT
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Recursive helper method for finding key 'key' in 'this' B+tree map.
* @param key the key to find
* @param n the current node
*/
private def find (key: K, n: Node [K]): V =
{
_count += 1
val ip = n.find (key)
if (n.isLeaf) {
if (ip < n.nKeys && key == n.key(ip)) n.ref(ip).asInstanceOf [V]
else null.asInstanceOf [V]
} else {
find (key, n.ref(ip).asInstanceOf [Node [K]])
} // if
} // find
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Recursive helper method for inserting 'key' and 'ref' into 'this' B+tree map.
* @param key the key to insert
* @param ref the value/node to insert
* @param n the current node
*/
private def insert (key: K, ref: V, n: Node [K]): (K, Node [K]) =
{
var kd_rt: (K, Node [K]) = null
if (n.isLeaf) { // handle leaf node
kd_rt = add (n, key, ref)
if (kd_rt != null) {
if (n != root) return kd_rt
root = new Node (root, kd_rt._1, kd_rt._2, half)
} // if
} else { // handle internal node
kd_rt = insert (key, ref, n.ref(n.find (key)).asInstanceOf [Node [K]])
if (kd_rt != null) {
kd_rt = addI (n, kd_rt._1, kd_rt._2)
if (kd_rt != null && n == root) root = new Node (root, kd_rt._1, kd_rt._2, half)
} // if
} // if
kd_rt
} // insert
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add a new key 'k' and value 'v' into leaf node 'n'. If it is already full,
* a 'split' will be triggered, in which case the divider key and new
* right sibling node are returned.
* @param n the current node
* @param k the new key
* @param v the new left value
*/
private def add (n: Node [K], k: K, v: Any): (K, Node [K]) =
{
var kd_rt: (K, Node [K]) = null // divider key, right sibling
var split = false
if (n.isFull) {
split = true
if (DEBUG) println ("before leaf split: n = " + n)
kd_rt = n.split () // split n -> r & rt
if (DEBUG) println ("after leaf split: n = " + n + "\\nkd_rt = " + kd_rt)
if (k > n.key(n.nKeys - 1)) {
kd_rt._2.wedge (k, v, kd_rt._2.find (k), true) // wedge into right sibling
return kd_rt
} // if
} // if
n.wedge (k, v, n.find (k), true) // wedge into current node
if (split) kd_rt else null
} // add
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add a new key 'k' and value 'v' into internal node 'n'. If it is already
* full, a 'split' will be triggered, in which case the divider key and new
* right sibling node are returned.
* @param n the current node
* @param k the new key
* @param v the new left value
*/
private def addI (n: Node [K], k: K, v: Any): (K, Node [K]) =
{
var kd_rt: (K, Node [K]) = null // divider key, right sibling
var split = false
if (n.isFull) {
split = true
if (DEBUG) println ("before internal split: n = " + n)
kd_rt = n.split () // split n -> n & rt
n.nKeys -= 1 // remove promoted largest left key
if (DEBUG) println ("after internal split: n = " + n + "\\nkd_rt = " + kd_rt)
if (k > n.key(n.nKeys - 1)) {
kd_rt._2.wedge (k, v, kd_rt._2.find (k), false) // wedge into right sibling
return kd_rt
} // if
} // if
n.wedge (k, v, n.find (k), false) // wedge into current node
if (split) kd_rt else null
} // addI
} // BpTreeMap class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BpTreeMapTest` object is used to test the `BpTreeMap` class by inserting
* increasing key values.
* > run-main scalation.util.BpTreeMapTest
*/
object BpTreeMapTest extends App
{
val tree = new BpTreeMap [Int, Int] ()
val totKeys = 26
for (i <- 1 until totKeys by 2) {
tree.put (i, i * i)
tree.printTree ()
println ("=" * 50)
} // for
for (i <- 0 until totKeys) println ("key = " + i + " value = " + tree.get(i))
println ("-" * 50)
for (it <- tree.iterator) println (it)
println ("-" * 50)
tree.foreach (println (_))
println ("-" * 50)
println ("size = " + tree.size)
println ("Average number of nodes accessed = " + tree.count / totKeys.toDouble)
} // BpTreeMapTest object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BpTreeMapTest2` object is used to test the `BpTreeMap` class by inserting
* random key values.
* > run-main scalation.util.BpTreeMapTest2
*/
object BpTreeMapTest2 extends App
{
val tree = new BpTreeMap [Int, Int] ()
val totKeys = 50
val mx = 10 * totKeys
// for unique random integers
//    import scalation.random.RandiU0                          // commented out due to package dependency
// val stream = 2
// val rng = RandiU0 (mx, stream)
// for (i <- 1 until totKeys) tree.put (rng.iigen (mx), i * i)
// for random integers
import java.util.Random
val seed = 1
val rng = new Random (seed)
for (i <- 1 until totKeys) tree.put (rng.nextInt (mx), i * i)
tree.printTree ()
println ("-" * 50)
println ("size = " + tree.size)
println ("Average number of nodes accessed = " + tree.count / totKeys.toDouble)
} // BpTreeMapTest2 object
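//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BpTreeMapExample` object is an added illustrative sketch (not part of the
 *  original source). It shows that iteration follows the linked leaf level, so
 *  keys come back in ascending order regardless of insertion order, and that a
 *  missed lookup surfaces as 'None'.
 *  > run-main scalation.util.BpTreeMapExample
 */
object BpTreeMapExample extends App
{
    val tree = new BpTreeMap [Int, String] ()
    for (k <- Seq (42, 7, 19, 3, 28)) tree.put (k, "v" + k)    // insert keys out of order
    for ((k, v) <- tree.iterator) println (s"$k -> $v")        // prints keys 3, 7, 19, 28, 42 in order
    println ("get (19)  = " + tree.get (19))                   // Some(v19)
    println ("get (100) = " + tree.get (100))                  // None
} // BpTreeMapExample object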
|
scalation/fda
|
scalation_1.2/src/main/scala/scalation/util/BpTreeMap.scala
|
Scala
|
mit
| 11,622
|
val a: (Int) => Int = _.floatValue
//False
|
ilinum/intellij-scala
|
testdata/typeConformance/basic/FunctionFalseConformance.scala
|
Scala
|
apache-2.0
| 42
|
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.view.settings
import com.esofthead.mycollab.common.UrlTokenizer
import com.esofthead.mycollab.core.arguments.NumberSearchField
import com.esofthead.mycollab.eventmanager.EventBusFactory
import com.esofthead.mycollab.module.project.domain.criteria.ProjectRoleSearchCriteria
import com.esofthead.mycollab.module.project.events.ProjectEvent
import com.esofthead.mycollab.module.project.view.ProjectUrlResolver
import com.esofthead.mycollab.module.project.view.parameters.{ProjectRoleScreenData, ProjectScreenData}
import com.esofthead.mycollab.vaadin.mvp.PageActionChain
/**
* @author MyCollab Ltd
* @since 5.0.9
*/
class RoleUrlResolver extends ProjectUrlResolver {
this.addSubResolver("list", new ListUrlResolver())
this.addSubResolver("preview", new PreviewUrlResolver())
private class ListUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val projectId: Int = new UrlTokenizer(params(0)).getInt
val roleSearchCriteria: ProjectRoleSearchCriteria = new ProjectRoleSearchCriteria
roleSearchCriteria.setProjectId(new NumberSearchField(projectId))
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId),
new ProjectRoleScreenData.Search(roleSearchCriteria))
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
private class PreviewUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val token: UrlTokenizer = new UrlTokenizer(params(0))
val projectId: Int = token.getInt
val roleId: Int = token.getInt
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId),
new ProjectRoleScreenData.Read(roleId))
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
}
|
uniteddiversity/mycollab
|
mycollab-web/src/main/scala/com/esofthead/mycollab/module/project/view/settings/RoleUrlResolver.scala
|
Scala
|
agpl-3.0
| 2,715
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.builder
import com.twitter.finagle.stats.{StatsReceiver, OstrichStatsReceiver}
import com.twitter.finagle.tracing.{Tracer, NullTracer}
import com.twitter.finagle.exception
import com.twitter.logging.config._
import com.twitter.logging.{ConsoleHandler, Logger, LoggerFactory}
import com.twitter.ostrich.admin._
import com.twitter.util.{Timer, JavaTimer}
import java.net.{InetSocketAddress, InetAddress}
import scala.util.matching.Regex
/**
* Base builder for a Zipkin service
*/
case class ZipkinServerBuilder(
serverPort : Int,
adminPort : Int,
serverAddress : InetAddress = InetAddress.getLocalHost,
loggers : List[LoggerFactory] = List(LoggerFactory(level = Some(Level.DEBUG), handlers = List(ConsoleHandler()))),
adminStatsNodes : List[StatsFactory] = List(StatsFactory(reporters = List(TimeSeriesCollectorFactory()))),
adminStatsFilters : List[Regex] = List.empty,
statsReceiver : StatsReceiver = new OstrichStatsReceiver,
tracerFactory : Tracer.Factory = NullTracer.factory,
timer : Timer = new JavaTimer(true),
exceptionMonitorFactory : exception.MonitorFactory = exception.NullMonitorFactory
) extends Builder[(RuntimeEnvironment) => Unit] {
def serverPort(p: Int) : ZipkinServerBuilder = copy(serverPort = p)
def adminPort(p: Int) : ZipkinServerBuilder = copy(adminPort = p)
def serverAddress(a: InetAddress) : ZipkinServerBuilder = copy(serverAddress = a)
def loggers(l: List[LoggerFactory]) : ZipkinServerBuilder = copy(loggers = l)
def statsReceiver(s: StatsReceiver) : ZipkinServerBuilder = copy(statsReceiver = s)
def tracerFactory(t: Tracer.Factory) : ZipkinServerBuilder = copy(tracerFactory = t)
def exceptionMonitorFactory(h: exception.MonitorFactory) : ZipkinServerBuilder
= copy(exceptionMonitorFactory = h)
def timer(t: Timer) : ZipkinServerBuilder = copy(timer = t)
def addLogger(l: LoggerFactory) : ZipkinServerBuilder = copy(loggers = loggers :+ l)
def addAdminStatsNode(n: StatsFactory): ZipkinServerBuilder = copy(adminStatsNodes = adminStatsNodes :+ n)
def addAdminStatsFilter(f: Regex) : ZipkinServerBuilder = copy(adminStatsFilters = adminStatsFilters :+ f)
private lazy val adminServiceFactory: AdminServiceFactory =
AdminServiceFactory(
httpPort = adminPort,
statsNodes = adminStatsNodes,
statsFilters = adminStatsFilters
)
lazy val socketAddress = new InetSocketAddress(serverAddress, serverPort)
var adminHttpService: Option[AdminHttpService] = None
def apply() = (runtime: RuntimeEnvironment) => {
Logger.configure(loggers)
adminHttpService = Some(adminServiceFactory(runtime))
}
}
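/**
 * Illustrative sketch (added example, not part of the original Zipkin source):
 * because the setters above are copy-based, a customised builder can be derived
 * without mutation. The port numbers and regex below are assumptions.
 */
object ZipkinServerBuilderExample {
  val builder: ZipkinServerBuilder =
    ZipkinServerBuilder(serverPort = 9410, adminPort = 9900)
      .addAdminStatsFilter("""^srv/.*""".r)   // append a regex to the admin stats filters
      .timer(new JavaTimer(true))             // swap in a daemon-threaded timer
}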
|
devcamcar/zipkin
|
zipkin-common/src/main/scala/com/twitter/zipkin/builder/ZipkinServerBuilder.scala
|
Scala
|
apache-2.0
| 3,608
|
/*
* Copyright 2012 Pascal Voitot (@mandubian)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.specs2.mutable._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import org.specs2.specification.{Step, Fragments}
import scala.concurrent._
import scala.concurrent.duration.Duration
// import play.api.libs.json._
// import play.api.libs.functional._
// import play.api.libs.functional.syntax._
import datomisca.executioncontext.ExecutionContextHelper._
class ShapotomicSpec extends Specification {
sequential
import shapeless._
import datomisca._
import Datomic._
import shapotomic._
// Datomic URI definition
val uri = "datomic:mem://shapotomisca"
// Datomic Connection as an implicit in scope
implicit lazy val conn = Datomic.connect(uri)
// Koala Schema
object Koala {
object ns {
val koala = Namespace("koala")
}
// schema attributes
val name = Attribute(ns.koala / "name", SchemaType.string, Cardinality.one).withDoc("Koala's name")
val age = Attribute(ns.koala / "age", SchemaType.long, Cardinality.one).withDoc("Koala's age")
val trees = Attribute(ns.koala / "trees", SchemaType.string, Cardinality.many).withDoc("Koala's trees")
// Draft playing with Datomisca and Shapeless Fields/Records
val fieldName = DField(name)
val fieldAge = DField(age)
val fieldTrees = DField(trees)
// the schema in HList form
val schema = name :: age :: trees :: HNil
// the datomic facts corresponding to schema
    // (need to specify the upper type for the shapeless conversion to a list)
val txData = schema.toList[Operation]
}
def startDB = {
println(s"Creating DB with uri $uri: "+ createDatabase(uri))
Await.result(
Datomic.transact(Koala.txData),
Duration("2 seconds")
)
}
def stopDB = {
deleteDatabase(uri)
println("Deleted DB")
defaultExecutorService.shutdownNow()
}
override def map(fs: => Fragments) = Step(startDB) ^ fs ^ Step(stopDB)
"shapotomic" should {
"convert HList to Datomic Facts & Datomic Entities from HList" in {
// creates a Temporary ID & keeps it for resolving entity after insertion
val id = DId(Partition.USER)
// creates an HList entity
val hListEntity =
id :: "kaylee" :: 3L ::
Set( "manna_gum", "tallowwood" ) ::
HNil
      // builds the Datomisca entity facts, statically checking the HList against the schema at compile time
val txData = hListEntity.toAddEntity(Koala.schema)
// inserts data into Datomic
val tx = Datomic.transact(txData) map { tx =>
// resolves real DEntity from temporary ID
val e = Datomic.resolveEntity(tx, id)
// rebuilds HList entity from DEntity statically typed by schema
// Explicitly typing the val to show that the compiler builds the right HList from schema
val postHListEntity: Long :: String :: Long :: Set[String] :: HNil = e.toHList(Koala.schema)
postHListEntity must beEqualTo( e.id :: "kaylee" :: 3L :: Set( "manna_gum", "tallowwood" ) :: HNil )
}
Await.result(tx, Duration("3 seconds"))
success
}
"Draft test: Field/Record" in {
import Record._
val koala =
Koala.fieldName -> "kaylee" ::
Koala.fieldAge -> 3L ::
Koala.fieldTrees -> Set( "manna_gum", "tallowwood" ) ::
HNil
println("name:"+koala.get(Koala.fieldName))
println("age:"+koala.get(Koala.fieldAge))
success
}
}
}
|
mandubian/shapotomic
|
src/test/scala/ShapotomicTestSpec.scala
|
Scala
|
apache-2.0
| 4,142
|
object Initialization1 {
case class InitA() {
val x = y
val y = 0
}
}
|
epfl-lara/stainless
|
frontends/benchmarks/extraction/invalid/Initialization1.scala
|
Scala
|
apache-2.0
| 82
|
package ru.dgolubets.jsmoduleloader.internal
import java.io.{BufferedReader, InputStreamReader}
import java.nio.CharBuffer
import scala.util.Try
/**
 * Internal helper for reading classpath resources.
*/
private[jsmoduleloader] object Resource {
/**
* Reads resource string.
* @param resourceName Name of the resource
* @return
*/
def readString(resourceName: String, resourceClass: Class[_] = this.getClass): Try[String] = Try {
val resource = resourceClass.getResourceAsStream(resourceName)
if(resource == null)
throw new Exception(s"Resource was not found: $resourceName")
val reader = new BufferedReader(new InputStreamReader(resource))
try {
val stringBuilder = new StringBuilder()
val buffer = CharBuffer.allocate(1024)
      while (reader.read(buffer) > 0) {
        buffer.flip()
        stringBuilder.appendAll(buffer.array(), 0, buffer.remaining())
        buffer.clear() // reset position/limit so the next read can use the buffer's full capacity
      }
stringBuilder.result()
}
finally {
reader.close()
}
}
}
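/**
 * Illustrative usage sketch (added example, not part of the original source).
 * readString returns a Try, so a missing resource surfaces as a Failure rather
 * than a null or an uncaught exception. The resource name is an assumption.
 */
private[jsmoduleloader] object ResourceExample {
  import scala.util.{Failure, Success}
  def main(args: Array[String]): Unit =
    Resource.readString("/example.js") match {
      case Success(text) => println(s"loaded ${text.length} characters")
      case Failure(err)  => println(s"could not load resource: ${err.getMessage}")
    }
}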
|
DGolubets/js-module-loader
|
src/main/scala/ru/dgolubets/jsmoduleloader/internal/Resource.scala
|
Scala
|
mit
| 1,005
|
package com.googlecode.kanbanik.api
import java.util
import com.googlecode.kanbanik.commands.ExecuteStatisticsCommand
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.kanbanik.statistics
import org.scalatest.FlatSpec
import net.liftweb.json._
import com.googlecode.kanbanik.dtos._
@RunWith(classOf[JUnitRunner])
class StatisticsLearningTest extends FlatSpec {
implicit val formats = DefaultFormats
"statistics" should "be possible to call it" in {
true
// val fullDescriptor = new util.HashMap[String, Object]()
// val resultDescriptor = new util.HashMap[String, Object]()
// val filterType = new util.HashMap[String, Object]()
// val resultDescriptors = new util.ArrayList[Object]()
//
// fullDescriptor.put(":reduce-function", ":merge")
// filterType.put(":eventType", "TaskDeleted")
// resultDescriptor.put(":function", ":cnt")
// resultDescriptor.put(":filter", filterType)
// resultDescriptors.add(resultDescriptor)
// fullDescriptor.put(":result-descriptors", resultDescriptors)
////
//// {:reduce-function :merge
//// :result-descriptors [{
//// :filter {:eventType "TaskDeleted"}
//// :function :pass
//// }]}
//
// val x = statistics.execute(fullDescriptor, 3600000)
// print(x)
}
it should "should parse the data properly" in {
val json = parse("""
{"reduceFunction": ":merge",
"timeframe": 100,
"resultDescriptors": [
{
"filter": {"example": {"eventType": "TaskDeleted"}}
}
]}
""")
val res = json.extract[AnalyzeDescriptorDto]
assert(res.reduceFunction === ":merge")
val cmd = new ExecuteStatisticsCommand()
val x = statistics.execute(cmd.toJDescriptor(res), 600000)
print(x)
}
}
|
kanbanik/kanbanik
|
kanbanik-server/src/test/scala/com/googlecode/kanbanik/api/StatisticsLearningTest.scala
|
Scala
|
apache-2.0
| 1,949
|
package controllers
import java.util.UUID
import com.hbc.svc.sundial.v2
import com.hbc.svc.sundial.v2.models._
import dao.SundialDao
import model._
import play.api.Logger
import util.Conversions._
object ModelConverter {
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// PROCESSES
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//TODO This will be RBAR if we load a lot of processes - maybe have a loadProcessesAndTasks DAO method?
def toExternalProcess(process: model.Process)(
implicit dao: SundialDao): v2.models.Process = {
val tasks = dao.processDao.loadTasksForProcess(process.id)
v2.models.Process(process.id,
process.processDefinitionName,
process.startedAt,
toExternalProcessStatus(process.status),
tasks.map(toExternalTask))
}
def toExternalProcessStatus(
status: model.ProcessStatus): v2.models.ProcessStatus = status match {
case model.ProcessStatus.Running() => v2.models.ProcessStatus.Running
case model.ProcessStatus.Succeeded(_) => v2.models.ProcessStatus.Succeeded
case model.ProcessStatus.Failed(_) => v2.models.ProcessStatus.Failed
}
//TODO This will be RBAR if we load a lot of tasks – maybe load all the logs/metadata at once?
def toExternalTask(task: model.Task)(
implicit dao: SundialDao): v2.models.Task = {
val logs = dao.taskLogsDao.loadEventsForTask(task.id)
val metadata = dao.taskMetadataDao.loadMetadataForTask(task.id)
v2.models.Task(
task.id,
task.processId,
task.processDefinitionName,
task.taskDefinitionName,
task.startedAt,
task.endedAt,
task.previousAttempts,
logs.map(toExternalLogEntry),
metadata.map(toExternalMetadataEntry),
loadExecutableMetadata(task),
toExternalTaskStatus(task.status)
)
}
def toExternalLogEntry(entry: model.TaskEventLog): v2.models.LogEntry = {
v2.models.LogEntry(entry.id, entry.when, entry.source, entry.message)
}
def toExternalMetadataEntry(
entry: model.TaskMetadataEntry): v2.models.MetadataEntry = {
v2.models.MetadataEntry(entry.id, entry.when, entry.key, entry.value)
}
def toExternalTaskStatus(status: model.TaskStatus): v2.models.TaskStatus =
status match {
case model.TaskStatus.Running() => v2.models.TaskStatus.Running
case model.TaskStatus.Success(_) => v2.models.TaskStatus.Succeeded
case model.TaskStatus.Failure(_, _) => v2.models.TaskStatus.Failed
}
def loadExecutableMetadata(task: model.Task)(
implicit dao: SundialDao): Option[Seq[v2.models.MetadataEntry]] =
task.executable match {
case _: ShellCommandExecutable =>
val stateOpt = dao.shellCommandStateDao.loadState(task.id)
stateOpt.map { state =>
// Use the task ID as the UUID for the metadata entry
Seq(
v2.models.MetadataEntry(state.taskId,
state.asOf,
"status",
state.status.toString))
}
case _: ECSExecutable =>
val stateOpt = dao.ecsContainerStateDao.loadState(task.id)
stateOpt.map { state =>
Seq(
v2.models.MetadataEntry(state.taskId,
state.asOf,
"status",
state.status.toString),
v2.models.MetadataEntry(state.taskId,
state.asOf,
"taskArn",
state.ecsTaskArn)
)
}
case _: BatchExecutable =>
val stateOpt = dao.batchContainerStateDao.loadState(task.id)
stateOpt.map { state =>
Seq(
v2.models.MetadataEntry(state.taskId,
state.asOf,
"status",
state.status.toString),
v2.models.MetadataEntry(state.taskId,
state.asOf,
"jobId",
state.jobId.toString)
)
}
case _: EmrJobExecutable =>
val stateOpt = dao.emrJobStateDao.loadState(task.id)
stateOpt.map { state =>
Seq(
v2.models.MetadataEntry(state.taskId,
state.asOf,
"status",
state.status.toString),
v2.models.MetadataEntry(state.taskId,
state.asOf,
"jobId",
state.taskId.toString)
)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// PROCESS DEFINITIONS
///////////////////////////////////////////////////////////////////////////////////////////////////////////
def toExternalProcessDefinition(processDefinition: model.ProcessDefinition)(
implicit dao: SundialDao): v2.models.ProcessDefinition = {
val taskDefinitions = dao.processDefinitionDao.loadTaskDefinitionTemplates(
processDefinition.name)
v2.models.ProcessDefinition(
processDefinition.name,
Some(processDefinition.isPaused),
processDefinition.description,
processDefinition.schedule.map(toExternalSchedule),
taskDefinitions.map(toExternalTaskDefinitionTemplate),
toExternalOverlapAction(processDefinition.overlapAction),
toExternalNotifications(processDefinition.notifications)
)
}
def toExternalSchedule(
schedule: model.ProcessSchedule): v2.models.ProcessSchedule =
schedule match {
case model.CronSchedule(min, hr, dom, mo, dow) =>
v2.models.CronSchedule(dow, mo, dom, hr, min)
case model.ContinuousSchedule(buffer) =>
v2.models.ContinuousSchedule(Some(buffer))
}
def toExternalOverlapAction(overlapAction: model.ProcessOverlapAction)
: v2.models.ProcessOverlapAction = overlapAction match {
case model.ProcessOverlapAction.Wait => v2.models.ProcessOverlapAction.Wait
case model.ProcessOverlapAction.Terminate =>
v2.models.ProcessOverlapAction.Terminate
}
def toExternalNotifications(notifications: Seq[model.Notification])
: Option[Seq[v2.models.Notification]] = {
def toExternalNotification
: PartialFunction[model.Notification, v2.models.Notification] = {
case email: EmailNotification =>
Email(email.name,
email.email,
NotificationOptions
.fromString(email.notifyAction)
.getOrElse(NotificationOptions.OnStateChangeAndFailures))
case pagerduty: PagerdutyNotification =>
Pagerduty(pagerduty.serviceKey,
pagerduty.numConsecutiveFailures,
pagerduty.apiUrl)
}
if (notifications.isEmpty) {
None
} else {
Some(notifications.map(toExternalNotification))
}
}
def toExternalTaskDefinition(
taskDefinition: model.TaskDefinition): v2.models.TaskDefinition = {
v2.models.TaskDefinition(
taskDefinition.name,
toExternalDependencies(taskDefinition.dependencies),
toExternalExecutable(taskDefinition.executable),
taskDefinition.limits.maxAttempts,
taskDefinition.limits.maxExecutionTimeSeconds,
taskDefinition.backoff.seconds,
taskDefinition.backoff.exponent,
taskDefinition.requireExplicitSuccess
)
}
def toExternalTaskDefinitionTemplate(
taskDefinition: model.TaskDefinitionTemplate)
: v2.models.TaskDefinition = {
v2.models.TaskDefinition(
taskDefinition.name,
toExternalDependencies(taskDefinition.dependencies),
toExternalExecutable(taskDefinition.executable),
taskDefinition.limits.maxAttempts,
taskDefinition.limits.maxExecutionTimeSeconds,
taskDefinition.backoff.seconds,
taskDefinition.backoff.exponent,
taskDefinition.requireExplicitSuccess
)
}
def toExternalDependencies(
dependencies: model.TaskDependencies): Seq[v2.models.TaskDependency] = {
val required = dependencies.required.map { taskDefinitionName =>
v2.models.TaskDependency(taskDefinitionName, true)
}
val optional = dependencies.required.map { taskDefinitionName =>
v2.models.TaskDependency(taskDefinitionName, false)
}
required ++ optional
}
def toExternalExecutable(
executable: model.Executable): v2.models.TaskExecutable =
executable match {
case model.ShellCommandExecutable(script, env) =>
val envAsEntries = {
if (env.isEmpty) {
Option.empty
} else {
Some(env.map {
case (key, value) =>
v2.models.EnvironmentVariable(key, value)
}.toSeq)
}
}
v2.models.ShellScriptCommand(script, envAsEntries)
case model.BatchExecutable(image,
tag,
command,
memory,
vCpus,
jobRoleArn,
environmentVariables,
jobQueue) =>
v2.models.BatchImageCommand(
image,
tag,
command,
memory,
vCpus,
jobRoleArn,
environmentVariables.toSeq.map(variable =>
EnvironmentVariable(variable._1, variable._2)),
jobQueue)
case model.EmrJobExecutable(emrClusterDetails,
jobName,
region,
clazz,
s3JarPath,
sparkConf,
sparkPackages,
args,
s3LogDetailsOpt,
loadData,
saveResults) => {
def toEmrInstanceGroup(instanceGroupDetails: InstanceGroupDetails) = {
val awsMarket = (instanceGroupDetails.awsMarket,
instanceGroupDetails.bidPriceOpt) match {
case ("on_demand", None) => OnDemand.OnDemand
case ("spot", Some(bidPrice)) => Spot(BigDecimal(bidPrice))
case _ => OnDemand.OnDemand
}
EmrInstanceGroupDetails(instanceGroupDetails.instanceType,
instanceGroupDetails.instanceCount,
awsMarket,
instanceGroupDetails.ebsVolumeSizeOpt)
}
val cluster = emrClusterDetails match {
case EmrClusterDetails(Some(clusterName),
None,
Some(releaseLabel),
applications,
Some(s3LogUri),
Some(masterInstanceGroup),
coreInstanceGroupOpt,
taskInstanceGroupOpt,
ec2SubnetOpt,
Some(emrServiceRole),
Some(emrJobFlowRole),
Some(visibleToAllUsers),
configuration,
false,
securityConfiguration) => {
val serviceRole =
if (emrServiceRole == DefaultEmrServiceRole.DefaultEmrServiceRole.toString) {
DefaultEmrServiceRole.DefaultEmrServiceRole
} else {
CustomEmrServiceRole(emrServiceRole)
}
val jobFlowRole =
if (emrJobFlowRole == DefaultEmrJobFlowRole.DefaultEmrJobFlowRole.toString) {
DefaultEmrJobFlowRole.DefaultEmrJobFlowRole
} else {
CustomEmrJobFlowRole(emrJobFlowRole)
}
v2.models.NewEmrCluster(
clusterName,
EmrReleaseLabel
.fromString(releaseLabel)
.getOrElse(
sys.error(s"Unrecognised EMR version $releaseLabel")),
applications.map(EmrApplication.fromString(_).get),
s3LogUri,
toEmrInstanceGroup(masterInstanceGroup),
coreInstanceGroupOpt.map(toEmrInstanceGroup),
taskInstanceGroupOpt.map(toEmrInstanceGroup),
ec2SubnetOpt,
serviceRole,
jobFlowRole,
visibleToAllUsers,
configuration,
securityConfiguration
)
}
case EmrClusterDetails(None,
Some(clusterId),
None,
applications,
None,
None,
None,
None,
None,
None,
None,
None,
None,
true,
None) if applications.isEmpty =>
v2.models.ExistingEmrCluster(clusterId)
case _ =>
throw new IllegalArgumentException(
s"Unexpected Cluster details: $emrClusterDetails")
}
val logDetailsOpt = s3LogDetailsOpt.flatMap {
case LogDetails(logGroupName, logStreamName) =>
Some(S3LogDetails(logGroupName, logStreamName))
}
val loadDataOpt = loadData.map(_.map(copyFileJob =>
S3Cp(copyFileJob.source, copyFileJob.destination)))
val saveResultsOpt = saveResults.map(_.map(copyFileJob =>
S3Cp(copyFileJob.source, copyFileJob.destination)))
v2.models.EmrCommand(cluster,
jobName,
region,
clazz,
s3JarPath,
sparkConf,
sparkPackages,
args,
logDetailsOpt,
loadDataOpt,
saveResultsOpt)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// REVERSE MAPPING
///////////////////////////////////////////////////////////////////////////////////////////////////////////
def toInternalSchedule(
schedule: v2.models.ProcessSchedule): model.ProcessSchedule =
schedule match {
case v2.models.ContinuousSchedule(buffer) =>
model.ContinuousSchedule(buffer.getOrElse(0))
case v2.models.CronSchedule(dow, mo, dom, hr, min) =>
model.CronSchedule(min, hr, dom, mo, dow)
case v2.models.ProcessScheduleUndefinedType(description) =>
throw new IllegalArgumentException(
s"Unknown schedule type with description [$description]")
}
def toInternalExecutable(
executable: v2.models.TaskExecutable): model.Executable =
executable match {
case v2.models.ShellScriptCommand(script, envOpt) =>
val envAsMap = envOpt match {
case Some(env) =>
env.map { envVar =>
envVar.variableName -> envVar.value
}.toMap
case _ => Map.empty[String, String]
}
model.ShellCommandExecutable(script, envAsMap)
case v2.models.BatchImageCommand(image,
tag,
command,
memory,
vCpus,
jobRoleArn,
environmentVariables,
jobQueue) =>
model.BatchExecutable(
image,
tag,
command,
memory,
vCpus,
jobRoleArn,
environmentVariables
.map(envVariable => envVariable.variableName -> envVariable.value)
.toMap,
jobQueue
)
case v2.models.EmrCommand(emrCluster,
jobName,
region,
clazz,
s3JarPath,
sparkConf,
sparkPackages,
args,
s3LogDetailsOpt,
loadDataOpt,
saveResultsOpt) => {
def toInstanceGroupDetails(
emrInstanceGroupDetails: EmrInstanceGroupDetails) = {
val (awsMarket: String, bidPriceOpt: Option[Double]) =
emrInstanceGroupDetails.awsMarket match {
case OnDemand.OnDemand => ("on_demand", None)
case Spot(bidPrice) => ("spot", Some(bidPrice.toDouble))
case _ => ("on_demand", None)
}
InstanceGroupDetails(emrInstanceGroupDetails.emrInstanceType,
emrInstanceGroupDetails.instanceCount,
awsMarket,
bidPriceOpt,
emrInstanceGroupDetails.ebsVolumeSize)
}
val clusterDetails = emrCluster match {
case NewEmrCluster(clusterName,
releaseLabel,
applications,
s3LogUri,
masterInstanceGroup,
coreInstanceGroup,
taskInstanceGroup,
ec2SubnetOpt,
emrServiceRole,
emrJobFlowRole,
visibleToAllUsers,
configuration,
securityConfiguration) => {
val serviceRoleName = emrServiceRole match {
case DefaultEmrServiceRole.DefaultEmrServiceRole =>
DefaultEmrServiceRole.DefaultEmrServiceRole.toString
case CustomEmrServiceRole(roleName) => roleName
case DefaultEmrServiceRole.UNDEFINED(undefined) =>
throw new IllegalArgumentException(
s"Unknown service role type: $undefined")
case EmrServiceRoleUndefinedType(undefined) =>
throw new IllegalArgumentException(
s"Unknown service role type: $undefined")
}
val jobFlowRoleName = emrJobFlowRole match {
case DefaultEmrJobFlowRole.DefaultEmrJobFlowRole =>
DefaultEmrJobFlowRole.DefaultEmrJobFlowRole.toString
case CustomEmrJobFlowRole(roleName) => roleName
case DefaultEmrJobFlowRole.UNDEFINED(undefined) =>
throw new IllegalArgumentException(
s"Unknown job flow role type: $undefined")
case EmrJobFlowRoleUndefinedType(undefined) =>
throw new IllegalArgumentException(
s"Unknown job flow role type: $undefined")
}
EmrClusterDetails(
Some(clusterName),
None,
Some(releaseLabel.toString),
applications.map(_.toString),
Some(s3LogUri),
Some(toInstanceGroupDetails(masterInstanceGroup)),
coreInstanceGroup.map(toInstanceGroupDetails),
taskInstanceGroup.map(toInstanceGroupDetails),
ec2Subnet = ec2SubnetOpt,
Some(serviceRoleName),
Some(jobFlowRoleName),
Some(visibleToAllUsers),
configuration,
existingCluster = false,
securityConfiguration
)
}
case ExistingEmrCluster(clusterId) =>
EmrClusterDetails(clusterName = None,
clusterId = Some(clusterId),
existingCluster = true)
case EmrClusterUndefinedType(undefinedType) => {
Logger.error(s"UnsupportedClusterType($undefinedType)")
throw new IllegalArgumentException(
s"Cluster Type not supported: $undefinedType")
}
}
val logDetailsOpt = s3LogDetailsOpt.map {
case S3LogDetails(logGroupName, logStreamName) =>
LogDetails(logGroupName, logStreamName)
}
val loadData = loadDataOpt.map(
_.map(s3Cp => CopyFileJob(s3Cp.source, s3Cp.destination)))
val saveResults = saveResultsOpt.map(
_.map(s3Cp => CopyFileJob(s3Cp.source, s3Cp.destination)))
EmrJobExecutable(clusterDetails,
jobName,
region,
clazz,
s3JarPath,
sparkConf,
sparkPackages,
args,
logDetailsOpt,
loadData,
saveResults)
}
case v2.models.TaskExecutableUndefinedType(description) =>
throw new IllegalArgumentException(
s"Unknown executable type with description [$description]")
}
def toInternalOverlapAction(
overlap: v2.models.ProcessOverlapAction): model.ProcessOverlapAction =
overlap match {
case v2.models.ProcessOverlapAction.Wait =>
model.ProcessOverlapAction.Wait
case v2.models.ProcessOverlapAction.Terminate =>
model.ProcessOverlapAction.Terminate
case _: v2.models.ProcessOverlapAction.UNDEFINED =>
throw new IllegalArgumentException("Unknown overlap action")
}
def toInternalTaskStatusType(
status: v2.models.TaskStatus): model.TaskStatusType = status match {
case v2.models.TaskStatus.Starting => model.TaskStatusType.Running
case v2.models.TaskStatus.Pending => model.TaskStatusType.Running
case v2.models.TaskStatus.Submitted => model.TaskStatusType.Running
case v2.models.TaskStatus.Runnable => model.TaskStatusType.Running
case v2.models.TaskStatus.Succeeded => model.TaskStatusType.Success
case v2.models.TaskStatus.Failed => model.TaskStatusType.Failure
case v2.models.TaskStatus.Running => model.TaskStatusType.Running
case _: v2.models.TaskStatus.UNDEFINED =>
throw new IllegalArgumentException("Unknown task status type")
}
def toInternalProcessStatusType(
status: v2.models.ProcessStatus): model.ProcessStatusType = status match {
case v2.models.ProcessStatus.Succeeded => model.ProcessStatusType.Succeeded
case v2.models.ProcessStatus.Failed => model.ProcessStatusType.Failed
case v2.models.ProcessStatus.Running => model.ProcessStatusType.Running
case _: v2.models.ProcessStatus.UNDEFINED =>
throw new IllegalArgumentException("Unknown process status type")
}
def toInternalLogEntry(taskId: UUID,
entry: v2.models.LogEntry): model.TaskEventLog = {
model.TaskEventLog(entry.logEntryId,
taskId,
entry.when,
entry.source,
entry.message)
}
def toInternalMetadataEntry(
taskId: UUID,
entry: v2.models.MetadataEntry): model.TaskMetadataEntry = {
model.TaskMetadataEntry(entry.metadataEntryId,
taskId,
entry.when,
entry.key,
entry.value)
}
def toInternalNotification
: PartialFunction[v2.models.Notification, model.Notification] = {
case email: Email =>
EmailNotification(email.name, email.email, email.notifyWhen.toString)
case pagerduty: Pagerduty =>
PagerdutyNotification(pagerduty.serviceKey,
pagerduty.apiUrl,
pagerduty.numConsecutiveFailures)
case NotificationUndefinedType(notificationTypeName) =>
throw new IllegalArgumentException(
s"Unknown notification type [$notificationTypeName]")
}
}
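/**
 * Illustrative sketch (added example, not part of the original Sundial source):
 * the status conversions above are plain pattern matches, so they can be
 * exercised without a SundialDao in scope.
 */
object ModelConverterStatusExample {
  def main(args: Array[String]): Unit = {
    val external = ModelConverter.toExternalProcessStatus(model.ProcessStatus.Running())
    println(external == v2.models.ProcessStatus.Running) // prints true
  }
}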
|
gilt/sundial
|
app/controllers/ModelConverter.scala
|
Scala
|
mit
| 24,794
|
package com.sksamuel.elastic4s
import org.elasticsearch.index.query.{HasParentFilterBuilder, HasChildFilterBuilder, NestedFilterBuilder, FilterBuilders}
import org.elasticsearch.common.geo.GeoDistance
import org.elasticsearch.common.unit.DistanceUnit
import com.sksamuel.elastic4s.DefinitionAttributes._
/** @author Stephen Samuel */
trait FilterDsl {
def existsFilter(field: String): ExistsFilter = new ExistsFilter(field)
def geoboxFilter(field: String): GeoBoundingBoxFilter = new GeoBoundingBoxFilter(field)
def geoDistance(field: String): GeoDistanceFilter = new GeoDistanceFilter(field)
def geoPolygon(field: String): GeoPolygonFilter = new GeoPolygonFilter(field)
def geoDistanceRangeFilter(field: String): GeoDistanceRangeFilterDefinition =
new GeoDistanceRangeFilterDefinition(field)
def hasChildFilter(`type`: String): HasChildExpectsQueryOrFilter = new HasChildExpectsQueryOrFilter(`type`)
class HasChildExpectsQueryOrFilter(`type`: String) {
def query(query: QueryDefinition) = new
HasChildFilterDefinition(FilterBuilders.hasChildFilter(`type`, query.builder))
def filter(filter: FilterDefinition) = new
HasChildFilterDefinition(FilterBuilders.hasChildFilter(`type`, filter.builder))
}
def hasParentFilter(`type`: String): HasParentExpectsQueryOrFilter = new HasParentExpectsQueryOrFilter(`type`)
class HasParentExpectsQueryOrFilter(`type`: String) {
def query(query: QueryDefinition) = new
HasParentFilterDefinition(FilterBuilders.hasParentFilter(`type`, query.builder))
def filter(filter: FilterDefinition) = new
HasParentFilterDefinition(FilterBuilders.hasParentFilter(`type`, filter.builder))
}
def nestedFilter(path: String): NestedFilterExpectsQueryOrFilter = new NestedFilterExpectsQueryOrFilter(path)
class NestedFilterExpectsQueryOrFilter(path: String) {
def query(query: QueryDefinition) = new NestedFilterDefinition(FilterBuilders.nestedFilter(path, query.builder))
def filter(filter: FilterDefinition) = new NestedFilterDefinition(FilterBuilders.nestedFilter(path, filter.builder))
}
def matchAllFilter: MatchAllFilter = new MatchAllFilter
def or(filters: FilterDefinition*): OrFilterDefinition = new OrFilterDefinition(filters: _*)
def or(filters: Iterable[FilterDefinition]): OrFilterDefinition = new OrFilterDefinition(filters.toSeq: _*)
def orFilter(filters: FilterDefinition*): OrFilterDefinition = new OrFilterDefinition(filters: _*)
def orFilter(filters: Iterable[FilterDefinition]): OrFilterDefinition = new OrFilterDefinition(filters.toSeq: _*)
def and(filters: FilterDefinition*): AndFilterDefinition = andFilter(filters)
def and(filters: Iterable[FilterDefinition]): AndFilterDefinition = andFilter(filters)
def andFilter(filters: FilterDefinition*): AndFilterDefinition = andFilter(filters)
def andFilter(filters: Iterable[FilterDefinition]): AndFilterDefinition = new AndFilterDefinition(filters.toSeq: _*)
def numericRangeFilter(field: String): NumericRangeFilter = new NumericRangeFilter(field)
def rangeFilter(field: String): RangeFilter = new RangeFilter(field)
def prefixFilter(field: String, prefix: Any): PrefixFilterDefinition = new PrefixFilterDefinition(field, prefix)
def prefixFilter(tuple: (String, Any)): PrefixFilterDefinition = prefixFilter(tuple._1, tuple._2)
def queryFilter(query: QueryDefinition): QueryFilterDefinition = new QueryFilterDefinition(query)
def regexFilter(field: String, regex: Any): RegexFilterDefinition = new RegexFilterDefinition(field, regex)
def regexFilter(tuple: (String, Any)): RegexFilterDefinition = regexFilter(tuple._1, tuple._2)
def scriptFilter(script: String): ScriptFilterDefinition = new ScriptFilterDefinition(script)
def termFilter(field: String, value: Any): TermFilterDefinition = new TermFilterDefinition(field, value)
def termFilter(tuple: (String, Any)): TermFilterDefinition = termFilter(tuple._1, tuple._2)
def termsFilter(field: String, values: Any*): TermsFilterDefinition = new
TermsFilterDefinition(field, values.map(_.toString): _*)
def termsLookupFilter(field: String): TermsLookupFilterDefinition = new TermsLookupFilterDefinition(field)
def typeFilter(`type`: String): TypeFilterDefinition = new TypeFilterDefinition(`type`)
def missingFilter(field: String): MissingFilterDefinition = new MissingFilterDefinition(field)
def idsFilter(ids: String*): IdFilterDefinition = new IdFilterDefinition(ids: _*)
def bool(block: => BoolFilterDefinition): FilterDefinition = block
def must(queries: FilterDefinition*): BoolFilterDefinition = new BoolFilterDefinition().must(queries: _*)
def must(queries: Iterable[FilterDefinition]): BoolFilterDefinition = new BoolFilterDefinition().must(queries)
def should(queries: FilterDefinition*): BoolFilterDefinition = new BoolFilterDefinition().should(queries: _*)
def should(queries: Iterable[FilterDefinition]): BoolFilterDefinition = new BoolFilterDefinition().should(queries)
case object not {
def filter(filter: FilterDefinition): NotFilterDefinition = new NotFilterDefinition(filter)
}
def not(filter: FilterDefinition): NotFilterDefinition = new NotFilterDefinition(filter)
def not(queries: FilterDefinition*): BoolFilterDefinition = new BoolFilterDefinition().not(queries: _*)
def not(queries: Iterable[FilterDefinition]): BoolFilterDefinition = new BoolFilterDefinition().not(queries)
}
trait FilterDefinition {
def builder: org.elasticsearch.index.query.FilterBuilder
}
class BoolFilterDefinition extends FilterDefinition {
val builder = FilterBuilders.boolFilter()
def must(filters: FilterDefinition*): this.type = {
filters.foreach(builder must _.builder)
this
}
def must(filters: Iterable[FilterDefinition]): this.type = {
filters.foreach(builder must _.builder)
this
}
def should(filters: FilterDefinition*): this.type = {
filters.foreach(builder should _.builder)
this
}
def should(filters: Iterable[FilterDefinition]): this.type = {
filters.foreach(builder should _.builder)
this
}
def not(filters: FilterDefinition*): this.type = {
filters.foreach(builder mustNot _.builder)
this
}
def not(filters: Iterable[FilterDefinition]): this.type = {
filters.foreach(builder mustNot _.builder)
this
}
}
class IdFilterDefinition(ids: String*) extends FilterDefinition {
val builder = FilterBuilders.idsFilter().addIds(ids: _*)
def filterName(filterName: String): IdFilterDefinition = {
builder.filterName(filterName)
this
}
def withIds(any: Any*): IdFilterDefinition = {
any.foreach(id => builder.addIds(id.toString))
this
}
}
class TypeFilterDefinition(`type`: String) extends FilterDefinition {
val builder = FilterBuilders.typeFilter(`type`)
}
class ExistsFilter(field: String) extends FilterDefinition {
val builder = FilterBuilders.existsFilter(field)
def filterName(filterName: String): ExistsFilter = {
builder.filterName(filterName)
this
}
}
class QueryFilterDefinition(q: QueryDefinition)
extends FilterDefinition
with DefinitionAttributeCache {
val builder = FilterBuilders.queryFilter(q.builder)
val _builder = builder
def filterName(filterName: String): QueryFilterDefinition = {
builder.filterName(filterName)
this
}
}
class MissingFilterDefinition(field: String) extends FilterDefinition {
val builder = FilterBuilders.missingFilter(field)
def includeNull(nullValue: Boolean): MissingFilterDefinition = {
builder.nullValue(nullValue)
this
}
def filterName(filterName: String): MissingFilterDefinition = {
builder.filterName(filterName)
this
}
def existence(existence: Boolean): MissingFilterDefinition = {
builder.existence(existence)
this
}
}
class ScriptFilterDefinition(script: String)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey
with DefinitionAttributeFilterName {
val builder = FilterBuilders.scriptFilter(script)
val _builder = builder
def lang(lang: String): ScriptFilterDefinition = {
builder.lang(lang)
this
}
def param(name: String, value: Any): ScriptFilterDefinition = {
builder.addParam(name, value)
this
}
def params(map: Map[String, Any]): ScriptFilterDefinition = {
for ( entry <- map ) param(entry._1, entry._2)
this
}
}
class MatchAllFilter extends FilterDefinition {
val builder = FilterBuilders.matchAllFilter()
}
@deprecated("deprecated in elasticsearch 1.0", "1.0")
class NumericRangeFilter(field: String)
extends FilterDefinition
with DefinitionAttributeFrom
with DefinitionAttributeTo
with DefinitionAttributeLt
with DefinitionAttributeGt
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.numericRangeFilter(field)
val _builder = builder
def filterName(filterName: String): NumericRangeFilter = {
builder.filterName(filterName)
this
}
def includeLower(includeLower: Boolean): NumericRangeFilter = {
builder.includeLower(includeLower)
this
}
def includeUpper(includeUpper: Boolean): NumericRangeFilter = {
builder.includeUpper(includeUpper)
this
}
def lte(lte: Double): NumericRangeFilter = {
builder.lte(lte)
this
}
def lte(lte: Long): NumericRangeFilter = {
builder.lte(lte)
this
}
def gte(gte: Double): NumericRangeFilter = {
builder.gte(gte)
this
}
def gte(gte: Long): NumericRangeFilter = {
builder.gte(gte)
this
}
}
class RangeFilter(field: String)
extends FilterDefinition
with DefinitionAttributeTo
with DefinitionAttributeFrom
with DefinitionAttributeLt
with DefinitionAttributeGt
with DefinitionAttributeCache
with DefinitionAttributeCacheKey
with DefinitionAttributeFilterName {
val builder = FilterBuilders.rangeFilter(field)
val _builder = builder
def includeLower(includeLower: Boolean): RangeFilter = {
builder.includeLower(includeLower)
this
}
def includeUpper(includeUpper: Boolean): RangeFilter = {
builder.includeUpper(includeUpper)
this
}
def lte(lte: String): RangeFilter = {
builder.lte(lte)
this
}
def gte(gte: String): RangeFilter = {
builder.gte(gte)
this
}
def execution(execution: String): RangeFilter = {
builder.setExecution(execution)
this
}
}
class HasChildFilterDefinition(val builder: HasChildFilterBuilder)
extends FilterDefinition {
val _builder = builder
def name(name: String): HasChildFilterDefinition = {
builder.filterName(name)
this
}
}
class HasParentFilterDefinition(val builder: HasParentFilterBuilder)
extends FilterDefinition {
val _builder = builder
def name(name: String): HasParentFilterDefinition = {
builder.filterName(name)
this
}
}
class NestedFilterDefinition(val builder: NestedFilterBuilder)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey
with DefinitionAttributeFilterName {
val _builder = builder
def join(join: Boolean): NestedFilterDefinition = {
builder.join(join)
this
}
}
class PrefixFilterDefinition(field: String, prefix: Any)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.prefixFilter(field, prefix.toString)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
}
class TermFilterDefinition(field: String, value: Any)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.termFilter(field, value.toString)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
}
class TermsFilterDefinition(field: String, values: Any*)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
import scala.collection.JavaConverters._
val builder = FilterBuilders.termsFilter(field, values.asJava)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
def execution(execution: String): this.type = {
builder.execution(execution)
this
}
}
class TermsLookupFilterDefinition(field: String)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.termsLookupFilter(field)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
def index(index: String): this.type = {
builder.lookupIndex(index)
this
}
def lookupType(`type`: String): this.type = {
builder.lookupType(`type`)
this
}
def id(id: String): this.type = {
builder.lookupId(id)
this
}
def path(path: String): this.type = {
builder.lookupPath(path)
this
}
def routing(routing: String): this.type = {
builder.lookupRouting(routing)
this
}
def lookupCache(cache: Boolean): this.type = {
builder.lookupCache(cache)
this
}
}
class GeoPolygonFilter(name: String)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.geoPolygonFilter(name)
val _builder = builder
def point(geohash: String): GeoPolygonFilter = {
builder.addPoint(geohash)
this
}
def point(lat: Double, lon: Double): this.type = {
_builder.addPoint(lat, lon)
this
}
}
class GeoDistanceRangeFilterDefinition(field: String)
extends FilterDefinition
with DefinitionAttributeTo
with DefinitionAttributeFrom
with DefinitionAttributeLt
with DefinitionAttributeGt
with DefinitionAttributeLat
with DefinitionAttributeLon
with DefinitionAttributeCache
with DefinitionAttributeCacheKey
with DefinitionAttributePoint {
val builder = FilterBuilders.geoDistanceRangeFilter(field)
val _builder = builder
def geoDistance(geoDistance: GeoDistance): GeoDistanceRangeFilterDefinition = {
builder.geoDistance(geoDistance)
this
}
def geohash(geohash: String): GeoDistanceRangeFilterDefinition = {
builder.geohash(geohash)
this
}
def gte(gte: Any): GeoDistanceRangeFilterDefinition = {
builder.gte(gte)
this
}
def lte(lte: Any): GeoDistanceRangeFilterDefinition = {
builder.lte(lte)
this
}
def includeLower(includeLower: Boolean): GeoDistanceRangeFilterDefinition = {
builder.includeLower(includeLower)
this
}
def includeUpper(includeUpper: Boolean): GeoDistanceRangeFilterDefinition = {
builder.includeUpper(includeUpper)
this
}
def name(name: String): GeoDistanceRangeFilterDefinition = {
builder.filterName(name)
this
}
}
class NotFilterDefinition(filter: FilterDefinition)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeFilterName {
val builder = FilterBuilders.notFilter(filter.builder)
val _builder = builder
}
class OrFilterDefinition(filters: FilterDefinition*)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.orFilter(filters.map(_.builder).toArray: _*)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
}
class AndFilterDefinition(filters: FilterDefinition*)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.andFilter(filters.map(_.builder).toArray: _*)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
}
class GeoDistanceFilter(name: String)
extends FilterDefinition
with DefinitionAttributeLat
with DefinitionAttributeLon
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.geoDistanceFilter(name)
val _builder = builder
def geohash(geohash: String): GeoDistanceFilter = {
builder.geohash(geohash)
this
}
def method(method: GeoDistance): GeoDistanceFilter = geoDistance(method)
def geoDistance(geoDistance: GeoDistance): GeoDistanceFilter = {
builder.geoDistance(geoDistance)
this
}
def distance(distance: String): GeoDistanceFilter = {
builder.distance(distance)
this
}
def distance(distance: Double, unit: DistanceUnit): GeoDistanceFilter = {
builder.distance(distance, unit)
this
}
def point(lat: Double, long: Double): GeoDistanceFilter = {
builder.point(lat, long)
this
}
}
class GeoBoundingBoxFilter(name: String)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.geoBoundingBoxFilter(name)
val _builder = builder
private var _left: Double = _
private var _top: Double = _
private var _right: Double = _
private var _bottom: Double = _
def left(left: Double): GeoBoundingBoxFilter = {
_left = left
builder.topLeft(_top, _left)
this
}
def top(top: Double): GeoBoundingBoxFilter = {
_top = top
builder.topLeft(_top, _left)
this
}
def right(right: Double): GeoBoundingBoxFilter = {
_right = right
builder.bottomRight(_bottom, _right)
this
}
def bottom(bottom: Double): GeoBoundingBoxFilter = {
_bottom = bottom
builder.bottomRight(_bottom, _right)
this
}
}
class RegexFilterDefinition(field: String, regex: Any)
extends FilterDefinition
with DefinitionAttributeCache
with DefinitionAttributeCacheKey {
val builder = FilterBuilders.regexpFilter(field, regex.toString)
val _builder = builder
def name(name: String): this.type = {
builder.filterName(name)
this
}
}
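/** Illustrative sketch (added example, not part of the original elastic4s source):
  * composing filter definitions with the DSL defined above. The field names and
  * values are assumptions used only for illustration. */
object FilterDslExample extends FilterDsl {
  val exampleFilter: FilterDefinition =
    and(
      termFilter("state", "published"),    // exact match on a single term
      rangeFilter("age") gte "18",         // lower-bounded range (greater than or equal)
      prefixFilter("title", "intro")       // prefix match on the "title" field
    )
}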
|
l15k4/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/filters.scala
|
Scala
|
apache-2.0
| 17,711
|
package com.twitter.finagle.service
import com.twitter.conversions.time._
import com.twitter.finagle.{ChannelClosedException, Failure, TimeoutException, WriteException}
import com.twitter.util.{
TimeoutException => UtilTimeoutException, Duration, JavaSingleton, Return, Throw, Try}
import java.util.{concurrent => juc}
import java.{util => ju}
import scala.collection.JavaConverters._
/**
* A function defining retry behavior for a given value type `A`.
*
* The [[Function1]] returns [[None]] if no more retries should be made
* and [[Some]] if another retry should happen. The returned `Some` has
* a [[Duration]] field for how long to wait for the next retry as well
* as the next `RetryPolicy` to use.
*
* @see [[SimpleRetryPolicy]] for a Java friendly API.
*/
abstract class RetryPolicy[-A] extends (A => Option[(Duration, RetryPolicy[A])]) {
/**
* Creates a new `RetryPolicy` based on the current `RetryPolicy` in which values of `A`
* are first checked against a predicate function, and only if the predicate returns true
* will the value be passed on to the current `RetryPolicy`.
*
* The predicate function need not be a pure function, but can change its behavior over
* time. For example, the predicate function's decision can be based upon backpressure
* signals supplied by things like failure rates or latency, which allows `RetryPolicy`s
* to dynamically reduce the number of retries in response to backpressure.
*
* The predicate function is only called on the first failure in a chain. Any additional
* chained RetryPolicies returned by the current policy will then see additional failures
   * unfiltered. Contrast this with `filterEach`, which applies the filter to each `RetryPolicy`
* in the chain.
*/
def filter[B <: A](pred: B => Boolean): RetryPolicy[B] =
RetryPolicy { e =>
if (!pred(e)) None else this(e)
}
/**
* Similar to `filter`, but the predicate is applied to each `RetryPolicy` in the chain
* returned by the current RetryPolicy. For example, if the current `RetryPolicy` returns
* `Some((D, P'))` for value `E` (of type `A`), and the given predicate returns true for `E`,
* then the value returned from the filtering `RetryPolicy` will be `Some((D, P''))` where
* `P''` is equal to `P'.filterEach(pred)`.
*
* One example where this is useful is to dynamically and fractionally allow retries based
* upon backpressure signals. If, for example, the predicate function returned true or false
* based upon a probability distribution computed from a backpressure signal, it could return
* true 50% of the time, giving you a 50% chance of performing a single retry, a 25% chance of
* performing 2 retries, 12.5% chance of performing 3 retries, etc. This might be more
* desirable than just using `filter` where you end up with a 50% chance of no retries and
* 50% chance of the full number of retries.
*/
def filterEach[B <: A](pred: B => Boolean): RetryPolicy[B] =
RetryPolicy { e =>
if (!pred(e))
None
else {
this(e).map {
case (backoff, p2) => (backoff, p2.filterEach(pred))
}
}
}
/**
* Applies a dynamically chosen retry limit to an existing `RetryPolicy` that may allow for
* more retries. When the returned `RetryPolicy` is first invoked, it will call the `maxRetries`
* by-name parameter to get the current maximum retries allowed. Regardless of the number
* of retries that the underlying policy would allow, it is capped to be no greater than the
* number returned by `maxRetries` on the first failure in the chain.
*
   * Using a dynamically chosen retry limit allows for the retry count to be tuned at runtime
* based upon backpressure signals such as failure rate or request latency.
*/
def limit(maxRetries: => Int): RetryPolicy[A] =
RetryPolicy[A] { e =>
val triesRemaining = maxRetries
if (triesRemaining <= 0)
None
else {
this(e).map {
case (backoff, p2) => (backoff, p2.limit(triesRemaining - 1))
}
}
}
}
/**
* A retry policy abstract class. This is convenient to use for Java programmers. Simply implement
* the two abstract methods `shouldRetry` and `backoffAt` and you're good to go!
*/
abstract class SimpleRetryPolicy[A](i: Int) extends RetryPolicy[A]
with (A => Option[(Duration, RetryPolicy[A])])
{
def this() = this(0)
final def apply(e: A) = {
if (shouldRetry(e)) {
backoffAt(i) match {
case Duration.Top =>
None
case howlong =>
Some((howlong, new SimpleRetryPolicy[A](i + 1) {
def shouldRetry(a: A) = SimpleRetryPolicy.this.shouldRetry(a)
def backoffAt(retry: Int) = SimpleRetryPolicy.this.backoffAt(retry)
}))
}
} else {
None
}
}
override def andThen[B](that: Option[(Duration, RetryPolicy[A])] => B): A => B =
that.compose(this)
override def compose[B](that: B => A): B => Option[(Duration, RetryPolicy[A])] =
that.andThen(this)
/**
* Given a value, decide whether it is retryable. Typically the value is an exception.
*/
def shouldRetry(a: A): Boolean
/**
* Given a number of retries, return how long to wait till the next retry. Note that this is
* zero-indexed. To implement a finite number of retries, implement a method like:
* `if (i > 3) return never`
*/
def backoffAt(retry: Int): Duration
/**
* A convenience method to access Duration.Top from Java. This is a sentinel value that
* signals no-further-retries.
*/
final val never = Duration.Top
}
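/**
 * Illustrative sketch (added example, not part of the original Finagle source):
 * a minimal SimpleRetryPolicy that retries util timeouts at most three times with
 * a fixed 100 millisecond pause. The object name is an assumption.
 */
private[finagle] object ExampleTimeoutRetryPolicy extends SimpleRetryPolicy[Try[Nothing]]() {
  def shouldRetry(t: Try[Nothing]): Boolean = t match {
    case Throw(_: UtilTimeoutException) => true   // only timeouts are considered retryable here
    case _ => false
  }
  def backoffAt(retry: Int): Duration =
    if (retry >= 3) never else 100.milliseconds   // `never` (Duration.Top) stops further retries
}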
object RetryPolicy extends JavaSingleton {
object RetryableWriteException {
def unapply(thr: Throwable): Option[Throwable] = thr match {
// We don't retry interruptions by default since they
// indicate that the request was discarded.
case f: Failure if f.isFlagged(Failure.Interrupted) => None
case f: Failure if f.isFlagged(Failure.Restartable) => Some(f.show)
case WriteException(exc) => Some(exc)
case _ => None
}
}
/**
* Failures that are generally retryable because the request failed
* before it finished being written to the remote service.
* See [[com.twitter.finagle.WriteException]].
*/
val WriteExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
case Throw(RetryableWriteException(_)) => true
}
val TimeoutAndWriteExceptionsOnly: PartialFunction[Try[Nothing], Boolean] =
WriteExceptionsOnly.orElse {
case Throw(Failure(Some(_: TimeoutException))) => true
case Throw(Failure(Some(_: UtilTimeoutException))) => true
case Throw(_: TimeoutException) => true
case Throw(_: UtilTimeoutException) => true
}
val ChannelClosedExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
case Throw(_: ChannelClosedException) => true
}
val Never: RetryPolicy[Try[Nothing]] = new RetryPolicy[Try[Nothing]] {
def apply(t: Try[Nothing]): Option[(Duration, Nothing)] = None
}
/**
* Converts a `RetryPolicy[Try[Nothing]]` to a `RetryPolicy[(Req, Try[Rep])]`
* that acts only on exceptions.
*/
private[finagle] def convertExceptionPolicy[Req, Rep](
policy: RetryPolicy[Try[Nothing]]
): RetryPolicy[(Req, Try[Rep])] =
new RetryPolicy[(Req, Try[Rep])] {
def apply(input: (Req, Try[Rep])): Option[(Duration, RetryPolicy[(Req, Try[Rep])])] = input match {
case (_, t@Throw(_)) =>
policy(t.asInstanceOf[Throw[Nothing]]) match {
case Some((howlong, nextPolicy)) => Some((howlong, convertExceptionPolicy(nextPolicy)))
case None => None
}
case (_, Return(_)) => None
}
}
/**
* Lifts a function of type `A => Option[(Duration, RetryPolicy[A])]` in the `RetryPolicy` type.
*/
def apply[A](f: A => Option[(Duration, RetryPolicy[A])]): RetryPolicy[A] =
new RetryPolicy[A] {
def apply(e: A): Option[(Duration, RetryPolicy[A])] = f(e)
}
/**
* Try up to a specific number of times, based on the supplied `PartialFunction[A, Boolean]`.
* A value of type `A` is considered retryable if and only if the PartialFunction
* is defined at and returns true for that value.
*
* The returned policy has jittered backoffs between retries.
*
* @param numTries the maximum number of attempts (including retries) that can be made.
* A value of `1` means one attempt and no retries on failure.
* A value of `2` means one attempt and then a single retry if the failure meets the
* criteria of `shouldRetry`.
* @param shouldRetry which `A`-typed values are considered retryable.
*/
def tries[A](
numTries: Int,
shouldRetry: PartialFunction[A, Boolean]
): RetryPolicy[A] = {
val backoffs = Backoff.decorrelatedJittered(5.millis, 200.millis)
backoff[A](backoffs.take(numTries - 1))(shouldRetry)
}
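  // A minimal usage sketch: at most three attempts (one call plus up to two retries)
  // whenever the response is a Throw, with the jittered backoffs supplied above.
  //
  //   val threeTries: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.tries[Try[Nothing]](3, { case Throw(_) => true })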
/**
   * Try up to a specific number of times on failures that are
* [[com.twitter.finagle.service.RetryPolicy.WriteExceptionsOnly]].
*
* The returned policy has jittered backoffs between retries.
*
* @param numTries the maximum number of attempts (including retries) that can be made.
* A value of `1` means one attempt and no retries on failure.
* A value of `2` means one attempt and then a single retry if the failure meets the
* criteria of [[com.twitter.finagle.service.RetryPolicy.WriteExceptionsOnly]].
*/
def tries(numTries: Int): RetryPolicy[Try[Nothing]] = tries(numTries, WriteExceptionsOnly)
private[this] val AlwaysFalse = Function.const(false) _
/**
* Retry based on a series of backoffs defined by a `Stream[Duration]`. The
* stream is consulted to determine the duration after which a request is to
* be retried. A `PartialFunction` argument determines which request types
* are retryable.
*
* @see [[backoffJava]] for a Java friendly API.
*/
def backoff[A](
backoffs: Stream[Duration]
)(shouldRetry: PartialFunction[A, Boolean]): RetryPolicy[A] = {
RetryPolicy { e =>
if (shouldRetry.applyOrElse(e, AlwaysFalse)) {
backoffs match {
case howlong #:: rest =>
Some((howlong, backoff(rest)(shouldRetry)))
case _ =>
None
}
} else {
None
}
}
}
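  // A minimal usage sketch with an explicit schedule: retry write exceptions after
  // 10, 20 and then 40 milliseconds, and give up once the stream is exhausted.
  //
  //   val threeBackoffs: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.backoff(Stream(10.milliseconds, 20.milliseconds, 40.milliseconds))(
  //       RetryPolicy.WriteExceptionsOnly)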
/**
* A version of [[backoff]] usable from Java.
*
* @param backoffs can be created via [[Backoff.toJava]].
*/
def backoffJava[A](
backoffs: juc.Callable[ju.Iterator[Duration]],
shouldRetry: PartialFunction[A, Boolean]
): RetryPolicy[A] = {
backoff[A](backoffs.call().asScala.toStream)(shouldRetry)
}
/**
* Combines multiple `RetryPolicy`s into a single combined `RetryPolicy`, with interleaved
* backoffs. For a given value of `A`, each policy in `policies` is tried in order. If all
* policies return `None`, then the combined `RetryPolicy` returns `None`. If policy `P` returns
* `Some((D, P'))`, then the combined `RetryPolicy` returns `Some((D, P''))`, where `P''` is a
* new combined `RetryPolicy` with the same sub-policies, with the exception of `P` replaced by
* `P'`.
*
* The ordering of policies matters: earlier policies get a chance to handle the failure
* before later policies; a catch-all policy, if any, should be last.
*
* As an example, let's say you combine two `RetryPolicy`s, `R1` and `R2`, where `R1` handles
* only exception `E1` with a backoff of `(10.milliseconds, 20.milliseconds, 30.milliseconds)`,
* while `R2` handles only exception `E2` with a backoff of `(15.milliseconds, 25.milliseconds)`.
*
* If a sequence of exceptions, `(E2, E1, E1, E2)`, is fed in order to the combined retry policy,
* the backoffs seen will be `(15.milliseconds, 10.milliseconds, 20.milliseconds,
* 25.milliseconds)`.
*
* The maximum number of retries the combined policy could allow under the worst case scenario
* of exceptions is equal to the sum of the individual maximum retries of each subpolicy. To
* put a cap on the combined maximum number of retries, you can call `limit` on the combined
* policy with a smaller cap.
*/
def combine[A](policies: RetryPolicy[A]*): RetryPolicy[A] =
RetryPolicy[A] { e =>
// stores the first matched backoff
var backoffOpt: Option[Duration] = None
val policies2 =
policies.map { p =>
if (backoffOpt.nonEmpty)
p
else {
p(e) match {
case None => p
case Some((backoff, p2)) =>
backoffOpt = Some(backoff)
p2
}
}
}
backoffOpt match {
case None => None
case Some(backoff) => Some((backoff, combine(policies2: _*)))
}
}
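  // A minimal sketch of the R1/R2 example from the scaladoc above, with the two
  // exception classifiers written inline and an overall cap applied via `limit`.
  //
  //   val r1 = RetryPolicy.backoff[Try[Nothing]](
  //     Stream(10.milliseconds, 20.milliseconds, 30.milliseconds)) {
  //     case Throw(_: TimeoutException) => true
  //   }
  //   val r2 = RetryPolicy.backoff[Try[Nothing]](Stream(15.milliseconds, 25.milliseconds)) {
  //     case Throw(_: ChannelClosedException) => true
  //   }
  //   val combined = RetryPolicy.combine(r1, r2).limit(4)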
}
|
zfy0701/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/service/RetryPolicy.scala
|
Scala
|
apache-2.0
| 12,851
|
package lila.security
import scala.concurrent.duration._
import com.github.blemale.scaffeine.Cache
import lila.user.User
import lila.common.config.NetDomain
final class PromotionApi(domain: NetDomain) {
def test(user: User)(text: String, prevText: Option[String] = None): Boolean =
user.isVerified || user.isAdmin || {
val promotions = extract(text)
promotions.isEmpty || {
val prevTextPromotion = prevText ?? extract
val prev = ~cache.getIfPresent(user.id) -- prevTextPromotion
val accept = prev.sizeIs < 3 && !prev.exists(promotions.contains)
if (!accept) logger.info(s"Promotion @${user.username} ${identify(text) mkString ", "}")
accept
}
}
def save(user: User, text: String): Unit = {
val promotions = extract(text)
if (promotions.nonEmpty) cache.put(user.id, ~cache.getIfPresent(user.id) ++ promotions)
}
private type Id = String
private val cache: Cache[User.ID, Set[Id]] =
lila.memo.CacheApi.scaffeineNoScheduler
.expireAfterAccess(24 hours)
.build[User.ID, Set[Id]]()
private lazy val regexes = List(
s"$domain/team/([\\\\w-]+)",
s"$domain/tournament/(\\\\w+)",
s"$domain/swiss/(\\\\w+)",
s"$domain/simul/(\\\\w+)",
s"$domain/study/(\\\\w+)",
s"$domain/class/(\\\\w+)",
"""(?:youtube\\.com|youtu\\.be)/(?:watch)?(?:\\?v=)?([^"&?/ ]{11})""",
"""youtube\\.com/channel/([\\w-]{24})""",
"""twitch\\.tv/([a-zA-Z0-9](?:\\w{2,24}+))"""
).map(_.r.unanchored)
private def extract(text: String): Set[Id] =
regexes
.flatMap(_ findAllMatchIn text)
.view
.flatMap { m =>
Option(m group 1)
}
.toSet
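  // A rough sketch of what `extract` pulls out of a message, assuming the configured
  // domain is "lichess.org" (values are illustrative only):
  //
  //   extract("Join https://lichess.org/team/my-team and https://youtu.be/dQw4w9WgXcQ")
  //   // would yield something like Set("my-team", "dQw4w9WgXcQ")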
private def identify(text: String): List[String] =
regexes.flatMap(_ findAllMatchIn text).map(_.matched)
}
|
luanlv/lila
|
modules/security/src/main/Promotion.scala
|
Scala
|
mit
| 1,806
|
package se.marcuslonnberg.scaladocker.remote.api
import java.io._
import org.kamranzafar.jtar.{TarEntry, TarOutputStream}
import scala.annotation.tailrec
object TarArchive {
def apply(inDir: File, out: File): File = {
val files = FileUtils.listFilesRecursive(inDir)
apply(files, out)
}
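  // A minimal usage sketch (paths are illustrative): archive every file found under a
  // build-context directory into a single tar file on disk.
  //
  //   val tar: File = TarArchive(new File("docker-context"), new File("docker-context.tar"))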
def apply(files: Map[String, File], outFile: File): File = {
val entries = files.toSeq.sortBy(_._1).map {
case (path, file) =>
createEntry(path, file)
}
apply(entries, outFile)
}
private[api] def apply(entries: Iterable[TarEntry], outFile: File): File = {
val tarFile = new FileOutputStream(outFile)
val tarStream = new TarOutputStream(new BufferedOutputStream(tarFile))
val buffer = Array.ofDim[Byte](2048)
def copyFile(file: File) = {
val fileStream = new BufferedInputStream(new FileInputStream(file))
@tailrec
def copy(input: InputStream) {
val len = input.read(buffer)
if (len != -1) {
tarStream.write(buffer, 0, len)
copy(input)
}
}
copy(fileStream)
fileStream.close()
}
entries.foreach { entry =>
tarStream.putNextEntry(entry)
if (entry.getFile.isFile) {
copyFile(entry.getFile)
}
tarStream.flush()
}
tarStream.close()
tarFile.close()
outFile
}
private[api] def createEntry(path: String, file: File) = {
val entry = new TarEntry(file, path)
entry.setUserName("")
entry.setGroupName("")
entry.setIds(0, 0)
entry.getHeader.mode = FileUtils.filePermissions(file)
entry
}
}
|
marcuslonnberg/scala-docker
|
src/main/scala/se/marcuslonnberg/scaladocker/remote/api/TarArchive.scala
|
Scala
|
mit
| 1,605
|
package service.sheet
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.poi.hssf.usermodel._
import org.apache.poi.ss.usermodel.{BorderStyle, FillPatternType, IndexedColors}
import org.apache.poi.ss.util.{CellRangeAddress, RegionUtil}
import scala.util.Try
sealed trait Fraction
object Fraction {
object Low extends Fraction
object Medium extends Fraction
object Large extends Fraction
object AutoFit extends Fraction
}
case class Row(value: String)
case class SheetHeader(left: String, center: String, right: String)
case class RowHeader(cols: List[(Row, Fraction)], backgroundColor: IndexedColors, repeating: Boolean)
case class Signature(text: String)
case class SheetFooter(left: String, showPageNumbers: Boolean)
case class Sheet(name: String, header: SheetHeader, rowHeader: RowHeader, rowContent: List[List[Row]], footer: SheetFooter)
object SheetService {
private def withSheet(name: String)(f: (HSSFSheet, HSSFWorkbook) => Unit): Try[ByteArrayOutputStream] = {
val book = HSSFWorkbookFactory.createWorkbook()
for {
sheet <- Try(book.createSheet(name))
_ = f(sheet, book)
stream = new ByteArrayOutputStream()
_ = book.write(stream)
_ = stream.close()
_ = book.close()
} yield stream
}
def createSheet(sheet: Sheet)(custom: (HSSFSheet) => Unit): Try[ByteArrayOutputStream] = withSheet(sheet.name) { (sheet0, book) =>
val headerStyle = book.createCellStyle()
val contentStyle = book.createCellStyle()
val font = book.createFont()
createHeader(sheet0, sheet.header)
createFooter(sheet0, sheet.footer)
val (headerCells, lastRow) = createRowHeader(sheet0, font, headerStyle, sheet.rowHeader)
applyBorders(headerStyle, headerCells)
val contentCells = createRows(sheet0, lastRow + 1, sheet.rowContent)
applyBorders(contentStyle, contentCells)
custom(sheet0)
applyWidth(sheet0, sheet)
}
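  // A minimal usage sketch built only from the case classes above (all values are
  // illustrative); the second argument allows further per-sheet customisation and can
  // be a no-op.
  //
  //   val sheet = Sheet(
  //     name = "Attendance",
  //     header = SheetHeader("Course", "Attendance list", "2020"),
  //     rowHeader = RowHeader(
  //       List(Row("Name") -> Fraction.Large, Row("Present") -> Fraction.Low),
  //       IndexedColors.GREY_25_PERCENT,
  //       repeating = true),
  //     rowContent = List(List(Row("Jane Doe"), Row("yes"))),
  //     footer = SheetFooter("Generated by SheetService", showPageNumbers = true))
  //   val bytes = SheetService.createSheet(sheet)(_ => ())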
private def applyWidth(sheet0: HSSFSheet, sheet: Sheet): Unit = {
def fractionValue(f: Fraction): Option[Double] = f match {
case Fraction.Low => Some(0.05)
case Fraction.Medium => Some(0.15)
case Fraction.Large => Some(0.25)
case Fraction.AutoFit => None
}
val maxCols = sheet.rowHeader.cols.size
val colWidth = 16 // TODO this magic number was determined by trial and error
val totalWidth = maxCols * colWidth
val cells = sheet.rowHeader.cols.map(t => fractionValue(t._2)).zipWithIndex
val widthSoFar = cells.filter(_._1.isDefined).foldLeft(0) {
case (acc, (f, i)) =>
val width = (totalWidth * f.get).toInt
sheet0.setColumnWidth(i, width * 256)
acc + width
}
val remainingCells = cells.filter(_._1.isEmpty)
val remainingWidth = (totalWidth - widthSoFar) / remainingCells.size
remainingCells.foreach {
case (_, i) =>
sheet0.setColumnWidth(i, remainingWidth * 256)
}
}
private def applyBorders(style: HSSFCellStyle, cells: List[HSSFCell]): Unit = {
val borderColor = IndexedColors.BLACK.getIndex
val borderStyle = BorderStyle.THIN
style.setBorderBottom(borderStyle)
style.setBottomBorderColor(borderColor)
style.setBorderLeft(borderStyle)
style.setLeftBorderColor(borderColor)
style.setBorderRight(borderStyle)
style.setRightBorderColor(borderColor)
style.setBorderTop(borderStyle)
style.setTopBorderColor(borderColor)
cells.foreach(_.setCellStyle(style))
}
private def createRowHeader(sheet: HSSFSheet, font: HSSFFont, style: HSSFCellStyle, header: RowHeader): (List[HSSFCell], Int) = {
font.setBold(true)
style.setFont(font)
style.setFillForegroundColor(header.backgroundColor.index)
style.setFillPattern(FillPatternType.SOLID_FOREGROUND)
val row = sheet.createRow(0)
val cells = header.cols.zipWithIndex.map {
case (col, index) =>
val cell = row.createCell(index)
cell.setCellValue(col._1.value)
cell.setCellStyle(style)
cell
}
if (header.repeating) {
sheet.setRepeatingRows(new CellRangeAddress(row.getRowNum, row.getRowNum, row.getRowNum, header.cols.size - 1))
}
cells -> row.getRowNum
}
private def createHeader(sheet: HSSFSheet, header: SheetHeader): Unit = {
sheet.getHeader.setLeft(header.left)
sheet.getHeader.setCenter(header.center)
sheet.getHeader.setRight(header.right)
}
private def createFooter(sheet: HSSFSheet, footer: SheetFooter): Unit = {
sheet.getFooter.setLeft(footer.left)
if (footer.showPageNumbers) {
sheet.getFooter.setRight(s"${HeaderFooter.page} / ${HeaderFooter.numPages}")
}
}
private def createRows(sheet: HSSFSheet, nextRow: Int, rows: List[List[Row]]): List[HSSFCell] = {
rows.zipWithIndex.flatMap {
      case (cols, rowIndex) =>
        val row = sheet.createRow(rowIndex + nextRow)
        cols.zipWithIndex.map {
case (col, colIndex) =>
val cell = row.createCell(colIndex)
cell.setCellValue(col.value)
cell
}
}
}
}
|
THK-ADV/lwm-reloaded
|
app/service/sheet/SheetService.scala
|
Scala
|
mit
| 5,077
|
package spinoco.protocol.mail.header
import scodec.Codec
import spinoco.protocol.mail.EmailAddress
import spinoco.protocol.mail.header.codec.{EmailAddressCodec, commaSeparated}
/**
* RFC 5322 3.6.2.
*
* The "From:" field specifies the author(s) of the message,
* that is, the mailbox(es) of the person(s) or system(s) responsible
* for the writing of the message.
*
*
* @param email Email of the person
 * @param others List of other authors of this email.
*
*/
case class From(
email: EmailAddress
, others: List[EmailAddress]
) extends DefaultEmailHeaderField
object From extends DefaultHeaderDescription[From] {
val codec: Codec[From] = {
commaSeparated(EmailAddressCodec.codec, fold = true).xmap(
From.apply _ tupled, from => (from.email, from.others)
)
}
}
|
Spinoco/protocol
|
mail/src/main/scala/spinoco/protocol/mail/header/From.scala
|
Scala
|
mit
| 819
|
package com.gilt.thehand.rules.typed
import com.gilt.thehand.rules.conversions.ConvertsToLong
import com.gilt.thehand.rules.comparison.LessThan
import com.gilt.thehand.rules.SingleValueRuleParser
/**
* A typed implementation of the LessThan trait, specific to Long. This can be used for any non-decimal datatype.
*/
case class LongLt(value: Long) extends LessThan with ConvertsToLong {
// This override is necessary because the native type Long is not Ordered (RichLong mixes this in).
override def matchInnerType(v: Long) = v < value
}
/**
* Use this to differentiate LessThan[Long] from other versions of LessThan.
*/
object LongLtParser extends SingleValueRuleParser[LongLt] {
def ruleConstructor(value: Long) = LongLt(value)
def toValue(value: String) = value.toLong
}
|
gilt/the-hand
|
src/main/scala/com/gilt/thehand/rules/typed/LongLt.scala
|
Scala
|
apache-2.0
| 788
|
/*
* Copyright 2012 Atlassian PTY LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kadai
package concurrent
import language.implicitConversions
import log.Logging
object IntRepeater {
implicit def AddRepeatSyntax(i: Int) = new IntRepeater(i)
}
class IntRepeater(i: Int) extends Logging {
import Logging._
require(i > 0, "Repeat amount must be > 0")
def times[A](x: => A) = {
@annotation.tailrec
def loop(last: A)(left: Int): A =
if (left <= 0) last
else loop(x)(left - 1)
loop(x)(i - 1)
}
/**
* Try executing a function n times.
* This ignores exceptions until the last try.
*/
def retries[A](f: => A): A =
retriesWith { _ => () }(f)
/**
* Try executing a function n times, executing a backoff strategy in between.
* This ignores exceptions until the last try.
*/
def retriesWith[A](backoff: Int => Unit)(f: => A): A = {
// compiler doesn't optimise tail recursion in catch clauses, hence the workaround using Option
@annotation.tailrec
def loop(t: Int): A = {
try Some(f)
catch {
        case e: Exception if t < i =>
withLog(s"retry-operation failed: ${e} attempt $t of max $i") {
None
}
}
} match {
case Some(a) => a
case None => backoff(t); loop(t + 1)
}
loop(1)
}
}
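// A minimal usage sketch via the implicit conversion in IntRepeater's companion
// object (the flaky `fetch()` call is hypothetical):
//
//   import kadai.concurrent.IntRepeater._
//   val result    = 3 retries { fetch() }                          // up to 3 attempts
//   val withPause = 3.retriesWith(n => Thread.sleep(100L * n)) { fetch() }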
|
simpleenergy/kadai
|
concurrent/src/main/scala/kadai/concurrent/IntRepeater.scala
|
Scala
|
apache-2.0
| 1,861
|
/*
* Copyright (C) 2013-2014 by Michael Hombre Brinkmann
*/
package net.twibs.util
import com.ibm.icu.util.ULocale
import net.twibs.testutil.TwibsTest
class SettingsTest extends TwibsTest {
test("Check version for run modes") {
val sys = SystemSettings.copy(runMode = RunMode.PRODUCTION)
sys.version should be(sys.majorVersion)
SystemSettings.version should be(sys.fullVersion)
}
SystemSettings.runMode.isTest shouldBe true
test("Default Mode") {
RunMode.isProduction shouldBe false
RunMode.isStaging shouldBe false
RunMode.isDevelopment shouldBe false
RunMode.isTest shouldBe true
}
test("Default loaded configuration is Default application") {
ApplicationSettings.translators(ULocale.GERMAN).translate("message", "") should be("Runmode test Host unknown User tester Lang German (test) App Default")
}
test("Configuration for production mode is default") {
SystemSettings.default.copy(runMode = RunMode.PRODUCTION).use {
ApplicationSettings.translators(ULocale.GERMAN).translate("message", "") should be("Runmode production Host unknown User tester Lang German App Default")
}
}
test("Configuration for host overrides and joins configuration") {
SystemSettings.default.copy(hostName = "twibs-test-host").use {
ApplicationSettings.translators(ULocale.GERMAN).translate("message", "") should be("Runmode test Host testhost User tester Lang German (test on testhost) App Default")
}
}
test("Different locales for applications") {
SystemSettings.default.use {
ApplicationSettings.locales should be(List(ULocale.GERMAN, ULocale.ENGLISH, ULocale.FRENCH))
SystemSettings.applicationSettings("t1").locales should be(List(ULocale.GERMAN))
SystemSettings.applicationSettings("t2").locales should be(List(SystemSettings.default.locale))
}
}
test("Find configured applications") {
SystemSettings.default.use {
SystemSettings.applicationSettings.values.map(_.name).toList.sorted should be(List("default", "t1", "t2"))
}
}
test("Find application settings by path") {
SystemSettings.default.use {
SystemSettings.applicationSettingsForPath("/content/t1").name should be("t1")
SystemSettings.applicationSettingsForPath("/content/t").name should be(ApplicationSettings.DEFAULT_NAME)
}
}
test("Validate context path") {
intercept[NullPointerException] {
Request.requireValidContextPath(null)
}
intercept[IllegalArgumentException] {
Request.requireValidContextPath("/")
}.getMessage should include("not be /")
intercept[IllegalArgumentException] {
Request.requireValidContextPath("nix")
}.getMessage should include("start with /")
intercept[IllegalArgumentException] {
Request.requireValidContextPath("/x x")
}.getMessage should include("invalid")
}
}
|
hombre/twibs
|
twibs-util-test/src/test/scala/net/twibs/util/SettingsTest.scala
|
Scala
|
apache-2.0
| 2,860
|
/*
* Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
* Copyright (C) 2017-2018 Alexis Seigneurin.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala
import java.util.Properties
import java.util.regex.Pattern
import org.apache.kafka.streams.kstream.GlobalKTable
import org.apache.kafka.streams.processor.{ProcessorSupplier, StateStore}
import org.apache.kafka.streams.state.StoreBuilder
import org.apache.kafka.streams.{Topology, StreamsBuilder => StreamsBuilderJ}
import org.apache.kafka.streams.scala.kstream._
import ImplicitConversions._
import scala.collection.JavaConverters._
/**
* Wraps the Java class StreamsBuilder and delegates method calls to the underlying Java object.
*/
class StreamsBuilder(inner: StreamsBuilderJ = new StreamsBuilderJ) {
/**
* Create a [[kstream.KStream]] from the specified topic.
* <p>
* The `implicit Consumed` instance provides the values of `auto.offset.reset` strategy, `TimestampExtractor`,
   * key and value deserializers etc. If the implicit is not found in scope, a compiler error will result.
* <p>
* A convenient alternative is to have the necessary implicit serdes in scope, which will be implicitly
* converted to generate an instance of `Consumed`. @see [[ImplicitConversions]].
* {{{
* // Brings all implicit conversions in scope
* import ImplicitConversions._
*
* // Bring implicit default serdes in scope
* import Serdes._
*
* val builder = new StreamsBuilder()
*
* // stream function gets the implicit Consumed which is constructed automatically
* // from the serdes through the implicits in ImplicitConversions#consumedFromSerde
* val userClicksStream: KStream[String, Long] = builder.stream(userClicksTopic)
* }}}
*
* @param topic the topic name
* @return a [[kstream.KStream]] for the specified topic
*/
def stream[K, V](topic: String)(implicit consumed: Consumed[K, V]): KStream[K, V] =
inner.stream[K, V](topic, consumed)
/**
* Create a [[kstream.KStream]] from the specified topics.
*
* @param topics the topic names
* @return a [[kstream.KStream]] for the specified topics
* @see #stream(String)
* @see `org.apache.kafka.streams.StreamsBuilder#stream`
*/
def stream[K, V](topics: Set[String])(implicit consumed: Consumed[K, V]): KStream[K, V] =
inner.stream[K, V](topics.asJava, consumed)
/**
* Create a [[kstream.KStream]] from the specified topic pattern.
*
* @param topicPattern the topic name pattern
* @return a [[kstream.KStream]] for the specified topics
* @see #stream(String)
* @see `org.apache.kafka.streams.StreamsBuilder#stream`
*/
def stream[K, V](topicPattern: Pattern)(implicit consumed: Consumed[K, V]): KStream[K, V] =
inner.stream[K, V](topicPattern, consumed)
/**
* Create a [[kstream.KTable]] from the specified topic.
* <p>
* The `implicit Consumed` instance provides the values of `auto.offset.reset` strategy, `TimestampExtractor`,
   * key and value deserializers etc. If the implicit is not found in scope, a compiler error will result.
* <p>
* A convenient alternative is to have the necessary implicit serdes in scope, which will be implicitly
* converted to generate an instance of `Consumed`. @see [[ImplicitConversions]].
* {{{
* // Brings all implicit conversions in scope
* import ImplicitConversions._
*
* // Bring implicit default serdes in scope
* import Serdes._
*
* val builder = new StreamsBuilder()
*
* // stream function gets the implicit Consumed which is constructed automatically
* // from the serdes through the implicits in ImplicitConversions#consumedFromSerde
* val userClicksStream: KTable[String, Long] = builder.table(userClicksTopic)
* }}}
*
* @param topic the topic name
* @return a [[kstream.KTable]] for the specified topic
* @see `org.apache.kafka.streams.StreamsBuilder#table`
*/
def table[K, V](topic: String)(implicit consumed: Consumed[K, V]): KTable[K, V] =
inner.table[K, V](topic, consumed)
/**
* Create a [[kstream.KTable]] from the specified topic.
*
* @param topic the topic name
* @param materialized the instance of `Materialized` used to materialize a state store
* @return a [[kstream.KTable]] for the specified topic
* @see #table(String)
* @see `org.apache.kafka.streams.StreamsBuilder#table`
*/
def table[K, V](topic: String, materialized: Materialized[K, V, ByteArrayKeyValueStore])(
implicit consumed: Consumed[K, V]
): KTable[K, V] =
inner.table[K, V](topic, consumed, materialized)
/**
* Create a `GlobalKTable` from the specified topic. The serializers from the implicit `Consumed`
* instance will be used. Input records with `null` key will be dropped.
*
* @param topic the topic name
* @return a `GlobalKTable` for the specified topic
* @see `org.apache.kafka.streams.StreamsBuilder#globalTable`
*/
def globalTable[K, V](topic: String)(implicit consumed: Consumed[K, V]): GlobalKTable[K, V] =
inner.globalTable(topic, consumed)
/**
* Create a `GlobalKTable` from the specified topic. The resulting `GlobalKTable` will be materialized
* in a local `KeyValueStore` configured with the provided instance of `Materialized`. The serializers
* from the implicit `Consumed` instance will be used.
*
* @param topic the topic name
* @param materialized the instance of `Materialized` used to materialize a state store
* @return a `GlobalKTable` for the specified topic
* @see `org.apache.kafka.streams.StreamsBuilder#globalTable`
*/
def globalTable[K, V](topic: String, materialized: Materialized[K, V, ByteArrayKeyValueStore])(
implicit consumed: Consumed[K, V]
): GlobalKTable[K, V] =
inner.globalTable(topic, consumed, materialized)
/**
* Adds a state store to the underlying `Topology`. The store must still be "connected" to a `Processor`,
* `Transformer`, or `ValueTransformer` before it can be used.
* <p>
* It is required to connect state stores to `Processor`, `Transformer`, or `ValueTransformer` before they can be used.
*
* @param builder the builder used to obtain this state store `StateStore` instance
* @return the underlying Java abstraction `StreamsBuilder` after adding the `StateStore`
* @throws org.apache.kafka.streams.errors.TopologyException if state store supplier is already added
* @see `org.apache.kafka.streams.StreamsBuilder#addStateStore`
*/
def addStateStore(builder: StoreBuilder[_ <: StateStore]): StreamsBuilderJ = inner.addStateStore(builder)
/**
* Adds a global `StateStore` to the topology. Global stores should not be added to `Processor`, `Transformer`,
* or `ValueTransformer` (in contrast to regular stores).
* <p>
* It is not required to connect a global store to `Processor`, `Transformer`, or `ValueTransformer`;
* those have read-only access to all global stores by default.
*
* @see `org.apache.kafka.streams.StreamsBuilder#addGlobalStore`
*/
def addGlobalStore(storeBuilder: StoreBuilder[_ <: StateStore],
topic: String,
consumed: Consumed[_, _],
stateUpdateSupplier: ProcessorSupplier[_, _]): StreamsBuilderJ =
inner.addGlobalStore(storeBuilder, topic, consumed, stateUpdateSupplier)
def build(): Topology = inner.build()
/**
* Returns the `Topology` that represents the specified processing logic and accepts
* a `Properties` instance used to indicate whether to optimize topology or not.
*
* @param props the `Properties` used for building possibly optimized topology
* @return the `Topology` that represents the specified processing logic
* @see `org.apache.kafka.streams.StreamsBuilder#build`
*/
def build(props: Properties): Topology = inner.build(props)
}
|
gf53520/kafka
|
streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/StreamsBuilder.scala
|
Scala
|
apache-2.0
| 8,638
|
package model.repositories.anorm
import anorm.SqlParser._
import anorm._
import model.dtos._
object ConsFrequencyPerOrganizationParser{
val Parse: RowParser[ConsFrequencyPerOrganization] = {
str("date") ~
str("organizationName") ~
long("organizationId") ~
int("numberOfConsultations") ~
get[Option[String]]("groupTitle") ~
str("cons_ids")map
{
case date ~ organizationName ~ organizationId ~ numberOfConsultations ~ groupTitle ~ cons_ids =>
new ConsFrequencyPerOrganization(date, organizationName, organizationId, numberOfConsultations, groupTitle, cons_ids)
}
}
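  // A minimal usage sketch with anorm (the SQL text and the implicit Connection are
  // assumptions, not part of this file):
  //
  //   SQL("select ... from consultations ...").as(ConsFrequencyPerOrganizationParser.Parse.*)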
}
|
scify/DemocracIT-Web
|
app/model/repositories/anorm/ConsFrequencyPerOrganizationParser.scala
|
Scala
|
apache-2.0
| 633
|
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.model.api
import org.scalatra.swagger.annotations._
import org.scalatra.swagger.runtime.annotations.ApiModelProperty
import scala.annotation.meta.field
// format: off
@ApiModel(description = "Information about a learningstep")
case class LearningStepV2(
@(ApiModelProperty @field)(description = "The id of the learningstep") id: Long,
@(ApiModelProperty @field)(description = "The revision number for this learningstep") revision: Int,
@(ApiModelProperty @field)(description = "The sequence number for the step. The first step has seqNo 0.") seqNo: Int,
@(ApiModelProperty @field)(description = "The title of the learningstep") title: Title,
@(ApiModelProperty @field)(description = "The description of the learningstep") description: Option[Description],
@(ApiModelProperty @field)(description = "The embed content for the learningstep") embedUrl: Option[EmbedUrlV2],
@(ApiModelProperty @field)(description = "Determines if the title of the step should be displayed in viewmode") showTitle: Boolean,
@(ApiModelProperty @field)(description = "The type of the step", allowableValues = "INTRODUCTION,TEXT,QUIZ,TASK,MULTIMEDIA,SUMMARY,TEST") `type`: String,
@(ApiModelProperty @field)(description = "Describes the copyright information for the learningstep") license: Option[License],
@(ApiModelProperty @field)(description = "The full url to where the complete metainformation about the learningstep can be found") metaUrl: String,
@(ApiModelProperty @field)(description = "True if authenticated user may edit this learningstep") canEdit: Boolean,
@(ApiModelProperty @field)(description = "The status of the learningstep", allowableValues = "ACTIVE,DELETED") status: String,
@(ApiModelProperty @field)(description = "The supported languages of the learningstep") supportedLanguages: Seq[String]
)
|
NDLANO/learningpath-api
|
src/main/scala/no/ndla/learningpathapi/model/api/LearningStep.scala
|
Scala
|
gpl-3.0
| 1,972
|
package im.actor.server
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.util.Timeout
import cats.data.Xor
import eu.codearte.jfairy.Fairy
import im.actor.api.rpc.ClientData
import im.actor.api.rpc.auth.AuthService
import im.actor.api.rpc.peers.{ ApiOutPeer, ApiPeerType, ApiUserOutPeer }
import im.actor.api.rpc.users.ApiUser
import im.actor.server.api.rpc.RpcApiExtension
import im.actor.server.api.rpc.service.auth.AuthServiceImpl
import im.actor.server.oauth.GoogleProvider
import im.actor.server.persist.{ AuthCodeRepo, AuthSessionRepo }
import im.actor.server.session.{ Session, SessionConfig, SessionRegion }
import im.actor.server.user.UserExtension
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ Inside, Suite }
import slick.driver.PostgresDriver.api._
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.Random
trait PersistenceHelpers {
implicit val timeout = Timeout(5.seconds)
def getUserModel(userId: Int)(implicit db: Database) = Await.result(db.run(persist.UserRepo.find(userId)), timeout.duration).get
}
trait UserStructExtensions {
implicit class ExtUser(user: ApiUser) {
def asModel()(implicit db: Database): model.User =
Await.result(db.run(persist.UserRepo.find(user.id)), 3.seconds).get
}
}
trait ServiceSpecHelpers extends PersistenceHelpers with UserStructExtensions with ScalaFutures with Inside {
this: Suite ⇒
protected val system: ActorSystem
protected val fairy = Fairy.create()
//private implicit val patienceConfig = PatienceConfig(Span(10, Seconds))
def buildPhone(): Long = {
75550000000L + scala.util.Random.nextInt(999999)
}
def buildEmail(at: String = ""): String = {
val email = fairy.person().email()
if (at.isEmpty) email else email.substring(0, email.lastIndexOf("@")) + s"@$at"
}
def createAuthId()(implicit db: Database): Long = {
val authId = scala.util.Random.nextLong()
Await.result(db.run(persist.AuthIdRepo.create(authId, None, None)), 1.second)
authId
}
def createAuthId(userId: Int)(implicit ec: ExecutionContext, system: ActorSystem, db: Database, service: AuthService): (Long, Int) = {
val authId = scala.util.Random.nextLong()
Await.result(db.run(persist.AuthIdRepo.create(authId, None, None)), 1.second)
implicit val clientData = ClientData(authId, scala.util.Random.nextLong(), None)
val phoneNumber = Await.result(db.run(persist.UserPhoneRepo.findByUserId(userId)) map (_.head.number), 1.second)
val txHash = whenReady(service.handleStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 42,
apiKey = "appKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs virtual device",
timeZone = None,
preferredLanguages = Vector.empty
))(_.toOption.get.transactionHash)
val code = whenReady(db.run(AuthCodeRepo.findByTransactionHash(txHash)))(_.get.code)
val res = Await.result(service.handleValidateCode(txHash, code), 5.seconds)
res match {
case Xor.Right(rsp) ⇒ rsp
case Xor.Left(e) ⇒ fail(s"Got RpcError ${e}")
}
(authId, Await.result(db.run(AuthSessionRepo.findByAuthId(authId)), 5.seconds).get.id)
}
def createSessionId(): Long =
scala.util.Random.nextLong()
def createUser()(implicit service: AuthService, db: Database, system: ActorSystem): (ApiUser, Long, Int, Long) = {
val authId = createAuthId()
val phoneNumber = buildPhone()
val (user, authSid) = createUser(authId, phoneNumber)
(user, authId, authSid, phoneNumber)
}
def createUser(phoneNumber: Long)(implicit service: AuthService, system: ActorSystem, db: Database): (ApiUser, Int) =
createUser(createAuthId(), phoneNumber)
def getOutPeer(userId: Int, clientAuthId: Long): ApiOutPeer = {
val accessHash = Await.result(UserExtension(system).getAccessHash(userId, clientAuthId), 5.seconds)
ApiOutPeer(ApiPeerType.Private, userId, accessHash)
}
def getUserOutPeer(userId: Int, clientAuthId: Long): ApiUserOutPeer = {
val outPeer = getOutPeer(userId, clientAuthId)
ApiUserOutPeer(outPeer.id, outPeer.accessHash)
}
//TODO: make same method to work with email
def createUser(authId: Long, phoneNumber: Long)(implicit service: AuthService, system: ActorSystem, db: Database): (ApiUser, Int) =
withoutLogs {
implicit val clientData = ClientData(authId, Random.nextLong(), None)
val txHash = whenReady(service.handleStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 42,
apiKey = "appKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))(_.toOption.get.transactionHash)
val code = whenReady(db.run(AuthCodeRepo.findByTransactionHash(txHash)))(_.get.code)
whenReady(service.handleValidateCode(txHash, code))(_ ⇒ ())
val user = whenReady(service.handleSignUp(txHash, fairy.person().fullName(), None, None))(_.toOption.get.user)
val authSid = whenReady(db.run(AuthSessionRepo.findByAuthId(authId)))(_.get.id)
(user, authSid)
}
def buildRpcApiService(services: Seq[im.actor.api.rpc.Service])(implicit system: ActorSystem, db: Database) =
RpcApiExtension(system).register(services)
protected def withoutLogs[A](f: ⇒ A)(implicit system: ActorSystem): A = {
val logger = org.slf4j.LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[ch.qos.logback.classic.Logger]
val logLevel = logger.getLevel
val esLogLevel = system.eventStream.logLevel
logger.setLevel(ch.qos.logback.classic.Level.WARN)
system.eventStream.setLogLevel(akka.event.Logging.WarningLevel)
val res = f
logger.setLevel(logLevel)
system.eventStream.setLogLevel(esLogLevel)
res
}
protected def futureSleep(delay: Long)(implicit ec: ExecutionContext): Future[Unit] = Future { blocking { Thread.sleep(delay) } }
}
|
ufosky-server/actor-platform
|
actor-server/actor-testkit/src/main/scala/im/actor/server/ServiceSpecHelpers.scala
|
Scala
|
agpl-3.0
| 6,013
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.mkldnn
import com.intel.analytics.bigdl.mkl.{AlgKind, Memory}
import com.intel.analytics.bigdl.nn.mkldnn.Phase.TrainingPhase
import com.intel.analytics.bigdl.nn.{SpatialAveragePooling, SpatialMaxPooling}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.BigDLSpecHelper
import com.intel.analytics.bigdl.utils.RandomGenerator.RNG
import org.apache.commons.lang3.SerializationUtils
import scala.util.Random
class MaxPoolingSpec extends BigDLSpecHelper {
"Max Pooling test1" should "be correct" in {
val batchSize = 2
val input = Tensor[Float](batchSize, 480, 28, 28).apply1(e => Random.nextFloat())
RNG.setSeed(100)
val pool = MaxPooling(3, 3, 2, 2)
RNG.setSeed(100)
val layer = SpatialMaxPooling[Float](3, 3, 2, 2).ceil()
val output2 = layer.forward(input).toTensor[Float]
val seq = Sequential()
seq.add(ReorderMemory(HeapData(Array(batchSize, 480, 28, 28), Memory.Format.nchw),
HeapData(Array(batchSize, 480, 28, 28), Memory.Format.nchw)))
seq.add(pool)
seq.add(ReorderMemory(HeapData(Array(batchSize, 480, 14, 14), Memory.Format.nchw),
HeapData(Array(batchSize, 480, 14, 14), Memory.Format.nchw)))
seq.compile(Phase.TrainingPhase, Array(HeapData(Array(batchSize, 480, 28, 28),
Memory.Format.nchw)))
val output1 = seq.forward(input)
output1 should be(output2)
val grad2 = layer.backward(input, output2).toTensor[Float]
val grad1 = seq.backward(input, output2)
grad1 should be(grad2)
}
"Max Pooling test2" should "be correct" in {
val batchSize = 2
val input = Tensor[Float](batchSize, 64, 112, 112).apply1(e => Random.nextFloat())
RNG.setSeed(100)
val pool = MaxPooling(3, 3, 2, 2)
RNG.setSeed(100)
val layer = SpatialMaxPooling[Float](3, 3, 2, 2).ceil()
val output2 = layer.forward(input).toTensor[Float]
val seq = Sequential()
seq.add(ReorderMemory(HeapData(Array(batchSize, 64, 112, 112), Memory.Format.nchw),
HeapData(Array(batchSize, 64, 112, 112), Memory.Format.nchw)))
seq.add(pool)
seq.add(ReorderMemory(HeapData(Array(batchSize, 64, 56, 56), Memory.Format.nchw),
HeapData(Array(batchSize, 64, 56, 56), Memory.Format.nchw)))
seq.compile(Phase.TrainingPhase, Array(HeapData(Array(batchSize, 64, 112, 112),
Memory.Format.nchw)))
val output1 = seq.forward(input)
output1 should be(output2)
val grad2 = layer.backward(input, output2).toTensor[Float]
val grad1 = seq.backward(input, output2)
grad1 should be(grad2)
}
"max pooling with java serialization" should "be correct" in {
val batchSize = 2
val inputShape = Array(batchSize, 64, 112, 112)
val outputShape = Array(batchSize, 64, 56, 56)
val input = Tensor[Float](batchSize, 64, 112, 112).rand(-1, 1)
val pool = MaxPooling(3, 3, 2, 2)
pool.setRuntime(new MklDnnRuntime)
pool.initFwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
pool.initBwdPrimitives(Array(HeapData(outputShape, Memory.Format.nchw)), TrainingPhase)
pool.initGradWPrimitives(Array(HeapData(outputShape, Memory.Format.nchw)), TrainingPhase)
val cloned = SerializationUtils.clone(pool)
cloned.setRuntime(new MklDnnRuntime)
cloned.initFwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
cloned.initBwdPrimitives(Array(HeapData(outputShape, Memory.Format.nchw)), TrainingPhase)
cloned.initGradWPrimitives(Array(HeapData(outputShape, Memory.Format.nchw)), TrainingPhase)
pool.forward(input)
cloned.forward(input)
Tools.dense(pool.output) should be (Tools.dense(cloned.output))
val gradOutput = Tensor[Float](outputShape).rand(-1, 1)
pool.backward(input, gradOutput)
cloned.backward(input, gradOutput)
Tools.dense(pool.gradInput) should be (Tools.dense(cloned.gradInput))
}
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/mkldnn/MaxPoolingSpec.scala
|
Scala
|
apache-2.0
| 4,511
|
package graphique.backends.localbackend.imageserver
import java.nio.file.Path
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.io.IO
import spray.can.Http
private[localbackend] class ImageServerManager(hostname: String, port: Int, imagePath: Path)
extends Actor with ActorLogging {
require(port >= 0)
import graphique.backends.localbackend.imageserver.ImageServerManager._
var imageServer: Option[ActorRef] = None
var sprayCanHttpListener: Option[ActorRef] = None
def receive: Receive = {
case Start =>
// Stop any running server first
sprayCanHttpListener foreach { listener =>
listener ! Http.Unbind
}
// Create a new managed ImageServer instance
sprayCanHttpListener = None
imageServer = Some(context actorOf(Props(new ImageServer(imagePath)), "Listener"))
IO(Http)(context.system) ! Http.Bind(imageServer.get, interface = hostname, port = port)
case Stop =>
IO(Http)(context.system) ! Http.Unbind
case Http.CommandFailed(command: Http.Bind) =>
throw new IllegalStateException(s"Failed to launch a local HTTP image server on port $port")
case Http.Bound(_) =>
sprayCanHttpListener = Some(sender) // The sender is used later for stopping
case message =>
log warning s"Received unexpected message: $message"
}
}
private[localbackend] object ImageServerManager {
/**
* Instructs the HttpServer to start serving the images.
*/
case object Start
/**
* Instructs the HttpServer to shut down.
*/
case object Stop
}
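// A minimal usage sketch (actor system, port and path are illustrative; assumes
// java.nio.file.Paths is imported by the caller):
//
//   val manager = system.actorOf(
//     Props(new ImageServerManager("localhost", 8080, Paths.get("/tmp/images"))),
//     "image-server-manager")
//   manager ! ImageServerManager.Start
//   // ... later, to shut the server down:
//   manager ! ImageServerManager.Stop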
|
amrhassan/graphique
|
src/main/scala/graphique/backends/localbackend/imageserver/ImageServerManager.scala
|
Scala
|
mit
| 1,575
|