code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package fr.thomasdufour.autodiff
package derived
import cats.Order
import cats.Order.catsKernelOrderingForOrder
import cats.data.Chain
import cats.data.NonEmptyChain
import cats.data.NonEmptyList
import cats.data.NonEmptyVector
import cats.data.Validated
import com.github.ghik.silencer.silent
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.Matchers
import org.scalatest.WordSpec
import scala.collection.immutable.HashSet
import scala.collection.immutable.ListSet
import scala.collection.immutable.Queue
import scala.collection.immutable.TreeSet
/**
 * Verifies that `semi.diff` derivation handles case classes that recurse through a
 * standard-library (or cats) container: for each container the derived instance must
 * delegate to that container's dedicated `Diff` rather than deriving blindly.
 *
 * Expected diff trees are built with the `DiffOps` test DSL (see that object for the
 * authoritative semantics):
 *  - `F(name, field -> child, ...)` pairs a case-class name with per-field diffs,
 *  - `T(tag, name, child)` wraps a child difference with a tag (coproduct/tuple/set),
 *  - `I(tag, name, index -> child, ...)` is an indexed difference for sequences/tuples,
 *  - `U(mismatch, matches)` describes an unordered (set) difference,
 *  - `a !== b` is a leaf value mismatch (with `"<end>"` marking a missing element).
 */
class RecursiveParametricStdSpec extends WordSpec with Matchers with TypeCheckedTripleEquals {
  import RecursiveParametricStdSpec._
  import DiffOps._

  "Deriving a Diff for a recursive case class" which {
    "has recursion under a Tuple" should {
      val diff: Diff[TupleRec] = {
        import auto._
        semi.diff
      }
      "use the provided Tuple Diff" in {
        diff(
          TupleRec( Some( ( TupleRec( None ), TupleRec( Some( ( TupleRec( None ), TupleRec( None ) ) ) ) ) ) ),
          TupleRec( Some( ( TupleRec( None ), TupleRec( None ) ) ) )
        ).tree should ===(
          F(
            "TupleRec",
            "rec" -> T(
              T.Coproduct,
              "Option",
              I(
                T.Tuple,
                "Tuple2",
                2 -> F(
                  "TupleRec",
                  "rec" -> T( T.Coproduct, "Option", "Some((TupleRec(rec: None), TupleRec(rec: None)))" !== "None" )
                )
              )
            )
          )
        )
      }
    }

    "has recursion under an Option" should {
      val diff: Diff[OptionRec] = {
        import auto._
        semi.diff
      }
      "use the provided Option diff" in {
        diff(
          OptionRec( Some( OptionRec( None ) ) ),
          OptionRec( Some( OptionRec( Some( OptionRec( None ) ) ) ) )
        ).tree should ===(
          F(
            "OptionRec",
            "rec" -> T(
              T.Coproduct,
              "Option",
              F( "OptionRec", "rec" -> T( T.Coproduct, "Option", "None" !== "Some(OptionRec(rec: None))" ) )
            )
          )
        )
      }
    }

    "has recursion under an Either" should {
      val diff: Diff[EitherRec] = {
        import auto._
        semi.diff
      }
      "use the provided either diff" in {
        diff(
          EitherRec( Right( EitherRec( Left( "foo" ) ) ) ),
          EitherRec( Right( EitherRec( Left( "bar" ) ) ) )
        ).tree should ===(
          F(
            "EitherRec",
            "rec" -> T( T.Coproduct, "Right", F( "EitherRec", "rec" -> T( T.Coproduct, "Left", "foo" !== "bar" ) ) )
          )
        )
      }
    }

    "has recursion under a Validated" should {
      val diff: Diff[ValidatedRec] = {
        import auto._
        semi.diff
      }
      // Test name fixed: this section exercises the Validated diff, not the Either one
      // (the previous name was a copy-paste of the Either section's).
      "use the provided Validated diff" in {
        diff(
          ValidatedRec( Validated.Valid( ValidatedRec( Validated.Invalid( "foo" ) ) ) ),
          ValidatedRec( Validated.Valid( ValidatedRec( Validated.Invalid( "bar" ) ) ) )
        ).tree should ===(
          F(
            "ValidatedRec",
            "rec" -> T(
              T.Coproduct,
              "Valid",
              F( "ValidatedRec", "rec" -> T( T.Coproduct, "Invalid", "foo" !== "bar" ) )
            )
          )
        )
      }
    }

    "has recursion under a List" should {
      val diff: Diff[ListRec] = {
        import auto._
        semi.diff
      }
      "use the provided List diff" in {
        diff(
          ListRec( ListRec( Nil ) :: ListRec( ListRec( Nil ) :: Nil ) :: Nil ),
          ListRec( ListRec( Nil ) :: ListRec( Nil ) :: Nil )
        ).tree should ===(
          F(
            "ListRec",
            "rec" -> I(
              T.Seq,
              "List",
              1 -> F( "ListRec", "rec" -> I( T.Seq, "List", 0 -> ("ListRec(rec: List())" !== "<end>") ) )
            )
          )
        )
      }
    }

    "has recursion under a Queue" should {
      val diff: Diff[QueueRec] = {
        import auto._
        semi.diff
      }
      val empty = Queue.empty
      "use the provided Queue diff" in {
        diff(
          QueueRec( Queue( QueueRec( empty ), QueueRec( Queue( QueueRec( empty ) ) ) ) ),
          QueueRec( Queue( QueueRec( empty ), QueueRec( empty ) ) )
        ).tree should ===(
          F(
            "QueueRec",
            "rec" -> I(
              T.Seq,
              "Queue",
              1 -> F( "QueueRec", "rec" -> I( T.Seq, "Queue", 0 -> ("QueueRec(rec: Queue())" !== "<end>") ) )
            )
          )
        )
      }
    }

    // Stream is deprecated in recent Scala versions; the whole group is wrapped in
    // @silent so it keeps compiling without deprecation warnings.
    "has recursion under a Stream" should ({
      val diff: Diff[StreamRec] = {
        import auto._
        semi.diff
      }
      val empty = Stream.empty
      "use the provided Stream diff" in {
        diff(
          StreamRec( Stream( StreamRec( empty ), StreamRec( Stream( StreamRec( empty ) ) ) ) ),
          StreamRec( Stream( StreamRec( empty ), StreamRec( empty ) ) )
        ).tree should ===(
          F(
            "StreamRec",
            "rec" -> I(
              T.Seq,
              "Stream",
              1 -> F( "StreamRec", "rec" -> I( T.Seq, "Stream", 0 -> ("StreamRec(rec: Stream())" !== "<end>") ) )
            )
          )
        )
      }
    }: @silent( "deprecated" ))

    "has recursion under a Vector" should {
      val diff: Diff[VectorRec] = {
        import auto._
        semi.diff
      }
      val empty = Vector.empty
      "use the provided Vector diff" in {
        diff(
          VectorRec( Vector( VectorRec( empty ), VectorRec( Vector( VectorRec( empty ) ) ) ) ),
          VectorRec( Vector( VectorRec( empty ), VectorRec( empty ) ) )
        ).tree should ===(
          F(
            "VectorRec",
            "rec" -> I(
              T.Seq,
              "Vector",
              1 -> F( "VectorRec", "rec" -> I( T.Seq, "Vector", 0 -> ("VectorRec(rec: Vector())" !== "<end>") ) )
            )
          )
        )
      }
    }

    // Test name fixed: "a Array" -> "an Array".
    "has recursion under an Array" should {
      val diff: Diff[ArrayRec] = {
        import auto._
        semi.diff
      }
      val empty = Array.empty[ArrayRec]
      "use the provided Array diff" in {
        diff(
          ArrayRec( Array( ArrayRec( empty ), ArrayRec( Array( ArrayRec( empty ) ) ) ) ),
          ArrayRec( Array( ArrayRec( empty ), ArrayRec( empty ) ) )
        ).tree should ===(
          F(
            "ArrayRec",
            "rec" -> I(
              T.Seq,
              "Array",
              1 -> F( "ArrayRec", "rec" -> I( T.Seq, "Array", 0 -> ("ArrayRec(rec: Array())" !== "<end>") ) )
            )
          )
        )
      }
    }

    "has recursion under a Set" should {
      val diff: Diff[SetRec] = {
        import auto._
        semi.diff
      }
      val empty = Set.empty[SetRec]
      "use the provided Set diff" in {
        diff(
          SetRec( Set( SetRec( empty ), SetRec( Set( SetRec( empty ) ) ) ) ),
          SetRec( Set( SetRec( empty ) ) )
        ).tree should ===(
          F(
            "SetRec",
            "rec" -> T( T.Set, "Set", U( Some( "SetRec(rec: { SetRec(rec: { }) })" !== "" ), Nil ) )
          )
        )
      }
    }

    "has recursion under a ListSet" should {
      val diff: Diff[ListSetRec] = {
        import auto._
        semi.diff
      }
      val empty = ListSet.empty[ListSetRec]
      "use the provided ListSet diff" in {
        diff(
          ListSetRec( ListSet( ListSetRec( empty ), ListSetRec( ListSet( ListSetRec( empty ) ) ) ) ),
          ListSetRec( ListSet( ListSetRec( empty ) ) )
        ).tree should ===(
          F(
            "ListSetRec",
            "rec" -> T( T.Set, "ListSet", U( Some( "ListSetRec(rec: { ListSetRec(rec: { }) })" !== "" ), Nil ) )
          )
        )
      }
    }

    "has recursion under a HashSet" should {
      val diff: Diff[HashSetRec] = {
        import auto._
        semi.diff
      }
      val empty = HashSet.empty[HashSetRec]
      "use the provided HashSet diff" in {
        diff(
          HashSetRec( HashSet( HashSetRec( empty ), HashSetRec( HashSet( HashSetRec( empty ) ) ) ) ),
          HashSetRec( HashSet( HashSetRec( empty ) ) )
        ).tree should ===(
          F(
            "HashSetRec",
            "rec" -> T( T.Set, "HashSet", U( Some( "HashSetRec(rec: { HashSetRec(rec: { }) })" !== "" ), Nil ) )
          )
        )
      }
    }

    // TreeSet additionally requires an Ordering, provided by TreeSetRec's cats Order
    // via catsKernelOrderingForOrder (imported at the top of the file).
    "has recursion under a TreeSet" should {
      val diff: Diff[TreeSetRec] = {
        import auto._
        semi.diff
      }
      val empty = TreeSet.empty[TreeSetRec]
      "use the provided TreeSet diff" in {
        diff(
          TreeSetRec( TreeSet( TreeSetRec( empty ), TreeSetRec( TreeSet( TreeSetRec( empty ) ) ) ) ),
          TreeSetRec( TreeSet( TreeSetRec( empty ) ) )
        ).tree should ===(
          F(
            "TreeSetRec",
            "rec" -> T( T.Set, "TreeSet", U( Some( "TreeSetRec(rec: { TreeSetRec(rec: { }) })" !== "" ), Nil ) )
          )
        )
      }
    }

    "has recursion under a Chain" should {
      val diff: Diff[ChainRec] = {
        import auto._
        semi.diff
      }
      val empty = Chain.empty
      "use the provided Chain diff" in {
        diff(
          ChainRec( Chain( ChainRec( empty ), ChainRec( Chain( ChainRec( empty ) ) ) ) ),
          ChainRec( Chain( ChainRec( empty ), ChainRec( empty ) ) )
        ).tree should ===(
          F(
            "ChainRec",
            "rec" -> I(
              T.Seq,
              "Chain",
              1 -> F( "ChainRec", "rec" -> I( T.Seq, "Chain", 0 -> ("ChainRec(rec: Chain())" !== "<end>") ) )
            )
          )
        )
      }
    }

    "has recursion under a NonEmptyChain" should {
      val diff: Diff[NecRec] = {
        import auto._
        semi.diff
      }
      "use the provided NonEmptyChain diff" in {
        diff(
          NecRec( Some( NonEmptyChain( NecRec( None ), NecRec( Some( NonEmptyChain( NecRec( None ) ) ) ) ) ) ),
          NecRec( Some( NonEmptyChain( NecRec( None ), NecRec( None ) ) ) )
        ).tree should ===(
          F(
            "NecRec",
            "rec" ->
              T(
                T.Coproduct,
                "Option",
                I(
                  T.Seq,
                  "NonEmptyChain",
                  1 -> F(
                    "NecRec",
                    "rec" ->
                      T( T.Coproduct, "Option", "Some(NonEmptyChain(NecRec(rec: None)))" !== "None" )
                  )
                )
              )
          )
        )
      }
    }

    "has recursion under a NonEmptyList" should {
      val diff: Diff[NelRec] = {
        import auto._
        semi.diff
      }
      "use the provided NonEmptyList diff" in {
        diff(
          NelRec( Some( NonEmptyList.of( NelRec( None ), NelRec( Some( NonEmptyList.of( NelRec( None ) ) ) ) ) ) ),
          NelRec( Some( NonEmptyList.of( NelRec( None ), NelRec( None ) ) ) )
        ).tree should ===(
          F(
            "NelRec",
            "rec" ->
              T(
                T.Coproduct,
                "Option",
                I(
                  T.Seq,
                  "NonEmptyList",
                  1 -> F(
                    "NelRec",
                    "rec" ->
                      T( T.Coproduct, "Option", "Some(NonEmptyList(NelRec(rec: None)))" !== "None" )
                  )
                )
              )
          )
        )
      }
    }

    "has recursion under a NonEmptyVector" should {
      val diff: Diff[NevRec] = {
        import auto._
        semi.diff
      }
      "use the provided NonEmptyVector diff" in {
        diff(
          NevRec( Some( NonEmptyVector.of( NevRec( None ), NevRec( Some( NonEmptyVector.of( NevRec( None ) ) ) ) ) ) ),
          NevRec( Some( NonEmptyVector.of( NevRec( None ), NevRec( None ) ) ) )
        ).tree should ===(
          F(
            "NevRec",
            "rec" ->
              T(
                T.Coproduct,
                "Option",
                I(
                  T.Seq,
                  "NonEmptyVector",
                  1 -> F(
                    "NevRec",
                    "rec" ->
                      T( T.Coproduct, "Option", "Some(NonEmptyVector(NevRec(rec: None)))" !== "None" )
                  )
                )
              )
          )
        )
      }
    }
  }
  // TODO: a bunch more
}
object RecursiveParametricStdSpec {
  // One fixture type per container under test; each recurses through exactly one
  // container so the derived Diff must go through that container's instance.
  case class TupleRec( rec: Option[( TupleRec, TupleRec )] )
  case class OptionRec( rec: Option[OptionRec] )
  case class EitherRec( rec: Either[String, EitherRec] )
  case class ValidatedRec( rec: Validated[String, ValidatedRec] )
  case class ListRec( rec: List[ListRec] )
  case class QueueRec( rec: Queue[QueueRec] )
  @silent( "deprecated" )
  case class StreamRec( rec: Stream[StreamRec] )
  case class VectorRec( rec: Vector[VectorRec] )
  // NOTE(review): Array uses reference equality/hashCode, so ArrayRec's generated
  // equals compares the arrays by reference — acceptable here since the tests only
  // exercise diffing, never case-class equality.
  case class ArrayRec( rec: Array[ArrayRec] )
  case class SetRec( rec: Set[SetRec] )
  case class ListSetRec( rec: ListSet[ListSetRec] )
  case class HashSetRec( rec: HashSet[HashSetRec] )
  case class TreeSetRec( rec: TreeSet[TreeSetRec] )
  object TreeSetRec {
    // TreeSet needs an Ordering for its elements; this cats Order (bridged via
    // catsKernelOrderingForOrder imported in the spec file) orders smaller sets
    // first, then compares element-by-element (elements come out of a TreeSet in
    // sorted order, so this is effectively lexicographic).
    implicit val treeSetRecOrder: Order[TreeSetRec] =
      new Order[TreeSetRec] {
        override def compare( x: TreeSetRec, y: TreeSetRec ): Int = {
          val cmpSize = x.rec.size.compareTo( y.rec.size )
          if (cmpSize != 0)
            cmpSize
          else
            // Recursive element comparison; the iterator pipeline is lazy, so the
            // scan stops at the first differing pair.
            x.rec.iterator
              .zip( y.rec.iterator )
              .map( (compare _).tupled )
              .find( _ != 0 )
              .getOrElse( 0 )
        }
      }
  }
  case class ChainRec( rec: Chain[ChainRec] )
  case class NecRec( rec: Option[NonEmptyChain[NecRec]] )
  case class NelRec( rec: Option[NonEmptyList[NelRec]] )
  case class NevRec( rec: Option[NonEmptyVector[NevRec]] )
}
| chwthewke/auto-diff | auto-diff-tests/src/test/scala/fr/thomasdufour/autodiff/derived/RecursiveParametricStdSpec.scala | Scala | apache-2.0 | 13,958 |
package com.github.andr83.parsek.serde
import com.github.andr83.parsek._
import com.github.andr83.parsek.formatter.DateFormatter
import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._
import org.json4s._
import org.json4s.jackson.JsonMethods.{compact, parse => jsonParse, render}
/**
* @author andr83
*/
/**
 * SerDe implementation that reads and writes records as JSON.
 *
 * @param fields        optional whitelist of top-level keys; when defined, `write`
 *                      expects the value to be a PMap and serializes only those keys
 * @param timeFormatter formatter applied to PDate values before rendering
 * @author andr83
 */
case class JsonSerDe(
  fields: Option[FieldPath],
  timeFormatter: DateFormatter
) extends SerDe {

  /** Config-based constructor: reads the optional "fields" list and "timeFormat" pattern. */
  def this(config: Config) = this(
    fields = config.as[Option[List[String]]]("fields"),
    timeFormatter = DateFormatter(config.as[Option[String]]("timeFormat"))
  )

  /**
   * Renders a value as compact JSON bytes.
   *
   * NOTE(review): when `fields` is defined the value must be a PMap — the cast
   * below throws ClassCastException otherwise; verify callers guarantee this.
   */
  override def write(value: PValue): Array[Byte] = fields match {
    case Some(fs) =>
      val map = value.asInstanceOf[PMap].value
      // Strict filter instead of the lazy `filterKeys` view (deprecated in 2.13,
      // and a view would re-evaluate the predicate on every traversal).
      val filtered = map.filter { case (key, _) => fs.contains(key) }
      compact(render(convertToJson(filtered))).asBytes
    case None => compact(render(convertToJson(value))).asBytes
  }

  /** Parses JSON bytes (UTF-8 via `asStr`) back into a PValue tree. */
  override def read(value: Array[Byte]): PValue = {
    val json = jsonParse(value.asStr)
    convertFromJson(json)
  }

  /**
   * Converts an internal PValue tree to a json4s AST.
   * Throws MatchError on any PValue subtype not listed here.
   */
  def convertToJson(value: PValue): JValue = value match {
    case PString(str) => JString(str)
    case PInt(num)    => JInt(num)
    // Longs are emitted as JInt; JLong kept as a reference — presumably not
    // supported by the rendering backend in use, TODO confirm.
    case PLong(num)   => JInt(num) // JLong(num)
    case PDouble(num) => JDouble(num)
    case PBool(num)   => JBool(num)
    // Dates are first formatted (yielding another PValue) and then converted.
    case PDate(date)  => convertToJson(timeFormatter.format(date))
    case PList(list)  => JArray(list.map(convertToJson))
    // Strict map instead of lazy `mapValues` so each value is converted exactly once.
    case PMap(map)    => JObject(map.map { case (k, v) => k -> convertToJson(v) }.toList)
  }

  /**
   * Converts a json4s AST to a PValue tree, dropping JNothing/JNull entries
   * from objects and arrays. Throws IllegalStateException on unsupported nodes.
   */
  def convertFromJson(json: JValue): PValue = json match {
    case JString(s)    => PString(s)
    case JDouble(num)  => PDouble(num)
    case JDecimal(num) => PDouble(num.toDouble)
    case JInt(num)     => PLong(num.toLong)
    // case JLong(num) => PLong(num)
    case JBool(b)      => PBool(b)
    case JObject(obj)  =>
      val kept = obj.filter {
        case (_, JNothing) => false
        case (_, JNull)    => false
        case _             => true
      }
      // Strict map instead of lazy `mapValues`: the previous code returned a PMap
      // wrapping a view, so values were re-converted on every access.
      PMap(kept.toMap.map { case (k, v) => k -> convertFromJson(v) })
    case JArray(arr) =>
      PList(arr.filter {
        case JNothing => false
        case JNull    => false
        case _        => true
      }.map(convertFromJson))
    case _ => throw new IllegalStateException(s"Unexpected value parsing json $json")
  }
}
object JsonSerDe {
  /** Builds a serializer with no field filtering and the default date format. */
  def apply(): JsonSerDe = JsonSerDe(fields = None, timeFormatter = DateFormatter(None))

  /** Shared default instance. */
  val default: JsonSerDe = apply()
}
} | andr83/parsek | core/src/main/scala/com/github/andr83/parsek/serde/JsonSerDe.scala | Scala | mit | 2,263 |
package jigg.pipeline
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/** Currently, this trait is useful to assign unique id
* for each annotation.
*/
abstract class Annotation(val idPrefix: String) {
  // Generator backing this annotation type's id namespace — presumably yields
  // idPrefix-prefixed, monotonically increasing ids (see jigg.util.IDGenerator).
  val idGen = jigg.util.IDGenerator(idPrefix)

  /** Returns the next unique id for this annotation type. */
  def nextId: String = idGen.next
}
object Annotation {
  // One singleton per annotation kind; each carries its own id prefix, so ids
  // from different kinds never collide (e.g. sentences vs. tokens).
  object Document extends Annotation("d")
  object Sentence extends Annotation("s")
  object Token extends Annotation("t")
  object Dependency extends Annotation("dep")
  object NE extends Annotation("ne")
  object Mention extends Annotation("me")
  object Coreference extends Annotation("cr")
  object PredArg extends Annotation("pa")
  object ParseSpan extends Annotation("sp")
  object Chunk extends Annotation("ch")
}
| tomeken-yoshinaga/jigg | src/main/scala/jigg/pipeline/Annotation.scala | Scala | apache-2.0 | 1,270 |
package scorex.crypto.hash
class KeccakSpecification extends HashTest {
  // Known-answer tests. These digests correspond to the ORIGINAL Keccak padding
  // (as used by e.g. Ethereum), not the final NIST SHA-3 standard — for instance
  // SHA3-256("") starts with a7ffc6..., whereas Keccak-256("") is c5d24601... below.
  hashCheckString(Keccak256,
    Map(
      "" -> "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
      // "The quick brown fox jumps over the lazy dog" -> "4d741b6f1eb29cb2a9b9911c82f56fa8d73b04959d3d9d222895df6c0b28aa15"
    )
  )
  hashCheckString(Keccak512,
    Map(
      "" -> "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e",
      "The quick brown fox jumps over the lazy dog" -> "d135bb84d0439dbac432247ee573a23ea7d3c9deb2a968eb31d47c4fb45f1ef4422d6c531b5b9bd6f449ebcc449ea94d0a8f05f62130fda612da53c79659f609"
    )
  )
}
| ScorexProject/scrypto | src/test/scala/scorex/crypto/hash/KeccakSpecification.scala | Scala | cc0-1.0 | 705 |
package cn.edu.neu.chiewen.roadDemo.road.algorithm
import cn.edu.neu.chiewen.roadDemo.road.Node
import scala.annotation.tailrec
/**
* Created by chiewen on 2015/9/19 10:26.
*/
object Dijkstra {
  // Frontier ordering: (distance, node) pairs compared by distance only.
  implicit val orderingNode = Ordering.by[(Double, Node), Double](_._1)

  /**
   * Dijkstra-based k-nearest-sites search over the road network.
   *
   * Expands nodes in order of increasing road distance from `nodeRoot` and records
   * the first k sites encountered (with their distances) into
   * `nodeRoot.nearestSites`, closest first. Mutates `nodeRoot` in place rather than
   * returning a result. Note the root itself is seeded at distance 0, so it can be
   * its own nearest site if `isSite` holds.
   *
   * U is the frontier, kept as a plain Vector scanned linearly for its minimum
   * (O(|U|) per extraction); S is the set of settled nodes.
   */
  def kNN(nodeRoot: Node, k: Int = 1) {
    // Seed the frontier with the root's direct neighbors at their edge distances...
    var U: Vector[(Double, Node)] = for (n <- nodeRoot.neighbors) yield (nodeRoot.roadDistanceTo(n), n)
    var S: Set[Node] = Set.empty[Node]
    // ...and the root itself at distance 0, so it is settled first.
    U +:=(0D, nodeRoot)
    @tailrec
    def execute() {
      if (U.nonEmpty) U.min match {
        // The k-th site found: record it and stop — no further expansion needed.
        case (length: Double, node: Node)
          if node.isSite && nodeRoot.nearestSites.size == k - 1 =>
          nodeRoot.nearestSites ::=(length, node)
        case nextNode: (Double, Node) =>
          // Settle the minimum; collect it if it is a site (fewer than k found so far).
          if (nextNode._2.isSite) nodeRoot.nearestSites ::= nextNode
          // Relax all unsettled neighbors: insert new frontier entries, or lower
          // the tentative distance of nodes already on the frontier.
          for (n <- nextNode._2.neighbors if !S.contains(n)) U.indexWhere(_._2 == n) match {
            case -1 => U +:=(nextNode._1 + nextNode._2.roadDistanceTo(n), n)
            case i: Int => if (U(i)._1 > nextNode._1 + nextNode._2.roadDistanceTo(n))
              U = U.updated(i, (nextNode._1 + nextNode._2.roadDistanceTo(n), n))
          }
          // Remove the extracted minimum: overwrite its slot with the head, drop the head.
          U = U.updated(U.indexOf(nextNode), U.head).drop(1)
          S += nextNode._2
          execute()
      }
    }
    // Reset any previous result before searching.
    nodeRoot.nearestSites = List.empty[(Double, Node)]
    execute()
    // Sites were prepended as found, so reverse to get closest-first order.
    nodeRoot.nearestSites = nodeRoot.nearestSites.reverse
  }
}
| chiewen/CkNN | CkNN/src/main/scala/cn/edu/neu/chiewen/roadDemo/road/algorithm/Dijkstra.scala | Scala | gpl-2.0 | 1,424 |
package controllers.dos.ui
import models.dos._
import com.mongodb.casbah.Imports._
import play.api.mvc._
import extensions.Extensions
import com.novus.salat.dao.SalatMongoCursor
import controllers.MultitenancySupport
/**
*
* @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
*/
object Logs extends Controller with Extensions with MultitenancySupport {

  /**
   * Returns a task's logs as JSON, oldest first, for incremental polling.
   *
   * `lastCount` is the number of log lines the client already has; only newer
   * lines are returned. When the client is more than 100 lines behind, only the
   * most recent 100 are returned and `skipped` is set to true so the client
   * knows intermediate lines were omitted. At most 500 lines are ever scanned.
   */
  def list(taskId: ObjectId, lastCount: Option[Int]) = MultitenantAction {
    implicit request =>
      val cursor: SalatMongoCursor[Log] = Log.dao.find(MongoDBObject("task_id" -> taskId)).limit(500).sort(MongoDBObject("date" -> 1))
      val (logs, skipped) = if (lastCount != None && lastCount.get > 0) {
        if (cursor.count - lastCount.get > 100) {
          (cursor.skip(cursor.count - 100), true)
        } else {
          // NOTE(review): skipping lastCount + 1 looks like an off-by-one — if the
          // client holds `lastCount` lines, skipping `lastCount` would be expected.
          // Verify against the polling client before changing.
          (cursor.skip(lastCount.get + 1), false)
        }
      } else {
        if (cursor.count > 100) {
          (cursor.skip(cursor.count - 100), true)
        } else {
          (cursor, false)
        }
      }
      Json(Map("logs" -> logs.toList, "skipped" -> skipped))
  }

  /** Renders all of a task's logs as a plain-text page, one tab-separated line per entry. */
  def view(taskId: ObjectId) = MultitenantAction {
    implicit request =>
      {
        val cursor: SalatMongoCursor[Log] = Log.dao.find(MongoDBObject("task_id" -> taskId)).sort(MongoDBObject("date" -> 1))
        Ok(cursor.map(log => log.date + "\t" + s"[${log.orgId}] " + log.level.name.toUpperCase + "\t" + log.node + "\t" + log.message).mkString("\n"))
      }
  }
}
} | delving/culture-hub | modules/dos/app/controllers/dos/ui/Logs.scala | Scala | apache-2.0 | 1,448 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.autoencoder
import java.nio.file.Paths
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.image._
import com.intel.analytics.bigdl.dataset.{DataSet, MiniBatch, Transformer}
import com.intel.analytics.bigdl.nn.{MSECriterion, Module}
import com.intel.analytics.bigdl.optim._
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric._
import com.intel.analytics.bigdl.utils.{Engine, T, Table}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
/** Factory for the Float-typed batch transformer used by the training pipeline. */
object toAutoencoderBatch {
  def apply(): toAutoencoderBatch[Float] = new toAutoencoderBatch[Float]()
}
/**
 * Transformer that turns each MiniBatch into an autoencoder batch: the training
 * target is the input itself, so the batch's feature tensor is used as both
 * input and label.
 */
class toAutoencoderBatch[T](implicit ev: TensorNumeric[T]
)extends Transformer[MiniBatch[T], MiniBatch[T]] {
  override def apply(prev: Iterator[MiniBatch[T]]): Iterator[MiniBatch[T]] = {
    prev.map(batch => {
      // (input, label) = (input, input): reconstruction objective.
      MiniBatch(batch.getInput().toTensor[T], batch.getInput().toTensor[T])
    })
  }
}
/**
 * Entry point for training the MNIST autoencoder with BigDL on Spark.
 *
 * Pipeline: raw MNIST bytes -> grey images -> normalization -> mini-batches,
 * with each batch's input duplicated as its label (reconstruction objective).
 * Model/optimizer state can be restored from snapshots via command-line options.
 */
object Train {
  // Silence noisy third-party loggers; keep BigDL optimizer progress visible.
  Logger.getLogger("org").setLevel(Level.ERROR)
  Logger.getLogger("akka").setLevel(Level.ERROR)
  Logger.getLogger("breeze").setLevel(Level.ERROR)
  Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)

  import Utils._

  def main(args: Array[String]): Unit = {
    // `foreach`, not `map`: the result is discarded, and on a parse failure the
    // parser has already reported the error so we simply do nothing.
    trainParser.parse(args, new TrainParams()).foreach(param => {
      val conf = Engine.createSparkConf().setAppName("Train Autoencoder on MNIST")
      val sc = new SparkContext(conf)
      Engine.init

      val trainData = Paths.get(param.folder, "/train-images-idx3-ubyte")
      val trainLabel = Paths.get(param.folder, "/train-labels-idx1-ubyte")

      val trainDataSet = DataSet.array(load(trainData, trainLabel), sc) ->
        BytesToGreyImg(28, 28) -> GreyImgNormalizer(trainMean, trainStd) ->
        GreyImgToBatch(param.batchSize) -> toAutoencoderBatch()

      // Either resume from a model snapshot or build a fresh 32-unit autoencoder.
      val model = if (param.modelSnapshot.isDefined) {
        Module.load[Float](param.modelSnapshot.get)
      } else {
        Autoencoder(classNum = 32)
      }

      // Either resume optimizer state or start Adagrad with the default schedule.
      val optimMethod = if (param.stateSnapshot.isDefined) {
        OptimMethod.load[Float](param.stateSnapshot.get)
      } else {
        new Adagrad[Float](learningRate = 0.01, learningRateDecay = 0.0, weightDecay = 0.0005)
      }

      val optimizer = Optimizer(
        model = model,
        dataset = trainDataSet,
        criterion = new MSECriterion[Float]()
      )
      if (param.checkpoint.isDefined) {
        optimizer.setCheckpoint(param.checkpoint.get, Trigger.everyEpoch)
      }
      optimizer
        .setOptimMethod(optimMethod)
        .setEndWhen(Trigger.maxEpoch(param.maxEpoch))
        .optimize()

      // Release cluster resources once training completes (previously missing).
      sc.stop()
    })
  }
}
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/models/autoencoder/Train.scala | Scala | apache-2.0 | 3,364 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io._
import java.nio.charset.StandardCharsets
import java.util
import java.util.Locale
import scala.util.control.NonFatal
import org.scalatest.{BeforeAndAfterAll, GivenWhenThen}
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.HiveResult.hiveResultString
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.hive.test.{TestHive, TestHiveQueryExecution}
/**
* Allows the creations of tests that execute the same query against both hive
* and catalyst, comparing the results.
*
* The "golden" results from Hive are cached in and retrieved both from the classpath and
* [[answerCache]] to speed up testing.
*
* See the documentation of public vals in this class for information on how test execution can be
* configured using system properties.
*/
abstract class HiveComparisonTest
extends SparkFunSuite with BeforeAndAfterAll with GivenWhenThen {
override protected val enableAutoThreadAudit = false
/**
* Path to the test datasets. We find this by looking up "hive-test-path-helper.txt" file.
*
* Before we run the query in Spark, we replace "../../data" with this path.
*/
private val testDataPath: String = {
Thread.currentThread.getContextClassLoader
.getResource("hive-test-path-helper.txt")
.getPath.replace("/hive-test-path-helper.txt", "/data")
}
/**
* When set, any cache files that result in test failures will be deleted. Used when the test
* harness or hive have been updated thus requiring new golden answers to be computed for some
* tests. Also prevents the classpath being used when looking for golden answers as these are
* usually stale.
*/
val recomputeCache = System.getProperty("spark.hive.recomputeCache") != null
protected val shardRegEx = "(\\\\d+):(\\\\d+)".r
/**
* Allows multiple JVMs to be run in parallel, each responsible for portion of all test cases.
* Format `shardId:numShards`. Shard ids should be zero indexed. E.g. -Dspark.hive.testshard=0:4.
*/
val shardInfo = Option(System.getProperty("spark.hive.shard")).map {
case shardRegEx(id, total) => (id.toInt, total.toInt)
}
protected val targetDir = new File("target")
/**
* When set, this comma separated list is defines directories that contain the names of test cases
* that should be skipped.
*
* For example when `-Dspark.hive.skiptests=passed,hiveFailed` is specified and test cases listed
* in [[passedDirectory]] or [[hiveFailedDirectory]] will be skipped.
*/
val skipDirectories =
Option(System.getProperty("spark.hive.skiptests"))
.toSeq
.flatMap(_.split(","))
.map(name => new File(targetDir, s"$suiteName.$name"))
val runOnlyDirectories =
Option(System.getProperty("spark.hive.runonlytests"))
.toSeq
.flatMap(_.split(","))
.map(name => new File(targetDir, s"$suiteName.$name"))
/** The local directory with cached golden answer will be stored. */
protected val answerCache = new File("src" + File.separator + "test" +
File.separator + "resources" + File.separator + "golden")
if (!answerCache.exists) {
answerCache.mkdir()
}
/** The [[ClassLoader]] that contains test dependencies. Used to look for golden answers. */
protected val testClassLoader = this.getClass.getClassLoader
/** Directory containing a file for each test case that passes. */
val passedDirectory = new File(targetDir, s"$suiteName.passed")
if (!passedDirectory.exists()) {
passedDirectory.mkdir() // Not atomic!
}
/** Directory containing output of tests that fail to execute with Catalyst. */
val failedDirectory = new File(targetDir, s"$suiteName.failed")
if (!failedDirectory.exists()) {
failedDirectory.mkdir() // Not atomic!
}
/** Directory containing output of tests where catalyst produces the wrong answer. */
val wrongDirectory = new File(targetDir, s"$suiteName.wrong")
if (!wrongDirectory.exists()) {
wrongDirectory.mkdir() // Not atomic!
}
/** Directory containing output of tests where we fail to generate golden output with Hive. */
val hiveFailedDirectory = new File(targetDir, s"$suiteName.hiveFailed")
if (!hiveFailedDirectory.exists()) {
hiveFailedDirectory.mkdir() // Not atomic!
}
/** All directories that contain per-query output files */
val outputDirectories = Seq(
passedDirectory,
failedDirectory,
wrongDirectory,
hiveFailedDirectory)
protected val cacheDigest = java.security.MessageDigest.getInstance("MD5")
protected def getMd5(str: String): String = {
val digest = java.security.MessageDigest.getInstance("MD5")
digest.update(str.replaceAll(System.lineSeparator(), "\\n").getBytes(StandardCharsets.UTF_8))
new java.math.BigInteger(1, digest.digest).toString(16)
}
override protected def afterAll(): Unit = {
try {
TestHive.reset()
} finally {
super.afterAll()
}
}
protected def prepareAnswer(
hiveQuery: TestHiveQueryExecution,
answer: Seq[String]): Seq[String] = {
def isSorted(plan: LogicalPlan): Boolean = plan match {
case _: Join | _: Aggregate | _: Generate | _: Sample | _: Distinct => false
case PhysicalOperation(_, _, Sort(_, true, _)) => true
case _ => plan.children.iterator.exists(isSorted)
}
val orderedAnswer = hiveQuery.analyzed match {
// Clean out non-deterministic time schema info.
// Hack: Hive simply prints the result of a SET command to screen,
// and does not return it as a query answer.
case _: SetCommand => Seq("0")
case _: ExplainCommand => answer
case _: DescribeCommandBase | ShowColumnsCommand(_, _) =>
// Filter out non-deterministic lines and lines which do not have actual results but
// can introduce problems because of the way Hive formats these lines.
// Then, remove empty lines. Do not sort the results.
answer
.filterNot(r => nonDeterministicLine(r) || ignoredLine(r))
.map(_.replaceAll("from deserializer", ""))
.map(_.replaceAll("None", ""))
.map(_.trim)
.filterNot(_ == "")
case plan => if (isSorted(plan)) answer else answer.sorted
}
orderedAnswer.map(cleanPaths)
}
// TODO: Instead of filtering we should clean to avoid accidentally ignoring actual results.
lazy val nonDeterministicLineIndicators = Seq(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
// The following are hive specific schema parameters which we do not need to match exactly.
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
protected def nonDeterministicLine(line: String) =
nonDeterministicLineIndicators.exists(line contains _)
// This list contains indicators for those lines which do not have actual results and we
// want to ignore.
lazy val ignoredLineIndicators = Seq(
"# Detailed Table Information",
"# Partition Information",
"# col_name"
)
protected def ignoredLine(line: String) =
ignoredLineIndicators.exists(line contains _)
/**
* Removes non-deterministic paths from `str` so cached answers will compare correctly.
*/
protected def cleanPaths(str: String): String = {
str.replaceAll("file:\\\\/.*\\\\/", "<PATH>")
}
val installHooksCommand = "(?i)SET.*hooks".r
def createQueryTest(
testCaseName: String,
sql: String,
reset: Boolean = true,
tryWithoutResettingFirst: Boolean = false,
skip: Boolean = false) {
// testCaseName must not contain ':', which is not allowed to appear in a filename of Windows
assert(!testCaseName.contains(":"))
// If test sharding is enable, skip tests that are not in the correct shard.
shardInfo.foreach {
case (shardId, numShards) if testCaseName.hashCode % numShards != shardId => return
case (shardId, _) => logDebug(s"Shard $shardId includes test '$testCaseName'")
}
// Skip tests found in directories specified by user.
skipDirectories
.map(new File(_, testCaseName))
.filter(_.exists)
.foreach(_ => return)
// If runonlytests is set, skip this test unless we find a file in one of the specified
// directories.
val runIndicators =
runOnlyDirectories
.map(new File(_, testCaseName))
.filter(_.exists)
if (runOnlyDirectories.nonEmpty && runIndicators.isEmpty) {
logDebug(
s"Skipping test '$testCaseName' not found in ${runOnlyDirectories.map(_.getCanonicalPath)}")
return
}
test(testCaseName) {
assume(!skip)
logDebug(s"=== HIVE TEST: $testCaseName ===")
val sqlWithoutComment =
sql.split("\\n").filterNot(l => l.matches("--.*(?<=[^\\\\\\\\]);")).mkString("\\n")
val allQueries =
sqlWithoutComment.split("(?<=[^\\\\\\\\]);").map(_.trim).filterNot(q => q == "").toSeq
// TODO: DOCUMENT UNSUPPORTED
val queryList =
allQueries
// In hive, setting the hive.outerjoin.supports.filters flag to "false" essentially tells
// the system to return the wrong answer. Since we have no intention of mirroring their
// previously broken behavior we simply filter out changes to this setting.
.filterNot(_ contains "hive.outerjoin.supports.filters")
.filterNot(_ contains "hive.exec.post.hooks")
if (allQueries != queryList) {
logWarning(s"Simplifications made on unsupported operations for test $testCaseName")
}
lazy val consoleTestCase = {
val quotes = "\\"\\"\\""
queryList.zipWithIndex.map {
case (query, i) =>
s"""val q$i = sql($quotes$query$quotes); q$i.collect()"""
}.mkString("\\n== Console version of this test ==\\n", "\\n", "\\n")
}
/**
 * Executes every query of this test case with Catalyst and compares the results
 * against the cached Hive answers, writing pass/fail artifacts to disk.
 *
 * @param reset whether to reset the TestHive session before running.
 * @param isSpeculative true when this is an optimistic first attempt without
 *                      reset(); a mismatch then fails with a short message so
 *                      the caller can retry with a full reset.
 */
def doTest(reset: Boolean, isSpeculative: Boolean = false): Unit = {
  // Clear old output for this testcase.
  outputDirectories.map(new File(_, testCaseName)).filter(_.exists()).foreach(_.delete())
  if (reset) {
    TestHive.reset()
  }
  // Many tests drop indexes on src and srcpart at the beginning, so we need to load those
  // tables here. Since DROP INDEX DDL is just passed to Hive, it bypasses the analyzer and
  // thus the tables referenced in those DDL commands cannot be extracted for use by our
  // test table auto-loading mechanism. In addition, the tests which use the SHOW TABLES
  // command expect these tables to exist.
  val hasShowTableCommand =
    queryList.exists(_.toLowerCase(Locale.ROOT).contains("show tables"))
  for (table <- Seq("src", "srcpart")) {
    val hasMatchingQuery = queryList.exists { query =>
      val normalizedQuery = query.toLowerCase(Locale.ROOT).stripSuffix(";")
      normalizedQuery.endsWith(table) ||
        normalizedQuery.contains(s"from $table") ||
        normalizedQuery.contains(s"from default.$table")
    }
    if (hasShowTableCommand || hasMatchingQuery) {
      TestHive.loadTestTable(table)
    }
  }
  // One cache file per query; the name embeds the query's MD5 so that editing a
  // query invalidates its cached answer.
  val hiveCacheFiles = queryList.zipWithIndex.map {
    case (queryString, i) =>
      val cachedAnswerName = s"$testCaseName-$i-${getMd5(queryString)}"
      new File(answerCache, cachedAnswerName)
  }
  val hiveCachedResults = hiveCacheFiles.flatMap { cachedAnswerFile =>
    logDebug(s"Looking for cached answer file $cachedAnswerFile.")
    if (cachedAnswerFile.exists) {
      Some(fileToString(cachedAnswerFile))
    } else {
      logDebug(s"File $cachedAnswerFile not found")
      None
    }
  }.map {
    // Normalize cached file contents into one Seq[String] of result rows.
    case "" => Nil
    case "\\n" => Seq("")
    case other => other.split("\\n").toSeq
  }
  // Every query must have a cached Hive answer; recomputing answers by running
  // Hive is not supported on this path.
  val hiveResults: Seq[Seq[String]] =
    if (hiveCachedResults.size == queryList.size) {
      logInfo(s"Using answer cache for test: $testCaseName")
      hiveCachedResults
    } else {
      throw new UnsupportedOperationException(
        "Cannot find result file for test case: " + testCaseName)
    }
  // Run w/ catalyst
  val catalystResults = queryList.zip(hiveResults).map { case (queryString, hive) =>
    val query = new TestHiveQueryExecution(queryString.replace("../../data", testDataPath))
    def getResult(): Seq[String] = {
      SQLExecution.withNewExecutionId(
        query.sparkSession, query)(hiveResultString(query.executedPlan))
    }
    try { (query, prepareAnswer(query, getResult())) } catch {
      case e: Throwable =>
        // Persist full failure details to the 'failed' directory before failing
        // the test, so the run can be reproduced from the console.
        val errorMessage =
          s"""
            |Failed to execute query using catalyst:
            |Error: ${e.getMessage}
            |${stackTraceToString(e)}
            |$queryString
            |$query
            |== HIVE - ${hive.size} row(s) ==
            |${hive.mkString("\\n")}
          """.stripMargin
        stringToFile(new File(failedDirectory, testCaseName), errorMessage + consoleTestCase)
        fail(errorMessage)
    }
  }
  (queryList, hiveResults, catalystResults).zipped.foreach {
    case (query, hive, (hiveQuery, catalyst)) =>
      // Check that the results match unless its an EXPLAIN query.
      val preparedHive = prepareAnswer(hiveQuery, hive)
      // We will ignore the ExplainCommand, ShowFunctions, DescribeFunction
      if ((!hiveQuery.logical.isInstanceOf[ExplainCommand]) &&
        (!hiveQuery.logical.isInstanceOf[ShowFunctionsCommand]) &&
        (!hiveQuery.logical.isInstanceOf[DescribeFunctionCommand]) &&
        (!hiveQuery.logical.isInstanceOf[DescribeCommandBase]) &&
        preparedHive != catalyst) {
        val hivePrintOut = s"== HIVE - ${preparedHive.size} row(s) ==" +: preparedHive
        val catalystPrintOut = s"== CATALYST - ${catalyst.size} row(s) ==" +: catalyst
        val resultComparison = sideBySide(hivePrintOut, catalystPrintOut).mkString("\\n")
        if (recomputeCache) {
          logWarning(s"Clearing cache files for failed test $testCaseName")
          hiveCacheFiles.foreach(_.delete())
        }
        // If this query is reading other tables that were created during this test run
        // also print out the query plans and results for those.
        val computedTablesMessages: String = try {
          val tablesRead = new TestHiveQueryExecution(query).executedPlan.collect {
            case ts: HiveTableScanExec => ts.relation.tableMeta.identifier
          }.toSet
          TestHive.reset()
          val executions = queryList.map(new TestHiveQueryExecution(_))
          executions.foreach(_.toRdd)
          val tablesGenerated = queryList.zip(executions).flatMap {
            // Only tables that this (failing) query actually read are of interest.
            case (q, e) => e.analyzed.collect {
              case i: InsertIntoHiveTable if tablesRead contains i.table.identifier =>
                (q, e, i)
            }
          }
          tablesGenerated.map { case (hiveql, execution, insert) =>
            val rdd = Dataset.ofRows(TestHive.sparkSession, insert.query).queryExecution.toRdd
            s"""
              |=== Generated Table ===
              |$hiveql
              |$execution
              |== Results ==
              |${rdd.collect().mkString("\\n")}
            """.stripMargin
          }.mkString("\\n")
        } catch {
          // Diagnostics only: never let failure to build this report mask the
          // original mismatch.
          case NonFatal(e) =>
            logError("Failed to compute generated tables", e)
            s"Couldn't compute dependent tables: $e"
        }
        val errorMessage =
          s"""
            |Results do not match for $testCaseName:
            |$hiveQuery\\n${hiveQuery.analyzed.output.map(_.name).mkString("\\t")}
            |$resultComparison
            |$computedTablesMessages
          """.stripMargin
        stringToFile(new File(wrongDirectory, testCaseName), errorMessage + consoleTestCase)
        // On a speculative (no-reset) run fail with a short message so the
        // caller retries with reset(); otherwise fail with the full report.
        if (isSpeculative && !reset) {
          fail("Failed on first run; retrying")
        } else {
          fail(errorMessage)
        }
      }
  }
  // Touch passed file.
  new FileOutputStream(new File(passedDirectory, testCaseName)).close()
}
// A test may be speculatively attempted without reset() only when none of its
// queries contain a statement that mutates metastore state.
val canSpeculativelyTryWithoutReset: Boolean = {
  val disallowedSubstrings = Seq("into table", "create table", "drop index")
  queryList.forall { rawQuery =>
    val normalized = rawQuery.toLowerCase(Locale.ROOT)
    disallowedSubstrings.forall(substring => !normalized.contains(substring))
  }
}
// Snapshot the session settings so anything the test mutates can be restored.
val savedSettings = new util.HashMap[String, String]
savedSettings.putAll(TestHive.conf.settings)
// Note: a redundant outer try/catch that only rethrew TestFailedException has
// been flattened into a single try/catch/finally; behavior is unchanged.
try {
  if (tryWithoutResettingFirst && canSpeculativelyTryWithoutReset) {
    // Optimistic fast path: skip the expensive reset() on the first attempt.
    doTest(reset = false, isSpeculative = true)
  } else {
    doTest(reset)
  }
} catch {
  case tf: org.scalatest.exceptions.TestFailedException =>
    if (tryWithoutResettingFirst && canSpeculativelyTryWithoutReset) {
      // The speculative attempt can fail spuriously; retry once with a reset.
      logWarning("Test failed without reset(); retrying with reset()")
      doTest(reset = true)
    } else {
      throw tf
    }
} finally {
  // Always restore the conf, even when the retry itself failed.
  TestHive.conf.settings.clear()
  TestHive.conf.settings.putAll(savedSettings)
}
}
}
}
| aosagie/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala | Scala | apache-2.0 | 18,993 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.util
/**
* Default implementation of the Clock interface, which uses the real
* system clock.
*/
class SystemClock extends Clock {
  // Stateless: simply delegates to the JVM wall clock.
  override def currentTimeMillis: Long = System.currentTimeMillis()
}
object SystemClock {
  // Shared singleton; SystemClock holds no state, so one instance suffices.
  val instance = new SystemClock
}
| Swrrt/Samza | samza-core/src/main/scala/org/apache/samza/util/SystemClock.scala | Scala | apache-2.0 | 1,088 |
package suiryc.scala.util
import com.typesafe.scalalogging.StrictLogging
import java.io.File
import java.net.JarURLConnection
import java.nio.file.Paths
import java.util.{Locale, ResourceBundle}
import java.util.zip.ZipFile
import suiryc.scala.io.NameFilter._
import suiryc.scala.io.PathFinder
import suiryc.scala.io.PathFinder._
import suiryc.scala.settings.ConfigEntry
/** I18N base class. */
class I18NBase(baseName: String) {
  // baseName is the fully-qualified bundle base name, e.g. "my.app.i18n.Strings".
  // Note: if key is missing in bundle, it is searched in parent (if any).
  // Exception is thrown if it is missing in whole chain.
  /**
   * Gets the resource bundle for the given base name.
   *
   * Uses UTF8Control to handle UTF-8 .properties files.
   *
   * Resolution uses the default Locale at call time, so the returned bundle
   * can change after Locale.setDefault.
   */
  def getResources: ResourceBundle =
    ResourceBundle.getBundle(baseName, Locale.getDefault, UTF8Control)
}
/**
* I18N helper, without persistence.
*
* @param baseName resource base name
* @param defaultLanguage the language (code) of the default resource bundle
*/
class I18N(baseName: String, defaultLanguage: String = "en") extends I18NBase(baseName) with StrictLogging {
  import I18N._
  /** Chosen locale code. Only kept in memory unless a subclass persists it. */
  private var localeCode = defaultLanguage
  /** Resource bundle path and name prefix. */
  protected val (resourcePath, resourceNamePrefix) = {
    // Use resource 'Control' to translate base name into resource name
    val fullName = UTF8Control.toResourceName(baseName, resourceNameSuffix)
    // Then get base path and filename from it
    val path = Paths.get(fullName)
    val name = path.getFileName.toString
    // NOTE(review): Paths.get uses the platform separator; on Windows the
    // resulting resourcePath would contain '\' while classpath lookups expect
    // '/'. Assumes a '/'-separated platform — TODO confirm.
    (s"${path.getParent}/", name.substring(0, name.length - (resourceNameSuffix.length + 1)))
  }
  /** Resource bundle name format (non-ROOT), e.g. "Strings_<lang>.properties". */
  protected val resourceNameFormat = s"${resourceNamePrefix}_.*\\\\.$resourceNameSuffix"
  /** Gets 'language' from resource name. */
  protected def getLanguage(name: String): String =
    name.substring(resourceNamePrefix.length + 1, name.length - (resourceNameSuffix.length + 1))
  /**
   * Languages that we handle.
   *
   * We search for the I18N resources path URL, which may be a standard file
   * directory, or a jar (zip) file entry.
   * Then we list files/entries relatively to this URL that do match the
   * bundle resource name format, and extract the 'language' from its name.
   * We also add the indicated default resource bundle language.
   *
   * Note: we could use a virtual file system framework (like common-vfs), but
   * we only search for file/entry names.
   */
  protected val languages: Set[String] = Option(getClass.getResource(s"/$resourcePath")).map { url =>
    url.getProtocol match {
      case "file" =>
        // Standard directory
        val file = new File(url.toURI)
        val finder: PathFinder = file * resourceNameFormat.r
        finder.get().map(file => getLanguage(file.getName))
      case "jar" =>
        // Jar (zip) file entry
        // Find the actual jar file, and open it as a zip
        val file = new File(url.openConnection().asInstanceOf[JarURLConnection].getJarFileURL.toURI)
        val zipFile = new ZipFile(file)
        try {
          import scala.jdk.CollectionConverters._
          // Search for entries
          zipFile.entries.asScala.flatMap { entry =>
            val entryName = entry.getName
            if (entryName.startsWith(resourcePath)) {
              val relativeName = entryName.substring(resourcePath.length)
              // Only direct children of resourcePath (no '/'), matching the format.
              if ((relativeName.indexOf('/') != -1) || !relativeName.matches(resourceNameFormat)) None
              else Some(getLanguage(relativeName))
            } else None
          }.toSet
        } finally {
          zipFile.close()
        }
      case protocol =>
        logger.warn(s"Unhandled resource protocol: $protocol")
        Set.empty[String]
    }
  }.getOrElse(Set.empty) + defaultLanguage
  /**
   * Locales that we handle.
   *
   * Java resource bundles and locales use a lenient form with underscore '_'
   * as separator for language/country/variant instead of hyphen as specified
   * in BCP 47 (e.g. en_US instead of en-US).
   * Split on the separator to get each part and build the corresponding locale.
   */
  val locales: List[I18NLocale] = languages.map { lang =>
    val split = lang.split("_", 3)
    val locale =
      if (split.length == 1) new Locale(split(0))
      else if (split.length == 2) new Locale(split(0), split(1))
      else new Locale(split(0), split(1), split(2))
    // Display name is localized in the locale itself, capitalized for UI lists.
    I18NLocale(locale.toString, locale.getDisplayName(locale).capitalize, locale)
  }.toList.sortBy(_.code)
  /**
   * Reads persisted locale code.
   *
   * The default implementation only keeps it in memory.
   */
  protected def readLocale(): String =
    localeCode
  /**
   * Loads locale.
   *
   * Reads persisted locale code and sets locale.
   * If read code is unknown, applies ROOT (default) locale.
   */
  def loadLocale(): Unit = {
    val localeCode = readLocale()
    val locale = locales.find(_.code == localeCode) match {
      case Some(loc) => loc.locale
      case None => Locale.ROOT
    }
    Locale.setDefault(locale)
  }
  /**
   * Persists locale code.
   *
   * The default implementation only keeps it in memory.
   */
  protected def writeLocale(code: String): Unit =
    localeCode = code
  /**
   * Sets locale.
   *
   * Persists locale code and sets locale.
   *
   * Note: some libraries may statically load some resources which thus cannot
   * be resetted; e.g. JavaFX's default dialog buttons, empty list text, etc.
   * Changing locale at runtime has no effect for such resources already loaded.
   */
  def setLocale(localeCode: String): Unit = {
    writeLocale(localeCode)
    loadLocale()
  }
}
/** I18N trait with ConfigEntry persistence. */
trait I18NWithConfigEntry { this: I18N =>
  // Configuration entry used to persist the chosen locale code.
  val setting: ConfigEntry[String]
  /** Reads the locale code from the config entry. */
  override protected def readLocale(): String =
    setting.get
  /** Writes the locale code to the config entry. */
  override protected def writeLocale(code: String): Unit =
    setting.set(code)
}
/** I18N trait with cache for retrieved keys. */
trait I18NWithCache { this: I18N =>
  // key -> resolved string; not invalidated on locale change, callers must reset().
  protected var cache: Map[String, String] = Map.empty
  /** Gets string from cache, or resolves it from resources and caches it. */
  def getString(key: String): String =
    cache.get(key) match {
      case Some(cached) =>
        cached
      case None =>
        val resolved = getResources.getString(key)
        cache = cache.updated(key, resolved)
        resolved
    }
  /** Drops all cached entries. */
  def reset(): Unit =
    cache = Map.empty
}
object I18N {
  // File extension of resource bundle files ("properties", without the dot).
  protected val resourceNameSuffix = "properties"
}
/** I18N locale information. */
// code: raw locale code as persisted (e.g. "en_US"); displayName: capitalized
// name localized in that locale; locale: the corresponding JVM Locale.
case class I18NLocale(code: String, displayName: String, locale: Locale)
| suiryc/suiryc-scala | core/src/main/scala/suiryc/scala/util/I18N.scala | Scala | gpl-3.0 | 6,643 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.queryapitests.simple
import java.{ util => jutil }
import org.junit.runner.RunWith
import org.xml.sax.EntityResolver
import org.xml.sax.InputSource
import org.scalatest.junit.JUnitRunner
import nl.ebpi.yaidom.convert
import nl.ebpi.yaidom.core.Scope
import nl.ebpi.yaidom.queryapitests.AbstractScopedElemLikeQueryTest
import nl.ebpi.yaidom.resolved
import nl.ebpi.yaidom.simple.Elem
import javax.xml.parsers.DocumentBuilder
import javax.xml.parsers.DocumentBuilderFactory
/**
* Query test case for simple elements.
*
* @author Chris de Vreeze
*/
@RunWith(classOf[JUnitRunner])
class ScopedElemLikeQueryTest extends AbstractScopedElemLikeQueryTest {
  private val logger: jutil.logging.Logger = jutil.logging.Logger.getLogger("nl.ebpi.yaidom.queryapitests.simple")
  final type E = Elem
  // XML Schema for schemas, parsed once; DTD references are resolved from
  // bundled classpath resources so the test never hits the network.
  protected final val xsdSchemaElem: Elem = {
    val dbf = DocumentBuilderFactory.newInstance
    // Builds a DocumentBuilder whose entity resolver serves the XMLSchema DTDs
    // from test resources (matching both '/' and '\' separated system IDs).
    def createDocumentBuilder(documentBuilderFactory: DocumentBuilderFactory): DocumentBuilder = {
      val db = documentBuilderFactory.newDocumentBuilder()
      db.setEntityResolver(new EntityResolver {
        def resolveEntity(publicId: String, systemId: String): InputSource = {
          logger.info(s"Trying to resolve entity. Public ID: $publicId. System ID: $systemId")
          if (systemId.endsWith("/XMLSchema.dtd") || systemId.endsWith("\\\\XMLSchema.dtd") || (systemId == "XMLSchema.dtd")) {
            new InputSource(classOf[ScopedElemLikeQueryTest].getResourceAsStream("/nl/ebpi/yaidom/queryapitests/XMLSchema.dtd"))
          } else if (systemId.endsWith("/datatypes.dtd") || systemId.endsWith("\\\\datatypes.dtd") || (systemId == "datatypes.dtd")) {
            new InputSource(classOf[ScopedElemLikeQueryTest].getResourceAsStream("/nl/ebpi/yaidom/queryapitests/datatypes.dtd"))
          } else {
            // Default behaviour
            null
          }
        }
      })
      db
    }
    val is = classOf[ScopedElemLikeQueryTest].getResourceAsStream("/nl/ebpi/yaidom/queryapitests/XMLSchema.xsd")
    val domDoc = createDocumentBuilder(dbf).parse(is)
    // Convert the W3C DOM tree into a yaidom simple Elem with an empty parent scope.
    convert.DomConversions.convertToElem(domDoc.getDocumentElement(), Scope.Empty)
  }
  protected final def toResolvedElem(elem: E): resolved.Elem =
    resolved.Elem(elem)
}
| EBPI/yaidom | src/test/scala/nl/ebpi/yaidom/queryapitests/simple/ScopedElemLikeQueryTest.scala | Scala | apache-2.0 | 2,877 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scheduler
import play.api.Application
import play.api.inject.ApplicationLifecycle
import scheduler.assessment.{EvaluateAssessmentCentreJobConfig, EvaluateAssessmentCentreJobImpl}
import scheduler.fixer.{FixerJobConfig, FixerJobImpl}
import scheduler.fsb.{EvaluateFsbJobConfig, EvaluateFsbJobImpl}
import scheduler.onlinetesting._
import scheduler.scheduling.{RunningOfScheduledJobs, ScheduledJob}
import scheduler.sift._
import javax.inject.{Inject, Singleton}
import scala.concurrent.ExecutionContext
@Singleton
class Scheduler @Inject()(
  sendPhase1InvitationJob: SendPhase1InvitationJob,
  sendPhase2InvitationJob: SendPhase2InvitationJob,
  sendPhase3InvitationJob: SendPhase3InvitationJob,
  expirePhase1TestJob: ExpirePhase1TestJob,
  expirePhase2TestJob: ExpirePhase2TestJob,
  expirePhase3TestJob: ExpirePhase3TestJob,
  firstPhase1ReminderExpiringTestJob: FirstPhase1ReminderExpiringTestJob,
  secondPhase1ReminderExpiringTestJob: SecondPhase1ReminderExpiringTestJob,
  firstPhase2ReminderExpiringTestJob: FirstPhase2ReminderExpiringTestJob,
  secondPhase2ReminderExpiringTestJob: SecondPhase2ReminderExpiringTestJob,
  firstPhase3ReminderExpiringTestJob: FirstPhase3ReminderExpiringTestJob,
  secondPhase3ReminderExpiringTestJob: SecondPhase3ReminderExpiringTestJob,
  failedPhase1TestJob: FailedPhase1TestJob,
  failedPhase2TestJob: FailedPhase2TestJob,
  failedPhase3TestJob: FailedPhase3TestJob,
  failedSdipFsTestJob: FailedSdipFsTestJob,
  successPhase1TestJob: SuccessPhase1TestJob,
  successPhase3TestJob: SuccessPhase3TestJob,
  successPhase3SdipFsTestJob: SuccessPhase3SdipFsTestJob,
  retrievePhase1ResultsJob: RetrievePhase1ResultsJob,
  retrievePhase2ResultsJob: RetrievePhase2ResultsJob,
  evaluatePhase1ResultJob: EvaluatePhase1ResultJob,
  evaluatePhase2ResultJob: EvaluatePhase2ResultJob,
  evaluatePhase3ResultJob: EvaluatePhase3ResultJob,
  evaluateAssessmentCentreJob: EvaluateAssessmentCentreJobImpl,
  fixerJob: FixerJobImpl,
  progressSdipForFaststreamCandidateJob: ProgressSdipForFaststreamCandidateJobImpl,
  progressToSiftJob: ProgressToSiftJobImpl,
  siftNumericalTestInvitationJob: SiftNumericalTestInvitationJobImpl,
  retrieveSiftNumericalResultsJob: RetrieveSiftNumericalResultsJobImpl,
  processSiftNumericalResultsReceivedJob: ProcessSiftNumericalResultsReceivedJobImpl,
  progressToAssessmentCentreJob: ProgressToAssessmentCentreJobImpl,
  notifyAssessorsOfNewEventsJob: NotifyAssessorsOfNewEventsJobImpl,
  firstSiftReminderJob: FirstSiftReminderJobImpl,
  secondSiftReminderJob: SecondSiftReminderJobImpl,
  siftFailureJob: SiftFailureJob,
  siftExpiryJob: SiftExpiryJobImpl,
  progressToFsbOrOfferJob: ProgressToFsbOrOfferJobImpl,
  reminderEventAllocationJob: ReminderEventAllocationJobImpl,
  notifyOnFinalFailureJob: NotifyOnFinalFailureJobImpl,
  notifyOnFinalSuccessJob: NotifyOnFinalSuccessJobImpl,
  evaluateFsbJob: EvaluateFsbJobImpl,
  sendPhase1InvitationJobConfig: SendPhase1InvitationJobConfig,
  sendPhase2InvitationJobConfig: SendPhase2InvitationJobConfig,
  sendPhase3InvitationJobConfig: SendPhase3InvitationJobConfig,
  expirePhase1TestJobConfig: ExpirePhase1TestJobConfig,
  expirePhase2TestJobConfig: ExpirePhase2TestJobConfig,
  expirePhase3TestJobConfig: ExpirePhase3TestJobConfig,
  firstPhase1ReminderExpiringTestJobConfig: FirstPhase1ReminderExpiringTestJobConfig,
  secondPhase1ReminderExpiringTestJobConfig: SecondPhase1ReminderExpiringTestJobConfig,
  firstPhase2ReminderExpiringTestJobConfig: FirstPhase2ReminderExpiringTestJobConfig,
  secondPhase2ReminderExpiringTestJobConfig: SecondPhase2ReminderExpiringTestJobConfig,
  firstPhase3ReminderExpiringTestJobConfig: FirstPhase3ReminderExpiringTestJobConfig,
  secondPhase3ReminderExpiringTestJobConfig: SecondPhase3ReminderExpiringTestJobConfig,
  failedPhase1TestJobConfig: FailedPhase1TestJobConfig,
  failedPhase2TestJobConfig: FailedPhase2TestJobConfig,
  failedPhase3TestJobConfig: FailedPhase3TestJobConfig,
  failedSdipFsTestJobConfig: FailedSdipFsTestJobConfig,
  successPhase1TestJobConfig: SuccessPhase1TestJobConfig,
  successPhase3TestJobConfig: SuccessPhase3TestJobConfig,
  successPhase3SdipFsTestJobConfig: SuccessPhase3SdipFsTestJobConfig,
  retrievePhase1ResultsJobConfig: RetrievePhase1ResultsJobConfig,
  retrievePhase2ResultsJobConfig: RetrievePhase2ResultsJobConfig,
  evaluatePhase1ResultJobConfig: EvaluatePhase1ResultJobConfig,
  evaluatePhase2ResultJobConfig: EvaluatePhase2ResultJobConfig,
  evaluatePhase3ResultJobConfig: EvaluatePhase3ResultJobConfig,
  evaluateAssessmentCentreJobConfig: EvaluateAssessmentCentreJobConfig,
  fixerJobConfig: FixerJobConfig,
  progressSdipForFaststreamCandidateJobConfig: ProgressSdipForFaststreamCandidateJobConfig,
  progressToSiftJobConfig: ProgressToSiftJobConfig,
  siftNumericalTestInvitationConfig: SiftNumericalTestInvitationConfig,
  retrieveSiftNumericalResultsJobConfig: RetrieveSiftNumericalResultsJobConfig,
  processSiftNumericalResultsReceivedJobConfig: ProcessSiftNumericalResultsReceivedJobConfig,
  progressToAssessmentCentreJobConfig: ProgressToAssessmentCentreJobConfig,
  notifyAssessorsOfNewEventsJobConfig: NotifyAssessorsOfNewEventsJobConfig,
  firstSiftReminderJobConfig: FirstSiftReminderJobConfig,
  secondSiftReminderJobConfig: SecondSiftReminderJobConfig,
  siftFailureJobConfig: SiftFailureJobConfig,
  siftExpiryJobConfig: SiftExpiryJobConfig,
  progressToFsbOrOfferJobConfig: ProgressToFsbOrOfferJobConfig,
  reminderEventAllocationJobConfig: ReminderEventAllocationJobConfig,
  notifyOnFinalFailureJobConfig: NotifyOnFinalFailureJobConfig,
  notifyOnFinalSuccessJobConfig: NotifyOnFinalSuccessJobConfig,
  evaluateFsbJobConfig: EvaluateFsbJobConfig,
  override val applicationLifecycle: ApplicationLifecycle,
  override val application: Application
)
(implicit val ec: ExecutionContext) extends RunningOfScheduledJobs {
  logger.info("Scheduler created")
  /**
   * Returns the job when its config flags it enabled, None otherwise; the
   * decision is logged either way. The job parameter is by-name, so a
   * disabled job is never evaluated here.
   */
  private def maybeInitScheduler(config: BasicJobConfig[_], scheduler: => ScheduledJob): Option[ScheduledJob] = {
    if (config.enabled) {
      logger.warn(s"${config.name} job is enabled")
      Some(scheduler)
    } else {
      logger.warn(s"${config.name} job is disabled")
      None
    }
  }
  // Every known job paired with its config; disabled ones are filtered out by
  // the flatten. This is what RunningOfScheduledJobs actually schedules.
  override lazy val scheduledJobs: Seq[ScheduledJob] = {
    Seq(
      maybeInitScheduler(sendPhase1InvitationJobConfig, sendPhase1InvitationJob),
      maybeInitScheduler(sendPhase2InvitationJobConfig, sendPhase2InvitationJob),
      maybeInitScheduler(sendPhase3InvitationJobConfig, sendPhase3InvitationJob),
      maybeInitScheduler(expirePhase1TestJobConfig, expirePhase1TestJob),
      maybeInitScheduler(expirePhase2TestJobConfig, expirePhase2TestJob),
      maybeInitScheduler(expirePhase3TestJobConfig, expirePhase3TestJob),
      maybeInitScheduler(firstPhase1ReminderExpiringTestJobConfig, firstPhase1ReminderExpiringTestJob),
      maybeInitScheduler(secondPhase1ReminderExpiringTestJobConfig, secondPhase1ReminderExpiringTestJob),
      maybeInitScheduler(firstPhase2ReminderExpiringTestJobConfig, firstPhase2ReminderExpiringTestJob),
      maybeInitScheduler(secondPhase2ReminderExpiringTestJobConfig, secondPhase2ReminderExpiringTestJob),
      maybeInitScheduler(firstPhase3ReminderExpiringTestJobConfig, firstPhase3ReminderExpiringTestJob),
      maybeInitScheduler(secondPhase3ReminderExpiringTestJobConfig, secondPhase3ReminderExpiringTestJob),
      maybeInitScheduler(failedPhase1TestJobConfig, failedPhase1TestJob),
      maybeInitScheduler(failedPhase2TestJobConfig, failedPhase2TestJob),
      maybeInitScheduler(failedPhase3TestJobConfig, failedPhase3TestJob),
      maybeInitScheduler(failedSdipFsTestJobConfig, failedSdipFsTestJob),
      maybeInitScheduler(successPhase1TestJobConfig, successPhase1TestJob),
      maybeInitScheduler(successPhase3TestJobConfig, successPhase3TestJob),
      maybeInitScheduler(successPhase3SdipFsTestJobConfig, successPhase3SdipFsTestJob),
      maybeInitScheduler(retrievePhase1ResultsJobConfig, retrievePhase1ResultsJob),
      maybeInitScheduler(retrievePhase2ResultsJobConfig, retrievePhase2ResultsJob),
      maybeInitScheduler(evaluatePhase1ResultJobConfig, evaluatePhase1ResultJob),
      maybeInitScheduler(evaluatePhase2ResultJobConfig, evaluatePhase2ResultJob),
      maybeInitScheduler(evaluatePhase3ResultJobConfig, evaluatePhase3ResultJob),
      maybeInitScheduler(evaluateAssessmentCentreJobConfig, evaluateAssessmentCentreJob),
      maybeInitScheduler(fixerJobConfig, fixerJob),
      maybeInitScheduler(progressSdipForFaststreamCandidateJobConfig, progressSdipForFaststreamCandidateJob),
      maybeInitScheduler(progressToSiftJobConfig, progressToSiftJob),
      maybeInitScheduler(siftNumericalTestInvitationConfig, siftNumericalTestInvitationJob),
      maybeInitScheduler(retrieveSiftNumericalResultsJobConfig, retrieveSiftNumericalResultsJob),
      maybeInitScheduler(processSiftNumericalResultsReceivedJobConfig, processSiftNumericalResultsReceivedJob),
      maybeInitScheduler(progressToAssessmentCentreJobConfig, progressToAssessmentCentreJob),
      maybeInitScheduler(notifyAssessorsOfNewEventsJobConfig, notifyAssessorsOfNewEventsJob),
      maybeInitScheduler(firstSiftReminderJobConfig, firstSiftReminderJob),
      maybeInitScheduler(secondSiftReminderJobConfig, secondSiftReminderJob),
      maybeInitScheduler(siftFailureJobConfig, siftFailureJob),
      maybeInitScheduler(siftExpiryJobConfig, siftExpiryJob),
      maybeInitScheduler(progressToFsbOrOfferJobConfig, progressToFsbOrOfferJob),
      maybeInitScheduler(reminderEventAllocationJobConfig, reminderEventAllocationJob),
      maybeInitScheduler(notifyOnFinalFailureJobConfig, notifyOnFinalFailureJob),
      maybeInitScheduler(notifyOnFinalSuccessJobConfig, notifyOnFinalSuccessJob),
      maybeInitScheduler(evaluateFsbJobConfig, evaluateFsbJob)
    ).flatten
  }
}
| hmrc/fset-faststream | app/scheduler/Scheduler.scala | Scala | apache-2.0 | 12,618 |
package grasshopper.geocoder.api.stats
import akka.actor.{ ActorLogging, Props }
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }
import grasshopper.geocoder.model.GeocodeStats
import grasshopper.geocoder.protocol.GrasshopperJsonProtocol
import spray.json._
import scala.collection.mutable
object GeocodeStatsPublisher {
  // Marker message; not currently handled by the actor's receive.
  case class PublishStats()
  /** Props factory for creating the publisher actor. */
  def props: Props = Props(new GeocodeStatsPublisher)
}
/**
 * Stream publisher that forwards GeocodeStats published on the actor system's
 * event stream to a downstream subscriber, respecting backpressure.
 */
class GeocodeStatsPublisher extends ActorPublisher[GeocodeStats] with ActorLogging with GrasshopperJsonProtocol {
  // Buffer for stats that arrive while the downstream has no outstanding demand.
  // (Previously declared but never used: stats were pushed with onNext
  // unconditionally, which throws IllegalStateException when totalDemand == 0.)
  var stats = mutable.Queue[GeocodeStats]()
  override def preStart(): Unit = {
    log.info("Starting GeocodeStatsPublisher")
    context.system.eventStream.subscribe(self, classOf[GeocodeStats])
  }
  override def receive: Receive = {
    case g: GeocodeStats =>
      log.debug(g.toJson.toString)
      // Enqueue first, then drain: onNext must only be called while the
      // publisher is active and demand is available.
      stats.enqueue(g)
      deliverBuffered()
    case Request(_) =>
      // Downstream signalled more demand; flush as much of the buffer as allowed.
      deliverBuffered()
    case Cancel =>
      // Downstream cancelled the subscription; stop publishing.
      context.stop(self)
    case _ => // ignore other messages
      log.warning("Message not supported")
  }
  // Emits buffered stats while the publisher is active and demand remains.
  private def deliverBuffered(): Unit =
    while (isActive && totalDemand > 0 && stats.nonEmpty) {
      onNext(stats.dequeue())
    }
}
| hkeeler/grasshopper | geocoder/src/main/scala/grasshopper/geocoder/api/stats/GeocodeStatsPublisher.scala | Scala | cc0-1.0 | 921 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.prop.Tables
import org.scalatest.events.Event
// Contract for the example suites below: each suite knows the exact event
// sequence its parallel (sorted) execution should produce.
trait OrderExpectedResults extends EventHelpers {
  // Verifies that the recorded events arrived in the expected order.
  def assertOrderTest(events: List[Event])
}
object ParallelTestExecutionOrderExamples extends Tables {
  // Each accessor constructs a fresh example suite instance on every call.
  def orderSuite = new ExampleParallelTestExecutionOrderSuite
  def orderFixtureSuite = new ExampleParallelTestExecutionOrderFixtureSuite
  def orderSpec = new ExampleParallelTestExecutionOrderSpec
  def orderFixtureSpec = new ExampleParallelTestExecutionOrderFixtureSpec
  def orderFunSuite = new ExampleParallelTestExecutionOrderFunSuite
  def orderFixtureFunSuite = new ExampleParallelTestExecutionOrderFixtureFunSuite
  def orderFunSpec = new ExampleParallelTestExecutionOrderFunSpec
  def orderFixtureFunSpec = new ExampleParallelTestExecutionOrderFixtureFunSpec
  def orderFeatureSpec = new ExampleParallelTestExecutionOrderFeatureSpec
  def orderFixtureFeatureSpec = new ExampleParallelTestExecutionOrderFixtureFeatureSpec
  def orderFlatSpec = new ExampleParallelTestExecutionOrderFlatSpec
  def orderFixtureFlatSpec = new ExampleParallelTestExecutionOrderFixtureFlatSpec
  def orderFreeSpec = new ExampleParallelTestExecutionOrderFreeSpec
  def orderFixtureFreeSpec = new ExampleParallelTestExecutionOrderFixtureFreeSpec
  def orderPropSpec = new ExampleParallelTestExecutionOrderPropSpec
  def orderFixturePropSpec = new ExampleParallelTestExecutionOrderFixturePropSpec
  def orderWordSpec = new ExampleParallelTestExecutionOrderWordSpec
  def orderFixtureWordSpec = new ExampleParallelTestExecutionOrderFixtureWordSpec
  // Table of every example suite exercised by the ordering tests.
  def orderExamples =
    Table(
      "suite1",
      orderSuite,
      orderFixtureSuite,
      orderSpec,
      orderFixtureSpec,
      orderFunSuite,
      orderFixtureFunSuite,
      orderFunSpec,
      orderFixtureFunSpec,
      orderFeatureSpec,
      orderFixtureFeatureSpec,
      orderFlatSpec,
      orderFixtureFlatSpec,
      orderFreeSpec,
      orderFixtureFreeSpec,
      orderPropSpec,
      orderFixturePropSpec,
      orderWordSpec,
      orderFixtureWordSpec
    )
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderSuite extends Suite with OrderExpectedResults with ParallelTestExecution {
  def testMethod1() {}
  def testMethod2() {}
  def testMethod3() {}
  /** Each test must emit a (TestStarting, TestSucceeded) pair, in declaration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("testMethod1", "testMethod2", "testMethod3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureSuite extends fixture.Suite with OrderExpectedResults with ParallelTestExecution with StringFixture {
  def testFixtureMethod1() {}
  def testFixtureMethod2() {}
  def testFixtureMethod3() {}
  /** Each fixture test must emit a (TestStarting, TestSucceeded) pair, in declaration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("testFixtureMethod1", "testFixtureMethod2", "testFixtureMethod3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderSpec extends Spec with OrderExpectedResults with ParallelTestExecution {
  def `test 1` {}
  def `test 2` {}
  def `test 3` {}
  /** Each test must emit a (TestStarting, TestSucceeded) pair, in declaration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("test 1", "test 2", "test 3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureSpec extends fixture.Spec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  def `test 1`(fixture: String) {}
  def `test 2`(fixture: String) {}
  def `test 3`(fixture: String) {}
  /** Each fixture test must emit a (TestStarting, TestSucceeded) pair, in declaration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("test 1", "test 2", "test 3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFunSuite extends FunSuite with OrderExpectedResults with ParallelTestExecution {
  test("Test 1") {}
  test("Test 2") {}
  test("Test 3") {}
  /** Each test must emit a (TestStarting, TestSucceeded) pair, in registration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("Test 1", "Test 2", "Test 3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureFunSuite extends fixture.FunSuite with OrderExpectedResults with ParallelTestExecution with StringFixture {
  test("Fixture Test 1") { fixture => }
  test("Fixture Test 2") { fixture => }
  test("Fixture Test 3") { fixture => }
  /** Each fixture test must emit a (TestStarting, TestSucceeded) pair, in registration order. */
  def assertOrderTest(events: List[Event]) {
    val expectedOrder = List("Fixture Test 1", "Fixture Test 2", "Fixture Test 3")
    assert(events.size === expectedOrder.size * 2)
    expectedOrder.zipWithIndex.foreach { case (testName, i) =>
      checkTestStarting(events(i * 2), testName)
      checkTestSucceeded(events(i * 2 + 1), testName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFunSpec extends FunSpec with OrderExpectedResults with ParallelTestExecution {
  describe("Scope 1") {
    it("Test 1") {}
    it("Test 2") {}
  }
  describe("Scope 2") {
    it("Test 3") {}
    it("Test 4") {}
  }
  /**
   * Each scope fires ScopeOpened/ScopeClosed around its tests; every test
   * contributes a (TestStarting, TestSucceeded) pair in between.
   */
  def assertOrderTest(events: List[Event]) {
    assert(events.size === 12)
    val expectedScopes = List(
      "Scope 1" -> List("Test 1", "Test 2"),
      "Scope 2" -> List("Test 3", "Test 4")
    )
    var cursor = 0
    def next(): Int = { val current = cursor; cursor += 1; current }
    expectedScopes.foreach { case (scopeName, testNames) =>
      checkScopeOpened(events(next()), scopeName)
      testNames.foreach { testName =>
        checkTestStarting(events(next()), s"$scopeName $testName")
        checkTestSucceeded(events(next()), s"$scopeName $testName")
      }
      checkScopeClosed(events(next()), scopeName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureFunSpec extends fixture.FunSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  describe("Fixture Scope 1") {
    it("Fixture Test 1") { fixture => }
    it("Fixture Test 2") { fixture => }
  }
  describe("Fixture Scope 2") {
    it("Fixture Test 3") { fixture => }
    it("Fixture Test 4") { fixture => }
  }
  /**
   * Each scope fires ScopeOpened/ScopeClosed around its tests; every test
   * contributes a (TestStarting, TestSucceeded) pair in between.
   */
  def assertOrderTest(events: List[Event]) {
    assert(events.size === 12)
    val expectedScopes = List(
      "Fixture Scope 1" -> List("Fixture Test 1", "Fixture Test 2"),
      "Fixture Scope 2" -> List("Fixture Test 3", "Fixture Test 4")
    )
    var cursor = 0
    def next(): Int = { val current = cursor; cursor += 1; current }
    expectedScopes.foreach { case (scopeName, testNames) =>
      checkScopeOpened(events(next()), scopeName)
      testNames.foreach { testName =>
        checkTestStarting(events(next()), s"$scopeName $testName")
        checkTestSucceeded(events(next()), s"$scopeName $testName")
      }
      checkScopeClosed(events(next()), scopeName)
    }
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFeatureSpec extends FeatureSpec with OrderExpectedResults with ParallelTestExecution {
  feature("Scope 1") {
    scenario("Test 1") {}
    scenario("Test 2") {}
  }
  feature("Scope 2") {
    scenario("Test 3") {}
    scenario("Test 4") {}
  }

  /**
   * Asserts that the recorded events arrive in registration order. FeatureSpec
   * prefixes scope names with "Feature: " and test names with "Scenario: ".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Feature: Scope 1")
    checkTestStarting(events(1), "Feature: Scope 1 Scenario: Test 1")
    checkTestSucceeded(events(2), "Feature: Scope 1 Scenario: Test 1")
    checkTestStarting(events(3), "Feature: Scope 1 Scenario: Test 2")
    checkTestSucceeded(events(4), "Feature: Scope 1 Scenario: Test 2")
    checkScopeClosed(events(5), "Feature: Scope 1")
    checkScopeOpened(events(6), "Feature: Scope 2")
    checkTestStarting(events(7), "Feature: Scope 2 Scenario: Test 3")
    checkTestSucceeded(events(8), "Feature: Scope 2 Scenario: Test 3")
    checkTestStarting(events(9), "Feature: Scope 2 Scenario: Test 4")
    checkTestSucceeded(events(10), "Feature: Scope 2 Scenario: Test 4")
    checkScopeClosed(events(11), "Feature: Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureFeatureSpec extends fixture.FeatureSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  feature("Fixture Scope 1") {
    scenario("Fixture Test 1") { fixture => }
    scenario("Fixture Test 2") { fixture => }
  }
  feature("Fixture Scope 2") {
    scenario("Fixture Test 3") { fixture => }
    scenario("Fixture Test 4") { fixture => }
  }

  /**
   * Asserts that the recorded events arrive in registration order. FeatureSpec
   * prefixes scope names with "Feature: " and test names with "Scenario: ".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Feature: Fixture Scope 1")
    checkTestStarting(events(1), "Feature: Fixture Scope 1 Scenario: Fixture Test 1")
    checkTestSucceeded(events(2), "Feature: Fixture Scope 1 Scenario: Fixture Test 1")
    checkTestStarting(events(3), "Feature: Fixture Scope 1 Scenario: Fixture Test 2")
    checkTestSucceeded(events(4), "Feature: Fixture Scope 1 Scenario: Fixture Test 2")
    checkScopeClosed(events(5), "Feature: Fixture Scope 1")
    checkScopeOpened(events(6), "Feature: Fixture Scope 2")
    checkTestStarting(events(7), "Feature: Fixture Scope 2 Scenario: Fixture Test 3")
    checkTestSucceeded(events(8), "Feature: Fixture Scope 2 Scenario: Fixture Test 3")
    checkTestStarting(events(9), "Feature: Fixture Scope 2 Scenario: Fixture Test 4")
    checkTestSucceeded(events(10), "Feature: Fixture Scope 2 Scenario: Fixture Test 4")
    checkScopeClosed(events(11), "Feature: Fixture Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFlatSpec extends FlatSpec with OrderExpectedResults with ParallelTestExecution {
  behavior of "Scope 1"
  it should "Test 1" in {}
  it should "Test 2" in {}
  behavior of "Scope 2"
  it should "Test 3" in {}
  it should "Test 4" in {}

  /**
   * Asserts that the recorded events arrive in registration order. FlatSpec
   * joins subject and verb into names like "Scope 1 should Test 1".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Scope 1")
    checkTestStarting(events(1), "Scope 1 should Test 1")
    checkTestSucceeded(events(2), "Scope 1 should Test 1")
    checkTestStarting(events(3), "Scope 1 should Test 2")
    checkTestSucceeded(events(4), "Scope 1 should Test 2")
    checkScopeClosed(events(5), "Scope 1")
    checkScopeOpened(events(6), "Scope 2")
    checkTestStarting(events(7), "Scope 2 should Test 3")
    checkTestSucceeded(events(8), "Scope 2 should Test 3")
    checkTestStarting(events(9), "Scope 2 should Test 4")
    checkTestSucceeded(events(10), "Scope 2 should Test 4")
    checkScopeClosed(events(11), "Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureFlatSpec extends fixture.FlatSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  behavior of "Fixture Scope 1"
  it should "Fixture Test 1" in { fixture => }
  it should "Fixture Test 2" in { fixture => }
  behavior of "Fixture Scope 2"
  it should "Fixture Test 3" in { fixture => }
  it should "Fixture Test 4" in { fixture => }

  /**
   * Asserts that the recorded events arrive in registration order. FlatSpec
   * joins subject and verb into names like "Fixture Scope 1 should Fixture Test 1".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Fixture Scope 1")
    checkTestStarting(events(1), "Fixture Scope 1 should Fixture Test 1")
    checkTestSucceeded(events(2), "Fixture Scope 1 should Fixture Test 1")
    checkTestStarting(events(3), "Fixture Scope 1 should Fixture Test 2")
    checkTestSucceeded(events(4), "Fixture Scope 1 should Fixture Test 2")
    checkScopeClosed(events(5), "Fixture Scope 1")
    checkScopeOpened(events(6), "Fixture Scope 2")
    checkTestStarting(events(7), "Fixture Scope 2 should Fixture Test 3")
    checkTestSucceeded(events(8), "Fixture Scope 2 should Fixture Test 3")
    checkTestStarting(events(9), "Fixture Scope 2 should Fixture Test 4")
    checkTestSucceeded(events(10), "Fixture Scope 2 should Fixture Test 4")
    checkScopeClosed(events(11), "Fixture Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFreeSpec extends FreeSpec with OrderExpectedResults with ParallelTestExecution {
  "Scope 1" - {
    "Test 1" in {}
    "Test 2" in {}
  }
  "Scope 2" - {
    "Test 3" in {}
    "Test 4" in {}
  }

  /**
   * Asserts that the recorded events arrive in registration order: each scope
   * produces ScopeOpened, its tests' Starting/Succeeded pairs, then ScopeClosed.
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Scope 1")
    checkTestStarting(events(1), "Scope 1 Test 1")
    checkTestSucceeded(events(2), "Scope 1 Test 1")
    checkTestStarting(events(3), "Scope 1 Test 2")
    checkTestSucceeded(events(4), "Scope 1 Test 2")
    checkScopeClosed(events(5), "Scope 1")
    checkScopeOpened(events(6), "Scope 2")
    checkTestStarting(events(7), "Scope 2 Test 3")
    checkTestSucceeded(events(8), "Scope 2 Test 3")
    checkTestStarting(events(9), "Scope 2 Test 4")
    checkTestSucceeded(events(10), "Scope 2 Test 4")
    checkScopeClosed(events(11), "Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureFreeSpec extends fixture.FreeSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  "Fixture Scope 1" - {
    "Fixture Test 1" in { fixture => }
    "Fixture Test 2" in { fixture => }
  }
  "Fixture Scope 2" - {
    "Fixture Test 3" in { fixture => }
    "Fixture Test 4" in { fixture => }
  }

  /**
   * Asserts that the recorded events arrive in registration order: each scope
   * produces ScopeOpened, its tests' Starting/Succeeded pairs, then ScopeClosed.
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Fixture Scope 1")
    checkTestStarting(events(1), "Fixture Scope 1 Fixture Test 1")
    checkTestSucceeded(events(2), "Fixture Scope 1 Fixture Test 1")
    checkTestStarting(events(3), "Fixture Scope 1 Fixture Test 2")
    checkTestSucceeded(events(4), "Fixture Scope 1 Fixture Test 2")
    checkScopeClosed(events(5), "Fixture Scope 1")
    checkScopeOpened(events(6), "Fixture Scope 2")
    checkTestStarting(events(7), "Fixture Scope 2 Fixture Test 3")
    checkTestSucceeded(events(8), "Fixture Scope 2 Fixture Test 3")
    checkTestStarting(events(9), "Fixture Scope 2 Fixture Test 4")
    checkTestSucceeded(events(10), "Fixture Scope 2 Fixture Test 4")
    checkScopeClosed(events(11), "Fixture Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderPropSpec extends PropSpec with OrderExpectedResults with ParallelTestExecution {
  property("Test 1") {}
  property("Test 2") {}
  property("Test 3") {}

  /**
   * Asserts that the recorded events arrive in registration order: one
   * TestStarting/TestSucceeded pair per property, in definition order.
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 6)
    checkTestStarting(events(0), "Test 1")
    checkTestSucceeded(events(1), "Test 1")
    checkTestStarting(events(2), "Test 2")
    checkTestSucceeded(events(3), "Test 2")
    checkTestStarting(events(4), "Test 3")
    checkTestSucceeded(events(5), "Test 3")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixturePropSpec extends fixture.PropSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  property("Fixture Test 1") { fixture => }
  property("Fixture Test 2") { fixture => }
  property("Fixture Test 3") { fixture => }

  /**
   * Asserts that the recorded events arrive in registration order: one
   * TestStarting/TestSucceeded pair per property, in definition order.
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 6)
    checkTestStarting(events(0), "Fixture Test 1")
    checkTestSucceeded(events(1), "Fixture Test 1")
    checkTestStarting(events(2), "Fixture Test 2")
    checkTestSucceeded(events(3), "Fixture Test 2")
    checkTestStarting(events(4), "Fixture Test 3")
    checkTestSucceeded(events(5), "Fixture Test 3")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderWordSpec extends WordSpec with OrderExpectedResults with ParallelTestExecution {
  "Scope 1" should {
    "Test 1" in {}
    "Test 2" in {}
  }
  "Scope 2" should {
    "Test 3" in {}
    "Test 4" in {}
  }

  /**
   * Asserts that the recorded events arrive in registration order. WordSpec
   * joins subject and verb into names like "Scope 1 should Test 1".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Scope 1")
    checkTestStarting(events(1), "Scope 1 should Test 1")
    checkTestSucceeded(events(2), "Scope 1 should Test 1")
    checkTestStarting(events(3), "Scope 1 should Test 2")
    checkTestSucceeded(events(4), "Scope 1 should Test 2")
    checkScopeClosed(events(5), "Scope 1")
    checkScopeOpened(events(6), "Scope 2")
    checkTestStarting(events(7), "Scope 2 should Test 3")
    checkTestSucceeded(events(8), "Scope 2 should Test 3")
    checkTestStarting(events(9), "Scope 2 should Test 4")
    checkTestSucceeded(events(10), "Scope 2 should Test 4")
    checkScopeClosed(events(11), "Scope 2")
  }
}
@DoNotDiscover
class ExampleParallelTestExecutionOrderFixtureWordSpec extends fixture.WordSpec with OrderExpectedResults with ParallelTestExecution with StringFixture {
  "Fixture Scope 1" should {
    "Fixture Test 1" in { fixture => }
    "Fixture Test 2" in { fixture => }
  }
  "Fixture Scope 2" should {
    "Fixture Test 3" in { fixture => }
    "Fixture Test 4" in { fixture => }
  }

  /**
   * Asserts that the recorded events arrive in registration order. WordSpec
   * joins subject and verb into names like "Fixture Scope 1 should Fixture Test 1".
   */
  def assertOrderTest(events: List[Event]): Unit = {
    assert(events.size === 12)
    checkScopeOpened(events(0), "Fixture Scope 1")
    checkTestStarting(events(1), "Fixture Scope 1 should Fixture Test 1")
    checkTestSucceeded(events(2), "Fixture Scope 1 should Fixture Test 1")
    checkTestStarting(events(3), "Fixture Scope 1 should Fixture Test 2")
    checkTestSucceeded(events(4), "Fixture Scope 1 should Fixture Test 2")
    checkScopeClosed(events(5), "Fixture Scope 1")
    checkScopeOpened(events(6), "Fixture Scope 2")
    checkTestStarting(events(7), "Fixture Scope 2 should Fixture Test 3")
    checkTestSucceeded(events(8), "Fixture Scope 2 should Fixture Test 3")
    checkTestStarting(events(9), "Fixture Scope 2 should Fixture Test 4")
    checkTestSucceeded(events(10), "Fixture Scope 2 should Fixture Test 4")
    checkScopeClosed(events(11), "Fixture Scope 2")
  }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/ParallelTestExecutionOrderExamples.scala | Scala | apache-2.0 | 19,046 |
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mapper
import net.liftweb.common._
import net.liftweb.util._
import Helpers._
import net.liftweb.http.{S, SHtml}
import scala.xml.NodeSeq
import net.liftweb.http.js._
/**
 * A string field initialized to a random string of length `maxLen`.
 * The field is not writable from the outside; call `reset()` to
 * regenerate the value.
 */
abstract class MappedUniqueId[T<:Mapper[T]](override val fieldOwner: T, override val maxLen: Int) extends MappedString[T](fieldOwner, maxLen) {
// Value cannot be set through write access (e.g. forms).
override def writePermission_? = false
// Generated once, lazily, per field instance.
override lazy val defaultValue = randomString(maxLen)
// Assigns a fresh random value and returns the owning Mapper.
def reset(): T = this(randomString(maxLen))
}
/**
* A field that holds the birth year for the user
*/
abstract class MappedBirthYear[T <: Mapper[T]](owner: T, minAge: Int) extends MappedInt[T](owner) {
// Default to the most recent year that satisfies the minimum age.
override def defaultValue = year(now) - minAge
/**
 * Renders a year drop-down spanning the 100 years up to
 * (current year - minAge), listed newest first.
 */
override def _toForm: Box[NodeSeq] = {
val end = (year(now) - minAge)
val start = end - 100
Full(SHtml.selectObj((start to end).
toList.
reverse.
map(y => (y, y.toString)),
Full(get), this.set) % ("id" -> fieldId))
}
}
/** An enum field backed by the Genders enumeration; defaults to Male. */
abstract class MappedGender[T <: Mapper[T]](owner: T) extends MappedEnum(owner, Genders) {
override def defaultValue = Genders.Male
}
/** Gender enumeration whose display names are localized via S.?. */
object Genders extends Enumeration {
val Male = new I18NGender(1, "male")
val Female = new I18NGender(2, "female")
// Enumeration value that renders its name through the i18n lookup
// rather than returning the raw key.
class I18NGender(id : Int, name: String) extends Val(id, name) {
override def toString = {
S.?(name)
}
}
}
/**
 * A random unique string used as the indexed, non-displayed primary key
 * of a table. Read-only; inherits random-value generation from
 * MappedUniqueId.
 */
abstract class MappedStringIndex[T<:Mapper[T]](override val fieldOwner: T, override val maxLen: Int) extends MappedUniqueId[T](fieldOwner, maxLen) with IndexedField[String] {
override def writePermission_? = false // not writable
override def dbIndexed_? = true
// Defined when the underlying value reference is non-null.
def defined_? = i_is_! ne null
override def dbPrimaryKey_? = true
// Primary key is hidden from generated displays.
override def dbDisplay_? = false
// String keys pass through to JDBC unchanged.
def makeKeyJDBCFriendly(in: String) = in
// convertKey variants wrap possibly-null inputs in a Box, stringifying as needed.
def convertKey(in: String): Box[String] = Box.legacyNullTest(in)
def convertKey(in: Int): Box[String] = Full(in.toString)
def convertKey(in: Long): Box[String] = Full(in.toString)
def convertKey(in: AnyRef): Box[String] =
Box.legacyNullTest(in).map(_.toString)
}
| sortable/framework | persistence/mapper/src/main/scala/net/liftweb/mapper/MappedUniqueId.scala | Scala | apache-2.0 | 2,649 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.ari.graphql.schema
import com.linkedin.data.schema.DataSchema
import com.linkedin.data.schema.IntegerDataSchema
import com.linkedin.data.schema.Name
import com.linkedin.data.schema.RecordDataSchema
import com.linkedin.data.schema.RecordDataSchema.Field
import com.linkedin.data.schema.RecordDataSchema.RecordType
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.mock.MockitoSugar
import com.linkedin.data.schema.UnionDataSchema
import org.coursera.naptime.ari.graphql.Models
import org.mockito.Mockito.when
import sangria.schema.IntType
import sangria.schema.ObjectType
import sangria.schema.UnionType
import scala.collection.JavaConverters._
/**
 * Tests for NaptimeUnionField: verifies GraphQL union types built from
 * Pegasus union schemas, covering single-member unions, typedDefinition
 * mappings, and shorthand member names resolved against a default namespace.
 *
 * Note: modifier spacing on the private vals is normalized
 * (`private[this]val` -> `private[this] val`) for consistency.
 */
class NaptimeUnionFieldTest extends AssertionsForJUnit with MockitoSugar {

  private[this] val resourceName = "courses.v1"
  private[this] val schemaMetadata = mock[SchemaMetadata]
  private[this] val resource = Models.courseResource

  // Stub the metadata lookups performed during field construction.
  when(schemaMetadata.getResourceOpt(resourceName)).thenReturn(Some(resource))
  when(schemaMetadata.getSchema(resource)).thenReturn(Some(null))

  /**
   * Builds a UnionDataSchema over `types`, attaching the given typedDefinition
   * mapping and any extra properties (e.g. a default "namespace").
   */
  def buildUnionDataSchema(
      types: List[DataSchema],
      typedDefinitions: Map[String, String] = Map.empty,
      properties: Map[String, AnyRef] = Map.empty): UnionDataSchema = {
    val union = new UnionDataSchema()
    val stringBuilder = new java.lang.StringBuilder()
    union.setTypes(types.asJava, stringBuilder)
    val unionProperties = Map("typedDefinition" -> typedDefinitions.asJava.asInstanceOf[AnyRef]) ++ properties
    union.setProperties(unionProperties.asJava)
    union
  }

  /** Builds a RecordDataSchema named `name` in `namespace` with the given fields. */
  private[this] def buildRecordField(
      name: String,
      fields: List[Field],
      namespace: String = "org.coursera.naptime") = {
    val fullName = new Name(name, namespace, new java.lang.StringBuilder())
    val recordDataSchema = new RecordDataSchema(fullName, RecordType.RECORD)
    fields.foreach(_.setRecord(recordDataSchema))
    val stringBuilder = new java.lang.StringBuilder()
    recordDataSchema.setFields(fields.asJava, stringBuilder)
    recordDataSchema
  }

  @Test
  def build_SingleElementUnion() = {
    val values = List(new IntegerDataSchema())
    val union = buildUnionDataSchema(values)
    val fieldName = "intOnlyUnion"
    val field = NaptimeUnionField.build(schemaMetadata, union, fieldName, None, resourceName)

    // A one-member union still yields a UnionType with a single member object.
    val expectedUnionTypes = List(
      ObjectType("courses_v1_intMember", List(
        FieldBuilder.buildPrimitiveField(fieldName, new IntegerDataSchema(), IntType))))
    val expectedField = UnionType("courses_v1_intOnlyUnion", None, expectedUnionTypes)
    assert(field.fieldType.toString === expectedField.toString)
  }

  @Test
  def build_TypedDefinitionUnion() = {
    val integerField = new Field(new IntegerDataSchema())
    integerField.setName("integerField", new java.lang.StringBuilder())
    val simpleFieldDataSchema = buildRecordField("simpleField", List(integerField))
    val complexFieldDataSchema = buildRecordField("complexField", List(integerField))
    val values = List(simpleFieldDataSchema, complexFieldDataSchema)
    // Fully-qualified typedDefinition keys map member types to display names.
    val union = buildUnionDataSchema(values, Map(
      "org.coursera.naptime.simpleField" -> "easy",
      "org.coursera.naptime.complexField" -> "hard"))
    val fieldName = "typedDefinitionTestField"
    val field = NaptimeUnionField.build(schemaMetadata, union, fieldName, None, resourceName)

    val expectedUnionTypes = List(
      ObjectType("courses_v1_easyMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          simpleFieldDataSchema,
          "easy",
          Some("org.coursera.naptime"),
          resourceName))),
      ObjectType("courses_v1_hardMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          complexFieldDataSchema,
          "hard",
          Some("org.coursera.naptime"),
          resourceName))))
    val expectedField = UnionType("courses_v1_typedDefinitionTestField", None, expectedUnionTypes)
    assert(field.fieldType.toString === expectedField.toString)
  }

  @Test
  def build_ShorthandTypedDefinitionUnion_InSameNamespace() = {
    val integerField = new Field(new IntegerDataSchema())
    integerField.setName("integerField", new java.lang.StringBuilder())
    val simpleFieldDataSchema = buildRecordField("simpleField", List(integerField))
    val complexFieldDataSchema = buildRecordField("complexField", List(integerField))
    val values = List(simpleFieldDataSchema, complexFieldDataSchema)
    // Shorthand keys resolve against the union's "namespace" property.
    val union = buildUnionDataSchema(values, Map(
      "simpleField" -> "easy",
      "complexField" -> "hard"),
      Map("namespace" -> "org.coursera.naptime"))
    val fieldName = "typedDefinitionTestField"
    val field = NaptimeUnionField.build(schemaMetadata, union, fieldName, None, resourceName)

    val expectedUnionTypes = List(
      ObjectType("courses_v1_easyMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          simpleFieldDataSchema,
          "easy",
          Some("org.coursera.naptime"),
          resourceName))),
      ObjectType("courses_v1_hardMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          complexFieldDataSchema,
          "hard",
          Some("org.coursera.naptime"),
          resourceName))))
    val expectedField = UnionType("courses_v1_typedDefinitionTestField", None, expectedUnionTypes)
    assert(field.fieldType.toString === expectedField.toString)
  }

  @Test
  def build_ShorthandTypedDefinitionUnion_InDifferentNamespace() = {
    val integerField = new Field(new IntegerDataSchema())
    integerField.setName("integerField", new java.lang.StringBuilder())
    // simpleField lives outside the default namespace, so the shorthand key
    // does not match it and the member keeps its fully-qualified name.
    val simpleFieldDataSchema = buildRecordField("simpleField", List(integerField), "org.coursera.awaketime")
    val complexFieldDataSchema = buildRecordField("complexField", List(integerField))
    val values = List(simpleFieldDataSchema, complexFieldDataSchema)
    val union = buildUnionDataSchema(values, Map(
      "simpleField" -> "easy",
      "complexField" -> "hard"),
      Map("namespace" -> "org.coursera.naptime"))
    val fieldName = "typedDefinitionTestField"
    val field = NaptimeUnionField.build(schemaMetadata, union, fieldName, None, resourceName)

    val expectedUnionTypes = List(
      ObjectType("courses_v1_org_coursera_awaketime_simpleFieldMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          simpleFieldDataSchema,
          "easy",
          Some("org.coursera.awaketime"),
          resourceName))),
      ObjectType("courses_v1_hardMember", List(
        NaptimeRecordField.build(
          schemaMetadata,
          complexFieldDataSchema,
          "hard",
          Some("org.coursera.naptime"),
          resourceName))))
    val expectedField = UnionType("courses_v1_typedDefinitionTestField", None, expectedUnionTypes)
    assert(field.fieldType.toString === expectedField.toString)
  }
}
| vkuo-coursera/naptime | naptime-graphql/src/test/scala/org/coursera/naptime/ari/graphql/schema/NaptimeUnionFieldTest.scala | Scala | apache-2.0 | 7,500 |
/* Copyright 2009-2013 EPFL, Lausanne
*
* Author: Ravi
* Date: 20.11.2013
**/
import leon.lang._
// Leon verification benchmark: a leftist max-heap used to sort a list.
// The require/ensuring clauses encode proof obligations checked by the
// Leon verifier, so the exact structure of each function is significant.
object HeapSort {
// Cons/Nil list of Ints.
sealed abstract class List
case class Cons(head:Int,tail:List) extends List
case class Nil() extends List
// Leftist heap: rk caches the rank (right-spine height) of the node.
sealed abstract class Heap
case class Leaf() extends Heap
case class Node(rk : Int, value: Int, left: Heap, right: Heap) extends Heap
// Length of the right spine of the heap.
private def rightHeight(h: Heap) : Int = {h match {
case Leaf() => 0
case Node(_,_,_,r) => rightHeight(r) + 1
}} ensuring(_ >= 0)
// Cached rank stored in the node (0 for a leaf).
private def rank(h: Heap) : Int = h match {
case Leaf() => 0
case Node(rk,_,_,_) => rk
}
// Invariant: right spine never taller than left, and cached rank is accurate.
private def hasLeftistProperty(h: Heap) : Boolean = (h match {
case Leaf() => true
case Node(_,_,l,r) => hasLeftistProperty(l) && hasLeftistProperty(r) && rightHeight(l) >= rightHeight(r) && (rank(h) == rightHeight(h))
})
// Number of elements in the heap.
def heapSize(t: Heap): Int = {
require(hasLeftistProperty(t))
(t match {
case Leaf() => 0
case Node(_,v, l, r) => heapSize(l) + 1 + heapSize(r)
})
} ensuring(_ >= 0)
// Merge two leftist heaps; recurses down the right spines, keeping the
// larger root on top. Size is preserved (see the ensuring clause).
private def merge(h1: Heap, h2: Heap) : Heap = {
require(hasLeftistProperty(h1) && hasLeftistProperty(h2))
h1 match {
case Leaf() => h2
case Node(_, v1, l1, r1) => h2 match {
case Leaf() => h1
case Node(_, v2, l2, r2) =>
if(v1 > v2)
makeT(v1, l1, merge(r1, h2))
else
makeT(v2, l2, merge(h1, r2))
}
}
} ensuring(res => hasLeftistProperty(res) && heapSize(h1) + heapSize(h2) == heapSize(res))
// Build a node, swapping children if needed so the higher-rank child is left.
private def makeT(value: Int, left: Heap, right: Heap) : Heap = {
if(rank(left) >= rank(right))
Node(rank(right) + 1, value, left, right)
else
Node(rank(left) + 1, value, right, left)
}
// Insert by merging a singleton heap.
def insert(element: Int, heap: Heap) : Heap = {
require(hasLeftistProperty(heap))
merge(Node(1, element, Leaf(), Leaf()), heap)
} ensuring(res => heapSize(res) == heapSize(heap) + 1)
// Maximum element is at the root; -1000 is the sentinel for an empty heap.
def findMax(h: Heap) : Int = {
require(hasLeftistProperty(h))
h match {
case Node(_,m,_,_) => m
case Leaf() => -1000
}
}
// Remove the root by merging its children; no-op on an empty heap.
def removeMax(h: Heap) : Heap = {
require(hasLeftistProperty(h))
h match {
case Node(_,_,l,r) => merge(l, r)
case l @ Leaf() => l
}
}
def listSize(l : List) : Int = (l match {
case Nil() => 0
case Cons(_, xs) => 1 + listSize(xs)
}) ensuring(_ >= 0)
// Drain the heap, consing each successive maximum onto l; since larger
// elements are consed first, the drained elements end up in ascending order.
def removeElements(h : Heap, l : List) : List = {
require(hasLeftistProperty(h))
h match {
case Leaf() => l
case _ => removeElements(removeMax(h),Cons(findMax(h),l))
}} ensuring(res => heapSize(h) + listSize(l) == listSize(res))
// Insert every list element into the accumulator heap.
def buildHeap(l : List, h: Heap) : Heap = {
require(hasLeftistProperty(h))
l match {
case Nil() => h
case Cons(x,xs) => buildHeap(xs, insert(x, h))
}} ensuring(res => hasLeftistProperty(res) && heapSize(h) + listSize(l) == heapSize(res))
// Heap sort: build a heap from l, then drain it. The postcondition proves
// only size preservation, not sortedness.
def sort(l: List): List = ({
val heap = buildHeap(l,Leaf())
removeElements(heap, Nil())
}) ensuring(res => listSize(res) == listSize(l))
}
| ericpony/scala-examples | testcases/verification/datastructures/HeapSort.scala | Scala | mit | 3,060 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import scala.collection.JavaConverters._
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NamespaceAlreadyExistsException, NoSuchDatabaseException, NoSuchNamespaceException, NoSuchTableException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME
import org.apache.spark.sql.connector.catalog.CatalogV2Util.withDefaultOwnership
import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION
import org.apache.spark.sql.internal.connector.SimpleTableProvider
import org.apache.spark.sql.sources.SimpleScanSource
import org.apache.spark.sql.types.{BooleanType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
class DataSourceV2SQLSuite
extends InsertIntoTests(supportsDynamicOverwrite = true, includeSQLOnlyTests = true)
with AlterTableTests {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
private val v2Source = classOf[FakeV2Provider].getName
override protected val v2Format = v2Source
override protected val catalogAndNamespace = "testcat.ns1.ns2."
private val defaultUser: String = Utils.getCurrentUserName()
// Looks up a catalog plugin by name from the active session's catalog manager.
private def catalog(name: String): CatalogPlugin = {
spark.sessionState.catalogManager.catalog(name)
}
// Inserts `insert`'s rows into `tableName` via SQL, registering the DataFrame
// as a temp view first. SaveMode.Overwrite maps to INSERT OVERWRITE; any
// other mode maps to INSERT INTO.
protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
val tmpView = "tmp_view"
withTempView(tmpView) {
insert.createOrReplaceTempView(tmpView)
val overwrite = if (mode == SaveMode.Overwrite) "OVERWRITE" else "INTO"
sql(s"INSERT $overwrite TABLE $tableName SELECT * FROM $tmpView")
}
}
// Checks that reading `tableName` yields exactly the expected rows.
override def verifyTable(tableName: String, expected: DataFrame): Unit = {
checkAnswer(spark.table(tableName), expected)
}
// Parses a multipart identifier (catalog.ns....table) and loads the table
// from the named catalog; the first part is the catalog, the last the table,
// everything in between the namespace.
override def getTableMetadata(tableName: String): Table = {
val nameParts = spark.sessionState.sqlParser.parseMultipartIdentifier(tableName)
val v2Catalog = catalog(nameParts.head).asTableCatalog
val namespace = nameParts.drop(1).init.toArray
v2Catalog.loadTable(Identifier.of(namespace, nameParts.last))
}
// Per-test setup: register the in-memory test catalogs (testcat,
// testcat_atomic, testcat2), swap the v2 session catalog implementation,
// and create the "source"/"source2" temp views used by insert tests.
before {
spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName)
spark.conf.set(
"spark.sql.catalog.testcat_atomic", classOf[StagingInMemoryTableCatalog].getName)
spark.conf.set("spark.sql.catalog.testcat2", classOf[InMemoryTableCatalog].getName)
spark.conf.set(
V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[InMemoryTableSessionCatalog].getName)
val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
df.createOrReplaceTempView("source")
val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data")
df2.createOrReplaceTempView("source2")
}
// Per-test teardown: reset the session catalog, catalog manager, and conf
// so catalog registrations do not leak between tests.
after {
spark.sessionState.catalog.reset()
spark.sessionState.catalogManager.reset()
spark.sessionState.conf.clear()
}
// CREATE TABLE against an explicit v2 catalog: verifies name, empty
// partitioning, provider property, schema, and that the table starts empty.
test("CreateTable: use v2 plan because catalog is set") {
spark.sql("CREATE TABLE testcat.table_name (id bigint NOT NULL, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
assert(table.schema == new StructType()
.add("id", LongType, nullable = false)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
// DESCRIBE TABLE on a v2 table: checks the output schema and rows (columns
// plus partitioning section), and that partition-specific DESCRIBE fails.
test("DescribeTable using v2 catalog") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string)" +
" USING foo" +
" PARTITIONED BY (id)")
val descriptionDf = spark.sql("DESCRIBE TABLE testcat.table_name")
assert(descriptionDf.schema.map(field => (field.name, field.dataType)) ===
Seq(
("col_name", StringType),
("data_type", StringType),
("comment", StringType)))
val description = descriptionDf.collect()
assert(description === Seq(
Row("id", "bigint", ""),
Row("data", "string", ""),
Row("", "", ""),
Row("# Partitioning", "", ""),
Row("Part 0", "id", "")))
// DESCRIBE ... PARTITION is not supported for v2 tables.
val e = intercept[AnalysisException] {
sql("DESCRIBE TABLE testcat.table_name PARTITION (id = 1)")
}
assert(e.message.contains("DESCRIBE does not support partition for v2 tables"))
}
// DESCRIBE of a non-existent v2 table raises AnalysisException.
test("DescribeTable with v2 catalog when table does not exist.") {
intercept[AnalysisException] {
spark.sql("DESCRIBE TABLE testcat.table_name")
}
}
// DESCRIBE TABLE EXTENDED: in addition to columns and partitioning, the
// detailed section must include name, comment, location, provider, owner,
// and table properties.
test("DescribeTable extended using v2 catalog") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string)" +
" USING foo" +
" PARTITIONED BY (id)" +
" TBLPROPERTIES ('bar'='baz')" +
" COMMENT 'this is a test table'" +
" LOCATION '/tmp/testcat/table_name'")
val descriptionDf = spark.sql("DESCRIBE TABLE EXTENDED testcat.table_name")
assert(descriptionDf.schema.map(field => (field.name, field.dataType))
=== Seq(
("col_name", StringType),
("data_type", StringType),
("comment", StringType)))
assert(descriptionDf.collect()
.map(_.toSeq)
.map(_.toArray.map(_.toString.trim)) === Array(
Array("id", "bigint", ""),
Array("data", "string", ""),
Array("", "", ""),
Array("# Partitioning", "", ""),
Array("Part 0", "id", ""),
Array("", "", ""),
Array("# Detailed Table Information", "", ""),
Array("Name", "testcat.table_name", ""),
Array("Comment", "this is a test table", ""),
Array("Location", "/tmp/testcat/table_name", ""),
Array("Provider", "foo", ""),
Array(TableCatalog.PROP_OWNER.capitalize, defaultUser, ""),
Array("Table Properties", "[bar=baz]", "")))
}
// CREATE TABLE without a catalog prefix but with a v2 provider: the table
// lands in the (v2-backed) session catalog under the "default" namespace.
test("CreateTable: use v2 plan and session catalog when provider is v2") {
spark.sql(s"CREATE TABLE table_name (id bigint, data string) USING $v2Source")
val testCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array("default"), "table_name"))
assert(table.name == "default.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == withDefaultOwnership(Map("provider" -> v2Source)).asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
// Re-creating an existing table must throw TableAlreadyExistsException and
// leave the original table (schema, properties, contents) untouched.
test("CreateTable: fail if table exists") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
// run a second create query that should fail
val exc = intercept[TableAlreadyExistsException] {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string, id2 bigint) USING bar")
}
assert(exc.getMessage.contains("table_name"))
// table should not have changed
val table2 = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table2.name == "testcat.table_name")
assert(table2.partitioning.isEmpty)
assert(table2.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
assert(table2.schema == new StructType().add("id", LongType).add("data", StringType))
// check that the table is still empty
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
test("CreateTable: if not exists") {
  spark.sql(
    "CREATE TABLE IF NOT EXISTS testcat.table_name (id bigint, data string) USING foo")
  val cat = catalog("testcat").asTableCatalog

  // The first statement creates the table with the expected metadata.
  val created = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(created.name == "testcat.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(created.schema == new StructType().add("id", LongType).add("data", StringType))

  // IF NOT EXISTS makes the second statement a no-op rather than an error.
  spark.sql("CREATE TABLE IF NOT EXISTS testcat.table_name (id bigint, data string) USING bar")

  // table should not have changed
  val reloaded = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(reloaded.name == "testcat.table_name")
  assert(reloaded.partitioning.isEmpty)
  assert(reloaded.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(reloaded.schema == new StructType().add("id", LongType).add("data", StringType))

  // check that the table is still empty
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), Seq.empty)
}
test("CreateTable: use default catalog for v2 sources when default catalog is set") {
  // With a default catalog configured, an unqualified identifier resolves into it.
  spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
  spark.sql("CREATE TABLE table_name (id bigint, data string) USING foo")

  val cat = catalog("testcat").asTableCatalog
  val created = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(created.name == "testcat.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(created.schema == new StructType().add("id", LongType).add("data", StringType))

  // check that the table is empty
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), Seq.empty)
}
// CREATE TABLE without a USING clause: catalog plugins must get no default
// provider, while the session catalog must fall back to the default data source.
test("CreateTable: without USING clause") {
spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key, "false")
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
val testCatalog = catalog("testcat").asTableCatalog
sql("CREATE TABLE testcat.t1 (id int)")
val t1 = testCatalog.loadTable(Identifier.of(Array(), "t1"))
// Spark shouldn't set the default provider for catalog plugins.
assert(!t1.properties.containsKey(TableCatalog.PROP_PROVIDER))
sql("CREATE TABLE t2 (id int)")
val t2 = spark.sessionState.catalogManager.v2SessionCatalog.asTableCatalog
.loadTable(Identifier.of(Array("default"), "t2")).asInstanceOf[V1Table]
// Spark should set the default provider as DEFAULT_DATA_SOURCE_NAME for the session catalog.
assert(t2.v1Table.provider == Some(conf.defaultDataSourceName))
}
// NOTE(review): fixed typo in the test name ("RepalceTable" -> "ReplaceTable").
test("CreateTable/ReplaceTable: invalid schema if has interval type") {
  Seq("CREATE", "REPLACE").foreach { action =>
    // Interval columns are rejected both at top level and nested in complex types.
    val e1 = intercept[AnalysisException](
      sql(s"$action TABLE table_name (id int, value interval) USING $v2Format"))
    assert(e1.getMessage.contains("Cannot use interval type in the table schema."))
    val e2 = intercept[AnalysisException](
      sql(s"$action TABLE table_name (id array<interval>) USING $v2Format"))
    assert(e2.getMessage.contains("Cannot use interval type in the table schema."))
  }
}
test("CTAS/RTAS: invalid schema if has interval type") {
  // Interval query results are rejected when they would become table columns,
  // whether top-level or nested inside an array.
  for (action <- Seq("CREATE", "REPLACE")) {
    val topLevel = intercept[AnalysisException](
      sql(s"$action TABLE table_name USING $v2Format as select interval 1 day"))
    assert(topLevel.getMessage.contains("Cannot use interval type in the table schema."))
    val nested = intercept[AnalysisException](
      sql(s"$action TABLE table_name USING $v2Format as select array(interval 1 day)"))
    assert(nested.getMessage.contains("Cannot use interval type in the table schema."))
  }
}
test("CreateTableAsSelect: use v2 plan because catalog is set") {
  // Exercise the same CTAS against a basic and an atomic (staging) catalog.
  val cases = Seq(
    (catalog("testcat").asTableCatalog, "testcat.table_name"),
    (catalog("testcat_atomic").asTableCatalog, "testcat_atomic.table_name"))
  cases.foreach { case (cat, identifier) =>
    spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
    val created = cat.loadTable(Identifier.of(Array(), "table_name"))
    assert(created.name == identifier)
    assert(created.partitioning.isEmpty)
    assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
    assert(created.schema == new StructType()
      .add("id", LongType)
      .add("data", StringType))
    // CTAS must have copied all rows from the source table.
    val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
    checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), spark.table("source"))
  }
}
// Regression test: the DataFrame returned by CTAS must not re-run the write when
// it is consumed repeatedly via collect/take/tail, with or without a filter.
// If the write re-executed, the final row check against "source" would see duplicates.
test("CreateTableAsSelect: do not double execute on collect(), take() and other queries") {
val basicCatalog = catalog("testcat").asTableCatalog
val atomicCatalog = catalog("testcat_atomic").asTableCatalog
val basicIdentifier = "testcat.table_name"
val atomicIdentifier = "testcat_atomic.table_name"
Seq((basicCatalog, basicIdentifier), (atomicCatalog, atomicIdentifier)).foreach {
case (catalog, identifier) =>
val df = spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
// Consume the command result many times; each call must be a no-op re-execution-wise.
df.collect()
df.take(5)
df.tail(5)
df.where("true").collect()
df.where("true").take(5)
df.where("true").tail(5)
val table = catalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == identifier)
assert(table.partitioning.isEmpty)
assert(table.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
assert(table.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
}
}
test("ReplaceTableAsSelect: basic v2 implementation.") {
  val cases = Seq(
    (catalog("testcat").asTableCatalog, "testcat.table_name"),
    (catalog("testcat_atomic").asTableCatalog, "testcat_atomic.table_name"))
  cases.foreach { case (cat, identifier) =>
    spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
    val original = cat.loadTable(Identifier.of(Array(), "table_name"))

    // RTAS swaps in a new table with the narrower schema and fresh contents.
    spark.sql(s"REPLACE TABLE $identifier USING foo AS SELECT id FROM source")
    val replaced = cat.loadTable(Identifier.of(Array(), "table_name"))

    assert(replaced != original, "Table should have been replaced.")
    assert(replaced.name == identifier)
    assert(replaced.partitioning.isEmpty)
    assert(replaced.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
    assert(replaced.schema == new StructType().add("id", LongType))

    val rowRdd = spark.sparkContext.parallelize(replaced.asInstanceOf[InMemoryTable].rows)
    checkAnswer(
      spark.internalCreateDataFrame(rowRdd, replaced.schema),
      spark.table("source").select("id"))
  }
}
// Generates one test per replace flavor: like the CTAS variant above, the
// DataFrame returned by RTAS must not re-run the replacement when consumed
// repeatedly through collect/take/tail, filtered or not.
Seq("REPLACE", "CREATE OR REPLACE").foreach { cmd =>
test(s"ReplaceTableAsSelect: do not double execute $cmd on collect()") {
val basicCatalog = catalog("testcat").asTableCatalog
val atomicCatalog = catalog("testcat_atomic").asTableCatalog
val basicIdentifier = "testcat.table_name"
val atomicIdentifier = "testcat_atomic.table_name"
Seq((basicCatalog, basicIdentifier), (atomicCatalog, atomicIdentifier)).foreach {
case (catalog, identifier) =>
spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
val originalTable = catalog.loadTable(Identifier.of(Array(), "table_name"))
val df = spark.sql(s"$cmd TABLE $identifier USING foo AS SELECT id FROM source")
// Consume the command result many times; the replace must happen exactly once.
df.collect()
df.take(5)
df.tail(5)
df.where("true").collect()
df.where("true").take(5)
df.where("true").tail(5)
val replacedTable = catalog.loadTable(Identifier.of(Array(), "table_name"))
assert(replacedTable != originalTable, "Table should have been replaced.")
assert(replacedTable.name == identifier)
assert(replacedTable.partitioning.isEmpty)
assert(replacedTable.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
assert(replacedTable.schema == new StructType().add("id", LongType))
val rdd = spark.sparkContext.parallelize(replacedTable.asInstanceOf[InMemoryTable].rows)
checkAnswer(
spark.internalCreateDataFrame(rdd, replacedTable.schema),
spark.table("source").select("id"))
}
}
}
test("ReplaceTableAsSelect: Non-atomic catalog drops the table if the write fails.") {
  spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
  val cat = catalog("testcat").asTableCatalog
  val before = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(before.asInstanceOf[InMemoryTable].rows.nonEmpty)

  // A non-atomic catalog drops first, then recreates; a failed write leaves no table.
  intercept[Exception] {
    spark.sql("REPLACE TABLE testcat.table_name" +
      s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}`=true)" +
      s" AS SELECT id FROM source")
  }

  assert(!cat.tableExists(Identifier.of(Array(), "table_name")),
    "Table should have been dropped as a result of the replace.")
}
test("ReplaceTableAsSelect: Non-atomic catalog drops the table permanently if the" +
  " subsequent table creation fails.") {
  spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
  val cat = catalog("testcat").asTableCatalog
  val before = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(before.asInstanceOf[InMemoryTable].rows.nonEmpty)

  // The drop succeeds, but the simulated create failure leaves nothing behind.
  intercept[Exception] {
    spark.sql("REPLACE TABLE testcat.table_name" +
      s" USING foo" +
      s" TBLPROPERTIES (`${InMemoryTableCatalog.SIMULATE_FAILED_CREATE_PROPERTY}`=true)" +
      s" AS SELECT id FROM source")
  }

  assert(!cat.tableExists(Identifier.of(Array(), "table_name")),
    "Table should have been dropped and failed to be created.")
}
test("ReplaceTableAsSelect: Atomic catalog does not drop the table when replace fails.") {
  spark.sql("CREATE TABLE testcat_atomic.table_name USING foo AS SELECT id, data FROM source")
  val testCatalog = catalog("testcat_atomic").asTableCatalog
  val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))

  // A failed write must leave the original table in place in an atomic catalog.
  intercept[Exception] {
    // BUG FIX: the option key was missing its closing backtick (compare the
    // non-atomic test above), so the statement failed to parse instead of
    // exercising the simulated write failure.
    spark.sql("REPLACE TABLE testcat_atomic.table_name" +
      s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}`=true)" +
      s" AS SELECT id FROM source")
  }
  var maybeReplacedTable = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
  assert(maybeReplacedTable === table, "Table should not have changed.")

  // A failed staged-create must likewise leave the original table in place.
  intercept[Exception] {
    spark.sql("REPLACE TABLE testcat_atomic.table_name" +
      s" USING foo" +
      s" TBLPROPERTIES (`${InMemoryTableCatalog.SIMULATE_FAILED_CREATE_PROPERTY}`=true)" +
      s" AS SELECT id FROM source")
  }
  maybeReplacedTable = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
  assert(maybeReplacedTable === table, "Table should not have changed.")
}
test("ReplaceTable: Erases the table contents and changes the metadata.") {
  spark.sql(s"CREATE TABLE testcat.table_name USING $v2Source AS SELECT id, data FROM source")
  val cat = catalog("testcat").asTableCatalog
  val original = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(original.asInstanceOf[InMemoryTable].rows.nonEmpty)

  // REPLACE TABLE without AS SELECT leaves an empty table with the new schema.
  spark.sql("REPLACE TABLE testcat.table_name (id bigint NOT NULL) USING foo")
  val replaced = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(replaced.asInstanceOf[InMemoryTable].rows.isEmpty,
    "Replaced table should have no rows after committing.")
  assert(replaced.schema().fields.length === 1,
    "Replaced table should have new schema.")
  assert(replaced.schema().fields(0) === StructField("id", LongType, nullable = false),
    "Replaced table should have new schema.")
}
// CREATE OR REPLACE on a table that does not yet exist must behave exactly like
// plain CTAS: same rows, same schema. Checked for both catalog flavors.
test("ReplaceTableAsSelect: CREATE OR REPLACE new table has same behavior as CTAS.") {
Seq("testcat", "testcat_atomic").foreach { catalogName =>
spark.sql(
s"""
|CREATE TABLE $catalogName.created USING $v2Source
|AS SELECT id, data FROM source
""".stripMargin)
spark.sql(
s"""
|CREATE OR REPLACE TABLE $catalogName.replaced USING $v2Source
|AS SELECT id, data FROM source
""".stripMargin)
val testCatalog = catalog(catalogName).asTableCatalog
val createdTable = testCatalog.loadTable(Identifier.of(Array(), "created"))
val replacedTable = testCatalog.loadTable(Identifier.of(Array(), "replaced"))
// Both statements ran the same query, so contents and schema must match.
assert(createdTable.asInstanceOf[InMemoryTable].rows ===
replacedTable.asInstanceOf[InMemoryTable].rows)
assert(createdTable.schema === replacedTable.schema)
}
}
test("ReplaceTableAsSelect: REPLACE TABLE throws exception if table does not exist.") {
  // REPLACE (without OR) requires the target table to already exist.
  Seq("testcat", "testcat_atomic").foreach { catalogName =>
    spark.sql(s"CREATE TABLE $catalogName.created USING $v2Source AS SELECT id, data FROM source")
    intercept[CannotReplaceMissingTableException] {
      spark.sql(s"REPLACE TABLE $catalogName.replaced USING $v2Source AS SELECT id, data FROM source")
    }
  }
}
test("ReplaceTableAsSelect: REPLACE TABLE throws exception if table is dropped before commit.") {
  import InMemoryTableCatalog._
  spark.sql(s"CREATE TABLE testcat_atomic.created USING $v2Source AS SELECT id, data FROM source")
  // The catalog is told to drop the target between staging and commit, so the
  // commit must fail with a missing-table error.
  val replaceStmt = s"REPLACE TABLE testcat_atomic.replaced" +
    s" USING $v2Source" +
    s" TBLPROPERTIES (`$SIMULATE_DROP_BEFORE_REPLACE_PROPERTY`=true)" +
    s" AS SELECT id, data FROM source"
  intercept[CannotReplaceMissingTableException] {
    spark.sql(replaceStmt)
  }
}
test("CreateTableAsSelect: use v2 plan and session catalog when provider is v2") {
  spark.sql(s"CREATE TABLE table_name USING $v2Source AS SELECT id, data FROM source")

  // A v2 provider routes the unqualified identifier to the v2 session catalog.
  val sessionCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
  val created = sessionCatalog.loadTable(Identifier.of(Array("default"), "table_name"))
  assert(created.name == "default.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> v2Source)).asJava)
  assert(created.schema == new StructType()
    .add("id", LongType)
    .add("data", StringType))

  // CTAS must have copied all rows from the source table.
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), spark.table("source"))
}
test("CreateTableAsSelect: fail if table exists") {
  spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
  val cat = catalog("testcat").asTableCatalog

  // Verify metadata and contents of the table written by the first CTAS.
  val created = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(created.name == "testcat.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(created.schema == new StructType()
    .add("id", LongType)
    .add("data", StringType))
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), spark.table("source"))

  // run a second CTAS query that should fail
  val err = intercept[TableAlreadyExistsException] {
    spark.sql(
      "CREATE TABLE testcat.table_name USING bar AS SELECT id, data, id as id2 FROM source2")
  }
  assert(err.getMessage.contains("table_name"))

  // table should not have changed
  val reloaded = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(reloaded.name == "testcat.table_name")
  assert(reloaded.partitioning.isEmpty)
  assert(reloaded.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(reloaded.schema == new StructType()
    .add("id", LongType)
    .add("data", StringType))
  val rowRdd2 = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd2, created.schema), spark.table("source"))
}
test("CreateTableAsSelect: if not exists") {
  spark.sql(
    "CREATE TABLE IF NOT EXISTS testcat.table_name USING foo AS SELECT id, data FROM source")
  val cat = catalog("testcat").asTableCatalog

  val created = cat.loadTable(Identifier.of(Array(), "table_name"))
  assert(created.name == "testcat.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(created.schema == new StructType()
    .add("id", LongType)
    .add("data", StringType))
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), spark.table("source"))

  // A second CTAS with IF NOT EXISTS is a no-op: no error and no data change.
  spark.sql(
    "CREATE TABLE IF NOT EXISTS testcat.table_name USING foo AS SELECT id, data FROM source2")

  // check that the table contains data from just the first CTAS
  val rowRdd2 = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd2, created.schema), spark.table("source"))
}
test("CreateTableAsSelect: use default catalog for v2 sources when default catalog is set") {
  spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
  val sourceDf = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
  sourceDf.createOrReplaceTempView("source")

  // setting the default catalog breaks the reference to source because the default catalog is
  // used and AsTableIdentifier no longer matches
  spark.sql("CREATE TABLE table_name USING foo AS SELECT id, data FROM source")

  val created = catalog("testcat").asTableCatalog.loadTable(Identifier.of(Array(), "table_name"))
  assert(created.name == "testcat.table_name")
  assert(created.partitioning.isEmpty)
  assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
  assert(created.schema == new StructType()
    .add("id", LongType)
    .add("data", StringType))
  val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
  checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), spark.table("source"))
}
test("CreateTableAsSelect: v2 session catalog can load v1 source table") {
  // unset this config to use the default v2 session catalog.
  spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
  val sourceDf = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
  sourceDf.createOrReplaceTempView("source")

  sql("CREATE TABLE table_name USING parquet AS SELECT id, data FROM source")
  checkAnswer(sql("TABLE default.table_name"), spark.table("source"))

  // The fact that the following line doesn't throw an exception means, the session catalog
  // can load the table.
  val loaded = catalog(SESSION_CATALOG_NAME).asTableCatalog
    .loadTable(Identifier.of(Array("default"), "table_name"))
  assert(loaded.isInstanceOf[V1Table], "V1 table wasn't returned as an unresolved table")
}
test("CreateTableAsSelect: nullable schema") {
  val cases = Seq(
    (catalog("testcat").asTableCatalog, "testcat.table_name"),
    (catalog("testcat_atomic").asTableCatalog, "testcat_atomic.table_name"))
  cases.foreach { case (cat, identifier) =>
    // CTAS from a non-nullable literal should still produce a nullable column.
    spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT 1 i")
    val created = cat.loadTable(Identifier.of(Array(), "table_name"))
    assert(created.name == identifier)
    assert(created.partitioning.isEmpty)
    assert(created.properties == withDefaultOwnership(Map("provider" -> "foo")).asJava)
    assert(created.schema == new StructType().add("i", "int"))

    val rowRdd = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
    checkAnswer(spark.internalCreateDataFrame(rowRdd, created.schema), Row(1))

    // Inserting a null must succeed because the column is nullable.
    sql(s"INSERT INTO $identifier SELECT CAST(null AS INT)")
    val rowRdd2 = spark.sparkContext.parallelize(created.asInstanceOf[InMemoryTable].rows)
    checkAnswer(spark.internalCreateDataFrame(rowRdd2, created.schema), Seq(Row(1), Row(null)))
  }
}
test("CreateTableAsSelect: without USING clause") {
  spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key, "false")
  // unset this config to use the default v2 session catalog.
  spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)

  // Spark shouldn't set the default provider for catalog plugins.
  sql("CREATE TABLE testcat.t1 AS SELECT 1 i")
  val t1 = catalog("testcat").asTableCatalog.loadTable(Identifier.of(Array(), "t1"))
  assert(!t1.properties.containsKey(TableCatalog.PROP_PROVIDER))

  // Spark should set the default provider as DEFAULT_DATA_SOURCE_NAME for the session catalog.
  sql("CREATE TABLE t2 AS SELECT 1 i")
  val t2 = spark.sessionState.catalogManager.v2SessionCatalog.asTableCatalog
    .loadTable(Identifier.of(Array("default"), "t2")).asInstanceOf[V1Table]
  assert(t2.v1Table.provider == Some(conf.defaultDataSourceName))
}
test("DropTable: basic") {
  val tableName = "testcat.ns1.ns2.tbl"
  val ident = Identifier.of(Array("ns1", "ns2"), "tbl")
  sql(s"CREATE TABLE $tableName USING foo AS SELECT id, data FROM source")
  // The table exists right after creation and is gone after DROP TABLE.
  assert(catalog("testcat").asTableCatalog.tableExists(ident))
  sql(s"DROP TABLE $tableName")
  assert(!catalog("testcat").asTableCatalog.tableExists(ident))
}
test("DropTable: table qualified with the session catalog name") {
  val ident = Identifier.of(Array("default"), "tbl")
  sql("CREATE TABLE tbl USING json AS SELECT 1 AS i")
  assert(catalog("spark_catalog").asTableCatalog.tableExists(ident))
  // The fully qualified spark_catalog name must resolve to the same table.
  sql("DROP TABLE spark_catalog.default.tbl")
  assert(!catalog("spark_catalog").asTableCatalog.tableExists(ident))
}
test("DropTable: if exists") {
  // Dropping a missing table fails without IF EXISTS and is a no-op with it.
  intercept[NoSuchTableException] {
    sql("DROP TABLE testcat.db.notbl")
  }
  sql("DROP TABLE IF EXISTS testcat.db.notbl")
}
test("Relation: basic") {
  val t1 = "testcat.ns1.ns2.tbl"
  withTable(t1) {
    sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
    // Both TABLE and SELECT * must resolve the multi-part v2 identifier.
    checkAnswer(sql(s"TABLE $t1"), spark.table("source"))
    checkAnswer(sql(s"SELECT * FROM $t1"), spark.table("source"))
  }
}
test("Relation: SparkSession.table()") {
  val t1 = "testcat.ns1.ns2.tbl"
  withTable(t1) {
    sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
    // The programmatic table() API must resolve v2 identifiers too.
    checkAnswer(spark.table(t1), spark.table("source"))
  }
}
// A v2 table must be resolvable from inside a common table expression.
test("Relation: CTE") {
val t1 = "testcat.ns1.ns2.tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
checkAnswer(
sql(s"""
|WITH cte AS (SELECT * FROM $t1)
|SELECT * FROM cte
""".stripMargin),
spark.table("source"))
}
}
test("Relation: view text") {
  val t1 = "testcat.ns1.ns2.tbl"
  withTable(t1) {
    // BUG FIX: the body was previously passed as a function literal
    // `{ v1: String => ... }` to withView. If withView takes a by-name Unit
    // block (the usual SQLTestUtils shape — confirm), that function value was
    // discarded and the assertions never ran. Bind the view name explicitly.
    val v1 = "view1"
    withView(v1) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
      sql(s"CREATE VIEW $v1 AS SELECT * from $t1")
      checkAnswer(sql(s"TABLE $v1"), spark.table("source"))
    }
  }
}
// A single query must be able to join tables that live in two different v2 catalogs.
test("Relation: join tables in 2 catalogs") {
val t1 = "testcat.ns1.ns2.tbl"
val t2 = "testcat2.v2tbl"
withTable(t1, t2) {
sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
sql(s"CREATE TABLE $t2 USING foo AS SELECT id, data FROM source2")
// Build the expected answer from the original source views with the same join.
val df1 = spark.table("source")
val df2 = spark.table("source2")
val df_joined = df1.join(df2).where(df1("id") + 1 === df2("id"))
checkAnswer(
sql(s"""
|SELECT *
|FROM $t1 t1, $t2 t2
|WHERE t1.id + 1 = t2.id
""".stripMargin),
df_joined)
}
}
// Column references on v2 tables may be qualified by any suffix of the full
// catalog.namespace.table name; over-qualified references must fail to resolve.
// The USE statement mid-test changes the current catalog/namespace, so statement
// order matters here.
test("qualified column names for v2 tables") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, point struct<x: bigint, y: bigint>) USING foo")
sql(s"INSERT INTO $t VALUES (1, (10, 20))")
// Each qualification depth, from fully qualified down to bare table name,
// must resolve both a top-level and a nested (struct) column.
def check(tbl: String): Unit = {
checkAnswer(
sql(s"SELECT testcat.ns1.ns2.tbl.id, testcat.ns1.ns2.tbl.point.x FROM $tbl"),
Row(1, 10))
checkAnswer(sql(s"SELECT ns1.ns2.tbl.id, ns1.ns2.tbl.point.x FROM $tbl"), Row(1, 10))
checkAnswer(sql(s"SELECT ns2.tbl.id, ns2.tbl.point.x FROM $tbl"), Row(1, 10))
checkAnswer(sql(s"SELECT tbl.id, tbl.point.x FROM $tbl"), Row(1, 10))
}
// Test with qualified table name "testcat.ns1.ns2.tbl".
check(t)
// Test if current catalog and namespace is respected in column resolution.
sql("USE testcat.ns1.ns2")
check("tbl")
// A qualifier that does not match the table's name parts must not resolve.
val ex = intercept[AnalysisException] {
sql(s"SELECT ns1.ns2.ns3.tbl.id from $t")
}
assert(ex.getMessage.contains("cannot resolve '`ns1.ns2.ns3.tbl.id`"))
}
}
// Qualified column references must resolve identically whether the session
// catalog serves a v1 table or is overridden by the in-memory v2 implementation.
test("qualified column names for v1 tables") {
Seq(true, false).foreach { useV1Table =>
val format = if (useV1Table) "json" else v2Format
if (useV1Table) {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
} else {
spark.conf.set(
V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[InMemoryTableSessionCatalog].getName)
}
withTable("t") {
sql(s"CREATE TABLE t USING $format AS SELECT 1 AS i")
// Every qualification depth — bare, table, db.table, catalog.db.table —
// must work for both an unqualified and a fully qualified FROM clause.
checkAnswer(sql("select i from t"), Row(1))
checkAnswer(sql("select t.i from t"), Row(1))
checkAnswer(sql("select default.t.i from t"), Row(1))
checkAnswer(sql("select spark_catalog.default.t.i from t"), Row(1))
checkAnswer(sql("select t.i from spark_catalog.default.t"), Row(1))
checkAnswer(sql("select default.t.i from spark_catalog.default.t"), Row(1))
checkAnswer(sql("select spark_catalog.default.t.i from spark_catalog.default.t"), Row(1))
}
}
}
test("InsertInto: append - across catalog") {
  val srcTable = "testcat.ns1.ns2.tbl"
  val dstTable = "testcat2.db.tbl"
  withTable(srcTable, dstTable) {
    sql(s"CREATE TABLE $srcTable USING foo AS SELECT * FROM source")
    sql(s"CREATE TABLE $dstTable (id bigint, data string) USING foo")
    // Inserting from a table in one catalog into a table in another must work.
    sql(s"INSERT INTO $dstTable SELECT * FROM $srcTable")
    checkAnswer(spark.table(dstTable), spark.table("source"))
  }
}
test("ShowTables: using v2 catalog") {
  spark.sql("CREATE TABLE testcat.db.table_name (id bigint, data string) USING foo")
  spark.sql("CREATE TABLE testcat.n1.n2.db.table_name (id bigint, data string) USING foo")
  // SHOW TABLES lists only the tables of the exact namespace it was given.
  runShowTablesSql("SHOW TABLES FROM testcat.db", Seq(Row("db", "table_name")))
  runShowTablesSql("SHOW TABLES FROM testcat.n1.n2.db", Seq(Row("n1.n2.db", "table_name")))
}
test("ShowTables: using v2 catalog with a pattern") {
  spark.sql("CREATE TABLE testcat.db.table (id bigint, data string) USING foo")
  spark.sql("CREATE TABLE testcat.db.table_name_1 (id bigint, data string) USING foo")
  spark.sql("CREATE TABLE testcat.db.table_name_2 (id bigint, data string) USING foo")
  spark.sql("CREATE TABLE testcat.db2.table_name_2 (id bigint, data string) USING foo")

  // Without a pattern, every table in the namespace is listed.
  runShowTablesSql(
    "SHOW TABLES FROM testcat.db",
    Seq(
      Row("db", "table"),
      Row("db", "table_name_1"),
      Row("db", "table_name_2")))

  // LIKE filters by table name; tables in other namespaces are never included.
  runShowTablesSql(
    "SHOW TABLES FROM testcat.db LIKE '*name*'",
    Seq(Row("db", "table_name_1"), Row("db", "table_name_2")))
  runShowTablesSql(
    "SHOW TABLES FROM testcat.db LIKE '*2'",
    Seq(Row("db", "table_name_2")))
}
test("ShowTables: using v2 catalog, namespace doesn't exist") {
  // An unknown namespace yields an empty result rather than an error.
  runShowTablesSql("SHOW TABLES FROM testcat.unknown", Seq.empty)
}
test("ShowTables: using v1 catalog") {
  // The v1 result schema carries the extra isTemporary column.
  runShowTablesSql(
    "SHOW TABLES FROM default",
    Seq(Row("", "source", true), Row("", "source2", true)),
    expectV2Catalog = false)
}
test("ShowTables: using v1 catalog, db doesn't exist ") {
  // 'db' below resolves to a database name for v1 catalog because there is no catalog named
  // 'db' and there is no default catalog set.
  val err = intercept[NoSuchDatabaseException] {
    runShowTablesSql("SHOW TABLES FROM db", Seq.empty, expectV2Catalog = false)
  }
  assert(err.getMessage.contains("Database 'db' not found"))
}
test("ShowTables: using v1 catalog, db name with multipartIdentifier ('a.b') is not allowed.") {
  // v1 databases are single-part; a dotted name must be rejected.
  val err = intercept[AnalysisException] {
    runShowTablesSql("SHOW TABLES FROM a.b", Seq.empty, expectV2Catalog = false)
  }
  assert(err.getMessage.contains("The database name is not valid: a.b"))
}
test("ShowViews: using v1 catalog, db name with multipartIdentifier ('a.b') is not allowed.") {
  // BUG FIX: this test previously ran "SHOW TABLES FROM a.b", duplicating the
  // ShowTables test above; per its name it must exercise SHOW VIEWS.
  val exception = intercept[AnalysisException] {
    sql("SHOW VIEWS FROM a.b")
  }
  assert(exception.getMessage.contains("The database name is not valid: a.b"))
}
test("ShowViews: using v2 catalog, command not supported.") {
  // SHOW VIEWS is only implemented for the session catalog.
  val err = intercept[AnalysisException] {
    sql("SHOW VIEWS FROM testcat")
  }
  assert(err.getMessage.contains("Catalog testcat doesn't support SHOW VIEWS," +
    " only SessionCatalog supports this command."))
}
test("ShowTables: using v2 catalog with empty namespace") {
  spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo")
  // A table created directly under the catalog reports an empty namespace.
  runShowTablesSql("SHOW TABLES FROM testcat", Seq(Row("", "table")))
}
test("ShowTables: namespace is not specified and default v2 catalog is set") {
  spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
  spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo")
  // v2 catalog is used where default namespace is empty for TestInMemoryTableCatalog.
  runShowTablesSql("SHOW TABLES", Seq(Row("", "table")))
}
test("ShowTables: namespace not specified and default v2 catalog not set - fallback to v1") {
  // Without a default v2 catalog, SHOW TABLES goes through the v1 session catalog.
  runShowTablesSql(
    "SHOW TABLES",
    Seq(Row("", "source", true), Row("", "source2", true)),
    expectV2Catalog = false)
  // The LIKE pattern also works through the v1 path.
  runShowTablesSql(
    "SHOW TABLES LIKE '*2'",
    Seq(Row("", "source2", true)),
    expectV2Catalog = false)
}
// SHOW TABLES must follow the current catalog/namespace as USE statements change
// them; the three runs below depend on executing in exactly this order.
test("ShowTables: change current catalog and namespace with USE statements") {
sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
// Initially, the v2 session catalog (current catalog) is used.
runShowTablesSql(
"SHOW TABLES", Seq(Row("", "source", true), Row("", "source2", true)),
expectV2Catalog = false)
// Update the current catalog, and no table is matched since the current namespace is Array().
sql("USE testcat")
runShowTablesSql("SHOW TABLES", Seq())
// Update the current namespace to match ns1.ns2.table.
sql("USE testcat.ns1.ns2")
runShowTablesSql("SHOW TABLES", Seq(Row("ns1.ns2", "table")))
}
/**
 * Executes the given SHOW TABLES statement and checks both the result schema
 * and the returned rows.
 *
 * @param sqlText the SHOW TABLES statement to run
 * @param expected the expected result rows, in order
 * @param expectV2Catalog whether the v2 (namespace/tableName) schema is expected,
 *                        or the v1 (database/tableName/isTemporary) schema
 */
private def runShowTablesSql(
    sqlText: String,
    expected: Seq[Row],
    expectV2Catalog: Boolean = true): Unit = {
  val expectedSchema = if (expectV2Catalog) {
    new StructType()
      .add("namespace", StringType, nullable = false)
      .add("tableName", StringType, nullable = false)
  } else {
    new StructType()
      .add("database", StringType, nullable = false)
      .add("tableName", StringType, nullable = false)
      .add("isTemporary", BooleanType, nullable = false)
  }
  val result = spark.sql(sqlText)
  assert(result.schema === expectedSchema)
  assert(expected === result.collect())
}
// SHOW TABLE EXTENDED is a v1-only command: pointing it at a multi-part v2
// namespace must fail with the invalid-database error, in every syntactic form.
test("SHOW TABLE EXTENDED not valid v1 database") {
// Asserts that the command fails and reports the offending namespace.
def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = {
val e = intercept[AnalysisException] {
sql(sqlCommand)
}
assert(e.message.contains(s"The database name is not valid: ${namespace}"))
}
val namespace = "testcat.ns1.ns2"
val table = "tbl"
withTable(s"$namespace.$table") {
sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " +
s"USING foo PARTITIONED BY (id)")
// FROM/IN variants, with and without a PARTITION clause, must all be rejected.
testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'",
namespace)
testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'",
namespace)
testV1CommandNamespace("SHOW TABLE EXTENDED " +
s"FROM $namespace LIKE 'tb*' PARTITION(id=1)",
namespace)
testV1CommandNamespace("SHOW TABLE EXTENDED " +
s"IN $namespace LIKE 'tb*' PARTITION(id=1)",
namespace)
}
}
test("SHOW TABLE EXTENDED valid v1") {
  val expected = Seq(Row("", "source", true), Row("", "source2", true))
  val expectedSchema = new StructType()
    .add("database", StringType, nullable = false)
    .add("tableName", StringType, nullable = false)
    .add("isTemporary", BooleanType, nullable = false)
    .add("information", StringType, nullable = false)

  val df = sql("SHOW TABLE EXTENDED FROM default LIKE '*source*'")
  val rows = df.collect()

  assert(df.schema === expectedSchema)
  // The free-form information column varies, so compare the fixed columns only…
  assert(rows.map { case Row(db, table, temp, _) => Row(db, table, temp) } === expected)
  // …and just check that information is populated for every row.
  rows.foreach { case Row(_, _, _, info: String) => assert(info.nonEmpty) }
}
test("CreateNameSpace: basic tests") {
  // Session catalog is used.
  withNamespace("ns") {
    sql("CREATE NAMESPACE ns")
    testShowNamespaces("SHOW NAMESPACES", Seq("default", "ns"))
  }
  // V2 non-session catalog is used.
  withNamespace("testcat.ns1.ns2") {
    sql("CREATE NAMESPACE testcat.ns1.ns2")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
  }
  // LOCATION must be recorded in the namespace metadata.
  withNamespace("testcat.test") {
    withTempDir { tmpDir =>
      val path = tmpDir.getCanonicalPath
      sql(s"CREATE NAMESPACE testcat.test LOCATION '$path'")
      val metadata =
        catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("test")).asScala
      val catalogPath = metadata(SupportsNamespaces.PROP_LOCATION)
      // BUG FIX: this previously asserted catalogPath.equals(catalogPath), a
      // tautology that verified nothing. Compare against the requested path.
      // NOTE(review): assumes the catalog stores the location qualified with the
      // "file:" scheme — confirm against the catalog implementation.
      assert(catalogPath.equals("file:" + path))
    }
  }
}
test("CreateNameSpace: test handling of 'IF NOT EXIST'") {
  withNamespace("testcat.ns1") {
    sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1")

    // The 'ns1' namespace already exists, so this should fail.
    val err = intercept[NamespaceAlreadyExistsException] {
      sql("CREATE NAMESPACE testcat.ns1")
    }
    assert(err.getMessage.contains("Namespace 'ns1' already exists"))

    // The following will be no-op since the namespace already exists.
    sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1")
  }
}
test("CreateNameSpace: reserved properties") {
import SupportsNamespaces._
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
val exception = intercept[ParseException] {
sql(s"CREATE NAMESPACE testcat.reservedTest WITH DBPROPERTIES('$key'='dummyVal')")
}
assert(exception.getMessage.contains(s"$key is a reserved namespace property"))
}
}
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
withNamespace("testcat.reservedTest") {
sql(s"CREATE NAMESPACE testcat.reservedTest WITH DBPROPERTIES('$key'='foo')")
assert(sql("DESC NAMESPACE EXTENDED testcat.reservedTest")
.toDF("k", "v")
.where("k='Properties'")
.isEmpty, s"$key is a reserved namespace property and ignored")
val meta =
catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("reservedTest"))
assert(meta.get(key) == null || !meta.get(key).contains("foo"),
"reserved properties should not have side effects")
}
}
}
}
test("create/replace/alter table - reserved properties") {
import TableCatalog._
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
CatalogV2Util.TABLE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
Seq("OPTIONS", "TBLPROPERTIES").foreach { clause =>
Seq("CREATE", "REPLACE").foreach { action =>
val e = intercept[ParseException] {
sql(s"$action TABLE testcat.reservedTest (key int) USING foo $clause ('$key'='bar')")
}
assert(e.getMessage.contains(s"$key is a reserved table property"))
}
}
val e1 = intercept[ParseException] {
sql(s"ALTER TABLE testcat.reservedTest SET TBLPROPERTIES ('$key'='bar')")
}
assert(e1.getMessage.contains(s"$key is a reserved table property"))
val e2 = intercept[ParseException] {
sql(s"ALTER TABLE testcat.reservedTest UNSET TBLPROPERTIES ('$key')")
}
assert(e2.getMessage.contains(s"$key is a reserved table property"))
}
}
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
CatalogV2Util.TABLE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
Seq("OPTIONS", "TBLPROPERTIES").foreach { clause =>
withTable("testcat.reservedTest") {
Seq("CREATE", "REPLACE").foreach { action =>
sql(s"$action TABLE testcat.reservedTest (key int) USING foo $clause ('$key'='bar')")
val tableCatalog = catalog("testcat").asTableCatalog
val identifier = Identifier.of(Array(), "reservedTest")
val originValue = tableCatalog.loadTable(identifier).properties().get(key)
assert(originValue != "bar", "reserved properties should not have side effects")
sql(s"ALTER TABLE testcat.reservedTest SET TBLPROPERTIES ('$key'='newValue')")
assert(tableCatalog.loadTable(identifier).properties().get(key) == originValue,
"reserved properties should not have side effects")
sql(s"ALTER TABLE testcat.reservedTest UNSET TBLPROPERTIES ('$key')")
assert(tableCatalog.loadTable(identifier).properties().get(key) == originValue,
"reserved properties should not have side effects")
}
}
}
}
}
}
test("create/replace - path property") {
Seq("true", "false").foreach { conf =>
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, conf)) {
withTable("testcat.reservedTest") {
Seq("CREATE", "REPLACE").foreach { action =>
val e1 = intercept[ParseException] {
sql(s"$action TABLE testcat.reservedTest USING foo LOCATION 'foo' OPTIONS" +
s" ('path'='bar')")
}
assert(e1.getMessage.contains(s"Duplicated table paths found: 'foo' and 'bar'"))
val e2 = intercept[ParseException] {
sql(s"$action TABLE testcat.reservedTest USING foo OPTIONS" +
s" ('path'='foo', 'PaTh'='bar')")
}
assert(e2.getMessage.contains(s"Duplicated table paths found: 'foo' and 'bar'"))
sql(s"$action TABLE testcat.reservedTest USING foo LOCATION 'foo' TBLPROPERTIES" +
s" ('path'='bar', 'Path'='noop')")
val tableCatalog = catalog("testcat").asTableCatalog
val identifier = Identifier.of(Array(), "reservedTest")
assert(tableCatalog.loadTable(identifier).properties()
.get(TableCatalog.PROP_LOCATION) == "foo",
"path as a table property should not have side effects")
assert(tableCatalog.loadTable(identifier).properties().get("path") == "bar",
"path as a table property should not have side effects")
assert(tableCatalog.loadTable(identifier).properties().get("Path") == "noop",
"path as a table property should not have side effects")
}
}
}
}
}
test("DropNamespace: basic tests") {
// Session catalog is used.
sql("CREATE NAMESPACE ns")
testShowNamespaces("SHOW NAMESPACES", Seq("default", "ns"))
sql("DROP NAMESPACE ns")
testShowNamespaces("SHOW NAMESPACES", Seq("default"))
// V2 non-session catalog is used.
sql("CREATE NAMESPACE testcat.ns1")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
sql("DROP NAMESPACE testcat.ns1")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
}
test("DropNamespace: drop non-empty namespace with a non-cascading mode") {
sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
def assertDropFails(): Unit = {
val e = intercept[SparkException] {
sql("DROP NAMESPACE testcat.ns1")
}
assert(e.getMessage.contains("Cannot drop a non-empty namespace: ns1"))
}
// testcat.ns1.table is present, thus testcat.ns1 cannot be dropped.
assertDropFails()
sql("DROP TABLE testcat.ns1.table")
// testcat.ns1.ns2.table is present, thus testcat.ns1 cannot be dropped.
assertDropFails()
sql("DROP TABLE testcat.ns1.ns2.table")
// testcat.ns1.ns2 namespace is present, thus testcat.ns1 cannot be dropped.
assertDropFails()
sql("DROP NAMESPACE testcat.ns1.ns2")
// Now that testcat.ns1 is empty, it can be dropped.
sql("DROP NAMESPACE testcat.ns1")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
}
test("DropNamespace: drop non-empty namespace with a cascade mode") {
sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
sql("DROP NAMESPACE testcat.ns1 CASCADE")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
}
test("DropNamespace: test handling of 'IF EXISTS'") {
sql("DROP NAMESPACE IF EXISTS testcat.unknown")
val exception = intercept[NoSuchNamespaceException] {
sql("DROP NAMESPACE testcat.ns1")
}
assert(exception.getMessage.contains("Namespace 'ns1' not found"))
}
test("DescribeNamespace using v2 catalog") {
withNamespace("testcat.ns1.ns2") {
sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
"'test namespace' LOCATION '/tmp/ns_test'")
val descriptionDf = sql("DESCRIBE NAMESPACE testcat.ns1.ns2")
assert(descriptionDf.schema.map(field => (field.name, field.dataType)) ===
Seq(
("name", StringType),
("value", StringType)
))
val description = descriptionDf.collect()
assert(description === Seq(
Row("Namespace Name", "ns2"),
Row(SupportsNamespaces.PROP_COMMENT.capitalize, "test namespace"),
Row(SupportsNamespaces.PROP_LOCATION.capitalize, "/tmp/ns_test"),
Row(SupportsNamespaces.PROP_OWNER.capitalize, defaultUser))
)
}
}
test("AlterNamespaceSetProperties using v2 catalog") {
withNamespace("testcat.ns1.ns2") {
sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
"'test namespace' LOCATION '/tmp/ns_test' WITH PROPERTIES ('a'='a','b'='b','c'='c')")
sql("ALTER NAMESPACE testcat.ns1.ns2 SET PROPERTIES ('a'='b','b'='a')")
val descriptionDf = sql("DESCRIBE NAMESPACE EXTENDED testcat.ns1.ns2")
assert(descriptionDf.collect() === Seq(
Row("Namespace Name", "ns2"),
Row(SupportsNamespaces.PROP_COMMENT.capitalize, "test namespace"),
Row(SupportsNamespaces.PROP_LOCATION.capitalize, "/tmp/ns_test"),
Row(SupportsNamespaces.PROP_OWNER.capitalize, defaultUser),
Row("Properties", "((a,b),(b,a),(c,c))"))
)
}
}
test("AlterNamespaceSetProperties: reserved properties") {
import SupportsNamespaces._
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
withNamespace("testcat.reservedTest") {
sql("CREATE NAMESPACE testcat.reservedTest")
val exception = intercept[ParseException] {
sql(s"ALTER NAMESPACE testcat.reservedTest SET PROPERTIES ('$key'='dummyVal')")
}
assert(exception.getMessage.contains(s"$key is a reserved namespace property"))
}
}
}
withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filterNot(_ == PROP_COMMENT).foreach { key =>
withNamespace("testcat.reservedTest") {
sql(s"CREATE NAMESPACE testcat.reservedTest")
sql(s"ALTER NAMESPACE testcat.reservedTest SET PROPERTIES ('$key'='foo')")
assert(sql("DESC NAMESPACE EXTENDED testcat.reservedTest")
.toDF("k", "v")
.where("k='Properties'")
.isEmpty, s"$key is a reserved namespace property and ignored")
val meta =
catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("reservedTest"))
assert(meta.get(key) == null || !meta.get(key).contains("foo"),
"reserved properties should not have side effects")
}
}
}
}
test("AlterNamespaceSetLocation using v2 catalog") {
withNamespace("testcat.ns1.ns2") {
sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
"'test namespace' LOCATION '/tmp/ns_test_1'")
sql("ALTER NAMESPACE testcat.ns1.ns2 SET LOCATION '/tmp/ns_test_2'")
val descriptionDf = sql("DESCRIBE NAMESPACE EXTENDED testcat.ns1.ns2")
assert(descriptionDf.collect() === Seq(
Row("Namespace Name", "ns2"),
Row(SupportsNamespaces.PROP_COMMENT.capitalize, "test namespace"),
Row(SupportsNamespaces.PROP_LOCATION.capitalize, "/tmp/ns_test_2"),
Row(SupportsNamespaces.PROP_OWNER.capitalize, defaultUser))
)
}
}
test("ShowNamespaces: show root namespaces with default v2 catalog") {
spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
testShowNamespaces("SHOW NAMESPACES", Seq())
spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo")
testShowNamespaces("SHOW NAMESPACES", Seq("ns1", "ns2"))
testShowNamespaces("SHOW NAMESPACES LIKE '*1*'", Seq("ns1"))
}
test("ShowNamespaces: show namespaces with v2 catalog") {
spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns1.ns1_2.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo")
spark.sql("CREATE TABLE testcat.ns2.ns2_1.table (id bigint) USING foo")
// Look up only with catalog name, which should list root namespaces.
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1", "ns2"))
// Look up sub-namespaces.
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns1_1", "ns1.ns1_2"))
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1 LIKE '*2*'", Seq("ns1.ns1_2"))
testShowNamespaces("SHOW NAMESPACES IN testcat.ns2", Seq("ns2.ns2_1"))
// Try to look up namespaces that do not exist.
testShowNamespaces("SHOW NAMESPACES IN testcat.ns3", Seq())
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1.ns3", Seq())
}
test("ShowNamespaces: default v2 catalog is not set") {
spark.sql("CREATE TABLE testcat.ns.table (id bigint) USING foo")
// The current catalog is resolved to a v2 session catalog.
testShowNamespaces("SHOW NAMESPACES", Seq("default"))
}
test("ShowNamespaces: default v2 catalog doesn't support namespace") {
spark.conf.set(
"spark.sql.catalog.testcat_no_namspace",
classOf[BasicInMemoryTableCatalog].getName)
spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat_no_namspace")
val exception = intercept[AnalysisException] {
sql("SHOW NAMESPACES")
}
assert(exception.getMessage.contains("does not support namespaces"))
}
test("ShowNamespaces: v2 catalog doesn't support namespace") {
spark.conf.set(
"spark.sql.catalog.testcat_no_namspace",
classOf[BasicInMemoryTableCatalog].getName)
val exception = intercept[AnalysisException] {
sql("SHOW NAMESPACES in testcat_no_namspace")
}
assert(exception.getMessage.contains("does not support namespaces"))
}
test("ShowNamespaces: session catalog is used and namespace doesn't exist") {
val exception = intercept[AnalysisException] {
sql("SHOW NAMESPACES in dummy")
}
assert(exception.getMessage.contains("Namespace 'dummy' not found"))
}
test("ShowNamespaces: change catalog and namespace with USE statements") {
sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
// Initially, the current catalog is a v2 session catalog.
testShowNamespaces("SHOW NAMESPACES", Seq("default"))
// Update the current catalog to 'testcat'.
sql("USE testcat")
testShowNamespaces("SHOW NAMESPACES", Seq("ns1"))
// Update the current namespace to 'ns1'.
sql("USE ns1")
// 'SHOW NAMESPACES' is not affected by the current namespace and lists root namespaces.
testShowNamespaces("SHOW NAMESPACES", Seq("ns1"))
}
/**
 * Runs `sqlText` (a SHOW NAMESPACES statement) and verifies both the result
 * schema — a single non-nullable "namespace" string column — and the returned
 * namespace names, compared order-insensitively against `expected`.
 */
private def testShowNamespaces(
    sqlText: String,
    expected: Seq[String]): Unit = {
  val result = spark.sql(sqlText)
  val expectedSchema = new StructType().add("namespace", StringType, nullable = false)
  assert(result.schema === expectedSchema)
  val actualNames = result.collect().map(_.getAs[String](0))
  assert(actualNames.sorted === expected.sorted)
}
test("Use: basic tests with USE statements") {
val catalogManager = spark.sessionState.catalogManager
// Validate the initial current catalog and namespace.
assert(catalogManager.currentCatalog.name() == SESSION_CATALOG_NAME)
assert(catalogManager.currentNamespace === Array("default"))
// The following implicitly creates namespaces.
sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
sql("CREATE TABLE testcat2.ns2.ns2_2.table (id bigint) USING foo")
sql("CREATE TABLE testcat2.ns3.ns3_3.table (id bigint) USING foo")
sql("CREATE TABLE testcat2.testcat.table (id bigint) USING foo")
// Catalog is resolved to 'testcat'.
sql("USE testcat.ns1.ns1_1")
assert(catalogManager.currentCatalog.name() == "testcat")
assert(catalogManager.currentNamespace === Array("ns1", "ns1_1"))
// Catalog is resolved to 'testcat2'.
sql("USE testcat2.ns2.ns2_2")
assert(catalogManager.currentCatalog.name() == "testcat2")
assert(catalogManager.currentNamespace === Array("ns2", "ns2_2"))
// Only the namespace is changed.
sql("USE ns3.ns3_3")
assert(catalogManager.currentCatalog.name() == "testcat2")
assert(catalogManager.currentNamespace === Array("ns3", "ns3_3"))
// Only the namespace is changed (explicit).
sql("USE NAMESPACE testcat")
assert(catalogManager.currentCatalog.name() == "testcat2")
assert(catalogManager.currentNamespace === Array("testcat"))
// Catalog is resolved to `testcat`.
sql("USE testcat")
assert(catalogManager.currentCatalog.name() == "testcat")
assert(catalogManager.currentNamespace === Array())
}
test("Use: set v2 catalog as a current catalog") {
val catalogManager = spark.sessionState.catalogManager
assert(catalogManager.currentCatalog.name() == SESSION_CATALOG_NAME)
sql("USE testcat")
assert(catalogManager.currentCatalog.name() == "testcat")
}
test("Use: v2 session catalog is used and namespace does not exist") {
val exception = intercept[NoSuchDatabaseException] {
sql("USE ns1")
}
assert(exception.getMessage.contains("Database 'ns1' not found"))
}
test("Use: v2 catalog is used and namespace does not exist") {
// Namespaces are not required to exist for v2 catalogs.
sql("USE testcat.ns1.ns2")
val catalogManager = spark.sessionState.catalogManager
assert(catalogManager.currentNamespace === Array("ns1", "ns2"))
}
test("ShowCurrentNamespace: basic tests") {
def testShowCurrentNamespace(expectedCatalogName: String, expectedNamespace: String): Unit = {
val schema = new StructType()
.add("catalog", StringType, nullable = false)
.add("namespace", StringType, nullable = false)
val df = sql("SHOW CURRENT NAMESPACE")
val rows = df.collect
assert(df.schema === schema)
assert(rows.length == 1)
assert(rows(0).getAs[String](0) === expectedCatalogName)
assert(rows(0).getAs[String](1) === expectedNamespace)
}
// Initially, the v2 session catalog is set as a current catalog.
testShowCurrentNamespace("spark_catalog", "default")
sql("USE testcat")
testShowCurrentNamespace("testcat", "")
sql("USE testcat.ns1.ns2")
testShowCurrentNamespace("testcat", "ns1.ns2")
}
test("tableCreation: partition column case insensitive resolution") {
val testCatalog = catalog("testcat").asTableCatalog
val sessionCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
def checkPartitioning(cat: TableCatalog, partition: String): Unit = {
val namespace = if (cat.name == SESSION_CATALOG_NAME) {
Array("default")
} else {
Array[String]()
}
val table = cat.loadTable(Identifier.of(namespace, "tbl"))
val partitions = table.partitioning().map(_.references())
assert(partitions.length === 1)
val fieldNames = partitions.flatMap(_.map(_.fieldNames()))
assert(fieldNames === Array(Array(partition)))
}
sql(s"CREATE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
checkPartitioning(sessionCatalog, "a")
sql(s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
checkPartitioning(testCatalog, "a")
sql(s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
checkPartitioning(sessionCatalog, "b")
sql(s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
checkPartitioning(testCatalog, "b")
}
test("tableCreation: partition column case sensitive resolution") {
def checkFailure(statement: String): Unit = {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val e = intercept[AnalysisException] {
sql(statement)
}
assert(e.getMessage.contains("Couldn't find column"))
}
}
checkFailure(s"CREATE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
checkFailure(s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
checkFailure(
s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
checkFailure(
s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
}
test("tableCreation: duplicate column names in the table definition") {
val errorMsg = "Found duplicate column(s) in the table definition of"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t ($c0 INT, $c1 INT) USING $v2Source",
s"$errorMsg default.t"
)
assertAnalysisError(
s"CREATE TABLE testcat.t ($c0 INT, $c1 INT) USING $v2Source",
s"$errorMsg t"
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t ($c0 INT, $c1 INT) USING $v2Source",
s"$errorMsg default.t"
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t ($c0 INT, $c1 INT) USING $v2Source",
s"$errorMsg t"
)
}
}
}
test("tableCreation: duplicate nested column names in the table definition") {
val errorMsg = "Found duplicate column(s) in the table definition of"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
s"$errorMsg default.t"
)
assertAnalysisError(
s"CREATE TABLE testcat.t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
s"$errorMsg t"
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
s"$errorMsg default.t"
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
s"$errorMsg t"
)
}
}
}
test("tableCreation: bucket column names not in table definition") {
val errorMsg = "Couldn't find column c in"
assertAnalysisError(
s"CREATE TABLE tbl (a int, b string) USING $v2Source CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source " +
"CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source " +
"CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
}
test("tableCreation: bucket column name containing dot") {
withTable("t") {
sql(
"""
|CREATE TABLE testcat.t (id int, `a.b` string) USING foo
|CLUSTERED BY (`a.b`) INTO 4 BUCKETS
|OPTIONS ('allow-unsupported-transforms'=true)
""".stripMargin)
val testCatalog = catalog("testcat").asTableCatalog.asInstanceOf[InMemoryTableCatalog]
val table = testCatalog.loadTable(Identifier.of(Array.empty, "t"))
val partitioning = table.partitioning()
assert(partitioning.length == 1 && partitioning.head.name() == "bucket")
val references = partitioning.head.references()
assert(references.length == 1)
assert(references.head.fieldNames().toSeq == Seq("a.b"))
}
}
test("tableCreation: column repeated in partition columns") {
val errorMsg = "Found duplicate column(s) in the partitioning"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
}
}
}
test("tableCreation: column repeated in bucket columns") {
val errorMsg = "Found duplicate column(s) in the bucket definition"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
}
}
}
test("REFRESH TABLE: v2 table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog.asInstanceOf[InMemoryTableCatalog]
val identifier = Identifier.of(Array("ns1", "ns2"), "tbl")
assert(!testCatalog.isTableInvalidated(identifier))
sql(s"REFRESH TABLE $t")
assert(testCatalog.isTableInvalidated(identifier))
}
}
test("REPLACE TABLE: v1 table") {
val e = intercept[AnalysisException] {
sql(s"CREATE OR REPLACE TABLE tbl (a int) USING ${classOf[SimpleScanSource].getName}")
}
assert(e.message.contains("REPLACE TABLE is only supported with v2 tables"))
}
test("DeleteFrom: basic - delete all") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t")
checkAnswer(spark.table(t), Seq())
}
}
test("DeleteFrom: basic - delete with where clause") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t WHERE id = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
test("DeleteFrom: delete from aliased target table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t AS tbl WHERE tbl.id = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
test("DeleteFrom: normalize attribute names") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t AS tbl WHERE tbl.ID = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
test("DeleteFrom: fail if has subquery") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
val exc = intercept[AnalysisException] {
sql(s"DELETE FROM $t WHERE id IN (SELECT id FROM $t)")
}
assert(spark.table(t).count === 3)
assert(exc.getMessage.contains("Delete by condition with subquery is not supported"))
}
}
test("DeleteFrom: DELETE is only supported with v2 tables") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
val v1Table = "tbl"
withTable(v1Table) {
sql(s"CREATE TABLE $v1Table" +
s" USING ${classOf[SimpleScanSource].getName} OPTIONS (from=0,to=1)")
val exc = intercept[AnalysisException] {
sql(s"DELETE FROM $v1Table WHERE i = 2")
}
assert(exc.getMessage.contains("DELETE is only supported with v2 tables"))
}
}
test("UPDATE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(
s"""
|CREATE TABLE $t (id bigint, name string, age int, p int)
|USING foo
|PARTITIONED BY (id, p)
""".stripMargin)
// UPDATE non-existing table
assertAnalysisError(
"UPDATE dummy SET name='abc'",
"Table or view not found")
// UPDATE non-existing column
assertAnalysisError(
s"UPDATE $t SET dummy='abc'",
"cannot resolve")
assertAnalysisError(
s"UPDATE $t SET name='abc' WHERE dummy=1",
"cannot resolve")
// UPDATE is not implemented yet.
val e = intercept[UnsupportedOperationException] {
sql(s"UPDATE $t SET name='Robert', age=32 WHERE p=1")
}
assert(e.getMessage.contains("UPDATE TABLE is not supported temporarily"))
}
}
  // Each test below runs a SQL command against the `testcat` v2 catalog and
  // asserts either the v2 behavior or the analysis error raised for commands
  // that are only implemented for the v1 session catalog.
  test("MERGE INTO TABLE") {
    val target = "testcat.ns1.ns2.target"
    val source = "testcat.ns1.ns2.source"
    withTable(target, source) {
      sql(
        s"""
           |CREATE TABLE $target (id bigint, name string, age int, p int)
           |USING foo
           |PARTITIONED BY (id, p)
         """.stripMargin)
      sql(
        s"""
           |CREATE TABLE $source (id bigint, name string, age int, p int)
           |USING foo
           |PARTITIONED BY (id, p)
         """.stripMargin)
      // MERGE INTO non-existing table
      assertAnalysisError(
        s"""
           |MERGE INTO testcat.ns1.ns2.dummy AS target
           |USING testcat.ns1.ns2.source AS source
           |ON target.id = source.id
           |WHEN MATCHED AND (target.age < 10) THEN DELETE
           |WHEN MATCHED AND (target.age > 10) THEN UPDATE SET *
           |WHEN NOT MATCHED AND (target.col2='insert')
           |THEN INSERT *
         """.stripMargin,
        "Table or view not found")
      // USING non-existing table
      assertAnalysisError(
        s"""
           |MERGE INTO testcat.ns1.ns2.target AS target
           |USING testcat.ns1.ns2.dummy AS source
           |ON target.id = source.id
           |WHEN MATCHED AND (target.age < 10) THEN DELETE
           |WHEN MATCHED AND (target.age > 10) THEN UPDATE SET *
           |WHEN NOT MATCHED AND (target.col2='insert')
           |THEN INSERT *
         """.stripMargin,
        "Table or view not found")
      // UPDATE non-existing column
      assertAnalysisError(
        s"""
           |MERGE INTO testcat.ns1.ns2.target AS target
           |USING testcat.ns1.ns2.source AS source
           |ON target.id = source.id
           |WHEN MATCHED AND (target.age < 10) THEN DELETE
           |WHEN MATCHED AND (target.age > 10) THEN UPDATE SET target.dummy = source.age
           |WHEN NOT MATCHED AND (target.col2='insert')
           |THEN INSERT *
         """.stripMargin,
        "cannot resolve")
      // UPDATE using non-existing column
      assertAnalysisError(
        s"""
           |MERGE INTO testcat.ns1.ns2.target AS target
           |USING testcat.ns1.ns2.source AS source
           |ON target.id = source.id
           |WHEN MATCHED AND (target.age < 10) THEN DELETE
           |WHEN MATCHED AND (target.age > 10) THEN UPDATE SET target.age = source.dummy
           |WHEN NOT MATCHED AND (target.col2='insert')
           |THEN INSERT *
         """.stripMargin,
        "cannot resolve")
      // MERGE INTO is not implemented yet.
      val e = intercept[UnsupportedOperationException] {
        sql(
          s"""
             |MERGE INTO testcat.ns1.ns2.target AS target
             |USING testcat.ns1.ns2.source AS source
             |ON target.id = source.id
             |WHEN MATCHED AND (target.p < 0) THEN DELETE
             |WHEN MATCHED AND (target.p > 0) THEN UPDATE SET *
             |WHEN NOT MATCHED THEN INSERT *
           """.stripMargin)
      }
      assert(e.getMessage.contains("MERGE INTO TABLE is not supported temporarily"))
    }
  }
  test("AlterTable: rename table basic test") {
    withTable("testcat.ns1.new") {
      sql(s"CREATE TABLE testcat.ns1.ns2.old USING foo AS SELECT id, data FROM source")
      checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq(Row("ns1.ns2", "old")))
      // RENAME TO accepts a multi-part identifier, so the table can move
      // from namespace ns1.ns2 into ns1.
      sql(s"ALTER TABLE testcat.ns1.ns2.old RENAME TO ns1.new")
      checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq.empty)
      checkAnswer(sql("SHOW TABLES FROM testcat.ns1"), Seq(Row("ns1", "new")))
    }
  }
  test("AlterTable: renaming views are not supported") {
    val e = intercept[AnalysisException] {
      sql(s"ALTER VIEW testcat.ns.tbl RENAME TO ns.view")
    }
    assert(e.getMessage.contains("Renaming view is not supported in v2 catalogs"))
  }
  // ANALYZE is a v1-only command: both forms must fail against a v2 table.
  test("ANALYZE TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS")
      testV1CommandSupportingTempView("ANALYZE TABLE", s"$t COMPUTE STATISTICS FOR ALL COLUMNS")
    }
  }
  test("MSCK REPAIR TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1Command("MSCK REPAIR TABLE", t)
    }
  }
  test("TRUNCATE TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      sql(
        s"""
           |CREATE TABLE $t (id bigint, data string)
           |USING foo
           |PARTITIONED BY (id)
         """.stripMargin)
      // Both the whole-table and single-partition forms are v1-only.
      testV1Command("TRUNCATE TABLE", t)
      testV1Command("TRUNCATE TABLE", s"$t PARTITION(id='1')")
    }
  }
  test("SHOW PARTITIONS") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      sql(
        s"""
           |CREATE TABLE $t (id bigint, data string)
           |USING foo
           |PARTITIONED BY (id)
         """.stripMargin)
      testV1Command("SHOW PARTITIONS", t)
      testV1Command("SHOW PARTITIONS", s"$t PARTITION(id='1')")
    }
  }
  test("LOAD DATA INTO TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      sql(
        s"""
           |CREATE TABLE $t (id bigint, data string)
           |USING foo
           |PARTITIONED BY (id)
         """.stripMargin)
      // All LOAD DATA variants (LOCAL / OVERWRITE / PARTITION) are v1-only.
      testV1Command("LOAD DATA", s"INPATH 'filepath' INTO TABLE $t")
      testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' INTO TABLE $t")
      testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t")
      testV1Command("LOAD DATA",
        s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t PARTITION(id=1)")
    }
  }
  test("SHOW CREATE TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1CommandSupportingTempView("SHOW CREATE TABLE", t)
    }
  }
  test("CACHE TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1CommandSupportingTempView("CACHE TABLE", t)
      // The LAZY variant reports the same limitation with a slightly
      // different message, so it is checked separately.
      val e = intercept[AnalysisException] {
        sql(s"CACHE LAZY TABLE $t")
      }
      assert(e.message.contains("CACHE TABLE is only supported with temp views or v1 tables"))
    }
  }
  test("UNCACHE TABLE") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1CommandSupportingTempView("UNCACHE TABLE", t)
      testV1CommandSupportingTempView("UNCACHE TABLE", s"IF EXISTS $t")
    }
  }
  test("SHOW COLUMNS") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      testV1CommandSupportingTempView("SHOW COLUMNS", s"FROM $t")
      testV1CommandSupportingTempView("SHOW COLUMNS", s"IN $t")
      // The optional IN clause only takes a single-part namespace.
      val e3 = intercept[AnalysisException] {
        sql(s"SHOW COLUMNS FROM tbl IN testcat.ns1.ns2")
      }
      assert(e3.message.contains("Namespace name should have " +
        "only one part if specified: testcat.ns1.ns2"))
    }
  }
  test("ALTER TABLE RECOVER PARTITIONS") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      val e = intercept[AnalysisException] {
        sql(s"ALTER TABLE $t RECOVER PARTITIONS")
      }
      assert(e.message.contains("ALTER TABLE RECOVER PARTITIONS is only supported with v1 tables"))
    }
  }
  // Partition-manipulating ALTER TABLE commands are all v1-only.
  test("ALTER TABLE ADD PARTITION") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
      val e = intercept[AnalysisException] {
        sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'")
      }
      assert(e.message.contains("ALTER TABLE ADD PARTITION is only supported with v1 tables"))
    }
  }
  test("ALTER TABLE RENAME PARTITION") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
      val e = intercept[AnalysisException] {
        sql(s"ALTER TABLE $t PARTITION (id=1) RENAME TO PARTITION (id=2)")
      }
      assert(e.message.contains("ALTER TABLE RENAME PARTITION is only supported with v1 tables"))
    }
  }
  test("ALTER TABLE DROP PARTITIONS") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
      val e = intercept[AnalysisException] {
        sql(s"ALTER TABLE $t DROP PARTITION (id=1)")
      }
      assert(e.message.contains("ALTER TABLE DROP PARTITION is only supported with v1 tables"))
    }
  }
  test("ALTER TABLE SerDe properties") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
      val e = intercept[AnalysisException] {
        sql(s"ALTER TABLE $t SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')")
      }
      assert(e.message.contains("ALTER TABLE SerDe Properties is only supported with v1 tables"))
    }
  }
  test("ALTER VIEW AS QUERY") {
    val v = "testcat.ns1.ns2.v"
    val e = intercept[AnalysisException] {
      sql(s"ALTER VIEW $v AS SELECT 1")
    }
    assert(e.message.contains("ALTER VIEW QUERY is only supported with temp views or v1 tables"))
  }
  test("CREATE VIEW") {
    val v = "testcat.ns1.ns2.v"
    val e = intercept[AnalysisException] {
      sql(s"CREATE VIEW $v AS SELECT * FROM tab1")
    }
    assert(e.message.contains("CREATE VIEW is only supported with v1 tables"))
  }
  // SHOW TBLPROPERTIES does work for v2 tables: it surfaces the user-supplied
  // table properties as (key, value) string rows.
  test("SHOW TBLPROPERTIES: v2 table") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      val user = "andrew"
      val status = "new"
      val provider = "foo"
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING $provider " +
        s"TBLPROPERTIES ('user'='$user', 'status'='$status')")
      val properties = sql(s"SHOW TBLPROPERTIES $t").orderBy("key")
      val schema = new StructType()
        .add("key", StringType, nullable = false)
        .add("value", StringType, nullable = false)
      val expected = Seq(
        Row("status", status),
        Row("user", user))
      assert(properties.schema === schema)
      assert(expected === properties.collect())
    }
  }
  test("SHOW TBLPROPERTIES(key): v2 table") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      val user = "andrew"
      val status = "new"
      val provider = "foo"
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING $provider " +
        s"TBLPROPERTIES ('user'='$user', 'status'='$status')")
      // Filtering by a single key returns only that property's row.
      val properties = sql(s"SHOW TBLPROPERTIES $t ('status')")
      val expected = Seq(Row("status", status))
      assert(expected === properties.collect())
    }
  }
  test("SHOW TBLPROPERTIES(key): v2 table, key not found") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      val nonExistingKey = "nonExistingKey"
      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo " +
        s"TBLPROPERTIES ('user'='andrew', 'status'='new')")
      // A missing key does not fail; it yields a descriptive value instead.
      val properties = sql(s"SHOW TBLPROPERTIES $t ('$nonExistingKey')")
      val expected = Seq(Row(nonExistingKey, s"Table $t does not have property: $nonExistingKey"))
      assert(expected === properties.collect())
    }
  }
  // Function-related commands are restricted to the v1 session catalog, and the
  // session catalog itself rejects multi-part namespaces.
  test("DESCRIBE FUNCTION: only support session catalog") {
    val e = intercept[AnalysisException] {
      sql("DESCRIBE FUNCTION testcat.ns1.ns2.fun")
    }
    assert(e.message.contains("DESCRIBE FUNCTION is only supported in v1 catalog"))
    val e1 = intercept[AnalysisException] {
      sql("DESCRIBE FUNCTION default.ns1.ns2.fun")
    }
    assert(e1.message.contains(
      "The namespace in session catalog must have exactly one name part: default.ns1.ns2.fun"))
  }
  test("SHOW FUNCTIONS not valid v1 namespace") {
    val function = "testcat.ns1.ns2.fun"
    val e = intercept[AnalysisException] {
      sql(s"SHOW FUNCTIONS LIKE $function")
    }
    assert(e.message.contains("SHOW FUNCTIONS is only supported in v1 catalog"))
  }
  test("DROP FUNCTION: only support session catalog") {
    val e = intercept[AnalysisException] {
      sql("DROP FUNCTION testcat.ns1.ns2.fun")
    }
    assert(e.message.contains("DROP FUNCTION is only supported in v1 catalog"))
    val e1 = intercept[AnalysisException] {
      sql("DROP FUNCTION default.ns1.ns2.fun")
    }
    assert(e1.message.contains(
      "The namespace in session catalog must have exactly one name part: default.ns1.ns2.fun"))
  }
  test("CREATE FUNCTION: only support session catalog") {
    val e = intercept[AnalysisException] {
      sql("CREATE FUNCTION testcat.ns1.ns2.fun as 'f'")
    }
    assert(e.message.contains("CREATE FUNCTION is only supported in v1 catalog"))
    val e1 = intercept[AnalysisException] {
      sql("CREATE FUNCTION default.ns1.ns2.fun as 'f'")
    }
    assert(e1.message.contains(
      "The namespace in session catalog must have exactly one name part: default.ns1.ns2.fun"))
  }
  test("global temp view should not be masked by v2 catalog") {
    val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
    // Register a v2 catalog under the same name as the global temp database;
    // the global temp view must still win name resolution.
    spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
    try {
      sql("create global temp view v as select 1")
      sql(s"alter view $globalTempDB.v rename to v2")
      checkAnswer(spark.table(s"$globalTempDB.v2"), Row(1))
      sql(s"drop view $globalTempDB.v2")
    } finally {
      spark.sharedState.globalTempViewManager.clear()
    }
  }
  test("SPARK-30104: global temp db is used as a table name under v2 catalog") {
    val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
    val t = s"testcat.$globalTempDB"
    withTable(t) {
      sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
      sql("USE testcat")
      // The following should not throw AnalysisException, but should use `testcat.$globalTempDB`.
      sql(s"DESCRIBE TABLE $globalTempDB")
    }
  }
  test("SPARK-30104: v2 catalog named global_temp will be masked") {
    val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
    spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
    val e = intercept[AnalysisException] {
      // Since the following multi-part name starts with `globalTempDB`, it is resolved to
      // the session catalog, not the `global_temp` v2 catalog.
      sql(s"CREATE TABLE $globalTempDB.ns1.ns2.tbl (id bigint, data string) USING json")
    }
    assert(e.message.contains(
      "The namespace in session catalog must have exactly one name part: global_temp.ns1.ns2.tbl"))
  }
  test("table name same as catalog can be used") {
    withTable("testcat.testcat") {
      sql(s"CREATE TABLE testcat.testcat (id bigint, data string) USING foo")
      sql("USE testcat")
      // The following should not throw AnalysisException.
      sql(s"DESCRIBE TABLE testcat")
    }
  }
  test("SPARK-30001: session catalog name can be specified in SQL statements") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    withTable("t") {
      sql("CREATE TABLE t USING json AS SELECT 1 AS i")
      // The unqualified and fully-qualified names refer to the same table.
      checkAnswer(sql("select * from t"), Row(1))
      checkAnswer(sql("select * from spark_catalog.default.t"), Row(1))
    }
  }
  test("SPARK-30885: v1 table name should be fully qualified") {
    // `spark_catalog.t` has only one name part after the catalog, which the
    // session catalog rejects; each v1 command path must raise the same error.
    def assertWrongTableIdent(): Unit = {
      withTable("t") {
        sql("CREATE TABLE t USING json AS SELECT 1 AS i")
        val t = "spark_catalog.t"
        // NOTE: the `sql` parameter shadows the suite's `sql` helper, so the
        // statement is executed via `spark.sql`.
        def verify(sql: String): Unit = {
          val e = intercept[AnalysisException](spark.sql(sql))
          assert(e.message.contains(
            s"The namespace in session catalog must have exactly one name part: $t"))
        }
        verify(s"select * from $t")
        // Verify V1 commands that bypass table lookups.
        verify(s"REFRESH TABLE $t")
        verify(s"DESCRIBE $t i")
        verify(s"DROP TABLE $t")
        verify(s"DROP VIEW $t")
        verify(s"ANALYZE TABLE $t COMPUTE STATISTICS")
        verify(s"ANALYZE TABLE $t COMPUTE STATISTICS FOR ALL COLUMNS")
        verify(s"MSCK REPAIR TABLE $t")
        verify(s"LOAD DATA INPATH 'filepath' INTO TABLE $t")
        verify(s"SHOW CREATE TABLE $t")
        verify(s"SHOW CREATE TABLE $t AS SERDE")
        verify(s"CACHE TABLE $t")
        verify(s"UNCACHE TABLE $t")
        verify(s"TRUNCATE TABLE $t")
        verify(s"SHOW PARTITIONS $t")
        verify(s"SHOW COLUMNS FROM $t")
      }
    }
    assertWrongTableIdent()
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    assertWrongTableIdent()
  }
  test("SPARK-30259: session catalog can be specified in CREATE TABLE AS SELECT command") {
    withTable("tbl") {
      val ident = Identifier.of(Array("default"), "tbl")
      sql("CREATE TABLE spark_catalog.default.tbl USING json AS SELECT 1 AS i")
      assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true)
    }
  }
  test("SPARK-30259: session catalog can be specified in CREATE TABLE command") {
    withTable("tbl") {
      val ident = Identifier.of(Array("default"), "tbl")
      sql("CREATE TABLE spark_catalog.default.tbl (col string) USING json")
      assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true)
    }
  }
  test("SPARK-30094: current namespace is used during table resolution") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    withTable("spark_catalog.default.t", "testcat.ns.t") {
      sql("CREATE TABLE t USING parquet AS SELECT 1")
      sql("CREATE TABLE testcat.ns.t USING parquet AS SELECT 2")
      checkAnswer(sql("SELECT * FROM t"), Row(1))
      // After switching the current namespace, the bare name `t` resolves to
      // the table in testcat.ns instead.
      sql("USE testcat.ns")
      checkAnswer(sql("SELECT * FROM t"), Row(2))
    }
  }
  test("SPARK-30284: CREATE VIEW should track the current catalog and namespace") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    val sessionCatalogName = CatalogManager.SESSION_CATALOG_NAME
    sql("USE testcat.ns1.ns2")
    sql("CREATE TABLE t USING foo AS SELECT 1 col")
    checkAnswer(spark.table("t"), Row(1))
    withTempView("t") {
      spark.range(10).createTempView("t")
      withView(s"$sessionCatalogName.default.v") {
        // A permanent view may not reference a temp view.
        val e = intercept[AnalysisException] {
          sql(s"CREATE VIEW $sessionCatalogName.default.v AS SELECT * FROM t")
        }
        assert(e.message.contains("referencing a temporary view"))
      }
    }
    withTempView("t") {
      withView(s"$sessionCatalogName.default.v") {
        sql(s"CREATE VIEW $sessionCatalogName.default.v " +
          "AS SELECT t1.col FROM t t1 JOIN ns1.ns2.t t2")
        sql(s"USE $sessionCatalogName")
        // The view should read data from table `testcat.ns1.ns2.t` not the temp view.
        spark.range(10).createTempView("t")
        checkAnswer(spark.table("v"), Row(1))
      }
    }
  }
  test("COMMENT ON NAMESPACE") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    // Session catalog is used.
    sql("CREATE NAMESPACE ns")
    checkNamespaceComment("ns", "minor revision")
    checkNamespaceComment("ns", null)
    checkNamespaceComment("ns", "NULL")
    intercept[AnalysisException](sql("COMMENT ON NAMESPACE abc IS NULL"))
    // V2 non-session catalog is used.
    sql("CREATE NAMESPACE testcat.ns1")
    checkNamespaceComment("testcat.ns1", "minor revision")
    checkNamespaceComment("testcat.ns1", null)
    checkNamespaceComment("testcat.ns1", "NULL")
    intercept[AnalysisException](sql("COMMENT ON NAMESPACE testcat.abc IS NULL"))
  }
private def checkNamespaceComment(namespace: String, comment: String): Unit = {
sql(s"COMMENT ON NAMESPACE $namespace IS " +
Option(comment).map("'" + _ + "'").getOrElse("NULL"))
val expectedComment = Option(comment).getOrElse("")
assert(sql(s"DESC NAMESPACE extended $namespace").toDF("k", "v")
.where(s"k='${SupportsNamespaces.PROP_COMMENT.capitalize}'")
.head().getString(1) === expectedComment)
}
  test("COMMENT ON TABLE") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    // Session catalog is used.
    withTable("t") {
      sql("CREATE TABLE t(k int) USING json")
      checkTableComment("t", "minor revision")
      checkTableComment("t", null)
      checkTableComment("t", "NULL")
    }
    intercept[AnalysisException](sql("COMMENT ON TABLE abc IS NULL"))
    // V2 non-session catalog is used.
    withTable("testcat.ns1.ns2.t") {
      sql("CREATE TABLE testcat.ns1.ns2.t(k int) USING foo")
      checkTableComment("testcat.ns1.ns2.t", "minor revision")
      checkTableComment("testcat.ns1.ns2.t", null)
      checkTableComment("testcat.ns1.ns2.t", "NULL")
    }
    intercept[AnalysisException](sql("COMMENT ON TABLE testcat.abc IS NULL"))
    val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
    spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
    withTempView("v") {
      sql("create global temp view v as select 1")
      // Commenting on a (global) temp view is rejected with a dedicated error.
      val e = intercept[AnalysisException](sql("COMMENT ON TABLE global_temp.v IS NULL"))
      assert(e.getMessage.contains("global_temp.v is a temp view not table."))
    }
  }
private def checkTableComment(tableName: String, comment: String): Unit = {
sql(s"COMMENT ON TABLE $tableName IS " + Option(comment).map("'" + _ + "'").getOrElse("NULL"))
val expectedComment = Option(comment).getOrElse("")
assert(sql(s"DESC extended $tableName").toDF("k", "v", "c")
.where(s"k='${TableCatalog.PROP_COMMENT.capitalize}'")
.head().getString(1) === expectedComment)
}
  test("SPARK-30799: temp view name can't contain catalog name") {
    val sessionCatalogName = CatalogManager.SESSION_CATALOG_NAME
    withTempView("v") {
      spark.range(10).createTempView("v")
      // A catalog-qualified name never resolves to a temp view.
      val e1 = intercept[AnalysisException](
        sql(s"CACHE TABLE $sessionCatalogName.v")
      )
      assert(e1.message.contains(
        "The namespace in session catalog must have exactly one name part: spark_catalog.v"))
    }
    val e2 = intercept[AnalysisException] {
      sql(s"CREATE TEMP VIEW $sessionCatalogName.v AS SELECT 1")
    }
    assert(e2.message.contains("It is not allowed to add database prefix"))
  }
  test("SPARK-31015: star expression should work for qualified column names for v2 tables") {
    val t = "testcat.ns1.ns2.tbl"
    withTable(t) {
      sql(s"CREATE TABLE $t (id bigint, name string) USING foo")
      sql(s"INSERT INTO $t VALUES (1, 'hello')")
      // Every suffix of the fully-qualified table name must qualify the star.
      def check(tbl: String): Unit = {
        checkAnswer(sql(s"SELECT testcat.ns1.ns2.tbl.* FROM $tbl"), Row(1, "hello"))
        checkAnswer(sql(s"SELECT ns1.ns2.tbl.* FROM $tbl"), Row(1, "hello"))
        checkAnswer(sql(s"SELECT ns2.tbl.* FROM $tbl"), Row(1, "hello"))
        checkAnswer(sql(s"SELECT tbl.* FROM $tbl"), Row(1, "hello"))
      }
      // Test with qualified table name "testcat.ns1.ns2.tbl".
      check(t)
      // Test if current catalog and namespace is respected in column resolution.
      sql("USE testcat.ns1.ns2")
      check("tbl")
      // A qualifier with more parts than the table's name cannot resolve.
      val ex = intercept[AnalysisException] {
        sql(s"SELECT ns1.ns2.ns3.tbl.* from $t")
      }
      assert(ex.getMessage.contains("cannot resolve 'ns1.ns2.ns3.tbl.*"))
    }
  }
private def testV1Command(sqlCommand: String, sqlParams: String): Unit = {
val e = intercept[AnalysisException] {
sql(s"$sqlCommand $sqlParams")
}
assert(e.message.contains(s"$sqlCommand is only supported with v1 tables"))
}
private def testV1CommandSupportingTempView(sqlCommand: String, sqlParams: String): Unit = {
val e = intercept[AnalysisException] {
sql(s"$sqlCommand $sqlParams")
}
assert(e.message.contains(s"$sqlCommand is only supported with temp views or v1 tables"))
}
private def assertAnalysisError(sqlStatement: String, expectedError: String): Unit = {
val errMsg = intercept[AnalysisException] {
sql(sqlStatement)
}.getMessage
assert(errMsg.contains(expectedError))
}
}
/** Used as a V2 DataSource for V2SessionCatalog DDL */
class FakeV2Provider extends SimpleTableProvider {
  // Table loading is never exercised by the DDL tests; fail loudly if it is.
  override def getTable(options: CaseInsensitiveStringMap): Table = {
    throw new UnsupportedOperationException("Unnecessary for DDL tests")
  }
}
| zuotingbing/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala | Scala | apache-2.0 | 100,117 |
package com.lookout.borderpatrol.auth.keymaster
import com.lookout.borderpatrol.util.Combinators.tap
import com.lookout.borderpatrol.sessionx._
import com.lookout.borderpatrol._
import com.lookout.borderpatrol.Binder._
import com.lookout.borderpatrol.auth._
import com.twitter.finagle.httpx.path.Path
import com.twitter.finagle.{Filter, Service}
import com.twitter.finagle.httpx._
import com.twitter.util.Future
object Keymaster {
  /** Login credentials submitted by the user, bound to the service being accessed. */
  case class Credential(email: String, password: String, serviceId: ServiceIdentifier)
  case class KeymasterIdentifyReq(credential: Credential) extends IdentifyRequest[Credential]
  case class KeymasterIdentifyRes(tokens: Tokens) extends IdentifyResponse[Tokens] {
    val identity = Identity(tokens)
  }
  case class KeymasterAccessReq(identity: Id[Tokens],
    serviceId: ServiceIdentifier, sessionId: SessionId) extends AccessRequest[Tokens]
  case class KeymasterAccessRes(access: Access[ServiceToken]) extends AccessResponse[ServiceToken]
  /**
   * The identity provider for Keymaster, will connect to the remote keymaster server to authenticate and get an
   * identity (master token)
   * @param binder Binder that binds to Keymaster identity service passed in the IdentityManager
   */
  case class KeymasterIdentityProvider(binder: MBinder[Manager])
      extends IdentityProvider[Credential, Tokens] {
    // Builds the form-encoded POST ("e"=email, "p"=password, "s"=service name)
    // sent to the login manager's identity-manager path.
    def api(cred: Credential): Request =
      tap(Request(Method.Post, cred.serviceId.loginManager.identityManager.path.toString))(req => {
        req.contentType = "application/x-www-form-urlencoded"
        // queryString produces "?k=v&..."; drop(1) strips the leading '?' so the
        // result can be used as a form body.
        req.contentString = Request.queryString(("e", cred.email), ("p", cred.password), ("s", cred.serviceId.name))
          .drop(1) /* Drop '?' */
      })
    /**
     * Sends credentials, if authenticated successfully will return a MasterToken otherwise a Future.exception
     */
    def apply(req: IdentifyRequest[Credential]): Future[IdentifyResponse[Tokens]] =
      // Authenticate user by the Keymaster
      binder(BindRequest(req.credential.serviceId.loginManager.identityManager, api(req.credential)))
        .flatMap(res => res.status match {
          // Parse for Tokens if Status.Ok
          case Status.Ok =>
            Tokens.derive[Tokens](res.contentString).fold[Future[IdentifyResponse[Tokens]]](
              err => Future.exception(IdentityProviderError(Status.InternalServerError,
                "Failed to parse the Keymaster Identity Response")),
              t => Future.value(KeymasterIdentifyRes(t))
            )
          // Preserve Response Status code by throwing AccessDenied exceptions
          case _ => Future.exception(IdentityProviderError(res.status,
            s"Invalid credentials for user ${req.credential.email}"))
        })
  }
  /**
   * Handles logins to the KeymasterIdentityProvider:
   * - saves the Tokens after a successful login
   * - sends the User to their original request location from before they logged in or the default location based on
   *   their service
   * @param store session store used to persist tokens and look up the original request
   * @param secretStoreApi secret store used for signing/validating session cookies
   */
  case class KeymasterPostLoginFilter(store: SessionStore)(implicit secretStoreApi: SecretStoreApi)
      extends Filter[SessionIdRequest, Response, IdentifyRequest[Credential], IdentifyResponse[Tokens]] {
    // Extracts "username"/"password" form params; None (-> 400) if either is missing.
    def createIdentifyReq(req: SessionIdRequest): Option[IdentifyRequest[Credential]] =
      for {
        u <- req.req.req.params.get("username")
        p <- req.req.req.params.get("password")
      } yield KeymasterIdentifyReq(Credential(u, p, req.req.serviceId))
    /**
     * Grab the original request from the session store, otherwise just send them to the default location of '/'
     */
    def getRequestFromSessionStore(id: SessionId): Future[Request] =
      store.get[Request](id).flatMap(_ match {
        case Some(session) => Future.value(session.data)
        case None => Future.exception(OriginalRequestNotFound(s"no request stored for $id"))
      })
    // On success: store the tokens under a new authenticated session, delete the
    // pre-login session, and redirect (302) to the originally requested URI with
    // the new session cookie attached.
    def apply(req: SessionIdRequest,
        service: Service[IdentifyRequest[Credential], IdentifyResponse[Tokens]]): Future[Response] =
      createIdentifyReq(req).fold(Future.value(Response(Status.BadRequest)))(credReq =>
        for {
          tokenResponse <- service(credReq)
          session <- Session(tokenResponse.identity.id, AuthenticatedTag)
          _ <- store.update[Tokens](session)
          originReq <- getRequestFromSessionStore(req.sessionId)
          _ <- store.delete(req.sessionId)
        } yield tap(Response(Status.Found))(res => {
          res.location = originReq.uri
          res.addCookie(session.id.asCookie)
        }))
  }
  /**
   * Decodes the methods Get and Post differently
   * - Get is directed to login form
   * - Post processes the login credentials
   *
   * @param binder It binds to upstream login provider using the information passed in LoginManager
   */
  case class KeymasterMethodMuxLoginFilter(binder: MBinder[LoginManager])
      extends Filter[SessionIdRequest, Response, SessionIdRequest, Response] {
    def apply(req: SessionIdRequest,
        service: Service[SessionIdRequest, Response]): Future[Response] =
      (req.req.req.method, req.req.req.path) match {
        // NOTE(review): `loginPath` is a lowercase binding pattern, so it matches
        // ANY path — every POST request is treated as a login submission. If the
        // intent was to match only the login manager's path, this needs a
        // backtick-quoted or guarded pattern. Confirm against callers.
        case (Method.Post, loginPath) => service(req)
        case _ => binder(BindRequest(req.req.serviceId.loginManager, req.req.req))
      }
  }
  /**
   * The access issuer will use the MasterToken to gain access to service tokens
   * @param binder It binds to the Keymaster Access Issuer using info in AccessManager
   * @param store Session store
   */
  case class KeymasterAccessIssuer(binder: MBinder[Manager], store: SessionStore)
      extends AccessIssuer[Tokens, ServiceToken] {
    // Builds the form-encoded POST requesting a service token, authenticated via
    // the master token in the "Auth-Token" header.
    def api(accessRequest: AccessRequest[Tokens]): Request =
      tap(Request(Method.Post, accessRequest.serviceId.loginManager.accessManager.path.toString))(req => {
        req.contentType = "application/x-www-form-urlencoded"
        req.contentString = Request.queryString(("services" -> accessRequest.serviceId.name))
          .drop(1) /* Drop '?' */
        req.headerMap.add("Auth-Token", accessRequest.identity.id.master.value)
      })
    /**
     * Fetch a valid ServiceToken, will return a ServiceToken otherwise a Future.exception
     */
    def apply(req: AccessRequest[Tokens]): Future[AccessResponse[ServiceToken]] =
      // Check if ServiceToken is already available for Service
      req.identity.id.service(req.serviceId.name).fold[Future[ServiceToken]](
        // Fetch ServiceToken from the Keymaster
        binder(BindRequest(req.serviceId.loginManager.accessManager, api(req))).flatMap(res => res.status match {
          // Parse for Tokens if Status.Ok
          case Status.Ok =>
            Tokens.derive[Tokens](res.contentString).fold[Future[ServiceToken]](
              e => Future.exception(AccessIssuerError(Status.NotAcceptable,
                "Failed to parse the Keymaster Access Response")),
              t => t.service(req.serviceId.name).fold[Future[ServiceToken]](
                Future.exception(AccessDenied(Status.NotAcceptable,
                  s"No access allowed to service ${req.serviceId.name}"))
              )(st => for {
                // Cache the newly issued service token back into the session.
                _ <- store.update(Session(req.sessionId, req.identity.id.add(req.serviceId.name, st)))
              } yield st)
            )
          // Preserve Response Status code by throwing AccessDenied exceptions
          case _ => Future.exception(AccessIssuerError(res.status,
            s"No access allowed to service ${req.serviceId.name} due to error: ${res.status}"))
        })
      )(t => Future.value(t)).map(t => KeymasterAccessRes(Access(t)))
  }
  /**
   * This filter acquires the access and then forwards the request to upstream service
   *
   * @param binder It binds to the upstream service endpoint using the info passed in ServiceIdentifier
   */
  case class KeymasterAccessFilter(binder: MBinder[ServiceIdentifier])
      extends Filter[AccessIdRequest[Tokens], Response, AccessRequest[Tokens], AccessResponse[ServiceToken]] {
    // Obtains the service token, then proxies the original request upstream with
    // the token attached as the "Auth-Token" header.
    def apply(req: AccessIdRequest[Tokens],
        accessService: Service[AccessRequest[Tokens], AccessResponse[ServiceToken]]): Future[Response] =
      accessService(AccessRequest(req.id, req.req.req.serviceId, req.req.sessionId)).flatMap(accessResp =>
        binder(BindRequest(req.req.req.serviceId,
          tap(req.req.req.req) { r => r.headerMap.add("Auth-Token", accessResp.access.access.value)}))
      )
  }
  /**
   * Keymaster Identity provider service Chain
   * @param store session store shared across the chain's filters
   */
  def keymasterIdentityProviderChain(store: SessionStore)(
      implicit secretStoreApi: SecretStoreApi): Service[SessionIdRequest, Response] = {
    new KeymasterMethodMuxLoginFilter(LoginManagerBinder) andThen
      new KeymasterPostLoginFilter(store) andThen
      new KeymasterIdentityProvider(ManagerBinder)
  }
  /**
   * Keymaster Access Issuer service Chain
   * @param store session store shared across the chain's filters
   */
  def keymasterAccessIssuerChain(store: SessionStore)(
      implicit secretStoreApi: SecretStoreApi): Service[SessionIdRequest, Response] = {
    new IdentityFilter[Tokens](store) andThen
      new KeymasterAccessFilter(ServiceIdentifierBinder) andThen
      new KeymasterAccessIssuer(ManagerBinder, store)
  }
}
| jamescway/borderpatrol | auth/src/main/scala/com/lookout/borderpatrol/auth/keymaster/Keymaster.scala | Scala | mit | 9,221 |
package controllers
import scala.util.{ Try, Success, Failure }
import play.api.data._, Forms._
import play.api.mvc._
import play.twirl.api.Html
import play.api.Play.current
import play.api.i18n.Messages.Implicits._
import lila.api.Context
import lila.app._
import lila.common.HTTPRequest
import lila.opening.{ Generated, Opening => OpeningModel, UserInfos, Attempt }
import lila.user.{ User => UserModel, UserRepo }
import views._
import views.html.opening.JsData
object Opening extends LilaController {
  private def env = Env.opening
  // Identifies the opening from its FEN; the meaning of the constant 5
  // (depth or result limit) is defined by env.api.identify — TODO confirm.
  private def identify(opening: OpeningModel) =
    env.api.identify(opening.fen, 5)
  // Renders the full HTML page for one opening puzzle, including the viewer's
  // personal stats (if logged in) and the identified opening name.
  private def renderShow(opening: OpeningModel)(implicit ctx: Context) =
    env userInfos ctx.me zip identify(opening) map {
      case (infos, identified) =>
        views.html.opening.show(opening, identified, infos, env.AnimationDuration)
    }
  // Builds the JSON payload consumed by the client-side puzzle UI.
  // `play` indicates whether the puzzle is playable; `attempt`/`win` carry the
  // outcome of a just-submitted attempt, when present.
  private def makeData(
    opening: OpeningModel,
    infos: Option[UserInfos],
    play: Boolean,
    attempt: Option[Attempt],
    win: Option[Boolean])(implicit ctx: Context): Fu[Result] =
    identify(opening) map { identified =>
      Ok(JsData(
        opening,
        identified,
        infos,
        play = play,
        attempt = attempt,
        win = win,
        animationDuration = env.AnimationDuration)) as JSON
    }
  // XHR requests get the next puzzle as JSON; plain requests get the HTML page.
  def home = Open { implicit ctx =>
    if (HTTPRequest isXhr ctx.req) env.selector(ctx.me) zip (env userInfos ctx.me) flatMap {
      case (opening, infos) => makeData(opening, infos, true, none, none)
    }
    else env.selector(ctx.me) flatMap { opening =>
      renderShow(opening) map { Ok(_) }
    }
  }
  def show(id: OpeningModel.ID) = Open { implicit ctx =>
    OptionFuOk(env.api.opening find id)(renderShow)
  }
  // Authenticated, XHR-only: the viewer's attempt history widget.
  def history = Auth { implicit ctx =>
    me =>
      XhrOnly {
        env userInfos me map { ui => Ok(views.html.opening.history(ui)) }
      }
  }
  // Attempt submission carries the number of correct and failed moves.
  private val attemptForm = Form(mapping(
    "found" -> number,
    "failed" -> number
  )(Tuple2.apply)(Tuple2.unapply))
  // Records an attempt: a win requires reaching the opening's goal with zero
  // failures. Logged-in users get their rating/history updated; anonymous
  // users only get the result echoed back.
  def attempt(id: OpeningModel.ID) = OpenBody { implicit ctx =>
    implicit val req = ctx.body
    OptionFuResult(env.api.opening find id) { opening =>
      attemptForm.bindFromRequest.fold(
        err => fuccess(BadRequest(errorsAsJson(err)) as JSON),
        data => {
          val (found, failed) = data
          val win = found == opening.goal && failed == 0
          ctx.me match {
            // finisher returns either a fresh attempt (first try) or the prior
            // attempt plus its recorded win state (repeat submission).
            case Some(me) => env.finisher(opening, me, win) flatMap {
              case (newAttempt, None) =>
                // Reload the user (rating may have changed) before rebuilding data.
                UserRepo byId me.id map (_ | me) flatMap { me2 =>
                  (env.api.opening find id) zip (env userInfos me2.some) flatMap {
                    case (o2, infos) =>
                      makeData(o2 | opening, infos, false, newAttempt.some, none)
                  }
                }
              case (oldAttempt, Some(win)) => env userInfos me.some flatMap { infos =>
                makeData(opening, infos, false, oldAttempt.some, win.some)
              }
            }
            case None => makeData(opening, none, false, none, win.some)
          }
        }
      )
    }
  }
  // Token-protected endpoint for bulk-importing generated openings.
  // NOTE(review): the confirmation URL is hard-coded to http://lichess.org.
  def importOne = Action.async(parse.json) { implicit req =>
    env.api.opening.importOne(req.body, ~get("token", req)) map { id =>
      Ok("kthxbye " + {
        val url = s"http://lichess.org/training/opening/$id"
        play.api.Logger("opening import").info(s"${req.remoteAddress} $url")
        url
      })
    } recover {
      case e =>
        play.api.Logger("opening import").warn(e.getMessage)
        BadRequest(e.getMessage)
    }
  }
}
| JimmyMow/lila | app/controllers/Opening.scala | Scala | mit | 3,596 |
package com.argcv.dvergar.ptcer.models
/**
 * A single event in the event stream, optionally linked to its neighbour
 * events once the graph structure has been resolved.
 *
 * @param eid event id (auto increment id)
 * @param nid node id the event occurred on (printed as "node" in toString)
 * @param ts timestamp
 * @param eout event ids this event points to, once resolved
 * @param ein event ids pointing to this event, once resolved
 */
case class Event(eid: Int, nid: Int, ts: Long, eout: Option[List[Int]] = None, ein: Option[List[Int]] = None) {
  // Dedicated lock guarding the mutable traversal tag below.
  lazy val tMonitor = new AnyRef
  // Flag (epoch) of the traversal that last tagged this event; -1 = untagged.
  var tFlag = -1
  // Sequence number assigned under the traversal identified by tFlag.
  var tSeq = 0

  /**
   * Atomically returns the cached sequence number when this event is already
   * tagged with `ntFlag`; otherwise tags it with `ntFlag`, stores `ntSeq`,
   * and returns it.
   *
   * Fix: the original stored `tSeq` without advancing `tFlag`, so the
   * "already tagged" check could only ever match the initial flag value and
   * later calls with the same flag kept overwriting the stored sequence.
   */
  def getOrSetSeq(ntFlag: Int, ntSeq: Int) = tMonitor.synchronized {
    if (ntFlag == tFlag) tSeq
    else {
      tFlag = ntFlag
      tSeq = ntSeq
      tSeq
    }
  }

  /** Returns a copy with the outgoing neighbour list resolved. */
  def withOut(eout: List[Int]) = this.copy(eout = Some(eout))

  /** Returns a copy with the incoming neighbour list resolved. */
  def withIn(ein: List[Int]) = this.copy(ein = Some(ein))

  // "#1" marks a fully-linked event (both neighbour lists resolved), "#0" a bare one.
  override def toString = {
    if (ein.isDefined && eout.isDefined)
      s"[Event#1] event($eid), node($nid), t($ts), " +
        s"_in: ${ein.get.mkString(",")}, " +
        s"_out: ${eout.get.mkString(",")}"
    else
      s"[Event#0] event($eid), node($nid), t($ts)"
  }
}
| yuikns/pattern-counter | src/main/scala/com/argcv/dvergar/ptcer/models/Event.scala | Scala | mit | 918 |
package io.iohk.ethereum.consensus.ethash.difficulty
import io.iohk.ethereum.consensus.difficulty.DifficultyCalculator
import io.iohk.ethereum.domain.BlockHeader
class TargetTimeDifficultyCalculator(powTargetTime: Long) extends DifficultyCalculator {

  import DifficultyCalculator._

  /**
   * The lowerBoundExpectedRatio (l for abbreviation below) divides the timestamp diff into ranges:
   * [0, l) => c = 1, difficulty increases
   * [l, 2*l) => c = 0. difficulty stays the same
   * ...
   * [l*i, l*(i+1) ) => c = 1-i, difficulty decreases
   *
   * example:
   * powTargetTime := 45 seconds
   * l := 30 seconds
   * [0, 0.5 min) => difficulty increases
   * [0.5 min, 1 min) => difficulty stays the same (the average should be powTargetTime)
   * [1 min, +infinity) => difficulty decreases
   */
  private val lowerBoundExpectedRatio: Long = (powTargetTime / 1.5).toLong

  /**
   * Computes the difficulty of a new block from its parent, nudging average
   * block times toward `powTargetTime` per the table above.
   *
   * @param blockNumber    number of the block being mined (not used here)
   * @param blockTimestamp unix timestamp of the new block
   * @param parentHeader   header of the parent block
   * @return parent difficulty adjusted by `c` units of
   *         `parentDifficulty / DifficultyBoundDivision`, clamped from below
   *         by `MinimumDifficulty`
   */
  def calculateDifficulty(blockNumber: BigInt, blockTimestamp: Long, parentHeader: BlockHeader): BigInt = {
    val timestampDiff = blockTimestamp - parentHeader.unixTimestamp

    // One "unit" of difficulty adjustment.
    val x: BigInt = parentHeader.difficulty / DifficultyBoundDivision
    // Adjustment factor; integral division truncates, which realizes the
    // ranges documented above. Clamped by FrontierTimestampDiffLimit
    // (presumably negative, so difficulty cannot drop arbitrarily fast —
    // confirm in DifficultyCalculator).
    val c: BigInt = math.max(1 - (timestampDiff / lowerBoundExpectedRatio), FrontierTimestampDiffLimit)

    MinimumDifficulty.max(parentHeader.difficulty + x * c)
  }
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/consensus/ethash/difficulty/TargetTimeDifficultyCalculator.scala | Scala | mit | 1,342 |
/*
* Copyright 2017 Mediative
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mediative.amadou.bigquery
import java.util.UUID
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.json.GoogleJsonResponseException
import com.google.api.client.googleapis.json.GoogleJsonError
import com.google.api.client.http.javanet.NetHttpTransport
import com.google.api.client.json.jackson2.JacksonFactory
import com.google.api.services.bigquery.model._
import com.google.api.services.bigquery.{Bigquery, BigqueryScopes}
import com.google.cloud.hadoop.io.bigquery._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.util.Progressable
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import scala.util.Try
private[bigquery] object BigQueryClient {
  // Configuration keys (and their defaults) for the staging dataset.
  val STAGING_DATASET_PREFIX = "bq.staging_dataset.prefix"
  val STAGING_DATASET_PREFIX_DEFAULT = "spark_bigquery_staging_"
  val STAGING_DATASET_LOCATION = "bq.staging_dataset.location"
  val STAGING_DATASET_LOCATION_DEFAULT = "US"
  // Tables created in the staging dataset expire after one day.
  val STAGING_DATASET_TABLE_EXPIRATION_MS = 86400000L
  val STAGING_DATASET_DESCRIPTION = "Spark BigQuery staging dataset"

  // Lazily-created singleton client shared by the whole JVM.
  private var instance: BigQueryClient = null

  /**
   * Returns the shared client, creating it from `conf` on first use.
   *
   * Synchronized so that two threads racing on the first call cannot each
   * construct (and leak) a client; `conf` from later calls is ignored.
   */
  def getInstance(conf: Configuration): BigQueryClient = synchronized {
    if (instance == null) {
      instance = new BigQueryClient(conf)
    }
    instance
  }
}
private[bigquery] class BigQueryClient(conf: Configuration) {
  import BigQueryClient._

  private val logger: Logger = LoggerFactory.getLogger(classOf[BigQueryClient])

  // OAuth scopes required for all BigQuery calls.
  private val SCOPES = List(BigqueryScopes.BIGQUERY).asJava

  // Shared service handle, authenticated with application-default credentials.
  private val bigquery: Bigquery = {
    val credential = GoogleCredential.getApplicationDefault.createScoped(SCOPES)
    new Bigquery.Builder(new NetHttpTransport, new JacksonFactory, credential)
      .setApplicationName("spark-bigquery")
      .build()
  }

  // GCP project id, re-read from the Hadoop configuration on each access.
  private def projectId: String = conf.get(BigQueryConfiguration.PROJECT_ID_KEY)

  // True when running inside the Scala REPL (detected from the stack trace);
  // interactive sessions then submit higher-priority queries.
  private def inConsole =
    Thread
      .currentThread()
      .getStackTrace
      .exists(_.getClassName.startsWith("scala.tools.nsc.interpreter."))

  private val PRIORITY = if (inConsole) "INTERACTIVE" else "BATCH"
  private val TABLE_ID_PREFIX = "spark_bigquery"
  private val JOB_ID_PREFIX = "spark_bigquery"

  /** Matcher for "Missing table" errors. */
  object TableNotFound {
    // Extracts the "notFound" error detail from a 404 Google API response.
    def unapply(error: Throwable): Option[GoogleJsonError.ErrorInfo] = error match {
      case error: GoogleJsonResponseException =>
        Some(error.getDetails)
          .filter(_.getCode == 404)
          .flatMap(_.getErrors.asScala.find(_.getReason == "notFound"))
      case _ => None
    }
  }

  /** Fetches table metadata from BigQuery. */
  def tableInfo(table: TableReference): Table =
    bigquery.tables().get(table.getProjectId, table.getDatasetId, table.getTableId).execute()

  /**
   * Perform a BigQuery SELECT query and save results to the destination table.
   */
  def query(
      sqlQuery: String,
      destinationTable: TableReference,
      writeDisposition: WriteDisposition.Value): TableReference = {
    logger.info(s"Executing query $sqlQuery")
    logger.info(s"Destination table: $destinationTable")
    val job =
      createQueryJob(sqlQuery, destinationTable, dryRun = false, PRIORITY, writeDisposition)
    logger.info("JOB ID: " + job.getId)
    // Blocks until the query job completes.
    waitForJob(job)
    logger.info("JOB STATUS: " + job.getStatus.getState)
    destinationTable
  }

  /**
   * Extract table to *.csv.gz files in GCS
   */
  def extract(sourceTable: TableReference, gcsPath: String): Unit = {
    val destination = gcsPath + "/*.csv.gz"
    logger.info(s"extracting $sourceTable to $destination")
    val extractConfig = new JobConfigurationExtract()
      .setDestinationFormat("CSV")
      .setCompression("GZIP")
      .setDestinationUri(destination)
      .setSourceTable(sourceTable)
    val jobConfig = new JobConfiguration().setExtract(extractConfig)
    val jobReference = createJobReference(projectId, JOB_ID_PREFIX)
    val job = new Job().setConfiguration(jobConfig).setJobReference(jobReference)
    bigquery.jobs().insert(projectId, job).execute()
    // Blocks until the extract job completes.
    waitForJob(job)
  }

  /**
   * Checks if data has been loaded into a table for a specific date.
   *
   * Performs the following checks and returns false if either succeeds:
   * - the table does not exists
   * - the table never modified on or after the specified date
   * - the table does not contain any data for the specified date
   *
   * The last check will query the table and check whether any entries exists for
   * the column provided via `lastModifiedColumn`.
   */
  def hasDataForDate(
      table: TableReference,
      date: java.sql.Date,
      lastModifiedColumn: String): Boolean = {
    // Cheap metadata check first: was the table touched on/after `date`?
    val wasModifiedToday = Try {
      val bqTable =
        bigquery.tables().get(table.getProjectId, table.getDatasetId, table.getTableId).execute()
      bqTable.getLastModifiedTime.longValue() >= date.getTime
    } recover {
      // A missing table simply means no data yet.
      case TableNotFound(message) => false
    }
    def partitionHasData = {
      val partition = s"${table.getProjectId}:${table.getDatasetId}.${table.getTableId}"
      // NOTE(review): `count(*) > 1` is only true for two or more rows; if a
      // single row should count as data, `> 0` was probably intended — confirm.
      val job = createQueryJob(
        s"SELECT count(*) > 1 FROM [$partition] WHERE $lastModifiedColumn >= TIMESTAMP($date) LIMIT 1",
        null,
        false,
        "INTERACTIVE"
      )
      waitForJob(job)
      val result = bigquery.jobs.getQueryResults(projectId, job.getJobReference.getJobId).execute()
      val value = result.getRows.get(0).getF.get(0).getV
      value == "true"
    }
    // Short-circuits: the query only runs when the metadata check passed.
    // `.get` rethrows any unhandled (non-404) metadata error.
    wasModifiedToday.get && partitionHasData
  }

  // XXX: Workaround to support CREATE_IF_NEEDED for date-partitioned tables
  private def createPartitionedTableIfMissing(destinationTable: TableReference): Unit = {
    // Strip a partition decorator such as "table$20170101" down to "table".
    val table = new TableReference()
      .setProjectId(destinationTable.getProjectId)
      .setDatasetId(destinationTable.getDatasetId)
      .setTableId(destinationTable.getTableId.replaceAll("[$].*", ""))
    val result = Try {
      bigquery.tables().get(table.getProjectId, table.getDatasetId, table.getTableId).execute()
    } recover {
      // Only "table not found" triggers creation of a day-partitioned table.
      case TableNotFound(message) =>
        val tableConfiguration = new Table()
          .setTableReference(table)
          .setTimePartitioning(new TimePartitioning().setType("DAY"))
        logger.info(s"Creating date-partitioned table using $tableConfiguration")
        bigquery
          .tables()
          .insert(table.getProjectId, table.getDatasetId, tableConfiguration)
          .execute()
    }
    // Rethrows any error other than the handled "table not found".
    result.get
    ()
  }

  /**
   * Load an JSON data set on GCS to a BigQuery table.
   */
  def load(
      gcsPath: String,
      destinationTable: TableReference,
      schema: TableSchema,
      writeDisposition: WriteDisposition.Value = null,
      createDisposition: CreateDisposition.Value = null): Unit = {
    val tableName = BigQueryStrings.toString(destinationTable)
    logger.info(s"Loading $gcsPath into $tableName using ${writeDisposition}/$createDisposition")
    var loadConfig = new JobConfigurationLoad()
      .setDestinationTable(destinationTable)
      .setSourceFormat("NEWLINE_DELIMITED_JSON")
      .setSourceUris(List(gcsPath + "/*.json").asJava)
      .setSchema(schema)
    if (writeDisposition != null) {
      loadConfig = loadConfig.setWriteDisposition(writeDisposition.toString)
    }
    if (createDisposition != null) {
      // Route partitioned CREATE_IF_NEEDED through the workaround above
      // instead of forwarding the disposition to BigQuery.
      if (createDisposition == CreateDisposition.CREATE_IF_NEEDED && destinationTable.getTableId
            .contains("$"))
        createPartitionedTableIfMissing(destinationTable)
      else
        loadConfig = loadConfig.setCreateDisposition(createDisposition.toString)
    }
    val jobConfig = new JobConfiguration().setLoad(loadConfig)
    val jobReference = createJobReference(projectId, JOB_ID_PREFIX)
    val job = new Job().setConfiguration(jobConfig).setJobReference(jobReference)
    bigquery.jobs().insert(projectId, job).execute()
    waitForJob(job)
  }

  // Polls BigQuery until the job finishes, printing a progress marker.
  private def waitForJob(job: Job): Unit =
    BigQueryUtils.waitForJobCompletion(bigquery, projectId, job.getJobReference, new Progressable {
      override def progress(): Unit = println("<BQ process>")
    })

  // Returns (creating on demand) the staging dataset for `location`; staged
  // tables expire automatically after STAGING_DATASET_TABLE_EXPIRATION_MS.
  private def stagingDataset(location: String): DatasetReference = {
    // Create staging dataset if it does not already exist
    val prefix = conf.get(STAGING_DATASET_PREFIX, STAGING_DATASET_PREFIX_DEFAULT)
    val datasetId = prefix + location.toLowerCase
    try {
      bigquery.datasets().get(projectId, datasetId).execute()
      logger.info(s"Staging dataset $projectId:$datasetId already exists")
    } catch {
      case e: GoogleJsonResponseException if e.getStatusCode == 404 =>
        logger.info(s"Creating staging dataset $projectId:$datasetId")
        val dsRef = new DatasetReference().setProjectId(projectId).setDatasetId(datasetId)
        val ds = new Dataset()
          .setDatasetReference(dsRef)
          .setDefaultTableExpirationMs(STAGING_DATASET_TABLE_EXPIRATION_MS)
          .setDescription(STAGING_DATASET_DESCRIPTION)
          .setLocation(location)
        bigquery
          .datasets()
          .insert(projectId, ds)
          .execute()
      case NonFatal(e) => throw e
    }
    new DatasetReference().setProjectId(projectId).setDatasetId(datasetId)
  }

  // Builds and submits a query job; `destinationTable` may be null for
  // queries whose results are only read back via getQueryResults.
  private def createQueryJob(
      sqlQuery: String,
      destinationTable: TableReference,
      dryRun: Boolean,
      priority: String,
      writeDisposition: WriteDisposition.Value = WriteDisposition.WRITE_EMPTY): Job = {
    var queryConfig = new JobConfigurationQuery()
      .setQuery(sqlQuery)
      .setPriority(priority)
      .setCreateDisposition("CREATE_IF_NEEDED")
      .setWriteDisposition(writeDisposition.toString)
    if (destinationTable != null) {
      queryConfig = queryConfig
        .setDestinationTable(destinationTable)
        .setAllowLargeResults(true)
    }
    val jobConfig = new JobConfiguration().setQuery(queryConfig).setDryRun(dryRun)
    val jobReference = createJobReference(projectId, JOB_ID_PREFIX)
    val job = new Job().setConfiguration(jobConfig).setJobReference(jobReference)
    try {
      // The inserted Job (with server-side state) is the return value.
      bigquery.jobs().insert(projectId, job).execute()
    } catch {
      case ex: Exception =>
        logger.error(ex.getMessage, ex)
        ex.printStackTrace()
        throw ex
    }
  }

  // Job ids must be unique per project; suffix with a random UUID.
  // NOTE(review): `jobIdPrefix` is accepted but never used — confirm whether
  // the id was meant to start with the prefix rather than the project id.
  private def createJobReference(projectId: String, jobIdPrefix: String): JobReference = {
    val fullJobId = projectId + "-" + UUID.randomUUID().toString
    new JobReference().setProjectId(projectId).setJobId(fullJobId)
  }
}
| mediative/amadou | bigquery/src/main/scala/com.mediative.amadou.bigquery/BigQueryClient.scala | Scala | apache-2.0 | 11,146 |
package akkaviz.server
import akka.actor.ActorSystem
import akkaviz.events.types._
import akkaviz.events.{Helpers, LightSnapshot}
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
import scala.concurrent.Await
import scala.concurrent.duration._
class SubscriptionSessionTest extends FunSuite with SubscriptionSession with BeforeAndAfterAll with Matchers {

  // Shared fixtures: a throwaway actor system whose deadLetters ref stands in
  // for any actor, plus a sample message-received event.
  val system = ActorSystem()
  val actorRef = system.deadLetters
  val actorRefString = Helpers.actorRefToString(actorRef)
  val receivedMessage = ReceivedWithId(1, actorRef, actorRef, BigDecimal(1.1), true)

  test("Default filtering of SubscriptionSession") {
    // With default settings, actor-specific events are rejected.
    defaultSettings.eventAllowed(Killed(actorRef)) shouldBe false
    defaultSettings.eventAllowed(receivedMessage) shouldBe false
  }

  test("Non filtered messages are passed through") {
    // Events without a per-actor payload are always allowed.
    val nonFilteredMessages = Seq(
      Spawned(actorRef),
      ReceiveDelaySet(10.millis),
      ReportingEnabled,
      ReportingDisabled,
      SnapshotAvailable(LightSnapshot())
    )
    nonFilteredMessages.foreach {
      defaultSettings.eventAllowed(_) shouldBe true
    }
  }

  test("Allowing some actor makes it accepted") {
    val session = updateSettings(defaultSettings, SetActorEventFilter(Set(actorRefString)))
    session.eventAllowed(Killed(actorRef)) shouldBe true // only ReceivedWithId is verified
    session.eventAllowed(receivedMessage) shouldBe false
  }

  test("Allowing some actor and message type makes it accepted") {
    // Both filters must pass: the actor filter and the message-class filter.
    val session = Seq(SetActorEventFilter(Set(actorRefString)), SetAllowedClasses(Set(receivedMessage.message.getClass.getName)))
      .foldLeft(defaultSettings)(updateSettings)
    session.eventAllowed(receivedMessage) shouldBe true
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    // Shut the actor system down so the JVM can exit.
    Await.result(system.terminate(), Duration.Inf)
  }
}
| blstream/akka-viz | monitoring/src/test/scala/akkaviz/server/SubscriptionSessionTest.scala | Scala | mit | 1,837 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.ejson
import slamdata.Predef._
import quasar.Qspec
import quasar.contrib.matryoshka._
import quasar.contrib.matryoshka.arbitrary._
import quasar.ejson.implicits._
import quasar.fp._
import matryoshka.{equalTEqual => _, _}
import matryoshka.data.Fix
import matryoshka.implicits._
import org.specs2.scalacheck._
import scalaz._, Scalaz._
final class JsonCodecSpec extends Qspec with EJsonArbitrary {
  import JsonCodec.DecodingFailed

  // Keep generated EJson values small so the properties run quickly.
  implicit val params = Parameters(maxSize = 10)

  type E = Fix[EJson]
  type J = Fix[Json]

  // Single-layer decoder (Json -> EJson), with failures surfaced in \/.
  val decodeMƒ: CoalgebraM[DecodingFailed[J] \/ ?, EJson, J] =
    JsonCodec.decodeƒ[J] >>> (_.run)

  // Fold an EJson tree down to Json, then unfold it back to EJson.
  val roundtrip: E => DecodingFailed[J] \/ E =
    _.cata(JsonCodec.encodeƒ[J]).anaM[E](decodeMƒ)

  // Extension keys that the codec emits as the sole key of a singleton object.
  val soloKeys = JsonCodec.ExtKeys \\ ISet.fromList(List(JsonCodec.KeyK, JsonCodec.ValueK, JsonCodec.MetaK))

  "faithfully roundtrip EJson" >> prop { e: E =>
    roundtrip(e).toEither must beRight(equal(e))
  }

  // A solo key mapping to a malformed payload must fail to decode and must
  // report the offending value.
  soloKeys.toList foreach { k =>
    s"fail when singleton map with `$k` key maps to unexpected value" >> {
      val n = CommonJson(nul[J]()).embed
      val j = ObjJson(Obj(ListMap(k -> n))).embed
      j.anaM[E](decodeMƒ) must beLike {
        case -\/(DecodingFailed(_, v)) => v must_= n
      }
    }
  }

  "map keys beginning with the codec sigil are preserved" >> prop { (k10: String, k20: String, v1: E, v2: E) =>
    // User keys that start with the sigil must not clash with the codec's
    // own escaping scheme.
    val (k1, k2) = (JsonCodec.Sigil.toString + k10, JsonCodec.Sigil.toString + k20)
    val m = ExtEJson(Map(List(
      CommonEJson(str[E](k1)).embed -> v1,
      CommonEJson(str[E](k2)).embed -> v2
    ))).embed
    roundtrip(m).toEither must beRight(equal(m))
  }

  "map keys equal to one of the codec keys are preserved" >> prop { v: E =>
    val m = ExtEJson(Map(JsonCodec.ExtKeys.toList map { k =>
      CommonEJson(str[E](k)).embed -> v
    })).embed
    roundtrip(m).toEither must beRight(equal(m))
  }

  "map keys equal to one of the codec keys with additional sigil prefix are preserved" >> prop { v: E =>
    val m = ExtEJson(Map(JsonCodec.ExtKeys.toList map { k =>
      CommonEJson(str[E](JsonCodec.Sigil.toString + k)).embed -> v
    })).embed
    roundtrip(m).toEither must beRight(equal(m))
  }
}
| drostron/quasar | ejson/src/test/scala/quasar/ejson/JsonCodecSpec.scala | Scala | apache-2.0 | 2,809 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnetexamples.visualization
import org.apache.mxnet.Symbol
/**
 * @author Depeng Liang
 */
object AlexNet {

  /**
   * Builds the AlexNet symbol graph.
   *
   * @param numClasses number of output classes of the final classifier
   * @return the SoftmaxOutput symbol at the top of the network
   */
  def getSymbol(numClasses: Int = 1000): Symbol = {
    // Builders for the sub-graphs repeated throughout the network; their
    // argument maps match the hand-rolled originals exactly, and they are
    // invoked in the same creation order as the original layer list.
    def relu(data: Symbol): Symbol =
      Symbol.Activation()()(Map("data" -> data, "act_type" -> "relu"))
    def lrn(data: Symbol): Symbol =
      Symbol.LRN()()(Map("data" -> data,
        "alpha" -> 0.0001f, "beta" -> 0.75f, "knorm" -> 1f, "nsize" -> 5))

    val inputData = Symbol.Variable("data")

    // stage 1: 11x11 stride-4 convolution, max-pool, local response norm
    val conv1 = Symbol.Convolution()()(Map(
      "data" -> inputData, "kernel" -> "(11, 11)", "stride" -> "(4, 4)", "num_filter" -> 96))
    val pool1 = Symbol.Pooling()()(Map(
      "data" -> relu(conv1), "pool_type" -> "max", "kernel" -> "(3, 3)", "stride" -> "(2,2)"))
    val lrn1 = lrn(pool1)

    // stage 2: 5x5 padded convolution, max-pool, LRN
    val conv2 = Symbol.Convolution()()(Map(
      "data" -> lrn1, "kernel" -> "(5, 5)", "pad" -> "(2, 2)", "num_filter" -> 256))
    val pool2 = Symbol.Pooling()()(Map("data" -> relu(conv2),
      "kernel" -> "(3, 3)", "stride" -> "(2, 2)", "pool_type" -> "max"))
    val lrn2 = lrn(pool2)

    // stage 3: three stacked 3x3 convolutions, then max-pool
    val conv3 = Symbol.Convolution()()(Map(
      "data" -> lrn2, "kernel" -> "(3, 3)", "pad" -> "(1, 1)", "num_filter" -> 384))
    val conv4 = Symbol.Convolution()()(Map(
      "data" -> relu(conv3), "kernel" -> "(3, 3)", "pad" -> "(1, 1)", "num_filter" -> 384))
    val conv5 = Symbol.Convolution()()(Map(
      "data" -> relu(conv4), "kernel" -> "(3, 3)", "pad" -> "(1, 1)", "num_filter" -> 256))
    val pool3 = Symbol.Pooling()()(Map("data" -> relu(conv5),
      "kernel" -> "(3, 3)", "stride" -> "(2, 2)", "pool_type" -> "max"))

    // stage 4: flatten + first fully-connected layer with dropout
    val flatten = Symbol.Flatten()()(Map("data" -> pool3))
    val fc1 = Symbol.FullyConnected()()(Map("data" -> flatten, "num_hidden" -> 4096))
    val dropout1 = Symbol.Dropout()()(Map("data" -> relu(fc1), "p" -> 0.5f))

    // stage 5: second fully-connected layer with dropout
    val fc2 = Symbol.FullyConnected()()(Map("data" -> dropout1, "num_hidden" -> 4096))
    val dropout2 = Symbol.Dropout()()(Map("data" -> relu(fc2), "p" -> 0.5f))

    // stage 6: classifier head
    val fc3 = Symbol.FullyConnected()()(
      Map("data" -> dropout2, "num_hidden" -> numClasses))
    Symbol.SoftmaxOutput("softmax")()(Map("data" -> fc3))
  }
}
| reminisce/mxnet | scala-package/examples/src/main/scala/org/apache/mxnetexamples/visualization/AlexNet.scala | Scala | apache-2.0 | 3,569 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.web.ld.exceptions
/**
 * Signals a namespace collision detected by the linked-data (LD) layer;
 * the message is forwarded unchanged to [[RuntimeException]].
 */
class NameSpaceCollisionException(msg: String) extends RuntimeException(msg)
| dudi3001/CM-Well | server/cmwell-ws/app/ld/exceptions/NameSpaceCollisionException.scala | Scala | apache-2.0 | 896 |
package org.apache.spark.examples.sql.hive
import org.apache.spark.sql._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import scala.util.Random
/**
 * Created by liush on 17-7-3.
 */

// Toy row type; its fields are not referenced by the demo below.
case class Order(
    name: String,
    clas: Int,
    s: Int)

// Row type backing the window-function demo, e.g.:
//   select student_name, class, score,
//          sum(score) over(partition by class order by score desc) running_total
//   from temp_b;
case class ClassSchool(
    student_name: String,
    class_name: String,
    score: Int)

// Row type for a city dimension table (not used by the demo below).
case class City(
    id: Int,
    name: String,
    code: Int,
    prov_id: String
)

// Row type for a billing fact table (not used by the demo below).
case class t(
    bill_month: String,
    area_code: Int,
    net_type: String,
    local_fare: Double
)
object WindowsOptions {

  /** Entry point: sets up a local HiveContext, registers a UDF, runs the demo. */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("HiveFromSpark").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val hiveContext = new HiveContext(sc)
    import hiveContext.sql
    def _sqlContext: SQLContext = hiveContext
    val sqlContext = _sqlContext
    /* sql("SHOW TABLES").toString
    sql("SELECT * FROM src").toString
    sql("drop TABLE src")*/
    sql("USE db_shanxi_test")
    // Register a UDF producing a pseudo-random Int between min and max.
    // NOTE(review): the modulo-based mapping is biased and creates a fresh
    // Random per call — confirm the intended distribution.
    sqlContext.udf.register("random0", (min:Int,max:Int) => { val random = new Random()
      val s: Int = random.nextInt(max) % (max - min + 1) + min
      s
    })
    println("sql自定义函数:"+sql("SELECT random0(1,10) ").head().getInt(0) )
    windowsFuncitonTest(hiveContext)
  }

  /** Demonstrates Hive window functions (dense_rank, rank, windowed sum). */
  def windowsFuncitonTest(hiveContext:HiveContext): Unit ={
    import hiveContext.sql
    // Toy data: students with scores, split across classes A and B.
    val test = Seq(
      ClassSchool("张三","A",90),
      ClassSchool("李四","A",95),
      ClassSchool("王五","A",85),
      ClassSchool("芳芳","B",92),
      ClassSchool("明明","B",78),
      ClassSchool("亮亮","B",78),
      ClassSchool("晶晶","B",75)
    )
    import hiveContext.implicits._
    // Implicit conversion turns the local Seq into a DataFrame.
    test.toDF.registerTempTable("temp_b")
    // Rank students within each class by score.
    // dense_rank() assigns consecutive ranks: two second places are followed
    // by a third place. rank() leaves gaps: two second places are followed by
    // a fourth place (both computed within each partition).
    /**
    +------------+----------+-----+-------+
    |student_name|class_name|score|mingchi|
    +------------+----------+-----+-------+
    |李四 |A |95 |1 |
    |张三 |A |90 |2 |
    |王五 |A |85 |3 |
    |芳芳 |B |92 |1 |
    |明明 |B |78 |2 |
    |亮亮 |B |78 |2 |
    |晶晶 |B |75 |3 |
    +------------+----------+-----+-------+
    */
    sql(
      """
        |select student_name, class_name, score, dense_rank() over(partition by class_name order by score desc) mingchi from temp_b
      """.stripMargin).show(false)
    /**
    +------------+----------+-----+-------+
    |student_name|class_name|score|mingchi|
    +------------+----------+-----+-------+
    |李四 |A |95 |1 |
    |张三 |A |90 |2 |
    |王五 |A |85 |3 |
    |芳芳 |B |92 |1 |
    |明明 |B |78 |2 |
    |亮亮 |B |78 |2 |
    |晶晶 |B |75 |4 |
    +------------+----------+-----+-------+
    */
    sql(
      """
        |select student_name, class_name, score, rank() over(partition by class_name order by score desc) mingchi from temp_b
      """.stripMargin).show(false)
    println("rank:分班级按成绩排名次排序如下:")
    sql(
      """
        |select student_name, class_name, score, rank() over(partition by class_name order by score desc) mingchi from temp_b
      """.stripMargin).show(false)
    println("班级成绩累计(\"连续\"求和)结果如下:")
    // Running total per class: sum over an ordered window.
    /**
    +------------+----------+-----+---+
    |student_name|class_name|score|sum|
    +------------+----------+-----+---+
    |李四 |A |95 |95 |
    |张三 |A |90 |185|
    |王五 |A |85 |270|
    |芳芳 |B |92 |92 |
    |明明 |B |78 |248|
    |亮亮 |B |78 |248|
    |晶晶 |B |75 |323|
    +------------+----------+-----+---+
    */
    sql(
      """
        | select student_name, class_name, score, sum(score) over(partition by class_name order by score desc) sum from temp_b
      """.stripMargin).show(false)
    println("已班级分组成绩求和,不带排序:")
    // Plain group total per class: sum over an unordered window.
    /**
    +------------+----------+-----+--------+
    |student_name|class_name|score|classsum|
    +------------+----------+-----+--------+
    |张三 |A |90 |270 |
    |李四 |A |95 |270 |
    |王五 |A |85 |270 |
    |芳芳 |B |92 |323 |
    |明明 |B |78 |323 |
    |亮亮 |B |78 |323 |
    |晶晶 |B |75 |323 |
    +------------+----------+-----+--------+
    */
    sql(
      """
        | select student_name, class_name, score, sum(score) over(partition by class_name) classsum from temp_b
      """.stripMargin).show(false)
    // Summary: an ordered partition sum yields a running total, while an
    // unordered partition sum yields the plain per-group total.
  }
}
| tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/sql/hive/WindowsOptions.scala | Scala | apache-2.0 | 5,793 |
/*
* Copyright (c) 2013 Typelevel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.typelevel.discipline
/**
 * A reusable boolean test over values of `A`, usable anywhere a plain
 * `A => Boolean` is expected.
 */
trait Predicate[A] extends (A => Boolean) {
  def apply(a: A): Boolean

  /** Conjunction: holds exactly when both this predicate and `that` hold. */
  def &&(that: Predicate[A]): Predicate[A] =
    Predicate(a => apply(a) && that(a))
}

object Predicate {
  /** Lifts a plain function into a [[Predicate]]. */
  def apply[A](f: A => Boolean): Predicate[A] =
    new Predicate[A] {
      def apply(a: A): Boolean = f(a)
    }

  /** A predicate that ignores its input and always answers `res`. */
  def const[A](res: Boolean): Predicate[A] =
    apply(_ => res)
}
// vim: expandtab:ts=2:sw=2
| typelevel/discipline | core/src/main/scala/org/typelevel/discipline/Predicate.scala | Scala | mit | 1,522 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import org.scalactic.Equality
import org.scalactic.{Pass, Fail}
import org.scalactic.{Good, Bad}
import org.scalatest._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import scala.collection.mutable.WrappedArray
import OptionValues._
import scala.util.{Failure, Success, Try}
trait NegZIntSpecSupport {

  /**
   * Equality for `Try` used by the property checks.
   *
   * Successes are compared with `==`, except that `Success(NaN)` equals
   * `Success(NaN)` for both Double and Float (NaN != NaN would otherwise
   * fail the comparison). Failures are equal when their exceptions have the
   * same class and message.
   */
  implicit def tryEquality[T]: Equality[Try[T]] = new Equality[Try[T]] {
    override def areEqual(a: Try[T], b: Any): Boolean = a match {
      case Success(double: Double) if double.isNaN => // This is because in scala.js x/0 results to NaN not ArithmetricException like in jvm, and we need to make sure Success(NaN) == Success(NaN) is true to pass the test.
        b match {
          case Success(bDouble: Double) if bDouble.isNaN => true
          case _ => false
        }
      // I needed this because with GenDrivenPropertyChecks, got:
      // [info] - should offer a '%' method that is consistent with Int *** FAILED ***
      // [info] Success(NaN) did not equal Success(NaN) (PosIntExperiment.scala:498)
      case Success(float: Float) if float.isNaN =>
        b match {
          case Success(bFloat: Float) if bFloat.isNaN => true
          case _ => false
        }
      case _: Success[_] => a == b
      case Failure(ex) => b match {
        case _: Success[_] => false
        case Failure(otherEx) => ex.getClass == otherEx.getClass && ex.getMessage == otherEx.getMessage
        case _ => false
      }
    }
  }
}
class NegZIntSpec extends FunSpec with Matchers with GeneratorDrivenPropertyChecks with NegZIntSpecSupport {
describe("A NegZInt") {
describe("should offer a from factory method that") {
it("returns Some[NegZInt] if the passed Int is lesser than or equal to 0")
{
NegZInt.from(0).value.value shouldBe 0
NegZInt.from(-50).value.value shouldBe -50
NegZInt.from(-100).value.value shouldBe -100
}
it("returns None if the passed Int is NOT lesser than or equal to 0") {
NegZInt.from(1) shouldBe None
NegZInt.from(99) shouldBe None
}
}
describe("should offer an ensuringValid factory method that") {
it("returns NegZInt if the passed Int is lesser than or equal to 0")
{
NegZInt.ensuringValid(0).value shouldBe 0
NegZInt.ensuringValid(-50).value shouldBe -50
NegZInt.ensuringValid(-100).value shouldBe -100
}
it("throws AssertionError if the passed Int is NOT lesser than or equal to 0") {
an [AssertionError] should be thrownBy NegZInt.ensuringValid(1)
an [AssertionError] should be thrownBy NegZInt.ensuringValid(99)
}
}
describe("should offer a tryingValid factory method that") {
import TryValues._
it("returns a NegZInt wrapped in a Success if the passed Int is lesser than or equal 0") {
NegZInt.tryingValid(-0).success.value.value shouldBe -0
NegZInt.tryingValid(-50).success.value.value shouldBe -50
NegZInt.tryingValid(-100).success.value.value shouldBe -100
}
it("returns an AssertionError wrapped in a Failure if the passed Int is greater than 0") {
NegZInt.tryingValid(1).failure.exception shouldBe an [AssertionError]
NegZInt.tryingValid(99).failure.exception shouldBe an [AssertionError]
}
}
describe("should offer a passOrElse factory method that") {
it("returns a Pass if the given Int is lesser than or equal 0") {
NegZInt.passOrElse(0)(i => i) shouldBe Pass
NegZInt.passOrElse(-50)(i => i) shouldBe Pass
NegZInt.passOrElse(-100)(i => i) shouldBe Pass
}
it("returns an error value produced by passing the given Int to the given function if the passed Int is greater than 0, wrapped in a Fail") {
NegZInt.passOrElse(1)(i => i) shouldBe Fail(1)
NegZInt.passOrElse(99)(i => i.toLong + 3L) shouldBe Fail(102L)
}
}
describe("should offer a goodOrElse factory method that") {
it("returns a NegZInt wrapped in a Good if the given Int is lesser than or equal 0") {
NegZInt.goodOrElse(-0)(i => i) shouldBe Good(NegZInt(-0))
NegZInt.goodOrElse(-50)(i => i) shouldBe Good(NegZInt(-50))
NegZInt.goodOrElse(-100)(i => i) shouldBe Good(NegZInt(-100))
}
it("returns an error value produced by passing the given Int to the given function if the passed Int is greater than 0, wrapped in a Bad") {
NegZInt.goodOrElse(1)(i => i) shouldBe Bad(1)
NegZInt.goodOrElse(99)(i => i.toLong + 3L) shouldBe Bad(102L)
}
}
describe("should offer a rightOrElse factory method that") {
it("returns a NegZInt wrapped in a Right if the given Int is lesser than or equal 0") {
NegZInt.rightOrElse(0)(i => i) shouldBe Right(NegZInt(0))
NegZInt.rightOrElse(-50)(i => i) shouldBe Right(NegZInt(-50))
NegZInt.rightOrElse(-100)(i => i) shouldBe Right(NegZInt(-100))
}
it("returns an error value produced by passing the given Int to the given function if the passed Int is greater than 0, wrapped in a Left") {
NegZInt.rightOrElse(1)(i => i) shouldBe Left(1)
NegZInt.rightOrElse(99)(i => i.toLong + 3L) shouldBe Left(102L)
}
}
describe("should offer an isValid predicate method that") {
it("returns true if the passed Int is lesser than or equal to 0") {
NegZInt.isValid(-50) shouldBe true
NegZInt.isValid(-100) shouldBe true
NegZInt.isValid(0) shouldBe true
NegZInt.isValid(-0) shouldBe true
NegZInt.isValid(1) shouldBe false
NegZInt.isValid(99) shouldBe false
}
}
describe("should offer a fromOrElse factory method that") {
it("returns a NegZInt if the passed Int is lesser than or equal to 0") {
NegZInt.fromOrElse(-50, NegZInt(-42)).value shouldBe -50
NegZInt.fromOrElse(-100, NegZInt(-42)).value shouldBe -100
NegZInt.fromOrElse(0, NegZInt(-42)).value shouldBe 0
}
it("returns a given default if the passed Int is NOT greater than 0") {
NegZInt.fromOrElse(1, NegZInt(-42)).value shouldBe -42
NegZInt.fromOrElse(99, NegZInt(-42)).value shouldBe -42
}
}
it("should offer MaxValue and MinValue factory methods") {
NegZInt.MaxValue shouldEqual NegZInt.from(0).get
NegZInt.MinValue shouldEqual NegZInt.from(Int.MinValue).get
}
it("should be sortable") {
val xs = List(NegZInt(-2), NegZInt(-0), NegZInt(-1), NegZInt(-3))
xs.sorted shouldEqual List(NegZInt(-3), NegZInt(-2), NegZInt(-1), NegZInt(0))
}
describe("when created with apply method") {
it("should compile when -8 is passed in") {
"NegZInt(-8)" should compile
NegZInt(-8).value shouldEqual -8
}
it("should compile when 0 is passed in") {
"NegZInt(0)" should compile
NegZInt(0).value shouldEqual 0
}
it("should not compile when 8 is passed in") {
"NegZInt(8)" shouldNot compile
}
it("should not compile when x is passed in") {
val x: Int = -8
"NegZInt(x)" shouldNot compile
}
}
describe("when specified as a plain-old Int") {
  // Parameter renamed from the misleading `pos` to `neg`: this spec is about
  // NegZInt, a non-positive type.
  def takesNegZInt(neg: NegZInt): Int = neg.value
  it("should compile when -8 is passed in") {
    "takesNegZInt(-8)" should compile
    takesNegZInt(-8) shouldEqual -8
  }
  it("should compile when 0 is passed in") {
    "takesNegZInt(0)" should compile
  }
  it("should not compile when 8 is passed in") {
    "takesNegZInt(8)" shouldNot compile
  }
  it("should not compile when x is passed in") {
    // Implicit widening from Int is only available for literals.
    val x: Int = -8
    "takesNegZInt(x)" shouldNot compile
  }
}
it("should offer a unary ~ method that is consistent with Int") {
  forAll { (pzint: NegZInt) =>
    (~pzint) shouldEqual (~(pzint.toInt))
  }
}
it("should offer a unary + method that is consistent with Int") {
  forAll { (p: NegZInt) =>
    (+p).toInt shouldEqual (+(p.toInt))
  }
}
it("should offer a unary - method that returns PosZInt") {
  // Compares against plain Int negation for every generated value.
  forAll { (p: NegZInt) =>
    (-p) shouldEqual (-(p.toInt))
  }
}
it("should offer << methods that are consistent with Int") {
  // Shift distances are arbitrary Ints/Longs; the NegZInt overloads must
  // match whatever Int does with them (including out-of-range distances).
  forAll { (pzint: NegZInt, shift: Int) =>
    pzint << shift shouldEqual pzint.toInt << shift
  }
  forAll { (pzint: NegZInt, shift: Long) =>
    pzint << shift shouldEqual pzint.toInt << shift
  }
}
it("should offer >>> methods that are consistent with Int") {
  forAll { (pzint: NegZInt, shift: Int) =>
    pzint >>> shift shouldEqual pzint.toInt >>> shift
  }
  forAll { (pzint: NegZInt, shift: Long) =>
    pzint >>> shift shouldEqual pzint.toInt >>> shift
  }
}
it("should offer >> methods that are consistent with Int") {
  forAll { (pzint: NegZInt, shift: Int) =>
    pzint >> shift shouldEqual pzint.toInt >> shift
  }
  forAll { (pzint: NegZInt, shift: Long) =>
    pzint >> shift shouldEqual pzint.toInt >> shift
  }
}
it("should offer a '|' method that is consistent with Int") {
  // Each bitwise operator is checked against every numeric width Int
  // supports on its right-hand side.
  forAll { (pzint: NegZInt, byte: Byte) =>
    (pzint | byte) shouldEqual (pzint.toInt | byte)
  }
  forAll { (pzint: NegZInt, short: Short) =>
    (pzint | short) shouldEqual (pzint.toInt | short)
  }
  forAll { (pzint: NegZInt, char: Char) =>
    (pzint | char) shouldEqual (pzint.toInt | char)
  }
  forAll { (pzint: NegZInt, int: Int) =>
    (pzint | int) shouldEqual (pzint.toInt | int)
  }
  forAll { (pzint: NegZInt, long: Long) =>
    (pzint | long) shouldEqual (pzint.toInt | long)
  }
}
it("should offer an '&' method that is consistent with Int") {
  forAll { (pzint: NegZInt, byte: Byte) =>
    (pzint & byte) shouldEqual (pzint.toInt & byte)
  }
  forAll { (pzint: NegZInt, short: Short) =>
    (pzint & short) shouldEqual (pzint.toInt & short)
  }
  forAll { (pzint: NegZInt, char: Char) =>
    (pzint & char) shouldEqual (pzint.toInt & char)
  }
  forAll { (pzint: NegZInt, int: Int) =>
    (pzint & int) shouldEqual (pzint.toInt & int)
  }
  forAll { (pzint: NegZInt, long: Long) =>
    (pzint & long) shouldEqual (pzint.toInt & long)
  }
}
it("should offer an '^' method that is consistent with Int") {
  forAll { (pzint: NegZInt, byte: Byte) =>
    (pzint ^ byte) shouldEqual (pzint.toInt ^ byte)
  }
  forAll { (pzint: NegZInt, char: Char) =>
    (pzint ^ char) shouldEqual (pzint.toInt ^ char)
  }
  forAll { (pzint: NegZInt, short: Short) =>
    (pzint ^ short) shouldEqual (pzint.toInt ^ short)
  }
  forAll { (pzint: NegZInt, int: Int) =>
    (pzint ^ int) shouldEqual (pzint.toInt ^ int)
  }
  forAll { (pzint: NegZInt, long: Long) =>
    (pzint ^ long) shouldEqual (pzint.toInt ^ long)
  }
}
it("should offer 'min' and 'max' methods that are consistent with Int") {
  forAll { (pzint1: NegZInt, pzint2: NegZInt) =>
    pzint1.max(pzint2).toInt shouldEqual pzint1.toInt.max(pzint2.toInt)
    pzint1.min(pzint2).toInt shouldEqual pzint1.toInt.min(pzint2.toInt)
  }
}
it("should offer a 'toBinaryString' method that is consistent with Int") {
  forAll { (pzint: NegZInt) =>
    pzint.toBinaryString shouldEqual pzint.toInt.toBinaryString
  }
}
it("should offer a 'toHexString' method that is consistent with Int") {
  forAll { (pzint: NegZInt) =>
    pzint.toHexString shouldEqual pzint.toInt.toHexString
  }
}
it("should offer a 'toOctalString' method that is consistent with Int") {
  forAll { (pzint: NegZInt) =>
    pzint.toOctalString shouldEqual pzint.toInt.toOctalString
  }
}
it("should offer 'to' and 'until' methods that are consistent with Int") {
  // In Scala 2.10, Range equality (used by shouldEqual) calls range.length,
  // which throws IllegalArgumentException for an invalid range; from 2.11 the
  // equality implementation does not call .length. Forcing .length on every
  // returned Range makes invalid ranges fail uniformly on all Scala versions,
  // turning the surrounding Try into a Failure on both sides of the equality.
  def ensuringValid(range: Range): Range = {
    range.length // throws IllegalArgumentException for an invalid range
    range
  }
  forAll { (pzint: NegZInt, end: Int, step: Int) =>
    // Fix: added the missing space before shouldEqual on the first line,
    // matching the formatting of the three assertions below.
    Try(ensuringValid(pzint.to(end))) shouldEqual Try(ensuringValid(pzint.toInt.to(end)))
    Try(ensuringValid(pzint.to(end, step))) shouldEqual Try(ensuringValid(pzint.toInt.to(end, step)))
    Try(ensuringValid(pzint.until(end))) shouldEqual Try(ensuringValid(pzint.toInt.until(end)))
    Try(ensuringValid(pzint.until(end, step))) shouldEqual Try(ensuringValid(pzint.toInt.until(end, step)))
  }
}
it("should offer an ensuringValid method that takes an Int => Int, throwing AssertionError if the result is invalid") {
  NegZInt(-33).ensuringValid(_ + 1) shouldEqual NegZInt(-32)
  // MaxValue is 0, so adding 1 yields a positive (invalid) result.
  an [AssertionError] should be thrownBy { NegZInt.MaxValue.ensuringValid(_ + 1) }
}
}
} | dotty-staging/scalatest | scalactic-test/src/test/scala/org/scalactic/anyvals/NegZIntSpec.scala | Scala | apache-2.0 | 14,075 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
trait BuildTargetIdentifierFormats { self: sjsonnew.BasicJsonProtocol =>
// NOTE: this file is generated by sbt-contraband; regeneration will discard
// manual edits, so only behaviour-neutral comments are added here.
implicit lazy val BuildTargetIdentifierFormat: JsonFormat[sbt.internal.bsp.BuildTargetIdentifier] = new JsonFormat[sbt.internal.bsp.BuildTargetIdentifier] {
  // Reads a JSON object of the form { "uri": <URI> } into a BuildTargetIdentifier.
  override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.internal.bsp.BuildTargetIdentifier = {
    __jsOpt match {
      case Some(__js) =>
        unbuilder.beginObject(__js)
        val uri = unbuilder.readField[java.net.URI]("uri")
        unbuilder.endObject()
        sbt.internal.bsp.BuildTargetIdentifier(uri)
      case None =>
        deserializationError("Expected JsObject but found None")
    }
  }
  // Writes the identifier back out as { "uri": <URI> }.
  override def write[J](obj: sbt.internal.bsp.BuildTargetIdentifier, builder: Builder[J]): Unit = {
    builder.beginObject()
    builder.addField("uri", obj.uri)
    builder.endObject()
  }
}
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/bsp/codec/BuildTargetIdentifierFormats.scala | Scala | apache-2.0 | 1,077 |
package pspz3
package context
import com.microsoft.z3
trait Constructors extends AnyRef with HasContext {
// Catalogue of z3 Context factory methods that could be wrapped here later:
// Constructor mkConstructor(String name, String recognizer, String[] fieldNames, Sort[] sorts, int[] sortRefs)
// Fixedpoint mkFixedpoint()
// Goal mkGoal(boolean models, boolean unsatCores, boolean proofs)
// IntSymbol mkSymbol(int i)
// Optimize mkOptimize()
// Params mkParams()
// Pattern mkPattern(Expr... terms)
// Solver mkSolver(String logic)
// StringSymbol mkSymbol(String name)
// Solver mkSimpleSolver()
// Solver mkSolver()
/** Creates a fresh wrapped Solver backed by this context's default z3 solver. */
def solver: Solver = new Solver(ctx.mkSolver())
}
| paulp/pspz3 | src/main/scala/context/constructors.scala | Scala | mit | 621 |
package com.github.sstone.amqp
import Amqp._
import akka.actor._
import akka.event.LoggingReceive
import akka.pattern.ask
import akka.util.Timeout
import com.rabbitmq.client.{Connection, ShutdownSignalException, ShutdownListener, ConnectionFactory, Address => RMQAddress}
import scala.concurrent.{ExecutionContext, Await}
import concurrent.duration._
import java.util.concurrent.ExecutorService
import scala.util.{Failure, Success, Try}
import collection.JavaConversions._
object ConnectionOwner {

// Connection state, broadcast to registered status listeners.
sealed trait State
case object Disconnected extends State
case object Connected extends State

// Request to create a "channel aware" child actor under the connection owner.
case class Create(props: Props, name: Option[String] = None)
case object CreateChannel

def props(connFactory: ConnectionFactory, reconnectionDelay: FiniteDuration = 10000 millis,
          executor: Option[ExecutorService] = None, addresses: Option[Array[RMQAddress]] = None): Props = Props(new ConnectionOwner(connFactory, reconnectionDelay, executor, addresses))

// NOTE: blocks the calling thread until the child is created (Await.result);
// intended for use at application bootstrap, not from inside an actor.
def createChildActor(conn: ActorRef, channelOwner: Props, name: Option[String] = None, timeout: Timeout = 5000.millis): ActorRef = {
  val future = conn.ask(Create(channelOwner, name))(timeout).mapTo[ActorRef]
  Await.result(future, timeout.duration)
}

/**
 * creates an amqp uri from a ConnectionFactory. From the specs:
 * <ul>
 * <li>amqp_URI = "amqp://" amqp_authority [ "/" vhost ]</li>
 * <li>amqp_authority = [ amqp_userinfo "@" ] host [ ":" port ]</li>
 * <li>amqp_userinfo = username [ ":" password ]</li>
 * </ul>
 * @param cf connection factory
 * @return an amqp uri
 */
def toUri(cf: ConnectionFactory): String = {
  "amqp://%s:%s@%s:%d/%s".format(cf.getUsername, cf.getPassword, cf.getHost, cf.getPort, cf.getVirtualHost)
}

// Convenience builder pre-populated with the standard RabbitMQ defaults.
def buildConnFactory(host: String = "localhost", port: Int = 5672, vhost: String = "/", user: String = "guest", password: String = "guest"): ConnectionFactory = {
  val connFactory = new ConnectionFactory()
  connFactory.setHost(host)
  connFactory.setPort(port)
  connFactory.setVirtualHost(vhost)
  connFactory.setUsername(user)
  connFactory.setPassword(password)
  connFactory
}
}
/**
* ConnectionOwner class, which holds an AMQP connection and handles re-connection
* It is implemented as a state machine which 2 possible states
* <ul>
* <li>Disconnected, and it will try to connect to the broker at regular intervals</li>
* <li>Connected; it is then holding a connection
* </ul>
* Connection owner is responsible for creating "channel aware" actor (channel are like virtual connections,
* which are multiplexed on the underlying connection). The parent connection owner will automatically tell
* its children when the connection is lost, and send them new channels when it comes back on.
* YMMV, but it is a good practice to have few connections and several channels per connection
* @param connFactory connection factory
* @param reconnectionDelay delay between reconnection attempts
*/
class ConnectionOwner(connFactory: ConnectionFactory,
                      reconnectionDelay: FiniteDuration = 10000 millis,
                      executor: Option[ExecutorService] = None,
                      addresses: Option[Array[RMQAddress]] = None) extends Actor with ActorLogging {

  import ConnectionOwner._
  import context.dispatcher

  // Current broker connection; None while disconnected.
  var connection: Option[Connection] = None

  // Actors that asked to be notified of Connected/Disconnected transitions.
  val statusListeners = collection.mutable.HashSet.empty[ActorRef]

  // Periodic 'connect tick: drives reconnection attempts while disconnected
  // and is ignored while connected.
  val reconnectTimer = context.system.scheduler.schedule(10 milliseconds, reconnectionDelay, self, 'connect)

  override def postStop(): Unit = {
    // Fix: cancel the reconnection timer; without this the scheduled task
    // keeps firing into dead letters forever after the actor stops.
    reconnectTimer.cancel()
    connection.foreach(c => Try(c.close()))
  }

  override def unhandled(message: Any): Unit = message match {
    case Terminated(actor) if statusListeners.contains(actor) => {
      context.unwatch(actor)
      statusListeners.remove(actor)
    }
    case _ => super.unhandled(message)
  }

  /**
   * ask this connection owner to create a "channel aware" child
   * @param props actor creation properties
   * @param name optional actor name
   * @return a new actor
   */
  private def createChild(props: Props, name: Option[String]) = {
    name match {
      case None => context.actorOf(props)
      case Some(actorName) => context.actorOf(props, actorName)
    }
  }

  /** Opens a broker connection and registers a shutdown listener that reports
    * connection loss back to this actor and to the status listeners. */
  def createConnection: Connection = {
    val conn = (executor, addresses) match {
      case (None, None) => connFactory.newConnection()
      case (Some(ex), None) => connFactory.newConnection(ex)
      case (None, Some(addr)) => connFactory.newConnection(addr)
      case (Some(ex), Some(addr)) => connFactory.newConnection(ex, addr)
    }
    conn.addShutdownListener(new ShutdownListener {
      def shutdownCompleted(cause: ShutdownSignalException) {
        self ! Shutdown(cause)
        // foreach instead of map: only the notification side effect matters
        statusListeners.foreach(a => a ! Disconnected)
      }
    })
    conn
  }

  // start in disconnected mode
  def receive = disconnected

  def disconnected: Receive = LoggingReceive {
    // try to (re)connect to the broker on every timer tick
    case 'connect => {
      log.debug(s"trying to connect ${toUri(connFactory)}")
      Try(createConnection) match {
        case Success(conn) => {
          log.info(s"connected to ${toUri(connFactory)}")
          statusListeners.foreach(a => a ! Connected)
          connection = Some(conn)
          // hand every child a fresh channel so it can resume work
          context.children.foreach(_ ! conn.createChannel())
          context.become(connected(conn))
        }
        case Failure(cause) => {
          log.error(cause, "connection failed")
        }
      }
    }
    // add a status listener that will be sent Disconnected and Connected messages
    case AddStatusListener(listener) => addStatusListener(listener)
    // create a "channel aware" child actor; it will receive a channel upon connection
    case Create(props, name) => {
      val child = createChild(props, name)
      log.debug("creating child {} while in disconnected state", child)
      sender ! child
    }
  }

  def connected(conn: Connection): Receive = LoggingReceive {
    case 'connect => ()
    case Amqp.Ok(_, _) => ()
    case Abort(code, message) => {
      conn.abort(code, message)
      context.stop(self)
    }
    case Close(code, message, timeout) => {
      conn.close(code, message, timeout)
      context.stop(self)
    }
    case CreateChannel => Try(conn.createChannel()) match {
      case Success(channel) => sender ! channel
      case Failure(cause) => {
        log.error(cause, "cannot create channel")
        // a channel-creation failure usually means the connection is gone
        context.become(disconnected)
      }
    }
    case AddStatusListener(listener) => {
      addStatusListener(listener)
      listener ! Connected
    }
    case Create(props, name) => {
      sender ! createChild(props, name)
    }
    case Shutdown(cause) => {
      log.error(cause, "connection lost")
      connection = None
      context.children.foreach(_ ! Shutdown(cause))
      self ! 'connect
      context.become(disconnected)
    }
  }

  private def addStatusListener(listener: ActorRef): Unit = {
    if (!statusListeners.contains(listener)) {
      // watch so the listener is removed when it terminates (see unhandled)
      context.watch(listener)
      statusListeners.add(listener)
    }
  }
}
| sstone/amqp-client | src/main/scala/com/github/sstone/amqp/ConnectionOwner.scala | Scala | mit | 7,135 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import scala.collection.JavaConverters._
import org.apache.avro.{Schema, SchemaBuilder}
import org.apache.avro.Schema.Type._
import org.apache.spark.sql.types._
/**
* This object contains method that are used to convert sparkSQL schemas to avro schemas and vice
* versa.
*/
object SchemaConverters {
  /** A Spark SQL type together with its nullability, as derived from Avro. */
  case class SchemaType(dataType: DataType, nullable: Boolean)

  /**
   * This function takes an avro schema and returns a sql schema.
   */
  def toSqlType(avroSchema: Schema): SchemaType = {
    avroSchema.getType match {
      case INT => SchemaType(IntegerType, nullable = false)
      case STRING => SchemaType(StringType, nullable = false)
      case BOOLEAN => SchemaType(BooleanType, nullable = false)
      case BYTES => SchemaType(BinaryType, nullable = false)
      case DOUBLE => SchemaType(DoubleType, nullable = false)
      case FLOAT => SchemaType(FloatType, nullable = false)
      case LONG => SchemaType(LongType, nullable = false)
      case FIXED => SchemaType(BinaryType, nullable = false)
      case ENUM => SchemaType(StringType, nullable = false)

      case RECORD =>
        val fields = avroSchema.getFields.asScala.map { f =>
          val schemaType = toSqlType(f.schema())
          StructField(f.name, schemaType.dataType, schemaType.nullable)
        }
        SchemaType(StructType(fields), nullable = false)

      case ARRAY =>
        val schemaType = toSqlType(avroSchema.getElementType)
        SchemaType(
          ArrayType(schemaType.dataType, containsNull = schemaType.nullable),
          nullable = false)

      case MAP =>
        // Avro map keys are always strings.
        val schemaType = toSqlType(avroSchema.getValueType)
        SchemaType(
          MapType(StringType, schemaType.dataType, valueContainsNull = schemaType.nullable),
          nullable = false)

      case UNION =>
        if (avroSchema.getTypes.asScala.exists(_.getType == NULL)) {
          // In case of a union with null, eliminate it and make a recursive call
          val remainingUnionTypes = avroSchema.getTypes.asScala.filterNot(_.getType == NULL)
          if (remainingUnionTypes.size == 1) {
            toSqlType(remainingUnionTypes.head).copy(nullable = true)
          } else {
            toSqlType(Schema.createUnion(remainingUnionTypes.asJava)).copy(nullable = true)
          }
        } else avroSchema.getTypes.asScala.map(_.getType) match {
          case Seq(t1) =>
            toSqlType(avroSchema.getTypes.get(0))
          case Seq(t1, t2) if Set(t1, t2) == Set(INT, LONG) =>
            // int|long unions widen to long
            SchemaType(LongType, nullable = false)
          case Seq(t1, t2) if Set(t1, t2) == Set(FLOAT, DOUBLE) =>
            // float|double unions widen to double
            SchemaType(DoubleType, nullable = false)
          case _ =>
            // Convert complex unions to struct types where field names are member0, member1, etc.
            // This is consistent with the behavior when converting between Avro and Parquet.
            val fields = avroSchema.getTypes.asScala.zipWithIndex.map {
              case (s, i) =>
                val schemaType = toSqlType(s)
                // All fields are nullable because only one of them is set at a time
                StructField(s"member$i", schemaType.dataType, nullable = true)
            }
            SchemaType(StructType(fields), nullable = false)
        }

      case other => throw new IncompatibleSchemaException(s"Unsupported type $other")
    }
  }

  /**
   * Converts a Spark SQL type to the corresponding Avro schema.
   *
   * @param catalystType the Spark SQL type to convert
   * @param nullable when true, the resulting Avro schema is a union with null
   * @param recordName name used for a generated Avro record at this level
   * @param prevNameSpace namespace of the enclosing record ("" at top level)
   */
  def toAvroType(
      catalystType: DataType,
      nullable: Boolean = false,
      recordName: String = "topLevelRecord",
      prevNameSpace: String = ""): Schema = {
    val builder = if (nullable) {
      SchemaBuilder.builder().nullable()
    } else {
      SchemaBuilder.builder()
    }
    catalystType match {
      case BooleanType => builder.booleanType()
      case ByteType | ShortType | IntegerType => builder.intType()
      case LongType => builder.longType()
      case DateType => builder.longType()
      case TimestampType => builder.longType()
      case FloatType => builder.floatType()
      case DoubleType => builder.doubleType()
      case _: DecimalType | StringType => builder.stringType()
      case BinaryType => builder.bytesType()
      case ArrayType(et, containsNull) =>
        builder.array().items(toAvroType(et, containsNull, recordName, prevNameSpace))
      case MapType(StringType, vt, valueContainsNull) =>
        builder.map().values(toAvroType(vt, valueContainsNull, recordName, prevNameSpace))
      case st: StructType =>
        // Fix: do not prepend a dot when there is no parent namespace;
        // the old code produced the invalid namespace ".topLevelRecord"
        // for top-level records.
        val nameSpace = if (prevNameSpace == "") recordName else s"$prevNameSpace.$recordName"
        val fieldsAssembler = builder.record(recordName).namespace(nameSpace).fields()
        st.foreach { f =>
          val fieldAvroType = toAvroType(f.dataType, f.nullable, f.name, nameSpace)
          fieldsAssembler.name(f.name).`type`(fieldAvroType).noDefault()
        }
        fieldsAssembler.endRecord()

      // This should never happen.
      case other => throw new IncompatibleSchemaException(s"Unexpected type $other.")
    }
  }
}
class IncompatibleSchemaException(msg: String, ex: Throwable = null) extends Exception(msg, ex)
| tejasapatil/spark | external/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala | Scala | apache-2.0 | 5,878 |
package me.rjfarmer.rlh.client.local
import me.rjfarmer.rlh.api.{CharInfo, ListCharactersResponse, HasTimestamp}
import me.rjfarmer.rlh.client.{LittleHelper, HasResponseTimeAgo}
import me.rjfarmer.rlh.shared.SharedConfig
import org.scalajs.dom
import org.scalajs.dom.raw.HTMLElement
import scala.scalajs.js
import scalatags.JsDom.TypedTag
import scalatags.JsDom.all._
object LocalDetailsView extends HasResponseTimeAgo {

  // DOM nodes updated in place by update() below.
  val pilotCount = span("0 pilots, ").render
  val solarSystem = span().render
  val corpList = tbody().render
  val pilotList = tbody().render

  /** cache key of current result, if any */
  var resultCacheKey: Option[String] = None

  val resultUrlBox = input(cls := "pure-input-2-3", `type` := "text", readonly).render
  resultUrlBox.onfocus = selectAllOnFocus _

  // Select the whole URL shortly after focus so the user can copy it easily.
  def selectAllOnFocus(ev: dom.Event) = {
    js.timers.setTimeout(50.0d)(resultUrlBox.select())
  }

  /** CSS class describing how old a timestamped result is, relative to the
    * configured staleness threshold. */
  def freshnessKlass(nowMillis: Long, wsr: HasTimestamp): String = {
    val relativeAge = (nowMillis - wsr.timestamp) / SharedConfig.client.staleOlderThanMillis.toDouble
    if (relativeAge < 0.5d) {
      "fresh"
    } else if (relativeAge < 1.0d) {
      "getting-stale"
    } else if (relativeAge < 2.0d) {
      "stale"
    } else {
      "out-of-date"
    }
  }

  /** Link to the pilot's zKillboard page, or plain text when the character id
    * is unknown. */
  def zkillboardLink(p: CharInfo): TypedTag[HTMLElement] = {
    p.characterID match {
      case None =>
        span(p.name)
      case Some(characterId) =>
        a(href := s"""https://zkillboard.com/character/$characterId/""", target := "_blank", p.name)
    }
  }

  /** Re-renders the whole local view from a fresh response. */
  def update(resp: ListCharactersResponse) = {
    resultCacheKey = resp.cacheKey
    // Fix: the original called resp.cacheKey.get unconditionally, which throws
    // NoSuchElementException for a response without a cache key.
    resp.cacheKey.foreach { key =>
      LittleHelper.setLocationFragment(s"#localTab/$key")
    }
    resultUrlBox.value = LittleHelper.getLocationUrl

    val pilots = resp.charinfos
    pilotCount.innerHTML = s"${resp.charinfos.size} pilots, "
    updateResponseTimestamp(resp.timestamp)
    refreshResponseTimeAgo()
    solarSystem.innerHTML = ""
    resp.solarSystem match {
      case None =>
      case Some(ssn) => solarSystem.appendChild(span(ssn, ", ").render)
    }

    // a group is "top" when it holds at least 10% of local (minimum 2 pilots)
    val cutoff = math.max(2.0d, pilots.size / 10.0d)
    val byCorp: Map[AllianceOrCorp, Seq[CharInfo]] = pilots.groupBy(AllianceOrCorp.apply)
    val topCorps: Seq[Seq[CharInfo]] = byCorp.values.toSeq
      .filter(group => group.size >= cutoff)
      .sortWith((a, b) => a.size > b.size)
    val other = pilots.size - topCorps.map(_.size).sum

    corpList.innerHTML = ""
    for (group <- topCorps; aoc = AllianceOrCorp(group.head)) {
      corpList.appendChild(tr(
        td(a(href := aoc.uri, target := "_blank", aoc.name)),
        td(group.size)).render)
    }
    if (other > 0) {
      corpList.appendChild(tr(
        td("Other"),
        td(other)).render)
    }

    pilotList.innerHTML = ""
    val nowMillis = System.currentTimeMillis()
    for (p <- pilots; frKlass = freshnessKlass(nowMillis, p); corp = AllianceOrCorp(p)) {
      val trow = tr(
        td(zkillboardLink(p)),
        td(corp.name, " (", byCorp(corp).size, ")"),
        td(p.recentKills.getOrElse(0) + "/" + p.recentLosses.getOrElse(0)),
        td(span("%4.2f".format(p.characterAge.getOrElse(-1.0d))),
          span(`class` := frKlass, title := frKlass.replace('-', ' '), style := "font-size: 150%", raw("•")))
      ).render
      pilotList.appendChild(trow)
    }
  }
}
// A pilot's affiliation for grouping purposes: alliance if known, otherwise
// corporation, otherwise just the pilot itself.
trait AllianceOrCorp {

  def name: String

  // evewho URL path segment for this affiliation kind ("alli"/"corp"/"pilot").
  def typ: String

  def uri: String = {
    s"http://evewho.com/$typ/$name"
  }
}
final case class Alliance(name: String) extends AllianceOrCorp {
  val typ: String = "alli"
}

final case class Corp(name: String) extends AllianceOrCorp {
  val typ: String = "corp"
}

// if the rest api is slow, alliance or corp might not be known
final case class Unknown(name: String) extends AllianceOrCorp {
  val typ: String = "pilot"
}
object AllianceOrCorp {

  /** Classifies a pilot: alliance wins over corporation; when neither is
    * known (e.g. the REST lookup has not completed) the pilot stands alone.
    * Rewritten as a pattern match, removing the asInstanceOf cast the nested
    * fold version needed. */
  def apply(ci: CharInfo): AllianceOrCorp = {
    (ci.alliance, ci.corporation) match {
      case (Some(alliance), _) => Alliance(alliance)
      case (None, Some(corp))  => Corp(corp)
      case (None, None)        => Unknown(ci.name)
    }
  }
}
| random-j-farmer/little-helper | app/js/src/main/scala/me/rjfarmer/rlh/client/local/LocalDetailsView.scala | Scala | mit | 4,038 |
package global
import java.lang.reflect.Modifier
import scala.collection.JavaConverters._
import play.api.libs.concurrent.AkkaGuiceSupport
import play.api.{Configuration, Environment, Logger, Mode}
import com.google.inject.AbstractModule
import com.google.inject.name.Names
import connectors.Connector
import controllers.{AssetCtrl, AssetCtrlDev, AssetCtrlProd}
import models.Migration
import net.codingwell.scalaguice.{ScalaModule, ScalaMultibinder}
import org.reflections.Reflections
import org.reflections.scanners.SubTypesScanner
import org.reflections.util.ConfigurationBuilder
import services._
import services.mappers.{MultiUserMapperSrv, UserMapper}
import org.elastic4play.models.BaseModelDef
import org.elastic4play.services.auth.MultiAuthSrv
import org.elastic4play.services.{AuthSrv, MigrationOperations, UserSrv ⇒ EUserSrv}
class TheHive(environment: Environment, val configuration: Configuration) extends AbstractModule with ScalaModule with AkkaGuiceSupport {

  // Fix: the logger name was built with a pointless s-interpolator (s"module").
  private[TheHive] lazy val logger = Logger("module")

  override def configure(): Unit = {
    bind[EUserSrv].to[services.UserSrv]
    bind[Int].annotatedWith(Names.named("databaseVersion")).toInstance(models.modelVersion)

    // Multibindings populated by the classpath scans below.
    val modelBindings        = ScalaMultibinder.newSetBinder[BaseModelDef](binder)
    val auditedModelBindings = ScalaMultibinder.newSetBinder[AuditedModel](binder)
    val authBindings         = ScalaMultibinder.newSetBinder[AuthSrv](binder)
    val ssoMapperBindings    = ScalaMultibinder.newSetBinder[UserMapper](binder)

    // Scan elastic4play and the connector packages for concrete implementations.
    val reflectionClasses = new Reflections(
      new ConfigurationBuilder()
        .forPackages("org.elastic4play")
        .forPackages("connectors.cortex")
        .forPackages("connectors.misp")
        .forPackages("connectors.metrics")
        .addClassLoader(getClass.getClassLoader)
        .addClassLoader(environment.getClass.getClassLoader)
        .setExpandSuperTypes(false)
        .setScanners(new SubTypesScanner(false))
    )

    // Register every concrete model; audited models are additionally exposed
    // through the AuditedModel set.
    reflectionClasses
      .getSubTypesOf(classOf[BaseModelDef])
      .asScala
      .filterNot(c ⇒ Modifier.isAbstract(c.getModifiers))
      .foreach { modelClass ⇒
        logger.info(s"Loading model $modelClass")
        modelBindings.addBinding.to(modelClass)
        if (classOf[AuditedModel].isAssignableFrom(modelClass)) {
          auditedModelBindings.addBinding.to(modelClass.asInstanceOf[Class[AuditedModel]])
        }
      }

    // Register every concrete auth provider except the aggregating ones,
    // which are bound explicitly below.
    reflectionClasses
      .getSubTypesOf(classOf[AuthSrv])
      .asScala
      .filterNot(c ⇒ Modifier.isAbstract(c.getModifiers) || c.isMemberClass)
      .filterNot(c ⇒ c == classOf[MultiAuthSrv] || c == classOf[TheHiveAuthSrv])
      .foreach { authSrvClass ⇒
        authBindings.addBinding.to(authSrvClass)
      }

    // Register every concrete SSO user mapper except the aggregating one.
    reflectionClasses
      .getSubTypesOf(classOf[UserMapper])
      .asScala
      .filterNot(c ⇒ Modifier.isAbstract(c.getModifiers) || c.isMemberClass)
      .filterNot(c ⇒ c == classOf[MultiUserMapperSrv])
      .foreach(mapperCls ⇒ ssoMapperBindings.addBinding.to(mapperCls))

    bind[MigrationOperations].to[Migration]
    bind[AuthSrv].to[TheHiveAuthSrv]
    bind[UserMapper].to[MultiUserMapperSrv]

    bindActor[AuditActor]("AuditActor", props = _.withDispatcher("auditTask"))
    bindActor[LocalStreamActor]("localStreamActor")

    // Serve pre-packaged assets in production, on-the-fly assets in dev mode.
    if (environment.mode == Mode.Prod)
      bind[AssetCtrl].to[AssetCtrlProd]
    else
      bind[AssetCtrl].to[AssetCtrlDev]

    // Create the (possibly empty) Connector set binding.
    ScalaMultibinder.newSetBinder[Connector](binder)
    ()
  }
}
| CERT-BDF/TheHive | thehive-backend/app/global/TheHive.scala | Scala | agpl-3.0 | 3,499 |
package io.flow.registry.api.lib
import javax.inject.{Inject, Singleton}
import db.{ApplicationsDao, PortsDao}
import io.flow.postgresql.Authorization
/**
* Parses the name of the application and allocates the default port
* to that application. Basic approach is to recognize a few suffixes
* (e.g. -postgresql) and allocate ports consistently to those
* suffixes, while reserving blocks of 10 ports for each prefix.
*
* Some basic rules we implemented to minimize probability of port
* collissions with external software
*
* - start at 6000 ( > postgresql port )
* - denylist any number ending in 00 (things people randomly are
* more likely to use like 7000, 8000, 9000)
* - create a denylist of well known ports that we encounter
* (e.g. 8080)
*
* The basic algorithm is to grab the prefix of an application and
* then to iterate through existing blocks of ports to find an
* available one. If not found, allocate another block of ports
* and repeat the process.
*
*/
@Singleton
class DefaultPortAllocator @Inject() (
  applicationsDao: ApplicationsDao,
  portsDao: PortsDao
) {

  // Well-known external ports that must never be handed out.
  private[this] val Denylist = Seq(8080L)

  // First candidate port; chosen to sit above the postgresql default (5432).
  private[this] val MinPortNumber = 6000

  // Ports are reserved in contiguous blocks of this size, one block per app prefix.
  private[this] val BlockSize = 10L

  // Fixed in-block offsets for well-known service types; services without an
  // entry here receive the next free non-reserved slot in the block.
  private[this] val defaults = Map[String, Int](
    "nodejs" -> 0,
    "play" -> 1,
    "postgresql" -> 9
  )

  // Strips the last dash-suffix: "splashpage-postgresql" -> "splashpage".
  private[this] def prefix(name: String) = {
    val idx = name.trim.lastIndexOf("-")
    if (idx < 0) {
      name.trim
    } else {
      name.trim.substring(0, idx)
    }
  }

  /**
   * The port offset for this type of application (based on its
   * name). Will be a number >= 0 and <= 9. If specified, we will try
   * to make sure a port is allocated that ends with this
   * number. Otherwise we just generated the next sequential port
   * number.
   */
  def offset(serviceName: String): Option[Int] = defaults.get(serviceName)

  // Distinct, sorted base ports of every block already used by applications
  // sharing this application's prefix.
  private[this] def applicationBasePorts(name: String): Seq[Long] = applicationsDao.findAll(Authorization.All, prefix = Some(prefix(name))).
    flatMap(_.ports).
    map(_.external).
    map(toBase(_)).
    sorted.
    distinct

  // Mutable iteration state: `i` walks applicationBasePorts, `last` tracks the
  // highest block base allocated so far. NOTE(review): this makes an instance
  // stateful and not thread-safe — confirm callers treat it accordingly.
  private[this] var i = 0
  private[this] var last: Long = toBase(portsDao.maxExternalPortNumber().getOrElse(MinPortNumber - BlockSize))

  // Next candidate block base: first re-uses blocks already owned by this
  // prefix, then allocates fresh blocks past the current maximum, skipping
  // unavailable blocks. NOTE(review): `)lift(` works via infix notation but a
  // dot (`.lift(i)`) would be clearer.
  @scala.annotation.tailrec
  private[this] def nextBlock(name: String): Long = {
    val block = applicationBasePorts(name)lift(i) match {
      case Some(value) => {
        i += 1
        value
      }
      case None => {
        last = last + BlockSize
        last
      }
    }
    if (isBlockAvailable(block)) {
      block
    } else {
      nextBlock(name)
    }
  }

  /**
   * The base port number (not taking into account the offset)
   *
   * @param name The name of the application (e.g. splashpage, splashpage-postgresql)
   * @param serviceName The name of the service (e.g. postgresql, nodejs, etc.)
   */
  @scala.annotation.tailrec
  final def number(name: String,
                   serviceName: String): Long = {
    firstAvailablePort(nextBlock(name), offset(serviceName)) match {
      case None => number(name, serviceName)
      case Some(v) => v
    }
  }

  // Picks the first free port in the block starting at `min`: exactly
  // min+offset for well-known services, otherwise any free slot whose offset
  // is not reserved in `defaults`. NOTE(review): the range
  // min.until(min + BlockSize + 1) also covers min+10, which belongs to the
  // next block; it is excluded only because offset 0 is reserved — confirm
  // this is intentional.
  private def firstAvailablePort(min: Long, offset: Option[Int]): Option[Long] = {
    assert(min == toBase(min), s"Min[$min] must be start of block")
    offset match {
      case None => {
        min.until(min + BlockSize + 1).
          filter { v => !defaults.values.toSeq.contains( (v % BlockSize).toInt ) }.
          find { isPortAvailable }
      }
      case Some(value) => {
        Seq(min + value).find { isPortAvailable }
      }
    }
  }

  /**
   * Given a port number (e.g. 8201) returns the base port number
   * (e.g. 8200)
   */
  protected[lib] def toBase(number: Long): Long = {
    number - (number % BlockSize)
  }

  // A single port is available when it is neither denylisted nor already
  // assigned in the database.
  protected[lib] def isPortAvailable(number: Long): Boolean = {
    if (Denylist.contains(number)) {
      false
    } else {
      portsDao.findByExternal(Authorization.All, number).isEmpty
    }
  }

  // A block is usable unless its base is denylisted or a round hundred
  // (x00 numbers are denylisted per the class scaladoc).
  protected[lib] def isBlockAvailable(number: Long): Boolean = {
    if (Denylist.contains(number)) {
      false
    } else if (number % 100 == 0) {
      false
    } else {
      true
    }
  }
}
| flowcommerce/registry | api/app/lib/DefaultPortAllocator.scala | Scala | mit | 4,197 |
package com.twitter.finagle.mux
import com.twitter.concurrent.AsyncQueue
import com.twitter.finagle.tracing.{
Annotation, BufferingTracer, Flags, Record, SpanId, Trace, TraceId}
import com.twitter.finagle.transport.QueueTransport
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.{Service, ContextHandler}
import com.twitter.io.{Charsets, Buf}
import com.twitter.util.{Await, Future, Promise, Return, Throw, Time}
import java.net.InetSocketAddress
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.scalatest.{OneInstancePerTest, FunSuite}
object MuxContext {
  // Shared mutable test state: bodies seen by handle() and the buffer
  // returned from emit().
  var handled = Seq[Buf]()
  var buf: Buf = Buf.Empty
}
class MuxContext extends ContextHandler {
  import MuxContext._

  val key = Buf.Utf8("com.twitter.finagle.mux.MuxContext")

  /** Records each received context body in the shared MuxContext.handled log.
    * (Deprecated procedure syntax replaced with an explicit Unit result.) */
  def handle(body: Buf): Unit = {
    handled :+= body
  }

  /** Always emits the current shared MuxContext.buf. */
  def emit(): Option[Buf] = Some(MuxContext.buf)
}
private[mux] class ClientServerTest(canDispatch: Boolean)
extends FunSuite with OneInstancePerTest with MockitoSugar
{
val tracer = new BufferingTracer
Trace.pushTracer(tracer)

// In-memory duplex pipe: what the client writes the server reads, and vice versa.
val clientToServer = new AsyncQueue[ChannelBuffer]
val serverToClient = new AsyncQueue[ChannelBuffer]
val serverTransport =
  new QueueTransport(writeq=serverToClient, readq=clientToServer)
val clientTransport =
  new QueueTransport(writeq=clientToServer, readq=serverToClient)
val service = mock[Service[ChannelBuffer, ChannelBuffer]]
val client = new ClientDispatcher(clientTransport, NullStatsReceiver)
val server = new ServerDispatcher(serverTransport, service, canDispatch)

// Shorthand for a ChannelBuffer wrapping the given bytes.
def buf(b: Byte*) = ChannelBuffers.wrappedBuffer(Array[Byte](b:_*))
test("handle concurrent requests, handling out of order replies") {
val p1, p2, p3 = new Promise[ChannelBuffer]
when(service(buf(1))).thenReturn(p1)
when(service(buf(2))).thenReturn(p2)
when(service(buf(3))).thenReturn(p3)
val f1 = client(buf(1))
val f2 = client(buf(2))
val f3 = client(buf(3))
for (i <- 1 to 3)
verify(service)(buf(i.toByte))
for (f <- Seq(f1, f2, f3))
assert(f.poll === None)
p2.setValue(buf(20))
assert(f1.poll === None)
assert(f2.poll === Some(Return(buf(20))))
assert(f3.poll === None)
p1.setValue(buf(10))
assert(f1.poll === Some(Return(buf(10))))
assert(f3.poll === None)
p3.setValue(buf(9))
assert(f3.poll === Some(Return(buf(9))))
}
test("server respond to pings") {
assert(client.ping().isDefined)
}
test("server nacks new requests after draining") {
val p1 = new Promise[ChannelBuffer]
when(service(buf(1))).thenReturn(p1)
val f1 = client(buf(1))
verify(service)(buf(1))
server.close(Time.now)
assert(f1.poll === None)
assert(client(buf(2)).poll === Some(Throw(RequestNackedException)))
verify(service, never)(buf(2))
p1.setValue(buf(123))
assert(f1.poll === Some(Return(buf(123))))
}
test("handle errors") {
when(service(buf(1))).thenReturn(Future.exception(new Exception("sad panda")))
assert(client(buf(1)).poll === Some(
Throw(ServerApplicationError("java.lang.Exception: sad panda"))))
}
test("propagate interrupts") {
val p = new Promise[ChannelBuffer]
when(service(buf(1))).thenReturn(p)
val f = client(buf(1))
assert(f.poll === None)
assert(p.isInterrupted === None)
val exc = new Exception("sad panda")
f.raise(exc)
assert(p.isInterrupted === Some(
ClientDiscardedRequestException("java.lang.Exception: sad panda")))
assert(f.poll === Some(Throw(exc)))
}
test("end-to-end with tracing: client-to-service") {
val p = new Promise[ChannelBuffer]
when(service(buf(1))).thenReturn(p)
verify(service, never())(any[ChannelBuffer])
val id = TraceId(Some(SpanId(1)), Some(SpanId(2)), SpanId(3), None)
val f = Trace.unwind {
Trace.setId(id)
client(buf(1))
}
verify(service)(buf(1))
assert(f.poll === None)
p.setValue(buf(2))
assert(f.poll === Some(Return(buf(2))))
val ia = new InetSocketAddress(0)
val recs = tracer.toSeq.sortBy(_.timestamp)
assert(recs match {
case Seq(
Record(`id`, _, Annotation.Message(ClientDispatcher.ClientEnabledTraceMessage), None),
Record(`id`, _, Annotation.ClientSend(), None),
Record(`id`, _, Annotation.ServerRecv(), None),
Record(`id`, _, Annotation.Message(ServerDispatcher.ServerEnabledTraceMessage), None),
Record(`id`, _, Annotation.ServerSend(), None),
Record(`id`, _, Annotation.ClientRecv(), None)
) => true
case _ => false
})
}
test("propagate trace ids") {
when(service(any[ChannelBuffer])).thenAnswer(
new Answer[Future[ChannelBuffer]]() {
def answer(invocation: InvocationOnMock) = {
val traceId = ChannelBuffers.wrappedBuffer(
Trace.id.toString.getBytes(Charsets.Utf8))
Future.value(traceId)
}
}
)
val id = Trace.nextId
val resp = Trace.unwind {
Trace.setId(id)
client(buf(1))
}
assert(resp.poll.isDefined)
val respBuf = Await.result(resp)
val respArr = new Array[Byte](respBuf.readableBytes)
respBuf.readBytes(respArr)
val respStr = new String(respArr, Charsets.Utf8)
assert(respStr === id.toString)
}
test("propagate trace flags") {
when(service(any[ChannelBuffer])).thenAnswer(
new Answer[Future[ChannelBuffer]] {
def answer(invocation: InvocationOnMock) = {
val buf = ChannelBuffers.directBuffer(8)
buf.writeLong(Trace.id.flags.toLong)
Future.value(buf)
}
}
)
val flags = Flags().setDebug
val id = Trace.nextId.copy(flags=flags)
val resp = Trace.unwind {
Trace.setId(id)
val p = client(buf(1))
p
}
assert(resp.poll.isDefined)
assert(Await.result(resp).readableBytes === 8)
val respFlags = Flags(Await.result(resp).readLong)
assert(respFlags === flags)
}
}
// Runs the shared suite with server dispatch support disabled.
@RunWith(classOf[JUnitRunner])
class ClientServerTestNoDispatch extends ClientServerTest(false)
// Runs the shared suite with server dispatch support enabled, plus
// dispatch-only behavior such as request-context transmission.
@RunWith(classOf[JUnitRunner])
class ClientServerTestDispatch extends ClientServerTest(true) {
  // Note: We test trace propagation here, too,
  // since it's a default request context.
  test("Transmits request contexts") {
    when(service(any[ChannelBuffer])).thenReturn(
      Future.value(ChannelBuffers.EMPTY_BUFFER))
    // Reset the shared fixture, then check that each dispatch delivers the
    // currently-emitted context body to the server-side handler.
    MuxContext.handled = Seq.empty
    MuxContext.buf = Buf.ByteArray(1,2,3,4)
    var f = client(ChannelBuffers.EMPTY_BUFFER)
    assert(f.isDefined)
    Await.result(f)
    assert(MuxContext.handled === Seq(Buf.ByteArray(1,2,3,4)))
    MuxContext.buf = Buf.ByteArray(9,8,7,6)
    f = client(ChannelBuffers.EMPTY_BUFFER)
    assert(f.isDefined)
    Await.result(f)
    // Contexts accumulate in arrival order.
    assert(MuxContext.handled === Seq(
      Buf.ByteArray(1,2,3,4), Buf.ByteArray(9,8,7,6)))
  }
}
| JustinTulloss/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/ClientServerTest.scala | Scala | apache-2.0 | 7,153 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
package manager
import swing._
import event.FocusGained
import javax.swing.{KeyStroke, JComponent}
import javax.swing.plaf.basic.BasicSplitPaneUI
import java.awt.event.KeyEvent
import action._
/**
 * Main two-panel (orthodox) file-manager window.
 *
 * Hosts two [[FolderPanelImpl]] panes side by side over the given file
 * systems, a bottom bar of function-key actions (F3..F10), and window-wide
 * key bindings routing those keys to the actions.
 */
private class ManagerWindow(caption: String, leftFS: FileSystem, rightFS: FileSystem)
                           (implicit scheme: ColorScheme, format: Format) extends MainFrame with ActionContext {
  // Function-key actions shown (in order) in the bottom action bar.
  private val actions = List(
    createAction("View", KeyEvent.VK_F3, new Open(this, editable = false)),
    createAction("Edit", KeyEvent.VK_F4, new Open(this, editable = true)),
    createAction("Copy", KeyEvent.VK_F5, new Copy(this)),
    createAction("Rename", KeyEvent.VK_F6, new Rename(this)),
    createAction("MkFold", KeyEvent.VK_F7, new MakeFolder(this)),
    createAction("Delete", KeyEvent.VK_F8, new Delete(this)),
    createAction("Move", KeyEvent.VK_F9, new Move(this)),
    createAction("Quit", KeyEvent.VK_F10, new Quit(this)))
  def frame = this
  val left = new FolderPanelImpl(leftFS)
  val right = new FolderPanelImpl(rightFS)
  actions.foreach(register)
  listenTo(left.component)
  listenTo(right.component)
  reactions += {
    case _: FocusGained =>
      // Ignore focus churn caused by child dialogs: only toggle the active
      // pane when no owned window (dialog) is currently visible.
      val hasWindows = peer.getOwnedWindows.exists(_.isVisible)
      if (!hasWindows) {
        left.active = left.component.hasFocus
        right.active = right.component.hasFocus
      }
  }
  preferredSize = new Dimension(1100, 800)
  title = caption
  contents = {
    val splitPane = new SplitPane(Orientation.Vertical, left, right) {
      resizeWeight = 0.5D
      dividerSize = 7
      background = scheme.consoleForeground
      peer.setUI(new BasicSplitPaneUI)
      // F6/F8 are claimed by our own actions; strip the split pane's built-in
      // bindings for those keys so they don't shadow ours.
      val inputMap = peer.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT)
      inputMap.put(KeyStroke.getKeyStroke(KeyEvent.VK_F6, 0), "none")
      inputMap.put(KeyStroke.getKeyStroke(KeyEvent.VK_F8, 0), "none")
    }
    // All actions except the last (Quit) on the left, Quit on the right.
    val actionsPane = new ActionPanel(actions.init, List(actions.last))
    new BorderPanel() {
      add(splitPane, BorderPanel.Position.Center)
      add(actionsPane, BorderPanel.Position.South)
    }
  }
  // Wraps an action delegate with error reporting (dialog + stderr) and a
  // refresh of both panels after a failure.
  private def createAction(title: String, key: Int, delegate: () => Unit): Action = new Action(title) {
    accelerator = Some(KeyStroke.getKeyStroke(key, 0))
    def apply() {
      try {
        delegate()
      } catch {
        case e: Exception =>
          e.printStackTrace(System.err)
          Dialog.showMessage(contents.head, e.toString, s"Error on $title", Dialog.Message.Error)
          left.refresh()
          right.refresh()
      }
    }
  }
  // Binds the action's accelerator at window scope so it fires regardless of
  // which component currently has keyboard focus.
  private def register(action: Action) {
    action.accelerator.foreach { keyStroke =>
      val root = peer.getRootPane
      root.getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW).put(keyStroke, action.title)
      root.getActionMap.put(action.title, action.peer)
    }
  }
} | pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/manager/ManagerWindow.scala | Scala | gpl-3.0 | 3,570 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package pureconfig
import scala.concurrent.duration.Duration.{Inf, MinusInf}
import scala.concurrent.duration.{
DAYS,
Duration,
FiniteDuration,
HOURS,
MICROSECONDS,
MILLISECONDS,
MINUTES,
NANOSECONDS,
SECONDS,
TimeUnit
}
import scala.util.Try
import scala.util.control.NonFatal
import pureconfig.error.{CannotConvert, ExceptionThrown, FailureReason}
/** Utility functions for converting a `String` to a `Duration` and vice versa. The parser accepts the HOCON unit
* syntax.
*/
private[pureconfig] object DurationUtils {

  /** Convert a string to a Duration while trying to maintain compatibility with Typesafe's abbreviations.
    *
    * Accepts the special tokens "Undefined", "Inf"/"PlusInf"/"+Inf" and
    * "MinusInf"/"-Inf"; otherwise normalizes Typesafe-style unit spellings
    * before delegating to [[parseDuration]]. Failures are reported as
    * [[CannotConvert]] (bad number/unit) or [[ExceptionThrown]].
    */
  val fromString: String => Either[FailureReason, Duration] = { string =>
    if (string == UndefinedDuration) Right(Duration.Undefined)
    else
      try {
        Right(parseDuration(addDefaultUnit(justAMinute(itsGreekToMe(string)))))
      } catch {
        case ex: NumberFormatException =>
          val err = s"${ex.getMessage}. (try a number followed by any of ns, us, ms, s, m, h, d)"
          Left(CannotConvert(string, "Duration", err))
        case NonFatal(t) =>
          Left(ExceptionThrown(t))
      }
  }
  // ------------------------------
  // This is a copy of Duration(str: String) that fixes the bug on precision
  //
  // "ms milli millisecond" -> List("ms", "milli", "millis", "millisecond", "milliseconds")
  // NOTE(review): the "\\\\s+" literals below escape a literal backslash before
  // "s+" rather than matching whitespace ("\\s+"); this looks like an
  // extraction artifact of this copy — confirm against the upstream source.
  private[this] def words(s: String) = (s.trim split "\\\\s+").toList
  private[this] def expandLabels(labels: String): List[String] = {
    // First label is the exact abbreviation; subsequent names also get an
    // "s"-pluralized variant.
    val hd :: rest = words(labels)
    hd :: rest.flatMap(s => List(s, s + "s"))
  }
  private[this] val timeUnitLabels = List(
    DAYS -> "d day",
    HOURS -> "h hour",
    MINUTES -> "min minute",
    SECONDS -> "s sec second",
    MILLISECONDS -> "ms milli millisecond",
    MICROSECONDS -> "µs micro microsecond",
    NANOSECONDS -> "ns nano nanosecond"
  )
  // Label => TimeUnit
  protected[pureconfig] val timeUnit: Map[String, TimeUnit] =
    timeUnitLabels.flatMap { case (unit, names) => expandLabels(names) map (_ -> unit) }.toMap

  /** Parses a duration string: optional whitespace, a numeric value, and a
    * trailing unit label recognized by [[timeUnit]], or one of the infinity
    * tokens. Throws NumberFormatException on an unknown unit or bad number.
    */
  private[pureconfig] def parseDuration(s: String): Duration = {
    val s1: String = s filterNot (_.isWhitespace)
    s1 match {
      case "Inf" | "PlusInf" | "+Inf" => Inf
      case "MinusInf" | "-Inf" => MinusInf
      case _ =>
        // The unit is the trailing run of letters; the value is what precedes it.
        val unitName = s1.reverse.takeWhile(_.isLetter).reverse
        timeUnit get unitName match {
          case Some(unit) =>
            val valueStr = s1 dropRight unitName.length
            // Reading Long first avoids losing precision unnecessarily
            Try(Duration(java.lang.Long.parseLong(valueStr), unit)).getOrElse {
              // But if the value is a fractional number, then we have to parse it
              // as a Double, which will lose precision and possibly change the units.
              Duration(java.lang.Double.parseDouble(valueStr), unit)
            }
          case _ => throw new NumberFormatException("format error " + s)
        }
    }
  }
  // ------------------------------
  private val onlyNumberRegex = "\\\\s*[+-]?[0-9]+\\\\s*$".r
  private val fauxMuRegex = "([0-9])(\\\\s*)us(\\\\s*)$".r
  private val shortMinuteRegex = "([0-9])(\\\\s*)m(\\\\s*)$".r
  // To maintain compatibility with Typesafe Config, use "ms" as default unit.
  private val addDefaultUnit = { (s: String) => if (onlyNumberRegex.unapplySeq(s).isDefined) s + " ms" else s }
  // To maintain compatibility with Typesafe Config, replace "us" with "µs".
  private val itsGreekToMe =
    fauxMuRegex.replaceSomeIn(_: String, m => Some(s"${m.group(1)}${m.group(2)}µs${m.group(3)}"))
  // To maintain compatibility with Typesafe Config, replace "m" with "minutes".
  private val justAMinute =
    shortMinuteRegex.replaceSomeIn(_: String, m => Some(s"${m.group(1)}${m.group(2)}minutes${m.group(3)}"))

  /** Format a possibily infinite duration as a string with a suitable time unit using units TypesafeConfig understands.
    * Caveat: TypesafeConfig doesn't undersand infinite durations
    */
  def fromDuration(d: Duration): String = {
    d match {
      case f: FiniteDuration => fromFiniteDuration(f)
      case Duration.Inf => "Inf"
      case Duration.MinusInf => "MinusInf"
      case _ => UndefinedDuration
    }
  }
  // We need our own constant for `Duration.Undefined` because that value's `toString` is `Duration.Undefined`
  // which is inconsistent with the `Inf` and `Minus` `toString` provided by other special `Duration`s.
  private final val UndefinedDuration = "Undefined"

  /** Format a FiniteDuration as a string with a suitable time unit using units TypesafeConfig understands.
    *
    * Picks the largest unit that divides the duration's nanosecond count
    * exactly; falls back to nanoseconds otherwise.
    */
  def fromFiniteDuration(d: FiniteDuration): String = {
    d.toNanos match {
      case 0L => "0"
      case n =>
        timeUnitsToLabels
          .collectFirst {
            case (unitInNanos, unitLabel) if n >= unitInNanos && n % unitInNanos == 0 =>
              s"${n / unitInNanos}$unitLabel"
          }
          .getOrElse(s"${n}ns")
    }
  }
  private final val microsecondInNanos = 1000L
  private final val millisecondInNanos = 1000L * microsecondInNanos
  private final val secondInNanos = 1000L * millisecondInNanos
  private final val minuteInNanos = 60L * secondInNanos
  private final val hourInNanos = 60L * minuteInNanos
  private final val dayInNanos = 24L * hourInNanos
  // Must be sorted from largest unit to smallest.
  private final val timeUnitsToLabels = Vector(
    dayInNanos -> "d",
    hourInNanos -> "h",
    minuteInNanos -> "m",
    secondInNanos -> "s",
    millisecondInNanos -> "ms",
    microsecondInNanos -> "us"
  ).sortBy(_._1)(implicitly[Ordering[Long]].reverse)
}
| pureconfig/pureconfig | core/src/main/scala/pureconfig/DurationUtils.scala | Scala | mpl-2.0 | 5,855 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.examples.scala.graph
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala._
import org.apache.flink.examples.java.graph.util.ConnectedComponentsData
import org.apache.flink.util.Collector
/** Naive transitive closure: repeatedly extends known paths with one more
  * edge until a fixpoint (or the iteration limit) is reached.
  */
object TransitiveClosureNaive {
  def main (args: Array[String]): Unit = {
    val params: ParameterTool = ParameterTool.fromArgs(args)
    // set up execution environment
    val env = ExecutionEnvironment.getExecutionEnvironment
    // make parameters available in the web interface
    env.getConfig.setGlobalJobParameters(params)
    // Edge list either read from a space-delimited CSV file (--edges) or
    // taken from the bundled example data.
    val edges =
      if (params.has("edges")) {
        env.readCsvFile[(Long, Long)](
          filePath = params.get("edges"),
          fieldDelimiter = " ",
          includedFields = Array(0, 1))
          .map { x => (x._1, x._2)}
      } else {
        println("Executing TransitiveClosure example with default edges data set.")
        println("Use --edges to specify file input.")
        val edgeData = ConnectedComponentsData.EDGES map {
          case Array(x, y) => (x.asInstanceOf[Long], y.asInstanceOf[Long])
        }
        env.fromCollection(edgeData)
      }
    val maxIterations = params.getInt("iterations", 10)
    // Bulk iteration with a termination criterion: each round joins the
    // current path set with the edge set to grow paths by one hop, unions
    // with the previous paths and deduplicates; the iteration stops early
    // when a round discovers no path that was not already known.
    val paths = edges.iterateWithTermination(maxIterations) { prevPaths: DataSet[(Long, Long)] =>
      val nextPaths = prevPaths
        .join(edges)
        .where(1).equalTo(0) {
          (left, right) => (left._1,right._2)
        }.withForwardedFieldsFirst("_1").withForwardedFieldsSecond("_2")
        .union(prevPaths)
        .groupBy(0, 1)
        .reduce((l, r) => l).withForwardedFields("_1; _2")
      // Emits only the paths that are new this round; an empty result
      // terminates the iteration.
      val terminate = prevPaths
        .coGroup(nextPaths)
        .where(0).equalTo(0) {
          (
            prev: Iterator[(Long, Long)],
            next: Iterator[(Long, Long)],
            out: Collector[(Long, Long)]) => {
              val prevPaths = prev.toSet
              for (n <- next)
                if (!prevPaths.contains(n)) out.collect(n)
          }
        }.withForwardedFieldsSecond("*")
      (nextPaths, terminate)
    }
    if (params.has("output")) {
      paths.writeAsCsv(params.get("output"), "\n", " ")
      env.execute("Scala Transitive Closure Example")
    } else {
      println("Printing result to stdout. Use --output to specify output path.")
      paths.print()
    }
  }
}
| hequn8128/flink | flink-examples/flink-examples-batch/src/main/scala/org/apache/flink/examples/scala/graph/TransitiveClosureNaive.scala | Scala | apache-2.0 | 3,166 |
package core
import concurrent.Future
/** A game card, identified by its name and carrying a purchase cost. */
trait Card {
  def name: String
  def cost: Int
  override def toString: String = name
}
/** Capability mixin for cards that can be played to affect the game state. */
trait Actionable {
  // Asynchronous game-state transformation applied when the card is played.
  def play: Game => Future[Game]
}
/** Capability mixin for cards that provide coins. */
trait Treasurable {
  def coin: Int
}
/** Capability mixin for cards that are worth victory points. */
trait Victoriable {
  def vp: Int
}
/** Simple action card defined entirely by its constructor arguments. */
class BasicActionCard(
  val name: String,
  val cost: Int,
  val play: Game => Future[Game]
) extends Card with Actionable
/** Simple treasure card defined entirely by its constructor arguments. */
class BasicTreasureCard(
  val name: String,
  val cost: Int,
  val coin: Int
) extends Card with Treasurable
/** Simple victory card defined entirely by its constructor arguments. */
class BasicVictoryCard(
  val name: String,
  val cost: Int,
  val vp: Int
) extends Card with Victoriable
/** Partial functions that narrow [[Card]] values to their capability mixins.
  *
  * Each `toX` variant is defined exactly where the runtime value carries the
  * corresponding mixin; the `toXWith` variants are additionally undefined
  * where the predicate `p` fails, which affects `isDefinedAt` as well.
  */
object CardCasts {
  def toActionCard: PartialFunction[Card, Card with Actionable] = {
    case c: Actionable => c
  }

  def toActionCardWith(p: Card with Actionable => Boolean): PartialFunction[Card, Card with Actionable] = {
    case c: Actionable if p(c) => c
  }

  def toTreasureCard: PartialFunction[Card, Card with Treasurable] = {
    case c: Treasurable => c
  }

  def toTreasureCardWith(p: Card with Treasurable => Boolean): PartialFunction[Card, Card with Treasurable] = {
    case c: Treasurable if p(c) => c
  }

  def toVictoryCard: PartialFunction[Card, Card with Victoriable] = {
    case c: Victoriable => c
  }

  def toVictoryCardWith(p: Card with Victoriable => Boolean): PartialFunction[Card, Card with Victoriable] = {
    case c: Victoriable if p(c) => c
  }

  def toCard: PartialFunction[Card, Card] = {
    case c: Card => c
  }

  def toCardWith(p: Card => Boolean): PartialFunction[Card, Card] = {
    case c: Card if p(c) => c
  }
}
} | whence/powerlife | scala/powercards_future/core/Card.scala | Scala | mit | 1,582 |
/*
*
* Copyright 2015.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jobimtext.run
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.jobimtext.ct2.ClassicToCT
import org.jobimtext.extract.CooccurrenceWindow
import org.jobimtext.misc.TakeTopN
import org.jobimtext.sim._
import org.jobimtext.spark.SparkConfigured
/**
* Created by Steffen Remus.
*
* execute a script snippet from your spark shell, e.g.:
*
* import org.jobimtext.run.ShellRunner
* ShellRunner.kl(...)
*
*/
object ShellRunner {

  /**
   * Computes KL divergence over joined probability lines and writes the
   * result to `out`.
   *
   * execute this snippet from your spark shell:
   *
   * import org.jobimtext.run.ShellRunner
   * ShellRunner.kl(...)
   *
   * @param sc the active SparkContext
   * @param in input path of joined probability lines (empty lines skipped)
   * @param out output path for the divergence results
   * @param sort_output whether to keep only the top-n entries before writing
   * @param reverse_sorting sort direction used when `sort_output` is enabled
   * @param trimtopn number of entries kept when `sort_output` is enabled
   * @return the RDD that was written to `out`
   */
  def kl(sc:SparkContext,
         in:String,
         out:String,
         sort_output:Boolean = false,
         reverse_sorting:Boolean = false,
         trimtopn:Int = 20
          ):RDD[String] = {
    val joinedprobs = sc.textFile(in).filter(_.nonEmpty)
    val divergences = KLDivergence(joinedprobs)
    // Optionally trim to the top-n entries; expressed as a val-producing
    // conditional instead of a var + explicit `return`.
    val result =
      if (sort_output) TakeTopN(n = trimtopn, descending = reverse_sorting, true, divergences)
      else divergences
    result.saveAsTextFile(path = out)
    result
  }

  /**
   * Extracts co-occurrence pairs within a sliding token window and writes
   * them to `out`.
   *
   * @param sc the active SparkContext
   * @param in input path of text lines (empty lines skipped)
   * @param out output path for the extracted co-occurrences
   * @param windowsize co-occurrence window size in tokens
   * @return the RDD that was written to `out`
   */
  def extractCoocWindow(sc:SparkContext,
                        in:String,
                        out:String,
                        windowsize:Int = 3
                         ):RDD[String] = {
    val lines_in = sc.textFile(in).filter(_.nonEmpty)
    val lines_out = CooccurrenceWindow(windowsize, lines_in)
    lines_out.saveAsTextFile(out)
    lines_out
  }
}
| tudarmstadt-lt/JoBimTextCT | org.jobimtext.ct/src/main/scala/org/jobimtext/run/ShellRunner.scala | Scala | apache-2.0 | 2,236 |
package chicken.domain
import scala.collection.mutable
// A user account; `followed` holds the names of the users this user follows.
// NOTE(review): a mutable Set inside a case class makes equals/hashCode
// unstable over time — consider an immutable Set plus copy; confirm callers.
case class User(name: String, followed: mutable.Set[String] = mutable.Set.empty[String])
// A message posted by `author`; `timestamp` units are not shown here —
// presumably epoch milliseconds, confirm against the producing code.
case class Post(text: String, author: String, timestamp: Long)
| slavapak/chicken | src/main/scala/chicken/domain/domain.scala | Scala | epl-1.0 | 209 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark.jts
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql._
/**
* Common JTS test setup and utilities.
*/
trait TestEnvironment {
  // Lazily-created local-mode SparkSession with JTS geometry support enabled.
  implicit lazy val spark: SparkSession = {
    SparkSession.builder()
      .appName("testSpark")
      .master("local[*]")
      .getOrCreate()
      .withJTS
  }

  lazy val sc: SQLContext = spark.sqlContext
    .withJTS // <-- this should be a noop given the above, but is here to test that code path

  /**
   * Constructor for creating a DataFrame with a single row and no columns.
   * Useful for testing the invocation of data constructing UDFs.
   */
  def dfBlank(implicit spark: SparkSession): DataFrame = {
    // This is to enable us to do a single row creation select operation in DataFrame
    // world. Probably a better/easier way of doing this.
    spark.createDataFrame(spark.sparkContext.makeRDD(Seq(Row())), StructType(Seq.empty))
  }
}
| elahrvivaz/geomesa | geomesa-spark/geomesa-spark-jts/src/test/scala/org/locationtech/geomesa/spark/jts/TestEnvironment.scala | Scala | apache-2.0 | 1,415 |
/*
Copyright 2013 Ilya Lakhin (Илья Александрович Лахин)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package name.lakhin.eliah.projects
package papacarlo.lexis
/** A lexical context: a kind tag plus an optional chain of parent contexts.
  *
  * Two contexts are considered equal when their `view` strings — the full
  * path of kinds up to the root — coincide.
  */
final case class Context(kind: Int, parent: Option[Context] = None) {
  /** Colon-separated path of kinds from this context to its root, e.g. "2:1:0". */
  val view: String = parent match {
    case Some(p) => kind + ":" + p.view
    case None => kind.toString
  }

  /** Number of ancestors above this context; root contexts have depth 0. */
  val depth: Int = parent.fold(0)(_.depth + 1)

  /** Creates a child context of `childKind` whose parent is this context. */
  def branch(childKind: Int): Context = Context(childKind, Some(this))

  /** Returns the deepest ancestor shared by this context and `another`. */
  def intersect(another: Context): Context = {
    // Climb `ctx` upwards until it is no deeper than `targetDepth`.
    @scala.annotation.tailrec
    def ascendTo(ctx: Context, targetDepth: Int): Context =
      if (ctx.depth > targetDepth) ascendTo(ctx.parent.getOrElse(ctx), targetDepth)
      else ctx

    // Walk both chains upwards in lockstep until their views agree; a missing
    // parent falls back to the base context, guaranteeing termination.
    @scala.annotation.tailrec
    def meet(a: Context, b: Context): Context =
      if (a.view == b.view) a
      else meet(a.parent.getOrElse(Context.Base), b.parent.getOrElse(Context.Base))

    val level = if (depth < another.depth) depth else another.depth
    meet(ascendTo(this, level), ascendTo(another, level))
  }

  // Structural equality by view string (the view fully encodes the chain).
  override def equals(another: Any) = another match {
    case that: Context => view == that.view
    case _ => false
  }

  override def toString = view
}

object Context {
  /** The root context that all chains ultimately intersect at. */
  val Base: Context = Context(0)
}
| Eliah-Lakhin/papa-carlo | src/main/scala/name.lakhin.eliah.projects/papacarlo/lexis/Context.scala | Scala | apache-2.0 | 1,600 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal
import io.netty.channel.{ Channel, ChannelFuture, ChannelFutureListener }
import io.netty.util.concurrent.{ GenericFutureListener, Future => NettyFuture }
import scala.concurrent.{ Future, Promise }
/** Implicit conversions from Netty futures to Scala futures. */
object NettyFutureConverters {
  implicit class ToFuture[T](future: NettyFuture[T]) {
    /** Bridges the Netty future into a Scala Future via a one-shot Promise. */
    def toScala: Future[T] = {
      val promise = Promise[T]()
      future.addListener(new GenericFutureListener[NettyFuture[T]] {
        def operationComplete(future: NettyFuture[T]) = {
          if (future.isSuccess) {
            promise.success(future.getNow)
          } else if (future.isCancelled) {
            // Scala Promises have no cancelled state; surface it as a failure.
            promise.failure(new RuntimeException("Future cancelled"))
          } else {
            promise.failure(future.cause())
          }
        }
      })
      promise.future
    }
  }

  implicit class ChannelFutureToFuture(future: ChannelFuture) {
    /** Like `toScala`, but completes with the operation's [[Channel]]. */
    def channelFutureToScala: Future[Channel] = {
      val promise = Promise[Channel]()
      future.addListener(new ChannelFutureListener {
        def operationComplete(future: ChannelFuture) = {
          if (future.isSuccess) {
            promise.success(future.channel())
          } else if (future.isCancelled) {
            // Scala Promises have no cancelled state; surface it as a failure.
            promise.failure(new RuntimeException("Future cancelled"))
          } else {
            promise.failure(future.cause())
          }
        }
      })
      promise.future
    }
  }
}
| rstento/lagom | service/core/client/src/main/scala/com/lightbend/lagom/internal/NettyFutureConverters.scala | Scala | apache-2.0 | 1,495 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import java.io.{ByteArrayInputStream, InputStream}
import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.execution.ExecutionModel.{AlwaysAsyncExecution, BatchedExecution, SynchronousExecution}
import monix.execution.exceptions.APIContractViolationException
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observable
import monix.execution.exceptions.DummyException
import monix.reactive.observers.Subscriber
import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Random, Success}
/** Tests for `Observable.fromInputStream*`, driven by a TestScheduler so all
  * asynchrony is advanced deterministically via `tick()`.
  */
object InputStreamObservableSuite extends SimpleTestSuite {
  test("fromInputStreamUnsafe yields a single subscriber observable") {
    implicit val s = TestScheduler()
    var errorThrown: Throwable = null
    val obs = Observable.fromInputStreamUnsafe(new ByteArrayInputStream(randomByteArray()))
    obs.unsafeSubscribeFn(Subscriber.empty(s))
    s.tick()
    // A second subscription must be rejected with a contract violation; the
    // subscriber below fails loudly on any other signal.
    obs.unsafeSubscribeFn(new Subscriber[Array[Byte]] {
      implicit val scheduler = s
      def onNext(elem: Array[Byte]): Ack =
        throw new IllegalStateException("onNext")
      def onComplete(): Unit =
        throw new IllegalStateException("onComplete")
      def onError(ex: Throwable): Unit =
        errorThrown = ex
    })
    assert(errorThrown.isInstanceOf[APIContractViolationException])
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStreamUnsafe works for BatchedExecution") {
    implicit val s = TestScheduler(BatchedExecution(1024))
    val array = randomByteArray()
    val in = new ByteArrayInputStream(array)
    // Read in 40-byte chunks and reassemble; result must equal the input.
    val result = Observable.fromInputStreamUnsafe(in, 40)
      .foldLeft(Array.empty[Byte])(_ ++ _)
      .runAsyncGetFirst
      .map(_.map(_.toList))
    s.tick()
    assertEquals(result.value, Some(Success(Some(array.toList))))
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStreamUnsafe it works for AlwaysAsyncExecution") {
    implicit val s = TestScheduler(AlwaysAsyncExecution)
    val array = randomByteArray()
    val in = new ByteArrayInputStream(array)
    val result = Observable.fromInputStreamUnsafe(in, 40)
      .foldLeft(Array.empty[Byte])(_ ++ _)
      .runAsyncGetFirst
      .map(_.map(_.toList))
    s.tick()
    assertEquals(result.value, Some(Success(Some(array.toList))))
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStreamUnsafe it works for SynchronousExecution") {
    implicit val s = TestScheduler(SynchronousExecution)
    var wasCompleted = 0
    val received = ListBuffer.empty[Byte]
    val array = randomByteArray()
    val in = new ByteArrayInputStream(array)
    val obs: Observable[Array[Byte]] = Observable
      .fromInputStreamUnsafe(in)
      .foldLeft(Array.empty[Byte])(_ ++ _)
    // Manual subscriber: accumulate all emitted bytes, count completions.
    obs.unsafeSubscribeFn(new Subscriber[Array[Byte]] {
      implicit val scheduler = s
      def onError(ex: Throwable): Unit =
        throw new IllegalStateException("onError")
      def onComplete(): Unit =
        wasCompleted += 1
      def onNext(elem: Array[Byte]): Ack = {
        received.appendAll(elem)
        Continue
      }
    })
    assertEquals(received.toList, array.toList)
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStream closes the file handle onComplete") {
    implicit val s = TestScheduler()
    var wasClosed = false
    val in = randomInputWithOnFinish(() => wasClosed = true)
    val f = Observable.fromInputStreamF(Task(in)).completedL.runToFuture
    s.tick()
    assertEquals(f.value, Some(Success(())))
    assert(wasClosed, "InputStream should have been closed")
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStream closes the file handle onError on first call") {
    implicit val s = TestScheduler()
    var wasClosed = false
    val ex = DummyException("dummy")
    // Stream throws on the very first read().
    val in = inputWithError(ex, 1, () => wasClosed = true)
    val f = Observable.fromInputStreamF(Task(in)).completedL.runToFuture
    s.tick()
    assertEquals(f.value, Some(Failure(ex)))
    assert(wasClosed, "InputStream should have been closed")
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStream closes the file handle onError on second call") {
    implicit val s = TestScheduler()
    var wasClosed = false
    val ex = DummyException("dummy")
    // Stream throws on the second read(), after one successful byte.
    val in = inputWithError(ex, 2, () => wasClosed = true)
    val f = Observable.fromInputStreamF(Task(in)).completedL.runToFuture
    s.tick()
    assertEquals(f.value, Some(Failure(ex)))
    assert(wasClosed, "InputStream should have been closed")
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  test("fromInputStream closes the file handle on cancel") {
    implicit val s = TestScheduler(AlwaysAsyncExecution)
    var wasClosed = false
    val in = randomInputWithOnFinish(() => wasClosed = true)
    val f = Observable.fromInputStreamF(Task(in)).completedL.runToFuture
    // Advance one step so the stream is being consumed, then cancel mid-read.
    s.tickOne()
    f.cancel()
    s.tick()
    assert(wasClosed, "InputStream should have been closed")
    assertEquals(s.state.lastReportedError, null)
    assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
  }

  // InputStream whose read() throws `ex` on the `whenToThrow`-th call and
  // reports close() through `onFinish`.
  def inputWithError(ex: Throwable, whenToThrow: Int, onFinish: () => Unit): InputStream =
    new InputStream {
      private[this] var callIdx = 0
      def read(): Int = {
        callIdx += 1
        if (callIdx == whenToThrow) throw ex
        else 1
      }
      override def close(): Unit =
        onFinish()
    }

  // Wraps a random in-memory stream, reporting close() through `onFinish`.
  def randomInputWithOnFinish(onFinish: () => Unit): InputStream = {
    val array = randomByteArray()
    val underlying = new ByteArrayInputStream(array)
    new InputStream {
      def read(): Int = underlying.read()
      override def read(b: Array[Byte]): Int =
        underlying.read(b)
      override def read(b: Array[Byte], off: Int, len: Int): Int =
        underlying.read(b, off, len)
      override def close(): Unit =
        onFinish()
    }
  }

  // Random payload of up to 2 KiB (possibly empty).
  def randomByteArray(): Array[Byte] = {
    val length = Random.nextInt(2048)
    val bytes = new Array[Byte](length)
    Random.nextBytes(bytes)
    bytes
  }
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/builders/InputStreamObservableSuite.scala | Scala | apache-2.0 | 7,010 |
package stronghold.strings
/**
* problem description: http://rosalind.info/problems/splc/
*/
object RnaSplicing {

  object SampleData {
    // FASTA-formatted practice input: first record is the pre-mRNA (as DNA),
    // the remaining records are introns.
    val sample: List[String] =
      List(
        ">Rosalind_10",
        "ATGGTCTACATAGCTGACAAACAGCACGTAGCAATCGGTCGAATCTCGAGAGGCATATGGTCACATGATCGGTCGAGCGTGTTTCAAAGTTTGCGCCTAG",
        ">Rosalind_12",
        "ATCGGTCGAA",
        ">Rosalind_15",
        "ATCGGTCGAGCGTGT"
      )
  }

  import scala.annotation.tailrec
  import SampleData.sample
  import utils.UtilityFunctions.{Fasta,
    readFastaSequences, readInputData, readRnaCodonTable, writeProteinToFileAsString}
  import utils.{AminoAcid, Codon, Protein, Dna, Rna, RnaNucleotide}
  import utils.Dna.transcribe

  val inputFileName: String = "/stronghold/datasets/rosalind_splc.txt"

  private val startCodon: Codon = Codon(Rna("AUG").sequence)

  // Reads the dataset (or the embedded sample) and splits it into the
  // pre-mRNA (first FASTA record) and the set of introns (remaining records).
  def getData(isPractice: Boolean): (Dna, Set[Dna]) = {
    val data: List[String] = if (isPractice) sample else readInputData(inputFileName)
    val fastaSequences: List[Fasta] = readFastaSequences(data, isLineSeparated = false)
    (Dna(fastaSequences.head.string), fastaSequences.tail.map(s => Dna(s.string)).toSet)
  }

  // Translates from the first start codon onward, stopping at the first stop
  // codon (a codon the table maps to None) or at the end of the sequence.
  // Returns None when no start codon is present.
  def translate(rna: Rna, codonTable: Map[Codon, Option[AminoAcid]]): Option[Protein] = {
    val startCodonIndex: Int = rna.sequence.sliding(3, 1).indexWhere(triplet => Codon(triplet) == startCodon)
    if (startCodonIndex == -1) None
    else {
      // Full triplets only; a trailing partial codon is dropped.
      val nucleotideTriplets: Iterator[List[RnaNucleotide]] =
        rna.sequence.drop(startCodonIndex).grouped(3).filter(_.length == 3)

      // Accumulates amino acids in reverse; stops on a stop codon (None).
      @tailrec
      def loop(protein: List[AminoAcid]): List[AminoAcid] = {
        if (!nucleotideTriplets.hasNext) protein
        else {
          val triplet: List[RnaNucleotide] = nucleotideTriplets.next()
          codonTable(Codon(triplet)) match {
            case Some(aminoAcid) => loop(aminoAcid :: protein)
            case None => protein
          }
        }
      }

      Some(Protein(loop(Nil).reverse))
    }
  }

  // Splices out every intron occurrence from the DNA, then transcribes the
  // remaining exons to RNA and translates them into a protein.
  def findAndTranslateExons(dna: Dna, introns: Set[Dna], codonTable: Map[Codon, Option[AminoAcid]]): Option[Protein] = {
    val exons: Dna =
      Dna(introns.foldLeft(dna.toString){ case (dnaSeq, intron) => dnaSeq.replaceAll(intron.toString, "") })
    translate(transcribe(exons), codonTable)
  }

  def main(args: Array[String]): Unit = {
    val (dna, introns): (Dna, Set[Dna]) = getData(isPractice = false)
    val codonTable: Map[Codon, Option[AminoAcid]] = readRnaCodonTable()
    val protein: Option[Protein] = findAndTranslateExons(dna, introns, codonTable)
    writeProteinToFileAsString(protein.get)
  }
}
| ghostrider77/Bioinformatics | Bioinformatics/src/main/scala-2.11/stronghold/strings/RnaSplicing.scala | Scala | mit | 2,614 |
package com.scalableminds.webknossos.datastore.services
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.routing.RoundRobinPool
import akka.util.Timeout
import com.scalableminds.util.geometry.{BoundingBox, Vec3Int, Vec3Double}
import com.scalableminds.util.tools.{Fox, FoxImplicits}
import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, ElementClass, SegmentationLayer}
import com.scalableminds.webknossos.datastore.models.requests.{
Cuboid,
DataServiceDataRequest,
DataServiceMappingRequest,
DataServiceRequestSettings
}
import com.scalableminds.webknossos.datastore.services.mcubes.MarchingCubes
import net.liftweb.common.{Box, Failure}
import java.nio._
import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.reflect.ClassTag
/** Parameters for a single isosurface extraction.
  *
  * @param dataSource dataset the layer belongs to; passed downstream via orNull
  * @param dataLayer segmentation layer the surface is computed from
  * @param cuboid volume of interest to load
  * @param segmentId id of the segment whose surface is extracted
  * @param subsamplingStrides per-axis stride used when loading the data
  * @param scale factor multiplied with the cuboid resolution to scale vertices
  * @param mapping optional mapping name applied to segmentation ids
  * @param mappingType "JSON" or "HDF5" — selects how `mapping` is applied
  */
case class IsosurfaceRequest(
    dataSource: Option[DataSource],
    dataLayer: SegmentationLayer,
    cuboid: Cuboid,
    segmentId: Long,
    subsamplingStrides: Vec3Int,
    scale: Vec3Double,
    mapping: Option[String] = None,
    mappingType: Option[String] = None
)
/** Type-specific helpers for reading raw little-endian bytes as `T` values.
  *
  * @param getTypedBufferFn views a ByteBuffer as the typed buffer `B`
  * @param copyDataFn bulk-copies a typed buffer into a `T` array
  * @param fromLong converts a segment id (Long) into the element type `T`
  */
case class DataTypeFunctors[T, B](
    getTypedBufferFn: ByteBuffer => B,
    copyDataFn: (B, Array[T]) => Unit,
    fromLong: Long => T
)
/** Worker actor that computes isosurface requests one at a time.
  *
  * Blocking with Await inside the actor is deliberate here: a bounded
  * RoundRobinPool of these actors caps how many isosurface computations
  * can run concurrently.
  */
class IsosurfaceActor(val service: IsosurfaceService, val timeout: FiniteDuration) extends Actor {
  def receive: Receive = {
    case request: IsosurfaceRequest =>
      sender() ! Await.result(service.requestIsosurface(request).futureBox, timeout)
    case _ =>
      // Use sender() consistently (the bare `sender` accessor without parens
      // is the deprecated postfix style and mismatched the branch above).
      sender() ! Failure("Unexpected message sent to IsosurfaceActor.")
  }
}
/** Computes isosurface meshes (vertex soup + neighbor faces) for a segment of a
  * segmentation layer, using marching cubes over chunked subvolumes. Work is
  * funneled through a bounded actor pool to limit concurrency.
  */
class IsosurfaceService(binaryDataService: BinaryDataService,
                        mappingService: MappingService,
                        actorSystem: ActorSystem,
                        isosurfaceTimeout: FiniteDuration,
                        isosurfaceActorPoolSize: Int)(implicit ec: ExecutionContext)
    extends FoxImplicits {
  private val agglomerateService: AgglomerateService = binaryDataService.agglomerateService
  implicit val timeout: Timeout = Timeout(isosurfaceTimeout)
  // Bounded round-robin pool: caps the number of concurrently running extractions.
  private val actor: ActorRef = actorSystem.actorOf(
    RoundRobinPool(isosurfaceActorPoolSize).props(Props(new IsosurfaceActor(this, timeout.duration))))
  /** Routes the request through the actor pool; exceptions become Failure boxes. */
  def requestIsosurfaceViaActor(request: IsosurfaceRequest): Fox[(Array[Float], List[Int])] =
    actor.ask(request).mapTo[Box[(Array[Float], List[Int])]].recover {
      case e: Exception => Failure(e.getMessage)
    }
  /** Dispatches on the layer's element class so marching cubes runs over the
    * matching primitive type (uint8/16/32/64 → Byte/Short/Int/Long).
    */
  def requestIsosurface(request: IsosurfaceRequest): Fox[(Array[Float], List[Int])] =
    request.dataLayer.elementClass match {
      case ElementClass.uint8 =>
        generateIsosurfaceImpl[Byte, ByteBuffer](request,
                                                 DataTypeFunctors[Byte, ByteBuffer](identity, _.get(_), _.toByte))
      case ElementClass.uint16 =>
        generateIsosurfaceImpl[Short, ShortBuffer](
          request,
          DataTypeFunctors[Short, ShortBuffer](_.asShortBuffer, _.get(_), _.toShort))
      case ElementClass.uint32 =>
        generateIsosurfaceImpl[Int, IntBuffer](request,
                                               DataTypeFunctors[Int, IntBuffer](_.asIntBuffer, _.get(_), _.toInt))
      case ElementClass.uint64 =>
        generateIsosurfaceImpl[Long, LongBuffer](request,
                                                 DataTypeFunctors[Long, LongBuffer](_.asLongBuffer, _.get(_), identity))
    }
  private def generateIsosurfaceImpl[T: ClassTag, B <: Buffer](
      request: IsosurfaceRequest,
      dataTypeFunctors: DataTypeFunctors[T, B]): Fox[(Array[Float], List[Int])] = {
    // Applies a JSON mapping (if requested) to already-typed data.
    def applyMapping(data: Array[T]): Fox[Array[T]] =
      request.mapping match {
        case Some(mappingName) =>
          request.mappingType match {
            case Some("JSON") =>
              mappingService.applyMapping(
                DataServiceMappingRequest(request.dataSource.orNull, request.dataLayer, mappingName),
                data,
                dataTypeFunctors.fromLong)
            case _ => Fox.successful(data)
          }
        case _ =>
          Fox.successful(data)
      }
    // Applies an HDF5 agglomerate mapping (if requested) on the raw byte data,
    // before it is reinterpreted as typed values.
    def applyAgglomerate(data: Array[Byte]): Array[Byte] =
      request.mapping match {
        case Some(_) =>
          request.mappingType match {
            case Some("HDF5") =>
              val dataRequest = DataServiceDataRequest(
                request.dataSource.orNull,
                request.dataLayer,
                request.mapping,
                request.cuboid,
                DataServiceRequestSettings(halfByte = false, request.mapping, None),
                request.subsamplingStrides
              )
              agglomerateService.applyAgglomerate(dataRequest)(data)
            case _ =>
              data
          }
        case _ =>
          data
      }
    // Reinterprets the little-endian raw bytes as an array of element type T.
    def convertData(data: Array[Byte]): Array[T] = {
      val srcBuffer = dataTypeFunctors.getTypedBufferFn(ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN))
      srcBuffer.rewind()
      val dstArray = Array.ofDim[T](srcBuffer.remaining())
      dataTypeFunctors.copyDataFn(srcBuffer, dstArray)
      dstArray
    }
    // Linear scan of the bounding box (x-fastest layout) for the segment id.
    // NOTE(review): `return true` inside the for-comprehension closure is a
    // nonlocal return (implemented via an internal exception); it works here
    // but `exists` would express the early exit more directly.
    def subVolumeContainsSegmentId[T](data: Array[T],
                                      dataDimensions: Vec3Int,
                                      boundingBox: BoundingBox,
                                      segmentId: T): Boolean = {
      for {
        x <- boundingBox.topLeft.x until boundingBox.bottomRight.x
        y <- boundingBox.topLeft.y until boundingBox.bottomRight.y
        z <- boundingBox.topLeft.z until boundingBox.bottomRight.z
      } {
        val voxelOffset = x + y * dataDimensions.x + z * dataDimensions.x * dataDimensions.y
        if (data(voxelOffset) == segmentId) return true
      }
      false
    }
    // Returns the indices (0..5) of the six subvolume faces on which the
    // segment is present, i.e. directions where the surface continues.
    def findNeighbors[T](data: Array[T], dataDimensions: Vec3Int, segmentId: T): List[Int] = {
      val x = dataDimensions.x - 1
      val y = dataDimensions.y - 1
      val z = dataDimensions.z - 1
      val front_xy = BoundingBox(Vec3Int(0, 0, 0), x, y, 1)
      val front_xz = BoundingBox(Vec3Int(0, 0, 0), x, 1, z)
      val front_yz = BoundingBox(Vec3Int(0, 0, 0), 1, y, z)
      val back_xy = BoundingBox(Vec3Int(0, 0, z), x, y, 1)
      val back_xz = BoundingBox(Vec3Int(0, y, 0), x, 1, z)
      val back_yz = BoundingBox(Vec3Int(x, 0, 0), 1, y, z)
      val surfaceBoundingBoxes = List(front_xy, front_xz, front_yz, back_xy, back_xz, back_yz)
      surfaceBoundingBoxes.zipWithIndex.filter {
        case (surfaceBoundingBox, index) =>
          subVolumeContainsSegmentId(data, dataDimensions, surfaceBoundingBox, segmentId)
      }.map {
        case (surfaceBoundingBox, index) => index
      }
    }
    val cuboid = request.cuboid
    val subsamplingStrides =
      Vec3Double(request.subsamplingStrides.x, request.subsamplingStrides.y, request.subsamplingStrides.z)
    val dataRequest = DataServiceDataRequest(request.dataSource.orNull,
                                             request.dataLayer,
                                             request.mapping,
                                             cuboid,
                                             DataServiceRequestSettings.default,
                                             request.subsamplingStrides)
    // Dimensions of the loaded (stride-subsampled) volume.
    val dataDimensions = Vec3Int(
      math.ceil(cuboid.width / subsamplingStrides.x).toInt,
      math.ceil(cuboid.height / subsamplingStrides.y).toInt,
      math.ceil(cuboid.depth / subsamplingStrides.z).toInt
    )
    val offset = Vec3Double(cuboid.topLeft.x, cuboid.topLeft.y, cuboid.topLeft.z)
    val scale = Vec3Double(cuboid.topLeft.resolution) * request.scale
    val typedSegmentId = dataTypeFunctors.fromLong(request.segmentId)
    // Marching cubes appends emitted vertices into this shared buffer.
    val vertexBuffer = mutable.ArrayBuffer[Vec3Double]()
    for {
      data <- binaryDataService.handleDataRequest(dataRequest)
      agglomerateMappedData = applyAgglomerate(data)
      typedData = convertData(agglomerateMappedData)
      mappedData <- applyMapping(typedData)
      mappedSegmentId <- applyMapping(Array(typedSegmentId)).map(_.head)
      neighbors = findNeighbors(mappedData, dataDimensions, mappedSegmentId)
    } yield {
      // Process the volume in 32-voxel chunks; extents of 33 give a one-voxel
      // overlap between adjacent chunks — presumably so surfaces remain
      // continuous across chunk borders (TODO confirm against MarchingCubes).
      for {
        x <- 0 until dataDimensions.x by 32
        y <- 0 until dataDimensions.y by 32
        z <- 0 until dataDimensions.z by 32
      } {
        val boundingBox = BoundingBox(Vec3Int(x, y, z),
                                      math.min(dataDimensions.x - x, 33),
                                      math.min(dataDimensions.y - y, 33),
                                      math.min(dataDimensions.z - z, 33))
        if (subVolumeContainsSegmentId(mappedData, dataDimensions, boundingBox, mappedSegmentId)) {
          MarchingCubes.marchingCubes[T](mappedData,
                                         dataDimensions,
                                         boundingBox,
                                         mappedSegmentId,
                                         subsamplingStrides,
                                         offset,
                                         scale,
                                         vertexBuffer)
        }
      }
      (vertexBuffer.flatMap(_.toList.map(_.toFloat)).toArray, neighbors)
    }
  }
}
| scalableminds/webknossos | webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/IsosurfaceService.scala | Scala | agpl-3.0 | 9,265 |
package cpup.poland.runtime.userdata
import cpup.poland.runtime.PObject
/** Userdata wrapper around an arbitrary JVM value. */
case class JavaObject(jobj: Any) extends Userdata {
  // Identity derives from the wrapped value's class name and hash code,
  // independent of the owning PObject.
  override def objID(obj: PObject) = (jobj.getClass.getName, jobj.hashCode).toString
  override def toString = jobj.toString
}
/** Null userdata marker: behaves as nil but renders as "null". */
trait TNull extends TNil {
  override def toString = "null"
}
/** Singleton null value. */
object PNull extends TNull
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
import scala.scalajs._
/** `java.util.ArrayList` for Scala.js, backed by a `js.Array`. */
class ArrayList[E] private (private[ArrayList] val inner: js.Array[E])
    extends AbstractList[E] with RandomAccess with Cloneable with Serializable {
  self =>

  def this(initialCapacity: Int) = {
    this(new js.Array[E])
    if (initialCapacity < 0)
      throw new IllegalArgumentException
  }

  def this() =
    this(new js.Array[E])

  def this(c: Collection[_ <: E]) = {
    this()
    addAll(c)
  }

  def trimToSize(): Unit = {
    // We ignore this as js.Array doesn't support explicit pre-allocation
  }

  def ensureCapacity(minCapacity: Int): Unit = {
    // We ignore this as js.Array doesn't support explicit pre-allocation
  }

  def size(): Int =
    inner.length

  override def clone(): AnyRef =
    new ArrayList(inner.jsSlice(0))

  def get(index: Int): E = {
    checkIndexInBounds(index)
    inner(index)
  }

  override def set(index: Int, element: E): E = {
    val e = get(index)
    inner(index) = element
    e
  }

  override def add(e: E): Boolean = {
    inner += e
    true
  }

  override def add(index: Int, element: E): Unit = {
    checkIndexOnBounds(index)
    inner.insert(index, element)
  }

  override def remove(index: Int): E = {
    checkIndexInBounds(index)
    inner.remove(index)
  }

  override def clear(): Unit =
    inner.clear()

  override def addAll(index: Int, c: Collection[_ <: E]): Boolean = {
    c match {
      case other: ArrayList[_] =>
        /* Validate the insertion point explicitly: js.Array.splice clamps an
         * out-of-range start index (per the ECMAScript spec) instead of
         * throwing the IndexOutOfBoundsException required by the
         * java.util.List.addAll(int, Collection) contract. The generic path
         * (super.addAll) already performs this check.
         */
        checkIndexOnBounds(index)
        inner.splice(index, 0, other.inner.toSeq: _*)
        other.size > 0
      case _ => super.addAll(index, c)
    }
  }

  override protected def removeRange(fromIndex: Int, toIndex: Int): Unit =
    inner.splice(fromIndex, toIndex - fromIndex)
}
| nicolasstucki/scala-js | javalib/src/main/scala/java/util/ArrayList.scala | Scala | apache-2.0 | 1,974 |
package scalads
import sun.font.TrueTypeFont
/**
* @author Bryce Anderson
* Created on 6/9/13
*/
package object core {
  // A projection of an entity property path; `clazz` optionally names the
  // class the value should be materialized as.
  // NOTE(review): null default for `clazz` — an Option would be safer, but the
  // signature is kept for interface stability.
  case class Projection(path: List[String], clazz: Class[_] = null)
  /** Sort direction for query ordering: ascending or descending. */
  sealed trait SortDirection
  object SortDirection {
    case object ASC extends SortDirection
    case object DSC extends SortDirection
  }
  /** Comparison operators available in query filters. */
  sealed trait Operation
  object Operation {
    case object GT extends Operation
    case object LT extends Operation
    case object GE extends Operation
    case object LE extends Operation
    case object EQ extends Operation
    case object NE extends Operation
  }
  /** Logical connective used to combine filter expressions. */
  sealed trait JoinOperation
  object JoinOperation {
    case object AND extends JoinOperation
    case object OR extends JoinOperation
  }
}
| bryce-anderson/scalads | macros/src/main/scala/scalads/core/package.scala | Scala | apache-2.0 | 761 |
package com.twitter.finagle.ssl.server
import com.twitter.finagle.ssl.{Engine, SslConfigurations}
/**
* This engine factory is a default JVM-based implementation, intended to provide
* coverage for a wide array of configurations.
*/
private[ssl] object JdkServerEngineFactory extends SslServerEngineFactory {

  /**
   * Creates a new [[Engine]] based on an [[SslServerConfiguration]].
   *
   * @param config A collection of parameters which the engine factory
   * should consider when creating the TLS server [[Engine]].
   *
   * @note [[ApplicationProtocols]] other than Unspecified are not supported.
   */
  def apply(config: SslServerConfiguration): Engine = {
    // The plain JDK engine cannot negotiate application protocols (e.g. ALPN),
    // so fail fast when any are configured.
    SslConfigurations.checkApplicationProtocolsNotSupported(
      "JdkServerEngineFactory", config.applicationProtocols)
    // Build an SSLContext from the configured key/trust material, then derive
    // a per-connection engine configured with the remaining settings.
    val sslContext = SslConfigurations.initializeSslContext(
      config.keyCredentials, config.trustCredentials)
    val engine = SslServerEngineFactory.createEngine(sslContext)
    SslServerEngineFactory.configureEngine(engine, config)
    engine
  }
}
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/server/JdkServerEngineFactory.scala | Scala | apache-2.0 | 1,054 |
package scaliper
import scaliper._
import spray.json._
import dispatch._
import dispatch.Defaults._
import scala.concurrent._
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
/** Sends a JSON representation of the run to an endpoint */
trait EndpointReport extends Benchmarks {
  // Endpoint configuration is read once, at mix-in time, from the
  // scaliper.* keys of the application config.
  private val (applicationName, endpoint, failIfError) = {
    val conf = ConfigFactory.load()
    (conf.getString("scaliper.application.name"),
      conf.getString("scaliper.report.endpoint.url"),
      conf.getBoolean("scaliper.report.endpoint.failIfError"))
  }
  // Posts the run to the configured endpoint, then continues the stacked-trait
  // report chain via super.
  abstract override def report(run: Run): Unit = {
    EndpointReport.report(NamedRun(applicationName, run), endpoint, failIfError)
    super.report(run)
  }
}
object EndpointReport {
  // Maximum time to wait for the endpoint to acknowledge a report.
  private val ReportTimeout = 1.second

  /** POSTs the run as JSON to `endpoint`, blocking up to [[ReportTimeout]].
    *
    * Blocking here is acceptable: reporting happens once, at the edge of a
    * benchmark run, not on a hot path.
    *
    * @param failIfError when true a delivery failure aborts the run,
    *                    otherwise it is printed and ignored
    */
  def report(r: NamedRun, endpoint: String, failIfError: Boolean): Unit = {
    val svc = url(endpoint).setContentType("application/json", "UTF-8") << r.toJson.compactPrint
    try {
      // `1.second` avoids the deprecated postfix-operator form (`1 second`),
      // which requires scala.language.postfixOps.
      Await.result(Http(svc OK as.String), ReportTimeout)
    } catch {
      case e: Exception =>
        if (failIfError) throw new Exception(s"Failed to send run information to $endpoint").initCause(e)
        else { println(s"Reporting to endpoint $endpoint failed: $e") }
    }
  }
}
| azavea/scaliper | src/main/scala/scaliper/EndpointReport.scala | Scala | apache-2.0 | 1,226 |
package org.json4s
/** Allocation-free (value class) wrapper marking a present value, Option-like. */
final class SomeValue[A](val get: A) extends AnyVal {
  // Mirrors Option#isEmpty: a SomeValue always holds a value.
  def isEmpty: Boolean = false
}
| json4s/json4s | ast/shared/src/main/scala-2.13-/org/json4s/SomeValue.scala | Scala | apache-2.0 | 107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
import org.apache.spark.SparkFunSuite
/** Unit tests for [[BitSet]]: get/set, cardinality, nextSetBit scanning, and
  * the xor/andNot/setUntil/clearUntil bulk operations, including operands of
  * differing lengths.
  */
class BitSetSuite extends SparkFunSuite {
  // Bits explicitly set must read back true; all others false.
  test("basic set and get") {
    val setBits = Seq(0, 9, 1, 10, 90, 96)
    val bitset = new BitSet(100)
    for (i <- 0 until 100) {
      assert(!bitset.get(i))
    }
    setBits.foreach(i => bitset.set(i))
    for (i <- 0 until 100) {
      if (setBits.contains(i)) {
        assert(bitset.get(i))
      } else {
        assert(!bitset.get(i))
      }
    }
    assert(bitset.cardinality() === setBits.size)
  }
  // Setting every bit must be observable and counted exactly.
  test("100% full bit set") {
    val bitset = new BitSet(10000)
    for (i <- 0 until 10000) {
      assert(!bitset.get(i))
      bitset.set(i)
    }
    for (i <- 0 until 10000) {
      assert(bitset.get(i))
    }
    assert(bitset.cardinality() === 10000)
  }
  // nextSetBit(i) returns the smallest set index >= i, or -1 past the end.
  test("nextSetBit") {
    val setBits = Seq(0, 9, 1, 10, 90, 96)
    val bitset = new BitSet(100)
    setBits.foreach(i => bitset.set(i))
    assert(bitset.nextSetBit(0) === 0)
    assert(bitset.nextSetBit(1) === 1)
    assert(bitset.nextSetBit(2) === 9)
    assert(bitset.nextSetBit(9) === 9)
    assert(bitset.nextSetBit(10) === 10)
    assert(bitset.nextSetBit(11) === 90)
    assert(bitset.nextSetBit(80) === 90)
    assert(bitset.nextSetBit(91) === 96)
    assert(bitset.nextSetBit(96) === 96)
    assert(bitset.nextSetBit(97) === -1)
  }
  // Symmetric difference where the left operand is shorter than the right.
  test( "xor len(bitsetX) < len(bitsetY)" ) {
    val setBitsX = Seq( 0, 2, 3, 37, 41 )
    val setBitsY = Seq( 0, 1, 3, 37, 38, 41, 85)
    val bitsetX = new BitSet(60)
    setBitsX.foreach( i => bitsetX.set(i))
    val bitsetY = new BitSet(100)
    setBitsY.foreach( i => bitsetY.set(i))
    val bitsetXor = bitsetX ^ bitsetY
    assert(bitsetXor.nextSetBit(0) === 1)
    assert(bitsetXor.nextSetBit(1) === 1)
    assert(bitsetXor.nextSetBit(2) === 2)
    assert(bitsetXor.nextSetBit(3) === 38)
    assert(bitsetXor.nextSetBit(38) === 38)
    assert(bitsetXor.nextSetBit(39) === 85)
    assert(bitsetXor.nextSetBit(42) === 85)
    assert(bitsetXor.nextSetBit(85) === 85)
    assert(bitsetXor.nextSetBit(86) === -1)
  }
  // Symmetric difference where the left operand is longer than the right.
  test( "xor len(bitsetX) > len(bitsetY)" ) {
    val setBitsX = Seq( 0, 1, 3, 37, 38, 41, 85)
    val setBitsY = Seq( 0, 2, 3, 37, 41)
    val bitsetX = new BitSet(100)
    setBitsX.foreach( i => bitsetX.set(i))
    val bitsetY = new BitSet(60)
    setBitsY.foreach( i => bitsetY.set(i))
    val bitsetXor = bitsetX ^ bitsetY
    assert(bitsetXor.nextSetBit(0) === 1)
    assert(bitsetXor.nextSetBit(1) === 1)
    assert(bitsetXor.nextSetBit(2) === 2)
    assert(bitsetXor.nextSetBit(3) === 38)
    assert(bitsetXor.nextSetBit(38) === 38)
    assert(bitsetXor.nextSetBit(39) === 85)
    assert(bitsetXor.nextSetBit(42) === 85)
    assert(bitsetXor.nextSetBit(85) === 85)
    assert(bitsetXor.nextSetBit(86) === -1)
  }
  // Set difference (x and not y) where the left operand is shorter.
  test( "andNot len(bitsetX) < len(bitsetY)" ) {
    val setBitsX = Seq( 0, 2, 3, 37, 41, 48 )
    val setBitsY = Seq( 0, 1, 3, 37, 38, 41, 85)
    val bitsetX = new BitSet(60)
    setBitsX.foreach( i => bitsetX.set(i))
    val bitsetY = new BitSet(100)
    setBitsY.foreach( i => bitsetY.set(i))
    val bitsetDiff = bitsetX.andNot( bitsetY )
    assert(bitsetDiff.nextSetBit(0) === 2)
    assert(bitsetDiff.nextSetBit(1) === 2)
    assert(bitsetDiff.nextSetBit(2) === 2)
    assert(bitsetDiff.nextSetBit(3) === 48)
    assert(bitsetDiff.nextSetBit(48) === 48)
    assert(bitsetDiff.nextSetBit(49) === -1)
    assert(bitsetDiff.nextSetBit(65) === -1)
  }
  // Set difference (x and not y) where the left operand is longer.
  test( "andNot len(bitsetX) > len(bitsetY)" ) {
    val setBitsX = Seq( 0, 1, 3, 37, 38, 41, 85)
    val setBitsY = Seq( 0, 2, 3, 37, 41, 48 )
    val bitsetX = new BitSet(100)
    setBitsX.foreach( i => bitsetX.set(i))
    val bitsetY = new BitSet(60)
    setBitsY.foreach( i => bitsetY.set(i))
    val bitsetDiff = bitsetX.andNot( bitsetY )
    assert(bitsetDiff.nextSetBit(0) === 1)
    assert(bitsetDiff.nextSetBit(1) === 1)
    assert(bitsetDiff.nextSetBit(2) === 38)
    assert(bitsetDiff.nextSetBit(3) === 38)
    assert(bitsetDiff.nextSetBit(38) === 38)
    assert(bitsetDiff.nextSetBit(39) === 85)
    assert(bitsetDiff.nextSetBit(85) === 85)
    assert(bitsetDiff.nextSetBit(86) === -1)
  }
  // setUntil/clearUntil affect exactly the prefix [0, bound).
  test( "[gs]etUntil" ) {
    val bitSet = new BitSet(100)
    bitSet.setUntil(bitSet.capacity)
    (0 until bitSet.capacity).foreach { i =>
      assert(bitSet.get(i))
    }
    bitSet.clearUntil(bitSet.capacity)
    (0 until bitSet.capacity).foreach { i =>
      assert(!bitSet.get(i))
    }
    val setUntil = bitSet.capacity / 2
    bitSet.setUntil(setUntil)
    val clearUntil = setUntil / 2
    bitSet.clearUntil(clearUntil)
    (0 until clearUntil).foreach { i =>
      assert(!bitSet.get(i))
    }
    (clearUntil until setUntil).foreach { i =>
      assert(bitSet.get(i))
    }
    (setUntil until bitSet.capacity).foreach { i =>
      assert(!bitSet.get(i))
    }
  }
}
| esi-mineset/spark | core/src/test/scala/org/apache/spark/util/collection/BitSetSuite.scala | Scala | apache-2.0 | 5,657 |
/** Entry point: runs each WeakHashSetTest check in sequence; any failing
  * assertion aborts the run.
  */
object Test {
  def main(args: Array[String]) {
    val test = scala.reflect.internal.util.WeakHashSetTest
    test.checkEmpty
    test.checkPlusEquals
    test.checkPlusEqualsCollisions
    test.checkRehashing
    test.checkRehashCollisions
    test.checkFindOrUpdate
    test.checkMinusEquals
    test.checkMinusEqualsCollisions
    test.checkClear
    test.checkIterator
    test.checkIteratorCollisions
    // This test is commented out because it relies on gc behavior which isn't reliable enough in an automated environment
    // test.checkRemoveUnreferencedObjects
  }
}
// put the main test object in the same package as WeakHashSet because
// it uses the package private "diagnostics" method
package scala.reflect.internal.util {
object WeakHashSetTest {
// a class guaranteed to provide hash collisions
case class Collider(x : String) extends Comparable[Collider] with Serializable {
override def hashCode = 0
def compareTo(y : Collider) = this.x compareTo y.x
}
// basic emptiness check
def checkEmpty {
val hs = new WeakHashSet[String]()
assert(hs.size == 0)
hs.diagnostics.fullyValidate
}
// make sure += works
def checkPlusEquals {
val hs = new WeakHashSet[String]()
val elements = List("hello", "goodbye")
elements foreach (hs += _)
assert(hs.size == 2)
assert(hs contains "hello")
assert(hs contains "goodbye")
hs.diagnostics.fullyValidate
}
// make sure += works when there are collisions
def checkPlusEqualsCollisions {
val hs = new WeakHashSet[Collider]()
val elements = List("hello", "goodbye") map Collider
elements foreach (hs += _)
assert(hs.size == 2)
assert(hs contains Collider("hello"))
assert(hs contains Collider("goodbye"))
hs.diagnostics.fullyValidate
}
// add a large number of elements to force rehashing and then validate
def checkRehashing {
val size = 200
val hs = new WeakHashSet[String]()
val elements = (0 until size).toList map ("a" + _)
elements foreach (hs += _)
elements foreach {i => assert(hs contains i)}
hs.diagnostics.fullyValidate
}
// make sure rehashing works properly when the set is rehashed
def checkRehashCollisions {
val size = 200
val hs = new WeakHashSet[Collider]()
val elements = (0 until size).toList map {x => Collider("a" + x)}
elements foreach (hs += _)
elements foreach {i => assert(hs contains i)}
hs.diagnostics.fullyValidate
}
// test that unreferenced objects are removed
// not run in an automated environment because gc behavior can't be relied on
def checkRemoveUnreferencedObjects {
val size = 200
val hs = new WeakHashSet[Collider]()
val elements = (0 until size).toList map {x => Collider("a" + x)}
elements foreach (hs += _)
// don't throw the following into a retained collection so gc
// can remove them
for (i <- 0 until size) {
hs += Collider("b" + i)
}
System.gc()
Thread.sleep(1000)
assert(hs.size == 200)
elements foreach {i => assert(hs contains i)}
for (i <- 0 until size) {
assert(!(hs contains Collider("b" + i)))
}
hs.diagnostics.fullyValidate
}
// make sure findOrUpdate returns the originally entered element
def checkFindOrUpdate {
val size = 200
val hs = new WeakHashSet[Collider]()
val elements = (0 until size).toList map {x => Collider("a" + x)}
elements foreach {x => assert(hs findEntryOrUpdate x eq x)}
for (i <- 0 until size) {
// when we do a lookup the result should be the same reference we
// original put in
assert(hs findEntryOrUpdate(Collider("a" + i)) eq elements(i))
}
hs.diagnostics.fullyValidate
}
// check -= functionality
def checkMinusEquals {
val hs = new WeakHashSet[String]()
val elements = List("hello", "goodbye")
elements foreach (hs += _)
hs -= "goodbye"
assert(hs.size == 1)
assert(hs contains "hello")
assert(!(hs contains "goodbye"))
hs.diagnostics.fullyValidate
}
// check -= when there are collisions
def checkMinusEqualsCollisions {
val hs = new WeakHashSet[Collider]
val elements = List(Collider("hello"), Collider("goodbye"))
elements foreach (hs += _)
hs -= Collider("goodbye")
assert(hs.size == 1)
assert(hs contains Collider("hello"))
assert(!(hs contains Collider("goodbye")))
hs -= Collider("hello")
assert(hs.size == 0)
assert(!(hs contains Collider("hello")))
hs.diagnostics.fullyValidate
}
// check that the clear method actually cleans everything
def checkClear {
val size = 200
val hs = new WeakHashSet[String]()
val elements = (0 until size).toList map ("a" + _)
elements foreach (hs += _)
hs.clear()
assert(hs.size == 0)
elements foreach {i => assert(!(hs contains i))}
hs.diagnostics.fullyValidate
}
// check that the iterator covers all the contents
def checkIterator {
val hs = new WeakHashSet[String]()
val elements = (0 until 20).toList map ("a" + _)
elements foreach (hs += _)
assert(elements.iterator.toList.sorted == elements.sorted)
hs.diagnostics.fullyValidate
}
// check that the iterator covers all the contents even when there is a collision
def checkIteratorCollisions {
val hs = new WeakHashSet[Collider]
val elements = (0 until 20).toList map {x => Collider("a" + x)}
elements foreach (hs += _)
assert(elements.iterator.toList.sorted == elements.sorted)
hs.diagnostics.fullyValidate
}
}
}
| felixmulder/scala | test/files/run/WeakHashSetTest.scala | Scala | bsd-3-clause | 5,805 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.logging
import java.io.{File, FileOutputStream, FilenameFilter, OutputStream}
import java.nio.charset.Charset
import java.text.SimpleDateFormat
import java.util.{Calendar, Date, logging => javalog}
import com.twitter.util.{HandleSignal, Return, StorageUnit, Time, Try}
/** When (or whether) a [[FileHandler]] rolls its logfile. */
sealed abstract class Policy
object Policy {
  // Never roll automatically.
  case object Never extends Policy
  // Roll at the top of every hour.
  case object Hourly extends Policy
  // Roll at midnight every day.
  case object Daily extends Policy
  // Roll at midnight on the given java.util.Calendar day-of-week (1-7).
  case class Weekly(dayOfWeek: Int) extends Policy
  // Reopen the logfile when a HUP signal is received (logrotate-style).
  case object SigHup extends Policy
  // Roll once the file exceeds the given size.
  case class MaxSize(size: StorageUnit) extends Policy

  private[this] val singletonPolicyNames: Map[String, Policy] =
    Map("never" -> Never, "hourly" -> Hourly, "daily" -> Daily, "sighup" -> SigHup)

  // Regex object that matches "Weekly(n)" and extracts the `dayOfWeek` number.
  private[this] val weeklyRegex = """(?i)weekly\\(([1-7]+)\\)""".r

  /**
   * Parse a string into a Policy object. Parsing rules are as follows:
   *
   * - Case-insensitive names of singleton Policy objects (e.g. Never, Hourly,
   *   Daily) are parsed into their corresponding objects.
   * - "Weekly(n)" is parsed into `Weekly` objects with `n` as the day-of-week
   *   integer.
   * - util-style data size strings (e.g. 3.megabytes, 1.gigabyte) are
   *   parsed into `StorageUnit` objects and used to produce `MaxSize` policies.
   *   See `StorageUnit.parse(String)` for more details.
   */
  def parse(s: String): Policy =
    (s, singletonPolicyNames.get(s.toLowerCase), Try(StorageUnit.parse(s.toLowerCase))) match {
      case (weeklyRegex(dayOfWeek), _, _) => Weekly(dayOfWeek.toInt)
      case (_, Some(singleton), _) => singleton
      case (_, _, Return(storageUnit)) => MaxSize(storageUnit)
      case _ => throw new Exception("Invalid log roll policy: " + s)
    }
}
object FileHandler {
  // All log output is encoded as UTF-8.
  val UTF8 = Charset.forName("UTF-8")

  /**
   * Generates a HandlerFactory that returns a FileHandler
   *
   * @param filename
   * Filename to log to.
   *
   * @param rollPolicy
   * When to roll the logfile.
   *
   * @param append
   * Append to an existing logfile, or truncate it?
   *
   * @param rotateCount
   * How many rotated logfiles to keep around, maximum. -1 means to keep them all.
   */
  def apply(
    filename: String,
    rollPolicy: Policy = Policy.Never,
    append: Boolean = true,
    rotateCount: Int = -1,
    formatter: Formatter = new Formatter(),
    level: Option[Level] = None
  ) = () => new FileHandler(filename, rollPolicy, append, rotateCount, formatter, level)
}
/**
* A log handler that writes log entries into a file, and rolls this file
* at a requested interval (hourly, daily, or weekly).
*/
class FileHandler(
path: String,
rollPolicy: Policy,
val append: Boolean,
rotateCount: Int,
formatter: Formatter,
level: Option[Level])
extends Handler(formatter, level) {
// This converts relative paths to absolute paths, as expected
val (filename, name) = {
val f = new File(path)
(f.getAbsolutePath, f.getName)
}
val (filenamePrefix, filenameSuffix) = {
val n = filename.lastIndexOf('.')
if (n > 0) {
(filename.substring(0, n), filename.substring(n))
} else {
(filename, "")
}
}
// Thread-safety is guarded by synchronized on this
private var stream: OutputStream = null
@volatile private var openTime: Long = 0
// Thread-safety is guarded by synchronized on this
private var nextRollTime: Option[Long] = None
// Thread-safety is guarded by synchronized on this
private var bytesWrittenToFile: Long = 0
private val maxFileSize: Option[StorageUnit] = rollPolicy match {
case Policy.MaxSize(size) => Some(size)
case _ => None
}
openLog()
// If nextRollTime.isDefined by openLog(), then it will always remain isDefined.
// This allows us to avoid volatile reads in the publish method.
private val examineRollTime = nextRollTime.isDefined
if (rollPolicy == Policy.SigHup) {
HandleSignal("HUP") { signal =>
val oldStream = stream
synchronized {
stream = openStream()
}
try {
oldStream.close()
} catch {
case e: Throwable => handleThrowable(e)
}
}
}
def flush() {
synchronized {
stream.flush()
}
}
def close() {
synchronized {
flush()
try {
stream.close()
} catch {
case e: Throwable => handleThrowable(e)
}
}
}
private def openStream(): OutputStream = {
val dir = new File(filename).getParentFile
if ((dir ne null) && !dir.exists) dir.mkdirs
new FileOutputStream(filename, append)
}
private def openLog() {
synchronized {
stream = openStream()
openTime = Time.now.inMilliseconds
nextRollTime = computeNextRollTime(openTime)
bytesWrittenToFile = 0
}
}
/**
* Compute the suffix for a rolled logfile, based on the roll policy.
*/
def timeSuffix(date: Date) = {
val dateFormat = rollPolicy match {
case Policy.Never => new SimpleDateFormat("yyyy")
case Policy.SigHup => new SimpleDateFormat("yyyy")
case Policy.Hourly => new SimpleDateFormat("yyyyMMdd-HH")
case Policy.Daily => new SimpleDateFormat("yyyyMMdd")
case Policy.Weekly(_) => new SimpleDateFormat("yyyyMMdd")
case Policy.MaxSize(_) => new SimpleDateFormat("yyyyMMdd-HHmmss")
}
dateFormat.setCalendar(formatter.calendar)
dateFormat.format(date)
}
/**
* Return the time (in absolute milliseconds) of the next desired
* logfile roll.
*/
def computeNextRollTime(now: Long): Option[Long] = {
lazy val next = {
val n = formatter.calendar.clone.asInstanceOf[Calendar]
n.setTimeInMillis(now)
n.set(Calendar.MILLISECOND, 0)
n.set(Calendar.SECOND, 0)
n.set(Calendar.MINUTE, 0)
n
}
val rv = rollPolicy match {
case Policy.MaxSize(_) | Policy.Never | Policy.SigHup => None
case Policy.Hourly => {
next.add(Calendar.HOUR_OF_DAY, 1)
Some(next)
}
case Policy.Daily => {
next.set(Calendar.HOUR_OF_DAY, 0)
next.add(Calendar.DAY_OF_MONTH, 1)
Some(next)
}
case Policy.Weekly(weekday) => {
next.set(Calendar.HOUR_OF_DAY, 0)
do {
next.add(Calendar.DAY_OF_MONTH, 1)
} while (next.get(Calendar.DAY_OF_WEEK) != weekday)
Some(next)
}
}
rv map { _.getTimeInMillis }
}
/**
* Delete files when "too many" have accumulated.
* This duplicates logrotate's "rotate count" option.
*/
private def removeOldFiles() {
if (rotateCount >= 0) {
// collect files which are not `filename`, but which share the prefix/suffix
val prefixName = new File(filenamePrefix).getName
val rotatedFiles =
new File(filename).getParentFile().listFiles(
new FilenameFilter {
def accept(f: File, fname: String): Boolean =
fname != name && fname.startsWith(prefixName) && fname.endsWith(filenameSuffix)
}
).sortBy(_.getName)
val toDeleteCount = math.max(0, rotatedFiles.length - rotateCount)
rotatedFiles.take(toDeleteCount).foreach(_.delete())
}
}
  /**
   * Roll the current logfile: close it, rename it to its dated archive name
   * (derived from the time it was opened), reopen a fresh logfile, and prune
   * old archives. Synchronized so only one thread rolls at a time.
   */
  def roll() = synchronized {
    stream.close()
    val newFilename = filenamePrefix + "-" + timeSuffix(new Date(openTime)) + filenameSuffix
    // NOTE(review): renameTo's boolean result is ignored; a failed rename is silent.
    new File(filename).renameTo(new File(newFilename))
    openLog()
    removeOldFiles()
  }
  /**
   * Write one log record to the file, rolling first if the roll time has
   * passed or the size limit would be exceeded. Any failure is routed to
   * handleThrowable rather than propagated to the caller.
   */
  def publish(record: javalog.LogRecord) {
    try {
      val formattedLine = getFormatter.format(record)
      val formattedBytes = formattedLine.getBytes(FileHandler.UTF8)
      val lineSizeBytes = formattedBytes.length
      if (examineRollTime) {
        // Only allow a single thread at a time to do a roll
        synchronized {
          nextRollTime foreach { time =>
            if (Time.now.inMilliseconds > time) roll()
          }
        }
      }
      // NOTE(review): the size check, time check and write are three separate
      // synchronized blocks, so the size observed here may be stale by the
      // time the write happens; the file can overshoot `size` slightly.
      maxFileSize foreach { size =>
        synchronized {
          if (bytesWrittenToFile + lineSizeBytes > size.bytes) roll()
        }
      }
      synchronized {
        stream.write(formattedBytes)
        stream.flush()
        bytesWrittenToFile += lineSizeBytes
      }
    } catch {
      // NOTE(review): catches Throwable, including fatal errors — confirm intended.
      case e: Throwable => handleThrowable(e)
    }
  }
  // Last-resort error handler: the logging system itself failed, so write the
  // stack trace (up to 30 frames) straight to stderr.
  private def handleThrowable(e: Throwable) {
    System.err.println(Formatter.formatStackTrace(e, 30).mkString("\\n"))
  }
}
| luciferous/util | util-logging/src/main/scala/com/twitter/logging/FileHandler.scala | Scala | apache-2.0 | 8,977 |
package org.lnu.is.integration.cases.person.paper
import java.util.UUID
import scala.concurrent.duration.DurationInt
import io.gatling.core.Predef.checkBuilder2Check
import io.gatling.core.Predef.findCheckBuilder2ValidatorCheckBuilder
import io.gatling.core.Predef.exec
import io.gatling.core.Predef.stringToExpression
import io.gatling.core.Predef.validatorCheckBuilder2CheckBuilder
import io.gatling.core.Predef.value2Expression
import io.gatling.core.Predef.value2Success
import io.gatling.http.Predef.ELFileBody
import io.gatling.http.Predef.http
import io.gatling.http.Predef.jsonPath
import io.gatling.http.Predef.status
import org.lnu.is.integration.config.ComplexTest
import io.gatling.core.structure.ChainBuilder
import org.lnu.is.integration.config.helper.FirstName
import org.lnu.is.integration.config.helper.FatherName
import org.lnu.is.integration.config.helper.LastName
import org.lnu.is.integration.config.helper.Photo
import org.lnu.is.integration.config.helper.DocSeries
import org.lnu.is.integration.config.helper.BirthPlace
import java.util.Random
object PersonPaperIntegrationTest extends ComplexTest {
  // Full CRUD scenario for a person's papers: seed session data (init),
  // create the parent person (before), then POST / GET / PUT / GET / DELETE
  // the paper — verifying status codes and the updated mark — and finally
  // delete the person (after). `personId`/`personPaperId` are Gatling EL
  // session keys saved from the POST responses.
  val testCase = exec(init)
    .exec(before)
    .exec(http("Post Person Paper")
      .post("/persons/${personId}/papers")
      .basicAuth("admin", "nimda")
      .header("Content-Type", "application/json")
      .body(ELFileBody("data/person/paper/post.json"))
      .asJSON
      .check(status.is(201))
      .check(jsonPath("$.id").find.saveAs("personPaperId")))
    .exec(http("Get Person Paper")
      .get("/persons/${personId}/papers/${personPaperId}")
      .basicAuth("admin", "nimda")
      .check(status.is(200)))
    .exec(http("Update Person Paper")
      .put("/persons/${personId}/papers/${personPaperId}")
      .basicAuth("admin", "nimda")
      .header("Content-Type", "application/json")
      .body(ELFileBody("data/person/paper/put.json"))
      .asJSON
      .check(status.is(200)))
    .exec(http("Get Person Paper")
      .get("/persons/${personId}/papers/${personPaperId}")
      .basicAuth("admin", "nimda")
      .check(status.is(200))
      .check(jsonPath("$.mark").find.is("4.8")))
    .exec(http("Delete Person Paper")
      .delete("/persons/${personId}/papers/${personPaperId}")
      .basicAuth("admin", "nimda")
      .check(status.is(204)))
    .exec(after)
def after(): ChainBuilder = {
exec(http("Delete Person")
.delete("/persons/${personId}")
.basicAuth("admin", "nimda")
.check(status.is(204)))
}
def before(): ChainBuilder = {
exec(http("Post Person")
.post("/persons")
.basicAuth("admin", "nimda")
.header("Content-Type", "application/json")
.body(ELFileBody("data/person/post.json"))
.asJSON
.check(status.is(201))
.check(jsonPath("$.id").find.saveAs("personId")))
}
def init(): ChainBuilder = {
exec(session => {
session
.set("person_idnum", UUID.randomUUID())
.set("person_firstname", FirstName.generate())
.set("person_fathername", FatherName.generate())
.set("person_lastname", LastName.generate())
.set("person_photo", Photo.generate())
.set("person_birthplace", BirthPlace.generate())
.set("person_docnum", new Random().nextLong())
.set("person_docseries", DocSeries.generate())
.set("docSeries", UUID.randomUUID())
.set("docNum", UUID.randomUUID())
.set("docDate", "2010-01-01")
.set("docIssued", UUID.randomUUID())
.set("docPin", UUID.randomUUID())
})
}
} | ifnul/ums-backend | is-lnu-integration/src/test/scala/org/lnu/is/integration/cases/person/paper/PersonPaperIntegrationTest.scala | Scala | apache-2.0 | 3,595 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.load
import org.apache.spark.Accumulator
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
object GlobalSortHelper {

  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  /**
   * Record the outcome of a load task.
   *
   * @param loadModel       Carbon load model instance
   * @param badRecordsAccum accumulator tracking the load state; it stays at 0
   *                        for a fully successful load and is incremented once
   *                        per task that encountered bad records (partial success)
   * @param hasBadRecord    whether this task encountered any bad records
   */
  def badRecordsLogger(loadModel: CarbonLoadModel,
      badRecordsAccum: Accumulator[Int], hasBadRecord: Boolean): Unit = {
    if (!hasBadRecord) {
      LOGGER.info("Data loading is successful for table " + loadModel.getTableName)
    } else {
      LOGGER.error("Data Load is partially success for table " + loadModel.getTableName)
      badRecordsAccum.add(1)
    }
  }
}
| manishgupta88/carbondata | integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala | Scala | apache-2.0 | 1,802 |
package opencl.ir
import lift.arithmetic.ArithExpr
import ir.ast._
package object pattern {
  /** Square tiling: tiles of `size` x `size`. */
  def Tile(size: ArithExpr): Lambda = Tile(size, size)

  /** Tile a 2D structure into `x` by `y` tiles: split the rows into groups of
    * `x`, then within each group split/transpose the columns into `y`-wide tiles. */
  def Tile(x: ArithExpr, y: ArithExpr) =
    Map(Map(Transpose()) o Split(y) o Transpose()) o Split(x)

  /** Inverse of 2D tiling: transpose tiles back and join them into the flat 2D structure. */
  def Untile2D() = Join() o Map(Map(Join()) o TransposeW())

  /** Inverse of 3D tiling: undo the nested transposes, then join each tiled dimension. */
  def Untile3D() = Map(Map(Join())) o Map(Join()) o Join() o Map(Map(Map(TransposeW()))) o Map(TransposeW()) o Map(Map(TransposeW()))

  /** Gather elements with a stride-`s` reordering. */
  def ReorderStride(s: ArithExpr) = Gather(reorderStride(s))
}
| lift-project/lift | src/main/opencl/ir/pattern/package.scala | Scala | mit | 515 |
import language.higherKinds
/**
 * Type class for monads: functors with `pure` (inherited from Applic) plus
 * `flatMap`. `map` and `ap` are derived from `flatMap` and `pure`.
 */
trait Monad[M[_]] extends Applic[M] {

  /** Sequence `m`, feeding its result(s) into `g`. */
  def flatMap[A,B](m: M[A])(g: A => M[B]): M[B]

  // map = flatMap + pure.
  override def map[A,B](functor: M[A])(g: A => B): M[B] =
    flatMap(functor)(a => pure(g(a)))

  // ap = flatMap + pure; note the value `applic` is sequenced before the
  // function `g` (the commented-out alternative below sequences `g` first,
  // which would give a different effect/element order).
  override def ap[A,B](applic: M[A])(g: M[A => B]): M[B] =
    flatMap(applic)(a => flatMap(g)(h => pure(h(a))))
    // flatMap(g)(f => flatMap(a)(x => pure(f(x))))
}
object Monad {

  /** Summon the Monad instance for M and delegate to its flatMap. */
  def flatMap[M[_]:Monad,A,B](m: M[A])(g: A => M[B]): M[B] =
    implicitly[Monad[M]].flatMap(m)(g)

  implicit object MonadOption extends Monad[Option] {
    override def pure[A](a: A): Option[A] = Some(a)
    // None stays None; Some(a) becomes g(a).
    override def flatMap[A,B](m: Option[A])(g: A => Option[B]): Option[B] =
      m.fold(Option.empty[B])(g)
  }

  implicit object MonadList extends Monad[List] {
    override def pure[A](a: A): List[A] = List(a)
    // Map each element through g and concatenate, preserving order.
    override def flatMap[A,B](as: List[A])(g: A => List[B]): List[B] =
      as.foldRight(List.empty[B])((head, acc) => g(head) ++ acc)
  }

  /** Flatten one layer of monadic nesting. */
  def join[M[_],A,B](mm: M[M[A]])(implicit m: Monad[M]): M[A] =
    m.flatMap(mm)(identity)

  /** Monadic left fold: thread the accumulator through `g` element by element. */
  def foldM[M[_],A,B](list: List[A])(v: B)(g: (B,A) => M[B])(implicit m: Monad[M]): M[B] =
    list match {
      case Nil => m.pure(v)
      case head :: tail => m.flatMap(g(v, head))(acc => foldM(tail)(acc)(g))
    }
}
| grzegorzbalcerek/scala-exercises | Monad/solutionMonad.scala | Scala | bsd-2-clause | 1,315 |
/*
* Copyright 2014 Kevin Herron
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.digitalpetri.modbus
import com.digitalpetri.modbus.FunctionCodes.UnsupportedFunction
/**
 * A ModbusPdu representing a request or response not supported by the library at this time.
 *
 * Carrying the raw function code lets callers report or reject the frame
 * even though its payload is not understood.
 *
 * @param functionCode The function code of the unsupported request.
 */
case class UnsupportedPdu(functionCode: UnsupportedFunction) extends ModbusPdu
| digitalpetri/scala-modbus-tcp | modbus-core/src/main/scala/com/digitalpetri/modbus/UnsupportedPdu.scala | Scala | apache-2.0 | 944 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.http.parser
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import org.http4s.blaze.http.parser.BaseExceptions.{BadMessage, InvalidState}
import org.http4s.blaze.http.parser.BodyAndHeaderParser.EndOfContent
import org.http4s.blaze.testkit.BlazeTestSuite
import scala.collection.mutable.ListBuffer
/** Unit tests for the blaze Http1ServerParser: request-line parsing, header
  * parsing, fixed-length and chunked bodies, fragmented (byte-at-a-time)
  * input, chunked trailers, and request-line/header size limits.
  */
class ServerParserSuite extends BlazeTestSuite {
  // Tests are written as raw HTTP text; encode it as ISO-8859-1 wire bytes.
  private implicit def strToBuffer(str: String): ByteBuffer =
    ByteBuffer.wrap(str.getBytes(StandardCharsets.ISO_8859_1))

  // Test double exposing the parser's protected hooks and recording what it saw.
  private class Parser(maxReq: Int = 1034, maxHeader: Int = 1024)
      extends Http1ServerParser(maxReq, maxHeader, 1) {

    val sb = new StringBuilder // accumulates decoded body characters
    val h = new ListBuffer[(String, String)] // accumulates parsed (name, value) headers

    def parseLine(s: ByteBuffer) = parseRequestLine(s)

    def parseheaders(s: ByteBuffer): Boolean = parseHeaders(s)

    // Delegates to parseContent, copying any produced bytes into `sb` without
    // disturbing the returned buffer's position (mark/reset).
    def parsecontent(s: ByteBuffer): ByteBuffer = {
      val c = super.parseContent(s)
      if (c != null) {
        c.mark()
        while (c.hasRemaining) sb.append(c.get().toChar)
        c.reset()
      }
      c
    }

    var minorv = -1 // minor HTTP version reported to submitRequestLine; -1 until seen

    def submitRequestLine(
        methodString: String,
        uri: String,
        scheme: String,
        majorversion: Int,
        minorversion: Int) = {
      // println(s"$methodString, $uri, $scheme/$majorversion.$minorversion")
      minorv = minorversion
      false
    }

    def headerComplete(name: String, value: String) = {
      // println(s"Found header: '$name': '$value'")
      h += ((name, value))
      false
    }
  }

  // Wrap a string as one HTTP/1.1 chunk: "<hex size>\r\n<data>\r\n".
  private def toChunk(str: String): String = s"${Integer.toHexString(str.length)}\\r\\n$str\\r\\n"

  private val l_headers = ("From", "someuser@jmarshall.com ") ::
    ("HOST", "www.foo.com") ::
    ("User-Agent", "HTTPTool/1.0 ") ::
    ("Some-Header", "") :: Nil

  private val request = "POST /enlighten/calais.asmx HTTP/1.1\\r\\n"
  private val host = "HOST: www.foo.com\\r\\n"

  // Render header pairs as wire text; empty values get no ": " separator.
  private def buildHeaderString(hs: Seq[(String, String)]): String =
    hs.foldLeft(new StringBuilder) { (sb, h) =>
      sb.append(h._1)
      if (h._2.length > 0) sb.append(": " + h._2)
      sb.append("\\r\\n")
    }.append("\\r\\n")
      .result()

  private val headers = buildHeaderString(l_headers)

  private val body = "hello world"
  private val lengthh = s"Content-Length: ${body.length}\\r\\n"

  private val chunked = "Transfer-Encoding: chunked\\r\\n"

  // Complete requests: one with Content-Length, one chunked (two chunks + terminator).
  private val mockFiniteLength = request + host + lengthh + headers + body

  private val mockChunked =
    request + host + chunked + headers + toChunk(body) + toChunk(
      body + " again!") + "0 \\r\\n" + "\\r\\n"

  // Inject a non-ascii char at every position of the request line; each must fail.
  test("An Http1ServerParser should fail on non-ascii char in request line") {
    val p = new Parser()
    val line = "POST /enlighten/calais.asmx HTTP/1.1\\r\\n"
    val ch = "£"

    for (i <- 0 until line.length) yield {
      p.reset()
      val (h, t) = line.splitAt(i)
      val l2 = h + ch + t
      intercept[BadMessage](p.parseLine(l2))
    }
  }

  test("An Http1ServerParser should parse the request line for HTTP") {
    assert(new Parser().parseLine("POST /enlighten/calais.asmx HTTP/1.1\\r\\n"))

    assert(new Parser().parseLine("POST /enlighten/calais.asmx HTTP/1.1\\n"))
  }

  test("An Http1ServerParser should parse the request line for HTTP in segments") {
    val p = new Parser()
    assertEquals(p.parseLine("POST /enlighten/cala"), false)
    assert(p.parseLine("is.asmx HTTP/1.1\\r\\n"))
  }

  test("An Http1ServerParser should parse the request line for HTTPS") {
    val p = new Parser()
    assert(p.parseLine("POST /enlighten/calais.asmx HTTPS/1.1\\r\\n"))
    assertEquals(p.minorv, 1)
  }

  test("An Http1ServerParser should give bad request on invalid request line") {
    val p = new Parser()
    intercept[BadMessage](p.parseLine("POST /enlighten/calais.asmx KTTPS/1.1\\r\\n"))

    p.reset()
    intercept[BadMessage](p.parseLine("POST /enlighten/calais.asmx HKTTPS/1.1\\r\\n"))

    p.reset()
    intercept[BadMessage](p.parseLine("POST=/enlighten/calais.asmx HKTTPS/1.1\\r\\n"))
  }

  test("An Http1ServerParser should give bad request on negative content-length") {
    val p = new Parser()
    val line = "GET /enlighten/calais.asmx HTTPS/1.0\\r\\n"
    assert(p.parseLine(line))

    intercept[BadMessage](p.parseheaders(buildHeaderString(Seq("content-length" -> "-1"))))
  }

  test("An Http1ServerParser should accept multiple same length content-length headers") {
    val p = new Parser()
    val line = "GET /enlighten/calais.asmx HTTPS/1.0\\r\\n"
    assert(p.parseLine(line))

    assert(p.parseheaders(buildHeaderString(Seq("content-length" -> "1", "content-length" -> "1"))))
  }

  test(
    "An Http1ServerParser should give bad request on multiple different content-length headers") {
    val p = new Parser()
    val line = "GET /enlighten/calais.asmx HTTPS/1.0\\r\\n"
    assert(p.parseLine(line))

    intercept[BadMessage](
      p.parseheaders(buildHeaderString(Seq("content-length" -> "1", "content-length" -> "2"))))
  }

  test("An Http1ServerParser should match Http1.0 requests") {
    val p = new Parser()
    p.parseLine("POST /enlighten/calais.asmx HTTPS/1.0\\r\\n")
    assertEquals(p.minorv, 0)
  }

  test("An Http1ServerParser should throw an invalid state if the request line is already parsed") {
    val p = new Parser()
    val line = "POST /enlighten/calais.asmx HTTPS/1.0\\r\\n"
    p.parseLine(line)
    intercept[InvalidState](p.parseLine(line))
  }

  test("An Http1ServerParser should parse headers") {
    val p = new Parser()
    val line = "GET /enlighten/calais.asmx HTTPS/1.0\\r\\n"
    p.parseLine(line)

    assert(p.parseheaders(headers))
    assertEquals(p.getContentType, EndOfContent.END)
    // Header names/values are trimmed by the parser.
    assertEquals(p.h.result(), l_headers.map { case (a, b) => (a.trim, b.trim) })
  }

  // Non-ascii is rejected in header *names*...
  test("An Http1ServerParser should fail on non-ascii char in header name") {
    val p = new Parser()
    val ch = "£"
    val k = "Content-Length"

    for (i <- 0 until k.length) yield {
      p.reset()
      val (h, t) = k.splitAt(i)
      val hs = (h + ch + t, "0") :: Nil
      intercept[BadMessage](p.parseheaders(buildHeaderString(hs)))
    }
  }

  // ...but tolerated in header *values*.
  test("An Http1ServerParser should allow non-ascii char in header value") {
    val p = new Parser()
    val ch = "£"
    val k = "Foo-Header"
    val v = "this_is_some_header_value"

    for (i <- 0 until v.length) yield {
      p.reset()
      val (h, t) = v.splitAt(i)
      val hs = (k, h + ch + t) :: Nil
      assert(p.parseheaders(buildHeaderString(hs)))
    }
  }

  test("An Http1ServerParser should accept headers without values") {
    val hsStr =
      "If-Modified-Since\\r\\nIf-Modified-Since:\\r\\nIf-Modified-Since: \\r\\nIf-Modified-Since:\\t\\r\\n\\r\\n"
    val p = new Parser()
    assert(p.parseheaders(hsStr))
    assertEquals(
      p.getContentType,
      EndOfContent.END
    ) // since the headers didn't indicate any content
    assertEquals(p.h.result(), List.fill(4)(("If-Modified-Since", "")))
  }

  test("An Http1ServerParser should need input on partial headers") {
    val p = new Parser()
    assertEquals(p.parseHeaders(headers.substring(0, 20)), false)
    assert(p.parseheaders(headers.substring(20)))
    assertEquals(p.h.result(), l_headers.map { case (a, b) => (a.trim, b.trim) })
  }

  test("An Http1ServerParser should parse a full request") {
    val p = new Parser()
    val b = strToBuffer(mockFiniteLength)

    assert(p.parseLine(b))

    assert(p.parseheaders(b))
    assertEquals(p.sb.result(), "")

    assert(p.parsecontent(b) != null)
    assertEquals(p.sb.result(), body)
    // NOTE(review): the second argument here is a clue, not an expected value —
    // assertEquals may have been intended; confirm against BlazeTestSuite.
    assert(p.contentComplete(), true)

    p.reset()
    assertEquals(p.requestLineComplete(), false)
  }

  test("An Http1ServerParser should parse a full request in fragments") {
    val p = new Parser()
    val b = strToBuffer(mockFiniteLength)

    // NOTE(review): this no-arg call just reads the limit and discards it.
    b.limit()

    // Feed the parser one extra byte at a time by growing the limit.
    b.limit(1)

    while (!p.requestLineComplete() && !p.parseLine(b))
      b.limit(b.limit() + 1)

    while (!p.headersComplete() && !p.parseheaders(b))
      b.limit(b.limit() + 1)

    while (!p.contentComplete())
      if (null == p.parsecontent(b)) b.limit(b.limit() + 1)

    assertEquals(p.sb.result(), body)

    p.reset()
    assertEquals(p.requestLineComplete(), false)
  }

  test("An Http1ServerParser should parse a chunked request") {
    val p = new Parser()
    val b = strToBuffer(mockChunked)

    assert(p.parseLine(b))

    assert(p.parseheaders(b))
    assertEquals(p.sb.result(), "")

    assert(p.parsecontent(b) != null)
    assert(p.parsecontent(b) != null)
    // two real messages
    assertEquals(p.parsecontent(b).remaining(), 0)

    assertEquals(p.sb.result(), body + body + " again!")

    p.reset()
  }

  test("An Http1ServerParser should parse a chunked request with trailers") {
    val p = new Parser()
    // Replace the final CRLF with a "Foo" trailer header.
    val req = mockChunked.substring(0, mockChunked.length - 2) + "Foo\\r\\n\\r\\n"

    val b = strToBuffer(req)
    // println(mockChunked)

    assert(p.parseLine(b))

    assert(p.parseheaders(b))
    assertEquals(p.sb.result(), "")
    p.h.clear()

    assert(p.parsecontent(b) != null)
    assert(p.parsecontent(b) != null)
    // two real messages
    assertEquals(p.parsecontent(b).remaining(), 0)
    assertEquals(p.h.result(), ("Foo", "") :: Nil)

    assertEquals(p.sb.result(), body + body + " again!")

    p.reset()
  }

  test("An Http1ServerParser should give parse a chunked request in fragments") {
    val p = new Parser()
    val b = strToBuffer(mockChunked)
    val blim = b.limit()

    // Do it one char at a time /////////////////////////////////////////
    b.limit(1)
    b.position(0)
    p.sb.clear()

    while (!p.requestLineComplete() && !p.parseLine(b))
      b.limit(b.limit() + 1)

    while (!p.headersComplete() && !p.parseheaders(b))
      b.limit(b.limit() + 1)

    assertEquals(p.contentComplete(), false)

    while (!p.contentComplete()) {
      p.parsecontent(b)
      if (b.limit() < blim) b.limit(b.limit() + 1)
    }

    // NOTE(review): second argument is a clue, not an expected value.
    assert(p.contentComplete(), true)
    assertEquals(p.sb.result(), body + body + " again!")
  }

  test("An Http1ServerParser should give parse a chunked request in fragments with a trailer") {
    val p = new Parser()
    val req = mockChunked.substring(0, mockChunked.length - 2) + "Foo\\r\\n\\r\\n"
    val b = strToBuffer(req)
    val blim = b.limit()

    // Do it one char at a time /////////////////////////////////////////
    b.limit(1)
    b.position(0)
    p.sb.clear()

    while (!p.requestLineComplete() && !p.parseLine(b))
      b.limit(b.limit() + 1)

    while (!p.headersComplete() && !p.parseheaders(b))
      b.limit(b.limit() + 1)

    p.h.clear()

    assertEquals(p.contentComplete(), false)

    while (!p.contentComplete()) {
      p.parsecontent(b)
      if (b.limit() < blim) b.limit(b.limit() + 1)
    }

    assertEquals(p.h.result(), ("Foo", "") :: Nil)

    assert(p.contentComplete())
    assertEquals(p.sb.result(), body + body + " again!")
  }

  test("An Http1ServerParser should throw an error if the headers are too long") {
    val header = "From: someuser@jmarshall.com \\r\\n" +
      "HOST: www.foo.com\\r\\n" +
      "User-Agent: HTTPTool/1.0 \\r\\n" +
      "Some-Header\\r\\n"

    // Limit set one byte below the header block's size.
    val p = new Parser(maxHeader = header.length - 1)
    intercept[BadMessage](p.parseheaders(header))
  }

  test("An Http1ServerParser should throw an error if the request line is too long") {
    val p = new Parser(maxReq = request.length - 1)
    intercept[BadMessage](p.parseLine(request))
  }
}
| http4s/blaze | http/src/test/scala/org/http4s/blaze/http/parser/ServerParserSuite.scala | Scala | apache-2.0 | 11,989 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.exceptions.TestFailedException
import org.scalactic.Equality
import org.scalactic.Explicitly
import SharedHelpers.thisLineNumber
import Matchers._
// Calling this ShouldContainElementNewSpec so that it is easy to
// keep track of the new tests that we'll need to port over to
// inspector shorthands.
//
// Each "holder" test follows the same pattern: (1) the default Equality makes
// `contain` succeed and `not contain` fail; (2) an implicit Equality defined
// as `a != b` inverts both outcomes; (3) `(… ) (decided by defaultEquality[…])`
// explicitly restores the default behavior.
//
// WARNING: the `thisLineNumber - N` assertions encode relative source-line
// distances — do not insert or remove lines inside these test bodies.
class ShouldContainElementNewSpec extends FunSpec with Explicitly {

  // Checking for a specific size
  describe("The 'contain (<value>)' syntax") {

    // `contain` is not restricted to the element type of the collection.
    it("should allow any type to be passed in") {
      Vector(1, "2") should contain ("2")
      Vector(1, "2") should contain (1)
    }

    it("should use an Equality of the element type of the left-hand \\"holder\\" on a GenTraversable") {
      Vector(2, 2) should contain (2)
      val e1 = intercept[TestFailedException] {
        Vector(2, 2) should not contain (2)
      }
      e1.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e1.failedCodeLineNumber should be (Some(thisLineNumber - 4))
      implicit val e = new Equality[Int] {
        def areEqual(a: Int, b: Any): Boolean = a != b
      }
      val e2 = intercept[TestFailedException] {
        Vector(2, 2) should contain (2)
      }
      Vector(2, 2) should not contain (2)
      e2.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e2.failedCodeLineNumber should be (Some(thisLineNumber - 5))
      (Vector(2, 2) should contain (2)) (decided by defaultEquality[Int])
      val e3 = intercept[TestFailedException] {
        (Vector(2, 2) should not contain (2)) (decided by defaultEquality[Int])
      }
      e3.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e3.failedCodeLineNumber should be (Some(thisLineNumber - 4))
    }

    // Same pattern on a String, whose elements are Chars.
    it("should use an Equality of the element type of the left-hand \\"holder\\" on a String") {
      "22" should contain ('2')
      val e1 = intercept[TestFailedException] {
        "22" should not contain ('2')
      }
      e1.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e1.failedCodeLineNumber should be (Some(thisLineNumber - 4))
      implicit val e = new Equality[Char] {
        def areEqual(a: Char, b: Any): Boolean = a != b
      }
      val e2 = intercept[TestFailedException] {
        "22" should contain ('2')
      }
      "22" should not contain ('2')
      e2.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e2.failedCodeLineNumber should be (Some(thisLineNumber - 5))
      ("22" should contain ('2')) (decided by defaultEquality[Char])
      val e3 = intercept[TestFailedException] {
        ("22" should not contain ('2')) (decided by defaultEquality[Char])
      }
      e3.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e3.failedCodeLineNumber should be (Some(thisLineNumber - 4))
    }

    // Same pattern on an Array.
    it("should use an Equality of the element type of the left-hand \\"holder\\" on an Array") {
      Array(2, 2) should contain (2)
      val e1 = intercept[TestFailedException] {
        Array(2, 2) should not contain (2)
      }
      e1.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e1.failedCodeLineNumber should be (Some(thisLineNumber - 4))
      implicit val e = new Equality[Int] {
        def areEqual(a: Int, b: Any): Boolean = a != b
      }
      val e2 = intercept[TestFailedException] {
        Array(2, 2) should contain (2)
      }
      Array(2, 2) should not contain (2)
      e2.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e2.failedCodeLineNumber should be (Some(thisLineNumber - 5))
      (Array(2, 2) should contain (2)) (decided by defaultEquality[Int])
      val e3 = intercept[TestFailedException] {
        (Array(2, 2) should not contain (2)) (decided by defaultEquality[Int])
      }
      e3.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e3.failedCodeLineNumber should be (Some(thisLineNumber - 4))
    }

    // SKIP-SCALATESTJS,NATIVE-START
    // Same pattern on a java.util.Set (JVM only).
    it("should use an Equality of the element type of the left-hand \\"holder\\" on a Java Collection") {
      val javaSet: java.util.Set[Int] = new java.util.HashSet
      javaSet.add(2)
      javaSet should contain (2)
      val e1 = intercept[TestFailedException] {
        javaSet should not contain (2)
      }
      e1.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e1.failedCodeLineNumber should be (Some(thisLineNumber - 4))
      implicit val e = new Equality[Int] {
        def areEqual(a: Int, b: Any): Boolean = a != b
      }
      val e2 = intercept[TestFailedException] {
        javaSet should contain (2)
      }
      javaSet should not contain (2)
      e2.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e2.failedCodeLineNumber should be (Some(thisLineNumber - 5))
      (javaSet should contain (2)) (decided by defaultEquality[Int])
      val e3 = intercept[TestFailedException] {
        (javaSet should not contain (2)) (decided by defaultEquality[Int])
      }
      e3.failedCodeFileName should be (Some("ShouldContainElementNewSpec.scala"))
      e3.failedCodeLineNumber should be (Some(thisLineNumber - 4))
    }
    // SKIP-SCALATESTJS,NATIVE-END
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/ShouldContainElementNewSpec.scala | Scala | apache-2.0 | 5,970 |
package uk.co.appministry.akka.zk
import akka.actor.SupervisorStrategy.Stop
import akka.actor.{Actor, OneForOneStrategy, Props, SupervisorStrategy}
import akka.testkit.TestActorRef
import org.scalatest.time.{Second, Seconds, Span}
import scala.concurrent.duration._
/** Exercises client behaviour when an established ZooKeeper connection is
  * lost: after the server stops, the client should exhaust its configured
  * connection attempts and report itself Dead.
  */
class ConnectionLossTest extends TestBase {
  // Reconnection attempts take a while; widen the eventually() window.
  override implicit val patienceConfig = PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(1, Second)))
  // Intentionally a no-op: the test itself stops the ZooKeeper server.
  override def afterAll = {}
  "Akka ZK" must {
    "gracefully handle connection loss when ZooKeeper connection lost" in {
      val connectRequest = ZkRequestProtocol.Connect(
        zookeeper.getConnectString,
        sessionTimeout = 10 seconds,
        connectionAttempts = 2)
      val actor = system.actorOf(Props(new ZkClientActor))
      actor ! connectRequest
      // NOTE(review): the lowercase `connectRequest` in this pattern is a
      // fresh binding that shadows the outer val — the match accepts any
      // Connected message rather than comparing against the sent request.
      expectMsgPF(defaultConnectedMsgWait) { case ZkResponseProtocol.Connected(connectRequest, _) => () }
      zookeeper.stop()
      eventually {
        expectMsg(ZkResponseProtocol.Dead(connectRequest))
      }
    }
  }
}
/** Exercises client behaviour when no ZooKeeper server is reachable at all:
  * the client actor should fail with ZkClientConnectionFailedException and be
  * stopped by its supervisor.
  */
class NoZooKeeperTest extends TestBase {
  // Stop the embedded server before any test runs so connections must fail.
  override def beforeAll = {
    super.beforeAll
    zookeeper.stop()
  }
  // Intentionally a no-op: the server is already stopped.
  override def afterAll = {}
  "Akka ZK" must {
    "stop the actor if failed to connect to ZooKeeper server" in {
      @volatile var failedToConnect = false
      // Parent actor whose supervisor strategy records the expected failure.
      val watcher = TestActorRef(new Actor {
        override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() {
          case f: ZkClientConnectionFailedException =>
            failedToConnect = true
            Stop
          case _ => Stop
        }
        def receive = {
          case "run test" =>
            val connectRequest = ZkRequestProtocol.Connect(
              sessionTimeout = 1 second,
              connectionAttempts = 2)
            val actor = this.context.actorOf(Props(new ZkClientActor))
            actor ! connectRequest
            context.watch(actor)
        }
      })
      watcher ! "run test"
      eventually {
        failedToConnect shouldBe true
      }
    }
  }
}
| AppMinistry/akka-zk | src/test/scala/uk/co/appministry/akka/zk/SlowTests.scala | Scala | apache-2.0 | 2,047 |
package ch.bsisa.hyperbird.patman.simulations.messages
import ch.bsisa.hyperbird.patman.simulations.model.Bed
import java.util.Date
/**
 * Message requesting an update of a simulation data set: the given beds were
 * transferred between the two named hospitals, applying from `fromSchedule`.
 *
 * @param id              identifier carried with the request — presumably the
 *                        simulation/data-set id; confirm against the sender
 * @param transferredBeds the beds moved by this transfer
 * @param fromHospital    hospital the beds were transferred from
 * @param toHospital      hospital the beds were transferred to
 * @param fromSchedule    schedule timestamp the transfer applies from
 */
case class DataSetUpdateRequest(id: String, transferredBeds: List[Bed], fromHospital: String, toHospital: String, fromSchedule: Date)
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.kryo.impl
import java.io.InputStream
import com.esotericsoftware.kryo.io.Input
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.features.ScalaSimpleFeature.ImmutableSimpleFeature
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer
import org.locationtech.geomesa.features.kryo.impl.KryoFeatureDeserialization.getInput
import org.opengis.feature.simple.SimpleFeature
object ActiveDeserialization {

  /**
    * Creates mutable features (backed by ScalaSimpleFeature)
    */
  trait MutableActiveDeserialization extends ActiveDeserialization {
    override protected def createFeature(id: String,
                                         attributes: Array[AnyRef],
                                         userData: java.util.Map[AnyRef, AnyRef]): SimpleFeature = {
      new ScalaSimpleFeature(deserializeSft, id, attributes, userData)
    }
  }

  /**
    * Creates immutable features (backed by ImmutableSimpleFeature)
    */
  trait ImmutableActiveDeserialization extends ActiveDeserialization {
    override protected def createFeature(id: String,
                                         attributes: Array[AnyRef],
                                         userData: java.util.Map[AnyRef, AnyRef]): SimpleFeature = {
      new ImmutableSimpleFeature(deserializeSft, id, attributes, userData)
    }
  }
}
/**
 * Fully deserializes the simple feature before returning (as opposed to a
 * lazy deserializer). All three overloads funnel into readFeature.
 */
trait ActiveDeserialization extends KryoFeatureDeserialization {

  /** Instantiate the concrete feature (mutable vs immutable variant). */
  protected def createFeature(id: String,
                              attributes: Array[AnyRef],
                              userData: java.util.Map[AnyRef, AnyRef]): SimpleFeature

  override def deserialize(bytes: Array[Byte]): SimpleFeature = deserialize(bytes, 0, bytes.length)

  override def deserialize(bytes: Array[Byte], offset: Int, length: Int): SimpleFeature =
    readFeature(getInput(bytes, offset, length))

  override def deserialize(in: InputStream): SimpleFeature = readFeature(getInput(in))

  // Decode one serialized feature: version marker, offset-table position,
  // feature id, attribute values, then user data.
  private def readFeature(input: Input): SimpleFeature = {
    val offset = input.position()
    if (input.readInt(true) != KryoFeatureSerializer.VERSION) {
      throw new IllegalArgumentException("Can't process features serialized with an older version")
    }
    // read the start of the offsets - we'll stop reading when we hit this
    val limit = offset + input.readInt()
    val id = readId(input)
    val attributes = Array.ofDim[AnyRef](readers.length)
    var i = 0
    // Attributes not present in the serialized form (i.e. once the input
    // position reaches the offset table) are left null in the array.
    while (i < readers.length && input.position < limit) {
      attributes(i) = readers(i)(input)
      i += 1
    }
    val userData = readUserData(input, skipOffsets = true)
    createFeature(id, attributes, userData)
  }
}
| jahhulbert-ccri/geomesa | geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/impl/ActiveDeserialization.scala | Scala | apache-2.0 | 3,165 |
// Initialization-order negative test for the initializer-safety checker:
// `fun1`/`fun2` close over `list`, and both `fun2(5)` and the first `map`
// call run during construction before `list` is initialized. The checker is
// expected to flag this — the inline `// error` marker below is matched by
// the test harness and must not be moved or removed.
class Foo {
  val x = "hello"
  val fun1: Int => Int = n => 0 + n + list.size
  val fun2: Int => Int = n => 1 + n + list.size
  fun2(5)
  List(5, 9).map(n => 2 + n + list.size)
  final val list = List(1, 2, 3) // error
  List(5, 9).map(n => 3 + n + list.size)
}
package marge.la
/**
 * Factory for vectors indexed by element type `E`.
 *
 * Overloads build a vector either from bare elements or from
 * element-to-scalar pairs/maps (presumably explicit per-element weights —
 * confirm against the concrete implementations).
 */
trait VectorSpace[E] {
  def make(s: Iterable[E]): Vector[E]
  def make(s: Set[E]): Vector[E]
  def make[S](s: Map[E, S])(implicit num: Numeric[S]): Vector[E]
  def make[S](s: Iterable[(E, S)])(implicit num: Numeric[S]): Vector[E]
  // The zero vector: built from no elements.
  def zero: Vector[E] = make(Seq[E]())
}
object VectorSpace {
  // NOTE(review): this `apply` is an empty stub — it takes a Manifest but
  // builds nothing and returns Unit (procedure syntax). Presumably a
  // placeholder for a default VectorSpace factory; confirm intent.
  def apply[E](implicit m: Manifest[E]) {
  }
}
| mikiobraun/marge | src/main/scala/marge/la/VectorSpace.scala | Scala | mit | 365 |
package app
import ui.math.Vec2
import ui.sdf.SdfScene
import ui.sdf.SdfScene._
import ui.shader.builder.Colors
import ui.shader.builder.types.{GlFloatType, GlVec4Type}
import ui.shader.builder.value.{GlFloatVal, GlValue, GlVec2Val, GlVec3Val}
/** Catalogue of signed-distance-field demo scenes built with the SdfScene
  * DSL. `main` / `main3d` select which scene the renderer shows.
  */
object Scenes {
  // 2D entry point: the nested layered scene below.
  def main: GlValue[GlVec4Type] = layeredScene

  // 3D entry point: the repeated-boxes scene.
  def main3d: GlValue[GlFloatType] = threeAnim1

  // Single 3D box with slightly rounded edges.
  def threeAnim2: GlValue[GlFloatType] = {
    box3d(0.5, 0.5, 0.5, 0.002d)
  }

  // Repeated circles
  def threeAnim3: GlValue[GlFloatType] = {
    sphere(0.15, repeatPoint3d(GlVec3Val(1.3, 1.3, 1.3)))
  }

  // Repeated boxes
  def threeAnim1: GlValue[GlFloatType] = {
    box3d(0.1, 0.1, 0.1, 0d, repeatPoint3d(GlVec3Val(1.3, 1.3, 1.3)))
  }

  // Three nested layers: each `layered` blends an SDF shape's color over the
  // next layer with the given edge width.
  def layeredScene: GlValue[GlVec4Type] =
    layered(
      anim3(), Colors.greenBlue,
      layered(
        anim3(-0.02f), Colors.grey,
        layered(
          anim4, Colors.bluePurple,
          Colors.white,
          0.1f
        ),
        0.3f
      ),
      0.6f
    )

  // Combined
  def anim5: GlValue[GlFloatType] = intersect(anim2, anim4)

  // Repeated point
  def anim4: GlValue[GlFloatType] =
    box(
      animateFloat(0.1f, 0.02f), animateFloat(0.1f, 0.02f, 3.14f),
      repeatPoint(GlVec2Val(animateFloat(0.25f, 0.05f), animateFloat(0.28f, 0.05f, 2.7f)))
    )

  // Rotate around origin; `tAdjustment` offsets the animation time so two
  // instances of the shape can trail each other (see layeredScene).
  def anim3(tAdjustment: GlValue[GlFloatType] = GlFloatVal(0f)): GlValue[GlFloatType] =
    box(
      0.1f, 0.1f,
      pointRotateAroundOrigin(
        incrementFloat(0.1f, 1.8f, tAdjustment),
        0.5f, 0.5f,
        pointTranslate(Vec2(-0.5f, -0.5f), pointScale(1f, 1.5f))
      )
    )

  // Union two circles animating differently (opposite animation phases)
  def anim2: GlValue[GlFloatType] =
    union(
      circle(animateFloat(0.2f, 0.1f)),
      circle(animateFloat(0.2f, 0.1f, 3.14f), pointTranslate(Vec2(0.4f, 0f)))
    )

  // Animated circle subtracted from a rotating box
  def anim1: GlValue[GlFloatType] =
    subtract(
      circle(
        animateFloat(0.25f, 0.2f),
        pointTranslate(Vec2(-0.2f, 0.0f))
      ),
      box(
        0.4f, 0.4f,
        pointRotate(animateFloat(0.25f, 0.2f))
      )
    )
}
| gvatn/play-scalajs-webgl-spark | client/src/main/scala/app/Scenes.scala | Scala | mit | 2,207 |
package enumeratum.values
/** @author
* Alessandro Lacava
* @since 2016-04-23
*/
// Short-valued test enum for the ReactiveMongo BSON value-enum integration.
sealed abstract class BsonDrinks(val value: Short, name: String) extends ShortEnumEntry
case object BsonDrinks
    extends ShortEnum[BsonDrinks]
    with ShortReactiveMongoBsonValueEnum[BsonDrinks] {
  case object OrangeJuice extends BsonDrinks(value = 1, name = "oj")
  case object AppleJuice extends BsonDrinks(value = 2, name = "aj")
  case object Cola extends BsonDrinks(value = 3, name = "cola")
  case object Beer extends BsonDrinks(value = 4, name = "beer")
  // Macro-collected list of all entries declared above.
  val values = findValues
}
// Long-valued test enum; `values` is deliberately declared before the
// entries to exercise the findValues macro with that ordering.
sealed abstract class BsonContentType(val value: Long, name: String) extends LongEnumEntry
case object BsonContentType
    extends LongEnum[BsonContentType]
    with LongReactiveMongoBsonValueEnum[BsonContentType] {
  val values = findValues
  case object Text extends BsonContentType(value = 1L, name = "text")
  case object Image extends BsonContentType(value = 2L, name = "image")
  case object Video extends BsonContentType(value = 3L, name = "video")
  case object Audio extends BsonContentType(value = 4L, name = "audio")
}
// Int-valued test enum whose entries deliberately mix named, positional and
// reordered constructor arguments (see the comment kept from the original).
sealed abstract class BsonLibraryItem(val value: Int, val name: String) extends IntEnumEntry
case object BsonLibraryItem
    extends IntEnum[BsonLibraryItem]
    with IntReactiveMongoBsonValueEnum[BsonLibraryItem] {
  // A good mix of named, unnamed, named + unordered args
  case object Book extends BsonLibraryItem(value = 1, name = "book")
  case object Movie extends BsonLibraryItem(name = "movie", value = 2)
  case object Magazine extends BsonLibraryItem(3, "magazine")
  case object CD extends BsonLibraryItem(4, name = "cd")
  val values = findValues
}
// String-valued test enum for the ReactiveMongo BSON value-enum integration.
sealed abstract class BsonOperatingSystem(val value: String) extends StringEnumEntry
case object BsonOperatingSystem
    extends StringEnum[BsonOperatingSystem]
    with StringReactiveMongoBsonValueEnum[BsonOperatingSystem] {
  case object Linux extends BsonOperatingSystem("linux")
  case object OSX extends BsonOperatingSystem("osx")
  case object Windows extends BsonOperatingSystem("windows")
  case object Android extends BsonOperatingSystem("android")
  val values = findValues
}
// Char-valued test enum for the ReactiveMongo BSON value-enum integration.
sealed abstract class BsonAlphabet(val value: Char) extends CharEnumEntry
case object BsonAlphabet
    extends CharEnum[BsonAlphabet]
    with CharReactiveMongoBsonValueEnum[BsonAlphabet] {
  case object A extends BsonAlphabet('A')
  case object B extends BsonAlphabet('B')
  case object C extends BsonAlphabet('C')
  case object D extends BsonAlphabet('D')
  val values = findValues
}
// Byte-valued test enum; note the companion is a plain `object` (not a
// `case object` like the others), which the tests presumably also cover.
sealed abstract class BsonBites(val value: Byte) extends ByteEnumEntry
object BsonBites extends ByteEnum[BsonBites] with ByteReactiveMongoBsonValueEnum[BsonBites] {
  val values = findValues
  case object OneByte extends BsonBites(1)
  case object TwoByte extends BsonBites(2)
  case object ThreeByte extends BsonBites(3)
  case object FourByte extends BsonBites(4)
}
| lloydmeta/enumeratum | enumeratum-reactivemongo-bson/src/test/scala/enumeratum/values/BsonEnums.scala | Scala | mit | 2,981 |
package com.twitter.scrooge.swift_generator
import com.twitter.scrooge.ast.ConstDefinition
import com.twitter.scrooge.ast.EnumType
import com.twitter.scrooge.ast.FieldType
import com.twitter.scrooge.ast.Identifier
import com.twitter.scrooge.ast.ListRHS
import com.twitter.scrooge.ast.MapRHS
import com.twitter.scrooge.ast.SetRHS
import com.twitter.scrooge.java_generator.TypeController
/**
* Helps generate a class that holds all the constants.
*/
// Renders each constant definition into the template data consumed by the
// Swift "Constants" type template.
class ConstController(
    defs: Seq[ConstDefinition],
    generator: SwiftGenerator,
    ns: Option[Identifier],
    val public_interface: Boolean)
    extends TypeController("Constants", generator, ns) {
  // One map per constant; each carries the fully rendered Swift source
  // under the "rendered_value" key.
  val constants: Seq[Map[String, String]] = defs map { d =>
    // Collection constants need their element count for rendering;
    // scalar constants use 0.
    val size = d.value match {
      case map: MapRHS => map.elems.size
      case set: SetRHS => set.elems.size
      case list: ListRHS => list.elems.size
      case _ => 0
    }
    // Swift naming convention: camelCase the identifiers of enum values
    // before rendering; all other field types pass through untouched.
    val camelCasedFieldTypes: FieldType = d.fieldType match {
      case e: EnumType =>
        val enum = e.`enum`
        e.copy(`enum`.copy(values = enum.values.map({ v => v.copy(v.sid.toCamelCase) })))
      case _ => d.fieldType
    }
    Map(
      "rendered_value" -> indent(
        generator.printConstValue(d.sid.name, camelCasedFieldTypes, d.value, ns, size, true),
        2)
    )
  }
}
| twitter/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/swift_generator/ConstController.scala | Scala | apache-2.0 | 1,291 |
package goggles.macros.interpret
import goggles.macros.errors.{GogglesError, InternalError, ErrorAt}
/**
 * A state-threading computation over the macro expansion state: applied to a
 * [[MacroState]], it yields either a positioned error or a value, together
 * with the (possibly updated) state.
 */
private[goggles] trait Parse[Type, Arg, A] {
  self =>
  def apply(state: MacroState[Type,Arg]): (Either[ErrorAt[Type],A], MacroState[Type,Arg])
  /** Transforms a successful result; errors and state pass through unchanged. */
  final def map[B](f: A => B): Parse[Type,Arg,B] = new Parse[Type,Arg,B] {
    def apply(initial: MacroState[Type,Arg]): (Either[ErrorAt[Type],B], MacroState[Type,Arg]) = {
      val (result, nextState) = self(initial)
      (result.right.map(f), nextState)
    }
  }
  /** Sequences a state-dependent continuation; short-circuits on the first error. */
  final def flatMap[B](f: A => Parse[Type,Arg,B]): Parse[Type,Arg,B] = new Parse[Type,Arg,B] {
    def apply(initial: MacroState[Type,Arg]): (Either[ErrorAt[Type],B], MacroState[Type,Arg]) =
      self(initial) match {
        case (Right(a), nextState) => f(a)(nextState)
        case (Left(err), nextState) => (Left(err), nextState)
      }
  }
  /** Runs the computation from a blank state seeded with `args`. */
  final def eval(args: List[Arg]): MacroResult[Type, A] = {
    val (result, finalState) = apply(MacroState.blank(args))
    MacroResult(result, finalState.infos)
  }
}
private[goggles] object Parse {
  /** Lifts a value into a computation that leaves the state untouched. */
  def pure[Type, Arg, A](a: => A): Parse[Type,Arg,A] =
    st => (Right(a), st)
  /** Succeeds with the option's contents, or raises `orElse` when empty. */
  def fromOption[Type, Arg, A](opt: Option[A], orElse: => GogglesError[Type]): Parse[Type, Arg, A] =
    opt.fold[Parse[Type, Arg, A]](raiseError(orElse))(a => pure(a))
  /** Reads the whole macro state without modifying it. */
  def getMacroState[Type, Arg]: Parse[Type, Arg, MacroState[Type, Arg]] =
    st => (Right(st), st)
  /** Reads the offset of the expression currently being interpreted. */
  def getCurrentExprOffset[Type, Arg]: Parse[Type, Arg, Int] =
    getMacroState[Type, Arg].map(s => s.currentExprOffset)
  /** Overwrites the current expression offset. */
  def setCurrentExprOffset[Type, Arg](offset: Int): Parse[Type, Arg, Unit] =
    st => (Right(()), st.copy(currentExprOffset = offset))
  /** Embeds a pre-computed result, threading the state through unchanged. */
  def fromEither[Type, Arg, A](either: Either[ErrorAt[Type], A]): Parse[Type,Arg,A] =
    st => (either, st)
  /** Fails with `e`, positioned at the current expression offset. */
  def raiseError[Type, Arg, A](e: GogglesError[Type]): Parse[Type, Arg, A] =
    st => (Left(e.at(st.currentExprOffset)), st)
  /** Reads the most recently recorded optic info, if any. */
  def getLastOpticInfo[Type,Arg]: Parse[Type, Arg, Option[OpticInfo[Type]]] =
    st => (Right(st.lastOpticInfo), st)
  /** Like getLastOpticInfo, but fails with `orElse` when nothing was recorded. */
  def getLastOpticInfoOrElse[Type,Arg](orElse: => GogglesError[Type]): Parse[Type, Arg, OpticInfo[Type]] =
    st => {
      val outcome: Either[ErrorAt[Type], OpticInfo[Type]] =
        st.lastOpticInfo match {
          case Some(info) => Right(info)
          case None => Left(orElse.at(st.currentExprOffset))
        }
      (outcome, st)
    }
  /** Appends an optic info record to the state. */
  def storeOpticInfo[Type,Arg](info: OpticInfo[Type]): Parse[Type, Arg, Unit] =
    st => (Right(()), st.addOpticInfo(info))
  /** Removes and returns the next unconsumed argument; fails when exhausted. */
  def popArg[Type, Arg]: Parse[Type, Arg, Arg] =
    st => st.popArg match {
      case Some((arg, remaining)) => (Right(arg), remaining)
      case None => (Left(InternalError.NotEnoughArguments.at(st.currentExprOffset)), st)
    }
}
package com.sksamuel.elastic4s.requests.searches.queries.matches
import com.sksamuel.elastic4s.requests.analyzers.Analyzer
import com.sksamuel.elastic4s.requests.common.Operator
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.elastic4s.ext.OptionImplicits._
/**
 * Elasticsearch `match` query on a single field.
 *
 * Immutable builder: every setter returns a copy of the query with the
 * corresponding option populated, so calls can be chained freely.
 */
case class MatchQuery(field: String,
                      value: Any,
                      analyzer: Option[String] = None,
                      boost: Option[Double] = None,
                      cutoffFrequency: Option[Double] = None,
                      fuzziness: Option[String] = None,
                      fuzzyRewrite: Option[String] = None,
                      fuzzyTranspositions: Option[Boolean] = None,
                      lenient: Option[Boolean] = None,
                      maxExpansions: Option[Int] = None,
                      minimumShouldMatch: Option[String] = None,
                      operator: Option[Operator] = None,
                      prefixLength: Option[Int] = None,
                      queryName: Option[String] = None,
                      zeroTerms: Option[String] = None)
    extends Query {

  def analyzer(an: String): MatchQuery = copy(analyzer = Some(an))
  def analyzer(an: Analyzer): MatchQuery = copy(analyzer = Some(an.name))

  def boost(boost: Double): MatchQuery = copy(boost = Some(boost))
  def cutoffFrequency(f: Double): MatchQuery = copy(cutoffFrequency = Some(f))
  def lenient(lenient: Boolean): MatchQuery = copy(lenient = Some(lenient))

  def fuzziness(fuzziness: String): MatchQuery = copy(fuzziness = Some(fuzziness))
  def fuzzyRewrite(fuzzyRewrite: String): MatchQuery = copy(fuzzyRewrite = Some(fuzzyRewrite))
  def prefixLength(prefixLength: Int): MatchQuery = copy(prefixLength = Some(prefixLength))
  def fuzzyTranspositions(f: Boolean): MatchQuery = copy(fuzzyTranspositions = Some(f))

  def maxExpansions(max: Int): MatchQuery = copy(maxExpansions = Some(max))
  def minimumShouldMatch(min: String): MatchQuery = copy(minimumShouldMatch = Some(min))

  def withAndOperator(): MatchQuery = operator("AND")
  def withOrOperator(): MatchQuery = operator("OR")
  // Accepts any case; unknown operator names fail inside Operator.valueOf.
  def operator(op: String): MatchQuery = copy(operator = Some(Operator.valueOf(op.toUpperCase)))
  def operator(op: Operator): MatchQuery = copy(operator = Some(op))

  def queryName(queryName: String): MatchQuery = copy(queryName = Some(queryName))
  def zeroTermsQuery(zeroTerms: String): MatchQuery = copy(zeroTerms = Some(zeroTerms))
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/matches/MatchQuery.scala | Scala | apache-2.0 | 2,464 |
package com.szadowsz.gospel.core.db.libs.basic
import com.szadowsz.gospel.core.data.{Int, Struct}
import com.szadowsz.gospel.core.{BaseEngineSpec, PrologEngine, Theory}
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
// Spec for ISO Prolog throw/1 + catch/3 semantics in the gospel engine.
// NOTE: `Int` and `Struct` below are gospel term types (see the imports),
// not scala.Int — getTerm results are downcast to them.
@RunWith(classOf[JUnitRunner])
class ThrowCatchSpec extends FlatSpec with BaseEngineSpec {
  override protected def init(): PrologEngine = new PrologEngine()
  behavior of "throw/1 and catch/3"
  // Bindings made before and inside the recovery goal must survive the catch.
  it should "execute the throw/catch with the substitutions made in unification" in {
    val theory = "p(0) :- p(1). p(1) :- throw(error)."
    prolog.setTheory(new Theory(theory))
    val goal = "atom_length(err, 3), catch(p(0), E, (atom_length(E, Length), X is 2+3)), Y is X+5."
    val info = prolog.solve(goal)
    info.isSuccess shouldBe true
    val e = info.getTerm("E").asInstanceOf[Struct]
    e shouldBe new Struct("error")
    val length = info.getTerm("Length").asInstanceOf[Int]
    length.intValue shouldBe 5
    val x = info.getTerm("X").asInstanceOf[Int]
    x.intValue shouldBe 5
    val y = info.getTerm("Y").asInstanceOf[Int]
    y.intValue shouldBe 10
  }
  // The first catch succeeds normally (p(1) proves), so its `fail` recovery
  // never runs; only the second catch handles the thrown error.
  it should "execute the nearest catch/3 ancestor in the tree of resolution whose second argument unifies with the throw/1 argument" in {
    val theory = "p(0) :- throw(error). p(1)."
    prolog.setTheory(new Theory(theory))
    val goal = "catch(p(1), E, fail), catch(p(0), E, atom_length(E, Length))."
    val info = prolog.solve(goal)
    info.isSuccess shouldBe true
    val e = info.getTerm("E").asInstanceOf[Struct]
    e shouldBe new Struct("error")
    val length = info.getTerm("Length").asInstanceOf[Int]
    length.intValue shouldBe 5
  }
  // error(X) does not unify with the thrown atom `error`, so the exception
  // escapes every catcher and halts the engine.
  it should "fail to execute if an error occurs and no catch/3 ancestor is found whose second argument unifies with the argument of the exception" in {
    val theory = "p(0) :- throw(error)."
    prolog.setTheory(new Theory(theory))
    val goal = "catch(p(0), error(X), true)."
    val info = prolog.solve(goal)
    info.isSuccess shouldBe false
    info.isHalted shouldBe true
  }
  it should "fail to execute if catch/3's handler is false" in {
    val theory = "p(0) :- throw(error)."
    prolog.setTheory(new Theory(theory))
    val goal = "catch(p(0), E, E == err)."
    val info = prolog.solve(goal)
    info.isSuccess shouldBe false
  }
  // After recovering from p(1)'s throw, the choice point for p(2) must be
  // gone: only p(0) and the recovery solution remain.
  it should "cut all choice points of a non-deterministic Goal if an exception is thrown" in {
    val theory = "p(0). p(1) :- throw(error). p(2)."
    prolog.setTheory(new Theory(theory))
    val goal = "catch(p(X), E, E == error)."
    var info = prolog.solve(goal)
    info.isSuccess shouldBe true
    info.hasOpenAlternatives shouldBe true
    info = prolog.solveNext()
    info.isSuccess shouldBe true
    info.hasOpenAlternatives shouldBe false
  }
  // A throw inside the recovery goal is not caught by the same catch/3.
  it should "fail to execute if catch/3's handler throws an exception" in {
    val theory = "p(0) :- throw(error)."
    prolog.setTheory(new Theory(theory))
    val goal = "catch(p(0), E, throw(err))."
    val info = prolog.solve(goal)
    info.isSuccess shouldBe false
    info.isHalted shouldBe true
  }
}
| zakski/project-soisceal | gospel-core/src/test/scala/com/szadowsz/gospel/core/db/libs/basic/ThrowCatchSpec.scala | Scala | lgpl-3.0 | 3,113 |
package mesosphere.marathon.state
import scala.collection.JavaConverters._
import mesosphere.marathon.Protos
import org.apache.mesos.{ Protos => MesosProtos }
// Marathon-side model of Mesos discovery information: the named ports a
// task exposes for service discovery.
case class DiscoveryInfo(ports: Seq[DiscoveryInfo.Port] = Seq.empty) {
  // Serialises to Marathon's internal protobuf (not the Mesos one).
  def toProto: Protos.DiscoveryInfo = {
    Protos.DiscoveryInfo.newBuilder
      .addAllPorts(ports.map(_.toProto).asJava)
      .build
  }
  // "Empty" is structural equality with the zero-argument value.
  def isEmpty: Boolean = DiscoveryInfo.empty.equals(this)
  def nonEmpty: Boolean = !isEmpty
}
object DiscoveryInfo {
  def empty: DiscoveryInfo = DiscoveryInfo()
  def fromProto(proto: Protos.DiscoveryInfo): DiscoveryInfo = {
    DiscoveryInfo(
      proto.getPortsList.asScala.map(Port.fromProto).toList
    )
  }
  // One named service port. Construction fails fast (require) on a protocol
  // other than tcp/udp.
  case class Port(
    number: Int,
    name: String,
    protocol: String,
    labels: Map[String, String] = Map.empty[String, String]) {
    require(Port.AllowedProtocols(protocol), "protocol can only be 'tcp' or 'udp'")
    // Serialises to the Mesos protobuf Port message.
    def toProto: MesosProtos.Port = {
      val builder = MesosProtos.Port.newBuilder
        .setNumber(number)
        .setName(name)
        .setProtocol(protocol)
      // The Labels sub-message is only attached when there is at least one
      // label, keeping the wire representation minimal.
      if (labels.nonEmpty) {
        val labelsBuilder = MesosProtos.Labels.newBuilder
        labels
          .map { case (key, value) => MesosProtos.Label.newBuilder.setKey(key).setValue(value).build }
          .foreach(labelsBuilder.addLabels)
        builder.setLabels(labelsBuilder.build())
      }
      builder.build
    }
  }
  object Port {
    val AllowedProtocols: Set[String] = Set("tcp", "udp")
    def fromProto(proto: MesosProtos.Port): Port = {
      // hasLabels mirrors the optional Labels message on the wire.
      val labels =
        if (proto.hasLabels)
          proto.getLabels.getLabelsList.asScala.map { p => p.getKey -> p.getValue }.toMap
        else Map.empty[String, String]
      Port(
        number = proto.getNumber,
        name = proto.getName,
        protocol = proto.getProtocol,
        labels = labels
      )
    }
  }
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/state/DiscoveryInfo.scala | Scala | apache-2.0 | 1,896 |
package todomvc
import japgolly.scalajs.react.Callback
import org.scalajs.dom
import upickle.default._
import scala.util.{Failure, Success, Try}
/**
 * Thin wrapper over a DOM storage area that scopes all reads and writes to a
 * single `namespace` key and (de)serialises values with upickle.
 */
case class Storage(storage: dom.ext.Storage, namespace: String) {
  /** Returns a Callback that serialises `data` and stores it under the namespace key. */
  def store[T: Writer](data: T): Callback =
    Callback(storage.update(namespace, write(data)))
  /** Reads and deserialises the stored value; logs and yields None on invalid data. */
  def load[T: Reader]: Option[T] =
    Try(storage(namespace).map(read[T])).recover {
      case th =>
        dom.console.error(s"Got invalid data ${th.getMessage}")
        None
    }.get
}
| gabrielmancini/interactor | src/demo/scalajs-react/src/main/scala/todomvc/Storage.scala | Scala | bsd-2-clause | 580 |
package sri.test.router
import sri.core.ReactComponent
import sri.test.components.Text
import sri.web.all._
import sri.web.router
import sri.web.router.{WebRoute, WebRouterComponent}
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.{JSON, UndefOr => U}
import scala.util.Try
// Router demo screen: renders the Int id parsed from the URL placeholder
// plus whatever state object is attached to the current route.
object DynamicStateScreen {
  @ScalaJSDefined
  class Component extends ReactComponent[Props, Unit] {
    def render() = {
      Text()(s"Welcome to Dynamic State Screen, Passed Id : ${props.id} and passed State : ${JSON.stringify(props.route.state.getOrElse(js.Dictionary()))}")
    }
  }
  case class Props(id: Int, route: WebRoute)
  // Converts the URL placeholder to an Int; non-numeric input maps to -1.
  def parser(placeholder: String) = Try(placeholder.toInt).getOrElse(-1)
  def apply(id: Int, route: WebRoute, key: js.UndefOr[String] = js.undefined, ref: js.Function1[Component, Unit] = null) = makeElement[Component](Props(id, route), key = key, ref = ref)
}
| chandu0101/sri | test/src/main/scala/sri/test/router/DynamicStateScreen.scala | Scala | apache-2.0 | 932 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.server
import java.io.File
import akka.actor._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server._
import akka.stream._
import akka.util.{ ByteString, Timeout }
import com.google.common.io.Files
import org.ensime.api._
import org.ensime.jerk._
import scala.concurrent.Future
// HTTP front-end for the ENSIME server. Routes:
//   POST /rpc            — JSON-encoded RpcRequest, answered by restHandler
//   GET  /docs           — HTML index of the available javadoc jars
//   GET  /docs/<jar>/... — serves a single entry from inside a javadoc jar
//   GET  /jerky          — websocket speaking the Jerk JSON protocol
trait WebServer {
  implicit def system: ActorSystem
  implicit def timeout: Timeout
  implicit def mat: Materializer
  def restHandler(in: RpcRequest): Future[EnsimeServerMessage]
  def websocketHandler(target: ActorRef): ActorRef
  /**
   * @param filename of the javadoc archive
   * @param entry of the file within the archive
   * @return the contents of the entry in filename
   */
  def docJarContent(filename: String, entry: String): Option[ByteString]
  /**
   * @return all documentation jars that are available to be served.
   */
  def docJars(): Set[File]
  import Directives._
  import JerkEnvelopeFormats._
  import JerkFormats._
  import Route._
  import ScalaXmlSupport._
  import SprayJsonSupport._
  import WebSocketBoilerplate._
  // `seal` turns rejections/exceptions into proper HTTP error responses.
  val route = seal {
    path("rpc") {
      post {
        entity(as[RpcRequest]) { request =>
          complete {
            restHandler(request)
          }
        }
      }
    } ~ path("docs") {
      // Index page built from the jar file names; XML literal is marshalled
      // via ScalaXmlSupport.
      complete {
        <html>
          <head></head>
          <body>
            <h1>ENSIME: Your Project's Documentation</h1>
            <ul>{
              docJars().toList.map(_.getName).sorted.map { f =>
                <li><a href={ s"docs/$f/index.html" }>{ f }</a> </li>
              }
            }</ul>
          </body>
        </html>
      }
    } ~ path("docs" / """[^/]+\\.jar""".r / Rest) { (filename, entry) =>
      // 404 when either the media type or the jar entry cannot be resolved.
      rejectEmptyResponse {
        complete {
          for {
            media <- MediaTypes.forExtensionOption(Files.getFileExtension(entry))
            content <- docJarContent(filename, entry)
          } yield {
            HttpResponse(entity = HttpEntity(ContentType(media, () => HttpCharsets.`UTF-8`), content))
          }
        }
      }
    } ~ path("jerky") {
      get {
        jsonWebsocket[RpcRequestEnvelope, RpcResponseEnvelope](websocketHandler)
      }
    }
  }
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/server/src/main/scala/org/ensime/server/WebServer.scala | Scala | apache-2.0 | 2,447 |
/*
* Copyright (C) 2015 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.method.stochastic
import org.openmole.core.workflow.tools.PluginInfoActivator
// Bundle activator for this plugin; all behaviour is inherited from
// PluginInfoActivator (no plugin-specific registration is added here).
class Activator extends PluginInfoActivator
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.method.stochastic/src/main/scala/org/openmole/plugin/method/stochastic/Activator.scala | Scala | agpl-3.0 | 867 |
package net.revenj.database.postgres.converters
import java.awt.Point
import java.sql.PreparedStatement
import net.revenj.database.postgres.{PostgresBuffer, PostgresReader, PostgresWriter}
import org.postgresql.util.PGobject
/**
 * Converts between java.awt.Point and the Postgres `point` type
 * (textual form `(x,y)`).
 */
object PointConverter extends Converter[Point] {
  override val dbName = "point"
  override def default() = new Point
  /** Binds a non-optional point as a typed Postgres `point` literal, e.g. "(3,4)". */
  def setParameter(sw: PostgresBuffer, ps: PreparedStatement, index: Int, value: Point): Unit = {
    val pg = new PGobject
    pg.setType("point")
    pg.setValue(s"(${value.x},${value.y})")
    ps.setObject(index, pg)
  }
  /**
   * Binds an optional point; None is sent as SQL NULL (a typed PGobject
   * with no value set).
   *
   * Fix: the original ignored `value` entirely, so Some(p) was also bound
   * as NULL. The literal is now populated whenever a point is present.
   */
  def setParameter(sw: PostgresBuffer, ps: PreparedStatement, index: Int, value: Option[Point]): Unit = {
    val pg = new PGobject
    pg.setType("point")
    value.foreach(p => pg.setValue(s"(${p.x},${p.y})"))
    ps.setObject(index, pg)
  }
  /** Parses "(x,y)" from the stateful reader.
   *  NOTE(review): `context` appears to drive how many escaping characters
   *  surround tokens at this nesting depth — confirm against PostgresReader. */
  override def parseRaw(reader: PostgresReader, start: Int, context: Int): Point = {
    reader.read(context)
    val x = IntConverter.parse(reader, context)
    val y = IntConverter.parse(reader, context)
    reader.read(context + 1)
    new Point(x, y)
  }
  /** Parses one array element; a NULL token yields the default Point. */
  override def parseCollectionItem(reader: PostgresReader, context: Int): Point = {
    val cur = reader.read()
    if (cur == 'N') {
      // Consume the remainder of the NULL marker.
      reader.read(4)
      new Point
    } else {
      parseRaw(reader, 0, context)
    }
  }
  /** Parses one array element; a NULL token yields None. */
  override def parseNullableCollectionItem(reader: PostgresReader, context: Int): Option[Point] = {
    val cur = reader.read()
    if (cur == 'N') {
      reader.read(4)
      None
    } else {
      Some(parseRaw(reader, 0, context))
    }
  }
  override def toTuple(value: Point): PostgresTuple = new PointTuple(value)
  /** Tuple wrapper emitting the point in record/array syntax. */
  private class PointTuple(val value: Point) extends PostgresTuple {
    // Point literals contain parentheses/commas, so they must be escaped
    // when nested inside records or arrays.
    val mustEscapeRecord = true
    val mustEscapeArray = true
    def insertRecord(sw: PostgresWriter, escaping: String, mappings: Option[(PostgresWriter, Char) => Unit]): Unit = {
      sw.write('(')
      sw.write(value.x.toString)
      sw.write(',')
      sw.write(value.y.toString)
      sw.write(')')
    }
    override def buildTuple(quote: Boolean): String = {
      if (quote) s"'(${value.x},${value.y})'"
      else s"(${value.x},${value.y})"
    }
  }
}
| ngs-doo/revenj | scala/revenj-core/src/main/scala/net/revenj/database/postgres/converters/PointConverter.scala | Scala | bsd-3-clause | 2,163 |
package scalainvoice.tests
import org.scalatest._
// Common base class for the project's unit tests: bundles the ScalaTest
// style (FunSpec) with the matcher/inspector mixins used across suites.
abstract class UnitSpec extends FunSpec with Matchers with
  OptionValues with Inside with Inspectors {
}
| donbonifacio/scala-invoice | src/test/scala/scalainvoice/UnitSpec.scala | Scala | mit | 160 |
package challenge
/**
 * r/dailyprogrammer Easy #198: letters common to both words cancel pairwise;
 * the word with more letters left over wins.
 */
object Easy198 {

  /** Entry point: compares the two words given as arguments and prints the winner. */
  def main(args: Array[String]): Unit = {
    val (left, right) = determineRemainder(args(0), args(1))
    println(
      if (left > right) "Left wins!"
      else if (left < right) "Right wins!"
      else "Tie!"
    )
  }

  /**
   * Cancels the letters common to both words (multiset intersection) and
   * returns how many letters survive on each side, as
   * (leftRemaining, rightRemaining). Comparison is case-insensitive.
   */
  def determineRemainder(leftLetters: String,
                         rightLetters: String): (Int, Int) = {
    // Merge-walk two sorted character lists: equal heads cancel each other,
    // a strictly smaller head is unmatched and counts for its own side.
    @scala.annotation.tailrec
    def walk(ls: List[Char], rs: List[Char], lUnmatched: Int, rUnmatched: Int): (Int, Int) =
      (ls, rs) match {
        case (Nil, _) | (_, Nil) =>
          (lUnmatched + ls.length, rUnmatched + rs.length)
        case (lh :: lt, rh :: rt) =>
          if (lh < rh) walk(lt, rs, lUnmatched + 1, rUnmatched)
          else if (lh > rh) walk(ls, rt, lUnmatched, rUnmatched + 1)
          else walk(lt, rt, lUnmatched, rUnmatched)
      }

    def canonical(word: String): List[Char] = word.toLowerCase.toList.sorted

    walk(canonical(leftLetters), canonical(rightLetters), 0, 0)
  }
}
| nichwn/dailyprogrammer-scala | src/main/scala/challenge/Easy198.scala | Scala | mit | 1,353 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti effectful
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package effectful.free
import cats._
import cats.data.Xor
import cats.data.Xor._
import effectful.Capture
import effectful.augments._
/**
 * Interprets commands of a free program (`Cmd`) into a target effect `E`,
 * carrying the capability instances (Capture, Monad, Delay, Par, Exceptions)
 * that interpretations may rely on.
 */
trait Interpreter[Cmd[_],E[_]] {
  val C:Capture[E]
  val M:Monad[E]
  val D:Delay[E]
  val P:Par[E]
  val X:Exceptions[E]
  def apply[A](cmd: Cmd[A]) : E[A]
}
object Interpreter {
  // Convenience base class that pulls all capability instances from
  // implicit scope instead of requiring manual wiring.
  abstract class Abstract[Cmd[_],E[_]](implicit
    val C:Capture[E],
    val M:Monad[E],
    val D:Delay[E],
    val P:Par[E],
    val X:Exceptions[E]
  ) extends Interpreter[Cmd,E]
  // Combines two interpreters into one over the coproduct (Xor) of their
  // command algebras, dispatching each command to the matching side.
  class XorInterpreter[Cmd1[_],Cmd2[_],E[_]](
    left:Interpreter[Cmd1,E],
    right:Interpreter[Cmd2,E]
  )(implicit
    val C:Capture[E],
    val M:Monad[E],
    val D:Delay[E],
    val P:Par[E],
    val X:Exceptions[E]
  ) extends Interpreter[({ type C[AA]=Xor[Cmd1[AA],Cmd2[AA]] })#C,E] {
    type C[AA]=Xor[Cmd1[AA],Cmd2[AA]]
    def apply[A](cmd: C[A]) =
      cmd match {
        case Left(leftCmd) => left(leftCmd)
        case Right(rightCmd) => right(rightCmd)
      }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.orc.{OrcConf, OrcFile}
import org.apache.orc.OrcConf.COMPRESS
import org.apache.orc.mapred.OrcStruct
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, RecordReaderIterator}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
// Row fixture covering every primitive type plus arrays, maps (with and
// without null values) and nested tuples, for ORC read/write round-trips.
case class AllDataTypesWithNonPrimitiveType(
    stringField: String,
    intField: Int,
    longField: Long,
    floatField: Float,
    doubleField: Double,
    shortField: Short,
    byteField: Byte,
    booleanField: Boolean,
    array: Seq[Int],
    arrayContainsNull: Seq[Option[Int]],
    map: Map[Int, Long],
    mapValueContainsNull: Map[Int, Option[Long]],
    data: (Seq[Int], (Int, String)))
// Fixture for binary-column round-trips.
case class BinaryData(binaryData: Array[Byte])
// Nested-struct fixtures used by the selection/predicate tests below.
case class Contact(name: String, phone: String)
case class Person(name: String, age: Int, contacts: Seq[Contact])
abstract class OrcQueryTest extends OrcTest {
import testImplicits._
test("Read/write All Types") {
val data = (0 to 255).map { i =>
(s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0)
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
test("Read/write binary data") {
withOrcFile(BinaryData("test".getBytes(StandardCharsets.UTF_8)) :: Nil) { file =>
val bytes = spark.read.orc(file).head().getAs[Array[Byte]](0)
assert(new String(bytes, StandardCharsets.UTF_8) === "test")
}
}
ignore("Read/write all types with non-primitive type") {
val data: Seq[AllDataTypesWithNonPrimitiveType] = (0 to 255).map { i =>
AllDataTypesWithNonPrimitiveType(
s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0,
0 until i,
(0 until i).map(Option(_).filter(_ % 3 == 0)),
(0 until i).map(i => i -> i.toLong).toMap,
(0 until i).map(i => i -> Option(i.toLong)).toMap + (i -> None),
(0 until i, (i, s"$i")))
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
ignore("Read/write UserDefinedType") {
withTempPath { path =>
val data = Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25))))
val udtDF = data.toDF("id", "vectors")
udtDF.write.orc(path.getAbsolutePath)
val readBack = spark.read.schema(udtDF.schema).orc(path.getAbsolutePath)
checkAnswer(udtDF, readBack)
}
}
test("Creating case class RDD table") {
val data = (1 to 100).map(i => (i, s"val_$i"))
sparkContext.parallelize(data).toDF().createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(sql("SELECT * FROM t"), data.toDF().collect())
}
}
ignore("Simple selection form ORC table") {
val data = (1 to 10).map { i =>
Person(s"name_$i", i, (0 to 1).map { m => Contact(s"contact_$m", s"phone_$m") })
}
withOrcTable(data, "t") {
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = leaf-0
assert(sql("SELECT name FROM t WHERE age <= 5").count() === 5)
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = (not leaf-0)
assertResult(10) {
sql("SELECT name, contacts FROM t where age > 5")
.rdd
.flatMap(_.getAs[Seq[_]]("contacts"))
.count()
}
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// leaf-1 = (LESS_THAN age 8)
// expr = (and (not leaf-0) leaf-1)
{
val df = sql("SELECT name, contacts FROM t WHERE age > 5 AND age < 8")
assert(df.count() === 2)
assertResult(4) {
df.rdd.flatMap(_.getAs[Seq[_]]("contacts")).count()
}
}
// ppd:
// leaf-0 = (LESS_THAN age 2)
// leaf-1 = (LESS_THAN_EQUALS age 8)
// expr = (or leaf-0 (not leaf-1))
{
val df = sql("SELECT name, contacts FROM t WHERE age < 2 OR age > 8")
assert(df.count() === 3)
assertResult(6) {
df.rdd.flatMap(_.getAs[Seq[_]]("contacts")).count()
}
}
}
}
test("save and load case class RDD with `None`s as orc") {
val data = (
Option.empty[Int],
Option.empty[Long],
Option.empty[Float],
Option.empty[Double],
Option.empty[Boolean]
) :: Nil
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
Row(Seq.fill(5)(null): _*))
}
}
test("SPARK-16610: Respect orc.compress (i.e., OrcConf.COMPRESS) when compression is unset") {
// Respect `orc.compress` (i.e., OrcConf.COMPRESS).
withTempPath { file =>
spark.range(0, 10).write
.option(COMPRESS.getAttribute, "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
// `compression` overrides `orc.compress`.
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.option(COMPRESS.getAttribute, "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
}
// Hive supports zlib, snappy and none for Hive 1.2.1.
// Exercises each codec through the `compression` writer option, verifying
// both the file-name suffix and the compression kind in the ORC footer.
test("Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".snappy.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("SNAPPY" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "NONE")
.orc(file.getCanonicalPath)
// "NONE" produces no codec suffix, so only ".orc" can be matched here.
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("NONE" === reader.getCompressionKind.name)
}
}
}
ignore("simple select queries") {
  // Rows (0, "0") .. (9, "9") registered as temp table `t`.
  withOrcTable((0 until 10).map(i => (i, i.toString)), "t") {
    val above = sql("SELECT `_1` FROM t where t.`_1` > 5")
    checkAnswer(above, (6 until 10).map(Row.apply(_)))
    val below = sql("SELECT `_1` FROM t as tmp where tmp.`_1` < 5")
    checkAnswer(below, (0 until 5).map(Row.apply(_)))
  }
}
// Verifies INSERT INTO appends: reading back after the insert must yield the
// original rows twice.
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcFile(data) { file =>
withTempView("t") {
spark.read.orc(file).createOrReplaceTempView("t")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
}
}
// Remove the `tmp` relation so later tests start clean.
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"),
ignoreIfNotExists = true,
purge = false)
}
ignore("overwriting") {
  val rows = (0 until 10).map(i => (i, i.toString))
  spark.createDataFrame(rows).toDF("c1", "c2").createOrReplaceTempView("tmp")
  withOrcTable(rows, "t") {
    // Overwrite must leave exactly one copy of the data in place.
    sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
    checkAnswer(spark.table("t"), rows.map(Row.fromTuple))
  }
  // Remove the `tmp` relation so later tests start clean.
  spark.sessionState.catalog.dropTable(
    TableIdentifier("tmp"),
    ignoreIfNotExists = true,
    purge = false)
}
// Self-join over ORC data: the two join legs must expose the same column
// names with distinct expression IDs, and NULL keys must not match.
ignore("self-join") {
  // 4 rows, cells of column 1 of row 2 and row 4 are null
  val data = (1 to 4).map { i =>
    val maybeInt = if (i % 2 == 0) None else Some(i)
    (maybeInt, i.toString)
  }
  withOrcTable(data, "t") {
    val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x.`_1` = y.`_1`")
    val queryOutput = selfJoin.queryExecution.analyzed.output
    // Both legs contribute `_1` and `_2`, so 4 attributes in total.
    assertResult(4, "Field count mismatches")(queryOutput.size)
    // Fix: the clue used "\\n", which printed a literal backslash-n instead
    // of a newline before the plan dump.
    assertResult(2, s"Duplicated expression ID in query plan:\n $selfJoin") {
      queryOutput.filter(_.name == "_1").map(_.exprId).size
    }
    // NULL join keys never match themselves, so only rows 1 and 3 survive.
    checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
  }
}
// Round-trips a struct column whose second field is an array, then reads an
// element of the nested array back by ordinal.
ignore("nested data - struct with array field") {
val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`.`_2`[0] FROM t"), data.map {
case Tuple1((_, Seq(string))) => Row(string)
})
}
}
// Mirror case: an array column whose elements are structs.
ignore("nested data - array of struct") {
val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`[0].`_2` FROM t"), data.map {
case Tuple1(Seq((_, string))) => Row(string)
})
}
}
// A column referenced only by a pushed-down filter must still be resolvable
// by the scan.
ignore("columns only referenced by pushed down filters should remain") {
withOrcTable((1 to 10).map(Tuple1.apply), "t") {
checkAnswer(sql("SELECT `_1` FROM t WHERE `_1` < 10"), (1 to 9).map(Row.apply(_)))
}
}
// SPARK-5309: aggregation over low-cardinality string columns, which ORC
// stores with dictionary encoding.
ignore("SPARK-5309 strings stored using dictionary compression in orc") {
withOrcTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
// 10 groups of 100 rows each (i / 100 buckets the run labels).
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t GROUP BY `_1`, `_2`"),
(0 until 10).map(i => Row("same", "run_" + i, 100)))
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t WHERE `_2` = 'run_5' GROUP BY `_1`, `_2`"),
List(Row("same", "run_5", 100)))
}
}
// SPARK-9170: user-provided column names must keep their original casing in
// the persisted ORC schema.
test("SPARK-9170: Don't implicitly lowercase of user-provided columns") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(0, 10).select('id as "Acol").write.orc(path)
// Exact-case lookup on the persisted schema succeeds...
spark.read.orc(path).schema("Acol")
// ...while a lookup with different casing must fail.
intercept[IllegalArgumentException] {
spark.read.orc(path).schema("acol")
}
// NOTE(review): `select("acol")` is expected to resolve here even though
// `schema("acol")` throws — presumably analysis is case-insensitive while
// StructType field lookup is exact; confirm against the resolver config.
checkAnswer(spark.read.orc(path).select("acol").sort("acol"),
(0 until 10).map(Row(_)))
}
}
// SPARK-10623: end-to-end check that ORC predicate pushdown actually prunes
// rows before Spark's own Filter operator runs.
ignore("SPARK-10623 Enable ORC PPD") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
import testImplicits._
val path = dir.getCanonicalPath
// For field "a", the first column has odds integers. This is to check the filtered count
// when `isNull` is performed. For Field "b", `isNotNull` of ORC file filters rows
// only when all the values are null (maybe this works differently when the data
// or query is complicated). So, simply here a column only having `null` is added.
val data = (0 until 10).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
val nullValue: Option[String] = None
(maybeInt, nullValue)
}
// It needs to repartition data so that we can have several ORC files
// in order to skip stripes in ORC.
spark.createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)
val df = spark.read.orc(path)
// Runs `pred`, strips the Spark-side Filter, and checks that the scan
// itself returned fewer than all 10 rows while still containing `answer`.
def checkPredicate(pred: Column, answer: Seq[Row]): Unit = {
val sourceDf = stripSparkFilter(df.where(pred))
val data = sourceDf.collect().toSet
val expectedData = answer.toSet
// When a filter is pushed to ORC, ORC can apply it to rows. So, we can check
// the number of rows returned from the ORC to make sure our filter pushdown work.
// A tricky part is, ORC does not process filter rows fully but return some possible
// results. So, this checks if the number of result is less than the original count
// of data, and then checks if it contains the expected data.
assert(
sourceDf.count < 10 && expectedData.subsetOf(data),
s"No data was filtered for predicate: $pred")
}
// One case per supported pushdown operator; expected rows computed from
// the odd-valued "a" column built above.
checkPredicate('a === 5, List(5).map(Row(_, null)))
checkPredicate('a <=> 5, List(5).map(Row(_, null)))
checkPredicate('a < 5, List(1, 3).map(Row(_, null)))
checkPredicate('a <= 5, List(1, 3, 5).map(Row(_, null)))
checkPredicate('a > 5, List(7, 9).map(Row(_, null)))
checkPredicate('a >= 5, List(5, 7, 9).map(Row(_, null)))
checkPredicate('a.isNull, List(null).map(Row(_, null)))
checkPredicate('b.isNotNull, List())
checkPredicate('a.isin(3, 5, 7), List(3, 5, 7).map(Row(_, null)))
checkPredicate('a > 0 && 'a < 3, List(1).map(Row(_, null)))
checkPredicate('a < 1 || 'a > 8, List(9).map(Row(_, null)))
checkPredicate(!('a > 3), List(1, 3).map(Row(_, null)))
checkPredicate(!('a > 0 && 'a < 3), List(3, 5, 7, 9).map(Row(_, null)))
}
}
}
// SPARK-14962: `IS NOT NULL` on an array column must not be pushed down in a
// way that drops rows.
ignore("SPARK-14962 Produce correct results on array type with isnotnull") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(i => Tuple1(Array(i)))
withOrcFile(data) { file =>
val actual = spark
.read
.orc(file)
.where("_1 is not null")
// No element is null, so the filter must keep every row.
val expected = data.toDF()
checkAnswer(actual, expected)
}
}
}
ignore("SPARK-15198 Support for pushing down filters for boolean types") {
  withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
    // Every row is (true, false), so `_2 == true` matches nothing.
    val rows = (0 until 10).map(_ => (true, false))
    withOrcFile(rows) { file =>
      val filtered = spark.read.orc(file).where("_2 == true")
      // ORC filter should be applied and the total count should be 0.
      assert(stripSparkFilter(filtered).count() === 0)
    }
  }
}
ignore("Support for pushing down filters for decimal types") {
  withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
    // Ten distinct decimal values; the predicate should prune all but one.
    val decimals = for (i <- 0 until 10) yield Tuple1(BigDecimal.valueOf(i))
    val frame = spark.createDataFrame(decimals).toDF("a")
    checkPredicatePushDown(frame, 10, "a == 2")
  }
}
// Timestamp predicates should be convertible to ORC search arguments.
ignore("Support for pushing down filters for timestamp types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val timeString = "2015-08-20 14:57:00"
val data = (0 until 10).map { i =>
// Offsets are in milliseconds (i * 3.6 seconds apart), so only i == 0
// equals the literal compared against below.
val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
Tuple1(new Timestamp(milliseconds))
}
checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, s"a == '$timeString'")
}
}
// Writes a schema carrying nullability flags and a column comment, then
// checks what survives a save/read cycle through a data-source table.
test("column nullability and comment - write and then read") {
val schema = (new StructType)
.add("cl1", IntegerType, nullable = false, comment = "test")
.add("cl2", IntegerType, nullable = true)
.add("cl3", IntegerType, nullable = true)
val row = Row(3, null, 4)
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
val tableName = "tab"
withTable(tableName) {
df.write.format("orc").mode("overwrite").saveAsTable(tableName)
// Verify the DDL command result: DESCRIBE TABLE
checkAnswer(
sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"),
Row("cl1", "test") :: Nil)
// Verify the schema
// The table is expected to come back with every field nullable, i.e. the
// original `nullable = false` on cl1 is not preserved by the round trip.
val expectedFields = schema.fields.map(f => f.copy(nullable = true))
assert(spark.table(tableName).schema == schema.copy(fields = expectedFields))
}
}
// Reads an ORC file through the raw Hadoop record reader with an empty
// include list: the reader should materialise only null columns.
test("Empty schema does not read data from ORC file") {
val data = Seq((1, 1), (2, 2))
withOrcFile(data) { path =>
val conf = new Configuration()
// Select no columns at all and disable the read-all-columns fallback.
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute, "")
conf.setBoolean("hive.io.file.read.all.columns", false)
val orcRecordReader = {
val file = new File(path).listFiles().find(_.getName.endsWith(".snappy.orc")).head
val split = new FileSplit(new Path(file.toURI), 0, file.length, Array.empty[String])
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
val oif = new OrcInputFormat[OrcStruct]
oif.createRecordReader(split, hadoopAttemptContext)
}
val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
try {
// Both projected fields must come back unset.
assert(recordsIterator.next().toString == "{null, null}")
} finally {
recordsIterator.close()
}
}
}
test("read from multiple orc input paths") {
  // Two independent directories, each holding the same ten rows.
  val dirs = Seq(Utils.createTempDir(), Utils.createTempDir())
  dirs.foreach(dir => makeOrcFile((1 to 10).map(Tuple1.apply), dir))
  // Reading both paths at once must concatenate their contents.
  val combined = spark.read.orc(dirs.map(_.getCanonicalPath): _*)
  assert(combined.count() == 20)
}
// Exercises spark.sql.files.ignoreCorruptFiles using a JSON file posing as
// ORC: reads must skip it when the flag is on and fail loudly when off.
ignore("Enabling/disabling ignoreCorruptFiles") {
// Mixed input where only "third" is corrupt (JSON, not ORC); the schema is
// inferred from the valid ORC files.
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
// Same layout, but an explicit schema avoids footer-based inference.
def testIgnoreCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
// Every input is corrupt and the schema must be inferred.
def testAllCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
// Every input is corrupt but an explicit schema is supplied.
def testAllCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
testIgnoreCorruptFiles()
testIgnoreCorruptFilesWithoutSchemaInfer()
// With nothing but corrupt files there is no schema to infer at all, so
// even ignoreCorruptFiles=true cannot produce a DataFrame.
val m1 = intercept[AnalysisException] {
testAllCorruptFiles()
}.getMessage
assert(m1.contains("Unable to infer schema for ORC"))
testAllCorruptFilesWithoutSchemaInfer()
}
// With the flag off, each variant must surface the corruption as an error.
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e1 = intercept[SparkException] {
testIgnoreCorruptFiles()
}
assert(e1.getMessage.contains("Malformed ORC file"))
val e2 = intercept[SparkException] {
testIgnoreCorruptFilesWithoutSchemaInfer()
}
assert(e2.getMessage.contains("Malformed ORC file"))
val e3 = intercept[SparkException] {
testAllCorruptFiles()
}
assert(e3.getMessage.contains("Could not read footer for file"))
val e4 = intercept[SparkException] {
testAllCorruptFilesWithoutSchemaInfer()
}
assert(e4.getMessage.contains("Malformed ORC file"))
}
}
// SPARK-27160: decimal comparisons must stay exact when pushed into ORC.
ignore("SPARK-27160 Predicate pushdown correctness on DecimalType for ORC") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val path = dir.getCanonicalPath
Seq(BigDecimal(0.1), BigDecimal(0.2), BigDecimal(-0.3))
.toDF("x").write.orc(path)
val df = spark.read.orc(path)
// Each comparison operator is checked against hand-computed survivors.
checkAnswer(df.filter("x >= 0.1"), Seq(Row(0.1), Row(0.2)))
checkAnswer(df.filter("x > 0.1"), Seq(Row(0.2)))
checkAnswer(df.filter("x <= 0.15"), Seq(Row(0.1), Row(-0.3)))
checkAnswer(df.filter("x < 0.1"), Seq(Row(-0.3)))
checkAnswer(df.filter("x == 0.2"), Seq(Row(0.2)))
}
}
}
}
/**
 * ORC query tests that require a live SparkSession; extended below by the
 * v1/v2 data-source suite variants.
 */
abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
import testImplicits._
test("LZO compression options for writing to an ORC file") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "LZO")
.orc(file.getCanonicalPath)
// The codec shows up both in the file name and the ORC footer.
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".lzo.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("LZO" === reader.getCompressionKind.name)
}
}
}
test("Schema discovery on empty ORC files") {
// SPARK-8501 is fixed.
withTempPath { dir =>
val path = dir.getCanonicalPath
withTable("empty_orc") {
withTempView("empty", "single") {
spark.sql(
s"""CREATE TABLE empty_orc(key INT, value STRING)
|USING ORC
|LOCATION '${dir.toURI}'
""".stripMargin)
val emptyDF = Seq.empty[(Int, String)].toDF("key", "value").coalesce(1)
emptyDF.createOrReplaceTempView("empty")
// This creates 1 empty ORC file with ORC SerDe. We are using this trick because
// Spark SQL ORC data source always avoids write empty ORC files.
spark.sql(
s"""INSERT INTO TABLE empty_orc
|SELECT key, value FROM empty
""".stripMargin)
val df = spark.read.orc(path)
// Discovery succeeds, but every field comes back nullable.
assert(df.schema === emptyDF.schema.asNullable)
checkAnswer(df, emptyDF)
}
}
}
}
test("SPARK-21791 ORC should support column names with dot") {
withTempDir { dir =>
val path = new File(dir, "orc").getCanonicalPath
Seq(Some(1), None).toDF("col.dots").write.orc(path)
assert(spark.read.orc(path).collect().length == 2)
}
}
ignore("SPARK-25579 ORC PPD should support column names with dot") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
// Backticks keep the dotted name from being parsed as a struct access.
checkPredicatePushDown(spark.range(10).toDF("col.dot"), 10, "`col.dot` == 2")
}
}
test("SPARK-20728 Make ORCFileFormat configurable between sql/hive and sql/core") {
// The "hive" implementation is unusable without Hive support...
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "hive") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
}
assert(e.message.contains("Hive built-in ORC data source must be used with Hive support"))
}
// ...while "native" resolves to sql/core's OrcFileFormat.
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
withTable("spark_20728") {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
val fileFormat = sql("SELECT * FROM spark_20728").queryExecution.analyzed.collectFirst {
case l: LogicalRelation => l.relation.asInstanceOf[HadoopFsRelation].fileFormat.getClass
}
assert(fileFormat == Some(classOf[OrcFileFormat]))
}
}
}
}
// ORC suite run through the v1 file-source code path, with the OAP columnar
// plugin configuration shared by both variants.
class OrcV1QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
// NOTE(review): if SQLConf.USE_V1_SOURCE_LIST shares its key with the
// literal "spark.sql.sources.useV1SourceList" set above, this later call
// wins and pins ORC to the v1 path; confirm the constant's key.
.set(SQLConf.USE_V1_SOURCE_LIST, "orc")
}
// Same suite with USE_V1_SOURCE_LIST emptied, i.e. the v2 data-source path.
class OrcV2QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
// NOTE(review): an empty list here presumably routes all sources,
// including ORC, through the v2 path — the counterpart of OrcV1QuerySuite.
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala | Scala | apache-2.0 | 28,549 |
package com.ezoky.ezmodel.ddd
import org.scalatest.funsuite.AnyFunSuite
import Entity._
import org.scalatest.EitherValues
/**
* @author gweinbach
*/
/**
 * Unit tests for the DDD [[Entity]] abstraction: identity vs state equality,
 * state transitions via changeState/`+`, and initial/final/invalid state
 * rules.
 */
class EntityTest
extends AnyFunSuite
with EitherValues {
test("Equality of identity of DDD Entities is reflexive") {
// Same identifier "id A", different states.
val e1 = Entity[Int,String](1)("id A")
val e2 = Entity[Int,String](2)("id A")
val e3 = Entity[Int,String](3)("id B")
assert(e1.hasSameIdentity(e2) && e2.hasSameIdentity(e1))
assert(!e1.hasSameIdentity(e3) && !e3.hasSameIdentity(e1))
}
test("Equality of states of DDD Entities is reflexive") {
val e1 = Entity(1)("id A")
val e2 = Entity(1)("id B")
val e3 = Entity(2)("id C")
assert(e1.hasSameState(e2) && e2.hasSameState(e1))
assert(!e1.hasSameState(e3) && !e3.hasSameState(e1))
}
test("DDD entities are equal if they have same identity") {
// `stateIdentify` derives the identity from the state value itself.
val e1 = Entity("A")(stateIdentify)
val e2 = Entity("A")(stateIdentify)
val e3 = Entity("B")(stateIdentify)
assert(e1.hasSameIdentity(e2) && e1 == e2)
assert(!e1.hasSameIdentity(e3) && (e1 !== e3))
// Same identity with different states still compares equal.
val e4 = Entity[String,String]("state 1")("id A")
val e5 = Entity[String,String]("state 2")("id A")
val e6 = Entity[String,String]("state 1")("id B")
assert(e4 == e5)
assert(e4 !== e6)
}
test("DDD entities without Identifier use their state as identifier by default") {
// Partial application (`_`) and an explicit identity function must agree.
val e1:Entity[String,String] = Entity[String,String]("A")_
val e2:Entity[String,String] = Entity("A")(s => s)
assert(e1.hasSameIdentity(e2) && e1 == e2)
}
test("Changing state of a DDD entity does not change its identity") {
val e1 = Entity[String,String]("state 1")("id A")
val e2 = e1.changeState("state 2")
assert(!e1.hasSameState(e2))
assert(e1.hasSameIdentity(e2))
assert(e1 == e2)
}
test("DDD Entities are identical if and only if they have same identity and same state") {
val e1 = Entity[String,String]("state 1")("id A")
val e2 = Entity[String,String]("state 2")("id A")
val e3 = Entity[String,String]("state 1")("id B")
assert(!e1.isIdentical(e2))
assert(!e1.isIdentical(e3))
// Bringing e2 back to "state 1" (via `+`) restores full identity.
assert(e1.isIdentical(e2 + "state 1"))
}
test("Changing state to IdentityState does not change state") {
val e1 = Entity[String,String]("state 1")("id A")
assert(e1.hasSameState(e1 + IdentityState))
}
test("Cannot change state of an Entity to an InitialState") {
val e1 = Entity[String,String]("A")("id 1")
// `.state` is an Either; `.left.value` (EitherValues) extracts the error.
assert(e1.changeState(CommonInitialState("B")).state.left.value == CannotChangeToInitialState("B"))
}
test("Cannot change state after FinalState is reached") {
val e1 = Entity[String,String]("A")("id 1")
val e2 = e1.changeState(CommonFinalState("B"))
assert(e2.isStateFinal.getOrElse(false) === true)
assert(e2.changeState("C").state.left.value === CannotChangeFromFinalState("B"))
}
test("Cannot change state after InvalidState is reached") {
val e1 = Entity[String,String]("A")("id 1")
val e2 = e1.changeState(CommonFinalState("B"))
assert(e2.isStateFinal.getOrElse(false) === true)
val e3 = e2.changeState("C") // this is an invalid state
assert(e3.isStateInvalid.isLeft)
// Further transitions keep reporting the original final-state violation.
assert(e3.changeState("D").state.left.value == CannotChangeFromFinalState("B"))
}
test("+ operator changes state") {
val e1 = Entity[String,String]("A")("id")
val state1 = "state 1"
val state2 = "state 2"
// `+` is shorthand for changeState and must compose the same way.
val e2 = e1 + state1 + state2
val e3 = e1.changeState(state1).changeState(state2)
assert(e2.hasSameState(e3))
}
test("Identity is immutable") {
case class StateExample(id: Int, value: String)
val stateId: Identify[StateExample, Int] = _.id
val e1 = Entity(StateExample(1, "state 1"))(stateId)
// Mutating the id embedded in the state invalidates the entity.
assert(e1.changeState(StateExample(2, "state 1")).left.value === InvalidEntity(IdentityHasMutated,StateExample(2, "state 1"))(stateId))
}
test("Entities can have same state but be different if they have different ids") {
case class StateExample(id1: Int, id2: Int)
val state = StateExample(1, 2)
val stateId1: Identify[StateExample, Int] = _.id1
val stateId2: Identify[StateExample, Int] = _.id2
val e1 = Entity(state)(stateId1)
val e2 = Entity(state)(stateId2)
assert(e1.hasSameState(e2) && (e1 !== e2))
}
test("Entities with different state structures can be seen as equal") {
case class StateExample1(id1: Int, aVal: String)
case class StateExample2(valS: String, id2: Int)
val state1 = StateExample1(1, "val 1")
val state2 = StateExample2("val 2", 1)
val stateId1: Identify[StateExample1, Int] = _.id1
val stateId2: Identify[StateExample2, Int] = _.id2
val e1 = Entity(state1)(stateId1)
val e2 = Entity(state2)(stateId2)
// Equality is identity-based, so differing state types don't matter.
assert(!e1.hasSameState(e2) && (e1 == e2))
}
}
| ezoky/ezmodel | ezmodel-core/src/test/scala/com/ezoky/ezmodel/ddd/EntityTest.scala | Scala | gpl-2.0 | 4,828 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
// CT computation box CP302: the optional "Qualifying charitable donations
// EEA" amount entered by the user.
case class CP302(value: Option[Int]) extends CtBoxIdentifier(name = "Qualifying charitable donations EEA")
with CtOptionalInteger with Input with ValidatableBox[ComputationsBoxRetriever] {
// The box must be present exactly when question CPQ21 is answered true, and
// any supplied value must be a non-negative integer.
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
collectErrors(
// A value is not allowed when CPQ21 was answered false.
cannotExistErrorIf(hasValue && boxRetriever.cpQ21().isFalse),
// Conversely, a value is mandatory when CPQ21 is true.
requiredErrorIf(!hasValue && boxRetriever.cpQ21().isTrue),
validateZeroOrPositiveInteger(this)
)
}
}
object CP302 {
  // Convenience factory: lift a plain Int into the optional box value.
  def apply(int: Int): CP302 = CP302(Option(int))
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP302.scala | Scala | apache-2.0 | 1,293 |
package services
class GreetingService {
  // Returns a language-specific greeting; anything other than "it" falls
  // back to the English default.
  def greetingMessage(language: String) =
    if (language == "it") "Messi" else "Hello"
}
| play2-maven-plugin/play2-maven-test-projects | play28/scala/macwire-di-example/app/services/GreetingService.scala | Scala | apache-2.0 | 156 |
package controllers.backend
import com.google.inject.ImplementedBy
import java.util.UUID
import javax.inject.Inject
import scala.collection.immutable
import scala.concurrent.Future
import com.overviewdocs.database.Database
import com.overviewdocs.models.Plugin
import com.overviewdocs.models.tables.Plugins
/** Persistence operations for [[Plugin]] rows. */
@ImplementedBy(classOf[DbPluginBackend])
trait PluginBackend {
/** Lists all Plugins, in alphabetical order. */
def index: Future[immutable.Seq[Plugin]]
/** Creates a Plugin. */
def create(attributes: Plugin.CreateAttributes): Future[Plugin]
/** Modifies a Plugin and returns the modified version.
*
* Returns None if the Plugin does not exist.
*
* If you do not run this in a transaction, there is a potential race. This
* method runs an UPDATE and then a SELECT. See
* https://github.com/slick/slick/issues/963
*/
def update(id: UUID, attributes: Plugin.UpdateAttributes): Future[Option[Plugin]]
/** Destroys a Plugin. */
def destroy(id: UUID): Future[Unit]
}
/** Slick-backed implementation of [[PluginBackend]]. */
class DbPluginBackend @Inject() (val database: Database) extends PluginBackend with DbBackend {
import database.api._
import database.executionContext
override def index = database.seq(indexCompiled)
override def create(attributes: Plugin.CreateAttributes) = database.run(inserter.+=(Plugin.build(attributes)))
override def update(id: UUID, attributes: Plugin.UpdateAttributes) = {
// Tuple shape must match the projection in updatePluginAttributes below.
val row = (
attributes.name,
attributes.description,
attributes.url,
attributes.serverUrlFromPlugin,
attributes.autocreate,
attributes.autocreateOrder
)
database.run {
// UPDATE first, then re-SELECT the row; see the race note on the trait.
updatePluginAttributes(id).update(row)
.flatMap(_ match {
// 0 rows updated means the plugin does not exist.
case 0 => DBIO.successful(None)
case _ => byIdCompiled(id).result.headOption
})
}
}
override def destroy(id: UUID) = database.delete(byIdCompiled(id))
// Pre-compiled queries avoid re-planning on every call.
private lazy val byIdCompiled = Compiled { (id: Rep[UUID]) =>
Plugins.filter(_.id === id)
}
private lazy val indexCompiled = Compiled { Plugins.sortBy(_.name) }
protected lazy val inserter = (Plugins returning Plugins)
private lazy val updatePluginAttributes = Compiled { (id: Rep[UUID]) =>
for (p <- Plugins if p.id === id) yield (p.name, p.description, p.url, p.serverUrlFromPlugin, p.autocreate, p.autocreateOrder)
}
}
| overview/overview-server | web/app/controllers/backend/PluginBackend.scala | Scala | agpl-3.0 | 2,338 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
**/
package scala.tests
import org.scalatest._
import org.scalatestplus.play.PlaySpec
import razie.tconf.BaseTextSpec
import razie.tconf.parser.JMapFoldingContext
import razie.wiki.model._
import razie.wiki.parser.{CsvParser, DomParser, WikiParserT}
// todo needs to go into tests
//class ParserSpec extends WordSpec with MustMatchers with OptionValues {
// Basic smoke test for the wiki markdown parser, plus scratch fixtures left
// over from debugging (kept but unused — see notes below).
class TestWikiBasicParser extends PlaySpec {
"parser" should {
"parse md" in {
val res = applys(
"""
some text
""")
assert(res contains "some text")
}
}
// NOTE(review): tabsp1 and the commented-out printlns below are unused
// debugging leftovers; candidates for removal.
val tabsp1 =
"""
Ontario (slalom and GS), within the following four age groups:
- Nancy Greene League Racing (ages 7 to 10)
- K1 League Racing (ages 11 and 12),
- K2 League Racing (ages 13 and 14) and
- J Alpine League Racing (ages 15 to 18)
"""
// println (WikiParser.applys(tabsp))
// println (Wikis.format(WID.NONE, "md", tabsp))
// val content = Wikis.preprocess(WID.NONE, "md", tabsp1).s
val content =
"""fufu
"""
// println(toXHTML(knockoff(content)).toString)
// println(Wikis.format (WID("1","2"), "md", content))
// println(Wikis.format(WID("1", "2"), "md", content))
//---------------------
// println(f())
// NOTE(review): f() only defines a local val and returns Unit; it looks
// like an abandoned fixture for a `{{section}}` parsing test.
def f() = {
val ccc =
"""
{{section:supportreq}}
present
{{/section}}
"""
}
/** the simplest spec - from a named string property */
case class XTextSpec (override val name:String, override val text:String) extends BaseTextSpec(name, text) {
override def mkParser = new XTextParser("rk")
}
// Parser wired with the domain blocks for realm "rk".
class XTextParser(val realm: String) extends WikiParserT with DomParser {
withBlocks(domainBlocks)
}
// Parses `s` with a throw-away spec and returns the parse result.
def applys(s: String) = new XTextSpec("spec", s).parsed
}
| razie/diesel-rx | wiki/test/scala/tests/TestWikiBasicParser.scala | Scala | apache-2.0 | 1,999 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import com.google.common.primitives.Longs
import org.apache.accumulo.core.client.IteratorSetting
import org.apache.accumulo.core.data.{ByteSequence, Key, Value, Range => AccRange}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.index.legacy.z2.Z2IndexV1
import org.locationtech.geomesa.curve.Z2SFC
import org.locationtech.geomesa.index.filters.Z2Filter
import org.locationtech.sfcurve.zorder.Z2
import org.opengis.feature.simple.SimpleFeatureType
/**
 * Accumulo server-side iterator that skips keys whose row's Z2 value falls
 * outside the configured XY query boxes.
 */
class Z2Iterator extends SortedKeyValueIterator[Key, Value] {
import org.locationtech.geomesa.accumulo.iterators.Z2Iterator._
var source: SortedKeyValueIterator[Key, Value] = null
var keyXY: String = null
var zOffset: Int = -1
var zLength: Int = -1
var xyvals: Array[Array[Int]] = null
var rowToZ: Array[Byte] => Long = null
var filter: Z2Filter = _
var topKey: Key = null
var topValue: Value = null
val row = new Text()
override def init(source: SortedKeyValueIterator[Key, Value],
options: java.util.Map[String, String],
env: IteratorEnvironment): Unit = {
this.source = source
zOffset = options.get(ZOffsetKey).toInt
zLength = options.get(ZLengthKey).toInt
// NOTE(review): rowToZ is initialised here but never read afterwards in
// this class — Z2Filter does the row decoding. Candidate for removal.
rowToZ = getRowToZ(zOffset, zLength)
keyXY = options.get(ZKeyXY)
// keyXY encodes one "xmin:ymin:xmax:ymax" term per box, ';'-separated.
xyvals = keyXY.split(TermSeparator).map(_.split(RangeSeparator).map(_.toInt))
filter = new Z2Filter(xyvals, zOffset, zLength)
}
override def next(): Unit = {
source.next()
findTop()
}
// Advances the source until a key passes the Z2 bounds, caching it in
// topKey/topValue (left null when the source is exhausted).
private def findTop(): Unit = {
topKey = null
topValue = null
while (source.hasTop) {
if (inBounds(source.getTopKey)) {
topKey = source.getTopKey
topValue = source.getTopValue
return
} else {
source.next()
}
}
}
private def inBounds(k: Key): Boolean = {
// Re-uses the single Text buffer to avoid a per-key allocation.
k.getRow(row)
filter.inBounds(row.getBytes)
}
override def seek(range: AccRange, columnFamilies: java.util.Collection[ByteSequence], inclusive: Boolean): Unit = {
source.seek(range, columnFamilies, inclusive)
findTop()
}
override def getTopValue: Value = topValue
override def getTopKey: Key = topKey
override def hasTop: Boolean = topKey != null
override def deepCopy(env: IteratorEnvironment): SortedKeyValueIterator[Key, Value] = {
import scala.collection.JavaConversions._
// Re-serialise our settings so the copy goes through the same init path.
val opts = Map(
ZKeyXY -> keyXY,
ZOffsetKey -> zOffset.toString,
ZLengthKey -> zLength.toString
)
val iter = new Z2Iterator
iter.init(source.deepCopy(env), opts, env)
iter
}
}
object Z2Iterator {
// Option keys used to ship the query configuration to the iterator.
val ZKeyXY = "zxy"
val ZOffsetKey = "zo"
val ZLengthKey = "zl"
private val RangeSeparator = ":"
private val TermSeparator = ";"
/**
 * Builds the IteratorSetting for the given query boxes: bounds are mapped
 * into Z2 index space and the row offset accounts for shard/table-sharing
 * bytes.
 */
def configure(sft: SimpleFeatureType,
bounds: Seq[(Double, Double, Double, Double)],
priority: Int): IteratorSetting = {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
val is = new IteratorSetting(priority, "z2", classOf[Z2Iterator])
// index space values for comparing in the iterator
val xyOpts = if (sft.isPoints) {
bounds.map { case (xmin, ymin, xmax, ymax) =>
s"${Z2SFC.lon.normalize(xmin)}$RangeSeparator${Z2SFC.lat.normalize(ymin)}$RangeSeparator" +
s"${Z2SFC.lon.normalize(xmax)}$RangeSeparator${Z2SFC.lat.normalize(ymax)}"
}
} else {
// Non-point geometries store a truncated z, so round-trip through it.
bounds.map { case (xmin, ymin, xmax, ymax) =>
val (lx, ly) = decodeNonPoints(xmin, ymin)
val (ux, uy) = decodeNonPoints(xmax, ymax)
s"$lx$RangeSeparator$ly$RangeSeparator$ux$RangeSeparator$uy"
}
}
is.addOption(ZKeyXY, xyOpts.mkString(TermSeparator))
// account for shard and table sharing bytes
is.addOption(ZOffsetKey, if (sft.isTableSharing) { "2" } else { "1" })
is.addOption(ZLengthKey, if (sft.isPoints) { "8" } else { Z2IndexV1.GEOM_Z_NUM_BYTES.toString })
is
}
// Masks the z value to the stored precision and decodes back to grid cells.
private def decodeNonPoints(x: Double, y: Double): (Int, Int) =
Z2(Z2SFC.index(x, y).z & Z2IndexV1.GEOM_Z_MASK).decode
// Returns a row-bytes -> z decoder for the given offset and z byte length
// (8 for points; 3 or 4 for truncated non-point z values).
private def getRowToZ(offset: Int, length: Int): (Array[Byte]) => Long = {
val z0 = offset
val z1 = offset + 1
val z2 = offset + 2
val z3 = offset + 3
val z4 = offset + 4
val z5 = offset + 5
val z6 = offset + 6
val z7 = offset + 7
if (length == 8) {
(b) => Longs.fromBytes(b(z0), b(z1), b(z2), b(z3), b(z4), b(z5), b(z6), b(z7))
} else if (length == 3) {
// Shorter z values are left-aligned and zero-padded to 64 bits.
(b) => Longs.fromBytes(b(z0), b(z1), b(z2), 0, 0, 0, 0, 0)
} else if (length == 4) {
(b) => Longs.fromBytes(b(z0), b(z1), b(z2), b(z3), 0, 0, 0, 0)
} else {
throw new IllegalArgumentException(s"Unhandled number of bytes for z value: $length")
}
}
}
| spandanagrawal/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/iterators/Z2Iterator.scala | Scala | apache-2.0 | 5,345 |
package coursier
import coursier.core.Reconciliation
import coursier.error.conflict.{StrictRule, UnsatisfiedRule}
import coursier.graph.Conflict
import coursier.params.ResolutionParams
import coursier.params.rule.{
AlwaysFail,
DontBumpRootDependencies,
RuleResolution,
SameVersion,
Strict
}
import coursier.util.{ModuleMatcher, ModuleMatchers}
import utest._
import scala.async.Async.{async, await}
// Tests for coursier resolution rules (AlwaysFail, SameVersion, Strict,
// DontBumpRootDependencies) and version reconciliation strategies, run as real
// resolutions against the shared test cache.
object ResolveRulesTests extends TestSuite {
import TestHelpers.{ec, cache, validateDependencies}
val tests = Tests {
// AlwaysFail can never be satisfied; these tests pin how the failure surfaces
// depending on the configured RuleResolution and the rule's doTryResolve flag.
test("alwaysFail") {
test("wrongRuleTryResolve") - async {
val rule = AlwaysFail(doTryResolve = true)
// should fail anyway (tryResolve of AlwaysFail does nothing)
val ruleRes = RuleResolution.TryResolve
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(dep"io.get-coursier:coursier-cli_2.12:1.1.0-M8")
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: UnsatisfiedRule =>
assert(f.rule == rule)
assert(f.isInstanceOf[AlwaysFail.Nope])
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
test("failRuleTryResolve") - async {
val rule = AlwaysFail(doTryResolve = false)
// should fail anyway (tryResolve of AlwaysFail fails anyway)
val ruleRes = RuleResolution.TryResolve
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(dep"io.get-coursier:coursier-cli_2.12:1.1.0-M8")
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: AlwaysFail.NopityNope =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[AlwaysFail.Nope])
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
test("failRuleResolution") - async {
val rule = AlwaysFail()
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(dep"io.get-coursier:coursier-cli_2.12:1.1.0-M8")
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: StrictRule =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[AlwaysFail.Nope])
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
}
// SameVersion aligns the versions of the listed modules (exact names or globs);
// both resolutions below are expected to succeed after the rule is applied.
test("sameVersionRule") {
test - async {
val params = ResolutionParams()
.withScalaVersion("2.12.7")
.addRule(
SameVersion(
mod"com.fasterxml.jackson.core:jackson-annotations",
mod"com.fasterxml.jackson.core:jackson-core",
mod"com.fasterxml.jackson.core:jackson-databind"
),
RuleResolution.TryResolve
)
val res = await {
Resolve()
.noMirrors
.addDependencies(dep"sh.almond:scala-kernel_2.12.7:0.2.2")
.addRepositories(Repositories.jitpack)
.withResolutionParams(params)
.withCache(cache)
.future()
}
await(validateDependencies(res, params))
}
test - async {
val params = ResolutionParams()
.withScalaVersion("2.12.7")
.addRule(
SameVersion(mod"com.fasterxml.jackson.core:jackson-*"),
RuleResolution.TryResolve
)
val res = await {
Resolve()
.noMirrors
.addDependencies(dep"sh.almond:scala-kernel_2.12.7:0.2.2")
.addRepositories(Repositories.jitpack)
.withResolutionParams(params)
.withCache(cache)
.future()
}
await(validateDependencies(res, params))
}
}
// Strict fails the resolution whenever any dependency is evicted (bumped),
// unless excluded or (by default) forced; failures surface as StrictRule
// wrapping a Strict.EvictedDependencies conflict.
test("strict") {
test("fail") - async {
val rule = Strict()
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(dep"io.get-coursier:coursier-cli_2.12:1.1.0-M8")
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: StrictRule =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
// a root dependency itself being bumped must also be reported as a conflict
"for roots" - async {
val rule = Strict()
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(
dep"org.typelevel:cats-effect_2.11:1.3.1",
dep"org.typelevel:cats-core_2.11:1.5.0"
)
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
val expectedEvicted = Seq(
Conflict(
mod"org.typelevel:cats-core_2.11",
"1.6.0",
"1.5.0",
wasExcluded = false,
mod"org.typelevel:cats-core_2.11",
"1.5.0"
)
)
val evicted = ex match {
case f: StrictRule =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
f.conflict match {
case e: Strict.EvictedDependencies => e.evicted.map(_.conflict)
case _ => ???
}
case _ =>
throw new Exception("Unexpected exception type", ex)
}
assert(evicted == expectedEvicted)
}
// version-interval dependencies that get resolved away still count as evictions
"with intervals" - async {
val rule = Strict()
.withExclude(Set(
ModuleMatcher(mod"org.scala-lang:*")
))
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
val ex = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.12:1.2.0-M4",
dep"com.chuusai:shapeless_2.12:[2.3.3,2.3.4)"
)
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
val expectedEvicted = Seq(
Conflict(
mod"com.chuusai:shapeless_2.12",
"2.3.4-M1",
"2.3.2",
wasExcluded = false,
mod"com.github.alexarchambault:argonaut-shapeless_6.2_2.12",
"1.2.0-M4"
)
)
val evicted = ex match {
case f: StrictRule =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
f.conflict match {
case e: Strict.EvictedDependencies => e.evicted.map(_.conflict)
case _ => ???
}
case _ =>
throw new Exception("Unexpected exception type", ex)
}
assert(evicted == expectedEvicted)
}
// by default, forcing a version suppresses the strict check for that module;
// withIgnoreIfForcedVersion(false) restores the failure
test("ignore if forced version") {
"do ignore" - async {
val rule = Strict()
.withExclude(Set(
ModuleMatcher(mod"org.scala-lang:*")
))
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
.addForceVersion(mod"com.chuusai:shapeless_2.12" -> "2.3.3")
val res = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.12:1.2.0-M4",
dep"com.chuusai:shapeless_2.12:[2.3.3,2.3.4)"
)
.withResolutionParams(params)
.withCache(cache)
.future()
}
await(validateDependencies(res, params))
}
"do not ignore" - async {
val rule = Strict()
.withExclude(Set(
ModuleMatcher(mod"org.scala-lang:*")
))
.withIgnoreIfForcedVersion(false)
val ruleRes = RuleResolution.Fail
val params = ResolutionParams()
.addRule(rule, ruleRes)
.addForceVersion(mod"com.chuusai:shapeless_2.12" -> "2.3.3")
val ex = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.12:1.2.0-M4",
dep"com.chuusai:shapeless_2.12:[2.3.3,2.3.4)"
)
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
val expectedEvicted = Seq(
Conflict(
mod"com.chuusai:shapeless_2.12",
"2.3.3",
"2.3.2",
wasExcluded = false,
mod"com.github.alexarchambault:argonaut-shapeless_6.2_2.12",
"1.2.0-M4"
)
)
val evicted = ex match {
case f: StrictRule =>
assert(f.rule == rule)
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
f.conflict match {
case e: Strict.EvictedDependencies => e.evicted.map(_.conflict)
case _ => ???
}
case _ =>
throw new Exception("Unexpected exception type", ex)
}
assert(evicted == expectedEvicted)
}
}
// a Strict reconciliation configured via addReconciliation behaves like the Strict rule
test("viaReconciliation") - async {
val params = ResolutionParams()
.addReconciliation(ModuleMatchers.all -> Reconciliation.Strict)
val ex = await {
Resolve()
.noMirrors
.addDependencies(dep"io.get-coursier:coursier-cli_2.12:1.1.0-M8")
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: StrictRule =>
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
}
// SemVer reconciliation only tolerates bumps that are semver-compatible;
// the Strict variant rejects any bump at all.
test("semVer reconciliation") {
"strict check" - async {
val params = ResolutionParams()
.addReconciliation(ModuleMatchers.all -> Reconciliation.Strict)
val ex = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.11:1.2.0-M11",
dep"io.argonaut:argonaut_2.11:6.1"
)
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: StrictRule =>
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
val evicted = f.conflict.asInstanceOf[Strict.EvictedDependencies]
assert(evicted.evicted.length == 2)
val conflictedModules = evicted.evicted.map(_.conflict.module).toSet
val expectedConflictedModules = Set(
mod"io.argonaut:argonaut_2.11",
mod"org.scala-lang:scala-library"
)
assert(conflictedModules == expectedConflictedModules)
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
// 6.1 -> 6.2.3 is a minor-version bump, rejected by SemVer reconciliation
"conflict" - async {
val params = ResolutionParams()
.addReconciliation(ModuleMatchers.all -> Reconciliation.SemVer)
val ex = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.11:1.2.0-M11",
dep"io.argonaut:argonaut_2.11:6.1"
)
.withResolutionParams(params)
.withCache(cache)
.future()
.failed
}
ex match {
case f: StrictRule =>
assert(f.conflict.isInstanceOf[Strict.EvictedDependencies])
val evicted = f.conflict.asInstanceOf[Strict.EvictedDependencies]
assert(evicted.evicted.length == 1)
val conflict = evicted.evicted.head.conflict
val expectedConflict = Conflict(
mod"io.argonaut:argonaut_2.11",
"6.2.3",
"6.1",
wasExcluded = false,
mod"io.argonaut:argonaut_2.11",
"6.1"
)
assert(conflict == expectedConflict)
case _ =>
throw new Exception("Unexpected exception type", ex)
}
}
// 6.2 -> 6.2.x is a patch-level bump, accepted by SemVer reconciliation
"no conflict" - async {
val params = ResolutionParams()
.addReconciliation(ModuleMatchers.all -> Reconciliation.SemVer)
val res = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.11:1.2.0-M11",
dep"io.argonaut:argonaut_2.11:6.2"
)
.withResolutionParams(params)
.withCache(cache)
.future()
}
await(validateDependencies(res, params))
}
}
// DontBumpRootDependencies pins the versions of the root dependencies, with an
// optional exclusion list of modules that are still allowed to be bumped.
test("dontBumpRootDependencies") {
test - async {
val params = ResolutionParams()
.addRule(DontBumpRootDependencies(), RuleResolution.TryResolve)
val res = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.12:1.2.0-M9",
dep"com.chuusai:shapeless_2.12:2.3.2"
)
.withResolutionParams(params)
.withCache(cache)
.future()
}
val deps = res.dependenciesWithRetainedVersions
val shapelessVersions = deps.collect {
case dep if dep.module == mod"com.chuusai:shapeless_2.12" =>
dep.version
}
val expectedShapelessVersions = Set("2.3.2")
assert(shapelessVersions == expectedShapelessVersions)
}
test - async {
val params = ResolutionParams()
.addRule(
DontBumpRootDependencies(excl"org.scala-lang:scala-library"),
RuleResolution.TryResolve
)
val res = await {
Resolve()
.noMirrors
.addDependencies(
dep"com.github.alexarchambault:argonaut-shapeless_6.2_2.12:1.2.0-M9",
dep"com.chuusai:shapeless_2.12:2.3.2",
dep"org.scala-lang:scala-library:2.12.1"
)
.withResolutionParams(params)
.withCache(cache)
.future()
}
val deps = res.dependenciesWithRetainedVersions
val shapelessVersions = deps.collect {
case dep if dep.module == mod"com.chuusai:shapeless_2.12" =>
dep.version
}
val expectedShapelessVersions = Set("2.3.2")
assert(shapelessVersions == expectedShapelessVersions)
// scala-library is excluded from the rule, so it may still be bumped (2.12.1 -> 2.12.6)
val scalaLibraryVersions = deps.collect {
case dep if dep.module == mod"org.scala-lang:scala-library" =>
dep.version
}
val expectedScalaLibraryVersions = Set("2.12.6")
assert(scalaLibraryVersions == expectedScalaLibraryVersions)
}
}
}
}
| coursier/coursier | modules/coursier/shared/src/test/scala/coursier/ResolveRulesTests.scala | Scala | apache-2.0 | 16,041 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.{immutable, mutable, Map}
import scala.reflect.ClassTag
import scala.util.Random
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.serializer._
import org.apache.spark.streaming.rdd.MapWithStateRDDRecord
import org.apache.spark.streaming.util.{EmptyStateMap, OpenHashMapBasedStateMap, StateMap}
// Tests for the StateMap implementations used by mapWithState: the no-op
// EmptyStateMap and the delta-chained OpenHashMapBasedStateMap, including
// copy-on-write semantics, serialization (Java + Kryo) and delta compaction.
class StateMapSuite extends SparkFunSuite {
private val conf = new SparkConf()
test("EmptyStateMap") {
val map = new EmptyStateMap[Int, Int]
// EmptyStateMap is immutable: put must fail, reads return nothing
intercept[UnsupportedOperationException] {
map.put(1, 1, 1)
}
assert(map.get(1) === None)
assert(map.getByTime(10000).isEmpty)
assert(map.getAll().isEmpty)
map.remove(1) // no exception
assert(map.copy().eq(map))
}
test("OpenHashMapBasedStateMap - put, get, getByTime, getAll, remove") {
val map = new OpenHashMapBasedStateMap[Int, Int]()
map.put(1, 100, 10)
assert(map.get(1) === Some(100))
assert(map.get(2) === None)
// getByTime(t) returns entries updated strictly before t
assert(map.getByTime(11).toSet === Set((1, 100, 10)))
assert(map.getByTime(10).toSet === Set.empty)
assert(map.getByTime(9).toSet === Set.empty)
assert(map.getAll().toSet === Set((1, 100, 10)))
map.put(2, 200, 20)
assert(map.getByTime(21).toSet === Set((1, 100, 10), (2, 200, 20)))
assert(map.getByTime(11).toSet === Set((1, 100, 10)))
assert(map.getByTime(10).toSet === Set.empty)
assert(map.getByTime(9).toSet === Set.empty)
assert(map.getAll().toSet === Set((1, 100, 10), (2, 200, 20)))
map.remove(1)
assert(map.get(1) === None)
assert(map.getAll().toSet === Set((2, 200, 20)))
}
test("OpenHashMapBasedStateMap - put, get, getByTime, getAll, remove with copy") {
val parentMap = new OpenHashMapBasedStateMap[Int, Int]()
parentMap.put(1, 100, 1)
parentMap.put(2, 200, 2)
parentMap.remove(1)
// Create child map and make changes
val map = parentMap.copy()
assert(map.get(1) === None)
assert(map.get(2) === Some(200))
assert(map.getByTime(10).toSet === Set((2, 200, 2)))
assert(map.getByTime(2).toSet === Set.empty)
assert(map.getAll().toSet === Set((2, 200, 2)))
// Add new items
map.put(3, 300, 3)
assert(map.get(3) === Some(300))
map.put(4, 400, 4)
assert(map.get(4) === Some(400))
assert(map.getByTime(10).toSet === Set((2, 200, 2), (3, 300, 3), (4, 400, 4)))
assert(map.getByTime(4).toSet === Set((2, 200, 2), (3, 300, 3)))
assert(map.getAll().toSet === Set((2, 200, 2), (3, 300, 3), (4, 400, 4)))
assert(parentMap.getAll().toSet === Set((2, 200, 2)))
// Remove items
map.remove(4)
assert(map.get(4) === None) // item added in this map, then removed in this map
map.remove(2)
assert(map.get(2) === None) // item removed in parent map, then added in this map
assert(map.getAll().toSet === Set((3, 300, 3)))
assert(parentMap.getAll().toSet === Set((2, 200, 2)))
// Update items
map.put(1, 1000, 100)
assert(map.get(1) === Some(1000)) // item removed in parent map, then added in this map
map.put(2, 2000, 200)
assert(map.get(2) === Some(2000)) // item added in parent map, then removed + added in this map
map.put(3, 3000, 300)
assert(map.get(3) === Some(3000)) // item added + updated in this map
map.put(4, 4000, 400)
assert(map.get(4) === Some(4000)) // item removed + updated in this map
assert(map.getAll().toSet ===
Set((1, 1000, 100), (2, 2000, 200), (3, 3000, 300), (4, 4000, 400)))
assert(parentMap.getAll().toSet === Set((2, 200, 2)))
map.remove(2) // remove item present in parent map, so that its not visible in child map
// Create child map and see availability of items
val childMap = map.copy()
assert(childMap.getAll().toSet === map.getAll().toSet)
assert(childMap.get(1) === Some(1000)) // item removed in grandparent, but added in parent map
assert(childMap.get(2) === None) // item added in grandparent, but removed in parent map
assert(childMap.get(3) === Some(3000)) // item added and updated in parent map
childMap.put(2, 20000, 200)
assert(childMap.get(2) === Some(20000)) // item map
}
test("OpenHashMapBasedStateMap - serializing and deserializing") {
// round-trips through both Java and Kryo serializers at each stage of a delta chain
val map1 = new OpenHashMapBasedStateMap[Int, Int]()
testSerialization(map1, "error deserializing and serialized empty map")
map1.put(1, 100, 1)
map1.put(2, 200, 2)
testSerialization(map1, "error deserializing and serialized map with data + no delta")
val map2 = map1.copy().asInstanceOf[OpenHashMapBasedStateMap[Int, Int]]
// Do not test compaction
assert(map2.shouldCompact === false)
testSerialization(map2, "error deserializing and serialized map with 1 delta + no new data")
map2.put(3, 300, 3)
map2.put(4, 400, 4)
testSerialization(map2, "error deserializing and serialized map with 1 delta + new data")
val map3 = map2.copy().asInstanceOf[OpenHashMapBasedStateMap[Int, Int]]
assert(map3.shouldCompact === false)
testSerialization(map3, "error deserializing and serialized map with 2 delta + no new data")
map3.put(3, 600, 3)
map3.remove(2)
testSerialization(map3, "error deserializing and serialized map with 2 delta + new data")
}
test("OpenHashMapBasedStateMap - serializing and deserializing with compaction") {
val targetDeltaLength = 10
val deltaChainThreshold = 5
var map = new OpenHashMapBasedStateMap[Int, Int](
deltaChainThreshold = deltaChainThreshold)
// Make large delta chain with length more than deltaChainThreshold
for(i <- 1 to targetDeltaLength) {
map.put(Random.nextInt(), Random.nextInt(), 1)
map = map.copy().asInstanceOf[OpenHashMapBasedStateMap[Int, Int]]
}
assert(map.deltaChainLength > deltaChainThreshold)
assert(map.shouldCompact)
// serialization must compact the chain back below the threshold
val deser_map = testSerialization(map, "Deserialized + compacted map not same as original map")
assert(deser_map.deltaChainLength < deltaChainThreshold)
assert(deser_map.shouldCompact === false)
}
test("OpenHashMapBasedStateMap - all possible sequences of operations with copies ") {
/*
* This tests the map using all permutations of sequences operations, across multiple map
* copies as well as between copies. It is to ensure complete coverage, though it is
* kind of hard to debug this. It is set up as follows.
*
* - For any key, there can be 2 types of update ops on a state map - put or remove
*
* - These operations are done on a test map in "sets". After each set, the map is "copied"
* to create a new map, and the next set of operations are done on the new one. This tests
* whether the map data persist correctly across copies.
*
* - Within each set, there are a number of operations to test whether the map correctly
* updates and removes data without affecting the parent state map.
*
* - Overall this creates (numSets * numOpsPerSet) operations, each of which that can 2 types
* of operations. This leads to a total of [2 ^ (numSets * numOpsPerSet)] different sequence
* of operations, which we will test with different keys.
*
* Example: With numSets = 2, and numOpsPerSet = 2 give numTotalOps = 4. This means that
* 2 ^ 4 = 16 possible permutations needs to be tested using 16 keys.
* _______________________________________________
* | | Set1 | Set2 |
* | |-----------------|-----------------|
* | | Op1 Op2 |c| Op3 Op4 |
* |---------|----------------|o|----------------|
* | key 0 | put put |p| put put |
* | key 1 | put put |y| put rem |
* | key 2 | put put | | rem put |
* | key 3 | put put |t| rem rem |
* | key 4 | put rem |h| put put |
* | key 5 | put rem |e| put rem |
* | key 6 | put rem | | rem put |
* | key 7 | put rem |s| rem rem |
* | key 8 | rem put |t| put put |
* | key 9 | rem put |a| put rem |
* | key 10 | rem put |t| rem put |
* | key 11 | rem put |e| rem rem |
* | key 12 | rem rem | | put put |
* | key 13 | rem rem |m| put rem |
* | key 14 | rem rem |a| rem put |
* | key 15 | rem rem |p| rem rem |
* |_________|________________|_|________________|
*/
val numTypeMapOps = 2 // 0 = put a new value, 1 = remove value
val numSets = 3
val numOpsPerSet = 3 // to test seq of ops like update -> remove -> update in same set
val numTotalOps = numOpsPerSet * numSets
val numKeys = math.pow(numTypeMapOps, numTotalOps).toInt // to get all combinations of ops
// refMap mirrors the expected state: key -> (value, update time)
val refMap = new mutable.HashMap[Int, (Int, Long)]()
var prevSetRefMap: immutable.Map[Int, (Int, Long)] = null
var stateMap: StateMap[Int, Int] = new OpenHashMapBasedStateMap[Int, Int]()
var prevSetStateMap: StateMap[Int, Int] = null
var time = 1L
for (setId <- 0 until numSets) {
for (opInSetId <- 0 until numOpsPerSet) {
val opId = setId * numOpsPerSet + opInSetId
for (keyId <- 0 until numKeys) {
time += 1
// Find the operation type that needs to be done
// This is similar to finding the nth bit value of a binary number
// E.g. nth bit from the right of any binary number B is [ B / (2 ^ (n - 1)) ] % 2
val opCode =
(keyId / math.pow(numTypeMapOps, numTotalOps - opId - 1).toInt) % numTypeMapOps
opCode match {
case 0 =>
val value = Random.nextInt()
stateMap.put(keyId, value, time)
refMap.put(keyId, (value, time))
case 1 =>
stateMap.remove(keyId)
refMap.remove(keyId)
}
}
// Test whether the current state map after all key updates is correct
assertMap(stateMap, refMap, time, "State map does not match reference map")
// Test whether the previous map before copy has not changed
if (prevSetStateMap != null && prevSetRefMap != null) {
assertMap(prevSetStateMap, prevSetRefMap, time,
"Parent state map somehow got modified, does not match corresponding reference map")
}
}
// Copy the map and remember the previous maps for future tests
prevSetStateMap = stateMap
prevSetRefMap = refMap.toMap
stateMap = stateMap.copy()
// Assert that the copied map has the same data
assertMap(stateMap, prevSetRefMap, time,
"State map does not match reference map after copying")
}
assertMap(stateMap, refMap.toMap, time, "Final state map does not match reference map")
}
// Round-trips the map through both Java and Kryo serialization, returning the
// last (Kryo) deserialized copy for further inspection.
private def testSerialization[T: ClassTag](
map: OpenHashMapBasedStateMap[T, T], msg: String): OpenHashMapBasedStateMap[T, T] = {
testSerialization(new JavaSerializer(conf), map, msg)
testSerialization(new KryoSerializer(conf), map, msg)
}
// Round-trips the map through the given serializer and asserts equivalence.
private def testSerialization[T: ClassTag](
serializer: Serializer,
map: OpenHashMapBasedStateMap[T, T],
msg: String): OpenHashMapBasedStateMap[T, T] = {
val deserMap = serializeAndDeserialize(serializer, map)
assertMap(deserMap, map, 1, msg)
deserMap
}
// Assert whether all the data and operations on a state map matches that of a reference state map
private def assertMap[T](
mapToTest: StateMap[T, T],
refMapToTestWith: StateMap[T, T],
time: Long,
msg: String): Unit = {
withClue(msg) {
// Assert all the data is same as the reference map
assert(mapToTest.getAll().toSet === refMapToTestWith.getAll().toSet)
// Assert that get on every key returns the right value
for (keyId <- refMapToTestWith.getAll().map { _._1 }) {
assert(mapToTest.get(keyId) === refMapToTestWith.get(keyId))
}
// Assert that every time threshold returns the correct data
for (t <- 0L to (time + 1)) {
assert(mapToTest.getByTime(t).toSet === refMapToTestWith.getByTime(t).toSet)
}
}
}
// Assert whether all the data and operations on a state map matches that of a reference map
private def assertMap(
mapToTest: StateMap[Int, Int],
refMapToTestWith: Map[Int, (Int, Long)],
time: Long,
msg: String): Unit = {
withClue(msg) {
// Assert all the data is same as the reference map
assert(mapToTest.getAll().toSet ===
refMapToTestWith.iterator.map { x => (x._1, x._2._1, x._2._2) }.toSet)
// Assert that get on every key returns the right value
for (keyId <- refMapToTestWith.keys) {
assert(mapToTest.get(keyId) === refMapToTestWith.get(keyId).map { _._1 })
}
// Assert that every time threshold returns the correct data
for (t <- 0L to (time + 1)) {
val expectedRecords =
refMapToTestWith.iterator.filter { _._2._2 < t }.map { x => (x._1, x._2._1, x._2._2) }
assert(mapToTest.getByTime(t).toSet === expectedRecords.toSet)
}
}
}
test("OpenHashMapBasedStateMap - serializing and deserializing with KryoSerializable states") {
val map = new OpenHashMapBasedStateMap[KryoState, KryoState]()
map.put(new KryoState("a"), new KryoState("b"), 1)
testSerialization(
new KryoSerializer(conf), map, "error deserializing and serialized KryoSerializable states")
}
test("EmptyStateMap - serializing and deserializing") {
val map = StateMap.empty[KryoState, KryoState]
// Since EmptyStateMap doesn't contains any date, KryoState won't break JavaSerializer.
assert(serializeAndDeserialize(new JavaSerializer(conf), map).
isInstanceOf[EmptyStateMap[KryoState, KryoState]])
assert(serializeAndDeserialize(new KryoSerializer(conf), map).
isInstanceOf[EmptyStateMap[KryoState, KryoState]])
}
test("MapWithStateRDDRecord - serializing and deserializing with KryoSerializable states") {
val map = new OpenHashMapBasedStateMap[KryoState, KryoState]()
map.put(new KryoState("a"), new KryoState("b"), 1)
val record =
MapWithStateRDDRecord[KryoState, KryoState, KryoState](map, Seq(new KryoState("c")))
val deserRecord = serializeAndDeserialize(new KryoSerializer(conf), record)
assert(!(record eq deserRecord))
assert(record.stateMap.getAll().toSeq === deserRecord.stateMap.getAll().toSeq)
assert(record.mappedData === deserRecord.mappedData)
}
// Serializes then deserializes a value with the given serializer instance.
private def serializeAndDeserialize[T: ClassTag](serializer: Serializer, t: T): T = {
val serializerInstance = serializer.newInstance()
serializerInstance.deserialize[T](
serializerInstance.serialize(t), Thread.currentThread().getContextClassLoader)
}
}
/** A class that only supports Kryo serialization. */
/** A class that only supports Kryo serialization (used to verify Kryo code paths). */
private[streaming] final class KryoState(var state: String) extends KryoSerializable {

  // Serialize by writing the wrapped string (with its class id) to the output.
  override def write(kryo: Kryo, output: Output): Unit =
    kryo.writeClassAndObject(output, state)

  // Deserialize by reading the value back and restoring the mutable field.
  override def read(kryo: Kryo, input: Input): Unit = {
    val restored = kryo.readClassAndObject(input)
    state = restored.asInstanceOf[String]
  }

  // Two KryoState instances are equal iff their wrapped strings are equal.
  override def equals(other: Any): Boolean =
    other match {
      case k: KryoState => this.state == k.state
      case _ => false
    }

  // Null-safe hash, consistent with equals.
  override def hashCode(): Int =
    if (state == null) 0 else state.hashCode()
}
| maropu/spark | streaming/src/test/scala/org/apache/spark/streaming/StateMapSuite.scala | Scala | apache-2.0 | 16,480 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package io.csv
import io.ScalaSupport
import com.univocity.parsers.csv.CsvParser
import com.univocity.parsers.csv.CsvParserSettings
import com.univocity.parsers.csv.CsvWriter
import com.univocity.parsers.csv.CsvWriterSettings
import resource._
import java.io._
import java.net.URI
/** A [[ScalaSupport]] implementation for the [[CSV]] [[io.Format]]. */
/** A [[ScalaSupport]] implementation for the [[CSV]] [[io.Format]]. */
class CSVScalaSupport[A](val format: CSV)(implicit conv: CSVConverter[A])
extends ScalaSupport[A, CSV] {
// Reads records lazily from `path`, decoding each parsed row via the implicit
// CSVConverter. Streams are managed by scala-arm and closed after traversal.
private[emmalanguage] def read(path: String): TraversableOnce[A] =
new Traversable[A] {
def foreach[U](f: A => U) = for {
inp <- managed(inpStream(new URI(path)))
bis <- managed(new BufferedInputStream(inp))
isr <- managed(new InputStreamReader(bis, format.charset))
} {
val csv = new CsvParser(parserSettings(format))
csv.beginParsing(isr)
// pull rows one at a time until the parser signals end of input with null
var record = csv.parseNext()
while (record != null) {
f(conv.read(record, 0)(format))
record = csv.parseNext()
}
csv.stopParsing()
}
}
// Writes all elements to `path`, reusing a single record buffer of `conv.size`
// columns for every row to avoid per-row allocation.
private[emmalanguage] def write(path: String)(xs: Traversable[A]): Unit = {
val record = Array.ofDim[String](conv.size)
for {
out <- managed(outStream(new URI(path)))
bos <- managed(new BufferedOutputStream(out))
osw <- managed(new OutputStreamWriter(bos, format.charset))
csv <- managed(new CsvWriter(osw, writerSettings(format)))
} for (x <- xs) {
conv.write(x, record, 0)(format)
csv.writeRow(record)
}
}
// Fresh parser configured from this support's format.
private[emmalanguage] def parser(): CsvParser =
new CsvParser(parserSettings(format))
// Fresh writer configured from this support's format. NOTE(review): this CsvWriter
// is created without a target stream - presumably callers supply one per write;
// confirm against univocity usage elsewhere.
private[emmalanguage] def writer(): CsvWriter =
new CsvWriter(writerSettings(format))
// ---------------------------------------------------------------------------
// Helpers translating the CSV format descriptor into univocity settings
// ---------------------------------------------------------------------------
private def writerSettings(format: CSV): CsvWriterSettings = {
val settings = new CsvWriterSettings()
// derived from the CSV format
settings.getFormat.setDelimiter(format.delimiter)
format.quote.foreach(quote => settings.getFormat.setQuote(quote))
format.escape.foreach(escape => settings.getFormat.setQuoteEscape(escape))
format.comment.foreach(comment => settings.getFormat.setComment(comment))
settings.setNullValue(format.nullValue)
// hard-coded
settings.setIgnoreLeadingWhitespaces(true)
settings.setIgnoreTrailingWhitespaces(true)
settings
}
private def parserSettings(format: CSV): CsvParserSettings = {
val settings = new CsvParserSettings()
// derived from the CSV format
settings.setHeaderExtractionEnabled(format.header)
settings.getFormat.setDelimiter(format.delimiter)
format.quote.foreach(quote => settings.getFormat.setQuote(quote))
format.escape.foreach(escape => settings.getFormat.setQuoteEscape(escape))
format.comment.foreach(comment => settings.getFormat.setComment(comment))
settings.setNullValue(format.nullValue)
settings.setNumberOfRowsToSkip(format.skipRows)
// hard-coded
settings.setIgnoreLeadingWhitespaces(true)
settings.setIgnoreTrailingWhitespaces(true)
settings
}
}
/** Companion object. */
object CSVScalaSupport {
def apply[A: CSVConverter](format: CSV): CSVScalaSupport[A] =
new CSVScalaSupport[A](format)
}
| emmalanguage/emma | emma-language/src/main/scala/org/emmalanguage/io/csv/CSVScalaSupport.scala | Scala | apache-2.0 | 4,019 |
package reactivemongo.api.commands
import reactivemongo.api.SerializationPack
// Mixin providing the pipeline-stage abstraction for aggregation commands,
// parameterized by the serialization pack (BSON implementation) in use.
private[commands] trait AggregationPipeline[P <: SerializationPack] {
// The serialization pack whose document type the stages are expressed in.
val pack: P
/**
* One of MongoDBs pipeline operators for aggregation.
* Sealed as these are defined in the MongoDB specifications,
* and clients should not have custom operators.
*/
trait PipelineOperator {
// Serializes this stage to its document form (e.g. { $match: ... }).
protected[reactivemongo] def makePipe: pack.Document
}
/**
* Only for advanced user: Factory for stage not already provided in the API.
*
* For example for `{ \$sample: { size: 2 } }`
*
* {{{
* import reactivemongo.api.bson.BSONDocument
* import reactivemongo.api.bson.collection.BSONCollection
*
* def foo(coll: BSONCollection) =
* coll.aggregateWith[BSONDocument]() { agg =>
* import agg.PipelineOperator
*
* List(PipelineOperator(BSONDocument(
* f"$$sample" -> BSONDocument("size" -> 2))))
* }
* }}}
*/
object PipelineOperator {
// Wraps an arbitrary document (evaluated lazily, once per makePipe call site)
// as a pipeline stage.
def apply(pipe: => pack.Document): PipelineOperator = new PipelineOperator {
val makePipe = pipe
}
// Any operator can be written simply by emitting its document form.
implicit def writer[Op <: PipelineOperator]: pack.Writer[Op] =
pack.writer[Op](_.makePipe)
}
/**
* Aggregation pipeline (with at least one stage operator)
*/
type Pipeline = List[PipelineOperator]
}
| ReactiveMongo/ReactiveMongo | driver/src/main/scala/api/commands/AggregationPipeline.scala | Scala | apache-2.0 | 1,324 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
package immutable
import java.io.{ObjectInputStream, ObjectOutputStream}
import java.lang.{StringBuilder => JStringBuilder}
import scala.annotation.tailrec
import scala.collection.generic.SerializeEnd
import scala.collection.mutable.{ArrayBuffer, Builder, ReusableBuilder, StringBuilder}
import scala.language.implicitConversions
import scala.runtime.Statics
/** This class implements an immutable linked list that evaluates elements
* in order and only when needed. Here is an example:
*
* {{{
* import scala.math.BigInt
* object Main extends App {
*
* val fibs: LazyList[BigInt] = BigInt(0) #:: BigInt(1) #:: fibs.zip(fibs.tail).map { n => n._1 + n._2 }
*
* fibs take 5 foreach println
* }
*
* // prints
* //
* // 0
* // 1
* // 1
* // 2
* // 3
* }}}
*
* Elements of a `LazyList` are memoized; that is, the value of each element
* is computed only once.
* To illustrate, we will alter body of the `fibs` value above and take some
* more values:
*
* {{{
* import scala.math.BigInt
* object Main extends App {
*
* val fibs: LazyList[BigInt] = BigInt(0) #:: BigInt(1) #:: fibs.zip(
* fibs.tail).map(n => {
* println("Adding %d and %d".format(n._1, n._2))
* n._1 + n._2
* })
*
* fibs take 5 foreach println
* fibs take 6 foreach println
* }
*
* // prints
* //
* // 0
* // 1
* // Adding 0 and 1
* // 1
* // Adding 1 and 1
* // 2
* // Adding 1 and 2
* // 3
*
* // And then prints
* //
* // 0
* // 1
* // 1
* // 2
* // 3
* // Adding 2 and 3
* // 5
* }}}
*
* There are a number of subtle points to the above example.
*
* - The definition of `fibs` is a `val` not a method. The memoization of the
* `LazyList` requires us to have somewhere to store the information and a `val`
* allows us to do that.
*
* - While the `LazyList` is actually being modified during access, this does not
* change the notion of its immutability. Once the values are memoized they do
* not change and values that have yet to be memoized still "exist", they
* simply haven't been realized yet.
*
* - One must be cautious of memoization; you can very quickly eat up large
* amounts of memory if you're not careful. The reason for this is that the
* memoization of the `LazyList` creates a structure much like
* [[scala.collection.immutable.List]]. So long as something is holding on to
* the head, the head holds on to the tail, and so it continues recursively.
* If, on the other hand, there is nothing holding on to the head (e.g. we used
* `def` to define the `LazyList`) then once it is no longer being used directly,
* it disappears.
*
* - Note that some operations, including [[drop]], [[dropWhile]],
* [[flatMap]] or [[collect]] may process a large number of intermediate
* elements before returning. These necessarily hold onto the head, since
* they are methods on `LazyList`, and a lazy list holds its own head. For
* computations of this sort where memoization is not desired, use
* `Iterator` when possible.
*
* {{{
* // For example, let's build the natural numbers and do some silly iteration
* // over them.
*
* // We'll start with a silly iteration
* def loop(s: String, i: Int, iter: Iterator[Int]): Unit = {
* // Stop after 200,000
* if (i < 200001) {
* if (i % 50000 == 0) println(s + i)
* loop(s, iter.next(), iter)
* }
* }
*
* // Our first LazyList definition will be a val definition
* val lazylist1: LazyList[Int] = {
* def loop(v: Int): LazyList[Int] = v #:: loop(v + 1)
* loop(0)
* }
*
* // Because lazylist1 is a val, everything that the iterator produces is held
* // by virtue of the fact that the head of the LazyList is held in lazylist1
* val it1 = lazylist1.iterator
* loop("Iterator1: ", it1.next(), it1)
*
* // We can redefine this LazyList such that all we have is the Iterator left
* // and allow the LazyList to be garbage collected as required. Using a def
* // to provide the LazyList ensures that no val is holding onto the head as
* // is the case with lazylist1
* def lazylist2: LazyList[Int] = {
* def loop(v: Int): LazyList[Int] = v #:: loop(v + 1)
* loop(0)
* }
* val it2 = lazylist2.iterator
* loop("Iterator2: ", it2.next(), it2)
*
* // And, of course, we don't actually need a LazyList at all for such a simple
* // problem. There's no reason to use a LazyList if you don't actually need
* // one.
* val it3 = new Iterator[Int] {
* var i = -1
* def hasNext = true
* def next(): Int = { i += 1; i }
* }
* loop("Iterator3: ", it3.next(), it3)
* }}}
*
* - The fact that `tail` works at all is of interest. In the definition of
* `fibs` we have an initial `(0, 1, LazyList(...))` so `tail` is deterministic.
* If we defined `fibs` such that only `0` were concretely known then the act
* of determining `tail` would require the evaluation of `tail` which would
* cause an infinite recursion and stack overflow. If we define a definition
* where the tail is not initially computable then we're going to have an
* infinite recursion:
* {{{
* // The first time we try to access the tail we're going to need more
* // information which will require us to recurse, which will require us to
* // recurse, which...
* lazy val sov: LazyList[Vector[Int]] = Vector(0) #:: sov.zip(sov.tail).map { n => n._1 ++ n._2 }
* }}}
*
* The definition of `fibs` above creates a larger number of objects than
* necessary depending on how you might want to implement it. The following
* implementation provides a more "cost effective" implementation due to the
* fact that it has a more direct route to the numbers themselves:
*
* {{{
* lazy val fib: LazyList[Int] = {
* def loop(h: Int, n: Int): LazyList[Int] = h #:: loop(n, h + n)
* loop(1, 1)
* }
* }}}
*
* @tparam A the type of the elements contained in this lazy list.
*
* @see [[http://docs.scala-lang.org/overviews/collections/concrete-immutable-collection-classes.html#lazylists "Scala's Collection Library overview"]]
* section on `LazyLists` for more information.
* @define Coll `LazyList`
* @define coll lazy list
* @define orderDependent
* @define orderDependentFold
* @define appendStackSafety Note: Repeated chaining of calls to append methods (`appended`,
* `appendedAll`, `lazyAppendedAll`) without forcing any of the
* intermediate resulting lazy lists may overflow the stack when
* the final result is forced.
* @define preservesLaziness This method preserves laziness; elements are only evaluated
* individually as needed.
* @define initiallyLazy This method does not evaluate anything until an operation is performed
* on the result (e.g. calling `head` or `tail`, or checking if it is empty).
* @define evaluatesAllElements This method evaluates all elements of the collection.
*/
@SerialVersionUID(3L)
final class LazyList[+A] private(private[this] var lazyState: () => LazyList.State[A])
extends AbstractSeq[A]
with LinearSeq[A]
with LinearSeqOps[A, LazyList, LazyList[A]]
with IterableFactoryDefaults[A, LazyList]
with Serializable {
import LazyList._
@volatile private[this] var stateEvaluated: Boolean = false
@inline private def stateDefined: Boolean = stateEvaluated
private lazy val state: State[A] = {
val res = lazyState()
// if we set it to `true` before evaluating, we may infinite loop
// if something expects `state` to already be evaluated
stateEvaluated = true
lazyState = null // allow GC
res
}
override def iterableFactory: SeqFactory[LazyList] = LazyList
override def isEmpty: Boolean = state eq State.Empty
/** @inheritdoc
*
* $preservesLaziness
*/
override def knownSize: Int = if (knownIsEmpty) 0 else -1
override def head: A = state.head
override def tail: LazyList[A] = state.tail
@inline private[this] def knownIsEmpty: Boolean = stateEvaluated && (isEmpty: @inline)
@inline private def knownNonEmpty: Boolean = stateEvaluated && !(isEmpty: @inline)
/** Evaluates all undefined elements of the lazy list.
*
* This method detects cycles in lazy lists, and terminates after all
* elements of the cycle are evaluated. For example:
*
* {{{
* val ring: LazyList[Int] = 1 #:: 2 #:: 3 #:: ring
* ring.force
* ring.toString
*
* // prints
* //
* // LazyList(1, 2, 3, ...)
* }}}
*
* This method will *not* terminate for non-cyclic infinite-sized collections.
*
* @return this
*/
def force: this.type = {
  // Use standard 2x 1x iterator trick for cycle detection ("those" is slow one)
  // `these` (fast) starts one step ahead so the `those ne these` guard does
  // not terminate the loop before any work is done.
  var these, those: LazyList[A] = this
  if (!these.isEmpty) {
    these = these.tail
  }
  while (those ne these) {
    if (these.isEmpty) return this // reached the end: everything evaluated
    these = these.tail
    if (these.isEmpty) return this // reached the end: everything evaluated
    these = these.tail
    if (these eq those) return this // fast pointer caught the slow one: cycle fully evaluated
    those = those.tail
  }
  this
}
/** @inheritdoc
*
* The iterator returned by this method preserves laziness; elements are
* only evaluated individually as needed.
*/
override def iterator: Iterator[A] =
if (knownIsEmpty) Iterator.empty
else new LazyIterator(this)
/** Apply the given function `f` to each element of this linear sequence
* (while respecting the order of the elements).
*
* @param f The treatment to apply to each element.
* @note Overridden here as final to trigger tail-call optimization, which
* replaces 'this' with 'tail' at each iteration. This is absolutely
* necessary for allowing the GC to collect the underlying LazyList as elements
* are consumed.
* @note This function will force the realization of the entire LazyList
* unless the `f` throws an exception.
*/
@tailrec
override def foreach[U](f: A => U): Unit = {
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
/** LazyList specialization of foldLeft which allows GC to collect along the
* way.
*
* @tparam B The type of value being accumulated.
* @param z The initial value seeded into the function `op`.
* @param op The operation to perform on successive elements of the `LazyList`.
* @return The accumulated value from successive applications of `op`.
*/
// Tail-recursive: each step re-dispatches on `tail`, so the consumed prefix
// is no longer referenced from the stack and can be garbage-collected.
@tailrec
override def foldLeft[B](z: B)(op: (B, A) => B): B =
  if (isEmpty) z
  else tail.foldLeft(op(z, head))(op)
// State.Empty doesn't use the SerializationProxy
protected[this] def writeReplace(): AnyRef =
if (knownNonEmpty) new LazyList.SerializationProxy[A](this) else this
override protected[this] def className = "LazyList"
/** The lazy list resulting from the concatenation of this lazy list with the argument lazy list.
*
* $preservesLaziness
*
* $appendStackSafety
*
* @param suffix The collection that gets appended to this lazy list
* @return The lazy list containing elements of this lazy list and the iterable object.
*/
def lazyAppendedAll[B >: A](suffix: => collection.IterableOnce[B]): LazyList[B] =
newLL {
if (isEmpty) suffix match {
case lazyList: LazyList[B] => lazyList.state // don't recompute the LazyList
case coll if coll.knownSize == 0 => State.Empty
case _ => stateFromIterator(suffix.iterator)
}
else sCons(head, tail lazyAppendedAll suffix)
}
/** @inheritdoc
*
* $preservesLaziness
*
* $appendStackSafety
*/
override def appendedAll[B >: A](suffix: IterableOnce[B]): LazyList[B] =
if (knownIsEmpty) LazyList.from(suffix)
else lazyAppendedAll(suffix)
/** @inheritdoc
*
* $preservesLaziness
*
* $appendStackSafety
*/
override def appended[B >: A](elem: B): LazyList[B] =
if (knownIsEmpty) newLL(sCons(elem, LazyList.empty))
else lazyAppendedAll(Iterator.single(elem))
/** @inheritdoc
*
* $evaluatesAllElements
*/
override def equals(that: Any): Boolean =
if (this eq that.asInstanceOf[AnyRef]) true else super.equals(that)
/** @inheritdoc
*
* $preservesLaziness
*/
override def scanLeft[B](z: B)(op: (B, A) => B): LazyList[B] =
if (knownIsEmpty) newLL(sCons(z, LazyList.empty))
else newLL(scanLeftState(z)(op))
private def scanLeftState[B](z: B)(op: (B, A) => B): State[B] =
sCons(
z,
newLL {
if (isEmpty) State.Empty
else tail.scanLeftState(op(z, head))(op)
}
)
/** LazyList specialization of reduceLeft which allows GC to collect
* along the way.
*
* @tparam B The type of value being accumulated.
* @param f The operation to perform on successive elements of the `LazyList`.
* @return The accumulated value from successive applications of `f`.
*/
override def reduceLeft[B >: A](f: (B, A) => B): B = {
  if (this.isEmpty) throw new UnsupportedOperationException("empty.reduceLeft")
  else {
    // Iterative loop: `left` is the only cursor advanced through the list.
    // NOTE(review): `this` remains on the stack for the whole call, so the
    // head is retained until the fold finishes — the GC benefit applies to
    // cells between head and the cursor reachable only via `this`? confirm.
    var reducedRes: B = this.head
    var left: LazyList[A] = this.tail
    while (!left.isEmpty) {
      reducedRes = f(reducedRes, left.head)
      left = left.tail
    }
    reducedRes
  }
}
/** @inheritdoc
*
* $preservesLaziness
*/
override def partition(p: A => Boolean): (LazyList[A], LazyList[A]) = (filter(p), filterNot(p))
/** @inheritdoc
*
* $preservesLaziness
*/
override def partitionMap[A1, A2](f: A => Either[A1, A2]): (LazyList[A1], LazyList[A2]) = {
val (left, right) = map(f).partition(_.isLeft)
(left.map(_.asInstanceOf[Left[A1, _]].value), right.map(_.asInstanceOf[Right[_, A2]].value))
}
/** @inheritdoc
*
* $preservesLaziness
*/
override def filter(pred: A => Boolean): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else LazyList.filterImpl(this, pred, isFlipped = false)
/** @inheritdoc
*
* $preservesLaziness
*/
override def filterNot(pred: A => Boolean): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else LazyList.filterImpl(this, pred, isFlipped = true)
/** A `collection.WithFilter` which allows GC of the head of lazy list during processing.
*
* This method is not particularly useful for a lazy list, as [[filter]] already preserves
* laziness.
*
* The `collection.WithFilter` returned by this method preserves laziness; elements are
* only evaluated individually as needed.
*/
override def withFilter(p: A => Boolean): collection.WithFilter[A, LazyList] =
new LazyList.WithFilter(coll, p)
/** @inheritdoc
*
* $preservesLaziness
*/
override def prepended[B >: A](elem: B): LazyList[B] = newLL(sCons(elem, this))
/** @inheritdoc
*
* $preservesLaziness
*/
override def prependedAll[B >: A](prefix: collection.IterableOnce[B]): LazyList[B] =
if (knownIsEmpty) LazyList.from(prefix)
else if (prefix.knownSize == 0) this
else newLL(stateFromIteratorConcatSuffix(prefix.iterator)(state))
/** @inheritdoc
*
* $preservesLaziness
*/
override def map[B](f: A => B): LazyList[B] =
if (knownIsEmpty) LazyList.empty
else (mapImpl(f): @inline)
/** @inheritdoc
*
* $preservesLaziness
*/
override def tapEach[U](f: A => U): LazyList[A] = map { a => f(a); a }
private def mapImpl[B](f: A => B): LazyList[B] =
newLL {
if (isEmpty) State.Empty
else sCons(f(head), tail.mapImpl(f))
}
/** @inheritdoc
*
* $preservesLaziness
*/
override def collect[B](pf: PartialFunction[A, B]): LazyList[B] =
if (knownIsEmpty) LazyList.empty
else LazyList.collectImpl(this, pf)
/** @inheritdoc
*
* This method does not evaluate any elements further than
* the first element for which the partial function is defined.
*/
@tailrec
override def collectFirst[B](pf: PartialFunction[A, B]): Option[B] =
if (isEmpty) None
else {
val res = pf.applyOrElse(head, LazyList.anyToMarker.asInstanceOf[A => B])
if (res.asInstanceOf[AnyRef] eq Statics.pfMarker) tail.collectFirst(pf)
else Some(res)
}
/** @inheritdoc
*
* This method does not evaluate any elements further than
* the first element matching the predicate.
*/
@tailrec
override def find(p: A => Boolean): Option[A] =
if (isEmpty) None
else {
val elem = head
if (p(elem)) Some(elem)
else tail.find(p)
}
/** @inheritdoc
*
* $preservesLaziness
*/
// optimisations are not for speed, but for functionality
// see tickets #153, #498, #2147, and corresponding tests in run/ (as well as run/stream_flatmap_odds.scala)
override def flatMap[B](f: A => IterableOnce[B]): LazyList[B] =
if (knownIsEmpty) LazyList.empty
else LazyList.flatMapImpl(this, f)
/** @inheritdoc
*
* $preservesLaziness
*/
override def flatten[B](implicit asIterable: A => IterableOnce[B]): LazyList[B] = flatMap(asIterable)
/** @inheritdoc
*
* $preservesLaziness
*/
override def zip[B](that: collection.IterableOnce[B]): LazyList[(A, B)] =
if (this.knownIsEmpty || that.knownSize == 0) LazyList.empty
else newLL(zipState(that.iterator))
private def zipState[B](it: Iterator[B]): State[(A, B)] =
if (this.isEmpty || !it.hasNext) State.Empty
else sCons((head, it.next()), newLL { tail zipState it })
/** @inheritdoc
*
* $preservesLaziness
*/
override def zipWithIndex: LazyList[(A, Int)] = this zip LazyList.from(0)
/** @inheritdoc
*
* $preservesLaziness
*/
override def zipAll[A1 >: A, B](that: collection.Iterable[B], thisElem: A1, thatElem: B): LazyList[(A1, B)] = {
if (this.knownIsEmpty) {
if (that.knownSize == 0) LazyList.empty
else LazyList.continually(thisElem) zip that
} else {
if (that.knownSize == 0) zip(LazyList.continually(thatElem))
else newLL(zipAllState(that.iterator, thisElem, thatElem))
}
}
private def zipAllState[A1 >: A, B](it: Iterator[B], thisElem: A1, thatElem: B): State[(A1, B)] = {
if (it.hasNext) {
if (this.isEmpty) sCons((thisElem, it.next()), newLL { LazyList.continually(thisElem) zipState it })
else sCons((this.head, it.next()), newLL { this.tail.zipAllState(it, thisElem, thatElem) })
} else {
if (this.isEmpty) State.Empty
else sCons((this.head, thatElem), this.tail zip LazyList.continually(thatElem))
}
}
/** @inheritdoc
*
* This method is not particularly useful for a lazy list, as [[zip]] already preserves
* laziness.
*
* The `collection.LazyZip2` returned by this method preserves laziness; elements are
* only evaluated individually as needed.
*/
// just in case it can be meaningfully overridden at some point
override def lazyZip[B](that: collection.Iterable[B]): LazyZip2[A, B, LazyList.this.type] =
super.lazyZip(that)
/** @inheritdoc
*
* $preservesLaziness
*/
override def unzip[A1, A2](implicit asPair: A => (A1, A2)): (LazyList[A1], LazyList[A2]) =
(map(asPair(_)._1), map(asPair(_)._2))
/** @inheritdoc
*
* $preservesLaziness
*/
override def unzip3[A1, A2, A3](implicit asTriple: A => (A1, A2, A3)): (LazyList[A1], LazyList[A2], LazyList[A3]) =
(map(asTriple(_)._1), map(asTriple(_)._2), map(asTriple(_)._3))
/** @inheritdoc
*
* $initiallyLazy
* Additionally, it preserves laziness for all except the first `n` elements.
*/
override def drop(n: Int): LazyList[A] =
if (n <= 0) this
else if (knownIsEmpty) LazyList.empty
else LazyList.dropImpl(this, n)
/** @inheritdoc
*
* $initiallyLazy
* Additionally, it preserves laziness for all elements after the predicate returns `false`.
*/
override def dropWhile(p: A => Boolean): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else LazyList.dropWhileImpl(this, p)
/** @inheritdoc
*
* $initiallyLazy
*/
override def dropRight(n: Int): LazyList[A] = {
if (n <= 0) this
else if (knownIsEmpty) LazyList.empty
else newLL {
var scout = this
var remaining = n
// advance scout n elements ahead (or until empty)
while (remaining > 0 && !scout.isEmpty) {
remaining -= 1
scout = scout.tail
}
dropRightState(scout)
}
}
private def dropRightState(scout: LazyList[_]): State[A] =
if (scout.isEmpty) State.Empty
else sCons(head, newLL(tail.dropRightState(scout.tail)))
/** @inheritdoc
*
* $preservesLaziness
*/
override def take(n: Int): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else (takeImpl(n): @inline)
private def takeImpl(n: Int): LazyList[A] = {
if (n <= 0) LazyList.empty
else newLL {
if (isEmpty) State.Empty
else sCons(head, tail.takeImpl(n - 1))
}
}
/** @inheritdoc
*
* $preservesLaziness
*/
override def takeWhile(p: A => Boolean): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else (takeWhileImpl(p): @inline)
private def takeWhileImpl(p: A => Boolean): LazyList[A] =
newLL {
if (isEmpty || !p(head)) State.Empty
else sCons(head, tail.takeWhileImpl(p))
}
/** @inheritdoc
*
* $initiallyLazy
*/
override def takeRight(n: Int): LazyList[A] =
if (n <= 0 || knownIsEmpty) LazyList.empty
else LazyList.takeRightImpl(this, n)
/** @inheritdoc
*
* $initiallyLazy
* Additionally, it preserves laziness for all but the first `from` elements.
*/
override def slice(from: Int, until: Int): LazyList[A] = take(until).drop(from)
/** @inheritdoc
*
* $evaluatesAllElements
*/
override def reverse: LazyList[A] = reverseOnto(LazyList.empty)
// need contravariant type B to make the compiler happy - still returns LazyList[A]
@tailrec
private def reverseOnto[B >: A](tl: LazyList[B]): LazyList[B] =
if (isEmpty) tl
else tail.reverseOnto(newLL(sCons(head, tl)))
/** @inheritdoc
*
* $preservesLaziness
*/
override def diff[B >: A](that: collection.Seq[B]): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else super.diff(that)
/** @inheritdoc
*
* $preservesLaziness
*/
override def intersect[B >: A](that: collection.Seq[B]): LazyList[A] =
if (knownIsEmpty) LazyList.empty
else super.intersect(that)
// Whether this lazy list has strictly more than `len` elements.
// Evaluates at most `len + 1` cells, so it is safe on infinite lists
// (unlike computing `length`).
@tailrec
private def lengthGt(len: Int): Boolean =
  if (len < 0) true
  else if (isEmpty) false
  else tail.lengthGt(len - 1)
/** @inheritdoc
*
* The iterator returned by this method mostly preserves laziness;
* a single element ahead of the iterator is evaluated.
*/
override def grouped(size: Int): Iterator[LazyList[A]] = {
require(size > 0, "size must be positive, but was " + size)
slidingImpl(size = size, step = size)
}
/** @inheritdoc
*
* The iterator returned by this method mostly preserves laziness;
* `size - step max 1` elements ahead of the iterator are evaluated.
*/
override def sliding(size: Int, step: Int): Iterator[LazyList[A]] = {
require(size > 0 && step > 0, s"size=$size and step=$step, but both must be positive")
slidingImpl(size = size, step = step)
}
@inline private def slidingImpl(size: Int, step: Int): Iterator[LazyList[A]] =
if (knownIsEmpty) Iterator.empty
else new SlidingIterator[A](this, size = size, step = step)
/** @inheritdoc
*
* $preservesLaziness
*/
override def padTo[B >: A](len: Int, elem: B): LazyList[B] = {
if (len <= 0) this
else newLL {
if (isEmpty) LazyList.fill(len)(elem).state
else sCons(head, tail.padTo(len - 1, elem))
}
}
/** @inheritdoc
*
* $preservesLaziness
*/
override def patch[B >: A](from: Int, other: IterableOnce[B], replaced: Int): LazyList[B] =
if (knownIsEmpty) LazyList from other
else patchImpl(from, other, replaced)
private def patchImpl[B >: A](from: Int, other: IterableOnce[B], replaced: Int): LazyList[B] =
newLL {
if (from <= 0) stateFromIteratorConcatSuffix(other.iterator)(LazyList.dropImpl(this, replaced).state)
else if (isEmpty) stateFromIterator(other.iterator)
else sCons(head, tail.patchImpl(from - 1, other, replaced))
}
/** @inheritdoc
*
* $evaluatesAllElements
*/
// overridden just in case a lazy implementation is developed at some point
override def transpose[B](implicit asIterable: A => collection.Iterable[B]): LazyList[LazyList[B]] = super.transpose
/** @inheritdoc
*
* $preservesLaziness
*/
override def updated[B >: A](index: Int, elem: B): LazyList[B] =
if (index < 0) throw new IndexOutOfBoundsException(s"$index")
else updatedImpl(index, elem, index)
private def updatedImpl[B >: A](index: Int, elem: B, startIndex: Int): LazyList[B] = {
newLL {
if (index <= 0) sCons(elem, tail)
else if (tail.isEmpty) throw new IndexOutOfBoundsException(startIndex.toString)
else sCons(head, tail.updatedImpl(index - 1, elem, startIndex))
}
}
/** Appends all elements of this $coll to a string builder using start, end, and separator strings.
* The written text begins with the string `start` and ends with the string `end`.
* Inside, the string representations (w.r.t. the method `toString`)
* of all elements of this $coll are separated by the string `sep`.
*
* An undefined state is represented with `"<not computed>"` and cycles are represented with `"<cycle>"`.
*
* $evaluatesAllElements
*
* @param sb the string builder to which elements are appended.
* @param start the starting string.
* @param sep the separator string.
* @param end the ending string.
* @return the string builder `b` to which elements were appended.
*/
override def addString(sb: StringBuilder, start: String, sep: String, end: String): StringBuilder = {
force
addStringNoForce(sb.underlying, start, sep, end)
sb
}
private[this] def addStringNoForce(b: JStringBuilder, start: String, sep: String, end: String): JStringBuilder = {
b.append(start)
if (!stateDefined) b.append("<not computed>")
else if (!isEmpty) {
b.append(head)
var cursor = this
@inline def appendCursorElement(): Unit = b.append(sep).append(cursor.head)
var scout = tail
@inline def scoutNonEmpty: Boolean = scout.stateDefined && !scout.isEmpty
if ((cursor ne scout) && (!scout.stateDefined || (cursor.state ne scout.state))) {
cursor = scout
if (scoutNonEmpty) {
scout = scout.tail
// Use 2x 1x iterator trick for cycle detection; slow iterator can add strings
while ((cursor ne scout) && scoutNonEmpty && (cursor.state ne scout.state)) {
appendCursorElement()
cursor = cursor.tail
scout = scout.tail
if (scoutNonEmpty) scout = scout.tail
}
}
}
if (!scoutNonEmpty) { // Not a cycle, scout hit an end
while (cursor ne scout) {
appendCursorElement()
cursor = cursor.tail
}
// if cursor (eq scout) has state defined, it is empty; else unknown state
if (!cursor.stateDefined) b.append(sep).append("<not computed>")
} else {
@inline def same(a: LazyList[A], b: LazyList[A]): Boolean = (a eq b) || (a.state eq b.state)
// Cycle.
// If we have a prefix of length P followed by a cycle of length C,
// the scout will be at position (P%C) in the cycle when the cursor
// enters it at P. They'll then collide when the scout advances another
// C - (P%C) ahead of the cursor.
// If we run the scout P farther, then it will be at the start of
// the cycle: (C - (P%C) + (P%C)) == C == 0. So if another runner
// starts at the beginning of the prefix, they'll collide exactly at
// the start of the loop.
var runner = this
var k = 0
while (!same(runner, scout)) {
runner = runner.tail
scout = scout.tail
k += 1
}
// Now runner and scout are at the beginning of the cycle. Advance
// cursor, adding to string, until it hits; then we'll have covered
// everything once. If cursor is already at beginning, we'd better
// advance one first unless runner didn't go anywhere (in which case
// we've already looped once).
if (same(cursor, scout) && (k > 0)) {
appendCursorElement()
cursor = cursor.tail
}
while (!same(cursor, scout)) {
appendCursorElement()
cursor = cursor.tail
}
b.append(sep).append("<cycle>")
}
}
b.append(end)
}
/** $preservesLaziness
*
* @return a string representation of this collection. An undefined state is
* represented with `"<not computed>"` and cycles are represented with `"<cycle>"`
*
* Examples:
*
* - `"LazyList(4, <not computed>)"`, a non-empty lazy list ;
* - `"LazyList(1, 2, 3, <not computed>)"`, a lazy list with at least three elements ;
* - `"LazyList(1, 2, 3, <cycle>)"`, an infinite lazy list that contains
* a cycle at the fourth element.
*/
override def toString(): String = addStringNoForce(new JStringBuilder(className), "(", ", ", ")").toString
/** @inheritdoc
*
* $evaluatesAllElements
*/
@deprecated("Check .knownSize instead of .hasDefiniteSize for more actionable information (see scaladoc for details)", "2.13.0")
override def hasDefiniteSize: Boolean = {
  // Never forces evaluation: answers only from already-computed state.
  if (!stateDefined) false
  else if (isEmpty) true
  else {
    // Two-iterator trick (2x & 1x speed) for cycle detection.
    var those = this
    var these = tail
    while (those ne these) {
      if (!these.stateDefined) return false // hit an unevaluated suffix
      else if (these.isEmpty) return true // hit the end: size is definite
      these = these.tail
      if (!these.stateDefined) return false // hit an unevaluated suffix
      else if (these.isEmpty) return true // hit the end: size is definite
      these = these.tail
      if (those eq these) return false // fast caught slow: cycle
      those = those.tail
    }
    false // Cycle detected
  }
}
}
/**
* $factoryInfo
* @define coll lazy list
* @define Coll `LazyList`
*/
@SerialVersionUID(3L)
object LazyList extends SeqFactory[LazyList] {
// Eagerly evaluate cached empty instance
private[this] val _empty = newLL(State.Empty).force
private sealed trait State[+A] extends Serializable {
def head: A
def tail: LazyList[A]
}
private object State {
@SerialVersionUID(3L)
object Empty extends State[Nothing] {
def head: Nothing = throw new NoSuchElementException("head of empty lazy list")
def tail: LazyList[Nothing] = throw new UnsupportedOperationException("tail of empty lazy list")
}
@SerialVersionUID(3L)
final class Cons[A](val head: A, val tail: LazyList[A]) extends State[A]
}
/** Creates a new LazyList. */
@inline private def newLL[A](state: => State[A]): LazyList[A] = new LazyList[A](() => state)
/** Creates a new State.Cons. */
@inline private def sCons[A](hd: A, tl: LazyList[A]): State[A] = new State.Cons[A](hd, tl)
private val anyToMarker: Any => Any = _ => Statics.pfMarker
/* All of the following `<op>Impl` methods are carefully written so as not to
* leak the beginning of the `LazyList`. They copy the initial `LazyList` (`ll`) into
* `var rest`, which gets closed over as a `scala.runtime.ObjectRef`, thus not permanently
* leaking the head of the `LazyList`. Additionally, the methods are written so that, should
* an exception be thrown by the evaluation of the `LazyList` or any supplied function, they
* can continue their execution where they left off.
*/
/** Lazy `filter` (or `filterNot`, when `isFlipped`) implementation.
  *
  * `restRef` is the only long-lived reference into the list and is advanced
  * as elements are examined, so the consumed prefix can be collected.
  * If `p` throws, `restRef` still points at the cell being tested (it is
  * updated only after the test), so re-evaluating the state resumes there.
  */
private def filterImpl[A](ll: LazyList[A], p: A => Boolean, isFlipped: Boolean): LazyList[A] = {
  // DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
  var restRef = ll // val restRef = new ObjectRef(ll)
  newLL {
    var elem: A = null.asInstanceOf[A]
    var found = false
    var rest = restRef // var rest = restRef.elem
    while (!found && !rest.isEmpty) {
      elem = rest.head
      found = p(elem) != isFlipped
      rest = rest.tail
      restRef = rest // restRef.elem = rest
    }
    if (found) sCons(elem, filterImpl(rest, p, isFlipped)) else State.Empty
  }
}
/** Lazy `collect` implementation.
  *
  * Uses the shared `pfMarker` sentinel with `applyOrElse` so each element is
  * tested and transformed in a single partial-function call (no separate
  * `isDefinedAt`). `restRef` is advanced as elements are consumed so the
  * prefix does not leak.
  */
private def collectImpl[A, B](ll: LazyList[A], pf: PartialFunction[A, B]): LazyList[B] = {
  // DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
  var restRef = ll // val restRef = new ObjectRef(ll)
  newLL {
    val marker = Statics.pfMarker
    val toMarker = anyToMarker.asInstanceOf[A => B] // safe because Function1 is erased
    var res: B = marker.asInstanceOf[B] // safe because B is unbounded
    var rest = restRef // var rest = restRef.elem
    while((res.asInstanceOf[AnyRef] eq marker) && !rest.isEmpty) {
      res = pf.applyOrElse(rest.head, toMarker)
      rest = rest.tail
      restRef = rest // restRef.elem = rest
    }
    // marker still present => pf was defined for no remaining element
    if (res.asInstanceOf[AnyRef] eq marker) State.Empty
    else sCons(res, collectImpl(rest, pf))
  }
}
/** Lazy `flatMap` implementation.
  *
  * Scans forward until `f` yields a non-empty iterator; that iterator's
  * elements become the next cells, with the rest of the list (flat-mapped)
  * appended as a lazy suffix. `restRef` tracks progress so the prefix does
  * not leak and evaluation can resume after an exception.
  */
private def flatMapImpl[A, B](ll: LazyList[A], f: A => IterableOnce[B]): LazyList[B] = {
  // DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
  var restRef = ll // val restRef = new ObjectRef(ll)
  newLL {
    var it: Iterator[B] = null
    var itHasNext = false
    var rest = restRef // var rest = restRef.elem
    while (!itHasNext && !rest.isEmpty) {
      it = f(rest.head).iterator
      itHasNext = it.hasNext
      if (!itHasNext) { // wait to advance `rest` because `it.next()` can throw
        rest = rest.tail
        restRef = rest // restRef.elem = rest
      }
    }
    if (itHasNext) {
      val head = it.next()
      // only now is it safe to move past the element that produced `it`
      rest = rest.tail
      restRef = rest // restRef.elem = rest
      sCons(head, newLL(stateFromIteratorConcatSuffix(it)(flatMapImpl(rest, f).state)))
    } else State.Empty
  }
}
/** Lazy `drop` implementation.
  *
  * Both the cursor (`restRef`) and the remaining count (`iRef`) are kept in
  * captured vars, so if evaluating the list throws midway, a later
  * re-evaluation resumes with the reduced count instead of restarting at `n`.
  */
private def dropImpl[A](ll: LazyList[A], n: Int): LazyList[A] = {
  // DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
  var restRef = ll // val restRef = new ObjectRef(ll)
  var iRef = n // val iRef = new IntRef(n)
  newLL {
    var rest = restRef // var rest = restRef.elem
    var i = iRef // var i = iRef.elem
    while (i > 0 && !rest.isEmpty) {
      rest = rest.tail
      restRef = rest // restRef.elem = rest
      i -= 1
      iRef = i // iRef.elem = i
    }
    // share the state of the first surviving cell rather than copying it
    rest.state
  }
}
/** Implementation of `LazyList.dropWhile`.
 *
 * Lazily advances past the longest prefix satisfying `p` and exposes the state of
 * what remains. As in the other *Impl methods, `restRef` is re-assigned on every
 * step so already-tested cells can be garbage-collected (see leak warning).
 */
private def dropWhileImpl[A](ll: LazyList[A], p: A => Boolean): LazyList[A] = {
// DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
var restRef = ll // val restRef = new ObjectRef(ll)
newLL {
var rest = restRef // var rest = restRef.elem
while (!rest.isEmpty && p(rest.head)) {
rest = rest.tail
restRef = rest // restRef.elem = rest
}
rest.state
}
}
/** Implementation of `LazyList.takeRight`.
 *
 * Two-pointer technique: `scout` is first advanced `n` steps ahead (or to the end),
 * then `rest` and `scout` advance in tandem until `scout` reaches the end, which
 * leaves `rest` positioned exactly `n` elements from the end (or at the start when
 * the list has fewer than `n` elements). Both refs are re-assigned on each step so
 * the skipped prefix stays collectable (see leak warning).
 */
private def takeRightImpl[A](ll: LazyList[A], n: Int): LazyList[A] = {
// DO NOT REFERENCE `ll` ANYWHERE ELSE, OR IT WILL LEAK THE HEAD
var restRef = ll // val restRef = new ObjectRef(ll)
var scoutRef = ll // val scoutRef = new ObjectRef(ll)
var remainingRef = n // val remainingRef = new IntRef(n)
newLL {
var scout = scoutRef // var scout = scoutRef.elem
var remaining = remainingRef // var remaining = remainingRef.elem
// advance `scout` `n` elements ahead (or until empty)
while (remaining > 0 && !scout.isEmpty) {
scout = scout.tail
scoutRef = scout // scoutRef.elem = scout
remaining -= 1
remainingRef = remaining // remainingRef.elem = remaining
}
var rest = restRef // var rest = restRef.elem
// advance `rest` and `scout` in tandem until `scout` reaches the end
while(!scout.isEmpty) {
scout = scout.tail
scoutRef = scout // scoutRef.elem = scout
rest = rest.tail // can't throw an exception as `scout` has already evaluated its tail
restRef = rest // restRef.elem = rest
}
// `rest` is the last `n` elements (or all of them)
rest.state
}
}
/** An alternative way of building and matching lazy lists, mirroring the classic
 * `Stream.cons(hd, tl)` construction style.
 */
object cons {
  /** A lazy list with the given first element and a lazily-evaluated remainder.
   * @param hd the first element of the resulting lazy list (by-name)
   * @param tl the remaining elements of the resulting lazy list (by-name)
   */
  def apply[A](hd: => A, tl: => LazyList[A]): LazyList[A] = newLL(sCons(hd, tl))
  /** Deconstructs a lazy list into its head and tail, or `None` when empty. */
  def unapply[A](xs: LazyList[A]): Option[(A, LazyList[A])] =
    if (xs.isEmpty) None else Some((xs.head, xs.tail))
}
implicit def toDeferrer[A](l: => LazyList[A]): Deferrer[A] = new Deferrer[A](() => l)
/** Value-class wrapper over a thunked LazyList, providing the right-associative
 * `#::` / `#:::` construction operators; `l` is only invoked when the result is forced.
 */
final class Deferrer[A] private[LazyList] (private val l: () => LazyList[A]) extends AnyVal {
/** Construct a LazyList consisting of a given first element followed by elements
 * from another LazyList. Neither `elem` nor the wrapped list is evaluated eagerly.
 */
def #:: [B >: A](elem: => B): LazyList[B] = newLL(sCons(elem, l()))
/** Construct a LazyList consisting of the concatenation of the given LazyList and
 * another LazyList.
 */
def #:::[B >: A](prefix: LazyList[B]): LazyList[B] = prefix lazyAppendedAll l()
}
/** Extractor enabling `case head #:: tail =>` pattern matches on a LazyList.
 * Matching forces the head and tail of a non-empty list.
 */
object #:: {
  def unapply[A](s: LazyList[A]): Option[(A, LazyList[A])] =
    if (s.isEmpty) None else Some((s.head, s.tail))
}
/** Builds a LazyList from any IterableOnce.
 *
 * A LazyList argument is returned as-is; a source whose `knownSize` is 0 yields the
 * shared empty instance; anything else is wrapped lazily around its iterator.
 */
def from[A](coll: collection.IterableOnce[A]): LazyList[A] =
  coll match {
    case lazyList: LazyList[A] => lazyList
    case other =>
      if (other.knownSize == 0) empty[A]
      else newLL(stateFromIterator(other.iterator))
  }
def empty[A]: LazyList[A] = _empty
/** Creates a State from an Iterator, with another State appended after the Iterator
 * is empty. Each step pulls one element eagerly and wraps the rest in a lazy cell,
 * so `suffix` (by-name) is only evaluated once the iterator is exhausted.
 */
private def stateFromIteratorConcatSuffix[A](it: Iterator[A])(suffix: => State[A]): State[A] =
if (it.hasNext) sCons(it.next(), newLL(stateFromIteratorConcatSuffix(it)(suffix)))
else suffix
/** Creates a State from an Iterator: one element is taken per forced cell,
 * terminating in `State.Empty` when the iterator runs out.
 */
private def stateFromIterator[A](it: Iterator[A]): State[A] =
if (it.hasNext) sCons(it.next(), newLL(stateFromIterator(it)))
else State.Empty
/** Lazily concatenates the given collections; returns the shared empty instance
 * when the vararg sequence is statically known to be empty.
 */
override def concat[A](xss: collection.Iterable[A]*): LazyList[A] =
if (xss.knownSize == 0) empty
else newLL(concatIterator(xss.iterator))
/** Chains the iterators of each collection in turn, appending the (lazily computed)
 * concatenation of the remaining collections as the suffix of each.
 */
private def concatIterator[A](it: Iterator[collection.Iterable[A]]): State[A] =
if (!it.hasNext) State.Empty
else stateFromIteratorConcatSuffix(it.next().iterator)(concatIterator(it))
/** An infinite LazyList that repeatedly applies a given function to a start value.
 *
 * `start` (by-name) is evaluated at most once, when the first cell is forced;
 * each subsequent element applies `f` to the previous one.
 *
 * @param start the start value of the LazyList
 * @param f the function that's repeatedly applied
 * @return the LazyList returning the infinite sequence of values `start, f(start), f(f(start)), ...`
 */
def iterate[A](start: => A)(f: A => A): LazyList[A] =
newLL {
val head = start
sCons(head, iterate(f(head))(f))
}
/**
 * Create an infinite LazyList starting at `start` and incrementing by
 * step `step`. Each cell is built lazily, so only forced elements are computed.
 *
 * @param start the start value of the LazyList
 * @param step the increment value of the LazyList
 * @return the LazyList starting at value `start`.
 */
def from(start: Int, step: Int): LazyList[Int] =
newLL(sCons(start, from(start + step, step)))
/**
 * Create an infinite LazyList starting at `start` and incrementing by `1`.
 * Convenience overload delegating to `from(start, 1)`.
 *
 * @param start the start value of the LazyList
 * @return the LazyList starting at value `start`.
 */
def from(start: Int): LazyList[Int] = from(start, 1)
/**
 * Create an infinite LazyList containing the given element expression (which
 * is computed anew for each occurrence, since `elem` is by-name).
 *
 * @param elem the element composing the resulting LazyList
 * @return the LazyList containing an infinite number of elem
 */
def continually[A](elem: => A): LazyList[A] = newLL(sCons(elem, continually(elem)))
/** A LazyList of `n` copies of `elem` (empty when `n <= 0`); each copy is
 * re-evaluated lazily since `elem` is by-name.
 */
override def fill[A](n: Int)(elem: => A): LazyList[A] =
  if (n <= 0) empty else newLL(sCons(elem, fill(n - 1)(elem)))
/** A LazyList of length `n` whose element at index `i` is `f(i)`, computed on demand. */
override def tabulate[A](n: Int)(f: Int => A): LazyList[A] = {
  def loop(i: Int): LazyList[A] =
    if (i >= n) empty else newLL(sCons(f(i), loop(i + 1)))
  loop(0)
}
// significantly simpler than the iterator returned by Iterator.unfold
/** Expands an initial state into a LazyList: each step applies `f` to the current
 * state, yielding the next element and state, and terminates when `f` returns None.
 */
override def unfold[A, S](init: S)(f: S => Option[(A, S)]): LazyList[A] =
  newLL {
    f(init) match {
      case None                    => State.Empty
      case Some((elem, nextState)) => sCons(elem, unfold(nextState)(f))
    }
  }
/** The builder returned by this method only evaluates elements
 * of collections added to it as needed.
 *
 * @tparam A the type of the ${coll}’s elements
 * @return A builder for $Coll objects.
 */
def newBuilder[A]: Builder[A, LazyList[A]] = new LazyBuilder[A]
/** Iterator over a LazyList. Advancing re-assigns the single mutable reference to
 * the tail, so the iterator only ever holds the unconsumed suffix.
 */
private class LazyIterator[+A](private[this] var lazyList: LazyList[A]) extends AbstractIterator[A] {
  override def hasNext: Boolean = !lazyList.isEmpty
  override def next(): A =
    if (!hasNext) Iterator.empty.next()
    else {
      val head = lazyList.head
      lazyList = lazyList.tail
      head
    }
}
/** Iterator of sliding windows (`size` elements, advancing by `step`) over a LazyList.
 * The first window only requires a non-empty list; later windows require strictly
 * more than `minLen` remaining elements.
 */
private class SlidingIterator[A](private[this] var lazyList: LazyList[A], size: Int, step: Int)
extends AbstractIterator[LazyList[A]] {
// NB: alphanumeric `max` binds loosest, so this parses as `(size - step) max 0` —
// the minimum number of elements that must remain after a window for another to exist.
private val minLen = size - step max 0
private var first = true
def hasNext: Boolean =
if (first) !lazyList.isEmpty
else lazyList.lengthGt(minLen)
def next(): LazyList[A] = {
if (!hasNext) Iterator.empty.next()
else {
first = false
val list = lazyList
lazyList = list.drop(step)
list.take(size)
}
}
}
/** Lazy `withFilter` support: delegates to a (lazily built) filtered LazyList,
 * so the predicate is only applied as elements are demanded.
 */
private final class WithFilter[A] private[LazyList](lazyList: LazyList[A], p: A => Boolean)
  extends collection.WithFilter[A, LazyList] {
  // LazyList.filter is itself lazy, so building this does not evaluate elements.
  private[this] val underlying = lazyList.filter(p)
  def map[B](f: A => B): LazyList[B] = underlying.map(f)
  def flatMap[B](f: A => IterableOnce[B]): LazyList[B] = underlying.flatMap(f)
  def foreach[U](f: A => U): Unit = underlying.foreach(f)
  def withFilter(q: A => Boolean): collection.WithFilter[A, LazyList] = new WithFilter(underlying, q)
}
/** A reusable builder whose result is a LazyList that only evaluates added
 * collections on demand. `list` is the (stable) result; `next` is the deferred
 * state for the not-yet-terminated tail, re-pointed by each add operation and
 * finally closed with `State.Empty` by `result()`.
 */
private final class LazyBuilder[A] extends ReusableBuilder[A, LazyList[A]] {
import LazyBuilder._
private[this] var next: DeferredState[A] = _
private[this] var list: LazyList[A] = _
clear()
override def clear(): Unit = {
// start a fresh list whose state is the (still uninitialized) deferred tail
val deferred = new DeferredState[A]
list = newLL(deferred.eval())
next = deferred
}
override def result(): LazyList[A] = {
// terminate the pending tail; the built list is then complete
next init State.Empty
list
}
override def addOne(elem: A): this.type = {
// close the current deferred state with a cons cell pointing at a new deferred tail
val deferred = new DeferredState[A]
next init sCons(elem, newLL(deferred.eval()))
next = deferred
this
}
// lazy implementation which doesn't evaluate the collection being added
override def addAll(xs: IterableOnce[A]): this.type = {
if (xs.knownSize != 0) {
val deferred = new DeferredState[A]
next init stateFromIteratorConcatSuffix(xs.iterator)(deferred.eval())
next = deferred
}
this
}
}
private object LazyBuilder {
/** A write-once cell holding a thunk for a State. `init` may be called exactly
 * once (before any `eval`); forcing an uninitialized or re-initialized cell
 * fails fast with IllegalStateException.
 */
final class DeferredState[A] {
private[this] var _state: () => State[A] = _
def eval(): State[A] = {
val state = _state
if (state == null) throw new IllegalStateException("uninitialized")
state()
}
// racy
def init(state: => State[A]): Unit = {
if (_state != null) throw new IllegalStateException("already initialized")
_state = () => state
}
}
}
/** This serialization proxy is used for LazyLists which start with a sequence of evaluated cons cells.
 * The forced sequence is serialized in a compact, sequential format, followed by the unevaluated tail, which uses
 * standard Java serialization to store the complete structure of unevaluated thunks. This allows the serialization
 * of long evaluated lazy lists without exhausting the stack through recursive serialization of cons cells.
 */
@SerialVersionUID(3L)
final class SerializationProxy[A](@transient protected var coll: LazyList[A]) extends Serializable {
// Writes each already-evaluated head in sequence, then the SerializeEnd marker,
// then the (possibly unevaluated) remaining LazyList via standard serialization.
private[this] def writeObject(out: ObjectOutputStream): Unit = {
out.defaultWriteObject()
var these = coll
while(these.knownNonEmpty) {
out.writeObject(these.head)
these = these.tail
}
out.writeObject(SerializeEnd)
out.writeObject(these)
}
// Reads heads until the SerializeEnd marker, then the tail, and rebuilds the list
// by prepending the buffered prefix onto the deserialized tail.
private[this] def readObject(in: ObjectInputStream): Unit = {
in.defaultReadObject()
val init = new ArrayBuffer[A]
var initRead = false
while (!initRead) in.readObject match {
case SerializeEnd => initRead = true
case a => init += a.asInstanceOf[A]
}
val tail = in.readObject().asInstanceOf[LazyList[A]]
coll = init ++: tail
}
// Replaces the proxy with the reconstructed LazyList after deserialization.
private[this] def readResolve(): Any = coll
}
}
| martijnhoekstra/scala | src/library/scala/collection/immutable/LazyList.scala | Scala | apache-2.0 | 46,755 |
// Minimal compilation fixture: `foo` invokes `bar(null)` on a freshly constructed `B`.
// NOTE(review): `B` is not defined in this file — presumably provided by a sibling
// source in the same positive-compilation test; confirm against the test directory.
class A {
def foo = new B().bar(null)
}
| yusuke2255/dotty | tests/untried/pos/ilya2/A.scala | Scala | bsd-3-clause | 44 |
package ch.bsisa.hyperbird.security.social
import securesocial.core.{Event,EventListener,LoginEvent,LogoutEvent,PasswordChangeEvent,PasswordResetEvent,SignUpEvent}
import play.api.mvc.{Session, RequestHeader}
import play.api.{Application, Logger}
import play.api.cache.Cache
import play.api.Play.current
import java.util.Date
import ch.bsisa.hyperbird.util.DateUtil
/**
* SecureSocial service event listener.
*
* This listener can be enabled or disabled in conf/play.plugins as:
* `10100:ch.bsisa.hyperbird.security.SecureServiceEventListener`
* Where 10100 is a priority number that determines the order in which plugins start up.
*
* @author Patrick Refondini
*/
class SecureServiceEventListener(app: Application) extends EventListener {

  override def id: String = "hbSecureServiceEventListenerId"

  private val logger = Logger("ch.bsisa.hyperbird.security.SecureServiceEventListener")

  /** Reacts to SecureSocial authentication events.
   *
   * On login, rejects users whose validity window does not contain the current
   * date; on logout, evicts the user's cache entry so role changes are seen
   * before the cache TTL expires. Every handled event is traced.
   *
   * @return None — the session is never modified by this listener.
   */
  def onEvent(event: Event, request: RequestHeader, session: Session): Option[Session] = {
    // Fix: each branch now yields a String. Previously the LoginEvent/LogoutEvent
    // branches evaluated to Unit, so the trace below logged "Traced () event".
    val eventName: String = event match {
      case e: LoginEvent =>
        val currentDate = new Date()
        // Throws MatchError if the identity is not our User implementation (unchanged behavior).
        val user = event.user match { case user: User => user }
        if (!(user.validFromDate.before(currentDate) && user.validToDate.after(currentDate))) {
          throw new Exception(s"User ${user.identityId.userId} - ${user.fullName} access is currently disactivated (${DateUtil.hbDateFormat.format(new Date)}), please contact your system administrator.")
        }
        "login"
      case e: LogoutEvent =>
        // Clean up cache on logout. Useful for user to notice their roles change before cache TTL has expired.
        Cache.remove(event.user.identityId.userId)
        "logout"
      case e: SignUpEvent => "signup"
      case e: PasswordResetEvent =>
        // Checkout securesocial.core.providers.utils.Mailer.sendEmail for example Akka scheduler usage with MailerPlugin
        // Akka.system.scheduler.scheduleOnce(1 seconds) {
        //   val mail = use[MailerPlugin].email
        //   mail.setSubject(subject)
        //   mail.setRecipient(recipient)
        //   mail.setFrom(fromAddress)
        //   // the mailer plugin handles null / empty string gracefully
        //   mail.send(body._1.map(_.body).getOrElse(""), body._2.map(_.body).getOrElse(""))
        // }
        "password reset"
      case e: PasswordChangeEvent => "password change"
      case _ => "unknown" // robustness: avoid a MatchError on any future SecureSocial event type
    }
    logger.info(s"Traced ${eventName} event for user ${event.user.fullName}. Request.path = ${request.path} , Request.rawQueryString = ${request.rawQueryString}")
    // Not to change to the session simply return: None
    // Otherwise to change the session, return something like: Some(session + ("your_key" -> "your_value"))
    None
  }
}
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.