code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.gildedrose.items
import com.gildedrose.Item
/**
 * "Aged Brie" item behaviour: unlike ordinary items its quality rises
 * as it ages, so the daily update only ever increases quality.
 */
class AgedBrie(kk : Item) extends ExtendedItem(kk) {
  /** Daily quality rule for Aged Brie — delegate to the shared increase logic. */
  protected override def updateQuality(): Unit = increaseQuality()
}
| jlafuentegarcia/GildedRose-Refactoring-Kata | scala/src/main/scala/com/gildedrose/items/AgedBrie.scala | Scala | mit | 237 |
package model
/**
 * Slick component defining the PULL_REQUEST table and its row mapping.
 * A pull request row is keyed by the issue it belongs to (via IssueTemplate),
 * so lookups delegate to byIssue.
 */
trait PullRequestComponent extends TemplateComponent { self: Profile =>
  import profile.simple._

  // Entry point for querying PULL_REQUEST rows.
  lazy val PullRequests = TableQuery[PullRequests]

  class PullRequests(tag: Tag) extends Table[PullRequest](tag, "PULL_REQUEST") with IssueTemplate {
    val branch = column[String]("BRANCH")
    // REQUEST_* columns identify the source side of the pull request
    // (presumably the forked repository/branch — confirm against callers).
    val requestUserName = column[String]("REQUEST_USER_NAME")
    val requestRepositoryName = column[String]("REQUEST_REPOSITORY_NAME")
    val requestBranch = column[String]("REQUEST_BRANCH")
    val commitIdFrom = column[String]("COMMIT_ID_FROM")
    val commitIdTo = column[String]("COMMIT_ID_TO")

    // Bidirectional mapping between a table row and the PullRequest case class.
    def * = (userName, repositoryName, issueId, branch, requestUserName, requestRepositoryName, requestBranch, commitIdFrom, commitIdTo) <> (PullRequest.tupled, PullRequest.unapply)

    // The primary key is the owning issue's key, hence the delegation to byIssue.
    def byPrimaryKey(userName: String, repositoryName: String, issueId: Int) = byIssue(userName, repositoryName, issueId)
    def byPrimaryKey(userName: Column[String], repositoryName: Column[String], issueId: Column[Int]) = byIssue(userName, repositoryName, issueId)
  }
}
/**
 * Immutable row model for the PULL_REQUEST table.
 * (userName, repositoryName, issueId) identifies the owning issue;
 * the request* fields identify the source of the pull request, and
 * commitIdFrom/commitIdTo delimit the compared commit range.
 */
case class PullRequest(
  userName: String,
  repositoryName: String,
  issueId: Int,
  branch: String,
  requestUserName: String,
  requestRepositoryName: String,
  requestBranch: String,
  commitIdFrom: String,
  commitIdTo: String
)
| tb280320889/TESTTB | src/main/scala/model/PullRequest.scala | Scala | apache-2.0 | 1,303 |
package com.dslplatform.api.patterns
import com.dslplatform.api.client.DomainProxy
import scala.concurrent.Future
/** Service for searching and counting domain objects.
* Search can be performed using {@link Specification specification},
* paged using limit and offset arguments.
* Custom sort can be provided using Seq of property->direction pairs.
*
* Specification can be declared in DSL or custom search can be built on client
* and sent to server.
*
* When permissions are applied, server can restrict which results will be returned to the client.
* Service should be used when Future is a preferred way of interacting with the remote server.
*
* @tparam TSearchable domain object type.
*/
trait SearchableRepository[TSearchable <: Searchable] {

  /** Returns an IndexedSeq with all domain objects.
   * Convenience alias for search() with every argument defaulted.
   * @return future with all domain objects
   */
  def search: Future[IndexedSeq[TSearchable]] = search()

  /** Returns an IndexedSeq of domain objects satisfying {@link Specification[TSearchable] specification}
   * with up to <code>limit</code> results.
   * <code>offset</code> can be used to skip initial results.
   * <code>order</code> should be given as a Seq of pairs of
   * <code>{@literal <String, Boolean>}</code>
   * where first is a property name and second is whether it should be sorted
   * ascending over this property.
   *
   * This is the single abstract method; all other search overloads delegate here.
   *
   * @param specification search predicate (None means "match everything")
   * @param limit         maximum number of results (None means no limit)
   * @param offset        number of results to be skipped
   * @param order         custom ordering: property name -> ascending?
   * @return future to domain objects which satisfy search predicate
   */
  def search(
      specification: Option[Specification[TSearchable]] = None,
      limit: Option[Int] = None,
      offset: Option[Int] = None,
      order: Map[String, Boolean] = Map.empty): Future[IndexedSeq[TSearchable]]

  /** Helper method for searching domain objects.
   * Returns a Seq of domain objects satisfying {@link Specification[TSearchable] specification}
   *
   * @param specification search predicate
   * @return future to domain objects which satisfy search predicate
   */
  def search(
      specification: Specification[TSearchable]): Future[IndexedSeq[TSearchable]] =
    // Option(...) also maps a null specification to None.
    search(Option(specification))

  /** Helper method for searching domain objects.
   * Returns a Seq of domain objects satisfying {@link Specification[TSearchable] specification}
   * with up to <code>limit</code> results.
   *
   * @param specification search predicate
   * @param limit maximum number of results
   * @return future to domain objects which satisfy search predicate
   */
  def search(
      specification: Specification[TSearchable],
      limit: Int): Future[IndexedSeq[TSearchable]] =
    search(Option(specification), Option(limit))

  /** Returns the number of elements
   * Convenience alias for count() with no specification.
   * @return future with number of domain objects
   */
  def count: Future[Long] = count()

  /** Returns the number of elements satisfying provided specification.
   * This is the single abstract counting method; the helper below delegates here.
   *
   * @param specification search predicate (None counts everything)
   * @return future with number of domain objects which satisfy specification
   */
  def count(specification: Option[Specification[TSearchable]] = None): Future[Long]

  /** Helper method for counting domain objects.
   * Returns the number of elements satisfying provided specification.
   *
   * @param specification search predicate
   * @return future with number of domain objects which satisfy specification
   */
  def count(
      specification: Specification[TSearchable]): Future[Long] =
    count(Option(specification))
}
| ngs-doo/dsl-client-scala | core/src/main/scala/com/dslplatform/api/patterns/SearchableRepository.scala | Scala | bsd-3-clause | 3,728 |
package polymorphism
import org.specs2.mutable._
/** Behavioural examples for the custom List implementation in Exercise1. */
class Exercise1Spec extends Specification {
  import exercises.Exercise1._

  "List" can {
    "be defined" >> {
      // Construction must succeed for both Int and String element types.
      List(1, 2, 3) must not (throwA [Throwable])
      List("a", "b", "c") must not (throwA [Throwable])
    }
    "be transformed" >> {
      // map applies the function to each element, preserving order.
      List(1, 2, 3) map (_ + 1) must equalTo (List(2, 3, 4))
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.gihyo.spark.ch03.pairrdd_transformation
import jp.gihyo.spark.{SparkFunSuite, TestSparkContext}
/** Smoke test: MapValuesExample.run must complete without throwing. */
class MapValuesExampleSuite extends SparkFunSuite with TestSparkContext {
  test("run") {
    // sc is the shared SparkContext supplied by the TestSparkContext mixin.
    MapValuesExample.run(sc)
  }
}
| yu-iskw/gihyo-spark-book-example | src/test/scala/jp/gihyo/spark/ch03/pairrdd_transformation/MapValuesExampleSuite.scala | Scala | apache-2.0 | 1,037 |
package com.azavea.usace.programanalysis.geop
import akka.actor.Props
import akka.io.IO
import org.apache.spark.{SparkConf, SparkContext}
import spray.can.Http
/**
 * Entry point: wires a SparkContext into the geoprocessing service actor
 * and exposes it over HTTP on port 8090.
 */
object Main {
  def main(args: Array[String]): Unit = {
    // Actor system hosting both the HTTP layer and the service actor.
    implicit val system = akka.actor.ActorSystem("usace-programanalysis-geop")

    // Spark configured for Kryo serialization with GeoTrellis registrations.
    val sparkConf = new SparkConf()
      .setAppName("USACE Program Analysis Geoprocessing")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.kryo.registrator", "geotrellis.spark.io.kryo.KryoRegistrator")
    val sparkContext = new SparkContext(sparkConf)

    // Create and start the actor that handles geoprocessing requests.
    val handler =
      system.actorOf(Props(classOf[GeopServiceActor], sparkContext), "usace-programanalysis")

    // Bind the HTTP server on all interfaces, port 8090, with the actor as handler.
    IO(Http) ! Http.Bind(handler, "0.0.0.0", 8090)
  }
}
| azavea/usace-program-analysis-geoprocessing | geop/src/main/scala/com/azavea/usace/programanalysis/geop/Main.scala | Scala | apache-2.0 | 863 |
package main
import java.io.FileReader
import org.catalogueoflife.e2.schema.parse.SchemaParser
import org.catalogueoflife.e2.schema.parse.ERGraph
import org.catalogueoflife.e2.schema.parse.MySQLWriter
import org.catalogueoflife.e2.schema.parse.SQLiteWriter
import org.catalogueoflife.e2.schema.parse.DOTWriter
import org.catalogueoflife.e2.schema.parse.XSDWriter
object Main {
  /** Runs `op` against a PrintWriter on `f`, always closing the writer. */
  def printToFile(f: java.io.File)(op: java.io.PrintWriter => Unit) {
    val p = new java.io.PrintWriter(f)
    try { op(p) } finally { p.close() }
  }

  /**
   * Parses a schema definition file and emits MySQL, SQLite, DOT and XSD
   * renderings of it.
   *
   * args(0): input schema file (falls back to a hard-coded default path);
   * args(1): optional destination directory (defaults to the input's directory).
   */
  def main(args : Array[String]) {
    val filename = if (args.length > 0) args(0) else "/home/scmjpg/schema/MultiSchema.txt"
    val lastSlash = filename.lastIndexOf("/")
    val destDir = if (args.length > 1) args(1) else filename.slice(0, lastSlash)
    val lastDot = filename.lastIndexOf(".")
    // Output name stub: input basename with its extension stripped, when the
    // last dot actually falls after the last slash.
    val stub = destDir + (if (lastDot > 0 && lastDot > lastSlash + 1) filename.slice(lastSlash, lastDot)
                          else filename.slice(lastSlash, filename.length))
    val mysqlFile = stub + "-MySQL5.sql"
    val sqliteFile = stub + "-SQLite.sql"
    val dotFile = stub + ".dot"
    val xsdFile = stub + ".xsd"
    val reader = new FileReader(filename)
    // Fix: the FileReader was previously never closed; ensure it is released
    // whether parsing succeeds or fails.
    try {
      val er = new ERGraph
      val parser = new SchemaParser(er)
      parser.parseAll(parser.graph, reader) match {
        case parser.Success(graph, _) => {
          er.print()
          er.validate()
          // Each writer follows the same protocol: analyse the graph, then
          // write its rendering to the corresponding output file.
          val mysql = new MySQLWriter()
          mysql.analyse(er)
          printToFile(new java.io.File(mysqlFile)) { writer => mysql.write(writer) }
          val sqlite = new SQLiteWriter()
          sqlite.analyse(er)
          printToFile(new java.io.File(sqliteFile)) { writer => sqlite.write(writer) }
          val dot = new DOTWriter()
          dot.analyse(er)
          printToFile(new java.io.File(dotFile)) { writer => dot.write(writer) }
          val xsd = new XSDWriter()
          xsd.analyse(er)
          printToFile(new java.io.File(xsdFile)) { writer => xsd.write(writer) }
        }
        case p@parser.Failure(msg, next) => println("Fail: " + p);
        case p@parser.Error(msg, next) => println("Error: " + p);
      }
    } finally {
      reader.close()
    }
  }
}
| jongiddy/SchemaParse | src/main/Main.scala | Scala | apache-2.0 | 2,026 |
package com.dominikgruber.fpinscala.chapter15
import com.dominikgruber.fpinscala.chapter12.Monad
/**
 * A pure stream transducer: consumes values of type I and produces values of
 * type O. A Process is always in one of three states: Halt (finished),
 * Await (waiting for input) or Emit (output value ready).
 */
sealed trait Process[I,O] {

  /** Drives this process against a concrete input Stream, yielding its output Stream. */
  def apply(s: Stream[I]): Stream[O] = this match {
    case Halt() => Stream()
    case Await(recv) => s match {
      case h #:: t => recv(Some(h))(t)
      // Input exhausted: signal end-of-stream to the process with None.
      case xs => recv(None)(xs)
    }
    case Emit(h,t) => h #:: t(s)
  }

  /** Restarts this process from the top whenever it halts — but not when the
   * input itself has ended (the None case), which would loop forever. */
  def repeat: Process[I,O] = {
    def go(p: Process[I,O]): Process[I,O] = p match {
      case Halt() => go(this)
      case Await(recv) => Await {
        case None => recv(None)
        case i => go(recv(i))
      }
      case Emit(h, t) => Emit(h, go(t))
    }
    go(this)
  }

  /**
   * Exercise 05
   * Hard: Implement |> as a method on Process. Let the types guide your
   * implementation.
   *
   * Pipes this process's output into p2, fusing the two into one process.
   * Driven by p2's state: when p2 awaits, one step of `this` is run to feed it.
   */
  def |>[O2](p2: Process[O,O2]): Process[I,O2] = p2 match {
    case Halt() => Halt()
    case Emit(h2, t2) => Emit(h2, this |> t2)
    case Await(recv2) => this match {
      case Halt() => Halt() |> recv2(None)
      case Emit(h1, t1) => t1 |> recv2(Some(h1))
      case Await(recv1) => Await(i => recv1(i) |> p2)
    }
  }

  /** Transforms every output of this process with f (lift f and pipe into it). */
  def map[O2](f: O => O2): Process[I,O2] =
    this |> Process.lift(f)

  /** Runs this process to completion, then continues with p (by-name: p is
   * only built if/when this process halts). */
  def ++(p: => Process[I,O]): Process[I,O] = this match {
    case Halt() => p
    case Emit(h, t) => Emit(h, t ++ p)
    case Await(recv) => Await(recv andThen (_ ++ p))
  }

  /** For each output h of this process, runs the whole process f(h) before
   * continuing with the rest of this process's outputs. */
  def flatMap[O2](f: O => Process[I,O2]): Process[I,O2] = this match {
    case Halt() => Halt()
    case Emit(h, t) => f(h) ++ t.flatMap(f)
    case Await(recv) => Await(recv andThen (_ flatMap f))
  }
}
/** Emit a single output value `head`, then continue as `tail` (Halt by default). */
case class Emit[I,O](head: O, tail: Process[I,O] = Halt[I,O]()) extends Process[I,O]
/** Wait for the next input element; `recv` receives None at end of input. */
case class Await[I,O](recv: Option[I] => Process[I,O]) extends Process[I,O]
/** The terminated process: consumes nothing further and emits nothing. */
case class Halt[I,O]() extends Process[I,O]
/** Combinators for constructing Process values. */
object Process {

  /** A process that transforms exactly one element with f, then halts. */
  def liftOne[I,O](f: I => O): Process[I,O] = Await {
    case Some(i) => Emit(f(i))
    case None => Halt()
  }

  /** A process that transforms every input element with f. */
  def lift[I,O](f: I => O): Process[I,O] = liftOne(f).repeat

  /** A process that passes through only elements satisfying p. */
  def filter[I](p: I => Boolean): Process[I,I] = Await[I,I] {
    case Some(i) if p(i) => Emit(i)
    case _ => Halt()
  }.repeat

  /** Emits the running total of all doubles seen so far. */
  def sum: Process[Double,Double] = {
    def step(total: Double): Process[Double,Double] = Await {
      case Some(d) => Emit(total + d, step(total + d))
      case None => Halt()
    }
    step(0.0)
  }

  /**
   * Exercise 01
   * take emits the first n elements then halts; drop skips the first n and
   * passes the rest through; takeWhile/dropWhile do the same guided by a
   * predicate instead of a count.
   */
  def take[I](n: Int): Process[I,I] =
    if (n <= 0) Halt()
    else Await[I,I] {
      case Some(x) => Emit(x, take(n - 1))
      case _ => Halt()
    }

  def drop[I](n: Int): Process[I,I] = Await[I,I] {
    case Some(x) if n == 0 => Emit(x)
    case Some(x) if n > 0 => drop(n - 1)
    case _ => Halt()
  }.repeat

  def takeWhile[I](f: I => Boolean): Process[I,I] = Await[I,I] {
    case Some(x) if f(x) => Emit(x, takeWhile(f))
    case _ => Halt()
  }

  def dropWhile[I](f: I => Boolean): Process[I,I] = Await[I,I] {
    case Some(x) if !f(x) => Emit(x, id)
    case Some(x) if f(x) => dropWhile(f)
    case _ => Halt()
  }

  /** The identity process: echoes every input unchanged. */
  def id[I]: Process[I,I] = lift(identity)

  /**
   * Exercise 02
   * count emits how many elements have been seen so far: 1, 2, 3, ...
   */
  def count[I]: Process[I,Int] = {
    def step(seen: Int): Process[I,Int] = Await {
      case Some(_) => Emit(seen, step(seen + 1))
      case None => Halt()
    }
    step(1)
  }

  /**
   * Exercise 03
   * mean emits the running average of the doubles seen so far.
   */
  def mean: Process[Double,Double] = {
    def step(total: Double, n: Int): Process[Double,Double] = Await {
      case Some(d) => Emit((total + d) / (n + 1), step(total + d, n + 1))
      case None => Halt()
    }
    step(0, 0)
  }

  /** General stateful one-in/one-out loop: threads state of type S through f. */
  def loop[S,I,O](z: S)(f: (I,S) => (O,S)): Process[I,O] =
    Await {
      case Some(i) =>
        val (out, nextState) = f(i, z)
        Emit(out, loop(nextState)(f))
      case None => Halt()
    }

  /**
   * Exercise 04
   * sum and count expressed via loop.
   */
  def sumViaLoop: Process[Double,Double] =
    loop(0.0)((d, total) => (total + d, total + d))

  def countViaLoop[I]: Process[I,Int] =
    loop(0)((_, n) => (n + 1, n + 1))

  /** Monad instance for Process with a fixed input type I. */
  def monad[I]: Monad[({ type f[x] = Process[I,x]})#f] = new Monad[({ type f[x] = Process[I,x]})#f] {
    override def unit[O](o: => O): Process[I,O] = Emit(o)
    override def flatMap[O,O2](p: Process[I,O])(f: O => Process[I,O2]): Process[I,O2] = p flatMap f
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io._
import java.net._
import java.nio.charset.StandardCharsets
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.{InputFormat, JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, OutputFormat => NewOutputFormat}
import org.apache.spark._
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.input.PortableDataStream
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.util._
/**
 * An RDD that computes each partition by feeding the parent partition's
 * elements to a Python worker (via PythonRunner) and yielding the worker's
 * serialized output records.
 */
private[spark] class PythonRDD(
    parent: RDD[_],
    func: PythonFunction,
    preservePartitoning: Boolean)
  extends RDD[Array[Byte]](parent) {

  // Socket buffer size used when talking to the Python worker.
  val bufferSize = conf.getInt("spark.buffer.size", 65536)
  // Whether idle Python workers may be kept and reused across tasks.
  val reuse_worker = conf.getBoolean("spark.python.worker.reuse", true)

  override def getPartitions: Array[Partition] = firstParent.partitions

  // Keep the parent's partitioner only when the caller asked for it.
  override val partitioner: Option[Partitioner] = {
    if (preservePartitoning) firstParent.partitioner else None
  }

  val asJavaRDD: JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)

  override def compute(split: Partition, context: TaskContext): Iterator[Array[Byte]] = {
    val runner = PythonRunner(func, bufferSize, reuse_worker)
    runner.compute(firstParent.iterator(split, context), split.index, context)
  }
}
/**
* A wrapper for a Python function, contains all necessary context to run the function in Python
* runner.
*/
private[spark] case class PythonFunction(
    command: Array[Byte],                              // serialized function body sent verbatim to the worker
    envVars: JMap[String, String],                     // environment for the worker process
    pythonIncludes: JList[String],                     // extra files (*.zip/*.egg) added to the worker's path
    pythonExec: String,                                // python executable to launch
    pythonVer: String,                                 // python version expected by the driver
    broadcastVars: JList[Broadcast[PythonBroadcast]],  // broadcasts this function needs on the worker
    accumulator: PythonAccumulatorV2)                  // accumulator receiving updates from the worker
/**
 * A wrapper for chained Python functions (from bottom to top).
 * @param funcs the chain of functions; each function's output feeds the next
 */
private[spark] case class ChainedPythonFunctions(funcs: Seq[PythonFunction])
private[spark] object PythonRunner {
  /** Builds the common single-function, non-UDF runner (one chain, offset 0). */
  def apply(func: PythonFunction, bufferSize: Int, reuse_worker: Boolean): PythonRunner = {
    new PythonRunner(
      Seq(ChainedPythonFunctions(Seq(func))), bufferSize, reuse_worker, false, Array(Array(0)))
  }
}
/**
* A helper class to run Python mapPartition/UDFs in Spark.
*
* funcs is a list of independent Python functions, each one of them is a list of chained Python
* functions (from bottom to top).
*/
private[spark] class PythonRunner(
    funcs: Seq[ChainedPythonFunctions],
    bufferSize: Int,
    reuse_worker: Boolean,
    isUDF: Boolean,
    argOffsets: Array[Array[Int]])
  extends Logging {

  require(funcs.length == argOffsets.length, "argOffsets should have the same length as funcs")

  // All the Python functions should have the same exec, version and envvars.
  private val envVars = funcs.head.funcs.head.envVars
  private val pythonExec = funcs.head.funcs.head.pythonExec
  private val pythonVer = funcs.head.funcs.head.pythonVer

  // TODO: support accumulator in multiple UDF
  private val accumulator = funcs.head.funcs.head.accumulator

  /**
   * Runs one partition through the Python worker: a writer thread streams the
   * input iterator to the worker's stdin while the returned iterator reads
   * length-prefixed result records (and protocol control messages) from its
   * stdout. A monitor thread kills the worker if the task is interrupted.
   */
  def compute(
      inputIterator: Iterator[_],
      partitionIndex: Int,
      context: TaskContext): Iterator[Array[Byte]] = {
    val startTime = System.currentTimeMillis
    val env = SparkEnv.get
    val localdir = env.blockManager.diskBlockManager.localDirs.map(f => f.getPath()).mkString(",")
    envVars.put("SPARK_LOCAL_DIRS", localdir) // it's also used in monitor thread
    if (reuse_worker) {
      envVars.put("SPARK_REUSE_WORKER", "1")
    }
    val worker: Socket = env.createPythonWorker(pythonExec, envVars.asScala.toMap)

    // Whether is the worker released into idle pool
    @volatile var released = false

    // Start a thread to feed the process input from our parent's iterator
    val writerThread = new WriterThread(env, worker, inputIterator, partitionIndex, context)

    context.addTaskCompletionListener { context =>
      writerThread.shutdownOnTaskCompletion()
      // Close the socket unless the worker was cleanly handed back to the pool.
      if (!reuse_worker || !released) {
        try {
          worker.close()
        } catch {
          case e: Exception =>
            logWarning("Failed to close worker socket", e)
        }
      }
    }

    writerThread.start()
    new MonitorThread(env, worker, context).start()

    // Return an iterator that read lines from the process's stdout
    val stream = new DataInputStream(new BufferedInputStream(worker.getInputStream, bufferSize))
    val stdoutIterator = new Iterator[Array[Byte]] {
      override def next(): Array[Byte] = {
        val obj = _nextObj
        if (hasNext) {
          _nextObj = read()
        }
        obj
      }

      /**
       * Reads the next record from the worker. A positive int is the length of
       * a data record; negative values are SpecialLengths control messages.
       * Returns null once the end of the stream has been consumed.
       */
      private def read(): Array[Byte] = {
        if (writerThread.exception.isDefined) {
          throw writerThread.exception.get
        }
        try {
          stream.readInt() match {
            case length if length > 0 =>
              val obj = new Array[Byte](length)
              stream.readFully(obj)
              obj
            case 0 => Array.empty[Byte]
            case SpecialLengths.TIMING_DATA =>
              // Timing data from worker
              val bootTime = stream.readLong()
              val initTime = stream.readLong()
              val finishTime = stream.readLong()
              val boot = bootTime - startTime
              val init = initTime - bootTime
              val finish = finishTime - initTime
              val total = finishTime - startTime
              logInfo("Times: total = %s, boot = %s, init = %s, finish = %s".format(total, boot,
                init, finish))
              val memoryBytesSpilled = stream.readLong()
              val diskBytesSpilled = stream.readLong()
              context.taskMetrics.incMemoryBytesSpilled(memoryBytesSpilled)
              context.taskMetrics.incDiskBytesSpilled(diskBytesSpilled)
              // Timing is metadata, not a record: keep reading for the next one.
              read()
            case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
              // Signals that an exception has been thrown in python
              val exLength = stream.readInt()
              val obj = new Array[Byte](exLength)
              stream.readFully(obj)
              throw new PythonException(new String(obj, StandardCharsets.UTF_8),
                writerThread.exception.getOrElse(null))
            case SpecialLengths.END_OF_DATA_SECTION =>
              // We've finished the data section of the output, but we can still
              // read some accumulator updates:
              val numAccumulatorUpdates = stream.readInt()
              (1 to numAccumulatorUpdates).foreach { _ =>
                val updateLen = stream.readInt()
                val update = new Array[Byte](updateLen)
                stream.readFully(update)
                accumulator.add(update)
              }
              // Check whether the worker is ready to be re-used.
              if (stream.readInt() == SpecialLengths.END_OF_STREAM) {
                if (reuse_worker) {
                  env.releasePythonWorker(pythonExec, envVars.asScala.toMap, worker)
                  released = true
                }
              }
              // null terminates the iterator (see hasNext).
              null
          }
        } catch {
          case e: Exception if context.isInterrupted =>
            logDebug("Exception thrown after task interruption", e)
            throw new TaskKilledException
          case e: Exception if env.isStopped =>
            logDebug("Exception thrown after context is stopped", e)
            null // exit silently
          case e: Exception if writerThread.exception.isDefined =>
            logError("Python worker exited unexpectedly (crashed)", e)
            logError("This may have been caused by a prior exception:", writerThread.exception.get)
            throw writerThread.exception.get
          case eof: EOFException =>
            throw new SparkException("Python worker exited unexpectedly (crashed)", eof)
        }
      }

      var _nextObj = read()

      override def hasNext: Boolean = _nextObj != null
    }
    new InterruptibleIterator(context, stdoutIterator)
  }

  /**
   * The thread responsible for writing the data from the PythonRDD's parent iterator to the
   * Python process.
   */
  class WriterThread(
      env: SparkEnv,
      worker: Socket,
      inputIterator: Iterator[_],
      partitionIndex: Int,
      context: TaskContext)
    extends Thread(s"stdout writer for $pythonExec") {

    @volatile private var _exception: Exception = null

    private val pythonIncludes = funcs.flatMap(_.funcs.flatMap(_.pythonIncludes.asScala)).toSet
    private val broadcastVars = funcs.flatMap(_.funcs.flatMap(_.broadcastVars.asScala))

    setDaemon(true)

    /** Contains the exception thrown while writing the parent iterator to the Python process. */
    def exception: Option[Exception] = Option(_exception)

    /** Terminates the writer thread, ignoring any exceptions that may occur due to cleanup. */
    def shutdownOnTaskCompletion() {
      assert(context.isCompleted)
      this.interrupt()
    }

    override def run(): Unit = Utils.logUncaughtExceptions {
      try {
        TaskContext.setTaskContext(context)
        val stream = new BufferedOutputStream(worker.getOutputStream, bufferSize)
        val dataOut = new DataOutputStream(stream)
        // Partition index
        dataOut.writeInt(partitionIndex)
        // Python version of driver
        PythonRDD.writeUTF(pythonVer, dataOut)
        // Write out the TaskContextInfo
        dataOut.writeInt(context.stageId())
        dataOut.writeInt(context.partitionId())
        dataOut.writeInt(context.attemptNumber())
        dataOut.writeLong(context.taskAttemptId())
        // sparkFilesDir
        PythonRDD.writeUTF(SparkFiles.getRootDirectory(), dataOut)
        // Python includes (*.zip and *.egg files)
        dataOut.writeInt(pythonIncludes.size)
        for (include <- pythonIncludes) {
          PythonRDD.writeUTF(include, dataOut)
        }
        // Broadcast variables: for reused workers, send only the delta between
        // what the worker already holds (oldBids) and what this task needs.
        val oldBids = PythonRDD.getWorkerBroadcasts(worker)
        val newBids = broadcastVars.map(_.id).toSet
        // number of different broadcasts
        val toRemove = oldBids.diff(newBids)
        val cnt = toRemove.size + newBids.diff(oldBids).size
        dataOut.writeInt(cnt)
        for (bid <- toRemove) {
          // remove the broadcast from worker
          dataOut.writeLong(- bid - 1) // bid >= 0
          oldBids.remove(bid)
        }
        for (broadcast <- broadcastVars) {
          if (!oldBids.contains(broadcast.id)) {
            // send new broadcast
            dataOut.writeLong(broadcast.id)
            PythonRDD.writeUTF(broadcast.value.path, dataOut)
            oldBids.add(broadcast.id)
          }
        }
        dataOut.flush()
        // Serialized command:
        if (isUDF) {
          dataOut.writeInt(1)
          dataOut.writeInt(funcs.length)
          funcs.zip(argOffsets).foreach { case (chained, offsets) =>
            dataOut.writeInt(offsets.length)
            offsets.foreach { offset =>
              dataOut.writeInt(offset)
            }
            dataOut.writeInt(chained.funcs.length)
            chained.funcs.foreach { f =>
              dataOut.writeInt(f.command.length)
              dataOut.write(f.command)
            }
          }
        } else {
          dataOut.writeInt(0)
          val command = funcs.head.funcs.head.command
          dataOut.writeInt(command.length)
          dataOut.write(command)
        }
        // Data values
        PythonRDD.writeIteratorToStream(inputIterator, dataOut)
        dataOut.writeInt(SpecialLengths.END_OF_DATA_SECTION)
        dataOut.writeInt(SpecialLengths.END_OF_STREAM)
        dataOut.flush()
      } catch {
        case e: Exception if context.isCompleted || context.isInterrupted =>
          logDebug("Exception thrown after task completion (likely due to cleanup)", e)
          if (!worker.isClosed) {
            Utils.tryLog(worker.shutdownOutput())
          }

        case e: Exception =>
          // We must avoid throwing exceptions here, because the thread uncaught exception handler
          // will kill the whole executor (see org.apache.spark.executor.Executor).
          _exception = e
          if (!worker.isClosed) {
            Utils.tryLog(worker.shutdownOutput())
          }
      }
    }
  }

  /**
   * It is necessary to have a monitor thread for python workers if the user cancels with
   * interrupts disabled. In that case we will need to explicitly kill the worker, otherwise the
   * threads can block indefinitely.
   */
  class MonitorThread(env: SparkEnv, worker: Socket, context: TaskContext)
    extends Thread(s"Worker Monitor for $pythonExec") {

    setDaemon(true)

    override def run() {
      // Kill the worker if it is interrupted, checking until task completion.
      // TODO: This has a race condition if interruption occurs, as completed may still become true.
      while (!context.isInterrupted && !context.isCompleted) {
        Thread.sleep(2000)
      }
      if (!context.isCompleted) {
        try {
          logWarning("Incomplete task interrupted: Attempting to kill Python Worker")
          env.destroyPythonWorker(pythonExec, envVars.asScala.toMap, worker)
        } catch {
          case e: Exception =>
            logError("Exception when trying to kill worker", e)
        }
      }
    }
  }
}
/** Thrown for exceptions in user Python code; `msg` carries the worker-reported
 * error text and `cause` (may be null) a prior writer-thread exception. */
private class PythonException(msg: String, cause: Exception) extends RuntimeException(msg, cause)
/**
* Form an RDD[(Array[Byte], Array[Byte])] from key-value pairs returned from Python.
* This is used by PySpark's shuffle operations.
*/
private class PairwiseRDD(prev: RDD[Array[Byte]]) extends RDD[(Long, Array[Byte])](prev) {
  override def getPartitions: Array[Partition] = prev.partitions
  override val partitioner: Option[Partitioner] = prev.partitioner
  // The parent emits keys and values as alternating elements; group them in
  // pairs and deserialize the key bytes into a Long.
  override def compute(split: Partition, context: TaskContext): Iterator[(Long, Array[Byte])] =
    prev.iterator(split, context).grouped(2).map {
      case Seq(a, b) => (Utils.deserializeLongValue(a), b)
      // An odd trailing element means the stream is malformed.
      case x => throw new SparkException("PairwiseRDD: unexpected value: " + x)
    }
  val asJavaPairRDD : JavaPairRDD[Long, Array[Byte]] = JavaPairRDD.fromRDD(this)
}
/** Negative "length" codes used as control messages in the worker protocol
 * (positive ints are real record lengths). */
private object SpecialLengths {
  val END_OF_DATA_SECTION = -1     // end of the serialized data records
  val PYTHON_EXCEPTION_THROWN = -2 // a UTF-8 traceback payload follows
  val TIMING_DATA = -3             // boot/init/finish timings + spill counters follow
  val END_OF_STREAM = -4           // worker finished cleanly (may be reused)
  val NULL = -5                    // a null element in the data stream
}
private[spark] object PythonRDD extends Logging {
// remember the broadcasts sent to each worker
private val workerBroadcasts = new mutable.WeakHashMap[Socket, mutable.Set[Long]]()
/** Returns (creating on first use) the mutable set of broadcast ids already
 * shipped to `worker`; access is synchronized on this object. */
def getWorkerBroadcasts(worker: Socket): mutable.Set[Long] = {
  synchronized {
    workerBroadcasts.getOrElseUpdate(worker, new mutable.HashSet[Long]())
  }
}
/**
* Return an RDD of values from an RDD of (Long, Array[Byte]), with preservePartitions=true
*
* This is useful for PySpark to have the partitioner after partitionBy()
*/
def valueOfPair(pair: JavaPairRDD[Long, Array[Byte]]): JavaRDD[Array[Byte]] = {
  // preservesPartitioning = true: dropping keys does not move data between partitions.
  pair.rdd.mapPartitions(it => it.map(_._2), true)
}
/**
* Adapter for calling SparkContext#runJob from Python.
*
* This method will serve an iterator of an array that contains all elements in the RDD
* (effectively a collect()), but allows you to run on a certain subset of partitions,
* or to enable local execution.
*
* @return the port number of a local socket which serves the data collected from this job.
*/
def runJob(
    sc: SparkContext,
    rdd: JavaRDD[Array[Byte]],
    partitions: JArrayList[Int]): Int = {
  type ByteArray = Array[Byte]
  type UnrolledPartition = Array[ByteArray]
  // Collect only the requested partitions, each unrolled to an array of records.
  val allPartitions: Array[UnrolledPartition] =
    sc.runJob(rdd, (x: Iterator[ByteArray]) => x.toArray, partitions.asScala)
  val flattenedPartition: UnrolledPartition = Array.concat(allPartitions: _*)
  // Serve the flattened records over a local socket; returns its port.
  serveIterator(flattenedPartition.iterator,
    s"serve RDD ${rdd.id} with partitions ${partitions.asScala.mkString(",")}")
}
/**
* A helper function to collect an RDD as an iterator, then serve it via socket.
*
* @return the port number of a local socket which serves the data collected from this job.
*/
def collectAndServe[T](rdd: RDD[T]): Int = {
  // Materializes the whole RDD on the driver before serving it.
  serveIterator(rdd.collect().iterator, s"serve RDD ${rdd.id}")
}
/** Like collectAndServe, but streams partitions lazily via toLocalIterator
 * instead of collecting the whole RDD up front. Returns the serving port. */
def toLocalIteratorAndServe[T](rdd: RDD[T]): Int = {
  serveIterator(rdd.toLocalIterator, s"serve toLocalIterator")
}
/** Reads a file of length-prefixed byte records and parallelizes them into an
 * RDD with the requested number of partitions. */
def readRDDFromFile(sc: JavaSparkContext, filename: String, parallelism: Int):
  JavaRDD[Array[Byte]] = {
  val file = new DataInputStream(new FileInputStream(filename))
  try {
    val objs = new mutable.ArrayBuffer[Array[Byte]]
    try {
      // Each record is an int length followed by that many bytes; the loop is
      // terminated by the EOFException raised at end of file.
      while (true) {
        val length = file.readInt()
        val obj = new Array[Byte](length)
        file.readFully(obj)
        objs += obj
      }
    } catch {
      case eof: EOFException => // No-op
    }
    JavaRDD.fromRDD(sc.sc.parallelize(objs, parallelism))
  } finally {
    file.close()
  }
}
/** Wraps a file path in a PythonBroadcast and broadcasts it to executors. */
def readBroadcastFromFile(sc: JavaSparkContext, path: String): Broadcast[PythonBroadcast] = {
  sc.broadcast(new PythonBroadcast(path))
}
/** Serializes each element of `iter` onto `dataOut` using the worker wire
 * format: nulls as the NULL marker, byte arrays length-prefixed, strings as
 * UTF, streams unrolled to bytes, and pairs as key then value (recursively). */
def writeIteratorToStream[T](iter: Iterator[T], dataOut: DataOutputStream) {
  def write(obj: Any): Unit = obj match {
    case null =>
      dataOut.writeInt(SpecialLengths.NULL)
    case arr: Array[Byte] =>
      dataOut.writeInt(arr.length)
      dataOut.write(arr)
    case str: String =>
      writeUTF(str, dataOut)
    case stream: PortableDataStream =>
      write(stream.toArray())
    case (key, value) =>
      write(key)
      write(value)
    case other =>
      // Any other element type is a protocol error, not silently skipped.
      throw new SparkException("Unexpected element type " + other.getClass)
  }
  iter.foreach(write)
}
/**
* Create an RDD from a path using [[org.apache.hadoop.mapred.SequenceFileInputFormat]],
* key and value class.
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def sequenceFile[K, V](
    sc: JavaSparkContext,
    path: String,
    keyClassMaybeNull: String,    // may be null; defaults to hadoop Text
    valueClassMaybeNull: String,  // may be null; defaults to hadoop Text
    keyConverterClass: String,
    valueConverterClass: String,
    minSplits: Int,
    batchSize: Int): JavaRDD[Array[Byte]] = {
  val keyClass = Option(keyClassMaybeNull).getOrElse("org.apache.hadoop.io.Text")
  val valueClass = Option(valueClassMaybeNull).getOrElse("org.apache.hadoop.io.Text")
  val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
  val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
  val rdd = sc.sc.sequenceFile[K, V](path, kc, vc, minSplits)
  // Broadcast the hadoop configuration so the converter can use it on executors.
  val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(sc.hadoopConfiguration()))
  val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
    new WritableToJavaConverter(confBroadcasted))
  JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
  /**
   * Create an RDD from a file path, using an arbitrary [[org.apache.hadoop.mapreduce.InputFormat]],
   * key and value class.
   * A key and/or value converter class can optionally be passed in
   * (see [[org.apache.spark.api.python.Converter]])
   *
   * `confAsMap` is merged on top of the SparkContext's Hadoop configuration before use.
   */
  def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
      sc: JavaSparkContext,
      path: String,
      inputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String],
      batchSize: Int): JavaRDD[Array[Byte]] = {
    val mergedConf = getMergedConf(confAsMap, sc.hadoopConfiguration())
    val rdd =
      newAPIHadoopRDDFromClassNames[K, V, F](sc,
        Some(path), inputFormatClass, keyClass, valueClass, mergedConf)
    // Broadcast the merged conf so executors can convert Writables locally.
    val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(mergedConf))
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new WritableToJavaConverter(confBroadcasted))
    JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
  }
  /**
   * Create an RDD from a [[org.apache.hadoop.conf.Configuration]] converted from a map that is
   * passed in from Python, using an arbitrary [[org.apache.hadoop.mapreduce.InputFormat]],
   * key and value class.
   * A key and/or value converter class can optionally be passed in
   * (see [[org.apache.spark.api.python.Converter]])
   *
   * Unlike [[newAPIHadoopFile]], the input location must be fully described by `confAsMap`.
   */
  def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
      sc: JavaSparkContext,
      inputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String],
      batchSize: Int): JavaRDD[Array[Byte]] = {
    // No merge with the SparkContext conf here: the Python-supplied map is used as-is.
    val conf = PythonHadoopUtil.mapToConf(confAsMap)
    val rdd =
      newAPIHadoopRDDFromClassNames[K, V, F](sc,
        None, inputFormatClass, keyClass, valueClass, conf)
    val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(conf))
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new WritableToJavaConverter(confBroadcasted))
    JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
  }
private def newAPIHadoopRDDFromClassNames[K, V, F <: NewInputFormat[K, V]](
sc: JavaSparkContext,
path: Option[String] = None,
inputFormatClass: String,
keyClass: String,
valueClass: String,
conf: Configuration): RDD[(K, V)] = {
val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
val fc = Utils.classForName(inputFormatClass).asInstanceOf[Class[F]]
if (path.isDefined) {
sc.sc.newAPIHadoopFile[K, V, F](path.get, fc, kc, vc, conf)
} else {
sc.sc.newAPIHadoopRDD[K, V, F](conf, fc, kc, vc)
}
}
  /**
   * Create an RDD from a file path, using an arbitrary [[org.apache.hadoop.mapred.InputFormat]],
   * key and value class.
   * A key and/or value converter class can optionally be passed in
   * (see [[org.apache.spark.api.python.Converter]])
   *
   * `confAsMap` is merged on top of the SparkContext's Hadoop configuration before use.
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]](
      sc: JavaSparkContext,
      path: String,
      inputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String],
      batchSize: Int): JavaRDD[Array[Byte]] = {
    val mergedConf = getMergedConf(confAsMap, sc.hadoopConfiguration())
    val rdd =
      hadoopRDDFromClassNames[K, V, F](sc,
        Some(path), inputFormatClass, keyClass, valueClass, mergedConf)
    // Broadcast the merged conf so executors can convert Writables locally.
    val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(mergedConf))
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new WritableToJavaConverter(confBroadcasted))
    JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
  }
  /**
   * Create an RDD from a [[org.apache.hadoop.conf.Configuration]] converted from a map
   * that is passed in from Python, using an arbitrary [[org.apache.hadoop.mapred.InputFormat]],
   * key and value class
   * A key and/or value converter class can optionally be passed in
   * (see [[org.apache.spark.api.python.Converter]])
   *
   * Unlike [[hadoopFile]], the input location must be fully described by `confAsMap`.
   */
  def hadoopRDD[K, V, F <: InputFormat[K, V]](
      sc: JavaSparkContext,
      inputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String],
      batchSize: Int): JavaRDD[Array[Byte]] = {
    // The Python-supplied map alone drives the job configuration here.
    val conf = PythonHadoopUtil.mapToConf(confAsMap)
    val rdd =
      hadoopRDDFromClassNames[K, V, F](sc,
        None, inputFormatClass, keyClass, valueClass, conf)
    val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(conf))
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new WritableToJavaConverter(confBroadcasted))
    JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
  }
  /**
   * Resolves the class names to runtime classes and builds an old-API Hadoop RDD,
   * either from a file path (when `path` is defined) or purely from `conf`.
   */
  private def hadoopRDDFromClassNames[K, V, F <: InputFormat[K, V]](
      sc: JavaSparkContext,
      path: Option[String] = None,
      inputFormatClass: String,
      keyClass: String,
      valueClass: String,
      conf: Configuration) = {
    val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
    val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
    val fc = Utils.classForName(inputFormatClass).asInstanceOf[Class[F]]
    if (path.isDefined) {
      // NOTE(review): `conf` is not passed in this branch, unlike the new-API
      // variant above which forwards it to newAPIHadoopFile — confirm this
      // asymmetry is intentional.
      sc.sc.hadoopFile(path.get, fc, kc, vc)
    } else {
      sc.sc.hadoopRDD(new JobConf(conf), fc, kc, vc)
    }
  }
def writeUTF(str: String, dataOut: DataOutputStream) {
val bytes = str.getBytes(StandardCharsets.UTF_8)
dataOut.writeInt(bytes.length)
dataOut.write(bytes)
}
  /**
   * Create a socket server and a background thread to serve the data in `items`.
   *
   * The socket server can only accept one connection, or close if no connection
   * in 3 seconds.
   *
   * Once a connection comes in, it tries to serialize all the data in `items`
   * and send them into this connection.
   *
   * The thread will terminate after all the data are sent or any exceptions happen.
   *
   * @return the ephemeral local port the server is listening on, so the caller
   *         (the Python side) can connect to it
   */
  def serveIterator[T](items: Iterator[T], threadName: String): Int = {
    // Port 0 picks an ephemeral port; backlog 1 since only one client is expected.
    val serverSocket = new ServerSocket(0, 1, InetAddress.getByName("localhost"))
    // Close the socket if no connection in 3 seconds
    serverSocket.setSoTimeout(3000)
    new Thread(threadName) {
      setDaemon(true)
      override def run() {
        try {
          val sock = serverSocket.accept()
          val out = new DataOutputStream(new BufferedOutputStream(sock.getOutputStream))
          // Ensure the stream is closed even if serialization fails midway.
          Utils.tryWithSafeFinally {
            writeIteratorToStream(items, out)
          } {
            out.close()
          }
        } catch {
          case NonFatal(e) =>
            logError(s"Error while sending iterator", e)
        } finally {
          // The server only ever serves one connection, so always shut it down.
          serverSocket.close()
        }
      }
    }.start()
    serverSocket.getLocalPort
  }
private def getMergedConf(confAsMap: java.util.HashMap[String, String],
baseConf: Configuration): Configuration = {
val conf = PythonHadoopUtil.mapToConf(confAsMap)
PythonHadoopUtil.mergeConfs(baseConf, conf)
}
  /**
   * Determines the post-conversion key and value classes of `rdd` by fetching its
   * first element (via `rdd.first()`, which evaluates part of the RDD) and
   * converting it locally on the driver.
   */
  private def inferKeyValueTypes[K, V](rdd: RDD[(K, V)], keyConverterClass: String = null,
      valueConverterClass: String = null): (Class[_], Class[_]) = {
    // Peek at an element to figure out key/value types. Since Writables are not serializable,
    // we cannot call first() on the converted RDD. Instead, we call first() on the original RDD
    // and then convert locally.
    val (key, value) = rdd.first()
    val (kc, vc) = getKeyValueConverters(keyConverterClass, valueConverterClass,
      new JavaToWritableConverter)
    (kc.convert(key).getClass, vc.convert(value).getClass)
  }
private def getKeyValueTypes(keyClass: String, valueClass: String):
Option[(Class[_], Class[_])] = {
for {
k <- Option(keyClass)
v <- Option(valueClass)
} yield (Utils.classForName(k), Utils.classForName(v))
}
private def getKeyValueConverters(keyConverterClass: String, valueConverterClass: String,
defaultConverter: Converter[Any, Any]): (Converter[Any, Any], Converter[Any, Any]) = {
val keyConverter = Converter.getInstance(Option(keyConverterClass), defaultConverter)
val valueConverter = Converter.getInstance(Option(valueConverterClass), defaultConverter)
(keyConverter, valueConverter)
}
/**
* Convert an RDD of key-value pairs from internal types to serializable types suitable for
* output, or vice versa.
*/
private def convertRDD[K, V](rdd: RDD[(K, V)],
keyConverterClass: String,
valueConverterClass: String,
defaultConverter: Converter[Any, Any]): RDD[(Any, Any)] = {
val (kc, vc) = getKeyValueConverters(keyConverterClass, valueConverterClass,
defaultConverter)
PythonHadoopUtil.convertRDD(rdd, kc, vc)
}
  /**
   * Output a Python RDD of key-value pairs as a Hadoop SequenceFile using the Writable types
   * we convert from the RDD's key and value types. Note that keys and values can't be
   * [[org.apache.hadoop.io.Writable]] types already, since Writables are not Java
   * `Serializable` and we can't peek at them. The `path` can be on any Hadoop file system.
   */
  def saveAsSequenceFile[K, V, C <: CompressionCodec](
      pyRDD: JavaRDD[Array[Byte]],
      batchSerialized: Boolean,
      path: String,
      compressionCodecClass: String): Unit = {
    // Delegates to saveAsHadoopFile with the SequenceFile output format; key/value
    // classes and converters are all null so they are inferred from the data.
    saveAsHadoopFile(
      pyRDD, batchSerialized, path, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
      null, null, null, null, new java.util.HashMap(), compressionCodecClass)
  }
  /**
   * Output a Python RDD of key-value pairs to any Hadoop file system, using old Hadoop
   * `OutputFormat` in mapred package. Keys and values are converted to suitable output
   * types using either user specified converters or, if not specified,
   * [[org.apache.spark.api.python.JavaToWritableConverter]]. Post-conversion types
   * `keyClass` and `valueClass` are automatically inferred if not specified. The passed-in
   * `confAsMap` is merged with the default Hadoop conf associated with the SparkContext of
   * this RDD.
   */
  def saveAsHadoopFile[K, V, F <: OutputFormat[_, _], C <: CompressionCodec](
      pyRDD: JavaRDD[Array[Byte]],
      batchSerialized: Boolean,
      path: String,
      outputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String],
      compressionCodecClass: String): Unit = {
    val rdd = SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized)
    // Explicit key/value classes win; otherwise infer them from the first element.
    val (kc, vc) = getKeyValueTypes(keyClass, valueClass).getOrElse(
      inferKeyValueTypes(rdd, keyConverterClass, valueConverterClass))
    val mergedConf = getMergedConf(confAsMap, pyRDD.context.hadoopConfiguration)
    // Compression is optional; null means no codec.
    val codec = Option(compressionCodecClass).map(Utils.classForName(_).asInstanceOf[Class[C]])
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new JavaToWritableConverter)
    val fc = Utils.classForName(outputFormatClass).asInstanceOf[Class[F]]
    converted.saveAsHadoopFile(path, kc, vc, fc, new JobConf(mergedConf), codec = codec)
  }
  /**
   * Output a Python RDD of key-value pairs to any Hadoop file system, using new Hadoop
   * `OutputFormat` in mapreduce package. Keys and values are converted to suitable output
   * types using either user specified converters or, if not specified,
   * [[org.apache.spark.api.python.JavaToWritableConverter]]. Post-conversion types
   * `keyClass` and `valueClass` are automatically inferred if not specified. The passed-in
   * `confAsMap` is merged with the default Hadoop conf associated with the SparkContext of
   * this RDD.
   */
  def saveAsNewAPIHadoopFile[K, V, F <: NewOutputFormat[_, _]](
      pyRDD: JavaRDD[Array[Byte]],
      batchSerialized: Boolean,
      path: String,
      outputFormatClass: String,
      keyClass: String,
      valueClass: String,
      keyConverterClass: String,
      valueConverterClass: String,
      confAsMap: java.util.HashMap[String, String]): Unit = {
    val rdd = SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized)
    // Explicit key/value classes win; otherwise infer them from the first element.
    val (kc, vc) = getKeyValueTypes(keyClass, valueClass).getOrElse(
      inferKeyValueTypes(rdd, keyConverterClass, valueConverterClass))
    val mergedConf = getMergedConf(confAsMap, pyRDD.context.hadoopConfiguration)
    val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
      new JavaToWritableConverter)
    val fc = Utils.classForName(outputFormatClass).asInstanceOf[Class[F]]
    converted.saveAsNewAPIHadoopFile(path, kc, vc, fc, mergedConf)
  }
  /**
   * Output a Python RDD of key-value pairs to any Hadoop file system, using a Hadoop conf
   * converted from the passed-in `confAsMap`. The conf should set relevant output params (
   * e.g., output path, output format, etc), in the same way as it would be configured for
   * a Hadoop MapReduce job. Both old and new Hadoop OutputFormat APIs are supported
   * (mapred vs. mapreduce). Keys/values are converted for output using either user specified
   * converters or, by default, [[org.apache.spark.api.python.JavaToWritableConverter]].
   */
  def saveAsHadoopDataset[K, V](
      pyRDD: JavaRDD[Array[Byte]],
      batchSerialized: Boolean,
      confAsMap: java.util.HashMap[String, String],
      keyConverterClass: String,
      valueConverterClass: String,
      useNewAPI: Boolean): Unit = {
    val conf = PythonHadoopUtil.mapToConf(confAsMap)
    val converted = convertRDD(SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized),
      keyConverterClass, valueConverterClass, new JavaToWritableConverter)
    // useNewAPI selects between the mapreduce (new) and mapred (old) output paths.
    if (useNewAPI) {
      converted.saveAsNewAPIHadoopDataset(conf)
    } else {
      converted.saveAsHadoopDataset(new JobConf(conf))
    }
  }
}
/** Java-API function that decodes a byte array as a UTF-8 string. */
private class BytesToString
  extends org.apache.spark.api.java.function.Function[Array[Byte], String] {
  override def call(arr: Array[Byte]): String = new String(arr, StandardCharsets.UTF_8)
}
/**
 * Internal class that acts as an `AccumulatorV2` for Python accumulators. Inside, it
 * collects a list of pickled strings that we pass to Python through a socket.
 */
private[spark] class PythonAccumulatorV2(
    @transient private val serverHost: String,
    private val serverPort: Int)
  extends CollectionAccumulator[Array[Byte]] {
  Utils.checkHost(serverHost, "Expected hostname")
  // Buffer size for the socket's output stream, configurable via spark.buffer.size.
  val bufferSize = SparkEnv.get.conf.getInt("spark.buffer.size", 65536)
  /**
   * We try to reuse a single Socket to transfer accumulator updates, as they are all added
   * by the DAGScheduler's single-threaded RpcEndpoint anyway.
   */
  @transient private var socket: Socket = _
  // Lazily opens (or reopens, if closed) the connection to the Python accumulator server.
  private def openSocket(): Socket = synchronized {
    if (socket == null || socket.isClosed) {
      socket = new Socket(serverHost, serverPort)
    }
    socket
  }
  // Need to override so the types match with PythonFunction
  override def copyAndReset(): PythonAccumulatorV2 = new PythonAccumulatorV2(serverHost, serverPort)
  override def merge(other: AccumulatorV2[Array[Byte], JList[Array[Byte]]]): Unit = synchronized {
    val otherPythonAccumulator = other.asInstanceOf[PythonAccumulatorV2]
    // This conditional isn't strictly speaking needed - merging only currently happens on the
    // driver program - but that isn't guaranteed, so handle the worker case too.
    if (serverHost == null) {
      // We are on the worker
      super.merge(otherPythonAccumulator)
    } else {
      // This happens on the master, where we pass the updates to Python through a socket
      val socket = openSocket()
      val in = socket.getInputStream
      val out = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream, bufferSize))
      val values = other.value
      // Wire format: number of updates, then each update as a length-prefixed byte array.
      out.writeInt(values.size)
      for (array <- values.asScala) {
        out.writeInt(array.length)
        out.write(array)
      }
      out.flush()
      // Wait for a byte from the Python side as an acknowledgement
      val byteRead = in.read()
      if (byteRead == -1) {
        throw new SparkException("EOF reached before Python server acknowledged")
      }
    }
  }
}
/**
 * A Wrapper for Python Broadcast, which is written into disk by Python. It also will
 * write the data into disk after deserialization, then Python can read it from disks.
 */
// scalastyle:off no.finalize
private[spark] class PythonBroadcast(@transient var path: String) extends Serializable
  with Logging {
  /**
   * Read data from disks, then copy it to `out`
   * (custom Java serialization hook: the broadcast payload is streamed from the
   * file at `path` rather than serializing the path itself).
   */
  private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
    val in = new FileInputStream(new File(path))
    try {
      Utils.copyStream(in, out)
    } finally {
      in.close()
    }
  }
  /**
   * Write data into disk, using randomly generated name.
   * On the deserializing side a fresh local temp file is created and `path` is
   * re-pointed at it, so Python can read the data locally.
   */
  private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
    val dir = new File(Utils.getLocalDir(SparkEnv.get.conf))
    val file = File.createTempFile("broadcast", "", dir)
    path = file.getAbsolutePath
    val out = new FileOutputStream(file)
    Utils.tryWithSafeFinally {
      Utils.copyStream(in, out)
    } {
      out.close()
    }
  }
  /**
   * Delete the file once the object is GCed.
   */
  override def finalize() {
    if (!path.isEmpty) {
      val file = new File(path)
      if (file.exists()) {
        if (!file.delete()) {
          logWarning(s"Error deleting ${file.getPath}")
        }
      }
    }
  }
}
// scalastyle:on no.finalize
| jianran/spark | core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala | Scala | apache-2.0 | 37,865 |
package se.gigurra.renderer.glimpl.vbos
import java.nio.Buffer
import javax.media.opengl.GL3ES3
import se.gigurra.util.Mutate.Mutable
class Vbo(
  val _gl_init: GL3ES3, // GL context used ONLY during construction
  val target: Int, // e.g. GL.GL_ARRAY_BUFFER
  val byteCapacity: Int, // The size in bytes of the entire vbo
  val usage: Int, // e.g. GL.GL_DYNAMIC_DRAW
  val componentType: Int // e.g. GL.GL_FLOAT; stored for callers — not read by this class itself
  ) {
  // Constructor side effects on _gl_init: generate a buffer id, bind it, and
  // allocate byteCapacity bytes of uninitialized storage with the usage hint.
  val id = Array(0).mutate { _gl_init.glGenBuffers(1, _, 0) }(0)
  _gl_init.glBindBuffer(target, id)
  _gl_init.glBufferData(target, byteCapacity, null, usage)
  ///////////////////////////////////////////////////////////////
  /** Binds this buffer object on the given GL context. */
  def bind(gl: GL3ES3) {
    gl.glBindBuffer(target, id)
  }
  /** Uploads nBytes from `data` at byte offset `tgtOffs`; binds first when `doBind` is set. */
  def uploadBytes(gl: GL3ES3, tgtOffs: Int, nBytes: Int, data: Buffer, doBind: Boolean) {
    if (tgtOffs + nBytes > byteCapacity)
      throw new RuntimeException("Trying to upload data past size of Vbo!")
    if (doBind)
      bind(gl)
    gl.glBufferSubData(target, tgtOffs, nBytes, data)
  }
  /** Deletes the GL buffer object; the instance must not be used afterwards. */
  def dispose(gl: GL3ES3) {
    gl.glDeleteBuffers(1, Array(id), 0)
  }
}
} | GiGurra/gigurra-scala-2drenderer | src/main/scala/se/gigurra/renderer/glimpl/vbos/Vbo.scala | Scala | mit | 1,042 |
package xyz.nabijaczleweli.lonning.loutput
import scala.annotation.elidable
import scala.annotation.elidable._
/** @author Jędrzej
* @since 22.04.14
*/
object NullLOutput extends LogOutput {

  // A LogOutput that discards every message: each member is a no-op returning
  // `true`. Members annotated with @elidable return `false` when elided, since
  // an elided Boolean call yields the type's default value.

  @elidable(WARNING) @inline
  override def warn(s: String): Boolean = true

  @elidable(FINE) @inline
  override def info(s: String): Boolean = true

  @elidable(WARNING) @inline
  override def debug(s: String): Boolean = true

  override def log(s: String): Boolean = true

  @elidable(WARNING) @inline
  override def warn(s: String, throwable: Throwable): Boolean = true

  @elidable(WARNING) @inline
  override def warn(s: String, varargs: Any*): Boolean = true

  @elidable(WARNING) @inline
  override def warn: Boolean = true

  @elidable(FINE) @inline
  override def info(s: String, throwable: Throwable): Boolean = true

  @elidable(FINE) @inline
  override def info(s: String, varargs: Any*): Boolean = true

  @elidable(FINE) @inline
  override def info: Boolean = true

  @elidable(WARNING) @inline
  override def debug(s: String, throwable: Throwable): Boolean = true

  @elidable(WARNING) @inline
  override def debug(s: String, varargs: Any*): Boolean = true

  @elidable(WARNING) @inline
  override def debug: Boolean = true

  @elidable(FINER) @inline
  override def log(s: String, throwable: Throwable): Boolean = true

  @elidable(FINER) @inline
  override def log(s: String, varargs: Any*): Boolean = true

  @elidable(FINER) @inline
  override def log: Boolean = true

  @elidable(SEVERE) @inline
  override def severe(s: String, throwable: Throwable): Boolean = true

  @elidable(SEVERE) @inline
  override def severe(s: String, varargs: Any*): Boolean = true

  @elidable(SEVERE) @inline
  override def severe: Boolean = true

  @elidable(SEVERE) @inline
  override def severe(s: String): Boolean = true

  // ERROR-level members are never elided, but are still no-ops here.

  @inline
  override def ERROR(s: String): Boolean = true

  @inline
  override def ERROR: Boolean = true

  @inline
  override def ERROR(s: String, varargs: Any*): Boolean = true

  @inline
  override def ERROR(s: String, throwable: Throwable): Boolean = true
}
| nabijaczleweli/Scala-Game-of-Life | src/main/scala/xyz/nabijaczleweli/lonning/loutput/NullLOutput.scala | Scala | mit | 4,098 |
package utilities
import org.apache.spark.rdd.RDD
import scala.util.parsing.json.JSON
import scalaj.http.{HttpResponse, _}
class RequestExecutor {
  // The elements of the original RDD are separately processed; the mapPartitions method is used to optimize performance
  def processViaRestService(rdd: RDD[String]): RDD[String] = rdd.mapPartitions(x => executeRestRequest(composeQuery(x)))
  // Each request involves the composition of a query to the service. In this case, the query is delivered to the DW API.
  // NOTE: because results are prepended (::=), the returned iterator is in REVERSE
  // order relative to the input language ids.
  def composeQuery(input: Iterator[String]): Iterator[String] = {
    var queryList = List[String]()
    while(input.hasNext) {
      // val dw_query = "http://www.dw.com/api/search/global?terms=" + "madrid" + "&languageId=" + input.next() + "&contentTypes=Article,Video&startDate=2012-01-01" + "&endDate=" + "2015-10-31" + "&sortByDate=true&pageIndex=1&asTeaser=false"
      val temp_q = "http://www.dw.com/api/search/global?terms=madrid&languageId=" + input.next() +
        "&contentTypes=Article,Video&startDate=2012-06-01&endDate=2015-06-22&sortByDate=true&pageIndex=1&asTeaser=false"
      println(temp_q)
      queryList .::= (temp_q)
    }
    queryList.iterator
  }
  // Each query is delivered to the service and the response is stored.
  // Error responses are silently dropped; successful bodies come back in
  // REVERSE order relative to the input queries (list prepend).
  def executeRestRequest(query: Iterator[String]): Iterator[String] = {
    var queryResponse = List[String]()
    while (query.hasNext) {
      // The REST service is queried and the response (JSON format) is obtained
      val response: HttpResponse[String] = Http(query.next()).timeout(connTimeoutMs = 10000, readTimeoutMs = 50000)
        .asString
      // The response in JSON format is processed
      if (response.isNotError)
        queryResponse .::= (response.body)
    }
    queryResponse.iterator
  }
}
object RequestExecutor {

  /**
   * Executes an HTTP request and returns the response body, or the empty JSON
   * object "{}" when the request fails or the server answers with an error status.
   *
   * @param method         "POST" sends `body` as post data; anything else issues a GET
   * @param query          full URL to call
   * @param requestTimeout read timeout in milliseconds (connect timeout is fixed at 10s)
   * @param requestDelay   delay in milliseconds applied before issuing the request
   * @param body           post payload, only used when `method == "POST"`
   */
  def executeRequest(method: String, query: String, requestTimeout: Int = 50000, requestDelay: Int = 500, body: String = ""): String = {
    if (method == "POST") {
      executePostRequest(query, body, requestTimeout, requestDelay)
    } else {
      executeGetRequest(query, requestTimeoutMs = requestTimeout, requestDelayMs = requestDelay)
    }
  }

  /** Issues a GET request; returns "{}" on an HTTP error status or any exception. */
  def executeGetRequest(query: String, requestTimeoutMs: Int, requestDelayMs: Int): String = {
    println(s"Waiting ${requestDelayMs}")
    Thread.sleep(requestDelayMs)
    try {
      println(s"Executing query ${query}")
      println(s"Waiting response for ${requestTimeoutMs} ms")
      val response: HttpResponse[String] = Http(query).timeout(connTimeoutMs = 10000, readTimeoutMs = requestTimeoutMs).asString
      if (response.isError) {
        println(s"HttpError: $query . ${response.body} ${response.code}")
        // BUG FIX: the "{}" fallback used to be a discarded expression, so the
        // error body was returned anyway. It is now the actual return value.
        "{}"
      } else {
        response.body
      }
    } catch {
      case e: Exception =>
        println("Unexpected error executing get request")
        println(s"Error: ${e.getMessage}\n")
        "{}"
    }
  }

  /** Issues a POST request with `postBody`; returns "{}" on an HTTP error status or any exception. */
  def executePostRequest(query: String, postBody: String, requestTimeoutMs: Int, requestDelayMs: Int): String = {
    println("Waiting")
    Thread.sleep(requestDelayMs)
    try {
      val response: HttpResponse[String] = Http(query).postData(postBody).timeout(connTimeoutMs = 10000, readTimeoutMs = requestTimeoutMs).asString
      if (response.isError) {
        println(s"HttpError: $query . ${response.body} ${response.code}")
        // BUG FIX: same discarded-expression bug as in executeGetRequest.
        "{}"
      } else {
        response.body
      }
    } catch {
      case e: Exception =>
        println("Unexpected error executing post request")
        "{}"
    }
  }
}
| canademar/me_extractors | DockerSparkPipeline/src/main/scala/utilities/RequestExecutor.scala | Scala | gpl-2.0 | 3,823 |
/**
* Copyright (C) 2019 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.xforms
import enumeratum.values.{IntEnum, IntEnumEntry}
import org.scalajs.dom
object BrowserUtils {
  /** Kind of navigation that produced the current page load; `value` matches the
    * integer codes of `window.performance.navigation.type`. */
  sealed abstract class NavigationType(val value: Int) extends IntEnumEntry
  object NavigationType extends IntEnum[NavigationType] {
    // Required by enumeratum: collects the case objects below via macro.
    val values = findValues
    case object Navigate extends NavigationType(value = 0)
    case object Reload extends NavigationType(value = 1)
    case object BackForward extends NavigationType(value = 2)
    case object Reserved extends NavigationType(value = 255)
  }
  // https://stackoverflow.com/questions/5004978/check-if-page-gets-reloaded-or-refreshed-in-javascript/53307588#53307588
  // https://www.w3.org/TR/navigation-timing/
  // https://www.w3.org/TR/resource-timing-2/
  /** Maps the browser's navigation-timing type code to a [[NavigationType]].
    * Throws if the code is not one of the declared values. */
  def getNavigationType: NavigationType =
    NavigationType.withValue(dom.window.performance.navigation.`type`)
}
| orbeon/orbeon-forms | xforms-web/src/main/scala/org/orbeon/xforms/BrowserUtils.scala | Scala | lgpl-2.1 | 1,532 |
package net.liftweb.util
import scala.language.implicitConversions
/**
 * This trait marks something that can be promoted into a String.
 * The companion object has helpful conversions from Int,
 * Symbol, Long, Boolean, and `ToJsCmd` values; the promoted text is
 * exposed through `toString`.
 */
trait StringPromotable
object StringPromotable {

  /** Builds a [[StringPromotable]] whose `toString` is the given, eagerly computed text. */
  private def promote(text: String): StringPromotable =
    new StringPromotable {
      override val toString = text
    }

  implicit def jsCmdToStrPromo(in: ToJsCmd): StringPromotable = promote(in.toJsCmd)

  implicit def jsCmdToStrPromo(in: (_, ToJsCmd)): StringPromotable = promote(in._2.toJsCmd)

  implicit def intToStrPromo(in: Int): StringPromotable = promote(in.toString)

  implicit def symbolToStrPromo(in: Symbol): StringPromotable = promote(in.name)

  implicit def longToStrPromo(in: Long): StringPromotable = promote(in.toString)

  implicit def booleanToStrPromo(in: Boolean): StringPromotable = promote(in.toString)
}
| lzpfmh/framework-2 | core/util/src/main/scala/net/liftweb/util/StringPromotable.scala | Scala | apache-2.0 | 1,116 |
object Main {
  /** Reads two integers, one per line, from standard input and prints their product. */
  def main(args: Array[String]): Unit = {
    val first = io.StdIn.readLine().toInt
    val second = io.StdIn.readLine().toInt
    println(s"PROD = ${first * second}")
  }
}
| deniscostadsc/playground | solutions/beecrowd/1004/1004.scala | Scala | mit | 180 |
package com.datastax.spark.connector.rdd.reader
import java.io.Serializable
import com.datastax.driver.core.Row
import com.datastax.spark.connector.{ColumnRef, CassandraRow}
import com.datastax.spark.connector.cql.TableDef
import com.datastax.spark.connector.mapper.ColumnMapper
import com.datastax.spark.connector.types.TypeConverter
import com.datastax.spark.connector.util.MagicalTypeTricks.{DoesntHaveImplicit, IsNotSubclassOf}
import scala.annotation.implicitNotFound
import scala.reflect.runtime.universe._
/** Creates [[RowReader]] objects prepared for reading rows from the given Cassandra table. */
@implicitNotFound("No RowReaderFactory can be found for this type")
trait RowReaderFactory[T] {
  /** Builds a reader for the given table and the columns selected by the query. */
  def rowReader(table: TableDef, selectedColumns: IndexedSeq[ColumnRef]): RowReader[T]
  /** Runtime class of the target row type `T`. */
  def targetClass: Class[T]
}
/** Helper for implementing `RowReader` objects that can be used as `RowReaderFactory` objects. */
trait ThisRowReaderAsFactory[T] extends RowReaderFactory[T] {
  this: RowReader[T] =>
  // The reader acts as its own factory: the table/column arguments are ignored.
  def rowReader(table: TableDef, selectedColumns: IndexedSeq[ColumnRef]): RowReader[T] = this
}
trait LowPriorityRowReaderFactoryImplicits {
  /** Evidence marker: `T` is a plain single-column value (not a pair or triple). */
  trait IsSingleColumnType[T]
  // Evidence-only implicit; the returned value is never dereferenced, so null is safe here.
  implicit def isSingleColumnType[T](
    implicit
    ev1: TypeConverter[T],
    ev2: T IsNotSubclassOf (_, _),
    ev3: T IsNotSubclassOf (_, _, _)): IsSingleColumnType[T] = null
  // Reflection/column-mapper based factory, used for case-class-like row types.
  implicit def classBasedRowReaderFactory[R <: Serializable](
    implicit
    tt: TypeTag[R],
    cm: ColumnMapper[R],
    ev: R DoesntHaveImplicit IsSingleColumnType[R]): RowReaderFactory[R] =
    new ClassBasedRowReaderFactory[R]
  // Factory for single-column value types, guarded by the evidence above.
  implicit def valueRowReaderFactory[T](
    implicit
    ev1: TypeConverter[T],
    ev2: IsSingleColumnType[T]): RowReaderFactory[T] =
    new ValueRowReaderFactory[T]()
}
object RowReaderFactory extends LowPriorityRowReaderFactoryImplicits {
  /** Default `RowReader`: reads a `Row` into serializable [[CassandraRow]] */
  implicit object GenericRowReader$
    extends RowReader[CassandraRow] with ThisRowReaderAsFactory[CassandraRow] {
    override def targetClass: Class[CassandraRow] = classOf[CassandraRow]
    override def read(row: Row, columnNames: Array[String]) = {
      assert(row.getColumnDefinitions.size() == columnNames.size,
        "Number of columns in a row must match the number of columns in the table metadata")
      CassandraRow.fromJavaDriverRow(row, columnNames)
    }
    // None here: no specific column subset is requested — TODO confirm the
    // exact semantics of None against the RowReader contract.
    override def neededColumns: Option[Seq[ColumnRef]] = None
  }
}
| maasg/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/reader/RowReaderFactory.scala | Scala | apache-2.0 | 2,489 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import kafka.api.LeaderAndIsr
import kafka.log.LogConfig
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.zk.{KafkaZkClient, TopicPartitionStateZNode}
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import kafka.zookeeper._
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.KeeperException.Code
import org.apache.zookeeper.data.Stat
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.{Before, Test}
import org.scalatest.junit.JUnitSuite
import scala.collection.mutable
/**
 * Unit tests for the controller's [[PartitionStateMachine]].
 *
 * Each test drives `handleStateChanges` against mocked collaborators
 * (EasyMock mocks of [[KafkaZkClient]], [[ControllerBrokerRequestBatch]] and
 * [[TopicDeletionManager]]) and asserts the resulting in-memory partition
 * state and, where relevant, the `offlinePartitionCount` metric.
 */
class PartitionStateMachineTest extends JUnitSuite {
  private var controllerContext: ControllerContext = null
  private var mockZkClient: KafkaZkClient = null
  private var mockControllerBrokerRequestBatch: ControllerBrokerRequestBatch = null
  private var mockTopicDeletionManager: TopicDeletionManager = null
  private var partitionState: mutable.Map[TopicPartition, PartitionState] = null
  private var partitionStateMachine: PartitionStateMachine = null

  // Fixed fixture: a single broker and a single partition "t-0".
  private val brokerId = 5
  private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "zkConnect"))
  private val controllerEpoch = 50
  private val partition = new TopicPartition("t", 0)
  private val partitions = Seq(partition)

  @Before
  def setUp(): Unit = {
    controllerContext = new ControllerContext
    controllerContext.epoch = controllerEpoch
    mockZkClient = EasyMock.createMock(classOf[KafkaZkClient])
    mockControllerBrokerRequestBatch = EasyMock.createMock(classOf[ControllerBrokerRequestBatch])
    mockTopicDeletionManager = EasyMock.createMock(classOf[TopicDeletionManager])
    partitionState = mutable.Map.empty[TopicPartition, PartitionState]
    partitionStateMachine = new PartitionStateMachine(config, new StateChangeLogger(brokerId, true, None), controllerContext,
      mockZkClient, partitionState, mockControllerBrokerRequestBatch)
    partitionStateMachine.setTopicDeletionManager(mockTopicDeletionManager)
  }

  @Test
  def testNonexistentPartitionToNewPartitionTransition(): Unit = {
    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    assertEquals(NewPartition, partitionState(partition))
  }

  @Test
  def testInvalidNonexistentPartitionToOnlinePartitionTransition(): Unit = {
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    assertEquals(NonExistentPartition, partitionState(partition))
  }

  @Test
  def testInvalidNonexistentPartitionToOfflinePartitionTransition(): Unit = {
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(NonExistentPartition, partitionState(partition))
  }

  @Test
  def testNewPartitionToOnlinePartitionTransition(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, NewPartition)
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.createTopicPartitionStatesRaw(Map(partition -> leaderIsrAndControllerEpoch), controllerContext.epochZkVersion))
      .andReturn(Seq(CreateResponse(Code.OK, null, Some(partition), null, ResponseMetadata(0, 0))))
    EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
      partition, leaderIsrAndControllerEpoch, Seq(brokerId), isNew = true))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testNewPartitionToOnlinePartitionTransitionZkUtilsExceptionFromCreateStates(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, NewPartition)
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.createTopicPartitionStatesRaw(Map(partition -> leaderIsrAndControllerEpoch), controllerContext.epochZkVersion))
      .andThrow(new ZooKeeperClientException("test"))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    // The ZK failure must leave the partition in its previous state.
    assertEquals(NewPartition, partitionState(partition))
  }

  @Test
  def testNewPartitionToOnlinePartitionTransitionErrorCodeFromCreateStates(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, NewPartition)
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.createTopicPartitionStatesRaw(Map(partition -> leaderIsrAndControllerEpoch), controllerContext.epochZkVersion))
      .andReturn(Seq(CreateResponse(Code.NODEEXISTS, null, Some(partition), null, ResponseMetadata(0, 0))))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(NewPartition, partitionState(partition))
  }

  @Test
  def testNewPartitionToOfflinePartitionTransition(): Unit = {
    partitionState.put(partition, NewPartition)
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(OfflinePartition, partitionState(partition))
  }

  @Test
  def testInvalidNewPartitionToNonexistentPartitionTransition(): Unit = {
    partitionState.put(partition, NewPartition)
    partitionStateMachine.handleStateChanges(partitions, NonExistentPartition)
    assertEquals(NewPartition, partitionState(partition))
  }

  @Test
  def testOnlinePartitionToOnlineTransition(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, OnlinePartition)
    val leaderAndIsr = LeaderAndIsr(brokerId, List(brokerId))
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
    controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
    val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
      .andReturn(Seq(GetDataResponse(Code.OK, null, Some(partition),
        TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat, ResponseMetadata(0, 0))))
    val leaderAndIsrAfterElection = leaderAndIsr.newLeader(brokerId)
    val updatedLeaderAndIsr = leaderAndIsrAfterElection.withZkVersion(2)
    EasyMock.expect(mockZkClient.updateLeaderAndIsr(Map(partition -> leaderAndIsrAfterElection), controllerEpoch, controllerContext.epochZkVersion))
      .andReturn(UpdateLeaderAndIsrResult(Map(partition -> updatedLeaderAndIsr), Seq.empty, Map.empty))
    EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
      partition, LeaderIsrAndControllerEpoch(updatedLeaderAndIsr, controllerEpoch), Seq(brokerId), isNew = false))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(PreferredReplicaPartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testOnlinePartitionToOnlineTransitionForControlledShutdown(): Unit = {
    val otherBrokerId = brokerId + 1
    controllerContext.setLiveBrokerAndEpochs(Map(
      TestUtils.createBrokerAndEpoch(brokerId, "host", 0),
      TestUtils.createBrokerAndEpoch(otherBrokerId, "host", 0)))
    controllerContext.shuttingDownBrokerIds.add(brokerId)
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId, otherBrokerId))
    partitionState.put(partition, OnlinePartition)
    val leaderAndIsr = LeaderAndIsr(brokerId, List(brokerId, otherBrokerId))
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
    controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
    val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
      .andReturn(Seq(GetDataResponse(Code.OK, null, Some(partition),
        TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat, ResponseMetadata(0, 0))))
    val leaderAndIsrAfterElection = leaderAndIsr.newLeaderAndIsr(otherBrokerId, List(otherBrokerId))
    val updatedLeaderAndIsr = leaderAndIsrAfterElection.withZkVersion(2)
    EasyMock.expect(mockZkClient.updateLeaderAndIsr(Map(partition -> leaderAndIsrAfterElection), controllerEpoch, controllerContext.epochZkVersion))
      .andReturn(UpdateLeaderAndIsrResult(Map(partition -> updatedLeaderAndIsr), Seq.empty, Map.empty))
    // The leaderAndIsr request should be sent to both brokers, including the shutting down one
    EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId, otherBrokerId),
      partition, LeaderIsrAndControllerEpoch(updatedLeaderAndIsr, controllerEpoch), Seq(brokerId, otherBrokerId),
      isNew = false))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(ControlledShutdownPartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testOnlinePartitionToOfflineTransition(): Unit = {
    partitionState.put(partition, OnlinePartition)
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(OfflinePartition, partitionState(partition))
  }

  @Test
  def testInvalidOnlinePartitionToNonexistentPartitionTransition(): Unit = {
    partitionState.put(partition, OnlinePartition)
    partitionStateMachine.handleStateChanges(partitions, NonExistentPartition)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testInvalidOnlinePartitionToNewPartitionTransition(): Unit = {
    partitionState.put(partition, OnlinePartition)
    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testOfflinePartitionToOnlinePartitionTransition(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, OfflinePartition)
    val leaderAndIsr = LeaderAndIsr(LeaderAndIsr.NoLeader, List(brokerId))
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
    controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
    val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
      .andReturn(Seq(GetDataResponse(Code.OK, null, Some(partition),
        TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat, ResponseMetadata(0, 0))))
    EasyMock.expect(mockZkClient.getLogConfigs(Seq.empty, config.originals()))
      .andReturn((Map(partition.topic -> LogConfig()), Map.empty))
    val leaderAndIsrAfterElection = leaderAndIsr.newLeader(brokerId)
    val updatedLeaderAndIsr = leaderAndIsrAfterElection.withZkVersion(2)
    EasyMock.expect(mockZkClient.updateLeaderAndIsr(Map(partition -> leaderAndIsrAfterElection), controllerEpoch, controllerContext.epochZkVersion))
      .andReturn(UpdateLeaderAndIsrResult(Map(partition -> updatedLeaderAndIsr), Seq.empty, Map.empty))
    EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
      partition, LeaderIsrAndControllerEpoch(updatedLeaderAndIsr, controllerEpoch), Seq(brokerId), isNew = false))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OnlinePartition, partitionState(partition))
  }

  @Test
  def testOfflinePartitionToOnlinePartitionTransitionZkUtilsExceptionFromStateLookup(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, OfflinePartition)
    val leaderAndIsr = LeaderAndIsr(LeaderAndIsr.NoLeader, List(brokerId))
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
    controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
      .andThrow(new ZooKeeperClientException(""))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OfflinePartition, partitionState(partition))
  }

  @Test
  def testOfflinePartitionToOnlinePartitionTransitionErrorCodeFromStateLookup(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))
    controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    partitionState.put(partition, OfflinePartition)
    val leaderAndIsr = LeaderAndIsr(LeaderAndIsr.NoLeader, List(brokerId))
    val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
    controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
    val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
    EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
      .andReturn(Seq(GetDataResponse(Code.NONODE, null, Some(partition),
        TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat, ResponseMetadata(0, 0))))
    EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
    EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Option(OfflinePartitionLeaderElectionStrategy))
    EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
    assertEquals(OfflinePartition, partitionState(partition))
  }

  @Test
  def testOfflinePartitionToNonexistentPartitionTransition(): Unit = {
    partitionState.put(partition, OfflinePartition)
    partitionStateMachine.handleStateChanges(partitions, NonExistentPartition)
    assertEquals(NonExistentPartition, partitionState(partition))
  }

  @Test
  def testInvalidOfflinePartitionToNewPartitionTransition(): Unit = {
    partitionState.put(partition, OfflinePartition)
    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    assertEquals(OfflinePartition, partitionState(partition))
  }

  /**
   * Records the full set of ZK-client expectations needed for a successful
   * offline leader election of `partitions`: state lookup, log-config lookup
   * and the leader-and-ISR update.
   */
  private def prepareMockToElectLeaderForPartitions(partitions: Seq[TopicPartition]): Unit = {
    val leaderAndIsr = LeaderAndIsr(brokerId, List(brokerId))
    def prepareMockToGetTopicPartitionsStatesRaw(): Unit = {
      val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
      val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
      val getDataResponses = partitions.map {p => GetDataResponse(Code.OK, null, Some(p),
        TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat, ResponseMetadata(0, 0))}
      EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions))
        .andReturn(getDataResponses)
    }
    prepareMockToGetTopicPartitionsStatesRaw()

    def prepareMockToGetLogConfigs(): Unit = {
      val topicsForPartitionsWithNoLiveInSyncReplicas = Seq()
      // Explicit tuple: getLogConfigs returns a pair; the previous call relied
      // on (deprecated) argument auto-tupling.
      EasyMock.expect(mockZkClient.getLogConfigs(topicsForPartitionsWithNoLiveInSyncReplicas, config.originals()))
        .andReturn((Map.empty, Map.empty))
    }
    prepareMockToGetLogConfigs()

    def prepareMockToUpdateLeaderAndIsr(): Unit = {
      val updatedLeaderAndIsr = partitions.map { partition =>
        partition -> leaderAndIsr.newLeaderAndIsr(brokerId, List(brokerId))
      }.toMap
      EasyMock.expect(mockZkClient.updateLeaderAndIsr(updatedLeaderAndIsr, controllerEpoch, controllerContext.epochZkVersion))
        .andReturn(UpdateLeaderAndIsrResult(updatedLeaderAndIsr, Seq.empty, Map.empty))
    }
    prepareMockToUpdateLeaderAndIsr()
  }

  /**
   * This method tests changing partitions' state to OfflinePartition increments the offlinePartitionCount,
   * and changing their state back to OnlinePartition decrements the offlinePartitionCount
   */
  @Test
  def testUpdatingOfflinePartitionsCount(): Unit = {
    controllerContext.setLiveBrokerAndEpochs(Map(TestUtils.createBrokerAndEpoch(brokerId, "host", 0)))

    val partitionIds = Seq(0, 1, 2, 3)
    val topic = "test"
    // Use the `topic` val (previously the literal "test" was duplicated here)
    // so the partitions always match the deletion-manager expectation below.
    val partitions = partitionIds.map(new TopicPartition(topic, _))

    partitions.foreach { partition =>
      controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    }

    EasyMock.expect(mockTopicDeletionManager.isTopicWithDeletionStarted(topic)).andReturn(false)
    EasyMock.expectLastCall().anyTimes()
    prepareMockToElectLeaderForPartitions(partitions)
    EasyMock.replay(mockZkClient, mockTopicDeletionManager)

    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(s"There should be ${partitions.size} offline partition(s)", partitions.size, partitionStateMachine.offlinePartitionCount)

    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, Some(OfflinePartitionLeaderElectionStrategy))
    assertEquals(s"There should be no offline partition(s)", 0, partitionStateMachine.offlinePartitionCount)
  }

  /**
   * This method tests if a topic is being deleted, then changing partitions' state to OfflinePartition makes no change
   * to the offlinePartitionCount
   */
  @Test
  def testNoOfflinePartitionsChangeForTopicsBeingDeleted(): Unit = {
    val partitionIds = Seq(0, 1, 2, 3)
    val topic = "test"
    val partitions = partitionIds.map(new TopicPartition(topic, _))

    EasyMock.expect(mockTopicDeletionManager.isTopicWithDeletionStarted(topic)).andReturn(true)
    EasyMock.expectLastCall().anyTimes()
    EasyMock.replay(mockTopicDeletionManager)

    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(s"There should be no offline partition(s)", 0, partitionStateMachine.offlinePartitionCount)
  }

  /**
   * This method tests if some partitions are already in OfflinePartition state,
   * then deleting their topic will decrement the offlinePartitionCount.
   * For example, if partitions test-0, test-1, test-2, test-3 are in OfflinePartition state,
   * and the offlinePartitionCount is 4, trying to delete the topic "test" means these
   * partitions no longer qualify as offline-partitions, and the offlinePartitionCount
   * should be decremented to 0.
   */
  @Test
  def testUpdatingOfflinePartitionsCountDuringTopicDeletion(): Unit = {
    val partitionIds = Seq(0, 1, 2, 3)
    val topic = "test"
    val partitions = partitionIds.map(new TopicPartition(topic, _))
    partitions.foreach { partition =>
      controllerContext.updatePartitionReplicaAssignment(partition, Seq(brokerId))
    }

    val props = TestUtils.createBrokerConfig(brokerId, "zkConnect")
    props.put(KafkaConfig.DeleteTopicEnableProp, "true")
    val customConfig = KafkaConfig.fromProps(props)

    // Replica state machine stub that reports no replicas in any deletion state,
    // so the deletion manager proceeds through its bookkeeping.
    def createMockReplicaStateMachine() = {
      val replicaStateMachine: ReplicaStateMachine = EasyMock.createMock(classOf[ReplicaStateMachine])
      EasyMock.expect(replicaStateMachine.areAllReplicasForTopicDeleted(topic)).andReturn(false).anyTimes()
      EasyMock.expect(replicaStateMachine.isAtLeastOneReplicaInDeletionStartedState(topic)).andReturn(false).anyTimes()
      EasyMock.expect(replicaStateMachine.isAnyReplicaInState(topic, ReplicaDeletionIneligible)).andReturn(false).anyTimes()
      EasyMock.expect(replicaStateMachine.replicasInState(topic, ReplicaDeletionIneligible)).andReturn(Set.empty).anyTimes()
      EasyMock.expect(replicaStateMachine.replicasInState(topic, ReplicaDeletionStarted)).andReturn(Set.empty).anyTimes()
      EasyMock.expect(replicaStateMachine.replicasInState(topic, ReplicaDeletionSuccessful)).andReturn(Set.empty).anyTimes()
      EasyMock.expect(replicaStateMachine.handleStateChanges(EasyMock.anyObject[Seq[PartitionAndReplica]],
        EasyMock.anyObject[ReplicaState], EasyMock.anyObject[Callbacks]))
      EasyMock.expectLastCall().anyTimes()
      replicaStateMachine
    }
    val replicaStateMachine = createMockReplicaStateMachine()

    partitionStateMachine = new PartitionStateMachine(customConfig, new StateChangeLogger(brokerId, true, None), controllerContext,
      mockZkClient, partitionState, mockControllerBrokerRequestBatch)

    def createMockController() = {
      val mockController: KafkaController = EasyMock.createMock(classOf[KafkaController])
      EasyMock.expect(mockController.controllerContext).andReturn(controllerContext).anyTimes()
      EasyMock.expect(mockController.config).andReturn(customConfig).anyTimes()
      EasyMock.expect(mockController.partitionStateMachine).andReturn(partitionStateMachine).anyTimes()
      EasyMock.expect(mockController.replicaStateMachine).andReturn(replicaStateMachine).anyTimes()
      EasyMock.expect(mockController.sendUpdateMetadataRequest(Seq.empty, partitions.toSet))
      EasyMock.expectLastCall().anyTimes()
      mockController
    }
    val mockController = createMockController()

    val mockEventManager: ControllerEventManager = EasyMock.createMock(classOf[ControllerEventManager])
    EasyMock.replay(mockController, replicaStateMachine, mockEventManager)

    // Use a real TopicDeletionManager wired to the mocked controller so that
    // enqueueTopicsForDeletion exercises the actual offline-count bookkeeping.
    val topicDeletionManager = new TopicDeletionManager(mockController, mockEventManager, mockZkClient)
    partitionStateMachine.setTopicDeletionManager(topicDeletionManager)

    partitionStateMachine.handleStateChanges(partitions, NewPartition)
    partitionStateMachine.handleStateChanges(partitions, OfflinePartition)
    assertEquals(s"There should be ${partitions.size} offline partition(s)", partitions.size, mockController.partitionStateMachine.offlinePartitionCount)
    topicDeletionManager.enqueueTopicsForDeletion(Set(topic))
    assertEquals(s"There should be no offline partition(s)", 0, partitionStateMachine.offlinePartitionCount)
  }
}
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package copied.play.api.libs.functional.syntax
import scala.language.higherKinds
import scala.language.implicitConversions
import copied.play.api.libs.functional._
/**
* Don't forget to {{{import copied.play.api.libs.functional.syntax._}}} to enable functional combinators
* when using Json API.
*/
object `package` {

  // Enriches M[A] with Alternative combinators when an Alternative[M] is in scope.
  implicit def toAlternativeOps[M[_], A](a: M[A])(implicit app: Alternative[M]): AlternativeOps[M, A] = new AlternativeOps(a)

  // Enriches M[A] with Applicative combinators when an Applicative[M] is in scope.
  implicit def toApplicativeOps[M[_], A](a: M[A])(implicit app: Applicative[M]): ApplicativeOps[M, A] = new ApplicativeOps(a)

  // Enables the `and`/`~` builder syntax used to combine Reads/Writes-style values.
  implicit def toFunctionalBuilderOps[M[_], A](a: M[A])(implicit fcb: FunctionalCanBuild[M]) = new FunctionalBuilderOps[M, A](a)(fcb)

  // Any Applicative can combine two values into a product (A ~ B), which is
  // exactly what FunctionalCanBuild requires.
  implicit def functionalCanBuildApplicative[M[_]](implicit app: Applicative[M]): FunctionalCanBuild[M] = new FunctionalCanBuild[M] {
    def apply[A, B](a: M[A], b: M[B]): M[A ~ B] = app.apply(app.map[A, B => A ~ B](a, a => ((b: B) => new ~(a, b))), b)
  }

  // Functor instance for Option, delegating to Option.map.
  implicit def functorOption: Functor[Option] = new Functor[Option] {
    def fmap[A, B](a: Option[A], f: A => B): Option[B] = a.map(f)
  }

  // Applicative instance for Option; `apply` is flatMap-based function application.
  implicit def applicativeOption: Applicative[Option] = new Applicative[Option] {
    def pure[A](a: A): Option[A] = Some(a)
    def map[A, B](m: Option[A], f: A => B): Option[B] = m.map(f)
    def apply[A, B](mf: Option[A => B], ma: Option[A]): Option[B] = mf.flatMap(f => ma.map(f))
  }

  // Monoid of endofunctions A => A under *reversed* composition:
  // append(f1, f2) applies f1 first, then f2. Identity is the identity function.
  implicit def functionMonoid[A] = new Monoid[A => A] {
    override def append(f1: A => A, f2: A => A) = f2 compose f1
    override def identity = Predef.identity
  }

  implicit def toMonoidOps[A](a: A)(implicit m: Monoid[A]): MonoidOps[A] = new MonoidOps(a)
  implicit def toFunctorOps[M[_], A](ma: M[A])(implicit fu: Functor[M]): FunctorOps[M, A] = new FunctorOps(ma)
  implicit def toContraFunctorOps[M[_], A](ma: M[A])(implicit fu: ContravariantFunctor[M]): ContravariantFunctorOps[M, A] = new ContravariantFunctorOps(ma)
  implicit def toInvariantFunctorOps[M[_], A](ma: M[A])(implicit fu: InvariantFunctor[M]): InvariantFunctorOps[M, A] = new InvariantFunctorOps(ma)

  // Turns a partial extractor into a total function by calling .get:
  // throws NoSuchElementException when f returns None.
  def unapply[B, A](f: B => Option[A]) = { b: B => f(b).get }

  // Alias for Function.unlift: same unsafe Option-unwrapping as `unapply`.
  def unlift[A, B](f: A => Option[B]): A => B = Function.unlift(f)
}
package org.jetbrains.plugins.scala
package lang
package psi
package types
import _root_.org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions.{toPsiClassExt, toPsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScTypeAlias, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.refactoring.util.ScTypeUtil.AliasType
import org.jetbrains.plugins.scala.util.ScEquivalenceUtil
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
object Bounds {
/** Least upper bound of a non-empty sequence of types, combined pairwise from the left. */
def lub(seq: Seq[ScType]): ScType =
  seq.reduceLeft((acc, next) => lub(acc, next))
/**
 * Normalized view of one lub/glb operand: resolves the type to the
 * PsiNamedElement (class or type alias) it designates, and provides
 * navigation over its supertypes and substitutors.
 */
private class Options(_tp: ScType) extends {
  // Early initializer: skolemize existentials before any other field is computed.
  val tp = _tp match {
    case ex: ScExistentialType => ex.skolem
    case other => other
  }
} with AnyRef {
  // The class this type resolves to, or (failing that) the type alias from
  // its alias form; None when neither applies.
  private val typeNamedElement: Option[(PsiNamedElement, ScSubstitutor)] = {
    ScType.extractClassType(tp) match {
      case None =>
        tp.isAliasType match {
          case Some(AliasType(ta, _, _)) => Some(ta, ScSubstitutor.empty)
          case _ => None
        }
      case some => some
    }
  }

  // True when the type resolves to neither a class nor a type alias.
  def isEmpty = typeNamedElement == None

  // Qualifier of a projection type (`q` in `q#T`), if this is one.
  val projectionOption: Option[ScType] = ScType.projectionOption(tp)

  // NOTE: partial — throws if isEmpty; callers filter empties first.
  def getSubst: ScSubstitutor = typeNamedElement.get._2

  /** Direct supertypes of the resolved element, each wrapped as Options; empties dropped. */
  def getSuperOptions: Seq[Options] = {
    // Projection qualifier (if any) is threaded through as the substitutor's `this` type.
    val subst = this.projectionOption match {
      case Some(proj) => new ScSubstitutor(Map.empty, Map.empty, Some(proj))
      case None => ScSubstitutor.empty
    }
    getNamedElement match {
      case t: ScTemplateDefinition => t.superTypes.map(tp => new Options(subst.subst(tp))).filter(!_.isEmpty)
      case p: PsiClass => p.getSupers.toSeq.map(cl => new Options(ScType.designator(cl))).filter(!_.isEmpty)
      case a: ScTypeAlias =>
        // For an alias, its "supertypes" are the components of its upper bound.
        val upperType: ScType = tp.isAliasType.get.upper.getOrAny
        val options: Seq[Options] = {
          upperType match {
            case ScCompoundType(comps1, _, _) => comps1.map(new Options(_))
            case _ => Seq(new Options(upperType))
          }
        }
        options.filter(!_.isEmpty)
    }
  }

  /** Whether this element is `bClass` itself or one of its (transitive) ancestors. */
  def isInheritorOrSelf(bClass: Options): Boolean = {
    (getNamedElement, bClass.getNamedElement) match {
      case (base: PsiClass, inheritor: PsiClass) =>
        ScEquivalenceUtil.smartEquivalence(base, inheritor) ||
          ScalaPsiManager.instance(base.getProject).cachedDeepIsInheritor(inheritor, base)
      case (base, inheritor: ScTypeAlias) =>
        if (ScEquivalenceUtil.smartEquivalence(base, inheritor)) return true
        // Recurse through the alias's upper-bound components.
        for (opt <- bClass.getSuperOptions) {
          if (isInheritorOrSelf(opt)) return true
        }
        false
      case _ => false //class can't be inheritor of type alias
    }
  }

  // NOTE: partial — throws if isEmpty; callers filter empties first.
  def getNamedElement: PsiNamedElement = typeNamedElement.get._1

  def getTypeParameters: Array[PsiTypeParameter] = typeNamedElement.get._1 match {
    case a: ScTypeAlias => a.typeParameters.toArray
    case p: PsiClass => p.getTypeParameters
  }

  /** A designator (or projection) type pointing back at the resolved element. */
  def baseDesignator: ScType = {
    projectionOption match {
      case Some(proj) => ScProjectionType(proj, getNamedElement, superReference = false)
      case None => ScType.designator(getNamedElement)
    }
  }

  /**
   * Substitutor mapping this element's type parameters as seen from `bClass`,
   * found by walking bClass's supertype graph up to this element; None when
   * this element is not an ancestor of bClass.
   */
  def superSubstitutor(bClass: Options): Option[ScSubstitutor] = {
    // DFS over the class hierarchy, composing substitutors along the path;
    // `visited` guards against cyclic hierarchies.
    def superSubstitutor(base: PsiClass, drv: PsiClass, drvSubst: ScSubstitutor,
                         visited: mutable.Set[PsiClass]): Option[ScSubstitutor] = {
      if (base.getManager.areElementsEquivalent(base, drv)) Some(drvSubst) else {
        if (visited.contains(drv)) None else {
          visited += drv
          val superTypes: Seq[ScType] = drv match {
            case td: ScTemplateDefinition => td.superTypes
            case _ => drv.getSuperTypes.map{t => ScType.create(t, drv.getProject)}
          }
          val iterator = superTypes.iterator
          while(iterator.hasNext) {
            val st = iterator.next()
            ScType.extractClassType(st) match {
              case None =>
              case Some((c, s)) => superSubstitutor(base, c, s, visited) match {
                case None =>
                case Some(subst) => return Some(subst.followed(drvSubst))
              }
            }
          }
          None
        }
      }
    }
    (getNamedElement, bClass.getNamedElement) match {
      case (base: PsiClass, drv: PsiClass) =>
        superSubstitutor(base, drv, bClass.typeNamedElement.get._2, mutable.Set.empty)
      case (base, inheritor: ScTypeAlias) =>
        if (ScEquivalenceUtil.smartEquivalence(base, inheritor)) {
          // Same alias: bind its type parameters to the applied type arguments.
          bClass.tp match {
            case ScParameterizedType(_, typeArgs) =>
              return Some(bClass.getTypeParameters.zip(typeArgs).foldLeft(ScSubstitutor.empty) {
                case (subst: ScSubstitutor, (ptp, typez)) =>
                  subst.bindT((ptp.name, ScalaPsiUtil.getPsiElementId(ptp)), typez)
              })
            case _ => return None
          }
        }
        for (opt <- bClass.getSuperOptions) {
          this.superSubstitutor(opt) match {
            case Some(res) => return Some(res)
            case _ =>
          }
        }
        None
      case _ => None //class can't be inheritor of type alias
    }
  }
}
/**
 * Greatest lower bound of two types. If one conforms to the other, the more
 * specific type is returned; otherwise skolemized/existential types are
 * decomposed and the fallback is the compound type `t1 with t2`.
 *
 * @param checkWeak whether weak (numeric) conformance is taken into account
 */
def glb(t1: ScType, t2: ScType, checkWeak: Boolean = false): ScType = {
  if (t1.conforms(t2, checkWeak)) t1
  else if (t2.conforms(t1, checkWeak)) t2
  else {
    (t1, t2) match {
      case (ScSkolemizedType(name, args, lower, upper), ScSkolemizedType(name2, args2, lower2, upper2)) =>
        // glb of abstract types: lub the lower bounds, glb the upper bounds.
        ScSkolemizedType(name, args, lub(lower, lower2, checkWeak), glb(upper, upper2, checkWeak))
      // NOTE(review): the next two branches call glb(upper, _) without passing
      // checkWeak (so it defaults to false), unlike the branch above — looks
      // like an oversight, but left as-is; confirm before changing.
      case (ScSkolemizedType(name, args, lower, upper), _) => ScSkolemizedType(name, args, lub(lower, t2, checkWeak), glb(upper, t2))
      case (_, ScSkolemizedType(name, args, lower, upper)) => ScSkolemizedType(name, args, lub(lower, t1, checkWeak), glb(upper, t1))
      // Existentials are skolemized, processed, then repacked.
      case (ex: ScExistentialType, _) => glb(ex.skolem, t2, checkWeak).unpackedType
      case (_, ex: ScExistentialType) => glb(t1, ex.skolem, checkWeak).unpackedType
      case _ => ScCompoundType(Seq(t1, t2), Map.empty, Map.empty)
    }
  }
}
/**
 * Greatest lower bound of a non-empty sequence of types, folded pairwise
 * left-to-right through the binary `glb`.
 *
 * Throws for an empty sequence (same as the previous implementation, which
 * read `typez(0)` unconditionally).
 *
 * @param checkWeak whether weak (numeric) conformance is taken into account
 */
def glb(typez: Seq[ScType], checkWeak: Boolean): ScType = {
  // The old body began with `if (typez.length == 1) typez(0)` — a pure
  // expression whose value was silently discarded (a no-op the compiler
  // warns about). The fold below covers the single-element case naturally.
  typez.drop(1).foldLeft(typez(0))((res, t) => glb(res, t, checkWeak))
}
/** Least upper bound of two types; delegates to the depth-tracking overload at depth 0. */
def lub(t1: ScType, t2: ScType, checkWeak: Boolean = false): ScType = lub(t1, t2, 0, checkWeak)(stopAddingUpperBound = false)
/** Least upper bound under weak (numeric) conformance. */
def weakLub(t1: ScType, t2: ScType): ScType = lub(t1, t2, checkWeak = true)
/** Folds a non-empty sequence of types into their least upper bound, pairwise from the left. */
private def lub(seq: Seq[ScType], depth: Int, checkWeak: Boolean)(implicit stopAddingUpperBound: Boolean): ScType =
  seq.reduceLeft((acc, next) => lub(acc, next, depth, checkWeak))
private def lub(t1: ScType, t2: ScType, depth : Int, checkWeak: Boolean)(implicit stopAddingUpperBound: Boolean): ScType = {
if (Set(t1.toString, t2.toString) == Set("Rewriter.this.type#c#universe#Apply", "Rewriter.this.type#c#universe#TypeApply")) {
"stop here"
}
if (t1.conforms(t2, checkWeak)) t2
else if (t2.conforms(t1, checkWeak)) t1
else {
def lubWithExpandedAliases(t1: ScType, t2: ScType): ScType = {
(t1, t2) match {
case (ScDesignatorType(t: ScParameter), _) =>
lub(t.getRealParameterType(TypingContext.empty).getOrAny, t2, 0, checkWeak)
case (ScDesignatorType(t: ScTypedDefinition), _) if !t.isInstanceOf[ScObject] =>
lub(t.getType(TypingContext.empty).getOrAny, t2, 0, checkWeak)
case (_, ScDesignatorType(t: ScParameter)) =>
lub(t1, t.getRealParameterType(TypingContext.empty).getOrAny, 0, checkWeak)
case (_, ScDesignatorType(t: ScTypedDefinition)) if !t.isInstanceOf[ScObject] =>
lub(t1, t.getType(TypingContext.empty).getOrAny, 0, checkWeak)
case (ex: ScExistentialType, _) => lub(ex.skolem, t2, 0, checkWeak).unpackedType
case (_, ex: ScExistentialType) => lub(t1, ex.skolem, 0, checkWeak).unpackedType
case (ScTypeParameterType(_, Nil, _, upper, _), _) => lub(upper.v, t2, 0, checkWeak)
case (_, ScTypeParameterType(_, Nil, _, upper, _)) => lub(t1, upper.v, 0, checkWeak)
case (ScSkolemizedType(name, args, lower, upper), ScSkolemizedType(name2, args2, lower2, upper2)) =>
ScSkolemizedType(name, args, glb(lower, lower2, checkWeak), lub(upper, upper2, 0, checkWeak))
case (ScSkolemizedType(name, args, lower, upper), r) =>
ScSkolemizedType(name, args, glb(lower, r, checkWeak), lub(upper, t2, 0, checkWeak))
case (r, ScSkolemizedType(name, args, lower, upper)) =>
ScSkolemizedType(name, args, glb(lower, r, checkWeak), lub(upper, t2, 0, checkWeak))
case (_: ValType, _: ValType) => types.AnyVal
case (JavaArrayType(arg1), JavaArrayType(arg2)) =>
val (v, ex) = calcForTypeParamWithoutVariance(arg1, arg2, depth, checkWeak)
ex match {
case Some(w) => ScExistentialType(JavaArrayType(v), List(w))
case None => JavaArrayType(v)
}
case (JavaArrayType(arg), ScParameterizedType(des, args)) if args.length == 1 && (ScType.extractClass(des) match {
case Some(q) => q.qualifiedName == "scala.Array"
case _ => false
}) =>
val (v, ex) = calcForTypeParamWithoutVariance(arg, args(0), depth, checkWeak)
ex match {
case Some(w) => ScExistentialType(ScParameterizedType(des, Seq(v)), List(w))
case None => ScParameterizedType(des, Seq(v))
}
case (ScParameterizedType(des, args), JavaArrayType(arg)) if args.length == 1 && (ScType.extractClass(des) match {
case Some(q) => q.qualifiedName == "scala.Array"
case _ => false
}) =>
val (v, ex) = calcForTypeParamWithoutVariance(arg, args(0), depth, checkWeak)
ex match {
case Some(w) => ScExistentialType(ScParameterizedType(des, Seq(v)), List(w))
case None => ScParameterizedType(des, Seq(v))
}
case (JavaArrayType(_), tp) =>
if (tp.conforms(AnyRef)) AnyRef
else Any
case (tp, JavaArrayType(_)) =>
if (tp.conforms(AnyRef)) AnyRef
else Any
case _ =>
val aOptions: Seq[Options] = {
t1 match {
case ScCompoundType(comps1, _, _) => comps1.map(new Options(_))
case _ => Seq(new Options(t1))
}
}
val bOptions: Seq[Options] = {
t2 match {
case ScCompoundType(comps1, _, _) => comps1.map(new Options(_))
case _ => Seq(new Options(t2))
}
}
if (aOptions.exists(_.isEmpty) || bOptions.exists(_.isEmpty)) types.Any
else {
val buf = new ArrayBuffer[ScType]
val supers: Array[(Options, Int, Int)] =
getLeastUpperClasses(aOptions, bOptions)
for (sup <- supers) {
val tp = getTypeForAppending(aOptions(sup._2), bOptions(sup._3), sup._1, depth, checkWeak)
if (tp != types.Any) buf += tp
}
buf.toArray match {
case a: Array[ScType] if a.length == 0 => types.Any
case a: Array[ScType] if a.length == 1 => a(0)
case many =>
new ScCompoundType(many.toSeq, Map.empty, Map.empty)
}
}
//todo: refinement for compound types
}
}
lubWithExpandedAliases(t1, t2).unpackedType
}
}
  /**
   * Computes the type argument to use for an invariant type-parameter position
   * when merging two parameterized types during lub computation.
   *
   * If the two substituted arguments are equivalent, that type is used as-is.
   * If one conforms to the other, a fresh existential wildcard `_$count`
   * bounded by the two types is produced. Otherwise the glb/lub of the two
   * types become the wildcard's bounds — unless `stopAddingUpperBound` is set,
   * in which case the upper bound degrades to `Any` to guarantee termination.
   *
   * @param count index used to name the generated wildcard (`_$1`, `_$2`, ...)
   * @return the type to place in the argument position, plus the existential
   *         argument (wildcard) that must wrap the overall type, if any
   */
  private def calcForTypeParamWithoutVariance(substed1: ScType, substed2: ScType, depth: Int, checkWeak: Boolean, count: Int = 1)
                                             (implicit stopAddingUpperBound: Boolean): (ScType, Option[ScExistentialArgument]) = {
    if (substed1 equiv substed2) (substed1, None) else {
      if (substed1 conforms substed2) {
        // substed1 <: substed2: wildcard bounded by the pair, no bound merging needed.
        (ScTypeVariable("_$" + count), Some(ScExistentialArgument("_$" + count, List.empty, substed1, substed2)))
      } else if (substed2 conforms substed1) {
        (ScTypeVariable("_$" + count), Some(ScExistentialArgument("_$" + count, List.empty, substed2, substed1)))
      } else {
        (substed1, substed2) match {
          // Skolemized arguments are merged bound-wise; the upper bound is only
          // refined while stopAddingUpperBound is false (termination guard).
          case (ScSkolemizedType(name, args, lower, upper), ScSkolemizedType(name2, args2, lower2, upper2)) =>
            val newLub = if (stopAddingUpperBound) types.Any else lub(Seq(upper, upper2), 0, checkWeak)(stopAddingUpperBound = true)
            (ScSkolemizedType(name, args, glb(lower, lower2, checkWeak), newLub), None)
          case (ScSkolemizedType(name, args, lower, upper), _) =>
            val newLub = if (stopAddingUpperBound) types.Any else lub(Seq(upper, substed2), 0, checkWeak)(stopAddingUpperBound = true)
            (ScSkolemizedType(name, args, glb(lower, substed2), newLub), None)
          case (_, ScSkolemizedType(name, args, lower, upper)) =>
            val newLub = if (stopAddingUpperBound) types.Any else lub(Seq(upper, substed1), 0, checkWeak)(stopAddingUpperBound = true)
            (ScSkolemizedType(name, args, glb(lower, substed1), newLub), None)
          case _ =>
            val newGlb = Bounds.glb(substed1, substed2)
            if (!stopAddingUpperBound) {
              //don't calculate the lub of the types themselves, but of the components of their compound types (if existing)
              // example: the lub of "_ >: Int with Double <: AnyVal" & Long we need here should be AnyVal, not Any
              def getTypesForLubEvaluation(t: ScType) = Seq(t)
              val typesToCover = getTypesForLubEvaluation(substed1) ++ getTypesForLubEvaluation(substed2)
              val newLub = Bounds.lub(typesToCover, 0, checkWeak = false)(stopAddingUpperBound = true)
              (ScTypeVariable("_$" + count), Some(ScExistentialArgument("_$" + count, List.empty, newGlb, newLub)))
            } else {
              //todo: this is wrong, actually we should pick lub, just without merging parameters in this method
              (ScTypeVariable("_$" + count), Some(ScExistentialArgument("_$" + count, List.empty, newGlb, types.Any)))
            }
        }
      }
    }
  }
  /**
   * Builds the component type contributed by the common base class `baseClass`
   * of `clazz1` and `clazz2` to a lub.
   *
   * Both classes are mapped into the type-parameter space of the base class
   * via super substitutors; for each base-class type parameter the merged
   * argument is the lub (covariant, depth-limited to 2), the glb
   * (contravariant), or an invariant wildcard produced by
   * `calcForTypeParamWithoutVariance`. Any generated wildcards wrap the result
   * in an existential type.
   *
   * @return the merged parameterized type, or `Any` when no super substitutor
   *         exists for either side
   */
  private def getTypeForAppending(clazz1: Options, clazz2: Options, baseClass: Options, depth: Int, checkWeak: Boolean)
                                 (implicit stopAddingUpperBound: Boolean): ScType = {
    val baseClassDesignator = baseClass.baseDesignator
    // Unparameterized base class: nothing to merge, the designator itself is the answer.
    if (baseClass.getTypeParameters.length == 0) return baseClassDesignator
    (baseClass.superSubstitutor(clazz1), baseClass.superSubstitutor(clazz2)) match {
      case (Some(superSubst1), Some(superSubst2)) =>
        // Apply each substitutor to the "generic" form of the base class to
        // obtain the two concrete argument lists to merge.
        val tp = ScParameterizedType(baseClassDesignator, baseClass.
                getTypeParameters.map(tp => ScalaPsiManager.instance(baseClass.getNamedElement.getProject).typeVariable(tp)))
        val tp1 = superSubst1.subst(tp).asInstanceOf[ScParameterizedType]
        val tp2 = superSubst2.subst(tp).asInstanceOf[ScParameterizedType]
        val resTypeArgs = new ArrayBuffer[ScType]
        val wildcards = new ArrayBuffer[ScExistentialArgument]()
        for (i <- 0 until baseClass.getTypeParameters.length) {
          val substed1 = tp1.typeArgs.apply(i)
          val substed2 = tp2.typeArgs.apply(i)
          resTypeArgs += (baseClass.getTypeParameters.apply(i) match {
            // Covariant: lub, but only up to depth 2 to avoid infinite lub towers.
            case scp: ScTypeParam if scp.isCovariant => if (depth < 2) lub(substed1, substed2, depth + 1, checkWeak) else types.Any
            case scp: ScTypeParam if scp.isContravariant => glb(substed1, substed2, checkWeak)
            case _ =>
              val (v, ex) = calcForTypeParamWithoutVariance(substed1, substed2, depth, checkWeak, count = wildcards.length + 1)
              wildcards ++= ex
              v
          })
        }
        if (wildcards.isEmpty) ScParameterizedType(baseClassDesignator, resTypeArgs.toSeq)
        else ScExistentialType(ScParameterizedType(baseClassDesignator, resTypeArgs.toSeq), wildcards.toList)
      case _ => types.Any
    }
  }
def putAliases(template: ScTemplateDefinition, s: ScSubstitutor): ScSubstitutor = {
var run = s
for (alias <- template.aliases) {
alias match {
case aliasDef: ScTypeAliasDefinition if s.aliasesMap.get(aliasDef.name) == None =>
run = run bindA (aliasDef.name, {() => aliasDef.aliasedType(TypingContext.empty).getOrAny})
case _ =>
}
}
run
}
  /**
   * Computes the least upper classes of the two component sequences, i.e. the
   * most specific classes that some element of `aClasses` (or one of its
   * superclasses) shares with some element of `bClasses`.
   *
   * @return triples of (common class option, index into `aClasses` of the
   *         originating element, index into `bClasses` of the matched element)
   */
  private def getLeastUpperClasses(aClasses: Seq[Options], bClasses: Seq[Options]): Array[(Options, Int, Int)] = {
    val res = new ArrayBuffer[(Options, Int, Int)]
    // Adds a candidate to `res`, dropping it when a more specific class is
    // already recorded, and replacing any strictly more general entry.
    def addClass(aClass: Options, x: Int, y: Int) {
      var i = 0
      var break = false
      while (!break && i < res.length) {
        val clazz = res(i)._1
        if (aClass.isInheritorOrSelf(clazz)) {
          break = true //todo: join them somehow?
        } else if (clazz.isInheritorOrSelf(aClass)) {
          res(i) = (aClass, x, y)
          break = true
        }
        i = i + 1
      }
      if (!break) {
        res += ((aClass, x, y))
      }
    }
    // Walks up the superclass hierarchy of `aClasses`; whenever a class is an
    // inheritor of (or the same as) some element of `bClasses`, it is recorded
    // against the original a-index (`baseIndex`, once recursion starts).
    // NOTE: `visited + element` on a mutable.HashSet yields a fresh copy, so
    // sibling branches do not observe each other's visited sets; `visited`
    // only guards against cycles along a single path.
    def checkClasses(aClasses: Seq[Options], baseIndex: Int = -1, visited: mutable.HashSet[PsiElement] = mutable.HashSet.empty) {
      if (aClasses.length == 0) return
      val aIter = aClasses.iterator
      var i = 0
      while (aIter.hasNext) {
        val aClass = aIter.next()
        val bIter = bClasses.iterator
        var break = false
        var j = 0
        while (!break && bIter.hasNext) {
          val bClass = bIter.next()
          if (aClass.isInheritorOrSelf(bClass)) {
            addClass(aClass, if (baseIndex == -1) i else baseIndex, j)
            break = true
          } else {
            val element = aClass.getNamedElement
            if (!visited.contains(element)) {
              checkClasses(aClass.getSuperOptions, if (baseIndex == -1) i else baseIndex, visited + element)
            }
          }
          j += 1
        }
        i += 1
      }
    }
    checkClasses(aClasses)
    res.toArray
  }
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/types/Bounds.scala | Scala | apache-2.0 | 18,758 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.cloud.utils
import org.apache.hadoop.conf.Configuration
import org.scalatest.Assertions
trait ExtraAssertions extends Assertions {

  /**
   * Unwraps an optional value, raising an `AssertionError` built from `msg`
   * when the value is absent.
   *
   * @param v   optional value under test
   * @param msg lazily evaluated failure message
   * @tparam T  type of the wrapped value
   * @return the wrapped value
   */
  def expectSome[T](v: Option[T], msg: => String): T = v match {
    case Some(value) => value
    case None        => throw new AssertionError(msg)
  }

  /**
   * Asserts that a reference is non-null and returns it, so that callers can
   * use the result knowing it cannot be null.
   *
   * @param v   value to check
   * @param msg lazily evaluated failure message
   * @tparam T  type of the value
   * @return `v`, guaranteed non-null
   */
  def expectNotNull[T](v: T, msg: => String): T =
    if (v == null) throw new AssertionError(msg) else v

  /**
   * Asserts that a Hadoop configuration option is set and returns its value.
   *
   * @param c   configuration to query
   * @param key key to look for
   * @return the configured value
   */
  def expectOptionSet(c: Configuration, key: String): String =
    expectNotNull(c.get(key), s"Unset property ${key}")
}
| hortonworks-spark/cloud-integration | cloud-examples/src/main/scala/com/cloudera/spark/cloud/utils/ExtraAssertions.scala | Scala | apache-2.0 | 1,864 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.{CoreUtils, TestUtils, ZkUtils}
import kafka.zk.ZooKeeperTestHarness
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.Test
/**
 * Integration tests for broker startup behavior: ZooKeeper chroot creation,
 * port/broker-id conflicts, self-awareness via the metadata cache, and the
 * broker-state transition ordering relative to ZK registration.
 */
class ServerStartupTest extends ZooKeeperTestHarness {
  // Starting a broker whose zookeeper.connect has a chroot suffix must create
  // that chroot path in ZooKeeper.
  @Test
  def testBrokerCreatesZKChroot {
    val brokerId = 0
    val zookeeperChroot = "/kafka-chroot-for-unittest"
    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val zooKeeperConnect = props.get("zookeeper.connect")
    props.put("zookeeper.connect", zooKeeperConnect + zookeeperChroot)
    val server = TestUtils.createServer(KafkaConfig.fromProps(props))
    val pathExists = zkUtils.pathExists(zookeeperChroot)
    assertTrue(pathExists)
    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }
  // A second broker configured with the port of a running broker must fail to
  // start; the first broker is always cleaned up in the finally block.
  @Test
  def testConflictBrokerStartupWithSamePort {
    // Create and start first broker
    val brokerId1 = 0
    val props1 = TestUtils.createBrokerConfig(brokerId1, zkConnect)
    val server1 = TestUtils.createServer(KafkaConfig.fromProps(props1))
    val port = server1.boundPort()
    // Create a second broker with same port
    val brokerId2 = 1
    val props2 = TestUtils.createBrokerConfig(brokerId2, zkConnect, port = port)
    try {
      TestUtils.createServer(KafkaConfig.fromProps(props2))
      fail("Starting a broker with the same port should fail")
    } catch {
      case _: RuntimeException => // expected
    } finally {
      server1.shutdown()
      CoreUtils.delete(server1.config.logDirs)
    }
  }
  // Starting a broker with an already-registered broker id must fail and must
  // leave the existing registration in ZooKeeper untouched.
  @Test
  def testConflictBrokerRegistration {
    // Try starting a broker with the a conflicting broker id.
    // This shouldn't affect the existing broker registration.
    val brokerId = 0
    val props1 = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server1 = TestUtils.createServer(KafkaConfig.fromProps(props1))
    val brokerRegistration = zkUtils.readData(ZkUtils.BrokerIdsPath + "/" + brokerId)._1
    val props2 = TestUtils.createBrokerConfig(brokerId, zkConnect)
    try {
      TestUtils.createServer(KafkaConfig.fromProps(props2))
      fail("Registering a broker with a conflicting id should fail")
    } catch {
      case _: RuntimeException =>
        // this is expected
    }
    // broker registration shouldn't change
    assertEquals(brokerRegistration, zkUtils.readData(ZkUtils.BrokerIdsPath + "/" + brokerId)._1)
    server1.shutdown()
    CoreUtils.delete(server1.config.logDirs)
  }
  // After startup the broker's own metadata cache must list exactly itself as
  // an alive broker.
  @Test
  def testBrokerSelfAware {
    val brokerId = 0
    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server = TestUtils.createServer(KafkaConfig.fromProps(props))
    TestUtils.waitUntilTrue(() => server.metadataCache.getAliveBrokers.nonEmpty, "Wait for cache to update")
    assertEquals(1, server.metadataCache.getAliveBrokers.size)
    assertEquals(brokerId, server.metadataCache.getAliveBrokers.head.id)
    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }
  // Verifies ordering: when the broker transitions to RunningAsBroker, its ZK
  // registration must already be visible (checked inside the interceptor).
  @Test
  def testBrokerStateRunningAfterZK {
    val brokerId = 0
    val mockBrokerState = EasyMock.niceMock(classOf[kafka.server.BrokerState])
    // Interceptor invoked by EasyMock on newState(RunningAsBroker): asserts the
    // broker is registered in ZK at that moment.
    class BrokerStateInterceptor() extends BrokerState {
      override def newState(newState: BrokerStates): Unit = {
        val brokers = zkUtils.getAllBrokersInCluster()
        assertEquals(1, brokers.size)
        assertEquals(brokerId, brokers.head.id)
      }
    }
    // KafkaServer whose brokerState is replaced by the mock above.
    class MockKafkaServer(override val config: KafkaConfig, override val brokerState: BrokerState = mockBrokerState) extends KafkaServer(config) {}
    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server = new MockKafkaServer(KafkaConfig.fromProps(props))
    EasyMock.expect(mockBrokerState.newState(RunningAsBroker)).andDelegateTo(new BrokerStateInterceptor).once()
    EasyMock.replay(mockBrokerState)
    server.startup()
    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }
}
| eribeiro/kafka | core/src/test/scala/unit/kafka/server/ServerStartupTest.scala | Scala | apache-2.0 | 4,702 |
package controllers
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.util.{ PasswordHasherRegistry, PasswordInfo }
import com.mohiva.play.silhouette.impl.providers.CredentialsProvider
import models.services.{ AuthTokenService, UserService }
import models.daos._
import play.api.i18n.{ I18nSupport, Messages, MessagesApi }
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc.Controller
import utils.auth.DefaultEnv
import scala.concurrent.Future
/**
 * Controller that lets an authenticated user elevate themselves to admin by
 * entering the admin password stored via [[EnvDAO]].
 */
class AuthorizeAdminController @Inject() (
  val messagesApi: MessagesApi,
  silhouette: Silhouette[DefaultEnv],
  userService: UserService,
  authInfoRepository: AuthInfoRepository,
  passwordHasherRegistry: PasswordHasherRegistry,
  authTokenService: AuthTokenService,
  envDAO: EnvDAO,
  userDAO: models.daos.UserDAO
)
  extends Controller with I18nSupport {
  // Renders the admin-authorization form (authenticated users only).
  def view = silhouette.SecuredAction.async { implicit request =>
    Future.successful(Ok(views.html.authorizeAdmin(new myform.MyAuthorizeAdminForm())))
  }
  // Handles the form submission: on a correct password, marks the current user
  // as admin, persists it, and shows the home page; otherwise redirects back
  // to the form. On a binding error, re-renders the form with errors.
  def submit = silhouette.SecuredAction.async { implicit request =>
    // NOTE(review): bindFromRequest apparently yields either the form itself
    // (binding failure) or a MyAuthorizeAdminFormData (success) — confirm this
    // contract in myform.MyAuthorizeAdminForm.
    new myform.MyAuthorizeAdminForm().bindFromRequest match {
      case form: myform.MyAuthorizeAdminForm => Future.successful(BadRequest(views.html.authorizeAdmin(form)))
      case data: myform.MyAuthorizeAdminFormData => {
        // NOTE(review): plain-text equality check of the admin password; not a
        // constant-time comparison and no hashing — confirm the threat model.
        val adminpass = envDAO.getAdminPass
        if (adminpass == data.password) {
          val user = request.identity.copy(admin = true)
          userDAO.save(user)
          Future.successful(Ok(views.html.home(user)))
        } else Future.successful(Redirect(routes.AuthorizeAdminController.view()))
      }
    }
  }
}
| serversideapps/silhmojs | server/app/controllers/AuthorizeAdminController.scala | Scala | apache-2.0 | 1,767 |
package scala.slick.session
import java.sql.{PreparedStatement, Date, Time, Timestamp, Types, Blob, Clob}
import scala.slick.jdbc.SetParameter
/**
 * A mutable cursor over the positional parameters of a JDBC
 * `PreparedStatement`. Every `setX` call advances the 1-based parameter index
 * by one and writes the given value at that index; `setXOption` writes SQL
 * NULL of the corresponding `java.sql.Types` code for `None`.
 *
 * Instances are not thread-safe: the `pos` cursor is mutable state.
 *
 * Improvement: the fifteen Option-valued setters previously repeated the same
 * advance/null-or-set pattern verbatim; they now delegate to one private
 * helper. Behavior and public interface are unchanged.
 */
class PositionedParameters(val ps: PreparedStatement) {

  /** Index of the parameter written last; 0 before anything has been set. */
  var pos = 0

  /** Writes the next parameter through an implicit SetParameter instance. */
  def >> [T](value: T)(implicit f: SetParameter[T]): Unit = f(value, this)

  // Non-optional setters: advance the cursor and delegate to the matching
  // PreparedStatement setter (unchanged from the original).
  def setNull(sqlType: Int) { val npos = pos + 1; ps.setNull(npos, sqlType); pos = npos }
  def setBoolean(value: Boolean) { val npos = pos + 1; ps.setBoolean (npos, value); pos = npos }
  def setBlob(value: Blob) { val npos = pos + 1; ps.setBlob (npos, value); pos = npos }
  def setByte(value: Byte) { val npos = pos + 1; ps.setByte (npos, value); pos = npos }
  def setBytes(value: Array[Byte]) { val npos = pos + 1; ps.setBytes (npos, value); pos = npos }
  def setClob(value: Clob) { val npos = pos + 1; ps.setClob (npos, value); pos = npos }
  def setDate(value: Date) { val npos = pos + 1; ps.setDate (npos, value); pos = npos }
  def setDouble(value: Double) { val npos = pos + 1; ps.setDouble (npos, value); pos = npos }
  def setFloat(value: Float) { val npos = pos + 1; ps.setFloat (npos, value); pos = npos }
  def setInt(value: Int) { val npos = pos + 1; ps.setInt (npos, value); pos = npos }
  def setLong(value: Long) { val npos = pos + 1; ps.setLong (npos, value); pos = npos }
  def setShort(value: Short) { val npos = pos + 1; ps.setShort (npos, value); pos = npos }
  def setString(value: String) { val npos = pos + 1; ps.setString (npos, value); pos = npos }
  def setTime(value: Time) { val npos = pos + 1; ps.setTime (npos, value); pos = npos }
  def setTimestamp(value: Timestamp) { val npos = pos + 1; ps.setTimestamp (npos, value); pos = npos }
  def setBigDecimal(value: BigDecimal) { val npos = pos + 1; ps.setBigDecimal(npos, value.bigDecimal); pos = npos }
  def setObject(value: AnyRef, sqlType: Int) { val npos = pos + 1; ps.setObject(npos, value, sqlType); pos = npos }

  /**
   * Shared implementation for all Option-valued setters: advances the cursor,
   * writes SQL NULL of `sqlType` for `None`, otherwise delegates to `set`
   * with the unwrapped value.
   */
  private def setOption[T](value: Option[T], sqlType: Int)(set: (Int, T) => Unit) {
    val npos = pos + 1
    if (value eq None) ps.setNull(npos, sqlType) else set(npos, value.get)
    pos = npos
  }

  def setBooleanOption(value: Option[Boolean]) { setOption(value, Types.BOOLEAN)((i, v) => ps.setBoolean(i, v)) }
  def setBlobOption(value: Option[Blob]) { setOption(value, Types.BLOB)((i, v) => ps.setBlob(i, v)) }
  def setByteOption(value: Option[Byte]) { setOption(value, Types.TINYINT)((i, v) => ps.setByte(i, v)) }
  def setBytesOption(value: Option[Array[Byte]]) { setOption(value, Types.BLOB)((i, v) => ps.setBytes(i, v)) }
  def setClobOption(value: Option[Clob]) { setOption(value, Types.CLOB)((i, v) => ps.setClob(i, v)) }
  def setDateOption(value: Option[Date]) { setOption(value, Types.DATE)((i, v) => ps.setDate(i, v)) }
  def setDoubleOption(value: Option[Double]) { setOption(value, Types.DOUBLE)((i, v) => ps.setDouble(i, v)) }
  def setFloatOption(value: Option[Float]) { setOption(value, Types.FLOAT)((i, v) => ps.setFloat(i, v)) }
  def setIntOption(value: Option[Int]) { setOption(value, Types.INTEGER)((i, v) => ps.setInt(i, v)) }
  // NOTE: Types.INTEGER (not BIGINT) for the null case mirrors the original code.
  def setLongOption(value: Option[Long]) { setOption(value, Types.INTEGER)((i, v) => ps.setLong(i, v)) }
  def setShortOption(value: Option[Short]) { setOption(value, Types.SMALLINT)((i, v) => ps.setShort(i, v)) }
  def setStringOption(value: Option[String]) { setOption(value, Types.VARCHAR)((i, v) => ps.setString(i, v)) }
  def setTimeOption(value: Option[Time]) { setOption(value, Types.TIME)((i, v) => ps.setTime(i, v)) }
  def setTimestampOption(value: Option[Timestamp]) { setOption(value, Types.TIMESTAMP)((i, v) => ps.setTimestamp(i, v)) }
  def setBigDecimalOption(value: Option[BigDecimal]) { setOption(value, Types.DECIMAL)((i, v) => ps.setBigDecimal(i, v.bigDecimal)) }
  def setObjectOption(value: Option[AnyRef], sqlType: Int) { setOption(value, sqlType)((i, v) => ps.setObject(i, v, sqlType)) }
}
| zefonseca/slick-1.0.0-scala.2.11.1 | src/main/scala/scala/slick/session/PositionedParameters.scala | Scala | bsd-2-clause | 4,941 |
package edu.gemini.qv.plugin
import edu.gemini.qv.plugin.data._
import edu.gemini.qv.plugin.selector.ReferenceDateSelector
import edu.gemini.qv.plugin.ui.QvGui
import edu.gemini.qv.plugin.util.ConstraintsCache.{ConstraintCalculationEnd, ConstraintCalculationProgress, ConstraintCalculationStart}
import edu.gemini.qv.plugin.util.ScheduleCache.{ScheduleLoadStart, ScheduleLoadEnd, ScheduleLoadFailed}
import edu.gemini.qv.plugin.util._
import scala.swing.GridBagPanel.Anchor._
import scala.swing.GridBagPanel.Fill._
import scala.swing._
/**
* Status panel that shows global messages and information about background tasks.
* It also shows error messages in case any of the background tasks fail to complete successfully.
*/
class StatusPanel(ctx: QvContext) extends GridBagPanel {
  // Running totals for constraint calculations across (possibly several)
  // ConstraintCalculationStart events; reset once everything is done.
  var constraintsTotal = 0
  var constraintsDone = 0
  // Left-most label used for global messages (see showMessage).
  val messageLabel = new Label()
  // Indeterminate spinner shown while observations are being (re)loaded.
  val observationsProgress = new ProgressBar {
    indeterminate = true
    visible = false
  }
  val observationsLabel = new Label(s"Total ${ctx.source.observations.size} loaded.")
  // "Observations:" section: spinner + count label.
  val observations = new FlowPanel {
    contents += new Label("Observations: ")
    contents += observationsProgress
    contents += observationsLabel
  }
  // Determinate bar showing percentage of constraint calculations done.
  val constraintsProgress = new ProgressBar {
    visible = false
  }
  val constraintsLabel = new Label("None.")
  // "Constraints:" section: progress bar + status label.
  val constraints = new FlowPanel {
    contents += new Label("Constraints: ")
    contents += constraintsProgress
    contents += constraintsLabel
  }
  val scheduleLabel = new Label("None.")
  // "Schedule:" section: status label only (spinner icon is set on the label).
  val schedule = new FlowPanel {
    contents += new Label("Schedule: ")
    contents += scheduleLabel
  }
  val referenceDate = new ReferenceDateSelector(ctx)
  // Layout, left to right: message (stretches) | date selector | observations
  // | constraints | schedule, separated by thin vertical separators.
  layout(messageLabel) = new Constraints {
    gridx = 0
    weightx = 1
    anchor = West
    fill = Both
  }
  layout(new VerticalSeparator) = new Constraints {
    gridx = 1
    weighty = 1
    fill = Vertical
  }
  layout(referenceDate) = new Constraints {
    gridx = 2
  }
  layout(new VerticalSeparator) = new Constraints {
    gridx = 3
    weighty = 1
    fill = Vertical
  }
  layout(observations) = new Constraints {
    gridx = 4
  }
  layout(new VerticalSeparator) = new Constraints {
    gridx = 5
    weighty = 1
    fill = Vertical
  }
  layout(constraints) = new Constraints {
    gridx = 6
  }
  layout(new VerticalSeparator) = new Constraints {
    gridx = 7
    weighty = 1
    fill = Vertical
  }
  layout(schedule) = new Constraints {
    gridx = 8
  }
  listenTo(ctx.source, SolutionProvider(ctx))
  reactions += {
    // === events from data source (ODB)
    case DataSourceRefreshStart =>
      observationsProgress.visible = true
      observationsLabel.text = "Loading..."
      revalidate()
    case DataSourceRefreshEnd(result) =>
      observationsProgress.visible = false
      observationsLabel.text = s"Total ${result.size} loaded."
      revalidate()
    // === events from caches (SolutionProvider)
    // Several calculations can start before earlier ones finish, so the
    // totals are accumulated and the bar re-scaled on each start event.
    case ConstraintCalculationStart(c, cnt) =>
      constraintsTotal += cnt
      constraintsProgress.visible = true
      constraintsProgress.min = 0
      constraintsProgress.max = constraintsTotal
      constraintsProgress.labelPainted = true
      constraintsProgress.label = "0%"
      constraintsLabel.text = "Calculating..."
      revalidate()
    case ConstraintCalculationProgress(c, cnt) =>
      constraintsDone += cnt
      constraintsProgress.value = constraintsDone
      constraintsProgress.label = f"${100.0*constraintsDone/constraintsTotal}%3.0f%%"
    case ConstraintCalculationEnd(c, cnt) =>
      constraintsDone += cnt
      // Only reset and hide once every outstanding calculation has finished.
      if (constraintsDone >= constraintsTotal) {
        constraintsTotal = 0
        constraintsDone = 0
        constraintsProgress.visible = false
        constraintsLabel.text = "All up to date."
        revalidate()
      }
    case ScheduleLoadStart =>
      scheduleLabel.icon = QvGui.Spinner16Icon
      scheduleLabel.text = "Loading..."
    case ScheduleLoadEnd =>
      scheduleLabel.icon = null
      scheduleLabel.text = "Loaded."
    case ScheduleLoadFailed =>
      scheduleLabel.icon = null
      scheduleLabel.text = "Not Available."
  }
  /** Shows a global message in the left-most area of the status bar. */
  def showMessage(message: String) = messageLabel.text = message
  // Thin fixed-width separator between status bar sections.
  class VerticalSeparator extends Separator(Orientation.Vertical) {
    preferredSize = new Dimension(5,1)
  }
}
| arturog8m/ocs | bundle/edu.gemini.qv.plugin/src/main/scala/edu/gemini/qv/plugin/StatusPanel.scala | Scala | bsd-3-clause | 4,310 |
package com.umayrh.intervalGraph
import java.sql.Date
import com.holdenkarau.spark.testing._
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.DataFrame
import org.scalatest._
import org.scalatest.matchers.should._
import org.scalatest.featurespec._
/**
* Tests [[DateOverlap]]
*/
class DateOverlapIntegrationTest
    extends AnyFeatureSpec
    with GivenWhenThen
    with Matchers
    with SharedSparkContext
    with DataFrameSuiteBase {
  Feature("A function for grouping overlapping dates") {
    // Missing input columns "s"/"t" must be rejected up front.
    Scenario("An dataset without given input columns results in an exception") {
      Given("A dataset without specified input columns")
      val dataset = spark.emptyDataFrame
      When("groupByOverlap is invoked")
      intercept[IllegalArgumentException] {
        DateOverlap.groupByOverlap(dataset, ("s", "t"), "id")
      }
      Then("the result is an exception")
    }
    // An empty (but correctly-typed) dataset passes through with the id column added.
    Scenario("An emtpy dataset results in an empty dataset with given output column") {
      Given("An empty dataset")
      val dataset = makeDataset(List(("2018-01-01", "2018-01-02")))
      // Filter with a date after every row to obtain an empty frame with the right schema.
      val emptyDataset = dataset.filter(col("s") > Date.valueOf("2019-01-02"))
      When("groupByOverlap is invoked")
      val result = DateOverlap.groupByOverlap(emptyDataset, ("s", "t"), "id")
      Then("the result is an empty dataset with the given output column")
      result.count() should be(0)
      result.columns.contains("id") should be(true)
    }
    // Disjoint ranges: every row gets its own group id.
    Scenario(
      "A dataset of non-overlapping date ranges returns an id column with the same cardinality") {
      Given("A dataset with non-overlapping dates")
      val dataset = makeDataset(
        List(("2018-01-01", "2018-01-02"),
             ("2018-02-01", "2018-02-02"),
             ("2018-03-01", "2018-03-02"),
             ("2018-04-01", "2018-04-02"),
             ("2018-05-01", "2018-05-02")))
      When("groupByOverlap is invoked on")
      val result = DateOverlap.groupByOverlap(dataset, ("s", "t"), "id")
      Then("result is the original dataset with an id column of the same cardinality")
      result.select("id").distinct().count() should be(result.count())
    }
    // A chain of ranges touching at the endpoints collapses into one group.
    Scenario("A dataset of overlapping date ranges returns an id column with cardinality 1") {
      Given("A dataset with overlapping dates")
      val dataset = makeDataset(
        List(("2018-01-01", "2018-02-02"),
             ("2018-02-02", "2018-02-02"),
             ("2018-02-02", "2018-03-02"),
             ("2018-03-02", "2018-04-02"),
             ("2018-04-02", "2018-05-02")))
      When("groupByOverlap is invoked on")
      val result = DateOverlap.groupByOverlap(dataset, ("s", "t"), "id")
      Then("result is the original dataset with an id column of the same cardinality")
      result.select("id").distinct().count() should be(1)
    }
  }
  // Builds a two-column ("s", "t") DataFrame of java.sql.Date ranges from
  // ISO-formatted string pairs.
  def makeDataset(strRanges: List[(String, String)]): DataFrame = {
    val ranges = strRanges.map(k => (Date.valueOf(k._1), Date.valueOf(k._2)))
    TestUtils.datesToDf(sc, sqlContext)(ranges, "s", "t")
  }
}
| umayrh/sketchy-polytopes | sparkScala/intervalGraph/src/test/scala/com/umayrh/intervalGraph/DateOverlapIntegrationTest.scala | Scala | gpl-3.0 | 3,031 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.time.calendars
import org.quantintel.ql.time.Month._
import org.quantintel.ql.time.Weekday._
import org.quantintel.ql.time.{Impl, Date, Western, Calendar}
/**
 * Enumeration of supported Indian markets.
 * Currently only the National Stock Exchange (code 1) is supported.
 */
object IndiaEnum extends Enumeration {

  type IndiaEnum = Value

  /** National Stock Exchange of India, market code 1. */
  val NSE = Value(1)

  /**
   * Maps an integer market code to its enumeration value.
   *
   * @param market market code (1 = NSE)
   * @return the matching enumeration value
   * @throws Exception if the code is not a known market
   */
  def valueOf(market: Int) : IndiaEnum = market match {
    case 1 => NSE
    // Fix: the message previously read "Valid units = 1" — a copy/paste from a
    // time-unit enum; the parameter here is a market code.
    case _ => throw new Exception("Valid market = 1")
  }

}
object India {

  /** Creates the India calendar with its default market (NSE). */
  def apply(): Calendar = new India()

  /** Creates the India calendar for the requested market. */
  def apply(market: org.quantintel.ql.time.calendars.IndiaEnum.IndiaEnum): Calendar =
    new India(market)

}
/**
*
* Indian calendars
* Holidays for the National Stock Exchange
* Saturdays
* Sundays
* Republic Day, JANUARY 26th
* Good Friday
* Ambedkar Jayanti, April 14th
* Independence Day, August 15th
* Gandhi Jayanti, October 2nd
* Christmas, December 25th
*
* Other holidays for which no rule is given (data available for
* 2005-2008
* Bakri Id
* Moharram
* Mahashivratri
* Holi
* Ram Navami
* Mahavir Jayanti
* Id-E-Milad
* Maharashtra Day
* Buddha Pournima
* Ganesh Chaturthi
* Dasara
* Laxmi Puja
* Bhaubeej
* Ramzan Id
* Guru Nanak Jayanti
*
* TODO: This implementation has holidays only up to year 2008.
*
* Reference:
* http://www.nse-india.com/
*
* @author Paul Bernard
*/
class India extends Calendar {

  // Default market implementation: National Stock Exchange.
  impl = new Nse

  import org.quantintel.ql.time.calendars.IndiaEnum._

  /**
   * Creates a calendar for the given market.
   *
   * @param market market selector (only NSE is supported)
   * @throws Exception if the market code is not supported
   */
  def this(market: org.quantintel.ql.time.calendars.IndiaEnum.IndiaEnum){
    this
    market match {
      case NSE => impl = new Nse
      // Fix: the message previously read "Valid units = 1" — a copy/paste from a
      // time-unit enum; the parameter here is a market.
      case _ => throw new Exception("Valid market = 1")
    }
  }

  /** Holiday rules for the National Stock Exchange of India. */
  private class Nse extends Western {

    override def name : String = "National Stock Exchange of India"

    /**
     * Returns true when `date` is a trading day on the NSE.
     *
     * Rule-based holidays (weekends, Republic Day, Good Friday, Ambedkar
     * Jayanti, Independence Day, Gandhi Jayanti, Christmas) are checked first;
     * the remaining religious holidays have no closed-form rule and are
     * hard-coded per year for 2005-2008 only (dates outside those years miss
     * the variable holidays).
     */
    override def isBusinessDay(date: Date): Boolean = {

      val w: Weekday = date.weekday
      val d: Int = date.dayOfMonth
      val dd = date.dayOfYear
      val m: Month = date.month
      val y: Int = date.year
      val em: Int = easterMonday(y)

      if (isWeekend(w)
        || (d == 26 && m == JANUARY) // Republic Day
        || (dd == em-3) // Good Friday
        || (d == 14 && m == APRIL) // Ambedkar Jayanti
        || (d == 15 && m == AUGUST) // Independence Day
        || (d == 2 && m == OCTOBER) // Gandhi Jayanti
        || (d == 25 && m == DECEMBER)) // Christmas
        false
      else if ((y == 2005) &&
        // Moharram, Holi, Maharashtra Day, and Ramzan Id fall
        // on Saturday or Sunday in 2005
        ((d == 21 && m == JANUARY) // Bakri Id
        || (d == 7 && m == SEPTEMBER) // Ganesh Chaturthi
        || (d == 12 && m == OCTOBER) // Dasara
        || (d == 1 && m == NOVEMBER) // Laxmi Puja
        || (d == 3 && m == NOVEMBER) // Bhaubeej
        || (d == 15 && m == NOVEMBER))) // Guru Nanak Jayanti
        false
      else if ((y == 2006) &&
        ((d == 11 && m == JANUARY) // Bakri Id
          || (d == 9 && m == FEBRUARY) // Moharram
          || (d == 15 && m == MARCH) // Holi
          || (d == 6 && m == APRIL) // Ram Navami
          || (d == 11 && m == APRIL) // Mahavir Jayanti
          || (d == 1 && m == MAY) // Maharashtra Day
          || (d == 24 && m == OCTOBER) // Bhaubeej
          || (d == 25 && m == OCTOBER))) // Ramzan Id
        false
      else if ((y == 2007) &&
        ((d == 1 && m == JANUARY) // Bakri Id
          || (d == 30 && m == JANUARY) // Moharram
          || (d == 16 && m == FEBRUARY) // Mahashivratri
          || (d == 27 && m == MARCH) // Ram Navami
          || (d == 1 && m == MAY) // Maharashtra Day
          || (d == 2 && m == MAY) // Buddha Pournima
          || (d == 9 && m == NOVEMBER) // Laxmi Puja
          || (d == 21 && m == DECEMBER))) // Bakri Id (again)
        false
      else if ((y == 2008) &&
        ((d == 6 && m == MARCH) // Mahashivratri
          || (d == 20 && m == MARCH) // Id-E-Milad
          || (d == 18 && m == APRIL) // Mahavir Jayanti
          || (d == 1 && m == MAY) // Maharashtra Day
          || (d == 19 && m == MAY) // Buddha Pournima
          || (d == 3 && m == SEPTEMBER) // Ganesh Chaturthi
          || (d == 2 && m == OCTOBER) // Ramzan Id
          || (d == 9 && m == OCTOBER) // Dasara
          || (d == 28 && m == OCTOBER) // Laxmi Puja
          || (d == 30 && m == OCTOBER) // Bhau bhij
          || (d == 13 && m == NOVEMBER) // Gurunanak Jayanti
          || (d == 9 && m == DECEMBER))) // Bakri Id
        false
      else true
    }
  }

}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/time/calendars/India.scala | Scala | apache-2.0 | 5,271 |
/**
* Created by tgo on 24.06.2014.
*/
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
class SongTest {

  /** Minimal ScalaTest suite, runnable under JUnit via the JUnitRunner. */
  @RunWith(classOf[JUnitRunner])
  class MyNumber extends FunSuite {
    test("add one and one"){
      // Fixed: previously asserted 1 + 2 === 2, which can never hold;
      // the test name says "add one and one", i.e. 1 + 1 == 2.
      assert(1 + 1 === 2, "Did not match")
    }
  }
}
| goeckeler/katas | lessons/Learning Scala/src/test/scala/SongTest.scala | Scala | apache-2.0 | 314 |
package cs220.queue
import scala.collection.mutable.ArrayBuffer
/** Minimal FIFO queue of Ints: `put` appends, `get` removes the oldest element. */
abstract class IntQueue {
  def get(): Int
  def put(x: Int): Unit
}
/** Straightforward IntQueue backed by a growable buffer. */
class BasicIntQueue extends IntQueue {
  // FIFO storage; the head of the queue lives at index 0.
  private val elements = new ArrayBuffer[Int]
  def get() = elements.remove(0)
  def put(x: Int): Unit = {
    elements.append(x)
  }
}
// Here is some added functionality:
/** Stackable modification: doubles every value before insertion. */
trait Doubling extends IntQueue {
  abstract override def put(x: Int): Unit = {
    super.put(x * 2)
  }
}
/** Stackable modification: increments every value before insertion. */
trait Incrementing extends IntQueue {
  abstract override def put(x: Int): Unit = {
    super.put(1 + x)
  }
}
/** Stackable modification: silently drops negative values. */
trait Filtering extends IntQueue {
  abstract override def put(x: Int): Unit = {
    if (x >= 0) super.put(x)
  }
}
/*
// You can then create variants on a base queue:
> val q1 = new BasicIntQueue with Doubling
> val q2 = new BasicIntQueue with Incrementing
> val q3 = new BasicIntQueue with Filtering
// Or even combine different traits:
> val q4 = new BasicIntQueue with Doubling with Incrementing
> val q5 = new BasicIntQueue with Doubling with Incrementing with Filtering
// What about this:
> val q6 = new BasicIntQueue with Incrementing with Doubling with Filtering
////////////////////////////////////////////////////////////////////
// i-clicker:
////////////////////////////////////////////////////////////////////
// Does ordering matter? What do you think?
//
// A) Yes
// B) No
//
////////////////////////////////////////////////////////////////////
*/ | umass-cs-220/week-09-libraries | code/traits/src/main/scala/cs220/queue/Queue.scala | Scala | apache-2.0 | 1,408 |
package io.ddf.jdbc.content
| ddf-project/ddf-jdbc | jdbc/src/main/scala/io/ddf/jdbc/content/Representations.scala | Scala | apache-2.0 | 31 |
package org.freetrm.eventstore.utils
/**
* A sys.exit that doesn't print out all this information can be very painful to track down.
*/
object SystemExit {
  /**
   * Prints the message (and optional stack trace) plus the call site,
   * flushes both standard streams, then terminates the JVM with `status`.
   */
  def apply(status: Int, message: String, t: Throwable = null): Nothing = {
    System.err.println(message)
    if (t != null) t.printStackTrace()
    // Stack frame 1 is our caller (frame 0 is this method).
    val caller = new Throwable().getStackTrace()(1)
    System.out.println(s"Exiting. Called from $caller")
    System.out.flush()
    System.err.flush()
    sys.exit(status)
  }
}
| freetrm/eventstore | base/src/org/freetrm/eventstore/utils/SystemExit.scala | Scala | apache-2.0 | 473 |
package com.dataintuitive.luciuscore.utilities
import org.scalatest.flatspec.AnyFlatSpec
class SignedStringTest extends AnyFlatSpec {

  info("Test String extensions")

  // Raw string literal carrying a leading sign character.
  val aString = "-aString"
  // Explicitly constructed wrapper around the same literal.
  val extraStringExpl = new SignedString("-aString")

  // NOTE(review): the spec names below read "remove return ..." — likely
  // leftover wording; they are runtime strings, so left untouched here.
  "Explicit creation of ExtraString" should "work" in {
    assert(extraStringExpl.string === aString)
  }

  "abs on ExtraString" should "remove return the string with trailing - removed" in {
    assert(extraStringExpl.abs === "aString")
  }

  "sign on ExtraString" should "remove return the sign" in {
    assert(extraStringExpl.sign === "-")
  }
}
| data-intuitive/LuciusCore | src/test/scala/com/dataintuitive/luciuscore/utilities/SignedStringTest.scala | Scala | apache-2.0 | 608 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.mapalgebra.binarymath
import java.awt.image.DataBuffer
import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.mapalgebra.{MapOp, MapOpRegistrar}
object XOrMapOp {
  // Map-algebra tokens this operation is registered under.
  override def register: Array[String] = {
    Array[String]("xor", "^")
  }

  /** raster XOR constant. */
  def create(raster:RasterMapOp, const:Double):MapOp = {
    new XOrMapOp(Some(raster), Some(const))
  }

  /** raster XOR raster. */
  def create(rasterA:RasterMapOp, rasterB:RasterMapOp):MapOp = {
    new XOrMapOp(Some(rasterA), Some(rasterB))
  }

  // Parser entry point: builds the op from an expression-tree node.
  override def apply(node:ParserNode, variables: String => Option[ParserNode]): MapOp =
    new XOrMapOp(node, variables)
}
class XOrMapOp extends RawBinaryMathMapOp {

  /**
   * Builds the op from a first raster operand plus a second operand that may
   * be either another raster or a numeric constant (all numeric types are
   * normalised to Double).
   */
  private[binarymath] def this(raster: Option[RasterMapOp], paramB: Option[Any]) = {
    this()
    varA = raster
    paramB match {
      case Some(r: RasterMapOp) => varB = Some(r)
      case Some(d: Double) => constB = Some(d)
      case Some(i: Int) => constB = Some(i.toDouble)
      case Some(l: Long) => constB = Some(l.toDouble)
      case Some(f: Float) => constB = Some(f.toDouble)
      case Some(s: Short) => constB = Some(s.toDouble)
      case _ => throw new ParserException("Second term \"" + paramB + "\" is not a raster or constant")
    }
  }

  private[binarymath] def this(node: ParserNode, variables: String => Option[ParserNode]) = {
    this()
    initialize(node, variables)
  }

  /** Logical XOR: 1 when exactly one operand is non-zero (within EPSILON), else 0. */
  override private[binarymath] def function(a: Double, b: Double): Double = {
    val aNonZero = a < -RasterMapOp.EPSILON || a > RasterMapOp.EPSILON
    val bNonZero = b < -RasterMapOp.EPSILON || b > RasterMapOp.EPSILON
    if (aNonZero == bNonZero) 0 else 1
  }

  // The 0/1 output fits in a byte raster; 255 marks nodata.
  override private[binarymath] def datatype(): Int = DataBuffer.TYPE_BYTE

  override private[binarymath] def nodata(): Double = 255
}
| akarmas/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-rastermath/src/main/scala/org/mrgeo/mapalgebra/binarymath/XOrMapOp.scala | Scala | apache-2.0 | 2,473 |
package spire.example
import spire.implicits._
import spire.math._
import scala.annotation.tailrec
import scala.collection.IterableLike
import scala.collection.mutable.{Builder, GrowingBuilder, MapBuilder}
import scala.collection.generic.CanBuildFrom
/**
 * Some tools for simplifying decimal expressions, and playing around
 * with numbers.
 *
 * There are five modes:
 *
 * nrat: print the nth rational, according to diagonalization
 * rats: print the first n rationals, according to diagonalization
 * nprime: print the nth prime
 * primes: print the first n primes
 * snap: given y, look for solutions to y = nroot(x, k) / d
 */
object Simplification {

  /** Command-line entry point; dispatches on the first argument. */
  def main(args: Array[String]) {
    if (args.isEmpty) {
      println("usage: %s [nrat | rats | nprime | primes | snap] [number]")
    } else {
      args(0) match {
        case "nrat" =>
          val n = if (args.length == 1) 10 else args(1).toInt
          val r: Rational = rationals.drop(n - 1).head
          println("rational %d is %s" format (n, r.toString))
        case "rats" =>
          val n = if (args.length == 1) 10 else args(1).toInt
          rationals.take(n).foreach(r => print(r.toString + ", "))
          println("...")
        case "nprime" =>
          val n = if (args.length == 1) 10 else args(1).toInt
          val p: Int = primes.drop(n - 1).head
          // NOTE(review): message says "rational" but prints a prime —
          // looks like a copy/paste leftover in the format string.
          println("rational %d is %s" format (n, p.toString))
        case "primes" =>
          val n = if (args.length == 1) 10 else args(1).toInt
          primes.take(n).foreach(p => print(p.toString + ", "))
          println("...")
        case "snap" =>
          // Default input is sqrt(2) as a Double.
          val n = if (args.length == 1) 1.4142135623730951 else args(1).toDouble
          val (base, k, div) = snap(n)
          println("%s =~ nroot(%s, %s) / %s" format (n, base, k, div))
      }
    }
  }

  /**
   * Using Cantor's diagonalization method, create an infinite stream
   * of all rational numbers.
   *
   * This stream will only be able to generate the first
   * 42,535,295,865,117,307,928,310,139,910,543,638,528 values, so it
   * is not really infinite. Even so, it's unlikely that a user will
   * be able to generate this many values.
   */
  val rationals: BigStream[Rational] = {
    // Walks one anti-diagonal (n + d == i) at a time; `next` skips
    // non-reduced fractions, `loop` exists only to break tail recursion
    // out of the lazily-evaluated stream tail.
    @tailrec
    def next(i: Long, n: Long, d: Long): BigStream[Rational] = {
      if (n == 0L) {
        // Diagonal exhausted: start the next one.
        next(i + 1L, i, 1L)
      } else {
        val r = Rational(n, d)
        // Only emit fractions already in lowest terms (numerator
        // unchanged by Rational's normalisation) to avoid duplicates.
        if (n == r.numeratorAsLong) {
          // Emit the positive value followed by its negation.
          new BigCons(r, new BigCons(-r, loop(i, n - 1L, d + 1L)))
        } else {
          next(i, n - 1L, d + 1L)
        }
      }
    }
    def loop(i: Long, n: Long, d: Long): BigStream[Rational] = next(i, n, d)
    Rational.zero #:: loop(2L, 1L, 1L)
  }

  /**
   * Naive prime stream. For each odd number, this method tries
   * dividing by all previous primes <= sqrt(n).
   *
   * There are a lot of ways to improve this. For now it's a toy.
   * It can generate the millionth prime in ~9s on my computer.
   */
  val primes: Stream[Int] = {
    // `stream` is the list of already-known primes being tested against n.
    @tailrec
    def next(n: Int, stream: Stream[Int]): Stream[Int] =
      if (stream.isEmpty || (stream.head ** 2) > n)
        n #:: loop(n + 2, primes)          // no divisor found: n is prime
      else if (n % stream.head == 0)
        next(n + 2, primes)                // composite: try the next odd number
      else
        next(n, stream.tail)               // try the next known prime divisor
    def loop(n: Int, stream: Stream[Int]): Stream[Int] = next(n, stream)
    2 #:: loop(3, primes)
  }

  /**
   * Given a Double y, look for whole numbers x, k, and d such that:
   *
   *   y = nroot(x, k) / d
   *
   * The limit (default: 10) describes the largest root (and divisor)
   * that will be checked. The epsilon (default: 0.00000000001)
   * describes the maximum distance we can shift the value to find an
   * "exact" match.
   */
  def snap(n: Double, limit: Int = 10, epsilon: Double = 0.00000000001): (Double, Int, Int) = {
    // Scans (exponent, divisor) pairs; returns (n, 1, 1) when no match
    // is found within `limit`.
    @tailrec
    def loop(i: Int, ex: Int, div: Int): (Double, Int, Int) = {
      if (i >= limit) {
        (n, 1, 1)
      } else if (div < 1) {
        loop(i + 1, 1, i + 1)
      } else {
        val x = math.pow(n * div, ex)
        // Distance from x to the nearest whole number.
        val m = x % 1.0
        val d = if (m < 0.5) m else m - 1.0
        if (math.abs(d) < epsilon) {
          (x - m, ex, div)
        } else {
          loop(i, ex + 1, div - 1)
        }
      }
    }
    if (n < 0.0) {
      // Handle the sign via the divisor so the root base stays positive.
      val (x, k, div) = snap(-n, limit, epsilon)
      (x, k, -div)
    } else {
      loop(1, 1, 1)
    }
  }
}
/**
* BigStream is a non-memoizing stream.
*
* It's similar to Scala's Stream[A] except that it won't exhaust your
* memory for very large streams. This makes it useful for situations
* where re-computing the stream is preferrable to trying to store
* all the results in memory for next time.
*/
object BigStream {

  /** The empty stream. */
  def empty[A]: BigStream[A] = BigNil[A]()

  // Enables the `a #:: rest` cons syntax with a lazily-evaluated tail.
  implicit class Wrapper[A](t: => BigStream[A]) {
    def #::(a: A): BigStream[A] = new BigCons(a, t)
  }

  /**
   * Builder used by collection operations that must materialise a result.
   * Elements are accumulated in reverse; the final fold restores order.
   */
  def newBuilder[A]: Builder[A, BigStream[A]] =
    new Builder[A, BigStream[A]] {
      private var elems: List[A] = Nil
      def +=(a: A): this.type = {
        elems = a :: elems
        this
      }
      def clear(): Unit = elems = Nil
      def result: BigStream[A] =
        elems.foldLeft(BigStream.empty[A])((t, a) => new BigCons(a, t))
    }

  // Lets standard transformations (map, filter, ...) rebuild a BigStream
  // under the Scala 2.12 collections framework.
  implicit def canBuildFrom[A]: CanBuildFrom[Iterable[A], A, BigStream[A]] =
    new CanBuildFrom[Iterable[A], A, BigStream[A]] {
      def apply(from: Iterable[A]) = newBuilder[A]
      def apply() = newBuilder[A]
    }
}
trait BigStream[A] extends Iterable[A] with IterableLike[A, BigStream[A]] { self =>

  /** First n elements (all of them when n exceeds the length). */
  override def take(n: Int): BigStream[A] =
    if (isEmpty || n < 1) BigNil() else new BigCons(head, tail.take(n - 1))

  /**
   * Stream remaining after skipping n elements.
   * Fixed: the loop now checks the *current* stream's emptiness
   * (`stream.isEmpty`) rather than the outer stream's, so dropping past
   * the end returns the empty stream instead of calling `tail` on BigNil.
   */
  override def drop(n: Int): BigStream[A] = {
    @tailrec
    def loop(stream: BigStream[A], i: Int): BigStream[A] =
      if (stream.isEmpty || i < 1) stream else loop(stream.tail, i - 1)
    loop(this, n)
  }

  /** One-shot iterator over the stream's elements. */
  def iterator: Iterator[A] = new Iterator[A] {
    var stream = self
    def hasNext: Boolean = !stream.isEmpty
    def next: A = if (stream.isEmpty) {
      throw new NoSuchElementException
    } else {
      val a = stream.head
      stream = stream.tail
      a
    }
  }

  // Tail-recursive traversal; stack-safe on long (finite) streams.
  override def foreach[U](f: A => U) {
    @tailrec
    def loop(stream: BigStream[A]): Unit = if (!stream.isEmpty) {
      f(stream.head)
      loop(stream.tail)
    }
    loop(this)
  }

  override def newBuilder: Builder[A, BigStream[A]] =
    BigStream.newBuilder[A]
}
/** Non-empty stream cell: a strict head plus a by-name (lazy) tail. */
class BigCons[A](override val head: A, t: => BigStream[A]) extends BigStream[A] {
  override def tail: BigStream[A] = t
  override def isEmpty = false
  override def toString: String = "BigStream(%s, ...)" format head.toString
  // Structural equality. Fixed: the original skipped the head comparison
  // (`!s.isEmpty && tail == s.tail`), so streams differing only in their
  // first element compared equal. Note: comparing tails forces them, so
  // equality on infinite streams does not terminate (as before).
  override def equals(rhs: Any): Boolean = rhs match {
    case s: BigStream[_] => !s.isEmpty && head == s.head && tail == s.tail
    case _ => false
  }
  // Keep hashCode consistent with equals without forcing the tail.
  override def hashCode: Int = if (head == null) 0 else head.hashCode
}
/** The empty stream; head and tail are undefined and fail fast. */
case class BigNil[A]() extends BigStream[A] {
  override def head: A = sys.error("head on nil")
  override def tail: BigStream[A] = sys.error("tail on nil")
  override def isEmpty = true
  override def toString: String = "BigStream()"
}
| lrytz/spire | examples/src/main/scala/spire/example/simplification.scala | Scala | mit | 6,912 |
package reopp.common.examples
import choco.kernel.model.variables.integer.IntegerExpressionVariable
import choco.Choco
import z3.scala.{Z3AST, Z3Context}
import reopp.common.IntPredicate
/** Example of an [[reopp.common.IntPredicate]]. */
class Even extends IntPredicate {
  // Choco constraint form: x mod 2 == 0.
  val choPred = (x: IntegerExpressionVariable) => Choco.eq(Choco.mod(x, 2), 0)
  // Z3 constraint form: v mod 2 == 0, built over the integer sort.
  val z3Pred = (z:Z3Context,v:Z3AST) => z.mkEq(z.mkMod(v,z.mkInt(2,z.mkIntSort())),z.mkInt(0,z.mkIntSort()))
  // Plain Scala predicate for direct evaluation.
  val funPred = (x: Int) => x % 2 == 0
  override def toString = "Even"
}
| joseproenca/ip-constraints | code/src/main/scala/reopp/common/examples/Even.scala | Scala | mit | 543 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.annotators.spell.norvig
import org.apache.spark.ml.param.{BooleanParam, IntParam, Params}
/** These are the configs for the NorvigSweeting model
*
* See [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/spell/norvig/NorvigSweetingTestSpec.scala]] for further reference on how to use this API
*
* @groupname anno Annotator types
* @groupdesc anno Required input and expected output annotator types
* @groupname Ungrouped Members
* @groupname param Parameters
* @groupname setParam Parameter setters
* @groupname getParam Parameter getters
* @groupname Ungrouped Members
* @groupprio param 1
* @groupprio anno 2
* @groupprio Ungrouped 3
* @groupprio setParam 4
* @groupprio getParam 5
* @groupdesc param A list of (hyper-)parameter keys this annotator can take. Users can set and get the parameter values through setters and getters, respectively.
**/
trait NorvigSweetingParams extends Params {

  /** Sensitivity on spell checking (Default: `true`). Might affect accuracy
    *
    * @group param
    **/
  val caseSensitive = new BooleanParam(this, "caseSensitive", "sensitivity on spell checking")

  /** Increase search at cost of performance (Default: `false`). Enables extra check for word combinations, More accuracy at performance
    *
    * @group param
    **/
  val doubleVariants = new BooleanParam(this, "doubleVariants", "increase search at cost of performance")

  /** Increase performance at cost of accuracy (Default: `false`). Faster but less accurate mode
    *
    * @group param
    **/
  val shortCircuit = new BooleanParam(this, "shortCircuit", "increase performance at cost of accuracy")

  /** Applies frequency over hamming in intersections (Default: `true`). When false hamming takes priority
    *
    * @group param
    **/
  val frequencyPriority = new BooleanParam(this, "frequencyPriority", "applies frequency over hamming in intersections. When false hamming takes priority")

  /** Minimum size of word before ignoring (Default: `3`). Minimum size of word before moving on.
    *
    * @group param
    **/
  val wordSizeIgnore = new IntParam(this, "wordSizeIgnore", "minimum size of word before ignoring. Defaults to 3")

  /** Maximum duplicate of characters in a word to consider (Default: `2`). Maximum duplicate of characters to account for.
    *
    * @group param
    **/
  val dupsLimit = new IntParam(this, "dupsLimit", "maximum duplicate of characters in a word to consider. Defaults to 2")

  /** Word reduction limit (Default: `3`).
    *
    * @group param
    **/
  val reductLimit = new IntParam(this, "reductLimit", "word reductions limit. Defaults to 3")

  /** Hamming intersections to attempt (Default: `10`).
    *
    * @group param
    **/
  val intersections = new IntParam(this, "intersections", "hamming intersections to attempt. Defaults to 10")

  /** Vowel swap attempts (Default: `6`).
    *
    * @group param
    **/
  val vowelSwapLimit = new IntParam(this, "vowelSwapLimit", "vowel swap attempts. Defaults to 6")

  /** Sensitivity on spell checking (Default: `true`). Might affect accuracy
    *
    * @group setParam
    **/
  def setCaseSensitive(value: Boolean): this.type = set(caseSensitive, value)

  /** Increase search at cost of performance (Default: `false`). Enables extra check for word combinations
    *
    * @group setParam
    **/
  def setDoubleVariants(value: Boolean): this.type = set(doubleVariants, value)

  /** Increase performance at cost of accuracy (Default: `false`). Faster but less accurate mode
    *
    * @group setParam
    **/
  def setShortCircuit(value: Boolean): this.type = set(shortCircuit, value)

  /** Applies frequency over hamming in intersections (Default: `true`). When false hamming takes priority
    *
    * @group setParam
    **/
  def setFrequencyPriority(value: Boolean): this.type = set(frequencyPriority, value)

  /** Minimum size of word before ignoring (Default: `3`). Minimum size of word before moving on.
    *
    * @group setParam
    **/
  def setWordSizeIgnore(value: Int): this.type = set(wordSizeIgnore, value)

  /** Maximum duplicate of characters in a word to consider (Default: `2`). Maximum duplicate of characters to account for.
    *
    * @group setParam
    **/
  def setDupsLimit(value: Int): this.type = set(dupsLimit, value)

  /** Word reduction limit (Default: `3`).
    *
    * @group setParam
    **/
  def setReductLimit(value: Int): this.type = set(reductLimit, value)

  /** Hamming intersections to attempt (Default: `10`).
    *
    * @group setParam
    **/
  def setIntersections(value: Int): this.type = set(intersections, value)

  /** Vowel swap attempts (Default: `6`).
    *
    * @group setParam
    **/
  def setVowelSwapLimit(value: Int): this.type = set(vowelSwapLimit, value)

  /** Sensitivity on spell checking (Default: `true`). Might affect accuracy
    *
    * @group getParam
    **/
  def getCaseSensitive: Boolean = $(caseSensitive)

  /** Increase search at cost of performance (Default: `false`). Enables extra check for word combinations
    *
    * @group getParam
    **/
  def getDoubleVariants: Boolean = $(doubleVariants)

  /** Increase performance at cost of accuracy (Default: `false`). Faster but less accurate mode
    *
    * @group getParam
    **/
  def getShortCircuit: Boolean = $(shortCircuit)

  /** Applies frequency over hamming in intersections (Default: `true`). When false hamming takes priority
    *
    * @group getParam
    **/
  def getFrequencyPriority: Boolean = $(frequencyPriority)

  /** Minimum size of word before ignoring (Default: `3`). Minimum size of word before moving on.
    *
    * @group getParam
    **/
  def getWordSizeIgnore: Int = $(wordSizeIgnore)

  /** Maximum duplicate of characters in a word to consider (Default: `2`). Maximum duplicate of characters to account for.
    *
    * @group getParam
    **/
  def getDupsLimit: Int = $(dupsLimit)

  /** Word reduction limit (Default: `3`).
    *
    * @group getParam
    **/
  def getReductLimit: Int = $(reductLimit)

  /** Hamming intersections to attempt (Default: `10`).
    *
    * @group getParam
    **/
  def getIntersections: Int = $(intersections)

  /** Vowel swap attempts (Default: `6`).
    *
    * @group getParam
    **/
  def getVowelSwapLimit: Int = $(vowelSwapLimit)

}
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/nlp/annotators/spell/norvig/NorvigSweetingParams.scala | Scala | apache-2.0 | 6,974 |
package com.shocktrade.daycycle.routes
import com.shocktrade.server.common.TradingClock
import com.shocktrade.server.concurrent.Daemon
import com.shocktrade.server.concurrent.Daemon.DaemonRef
import io.scalajs.nodejs._
import io.scalajs.npm.express.{Application, Request, Response}
import io.scalajs.npm.mongodb.Db
import scala.concurrent.{ExecutionContext, Future}
import scala.scalajs.js
import scala.scalajs.js.JSConverters._
/**
* Daemon Routes
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
object DaemonRoutes {

  /**
   * Registers the daemon REST endpoints on the given Express application.
   * `daemons` supplies the processes that can be inspected and controlled.
   */
  def init[T](app: Application, daemons: Seq[DaemonRef[T]])(implicit ec: ExecutionContext, dbFuture: Future[Db]) = {
    val clock = new TradingClock()
    // JSON-friendly snapshots, keyed by daemon name (used in responses).
    val daemonDict = js.Dictionary(daemons.map(d => d.name -> new DaemonJs(d.name, d.delay, d.frequency)): _*)
    // Live daemon references, keyed by name (used for control actions).
    val daemonMap = Map(daemons.map(d => d.name -> d): _*)

    // individual objects
    app.get("/api/daemon/:id", (request: Request, response: Response, next: NextFunction) => daemonById(request, response, next))
    app.get("/api/daemon/:id/pause", (request: Request, response: Response, next: NextFunction) => pauseDaemon(request, response, next))
    app.get("/api/daemon/:id/resume", (request: Request, response: Response, next: NextFunction) => resumeDaemon(request, response, next))
    app.get("/api/daemon/:id/start", (request: Request, response: Response, next: NextFunction) => startDaemon(request, response, next))

    // collections
    app.get("/api/daemons", (request: Request, response: Response, next: NextFunction) => listDaemons(request, response, next))

    //////////////////////////////////////////////////////////////////////////////////////
    //      API Methods
    //////////////////////////////////////////////////////////////////////////////////////

    /**
      * Returns a daemon by its unique identifier
      */
    def daemonById(request: Request, response: Response, next: NextFunction) = {
      val id = request.params.apply("id")
      daemonDict.get(id) match {
        case Some(daemon) => response.send(daemon)
        case None => response.notFound(id)
      }
      next()
    }

    /**
      * Returns the list of configured daemons
      */
    def listDaemons(request: Request, response: Response, next: NextFunction) = {
      response.send(daemonDict.values.toJSArray)
      next()
    }

    /**
      * Pauses the daemon process, if running.
      * NOTE(review): only reports status — the Daemon call is commented
      * out, so no daemon state actually changes here.
      */
    def pauseDaemon(request: Request, response: Response, next: NextFunction) = {
      val id = request.params.apply("id")
      daemonMap.get(id) match {
        case Some(daemon) =>
          //Daemon.run(clock, daemon)
          response.send(s"${daemon.name} paused")
        case None => response.notFound(id)
      }
      next()
    }

    /**
      * Resume the daemon process, if paused.
      * NOTE(review): only reports status — the Daemon call is commented
      * out, so no daemon state actually changes here.
      */
    def resumeDaemon(request: Request, response: Response, next: NextFunction) = {
      val id = request.params.apply("id")
      daemonMap.get(id) match {
        case Some(daemon) =>
          //Daemon.run(clock, daemon)
          response.send(s"${daemon.name} resumed")
        case None => response.notFound(id)
      }
      next()
    }

    /**
      * Starts the daemon process
      */
    def startDaemon(request: Request, response: Response, next: NextFunction) = {
      val id = request.params.apply("id")
      daemonMap.get(id) match {
        case Some(daemon) =>
          Daemon.start(clock, daemon)
          response.send(s"${daemon.name} started")
        case None => response.notFound(id)
      }
      next()
    }
  }

  /**
    * The JSON representation of a daemon reference
    * @param name      the name of the daemon
    * @param delay     the initial delay before the process runs
    * @param frequency the frequency/interval of the process
    */
  class DaemonJs(val name: String, val delay: Int, val frequency: Int) extends js.Object

}
| ldaniels528/shocktrade.js | app/server/daycycle/src/main/scala/com/shocktrade/daycycle/routes/DaemonRoutes.scala | Scala | apache-2.0 | 3,895 |
/**
* java.awt.Rectangle类有两个很有用的方法translate和grow,但可惜的是像java.awt.geom.Ellipse2D这样的类没有。
* 在Scala中,你可以解决掉这个问题。定义一个RenctangleLike特质,加入具体的translate和grow方法。
* 提供任何你需要用来实现的抽象方法,以便你可以像如下代码这样混入该特质:
val egg = new java.awt.geom.Ellipse2D.Double(5,10,20,30) with RectangleLike
egg.translate(10,-10)
egg.grow(10,20)
*/
import java.awt.geom.Ellipse2D
/**
 * Adds java.awt.Rectangle-style `translate` and `grow` operations to any
 * Ellipse2D.Double, as the exercise requires.
 *
 * Fixed: `translate` previously *assigned* absolute coordinates instead of
 * shifting by the given deltas, and `grow` shifted the position instead of
 * growing the extent. Both now mirror java.awt.Rectangle's semantics.
 */
trait RectangleLike{
  this:Ellipse2D.Double=>

  /** Moves the shape by (x, y), like Rectangle.translate(dx, dy). */
  def translate(x:Double,y:Double){
    this.x += x
    this.y += y
  }

  /**
   * Expands the shape by x horizontally and y vertically on *each* side,
   * like Rectangle.grow(h, v): the center stays fixed.
   */
  def grow(x:Double,y:Double){
    this.x -= x
    this.y -= y
    this.width += 2 * x
    this.height += 2 * y
  }
}
/** Small demo driver printing the shape's origin after each mutation. */
object Test extends App{
  val egg = new Ellipse2D.Double(5,10,20,30) with RectangleLike
  private def report(): Unit = println("x = " + egg.getX + " y = " + egg.getY)
  report()
  egg.translate(10,-10)
  report()
  egg.grow(10,20)
  report()
}
} | vernonzheng/scala-for-the-Impatient | src/Chapter10/exercise01.scala | Scala | mit | 996 |
package com.tecniplast.canopen
import akka.actor._
import com.tecniplast.canopen.mailboxes.SDO_PrioMailbox
import com.typesafe.config._
import com.tecniplast.device.CanDevice._
import CanOpenMessages._
import com.tecniplast.canopen.CanOpenDispatcher._
/*
* This actor register ActorRef consumers with type and dipatch messages
* "per type"
*
*/
object CanOpenDispatcher {
  // Requests used to look up the child manager actors...
  case class GetPDOManager()
  case class GetSDOManager()
  case class GetNMTManager()
  // ...and the replies carrying their ActorRefs.
  case class RefPDOManager(ref: ActorRef)
  case class RefSDOManager(ref: ActorRef)
  case class RefNMTManager(ref: ActorRef)
}
/**
 * Routes raw CAN frames to/from the PDO, SDO and NMT manager children,
 * decoding incoming frames into their CANopen message family first.
 */
class CanOpenDispatcher(
          pdo_manager_type: Props,
          sdo_manager_type: Props,
          nmt_manager_type: Props
          ) extends Actor with ActorLogging{

  import scala.util.control.NonFatal

  // Verbosity flags read once from canopen.conf; a missing or invalid key
  // simply disables the extra logging. Fixed: only non-fatal failures are
  // swallowed now — the original caught Throwable, which would also hide
  // fatal errors such as OutOfMemoryError.
  val receive_verbose =
    try {
      ConfigFactory.load("canopen.conf").getBoolean("dispatcher.receive.verbose")
    } catch {
      case NonFatal(_) => false
    }

  val send_verbose =
    try {
      ConfigFactory.load("canopen.conf").getBoolean("dispatcher.send.verbose")
    } catch {
      case NonFatal(_) => false
    }

  // One child manager per CANopen message family; PDO/SDO children use
  // dedicated priority mailboxes (configured dispatchers).
  val pdo_manager =
    context.actorOf(pdo_manager_type.withDispatcher("pdo-prio-dispatcher"),"pdo_manager")
  val sdo_manager =
    context.actorOf(sdo_manager_type.withDispatcher("sdo-prio-dispatcher"),"sdo_manager")
  val nmt_manager =
    context.actorOf(nmt_manager_type,"nmt_manager")

  /** Formats a raw CAN payload as a hex byte list, e.g. [0A,FF]. */
  def printMsg(arr: Array[Byte]): String = {
    arr.map(a => get2DigitsHex(a)).mkString("[",",","]")
  }

  def receive = {
    managersGetter orElse {
      case msg: SendCanOpenMessage =>
        if (send_verbose)
          println(new java.util.Date+" "+self.path+" sending CAN RAW "+get4DigitsHex(msg.toCan.id)+" "+printMsg(msg.toCan.msg)+" "+msg.toCan.flags)
        context.parent ! msg.toCan
      case CanMsgReceived(id, msg, flags) =>
        if (receive_verbose)
          println(new java.util.Date+" "+self.path+" received CAN RAW "+get4DigitsHex(id)+" "+printMsg(msg)+" "+flags)
        // Decode the raw frame and route it to the matching manager.
        // SYNC/EMERGENCY/TIMESTAMP frames are recognised but ignored.
        RecivedCanOpenMessage(id,msg,flags) match {
          case tpdo : ReceivedTPDO =>
            pdo_manager ! tpdo
          case rpdo : ReceivedRPDO =>
            pdo_manager ! rpdo
          case sdo : ReceivedSDO =>
            sdo_manager ! sdo
          case nmt : ReceivedNMT =>
            nmt_manager ! nmt
          case sync : ReceivedSYNC =>
          case emergency: ReceivedEMERGENCY =>
          case timestamp: ReceivedTIMESTAMP =>
          case any => log.warning("unknown type!!!")
        }
      case CanClose =>
        // Propagate shutdown upstream, stop all children, then ourselves.
        context.parent ! CanClose
        context.children.foreach(c => context.stop(c))
        context.stop(self)
    }
  }

  /** Replies with references to the child managers on request. */
  def managersGetter: Receive = {
    case x: GetPDOManager =>
      sender ! RefPDOManager(pdo_manager)
    case x: GetSDOManager =>
      sender ! RefSDOManager(sdo_manager)
    case x: GetNMTManager =>
      sender ! RefNMTManager(nmt_manager)
  }
}
} | TPTeam/CanOpen_Scala | src/main/scala/com/tecniplast/canopen/CanOpenDispatcher.scala | Scala | lgpl-2.1 | 2,846 |
package edu.gemini.spModel.gemini.nifs
import InstNIFS.calcWavelength
import NIFSParams.Disperser
import NIFSParams.Filter
import org.junit.Test
import org.junit.Assert._
/**
* Test cases for the observing wavelength calculation.
*/
class ObsWavelengthCalcTest {

  // With the mirror (no dispersion) the filter's nominal wavelength is
  // used regardless of the central wavelength argument.
  @Test def testDisperserNone() {
    Filter.values.foreach(f =>
      assertEquals(f.getWavelength, calcWavelength(Disperser.MIRROR, f, 42.0))
    )
  }

  // With any real disperser the central wavelength argument wins.
  @Test def testDisperserSome() {
    Disperser.values.filter(Disperser.MIRROR.!=).foreach(d =>
      assertEquals("42.0", calcWavelength(d, Filter.JH_FILTER, 42.0))
    )
  }

  // This is an odd case that needs to be defined:
  // mirror + SAME_AS_DISPERSER yields no wavelength at all.
  @Test def testSameAsMirror() {
    val same = Filter.SAME_AS_DISPERSER
    assertNull(calcWavelength(Disperser.MIRROR, same, 42.0))
  }

  // Reject disperser lambda of 0
  @Test def testDisperserLambda0() {
    assertNull(calcWavelength(Disperser.K, Filter.JH_FILTER, 0.0))
  }
}
package rml.args.arg.restriction
trait Restriction | rml/scala_args | src/main/scala/rml/args/arg/restriction/Restriction.scala | Scala | gpl-3.0 | 51 |
package io.youi.paint
import io.youi.Compass
import io.youi.drawable.Context
import io.youi.path.Path
//TODO: Make Border a glorified Path
trait Border {
  /** Paint used to render the border outline. */
  def paint: Paint
  /** Border thickness on the given side. */
  def size(compass: Compass): Double
  /** Traces the border's interior shape and fills it with `fill`. */
  def background(width: Double, height: Double, context: Context, fill: Paint): Unit
  /** Strokes the border outline into the drawing context. */
  def draw(width: Double, height: Double, context: Context): Unit
  /** Total horizontal space consumed (west + east sides). */
  def width: Double = size(Compass.West) + size(Compass.East)
  /** Total vertical space consumed (north + south sides). */
  def height: Double = size(Compass.North) + size(Compass.South)
}
object Border {
  /** Shared no-op border: no stroke, square corners. */
  lazy val empty: Border = RectangleBorder(Stroke.none, 0.0)

  def apply(stroke: Stroke, radius: Double = 0.0): RectangleBorder = RectangleBorder(stroke, radius)
}
// TODO: CompoundBorder
// TODO: PaddingBorder
/**
 * Border drawn as a (possibly rounded) rectangle with a single stroke.
 * Refactored: `background` and `draw` previously duplicated the entire
 * path-tracing logic; it now lives in one private helper.
 */
case class RectangleBorder(stroke: Stroke, radius: Double) extends Border {
  override def paint: Paint = stroke.paint
  override def size(compass: Compass): Double = stroke.lineWidth

  // Traces the border rectangle into the context, inset by the stroke
  // width and using rounded corners when radius > 0.
  private def trace(width: Double, height: Double, context: Context): Unit = {
    if (stroke.lineWidth == 0.0 && radius == 0.0) {
      context.rect(0.0, 0.0, width, height)
    } else {
      val sizeAdjust = stroke.lineWidth
      if (radius == 0.0) {
        context.rect(Path.fix(sizeAdjust), Path.fix(sizeAdjust), Path.fix(width - sizeAdjust) - 1.0, Path.fix(height - sizeAdjust) - 1.0)
      } else {
        context.roundedRect(Path.fix(sizeAdjust), Path.fix(sizeAdjust), Path.fix(width - sizeAdjust) - 1.0, Path.fix(height - sizeAdjust) - 1.0, radius)
      }
    }
  }

  /** Fills the border's interior with `fill` (no-op when fill is empty). */
  override def background(width: Double, height: Double, context: Context, fill: Paint): Unit = if (fill.nonEmpty) {
    trace(width, height, context)
    context.fill(fill, apply = true)
  }

  /** Strokes the border outline (no-op when the stroke is empty). */
  override def draw(width: Double, height: Double, context: Context): Unit = if (stroke.nonEmpty) {
    trace(width, height, context)
    context.stroke(stroke, apply = true)
  }
}
package controllers
import jp.t2v.lab.play2.auth.AuthElement
import models.User
import play.api.data.Form
import play.api.data.Forms._
import play.api.libs.json.Json
import play.api.mvc._
class Application extends Controller with AuthElement with AuthConfigImpl {

  // Binds the "user-id"/"password" form fields to an authenticated User.
  // The verifying clause rejects the submission when User.authenticate
  // yields None (bad credentials).
  val loginForm = Form {
    mapping(
      "user-id" -> text,
      "password" -> text
    )(User.authenticate)(_.flatMap(User.unapply))
      .verifying("Invalid userId or password", result => result.isDefined)
  }

  // Greets the logged-in user with a small JSON payload; the StackAction
  // requires an authenticated session (AuthorityKey -> true).
  def index = StackAction(AuthorityKey -> true) { implicit request =>
    val user = loggedIn
    val msg = s"""{"res": "Hello, ${user.id}."}"""
    val json = Json.parse(msg)
    Ok(json)
  }
}
| kazzna/play-login-practice | app/controllers/Application.scala | Scala | apache-2.0 | 679 |
package lore.compiler.utils
import org.slf4j.LoggerFactory
import org.slf4j.helpers.SubstituteLogger
/**
 * An indentation logger indents its <b>trace</b> messages with the current indentation. This can be used to provide
 * visual clarity to nested relationships.
 *
 * `eventQueue` can be `null` if `createdPostInitialization` is `true`, at least according to the code.
 *
 * @param name the logger name, resolved through the standard [[LoggerFactory]]
 * @param step the number of spaces added per indentation level (default 4)
 */
case class IndentationLogger(name: String, step: Int = 4) extends SubstituteLogger(name, null, true) {
  setDelegate(LoggerFactory.getLogger(name))

  // Current indentation depth in spaces; mutated only through indent()/dedent().
  private var indentation: Int = 0

  /** Increases the indentation of subsequent trace messages by one step. */
  def indent(): Unit = indentation += step

  /** Decreases the indentation of subsequent trace messages by one step. */
  def dedent(): Unit = indentation -= step

  /**
   * Runs `block` one indentation step deeper and returns its result.
   *
   * The dedent now happens in a `finally` block: previously, a `block` that threw an
   * exception left the logger permanently indented for all later messages.
   */
  def indented[R](block: => R): R = {
    indent()
    try {
      block
    } finally {
      dedent()
    }
  }

  // Only the trace(...) overloads are indented, as documented on the class.
  override def trace(msg: String): Unit = super.trace(addIndentation(msg))
  override def trace(format: String, arg: Any): Unit = super.trace(addIndentation(format), arg)
  override def trace(format: String, arg1: Any, arg2: Any): Unit = super.trace(addIndentation(format), arg1, arg2)
  override def trace(format: String, arguments: Object*): Unit = super.trace(addIndentation(format), arguments: _*)
  override def trace(msg: String, t: Throwable): Unit = super.trace(addIndentation(msg), t)

  /** Prefixes `msg` with `indentation` spaces. */
  private def addIndentation(msg: String): String = " ".repeat(indentation) + msg
}
| marcopennekamp/lore | compiler/src/lore/compiler/utils/IndentationLogger.scala | Scala | mit | 1,336 |
package kneelnrise.warp10scala.services
import java.util.UUID
import akka.NotUsed
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, StatusCodes}
import akka.stream.scaladsl.{Flow, Source}
import kneelnrise.warp10scala.model.{Coordinates, GTS, GTSValue, WarpScript}
import spray.json.{JsArray, JsBoolean, JsNumber, JsObject, JsString, JsValue, JsonParser, JsonReader, ParserInput, deserializationError}
import scala.collection.immutable
import scala.collection.immutable.Iterable
import scala.util.{Failure, Success}
object Warp10QueryClient {

  /**
   * Builds a flow that POSTs each [[WarpScript]] to the Warp10 exec endpoint and emits the
   * decoded [[GTS]] series from the response.
   *
   * NOTE(review): `uuid` is generated once per flow *construction*, not per element; the
   * `filter` below therefore correlates responses per flow, not per individual request —
   * confirm that each query materializes its own flow.
   */
  def query(implicit warp10ClientContext: Warp10ClientContext): Flow[WarpScript, GTS, NotUsed] = {
    val uuid = UUID.randomUUID().toString
    Flow[WarpScript]
      .map(createRequest)
      .map(request => request -> uuid)
      .via(warp10ClientContext.poolClientFlow)
      .filter(result => result._2 == uuid) // We ignore results from other requests
      .map(result => result._1)
      .map {
        case Success(httpResponse) => httpResponse
        case Failure(exception) => throw exception
      }
      .via(transformResponse)
      .via(jsonToGTS)
  }

  /** Builds the POST request carrying the serialized WarpScript to the configured exec URL. */
  private[services] def createRequest(warpScript: WarpScript)(implicit warp10ClientContext: Warp10ClientContext) =
    HttpRequest(
      method = HttpMethods.POST,
      uri = warp10ClientContext.configuration.execUrl,
      entity = warpScript.serialize
    )

  /**
   * Drains the response body to a String on HTTP 200; on any other status the body is read
   * and wrapped in a [[Warp10Exception]] that fails the stream.
   */
  private[services] def transformResponse(implicit warp10ClientContext: Warp10ClientContext): Flow[HttpResponse, String, NotUsed] = {
    import warp10ClientContext._
    Flow[HttpResponse]
      .flatMapConcat { httpResponse =>
        if (httpResponse.status == StatusCodes.OK) {
          Source.fromFuture(Warp10CommonClient.readAllDataBytes(httpResponse.entity.dataBytes))
        } else {
          Source.fromFuture(
            Warp10CommonClient
              .readAllDataBytes(httpResponse.entity.dataBytes)
              .map(content => Warp10Exception(httpResponse.status.intValue(), content))
              .map(throw _)
          )
        }
      }
  }

  /**
   * Parses the JSON payload into partial GTS chunks, merges metadata shared through chunk
   * ids, and flattens every datapoint into a standalone [[GTS]].
   */
  private[services] def jsonToGTS: Flow[String, GTS, NotUsed] =
    Flow[String]
      .map(str => JsonParser(ParserInput(str)))
      .map(PartialGTS.rootPartialGTSListReader.read(_).to[immutable.Iterable])
      .map(partialGTSIterable => (extractIdInfo(partialGTSIterable), partialGTSIterable))
      .mapConcat { case (idInfo, partialGTSIterable) => partialGTSIterable.map(partialGTS => (idInfo, partialGTS)) }
      .mapConcat { case (idInfo, partialGTS) => partialGTSToGTSList(partialGTS, partialGTS.id.flatMap(id => idInfo.get(id))) }

  /**
   * Collects, per chunk id, the union of the name/labels/attributes seen across all chunks
   * carrying that id. For each field the first defined occurrence wins (`orElse`), matching
   * the previous imperative implementation, now expressed as a pure `foldLeft`.
   */
  private[services] def extractIdInfo(partialGTSIterable: immutable.Iterable[PartialGTS]): immutable.Map[Long, PartialGTS] =
    partialGTSIterable
      .filter(_.id.isDefined)
      .foldLeft(Map.empty[Long, PartialGTS]) { (acc, partialGTS) =>
        val id = partialGTS.id.get
        val entry = acc.getOrElse(id, partialGTS)
        acc + (id -> entry.copy(
          name = entry.name.orElse(partialGTS.name),
          labels = entry.labels.orElse(partialGTS.labels),
          attributes = entry.attributes.orElse(partialGTS.attributes)
        ))
      }

  /**
   * Expands one chunk's datapoints into full [[GTS]] values, resolving the name and labels
   * from the chunk itself first, then from the referenced chunk (if any), then falling back
   * to an empty name / empty label map.
   *
   * This unifies the two previously duplicated Some/None branches: with no reference chunk
   * the fallbacks are simply `None`, which yields exactly the old behavior.
   */
  private[services] def partialGTSToGTSList(partialGTS: PartialGTS, referencePartialGTSOpt: Option[PartialGTS]): immutable.Iterable[GTS] = {
    val fallbackName = referencePartialGTSOpt.flatMap(_.name)
    val fallbackLabels = referencePartialGTSOpt.flatMap(_.labels)
    partialGTS.values.map { partialGTSValue =>
      GTS(
        ts = Some(partialGTSValue.ts),
        coordinates = partialGTSValue.coordinates,
        elev = partialGTSValue.elev,
        name = partialGTS.name.orElse(fallbackName).getOrElse(""),
        labels = partialGTS.labels.orElse(fallbackLabels).getOrElse(Map.empty),
        value = partialGTSValue.value
      )
    }
  }
}
// One GTS chunk as returned by the exec endpoint. Field mapping (see partialGtsReader):
// "c" -> name, "l" -> labels, "a" -> attributes, "i" -> chunk id, "v" -> datapoints.
// All metadata is optional because Warp10 may spread it across chunks sharing an id.
private case class PartialGTS(
  name: Option[String],
  labels: Option[Map[String, String]],
  attributes: Option[Map[String, String]],
  id: Option[Long],
  values: immutable.Iterable[PartialGTSValue]
)
// A single datapoint of a GTS chunk: timestamp, optional lat/lon coordinates,
// optional `elev` (presumably elevation — see the [ts, lat, lon, elev, value]
// array layout in partialGTSValueReader), and the measured value.
private case class PartialGTSValue(
  ts: Long,
  coordinates: Option[Coordinates],
  elev: Option[Long],
  value: GTSValue
)
private object PartialGTS {

  /** Reader for a JSON object whose values must all be strings (labels / attributes maps). */
  def mapReader(field: String): JsonReader[Map[String, String]] = new JsonReader[Map[String, String]] {
    override def read(json: JsValue): Map[String, String] = {
      json match {
        case JsObject(pairs) =>
          pairs map {
            case (key, JsString(value)) => key -> value
            case _ => deserializationError(s"$field map expected")
          }
        case _ => deserializationError(s"$field map expected")
      }
    }
  }

  val labelsReader: JsonReader[Map[String, String]] = mapReader("labels")
  val attributesReader: JsonReader[Map[String, String]] = mapReader("attributes")

  /** Maps a JSON scalar onto the matching GTSValue variant; integral numbers become longs. */
  val valueReader: JsonReader[GTSValue] = new JsonReader[GTSValue] {
    override def read(json: JsValue): GTSValue = {
      json match {
        case JsString(v) => GTSValue(v)
        case JsBoolean(v) => GTSValue(v)
        case JsNumber(v) if v.isValidLong => GTSValue(v.toLong)
        case JsNumber(v) => GTSValue(v.toDouble)
        case _ => deserializationError("value is invalid")
      }
    }
  }

  /**
   * Reads one datapoint array. Accepted layouts:
   * [ts, value], [ts, elev, value], [ts, lat, lon, value], [ts, lat, lon, elev, value].
   */
  val partialGTSValueReader: JsonReader[PartialGTSValue] = new JsonReader[PartialGTSValue] {
    override def read(json: JsValue): PartialGTSValue = {
      json match {
        case JsArray(v) =>
          v match {
            case Vector(JsNumber(ts), jsValue) =>
              PartialGTSValue(ts.toLong, None, None, valueReader.read(jsValue))
            case Vector(JsNumber(ts), JsNumber(elev), jsValue) =>
              PartialGTSValue(ts.toLong, None, Some(elev.toLong), valueReader.read(jsValue))
            case Vector(JsNumber(ts), JsNumber(lat), JsNumber(lon), jsValue) =>
              PartialGTSValue(ts.toLong, Some(Coordinates(lat.toDouble, lon.toDouble)), None, valueReader.read(jsValue))
            case Vector(JsNumber(ts), JsNumber(lat), JsNumber(lon), JsNumber(elev), jsValue) =>
              PartialGTSValue(ts.toLong, Some(Coordinates(lat.toDouble, lon.toDouble)), Some(elev.toLong), valueReader.read(jsValue))
            // Previously a raw MatchError escaped for any other array arity; report a
            // deserialization error instead, consistent with the other readers.
            case _ =>
              deserializationError("value information is invalid")
          }
        case _ => deserializationError("value information is invalid")
      }
    }
  }

  /** Reads the "v" array: a list of datapoint arrays. */
  val partialGTSValueListReader: JsonReader[immutable.Iterable[PartialGTSValue]] = new JsonReader[Iterable[PartialGTSValue]] {
    override def read(json: JsValue): immutable.Iterable[PartialGTSValue] = {
      json match {
        case JsArray(elements) => elements.map(partialGTSValueReader.read)
        case _ => deserializationError("expected list of GTS values")
      }
    }
  }

  /**
   * Reads one GTS object: "c" = class name, "l" = labels, "a" = attributes, "i" = chunk id,
   * "v" = datapoints. Only "v" is mandatory.
   *
   * The previous implementation matched `c: Option[JsString]` / `id: Option[JsNumber]`,
   * which are type-erased patterns: they matched *any* present field, so a wrongly-typed
   * "c" or "i" blew up later with a ClassCastException. Fields are now extracted safely;
   * a wrongly-typed optional field simply counts as absent.
   */
  val partialGtsReader: JsonReader[PartialGTS] = new JsonReader[PartialGTS] {
    override def read(json: JsValue): PartialGTS = {
      json match {
        case JsObject(fields) =>
          fields.get("v") match {
            case Some(v: JsArray) =>
              PartialGTS(
                name = fields.get("c").collect { case JsString(s) => s },
                labels = fields.get("l").map(labelsReader.read),
                attributes = fields.get("a").map(attributesReader.read),
                id = fields.get("i").collect { case JsNumber(n) => n.toLong },
                values = partialGTSValueListReader.read(v)
              )
            case _ => deserializationError("GTS expected")
          }
        case _ => deserializationError("GTS expected")
      }
    }
  }

  /** Reads a JSON array of GTS objects. */
  val partialGTSListReader: JsonReader[Iterable[PartialGTS]] = new JsonReader[Iterable[PartialGTS]] {
    override def read(json: JsValue): Iterable[PartialGTS] = {
      json match {
        case JsArray(elements) => elements.map(partialGtsReader.read)
        case _ => deserializationError("List of GTS expected")
      }
    }
  }

  /** The exec endpoint wraps the GTS list in an outer single-element array; unwrap it. */
  val rootPartialGTSListReader: JsonReader[Iterable[PartialGTS]] = new JsonReader[Iterable[PartialGTS]] {
    override def read(json: JsValue): Iterable[PartialGTS] = {
      json match {
        case JsArray(elements) if elements.length == 1 => partialGTSListReader.read(elements.head)
        case _ => deserializationError("List of GTS expected")
      }
    }
  }
}
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.
package scala
/** `Short`, a 16-bit signed integer (equivalent to Java's `short` primitive type) is a
* subtype of [[scala.AnyVal]]. Instances of `Short` are not
* represented by an object in the underlying runtime system.
*
* There is an implicit conversion from [[scala.Short]] => [[scala.runtime.RichShort]]
* which provides useful non-primitive operations.
*/
final abstract class Short private extends AnyVal {
def toByte: Byte
def toShort: Short
def toChar: Char
def toInt: Int
def toLong: Long
def toFloat: Float
def toDouble: Double
/**
* Returns the bitwise negation of this value.
* @example {{{
* ~5 == -6
* // in binary: ~00000101 ==
* // 11111010
* }}}
*/
def unary_~ : Int
/** Returns this value, unmodified. */
def unary_+ : Int
/** Returns the negation of this value. */
def unary_- : Int
def +(x: String): String
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
def <<(x: Int): Int
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
def <<(x: Long): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
def >>>(x: Int): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
def >>>(x: Long): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
def >>(x: Int): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
def >>(x: Long): Int
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Byte): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Short): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Char): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Int): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Long): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Float): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Double): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Byte): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Short): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Char): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Int): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Long): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Float): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Double): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Byte): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Short): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Char): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Int): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Long): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Float): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Double): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Byte): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Short): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Char): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Int): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Long): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Float): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Double): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Byte): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Short): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Char): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Int): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Long): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Float): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Double): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Byte): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Short): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Char): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Int): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Long): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Float): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Double): Boolean
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Byte): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Short): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Char): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Int): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Long): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Byte): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Short): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Char): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Int): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Long): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Byte): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Short): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Char): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Int): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Byte): Int
/** Returns the sum of this value and `x`. */
def +(x: Short): Int
/** Returns the sum of this value and `x`. */
def +(x: Char): Int
/** Returns the sum of this value and `x`. */
def +(x: Int): Int
/** Returns the sum of this value and `x`. */
def +(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Float): Float
/** Returns the sum of this value and `x`. */
def +(x: Double): Double
/** Returns the difference of this value and `x`. */
def -(x: Byte): Int
/** Returns the difference of this value and `x`. */
def -(x: Short): Int
/** Returns the difference of this value and `x`. */
def -(x: Char): Int
/** Returns the difference of this value and `x`. */
def -(x: Int): Int
/** Returns the difference of this value and `x`. */
def -(x: Long): Long
/** Returns the difference of this value and `x`. */
def -(x: Float): Float
/** Returns the difference of this value and `x`. */
def -(x: Double): Double
/** Returns the product of this value and `x`. */
def *(x: Byte): Int
/** Returns the product of this value and `x`. */
def *(x: Short): Int
/** Returns the product of this value and `x`. */
def *(x: Char): Int
/** Returns the product of this value and `x`. */
def *(x: Int): Int
/** Returns the product of this value and `x`. */
def *(x: Long): Long
/** Returns the product of this value and `x`. */
def *(x: Float): Float
/** Returns the product of this value and `x`. */
def *(x: Double): Double
/** Returns the quotient of this value and `x`. */
def /(x: Byte): Int
/** Returns the quotient of this value and `x`. */
def /(x: Short): Int
/** Returns the quotient of this value and `x`. */
def /(x: Char): Int
/** Returns the quotient of this value and `x`. */
def /(x: Int): Int
/** Returns the quotient of this value and `x`. */
def /(x: Long): Long
/** Returns the quotient of this value and `x`. */
def /(x: Float): Float
/** Returns the quotient of this value and `x`. */
def /(x: Double): Double
/** Returns the remainder of the division of this value by `x`. */
def %(x: Byte): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Short): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Char): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Int): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Long): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Float): Float
/** Returns the remainder of the division of this value by `x`. */
def %(x: Double): Double
// Provide a more specific return type for Scaladoc
override def getClass(): Class[Short] = ???
}
object Short extends AnyValCompanion {
/** The smallest value representable as a Short. */
final val MinValue = java.lang.Short.MIN_VALUE
/** The largest value representable as a Short. */
final val MaxValue = java.lang.Short.MAX_VALUE
/** Transform a value type into a boxed reference type.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.boxToShort`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the Short to be boxed
* @return a java.lang.Short offering `x` as its underlying value.
*/
def box(x: Short): java.lang.Short = ???
/** Transform a boxed type into a value type. Note that this
* method is not typesafe: it accepts any Object, but will throw
* an exception if the argument is not a java.lang.Short.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxToShort`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the java.lang.Short to be unboxed.
* @throws ClassCastException if the argument is not a java.lang.Short
* @return the Short resulting from calling shortValue() on `x`
*/
def unbox(x: java.lang.Object): Short = ???
/** The String representation of the scala.Short companion object. */
override def toString = "object scala.Short"
/** Language mandated coercions from Short to "wider" types. */
import scala.language.implicitConversions
implicit def short2int(x: Short): Int = x.toInt
implicit def short2long(x: Short): Long = x.toLong
implicit def short2float(x: Short): Float = x.toFloat
implicit def short2double(x: Short): Double = x.toDouble
}
| felixmulder/scala | src/library/scala/Short.scala | Scala | bsd-3-clause | 16,497 |
package controllers
/**
* GraPHPizer source code analytics engine
* Copyright (C) 2015 Martin Helmich <kontakt@martin-helmich.de>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import java.util.UUID
import javax.inject.Inject
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import domain.model.StoredQuery
import domain.repository.StoredQueryRepository
import domain.repository.StoredQueryRepository._
import persistence.ConnectionManager
import play.api.libs.json._
import play.api.mvc._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}
/**
 * CRUD controller for stored Cypher queries, backed by a [[StoredQueryRepository]] actor.
 * Every action asks the actor and maps the typed response onto an HTTP result.
 */
class StoredQueries @Inject()(actorSystem: ActorSystem, manager: ConnectionManager) extends Controller {

  val queries = actorSystem.actorOf(Props[StoredQueryRepository])

  // Ask-timeout for all repository round trips.
  implicit val to = Timeout(1.second)

  // Canned error responses shared by the actions below.
  // Fix: the key here was misspelled "messsage", making this response's JSON shape
  // inconsistent with every other error payload in this controller.
  val QueryNotFound = (id: UUID) => NotFound(Json.obj("status" -> "ko", "message" -> s"Query $id does not exist"))
  val UnknownResponse = () => NotImplemented(Json.obj("status" -> "ko", "message" -> "Unknown response"))

  /** Serializes a stored query, including a self link derived from the current request host. */
  class QueryWrites(implicit r: Request[_]) extends Writes[StoredQuery] {
    def writes(o: StoredQuery): JsValue = Json.obj(
      "__href" -> controllers.routes.StoredQueries.show(o.id).absoluteURL(),
      "id" -> o.id,
      "cypher" -> o.cypher
    )
  }

  /** Reads a new query from a JSON body; the UUID is always assigned server-side. */
  class NewQueryReads extends Reads[StoredQuery] {
    def reads(json: JsValue): JsResult[StoredQuery] = json match {
      case o: JsObject =>
        o \ "cypher" match {
          case JsString(s) => JsSuccess(StoredQuery(UUID.randomUUID(), s))
          case _ => JsError("cypher-missing")
        }
      case _ => JsError("not-an-object")
    }
  }

  /** GET — lists all stored queries. */
  def list() = Action.async { implicit r =>
    implicit val queryWrites = new QueryWrites()
    queries ? AllStoredQueries() map {
      case StoredQueriesResponse(qs) => Ok(Json.toJson(qs))
      case Failure(e) => InternalServerError(Json.obj("status" -> "ko", "message" -> e.getMessage))
      case _ => UnknownResponse()
    }
  }

  /** GET — shows a single query, or 404 if the id is unknown. */
  def show(id: UUID) = Action.async { implicit r =>
    implicit val queryWrites = new QueryWrites()
    queries ? StoredQueryById(id = id) map {
      case StoredQueryResponse(q) => Ok(Json.toJson(q))
      case EmptyResponse() => QueryNotFound(id)
      case Failure(e) => InternalServerError(Json.obj("status" -> "ko", "message" -> e.getMessage))
      case _ => UnknownResponse()
    }
  }

  /** DELETE — removes a query after confirming it exists. */
  def delete(id: UUID) = Action.async { implicit r =>
    queries ? StoredQueryById(id = id) flatMap {
      case StoredQueryResponse(q) => queries ? DeleteStoredQuery(q) map { _ => Ok(Json.obj("status" -> "ok")) }
      case EmptyResponse() => Future.successful(QueryNotFound(id))
      case _ => Future.successful(UnknownResponse())
    }
  }

  /** POST — validates the JSON body and stores a new query; 400 on validation failure. */
  def create() = Action.async(BodyParsers.parse.json) { implicit r =>
    implicit val queryReads = new NewQueryReads()
    implicit val queryWrites = new QueryWrites()
    r.body.validate[StoredQuery].fold(
      errors => Future.successful(BadRequest(Json.obj("message" -> JsError.toFlatJson(errors)))),
      query => {
        queries ? AddStoredQuery(query) map {
          case Success(_) => Created(Json.toJson(query))
          case Failure(e) => InternalServerError(Json.obj("status" -> "ko", "message" -> e.getMessage))
          case _ => UnknownResponse()
        }
      }
    )
  }
}
| martin-helmich/graphpizer-server | app/controllers/StoredQueries.scala | Scala | gpl-3.0 | 4,033 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.roaringbitmap.RoaringBitmap
import org.apache.spark.SparkEnv
import org.apache.spark.internal.config
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.Utils
/**
* Result returned by a ShuffleMapTask to a scheduler. Includes the block manager address that the
* task ran on, the sizes of outputs for each reducer, and the number of outputs of the map task,
* for passing on to the reduce tasks.
*/
// Sealed: the only implementations are CompressedMapStatus and
// HighlyCompressedMapStatus defined later in this file.
private[spark] sealed trait MapStatus {
  /** Location where this task was run. */
  def location: BlockManagerId

  /**
   * Estimated size for the reduce block, in bytes.
   *
   * If a block is non-empty, then this method MUST return a non-zero size. This invariant is
   * necessary for correctness, since block fetchers are allowed to skip zero-size blocks.
   */
  def getSizeForBlock(reduceId: Int): Long

  /**
   * The number of outputs for the map task.
   */
  def numberOfOutput: Long
}
private[spark] object MapStatus {

  /**
   * Picks a MapStatus implementation based on partition count: above the configured
   * threshold the bitmap-based HighlyCompressedMapStatus is used; below it, the
   * one-byte-per-block CompressedMapStatus is both smaller and more accurate.
   */
  def apply(loc: BlockManagerId, uncompressedSizes: Array[Long], numOutput: Long): MapStatus = {
    // SparkEnv may be absent (e.g. in tests); fall back to the config's default threshold.
    if (uncompressedSizes.length > Option(SparkEnv.get)
      .map(_.conf.get(config.SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS))
      .getOrElse(config.SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS.defaultValue.get)) {
      HighlyCompressedMapStatus(loc, uncompressedSizes, numOutput)
    } else {
      new CompressedMapStatus(loc, uncompressedSizes, numOutput)
    }
  }

  // Base of the logarithmic size encoding used by compressSize/decompressSize.
  private[this] val LOG_BASE = 1.1

  /**
   * Compress a size in bytes to 8 bits for efficient reporting of map output sizes.
   * We do this by encoding the log base 1.1 of the size as an integer, which can support
   * sizes up to 35 GB with at most 10% error.
   */
  def compressSize(size: Long): Byte = {
    if (size == 0) {
      // Code 0 is reserved for "empty" so fetchers can skip the block.
      0
    } else if (size <= 1L) {
      // Size 1 gets its own code: log(1) == 0 would otherwise collide with "empty".
      1
    } else {
      // ceil(log_1.1(size)), capped at 255 to fit the unsigned byte range.
      math.min(255, math.ceil(math.log(size) / math.log(LOG_BASE)).toInt).toByte
    }
  }

  /**
   * Decompress an 8-bit encoded block size, using the reverse operation of compressSize.
   */
  def decompressSize(compressedSize: Byte): Long = {
    if (compressedSize == 0) {
      0
    } else {
      // `& 0xFF` reinterprets the signed byte as its unsigned 0..255 code.
      math.pow(LOG_BASE, compressedSize & 0xFF).toLong
    }
  }
}
/**
* A [[MapStatus]] implementation that tracks the size of each block. Size for each block is
* represented using a single byte.
*
* @param loc location where the task is being executed.
* @param compressedSizes size of the blocks, indexed by reduce partition id.
*/
private[spark] class CompressedMapStatus(
    private[this] var loc: BlockManagerId,
    private[this] var compressedSizes: Array[Byte],
    private[this] var numOutput: Long)
  extends MapStatus with Externalizable {

  protected def this() = this(null, null.asInstanceOf[Array[Byte]], -1) // For deserialization only

  // Convenience constructor: compresses each block size to one byte via MapStatus.compressSize.
  def this(loc: BlockManagerId, uncompressedSizes: Array[Long], numOutput: Long) {
    this(loc, uncompressedSizes.map(MapStatus.compressSize), numOutput)
  }

  override def location: BlockManagerId = loc

  override def numberOfOutput: Long = numOutput

  // Decodes the one-byte size for the given reduce partition (lossy, see compressSize).
  override def getSizeForBlock(reduceId: Int): Long = {
    MapStatus.decompressSize(compressedSizes(reduceId))
  }

  // NOTE: writeExternal/readExternal must read and write fields in exactly the same
  // order (loc, numOutput, length, bytes); keep the two methods in sync.
  override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
    loc.writeExternal(out)
    out.writeLong(numOutput)
    out.writeInt(compressedSizes.length)
    out.write(compressedSizes)
  }

  override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
    loc = BlockManagerId(in)
    numOutput = in.readLong()
    val len = in.readInt()
    compressedSizes = new Array[Byte](len)
    in.readFully(compressedSizes)
  }
}
/**
* A [[MapStatus]] implementation that stores the accurate size of huge blocks, which are larger
* than spark.shuffle.accurateBlockThreshold. It stores the average size of other non-empty blocks,
* plus a bitmap for tracking which blocks are empty.
*
* @param loc location where the task is being executed
* @param numNonEmptyBlocks the number of non-empty blocks
* @param emptyBlocks a bitmap tracking which blocks are empty
* @param avgSize average size of the non-empty and non-huge blocks
* @param hugeBlockSizes sizes of huge blocks by their reduceId.
*/
private[spark] class HighlyCompressedMapStatus private (
    private[this] var loc: BlockManagerId,
    private[this] var numNonEmptyBlocks: Int,
    private[this] var emptyBlocks: RoaringBitmap,
    private[this] var avgSize: Long,
    private var hugeBlockSizes: Map[Int, Byte],
    private[this] var numOutput: Long)
  extends MapStatus with Externalizable {

  // loc could be null when the default constructor is called during deserialization
  require(loc == null || avgSize > 0 || hugeBlockSizes.size > 0 || numNonEmptyBlocks == 0,
    "Average size can only be zero for map stages that produced no output")

  protected def this() = this(null, -1, null, -1, null, -1)  // For deserialization only

  override def location: BlockManagerId = loc

  override def numberOfOutput: Long = numOutput

  // Size lookup: 0 for tracked-empty blocks, the accurately-stored (compressed) size for
  // huge blocks, and the average size for everything else.
  override def getSizeForBlock(reduceId: Int): Long = {
    assert(hugeBlockSizes != null)
    if (emptyBlocks.contains(reduceId)) {
      0
    } else {
      hugeBlockSizes.get(reduceId) match {
        case Some(size) => MapStatus.decompressSize(size)
        case None => avgSize
      }
    }
  }

  // NOTE: field order must match readExternal below exactly
  // (loc, numOutput, emptyBlocks, avgSize, hugeBlockSizes count + entries).
  override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
    loc.writeExternal(out)
    out.writeLong(numOutput)
    emptyBlocks.writeExternal(out)
    out.writeLong(avgSize)
    out.writeInt(hugeBlockSizes.size)
    hugeBlockSizes.foreach { kv =>
      out.writeInt(kv._1)
      out.writeByte(kv._2)
    }
  }

  override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
    loc = BlockManagerId(in)
    numOutput = in.readLong()
    emptyBlocks = new RoaringBitmap()
    emptyBlocks.readExternal(in)
    avgSize = in.readLong()
    val count = in.readInt()
    val hugeBlockSizesArray = mutable.ArrayBuffer[Tuple2[Int, Byte]]()
    (0 until count).foreach { _ =>
      val block = in.readInt()
      val size = in.readByte()
      hugeBlockSizesArray += Tuple2(block, size)
    }
    hugeBlockSizes = hugeBlockSizesArray.toMap
  }
}
private[spark] object HighlyCompressedMapStatus {

  /**
   * Builds a [[HighlyCompressedMapStatus]] from per-reduce-partition uncompressed sizes.
   *
   * Blocks at or above `spark.shuffle.accurateBlockThreshold` keep an individual
   * (one-byte compressed) size entry; the remaining non-empty blocks are summarized
   * by their average, and empty blocks are recorded in a bitmap.
   */
  def apply(
      loc: BlockManagerId,
      uncompressedSizes: Array[Long],
      numOutput: Long): HighlyCompressedMapStatus = {
    // Track empty blocks explicitly so a zero-sized block is never reported as
    // non-empty (or vice versa) once the remaining blocks are averaged. Empty
    // blocks are expected to be the minority, so the bitmap stays small.
    val emptyBlocks = new RoaringBitmap()
    val accurateThreshold = Option(SparkEnv.get)
      .map(_.conf.get(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD))
      .getOrElse(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD.defaultValue.get)
    val hugeSizes = ArrayBuffer[Tuple2[Int, Byte]]()
    var nonEmptyCount = 0
    var smallCount = 0
    var smallTotal = 0L
    val blockCount = uncompressedSizes.length
    var idx = 0
    while (idx < blockCount) {
      val blockSize = uncompressedSizes(idx)
      if (blockSize > 0) {
        nonEmptyCount += 1
        if (blockSize < accurateThreshold) {
          // Small block: excluded huge blocks keep the average accurate.
          smallTotal += blockSize
          smallCount += 1
        } else {
          // Huge block: remember its (approximate) size individually.
          hugeSizes += Tuple2(idx, MapStatus.compressSize(blockSize))
        }
      } else {
        emptyBlocks.add(idx)
      }
      idx += 1
    }
    val averageSize = if (smallCount > 0) smallTotal / smallCount else 0L
    emptyBlocks.trim()
    emptyBlocks.runOptimize()
    new HighlyCompressedMapStatus(loc, nonEmptyCount, emptyBlocks, averageSize,
      hugeSizes.toMap, numOutput)
  }
}
| rekhajoshm/spark | core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala | Scala | apache-2.0 | 9,099 |
/*
* Copyright (c) 2013-2014, ARM Limited
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.arm.carp.apps.optimizer.passes
import com.arm.carp.pencil._
import com.arm.carp.pencil.ParentComputer
import scala.collection.mutable.Stack
import scala.collection.mutable.ListBuffer
/**
 * Basic loop-invariant code motion (LICM) pass.
 *
 * Scalar (sub)expressions whose operands are all invariant in the innermost
 * enclosing loop are lifted into fresh temporary variables whose assignments
 * are emitted just before the loop.  Invariance is decided from
 * reaching-definition information, which is recomputed per function.
 */
object LICM extends Pass("licm") {

  val config = WalkerConfig.expressions

  /** Stack of loops currently being walked; innermost loop on top. */
  private val loops = Stack[ForOperation]()

  /** Per-loop buffers of hoisted assignments, kept parallel to [[loops]]. */
  private val invariants = Stack[ListBuffer[AssignmentOperation]]()

  /** Nesting depth of index expressions currently being walked; outer-loop
    * iterators only count as invariant outside index expressions. */
  private var idxExpressionCounter: Int = 0

  /** Records a hoisted assignment for the innermost loop, re-parenting it to
    * the loop's parent block. */
  private def addInvariant(op: AssignmentOperation) = {
    op.parent = loops.top.parent
    invariants.top.append(op)
  }

  private def getInvariants() = invariants.top.toSeq

  private def enterFor (loop: ForOperation) = {
    invariants.push(ListBuffer[AssignmentOperation]())
    loops.push(loop)
  }

  private def leaveFor = {
    invariants.pop
    loops.pop
  }

  /** Marker attached to expressions/operations recording invariance. */
  case class InvariantInfo(val invariant: Boolean) extends ExpressionPassInfo with OperationPassInfo

  /** True iff `in` is not (transitively) contained in `block`. */
  private def isOutsideBlock(in: Operation, block: BlockOperation): Boolean = {
    in.parent match {
      case Some(parent) if block == parent => false
      case Some(parent) => isOutsideBlock(parent, block)
      case None => true
    }
  }

  /** True iff `variable` is the iterator of an enclosing loop other than the
    * innermost one. */
  private def isOuterLoopIterator (variable: ScalarVariableDef) = {
    loops.find(op => op.range.iter.variable == variable) match {
      case None => false
      case Some(op) => op != loops.top
    }
  }

  /** Decides invariance of a variable reference w.r.t. `block` using the
    * reaching-definition sets computed in walkFunction. */
  private def isInvariantInBlock(variable: ScalarVariableRef, block: BlockOperation) = {
    variable.info match {
      case Some(defs:DefineSet) =>
        if (variable.variable.iter) {
          assert(defs.data.isEmpty, defs.data, "unexpected def set")
          idxExpressionCounter == 0 && isOuterLoopIterator(variable.variable)
        } else {
          // Invariant iff every reaching definition lies outside the loop body.
          defs.data.forall(isOutsideBlock(_, block))
        }
      case _ => ice(variable, "reaching definition information expected")
    }
  }

  override def walkIterationVariable(in: ScalarVariableRef) = {
    (in, None)
  }

  override def walkScalarVariable(in: ScalarVariableRef) = {
    if (!loops.isEmpty) {
      val invariant = isInvariantInBlock(in, loops.top.ops)
      in.info = Some(new InvariantInfo(invariant))
    }
    super.walkScalarVariable(in)
  }

  override def walkConvertExpression(in: ConvertExpression) = {
    val op = walkScalarExpression(in.op1)
    (asInvariantExp(in.update(op._1), isInvariant(op._1)), op._2)
  }

  // Constants are trivially loop-invariant.
  override def walkScalarConstant(in: Constant with ScalarExpression) =
    (asInvariantExp(in), None)

  // Currently unused helper.  Fixed to forward the `invariant` flag, which was
  // previously ignored (the default `true` was always used).
  private def asInvariant(in: AnnotatedScalarExpression, invariant: Boolean = true) = {
    (asInvariantExp(in._1, invariant), in._2)
  }

  /** Tags `in` with an [[InvariantInfo]] marker and returns it. */
  private def asInvariantExp[T <: ScalarExpression](in: T, invariant: Boolean = true) = {
    in.info = Some(InvariantInfo(invariant))
    in
  }

  private def walkScalarBinaryExpressionOrig(in: ScalarBinaryExpression) = {
    val op1 = walkScalarExpression(in.op1)
    val op2 = walkScalarExpression(in.op2)
    (asInvariantExp(in.update(op1._1, op2._1), isInvariant(op1._1, op2._1)), make(op1._2, op2._2))
  }

  /** Special handling for +/- trees: flatten the sum, hoist the invariant
    * summands as a single lifted subexpression, and rebuild the tree. */
  private def walkScalarBinaryExpressionSum(in: ScalarBinaryExpression) = {
    val flattened = listFromSumTree(in).map(walkScalarExpression)
    val (raw_invariants, raw_rest) = flattened.partition(exp => isInvariant(exp._1))
    val invariants = raw_invariants.unzip
    val rest = raw_rest.unzip
    val init = make((invariants._2 ::: rest._2):_*)
    val exp = if (invariants._1.isEmpty) {
      sumTreeFromList(rest._1)
    } else {
      val invariant = liftInvariants(asInvariantExp(sumTreeFromList(invariants._1), true))
      sumTreeFromList(invariant :: rest._1)
    }
    (exp, init)
  }

  override def walkScalarBinaryExpression(in: ScalarBinaryExpression) = {
    in match {
      case _: PlusExpression | _: MinusExpression => walkScalarBinaryExpressionSum(in)
      case _ => walkScalarBinaryExpressionOrig(in)
    }
  }

  override def walkScalarTernaryExpression (in: TernaryExpression) = {
    val op1 = walkScalarExpression(in.op1)
    val op2 = walkScalarExpression(in.op2)
    val op3 = walkScalarExpression(in.op3)
    (asInvariantExp(in.update(op1._1, op2._1, op3._1), isInvariant(op1._1, op2._1, op3._1)),
      make(op1._2, op2._2, op3._2))
  }

  override def walkScalarUnaryExpression(in: ScalarUnaryExpression) = {
    val op = walkScalarExpression(in.op1)
    (asInvariantExp(in.update(op._1), isInvariant(op._1)), op._2)
  }

  // Currently unused helper.
  private def withUpdatedRvalue(mov: AssignmentOperation, upd: ScalarExpression) = {
    mov.rvalue = upd
    mov
  }

  /** True iff we are inside a loop and every given expression is tagged invariant. */
  private def isInvariant (in: ScalarExpression*) = {
    !loops.isEmpty && in.forall(_.info match {
      case Some(InvariantInfo(true)) => true
      case _ => false
    })
  }

  /** Replaces an invariant non-trivial expression by a reference to a fresh
    * temporary assigned before the loop.  Constants and plain variable
    * references are left in place (lifting them would gain nothing). */
  private def liftInvariants(exp: ScalarExpression) = {
    exp match {
      case _:ScalarExpression with Constant | _:ScalarVariableRef => exp
      case _ if isInvariant(exp) => {
        val tmp = ScalarVariableDef(exp.expType.updateConst(false), "loop_invariant", None)
        addInvariant(new AssignmentOperation(new ScalarVariableRef(tmp), exp))
        asInvariantExp(new ScalarVariableRef(tmp))
      }
      case _ => exp
    }
  }

  override def walkScalarExpression(in: ScalarExpression) = {
    (liftInvariants(super.walkScalarExpression(in)._1), None)
  }

  override def walkScalarIdxExpression (in: ScalarIdxExpression) = {
    idxExpressionCounter += 1
    val res = super.walkScalarIdxExpression(in)
    idxExpressionCounter -= 1
    res
  }

  override def walkArrayIdxExpression (in: ArrayIdxExpression) = {
    idxExpressionCounter += 1
    val res = super.walkArrayIdxExpression(in)
    idxExpressionCounter -= 1
    res
  }

  override def walkAssignment(in: AssignmentOperation) = {
    val rvalue = walkScalarExpression(in.rvalue)._1
    in.rvalue = rvalue
    in.lvalue match {
      case _: ScalarVariableRef => Some(in)
      case _ => {
        in.lvalue = walkLValueExpression(in.lvalue)._1
        Some(in)
      }
    }
  }

  override def walkRange(in: Range) = (in, None)

  override def walkFor(in: ForOperation) = {
    enterFor(in)
    val res = super.walkFor(in)
    val invariants = getInvariants
    if (invariants.size > 0) {
      set_changed
    }
    leaveFor
    // Emit the hoisted assignments immediately before the (rewritten) loop.
    make (Some(new BlockOperation(invariants)), res)
  }

  override def walkFunction(in: Function) = {
    ParentComputer.walkFunction(in)
    ReachingDefinitions.computeForFunction(in)
    super.walkFunction(in)
  }
}
| carpproject/pencil | src/scala/com/arm/carp/apps/optimizer/passes/LoopInvariantCodeMotion.scala | Scala | mit | 7,696 |
package scoverage.report
import java.io.File
import scoverage.{Coverage, IOUtils, Serializer}
object CoverageAggregator {

  // Fixed: @deprecated takes (message, since) — the arguments were previously
  // swapped, so the version string was shown as the deprecation message.
  @deprecated("Used only by gradle-scoverage plugin", "1.4.0")
  def aggregate(baseDir: File, clean: Boolean): Option[Coverage] = {
    aggregate(IOUtils.scoverageDataDirsSearch(baseDir))
  }

  // to be used by gradle-scoverage plugin
  def aggregate(dataDirs: Array[File]): Option[Coverage] = aggregate(dataDirs.toSeq)

  /**
   * Aggregates the coverage data found in the given scoverage data directories.
   *
   * @return `Some(coverage)` when at least one data directory was supplied,
   *         `None` otherwise.
   */
  def aggregate(dataDirs: Seq[File]): Option[Coverage] = {
    println(s"[info] Found ${dataDirs.size} subproject scoverage data directories [${dataDirs.mkString(",")}]")
    if (dataDirs.nonEmpty) {
      Some(aggregatedCoverage(dataDirs))
    } else {
      None
    }
  }

  /**
   * Merges the serialized coverage of each subproject into a single [[Coverage]],
   * applying each subproject's recorded measurements and renumbering statement
   * ids so they stay unique across subprojects.
   */
  def aggregatedCoverage(dataDirs: Seq[File]): Coverage = {
    var id = 0
    val coverage = Coverage()
    dataDirs foreach { dataDir =>
      val coverageFile: File = Serializer.coverageFile(dataDir)
      if (coverageFile.exists) {
        val subcoverage: Coverage = Serializer.deserialize(coverageFile)
        val measurementFiles: Array[File] = IOUtils.findMeasurementFiles(dataDir)
        val measurements = IOUtils.invoked(measurementFiles.toIndexedSeq)
        subcoverage.apply(measurements)
        subcoverage.statements foreach { stmt =>
          // need to ensure all the ids are unique otherwise the coverage object will have stmt collisions
          id = id + 1
          coverage add stmt.copy(id = id)
        }
      }
    }
    coverage
  }
}
| gslowikowski/scalac-scoverage-plugin | scalac-scoverage-plugin/src/main/scala/scoverage/report/CoverageAggregator.scala | Scala | apache-2.0 | 1,492 |
package com.sksamuel.elastic4s.searches.suggestion
import java.util.UUID
/**
 * DSL entry points for building completion, term and phrase suggestions.
 * When no name is given, a random UUID is used as the suggestion name.
 */
trait SuggestionApi {

  def completionSuggestion(): CompletionSuggExpectsField = completionSuggestion(UUID.randomUUID.toString)
  def completionSuggestion(name: String): CompletionSuggExpectsField = new CompletionSuggExpectsField(name)

  class CompletionSuggExpectsField(name: String) {
    @deprecated("use on(field)", "5.0.0")
    // Fixed: previously delegated as on(name), ignoring the supplied field and
    // silently using the suggestion name as the field.
    def field(field: String): CompletionSuggestionDefinition = on(field)
    def on(field: String) = CompletionSuggestionDefinition(name, field)
  }

  def termSuggestion(): TermSuggExpectsField = termSuggestion(UUID.randomUUID.toString)
  def termSuggestion(name: String, field: String, text: String) = TermSuggestionDefinition(name, field, Some(text))
  def termSuggestion(name: String): TermSuggExpectsField = new TermSuggExpectsField(name)

  class TermSuggExpectsField(name: String) {
    @deprecated("use on(field)", "5.0.0")
    def field(field: String): TermSuggestionDefinition = on(field)
    def on(field: String) = TermSuggestionDefinition(name, field, Some(""))
  }

  def phraseSuggestion(): PhraseSuggExpectsField = phraseSuggestion(UUID.randomUUID.toString)
  def phraseSuggestion(name: String): PhraseSuggExpectsField = new PhraseSuggExpectsField(name)

  class PhraseSuggExpectsField(name: String) {
    @deprecated("use on(field)", "5.0.0")
    // Fixed: previously delegated as on(name), ignoring the supplied field.
    def field(field: String): PhraseSuggestionDefinition = on(field)
    def on(field: String) = PhraseSuggestionDefinition(name, field)
  }
}
| tyth/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/suggestion/SuggestionApi.scala | Scala | apache-2.0 | 1,505 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.common
/**
 * Extractor for the cause of an exception.
 *
 * Matches only when the throwable is non-null and carries a non-null cause,
 * in which case the cause is extracted.
 */
object Wrapped {
  def unapply(ex: Throwable): Option[Throwable] =
    Option(ex).flatMap(wrapped => Option(wrapped.getCause))
}
| telefonicaid/fiware-cosmos-platform | common/src/main/scala/es/tid/cosmos/common/Wrapped.scala | Scala | apache-2.0 | 868 |
/*
Copyright 2014 MentalArray, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* This class is a wrapper that provides for utilizing Pig to join data sets without having to write an entire
* script.
 * It uses copyMerge functionality, together with Scala-based header- and schema-cleaning functions, to
 * produce a directory of part files plus a single merged file (.csv by default) on HDFS.
*/
package net.mentalarray.doozie.JobHelpers
import net.mentalarray.doozie.DSL.TextFile
import net.mentalarray.doozie.Utility
import net.mentalarray.doozie.Utility.Path
/**
 * Template job that joins two HDFS data sets with Pig without requiring a full script.
 *
 * @param job reads in the jobname
 * @param jobConfig reads in the Joiner configuration from the case class.
 * This is a template job (slightly different from Workflow Tasks) and as such, cannot receive
 * the same sort of function calling and evaluation that a typical workflow script task can take.
 * This wrapper enables a WorkflowTemplate instantiation.
 */
class DataSetJoiner(job: String, jobConfig: DataSetJoinerConfiguration) extends WorkflowTemplate(job, jobConfig) with Logging {

  // point to the location on hdfs where the generic table joiner script is located.
  final val _scriptLocation: String = "/apps/scripts/GenericTableJoiner.pig"

  // load some jar files that could be called in on the Pig Script
  final val _jarLibrary: String = """REGISTER 'hdfs://yacdevmaster1:8020/apps/libs/Unstack.jar';
                               REGISTER 'hdfs://yacdevmaster1:8020/apps/libs/PiggyBank.jar';
                               REGISTER 'hdfs://yacdevmaster1:8020/apps/libs/datafu-1.2.0.jar';"""

  /**
   * Provide static pointers to the passed in parameters to the class
   */
  val _job = job
  val _firstTableJoinField1 = jobConfig.firstTableJoinField1
  val _firstTableJoinField2 = jobConfig.firstTableJoinField2
  val _secondTableJoinField1 = jobConfig.secondTableJoinField1
  val _secondTableJoinField2 = jobConfig.secondTableJoinField2
  val _firstDataSetDirectory = jobConfig.firstDataSetDirectory
  val _secondDataSetDirectory = jobConfig.secondDataSetDirectory
  val _firstDelimiter = jobConfig.firstDelimiter
  val _secondDelimiter = jobConfig.secondDelimiter
  val _dateFieldFirst = jobConfig.dateFieldFirst
  val _dateFormatting = jobConfig.dateFormatting
  val _daysBackFirst = jobConfig.daysBackFirst
  val _csvStore = jobConfig.csvStoreDirectory
  val _finalDirectory = jobConfig.finalDirectory
  val _joinOption = jobConfig.joinOption
  val _storageDelimiter = jobConfig.storageDelimiter

  /**
   * To effectively join without excessive memory overhead in Pig, it is important to
   * ensure that the field(s) to be joined are not null.
   * Below is case matching for string replacement that will handle joining on either one
   * or two fields.
   */
  // An empty second join field means a single-field join; otherwise filter both fields.
  val _firstElementNullFilter: String = _firstTableJoinField2 match {
    case "" => "%s is not null".replace("%s", _firstTableJoinField1)
    case _ => "%s is not null".replace("%s", _firstTableJoinField1) +
      " AND %s is not null".replace("%s", _firstTableJoinField2)
  }

  val _secondElementNullFilter: String = _secondTableJoinField2 match {
    case "" => "%s is not null".replace("%s", _secondTableJoinField1)
    case _ => "%s is not null".replace("%s", _secondTableJoinField1) +
      " AND %s is not null".replace("%s", _secondTableJoinField2)
  }

  /**
   * Set the string replacement for the joining string on the first data set.
   */
  // change the replacement test based on number of parameters supplied
  val _firstJoinString = jobConfig.firstTableJoinField2 match {
    case "" => _firstTableJoinField1
    case _ => _firstTableJoinField1 + "," + _firstTableJoinField2
  }

  /** Provide error checking for users potentially providing an invalid declaration
   *  (INNER will throw an exception for some reason if it is included.)
   *  Allow for all other types to be passed (e.g. LEFT OUTER, RIGHT OUTER, etc.)
   *  See Pig documentation for all allowable join types.
   */
  val _joinType = jobConfig.joinType match {
    case "INNER" => ""
    case _ => jobConfig.joinType
  }

  /**
   * Set the string replacement for the join string on the second data set.
   */
  val _secondJoinString = jobConfig.secondTableJoinField2 match {
    case "" => _secondTableJoinField1
    case _ => _secondTableJoinField1 + "," + _secondTableJoinField2
  }

  /**
   * Create a random directory to handle the job store and transfer items.
   */
  val _joinedStorage = Path.combine("/tmp/Pig", java.util.UUID.randomUUID().toString)

  /**
   * Set the final hdfs storage location for the job.
   */
  val _finalStorage = Utility.Path.combine(_finalDirectory, _job)

  /**
   * Create a temp directory on hdfs to handle the regex cleaning of the header and schema files
   */
  val _headerCleanerDir = Utility.Path.combine("/tmp/Pig", java.util.UUID.randomUUID().toString)

  /**
   * Final location of the merged file
   */
  val _csvHDFSStore = Utility.Path.combine(_csvStore, _job)

  /**
   * Run the Pig script with all of the replacement parameters
   */
  // Each placeholder in the generic joiner script is substituted from the
  // configuration values computed above.
  appendStep(PigTask { task => task.setScript(TextFile.loadFromHdfs(_scriptLocation))
    task.setScriptReplacements(Replace(
      "jarLibrary" -> _jarLibrary,
      "firstDataSetDirectory" -> _firstDataSetDirectory,
      "firstDelimiter" -> _firstDelimiter,
      "secondDataSetDirectory" -> _secondDataSetDirectory,
      "secondDelimiter" -> _secondDelimiter,
      "firstElementNullFilter" -> _firstElementNullFilter,
      "secondElementNullFilter" -> _secondElementNullFilter,
      "firstJoinString" -> _firstJoinString,
      "joinType" -> _joinType,
      "secondJoinString" -> _secondJoinString,
      "dateFormatting" -> _dateFormatting,
      "joinOption" -> _joinOption,
      "joinedStorage" -> _joinedStorage,
      "storageDelimiter" -> _storageDelimiter,
      "dateFieldFirst" -> _dateFieldFirst,
      "daysBackFirst" -> _daysBackFirst
    ))
    Log.debug(task.script)
  })

  /**
   * copy, move header/schema, cleanup the schema, then remove the temp directories.
   */
  // Clear any previous outputs, then stage the joined data into its final location.
  appendStep(HdfsTask { task => task.setCommand("rm -r -f " + _finalStorage)})
  appendStep(HdfsTask { task => task.setCommand("rm -r -f " + _csvHDFSStore + ".csv")})
  appendStep(HdfsTask { task => task.setCommand("cp " + _joinedStorage + " " + _finalStorage)})
  appendStep(HdfsTask { task => task.setCommand("rm -f " + _finalStorage + "/.pig_header")})
  appendStep(HdfsTask { task => task.setCommand("rm -f " + _finalStorage + "/.pig_schema")})
  appendStep(HdfsTask { task => task.setCommand("mkdir " + _headerCleanerDir)})
  appendStep(HdfsTask { task => task.setCommand("cp " + _joinedStorage + "/.pig_header" + " " + _headerCleanerDir + "/.pig_header")})
  appendStep(HdfsTask { task => task.setCommand("cp " + _joinedStorage + "/.pig_schema" + " " + _headerCleanerDir + "/.pig_schema")})

  // Strip the "relation::" prefixes Pig leaves in the generated header.
  appendStep(ScalaTask {
    val inFile = _headerCleanerDir + "/.pig_header"
    val outFile = _finalStorage + "/.pig_header"
    val headerClean = ".*::".r
    val overWriteExisting = false
    SchemaCleaner.parsePigHeader(inFile, outFile, headerClean, overWriteExisting)
    true
  })

  // Merge the part files into a single hosted .csv file.
  appendStep(FileBuilderTask("Joiner") { task =>
    task.inPath(_finalStorage)
      .outPath(_csvHDFSStore + ".csv")
      .srcCheckDel(false)
      .srcSys("hdfs")
      .destSys("hdfs")
  })

  // Strip the "relation::" prefixes from the generated schema as well.
  appendStep(ScalaTask {
    val inFile = _headerCleanerDir + "/.pig_schema"
    val outFile = _finalStorage + "/.pig_schema"
    val schemaClean = ":\".*::".r
    val replacement = ":\""
    val overWriteExisting = false
    SchemaCleaner.parsePigSchema(inFile, outFile, schemaClean, replacement, overWriteExisting)
    true
  })

  // Remove the temporary working directories.
  appendStep(HdfsTask { task => task.setCommand("rm -r -f " + _headerCleanerDir)})
  appendStep(HdfsTask { task => task.setCommand("rm -r -f " + _joinedStorage)})
}
/**
 * Configuration values supplied by the calling script to drive a [[DataSetJoiner]] job.
 *
 * Script supplied elements for using this class:
 * @param firstTableJoinField1 Left data set Join Field #1 (required)
 * @param firstTableJoinField2 Left data set Join Field #2 (optional, required if Right supplied)
 * @param secondTableJoinField1 Right data set Join Field #1 (required)
 * @param secondTableJoinField2 Right data set Join Field #2 (optional, required if Left supplied)
 * @param firstDataSetDirectory Location of left data set on hdfs
 * @param secondDataSetDirectory Location of right data set on hdfs
 * @param firstDelimiter Delimiter type of left data set
 * @param secondDelimiter Delimiter type of right data set
 * @param dateFieldFirst Field name of date filtering on left data set
 * @param dateFormatting Standard formatting string of the dateFieldFirst field
 * @param daysBackFirst Integer - number of days back from today for filtering the first data set.
 * @param finalDirectory Hdfs directory location of where to store the file.
 * @param csvStoreDirectory Hdfs directory location of where to store a merged file for hosting.
 * @param joinType Pig option - type of join (e.g. FULL OUTER, LEFT OUTER, etc.)
 * @param joinOption - Pig option - optional join types (e.g. replicated)
 * @param storageDelimiter Delimiter type for the final storage table.
 */
case class DataSetJoinerConfiguration(
  firstTableJoinField1: String,
  firstTableJoinField2: String,
  secondTableJoinField1: String,
  secondTableJoinField2: String,
  firstDataSetDirectory: String,
  secondDataSetDirectory: String,
  firstDelimiter: String,
  secondDelimiter: String,
  dateFieldFirst: String,
  dateFormatting: String,
  daysBackFirst: Int,
  finalDirectory: String,
  csvStoreDirectory: String,
  joinType: String,
  joinOption: String,
  storageDelimiter: String
  ) extends TemplateConfiguration
| antagonist112358/tomahawk | workflow-engine/src/net/mentalarray/doozie/JobHelpers/DataSetJoiner.scala | Scala | apache-2.0 | 10,821 |
package com.aletrader.brewerydb;
/*
{
"id" : "KlSsWY",
"description" : "",
"name" : "'t Hofbrouwerijke",
"createDate" : "2012-01-02 11:50:52",
"mailingListUrl" : "",
"updateDate" : "",
"images" : {
"medium" : "",
"large" : "",
"icon" : ""
},
"established" : "",
"isOrganic" : "N",
"website" : "http://www.thofbrouwerijke.be/",
"status" : "verified",
"statusDisplay" : "Verified"
}
*/
/**
 * Immutable view of a single brewery record as returned by the BreweryDB API.
 * Field names mirror the JSON payload shown in the sample above; all values
 * are kept as raw strings except `images`, which is a nested [[Images]] record.
 */
class Brewery(
    val id: String,
    val description: String,
    val name: String,
    val createDate: String,
    val mailingListUrl: String,
    val updateDate: String,
    val images: Images,
    val established: String,
    val isOrganic: String,
    val website: String,
    val status: String,
    val statusDisplay: String)
| lukeforehand/aletrader | src/main/scala/com/aletrader/brewerydb/Brewery.scala | Scala | gpl-3.0 | 726 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package interactive
import java.io.{FileReader, FileWriter}
import java.util.concurrent.ConcurrentHashMap
import scala.annotation.{elidable, tailrec}
import scala.collection.mutable
import scala.collection.mutable.{HashSet, LinkedHashMap}
import scala.jdk.javaapi.CollectionConverters
import scala.language.implicitConversions
import scala.reflect.internal.Chars.isIdentifierStart
import scala.reflect.internal.util.SourceFile
import scala.tools.nsc.io.AbstractFile
import scala.tools.nsc.reporters.Reporter
import scala.tools.nsc.symtab.Flags.{ACCESSOR, PARAMACCESSOR}
import scala.tools.nsc.symtab._
import scala.tools.nsc.typechecker.{Analyzer, Typers}
import scala.util.control.Breaks._
import scala.util.control.ControlThrowable
/**
 * This trait allows the IDE to have an instance of the PC that
 * does not clear the comments table at every new typer run (those
 * being many and close between in this context).
 */
trait CommentPreservingTypers extends Typers {
  self: Analyzer =>

  // Intentionally a no-op: keep the doc-comment table across typer runs so
  // Scaladoc can still be resolved after the many incremental runs the IDE
  // triggers (the default implementation clears the table each run).
  override def resetDocComments() = {}
}
/**
 * Analyzer specialization for the interactive (presentation) compiler.
 * Installs typer/namer variants that tolerate being interrupted and re-run
 * over the same trees, as happens constantly in an IDE session.
 */
trait InteractiveAnalyzer extends Analyzer {
  val global : Global
  import global._

  override def newTyper(context: Context): InteractiveTyper = new Typer(context) with InteractiveTyper
  override def newNamer(context: Context): InteractiveNamer = new Namer(context) with InteractiveNamer

  trait InteractiveTyper extends Typer {
    // Disable tree rewrites that would lose source fidelity in the IDE.
    override def canAdaptConstantTypeToLiteral = false
    override def canTranslateEmptyListToNil = false
    // Preserve the selection node (rather than replacing it) so the IDE can
    // still offer completions/hyperlinking on an unresolved member.
    override def missingSelectErrorTree(tree: Tree, qual: Tree, name: Name): Tree = tree match {
      case Select(_, _) => treeCopy.Select(tree, qual, name)
      case SelectFromTypeTree(_, _) => treeCopy.SelectFromTypeTree(tree, qual, name)
    }
  }

  trait InteractiveNamer extends Namer {
    override def saveDefaultGetter(meth: Symbol, default: Symbol): Unit = {
      // save the default getters as attachments in the method symbol. if compiling the
      // same local block several times (which can happen in interactive mode) we might
      // otherwise not find the default symbol, because the second time it the method
      // symbol will be re-entered in the scope but the default parameter will not.
      meth.attachments.get[DefaultsOfLocalMethodAttachment] match {
        case Some(att) => att.defaultGetters += default
        case None => meth.updateAttachment(new DefaultsOfLocalMethodAttachment(default))
      }
    }
    // this logic is needed in case typer was interrupted half
    // way through and then comes back to do the tree again. In
    // that case the definitions that were already attributed as
    // well as any default parameters of such methods need to be
    // re-entered in the current scope.
    //
    // Tested in test/files/presentation/t8941b
    override def enterExistingSym(sym: Symbol, tree: Tree): Context = {
      if (sym != null && sym.owner.isTerm) {
        enterIfNotThere(sym)
        for (defAtt <- sym.attachments.get[DefaultsOfLocalMethodAttachment])
          defAtt.defaultGetters foreach enterIfNotThere
      } else if (sym != null && sym.isClass && sym.isImplicit) {
        // Re-entering an implicit class: drop the previously synthesized
        // conversion method(s) before synthesizing them again.
        val owningInfo = sym.owner.info
        val existingDerivedSym = owningInfo.decl(sym.name.toTermName).filter(sym => sym.isSynthetic && sym.isMethod)
        existingDerivedSym.alternatives foreach (owningInfo.decls.unlink)
        val defTree = tree match {
          case dd: DocDef => dd.definition // See scala/bug#9011, Scala IDE's presentation compiler incorporates ScaladocGlobal with InteractiveGlobal, so we have to unwrap DocDefs.
          case _ => tree
        }
        enterImplicitWrapper(defTree.asInstanceOf[ClassDef])
      }
      super.enterExistingSym(sym, tree)
    }
    override def enterIfNotThere(sym: Symbol): Unit = {
      // Walk the scope entries for sym's name; enter sym only when no entry
      // for this exact symbol is already present in this scope.
      val scope = context.scope
      @tailrec def search(e: ScopeEntry): Unit = {
        if ((e eq null) || (e.owner ne scope))
          scope enter sym
        else if (e.sym ne sym) // otherwise, aborts since we found sym
          search(e.tail)
      }
      search(scope lookupEntry sym.name)
    }
  }
}
/** The main class of the presentation compiler in an interactive environment such as an IDE
*/
class Global(settings: Settings, _reporter: Reporter, projectName: String = "") extends {
/* Is the compiler initializing? Early def, so that the field is true during the
* execution of the super constructor.
*/
private var initializing = true
override val useOffsetPositions = false
} with scala.tools.nsc.Global(settings, _reporter)
with CompilerControl
with ContextTrees
with RichCompilationUnits
with Picklers {
import definitions._
if (!settings.Ymacroexpand.isSetByUser)
settings.Ymacroexpand.value = settings.MacroExpand.Discard
val debugIDE: Boolean = settings.YpresentationDebug.value
val verboseIDE: Boolean = settings.YpresentationVerbose.value
private val anyThread: Boolean = settings.YpresentationAnyThread.value
private def replayName = settings.YpresentationReplay.value
private def logName = settings.YpresentationLog.value
private def afterTypeDelay = settings.YpresentationDelay.value
private final val SleepTime = 10
val log =
if (replayName != "") new Replayer(new FileReader(replayName))
else if (logName != "") new Logger(new FileWriter(logName))
else NullLogger
import log.logreplay
debugLog(s"logger: ${log.getClass} writing to ${(new java.io.File(logName)).getAbsolutePath}")
debugLog(s"classpath: $classPath")
private var curTime = System.nanoTime
private def timeStep = {
val last = curTime
curTime = System.nanoTime
", delay = " + (curTime - last) / 1000000 + "ms"
}
/** Print msg only when debugIDE is true. */
@inline final def debugLog(msg: => String) =
if (debugIDE) println("[%s] %s".format(projectName, msg))
/** Inform with msg only when verboseIDE is true. */
@inline final def informIDE(msg: => String) =
if (verboseIDE) println("[%s][%s]".format(projectName, msg))
// don't keep the original owner in presentation compiler runs
// (the map will grow indefinitely, and the only use case is the backend)
override def defineOriginalOwner(sym: Symbol, owner: Symbol): Unit = { }
override protected def synchronizeNames = true
/** A map of all loaded files to the rich compilation units that correspond to them.
*/
val unitOfFile: mutable.Map[AbstractFile, RichCompilationUnit] = {
val m = new ConcurrentHashMap[AbstractFile, RichCompilationUnit] {
override def put(key: AbstractFile, value: RichCompilationUnit) = {
val r = super.put(key, value)
if (r == null) debugLog("added unit for "+key)
r
}
override def remove(key: Any) = {
val r = super.remove(key)
if (r != null) debugLog("removed unit for "+key)
r
}
}
CollectionConverters.asScala(m)
}
/** A set containing all those files that need to be removed.
 * Units are removed by getUnit, typically once a unit is finished compiled.
 */
protected val toBeRemoved: HashSet[AbstractFile] = new HashSet[AbstractFile]

/** A set containing all those files that need to be removed after a full background compiler run.
 */
protected val toBeRemovedAfterRun: HashSet[AbstractFile] = new HashSet[AbstractFile]

/** Map from a source file to the set of responses waiting on it.
 * Missing keys default to the empty set; additions assert that no ask is
 * currently in flight (deferred work must not be scheduled from inside an ask).
 */
class ResponseMap extends mutable.HashMap[SourceFile, Set[Response[Tree]]] {
  override def default(key: SourceFile): Set[Response[Tree]] = Set()
  override def addOne (binding: (SourceFile, Set[Response[Tree]])) = {
    // `interruptsEnabled` is false exactly while an ask executes (see pollForWork).
    assert(interruptsEnabled, "delayed operation within an ask")
    super.addOne(binding)
  }
}

/** A map that associates with each abstract file the set of responses that are waiting
 * (via waitLoadedTyped) for the unit associated with the abstract file to be loaded and completely typechecked.
 */
protected val waitLoadedTypeResponses = new ResponseMap

/** A map that associates with each abstract file the set of responses that are waiting
 * (via build) for the unit associated with the abstract file to be parsed and entered.
 */
protected var getParsedEnteredResponses = new ResponseMap
/** Purge `rmap`: raise `NoSuchUnitError` on responses whose source is no
 * longer loaded, drop responses that have already completed, and drop
 * sources whose response set has become empty.
 */
private def cleanResponses(rmap: ResponseMap): Unit = {
  // Iterate over a snapshot so `rmap` can be mutated while traversing.
  val snapshot = rmap.toList
  snapshot foreach { case (source, responses) =>
    responses foreach { resp =>
      if (getUnit(source).isEmpty)
        resp raise new NoSuchUnitError(source.file)
      if (resp.isComplete)
        rmap(source) -= resp
    }
    if (rmap(source).isEmpty)
      rmap -= source
  }
}
/** The interactive variant of the type checker; `global` is pinned to this
 * instance via the early-definition pattern.
 */
override lazy val analyzer = new {
  val global: Global.this.type = Global.this
} with InteractiveAnalyzer
/** Clean stale/completed responses out of both pending-response maps. */
private def cleanAllResponses(): Unit =
  List(waitLoadedTypeResponses, getParsedEnteredResponses) foreach cleanResponses

/** Report (and fail) every response still waiting in `rmap`. */
private def checkNoOutstanding(rmap: ResponseMap): Unit =
  rmap.toList foreach { case (_, responses) =>
    responses foreach { resp =>
      debugLog("ERROR: missing response, request will be discarded")
      resp raise new MissingResponse
    }
  }

/** Verify that no responses remain pending in either map; used at shutdown. */
def checkNoResponsesOutstanding(): Unit =
  List(waitLoadedTypeResponses, getParsedEnteredResponses) foreach checkNoOutstanding
/** The compilation unit corresponding to a source file.
 * If none is loaded, complain loudly (the caller violated a precondition)
 * and answer a fresh unit that is NOT registered in `unitOfFile`.
 * Note: We want to remove this.
 */
protected[interactive] def getOrCreateUnitOf(source: SourceFile): RichCompilationUnit =
  unitOfFile.get(source.file) match {
    case Some(unit) =>
      unit
    case None =>
      println("precondition violated: "+source+" is not loaded")
      new Exception().printStackTrace()
      new RichCompilationUnit(source)
  }
/** Work through the `toBeRemoved` set, dropping those units from
 * `unitOfFile` and `allSources`, then return optionally the unit
 * associated with source `s`.
 *
 * @param s the source file to look up
 * @return the rich compilation unit for `s`, or `None` if it is not loaded
 */
protected[interactive] def getUnit(s: SourceFile): Option[RichCompilationUnit] = {
  toBeRemoved.synchronized {
    for (f <- toBeRemoved) {
      // Fix: log the file actually being removed (`f`), not the queried
      // source (`s`) -- the old message was misleading when several files
      // were pending removal.
      informIDE("removed: "+f)
      unitOfFile -= f
      allSources = allSources filter (_.file != f)
    }
    toBeRemoved.clear()
  }
  unitOfFile get s.file
}
/** A list giving all files to be typechecked in the order they should be checked.
 */
protected var allSources: List[SourceFile] = List()

// Most recent exception that crashed a background compile, if any.
private var lastException: Option[Throwable] = None

/** A list of files that crashed the compiler. They will be ignored during background
 * compilation until they are removed from this list.
 */
private var ignoredFiles: Set[AbstractFile] = Set()

/** Flush the buffer of sources that are ignored during background compilation. */
def clearIgnoredFiles(): Unit = {
  ignoredFiles = Set()
}

/** Remove a crashed file from the ignore buffer. Background compilation will take it into account
 * and errors will be reported against it. */
def enableIgnoredFile(file: AbstractFile): Unit = {
  ignoredFiles -= file
  debugLog("Removed crashed file %s. Still in the ignored buffer: %s".format(file, ignoredFiles))
}
/** The currently active typer run */
private var currentTyperRun: TyperRun = _
newTyperRun()

/** Is a background compiler run needed?
 * Note: outOfDate is true as long as there is a background compile scheduled or going on.
 */
private var outOfDate = false

def isOutOfDate: Boolean = outOfDate

/** Request a (new) background compile; aborts the current one if it is running. */
def demandNewCompilerRun() = {
  if (outOfDate) throw new FreshRunReq // cancel background compile
  else outOfDate = true // proceed normally and enable new background compile
}

// Run ids older than this are considered stale.
protected[interactive] var minRunId = 1

// Cleared while an ask executes so polling cannot re-enter (see pollForWork).
private[interactive] var interruptsEnabled = true

// Sentinel meaning "no response is currently being computed".
private val NoResponse: Response[_] = new Response[Any]

/** The response that is currently pending, i.e. the compiler
 * is working on providing an answer for it.
 */
private var pendingResponse: Response[_] = NoResponse
// ----------- Overriding hooks in nsc.Global -----------------------

/** Called from parser, which signals hereby that a method definition has been parsed.
 * Interrupt handling is only safe on the presentation compiler thread, so the
 * poll is skipped everywhere else.
 */
override def signalParseProgress(pos: Position): Unit =
  if (onCompilerThread) checkForMoreWork(pos)
/** Called from typechecker, which signals hereby that a node has been completely typechecked.
 * If the node includes unit.targetPos, abandons run and returns newly attributed tree.
 * Otherwise, if there's some higher priority work to be done, also abandons run with a FreshRunReq.
 * @param context The context that typechecked the node
 * @param old The original node
 * @param result The transformed node
 */
override def signalDone(context: Context, old: Tree, result: Tree): Unit = {
  // Only look at the tree when it is safe to do so: interrupts allowed, no
  // symbol-completion locks held, and not in exploratory (silent) typing.
  val canObserveTree = (
    interruptsEnabled
    && lockedCount == 0
    && !context.bufferErrors // scala/bug#7558 look away during exploratory typing in "silent mode"
  )
  if (canObserveTree) {
    if (context.unit.exists &&
        result.pos.isOpaqueRange &&
        (result.pos includes context.unit.targetPos)) {
      // The typechecked node covers the targeted position: locate the exact
      // subtree and abort the run by throwing it back to the caller.
      var located = new TypedLocator(context.unit.targetPos) locateIn result
      if (located == EmptyTree) {
        println("something's wrong: no "+context.unit+" in "+result+result.pos)
        located = result
      }
      throw new TyperResult(located)
    }
    else {
      try {
        checkForMoreWork(old.pos)
      } catch {
        case ex: ValidateException => // Ignore, this will have been reported elsewhere
          debugLog("validate exception caught: "+ex)
        case ex: Throwable =>
          // Persist the event log before propagating anything fatal.
          log.flush()
          throw ex
      }
    }
  }
}
/** Called from typechecker every time a context is created.
 * Rich compilation units record the context in their context tree; contexts
 * belonging to any other unit kind are ignored.
 */
override def registerContext(c: Context) = c.unit match {
  case rich: RichCompilationUnit => addContext(rich.contexts, c)
  case _ =>
}
/** The top level classes and objects currently seen in the presentation compiler
 */
private val currentTopLevelSyms = new mutable.LinkedHashSet[Symbol]

/** The top level classes and objects no longer seen in the presentation compiler
 */
val deletedTopLevelSyms = new mutable.LinkedHashSet[Symbol]

/** Called from typechecker every time a top-level class or object is entered.
 */
override def registerTopLevelSym(sym: Symbol): Unit = { currentTopLevelSyms += sym }

/** Refinement of the symbol loaders whose global/platform are pinned to this instance. */
protected type SymbolLoadersInInteractive = GlobalSymbolLoaders {
  val global: Global.this.type
  val platform: Global.this.platform.type
}

/** Symbol loaders in the IDE parse all source files loaded from a package for
 * top-level idents. Therefore, we can detect top-level symbols that have a name
 * different from their source file
 */
override lazy val loaders: SymbolLoadersInInteractive = new {
  val global: Global.this.type = Global.this
  val platform: Global.this.platform.type = Global.this.platform
} with BrowsingLoaders
// ----------------- Polling ---------------------------------------

/** Snapshot of when work was noticed: at which node count and at what wall-clock time. */
case class WorkEvent(atNode: Int, atMillis: Long)

// Node count at which more work was last detected; -1 until first detection.
private var moreWorkAtNode: Int = -1
// Number of polls (nodes typechecked) seen so far.
private var nodesSeen = 0
// True if the last executed work item was a reload; enables the post-reload sleep window.
private var lastWasReload = false

/** The number of pollForWorks after which the presentation compiler yields.
 * Yielding improves responsiveness on systems with few cores because it
 * gives the UI thread a chance to get new tasks and interrupt the presentation
 * compiler with them.
 */
private final val yieldPeriod = 10
/** Called from runner thread and signalDone:
 * Poll for interrupts and execute them immediately.
 * Then, poll for exceptions and execute them.
 * Then, poll for work reload/typedTreeAt/doFirst commands during background checking.
 * @param pos The position of the tree if polling while typechecking, NoPosition otherwise
 *
 */
private[interactive] def pollForWork(pos: Position): Unit = {
  // `loop` + `breakable` emulate "restart polling from the top": after an
  // interrupt has been serviced, we go around again to look for more work.
  var loop: Boolean = true
  while (loop) {
    breakable{
      loop = false
      // Polling must not re-enter while an ask is executing.
      if (!interruptsEnabled) return
      if (pos == NoPosition || nodesSeen % yieldPeriod == 0)
        Thread.`yield`()

      def nodeWithWork(): Option[WorkEvent] =
        if (scheduler.moreWork || pendingResponse.isCancelled) Some(new WorkEvent(nodesSeen, System.currentTimeMillis))
        else None

      nodesSeen += 1
      logreplay("atnode", nodeWithWork()) match {
        case Some(WorkEvent(id, _)) =>
          debugLog("some work at node "+id+" current = "+nodesSeen)
          // assert(id >= nodesSeen)
          moreWorkAtNode = id
        case None =>
      }

      if (nodesSeen >= moreWorkAtNode) {

        // 1) Service one pending interrupt, with interrupts disabled while it runs.
        logreplay("asked", scheduler.pollInterrupt()) match {
          case Some(ir) =>
            try {
              interruptsEnabled = false
              debugLog("ask started"+timeStep)
              ir.execute()
            } finally {
              debugLog("ask finished"+timeStep)
              interruptsEnabled = true
            }
            // Restart the poll from the top.
            loop = true; break
          case _ =>
        }

        // 2) Propagate cancellation of the currently pending response.
        if (logreplay("cancelled", pendingResponse.isCancelled)) {
          throw CancelException
        }

        // 3) Re-throw exceptions posted by other threads.
        logreplay("exception thrown", scheduler.pollThrowable()) match {
          case Some(ex: FreshRunReq) =>
            newTyperRun()
            minRunId = currentRunId
            demandNewCompilerRun()

          case Some(ShutdownReq) =>
            scheduler.synchronized { // lock the work queue so no more items are posted while we clean it up
              val units = scheduler.dequeueAll {
                case item: WorkItem => Some(item.raiseMissing())
                case _ => Some(())
              }
              // don't forget to service interrupt requests
              scheduler.dequeueAllInterrupts(_.execute())

              debugLog("ShutdownReq: cleaning work queue (%d items)".format(units.size))
              debugLog("Cleanup up responses (%d loadedType pending, %d parsedEntered pending)"
                .format(waitLoadedTypeResponses.size, getParsedEnteredResponses.size))
              checkNoResponsesOutstanding()

              log.flush()
              scheduler = new NoWorkScheduler
              throw ShutdownReq
            }

          case Some(ex: Throwable) => log.flush(); throw ex
          case _ =>
        }

        // 4) Execute the next queued work item, if any.
        lastWasReload = false

        logreplay("workitem", scheduler.nextWorkItem()) match {
          case Some(action) =>
            try {
              debugLog("picked up work item at "+pos+": "+action+timeStep)
              action()
              debugLog("done with work item: "+action)
            } finally {
              debugLog("quitting work item: "+action+timeStep)
            }
          case None =>
        }
      }
    }
  }
}
/** Poll for queued work; if servicing it replaced the current typer run,
 * demand a fresh background compile.
 */
protected def checkForMoreWork(pos: Position): Unit = {
  val runBefore = currentTyperRun
  pollForWork(pos)
  val runAfter = currentTyperRun
  if (runBefore != runAfter) demandNewCompilerRun()
}
// ----------------- The Background Runner Thread -----------------------

// Number of runner threads created so far (monotonic counter).
private var threadId = 0

/** The current presentation compiler runner */
@volatile private[interactive] var compileRunner: Thread = newRunnerThread()

/** Check that the currently executing thread is the presentation compiler thread.
 *
 * Compiler initialization may happen on a different thread (signalled by globalPhase being NoPhase)
 */
@elidable(elidable.WARNING)
override def assertCorrectThread(): Unit = {
  assert(initializing || anyThread || onCompilerThread,
    "Race condition detected: You are running a presentation compiler method outside the PC thread.[phase: %s]".format(globalPhase) +
    " Please file a ticket with the current stack trace at https://www.assembla.com/spaces/scala-ide/support/tickets")
}

/** Create a new presentation compiler runner.
 */
private def newRunnerThread(): Thread = {
  threadId += 1
  // Daemon thread so it never keeps the JVM alive on its own.
  compileRunner = new PresentationCompilerThread(this, projectName)
  compileRunner.setDaemon(true)
  compileRunner
}
/** Reparse units that were typechecked in an earlier run and have since gone
 * stale; freshly parsed units are left alone.
 */
private def ensureUpToDate(unit: RichCompilationUnit): Unit = {
  val needsReset = !unit.isUpToDate && unit.status != JustParsed
  if (needsReset) reset(unit)
}
/** Compile all loaded source files in the order given by `allSources`.
 * Sequence: reparse/enter all units, optionally wait out the post-reload
 * delay, typecheck every non-ignored unit, service pending responses, then
 * recurse once if new responses arrived in the meantime.
 */
private[interactive] final def backgroundCompile(): Unit = {
  informIDE("Starting new presentation compiler type checking pass")
  reporter.reset()

  // remove any files in first that are no longer maintained by presentation compiler (i.e. closed)
  allSources = allSources filter (s => unitOfFile contains (s.file))

  // ensure all loaded units are parsed
  for (s <- allSources; unit <- getUnit(s)) {
    // checkForMoreWork(NoPosition) // disabled, as any work done here would be in an inconsistent state
    ensureUpToDate(unit)
    parseAndEnter(unit)
    serviceParsedEntered()
  }

  // sleep window: give the editor a chance to batch further edits after a reload
  if (afterTypeDelay > 0 && lastWasReload) {
    val limit = System.currentTimeMillis() + afterTypeDelay
    while (System.currentTimeMillis() < limit) {
      Thread.sleep(SleepTime)
      // Keep polling so asks arriving during the wait are serviced promptly.
      checkForMoreWork(NoPosition)
    }
  }

  // ensure all loaded units are typechecked
  for (s <- allSources; if !ignoredFiles(s.file); unit <- getUnit(s)) {
    try {
      if (!unit.isUpToDate)
        if (unit.problems.isEmpty || !settings.YpresentationStrict)
          typeCheck(unit)
        else debugLog("%s has syntax errors. Skipped typechecking".format(unit))
      else debugLog("already up to date: "+unit)
      // Answer everyone waiting for this unit's fully typed tree.
      for (r <- waitLoadedTypeResponses(unit.source))
        r set unit.body
      serviceParsedEntered()
    } catch {
      case ex: FreshRunReq => throw ex // propagate a new run request
      case ShutdownReq => throw ShutdownReq // propagate a shutdown request
      case ex: ControlThrowable => throw ex
      case ex: Throwable =>
        // A crash in one unit: fail its waiters, quarantine the file, continue
        // with the remaining sources.
        println("[%s]: exception during background compile: ".format(unit.source) + ex)
        ex.printStackTrace()
        for (r <- waitLoadedTypeResponses(unit.source)) {
          r.raise(ex)
        }
        serviceParsedEntered()

        lastException = Some(ex)
        ignoredFiles += unit.source.file
        println("[%s] marking unit as crashed (crashedFiles: %s)".format(unit, ignoredFiles))

        reporter.error(unit.body.pos, "Presentation compiler crashed while type checking this file: %s".format(ex.toString()))
    }
  }

  // move units removable after this run to the "to-be-removed" buffer
  toBeRemoved.synchronized {
    toBeRemovedAfterRun.synchronized {
      toBeRemoved ++= toBeRemovedAfterRun
    }
  }

  // clean out stale waiting responses
  cleanAllResponses()

  // wind down
  if (waitLoadedTypeResponses.nonEmpty || getParsedEnteredResponses.nonEmpty) {
    // need another cycle to treat those
    newTyperRun()
    backgroundCompile()
  } else {
    outOfDate = false
    informIDE("Everything is now up to date")
  }
}
/** Service all pending getParsedEntered requests.
 * A fresh typer run is started lazily -- before the first request is served,
 * and only if there is at least one request.
 */
private def serviceParsedEntered(): Unit = {
  var needFreshRun = true
  for ((source, responses) <- getParsedEnteredResponses; response <- responses) {
    if (needFreshRun) {
      newTyperRun()
      needFreshRun = false
    }
    getParsedEnteredNow(source, response)
  }
  getParsedEnteredResponses.clear()
}
/** Reset unit to unloaded state, clearing all per-run information so the
 * next access reparses it from scratch.
 */
private def reset(unit: RichCompilationUnit): Unit = {
  unit.depends.clear()
  unit.defined.clear()
  unit.synthetics.clear()
  unit.toCheck.clear()
  unit.checkedFeatures = Set()
  unit.targetPos = NoPosition
  unit.contexts.clear()
  unit.problems.clear()
  unit.body = EmptyTree
  unit.status = NotLoaded
  unit.transformed.clear()
}
/** Parse unit and create a name index, unless this has already been done before */
private def parseAndEnter(unit: RichCompilationUnit): Unit =
  if (unit.status == NotLoaded) {
    debugLog("parsing: "+unit)
    currentTyperRun.compileLate(unit)
    // Position validation is expensive; only done in debug mode on clean parses.
    if (debugIDE && !reporter.hasErrors) validatePositions(unit.body)
    if (!unit.isJava) syncTopLevelSyms(unit)
    unit.status = JustParsed
  }

/** Make sure unit is typechecked
 */
private[scala] def typeCheck(unit: RichCompilationUnit): Unit = {
  debugLog("type checking: "+unit)
  parseAndEnter(unit)
  // Mark as partially checked first, so an aborted run is detectable.
  unit.status = PartiallyChecked
  currentTyperRun.typeCheck(unit)
  unit.lastBody = unit.body
  // The current run id doubles as the "fully checked in run N" status.
  unit.status = currentRunId
}
/** Update deleted and current top-level symbols sets:
 * unlink from their owner's scope the symbols of `unit`'s file that did not
 * survive reparsing, and move them from `currentTopLevelSyms` to
 * `deletedTopLevelSyms`.
 */
def syncTopLevelSyms(unit: RichCompilationUnit): Unit = {
  val deleted = currentTopLevelSyms filter { sym =>
    /** We sync after namer phase and it resets all the top-level symbols
     * that survive the new parsing
     * round to NoPeriod.
     */
    sym.sourceFile == unit.source.file &&
    sym.validTo != NoPeriod &&
    runId(sym.validTo) < currentRunId
  }
  for (d <- deleted) {
    d.owner.info.decls unlink d
    // `deletedTopLevelSyms` is read by other threads, hence the lock.
    deletedTopLevelSyms.synchronized { deletedTopLevelSyms += d }
    currentTopLevelSyms -= d
  }
}
/** Move the given files to the front of `allSources`, keeping the relative
 * order of the remaining sources unchanged.
 */
def moveToFront(fs: List[SourceFile]): Unit = {
  val rest = allSources diff fs
  allSources = fs ::: rest
}
// ----------------- Implementations of client commands -----------------------

/** Compute `op` and deliver its single result to `result`. */
def respond[T](result: Response[T])(op: => T): Unit =
  respondGradually(result)(LazyList(op))

/** Evaluate the (lazy) stream `op`, delivering intermediate elements
 * provisionally and the final element as the definitive answer.
 * The previously pending response is saved and restored so that nested
 * responds work correctly.
 */
def respondGradually[T](response: Response[T])(op: => LazyList[T]): Unit = {
  val prevResponse = pendingResponse
  try {
    pendingResponse = response
    if (!response.isCancelled) {
      var results = op
      while (!response.isCancelled && results.nonEmpty) {
        val result = results.head
        results = results.tail
        if (results.isEmpty) {
          response set result
          debugLog("responded"+timeStep)
        } else response setProvisionally result
      }
    }
  } catch {
    case CancelException =>
      debugLog("cancelled")
    case ex: FreshRunReq =>
      if (debugIDE) {
        println("FreshRunReq thrown during response")
        ex.printStackTrace()
      }
      // Fail the client, then re-throw so the enclosing run restarts.
      response raise ex
      throw ex

    case ex @ ShutdownReq =>
      if (debugIDE) {
        println("ShutdownReq thrown during response")
        ex.printStackTrace()
      }
      response raise ex
      throw ex

    case ex: Throwable =>
      if (debugIDE) {
        println("exception thrown during response: "+ex)
        ex.printStackTrace()
      }
      // Ordinary exceptions fail only this response; the compiler carries on.
      response raise ex
  } finally {
    pendingResponse = prevResponse
  }
}
/** (Re-)load a single source: install a fresh compilation unit for it and
 * cancel any pending removal of its file.
 */
private[interactive] def reloadSource(source: SourceFile): Unit = {
  val unit = new RichCompilationUnit(source)
  unitOfFile(source.file) = unit
  toBeRemoved.synchronized { toBeRemoved -= source.file }
  toBeRemovedAfterRun.synchronized { toBeRemovedAfterRun -= source.file }
  reset(unit)
  //parseAndEnter(unit)
}

/** Make sure a set of compilation units is loaded and parsed */
private def reloadSources(sources: List[SourceFile]): Unit = {
  newTyperRun()
  minRunId = currentRunId
  sources foreach reloadSource
  // Reloaded sources are typechecked first in the next background run.
  moveToFront(sources)
}
/** Make sure a set of compilation units is loaded and parsed */
private[interactive] def reload(sources: List[SourceFile], response: Response[Unit]): Unit = {
  informIDE("reload: " + sources)
  // Enables the post-reload sleep window in backgroundCompile.
  lastWasReload = true
  respond(response)(reloadSources(sources))
  demandNewCompilerRun()
}

/** Unload the given deleted sources, unlink their top-level symbols, and
 * schedule a new background compile.
 */
private[interactive] def filesDeleted(sources: List[SourceFile], response: Response[Unit]): Unit = {
  informIDE("files deleted: " + sources)
  val deletedFiles = sources.map(_.file).toSet
  val deletedSyms = currentTopLevelSyms filter {sym => deletedFiles contains sym.sourceFile}
  for (d <- deletedSyms) {
    d.owner.info.decls unlink d
    deletedTopLevelSyms.synchronized { deletedTopLevelSyms += d }
    currentTopLevelSyms -= d
  }
  sources foreach (removeUnitOf(_))
  minRunId = currentRunId
  respond(response)(())
  demandNewCompilerRun()
}
/** Arrange for unit to be removed after run, to give a chance to typecheck the unit fully.
 * If we do just removeUnit, some problems with default parameters can ensue.
 * Calls to this method could probably be replaced by removeUnit once default parameters are handled more robustly.
 */
private def afterRunRemoveUnitsOf(sources: List[SourceFile]): Unit = {
  val files = sources map (_.file)
  toBeRemovedAfterRun.synchronized { toBeRemovedAfterRun ++= files }
}
/** A fully attributed tree located at position `pos` */
private[scala] def typedTreeAt(pos: Position): Tree = getUnit(pos.source) match {
  case None =>
    // Source not loaded: load it temporarily, retry, then schedule removal.
    reloadSources(List(pos.source))
    try typedTreeAt(pos)
    finally afterRunRemoveUnitsOf(List(pos.source))
  case Some(unit) =>
    informIDE("typedTreeAt " + pos)
    parseAndEnter(unit)
    val tree = locateTree(pos)
    debugLog("at pos "+pos+" was found: "+tree.getClass+" "+tree.pos.show)
    tree match {
      case Import(expr, _) =>
        debugLog("import found"+expr.tpe+(if (expr.tpe == null) "" else " "+expr.tpe.members))
      case _ =>
    }
    if (stabilizedType(tree) ne null) {
      debugLog("already attributed: "+tree.symbol+" "+tree.tpe)
      tree
    } else {
      // Targeted typecheck: signalDone aborts with TyperResult once the
      // subtree covering `pos` has been attributed.
      unit.targetPos = pos
      try {
        debugLog("starting targeted type check")
        typeCheck(unit)
        // println("tree not found at "+pos)
        EmptyTree
      } catch {
        case ex: TyperResult => new Locator(pos) locateIn ex.tree
      } finally {
        unit.targetPos = NoPosition
      }
    }
}
/** A fully attributed tree corresponding to the entire compilation unit */
private[interactive] def typedTree(source: SourceFile, forceReload: Boolean): Tree = {
  informIDE("typedTree " + source + " forceReload: " + forceReload)
  val unit = getOrCreateUnitOf(source)
  if (forceReload) reset(unit)
  parseAndEnter(unit)
  // `status <= PartiallyChecked` means the unit was never fully checked in any run.
  if (unit.status <= PartiallyChecked) typeCheck(unit)
  unit.body
}
/** Set sync var `response` to a fully attributed tree located at position `pos`. */
private[interactive] def getTypedTreeAt(pos: Position, response: Response[Tree]): Unit =
  respond(response) { typedTreeAt(pos) }

/** Set sync var `response` to a fully attributed tree corresponding to the
 * entire compilation unit. */
private[interactive] def getTypedTree(source: SourceFile, forceReload: Boolean, response: Response[Tree]): Unit =
  respond(response) { typedTree(source, forceReload) }
/** Run `f` with all of `sources` guaranteed to be loaded, temporarily
 * loading (and afterwards scheduling removal of) any that were not.
 */
private def withTempUnits[T](sources: List[SourceFile])(f: (SourceFile => RichCompilationUnit) => T): T = {
  val unitOfSrc: SourceFile => RichCompilationUnit = src => unitOfFile(src.file)
  sources filterNot (getUnit(_).isDefined) match {
    case Nil =>
      // Everything is already loaded: no cleanup needed.
      f(unitOfSrc)
    case unknown =>
      reloadSources(unknown)
      try {
        f(unitOfSrc)
      } finally
        afterRunRemoveUnitsOf(unknown)
  }
}

/** Single-source convenience wrapper for `withTempUnits`. */
private def withTempUnit[T](source: SourceFile)(f: RichCompilationUnit => T): T =
  withTempUnits(List(source)){ srcToUnit =>
    f(srcToUnit(source))
  }
/** Find a 'mirror' of symbol `sym` in unit `unit`. Pre: `unit` is loaded.
 * The mirror is looked up by name in the (re-)entered unit and, among
 * overloaded alternatives, selected by matching against `sym`'s type
 * adapted to the current run.
 */
private def findMirrorSymbol(sym: Symbol, unit: RichCompilationUnit): Symbol = {
  val originalTypeParams = sym.owner.typeParams
  ensureUpToDate(unit)
  parseAndEnter(unit)
  val pre = adaptToNewRunMap(ThisType(sym.owner))
  val rawsym = pre.typeSymbol.info.decl(sym.name)
  val newsym = rawsym filter { alt =>
    sym.isType || {
      try {
        val tp1 = pre.memberType(alt) onTypeError NoType
        val tp2 = adaptToNewRunMap(sym.tpe) substSym (originalTypeParams, sym.owner.typeParams)
        matchesType(tp1, tp2, alwaysMatchSimple = false) || {
          debugLog(s"findMirrorSymbol matchesType($tp1, $tp2) failed")
          // Fallback: substitute against the alternative's own type params.
          val tp3 = adaptToNewRunMap(sym.tpe) substSym (originalTypeParams, alt.owner.typeParams)
          matchesType(tp1, tp3, alwaysMatchSimple = false) || {
            debugLog(s"findMirrorSymbol fallback matchesType($tp1, $tp3) failed")
            false
          }
        }
      }
      catch {
        case ex: ControlThrowable => throw ex
        case ex: Throwable =>
          debugLog("error in findMirrorSymbol: " + ex)
          ex.printStackTrace()
          false
      }
    }
  }
  if (newsym == NoSymbol) {
    // A unique raw symbol is acceptable even if type matching failed.
    if (rawsym.exists && !rawsym.isOverloaded) rawsym
    else {
      debugLog("mirror not found " + sym + " " + unit.source + " " + pre)
      NoSymbol
    }
  } else if (newsym.isOverloaded) {
    // NOTE(review): this turns on -uniqid globally for the log line below
    // and never resets it -- confirm that is intentional.
    settings.uniqid.value = true
    debugLog("mirror ambiguous " + sym + " " + unit.source + " " + pre + " " + newsym.alternatives)
    NoSymbol
  } else {
    debugLog("mirror found for " + newsym + ": " + newsym.pos)
    newsym
  }
}
/** Implements CompilerControl.askLinkPos: answer the position of the mirror
 * of `sym` inside `source`, or NoPosition if `sym`'s owner is not a class.
 */
private[interactive] def getLinkPos(sym: Symbol, source: SourceFile, response: Response[Position]): Unit = {
  informIDE("getLinkPos "+sym+" "+source)
  respond(response) {
    if (sym.owner.isClass) {
      // Load `source` temporarily (if needed) and look the symbol up there.
      withTempUnit(source){ u =>
        findMirrorSymbol(sym, u).pos
      }
    } else {
      debugLog("link not in class "+sym+" "+source+" "+sym.owner)
      NoPosition
    }
  }
}

/** Fill in `sym`'s doc comment from the DocDef node found in `unit`'s body.
 * Traversal does not descend into other val/def definitions.
 */
private def forceDocComment(sym: Symbol, unit: RichCompilationUnit): Unit = {
  unit.body foreachPartial {
    case DocDef(comment, defn) if defn.symbol == sym =>
      fillDocComment(defn.symbol, comment)
      EmptyTree
    case _: ValOrDefDef =>
      EmptyTree
  }
}
/** Implements CompilerControl.askDocComment: answer the (expanded, raw,
 * position) triple of `sym`'s doc comment, forcing the doc comments of the
 * given fragment symbols first.
 */
private[interactive] def getDocComment(sym: Symbol, source: SourceFile, site: Symbol, fragments: List[(Symbol,SourceFile)],
                                       response: Response[(String, String, Position)]): Unit = {
  informIDE(s"getDocComment $sym at $source, site $site")
  respond(response) {
    withTempUnits(fragments.unzip._2){ units =>
      // Force the doc comment of every fragment symbol first.
      for((sym, src) <- fragments) {
        val mirror = findMirrorSymbol(sym, units(src))
        if (mirror ne NoSymbol) forceDocComment(mirror, units(src))
      }
      val mirror = findMirrorSymbol(sym, units(source))
      if (mirror eq NoSymbol)
        ("", "", NoPosition)
      else {
        (expandedDocComment(mirror, site), rawDocComment(mirror), docCommentPos(mirror))
      }
    }
  }
  // New typer run to remove temp units and drop per-run caches that might refer to symbols entered from temp units.
  newTyperRun()
}
/** A stable (singleton) type for `tree` where one can be formed -- stable
 * idents/selections and imports of stable qualifiers; otherwise `tree.tpe`.
 */
def stabilizedType(tree: Tree): Type = tree match {
  case Ident(_) if treeInfo.admitsTypeSelection(tree) =>
    singleType(NoPrefix, tree.symbol)
  case Select(qual, _) if treeInfo.admitsTypeSelection(tree) =>
    singleType(qual.tpe, tree.symbol)
  case Import(expr, selectors) =>
    tree.symbol.info match {
      // NB: this `expr` binding shadows the one from the Import pattern above.
      case ImportType(expr) => expr match {
        case s@Select(qual, name) if treeInfo.admitsTypeSelection(expr) => singleType(qual.tpe, s.symbol)
        case i : Ident => i.tpe
        case _ => tree.tpe
      }
      case _ => tree.tpe
    }

  case _ => tree.tpe
}
import analyzer.{ImplicitSearch, SearchResult}

/** Compute and deliver (via `response`) all members visible without prefix
 * in the scope enclosing `pos`.
 */
private[interactive] def getScopeCompletion(pos: Position, response: Response[List[Member]]): Unit = {
  informIDE("getScopeCompletion" + pos)
  respond(response)(scopeMembers(pos))
}
/** Accumulates completion members, deduplicating by name + matching type.
 * Insertion order is preserved (LinkedHashMap); missing keys default to the
 * empty set.
 */
private class Members[M <: Member] extends LinkedHashMap[Name, Set[M]] {
  override def default(key: Name) = Set()

  // An already-recorded member with the same name and a matching type, if any.
  private def matching(sym: Symbol, symtpe: Type, ms: Set[M]): Option[M] = ms.find { m =>
    (m.sym.name == sym.name) && (m.sym.isType || (m.tpe matches symtpe))
  }

  // Prefer the non-accessor symbol over a previously seen accessor.
  private def keepSecond(m: M, sym: Symbol, implicitlyAdded: Boolean): Boolean =
    m.sym.hasFlag(ACCESSOR | PARAMACCESSOR) &&
    !sym.hasFlag(ACCESSOR | PARAMACCESSOR) &&
    (!implicitlyAdded || m.implicitlyAdded)

  /** Record `sym` seen through prefix `pre`, converting it with `toMember`.
   * Getters/setters are replaced by the underlying field; error symbols,
   * artifacts and '$'-containing names are skipped.
   */
  def add(sym: Symbol, pre: Type, implicitlyAdded: Boolean)(toMember: (Symbol, Type) => M): Unit = {
    if ((sym.isGetter || sym.isSetter) && sym.accessed != NoSymbol) {
      add(sym.accessed, pre, implicitlyAdded)(toMember)
    } else if (!sym.name.decodedName.containsName("$") && !sym.isError && !sym.isArtifact && sym.hasRawInfo) {
      val symtpe = pre.memberType(sym) onTypeError ErrorType
      matching(sym, symtpe, this(sym.name)) match {
        case Some(m) =>
          if (keepSecond(m, sym, implicitlyAdded)) {
            //print(" -+ "+sym.name)
            this(sym.name) = this(sym.name) - m + toMember(sym, symtpe)
          }
        case None =>
          //print(" + "+sym.name)
          this(sym.name) = this(sym.name) + toMember(sym, symtpe)
      }
    }
  }

  /** Merge members from `other` that are not shadowed by one already recorded here. */
  def addNonShadowed(other: Members[M]) = {
    for ((name, ms) <- other)
      if (ms.nonEmpty && this(name).isEmpty) this(name) = ms
  }

  /** All recorded members, flattened into a single list. */
  def allMembers: List[M] = values.toList.flatten
}
/** Return all members visible without prefix in context enclosing `pos`. */
private def scopeMembers(pos: Position): List[ScopeMember] = {
  typedTreeAt(pos) // to make sure context is entered
  val context = doLocateContext(pos)
  val locals = new Members[ScopeMember]
  val enclosing = new Members[ScopeMember]
  // Record one candidate symbol seen through prefix `pre` (via an import tree, if any).
  def addScopeMember(sym: Symbol, pre: Type, viaImport: Tree) =
    locals.add(sym, pre, implicitlyAdded = false) { (s, st) =>
      // imported val and var are always marked as inaccessible, but they could be accessed through their getters. scala/bug#7995
      val member = if (s.hasGetter)
        new ScopeMember(s, st, context.isAccessible(s.getter, pre, superAccess = false), viaImport)
      else
        new ScopeMember(s, st, context.isAccessible(s, pre, superAccess = false), viaImport)
      member.prefix = pre
      member
    }
  // Flush the current nesting level into `enclosing`; inner scopes shadow outer ones.
  def localsToEnclosing() = {
    enclosing.addNonShadowed(locals)
    locals.clear()
  }
  //print("add scope members")
  var cx = context
  while (cx != NoContext) {
    for (sym <- cx.scope)
      addScopeMember(sym, NoPrefix, EmptyTree)
    localsToEnclosing()
    if (cx == cx.enclClass) {
      val pre = cx.prefix
      for (sym <- pre.members)
        addScopeMember(sym, pre, EmptyTree)
      localsToEnclosing()
    }
    cx = cx.outer
  }
  //print("\nadd imported members")
  for (imp <- context.imports) {
    val pre = imp.qual.tpe
    for (sym <- imp.allImportedSymbols)
      addScopeMember(sym, pre, imp.qual)
    localsToEnclosing()
  }
  // println()
  val result = enclosing.allMembers
  // if (debugIDE) for (m <- result) println(m)
  result
}
/** Compute and deliver gradually (via `response`) the type members at `pos`.
 * Results may arrive in several batches (see `typeMembers`).
 */
private[interactive] def getTypeCompletion(pos: Position, response: Response[List[Member]]): Unit = {
  informIDE("getTypeCompletion " + pos)
  respondGradually(response)(typeMembers(pos))
}
/** The type members visible at `pos`, produced in up to two batches: first
 * the direct members of the qualifier's type, then (lazily) members added
 * by applicable implicit conversions (enrichment).
 */
private def typeMembers(pos: Position): LazyList[List[TypeMember]] = {
  // Choosing which tree will tell us the type members at the given position:
  //   If pos leads to an Import, type the expr
  //   If pos leads to a Select, type the qualifier as long as it is not erroneous
  //     (this implies discarding the possibly incomplete name in the Select node)
  //   Otherwise, type the tree found at 'pos' directly.
  val tree0 = typedTreeAt(pos) match {
    case sel @ Select(qual, _) if sel.tpe == ErrorType => qual
    case Import(expr, _) => expr
    case t => t
  }
  val context = doLocateContext(pos)
  // Re-type the qualifier when it has no type yet or its type is an implicit method type.
  val shouldTypeQualifier = tree0.tpe match {
    case null => true
    case mt: MethodType => mt.isImplicit
    case pt: PolyType => isImplicitMethodType(pt.resultType)
    case _ => false
  }

  // TODO: guard with try/catch to deal with ill-typed qualifiers.
  val tree = if (shouldTypeQualifier) analyzer newTyper context typedQualifier tree0 else tree0

  debugLog("typeMembers at "+tree+" "+tree.tpe)
  val superAccess = tree.isInstanceOf[Super]
  val members = new Members[TypeMember]

  // Record `sym` as a type member seen through prefix `pre`.
  def addTypeMember(sym: Symbol, pre: Type, inherited: Boolean, viaView: Symbol) = {
    val implicitlyAdded = viaView != NoSymbol
    members.add(sym, pre, implicitlyAdded) { (s, st) =>
      val result = new TypeMember(s, st,
        context.isAccessible(if (s.hasGetter) s.getterIn(s.owner) else s, pre, superAccess && !implicitlyAdded),
        inherited,
        viaView)
      result.prefix = pre
      result
    }
  }

  /** Create a function application of a given view function to `tree` and typechecked it.
   */
  def viewApply(view: SearchResult): Tree = {
    assert(view.tree != EmptyTree)
    val t = analyzer.newTyper(context.makeImplicit(reportAmbiguousErrors = false))
      .typed(Apply(view.tree, List(tree)) setPos tree.pos)
    if (!t.tpe.isErroneous) t
    else analyzer.newTyper(context.makeSilent(reportAmbiguousErrors = true))
      .typed(Apply(view.tree, List(tree)) setPos tree.pos)
      .onTypeError(EmptyTree)
  }

  val pre = stabilizedType(tree)

  val ownerTpe = tree.tpe match {
    case ImportType(expr) => expr.tpe
    case null => pre
    case MethodType(List(), rtpe) => rtpe
    case _ => tree.tpe
  }
  //print("add members")
  for (sym <- ownerTpe.members)
    addTypeMember(sym, pre, sym.owner != ownerTpe.typeSymbol, NoSymbol)
  // First batch: direct members. The enrichment batch below is computed lazily.
  members.allMembers #:: {
    //print("\nadd enrichment")
    val applicableViews: List[SearchResult] =
      if (ownerTpe.isErroneous) List()
      else new ImplicitSearch(
        tree, functionType(List(ownerTpe), AnyTpe), isView = true, isByNamePt = false,
        context0 = context.makeImplicit(reportAmbiguousErrors = false)).allImplicits
    for (view <- applicableViews) {
      val vtree = viewApply(view)
      val vpre = stabilizedType(vtree)
      for (sym <- vtree.tpe.members if sym.isTerm) {
        addTypeMember(sym, vpre, inherited = false, view.tree.symbol)
      }
    }
    //println()
    LazyList(members.allMembers)
  }
}
/** Result of a completion query: the matching members plus enough position
 * information to replace the partial name the user has typed.
 */
sealed abstract class CompletionResult {
  type M <: Member
  def results: List[M]
  /** The (possibly partial) name detected that precedes the cursor */
  def name: Name
  /** Cursor Offset - positionDelta == position of the start of the name */
  def positionDelta: Int
  /** Filter `results` by the entered name, using `nameMatcher` (prefix match
   * by default) and excluding junk (constructors, synthetic names, etc.).
   */
  def matchingResults(nameMatcher: (Name) => Name => Boolean = entered => candidate => candidate.startsWith(entered)): List[M] = {
    // An error name means nothing usable was typed; treat it as empty.
    val enteredName = if (name == nme.ERROR) nme.EMPTY else name
    val matcher = nameMatcher(enteredName)
    results filter { (member: Member) =>
      val symbol = member.sym
      def isStable = member.tpe.isStable || member.sym.isStable || member.sym.getterIn(member.sym.owner).isStable
      def isJunk = symbol.name.isEmpty || !isIdentifierStart(member.sym.name.charAt(0)) // e.g. <byname>
      !isJunk && member.accessible && !symbol.isConstructor && (name.isEmpty || matcher(member.sym.name) && (symbol.name.isTermName == name.isTermName || name.isTypeName && isStable))
    }
  }
}
object CompletionResult {
  /** Completions drawn from the enclosing scope (no qualifier). */
  final case class ScopeMembers(positionDelta: Int, results: List[ScopeMember], name: Name) extends CompletionResult {
    type M = ScopeMember
  }
  /** Completions drawn from the members of `qualifier`'s type. */
  final case class TypeMembers(positionDelta: Int, qualifier: Tree, tree: Tree, results: List[TypeMember], name: Name) extends CompletionResult {
    type M = TypeMember
  }
  /** Empty result, used when no completions apply. */
  case object NoResults extends CompletionResult {
    override def results = Nil
    override def name = nme.EMPTY
    override def positionDelta = 0
  }
  // Splits an identifier at upper-case boundaries, e.g. "FooBar" -> ["Foo", "Bar"].
  private val CamelRegex = "([A-Z][^A-Z]*)".r
  private def camelComponents(s: String, allowSnake: Boolean): List[String] = {
    // ALL_CAPS snake-case names split on '_' instead of camel humps.
    if (allowSnake && s.forall(c => c.isUpper || c == '_')) s.split('_').toList.filterNot(_.isEmpty)
    // The "X" sentinel makes the regex also capture a leading lower-case chunk.
    else CamelRegex.findAllIn("X" + s).toList match { case head :: tail => head.drop(1) :: tail; case Nil => Nil }
  }
  /** A lenient camel-hump matcher, e.g. `isl` matches `GenIndexedSeqLike` or `isLovely`. */
  def camelMatch(entered: Name): Name => Boolean = {
    val enteredS = entered.toString
    val enteredLowercaseSet = enteredS.toLowerCase().toSet
    // Only treat candidates as snake-case when the query itself has no '_'.
    val allowSnake = !enteredS.contains('_')

    {
      candidate: Name =>
        def candidateChunks = camelComponents(candidate.dropLocal.toString, allowSnake)
        // Loosely based on IntelliJ's autocompletion: the user can just write everything in
        // lowercase, as we'll let `isl` match `GenIndexedSeqLike` or `isLovely`.
        def lenientMatch(entered: String, candidate: List[String], matchCount: Int): Boolean = {
          candidate match {
            case Nil => entered.isEmpty && matchCount > 0
            case head :: tail =>
              val enteredAlternatives = Set(entered, entered.capitalize)
              // Greedily count leading characters of this hump matched by the query.
              val n = head.toIterable.lazyZip(entered).count {case (c, e) => c == e || (c.isUpper && c == e.toUpper)}
              head.take(n).inits.exists(init =>
                enteredAlternatives.exists(entered =>
                  lenientMatch(entered.stripPrefix(init), tail, matchCount + (if (init.isEmpty) 0 else 1))
                )
              )
          }
        }
        val containsAllEnteredChars = {
          // Trying to rule out some candidates quickly before the more expensive `lenientMatch`
          val candidateLowercaseSet = candidate.toString.toLowerCase().toSet
          enteredLowercaseSet.diff(candidateLowercaseSet).isEmpty
        }
        containsAllEnteredChars && lenientMatch(enteredS, candidateChunks, 0)
    }
  }
}
  /** Computes the completions applicable at `pos`, dispatching on the shape of
   *  the typed tree under the cursor (import, selection, or bare identifier).
   */
  final def completionsAt(pos: Position): CompletionResult = {
    val focus1: Tree = typedTreeAt(pos)
    // Builds a type-member completion from the qualifier's type, with the
    // partially typed name sliced out of the source buffer.
    def typeCompletions(tree: Tree, qual: Tree, nameStart: Int, name: Name): CompletionResult = {
      val qualPos = qual.pos
      val allTypeMembers = typeMembers(qualPos).toList.flatten
      val positionDelta: Int = pos.start - nameStart
      val subName: Name = name.newName(new String(pos.source.content, nameStart, pos.start - nameStart)).encodedName
      CompletionResult.TypeMembers(positionDelta, qual, tree, allTypeMembers, subName)
    }
    focus1 match {
      // `import foo.bar` where the selector failed to parse: complete from scope.
      case imp@Import(i @ Ident(name), head :: Nil) if head.name == nme.ERROR =>
        val allMembers = scopeMembers(pos)
        val nameStart = i.pos.start
        val positionDelta: Int = pos.start - nameStart
        val subName = name.subName(0, pos.start - i.pos.start)
        CompletionResult.ScopeMembers(positionDelta, allMembers, subName)
      // `import qual.{..., sel}`: complete the selector under the cursor.
      case imp@Import(qual, selectors) =>
        selectors.reverseIterator.find(_.namePos <= pos.start) match {
          case None => CompletionResult.NoResults
          case Some(selector) =>
            typeCompletions(imp, qual, selector.namePos, selector.name)
        }
      case sel@Select(qual, name) =>
        val qualPos = qual.pos
        def fallback = qualPos.end + 2
        val source = pos.source
        // Scan backwards from the cursor for the point just before the name
        // begins (where no identifier is found); fall back past the ".".
        val nameStart: Int = (focus1.pos.end - 1 to qualPos.end by -1).find(p =>
          source.identifier(source.position(p)).exists(_.length == 0)
        ).map(_ + 1).getOrElse(fallback)
        typeCompletions(sel, qual, nameStart, name)
      case Ident(name) =>
        val allMembers = scopeMembers(pos)
        val positionDelta: Int = pos.start - focus1.pos.start
        val subName = name.subName(0, positionDelta)
        CompletionResult.ScopeMembers(positionDelta, allMembers, subName)
      case _ =>
        CompletionResult.NoResults
    }
  }
/** Implements CompilerControl.askLoadedTyped */
private[interactive] def waitLoadedTyped(source: SourceFile, response: Response[Tree], keepLoaded: Boolean = false, onSameThread: Boolean = true): Unit = {
getUnit(source) match {
case Some(unit) =>
if (unit.isUpToDate) {
debugLog("already typed")
response set unit.body
} else if (ignoredFiles(source.file)) {
response.raise(lastException.getOrElse(CancelException))
} else if (onSameThread) {
getTypedTree(source, forceReload = false, response)
} else {
debugLog("wait for later")
outOfDate = true
waitLoadedTypeResponses(source) += response
}
case None =>
debugLog("load unit and type")
try reloadSources(List(source))
finally {
waitLoadedTyped(source, response, onSameThread)
if (!keepLoaded) removeUnitOf(source)
}
}
}
  /** Implements CompilerControl.askParsedEntered.
   *
   *  Answers `response` with the parsed-and-entered (but not typechecked)
   *  tree of `source`, loading the unit if necessary. When the compiler is
   *  out of date and the request is asynchronous, the response is deferred
   *  until the next background pass.
   */
  private[interactive] def getParsedEntered(source: SourceFile, keepLoaded: Boolean, response: Response[Tree], onSameThread: Boolean = true): Unit = {
    getUnit(source) match {
      case Some(unit) =>
        getParsedEnteredNow(source, response)
      case None =>
        try {
          if (keepLoaded || outOfDate && onSameThread)
            reloadSources(List(source))
        } finally {
          if (keepLoaded || !outOfDate || onSameThread)
            getParsedEnteredNow(source, response)
          else
            // Defer: the background thread will answer once it catches up.
            getParsedEnteredResponses(source) += response
        }
    }
  }
  /** Parses and enters given source file, storing parse tree in response.
   *  Any exception raised while parsing is propagated through `response`.
   */
  private def getParsedEnteredNow(source: SourceFile, response: Response[Tree]): Unit = {
    respond(response) {
      onUnitOf(source) { unit =>
        parseAndEnter(unit)
        unit.body
      }
    }
  }
// ---------------- Helper classes ---------------------------
  /** The typer run: a lightweight Run that only ever advances units to the
   *  typer phase, as required by the interactive compiler. */
  class TyperRun extends Run {
    // units is always empty

    /** canRedefine is used to detect double declarations of classes and objects
     *  in multiple source files.
     *  Since the IDE rechecks units several times in the same run, these tests
     *  are disabled by always returning true here.
     */
    override def canRedefine(sym: Symbol) = true

    /** Typechecks `unit` by running it through the typer phase. */
    def typeCheck(unit: CompilationUnit): Unit = {
      applyPhase(typerPhase, unit)
    }

    /** Apply a phase to a compilation unit. */
    private def applyPhase(phase: Phase, unit: CompilationUnit): Unit = {
      enteringPhase(phase) { phase.asInstanceOf[GlobalPhase] applyPhase unit }
    }
  }
def newTyperRun(): Unit = {
currentTyperRun = new TyperRun
}
  /** Control-flow throwable used internally to carry a typechecking result. */
  class TyperResult(val tree: Tree) extends ControlThrowable

  // The interactive compiler must be constructed before any phase has run.
  assert(globalPhase.id == 0)
  /** Pimps any by-name expression with an `onTypeError` fallback. */
  implicit def addOnTypeError[T](x: => T): OnTypeError[T] = new OnTypeError(x)

  // OnTypeError should still catch TypeError because of cyclic references,
  // but DivergentImplicit shouldn't leak anymore here
  class OnTypeError[T](op: => T) {
    /** Evaluates `op`, substituting `alt` if a TypeError is thrown. */
    def onTypeError(alt: => T) = try {
      op
    } catch {
      case ex: TypeError =>
        debugLog("type error caught: "+ex)
        alt
    }
  }
  // We need to force a number of symbols that might be touched by a parser.
  // Otherwise thread safety property of parseTree method would be violated.
  protected def forceSymbolsUsedByParser(): Unit = {
    val symbols =
      Set(UnitClass, BooleanClass, ByteClass,
        ShortClass, IntClass, LongClass, FloatClass,
        DoubleClass, NilModule, ListClass) ++ TupleClass.seq
    symbols.foreach(_.initialize)
  }

  // Run eagerly at construction time, before the background thread starts.
  forceSymbolsUsedByParser()
  /** Start the compiler background thread and turn on thread confinement checks */
  private def finishInitialization(): Unit = {
    // this flag turns on `assertCorrectThread checks`
    initializing = false

    // Only start the thread if initialization was successful. A crash while forcing symbols (for example
    // if the Scala library is not on the classpath) can leave running threads behind. See Scala IDE #1002016
    compileRunner.start()
  }

  /** The compiler has been initialized. Constructors are evaluated in textual order,
   *  if we reached here, all super constructors and the primary constructor
   *  have been executed.
   */
  finishInitialization()
}
object CancelException extends Exception
| martijnhoekstra/scala | src/interactive/scala/tools/nsc/interactive/Global.scala | Scala | apache-2.0 | 52,685 |
package org.nikosoft.oanda
import org.nikosoft.oanda.api.Api
import org.nikosoft.oanda.api.ApiModel.AccountModel.AccountID
import org.nikosoft.oanda.api.ApiModel.InstrumentModel.{Candlestick, CandlestickGranularity}
import org.nikosoft.oanda.api.ApiModel.PrimitivesModel.{DateTime, InstrumentName}
import org.nikosoft.oanda.api.ApiModel.TransactionModel.{TransactionFilter, TransactionID}
import org.nikosoft.oanda.api.`def`.InstrumentApi.CandlesResponse
import org.nikosoft.oanda.api.`def`.PositionApi.ClosePositionRequest
import org.nikosoft.oanda.api.impl.{AccountsApiImpl, InstrumentApiImpl, TransactionApiImpl}
import scala.concurrent.duration.DurationInt
import scalaz.{-\\/, \\/-}
/**
 * Sandbox for experiments.
 *
 * NOTE(review): this is a scratch `App` that performs live API calls on
 * startup; the final statement closes an open EUR_USD position against the
 * configured trading account. Most earlier experiments are kept commented out
 * for reference.
 */
object Sandbox extends App {

  /*
    AccountsApiImpl.accounts match {
      case \\/-(result) =>
        result.accounts.foreach(println)
        result.accounts.map(account => AccountsApiImpl.accountDetails(account.id)).foreach(println)
        result.accounts.map(account => AccountsApiImpl.accountSummary(account.id)).foreach(println)
        result.accounts.map(account => AccountsApiImpl.accountInstruments(account.id, Seq(InstrumentName("EUR_USD"), InstrumentName("EUR_CHF")))).foreach(println)
      case -\\/(error) => println(error)
    }
  */

  /*
    InstrumentApiImpl.candles(InstrumentName("EUR_USD"), granularity = CandlestickGranularity.H1, count = Option(10)) match {
      case \\/-(response) => response.candles.foreach(println)
      case -\\/(err) =>
    }
  */

  // Account id is supplied via the `accountID` JVM system property.
  val accountId = AccountID(System.getProperty("accountID"))

  //  OrderApiImpl.order(AccountID(accountId), OrderRequestWrapper(LimitOrderRequest(instrument = InstrumentName("EUR_USD"), units = 1000, price = PriceValue("0.1"))))
  //  val \\/-(orders) = OrderApiImpl.orders(AccountID(accountId), state = OrderState.FILLED)
  //  orders.orders.foreach(println)
  //  println(OrderApiImpl.cancelOrder(AccountID(accountId), OrderSpecifier("237")))
  //  val \\/-(transactions) = TransactionApiImpl.transactionsIdRange(AccountID(accountId), from = TransactionID("1"), to = TransactionID("200"), `type` = Seq(TransactionFilter.ADMIN, TransactionFilter.LIMIT_ORDER))
  //  transactions.transactions.foreach(println)

  import scala.concurrent.ExecutionContext.Implicits.global

  /*
    var x = 0
    val queue = TransactionApiImpl.transactionsStream(AccountID(accountId), terminate = {x = x + 1; x >= 3})
    Iterator.continually(queue.take()).foreach {
      case \\/-(-\\/(heartbeat)) => println(heartbeat)
      case \\/-(\\/-(transaction)) => println(transaction)
      case -\\/(error) => println(error)
    }
  */

  /*
    val \\/-(candles: CandlesResponse) = Api.instrumentsApi.candles(InstrumentName("EUR_USD"), granularity = CandlestickGranularity.M5, count = Option(100))
    candles.candles.flatMap(_.mid).map(candle => candle.h.pips - candle.l.pips).foreach(println)
  */

  /*
    Api.instrumentsApi
      .candles(
        instrument = InstrumentName("EUR_USD"),
        granularity = CandlestickGranularity.M1,
        //      count = Some(500)
        from = Some(DateTime("2017-07-01T00:00:00Z")),
        to = Some(DateTime("2017-07-05T00:00:00Z"))
      ).fold(println, response => {
      response.candles.take(10).foreach(println)
    })
  */

  // Live side effect: closes the whole long EUR_USD position.
  Api.positionApi.closePosition(AccountID(GlobalProperties.TradingAccountId), InstrumentName("EUR_USD"), ClosePositionRequest(longUnits = Some("ALL")))
}
| cnnickolay/forex-trader | oanda-scala-api/src/main/scala/org/nikosoft/oanda/Sandbox.scala | Scala | mit | 3,321 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import org.scalatest.events._
/*
I can't get this to work and have no more time. Will make nestedSuiteNames package access.
object DiscoverySuiteCompanionFriend {
val dsc = DiscoverySuite.getClass.getField("MODULE$").get(null)
def nestedSuiteNames(path: String, accessibleSuites: List[String], wildcard: Boolean, runpathClassLoader: ClassLoader): List[String] = {
val m = Class.forName("org.scalatest.DiscoverySuite").getDeclaredMethod("org$scalatest$DiscoverySuite$$nestedSuiteNames", Array(classOf[String], classOf[List[String]], classOf[Boolean]))
m.setAccessible(true)
m.invoke(dsc, Array[Object](path, accessibleSuites, new java.lang.Boolean(wildcard))).asInstanceOf[List[String]]
}
}
*/
/** Exercises DiscoverySuite's constructor argument validation and the
 *  suite-name discovery logic of `nestedSuiteNames`. */
private[scalatest] class DiscoverySuiteSuite extends Suite {

  val loader = DiscoverySuite.getClass.getClassLoader

  def testConstructor() {

    // The constructor rejects a null path, null accessibleSuites, and a null loader.
    intercept[NullPointerException] {
      new DiscoverySuite(null, Set(), false, loader)
    }
    intercept[NullPointerException] {
      new DiscoverySuite("hi", null, false, loader)
    }
    intercept[NullPointerException] {
      new DiscoverySuite(null, Set(), false, null)
    }

    // wildcard = true matches suites in the package or any sub-package;
    // wildcard = false matches only suites directly in the package.
    expect(Nil) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set(), false)
    }
    expect(List("a.b.c.Hi")) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set("a.b.c.Hi"), true)
    }
    expect(List("a.b.c.d.Hi")) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set("a.b.c.d.Hi"), true)
    }
    expect(List("a.b.c.Hi")) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set("a.b.c.Hi"), false)
    }
    expect(Nil) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set("a.b.c"), false)
    }
    expect(Nil) {
      DiscoverySuite.nestedSuiteNames("a.b.c", Set("a.b.c.d.Hi"), false)
    }
  }
}
| yyuu/scalatest | src/test/scala/org/scalatest/tools/DiscoverySuiteSuite.scala | Scala | apache-2.0 | 2,407 |
package spatial.models
case class AreaConfig[T](fields: Array[String], default: T)
case class AreaMap[T](name: String, params: Seq[String], entries: Map[String,T])(implicit val config: AreaConfig[T]) {
def fullName: String = name + "_" + params.mkString("_")
private val fields: Array[String] = config.fields
private val default = config.default
def keys: Array[String] = fields
def nonZeroFields: Array[String] = fields.filter{f => entries.contains(f) && entries(f) != default }
// HACK - get parameter which gives number
def n: Option[Int] = {
val i = params.lastIndexWhere(_ != "")
if (i >= 0) {
val x = params(i)
if (x.nonEmpty && x.forall(_.isDigit)) Some(x.toInt) else None
}
else None
}
def renameEntries(remapping: String => String): AreaMap[T] = {
val entries2 = entries.map{case (k,v) => remapping(k) -> v }
new AreaMap(name, params, entries2)
}
def toSeq: Seq[T] = fields.map{f =>
if (entries.contains(f)) {
entries(f)
}
else {
default
}
}
def apply(field: String): T = entries.getOrElse(field, default)
def seq(keys: String*): Seq[T] = keys.map{k => this(k)}
def map[R](func: T => R)(implicit config: AreaConfig[R]): AreaMap[R] = AreaMap(name, params, entries.map{case (k,v) => k -> func(v)})
def zip[S,R](that: AreaMap[S])(func: (T,S) => R)(implicit config: AreaConfig[R]): AreaMap[R] = {
AreaMap(name, params, fields.map{k => k -> func(this(k), that(k)) }.toMap)
}
def zipExists(that: AreaMap[T])(func: (T,T) => Boolean): Boolean = fields.exists{k => func(this(k), that(k)) }
def zipForall(that: AreaMap[T])(func: (T,T) => Boolean): Boolean = fields.forall{k => func(this(k), that(k)) }
def +(that: AreaMap[T])(implicit num: AffArith[T]): AreaMap[T] = this.zip(that){(a,b) => num.plus(a,b) }
def -(that: AreaMap[T])(implicit num: AffArith[T]): AreaMap[T] = this.zip(that){(a,b) => num.minus(a,b) }
def /(that: AreaMap[Double])(implicit num: AffArith[T]): AreaMap[T] = this.zip(that){(a,b) => num.div(a,b) }
def *(b: Double)(implicit num: AffArith[T]): AreaMap[T] = this.map{x => num.times(x,b) }
def /(b: Double)(implicit num: AffArith[T]): AreaMap[T] = this.map{x => num.div(x,b) }
def isNonZero(implicit num: Numeric[T], ord: Ordering[T]): Boolean = this.toSeq.exists{x => ord.gt(x, num.fromInt(0)) }
def <(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipForall(that){(a,b) => ord.lt(a,b) } // a0 < b0 && ... && aN < bN
def <=(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipForall(that){(a,b) => ord.lteq(a,b) } // a0 <= b0 && ... && aN <= bN
// These may seem a bit odd, but required to have the property !(a < b) = a >= b
def >(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipExists(that){(a,b) => ord.gt(a,b) } // a0 > b0 || ... || aN > b0
def >=(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipExists(that){(a,b) => ord.gteq(a,b) } // a0 >= b0 || ... || aN >= b0
// Alternative comparisons, where < is true if any is less than, > is true iff all are greater
def <<(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipExists(that){(a,b) => ord.lt(a,b) }
def <<=(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipExists(that){(a,b) => ord.lteq(a,b) }
def >>(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipForall(that){(a,b) => ord.gt(a,b) }
def >>=(that: AreaMap[T])(implicit ord: Ordering[T]): Boolean = this.zipForall(that){(a,b) => ord.gteq(a,b) }
override def toString: String = {
"Area" + fields.map{f => f -> this(f) }
.filterNot(_._2 == default)
.map{case (f,v) => s"$f=$v"}
.mkString("(", ", ", ")")
}
def toPrintableString(nParams: Int): String = {
val padParams = Array.fill(nParams - params.length)("")
val seq = this.toSeq
(Array(name) ++ params ++ padParams ++ seq).mkString(",")
}
}
object AreaMap {
  /** Empty, unnamed map — every field reads as the config default. */
  def zero[T](implicit config: AreaConfig[T]): AreaMap[T] = new AreaMap[T]("", Nil, Map.empty)
  def apply[T](entries: (String,T)*)(implicit config: AreaConfig[T]): AreaMap[T] = new AreaMap("", Nil, entries.toMap)

  /** Pairs `entries` positionally with `config.fields`; extra values are dropped. */
  def fromArray[T](name: String, params: Seq[String], entries: Array[T])(implicit config: AreaConfig[T]): AreaMap[T] = {
    new AreaMap(name, params, config.fields.zip(entries).toMap)
  }

  /*def fromLine[T](line: String, nParams: Int, indices: Seq[Int])(func: String => T)(implicit config: AreaConfig[T]): (String, AreaMap[T]) = {
    val parts = line.split(",").map(_.trim)
    val name = parts.head
    val params = parts.slice(1,nParams+1)
    val entries = indices.map{i => func(parts(i)) }
    name -> AreaMap(params, config.fields.zip(entries).toMap)
  }*/
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/models/AreaMap.scala | Scala | mit | 4,758 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class StatTest extends Specification with StatTestHelper {

  "Stat parser" should {
    // Malformed or incomplete stat specification strings must be rejected.
    "fail for malformed strings" in {
      Stat(sft, "") must throwAn[Exception]
      Stat(sft, "abcd") must throwAn[Exception]
      Stat(sft, "RangeHistogram()") must throwAn[Exception]
      Stat(sft, "RangeHistogram(foo,10,2012-01-01T00:00:00.000Z,2012-02-01T00:00:00.000Z)") must throwAn[Exception]
      Stat(sft, "MinMax()") must throwAn[Exception]
      Stat(sft, "MinMax(abcd)") must throwAn[Exception]
    }

    // Histogram bounds containing escapes/control characters must survive
    // parsing and be normalized by BinnedStringArray.
    "work with complex values" in {
      val sft = SimpleFeatureTypes.createType("foo", "foreign_key:String,*wkt:Point:srid=4326")
      val min = """geoMesaNameSpace\\u001Fgeokind\\u001FtDZdPCceQg+F\\/a0tV0azJA==0"""
      val max = """geoMesaNameSpace\\u001Fgeokind\\u001FtDZdPCceQg+F\\/a0tV0azJA==z"""
      val stat = Stat(sft, Stat.Histogram("foreign_key", 1000, min, max))
      stat must beAnInstanceOf[Histogram[String]]
      val histogram = stat.asInstanceOf[Histogram[String]]
      histogram.attribute mustEqual 0
      histogram.bins.bounds mustEqual BinnedStringArray.normalizeBounds(min, max)
    }
  }
}
| ronq/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/StatTest.scala | Scala | apache-2.0 | 1,838 |
package io.finch.oauth2
import com.twitter.finagle.http.Status
import io.finch.Input
import org.scalatest.{FlatSpec, Matchers}
/** End-to-end tests of the example OAuth2 endpoints in [[Main]]:
 *  token issuance for all three grant types, token-protected access,
 *  and the unprotected route. */
class OAuth2Spec extends FlatSpec with Matchers {
  import Main._

  behavior of "the token-generating endpoint"

  it should "give an access token with the password grant type" in {
    val input = Input.post("/users/auth")
      .withForm(
        "grant_type" -> "password",
        "username" -> "user_name",
        "password" -> "user_password",
        "client_id" -> "user_id")

    tokens(input).awaitValueUnsafe().map(_.tokenType) shouldBe Some("Bearer")
  }

  it should "give an access token with the client credentials grant type" in {
    // Authorization header carries base64("user_id:user_secret").
    val input = Input.post("/users/auth")
      .withForm("grant_type" -> "client_credentials")
      .withHeaders("Authorization" -> "Basic dXNlcl9pZDp1c2VyX3NlY3JldA==")

    tokens(input).awaitValueUnsafe().map(_.tokenType) shouldBe Some("Bearer")
  }

  it should "give an access token with the auth code grant type" in {
    val input = Input.post("/users/auth")
      .withForm(
        "grant_type" -> "authorization_code",
        "code" -> "user_auth_code",
        "client_id" -> "user_id")

    tokens(input).awaitValueUnsafe().map(_.tokenType) shouldBe Some("Bearer")
  }

  it should "give back bad request if we omit the password for the password grant type" in {
    val input = Input.post("/users/auth")
      .withForm(
        "grant_type" -> "password",
        "username" -> "user_name",
        "client_id" -> "user_id")

    tokens(input).awaitOutputUnsafe().map(_.status) shouldBe Some(Status.BadRequest)
  }

  it should "give back nothing for other verbs" in {
    val input = Input.get("/users/auth")
      .withForm("grant_type" -> "authorization_code", "code" -> "code", "client_id" -> "id")

    tokens(input).awaitValueUnsafe() shouldBe None
  }

  behavior of "the authorized endpoint"

  it should "work if the access token is a valid one" in {
    // First obtain a token, then use it to hit the protected route.
    val input = Input.post("/users/auth")
      .withForm("grant_type" -> "client_credentials")
      .withHeaders("Authorization" -> "Basic dXNlcl9pZDp1c2VyX3NlY3JldA==")

    val authdUser = tokens(input).awaitValueUnsafe()
      .map(_.accessToken).flatMap(t =>
        users(Input.get("/users/current").withForm("access_token" -> t)).awaitValueUnsafe()
      )

    authdUser shouldBe Some(OAuthUser("user", "John Smith"))
  }

  it should "be unauthorized when using an invalid access token" in {
    val input = Input.get("/users/current")
      .withForm("access_token" -> "at-5b0e7e3b-943f-479f-beab-7814814d0315")

    users(input).awaitOutputUnsafe().map(_.status) shouldBe Some(Status.Unauthorized)
  }

  it should "give back nothing for other verbs" in {
    val input = Input.post("/users/current")
      .withForm("access_token" -> "at-5b0e7e3b-943f-479f-beab-7814814d0315")

    users(input).awaitValueUnsafe() shouldBe None
  }

  behavior of "the unprotected users endpoint"

  it should "give back the unprotected user" in {
    unprotected(Input.get("/users/unprotected")).awaitValueUnsafe() shouldBe
      Some(UnprotectedUser("unprotected"))
  }

  it should "give back nothing for other verbs" in {
    unprotected(Input.post("/users/unprotected")).awaitValueUnsafe() shouldBe None
  }
}
| yanana/finch | examples/src/test/scala/io/finch/oauth2/OAuth2Spec.scala | Scala | apache-2.0 | 3,265 |
package io.getquill
import com.github.jasync.sql.db.pool.ConnectionPool
import com.github.jasync.sql.db.postgresql.PostgreSQLConnection
import com.github.jasync.sql.db.{ QueryResult => DBQueryResult }
import com.typesafe.config.Config
import io.getquill.ReturnAction.{ ReturnColumns, ReturnNothing, ReturnRecord }
import io.getquill.context.jasync.{ ArrayDecoders, ArrayEncoders, JAsyncContext, UUIDObjectEncoding }
import io.getquill.util.LoadConfig
import io.getquill.util.Messages.fail
import scala.jdk.CollectionConverters._
/** Quill context for PostgreSQL over the jasync-sql async driver, with
 *  Postgres array and UUID encodings mixed in. */
class PostgresJAsyncContext[N <: NamingStrategy](naming: N, pool: ConnectionPool[PostgreSQLConnection])
  extends JAsyncContext[PostgresDialect, N, PostgreSQLConnection](PostgresDialect, naming, pool)
  with ArrayEncoders
  with ArrayDecoders
  with UUIDObjectEncoding {

  // Convenience constructors: from a prepared config object, a raw Typesafe
  // Config, or a config path prefix.
  def this(naming: N, config: PostgresJAsyncContextConfig) = this(naming, config.pool)
  def this(naming: N, config: Config) = this(naming, PostgresJAsyncContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  /** Reads the RETURNING value from the first row of the driver result.
   *  Fails loudly if the driver returned no rows (would indicate a bug). */
  override protected def extractActionResult[O](returningAction: ReturnAction, returningExtractor: Extractor[O])(result: DBQueryResult): O =
    result.getRows.asScala
      .headOption
      .map(row => returningExtractor(row, ()))
      .getOrElse(fail("This is a bug. Cannot extract returning value."))

  override protected def expandAction(sql: String, returningAction: ReturnAction): String =
    returningAction match {
      // The Postgres dialect will create SQL that has a 'RETURNING' clause so we don't have to add one.
      case ReturnRecord => s"$sql"
      // The Postgres dialect will not actually use these below variants but in case we decide to plug
      // in some other dialect into this context...
      case ReturnColumns(columns) => s"$sql RETURNING ${columns.mkString(", ")}"
      case ReturnNothing => s"$sql"
    }

}
| getquill/quill | quill-jasync-postgres/src/main/scala/io/getquill/PostgresJAsyncContext.scala | Scala | apache-2.0 | 1,933 |
package thangiee.riotapi
import org.scalactic.Good
import org.scalamock.scalatest.MockFactory
import org.scalatest._
/** Common base for Riot API specs: provides a mock [[ApiCaller]] that answers
 *  every call with the fixture `json` supplied by the concrete scope. */
trait BaseSpec extends FlatSpec with Matchers with MockFactory {

  abstract class MockScope {
    /** Canned JSON response the mocked API call will return. */
    def json: String

    // SECURITY(review): a hard-coded API key is committed to source. Even for
    // tests it should come from configuration/environment, and this key should
    // be revoked and rotated.
    RiotApi.key = "456267a6-1777-4763-a77f-f3b1f06ed99d"
    implicit val m: ApiCaller = mock[ApiCaller]
    (m.call _).expects(*, *).returning(Good(json))
  }
}
| Thangiee/Riot-API-Scala | src/test/scala/thangiee/riotapi/BaseSpec.scala | Scala | mit | 396 |
package assetproviders
import assetproviders.ResultWithHeaders.ResultWithHeaders
import play.api.mvc.Action
import play.api.mvc.Controller
import play.api.mvc.AnyContent
/**
* Gives asset support for .svgz files, which require the following headers to be processed
* properly:
* {{{
* Content-Type: "image/svg+xml"
* Content-Encoding: gzip
* }}}
*
* See [[http://kaioa.com/node/45 this link for more info]].
*/
trait SvgzAssetSupport extends AssetProvider { this: Controller =>
  /** Intercepts `.svgz` requests and decorates the response with the headers
   *  browsers need to decode gzipped SVG; all other files pass through. */
  abstract override def at(path: String, file: String): Action[AnyContent] = {
    if (!file.endsWith(".svgz")) {
      super.at(path, file)
    } else {
      Action { request =>
        // NOTE(review): unchecked cast — assumes the underlying asset action
        // always produces a ResultWithHeaders; verify against AssetProvider.
        val result = super.at(path, file).apply(request).asInstanceOf[ResultWithHeaders]
        result.withHeaders(
          "Content-Encoding" -> "gzip",
          "Content-Type" -> "image/svg+xml"
        )
      }
    }
  }
}
| myyk/play-assets-improvements | app/assetproviders/SvgzAssetSupport.scala | Scala | mit | 901 |
package models
import akka.actor._
import javax.inject.{ Inject, Singleton }
import play.api.libs.mailer._
/** Actor that renders and dispatches auction notification e-mails. */
class EmailActor(mailerClient: MailerClient) extends Actor {

  /** Routes each bid notification to its e-mail template and sends it. */
  def receive = {
    case m: BidToppedMessage =>
      val rendered = views.html.email.bidTopped.render(m.itemName, m.itemUrl)
      sendEmail(m, rendered.body)
    case m: BidReceivedMessage =>
      val rendered = views.html.email.bidReceived.render(m.itemName, m.itemUrl)
      sendEmail(m, rendered.body)
  }

  /** Builds a plain-text e-mail from the message metadata and sends it. */
  @SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements"))
  def sendEmail(m: EmailMessage, body: String): Unit =
    mailerClient.send(
      Email(
        subject = m.subject,
        from = "Lojinha JCranky <noreply@jcranky.com>",
        to = Seq(m.to),
        bodyText = Some(body)
      )
    )
}
/** DI singleton that owns the single [[EmailActor]] instance. */
@Singleton
class EMailActorHelper @Inject() (system: ActorSystem) {
  // NOTE(review): SMTP host/port are hard-coded here; presumably they should
  // come from application configuration — confirm before deploying.
  val actor: ActorRef = system.actorOf(
    Props(
      new EmailActor(new SMTPMailer(new SMTPConfiguration("localhost", 25)))
    )
  )
}
/** Common shape of the notification e-mails handled by [[EmailActor]]. */
sealed trait EmailMessage {
  val itemName: String
  val itemUrl: String
  val to: String
  val subject: String
}

/** Notifies a bidder that someone has outbid them on an item. */
final case class BidToppedMessage(itemName: String, itemUrl: String, to: String) extends EmailMessage {
  val subject = "better bid received"
}

/** Acknowledges to a bidder that their bid was recorded. */
final case class BidReceivedMessage(itemName: String, itemUrl: String, to: String) extends EmailMessage {
  val subject = "your bid has been received"
}
| jcranky/lojinha | app/models/EmailActor.scala | Scala | gpl-3.0 | 1,341 |
package wow.common.database
import scalikejdbc._
/**
 * Database management: one-time configuration of ScalikeJDBC logging.
 */
object Database {
  /** Switches ScalikeJDBC's SQL logging to compact single-line output. */
  def configure(): Unit = {
    GlobalSettings.loggingSQLAndTime = GlobalSettings.loggingSQLAndTime.copy(
      printUnprocessedStackTrace = false,
      stackTraceDepth = 0,
      singleLineMode = true)
  }
}
/**
 * Databases connection tokens. Realm servers register their id as an
 * enumeration value; id 0 is implicitly taken by AuthServer.
 */
object Databases extends Enumeration {
  /**
   * Auth database connection token
   */
  val AuthServer = Value

  /**
   * Creates a token for a realm server
   * @param id realm server id
   */
  def registerRealm(id: Int): Unit = {
    require(id > 0)
    // Side effect: Value(id) adds the id to this Enumeration's value set.
    Value(id)
  }

  /**
   * Get database connection token for realm server.
   * Must be called only after [[registerRealm]] for the same id.
   * @param id realm server id
   * @return database connection token
   */
  def RealmServer(id: Int): Databases.Value = {
    // This assert exists because of pain and suffering
    require(id > 0)
    this (id)
  }
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/common/database/Database.scala | Scala | mit | 915 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt
import com.typesafe.config.ConfigFactory
import org.specs2.mutable._
/** Verifies that ApplicationSecretGenerator rewrites `play.http.secret.key`
 *  (literal or nested form) and removes any legacy `play.crypto.secret`. */
class ApplicationSecretGeneratorSpec extends Specification {

  /** Shared pipeline for every example: parses `configContent`, rewrites its
   *  lines with `newSecret` through the generator, and returns the re-parsed
   *  configuration for assertions. */
  private def regenerate(configContent: String, newSecret: String = "newSecret"): com.typesafe.config.Config = {
    val config = ConfigFactory.parseString(configContent)
    val lines = configContent.split("\\n").toList
    val newLines: List[String] = ApplicationSecretGenerator.getUpdatedSecretLines(newSecret, lines, config)
    ConfigFactory.parseString(newLines.mkString("\\n"))
  }

  "ApplicationSecretGenerator" should {
    "override literal secret" in {
      val newConfig = regenerate(
        """
          |# test configuration
          |play.http.secret.key=changeme
          |""".stripMargin)
      newConfig.getString("play.http.secret.key").should_===("newSecret")
    }

    "override nested secret" in {
      val newConfig = regenerate(
        """
          |# test configuration
          |play {
          |  http {
          |    secret {
          |      key=changeme
          |    }
          |  }
          |}
          |""".stripMargin)
      newConfig.getString("play.http.secret.key").should_===("newSecret")
    }

    "deletes existing nested play.crypto.secret while overwriting secret" in {
      val newConfig = regenerate(
        """
          |# test configuration
          |play {
          |  http {
          |    secret {
          |      key=changeme
          |    }
          |  }
          |}
          |play {
          |  crypto {
          |    secret=deleteme
          |  }
          |}
          |""".stripMargin)
      newConfig.getString("play.http.secret.key") must_== "newSecret"
      newConfig.hasPath("play.crypto.secret") must beFalse
    }

    "deletes existing fixed play.crypto.secret while overwriting secret" in {
      val newConfig = regenerate(
        """
          |# test configuration
          |play {
          |  http {
          |    secret {
          |      key=changeme
          |    }
          |  }
          |}
          |play.crypto.secret=deleteme
          |
          |""".stripMargin)
      newConfig.getString("play.http.secret.key") must_== "newSecret"
      newConfig.hasPath("play.crypto.secret") must beFalse
    }
  }
}
| wsargent/playframework | framework/src/sbt-plugin/src/test/scala/play/sbt/ApplicationSecretGeneratorSpec.scala | Scala | apache-2.0 | 3,109 |
package org.tagsharp.test
import org.jsoup.nodes.Document
import org.jsoup.nodes.Element
import org.jsoup.Jsoup
import org.tagsharp.jsoup.PackedText
/**
 * Wraps a Jsoup document, a convenience access to concatenated body
 * text and later on a testable page section.
 * Parsing happens lazily, on first access to any member.
 */
class HtmlPage(val html: String) {

  lazy val document: Document = Jsoup.parse(html)

  /**
   * The default section is the whole HTML <body>
   */
  lazy val section: Element = document.body()

  /**
   * A concatenated view of the <body> text for regex
   */
  lazy val bodyText = new PackedText(document.body())

}
| reggoodwin/tagsharp | src/main/scala/org/tagsharp/test/HtmlPage.scala | Scala | mit | 605 |
/*
* Copyright 2015-2020 Noel Welsh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package doodle
package syntax
import doodle.algebra.{Picture, Text}
import doodle.core.font.Font
trait TextSyntax {
  /** Adds a `font` combinator to any picture whose algebra supports [[Text]]. */
  implicit class TextPictureOps[Alg[x[_]] <: Text[x], F[_], A](
      picture: Picture[Alg, F, A]) {
    /** Renders this picture's text with the given font. */
    def font(font: Font): Picture[Alg, F, A] =
      Picture{ implicit algebra: Alg[F] =>
        algebra.font(picture(algebra), font)
      }
  }

  /** Lifts a string into a text picture. */
  def text[Alg[x[_]] <: Text[x], F[_]](text: String): Picture[Alg, F, Unit] =
    Picture{ implicit algebra: Alg[F] =>
      algebra.text(text)
    }
}
| underscoreio/doodle | core/shared/src/main/scala/doodle/syntax/TextSyntax.scala | Scala | apache-2.0 | 1,119 |
package main.abstraction
import rescala._
abstract class SignalWrapper {
    type InternalType

    // Current wrapped signal. The liftMutatingN helpers never mutate in place;
    // they install a new derived Signal, so the wrapper acts as a cell of signals.
    protected val internalValue: Var[Signal[InternalType]]

    /** Snapshot of the current inner value (flattens both signal layers). */
    def toValue: InternalType = internalValue.now.now

    /** Wraps a signal into the wrapper type chosen by the implicit [[SignalWrappable]]. */
    protected def wrap[WrappedType, WrapperType](implicit wrapping: SignalWrappable[WrappedType, WrapperType]):
        Signal[WrappedType] => WrapperType = (unwrapped: Signal[WrappedType]) => wrapping.wrap(unwrapped)

    // liftPureN: lifts an N+1-ary pure function over the wrapped value (plus N
    // signal parameters) into a derived Signal of the result.
    // liftMutatingN: same arity, but the result replaces the wrapped signal.
    protected def liftPure0[ResultT](f: InternalType => ResultT)(): Signal[ResultT] =
        Signal(f(internalValue()()))
    protected def liftMutating0(f: InternalType => InternalType)(): Unit = {
        val signal = internalValue.now
        internalValue() = Signal(f(signal()))
    }

    protected def liftPure1[Param1T, ResultT](f: (InternalType, Param1T) => ResultT)(p1: Signal[Param1T]): Signal[ResultT] =
        Signal(f(internalValue()(), p1()))
    protected def liftMutating1[Param1T](f: (InternalType, Param1T) => InternalType)(p1: Signal[Param1T]): Unit = {
        val signal = internalValue.now
        internalValue() = Signal(f(signal(), p1()))
    }

    protected def liftPure2[Param1T, Param2T, ResultT](f: (InternalType, Param1T, Param2T) => ResultT)(p1: Signal[Param1T], p2: Signal[Param2T]): Signal[ResultT] =
        Signal(f(internalValue()(), p1(), p2()))
    protected def liftMutating2[Param1T, Param2T](f: (InternalType, Param1T, Param2T) => InternalType)(p1: Signal[Param1T], p2: Signal[Param2T]): Unit = {
        val signal = internalValue.now
        internalValue() = Signal(f(signal(), p1(), p2()))
    }

    protected def liftPure3[Param1T, Param2T, Param3T, ResultT](f: (InternalType, Param1T, Param2T, Param3T) => ResultT)(p1: Signal[Param1T], p2: Signal[Param2T], p3: Signal[Param3T]): Signal[ResultT] =
        Signal(f(internalValue()(), p1(), p2(), p3()))
    protected def liftMutating3[Param1T, Param2T, Param3T](f: (InternalType, Param1T, Param2T, Param3T) => InternalType)(p1: Signal[Param1T], p2: Signal[Param2T], p3: Signal[Param3T]): Unit = {
        val signal = internalValue.now
        internalValue() = Signal(f(signal(), p1(), p2(), p3()))
    }

    protected def liftPure4[Param1T, Param2T, Param3T, Param4T, ResultT](f: (InternalType, Param1T, Param2T, Param3T, Param4T) => ResultT)(p1: Signal[Param1T], p2: Signal[Param2T], p3: Signal[Param3T], p4: Signal[Param4T]): Signal[ResultT] =
        Signal(f(internalValue()(), p1(), p2(), p3(), p4()))
protected def liftMutating4[Param1T, Param2T, Param3T, Param4T](f: (InternalType, Param1T, Param2T, Param3T, Param4T) => InternalType)(p1: Signal[Param1T], p2: Signal[Param2T], p3: Signal[Param3T], p4: Signal[Param4T]): Unit = {
val signal = internalValue.now
internalValue() = Signal(f(signal(), p1(), p2(), p3(), p4()))
}
}
| volkc/REScala | Extensions/Datastructures/src/main/scala/main/abstraction/SignalWrapper.scala | Scala | apache-2.0 | 2,727 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.testkit
import language.postfixOps
import scala.annotation.{ varargs, tailrec }
import scala.collection.immutable
import scala.concurrent.duration._
import scala.reflect.ClassTag
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
import akka.actor._
import akka.actor.Actor._
import akka.util.{ Timeout, BoxedType }
import scala.util.control.NonFatal
import scala.Some
import java.util.concurrent.TimeUnit
import akka.actor.IllegalActorStateException
import akka.actor.DeadLetter
import akka.actor.Terminated
import akka.event.LoggingReceive
object TestActor {
  // Optional filter: messages for which the function yields true are dropped
  // by the TestActor instead of being enqueued.
  type Ignore = Option[PartialFunction[Any, Boolean]]
  /**
   * Hook invoked by the TestActor for every ordinary message it receives.
   * The returned AutoPilot is installed for the next message; returning
   * [[KeepRunning]] keeps the current pilot installed.
   */
  abstract class AutoPilot {
    def run(sender: ActorRef, msg: Any): AutoPilot
    def noAutoPilot: AutoPilot = NoAutoPilot
    def keepRunning: AutoPilot = KeepRunning
  }
  // Pilot that does nothing and stays installed forever.
  case object NoAutoPilot extends AutoPilot {
    def run(sender: ActorRef, msg: Any): AutoPilot = this
  }
  // Marker result only; TestActor never invokes `run` on it (it keeps the
  // previously installed pilot instead), hence the error.
  case object KeepRunning extends AutoPilot {
    def run(sender: ActorRef, msg: Any): AutoPilot = sys.error("must not call")
  }
  // Control messages understood by the TestActor (handled in its receive,
  // never enqueued for the test to observe).
  case class SetIgnore(i: Ignore) extends NoSerializationVerificationNeeded
  case class Watch(ref: ActorRef) extends NoSerializationVerificationNeeded
  case class UnWatch(ref: ActorRef) extends NoSerializationVerificationNeeded
  case class SetAutoPilot(ap: AutoPilot) extends NoSerializationVerificationNeeded
  // Envelope stored in the TestKit queue: the payload plus its sender.
  trait Message {
    def msg: AnyRef
    def sender: ActorRef
  }
  case class RealMessage(msg: AnyRef, sender: ActorRef) extends Message
  // Sentinel used when the last receive dequeued nothing; accessing it fails loudly.
  case object NullMessage extends Message {
    override def msg: AnyRef = throw new IllegalActorStateException("last receive did not dequeue a message")
    override def sender: ActorRef = throw new IllegalActorStateException("last receive did not dequeue a message")
  }
  // Fallback for `applyOrElse`: messages outside the ignore filter's domain
  // are treated as not-ignored.
  val FALSE = (x: Any) ⇒ false
  // make creator serializable, for VerifySerializabilitySpec
  def props(queue: BlockingDeque[Message]): Props = Props(classOf[TestActor], queue)
}
/**
 * Actor backing the TestKit: handles the control messages from
 * [[TestActor]]'s companion and appends every other message (unless
 * filtered by the ignore function) to the shared queue for the test
 * code to inspect.
 */
class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor {
  import TestActor._
  var ignore: Ignore = None
  var autopilot: AutoPilot = NoAutoPilot
  def receive = {
    case SetIgnore(ign) ⇒ ignore = ign
    case Watch(ref) ⇒ context.watch(ref)
    case UnWatch(ref) ⇒ context.unwatch(ref)
    case SetAutoPilot(pilot) ⇒ autopilot = pilot
    case x: AnyRef ⇒
      // Run the installed pilot; KeepRunning preserves the current pilot,
      // any other result replaces it.
      autopilot = autopilot.run(sender(), x) match {
        case KeepRunning ⇒ autopilot
        case other ⇒ other
      }
      // Enqueue unless the ignore filter matches (FALSE covers messages
      // outside the filter's domain, i.e. they are observed).
      val observe = ignore map (ignoreFunc ⇒ !ignoreFunc.applyOrElse(x, FALSE)) getOrElse true
      if (observe) queue.offerLast(RealMessage(x, sender()))
  }
  override def postStop() = {
    import scala.collection.JavaConverters._
    // Drain any unconsumed messages to deadLetters so they are not lost silently.
    queue.asScala foreach { m ⇒ context.system.deadLetters.tell(DeadLetter(m.msg, m.sender, self), m.sender) }
  }
}
/**
* Implementation trait behind the [[akka.testkit.TestKit]] class: you may use
* this if inheriting from a concrete class is not possible.
*
* <b>Use of the trait is discouraged because of potential issues with binary
* backwards compatibility in the future, use at own risk.</b>
*
* This trait requires the concrete class mixing it in to provide an
* [[akka.actor.ActorSystem]] which is available before this traits’s
* constructor is run. The recommended way is this:
*
* {{{
* class MyTest extends TestKitBase {
* implicit lazy val system = ActorSystem() // may add arguments here
* ...
* }
* }}}
*/
trait TestKitBase {
  import TestActor.{ Message, RealMessage, NullMessage }
  implicit val system: ActorSystem
  val testKitSettings = TestKitExtension(system)
  // All messages received by `testActor` end up here; the expect*/receive*
  // assertions below consume from the front of this deque.
  private val queue = new LinkedBlockingDeque[Message]()
  // Message most recently dequeued by `receiveOne`; NullMessage when the
  // last receive dequeued nothing (accessing its fields then throws).
  private[akka] var lastMessage: Message = NullMessage
  def lastSender = lastMessage.sender
  /**
   * ActorRef of the test actor. Access is provided to enable e.g.
   * registration as message target.
   */
  val testActor: ActorRef = {
    val impl = system.asInstanceOf[ExtendedActorSystem]
    val ref = impl.systemActorOf(TestActor.props(queue)
      .withDispatcher(CallingThreadDispatcher.Id),
      "testActor" + TestKit.testActorId.incrementAndGet)
    // Block until the actor is really started so tests cannot race its creation.
    awaitCond(ref match {
      case r: RepointableRef ⇒ r.isStarted
      case _ ⇒ true
    }, 1 second, 10 millis)
    ref
  }
  // Absolute deadline of the innermost enclosing `within` block
  // (Undefined when not inside a `within`).
  private var end: Duration = Duration.Undefined
  /**
   * if last assertion was expectNoMsg, disable timing failure upon within()
   * block end.
   */
  private var lastWasNoMsg = false
  /**
   * Ignore all messages in the test actor for which the given partial
   * function returns true.
   */
  def ignoreMsg(f: PartialFunction[Any, Boolean]) { testActor ! TestActor.SetIgnore(Some(f)) }
  /**
   * Stop ignoring messages in the test actor.
   */
  def ignoreNoMsg() { testActor ! TestActor.SetIgnore(None) }
  /**
   * Have the testActor watch someone (i.e. `context.watch(...)`).
   */
  def watch(ref: ActorRef): ActorRef = {
    testActor ! TestActor.Watch(ref)
    ref
  }
  /**
   * Have the testActor stop watching someone (i.e. `context.unwatch(...)`).
   */
  def unwatch(ref: ActorRef): ActorRef = {
    testActor ! TestActor.UnWatch(ref)
    ref
  }
  /**
   * Install an AutoPilot to drive the testActor: the AutoPilot will be run
   * for each received message and can be used to send or forward messages,
   * etc. Each invocation must return the AutoPilot for the next round.
   */
  def setAutoPilot(pilot: TestActor.AutoPilot): Unit = testActor ! TestActor.SetAutoPilot(pilot)
  /**
   * Obtain current time (`System.nanoTime`) as Duration.
   */
  def now: FiniteDuration = System.nanoTime.nanos
  /**
   * Obtain time remaining for execution of the innermost enclosing `within`
   * block or missing that it returns the properly dilated default for this
   * case from settings (key "akka.test.single-expect-default").
   */
  def remaining: FiniteDuration = remainingOr(testKitSettings.SingleExpectDefaultTimeout.dilated)
  /**
   * Obtain time remaining for execution of the innermost enclosing `within`
   * block or missing that it returns the given duration.
   */
  def remainingOr(duration: FiniteDuration): FiniteDuration = end match {
    case x if x eq Duration.Undefined ⇒ duration
    case x if !x.isFinite ⇒ throw new IllegalArgumentException("`end` cannot be infinite")
    case f: FiniteDuration ⇒ f - now
  }
  // Resolve a user-supplied max: Undefined means "remaining time of the
  // enclosing within block", a finite value is dilated by the test time factor.
  private def remainingOrDilated(max: Duration): FiniteDuration = max match {
    case x if x eq Duration.Undefined ⇒ remaining
    case x if !x.isFinite ⇒ throw new IllegalArgumentException("max duration cannot be infinite")
    case f: FiniteDuration ⇒ f.dilated
  }
  /**
   * Query queue status.
   */
  def msgAvailable = !queue.isEmpty
  /**
   * Await until the given condition evaluates to `true` or the timeout
   * expires, whichever comes first.
   *
   * If no timeout is given, take it from the innermost enclosing `within`
   * block.
   *
   * Note that the timeout is scaled using Duration.dilated,
   * which uses the configuration entry "akka.test.timefactor".
   */
  def awaitCond(p: ⇒ Boolean, max: Duration = Duration.Undefined, interval: Duration = 100.millis, message: String = "") {
    val _max = remainingOrDilated(max)
    val stop = now + _max
    @tailrec
    def poll(t: Duration) {
      if (!p) {
        assert(now < stop, "timeout " + _max + " expired: " + message)
        // Sleep at most `interval`, but never past the deadline.
        Thread.sleep(t.toMillis)
        poll((stop - now) min interval)
      }
    }
    poll(_max min interval)
  }
  /**
   * Await until the given assert does not throw an exception or the timeout
   * expires, whichever comes first. If the timeout expires the last exception
   * is thrown.
   *
   * If no timeout is given, take it from the innermost enclosing `within`
   * block.
   *
   * Note that the timeout is scaled using Duration.dilated,
   * which uses the configuration entry "akka.test.timefactor".
   */
  def awaitAssert(a: ⇒ Any, max: Duration = Duration.Undefined, interval: Duration = 800.millis) {
    val _max = remainingOrDilated(max)
    val stop = now + _max
    @tailrec
    def poll(t: Duration) {
      // Re-throw the assertion's exception only once the deadline would be
      // exceeded by the next retry; otherwise retry after sleeping.
      val failed =
        try { a; false } catch {
          case NonFatal(e) ⇒
            if ((now + t) >= stop) throw e
            true
        }
      if (failed) {
        Thread.sleep(t.toMillis)
        poll((stop - now) min interval)
      }
    }
    poll(_max min interval)
  }
  /**
   * Execute code block while bounding its execution time between `min` and
   * `max`. `within` blocks may be nested. All methods in this trait which
   * take maximum wait times are available in a version which implicitly uses
   * the remaining time governed by the innermost enclosing `within` block.
   *
   * Note that the timeout is scaled using Duration.dilated, which uses the
   * configuration entry "akka.test.timefactor", while the min Duration is not.
   *
   * <pre>
   * val ret = within(50 millis) {
   *   test ! "ping"
   *   expectMsgClass(classOf[String])
   * }
   * </pre>
   */
  def within[T](min: FiniteDuration, max: FiniteDuration)(f: ⇒ T): T = {
    val _max = max.dilated
    val start = now
    val rem = if (end == Duration.Undefined) Duration.Inf else end - start
    assert(rem >= min, "required min time " + min + " not possible, only " + format(min.unit, rem) + " left")
    lastWasNoMsg = false
    // The effective deadline is capped by any enclosing within block.
    val max_diff = _max min rem
    val prev_end = end
    end = start + max_diff
    // Restore the outer deadline even if the block throws.
    val ret = try f finally end = prev_end
    val diff = now - start
    assert(min <= diff, "block took " + format(min.unit, diff) + ", should at least have been " + min)
    // expectNoMsg legitimately runs to the deadline, so skip the upper check then.
    if (!lastWasNoMsg) {
      assert(diff <= max_diff, "block took " + format(_max.unit, diff) + ", exceeding " + format(_max.unit, max_diff))
    }
    ret
  }
  /**
   * Same as calling `within(0 seconds, max)(f)`.
   */
  def within[T](max: FiniteDuration)(f: ⇒ T): T = within(0 seconds, max)(f)
  /**
   * Same as `expectMsg(remaining, obj)`, but correctly treating the timeFactor.
   */
  def expectMsg[T](obj: T): T = expectMsg_internal(remaining, obj)
  /**
   * Receive one message from the test actor and assert that it equals the
   * given object. Wait time is bounded by the given duration, with an
   * AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsg[T](max: FiniteDuration, obj: T): T = expectMsg_internal(max.dilated, obj)
  /**
   * Receive one message from the test actor and assert that it equals the
   * given object. Wait time is bounded by the given duration, with an
   * AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsg[T](max: FiniteDuration, hint: String, obj: T): T = expectMsg_internal(max.dilated, obj, Some(hint))
  // Dequeue one message (null on timeout) and compare it with `obj` for equality.
  private def expectMsg_internal[T](max: Duration, obj: T, hint: Option[String] = None): T = {
    val o = receiveOne(max)
    lazy val hintOrEmptyString = hint.map(": " + _).getOrElse("")
    assert(o ne null, s"timeout ($max) during expectMsg while waiting for $obj" + hintOrEmptyString)
    assert(obj == o, s"expected $obj, found $o" + hintOrEmptyString)
    o.asInstanceOf[T]
  }
  /**
   * Receive one message from the test actor and assert that the given
   * partial function accepts it. Wait time is bounded by the given duration,
   * with an AssertionFailure being thrown in case of timeout.
   *
   * Use this variant to implement more complicated or conditional
   * processing.
   *
   * @return the received object as transformed by the partial function
   */
  def expectMsgPF[T](max: Duration = Duration.Undefined, hint: String = "")(f: PartialFunction[Any, T]): T = {
    val _max = remainingOrDilated(max)
    val o = receiveOne(_max)
    assert(o ne null, "timeout (" + _max + ") during expectMsg: " + hint)
    assert(f.isDefinedAt(o), "expected: " + hint + " but got unexpected message " + o)
    f(o)
  }
  /**
   * Receive one message from the test actor and assert that it is the Terminated message of the given ActorRef.
   * Wait time is bounded by the given duration, with an AssertionFailure being thrown in case of timeout.
   *
   * @return the received Terminated message
   */
  def expectTerminated(target: ActorRef, max: Duration = Duration.Undefined): Terminated =
    expectMsgPF(max, "Terminated " + target) {
      case t @ Terminated(`target`) ⇒ t
    }
  /**
   * Hybrid of expectMsgPF and receiveWhile: receive messages while the
   * partial function matches and returns false. Use it to ignore certain
   * messages while waiting for a specific message.
   *
   * @return the last received messsage, i.e. the first one for which the
   *         partial function returned true
   */
  def fishForMessage(max: Duration = Duration.Undefined, hint: String = "")(f: PartialFunction[Any, Boolean]): Any = {
    val _max = remainingOrDilated(max)
    val end = now + _max
    @tailrec
    def recv: Any = {
      val o = receiveOne(end - now)
      assert(o ne null, "timeout (" + _max + ") during fishForMessage, hint: " + hint)
      assert(f.isDefinedAt(o), "fishForMessage(" + hint + ") found unexpected message " + o)
      if (f(o)) o else recv
    }
    recv
  }
  /**
   * Same as `expectMsgType[T](remaining)`, but correctly treating the timeFactor.
   */
  def expectMsgType[T](implicit t: ClassTag[T]): T = expectMsgClass_internal(remaining, t.runtimeClass.asInstanceOf[Class[T]])
  /**
   * Receive one message from the test actor and assert that it conforms to the
   * given type (after erasure). Wait time is bounded by the given duration,
   * with an AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsgType[T](max: FiniteDuration)(implicit t: ClassTag[T]): T = expectMsgClass_internal(max.dilated, t.runtimeClass.asInstanceOf[Class[T]])
  /**
   * Same as `expectMsgClass(remaining, c)`, but correctly treating the timeFactor.
   */
  def expectMsgClass[C](c: Class[C]): C = expectMsgClass_internal(remaining, c)
  /**
   * Receive one message from the test actor and assert that it conforms to
   * the given class. Wait time is bounded by the given duration, with an
   * AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsgClass[C](max: FiniteDuration, c: Class[C]): C = expectMsgClass_internal(max.dilated, c)
  // BoxedType maps primitive classes to their boxed form so isInstance works
  // for messages carrying boxed primitives.
  private def expectMsgClass_internal[C](max: FiniteDuration, c: Class[C]): C = {
    val o = receiveOne(max)
    assert(o ne null, "timeout (" + max + ") during expectMsgClass waiting for " + c)
    assert(BoxedType(c) isInstance o, "expected " + c + ", found " + o.getClass)
    o.asInstanceOf[C]
  }
  /**
   * Same as `expectMsgAnyOf(remaining, obj...)`, but correctly treating the timeFactor.
   */
  def expectMsgAnyOf[T](obj: T*): T = expectMsgAnyOf_internal(remaining, obj: _*)
  /**
   * Receive one message from the test actor and assert that it equals one of
   * the given objects. Wait time is bounded by the given duration, with an
   * AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsgAnyOf[T](max: FiniteDuration, obj: T*): T = expectMsgAnyOf_internal(max.dilated, obj: _*)
  private def expectMsgAnyOf_internal[T](max: FiniteDuration, obj: T*): T = {
    val o = receiveOne(max)
    assert(o ne null, "timeout (" + max + ") during expectMsgAnyOf waiting for " + obj.mkString("(", ", ", ")"))
    assert(obj exists (_ == o), "found unexpected " + o)
    o.asInstanceOf[T]
  }
  /**
   * Same as `expectMsgAnyClassOf(remaining, obj...)`, but correctly treating the timeFactor.
   */
  def expectMsgAnyClassOf[C](obj: Class[_ <: C]*): C = expectMsgAnyClassOf_internal(remaining, obj: _*)
  /**
   * Receive one message from the test actor and assert that it conforms to
   * one of the given classes. Wait time is bounded by the given duration,
   * with an AssertionFailure being thrown in case of timeout.
   *
   * @return the received object
   */
  def expectMsgAnyClassOf[C](max: FiniteDuration, obj: Class[_ <: C]*): C = expectMsgAnyClassOf_internal(max.dilated, obj: _*)
  private def expectMsgAnyClassOf_internal[C](max: FiniteDuration, obj: Class[_ <: C]*): C = {
    val o = receiveOne(max)
    assert(o ne null, "timeout (" + max + ") during expectMsgAnyClassOf waiting for " + obj.mkString("(", ", ", ")"))
    assert(obj exists (c ⇒ BoxedType(c) isInstance o), "found unexpected " + o)
    o.asInstanceOf[C]
  }
  /**
   * Same as `expectMsgAllOf(remaining, obj...)`, but correctly treating the timeFactor.
   */
  def expectMsgAllOf[T](obj: T*): immutable.Seq[T] = expectMsgAllOf_internal(remaining, obj: _*)
  /**
   * Receive a number of messages from the test actor matching the given
   * number of objects and assert that for each given object one is received
   * which equals it and vice versa. This construct is useful when the order in
   * which the objects are received is not fixed. Wait time is bounded by the
   * given duration, with an AssertionFailure being thrown in case of timeout.
   *
   * <pre>
   *   dispatcher ! SomeWork1()
   *   dispatcher ! SomeWork2()
   *   expectMsgAllOf(1 second, Result1(), Result2())
   * </pre>
   */
  def expectMsgAllOf[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = expectMsgAllOf_internal(max.dilated, obj: _*)
  // Fail with a combined report listing both the expected-but-missing and the
  // received-but-unexpected messages.
  private def checkMissingAndUnexpected(missing: Seq[Any], unexpected: Seq[Any],
                                        missingMessage: String, unexpectedMessage: String): Unit = {
    assert(missing.isEmpty && unexpected.isEmpty,
      (if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) +
        (if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]")))
  }
  private def expectMsgAllOf_internal[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = {
    val recv = receiveN_internal(obj.size, max)
    val missing = obj filterNot (x ⇒ recv exists (x == _))
    val unexpected = recv filterNot (x ⇒ obj exists (x == _))
    checkMissingAndUnexpected(missing, unexpected, "not found", "found unexpected")
    recv.asInstanceOf[immutable.Seq[T]]
  }
  /**
   * Same as `expectMsgAllClassOf(remaining, obj...)`, but correctly treating the timeFactor.
   */
  def expectMsgAllClassOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllClassOf(remaining, obj: _*)
  /**
   * Receive a number of messages from the test actor matching the given
   * number of classes and assert that for each given class one is received
   * which is of that class (equality, not conformance). This construct is
   * useful when the order in which the objects are received is not fixed.
   * Wait time is bounded by the given duration, with an AssertionFailure
   * being thrown in case of timeout.
   */
  def expectMsgAllClassOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllClassOf(max.dilated, obj: _*)
  // Exact class equality (eq), as opposed to the conformance check below.
  private def internalExpectMsgAllClassOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = {
    val recv = receiveN_internal(obj.size, max)
    val missing = obj filterNot (x ⇒ recv exists (_.getClass eq BoxedType(x)))
    val unexpected = recv filterNot (x ⇒ obj exists (c ⇒ BoxedType(c) eq x.getClass))
    checkMissingAndUnexpected(missing, unexpected, "not found", "found non-matching object(s)")
    recv.asInstanceOf[immutable.Seq[T]]
  }
  /**
   * Same as `expectMsgAllConformingOf(remaining, obj...)`, but correctly treating the timeFactor.
   */
  def expectMsgAllConformingOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllConformingOf(remaining, obj: _*)
  /**
   * Receive a number of messages from the test actor matching the given
   * number of classes and assert that for each given class one is received
   * which conforms to that class (and vice versa). This construct is useful
   * when the order in which the objects are received is not fixed. Wait time
   * is bounded by the given duration, with an AssertionFailure being thrown in
   * case of timeout.
   *
   * Beware that one object may satisfy all given class constraints, which
   * may be counter-intuitive.
   */
  def expectMsgAllConformingOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllConformingOf(max.dilated, obj: _*)
  private def internalExpectMsgAllConformingOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = {
    val recv = receiveN_internal(obj.size, max)
    val missing = obj filterNot (x ⇒ recv exists (BoxedType(x) isInstance _))
    val unexpected = recv filterNot (x ⇒ obj exists (c ⇒ BoxedType(c) isInstance x))
    checkMissingAndUnexpected(missing, unexpected, "not found", "found non-matching object(s)")
    recv.asInstanceOf[immutable.Seq[T]]
  }
  /**
   * Same as `expectNoMsg(remaining)`, but correctly treating the timeFactor.
   */
  def expectNoMsg() { expectNoMsg_internal(remaining) }
  /**
   * Assert that no message is received for the specified time.
   */
  def expectNoMsg(max: FiniteDuration) { expectNoMsg_internal(max.dilated) }
  private def expectNoMsg_internal(max: FiniteDuration) {
    val o = receiveOne(max)
    assert(o eq null, "received unexpected message " + o)
    // Signal to an enclosing within block that running to the deadline is fine.
    lastWasNoMsg = true
  }
  /**
   * Receive a series of messages until one does not match the given partial
   * function or the idle timeout is met (disabled by default) or the overall
   * maximum duration is elapsed. Returns the sequence of messages.
   *
   * Note that it is not an error to hit the `max` duration in this case.
   *
   * One possible use of this method is for testing whether messages of
   * certain characteristics are generated at a certain rate:
   *
   * <pre>
   * test ! ScheduleTicks(100 millis)
   * val series = receiveWhile(750 millis) {
   *     case Tick(count) => count
   * }
   * assert(series == (1 to 7).toList)
   * </pre>
   */
  def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)(f: PartialFunction[AnyRef, T]): immutable.Seq[T] = {
    val stop = now + remainingOrDilated(max)
    // Tracks the last message that the partial function accepted, so that
    // `lastMessage` can be restored to it when the loop terminates.
    var msg: Message = NullMessage
    @tailrec
    def doit(acc: List[T], count: Int): List[T] = {
      if (count >= messages) acc.reverse
      else {
        receiveOne((stop - now) min idle)
        lastMessage match {
          case NullMessage ⇒
            // Timeout: restore lastMessage to the last accepted message.
            lastMessage = msg
            acc.reverse
          case RealMessage(o, _) if (f isDefinedAt o) ⇒
            msg = lastMessage
            doit(f(o) :: acc, count + 1)
          case RealMessage(o, _) ⇒
            // Non-matching message: push it back to the front of the queue
            // for subsequent assertions, restore lastMessage, and stop.
            queue.offerFirst(lastMessage)
            lastMessage = msg
            acc.reverse
        }
      }
    }
    val ret = doit(Nil, 0)
    lastWasNoMsg = true
    ret
  }
  /**
   * Same as `receiveN(n, remaining)` but correctly taking into account
   * Duration.timeFactor.
   */
  def receiveN(n: Int): immutable.Seq[AnyRef] = receiveN_internal(n, remaining)
  /**
   * Receive N messages in a row before the given deadline.
   */
  def receiveN(n: Int, max: FiniteDuration): immutable.Seq[AnyRef] = receiveN_internal(n, max.dilated)
  private def receiveN_internal(n: Int, max: Duration): immutable.Seq[AnyRef] = {
    // `max` bounds the total time for all n messages, not each individually.
    val stop = max + now
    for { x ← 1 to n } yield {
      val timeout = stop - now
      val o = receiveOne(timeout)
      assert(o ne null, s"timeout ($max) while expecting $n messages (got ${x - 1})")
      o
    }
  }
  /**
   * Receive one message from the internal queue of the TestActor. If the given
   * duration is zero, the queue is polled (non-blocking).
   *
   * This method does NOT automatically scale its Duration parameter!
   */
  def receiveOne(max: Duration): AnyRef = {
    val message =
      if (max == 0.seconds) {
        queue.pollFirst
      } else if (max.isFinite) {
        queue.pollFirst(max.length, max.unit)
      } else {
        queue.takeFirst
      }
    lastWasNoMsg = false
    message match {
      case null ⇒
        lastMessage = NullMessage
        null
      case RealMessage(msg, _) ⇒
        lastMessage = message
        msg
    }
  }
  /**
   * Shut down an actor system and wait for termination.
   * On failure debug output will be logged about the remaining actors in the system.
   *
   * If verifySystemShutdown is true, then an exception will be thrown on failure.
   */
  def shutdown(actorSystem: ActorSystem = system,
               duration: Duration = 5.seconds.dilated.min(10.seconds),
               verifySystemShutdown: Boolean = false) {
    TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown)
  }
  // Render a duration in the given unit with three decimals, e.g. "1.500 seconds".
  private def format(u: TimeUnit, d: Duration) = "%.3f %s".format(d.toUnit(u), u.toString.toLowerCase)
}
/**
 * Test kit for testing actors. Inheriting from this trait enables reception of
 * replies from actors, which are queued by an internal actor and can be
 * examined using the `expectMsg...` methods. Assertions and bounds concerning
 * timing are available in the form of `within` blocks.
 *
 * <pre>
 * class Test extends TestKit(ActorSystem()) {
 *   try {
 *
 *     val test = system.actorOf(Props[SomeActor]
 *
 *     within (1 second) {
 *       test ! SomeWork
 *       expectMsg(Result1) // bounded to 1 second
 *       expectMsg(Result2) // bounded to the remainder of the 1 second
 *     }
 *
 *   } finally {
 *     system.shutdown()
 *   }
 * }
 * </pre>
 *
 * Beware of two points:
 *
 *  - the ActorSystem passed into the constructor needs to be shutdown,
 *    otherwise thread pools and memory will be leaked
 *  - this trait is not thread-safe (only one actor with one queue, one stack
 *    of `within` blocks); it is expected that the code is executed from a
 *    constructor as shown above, which makes this a non-issue, otherwise take
 *    care not to run tests within a single test class instance in parallel.
 *
 * It should be noted that for CI servers and the like all maximum Durations
 * are scaled using their Duration.dilated method, which uses the
 * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor".
 *
 * Implementation note: the early initializer (`extends { ... }`) ensures that
 * `system` is assigned before TestKitBase's constructor runs, as TestKitBase
 * requires `system` during its own initialization.
 *
 * @since 1.1
 */
class TestKit(_system: ActorSystem) extends { implicit val system = _system } with TestKitBase
object TestKit {
  // Source of unique suffixes for the names of TestActor instances
  // (see TestKitBase.testActor).
  private[testkit] val testActorId = new AtomicInteger(0)
  /**
   * Await until the given condition evaluates to `true` or the timeout
   * expires, whichever comes first.
   */
  def awaitCond(p: ⇒ Boolean, max: Duration, interval: Duration = 100.millis, noThrow: Boolean = false): Boolean = {
    val stop = now + max
    @tailrec
    def poll(): Boolean = {
      if (!p) {
        val toSleep = stop - now
        if (toSleep <= Duration.Zero) {
          // Deadline reached: either report failure or throw, per `noThrow`.
          if (noThrow) false
          else throw new AssertionError("timeout " + max + " expired")
        } else {
          // Sleep at most `interval`, but never past the deadline.
          Thread.sleep((toSleep min interval).toMillis)
          poll()
        }
      } else true
    }
    poll()
  }
  /**
   * Obtain current timestamp as Duration for relative measurements (using System.nanoTime).
   */
  def now: Duration = System.nanoTime().nanos
  /**
   * Java API: Scale timeouts (durations) during tests with the configured
   * 'akka.test.timefactor'.
   */
  @deprecated("Use JavaTestKit.dilated", "2.3")
  def dilated(duration: Duration, system: ActorSystem): Duration =
    duration * TestKitExtension(system).TestTimeFactor
  /**
   * Shut down an actor system and wait for termination.
   * On failure debug output will be logged about the remaining actors in the system.
   *
   * If verifySystemShutdown is true, then an exception will be thrown on failure.
   */
  def shutdownActorSystem(actorSystem: ActorSystem,
                          duration: Duration = 10 seconds,
                          verifySystemShutdown: Boolean = false): Unit = {
    actorSystem.shutdown()
    try actorSystem.awaitTermination(duration) catch {
      case _: TimeoutException ⇒
        // Include the remaining actor hierarchy in the message to aid debugging.
        val msg = "Failed to stop [%s] within [%s] \n%s".format(actorSystem.name, duration,
          actorSystem.asInstanceOf[ActorSystemImpl].printTree)
        if (verifySystemShutdown) throw new RuntimeException(msg)
        else actorSystem.log.warning(msg)
    }
  }
}
/**
* TestKit-based probe which allows sending, reception and reply.
*/
/**
 * TestKit-based probe which allows sending, reception and reply. A probe is
 * a standalone TestKit whose testActor can be handed to production actors
 * as a communication partner.
 */
class TestProbe(_application: ActorSystem) extends TestKit(_application) {
  /** Shorthand for this probe's testActor. */
  def ref: ActorRef = testActor
  /**
   * Send a message to `actor` with this probe's testActor as sender, so that
   * any reply becomes observable through the probe's expectMsg/receive methods.
   */
  def send(actor: ActorRef, msg: Any): Unit = actor.tell(msg, testActor)
  /**
   * Forward a message (by default the last one received) to `actor`,
   * preserving the original sender — as `forward` would inside an actor.
   */
  def forward(actor: ActorRef, msg: Any = lastMessage.msg): Unit = actor.tell(msg, lastMessage.sender)
  /** Sender of the message most recently dequeued by this probe. */
  def sender(): ActorRef = lastMessage.sender
  /** Reply to the sender of the last dequeued message, from this probe. */
  def reply(msg: Any): Unit = sender().tell(msg, ref)
}
object TestProbe {
  /** Create a fresh probe bound to the implicitly available ActorSystem. */
  def apply()(implicit system: ActorSystem): TestProbe = new TestProbe(system)
}
/**
 * Mixin that makes the testActor the implicit sender for `!`, so that
 * replies to messages sent from the test arrive at the testActor.
 */
trait ImplicitSender { this: TestKitBase ⇒
  implicit def self = testActor
}
/**
 * Mixin that provides an implicit [[akka.util.Timeout]] taken from the
 * TestKit settings (`DefaultTimeout`).
 */
trait DefaultTimeout { this: TestKitBase ⇒
  implicit val timeout: Timeout = testKitSettings.DefaultTimeout
}
/**
* INTERNAL API
*
* This is a specialized variant of PartialFunction which is <b><i>only
* applicable if you know that `isDefinedAt(x)` is always called before
* `apply(x)`—with the same `x` of course.</i></b>
*
* `match(x)` will be called for `isDefinedAt(x)` only, and its semantics
* are the same as for [[akka.japi.PurePartialFunction]] (apart from the
* missing because unneeded boolean argument).
*
* This class is used internal to JavaTestKit and should not be extended
* by client code directly.
*/
private[testkit] abstract class CachingPartialFunction[A, B <: AnyRef] extends scala.runtime.AbstractPartialFunction[A, B] {
  import akka.japi.JavaPartialFunction._
  @throws(classOf[Exception])
  def `match`(x: A): B
  // Holds the value computed by the last isDefinedAt call; returned by apply
  // without recomputation. Only safe because callers are required to invoke
  // isDefinedAt(x) immediately before apply(x) with the same x (see the
  // class documentation above).
  var cache: B = _
  // A NoMatch thrown by `match` signals "not defined"; any other exception propagates.
  final def isDefinedAt(x: A): Boolean = try { cache = `match`(x); true } catch { case NoMatch ⇒ cache = null.asInstanceOf[B]; false }
  final override def apply(x: A): B = cache
}
| Fincore/org.spark-project.akka | testkit/src/main/scala/akka/testkit/TestKit.scala | Scala | mit | 30,580 |
package keystoneml.nodes.learning
import breeze.linalg.DenseVector
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import keystoneml.utils.{MLlibUtils, Stats}
import keystoneml.workflow.PipelineContext
import scala.util.Random
import scala.util.control.Breaks._
object LogisticRegressionModelSuite {
  // Generate input of the form Y = logistic(offset + scale*X)
  def generateLogisticInput(
      offset: Double,
      scale: Double,
      nPoints: Int,
      seed: Int): Seq[(Int, DenseVector[Double])] = {
    val rnd = new Random(seed)
    // One-dimensional features drawn from a standard normal distribution.
    val x1 = Array.fill[Double](nPoints)(rnd.nextGaussian())
    // Label is 1 with probability sigmoid(offset + scale * x), else 0.
    val y = (0 until nPoints).map { i =>
      val p = 1.0 / (1.0 + math.exp(-(offset + scale * x1(i))))
      if (rnd.nextDouble() < p) 1 else 0
    }
    val testData = (0 until nPoints).map(i => (y(i), DenseVector(Array(x1(i)))))
    testData
  }
  /**
   * Generates `k` classes multinomial synthetic logistic input in `n` dimensional space given the
   * model weights and mean/variance of the features. The synthetic data will be drawn from
   * the probability distribution constructed by weights using the following formula.
   *
   * P(y = 0 | x) = 1 / norm
   * P(y = 1 | x) = exp(x * w_1) / norm
   * P(y = 2 | x) = exp(x * w_2) / norm
   * ...
   * P(y = k-1 | x) = exp(x * w_{k-1}) / norm
   * where norm = 1 + exp(x * w_1) + exp(x * w_2) + ... + exp(x * w_{k-1})
   *
   * @param weights matrix is flatten into a vector; as a result, the dimension of weights vector
   *                will be (k - 1) * (n + 1) if `addIntercept == true`, and
   *                if `addIntercept != true`, the dimension will be (k - 1) * n.
   * @param xMean the mean of the generated features. Lots of time, if the features are not properly
   *              standardized, the algorithm with poor implementation will have difficulty
   *              to converge.
   * @param xVariance the variance of the generated features.
   * @param addIntercept whether to add intercept.
   * @param nPoints the number of instance of generated data.
   * @param seed the seed for random generator. For consistent testing result, it will be fixed.
   */
  def generateMultinomialLogisticInput(
      weights: Array[Double],
      xMean: Array[Double],
      xVariance: Array[Double],
      addIntercept: Boolean,
      nPoints: Int,
      seed: Int): Seq[(Int, DenseVector[Double])] = {
    val rnd = new Random(seed)
    val xDim = xMean.length
    // Each weight "row" has one extra slot (the last one) for the intercept.
    val xWithInterceptsDim = if (addIntercept) xDim + 1 else xDim
    val nClasses = weights.length / xWithInterceptsDim + 1
    // Raw standard-normal features; scaled/shifted to xVariance/xMean below.
    val x = Array.fill[DenseVector[Double]](nPoints)(DenseVector(Array.fill[Double](xDim)(rnd.nextGaussian())))
    x.foreach { vector =>
      // This doesn't work if `vector` is a sparse vector.
      val vectorArray = vector.toArray
      var i = 0
      val len = vectorArray.length
      while (i < len) {
        vectorArray(i) = vectorArray(i) * math.sqrt(xVariance(i)) + xMean(i)
        i += 1
      }
    }
    val y = (0 until nPoints).map { idx =>
      val xArray = x(idx).toArray
      val margins = Array.ofDim[Double](nClasses)
      val probs = Array.ofDim[Double](nClasses)
      // Class 0 is the pivot with margin 0; classes 1..k-1 get x*w_i, with the
      // intercept stored as the last element of weight row i.
      for (i <- 0 until nClasses - 1) {
        for (j <- 0 until xDim) margins(i + 1) += weights(i * xWithInterceptsDim + j) * xArray(j)
        if (addIntercept) margins(i + 1) += weights((i + 1) * xWithInterceptsDim - 1)
      }
      // Preventing the overflow when we compute the probability
      val maxMargin = margins.max
      if (maxMargin > 0) for (i <- 0 until nClasses) margins(i) -= maxMargin
      // Computing the probabilities for each class from the margins.
      val norm = {
        var temp = 0.0
        for (i <- 0 until nClasses) {
          probs(i) = math.exp(margins(i))
          temp += probs(i)
        }
        temp
      }
      for (i <- 0 until nClasses) probs(i) /= norm
      // Compute the cumulative probability so we can generate a random number and assign a label.
      for (i <- 1 until nClasses) probs(i) += probs(i - 1)
      // Sample a class by inverse transform sampling over the cumulative probabilities.
      val p = rnd.nextDouble()
      var y = 0
      breakable {
        for (i <- 0 until nClasses) {
          if (p < probs(i)) {
            y = i
            break
          }
        }
      }
      y
    }
    val testData = (0 until nPoints).map(i => (y(i), x(i)))
    testData
  }
}
class LogisticRegressionModelSuite extends FunSuite with PipelineContext {
def validatePrediction(
predictions: Seq[Double],
input: Seq[Double],
expectedAcc: Double) {
val numOffPredictions = predictions.zip(input).count { case (prediction, expected) =>
prediction != expected
}
assert(((input.length - numOffPredictions).toDouble / input.length) > expectedAcc)
}
  // Test if we can correctly learn A, B where Y = logistic(A + B*X)
  test("logistic regression with LBFGS") {
    sc = new SparkContext("local", "test")
    val nPoints = 10000
    val A = 0.0
    val B = -0.8

    // Train on one seeded synthetic draw, validate on another (seeds 42 / 17).
    val testData = LogisticRegressionModelSuite.generateLogisticInput(A, B, nPoints, 42)

    val testRDD = sc.parallelize(testData, 2)
    testRDD.cache()
    val lr = LogisticRegressionEstimator[DenseVector[Double]](2)

    val model = lr.fit(testRDD.map(_._2), testRDD.map(_._1))

    // Test the weights: recovered coefficient/intercept should be close to B/A.
    assert(Stats.aboutEq(model.model.weights(0), B, 0.03))
    assert(Stats.aboutEq(model.model.intercept, A, 0.02))

    val validationData = LogisticRegressionModelSuite.generateLogisticInput(A, B, nPoints, 17)
    val validationRDD = sc.parallelize(validationData, 2)

    // Test prediction on RDD. Expected accuracy w/o intercept is 65%, should be 83% w/ intercept.
    validatePrediction(model.apply(validationRDD.map(_._2)).collect(), validationData.map(_._1.toDouble), 0.65)

    // Test prediction on Array.
    validatePrediction(validationData.map(row => model.apply(row._2)), validationData.map(_._1.toDouble), 0.65)

    // Only the initial RDD should be cached, the estimator shouldn't force cache.
    assert(sc.getRDDStorageInfo.length == 1)
  }
  test("multinomial logistic regression with LBFGS") {
    sc = new SparkContext("local", "test")
    val nPoints = 10000

    /**
     * The following weights and xMean/xVariance are computed from iris dataset with lambda = 0.2.
     * As a result, we are actually drawing samples from probability distribution of built model.
     */
    val weights = Array(
      -0.57997, 0.912083, -0.371077, -0.819866, 2.688191,
      -0.16624, -0.84355, -0.048509, -0.301789, 4.170682)

    val xMean = Array(5.843, 3.057, 3.758, 1.199)
    val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)

    val testData = LogisticRegressionModelSuite.generateMultinomialLogisticInput(
      weights, xMean, xVariance, addIntercept = false, nPoints, 42)

    val testRDD = sc.parallelize(testData, 2)
    testRDD.cache()

    // Tight convergence tolerance so the learned weights are comparable to the
    // reference R coefficients below.
    val lr = LogisticRegressionEstimator[DenseVector[Double]](
      numClasses = 3,
      numIters = 200,
      convergenceTol = 1E-15)

    val model = lr.fit(testRDD.map(_._2), testRDD.map(_._1))
    val numFeatures = testRDD.map(_._2.size).first()

    // Reference coefficients computed in R on the same synthetic distribution.
    val weightsR = DenseVector(Array(
      -0.5837166, 0.9285260, -0.3783612, -0.8123411, 2.6228269,
      -0.1691865, -0.811048, -0.0646380))

    assert(Stats.aboutEq(MLlibUtils.mllibVectorToDenseBreeze(model.model.weights), weightsR, 0.05))

    val validationData = LogisticRegressionModelSuite.generateMultinomialLogisticInput(
      weights, xMean, xVariance, addIntercept = false, nPoints, 17)
    val validationRDD = sc.parallelize(validationData, 2)
    // The validation accuracy is not good since this model (even the original weights) doesn't have
    // very steep curve in logistic function so that when we draw samples from distribution, it's
    // very easy to assign to another labels. However, this prediction result is consistent to R.
    validatePrediction(model.apply(validationRDD.map(_._2)).collect(), validationData.map(_._1.toDouble), 0.47)

    // Only the initial RDD should be cached, the estimator shouldn't force cache.
    assert(sc.getRDDStorageInfo.length == 1)
  }
} | amplab/keystone | src/test/scala/keystoneml/nodes/learning/LogisticRegressionModelSuite.scala | Scala | apache-2.0 | 8,095 |
package de.vorb.namcap.game.world
import scala.collection.immutable.Seq
/**
 * A game world: a (width, height) dimension pair and its flat array of tiles.
 *
 * `(Int, Int)` replaces the deprecated `Pair[Int, Int]` alias (removed in
 * Scala 2.13); it is the identical `Tuple2[Int, Int]` type, so callers are
 * unaffected.
 */
class World(val dimensions: (Int, Int), val tiles: Array[Tile]) {
  /** Concatenation of every tile's string form, in array order (no separator). */
  override def toString: String = tiles.map(_.toString).mkString
}
| pvorb/NamCap | src/main/scala/de/vorb/namcap/game/world/World.scala | Scala | mit | 289 |
package harping
object Default {
  // Spec version written into a Log when none is supplied.
  val version = "1.1"
}

// NOTE(review): these case classes appear to model the HTTP Archive (HAR)
// log format; field names and optionality mirror the HAR 1.2 spec — confirm
// against the spec before changing any field.

// Root of a HAR document: tool metadata plus the recorded pages and entries.
case class Log(creator: Creator,
               pages: Iterable[Page],
               entries: Iterable[Entry],
               browser: Option[Browser] = None,
               version: String = Default.version,
               comment: Option[String] = None)

// Application that produced the log.
case class Creator(name: String,
                   version: String,
                   comment: Option[String] = None)

// Browser that produced the traffic.
case class Browser(name: String,
                   version: String,
                   comment: Option[String] = None)

// One exported page; `id` is referenced by Entry.pageRef.
case class Page(id: String,
                title: String,
                startedDateTime: String,
                pageTiming: PageTimings,
                comment: Option[String] = None)

// Page load milestones in milliseconds relative to page start.
case class PageTimings(onContentLoaded: Int,
                       onLoad: Int,
                       comment: Option[String] = None)

// One request/response exchange. NOTE(review): request/response/cache/timings
// fields are commented out below — the exchange details are currently not modeled.
case class Entry(startedDateTime: String,
                 time: Int,
                 /*request: Request,
                 response: Response,
                 cache: Cache,
                 timings: Timings,*/
                 serverIPAddress: Option[String] = None,
                 connection: Option[String] = None,
                 pageRef: Option[String] = None,
                 comment: Option[String] = None)

// Outgoing HTTP request details; sizes are in bytes.
case class Request(method: String,
                   url: String,
                   httpVersion: String,
                   cookies: Iterable[Cookie],
                   headers: Iterable[Header],
                   queryString: Iterable[QueryParam],
                   headersSize: Int,
                   bodySize: Int,
                   postData: Option[PostData] = None,
                   comment: Option[String] = None)

// Incoming HTTP response details; sizes are in bytes.
case class Response(status: Int,
                    statusText: String,
                    httpVersion: String,
                    cookies: Iterable[Cookie],
                    headers: Iterable[Header],
                    content: Content,
                    redirectURL: String,
                    headersSize: Int,
                    bodySize: Int,
                    comment: Option[String] = None)

// A single cookie attached to a request or response.
case class Cookie(name: String,
                  value: String,
                  path: Option[String] = None,
                  secure: Option[Boolean] = None,
                  domain: Option[String] = None,
                  expires: Option[String] = None,
                  httpOnly: Option[Boolean] = None,
                  comment: Option[String] = None)

// A single HTTP header name/value pair.
case class Header(name: String, value: String, comment: Option[String] = None)

// A single URL query-string parameter.
case class QueryParam(name: String, value: String, comment: Option[String] = None)

// Posted request body: either structured params or raw text.
case class PostData(mime: String,
                    params: Iterable[PostParam],
                    text: String,
                    comment: Option[String] = None)

// One posted parameter (form field or uploaded file).
case class PostParam(name: String,
                     value: Option[String] = None,
                     fileName: Option[String] = None,
                     contentType: Option[String] = None,
                     comment: Option[String] = None)

// Response body details; `size` is the uncompressed byte count.
case class Content(size: Int,
                   mime: String,
                   text: Option[String] = None,
                   encoding: Option[String] = None,
                   compression: Option[Int] = None,
                   comment: Option[String] = None)

// Browser cache state before and after the request.
case class Cache(beforeRequest: Option[CacheInfo],
                 afterRequest: Option[CacheInfo],
                 comment: Option[String] = None)

// State of a cached entry.
case class CacheInfo(lastAccess: String,
                     etag: String,
                     hitCount: Int,
                     expires: Option[String] = None,
                     comment: Option[String] = None)

// Per-phase request timings in milliseconds; -1 conventionally means N/A in HAR —
// TODO confirm that convention is honored by producers of this model.
case class Timings(send: Int,
                   waiting: Int,
                   receive: Int,
                   blocked: Option[Int] = None,
                   dns: Option[Int] = None,
                   connect: Option[Int] = None,
                   ssl: Option[Int] = None,
                   comment: Option[String] = None)
| softprops/harping | src/main/scala/structure.scala | Scala | mit | 4,067 |
package chandu0101.scalajs.rn.apis
import scala.scalajs.js
// Scala.js facade for React Native's ActionSheetIOS API; members are implemented
// by the underlying JavaScript object, hence `js.native` bodies.
// NOTE(review): modern Scala.js requires native facades to carry the
// `@js.native` annotation — confirm the Scala.js version in use before adding it.
trait ActionSheetIOS extends js.Object {

  // Shows an iOS action sheet; `callback` receives the selected button index.
  def showActionSheetWithOptions(options: js.Object, callback: js.Function): Unit = js.native

  // Shows the iOS share sheet with separate failure/success callbacks.
  def showShareActionSheetWithOptions(options: js.Object, failureCallback: js.Function, successCallback: js.Function): Unit = js.native
}
| beni55/scalajs-react-native | core/src/main/scala/chandu0101/scalajs/rn/apis/ActionSheetIOS.scala | Scala | apache-2.0 | 337 |
package com.identityblitz.login.service.spi
/**
 * The service provides read-only access to the login framework's configuration.
 */
trait LoginConfService {

  /**
   * Returns a configuration value corresponding to the specified name.
   * @param name - name of the configuration parameter.
   * @return - [[Some]] with the parameter value if it is specified, otherwise [[None]].
   */
  def getOptLong(implicit name: String): Option[Long]

  /**
   * Returns a configuration value corresponding to the specified name.
   * @param name - name of the configuration parameter.
   * @return - [[Some]] with the parameter value if it is specified, otherwise [[None]].
   */
  def getOptString(implicit name: String): Option[String]

  /**
   * Returns a configuration value corresponding to the specified name.
   * @param name - name of the configuration parameter.
   * @return - [[Some]] with the parameter value if it is specified, otherwise [[None]].
   */
  def getOptBoolean(implicit name: String): Option[Boolean]

  /**
   * Returns a [[Map]] with properties filtered by the specified prefix where:
   * <ul>
   * <li>key - is a substring of the property's name from the specified prefix to the next position of the dot (''.'')</li>
   * <li>value - value of the property</li>
   * </ul>
   *
   * @param prefix - prefix of the properties to include in the result.
   * @return [[Map]] of parameter name (excluding the specified prefix) to value. If there is no configuration with
   *         the specified prefix returns an empty [[Map]].
   */
  def getMapString(prefix: String): Map[String, String]

  /**
   * Returns a [[Map]] with properties filtered by the specified prefix where:
   * <ul>
   * <li>key - is a substring of the property's name from the specified prefix to the next position of the dot (''.'')</li>
   * <li>value - [[Map]] similarly grouped by the current key</li>
   * </ul>
   * @param prefix - prefix of the properties to include in the result.
   * @return [[Map]] of parameter name (excluding the specified prefix) to values grouped by sub-key. If there is no
   *         configuration with the specified prefix returns an empty [[Map]].
   */
  def getDeepMapString(prefix: String): Map[String, Map[String, String]]
}
| brainysmith/login-framework | src/main/scala/com/identityblitz/login/service/spi/LoginConfService.scala | Scala | mit | 2,215 |
package jsrecord.operation
import shapeless._
import tag.@@
import labelled.{ FieldType, field }
/**
 * Convert record keys from `Symbol @@ String(..)` to `String(..)`.
 *
 * Useful for APIs relying on [[shapeless.record.RecordArgs]].
 */
object StripArgs extends Poly1 {
  // For each record field, re-tag the value under the plain string key K,
  // dropping the Symbol tag from the original key type.
  implicit def caseKV[K, V] = at[FieldType[Symbol @@ K, V]](
    f => field[K](f: V)
  )
}
| nigredo-tori/jsrecord | src/main/scala/org/jsrecord/operation/StripArgs.scala | Scala | unlicense | 369 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import java.sql.Timestamp
import java.time.Instant
import java.util.{Date, UUID}
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom._
import com.vividsolutions.jts.index.strtree.{AbstractNode, Boundable, STRtree}
import com.vividsolutions.jts.index.sweepline.{SweepLineIndex, SweepLineInterval, SweepLineOverlapAction}
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, GenericRowWithSchema, ScalaUDF}
import org.apache.spark.sql.jts.JTSTypes
import org.apache.spark.sql.sources.{Filter, _}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType, TimestampType}
import org.apache.spark.storage.StorageLevel
import org.geotools.data.DataUtilities.compare
import org.geotools.data.{DataStoreFinder, Query, Transaction}
import org.geotools.factory.{CommonFactoryFinder, Hints}
import org.geotools.feature.simple.{SimpleFeatureBuilder, SimpleFeatureTypeBuilder}
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.memory.cqengine.datastore.GeoCQEngineDataStore
import org.locationtech.geomesa.spark.jts.util.WKTUtils
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{SftArgResolver, SftArgs, SimpleFeatureTypes}
import org.opengis.feature.`type`._
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.Iterator
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.util.Try
/** Shared constants for the GeoMesa Spark SQL data source. */
object GeoMesaSparkSQL {
  // DataFrameReader/Writer option naming the simple feature type to load or save.
  val GEOMESA_SQL_FEATURE = "geomesa.feature"
}
import org.locationtech.geomesa.spark.GeoMesaSparkSQL._
// Spark DataSource for GeoMesa
// enables loading a GeoMesa DataFrame as
// {{
// val df = spark.read
// .format("geomesa")
// .option(GM.instanceIdParam.getName, "mycloud")
// .option(GM.userParam.getName, "user")
// .option(GM.passwordParam.getName, "password")
// .option(GM.tableNameParam.getName, "sparksql")
// .option(GM.mockParam.getName, "true")
// .option("geomesa.feature", "chicago")
// .load()
// }}
class GeoMesaDataSource extends DataSourceRegister
    with RelationProvider with SchemaRelationProvider with CreatableRelationProvider
    with LazyLogging {

  import CaseInsensitiveMapFix._

  override def shortName(): String = "geomesa"

  // Read path: resolve the SimpleFeatureType from the backing data store when one
  // can be created from `parameters`; otherwise fall back to an inline
  // "geomesa.sft" spec or the SftArgResolver lookup.
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = {
    SQLTypes.init(sqlContext)

    // TODO: Need different ways to retrieve sft
    //  GEOMESA-1643 Add method to lookup SFT to RDD Provider
    //  Below the details of the Converter RDD Provider and Providers which are backed by GT DSes are leaking through
    val ds = DataStoreFinder.getDataStore(parameters)
    val sft = if (ds != null) {
      ds.getSchema(parameters(GEOMESA_SQL_FEATURE))
    } else {
      if (parameters.contains(GEOMESA_SQL_FEATURE) && parameters.contains("geomesa.sft")) {
        SimpleFeatureTypes.createType(parameters(GEOMESA_SQL_FEATURE), parameters("geomesa.sft"))
      } else {
        SftArgResolver.getArg(SftArgs(parameters(GEOMESA_SQL_FEATURE), parameters(GEOMESA_SQL_FEATURE))) match {
          case Right(s) => s
          case Left(e) => throw new IllegalArgumentException("Could not resolve simple feature type", e)
        }
      }
    }
    logger.trace(s"Creating GeoMesa Relation with sft : $sft")

    val schema = sft2StructType(sft)
    GeoMesaRelation(sqlContext, sft, schema, parameters)
  }

  // JNH: Q: Why doesn't this method have the call to SQLTypes.init(sqlContext)?
  // Read path with a caller-supplied Spark schema.
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String], schema: StructType): BaseRelation = {
    val ds = DataStoreFinder.getDataStore(parameters)
    val sft = ds.getSchema(parameters(GEOMESA_SQL_FEATURE))
    GeoMesaRelation(sqlContext, sft, schema, parameters)
  }

  // Maps a SimpleFeatureType to a Spark schema, prepending the synthetic
  // "__fid__" column that carries the feature id.
  private def sft2StructType(sft: SimpleFeatureType) = {
    val fields = sft.getAttributeDescriptors.flatMap { ad => ad2field(ad) }.toList
    StructType(StructField("__fid__", DataTypes.StringType, nullable =false) :: fields)
  }

  // Inverse of sft2StructType: builds a SimpleFeatureType named `name` from a
  // Spark schema, skipping the synthetic "__fid__" column.
  def structType2SFT(struct: StructType, name: String): SimpleFeatureType = {
    import java.{lang => jl}
    val fields = struct.fields

    val builder = new SimpleFeatureTypeBuilder

    fields.filter( _.name != "__fid__").foreach {
      field =>
        field.dataType match {
          case DataTypes.BooleanType => builder.add(field.name, classOf[jl.Boolean])
          case DataTypes.DateType => builder.add(field.name, classOf[java.util.Date])
          case DataTypes.FloatType => builder.add(field.name, classOf[jl.Float])
          case DataTypes.IntegerType => builder.add(field.name, classOf[jl.Integer])
          case DataTypes.DoubleType => builder.add(field.name, classOf[jl.Double])
          case DataTypes.StringType => builder.add(field.name, classOf[jl.String])
          case DataTypes.LongType => builder.add(field.name, classOf[jl.Long])
          case DataTypes.TimestampType => builder.add(field.name, classOf[java.util.Date])

          case JTSTypes.PointTypeInstance => builder.add(field.name, classOf[com.vividsolutions.jts.geom.Point])
          case JTSTypes.LineStringTypeInstance => builder.add(field.name, classOf[com.vividsolutions.jts.geom.LineString])
          case JTSTypes.PolygonTypeInstance => builder.add(field.name, classOf[com.vividsolutions.jts.geom.Polygon])
          case JTSTypes.MultipolygonTypeInstance => builder.add(field.name, classOf[com.vividsolutions.jts.geom.MultiPolygon])
          case JTSTypes.GeometryTypeInstance => builder.add(field.name, classOf[com.vividsolutions.jts.geom.Geometry])
        }
    }

    builder.setName(name)
    builder.buildFeatureType()
  }

  // Maps one attribute descriptor to a Spark StructField; returns None for
  // unsupported bindings (the generic Geometry case must stay last since it
  // matches all geometry subtypes).
  private def ad2field(ad: AttributeDescriptor): Option[StructField] = {
    import java.{lang => jl}
    val dt = ad.getType.getBinding match {
      case t if t == classOf[jl.Double] => DataTypes.DoubleType
      case t if t == classOf[jl.Float] => DataTypes.FloatType
      case t if t == classOf[jl.Integer] => DataTypes.IntegerType
      case t if t == classOf[jl.String] => DataTypes.StringType
      case t if t == classOf[jl.Boolean] => DataTypes.BooleanType
      case t if t == classOf[jl.Long] => DataTypes.LongType
      case t if t == classOf[java.util.Date] => DataTypes.TimestampType

      case t if t == classOf[com.vividsolutions.jts.geom.Point] => JTSTypes.PointTypeInstance
      case t if t == classOf[com.vividsolutions.jts.geom.MultiPoint] => JTSTypes.MultiPointTypeInstance
      case t if t == classOf[com.vividsolutions.jts.geom.LineString] => JTSTypes.LineStringTypeInstance
      case t if t == classOf[com.vividsolutions.jts.geom.MultiLineString] => JTSTypes.MultiLineStringTypeInstance
      case t if t == classOf[com.vividsolutions.jts.geom.Polygon] => JTSTypes.PolygonTypeInstance
      case t if t == classOf[com.vividsolutions.jts.geom.MultiPolygon] => JTSTypes.MultipolygonTypeInstance

      case t if classOf[Geometry].isAssignableFrom(t) => JTSTypes.GeometryTypeInstance

      // NB:  List and Map types are not supported.
      case _ => null
    }
    Option(dt).map(StructField(ad.getLocalName, _))
  }

  // Write path: persists the DataFrame as features of the named type, creating
  // the schema if it does not yet exist, and returns a relation over the result.
  // NOTE(review): the `mode` (SaveMode) argument is never consulted — Overwrite /
  // Append / ErrorIfExists all behave the same here; confirm intended semantics.
  override def createRelation(sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = {
    val newFeatureName = parameters(GEOMESA_SQL_FEATURE)
    val sft: SimpleFeatureType = structType2SFT(data.schema, newFeatureName)

    // reuse the __fid__ if available for joins,
    // otherwise use a random id prefixed with the current time
    val fidIndex = data.schema.fields.indexWhere(_.name == "__fid__")
    val fidFn: Row => String =
      if(fidIndex > -1) (row: Row) => row.getString(fidIndex)
      else _ => "%d%s".format(Instant.now().getEpochSecond, UUID.randomUUID().toString)

    val ds = DataStoreFinder.getDataStore(parameters)

    val schemaInDs = ds.getTypeNames.contains(newFeatureName)
    if (schemaInDs) {
      // An existing type must match the DataFrame-derived schema exactly.
      if (compare(ds.getSchema(newFeatureName),sft) != 0) {
        throw new IllegalStateException(s"The schema of the RDD conflicts with schema:$newFeatureName in the data store")
      }
    } else {
      sft.getUserData.put("override.reserved.words", java.lang.Boolean.TRUE)
      ds.createSchema(sft)
    }

    val structType = if (data.queryExecution == null) {
      sft2StructType(sft)
    } else {
      data.schema
    }

    // Convert each row to a SimpleFeature per partition, re-resolving the data
    // store on the executor since GeoTools stores are not serializable.
    val rddToSave: RDD[SimpleFeature] = data.rdd.mapPartitions( iterRow => {
      val innerDS = DataStoreFinder.getDataStore(parameters)
      val sft = innerDS.getSchema(newFeatureName)
      val builder = new SimpleFeatureBuilder(sft)

      val nameMappings: List[(String, Int)] = SparkUtils.getSftRowNameMappings(sft, structType)
      iterRow.map { r =>
        SparkUtils.row2Sf(nameMappings, r, builder, fidFn(r))
      }
    })

    GeoMesaSpark(parameters).save(rddToSave, parameters, newFeatureName)

    GeoMesaRelation(sqlContext, sft, data.schema, parameters)
  }
}
// the Spark Relation that builds the scan over the GeoMesa table
// used by the SQL optimization rules to push spatio-temporal predicates into the `filt` variable
// Relation over a GeoMesa feature type. The mutable `var` members and the
// statements in the constructor body below eagerly build (and persist) the
// optional spatially-partitioned and in-memory-indexed RDD variants.
case class GeoMesaRelation(sqlContext: SQLContext,
                           sft: SimpleFeatureType,
                           schema: StructType,
                           params: Map[String, String],
                           filt: org.opengis.filter.Filter = org.opengis.filter.Filter.INCLUDE,
                           props: Option[Seq[String]] = None,
                           var partitionHints : Seq[Int] = null,
                           var indexRDD: RDD[GeoCQEngineDataStore] = null,
                           var partitionedRDD: RDD[(Int, Iterable[SimpleFeature])] = null,
                           var indexPartRDD: RDD[(Int, GeoCQEngineDataStore)] = null)
  extends BaseRelation with PrunedFilteredScan {

  // Tuning options parsed from the data source parameters; each falls back to a
  // default when absent or unparsable.
  val cache: Boolean = Try(params("cache").toBoolean).getOrElse(false)
  val indexId: Boolean = Try(params("indexId").toBoolean).getOrElse(false)
  val indexGeom: Boolean = Try(params("indexGeom").toBoolean).getOrElse(false)
  val numPartitions: Int = Try(params("partitions").toInt).getOrElse(sqlContext.sparkContext.defaultParallelism)
  val spatiallyPartition: Boolean = Try(params("spatial").toBoolean).getOrElse(false)
  val partitionStrategy: String = Try(params("strategy").toString).getOrElse("EQUAL")
  var partitionEnvelopes: List[Envelope] = null
  val providedBounds: String = Try(params("bounds").toString).getOrElse(null)
  val coverPartition: Boolean = Try(params("cover").toBoolean).getOrElse(false)
  // Control partitioning strategies that require a sample of the data
  val sampleSize: Int = Try(params("sampleSize").toInt).getOrElse(100)
  val thresholdMultiplier: Double = Try(params("threshold").toDouble).getOrElse(0.3)

  val initialQuery: String = Try(params("query").toString).getOrElse("INCLUDE")
  val geometryOrdinal: Int = sft.indexOf(sft.getGeometryDescriptor.getLocalName)

  // Base RDD of features, built lazily on first use.
  lazy val rawRDD: SpatialRDD = buildRawRDD

  // Loads the features for `initialQuery`, repartitioning only when an explicit
  // partition count was requested and differs from the provider's.
  def buildRawRDD: SpatialRDD = {
    val raw = GeoMesaSpark(params).rdd(
      new Configuration(), sqlContext.sparkContext, params,
      new Query(params(GEOMESA_SQL_FEATURE), ECQL.toFilter(initialQuery)))

    if (raw.getNumPartitions != numPartitions && params.contains("partitions")) {
      SpatialRDD(raw.repartition(numPartitions), raw.schema)
    } else {
      raw
    }
  }

  val encodedSFT: String = org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.encodeType(sft, true)

  // Eager constructor-side initialization: compute partition envelopes (from
  // provided bounds or the chosen strategy) and persist the partitioned RDD.
  if (partitionedRDD == null && spatiallyPartition) {
    if (partitionEnvelopes == null) {
      val bounds: Envelope = if (providedBounds == null) {
        RelationUtils.getBound(rawRDD)
      } else {
        WKTUtils.read(providedBounds).getEnvelopeInternal
      }
      partitionEnvelopes = partitionStrategy match {
        case "EARTH" => RelationUtils.wholeEarthPartitioning(numPartitions)
        case "EQUAL" => RelationUtils.equalPartitioning(bounds, numPartitions)
        case "WEIGHTED" => RelationUtils.weightedPartitioning(rawRDD, bounds, numPartitions, sampleSize)
        case "RTREE" => RelationUtils.rtreePartitioning(rawRDD, numPartitions, sampleSize, thresholdMultiplier)
        case _ => throw new IllegalArgumentException(s"Invalid partitioning strategy specified: $partitionStrategy")
      }
    }
    partitionedRDD = RelationUtils.spatiallyPartition(partitionEnvelopes, rawRDD, numPartitions, geometryOrdinal)
    partitionedRDD.persist(StorageLevel.MEMORY_ONLY)
  }

  // Optional in-memory CQEngine indexing of the (possibly partitioned) features.
  if (cache) {
    if (!DataStoreFinder.getAvailableDataStores.exists(spi => spi.canProcess(Map("cqengine"->"true")))) {
      throw new IllegalArgumentException("Cache argument set to true but GeoCQEngineDataStore is not on the classpath")
    }

    if (spatiallyPartition && indexPartRDD == null) {
      indexPartRDD = RelationUtils.indexPartitioned(encodedSFT, sft.getTypeName, partitionedRDD, indexId, indexGeom)
      partitionedRDD.unpersist() // make this call blocking?
      indexPartRDD.persist(StorageLevel.MEMORY_ONLY)
    } else if (indexRDD == null) {
      indexRDD = RelationUtils.index(encodedSFT, sft.getTypeName, rawRDD, indexId, indexGeom)
      indexRDD.persist(StorageLevel.MEMORY_ONLY)
    }
  }

  // Dispatches the scan to the in-memory (optionally partitioned) index when
  // caching is on, otherwise to a direct GeoMesa scan.
  override def buildScan(requiredColumns: Array[String], filters: Array[org.apache.spark.sql.sources.Filter]): RDD[Row] = {
    if (cache) {
      if (spatiallyPartition) {
        RelationUtils.buildScanInMemoryPartScan(requiredColumns, filters, filt,
          sqlContext.sparkContext, schema, params, partitionHints, indexPartRDD)
      } else {
        RelationUtils.buildScanInMemoryScan(requiredColumns, filters, filt, sqlContext.sparkContext, schema, params, indexRDD)
      }
    } else {
      RelationUtils.buildScan(requiredColumns, filters, filt, sqlContext.sparkContext, schema, params)
    }
  }

  // Null checks are left for Spark to evaluate; everything else is pushed down.
  override def unhandledFilters(filters: Array[Filter]): Array[Filter] = {
    filters.filter {
      case t @ (_:IsNotNull | _:IsNull) => true
      case _ => false
    }
  }
}
// A special case relation that is built when a join happens across two identically partitioned relations
// Uses the sweepline algorithm to lower the complexity of the join
// Relation produced when joining two identically-partitioned GeoMesaRelations;
// evaluates the spatial predicate per partition with a sweepline index instead
// of a full cartesian product.
case class GeoMesaJoinRelation(sqlContext: SQLContext,
                               leftRel: GeoMesaRelation,
                               rightRel: GeoMesaRelation,
                               schema: StructType,
                               condition: Expression,
                               filt: org.opengis.filter.Filter = org.opengis.filter.Filter.INCLUDE,
                               props: Option[Seq[String]] = None)
  extends BaseRelation with PrunedFilteredScan {

  // Joins matching partitions, indexes both sides' x-extents in a sweepline
  // index (side tagged 0 = left, 1 = right), and emits candidate pairs that the
  // overlapAction accepted.
  def sweeplineJoin(overlapAction: OverlapAction): RDD[(Int, (SimpleFeature, SimpleFeature))] = {
    implicit val ordering = RelationUtils.CoordinateOrdering
    val partitionPairs = leftRel.partitionedRDD.join(rightRel.partitionedRDD)
    partitionPairs.flatMap { case (key, (left, right)) =>
      val sweeplineIndex = new SweepLineIndex()
      left.foreach{feature =>
        val coords = feature.getDefaultGeometry.asInstanceOf[Geometry].getCoordinates
        val interval = new SweepLineInterval(coords.min.x, coords.max.x, (0, feature))
        sweeplineIndex.add(interval)
      }
      right.foreach{feature =>
        val coords = feature.getDefaultGeometry.asInstanceOf[Geometry].getCoordinates
        val interval = new SweepLineInterval(coords.min.x, coords.max.x, (1, feature))
        sweeplineIndex.add(interval)
      }
      sweeplineIndex.computeOverlaps(overlapAction)
      overlapAction.joinList.map{ f => (key, f)}
    }
  }

  override def buildScan(requiredColumns: Array[String], filters: Array[org.apache.spark.sql.sources.Filter]): RDD[Row] = {
    val leftSchema = leftRel.schema
    val rightSchema = rightRel.schema
    val leftExtractors = SparkUtils.getExtractors(leftSchema.fieldNames, leftSchema)
    val rightExtractors = SparkUtils.getExtractors(rightSchema.fieldNames, rightSchema)

    // Extract geometry indexes and spatial function from condition expression and relation SFTs
    // NOTE(review): this ScalaUDF pattern (5 constructor args) is tied to a
    // specific Spark version — confirm when upgrading Spark.
    val (leftIndex, rightIndex, conditionFunction) = condition match {
      case ScalaUDF(function: ((Geometry, Geometry) => Boolean), _, children: Seq[AttributeReference], _, _) =>
        // Because the predicate may not have parameters in the right order, we must check both
        val leftAttr = children(0).name
        val rightAttr = children(1).name
        val leftIndex = leftRel.sft.indexOf(leftAttr)
        if (leftIndex == -1) {
          (leftRel.sft.indexOf(rightAttr), rightRel.sft.indexOf(leftAttr), function)
        } else {
          (leftIndex, rightRel.sft.indexOf(rightAttr), function)
        }
    }

    // Perform the sweepline join and build rows containing matching features
    val overlapAction = new OverlapAction(leftIndex, rightIndex, conditionFunction)
    val joinedRows: RDD[(Int, (SimpleFeature, SimpleFeature))] = sweeplineJoin(overlapAction)
    joinedRows.mapPartitions{ iter =>
      val joinedSchema = StructType(leftSchema.fields ++ rightSchema.fields)
      val joinedExtractors = leftExtractors ++ rightExtractors
      iter.map{ case (_, (leftFeature, rightFeature)) =>
        SparkUtils.joinedSf2row(joinedSchema, leftFeature, rightFeature, joinedExtractors)
      }
    }
  }

  // Null checks are left for Spark to evaluate; everything else is pushed down.
  override def unhandledFilters(filters: Array[Filter]): Array[Filter] = {
    filters.filter {
      case t @ (_:IsNotNull | _:IsNull) => true
      case _ => false
    }
  }
}
object RelationUtils extends LazyLogging {
  import CaseInsensitiveMapFix._

  // GeoTools filter factory; @transient so it is not captured in serialized closures.
  @transient val ff = CommonFactoryFinder.getFilterFactory2

  // Orders coordinates by x only; used by the sweepline join and the sampling
  // code below (note: `.min`/`.max` under this ordering pick the min/max-x point).
  implicit val CoordinateOrdering: Ordering[Coordinate] = Ordering.by {_.x}
  /**
   * Creates an in-memory CQEngine data store initialized with the given schema.
   *
   * @param indexId NOTE(review): currently unused — the flag is not forwarded
   *                to the store; confirm whether id indexing was intended.
   * @param indexGeom whether the store indexes the default geometry
   */
  def indexIterator(sft: SimpleFeatureType, indexId: Boolean, indexGeom: Boolean): GeoCQEngineDataStore = {
    val engineStore = new org.locationtech.geomesa.memory.cqengine.datastore.GeoCQEngineDataStore(indexGeom)
    engineStore.createSchema(sft)
    engineStore
  }
  // Builds one in-memory CQEngine store per partition, loading that partition's
  // features into it. The SFT is re-created per partition from its encoded spec
  // since SimpleFeatureType is not serializable.
  def index(encodedSft: String, typeName: String, rdd: RDD[SimpleFeature], indexId: Boolean, indexGeom: Boolean): RDD[GeoCQEngineDataStore] = {
    rdd.mapPartitions { iter =>
      val sft = SimpleFeatureTypes.createType(typeName,encodedSft)
      val engineStore = RelationUtils.indexIterator(sft, indexId, indexGeom)
      val engine = engineStore.namesToEngine(typeName)
      engine.insert(iter.toList)
      Iterator(engineStore)
    }
  }
  // Like `index`, but over a spatially keyed RDD: each grid cell's features are
  // loaded into their own in-memory store, keeping the cell id as the key.
  def indexPartitioned(encodedSft: String,
                       typeName: String,
                       rdd: RDD[(Int, Iterable[SimpleFeature])],
                       indexId: Boolean,
                       indexGeom: Boolean): RDD[(Int, GeoCQEngineDataStore)] = {
    rdd.mapValues { iter =>
      val sft = SimpleFeatureTypes.createType(typeName,encodedSft)
      val engineStore = RelationUtils.indexIterator(sft, indexId, indexGeom)
      val engine = engineStore.namesToEngine(typeName)
      engine.insert(iter)
      engineStore
    }
  }
// Maps a SimpleFeature to the id of the envelope that contains it
// Will duplicate features that belong to more than one envelope
// Returns -1 if no match was found
// TODO: Filter duplicates when querying
def gridIdMapper(sf: SimpleFeature, envelopes: List[Envelope], geometryOrdinal: Int): List[(Int, SimpleFeature)] = {
val geom = sf.getAttribute(geometryOrdinal).asInstanceOf[Geometry]
val mappings = envelopes.indices.flatMap { index =>
if (envelopes(index).intersects(geom.getEnvelopeInternal)) {
Some(index, sf)
} else {
None
}
}
if (mappings.isEmpty) {
List((-1, sf))
} else {
mappings.toList
}
}
// Maps a geometry to the id of the envelope that contains it
// Used to derive partition hints
def gridIdMapper(geom: Geometry, envelopes: List[Envelope]): List[Int] = {
val mappings = envelopes.indices.flatMap { index =>
if (envelopes(index).intersects(geom.getEnvelopeInternal)) {
Some(index)
} else {
None
}
}
if (mappings.isEmpty) {
List(-1)
} else {
mappings.toList
}
}
  // Keys each feature by the envelope(s) it intersects (duplicating straddlers,
  // -1 for misses) and groups by key with an IndexPartitioner so cell i lands
  // in Spark partition i.
  def spatiallyPartition(envelopes: List[Envelope],
                         rdd: RDD[SimpleFeature],
                         numPartitions: Int,
                         geometryOrdinal: Int): RDD[(Int, Iterable[SimpleFeature])] = {
    val keyedRdd = rdd.flatMap { gridIdMapper( _, envelopes, geometryOrdinal)}
    keyedRdd.groupByKey(new IndexPartitioner(numPartitions))
  }
}
  // Computes the overall envelope of the RDD by expanding per-feature envelopes
  // within partitions and merging the partial envelopes across partitions.
  def getBound(rdd: RDD[SimpleFeature]): Envelope = {
    rdd.aggregate[Envelope](new Envelope())(
      (env: Envelope, sf: SimpleFeature) => {
        env.expandToInclude(sf.getDefaultGeometry.asInstanceOf[Geometry].getEnvelopeInternal)
        env
      },
      (env1: Envelope, env2: Envelope) => {
        env1.expandToInclude(env2)
        env1
      }
    )
  }
def equalPartitioning(bound: Envelope, numPartitions: Int): List[Envelope] = {
// Compute bounds of each partition
val partitionsPerDim = Math.sqrt(numPartitions).toInt
val partitionWidth = bound.getWidth / partitionsPerDim
val partitionHeight = bound.getHeight / partitionsPerDim
val minX = bound.getMinX
val minY = bound.getMinY
val partitionEnvelopes: ListBuffer[Envelope] = ListBuffer()
// Build partitions
for (xIndex <- 0 until partitionsPerDim) {
val xPartitionStart = minX + (xIndex * partitionWidth)
val xPartitionEnd = xPartitionStart + partitionWidth
for (yIndex <- 0 until partitionsPerDim) {
val yPartitionStart = minY + (yIndex * partitionHeight)
val yPartitionEnd = yPartitionStart+ partitionHeight
partitionEnvelopes += new Envelope(xPartitionStart, xPartitionEnd, yPartitionStart, yPartitionEnd)
}
}
partitionEnvelopes.toList
}
def weightedPartitioning(rawRDD: RDD[SimpleFeature], bound: Envelope, numPartitions: Int, sampleSize: Int): List[Envelope] = {
val width: Int = Math.sqrt(numPartitions).toInt
val binSize = sampleSize / width
val sample = rawRDD.takeSample(withReplacement = false, sampleSize)
val xSample = sample.map{f => f.getDefaultGeometry.asInstanceOf[Geometry].getCoordinates.min.x}
val ySample = sample.map{f => f.getDefaultGeometry.asInstanceOf[Geometry].getCoordinates.min.y}
val xSorted = xSample.sorted
val ySorted = ySample.sorted
val partitionEnvelopes: ListBuffer[Envelope] = ListBuffer()
for (xBin <- 0 until width) {
val minX = xSorted(xBin * binSize)
val maxX = xSorted(((xBin + 1) * binSize) - 1)
for (yBin <- 0 until width) {
val minY = ySorted(yBin)
val maxY = ySorted(((yBin + 1) * binSize) - 1)
partitionEnvelopes += new Envelope(minX, maxX, minY, maxY)
}
}
partitionEnvelopes.toList
}
def wholeEarthPartitioning(numPartitions: Int): List[Envelope] = {
equalPartitioning(new Envelope(-180,180,-90,90), numPartitions)
}
  // Constructs an RTree based on a sample of the data and returns its bounds as envelopes
  // returns one less envelope than requested to account for the catch-all envelope
  // (the -1 key produced by gridIdMapper for features outside every envelope).
  def rtreePartitioning(rawRDD: RDD[SimpleFeature], numPartitions: Int, sampleSize: Int, thresholdMultiplier: Double): List[Envelope] = {
    val sample = rawRDD.takeSample(withReplacement = false, sampleSize)
    val rtree = new STRtree()
    // Index each sampled feature by its geometry's envelope.
    sample.foreach{ sf =>
      rtree.insert(sf.getDefaultGeometry.asInstanceOf[Geometry].getEnvelopeInternal, sf)
    }
    val envelopes: java.util.List[Envelope] = new java.util.ArrayList[Envelope]()
    // get rtree envelopes, limited to those containing reasonable size
    // (within +/- threshold of the ideal sample count per partition).
    val reasonableSize = sampleSize / numPartitions
    val threshold = (reasonableSize * thresholdMultiplier).toInt
    val minSize = reasonableSize - threshold
    val maxSize = reasonableSize + threshold
    // STRtree must be built before its node structure can be traversed.
    rtree.build()
    queryBoundary(rtree.getRoot, envelopes, minSize, maxSize)
    envelopes.take(numPartitions-1).toList
  }
  // Helper method to get the envelopes of an RTree
  // Recursively walks the tree, collecting bounds of subtrees whose item count
  // lies strictly between minSize and maxSize, and returns the number of items
  // under `node`. Mutates `boundaries` in place.
  def queryBoundary(node: AbstractNode, boundaries: java.util.List[Envelope], minSize: Int, maxSize: Int): Int = {
    // get node's immediate children
    val childBoundables: java.util.List[_] = node.getChildBoundables
    // True if current node is leaf
    var flagLeafnode = true
    var i = 0
    // A node is a leaf iff none of its children are internal AbstractNodes.
    while (i < childBoundables.size && flagLeafnode) {
      val childBoundable = childBoundables.get(i).asInstanceOf[Boundable]
      if (childBoundable.isInstanceOf[AbstractNode]) {
        flagLeafnode = false
      }
      i += 1
    }
    if (flagLeafnode) {
      // Leaf: the item count is simply the number of children.
      childBoundables.size
    } else {
      var nodeCount = 0
      for ( i <- 0 until childBoundables.size ) {
        val childBoundable = childBoundables.get(i).asInstanceOf[Boundable]
        childBoundable match {
          case (child: AbstractNode) =>
            val childSize = queryBoundary(child, boundaries, minSize, maxSize)
            // check boundary for size and existence in chosen boundaries
            if (childSize < maxSize && childSize > minSize) {
              var alreadyAdded = false
              // Skip a parent whose child bounds were already collected,
              // to avoid overlapping/duplicate partition envelopes.
              if (node.getLevel != 1) {
                child.getChildBoundables.asInstanceOf[java.util.List[AbstractNode]].foreach { c =>
                  alreadyAdded = alreadyAdded || boundaries.contains(c.getBounds.asInstanceOf[Envelope])
                }
              }
              if (!alreadyAdded) {
                boundaries.add(child.getBounds.asInstanceOf[Envelope])
              }
            }
            nodeCount += childSize
          case (_) => nodeCount += 1 // negligible difference but accurate
        }
      }
      nodeCount
    }
  }
  // Uses the envelope of every feature in `coverRDD` as a partition envelope.
  // NOTE(review): dataRDD and numPartitions are not referenced in the body;
  // they appear to exist only for signature parity with the other
  // *Partitioning methods — confirm before removing.
  def coverPartitioning(dataRDD: RDD[SimpleFeature], coverRDD: RDD[SimpleFeature], numPartitions: Int): List[Envelope] = {
    coverRDD.map {
      _.getDefaultGeometry.asInstanceOf[Geometry].getEnvelopeInternal
    }.collect().toList
  }
  // Builds the RDD[Row] for a (distributed, non-cached) relation scan:
  // translates Spark pushdown filters to CQL, ANDs them with the relation's
  // base filter, queries GeoMesa, and converts features to Rows.
  def buildScan(requiredColumns: Array[String],
                filters: Array[org.apache.spark.sql.sources.Filter],
                filt: org.opengis.filter.Filter,
                ctx: SparkContext,
                schema: StructType,
                params: Map[String, String]): RDD[Row] = {
    logger.debug(
      s"""Building scan, filt = $filt,
         |filters = ${filters.mkString(",")},
         |requiredColumns = ${requiredColumns.mkString(",")}""".stripMargin)
    // Untranslatable Spark filters are dropped; Spark re-evaluates them on top.
    val compiledCQL = filters.flatMap(SparkUtils.sparkFilterToCQLFilter).foldLeft[org.opengis.filter.Filter](filt) { (l, r) => ff.and(l, r) }
    // "__fid__" is synthetic (feature id), not a real attribute of the SFT.
    val requiredAttributes = requiredColumns.filterNot(_ == "__fid__")
    val rdd = GeoMesaSpark(params).rdd(
      new Configuration(ctx.hadoopConfiguration), ctx, params,
      new Query(params(GEOMESA_SQL_FEATURE), compiledCQL, requiredAttributes))
    val extractors = SparkUtils.getExtractors(requiredColumns, schema)
    val result = rdd.map(SparkUtils.sf2row(schema, _, extractors))
    result.asInstanceOf[RDD[Row]]
  }
  // Scan variant for data cached in per-partition in-memory CQEngine indexes:
  // each partition's engine is queried with the compiled CQL filter.
  // The filter is shipped as an ECQL string because OpenGIS Filter objects
  // are not serializable; it is re-parsed inside each task.
  def buildScanInMemoryScan(requiredColumns: Array[String],
                    filters: Array[org.apache.spark.sql.sources.Filter],
                    filt: org.opengis.filter.Filter,
                    ctx: SparkContext,
                    schema: StructType,
                    params: Map[String, String], indexRDD: RDD[GeoCQEngineDataStore]): RDD[Row] = {
    logger.debug(
      s"""Building in-memory scan, filt = $filt,
         |filters = ${filters.mkString(",")},
         |requiredColumns = ${requiredColumns.mkString(",")}""".stripMargin)
    val compiledCQL = filters.flatMap(SparkUtils.sparkFilterToCQLFilter).foldLeft[org.opengis.filter.Filter](filt) { (l, r) => SparkUtils.ff.and(l, r) }
    val filterString = ECQL.toCQL(compiledCQL)
    val extractors = SparkUtils.getExtractors(requiredColumns, schema)
    val requiredAttributes = requiredColumns.filterNot(_ == "__fid__")
    val result = indexRDD.flatMap { engine =>
      val cqlFilter = ECQL.toFilter(filterString)
      val query = new Query(params(GEOMESA_SQL_FEATURE), cqlFilter, requiredAttributes)
      SelfClosingIterator(engine.getFeatureReader(query, Transaction.AUTO_COMMIT))
    }.map(SparkUtils.sf2row(schema, _, extractors))
    result.asInstanceOf[RDD[Row]]
  }
  // Scan variant for spatially partitioned in-memory indexes: when the query
  // yields partition hints, only the matching (key, engine) pairs are queried.
  // As above, the CQL filter travels as an ECQL string for serializability.
  def buildScanInMemoryPartScan(requiredColumns: Array[String],
                            filters: Array[org.apache.spark.sql.sources.Filter],
                            filt: org.opengis.filter.Filter,
                            ctx: SparkContext,
                            schema: StructType,
                            params: Map[String, String],
                            partitionHints: Seq[Int],
                            indexPartRDD: RDD[(Int, GeoCQEngineDataStore)]): RDD[Row] = {
    logger.debug(
      s"""Building partitioned in-memory scan, filt = $filt,
         |filters = ${filters.mkString(",")},
         |requiredColumns = ${requiredColumns.mkString(",")}""".stripMargin)
    val compiledCQL = filters.flatMap(SparkUtils.sparkFilterToCQLFilter).foldLeft[org.opengis.filter.Filter](filt) { (l, r) => SparkUtils.ff.and(l,r) }
    val filterString = ECQL.toCQL(compiledCQL)
    // If keys were derived from query, go straight to those partitions
    val reducedRdd = if (partitionHints != null) {
      indexPartRDD.filter {case (key, _) => partitionHints.contains(key) }
    } else {
      indexPartRDD
    }
    val extractors = SparkUtils.getExtractors(requiredColumns, schema)
    val requiredAttributes = requiredColumns.filterNot(_ == "__fid__")
    val result = reducedRdd.flatMap { case (key, engine) =>
      val cqlFilter = ECQL.toFilter(filterString)
      val query = new Query(params(GEOMESA_SQL_FEATURE), cqlFilter, requiredAttributes)
      SelfClosingIterator(engine.getFeatureReader(query, Transaction.AUTO_COMMIT))
    }.map(SparkUtils.sf2row(schema, _, extractors))
    result.asInstanceOf[RDD[Row]]
  }
}
object SparkUtils {
  @transient val ff = CommonFactoryFinder.getFilterFactory2

  // the SFT attributes do not have the __fid__ so we have to translate accordingly
  /**
   * Builds one extractor function per requested column. "__fid__" maps to the
   * feature id; every other column maps to the attribute at its position among
   * the non-fid columns. Attributes backing a TimestampType field are
   * converted from java.util.Date to java.sql.Timestamp.
   */
  def getExtractors(requiredColumns: Array[String], schema: StructType): Array[SimpleFeature => AnyRef] = {
    val requiredAttributes = requiredColumns.filterNot(_ == "__fid__")
    val IdExtractor: SimpleFeature => AnyRef = sf => sf.getID

    requiredColumns.map {
      case "__fid__" => IdExtractor
      case col =>
        val index = requiredAttributes.indexOf(col)
        val schemaIndex = schema.fieldIndex(col)
        val fieldType = schema.fields(schemaIndex).dataType
        if (fieldType == TimestampType) {
          sf: SimpleFeature => {
            val attr = sf.getAttribute(index)
            if (attr == null) { null } else {
              new Timestamp(attr.asInstanceOf[Date].getTime)
            }
          }
        } else {
          sf: SimpleFeature => sf.getAttribute(index)
        }
    }
  }

  /**
   * Translates a Spark data-source filter into a (geo)CQL filter.
   * Returns None when the filter has no CQL translation (e.g. IsNull);
   * Spark re-evaluates pushed filters on the returned rows, so an omitted
   * or weakened filter only reduces pushdown, never correctness.
   */
  def sparkFilterToCQLFilter(filt: org.apache.spark.sql.sources.Filter): Option[org.opengis.filter.Filter] = filt match {
    case GreaterThanOrEqual(attribute, v) => Some(ff.greaterOrEqual(ff.property(attribute), ff.literal(v)))
    case GreaterThan(attr, v)             => Some(ff.greater(ff.property(attr), ff.literal(v)))
    case LessThanOrEqual(attr, v)         => Some(ff.lessOrEqual(ff.property(attr), ff.literal(v)))
    case LessThan(attr, v)                => Some(ff.less(ff.property(attr), ff.literal(v)))
    case EqualTo(attr, v) if attr == "__fid__" => Some(ff.id(ff.featureId(v.toString)))
    case EqualTo(attr, v)                 => Some(ff.equals(ff.property(attr), ff.literal(v)))
    case In(attr, values) if attr == "__fid__" => Some(ff.id(values.map(v => ff.featureId(v.toString)).toSet))
    case In(attr, values)                 =>
      Some(values.map(v => ff.equals(ff.property(attr), ff.literal(v))).reduce[org.opengis.filter.Filter]( (l,r) => ff.or(l,r)))
    // FIX: previously called .get on the recursive results, which threw
    // NoSuchElementException when a child was untranslatable (e.g. IsNull).
    // For And, pushing only the translatable side is a safe weakening;
    // for Or and Not, a missing child makes the whole filter untranslatable.
    case And(left, right) =>
      (sparkFilterToCQLFilter(left), sparkFilterToCQLFilter(right)) match {
        case (Some(l), Some(r))    => Some(ff.and(l, r))
        case (some @ Some(_), None) => some
        case (None, some)          => some
      }
    case Or(left, right) =>
      for {
        l <- sparkFilterToCQLFilter(left)
        r <- sparkFilterToCQLFilter(right)
      } yield ff.or(l, r)
    case Not(f) => sparkFilterToCQLFilter(f).map(inner => ff.not(inner))
    case StringStartsWith(a, v) => Some(ff.like(ff.property(a), s"$v%"))
    case StringEndsWith(a, v)   => Some(ff.like(ff.property(a), s"%$v"))
    case StringContains(a, v)   => Some(ff.like(ff.property(a), s"%$v%"))
    case IsNull(attr)    => None
    case IsNotNull(attr) => None
  }

  /** Converts a feature to a Row by applying each column extractor in order. */
  def sf2row(schema: StructType, sf: SimpleFeature, extractors: Array[SimpleFeature => AnyRef]): Row = {
    val res = Array.ofDim[Any](extractors.length)
    var i = 0
    while(i < extractors.length) {
      res(i) = extractors(i)(sf)
      i += 1
    }
    new GenericRowWithSchema(res, schema)
  }

  /**
   * Converts a joined pair of features to a single Row: the first
   * (attributeCount + 1) extractors read from sf1 (its fid plus attributes),
   * the remainder from sf2.
   */
  def joinedSf2row(schema: StructType, sf1: SimpleFeature, sf2: SimpleFeature, extractors: Array[SimpleFeature => AnyRef]): Row = {
    val leftLength = sf1.getAttributeCount + 1
    val res = Array.ofDim[Any](extractors.length)
    var i = 0
    while(i < leftLength) {
      res(i) = extractors(i)(sf1)
      i += 1
    }
    while(i < extractors.length) {
      res(i) = extractors(i)(sf2)
      i += 1
    }
    new GenericRowWithSchema(res, schema)
  }

  // Since each attribute's corresponding index in the Row is fixed. Compute the mapping once
  def getSftRowNameMappings(sft: SimpleFeatureType, schema: StructType): List[(String, Int)] = {
    sft.getAttributeDescriptors.map{ ad =>
      val name = ad.getLocalName
      (name, schema.fieldIndex(ad.getLocalName))
    }.toList
  }

  /** Builds a SimpleFeature with the given id from a Row, using the
   *  precomputed name-to-column mapping. USE_PROVIDED_FID makes the data
   *  store keep `id` instead of generating one. */
  def row2Sf(nameMappings: List[(String, Int)], row: Row, builder: SimpleFeatureBuilder, id: String): SimpleFeature = {
    builder.reset()
    nameMappings.foreach{ case (name, index) =>
      builder.set(name, row.getAs[Object](index))
    }
    builder.userData(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
    builder.buildFeature(id)
  }
}
class OverlapAction(leftIndex: Int,
                    rightIndex: Int,
                    conditionFunction: (Geometry, Geometry) => Boolean) extends SweepLineOverlapAction with Serializable {

  // Accumulates (left-dataset feature, right-dataset feature) pairs whose
  // geometries satisfy the join condition.
  val joinList = ListBuffer[(SimpleFeature, SimpleFeature)]()

  // Evaluates the join condition with `left` drawn from dataset 0 and
  // `right` from dataset 1, recording the pair on success.
  private def tryJoin(left: SimpleFeature, right: SimpleFeature): Unit = {
    val leftGeom = left.getAttribute(leftIndex).asInstanceOf[Geometry]
    val rightGeom = right.getAttribute(rightIndex).asInstanceOf[Geometry]
    if (conditionFunction(leftGeom, rightGeom)) {
      joinList.append((left, right))
    }
  }

  // Called by the sweep-line algorithm for each pair of overlapping intervals;
  // only cross-dataset pairs (keys 0/1) are considered.
  override def overlap(s0: SweepLineInterval, s1: SweepLineInterval): Unit = {
    val (key0, feature0) = s0.getItem.asInstanceOf[(Int, SimpleFeature)]
    val (key1, feature1) = s1.getItem.asInstanceOf[(Int, SimpleFeature)]
    if (key0 == 0 && key1 == 1) {
      tryJoin(feature0, feature1)
    } else if (key0 == 1 && key1 == 0) {
      tryJoin(feature1, feature0)
    }
  }
}
| ddseapy/geomesa | geomesa-spark/geomesa-spark-sql/src/main/scala/org/locationtech/geomesa/spark/GeoMesaSparkSQL.scala | Scala | apache-2.0 | 36,205 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.single_core
import akka.actor.{Actor, PoisonPill, Props}
import app.runutils.IOHandling.{MongoSource, InputSource}
import app.runutils.RunningOptions
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
/**
* Created by nkatz on 9/14/16.
*/
/** Top-level actor: spawns a Dispatcher child for either evaluation or
 *  learning mode and forwards the triggering message to it. */
class Master[T <: InputSource](
    inps: RunningOptions,
    trainingDataOptions: T,
    testingDataOptions: T,
    trainingDataFunction: T => Iterator[Example],
    testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {

  // Creates a fresh Dispatcher named after `mode` and sends it `msg`.
  private def startDispatcher(mode: String, msg: String): Unit = {
    val dispatcher = context.actorOf(
      Props(new Dispatcher(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)),
      name = s"Dispatcher-Actor-$mode-mode")
    dispatcher ! msg
  }

  def receive = {
    case "eval"  => startDispatcher("eval", "eval")
    case "start" => startDispatcher("learning", "start")
  }
}
| nkatzz/OLED | src/main/scala/oled/single_core/Master.scala | Scala | gpl-3.0 | 1,724 |
package com.pawelmandera.io.tika
import java.io.{ File, FileInputStream }
import org.apache.tika.metadata.Metadata
import org.apache.tika.sax.BodyContentHandler
import org.apache.tika.parser.AutoDetectParser
/*
 * Simple wrapper for Apache Tika.
 */
object TikaParser {

  /**
   * Extracts the plain-text body of `file` using Tika's auto-detecting parser.
   *
   * @param file the document to parse
   * @return the extracted text content
   */
  def text(file: File): String = {
    val is = new FileInputStream(file)
    try {
      val handler = new BodyContentHandler()
      val metadata = new Metadata()
      val parser = new AutoDetectParser()
      parser.parse(is, handler, metadata)
      handler.toString
    } finally {
      // FIX: the stream was never closed before, leaking one file handle
      // per parsed document.
      is.close()
    }
  }
}
| pmandera/duometer | src/main/scala/com/pawelmandera/io/tika/TikaParser.scala | Scala | apache-2.0 | 532 |
package au.id.cxd.text.preprocess
/**
* Created by cd on 6/1/17.
*/
/**
 * Tokenises text by splitting on `pattern`, then normalising each token:
 * non-word characters are stripped, the token is lowercased, and empty or
 * underscore-containing tokens are discarded.
 */
class LinePatternFilter(val pattern: String = """[,\.\!\?\s]""") extends StringSeqFilter {

  // Shared normalise-and-filter pipeline, previously duplicated verbatim
  // in tokenise and tokeniseQuery.
  private def cleanTokens(tokens: Array[String]): Array[String] =
    tokens.map { item =>
      item.replaceAll("""\W""", "").toLowerCase
    }.filter(!_.isEmpty)
      .filter(!_.contains("_"))

  /**
   * tokenise the line
   *
   * @param line raw input line, split on `pattern` before cleaning
   * @return cleaned lowercase tokens
   */
  def tokenise(line: String) = cleanTokens(line.split(pattern))

  /**
   * tokenise a single query instance
   *
   * Note this does not change the token boundaries: each array element is
   * cleaned individually, without splitting on `pattern`.
   *
   * @param query pre-split query tokens
   * @return cleaned lowercase tokens
   */
  def tokeniseQuery(query: Array[String]): Array[String] = cleanTokens(query)
}
object LinePatternFilter {
def apply(pattern: String = """[,\.\!\?\s]""") = new LinePatternFilter(pattern)
} | cxd/scala-au.id.cxd.math | math/src/main/scala/au/id/cxd/text/preprocess/LinePatternFilter.scala | Scala | mit | 826 |
package pl.suder.scala.publisher
import akka.actor._
import akka.event.LoggingReceive
import pl.suder.scala.auctionHouse.Message._
/** Prints every winner notification it receives and acknowledges the sender. */
class AuctionPublisher extends Actor {
  override def receive = LoggingReceive {
    case Notify(title, winner, price) =>
      println(s"Current winner of action: $title is $winner with price $price")
      sender ! Ack
  }
}
| Materix/Sem7-Scala | src/main/scala/pl/suder/scala/publisher/AuctionPublisher.scala | Scala | mit | 364 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.tiny.names
trait StdNames {
  // Sentinel for "no name": the empty name.
  val noname = Name("")
}
object StdNames extends StdNames
| amanjpro/languages-a-la-carte | tiny/src/main/scala/names/StdNames.scala | Scala | bsd-3-clause | 1,690 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia.dsl
import algolia.definitions.{
ListClustersDefinition,
ListIndexesDefinition,
ListKeysDefinition,
ListUserIDsDefinition
}
import algolia.responses.{ClusterList, Indices, UserIDList}
import algolia.{AlgoliaClient, Executable}
import org.json4s.Formats
import scala.concurrent.{ExecutionContext, Future}
/** DSL entry points for the Algolia "list" endpoints (indices, API keys,
  * clusters, userIDs), plus the implicit executors that run them. */
trait ListDsl {
  implicit val formats: Formats
  /** Builder object: `list indices`, `list keys`, etc. */
  case object list {
    // Both spellings are provided for convenience.
    def indices = ListIndexesDefinition()
    def indexes = ListIndexesDefinition()
    def keys = ListKeysDefinition()
    def clusters = ListClustersDefinition()
    def userIDs = ListUserIDsDefinition()
    @deprecated("use without index", "1.27.0")
    def keysFrom(indexName: String) =
      ListKeysDefinition(indexName = Some(indexName))
  }
  // Executes a ListIndexesDefinition and decodes the response as Indices.
  implicit object ListIndexesDefinitionExecutable
      extends Executable[ListIndexesDefinition, Indices] {
    override def apply(client: AlgoliaClient, query: ListIndexesDefinition)(
        implicit executor: ExecutionContext
    ): Future[Indices] = {
      client.request[Indices](query.build())
    }
  }
  // Executes a ListClustersDefinition and decodes the response as ClusterList.
  implicit object ListClustersExecutable
      extends Executable[ListClustersDefinition, ClusterList] {
    override def apply(client: AlgoliaClient, query: ListClustersDefinition)(
        implicit executor: ExecutionContext
    ): Future[ClusterList] = {
      client.request[ClusterList](query.build())
    }
  }
  // Executes a ListUserIDsDefinition and decodes the response as UserIDList.
  implicit object ListUserIDsExecutable
      extends Executable[ListUserIDsDefinition, UserIDList] {
    override def apply(client: AlgoliaClient, query: ListUserIDsDefinition)(
        implicit executor: ExecutionContext
    ): Future[UserIDList] = {
      client.request[UserIDList](query.build())
    }
  }
}
| algolia/algoliasearch-client-scala | src/main/scala/algolia/dsl/ListDsl.scala | Scala | mit | 2,871 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.util.Properties
import joptsimple.{OptionParser, OptionSpec}
import org.junit.Assert._
import org.junit.Test
/** Unit tests for CommandLineUtils: key=value argument parsing and merging
  * of parsed command-line options into a Properties object. */
class CommandLineUtilsTest {
  // A key with '=' but no value must be rejected when missing values are not accepted.
  @Test(expected = classOf[java.lang.IllegalArgumentException])
  def testParseEmptyArg(): Unit = {
    val argArray = Array("my.empty.property=")
    CommandLineUtils.parseKeyValueArgs(argArray, acceptMissingValue = false)
  }
  // A bare key with no '=' at all must likewise be rejected.
  @Test(expected = classOf[java.lang.IllegalArgumentException])
  def testParseEmptyArgWithNoDelimiter(): Unit = {
    val argArray = Array("my.empty.property")
    CommandLineUtils.parseKeyValueArgs(argArray, acceptMissingValue = false)
  }
  // With the default (accepting missing values), both forms map to "".
  @Test
  def testParseEmptyArgAsValid(): Unit = {
    val argArray = Array("my.empty.property=", "my.empty.property1")
    val props = CommandLineUtils.parseKeyValueArgs(argArray)
    assertEquals("Value of a key with missing value should be an empty string", props.getProperty("my.empty.property"), "")
    assertEquals("Value of a key with missing value with no delimiter should be an empty string", props.getProperty("my.empty.property1"), "")
  }
  @Test
  def testParseSingleArg(): Unit = {
    val argArray = Array("my.property=value")
    val props = CommandLineUtils.parseKeyValueArgs(argArray)
    assertEquals("Value of a single property should be 'value' ", props.getProperty("my.property"), "value")
  }
  @Test
  def testParseArgs(): Unit = {
    val argArray = Array("first.property=first","second.property=second")
    val props = CommandLineUtils.parseKeyValueArgs(argArray)
    assertEquals("Value of first property should be 'first'", props.getProperty("first.property"), "first")
    assertEquals("Value of second property should be 'second'", props.getProperty("second.property"), "second")
  }
  // Only the first '=' is the delimiter; later ones belong to the value.
  @Test
  def testParseArgsWithMultipleDelimiters(): Unit = {
    val argArray = Array("first.property==first", "second.property=second=", "third.property=thi=rd")
    val props = CommandLineUtils.parseKeyValueArgs(argArray)
    assertEquals("Value of first property should be '=first'", props.getProperty("first.property"), "=first")
    assertEquals("Value of second property should be 'second='", props.getProperty("second.property"), "second=")
    assertEquals("Value of second property should be 'thi=rd'", props.getProperty("third.property"), "thi=rd")
  }
  // Shared fixtures for the maybeMergeOptions tests below. Each test calls
  // setUpOptions() itself; there is no @Before hook.
  val props = new Properties()
  val parser = new OptionParser(false)
  var stringOpt : OptionSpec[String] = _
  var intOpt : OptionSpec[java.lang.Integer] = _
  var stringOptOptionalArg : OptionSpec[String] = _
  var intOptOptionalArg : OptionSpec[java.lang.Integer] = _
  var stringOptOptionalArgNoDefault : OptionSpec[String] = _
  var intOptOptionalArgNoDefault : OptionSpec[java.lang.Integer] = _
  // Declares six option specs covering required/optional args with and
  // without defaults, for both String and Integer types.
  def setUpOptions(): Unit = {
    stringOpt = parser.accepts("str")
      .withRequiredArg
      .ofType(classOf[String])
      .defaultsTo("default-string")
    intOpt = parser.accepts("int")
      .withRequiredArg()
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(100)
    stringOptOptionalArg = parser.accepts("str-opt")
      .withOptionalArg
      .ofType(classOf[String])
      .defaultsTo("default-string-2")
    intOptOptionalArg = parser.accepts("int-opt")
      .withOptionalArg
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(200)
    stringOptOptionalArgNoDefault = parser.accepts("str-opt-nodef")
      .withOptionalArg
      .ofType(classOf[String])
    intOptOptionalArgNoDefault = parser.accepts("int-opt-nodef")
      .withOptionalArg
      .ofType(classOf[java.lang.Integer])
  }
  // Explicitly supplied option values override pre-existing properties.
  @Test
  def testMaybeMergeOptionsOverwriteExisting(): Unit = {
    setUpOptions()
    props.put("skey", "existing-string")
    props.put("ikey", "300")
    props.put("sokey", "existing-string-2")
    props.put("iokey", "400")
    props.put("sondkey", "existing-string-3")
    props.put("iondkey", "500")
    val options = parser.parse(
      "--str", "some-string",
      "--int", "600",
      "--str-opt", "some-string-2",
      "--int-opt", "700",
      "--str-opt-nodef", "some-string-3",
      "--int-opt-nodef", "800"
    )
    CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt)
    CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt)
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault)
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault)
    assertEquals("some-string", props.get("skey"))
    assertEquals("600", props.get("ikey"))
    assertEquals("some-string-2", props.get("sokey"))
    assertEquals("700", props.get("iokey"))
    assertEquals("some-string-3", props.get("sondkey"))
    assertEquals("800", props.get("iondkey"))
  }
  // A flag given without its optional argument overwrites existing values with
  // the declared default, or removes them when no default exists.
  @Test
  def testMaybeMergeOptionsDefaultOverwriteExisting(): Unit = {
    setUpOptions()
    props.put("sokey", "existing-string")
    props.put("iokey", "300")
    props.put("sondkey", "existing-string-2")
    props.put("iondkey", "400")
    val options = parser.parse(
      "--str-opt",
      "--int-opt",
      "--str-opt-nodef",
      "--int-opt-nodef"
    )
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault)
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault)
    assertEquals("default-string-2", props.get("sokey"))
    assertEquals("200", props.get("iokey"))
    assertNull(props.get("sondkey"))
    assertNull(props.get("iondkey"))
  }
  // When neither the option nor the property is present, declared defaults
  // are filled in; options without defaults leave the property unset.
  @Test
  def testMaybeMergeOptionsDefaultValueIfNotExist(): Unit = {
    setUpOptions()
    val options = parser.parse()
    CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt)
    CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt)
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault)
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault)
    assertEquals("default-string", props.get("skey"))
    assertEquals("100", props.get("ikey"))
    assertEquals("default-string-2", props.get("sokey"))
    assertEquals("200", props.get("iokey"))
    assertNull(props.get("sondkey"))
    assertNull(props.get("iondkey"))
  }
  // Options absent from the command line never overwrite existing properties.
  @Test
  def testMaybeMergeOptionsNotOverwriteExisting(): Unit = {
    setUpOptions()
    props.put("skey", "existing-string")
    props.put("ikey", "300")
    props.put("sokey", "existing-string-2")
    props.put("iokey", "400")
    props.put("sondkey", "existing-string-3")
    props.put("iondkey", "500")
    val options = parser.parse()
    CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt)
    CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt)
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg)
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault)
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault)
    assertEquals("existing-string", props.get("skey"))
    assertEquals("300", props.get("ikey"))
    assertEquals("existing-string-2", props.get("sokey"))
    assertEquals("400", props.get("iokey"))
    assertEquals("existing-string-3", props.get("sondkey"))
    assertEquals("500", props.get("iondkey"))
  }
}
| sslavic/kafka | core/src/test/scala/unit/kafka/utils/CommandLineUtilsTest.scala | Scala | apache-2.0 | 8,686 |
//TODO This file has been copied from the experimental section of Jackson Module Scala.
//Remove it once this feature becomes available im main branch.
package com.fasterxml.jackson.module.scala.experimental
import java.io._
import java.lang.reflect.{Type, ParameterizedType}
import java.net.URL
import com.fasterxml.jackson.core._
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind._
import com.fasterxml.jackson.databind.jsonFormatVisitors.JsonFormatVisitorWrapper
import com.fasterxml.jackson.databind.jsonschema.JsonSchema
trait ScalaObjectMapper {
self: ObjectMapper =>
/*
**********************************************************
* Configuration: mix-in annotations
**********************************************************
*/
/**
* Method to use for adding mix-in annotations to use for augmenting
* specified class or interface. All annotations from
* <code>mixinSource</code> are taken to override annotations
* that <code>target</code> (or its supertypes) has.
*
* @tparam Target Class (or interface) whose annotations to effectively override
* @tparam MixinSource Class (or interface) whose annotations are to
* be "added" to target's annotations, overriding as necessary
*/
final def addMixInAnnotations[Target: Manifest, MixinSource: Manifest]() {
addMixInAnnotations(manifest[Target].erasure, manifest[MixinSource].erasure)
}
final def findMixInClassFor[T: Manifest]: Class[_] = {
findMixInClassFor(manifest[T].erasure)
}
/*
**********************************************************
* Configuration, basic type handling
**********************************************************
*/
/**
* Convenience method for constructing [[com.fasterxml.jackson.databind.JavaType]] out of given
* type (typically <code>java.lang.Class</code>), but without explicit
* context.
*/
def constructType[T: Manifest]: JavaType = {
constructType(manifest[T].erasure)
}
/*
**********************************************************
* Public API (from ObjectCodec): deserialization
* (mapping from JSON to Java types);
* main methods
**********************************************************
*/
/**
* Method to deserialize JSON content into a Java type, reference
* to which is passed as argument. Type is passed using so-called
* "super type token" (see )
* and specifically needs to be used if the root type is a
* parameterized (generic) container type.
*/
def readValue[T: Manifest](jp: JsonParser): T = {
readValue(jp, typeReference[T])
}
/**
* Method for reading sequence of Objects from parser stream.
* Sequence can be either root-level "unwrapped" sequence (without surrounding
* JSON array), or a sequence contained in a JSON Array.
* In either case [[com.fasterxml.jackson.core.JsonParser]] must point to the first token of
* the first element, OR not point to any token (in which case it is advanced
* to the next token). This means, specifically, that for wrapped sequences,
* parser MUST NOT point to the surrounding <code>START_ARRAY</code> but rather
* to the token following it.
* <p>
* Note that [[com.fasterxml.jackson.databind.ObjectReader]] has more complete set of variants.
*/
def readValues[T: Manifest](jp: JsonParser): MappingIterator[T] = {
readValues(jp, typeReference[T])
}
/*
**********************************************************
* Public API (from ObjectCodec): Tree Model support
**********************************************************
*/
/**
* Convenience conversion method that will bind data given JSON tree
* contains into specific value (usually bean) type.
* <p>
* Equivalent to:
* <pre>
* objectMapper.convertValue(n, valueClass);
* </pre>
*/
def treeToValue[T: Manifest](n: TreeNode): T = {
treeToValue(n, manifest[T].erasure).asInstanceOf[T]
}
/*
**********************************************************
* Extended Public API, accessors
**********************************************************
*/
/**
* Method that can be called to check whether mapper thinks
* it could serialize an instance of given Class.
* Check is done
* by checking whether a serializer can be found for the type.
*
* @return True if mapper can find a serializer for instances of
* given class (potentially serializable), false otherwise (not
* serializable)
*/
  def canSerialize[T: Manifest]: Boolean = {
    canSerialize(manifest[T].erasure) // serializability is determined from the runtime class alone
  }
/**
* Method that can be called to check whether mapper thinks
* it could deserialize an Object of given type.
* Check is done
* by checking whether a deserializer can be found for the type.
*
* @return True if mapper can find a serializer for instances of
* given class (potentially serializable), false otherwise (not
* serializable)
*/
  def canDeserialize[T: Manifest]: Boolean = {
    canDeserialize(constructType[T]) // uses the full (possibly parameterized) JavaType, unlike canSerialize
  }
/*
**********************************************************
* Extended Public API, deserialization,
* convenience methods
**********************************************************
*/
  /** Deserializes JSON content from the given file into a value of type `T`. */
  def readValue[T: Manifest](src: File): T = {
    readValue(src, typeReference[T])
  }
  /** Deserializes JSON content fetched from the given URL into a value of type `T`. */
  def readValue[T: Manifest](src: URL): T = {
    readValue(src, typeReference[T])
  }
  /** Deserializes the given JSON string into a value of type `T`. */
  def readValue[T: Manifest](content: String): T = {
    readValue(content, typeReference[T])
  }
  /** Deserializes JSON content from the given Reader into a value of type `T`. */
  def readValue[T: Manifest](src: Reader): T = {
    readValue(src, typeReference[T])
  }
  /** Deserializes JSON content from the given InputStream into a value of type `T`. */
  def readValue[T: Manifest](src: InputStream): T = {
    readValue(src, typeReference[T])
  }
  /** Deserializes JSON content from the given byte array into a value of type `T`. */
  def readValue[T: Manifest](src: Array[Byte]): T = {
    readValue(src, typeReference[T])
  }
  /** Deserializes JSON content from `len` bytes of `src` starting at `offset` into a value of type `T`. */
  def readValue[T: Manifest](src: Array[Byte], offset: Int, len: Int): T = {
    readValue(src, offset, len, typeReference[T])
  }
/*
**********************************************************
* Extended Public API: constructing ObjectWriters
* for more advanced configuration
**********************************************************
*/
/**
* Factory method for constructing [[com.fasterxml.jackson.databind.ObjectWriter]] that will
* serialize objects using specified JSON View (filter).
*/
  def writerWithView[T: Manifest]: ObjectWriter = {
    writerWithView(manifest[T].erasure) // a JSON View is identified by its class alone
  }
/**
* Factory method for constructing [[com.fasterxml.jackson.databind.ObjectWriter]] that will
* serialize objects using specified root type, instead of actual
* runtime type of value. Type must be a super-type of runtime
* type.
*/
  def writerWithType[T: Manifest]: ObjectWriter = {
    writerWithType(typeReference[T]) // full type reference so generic root types serialize correctly
  }
/*
**********************************************************
* Extended Public API: constructing ObjectReaders
* for more advanced configuration
**********************************************************
*/
/**
* Factory method for constructing [[com.fasterxml.jackson.databind.ObjectReader]] that will
* read or update instances of specified type
*/
  def reader[T: Manifest]: ObjectReader = {
    reader(typeReference[T]) // full type reference keeps T's type parameters
  }
/**
* Factory method for constructing [[com.fasterxml.jackson.databind.ObjectReader]] that will
* deserialize objects using specified JSON View (filter).
*/
  def readerWithView[T: Manifest]: ObjectReader = {
    readerWithView(manifest[T].erasure) // a JSON View is identified by its class alone
  }
/*
**********************************************************
* Extended Public API: convenience type conversion
**********************************************************
*/
/**
* Convenience method for doing two-step conversion from given value, into
* instance of given value type. This is functionality equivalent to first
* serializing given value into JSON, then binding JSON data into value
* of given type, but may be executed without fully serializing into
* JSON. Same converters (serializers, deserializers) will be used as for
* data binding, meaning same object mapper configuration works.
*
* @throws IllegalArgumentException If conversion fails due to incompatible type;
* if so, root cause will contain underlying checked exception data binding
* functionality threw
*/
  def convertValue[T: Manifest](fromValue: Any): T = {
    convertValue(fromValue, typeReference[T]) // two-step conversion; may skip materializing intermediate JSON text
  }
/*
**********************************************************
* Extended Public API: JSON Schema generation
**********************************************************
*/
/**
* Generate <a href="http://json-schema.org/">Json-schema</a>
* instance for specified class.
*
* @tparam T The class to generate schema for
* @return Constructed JSON schema.
*/
  def generateJsonSchema[T: Manifest]: JsonSchema = {
    generateJsonSchema(manifest[T].erasure) // schema generation works from the erased class
  }
/**
* Method for visiting type hierarchy for given type, using specified visitor.
* <p>
* This method can be used for things like
* generating <a href="http://json-schema.org/">Json Schema</a>
* instance for specified type.
*
* @tparam T Type to generate schema for (possibly with generic signature)
*
* @since 2.1
*/
  def acceptJsonFormatVisitor[T: Manifest](visitor: JsonFormatVisitorWrapper) {
    acceptJsonFormatVisitor(manifest[T].erasure, visitor) // visits the erased class's type hierarchy
  }
  private[this] def typeReference[T: Manifest] = new TypeReference[T] {
    override def getType = typeFromManifest(manifest[T]) // supply the full (possibly parameterized) Type, not just the erased class
  }
  private[this] def typeFromManifest(m: Manifest[_]): Type = {
    // A manifest with no type arguments is just its erased class.
    if (m.typeArguments.isEmpty) {m.erasure}
    else {
      // Otherwise synthesize a java.lang.reflect.ParameterizedType so Jackson
      // can see the type arguments (converted recursively) despite erasure.
      new ParameterizedType {
        def getRawType = m.erasure
        def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
        def getOwnerType = null // a Manifest carries no enclosing-type information
      }
    }
  }
} | OpenCoin/opencoin-issuer-scala | src/main/scala/com/fasterxml/jackson/module/scala/experimental/ScalaObjectMapper.scala | Scala | gpl-3.0 | 10,012 |
package com.jejking.rprng.gatling
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
import scala.util.Random
/**
* Load and stress test for a locally running RPRNG web service. The intention
* is to hammer it for a bit to see that it holds up and doesn't leak resources horribly
* and behaves quite consistently.
*/
class RprngLoadSimulation extends Simulation {
  // All scenarios hit a locally running service.
  val httpConf = http.baseURL("http://localhost:8080")
  // Random block sizes in [256, 1023] bytes.
  val byteBlockSizeFeeder = Iterator.continually(Map("blockSize" -> (Random.nextInt(768) + 256)))
  // Fetches a random-length block of bytes; only checks for HTTP 200.
  val bytesScn = scenario("bytes")
    .feed(byteBlockSizeFeeder)
    .exec(http("byte block")
      .get("/byte/block/${blockSize}")
      .check(status.is(200)))
  // size, count, min, max
  // Random query parameters for the collection endpoints: size in [1,10],
  // count in [1,10], min in [0,9], max in [100,199] (so min < max always holds).
  val collectionFeeder = Iterator.continually(Map("val" -> Map("size" -> (Random.nextInt(10) + 1),
    "count" -> (Random.nextInt(10) + 1),
    "min" -> Random.nextInt(10),
    "max" -> (Random.nextInt(100) + 100))))
  val intListScn = scenario("int list")
    .feed(collectionFeeder)
    .exec(http("int list")
      .get("/int/list")
      .queryParamMap("${val}")
      .check(status.is(200)))
  val intSetScn = scenario("int set")
    .feed(collectionFeeder)
    .exec(http("int set")
      .get("/int/set")
      .queryParamMap("${val}")
      .check(status.is(200)))
  val scnList = List(bytesScn, intListScn, intSetScn)
  // 100 users/sec per scenario for 30 minutes, globally throttled to ramp to
  // 300 requests/sec over 10 minutes and hold for 20 more.
  setUp(scnList.map(scn =>
    scn.inject(constantUsersPerSec(100) during (30 minutes))
      .protocols(httpConf)))
    .throttle(reachRps(300) in (10 minutes), holdFor(20 minutes))
    .maxDuration(30 minutes)
}
| jejking/rprng | gatling/src/test/scala/com/jejking/rprng/gatling/RprngLoadSimulation.scala | Scala | apache-2.0 | 2,027 |
package models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers.OAuth1Info
import models.daos.OAuth1InfoDAO._
import play.api.libs.concurrent.Execution.Implicits._
import scala.collection.mutable
import scala.concurrent.Future
/**
* The DAO to store the OAuth1 information.
*
* Note: Not thread safe, demo only.
*/
class OAuth1InfoDAO extends DelegableAuthInfoDAO[OAuth1Info] {

  /**
   * Finds the auth info which is linked with the specified login info.
   *
   * @param loginInfo The linked login info.
   * @return The retrieved auth info or None if no auth info could be retrieved for the given login info.
   */
  def find(loginInfo: LoginInfo): Future[Option[OAuth1Info]] = {
    Future.successful(data.get(loginInfo))
  }

  /**
   * Adds new auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be added.
   * @param authInfo The auth info to add.
   * @return The added auth info.
   */
  def add(loginInfo: LoginInfo, authInfo: OAuth1Info): Future[OAuth1Info] = {
    data += (loginInfo -> authInfo)
    Future.successful(authInfo)
  }

  /**
   * Updates the auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be updated.
   * @param authInfo The auth info to update.
   * @return The updated auth info.
   */
  def update(loginInfo: LoginInfo, authInfo: OAuth1Info): Future[OAuth1Info] = {
    data += (loginInfo -> authInfo)
    Future.successful(authInfo)
  }

  /**
   * Saves the auth info for the given login info.
   *
   * This method either adds the auth info if it doesn't exists or it updates the auth info
   * if it already exists.
   *
   * @param loginInfo The login info for which the auth info should be saved.
   * @param authInfo The auth info to save.
   * @return The saved auth info.
   */
  def save(loginInfo: LoginInfo, authInfo: OAuth1Info): Future[OAuth1Info] = {
    find(loginInfo).flatMap {
      // `Option` is covered exhaustively by Some/None; the former catch-all
      // case was unreachable dead code and has been removed.
      case Some(_) => update(loginInfo, authInfo)
      case None => add(loginInfo, authInfo)
    }
  }

  /**
   * Removes the auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be removed.
   * @return A future to wait for the process to be completed.
   */
  def remove(loginInfo: LoginInfo): Future[Unit] = {
    data -= loginInfo
    Future.successful(())
  }
}
/**
 * The companion object.
 */
object OAuth1InfoDAO {
  /**
   * The data store for the OAuth1 info.
   *
   * NOTE(review): this is global mutable state shared by every DAO instance,
   * with no synchronization — fine for the in-memory demo, not for production.
   */
  var data: mutable.HashMap[LoginInfo, OAuth1Info] = mutable.HashMap()
}
| glidester/play-silhouette-seed | app/models/daos/OAuth1InfoDAO.scala | Scala | apache-2.0 | 2,797 |
package org.langmeta.internal.io
import java.io.ByteArrayInputStream
import java.io.InputStream
import java.net.URI
import java.nio.charset.Charset
import java.nio.file.Paths
import org.langmeta.io._
object PlatformFIO_MARKER_REMOVED
| DavidDudson/scalameta | langmeta/langmeta/js/src/main/scala/org/langmeta/internal/io/PlatformFileIO.scala | Scala | bsd-3-clause | 1,960 |
/*
* Copyright (C) 2017 Michael Dippery <michael@monkey-robot.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mipadi.jupiter.math
import org.scalatest._
import com.mipadi.jupiter.math.Numeric._
// Unit tests for the Int/Long syntax enrichments provided by Numeric
// (divides, digits, divisors, isPrime, factorial, downto).
class NumericSpec extends FlatSpec with Matchers {
  "A long" should "return true if another long can divide evenly into it" in {
    (2L divides 6L) should be (true)
  }
  it should "return false if another long cannot divide evenly into it" in {
    (10L divides 18L) should be (false)
  }
  it should "return a sequence of its individual digits" in {
    123L.digits should be (List(1, 2, 3))
  }
  it should "return its divisors" in {
    24L.divisors should be (List(1, 2, 3, 4, 6, 8, 12))
  }
  it should "return true if it is prime" in {
    2L.isPrime should be (true)
    23L.isPrime should be (true)
  }
  it should "return false if it is not prime" in {
    // 1 is, by convention, not prime.
    1L.isPrime should be (false)
    1000L.isPrime should be (false)
  }
  it should "return its factorial" in {
    5L.factorial should be (120)
  }
  "An int" should "return true if another int can divide evenly into it" in {
    (2 divides 6) should be (true)
  }
  it should "return false if another int cannot divide evenly into it" in {
    (10 divides 18) should be (false)
  }
  it should "return a sequence of its individual digits" in {
    123.digits should be (List(1, 2, 3))
  }
  it should "return its divisors" in {
    24.divisors should be (List(1, 2, 3, 4, 6, 8, 12))
  }
  it should "return false if it is not prime" in {
    1.isPrime should be (false)
    1000.isPrime should be (false)
  }
  it should "return true if it is prime" in {
    2.isPrime should be (true)
    23.isPrime should be (true)
  }
  it should "return a reversed range" in {
    // downto is Int-only, hence no Long counterpart above.
    (10 downto 1).map(_.toInt) should be (List(10, 9, 8, 7, 6, 5, 4, 3, 2, 1))
  }
  it should "return its factorial" in {
    5.factorial should be (120)
  }
}
| mdippery/jupiter | src/test/scala/com/mipadi/jupiter/math/NumericSpec.scala | Scala | apache-2.0 | 2,440 |
package jigg.util
/*
Copyright 2013-2018 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Properties
import org.scalatest._
import scala.xml._
import _root_.jigg.pipeline.Annotation
import _root_.jigg.pipeline.BaseAnnotatorSpec
// Tests for TreesUtil.streeToNode, which converts a Penn-Treebank-style
// s-expression parse string plus a tokenized <sentence> element into a
// <parse> element of <span> nodes referencing the token ids.
class TreesUtilSpec extends BaseAnnotatorSpec {
  "streeToNode" should "convert a s-tree string to a Node object" in {
    val ex1 = "(NP (NN a))"
    val sent1 = <sentence><tokens>
      <token form="a" pos="NN" id="t1"/>
      </tokens></sentence>
    // Span ids are drawn from a global generator; reset so ids start at sp0.
    Annotation.ParseSpan.idGen.reset()
    val expected1 = <parse root={"sp0"} annotators={"x"}>
      <span id={"sp0"} symbol={"NP"} children={"t1"}/>
    </parse>
    TreesUtil.streeToNode(ex1, sent1, "x") should equal(expected1) (decided by sameElem)
    Annotation.ParseSpan.idGen.reset()
    val ex2 = "(S (NP (DT This)) (VP (VBN is) (NP (DT a) (NN cat))))"
    val sent2 = <sentence><tokens>
      <token form="This" pos="NN" id="t1"/>
      <token form="is" pos="VBN" id="t2"/>
      <token form="a" pos="DT" id="t3"/>
      <token form="cat" pos="NN" id="t4"/>
      </tokens></sentence>
    val expected2 = <parse root={"sp3"} annotators={"x"}>
      <span id="sp0" symbol="NP" children="t1"/>
      <span id="sp1" symbol="NP" children="t3 t4"/>
      <span id="sp2" symbol="VP" children="t2 sp1"/>
      <span id="sp3" symbol="S" children="sp0 sp2"/>
    </parse>
    TreesUtil.streeToNode(ex2, sent2, "x") should equal(expected2) (decided by sameElem)
  }
  it should "skip successive parenteses" in {
    // Redundant nested parentheses around a leaf must not create extra spans.
    val ex1 = "(NP (((NN a))))"
    val sent1 = <sentence><tokens>
      <token form="a" pos="NN" id="t1"/>
      </tokens></sentence>
    Annotation.ParseSpan.idGen.reset()
    val expected1 = <parse root={"sp0"} annotators={"x"}>
      <span id={"sp0"} symbol={"NP"} children={"t1"}/>
    </parse>
    TreesUtil.streeToNode(ex1, sent1, "x") should equal(expected1) (decided by sameElem)
  }
  it should "ignore newlines in a tree" in {
    Annotation.ParseSpan.idGen.reset()
    val ex2 = """(S
      (NP (DT This))
      (VP (VBN is) (NP (DT a) (NN cat))))"""
    val sent2 = <sentence><tokens>
      <token form="This" pos="NN" id="t1"/>
      <token form="is" pos="VBN" id="t2"/>
      <token form="a" pos="DT" id="t3"/>
      <token form="cat" pos="NN" id="t4"/>
      </tokens></sentence>
    val expected2 = <parse root={"sp3"} annotators={"x"}>
      <span id="sp0" symbol="NP" children="t1"/>
      <span id="sp1" symbol="NP" children="t3 t4"/>
      <span id="sp2" symbol="VP" children="t2 sp1"/>
      <span id="sp3" symbol="S" children="sp0 sp2"/>
    </parse>
    TreesUtil.streeToNode(ex2, sent2, "x") should equal(expected2) (decided by sameElem)
  }
  it should "recognize more than two children" in {
    Annotation.ParseSpan.idGen.reset()
    // The S span has three children: NP, VP and the sentence-final period.
    val ex = """(S (NP (PRP He)) (VP (VBD ate) (NN pizza)) (. .))"""
    val sent = <sentence id="s1" characterOffsetBegin="0" characterOffsetEnd="14">
      <tokens annotators="corenlp">
        <token characterOffsetEnd="2" characterOffsetBegin="0" id="t4" form="He" pos="PRP"/>
        <token characterOffsetEnd="6" characterOffsetBegin="3" id="t5" form="ate" pos="VBD"/>
        <token characterOffsetEnd="12" characterOffsetBegin="7" id="t6" form="pizza" pos="NN"/>
        <token characterOffsetEnd="14" characterOffsetBegin="13" id="t7" form="." pos="."/>
      </tokens>
    </sentence>
    val expected = <parse annotators="x" root="sp2">
      <span id="sp0" symbol="NP" children="t4"/>
      <span id="sp1" symbol="VP" children="t5 t6"/>
      <span id="sp2" symbol="S" children="sp0 sp1 t7"/>
    </parse>
    TreesUtil.streeToNode(ex, sent, "x") should equal(expected) (decided by sameElem)
  }
}
| mynlp/jigg | src/test/scala/jigg/util/TreesUtilSpec.scala | Scala | apache-2.0 | 4,131 |
package scalatags.rx
import java.util.concurrent.atomic.AtomicReference
import org.scalajs.dom
import org.scalajs.dom.Element
import org.scalajs.dom.ext._
import org.scalajs.dom.raw.Comment
import rx._
import scala.collection.immutable
import scala.language.implicitConversions
import scalatags.JsDom.all._
import scalatags.jsdom
import scalatags.rx.ext._
trait RxNodeInstances {

  /**
   * Renders an `Rx[String]` as a DOM text node whose contents are rewritten
   * in place whenever the signal changes.
   */
  implicit class rxStringFrag(v: Rx[String])(implicit val ctx: Ctx.Owner) extends jsdom.Frag {
    def render: dom.Text = {
      val node = dom.document.createTextNode(v.now)
      // The observer's lifetime is tied to the text node itself.
      v foreach { s => node.replaceData(0, node.length, s) } attachTo node
      node
    }
  }

  /**
   * Binds an `Rx` of a single element: the current element is appended once,
   * then swapped in place on every subsequent change.
   */
  implicit class bindRxElement[T <: dom.Element](e: Rx[T])(implicit val ctx: Ctx.Owner) extends Modifier {
    def applyTo(t: Element) = {
      val element = new AtomicReference(e.now)
      t.appendChild(element.get())
      e.triggerLater {
        val current = e.now
        val previous = element getAndSet current
        t.replaceChild(current, previous)
      } attachTo t
    }
  }

  /**
   * Binds an `Rx` of a collection of elements. An empty collection is
   * represented by a placeholder comment node so the insertion point in the
   * parent is never lost.
   */
  implicit class bindRxElements(e: Rx[immutable.Iterable[Element]])(implicit val ctx: Ctx.Owner) extends Modifier {
    def applyTo(t: Element) = {
      val nonEmpty = e.map { t => if (t.isEmpty) List(new Comment) else t }
      val fragments = new AtomicReference(nonEmpty.now)
      nonEmpty.now foreach t.appendChild
      nonEmpty triggerLater {
        // Bug fix: read from `nonEmpty` (which substitutes a placeholder
        // Comment for an empty collection) rather than the raw `e`. Reading
        // `e.now` could yield an empty `current`, which removed the anchor
        // node and made `previous.head` throw on the next update.
        val current = nonEmpty.now
        val previous = fragments getAndSet current
        val i = t.childNodes.indexOf(previous.head)
        if (i < 0) throw new IllegalStateException("Children changed")
        0 to (previous.size - 1) foreach (_ => t.removeChild(t.childNodes.item(i)))
        if (t.childNodes.length > i) {
          val next = t.childNodes.item(i)
          current foreach (t.insertBefore(_, next))
        } else {
          current foreach t.appendChild
        }
      }
    }
  }
}
| blstream/akka-viz | frontend/src/main/scala/scalatags/rx/nodes.scala | Scala | mit | 1,909 |
package domino
/**
 * Contains functionality related to watching OSGi bundles coming and going.
 *
 * The package object itself declares no members; it exists to carry this
 * package-level documentation.
 */
package object bundle_watching {
}
| helgoboss/domino | src/main/scala/domino/bundle_watching/package.scala | Scala | mit | 137 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import Attributed.blankSeq
import Configurations.Compile
import Keys._
object IvyConsole
{
	final val Name = "ivy-console"
	// Parses the remaining command-line arguments into dependencies/resolvers,
	// builds a throwaway project under the boot directory with those settings,
	// and hands off to `console-quick`.
	lazy val command =
		Command.command(Name) { state =>
			val Dependencies(managed, repos, unmanaged) = parseDependencies(state.remainingCommands, CommandSupport.logger(state))
			val base = new File(CommandSupport.bootDirectory(state), Name)
			IO.createDirectory(base)
			val (eval, structure) = Load.defaultLoad(state, base, CommandSupport.logger(state))
			val session = Load.initialSession(structure, eval)
			val extracted = Project.extract(session, structure)
			import extracted._
			// Feeders prepend (see parseArgument), so reverse to restore the
			// order the user supplied on the command line.
			val depSettings: Seq[Project.Setting[_]] = Seq(
				libraryDependencies ++= managed.reverse,
				resolvers ++= repos.reverse,
				unmanagedJars in Compile ++= Attributed blankSeq unmanaged.reverse,
				logLevel in Global := Level.Warn,
				showSuccess in Global := false
			)
			val append = Load.transformSettings(Load.projectScope(currentRef), currentRef.build, rootProject, depSettings)
			val newStructure = Load.reapply(session.original ++ append, structure)
			val newState = state.copy(remainingCommands = "console-quick" :: Nil)
			Project.setProject(session, newStructure, newState)
		}
	final case class Dependencies(managed: Seq[ModuleID], resolvers: Seq[Resolver], unmanaged: Seq[File])
	def parseDependencies(args: Seq[String], log: Logger): Dependencies = (Dependencies(Nil, Nil, Nil) /: args)( parseArgument(log) )
	// Classifies each argument: "name at url" -> resolver, "*.jar" -> unmanaged
	// jar, anything else -> managed dependency in group % name % version form.
	def parseArgument(log: Logger)(acc: Dependencies, arg: String): Dependencies =
		if(arg contains " at ")
			acc.copy(resolvers = parseResolver(arg) +: acc.resolvers)
		else if(arg endsWith ".jar")
			acc.copy(unmanaged = new File(arg) +: acc.unmanaged)
		else
			acc.copy(managed = parseManaged(arg, log) ++ acc.managed)
	// NOTE(review): throws a MatchError if the argument splits into more or
	// fewer than two parts around " at " — confirm inputs are pre-validated.
	private[this] def parseResolver(arg: String): MavenRepository =
	{
		val Array(name, url) = arg.split(" at ")
		new MavenRepository(name.trim, url.trim)
	}
	// group % name % version, with an optional double %% requesting
	// cross-versioning (captured by the second group).
	val DepPattern = """([^%]+)%(%?)([^%]+)%([^%]+)""".r
	def parseManaged(arg: String, log: Logger): Seq[ModuleID] =
		arg match
		{
			case DepPattern(group, cross, name, version) => ModuleID(group.trim, name.trim, version.trim, crossVersion = !cross.trim.isEmpty) :: Nil
			case _ => log.warn("Ignoring invalid argument '" + arg + "'"); Nil
		}
}
| ornicar/xsbt | main/IvyConsole.scala | Scala | bsd-3-clause | 2,399 |
package jp.co.dwango.s99
object P33 {
  // P33 of the S-99 problems: determine whether two positive integers are coprime.
  implicit class RichInt(self: Int) {
    // Two integers are coprime iff their greatest common divisor is 1 (gcd from P32).
    def isCoPrime(n: Int): Boolean = P32.gcd(self, n) == 1
  }
}
| dwango/S99 | src/main/scala/jp/co/dwango/s99/P33.scala | Scala | mit | 142 |
object Test {
  val xs: List[?] = List(1, 2, 3) // `?` is the Scala 3 wildcard type syntax
  val ys: Map[? <: AnyRef, ? >: Null] = Map() // wildcards with upper/lower bounds
}
package uk.gov.gds.location.importer.model
import org.specs2.mutable.Specification
import LocalAuthorities._
// Sanity checks over the static LocalAuthorities reference data and its
// lookup indexes (by custodian code and by GSS code).
class LocalAuthoritiesTests extends Specification {
  "Local Authorities" should {
    "have 380 entries" in {
      localAuthorities.size must beEqualTo(380)
    }
    "names should all match up" in {
      // Diagnostic pass: prints any ONS/OS name pairs that differ, to make a
      // failing run easier to debug before the assertion below fires.
      localAuthorities.foreach(la => {
        if(!la.onsName.equalsIgnoreCase(la.osName)) println("%s %s %s".format(la.onsName, la.osName, la.onsName.equalsIgnoreCase(la.osName)))
      })
      localAuthorities.map(la => la.onsName.equalsIgnoreCase(la.osName)).toList must not(contain(false))
    }
    "should be able to get an LA by custodian code" in {
      localAuthoritiesByCustodianCode("9051").gssCode must beEqualTo("S12000033") // Aberdeen
    }
    "should be able to get an LA by gss code" in {
      localAuthoritiesByGssCode("S12000033").custodianCode must beEqualTo("9051") // Aberdeen
    }
  }
}
| alphagov/location-data-importer | src/test/scala/uk/gov/gds/location/importer/model/LocalAuthoritiesTests.scala | Scala | mit | 933 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.textmining.models
/**
 * Represents an entry containing the number of documents in a table.
 * @param countedtable name of the table whose documents were counted
 * @param count the number of documents in that table
 */
case class WikipediaArticleCount(
    countedtable: String,
    count: BigInt
)
| bpn1/ingestion | src/main/scala/de/hpi/ingestion/textmining/models/WikipediaArticleCount.scala | Scala | apache-2.0 | 911 |
package org.unisonweb.util
import java.nio.ByteBuffer
import org.unisonweb.util.Text.Text
import scala.reflect.ClassTag
/**
* A source of bytes which can only be read in a streaming fashion.
* There is no backtracking or peeking; each read advances the cursor.
* The cursor position can be accessed via the `position` method.
*/
trait Source { self =>
  // todo: use a representation that supports 64-bit lengths, unlike Array
  // Reads exactly `n` bytes, advancing the cursor.
  def get(n: Int): Array[Byte]
  // Any nonzero byte decodes as `true`.
  def getBoolean: Boolean = getByte != 0
  def getByte: Byte
  def getInt: Int
  def getLong: Long
  /**
   * Uses the little-endian variable length encoding of unsigned integers:
   * https://developers.google.com/protocol-buffers/docs/encoding#varints
   */
  def getVarLong: Long = {
    val b = getByte
    // High bit clear: final (or only) byte; the value is the low 7 bits.
    if ((b & 0x80) == 0) b
    // High bit set: these 7 bits are the least significant group; the
    // recursive call supplies the more significant groups.
    else (getVarLong << 7) | (b & 0x7f)
  }
  /**
   * Uses the zigzag encoding for variable-length signed numbers, described at:
   * https://developers.google.com/protocol-buffers/docs/encoding#signed-integers
   * https://github.com/google/protobuf/blob/0400cca/java/core/src/main/java/com/google/protobuf/CodedOutputStream.java#L949-L952
   */
  def getVarSignedLong: Long = {
    val n = getVarLong
    // Undo zigzag: even values map to n/2, odd values to -(n/2) - 1.
    (n >>> 1) ^ -(n & 1)
  }
  def getDouble: Double
  // Number of bytes consumed so far.
  def position: Long
  // todo: use a representation that supports 64-bit lengths, unlike Array
  // A "framed" value is prefixed by its varint-encoded byte length.
  def getFramed: Array[Byte] = get(getVarLong.toInt)
  // todo: use a representation that supports 64-bit lengths, unlike String
  final def getString: String = {
    val bytes = getFramed
    new String(bytes, java.nio.charset.StandardCharsets.UTF_8)
  }
  final def getText: Text = Text.fromString(getString)
  // A leading presence byte selects the branch: 0 = None, otherwise Some,
  // in which case `a` decodes the payload.
  def getOption1[A](a: => A): Option[A] =
    if (getByte == 0) None
    else Some(a)
  def getOption[A](a: Source => A): Option[A] =
    getOption1(a(this))
  // Length-prefixed collections: a varint count followed by that many elements.
  def getFramedArray1[A:reflect.ClassTag](a: => A): Array[A] =
    Array.fill(getVarLong.toInt)(a)
  def getFramedArray[A:reflect.ClassTag](a: Source => A): Array[A] =
    getFramedArray1(a(this))
  def getFramedList1[A](a: => A): List[A] =
    List.fill(getVarLong.toInt)(a)
  def getFramedList[A](f: Source => A): List[A] =
    getFramedList1(f(this))
  def getFramedSequence1[A](a: => A): Sequence[A] =
    Sequence.fill(getVarLong)(a)
  // Streams elements until a terminator: each element is preceded by the
  // marker byte 111; a 0 byte ends the stream; anything else is an error.
  @annotation.tailrec
  final def foreachDelimited[A](decode1: => A)(each: A => Unit): Unit =
    getByte match {
      case 0 => ()
      case 111 =>
        each(decode1)
        foreachDelimited(decode1)(each)
      case b => sys.error("unknown byte in foreachDelimited: " + b)
    }
  // A view of this source limited to the next `m` bytes (clamped at 0);
  // reads that would cross the limit throw Source.Underflow. Note that a
  // multi-byte read must fit entirely within the remaining window.
  def take(m: Long): Source = new Source {
    val end = (self.position + m) max self.position
    def position = self.position
    def remaining = end - self.position
    def get(n: Int) =
      if (remaining >= n) self.get(n)
      else throw Source.Underflow()
    def getByte: Byte =
      if (remaining > 0) self.getByte
      else throw Source.Underflow()
    def getInt: Int =
      if (remaining > 3) self.getInt
      else throw Source.Underflow()
    def getLong: Long =
      if (remaining > 7) self.getLong
      else throw Source.Underflow()
    def getDouble: Double =
      if (remaining > 7) self.getDouble
      else throw Source.Underflow()
  }
}
object Source {
  case class Underflow() extends Throwable
  case class Invalidated() extends Throwable

  /**
   * Builds a `Source` over a sequence of byte chunks, feeding them through a
   * single reusable buffer of `bufferSize` bytes.
   */
  def fromChunks(bufferSize: Int)(chunks: Sequence[Array[Byte]]): Source = {
    val bb = java.nio.ByteBuffer.allocate(bufferSize)
    var rem = chunks
    bb.limit(0) // start "drained" so the first read triggers a refill
    Source.fromByteBuffer(bb, bb => rem.uncons match {
      case None => false
      case Some((chunk,chunks)) =>
        if (chunk.length <= bb.remaining()) {
          bb.put(chunk)
          rem = chunks
        }
        else { // need to split up chunk
          val (c1,c2) = chunk.splitAt(bb.remaining())
          bb.put(c1)
          rem = c2 +: chunks
        }
        true
    })
  }

  /** Extractor matching the exceptions that signal "buffer exhausted". */
  object BufferUnderflow {
    import java.nio.BufferUnderflowException
    def unapply(e: Throwable): Boolean = e match {
      case e : BufferUnderflowException => true
      case i : ArrayIndexOutOfBoundsException => true
      case _ => false
    }
  }

  /** Reads the entire file into memory and wraps it as a `Source`. */
  def fromFile(path: String): Source = {
    import java.nio.file.{Files, Paths}
    val byteArray = Files.readAllBytes(Paths.get(path))
    fromByteBuffer(ByteBuffer.wrap(byteArray), _ => false)
  }

  // `onEmpty` should return `false` if it has no more elements
  def fromByteBuffer(bb: ByteBuffer, onEmpty: ByteBuffer => Boolean): Source = new Source {
    bb.order(java.nio.ByteOrder.BIG_ENDIAN)
    var pos = 0L
    def position: Long = pos + bb.position().toLong

    // Moves any unread bytes to the front of the buffer, then asks `onEmpty`
    // to append fresh data, leaving the buffer flipped and ready for reading.
    def refill = {
      pos += bb.position()
      val unread =
        if (bb.remaining() > 0) {
          val unread = new Array[Byte](bb.remaining())
          // Bug fix: copy the pending bytes OUT of the buffer with `get`.
          // The previous code called `bb.put(unread)`, which wrote the
          // freshly-allocated zero array INTO the buffer, discarding the
          // unread data and later re-inserting zeros in its place.
          bb.get(unread)
          unread
        }
        else Array.empty[Byte]
      bb.clear()
      bb.put(unread)
      while (bb.remaining() > 0 && onEmpty(bb)) {}
      bb.flip()
    }

    def get(n: Int): Array[Byte] = getImpl(n, Array.empty[Byte])

    // Accumulates bytes across refills until `n` bytes have been read.
    // NOTE(review): if `onEmpty` stops producing while n > 0 this spins
    // rather than throwing Underflow — confirm callers guarantee enough input.
    @annotation.tailrec
    def getImpl(n: Int, acc: Array[Byte]): Array[Byte] = {
      if (n <= bb.remaining()) {
        val arr = new Array[Byte](n)
        bb.get(arr)
        if (acc.isEmpty) arr else acc ++ arr
      }
      else if (n > 0 && bb.remaining() == 0) { refill; getImpl(n, acc) }
      else { // n > bb.remaining()
        val hd = new Array[Byte](bb.remaining())
        bb.get(hd)
        getImpl(n - hd.length, acc ++ hd)
      }
    }

    // Primitive reads retry after a refill when the buffer runs dry
    // (including mid-value for the multi-byte reads).
    def getByte: Byte =
      try bb.get
      catch { case BufferUnderflow() => refill; getByte }

    def getInt: Int =
      try bb.getInt
      catch { case BufferUnderflow() => refill; getInt }

    def getLong: Long =
      try bb.getLong
      catch { case BufferUnderflow() => refill; getLong }

    def getDouble: Double =
      try bb.getDouble
      catch { case BufferUnderflow() => refill; getDouble }
  }

  /** Decodes the first 8 bytes of `bs` as a big-endian signed Long. */
  def readLong(bs: Array[Byte]): Long = {
    var result = 0L
    val N = java.lang.Long.BYTES
    var i = 0; while (i < N) {
      result <<= 8
      result |= (bs(i) & 0xFF)
      i += 1
    }
    result
  }

  // NOTE(review): these use a fixed 4-byte length prefix (getInt), unlike
  // Source#getFramed* which use varints — confirm the asymmetry is intended.
  def getFramedArray[A:ClassTag](src: Source)(f: Source => A): Array[A] = {
    val len = src.getInt
    Array.fill(len)(f(src))
  }
  def getFramedList[A](src: Source)(f: Source => A): List[A] = {
    val len = src.getInt
    List.fill(len)(f(src))
  }
}
| paulp/unison | runtime-jvm/main/src/main/scala/util/Source.scala | Scala | mit | 6,402 |
package com.aurelpaulovic.transaction.config
import com.aurelpaulovic.transaction.TransactionManager
import com.aurelpaulovic.transaction.BlockTransaction
trait TransactionConfig {
  // The transaction manager that will run blocks built from this configuration.
  val tm: TransactionManager
  // Accumulated configuration properties; `using` prepends to this list.
  val properties: List[TransactionConfigProperty]
  // `transBlock` is by-name so the body is only evaluated inside the transaction.
  def apply(transBlock: => Unit): Unit = (new BlockTransaction(this)).apply(transBlock)
  def tryExecute(transBlock: => Unit): Option[Boolean] = (new BlockTransaction(this)).tryExecute(transBlock)
  // Returns a new configuration extended with `property`; this config is unchanged.
  def using[T <: TransactionConfigProperty](property: T): TransactionConfig =
    new Configuration(tm, property :: properties)
  // Terminal form: the last property also carries the block, which is executed immediately.
  def using(last: LastConfigProperty): Option[Boolean] =
    (new Configuration(tm, last.property :: properties)) tryExecute (last.transBlock)
}
| AurelPaulovic/transactions-api | src/main/scala/com/aurelpaulovic/transaction/config/TransactionConfig.scala | Scala | apache-2.0 | 749 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Properties
import kafka.utils.TestUtils
import org.apache.kafka.common.Reconfigurable
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.config.{ConfigException, SslConfigs}
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import scala.collection.JavaConverters._
import scala.collection.Set
/**
 * Unit tests for `DynamicBrokerConfig`: dynamic config updates and views,
 * validation of invalid/non-reconfigurable configs, reconfigurable registration,
 * password config encoding/decoding and secret rotation, listener updates and
 * config synonym resolution. No broker is started; collaborators are mocked.
 */
class DynamicBrokerConfigTest extends JUnitSuite {

  @Test
  def testConfigUpdate(): Unit = {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val oldKeystore = "oldKs.jks"
    props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, oldKeystore)
    val config = KafkaConfig(props)
    val dynamicConfig = config.dynamicConfig
    assertSame(config, dynamicConfig.currentKafkaConfig)
    assertEquals(oldKeystore, config.values.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
    assertEquals(oldKeystore,
      config.valuesFromThisConfigWithPrefixOverride("listener.name.external.").get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
    assertEquals(oldKeystore, config.originalsFromThisConfig.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))

    (1 to 2).foreach { i =>
      val props1 = new Properties
      val newKeystore = s"ks$i.jks"
      props1.put(s"listener.name.external.${SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG}", newKeystore)
      dynamicConfig.updateBrokerConfig(0, props1)
      assertNotSame(config, dynamicConfig.currentKafkaConfig)

      // The listener-prefixed views must reflect the dynamically updated keystore.
      assertEquals(newKeystore,
        config.valuesWithPrefixOverride("listener.name.external.").get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(newKeystore,
        config.originalsWithPrefix("listener.name.external.").get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))

      // The un-prefixed views and the "FromThisConfig" views must still expose
      // the statically configured keystore.
      assertEquals(oldKeystore, config.getString(KafkaConfig.SslKeystoreLocationProp))
      assertEquals(oldKeystore, config.originals.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(oldKeystore, config.values.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(oldKeystore, config.originalsStrings.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(oldKeystore,
        config.valuesFromThisConfigWithPrefixOverride("listener.name.external.").get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(oldKeystore, config.originalsFromThisConfig.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      assertEquals(oldKeystore, config.valuesFromThisConfig.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
    }
  }

  @Test
  def testConfigUpdateWithSomeInvalidConfigs(): Unit = {
    val origProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    origProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS")
    val config = KafkaConfig(origProps)

    val validProps = Map(s"listener.name.external.${SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG}" -> "ks.p12")

    // Security configs may only be updated dynamically with a listener prefix.
    val securityPropsWithoutListenerPrefix = Map(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG -> "PKCS12")
    verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, securityPropsWithoutListenerPrefix)

    // Configs that are not dynamically reconfigurable at all must be rejected.
    val nonDynamicProps = Map(KafkaConfig.ZkConnectProp -> "somehost:2181")
    verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, nonDynamicProps)

    // Test update of configs with invalid type
    val invalidProps = Map(KafkaConfig.LogCleanerThreadsProp -> "invalid")
    verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, invalidProps)
  }

  @Test
  def testConfigUpdateWithReconfigurableValidationFailure(): Unit = {
    val origProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    origProps.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, "100000000")
    val config = KafkaConfig(origProps)
    val validProps = Map.empty[String, String]
    val invalidProps = Map(KafkaConfig.LogCleanerThreadsProp -> "20")

    // Shared validation used by both styles of reconfigurable below: the value
    // type-checks, but the custom validation rejects it.
    def validateLogCleanerConfig(configs: util.Map[String, _]): Unit = {
      val cleanerThreads = configs.get(KafkaConfig.LogCleanerThreadsProp).toString.toInt
      if (cleanerThreads <= 0 || cleanerThreads >= 5)
        throw new ConfigException(s"Invalid cleaner threads $cleanerThreads")
    }

    // Validation failure reported by a plugin-style Reconfigurable.
    val reconfigurable = new Reconfigurable {
      override def configure(configs: util.Map[String, _]): Unit = {}
      override def reconfigurableConfigs(): util.Set[String] = Set(KafkaConfig.LogCleanerThreadsProp).asJava
      override def validateReconfiguration(configs: util.Map[String, _]): Unit = validateLogCleanerConfig(configs)
      override def reconfigure(configs: util.Map[String, _]): Unit = {}
    }
    config.dynamicConfig.addReconfigurable(reconfigurable)
    verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, invalidProps)
    config.dynamicConfig.removeReconfigurable(reconfigurable)

    // Validation failure reported by a broker-internal BrokerReconfigurable.
    val brokerReconfigurable = new BrokerReconfigurable {
      override def reconfigurableConfigs: collection.Set[String] = Set(KafkaConfig.LogCleanerThreadsProp)
      override def validateReconfiguration(newConfig: KafkaConfig): Unit = validateLogCleanerConfig(newConfig.originals)
      override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = {}
    }
    config.dynamicConfig.addBrokerReconfigurable(brokerReconfigurable)
    verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, invalidProps)
  }

  @Test
  def testReconfigurableValidation(): Unit = {
    val origProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val config = KafkaConfig(origProps)
    // BrokerIdProp is not dynamically reconfigurable, so declaring it must be rejected;
    // unknown configs ("some.prop") are tolerated for forward compatibility.
    val invalidReconfigurableProps = Set(KafkaConfig.LogCleanerThreadsProp, KafkaConfig.BrokerIdProp, "some.prop")
    val validReconfigurableProps = Set(KafkaConfig.LogCleanerThreadsProp, KafkaConfig.LogCleanerDedupeBufferSizeProp, "some.prop")

    def createReconfigurable(configs: Set[String]) = new Reconfigurable {
      override def configure(configs: util.Map[String, _]): Unit = {}
      override def reconfigurableConfigs(): util.Set[String] = configs.asJava
      override def validateReconfiguration(configs: util.Map[String, _]): Unit = {}
      override def reconfigure(configs: util.Map[String, _]): Unit = {}
    }
    intercept[IllegalArgumentException] {
      config.dynamicConfig.addReconfigurable(createReconfigurable(invalidReconfigurableProps))
    }
    config.dynamicConfig.addReconfigurable(createReconfigurable(validReconfigurableProps))

    def createBrokerReconfigurable(configs: Set[String]) = new BrokerReconfigurable {
      override def reconfigurableConfigs: collection.Set[String] = configs
      override def validateReconfiguration(newConfig: KafkaConfig): Unit = {}
      override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = {}
    }
    intercept[IllegalArgumentException] {
      config.dynamicConfig.addBrokerReconfigurable(createBrokerReconfigurable(invalidReconfigurableProps))
    }
    config.dynamicConfig.addBrokerReconfigurable(createBrokerReconfigurable(validReconfigurableProps))
  }

  @Test
  def testSecurityConfigs(): Unit = {
    // Security configs are updatable per-broker with a listener prefix only;
    // cluster-default updates and un-prefixed updates must fail validation.
    def verifyUpdate(name: String, value: Object): Unit = {
      verifyConfigUpdate(name, value, perBrokerConfig = true, expectFailure = true)
      verifyConfigUpdate(s"listener.name.external.$name", value, perBrokerConfig = true, expectFailure = false)
      verifyConfigUpdate(name, value, perBrokerConfig = false, expectFailure = true)
      verifyConfigUpdate(s"listener.name.external.$name", value, perBrokerConfig = false, expectFailure = true)
    }

    verifyUpdate(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "ks.jks")
    verifyUpdate(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS")
    verifyUpdate(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "password")
    verifyUpdate(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "password")
  }

  /**
   * Validates and applies a single-config update. When `expectFailure` is set,
   * validation must throw and the subsequent (ZK-style) update must leave the
   * original value in place; otherwise the new value must take effect.
   */
  private def verifyConfigUpdate(name: String, value: Object, perBrokerConfig: Boolean, expectFailure: Boolean): Unit = {
    val configProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    configProps.put(KafkaConfig.PasswordEncoderSecretProp, "broker.secret")
    val config = KafkaConfig(configProps)
    val props = new Properties
    props.put(name, value)
    val oldValue = config.originals.get(name)

    def updateConfig() = {
      if (perBrokerConfig)
        config.dynamicConfig.updateBrokerConfig(0, config.dynamicConfig.toPersistentProps(props, perBrokerConfig))
      else
        config.dynamicConfig.updateDefaultConfig(props)
    }

    if (!expectFailure) {
      config.dynamicConfig.validate(props, perBrokerConfig)
      updateConfig()
      assertEquals(value, config.originals.get(name))
    } else {
      try {
        config.dynamicConfig.validate(props, perBrokerConfig)
        fail("Invalid config did not fail validation")
      } catch {
        case _: Exception => // expected exception
      }
      // Updates from ZK are applied best-effort: an invalid config is ignored.
      updateConfig()
      assertEquals(oldValue, config.originals.get(name))
    }
  }

  /**
   * Verifies the two entry points behave correctly for a mixed batch of configs:
   * `validate` (AdminClient path) must reject the whole batch, while
   * `updateBrokerConfig` (ZK path) must apply the valid configs and skip the
   * invalid ones.
   */
  private def verifyConfigUpdateWithInvalidConfig(config: KafkaConfig,
                                                  origProps: Properties,
                                                  validProps: Map[String, String],
                                                  invalidProps: Map[String, String]): Unit = {
    val props = new Properties
    validProps.foreach { case (k, v) => props.put(k, v) }
    invalidProps.foreach { case (k, v) => props.put(k, v) }

    // DynamicBrokerConfig#validate is used by AdminClient to validate the configs provided in
    // an AlterConfigs request. Validation should fail with an exception if any of the configs are invalid.
    try {
      config.dynamicConfig.validate(props, perBrokerConfig = true)
      fail("Invalid config did not fail validation")
    } catch {
      case _: ConfigException => // expected exception
    }

    // DynamicBrokerConfig#updateBrokerConfig is used to update configs from ZooKeeper during
    // startup and when configs are updated in ZK. Update should apply valid configs and ignore
    // invalid ones.
    config.dynamicConfig.updateBrokerConfig(0, props)
    validProps.foreach { case (name, value) => assertEquals(value, config.originals.get(name)) }
    invalidProps.keySet.foreach { name =>
      assertEquals(origProps.get(name), config.originals.get(name))
    }
  }

  @Test
  def testPasswordConfigEncryption(): Unit = {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val configWithoutSecret = KafkaConfig(props)
    props.put(KafkaConfig.PasswordEncoderSecretProp, "config-encoder-secret")
    val configWithSecret = KafkaConfig(props)
    val dynamicProps = new Properties
    dynamicProps.put(KafkaConfig.SaslJaasConfigProp, "myLoginModule required;")

    // Without password.encoder.secret, password configs cannot be encoded for
    // persistence. Fail explicitly if no exception is thrown (the original test
    // passed silently in that case).
    try {
      configWithoutSecret.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true)
      fail("Password config should not be encoded without password.encoder.secret")
    } catch {
      case _: ConfigException => // expected exception
    }

    // With a secret configured, the persisted value must be encoded and the
    // round-trip through fromPersistentProps must restore the original value.
    val persistedProps = configWithSecret.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true)
    assertFalse("Password not encoded",
      persistedProps.getProperty(KafkaConfig.SaslJaasConfigProp).contains("myLoginModule"))
    val decodedProps = configWithSecret.dynamicConfig.fromPersistentProps(persistedProps, perBrokerConfig = true)
    assertEquals("myLoginModule required;", decodedProps.getProperty(KafkaConfig.SaslJaasConfigProp))
  }

  @Test
  def testPasswordConfigEncoderSecretChange(): Unit = {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.SaslJaasConfigProp, "staticLoginModule required;")
    props.put(KafkaConfig.PasswordEncoderSecretProp, "config-encoder-secret")
    val config = KafkaConfig(props)
    val dynamicProps = new Properties
    dynamicProps.put(KafkaConfig.SaslJaasConfigProp, "dynamicLoginModule required;")

    val persistedProps = config.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true)
    assertFalse("Password not encoded",
      persistedProps.getProperty(KafkaConfig.SaslJaasConfigProp).contains("LoginModule"))
    config.dynamicConfig.updateBrokerConfig(0, persistedProps)
    assertEquals("dynamicLoginModule required;", config.values.get(KafkaConfig.SaslJaasConfigProp).asInstanceOf[Password].value)

    // New config with same secret should use the dynamic password config
    val newConfigWithSameSecret = KafkaConfig(props)
    newConfigWithSameSecret.dynamicConfig.updateBrokerConfig(0, persistedProps)
    assertEquals("dynamicLoginModule required;", newConfigWithSameSecret.values.get(KafkaConfig.SaslJaasConfigProp).asInstanceOf[Password].value)

    // New config with new secret should use the dynamic password config if new and old secrets are configured in KafkaConfig
    props.put(KafkaConfig.PasswordEncoderSecretProp, "new-encoder-secret")
    props.put(KafkaConfig.PasswordEncoderOldSecretProp, "config-encoder-secret")
    val newConfigWithNewAndOldSecret = KafkaConfig(props)
    newConfigWithNewAndOldSecret.dynamicConfig.updateBrokerConfig(0, persistedProps)
    // Fixed copy-paste bug: the original asserted on newConfigWithSameSecret here,
    // leaving the new+old secret path unverified.
    assertEquals("dynamicLoginModule required;", newConfigWithNewAndOldSecret.values.get(KafkaConfig.SaslJaasConfigProp).asInstanceOf[Password].value)

    // New config with new secret alone should revert to static password config since dynamic config cannot be decoded
    props.put(KafkaConfig.PasswordEncoderSecretProp, "another-new-encoder-secret")
    val newConfigWithNewSecret = KafkaConfig(props)
    newConfigWithNewSecret.dynamicConfig.updateBrokerConfig(0, persistedProps)
    assertEquals("staticLoginModule required;", newConfigWithNewSecret.values.get(KafkaConfig.SaslJaasConfigProp).asInstanceOf[Password].value)
  }

  @Test
  def testDynamicListenerConfig(): Unit = {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 9092)
    val oldConfig = KafkaConfig.fromProps(props)
    val kafkaServer = EasyMock.createMock(classOf[kafka.server.KafkaServer])
    EasyMock.expect(kafkaServer.config).andReturn(oldConfig).anyTimes()
    EasyMock.replay(kafkaServer)

    // Adding a new listener must pass validation against the current config.
    props.put(KafkaConfig.ListenersProp, "PLAINTEXT://hostname:9092,SASL_PLAINTEXT://hostname:9093")
    val newConfig = KafkaConfig(props)

    val dynamicListenerConfig = new DynamicListenerConfig(kafkaServer)
    dynamicListenerConfig.validateReconfiguration(newConfig)
  }

  @Test
  def testSynonyms(): Unit = {
    // Listener-prefixed security configs resolve to the prefixed name first,
    // then the base config name.
    assertEquals(List("listener.name.secure.ssl.keystore.type", "ssl.keystore.type"),
      DynamicBrokerConfig.brokerConfigSynonyms("listener.name.secure.ssl.keystore.type", matchListenerOverride = true))
    assertEquals(List("listener.name.sasl_ssl.plain.sasl.jaas.config", "sasl.jaas.config"),
      DynamicBrokerConfig.brokerConfigSynonyms("listener.name.sasl_ssl.plain.sasl.jaas.config", matchListenerOverride = true))
    assertEquals(List("some.config"),
      DynamicBrokerConfig.brokerConfigSynonyms("some.config", matchListenerOverride = true))
    assertEquals(List(KafkaConfig.LogRollTimeMillisProp, KafkaConfig.LogRollTimeHoursProp),
      DynamicBrokerConfig.brokerConfigSynonyms(KafkaConfig.LogRollTimeMillisProp, matchListenerOverride = true))
  }

  @Test
  def testDynamicConfigInitializationWithoutConfigsInZK(): Unit = {
    val zkClient = EasyMock.createMock(classOf[kafka.zk.KafkaZkClient])
    EasyMock.expect(zkClient.getEntityConfigs(EasyMock.anyString(), EasyMock.anyString())).andReturn(new java.util.Properties()).anyTimes()
    EasyMock.replay(zkClient)

    val oldConfig = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 9092))
    val dynamicBrokerConfig = new DynamicBrokerConfig(oldConfig)
    dynamicBrokerConfig.initialize(zkClient)
    dynamicBrokerConfig.addBrokerReconfigurable(new TestDynamicThreadPool)

    // TestDynamicThreadPool asserts the expected old/new thread counts.
    val newprops = new Properties()
    newprops.put(KafkaConfig.NumIoThreadsProp, "10")
    newprops.put(KafkaConfig.BackgroundThreadsProp, "100")
    dynamicBrokerConfig.updateBrokerConfig(0, newprops)
  }
}
/**
 * Test `BrokerReconfigurable` that asserts the thread-pool values seen during
 * validation and reconfiguration: the old config must hold the static defaults
 * while the new config must carry the dynamically applied values (10 IO threads,
 * 100 background threads).
 */
class TestDynamicThreadPool() extends BrokerReconfigurable {

  override def reconfigurableConfigs: Set[String] =
    DynamicThreadPool.ReconfigurableConfigs

  override def validateReconfiguration(newConfig: KafkaConfig): Unit = {
    assertEquals(10, newConfig.numIoThreads)
    assertEquals(100, newConfig.backgroundThreads)
  }

  override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = {
    // Before the update takes effect, the old config still carries the defaults.
    assertEquals(Defaults.NumIoThreads, oldConfig.numIoThreads)
    assertEquals(Defaults.BackgroundThreads, oldConfig.backgroundThreads)
    // The new config carries the dynamically updated values.
    assertEquals(10, newConfig.numIoThreads)
    assertEquals(100, newConfig.backgroundThreads)
  }
} | richhaase/kafka | core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala | Scala | apache-2.0 | 17956 |
/*
* Copyright (c) 2014-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.schemaddl
package jsonschema.json4s
// Scala
import scala.annotation.tailrec
// json4s
import org.json4s._
// This library
import jsonschema.Schema
import jsonschema.properties.ObjectProperty._
/**
 * json4s serializers for the object-related JSON Schema keywords:
 * `properties`, `additionalProperties`, `required` and `patternProperties`.
 *
 * Each serializer throws [[MappingException]] on malformed input, which is the
 * failure signal json4s expects from custom serializers.
 */
object ObjectSerializers {

  import ArraySerializers._
  import implicits._

  /** Returns all strings from `keys`, or `None` if any element is not a `JString`. */
  @tailrec private def allString(keys: List[JValue], acc: List[String] = Nil): Option[List[String]] = {
    keys match {
      case Nil => Some(acc.reverse)
      case JString(h) :: t => allString(t, h :: acc)
      case _ => None
    }
  }

  /**
   * Parses every value of `obj` as a JSON Schema, keyed by property name.
   * Shared by `properties` and `patternProperties`, which have the same shape.
   *
   * Throws [[MappingException]] (rather than the bare `NoSuchElementException`
   * that `Option.get` used to produce) when the object itself or any of its
   * values is not a valid schema, so json4s can handle the failure uniformly.
   */
  private def extractSchemaMap(obj: JObject, keyword: String): Map[String, Schema] =
    obj.extractOpt[Map[String, JObject]] match {
      case Some(fields) =>
        fields.map { case (key, v) =>
          Schema.parse(v: JValue) match {
            case Some(schema) => (key, schema)
            case None => throw new MappingException(s"Value of [$key] in $keyword isn't a valid JSON Schema")
          }
        }
      case None => throw new MappingException("Isn't " + keyword)
    }

  /** Serializer for the `properties` keyword: a map of property name to schema. */
  object PropertiesSerializer extends CustomSerializer[Properties](_ => (
    {
      case obj: JObject => Properties(extractSchemaMap(obj, "properties"))
      case x => throw new MappingException(x + " isn't properties")
    },

    {
      case Properties(fields) => JObject(fields.mapValues(Schema.normalize(_)).toList)
    }
    ))

  /** Serializer for `additionalProperties`: either a boolean or a nested schema. */
  object AdditionalPropertiesSerializer extends CustomSerializer[AdditionalProperties](_ => (
    {
      case JBool(bool) => AdditionalProperties.AdditionalPropertiesAllowed(bool)
      case obj: JObject => Schema.parse(obj: JValue) match {
        case Some(schema) => AdditionalProperties.AdditionalPropertiesSchema(schema)
        case None => throw new MappingException(obj + " isn't additionalProperties")
      }
      // Fixed error message: this case rejects anything that is neither a bool
      // nor an object, so name the keyword instead of claiming "isn't bool".
      case x => throw new MappingException(x + " isn't additionalProperties (must be bool or object)")
    },

    {
      case AdditionalProperties.AdditionalPropertiesAllowed(value) => JBool(value)
      case AdditionalProperties.AdditionalPropertiesSchema(value) => Schema.normalize(value)
    }
    ))

  /** Serializer for the `required` keyword: an array of property-name strings. */
  object RequiredSerializer extends CustomSerializer[Required](_ => (
    {
      case JArray(keys) => allString(keys) match {
        case Some(k) => Required(k)
        case None => throw new MappingException("required array can contain only strings")
      }
      // Fixed copy-pasted error message ("isn't bool"): required must be an array.
      case x => throw new MappingException(x + " isn't required (must be array of strings)")
    },

    {
      case Required(keys) => JArray(keys.map(JString))
    }
    ))

  /** Serializer for `patternProperties`: a map of regex pattern to schema. */
  object PatternPropertiesSerializer extends CustomSerializer[PatternProperties](_ => (
    {
      case obj: JObject => PatternProperties(extractSchemaMap(obj, "patternProperties"))
      case x => throw new MappingException(x + " isn't patternProperties")
    },

    {
      case PatternProperties(fields) => JObject(fields.mapValues(Schema.normalize(_)).toList)
    }
    ))
}
| snowplow/iglu | 0-common/schema-ddl/src/main/scala/com.snowplowanalytics/iglu.schemaddl/jsonschema/json4s/ObjectSerializers.scala | Scala | apache-2.0 | 3,530 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset to code snippets matching specific criteria and returns a sample of them, giving a quick, surface-level overview of the dataset's contents without deeper analysis.