code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// Presentation-compiler completion fixture: the /*!*/ marker denotes the point
// at which member completion is requested after `1.` — the code is
// intentionally syntactically incomplete and must not be "fixed".
object Snippet{val x = 123; 1 + 1./*!*/}
| felixmulder/scala | test/files/presentation/infix-completion/src/Snippet.scala | Scala | bsd-3-clause | 41 |
package monocle.std
import monocle.{Iso, PIso, PPrism, Prism}
import cats.data.{NonEmptyVector, OneAnd}
/** Single importable instance bundling the `NonEmptyVector` optics defined in [[NonEmptyVectorOptics]]. */
object nev extends NonEmptyVectorOptics
trait NonEmptyVectorOptics {

  /** Polymorphic [[Iso]] between a `NonEmptyVector` and its `OneAnd[Vector, *]` representation. */
  final def pNevToOneAnd[A, B]: PIso[NonEmptyVector[A], NonEmptyVector[B], OneAnd[Vector, A], OneAnd[Vector, B]] =
    PIso[NonEmptyVector[A], NonEmptyVector[B], OneAnd[Vector, A], OneAnd[Vector, B]](v =>
      OneAnd[Vector, A](v.head, v.tail)
    )(oa => NonEmptyVector(oa.head, oa.tail))

  /** Monomorphic variant of [[pNevToOneAnd]]. */
  final def nevToOneAnd[A]: Iso[NonEmptyVector[A], OneAnd[Vector, A]] =
    pNevToOneAnd[A, A]

  /** Polymorphic [[Iso]] identifying `Option[NonEmptyVector[A]]` with a plain `Vector[A]`
   * (`None` corresponds to the empty vector).
   */
  final def pOptNevToVector[A, B]: PIso[Option[NonEmptyVector[A]], Option[NonEmptyVector[B]], Vector[A], Vector[B]] =
    PIso[Option[NonEmptyVector[A]], Option[NonEmptyVector[B]], Vector[A], Vector[B]] {
      case Some(nev) => nev.toVector
      case None      => Vector.empty[A]
    }(v => NonEmptyVector.fromVector(v))

  /** Monomorphic variant of [[pOptNevToVector]]. */
  final def optNevToVector[A]: Iso[Option[NonEmptyVector[A]], Vector[A]] =
    pOptNevToVector[A, A]

  /** Polymorphic [[Prism]] from a `Vector` into its non-empty refinement;
   * an empty vector fails the match (yielding an empty vector back).
   */
  final def pVectorToNev[A, B]: PPrism[Vector[A], Vector[B], NonEmptyVector[A], NonEmptyVector[B]] =
    PPrism[Vector[A], Vector[B], NonEmptyVector[A], NonEmptyVector[B]](v =>
      NonEmptyVector.fromVector[A](v) match {
        case Some(nev) => Right(nev)
        case None      => Left(Vector.empty[B])
      }
    )(_.toVector)

  /** Monomorphic variant of [[pVectorToNev]]. */
  final def vectorToNev[A]: Prism[Vector[A], NonEmptyVector[A]] =
    pVectorToNev[A, A]
}
| julien-truffaut/Monocle | core/shared/src/main/scala/monocle/std/NonEmptyVector.scala | Scala | mit | 1,275 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.filewatcher
import java.io._
import java.nio.file.{ FileSystems, Path, WatchKey }
import java.nio.file.StandardWatchEventKinds._
import java.nio.file.WatchService
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.concurrent.Map
import scala.collection.immutable.Set
import scala.language.implicitConversions
import scala.util.control.NonFatal
import scala.util.{ Failure, Properties, Success, Try }
import org.slf4j.LoggerFactory
/** Handle for one watch registration: ties a watched `file` (or directory) and a
 * set of listeners to the [[FileWatchService]] that performs the monitoring.
 */
abstract class Watcher(val watcherId: UUID, val file: File, val listeners: Set[WatcherListener]) {
  /** The service that actually registers paths with the OS watch facility. */
  val fileWatchService: FileWatchService

  /** Register `file` with the service (base assumed present, hence `wasMissing = false`). */
  def watch(): Unit = fileWatchService.watch(file, listeners, false)

  /** Detach every observer belonging to this watcher, then interrupt the monitor thread. */
  def shutdown(): Unit = {
    fileWatchService.WatchKeyManager.removeObservers(watcherId)
    fileWatchService.monitorThread foreach (_.interrupt())
  }
}
/** Callback interface notified by the watch service about file-system events
 * under a `base` file or directory, filtered by file-name `extensions`.
 */
trait WatcherListener {
  /** Root file or directory this listener cares about. */
  val base: File
  /** Whether subdirectories of `base` are also of interest. */
  val recursive: Boolean
  /** Accepted file-name suffixes (e.g. ".scala"). */
  val extensions: scala.collection.Set[String]
  /** Identifier of the owning [[Watcher]]. */
  val watcherId: UUID

  // All callbacks default to no-ops so implementors override only what they need.
  // (The @deprecated("local", "") on parameters is a hack to silence unused-parameter warnings.)
  def fileCreated(@deprecated("local", "") f: File): Unit = {}
  def fileDeleted(@deprecated("local", "") f: File): Unit = {}
  def fileModified(@deprecated("local", "") f: File): Unit = {}
  def baseRegistered(): Unit = {}
  def baseRemoved(): Unit = {}
  def baseSubdirRemoved(@deprecated("local", "") f: File): Unit = {}
  def missingBaseRegistered(): Unit = {}
  def baseSubdirRegistered(@deprecated("local", "") f: File): Unit = {}
  def proxyRegistered(@deprecated("local", "") f: File): Unit = {}
  def existingFile(@deprecated("local", "") f: File): Unit = {}

  /** True when `f` lives under `base`'s path and ends with one of the accepted suffixes. */
  def isWatched(f: File) =
    f.getPath.startsWith(base.getPath) && extensions.exists(ext => f.getName.endsWith(ext))

  /** True when `f` is `base` itself or a path ancestor of it. */
  def isBaseAncestor(f: File) =
    base.getAbsolutePath.startsWith(f.getAbsolutePath)
}
// tested in FileWatcherSpec
class FileWatchService { self =>
private val log = LoggerFactory.getLogger(getClass)
/**
* The low priority thread used for checking the files being monitored.
*/
@volatile private[filewatcher] var monitorThread: Option[Thread] = None
/**
* A flag used to determine if the monitor thread should be running.
*/
@volatile private var shouldRun: Boolean = true
/**
* Construct a new Java7 WatchService
*/
var watchService: WatchService = null
implicit def keyToFile(k: WatchKey): File = k.watchable().asInstanceOf[Path].toFile
implicit def keyToCanonicalPath(k: WatchKey): String = k.watchable().asInstanceOf[Path].toFile.getCanonicalPath()
private def init(): Unit = {
log.debug("init watcher")
watchService = Try {
FileSystems.getDefault().newWatchService()
} match {
case Success(w) => w
case Failure(e) => throw new Exception("failed to create WatchService {}", e)
}
start()
}
/**
* Start a background monitoring thread
*/
private def start() = {
log.debug("start a background monitoring thread")
monitorThread match {
case Some(t) => log.warn(s"monitoring thread is already started")
case None => {
val thread = new Thread(
new Runnable {
override def run(): Unit =
try monitor()
catch {
case i: InterruptedException => // silently ignore
case NonFatal(e) =>
log.warn(s"caught an exception while monitoring", e)
}
}
)
thread.setName("FileWatchService-monitor")
thread.setDaemon(true)
thread.start()
monitorThread = Some(thread)
}
}
}
def watch(file: File, listeners: Set[WatcherListener], wasMissing: Boolean, retry: Int = 2): Unit = {
try {
if (file.isDirectory) {
registerDir(file, listeners, wasMissing, retry)
} else if (file.isFile) {
val fileBase = new File(file.getParent)
registerDir(fileBase, listeners, wasMissing, retry)
} else {
if (file.getParentFile.exists) {
registerDir(file.getParentFile, listeners, wasMissing, retry)
} else {
watch(file.getParentFile, listeners, wasMissing, retry)
}
}
} catch {
case e: Throwable =>
log.error(s"failed to watch ${file}")
}
}
def notifyExisting(dir: File, listeners: Set[WatcherListener]) =
for {
f <- dir.listFiles
if f.isFile
l <- listeners
if l.isWatched(f)
} { l.existingFile(f) }
def watchExistingSubdirs(dir: File, listeners: Set[WatcherListener]) =
if (listeners.exists(_.recursive))
for {
d <- dir.listFiles
if d.isDirectory
l <- listeners
} { watch(d, listeners, false) }
def registerDir(dir: File, listeners: Set[WatcherListener], wasMissing: Boolean, retry: Int = 2): Unit = {
if (wasMissing && listeners.exists { l => l.base == dir }) {
if (log.isTraceEnabled)
log.trace(s"delay ${dir} base registration")
Thread.sleep(100)
}
val observers = (listeners map { maybeBuildWatchKeyObserver(dir, _) }).flatten
if (log.isTraceEnabled)
log.trace(s"register ${dir} with WatchService")
if (!observers.isEmpty) {
val key: WatchKey = try {
dir.toPath.register(
watchService,
ENTRY_CREATE,
ENTRY_MODIFY,
ENTRY_DELETE
)
} catch {
case e: Throwable => {
if (retry < 0) {
log.warn("can not register. retrying..." + dir + " " + e)
Thread.sleep(50)
watch(dir, listeners, wasMissing, retry - 1)
}
throw new Exception(e)
}
}
notifyExisting(dir, listeners)
if (observers.exists {
case _: BaseObserver => true
case _: BaseSubdirObserver => true
case _: BaseFileObserver => true
case _ => false
})
watchExistingSubdirs(dir, listeners)
observers foreach {
case o: BaseObserver =>
if (wasMissing)
o.watcherListener.missingBaseRegistered()
else
o.watcherListener.baseRegistered()
case o: BaseFileObserver =>
if (wasMissing)
o.watcherListener.missingBaseRegistered()
else
o.watcherListener.baseRegistered()
case o: BaseSubdirObserver =>
o.watcherListener.baseSubdirRegistered(dir)
case o: ProxyObserver => o.watcherListener.proxyRegistered(dir)
}
observers foreach (WatchKeyManager.addObserver(key, _))
if (WatchKeyManager.hasProxy(key))
dir.listFiles.filter(f => (f.isDirectory || f.isFile))
.foreach(WatchKeyManager.maybeAdvanceProxy(key, _))
} else
log.warn("No listeners for {}. Skip registration.")
}
/**
* Wait for Java7 WatchService event and notify the listeners.
*/
private def monitor() = {
log.debug("start monitoring WatchService events")
while (continueMonitoring) {
Try { watchService.take() } match {
case Success(key) => {
if (WatchKeyManager.contains(key)) {
processEvents(key)
val isWindows = Properties.osName.startsWith("Windows")
// can not recover reliably from deleted base without delay
if (isWindows) Thread.sleep(1000)
else Thread.sleep(20)
if (!key.reset) {
if (log.isTraceEnabled)
log.trace("may be recover from deletion {}", keyToFile(key))
maybeRecoverFromDeletion(key)
}
} else if (log.isTraceEnabled)
log.trace(s"key {} is not managed by watcher yet", keyToFile(key))
}
case Failure(e) => {
log.error("unexpected WatchService take error. {}", e)
shouldRun = false
}
}
}
closeWatchService()
def processEvents(key: WatchKey) = {
for (event <- key.pollEvents.asScala) {
val kind = event.kind
val file = key.watchable.asInstanceOf[Path]
.resolve(event.context.asInstanceOf[Path]).toFile
if (kind == ENTRY_CREATE
&& file.isDirectory
&& WatchKeyManager.hasRecursive(key))
watch(file, WatchKeyManager.recListeners(key), false)
if (kind == ENTRY_CREATE)
WatchKeyManager.maybeAdvanceProxy(key, file)
val ls = WatchKeyManager.nonProxyListeners(key)
if (kind == ENTRY_CREATE)
ls filter { _.isWatched(file) } foreach (_.fileCreated(file))
if (kind == ENTRY_MODIFY)
ls filter { _.isWatched(file) } foreach (_.fileModified(file))
if (kind == ENTRY_DELETE) {
ls filter { _.isWatched(file) } foreach (_.fileDeleted(file))
for {
o <- WatchKeyManager.baseFileObservers(key)
if o.watcherListener.isWatched(file)
} {
WatchKeyManager.removeObserver(key, o)
o.watcherListener.baseRemoved()
watch(file, Set(o.watcherListener), true)
}
}
if (kind == OVERFLOW)
log.warn(s"overflow event for ${file}")
}
}
def maybeRecoverFromDeletion(key: WatchKey, retry: Int = 0): Unit = {
if (WatchKeyManager.hasBase(key)
|| WatchKeyManager.hasBaseFile(key)
|| WatchKeyManager.hasProxy(key)) {
if (log.isTraceEnabled)
log.trace("recover from deletion {}", keyToFile(key))
if (!key.mkdirs && !key.exists) {
if (retry <= 3) {
Thread.sleep(20)
log.error("retry re-create {} with parents", keyToFile(key))
maybeRecoverFromDeletion(key, retry + 1)
}
log.error("Unable to re-create {} with parents", keyToFile(key))
} else {
val listeners = WatchKeyManager.listeners(key)
val baseFileListeners = WatchKeyManager.baseFileListeners(key)
listeners foreach (_.baseRemoved())
baseFileListeners foreach (o => o.fileDeleted(o.base))
WatchKeyManager.removeKey(key)
watch(key, listeners, true)
}
} else if (WatchKeyManager.hasSubDir(key)) {
WatchKeyManager.keyFromFile(key.getParentFile) match {
case Some(p) => maybeRecoverFromDeletion(p)
case None => log.warn(s"can not find a parent key")
}
}
}
def continueMonitoring() =
(monitorThread match {
case Some(t) => if (t.isInterrupted) {
log.info("monitoring thread was interrupted")
false
} else true
case None => {
log.info("monitoring should run in a background thread")
false
}
}) && shouldRun
}
def closeWatchService() = {
try {
log.info("close WatchService")
shouldRun = false
watchService.close();
} catch {
case e: Throwable =>
log.error("failed to close WatchService {}", e);
}
}
def spawnWatcher(file: File, listeners: Set[WatcherListener]): Watcher = {
spawnWatcher(UUID.randomUUID(), file, listeners)
}
def spawnWatcher(uuid: UUID, file: File, listeners: Set[WatcherListener]) = {
if (log.isTraceEnabled)
log.trace(s"spawn ${uuid} watcher for ${file} base")
val w = new Watcher(uuid, file, listeners) {
val fileWatchService = self;
}
w.watch()
w
}
def maybeBuildWatchKeyObserver(f: File, l: WatcherListener): Option[WatchKeyObserver] = {
if (!f.isDirectory) {
log.warn("building a WatchKeyObserver for a non-existent {} doesn't make sense.", f)
return None
}
if (l.base == f)
Some(new BaseObserver(l))
else if (l.base.isFile && l.base.getParentFile == f)
Some(new BaseFileObserver(l))
else if (l.recursive && f.getAbsolutePath.startsWith(l.base.getAbsolutePath))
Some(new BaseSubdirObserver(l))
else if (l.base.getAbsolutePath.startsWith(f.getAbsolutePath))
Some(new ProxyObserver(l))
else {
log.warn(s"don't know what observer to create dir: ${f} for ${l.base.getAbsolutePath} base")
None
}
}
init()
case class BaseObserver(val watcherListener: WatcherListener) extends WatchKeyObserver {
override lazy val recursive = watcherListener.recursive
override val observerType = "BaseObserver"
}
case class BaseFileObserver(val watcherListener: WatcherListener) extends WatchKeyObserver {
val treatExistingAsNew = true
val recursive = false
override val observerType = "BaseFileObserver"
}
case class ProxyObserver(val watcherListener: WatcherListener) extends WatchKeyObserver {
val recursive = false
override val observerType = "ProxyObserver"
}
case class BaseSubdirObserver(val watcherListener: WatcherListener) extends WatchKeyObserver {
override lazy val recursive = watcherListener.recursive
override val observerType = "BaseSubdirObserver"
}
trait WatchKeyObserver {
val watcherListener: WatcherListener
val recursive: Boolean
val observerType: String
}
object WatchKeyManager {
val keymap: Map[WatchKey, Set[WatchKeyObserver]] = new ConcurrentHashMap().asScala
def contains(key: WatchKey) = {
keymap.contains(key)
}
@tailrec
def addObserver(key: WatchKey, o: WatchKeyObserver): Unit = {
val l = Set[WatchKeyObserver]()
val oldListeners = keymap.putIfAbsent(key, l).getOrElse(l)
val newListeners = oldListeners + o
val status = keymap.replace(key, oldListeners, newListeners)
if (!status) {
log.warn(s"retry adding ${o.observerType} to ${keyToFile(key)}")
addObserver(key, o)
}
}
@tailrec
def removeObserver(key: WatchKey, o: WatchKeyObserver, retry: Int = 2): Unit = {
keymap.get(key) match {
case Some(oldObservers) => {
val newObservers = oldObservers - o
if (newObservers.isEmpty) {
keymap.remove(key)
key.cancel()
} else if (!keymap.replace(key, oldObservers, newObservers))
if (retry > 0)
removeObserver(key, o)
else
log.warn("unable to remove an observer from {}", keyToFile(key))
}
case None => log.warn(s"watcher doesn't monitor ${keyToFile(key)}")
}
}
def maybeAdvanceProxy(key: WatchKey, createdFile: File) =
proxies(key) foreach (o =>
if (o.watcherListener.isBaseAncestor(createdFile))
if (createdFile.isDirectory || createdFile.isFile) {
removeObserver(key, o)
watch(createdFile, Set(o.watcherListener), true)
} else
log.warn("unable to advance a proxy {}", o))
def removeObservers(id: UUID) =
keymap.keys foreach (
key => {
val observers = keymap.get(key).getOrElse { Set() }
val unneeded = observers filter { _.watcherListener.watcherId == id }
val retained = observers filter { _.watcherListener.watcherId != id }
if (observers.size == 0 || unneeded.size == observers.size) {
key.cancel() // can hang, https://bugs.openjdk.java.net/browse/JDK-8029516
keymap.remove(key)
} else if (observers.size != retained.size)
if (!keymap.replace(key, observers, retained))
log.error(s"failed to remove ${unneeded.size} listeners from ${keyToFile(key)}")
}
)
def baseFileObservers(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: BaseFileObserver => true
case _ => false
}
def recListeners(key: WatchKey) =
listeners(key) filter { _.recursive }
def baseListeners(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: BaseObserver => true
case _ => false
} map { _.watcherListener }
def baseFileListeners(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: BaseFileObserver => true
case _ => false
} map { _.watcherListener }
def proxyListeners(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: ProxyObserver => true
case _ => false
} map { _.watcherListener }
def nonProxyListeners(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: ProxyObserver => false
case _ => true
} map { _.watcherListener }
def proxies(key: WatchKey) =
keymap getOrElse (key, Set()) filter {
case _: ProxyObserver => true
case _ => false
}
def listeners(key: WatchKey) =
keymap getOrElse (key, Set()) map { _.watcherListener }
def removeKey(key: WatchKey): Unit = {
key.cancel()
keymap.remove(key)
}
def hasRecursive(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists { _.recursive }
case None => false
}
def hasBase(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists {
case _: BaseObserver => true
case _ => false
}
case None => false
}
def hasSubDir(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists {
case _: BaseSubdirObserver => true
case _ => false
}
case None => false
}
def hasBaseFile(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists {
case _: BaseFileObserver => true
case _ => false
}
case None => false
}
def hasProxy(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists {
case _: ProxyObserver => true
case _ => false
}
case None => false
}
def hasBaseSubdir(key: WatchKey) =
keymap.get(key) match {
case Some(os) => os.exists {
case _: BaseSubdirObserver => true
case _ => false
}
case None => false
}
def totalKeyNum() =
keymap.keys.foldLeft(0) { (a, _) => a + 1 }
def keyFromFile(f: File): Option[WatchKey] =
keymap.keys.find { k => keyToFile(k).getAbsolutePath == f.getAbsolutePath }
}
}
| VlachJosef/ensime-server | core/src/main/scala/org/ensime/filewatcher/FileWatchService.scala | Scala | gpl-3.0 | 18,300 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.NotSerializableException
import java.util.Random
import org.apache.spark.LocalSparkContext._
import org.apache.spark.{SparkContext, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.rdd.RDD
/** Exercises the ClosureCleaner against the fixture objects/classes defined
 * below; each fixture captures a different combination of fields, locals and
 * outer references, and the expected sums are spelled out inline.
 */
class ClosureCleanerSuite extends SparkFunSuite {
  test("closures inside an object") {
    assert(TestObject.run() === 30) // 6 + 7 + 8 + 9
  }

  test("closures inside a class") {
    val obj = new TestClass
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }

  test("closures inside a class with no default constructor") {
    val obj = new TestClassWithoutDefaultConstructor(5)
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }

  test("closures that don't use fields of the outer class") {
    val obj = new TestClassWithoutFieldAccess
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }

  test("nested closures inside an object") {
    assert(TestObjectWithNesting.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
  }

  test("nested closures inside a class") {
    val obj = new TestClassWithNesting(1)
    assert(obj.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
  }

  // A top-level `return` in a closure would escape the closure's scope, so the
  // cleaner must reject it at cleaning time rather than at task run time.
  test("toplevel return statements in closures are identified at cleaning time") {
    intercept[ReturnStatementInClosureException] {
      TestObjectWithBogusReturns.run()
    }
  }

  test("return statements from named functions nested in closures don't raise exceptions") {
    val result = TestObjectWithNestedReturns.run()
    assert(result === 1)
  }

  test("user provided closures are actually cleaned") {

    // We use return statements as an indication that a closure is actually being cleaned
    // We expect closure cleaner to find the return statements in the user provided closures
    def expectCorrectException(body: => Unit): Unit = {
      try {
        body
      } catch {
        case rse: ReturnStatementInClosureException => // Success!
        case e @ (_: NotSerializableException | _: SparkException) =>
          fail(s"Expected ReturnStatementInClosureException, but got $e.\\n" +
            "This means the closure provided by user is not actually cleaned.")
      }
    }

    withSpark(new SparkContext("local", "test")) { sc =>
      val rdd = sc.parallelize(1 to 10)
      val pairRdd = rdd.map { i => (i, i) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMap(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMap(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFilter(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testSortBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testGroupBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testKeyBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitions(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitionsWithIndex(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitionsWithContext(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMapWith(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFilterWith(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForEachWith(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapWith(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions2(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions3(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions4(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeach(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartition(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduce(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testTreeReduce(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFold(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testAggregate(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testTreeAggregate(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testCombineByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testAggregateByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFoldByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKeyLocally(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapValues(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMapValues(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachAsync(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartitionAsync(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob1(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob2(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunApproximateJob(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testSubmitJob(sc) }
    }
  }

  test("createNullValue") {
    new TestCreateNullValue().run()
  }
}
// A non-serializable class we create in closures to make sure that we aren't
// keeping references to unneeded variables from our outer closures.
/** Value-equality helper: two instances compare equal iff they carry the same `id`.
 * It deliberately does NOT extend Serializable — the closure-cleaner tests use it
 * to verify that unneeded captured references are dropped before serialization.
 */
class NonSerializable(val id: Int = -1) {
  override def equals(other: Any): Boolean = {
    other match {
      case o: NonSerializable => id == o.id
      case _ => false
    }
  }

  // Fix: the original overrode equals without hashCode, breaking the
  // equals/hashCode contract (equal instances could land in different
  // hash buckets). Hash on the same field equality uses.
  override def hashCode: Int = id.hashCode
}
/** Fixture: a closure defined inside an object, capturing the local `x`.
 * `nonSer` is captured but unused — the suite passes only if the job still
 * runs, i.e. the unused non-serializable reference does not poison the closure.
 * Do not restructure: the closure's captured-variable layout is what is tested.
 */
object TestObject {
  def run(): Int = {
    var nonSer = new NonSerializable
    val x = 5
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + x).reduce(_ + _)  // (1+5)+(2+5)+(3+5)+(4+5) = 30
    }
  }
}
/** Fixture: the closure calls `getX`, so it references the enclosing instance
 * (`$outer`); the class is Serializable so the job can still ship it.
 */
class TestClass extends Serializable {
  var x = 5

  def getX: Int = x

  def run(): Int = {
    var nonSer = new NonSerializable  // captured but unused on purpose
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + getX).reduce(_ + _)  // 6 + 7 + 8 + 9 = 30
    }
  }
}
/** Fixture: same as [[TestClass]] but the captured state comes from a
 * constructor parameter, so the class has no no-arg constructor — exercises
 * the cleaner's handling of such outer instances.
 */
class TestClassWithoutDefaultConstructor(x: Int) extends Serializable {
  def getX: Int = x

  def run(): Int = {
    var nonSer = new NonSerializable  // captured but unused on purpose
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + getX).reduce(_ + _)  // 6 + 7 + 8 + 9 = 30
    }
  }
}
// This class is not serializable, but we aren't using any of its fields in our
// closures, so they won't have a $outer pointing to it and should still work.
// This class is deliberately NOT serializable, but the closure only uses the
// local `x`, never a field — so no $outer reference should survive cleaning
// and the job must still succeed.
class TestClassWithoutFieldAccess {
  var nonSer = new NonSerializable

  def run(): Int = {
    var nonSer2 = new NonSerializable  // local, unused — must be droppable
    var x = 5
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + x).reduce(_ + _)  // 6 + 7 + 8 + 9 = 30
    }
  }
}
/** Fixture whose closure contains a top-level `return` — illegal because it
 * would transfer control out of the closure; the suite expects the cleaner to
 * raise ReturnStatementInClosureException for it.
 */
object TestObjectWithBogusReturns {
  def run(): Int = {
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      // this return is invalid since it will transfer control outside the closure
      nums.map {x => return 1 ; x * 2}
      1
    }
  }
}
/** Fixture whose closure contains a `return` only inside a *named* nested
 * function — that return stays local to `foo`, so the cleaner must accept it.
 */
object TestObjectWithNestedReturns {
  def run(): Int = {
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map {x =>
        // this return is fine since it will not transfer control outside the closure
        def foo(): Int = { return 5; 1 }
        foo()
      }
      1
    }
  }
}
/** Fixture with closures nested in a loop inside an object: the inner closure
 * captures both the per-iteration `x` and the outer `y`.
 * Expected total: 4 iterations of sum(nums) + 4*x + 4*y
 *   = 4*(1+2+3+4) + 4*(1+2+3+4) + 16*1 = 96 (see suite assertion).
 */
object TestObjectWithNesting {
  def run(): Int = {
    var nonSer = new NonSerializable  // captured but unused on purpose
    var answer = 0
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      var y = 1
      for (i <- 1 to 4) {
        var nonSer2 = new NonSerializable  // per-iteration unused capture
        var x = i
        answer += nums.map(_ + x + y).reduce(_ + _)
      }
      answer
    }
  }
}
/** Fixture with closures nested in a loop inside a class: the inner closure
 * captures the per-iteration `x` and reaches the field `y` via `getY`
 * (so the serializable outer instance is required).
 * Expected total with y=1: 4*(1+2+3+4) + 4*(1+2+3+4) + 16*1 = 96.
 */
class TestClassWithNesting(val y: Int) extends Serializable {
  def getY: Int = y

  def run(): Int = {
    var nonSer = new NonSerializable  // captured but unused on purpose
    var answer = 0
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      for (i <- 1 to 4) {
        var nonSer2 = new NonSerializable  // per-iteration unused capture
        var x = i
        answer += nums.map(_ + x + getY).reduce(_ + _)
      }
      answer
    }
  }
}
/**
* Test whether closures passed in through public APIs are actually cleaned.
*
* We put a return statement in each of these closures as a mechanism to detect whether the
* ClosureCleaner actually cleaned our closure. If it did, then it would throw an appropriate
* exception explicitly complaining about the return statement. Otherwise, we know the
* ClosureCleaner did not actually clean our closure, in which case we should fail the test.
*/
private object TestUserClosuresActuallyCleaned {
  // Every closure below deliberately contains a bare `return`: if the public
  // API cleans the closure, the cleaner throws ReturnStatementInClosureException
  // (which the suite expects); if the API forgot to clean, some other error (or
  // none) surfaces and the suite fails. Do not remove the returns.

  // Basic RDD transformations.
  def testMap(rdd: RDD[Int]): Unit = { rdd.map { _ => return; 0 }.count() }
  def testFlatMap(rdd: RDD[Int]): Unit = { rdd.flatMap { _ => return; Seq() }.count() }
  def testFilter(rdd: RDD[Int]): Unit = { rdd.filter { _ => return; true }.count() }
  def testSortBy(rdd: RDD[Int]): Unit = { rdd.sortBy { _ => return; 1 }.count() }
  def testKeyBy(rdd: RDD[Int]): Unit = { rdd.keyBy { _ => return; 1 }.count() }
  def testGroupBy(rdd: RDD[Int]): Unit = { rdd.groupBy { _ => return; 1 }.count() }
  def testMapPartitions(rdd: RDD[Int]): Unit = { rdd.mapPartitions { it => return; it }.count() }
  def testMapPartitionsWithIndex(rdd: RDD[Int]): Unit = {
    rdd.mapPartitionsWithIndex { (_, it) => return; it }.count()
  }
  // The *With variants are deprecated RDD APIs kept here for coverage.
  def testFlatMapWith(rdd: RDD[Int]): Unit = {
    rdd.flatMapWith ((index: Int) => new Random(index + 42)){ (_, it) => return; Seq() }.count()
  }
  def testMapWith(rdd: RDD[Int]): Unit = {
    rdd.mapWith ((index: Int) => new Random(index + 42)){ (_, it) => return; 0 }.count()
  }
  def testFilterWith(rdd: RDD[Int]): Unit = {
    rdd.filterWith ((index: Int) => new Random(index + 42)){ (_, it) => return; true }.count()
  }
  def testForEachWith(rdd: RDD[Int]): Unit = {
    rdd.foreachWith ((index: Int) => new Random(index + 42)){ (_, it) => return }
  }
  def testMapPartitionsWithContext(rdd: RDD[Int]): Unit = {
    rdd.mapPartitionsWithContext { (_, it) => return; it }.count()
  }
  def testZipPartitions2(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd) { case (it1, it2) => return; it1 }.count()
  }
  def testZipPartitions3(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd, rdd) { case (it1, it2, it3) => return; it1 }.count()
  }
  def testZipPartitions4(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd, rdd, rdd) { case (it1, it2, it3, it4) => return; it1 }.count()
  }
  // Actions and aggregations.
  def testForeach(rdd: RDD[Int]): Unit = { rdd.foreach { _ => return } }
  def testForeachPartition(rdd: RDD[Int]): Unit = { rdd.foreachPartition { _ => return } }
  def testReduce(rdd: RDD[Int]): Unit = { rdd.reduce { case (_, _) => return; 1 } }
  def testTreeReduce(rdd: RDD[Int]): Unit = { rdd.treeReduce { case (_, _) => return; 1 } }
  def testFold(rdd: RDD[Int]): Unit = { rdd.fold(0) { case (_, _) => return; 1 } }
  def testAggregate(rdd: RDD[Int]): Unit = {
    rdd.aggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
  }
  def testTreeAggregate(rdd: RDD[Int]): Unit = {
    rdd.treeAggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
  }

  // Test pair RDD functions
  def testCombineByKey(rdd: RDD[(Int, Int)]): Unit = {
    rdd.combineByKey(
      { _ => return; 1 }: Int => Int,
      { case (_, _) => return; 1 }: (Int, Int) => Int,
      { case (_, _) => return; 1 }: (Int, Int) => Int
    ).count()
  }
  def testAggregateByKey(rdd: RDD[(Int, Int)]): Unit = {
    rdd.aggregateByKey(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 }).count()
  }
  def testFoldByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.foldByKey(0) { case (_, _) => return; 1 } }
  def testReduceByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.reduceByKey { case (_, _) => return; 1 } }
  def testReduceByKeyLocally(rdd: RDD[(Int, Int)]): Unit = {
    rdd.reduceByKeyLocally { case (_, _) => return; 1 }
  }
  def testMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.mapValues { _ => return; 1 } }
  def testFlatMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.flatMapValues { _ => return; Seq() } }

  // Test async RDD actions
  def testForeachAsync(rdd: RDD[Int]): Unit = { rdd.foreachAsync { _ => return } }
  def testForeachPartitionAsync(rdd: RDD[Int]): Unit = { rdd.foreachPartitionAsync { _ => return } }

  // Test SparkContext runJob
  def testRunJob1(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.runJob(rdd, { (ctx: TaskContext, iter: Iterator[Int]) => return; 1 } )
  }
  def testRunJob2(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.runJob(rdd, { iter: Iterator[Int] => return; 1 } )
  }
  def testRunApproximateJob(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    val evaluator = new CountEvaluator(1, 0.5)
    sc.runApproximateJob(
      rdd, { (ctx: TaskContext, iter: Iterator[Int]) => return; 1L }, evaluator, 1000)
  }
  def testSubmitJob(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.submitJob(
      rdd,
      { _ => return; 1 }: Iterator[Int] => Int,
      Seq.empty,
      { case (_, _) => return }: (Int, Int) => Unit,
      { return }
    )
  }
}
/** Fixture for the "createNullValue" test: captures every primitive type in a
 * nested closure so the cleaner has to synthesize a correct null/zero value
 * for each primitive slot of the closure constructor.
 */
class TestCreateNullValue {
  var x = 5

  def getX: Int = x

  def run(): Unit = {
    val bo: Boolean = true
    val c: Char = '1'
    val b: Byte = 1
    val s: Short = 1
    val i: Int = 1
    val l: Long = 1
    val f: Float = 1
    val d: Double = 1
    // Bring in all primitive types into the closure such that they become
    // parameters of the closure constructor. This allows us to test whether
    // null values are created correctly for each type.
    val nestedClosure = () => {
      if (s.toString == "123") { // Don't really output them to avoid noisy
        println(bo)
        println(c)
        println(b)
        println(s)
        println(i)
        println(l)
        println(f)
        println(d)
      }
      // The inner closure also references the outer instance via getX.
      val closure = () => {
        println(getX)
      }
      ClosureCleaner.clean(closure)
    }
    nestedClosure()
  }
}
| andrewor14/iolap | core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala | Scala | apache-2.0 | 15,402 |
package dotty.tools
package dotc
package core
import Names._
import NameOps._
import StdNames._
import NameTags._
import Contexts.Context
import collection.mutable
import scala.annotation.internal.sharable
/** Defines possible kinds of NameInfo of a derived name */
object NameKinds {
// These are sharable since all NameKinds are created eagerly at the start of the program
// before any concurrent threads are forked. for this to work, NameKinds should never
// be created lazily or in modules that start running after compilers are forked.
@sharable private val simpleNameKinds = new mutable.HashMap[Int, ClassifiedNameKind]
@sharable private val qualifiedNameKinds = new mutable.HashMap[Int, QualifiedNameKind]
@sharable private val numberedNameKinds = new mutable.HashMap[Int, NumberedNameKind]
@sharable private val uniqueNameKinds = new mutable.HashMap[String, UniqueNameKind]
/** A class for the info stored in a derived name */
  /** A class for the info stored in a derived name.
   * `mkString` renders the derived name given its underlying prefix;
   * `map` defaults to identity since most infos carry no embedded names.
   */
  abstract class NameInfo {
    def kind: NameKind
    def mkString(underlying: TermName): String
    def map(f: SimpleName => SimpleName): NameInfo = this
  }
/** An abstract base class of classes that define the kind of a derived name info */
abstract class NameKind(val tag: Int) { self =>
/** The info class defined by this kind */
type ThisInfo <: Info
/** A simple info type; some subclasses of Kind define more refined versions */
class Info extends NameInfo { this: ThisInfo =>
def kind: NameKind = self
def mkString(underlying: TermName): String = self.mkString(underlying, this)
override def toString: String = infoString
}
/** Does this kind define logically a new name (respectively qualified name)?
* Tested by the `replace` and `collect` combinators of class `Name`.
*/
def definesNewName: Boolean = false
def definesQualifiedName: Boolean = false
/** Unmangle simple name `name` into a name of this kind, or return
* original name if this is not possible.
*/
def unmangle(name: SimpleName): TermName = name
/** Turn a name of this kind consisting of an `underlying` prefix
* and the given `info` into a string. Used to turn structured into
* simple name.
*/
def mkString(underlying: TermName, info: ThisInfo): String
/** A string used for displaying the structure of a name */
def infoString: String
}
object SimpleNameKind extends NameKind(UTF8) { self =>
type ThisInfo = Info
val info: Info = new Info
def mkString(underlying: TermName, info: ThisInfo): Nothing = unsupported("mkString")
def infoString: Nothing = unsupported("infoString")
}
/** The kind of names that add a simple classification to an underlying name.
*/
abstract class ClassifiedNameKind(tag: Int, val infoString: String) extends NameKind(tag) {
type ThisInfo = Info
val info: Info = new Info
/** Build a new name of this kind from an underlying name */
def apply(underlying: TermName): TermName = underlying.derived(info)
/** Extractor operation for names of this kind */
def unapply(name: DerivedName): Option[TermName] = name match {
case DerivedName(underlying, `info`) => Some(underlying)
case _ => None
}
simpleNameKinds(tag) = this
}
/** The kind of names that get formed by adding a prefix to an underlying name */
class PrefixNameKind(tag: Int, prefix: String, optInfoString: String = "")
extends ClassifiedNameKind(tag, if (optInfoString.isEmpty) s"Prefix $prefix" else optInfoString) {
def mkString(underlying: TermName, info: ThisInfo): String =
underlying.qualToString(_.toString, n => prefix + n.toString)
override def unmangle(name: SimpleName): TermName =
if (name.startsWith(prefix)) apply(name.drop(prefix.length).asSimpleName)
else name
}
/** The kind of names that get formed by appending a suffix to an underlying name */
class SuffixNameKind(tag: Int, suffix: String, optInfoString: String = "")
extends ClassifiedNameKind(tag, if (optInfoString.isEmpty) s"Suffix $suffix" else optInfoString) {
def mkString(underlying: TermName, info: ThisInfo): String =
underlying.qualToString(_.toString, n => n.toString + suffix)
override def unmangle(name: SimpleName): TermName =
if (name.endsWith(suffix)) apply(name.take(name.length - suffix.length).asSimpleName)
else name
}
/** A base trait for infos that define an additional selector name */
trait QualifiedInfo extends NameInfo {
val name: SimpleName
}
/** The kind of qualified names, consisting of an underlying name as a prefix,
* followed by a separator, followed by a simple selector name.
*
* A qualified names always constitutes a new name, different from its underlying name.
*/
class QualifiedNameKind(tag: Int, val separator: String)
extends NameKind(tag) {
type ThisInfo = QualInfo
case class QualInfo(name: SimpleName) extends Info with QualifiedInfo {
override def map(f: SimpleName => SimpleName): NameInfo = new QualInfo(f(name))
override def toString: String = s"$infoString $name"
}
def apply(qual: TermName, name: SimpleName): TermName =
qual.derived(new QualInfo(name))
/** Overloaded version used only for ExpandedName and TraitSetterName.
* Needed because the suffix of an expanded name may itself be expanded.
* For example, look at javap of scala.App.initCode
*/
def apply(qual: TermName, name: TermName): TermName = name replace {
case name: SimpleName => apply(qual, name)
case AnyQualifiedName(_, _) => apply(qual, name.toSimpleName)
}
def unapply(name: DerivedName): Option[(TermName, SimpleName)] = name match {
case DerivedName(qual, info: this.QualInfo) => Some((qual, info.name))
case _ => None
}
override def definesNewName: Boolean = true
override def definesQualifiedName: Boolean = true
def mkString(underlying: TermName, info: ThisInfo): String =
s"$underlying$separator${info.name}"
def infoString: String = s"Qualified $separator"
qualifiedNameKinds(tag) = this
}
/** An extractor for qualified names of an arbitrary kind */
object AnyQualifiedName {
def unapply(name: DerivedName): Option[(TermName, SimpleName)] = name match {
case DerivedName(qual, info: QualifiedInfo) =>
Some((name.underlying, info.name))
case _ => None
}
}
/** A base trait for infos that contain a number */
trait NumberedInfo extends NameInfo {
def num: Int
}
/** The kind of numbered names consisting of an underlying name and a number */
abstract class NumberedNameKind(tag: Int, val infoString: String) extends NameKind(tag) { self =>
type ThisInfo = NumberedInfo
case class NumberedInfo(val num: Int) extends Info with NameKinds.NumberedInfo {
override def toString: String = s"$infoString $num"
}
def apply(qual: TermName, num: Int): TermName =
qual.derived(new NumberedInfo(num))
def unapply(name: DerivedName): Option[(TermName, Int)] = name match {
case DerivedName(underlying, info: this.NumberedInfo) => Some((underlying, info.num))
case _ => None
}
protected def skipSeparatorAndNum(name: SimpleName, separator: String): Int = {
var i = name.length
while (i > 0 && name(i - 1).isDigit) i -= 1
if (i > separator.length && i < name.length &&
name.slice(i - separator.length, i).toString == separator) i
else -1
}
numberedNameKinds(tag) = this
}
/** An extractor for numbered names of arbitrary kind */
object AnyNumberedName {
def unapply(name: DerivedName): Option[(TermName, Int)] = name match {
case DerivedName(qual, info: NumberedInfo) => Some((qual, info.num))
case _ => None
}
}
/** The kind of unique names that consist of an underlying name (can be empty),
* a separator indicating the class of unique name, and a unique number.
*
* A unique names always constitutes a new name, different from its underlying name.
*/
case class UniqueNameKind(val separator: String)
extends NumberedNameKind(UNIQUE, s"Unique $separator") {
override def definesNewName: Boolean = true
def mkString(underlying: TermName, info: ThisInfo): String = {
val safePrefix = str.sanitize(underlying.toString) + separator
safePrefix + info.num
}
/** Generate fresh unique term name of this kind with given prefix name */
def fresh(prefix: TermName = EmptyTermName)(implicit ctx: Context): TermName =
ctx.compilationUnit.freshNames.newName(prefix, this)
/** Generate fresh unique type name of this kind with given prefix name */
def fresh(prefix: TypeName)(implicit ctx: Context): TypeName =
fresh(prefix.toTermName).toTypeName
uniqueNameKinds(separator) = this
}
/** An extractor for unique names of arbitrary kind */
object AnyUniqueName {
def unapply(name: DerivedName): Option[(TermName, String, Int)] = name match {
case DerivedName(qual, info: NumberedInfo) =>
info.kind match {
case unique: UniqueNameKind => Some((qual, unique.separator, info.num))
case _ => None
}
case _ => None
}
}
/** Names of the form `prefix . name` */
val QualifiedName: QualifiedNameKind = new QualifiedNameKind(QUALIFIED, ".")
/** Names of the form `prefix $ name` that are constructed as a result of flattening */
val FlatName: QualifiedNameKind = new QualifiedNameKind(FLATTENED, "$")
/** Names of the form `prefix $ name` that are prefixes of expanded names */
val ExpandPrefixName: QualifiedNameKind = new QualifiedNameKind(EXPANDPREFIX, "$")
/** Expanded names of the form `prefix $$ name`. */
val ExpandedName: QualifiedNameKind = new QualifiedNameKind(EXPANDED, str.EXPAND_SEPARATOR) {
private val FalseSuper = termName("$$super")
private val FalseSuperLength = FalseSuper.length
override def unmangle(name: SimpleName): TermName = {
var i = name.lastIndexOfSlice(str.EXPAND_SEPARATOR)
if (i < 0) name
else {
// Hack to make super accessors from traits work. They would otherwise fail because of #765
// The problem is that in `x$$super$$plus` the expansion prefix needs to be `x`
// instead of `x$$super`.
if (i > FalseSuperLength && name.slice(i - FalseSuperLength, i) == FalseSuper)
i -= FalseSuper.length
apply(name.take(i).asTermName, name.drop(i + str.EXPAND_SEPARATOR.length).asSimpleName)
}
}
}
/** Expanded names of the form `prefix $_setter_$ name`. These only occur in Scala2. */
val TraitSetterName: QualifiedNameKind = new QualifiedNameKind(TRAITSETTER, str.TRAIT_SETTER_SEPARATOR)
/** Unique names of the form `prefix $ n` or `$ n $` */
val UniqueName: UniqueNameKind = new UniqueNameKind("$") {
override def mkString(underlying: TermName, info: ThisInfo) =
if (underlying.isEmpty) "$" + info.num + "$" else super.mkString(underlying, info)
}
/** Other unique names */
val TempResultName: UniqueNameKind = new UniqueNameKind("ev$")
val EvidenceParamName: UniqueNameKind = new UniqueNameKind("evidence$")
val DepParamName: UniqueNameKind = new UniqueNameKind("(param)")
val LazyImplicitName: UniqueNameKind = new UniqueNameKind("$_lazy_implicit_$")
val LazyLocalName: UniqueNameKind = new UniqueNameKind("$lzy")
val LazyLocalInitName: UniqueNameKind = new UniqueNameKind("$lzyINIT")
val LazyFieldOffsetName: UniqueNameKind = new UniqueNameKind("$OFFSET")
val LazyBitMapName: UniqueNameKind = new UniqueNameKind(nme.BITMAP_PREFIX.toString)
val NonLocalReturnKeyName: UniqueNameKind = new UniqueNameKind("nonLocalReturnKey")
val WildcardParamName: UniqueNameKind = new UniqueNameKind("_$")
val TailLabelName: UniqueNameKind = new UniqueNameKind("tailLabel")
val TailLocalName: UniqueNameKind = new UniqueNameKind("$tailLocal")
val TailTempName: UniqueNameKind = new UniqueNameKind("$tmp")
val ExceptionBinderName: UniqueNameKind = new UniqueNameKind("ex")
val SkolemName: UniqueNameKind = new UniqueNameKind("?")
val LiftedTreeName: UniqueNameKind = new UniqueNameKind("liftedTree")
val SuperArgName: UniqueNameKind = new UniqueNameKind("$superArg$")
val DocArtifactName: UniqueNameKind = new UniqueNameKind("$doc")
val UniqueInlineName: UniqueNameKind = new UniqueNameKind("$i")
val InlineScrutineeName: UniqueNameKind = new UniqueNameKind("$scrutinee")
val InlineBinderName: UniqueNameKind = new UniqueNameKind("$elem")
/** A kind of unique extension methods; Unlike other unique names, these can be
* unmangled.
*/
val UniqueExtMethName: UniqueNameKind = new UniqueNameKind("$extension") {
override def unmangle(name: SimpleName): TermName = {
val i = skipSeparatorAndNum(name, separator)
if (i > 0) {
val index = name.drop(i).toString.toInt
val original = name.take(i - separator.length).asTermName
apply(original, index)
}
else name
}
}
/** Kinds of unique names generated by the pattern matcher */
val PatMatStdBinderName: UniqueNameKind = new UniqueNameKind("x")
val PatMatAltsName: UniqueNameKind = new UniqueNameKind("matchAlts")
val PatMatResultName: UniqueNameKind = new UniqueNameKind("matchResult")
val LocalOptInlineLocalObj: UniqueNameKind = new UniqueNameKind("ilo")
/** The kind of names of default argument getters */
val DefaultGetterName: NumberedNameKind = new NumberedNameKind(DEFAULTGETTER, "DefaultGetter") {
def mkString(underlying: TermName, info: ThisInfo) = {
val prefix = if (underlying.isConstructorName) nme.DEFAULT_GETTER_INIT else underlying
prefix.toString + str.DEFAULT_GETTER + (info.num + 1)
}
// TODO: Reduce code duplication with UniqueExtMethName
override def unmangle(name: SimpleName): TermName = {
val i = skipSeparatorAndNum(name, str.DEFAULT_GETTER)
if (i > 0) {
val index = name.drop(i).toString.toInt - 1
var original = name.take(i - str.DEFAULT_GETTER.length).asTermName
if (original == nme.DEFAULT_GETTER_INIT) original = nme.CONSTRUCTOR
apply(original, index)
}
else name
}
}
/** Names of the form N_<outer>. Emitted by inliner, replaced by outer path
* in ExplicitOuter.
*/
val OuterSelectName: NumberedNameKind = new NumberedNameKind(OUTERSELECT, "OuterSelect") {
def mkString(underlying: TermName, info: ThisInfo) = {
assert(underlying.isEmpty)
s"${info.num}_<outer>"
}
}
val SuperAccessorName: PrefixNameKind = new PrefixNameKind(SUPERACCESSOR, "super$")
val InitializerName: PrefixNameKind = new PrefixNameKind(INITIALIZER, "initial$")
val ProtectedAccessorName: PrefixNameKind = new PrefixNameKind(PROTECTEDACCESSOR, "protected$")
val InlineAccessorName: PrefixNameKind = new PrefixNameKind(INLINEACCESSOR, "inline$")
val BodyRetainerName: SuffixNameKind = new SuffixNameKind(BODYRETAINER, "$retainedBody")
val FieldName: SuffixNameKind = new SuffixNameKind(FIELD, "$$local") {
override def mkString(underlying: TermName, info: ThisInfo) = underlying.toString
}
val ExtMethName: SuffixNameKind = new SuffixNameKind(EXTMETH, "$extension")
val ParamAccessorName: SuffixNameKind = new SuffixNameKind(PARAMACC, "$accessor")
val ModuleClassName: SuffixNameKind = new SuffixNameKind(OBJECTCLASS, "$", optInfoString = "ModuleClass")
val ImplMethName: SuffixNameKind = new SuffixNameKind(IMPLMETH, "$")
val AdaptedClosureName: SuffixNameKind = new SuffixNameKind(ADAPTEDCLOSURE, "$adapted") { override def definesNewName = true }
/** A name together with a signature. Used in Tasty trees. */
object SignedName extends NameKind(SIGNED) {
case class SignedInfo(sig: Signature) extends Info {
assert(sig ne Signature.NotAMethod)
override def toString: String = s"$infoString $sig"
}
type ThisInfo = SignedInfo
def apply(qual: TermName, sig: Signature): TermName =
qual.derived(new SignedInfo(sig))
def unapply(name: DerivedName): Option[(TermName, Signature)] = name match {
case DerivedName(underlying, info: SignedInfo) => Some((underlying, info.sig))
case _ => None
}
def mkString(underlying: TermName, info: ThisInfo): String = s"$underlying[with sig ${info.sig}]"
def infoString: String = "Signed"
}
/** Possible name kinds of a method that comes from Scala2 pickling info.
* and that need to be unmangled. Note: Scala2 protected accessors and setters
* can be left mangled, so they are not included in thus list.
*/
val Scala2MethodNameKinds: List[NameKind] =
List(DefaultGetterName, ExtMethName, UniqueExtMethName)
def simpleNameKindOfTag : collection.Map[Int, ClassifiedNameKind] = simpleNameKinds
def qualifiedNameKindOfTag : collection.Map[Int, QualifiedNameKind] = qualifiedNameKinds
def numberedNameKindOfTag : collection.Map[Int, NumberedNameKind] = numberedNameKinds
def uniqueNameKindOfSeparator: collection.Map[String, UniqueNameKind] = uniqueNameKinds
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/core/NameKinds.scala | Scala | apache-2.0 | 17,290 |
package sample
import org.slf4j.Logger
import org.slf4j.LoggerFactory
// An account with a mutable balance. NOTE(review): `balance` is a plain var
// mutated without synchronization — confirm callers serialize access.
case class Account(owner: String, var balance: Double)
object Account {
  val logger = LoggerFactory.getLogger(Account.getClass)

  /** Moves `amount` from `from` to `to` by mutating both balances.
    *
    * NOTE(review): the two mutations are not atomic and not synchronized —
    * confirm callers serialize transfers.
    */
  def transfer(from: Account, to: Account, amount: Double): Unit = {
    // Fixed typo in log message ("tranfering" -> "transferring").
    logger.info(s"transferring from ${from.owner} to=${to.owner}") //TODO: more than two argument needs java array
    from.balance -= amount
    to.balance += amount
    // Fixed typo in log message ("tranfer" -> "transfer").
    logger.info(s"transfer of amount $amount completed")
  }

  /** Adds `amount` to the balance of `to`. */
  def credit(to: Account, amount: Double): Unit = { to.balance += amount }

  /** Validity check for an account; always false for now. */
  def valid(account: Account): Boolean = false //TODO: waiting to be implemented
}
| notyy/spray-template | src/main/scala/sample/Account.scala | Scala | apache-2.0 | 641 |
package controllers
import models.BasicUser
import play.api.Logger
import play.api.mvc.{Action, AnyContent, RequestHeader}
import securesocial.controllers.BaseLoginPage
import securesocial.core.services.RoutesService
import securesocial.core.{IdentityProvider, RuntimeEnvironment}
/** Login page controller that delegates to the standard SecureSocial flow;
  * the override exists only to log that this custom controller is in use.
  */
class CustomLoginController(implicit override val env: RuntimeEnvironment[BasicUser]) extends BaseLoginPage[BasicUser] {
  override def login: Action[AnyContent] = {
    Logger.debug("using CustomLoginController")
    super.login
  }
}
/** Routes service that points SecureSocial's login-page URL at
  * [[CustomLoginController]]; `absoluteURL` honours `IdentityProvider.sslEnabled`.
  */
class CustomRoutesService extends RoutesService.Default {
  override def loginPageUrl(implicit req: RequestHeader): String = controllers.routes.CustomLoginController.login().absoluteURL(IdentityProvider.sslEnabled)
} | enotys/moneychanger | app/controllers/CustomLoginController.scala | Scala | apache-2.0 | 737 |
package com.jeff.chaser.models.components.ai.detect
import com.badlogic.ashley.core.Component
// Stateless component — presumably a marker/tag attached to entities involved
// in detection; it carries no data of its own.
class DetectionComponent() extends Component
| jregistr/Academia | CSC455-Game-Programming/Chaser/core/src/com/jeff/chaser/models/components/ai/detect/DetectionComponent.scala | Scala | mit | 142 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.online.joins
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.{Expression, Row}
import org.apache.spark.sql.catalyst.plans.physical.{Distribution, Partitioning, UnspecifiedDistribution}
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.execution.{BinaryNode, SparkPlan}
import org.apache.spark.sql.hive.online.{OTStateful, OnlineDataFrame, OpId}
import org.apache.spark.sql.metric.SQLMetrics
import scala.concurrent._
import scala.concurrent.duration._
/**
 * One-time broadcast hash join.
 *
 * Builds a hash relation from the build side, broadcasts it, and probes it
 * with the streamed side. "One-time": the broadcast created for a batch is
 * cached in the controller under `(opId, batch)` and reused by later batches
 * (see `prevBatch` handling in `broadcastFuture` and `newBatch`).
 */
case class OTBroadcastHashJoin(
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    buildSide: BuildSide,
    left: SparkPlan,
    right: SparkPlan)(
    @transient val controller: OnlineDataFrame,
    @transient val trace: List[Int] = -1 :: Nil,
    opId: OpId = OpId.newOpId)
  extends BinaryNode with HashJoin with OTStateful {

  override private[sql] lazy val metrics = Map(
    "numLeftRows" -> SQLMetrics.createLongMetric(sparkContext, "number of left rows"),
    "numRightRows" -> SQLMetrics.createLongMetric(sparkContext, "number of right rows"),
    "numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))

  // Output partitioning follows the streamed (non-broadcast) side.
  override def outputPartitioning: Partitioning = streamedPlan.outputPartitioning

  override def requiredChildDistribution: Seq[Distribution] =
    UnspecifiedDistribution :: UnspecifiedDistribution :: Nil

  // How long doExecute waits for the broadcast; a negative configured value
  // means wait forever.
  val timeout = {
    val timeoutValue = sqlContext.conf.broadcastTimeout
    if (timeoutValue < 0) {
      Duration.Inf
    } else {
      timeoutValue.seconds
    }
  }

  // For a fresh batch: collect the build side, hash it, broadcast it, and
  // cache the broadcast under (opId, currentBatch). For a repeated batch
  // (prevBatch defined): reuse the broadcast cached by the earlier run.
  // Lazy so that the work starts only when first forced (see newBatch).
  @transient
  private lazy val broadcastFuture = future {
    prevBatch match {
      case None =>
        val input: Array[Row] = buildPlan.execute().map(_.copy()).collect()
        val hashed = HashedRelation(
          input.iterator, SQLMetrics.nullLongMetric, buildSideKeyGenerator, input.length)
        val broadcast = sparkContext.broadcast(hashed)
        controller.broadcasts((opId, currentBatch)) = broadcast
        broadcast
      case Some(bId) =>
        controller.broadcasts((opId, bId)).asInstanceOf[Broadcast[HashedRelation]]
    }
  }(BroadcastHashJoin.broadcastHashJoinExecutionContext)

  override def doExecute(): RDD[Row] = {
    // The "streamed rows" metric is the side opposite to the build side.
    val numStreamedRows = buildSide match {
      case BuildLeft => longMetric("numRightRows")
      case BuildRight => longMetric("numLeftRows")
    }
    val numOutputRows = longMetric("numOutputRows")

    // Block until the broadcast hash relation is available (bounded by timeout).
    val broadcastRelation = Await.result(broadcastFuture, timeout)

    streamedPlan.execute().mapPartitions { streamedIter =>
      hashJoin(streamedIter, numStreamedRows, broadcastRelation.value, numOutputRows)
    }
  }

  override protected final def otherCopyArgs: Seq[AnyRef] = controller :: trace :: opId :: Nil

  override def simpleString: String = s"${super.simpleString} $opId"

  // Creates the operator for the next batch, keeping the same opId so the
  // cached broadcast can be found; referencing join.broadcastFuture forces
  // the lazy val, kicking off the broadcast eagerly.
  override def newBatch(newTrace: List[Int]): SparkPlan = {
    val join = OTBroadcastHashJoin(leftKeys, rightKeys, buildSide, left, right)(
      controller, newTrace, opId)
    join.broadcastFuture
    join
  }
}
| andrewor14/iolap | sql/hive/src/main/scala/org/apache/spark/sql/hive/online/joins/OTBroadcastHashJoin.scala | Scala | apache-2.0 | 3,937 |
package org.libss.lift.form.fields
import net.liftweb.http.js.{JsCmd, JsExp}
/**
* Created by Kaa
* on 08.06.2016 at 23:38.
*/
/** Analyzes a field value of type `T` and optionally produces a
  * [[FieldValueMessage]] (error/warning/success) about it.
  */
trait FieldValueAnalyzer[T] {
  /** Server-side check; `None` means the value raised no message. */
  def analyze(value: Option[T]): Option[FieldValueMessage]

  /** Optional client-side (JavaScript) counterpart of [[analyze]]; `None` by default. */
  def clientSideAnalyze: Option[(JsExp) => JsCmd] = None

  /** Ordering weight of this analyzer. NOTE(review): whether lower or higher
    * priority runs first is not visible here — confirm at the call site. */
  def priority: Int
}
/** A message produced about a field value. Only error messages block form
  * submission; warnings and success notes are informational.
  */
sealed trait FieldValueMessage {
  /** Human-readable message text. */
  def messageText: String
  /** Whether this message should block submitting the form. */
  def preventsSubmit: Boolean
}

/** An error about the field value; blocks submission. */
case class FieldValueErrorMessage(messageText: String) extends FieldValueMessage {
  override val preventsSubmit: Boolean = true
}

/** A warning about the field value; shown but does not block submission. */
case class FieldValueWarningMessage(messageText: String) extends FieldValueMessage {
  override val preventsSubmit: Boolean = false
}

/** A success note about the field value; does not block submission. */
case class FieldValueSuccessMessage(messageText: String) extends FieldValueMessage {
  override val preventsSubmit: Boolean = false
}
| kanischev/libss | libss-web/src/main/scala/org/libss/lift/form/fields/FieldValueAnalyzer.scala | Scala | apache-2.0 | 744 |
/*
The MIT License (MIT)
Copyright (c) 2017 Chris Camel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.ccm.me.playground.bindingscala.calc
import com.ccm.me.playground.bindingscala.ShowCase
import com.thoughtworks.binding.Binding.{BindingSeq, Constant, Constants, Var}
import com.thoughtworks.binding.{Binding, dom}
import org.scalajs.dom.Node
import org.scalajs.dom.html.Anchor
import org.scalajs.dom.raw.Event
class ui extends ShowCase {
  // The calculator's entire state; every UI fragment below re-renders when it changes.
  val calc = Var(CalcModel())

  def name: String = "playground-binding.scala/calc"

  @dom def description: Binding[Node] = <div>A very simple and basic calculator</div>

  def link: String = s"#playground-binding.scala/calc"

  def scalaFiddle: Option[String] = Some("https://scalafiddle.io/sf/hbwbCOe/0")

  // Component-local stylesheet injected as a <style> node.
  @dom def css: Binding[BindingSeq[Node]] = <style>
    {s"""
       .calc {
         width: 400px;
         border-style: solid;
         border-width: 1px;
         border-color: #e8e8e8;
         border-radius: 10px;
         background-color: #f7f7f7;
         padding: 10px;
       }
       .calc .display {
         border-radius: 7px;
         background-color: #eee;
         margin: 0px 15px 15px 0px;
       }
       .calc .lcd {
         font-family: 'Cutive Mono', monospace;
         text-align:right;
         font-size: 21px;
       }
       .calc .tag {
         font-size: 10px;
       }
       .calc .btn {
         width: 60px;
         padding: 0px;
       }
      """
    }
  </style>
    <!-- -->

  // Renders the display plus a 4x5 grid of buttons laid out row by row.
  @dom def render: Binding[Node] = {
    val btns = List(List("7", "8", "9", "+", "C"),
      List("4", "5", "6", "-", "MS"),
      List("1", "2", "3", "x", "MR"),
      List(".", "0", "=", "/", "MC"))

    <div class="container">
      <h5>Calc</h5>
      <hr/>
      <p>A very simple calc implementation.</p>
      <div class="calc">
        <div class="row display" >
          <div class="col s11">
            <input class="lcd"
                   type="Text"
                   readOnly={true}
                   value={display.bind}></input>
          </div>
          {renderMemoryTag.bind}
          {renderOperatorTag.bind}
        </div>
        {Constants(btns: _*).map { l =>
        <div class="row">
          {Constants(l: _*).map { c =>
            val push = if( List("+", "-", "x", "/", "C", "MS", "MR", "MC").contains(c)) "push-s1" else ""
            <div class={s"col s2 ${push}"}>{b(c).bind}</div>
          }}
        </div>
      }}
      </div>
    </div>
  }

  // Shows "M" when a value is stored in the model's memory, blank otherwise.
  @dom def renderMemoryTag = <div class="tag">
    {calc.bind.memory.map(it => "M").getOrElse(" ")}
  </div>

  // Shows the symbol of the pending operator (head of the operator stack), blank otherwise.
  @dom def renderOperatorTag = <div class="tag">
    {calc.bind.operators.headOption.map {
      case Plus() => "+"
      case Minus() => "-"
      case Multiply() => "x"
      case Divide() => "/"
      case _ => " "
    }.getOrElse(" ")}
  </div>

  // LCD text: the model's accumulator, or "0" when it is empty.
  @dom def display = Option(calc.bind.accumulator).filterNot(_.isEmpty).getOrElse("0")

  // Renders one button: maps the label to its color classes and input Token,
  // disables the button when the model cannot accept that token, and feeds
  // the token into the model on click.
  @dom def b(label: String): Binding[Anchor] = {
    val op: (String, Token) = label match {
      case "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" | "0" => ("grey darken-1", Digit(label.toInt))
      case "." => ("grey darken-1", Dot())
      case "+" => ("blue lighten-5 blue-text text-darken-3", Plus())
      case "-" => ("blue lighten-5 blue-text text-darken-3", Minus())
      case "x" => ("blue lighten-5 blue-text text-darken-3", Multiply())
      case "/" => ("blue lighten-5 blue-text text-darken-3", Divide())
      case "=" => ("grey lighten-2 black-text text-darken-3", Result())
      case "C" => ("red lighten-5 red-text text-darken-4", Clear())
      case "MR" => ("green lighten-5 green-text", MR())
      case "MC" => ("green lighten-5 green-text", MC())
      case "MS" => ("green lighten-5 green-text", MS())
      case _ => ("", NoOp())
    }
    // A button is enabled only if the model accepts its token right now.
    def disabled = Binding { if (calc.bind.isDefinedAt(op._2)) "" else "disabled" }
    <a class={s"btn ${op._1} ${disabled.bind} waves-effect waves-light"}
       onclick={_: Event => calc.value = calc.value(op._2)}>
      {label}
    </a>
  }
}
| ccamel/playground-binding.scala | src/main/scala/com/ccm/me/playground/bindingscala/calc/ui.scala | Scala | mit | 5,155 |
package special.sigma
import org.bouncycastle.math.ec.ECPoint
/** These extensions are used from SigmaDsl.
  * If you add something here, make sure the corresponding syntax is supported by SigmaScript. */
class DslSyntaxExtensions(dsl: SigmaDslBuilder) {
  implicit class BooleanOps(source: Boolean) {
    /** Logical AND between Boolean on the left and SigmaProp value on the right. */
    def &&(prop: SigmaProp) = dsl.sigmaProp(source) && prop

    /** Logical OR between Boolean on the left and SigmaProp value on the right.
      * (Comment fixed: was previously mislabeled as AND.) */
    def ||(prop: SigmaProp) = dsl.sigmaProp(source) || prop
  }
}
object Extensions {

  /** Renders an elliptic-curve point compactly: "INF" for the point at
    * infinity, otherwise the first 6 characters of each raw coordinate.
    * NOTE(review): `substring(0, 6)` assumes each coordinate string has at
    * least 6 characters — confirm for the curves in use.
    */
  def showECPoint(p: ECPoint): String =
    if (p.isInfinity) "INF"
    else {
      val x = p.getRawXCoord.toString.substring(0, 6)
      val y = p.getRawYCoord.toString.substring(0, 6)
      s"ECPoint($x,$y,...)"
    }

  /** Adds a compact [[showECPoint]]-based rendering to [[GroupElement]]. */
  implicit class GroupElementOps(val source: GroupElement) extends AnyVal {
    def showToString: String = showECPoint(source.value)
  }
}
| ScorexFoundation/sigmastate-interpreter | sigma-impl/src/main/scala/special/sigma/Extensions.scala | Scala | mit | 1,016 |
package pl.japila.akka.http
import akka.actor.Actor
import akka.stream.actor.{ActorPublisher, ActorPublisherMessage}
/**
 * Based upon http://stackoverflow.com/a/29077212/1305344
 *
 * An [[ActorPublisher]] that buffers integers (received as strings) while
 * there is no downstream demand, then emits them in arrival order.
 */
class ActorBasedSource extends Actor with ActorPublisher[Int] {
  import akka.stream.actor.ActorPublisherMessage._

  // Elements received while there was no outstanding demand, oldest first.
  var items: List[Int] = List.empty

  def receive = {
    case s: String =>
      // No demand: buffer. Otherwise push downstream immediately.
      if (totalDemand == 0)
        items = items :+ s.toInt
      else
        onNext(s.toInt)

    case Request(demand) =>
      if (demand > items.size) {
        // Demand covers the whole buffer: flush everything.
        items foreach onNext
        items = List.empty
      }
      else {
        // Emit exactly `demand` buffered elements and keep the rest.
        // Fix: previously the *kept* tail was emitted (`items foreach onNext`
        // after `items = keep`) instead of the `send` prefix split off for
        // emission, which dropped the oldest elements and re-sent the tail.
        val (send, keep) = items.splitAt(demand.toInt)
        items = keep
        send foreach onNext
      }

    case other =>
      println(s"got other $other")
  }
} | jaceklaskowski/akka-http-sandbox | src/main/scala/pl/japila/akka/http/ActorBasedSource.scala | Scala | apache-2.0 | 787 |
package com.ee.assets.transformers
import java.io.{StringWriter, StringReader}
import com.ee.log.Logger
/** Minifies the CSS contents of each element with the YUI CSS compressor.
  *
  * Each element's path is rewritten from `name.suffix` to `name.min.suffix`,
  * its contents are replaced by the compressed CSS, and its last-modified
  * timestamp is carried over unchanged.
  */
class CssMinifier extends Transformer[String, String] {

  lazy val logger = Logger("css-minifier")

  override def run(elements: Seq[Element[String]]): Seq[Element[String]] = {
    for (element <- elements) yield {
      val (baseName, extension) = com.ee.utils.file.nameAndSuffix(element.path)
      ContentElement(
        s"$baseName.min.$extension",
        compress(element.contents),
        element.lastModified)
    }
  }

  /** Runs the YUI CSS compressor over the given stylesheet text (no line breaks). */
  private def compress(css: String): String = {
    val out = new StringWriter()
    new com.yahoo.platform.yui.compressor.CssCompressor(new StringReader(css)).compress(out, 0)
    out.toString
  }
}
| edeustace/assets-loader | plugin/app/com/ee/assets/transformers/CssMinifier.scala | Scala | mit | 761 |
package com.github.jlprat.gameserver
import akka.actor.{Actor, ActorLogging, Props}
/** Starts a ping-pong exchange with a child [[PongActor]] and shuts the
  * actor system down after the third pong reply.
  */
class PingActor extends Actor with ActorLogging {
  import PingActor._

  // Number of pong replies received so far; the exchange stops at three.
  var counter = 0
  // Child actor that answers each ping.
  val pongActor = context.actorOf(PongActor.props, "pongActor")

  def receive = {
    case Initialize =>
      log.info("In PingActor - starting ping-pong")
      pongActor ! PingMessage("ping")

    case PongActor.PongMessage(text) =>
      log.info("In PingActor - received message: {}", text)
      counter += 1
      counter match {
        case 3 => context.system.shutdown()
        case _ => sender() ! PingMessage("ping")
      }
  }
}
/** Props factory and message protocol for [[PingActor]]. */
object PingActor {
  // Props factory so callers do not construct the actor directly.
  val props = Props[PingActor]
  // Kicks off the ping-pong exchange.
  case object Initialize
  // A ping carrying its text payload.
  case class PingMessage(text: String)
} | jlprat/akka-gameserver | src/main/scala/com/github/jlprat/gameserver/PingActor.scala | Scala | apache-2.0 | 705 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.{Initializable, TensorModule}
import com.intel.analytics.bigdl.optim.Regularizer
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect.ClassTag
/**
 * This layer has a weight tensor with given size. The weight will be multiplied element wise to
 * the input tensor. If the element number of the weight tensor match the input tensor, a simply
 * element wise multiply will be done. Or the bias will be expanded to the same size of the input.
 * The expand means repeat on unmatched singleton dimension(if some unmatched dimension isn't
 * singleton dimension, it will report an error). If the input is a batch, a singleton dimension
 * will be add to the first dimension before the expand.
 *
 * @param size the size of the bias
 * @param wRegularizer regularizer applied to the weight gradient; may be null
 * @param ev numeric operator
 * @tparam T numeric type
 */
@SerialVersionUID(8888147326550637025L)
class CMul[@specialized(Float, Double) T: ClassTag](
  val size: Array[Int],
  var wRegularizer: Regularizer[T] = null)(
  implicit ev: TensorNumeric[T]) extends TensorModule[T] with Initializable {

  // Learnable per-element scale factors, and the gradient accumulated for them.
  val weight: Tensor[T] = Tensor[T](size)
  val gradWeight : Tensor[T] = Tensor[T](size)

  // Scratch tensors reused across backward passes to avoid re-allocation.
  private val _sum = Tensor[T]()
  private val _repeat = Tensor[T]()

  {
    // Default initialization: uniform in [-stdv, stdv] with stdv = 1/sqrt(#weights).
    val stdv = 1 / math.sqrt(weight.nElement())
    val wInit: InitializationMethod = RandomUniform(-stdv, stdv)
    setInitMethod(weightInitMethod = wInit)
  }

  /** Re-initializes the weight with the configured init method and clears gradients. */
  override def reset(): Unit = {
    weightInitMethod.init(weight, VariableFormat.ONE_D)
    zeroGradParameters()
  }

  /**
   * Forward pass: output = input .* weight. When the weight has fewer elements
   * than the input it is viewed/expanded over the unmatched singleton
   * dimensions (with an implicit leading batch dimension when needed).
   */
  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    output.resizeAs(input).copy(input)
    if (input.nElement() == weight.nElement()) {
      output.cmul(weight)
    } else {
      // Prepend a singleton batch dimension when the input carries one more dim.
      val expand = if (weight.dim() == input.dim()) {
        weight.view(weight.size())
      } else {
        weight.view(Array(1) ++ weight.size())
      }
      // Fast path: when exactly one dimension is non-singleton, scale
      // contiguous slices directly instead of materializing the expansion.
      val pivotDim = Utils.getOnlyDimGtOne(expand.size())
      if (pivotDim > 0) {
        mulOneDimWeight(pivotDim, expand, output)
      } else {
        expand.expandAs(output)
        output.cmul(expand)
      }
    }
    output
  }

  /**
   * Scales `output` in place by a weight that is non-singleton in exactly one
   * dimension `dim`, walking the storage in (outer, weight-element, inner) order.
   */
  private def mulOneDimWeight(dim: Int, expand: Tensor[T], output: Tensor[T]): Unit = {
    val (innerNum, outerNum) = Utils.getInnerOuterNum(dim, output)
    val weightData = expand.storage().array()
    val weightOffset = expand.storageOffset() - 1
    var outer = 0
    var offset = output.storageOffset() - 1
    while (outer < outerNum) {
      var k = 0
      while (k < expand.nElement()) {
        // Scale innerNum contiguous elements by the k-th weight value.
        ev.scal(innerNum, weightData(k + weightOffset), output.storage().array(), offset, 1)
        offset += innerNum
        k += 1
      }
      outer += 1
    }
  }

  /**
   * Backward pass w.r.t. the input: gradInput = weight .* gradOutput, with the
   * same expansion rules as the forward pass.
   */
  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    gradInput.resizeAs(input).zero()
    if (weight.nElement() == gradOutput.nElement()) {
      gradInput.addcmul(ev.fromType[Int](1), weight, gradOutput)
    } else {
      val expand = if (weight.dim() == gradOutput.dim()) {
        weight.view(weight.size())
      } else {
        weight.view(Array(1) ++ weight.size())
      }
      val pivotDim = Utils.getOnlyDimGtOne(expand.size())
      if (pivotDim > 0) {
        gradInput.copy(gradOutput)
        mulOneDimWeight(pivotDim, expand, gradInput)
      } else {
        expand.expandAs(gradOutput)
        gradInput.cmul(expand, gradOutput)
      }
    }
    gradInput
  }

  /**
   * Accumulates the weight gradient: scale * sum(input .* gradOutput) reduced
   * over every dimension on which the weight was broadcast.
   */
  override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T],
    scale: Double = 1.0): Unit = {
    if (weight.nElement() == gradOutput.nElement()) {
      gradWeight.addcmul(ev.fromType[Double](scale), input, gradOutput)
    } else {
      if (weight.dim() == input.dim()) {
        // Reduce input .* gradOutput over each dimension where the weight size
        // differs from the input size, ping-ponging between the two buffers.
        _repeat.resizeAs(input).cmul(input, gradOutput)
        var sumFrom = _repeat
        var sumInto = _sum
        var i = 1
        while (i <= weight.dim()) {
          if (weight.size(i) != input.size(i)) {
            sumInto.sum(sumFrom, i)
            sumInto = sumFrom
            sumFrom = if (sumFrom == _repeat) _sum else _repeat
          }
          i += 1
        }
        gradWeight.add(ev.fromType[Double](scale), sumFrom)
      } else {
        // The weight carried an implicit leading batch dimension: sum it out.
        _repeat.resizeAs(input).cmul(input, gradOutput)
        _sum.sum(_repeat, 1)
        gradWeight.add(ev.fromType[Double](scale), _sum)
      }
    }

    if (null != wRegularizer) {
      wRegularizer.accRegularization(weight, gradWeight)
    }
  }

  /** SGD step: weight -= learningRate * gradWeight. */
  override def updateParameters(learningRate: T): Unit = {
    weight.map(gradWeight, (a, b) => ev.minus(a, ev.times(learningRate, b)))
  }

  /** Clears the accumulated weight gradient. */
  override def zeroGradParameters(): Unit = {
    gradWeight.zero()
  }

  override def parameters(): (Array[Tensor[T]], Array[Tensor[T]]) = {
    (Array(this.weight), Array(this.gradWeight))
  }

  override def getParametersTable(): Table = {
    T(getName() -> T("weight" -> weight, "gradWeight" -> gradWeight))
  }

  /** Releases the scratch buffers in addition to the inherited state. */
  override def clearState(): this.type = {
    super.clearState()
    _repeat.set()
    _sum.set()
    this
  }

  override def equals(obj: Any): Boolean = {
    if (!super.equals(obj)) {
      return false
    }
    if (!obj.isInstanceOf[CMul[T]]) {
      return false
    }
    val other = obj.asInstanceOf[CMul[T]]
    if (this.eq(other)) {
      return true
    }
    // `size` is an Array, whose == in Scala is reference equality; compare
    // contents so two modules with equal shapes compare equal.
    size.sameElements(other.size) &&
      gradWeight == other.gradWeight &&
      weight == other.weight
  }

  override def hashCode() : Int = {
    val seed = 37
    var hash = super.hashCode()
    // Content-based hash for the size array, consistent with equals
    // (Array.hashCode is identity-based and would break the contract).
    hash = hash * seed + java.util.Arrays.hashCode(size)
    hash = hash * seed + gradWeight.hashCode()
    hash = hash * seed + weight.hashCode()
    hash
  }

  override def toString(): String = {
    s"${getPrintName}(${java.util.Arrays.toString(size)})"
  }
}
object CMul {

  /**
   * Builds a [[CMul]] module.
   *
   * @param size shape of the learnable element-wise scaling tensor
   * @param wRegularizer regularizer applied to the weight gradient; may be null
   */
  def apply[@specialized(Float, Double) T: ClassTag](
      size: Array[Int], wRegularizer: Regularizer[T] = null)
    (implicit ev: TensorNumeric[T]) : CMul[T] =
    new CMul[T](size, wRegularizer)
}
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/CMul.scala | Scala | apache-2.0 | 6,725 |
package gr.cslab.ece.ntua.musqle.sql
import gr.cslab.ece.ntua.musqle.plan.hypergraph.{DPJoinPlan, Join, Move}
import gr.cslab.ece.ntua.musqle.plan.spark._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.types.StringType
import scala.collection.mutable
/**
 * Renders MuSQLE plan nodes as SQL statements.
 *
 * @param info query metadata: join conditions, registered table names and the
 *             root Catalyst logical plan
 */
class SQLCodeGen(val info: MQueryInfo) {

  /**
   * Generates the SQL for a single table scan:
   * `SELECT <projections> FROM <tmpName> [WHERE <filter>]`, plus the query's
   * grouping/ordering clauses when this scan is the root plan.
   */
  def genSQL(scan: MuSQLEScan): String = {
    // NOTE(review): the SELECT list is captured before the isRoot branch below
    // replaces `scan.projections`, so the aggregate expressions are recorded on
    // the plan but not emitted in this statement — confirm this is intended.
    val projection = {
      if (!scan.projections.isEmpty) {
        scan.projections.reduceLeft(_.toLowerCase + ", " + _.toLowerCase)
      }
      else { "*" }
    }
    if (scan.isRoot) {
      val exp = getAggregateExpressions()
      scan.projections.clear()
      exp.foreach(scan.projections.add)
    }
    var sql = s"""SELECT ${projection}
                 |FROM ${scan.tmpName}""".stripMargin
    if (scan.vertex.filter != null) {
      val filter = makeCondition(scan.vertex.filter.condition)
      sql += s" WHERE $filter"
    }
    if (scan.isRoot) {
      sql += "\\n" + getAggregations()
    }
    sql
  }

  /**
   * Generates the SQL for a join subplan: all tables reachable in the subplan,
   * restricted by the subplan's join conditions and pushed-down scan filters,
   * plus grouping/ordering clauses when this join is the root plan.
   */
  def genSQL(plan: MuSQLEJoin): String = {
    val subQueryTables = findJoinKeys(plan)
    val conditions = subQueryTables.map(key => info.idToCondition(key))
    val filters = findFiltersInSubQuery(plan)
    val names = findTableNames(plan)
    // See the note in genSQL(scan): the SELECT list is captured before the
    // isRoot branch replaces `plan.projections`.
    val projection = {
      if (!plan.projections.isEmpty) {
        plan.projections.reduceLeft(_.toLowerCase + ", " + _.toLowerCase)
      }
      else { "*" }
    }
    val commaSeparatedNames = names.reduceLeft(_ + ", " + _)
    if (plan.isRoot) {
      val exp = getAggregateExpressions()
      plan.projections.clear()
      exp.foreach(plan.projections.add)
    }
    var SQL =
      s"""SELECT ${projection}
         |FROM $commaSeparatedNames""".stripMargin
    var WHERE = ""
    if (conditions.size > 0) {
      WHERE += "\\nWHERE " + conditions.map(makeCondition).reduce(_ + "\\nAND " + _)
    }
    if (filters.size > 0) {
      val f = filters.map(f => makeCondition(f.condition)).reduce(_ + "\\nAND " + _)
      if (WHERE.equals("")) {
        WHERE += "\\nWHERE " + f
      }
      else {
        WHERE += "\\nAND " + f
      }
    }
    SQL += WHERE
    if (plan.isRoot) {
      SQL += "\\n" + getAggregations()
    }
    SQL
  }

  /**
   * Walks down the single-child spine of the root logical plan and returns the
   * SQL renderings of the first Aggregate node's expressions, or the empty set
   * when no such node exists.
   */
  private def getAggregateExpressions(): Set[String] = {
    var root = info.rootLogicalPlan
    while (root.children.size < 2 && root.children.size > 0) {
      root match {
        case agg: Aggregate if agg.aggregateExpressions.size > 0 =>
          return agg.aggregateExpressions.map(parseAggregateExpression).toSet
        case _ =>
      }
      root = root.children(0)
    }
    Set.empty
  }

  /**
   * Renders a Catalyst aggregate expression as SQL text. Aliases are emitted
   * as `expr AS name<exprId>` so generated column names stay unique.
   */
  private def parseAggregateExpression(expression: Expression): String = {
    expression match {
      case attRef: AttributeReference => parseAttributeReference(attRef)
      case alias: Alias =>
        s"${parseAggregateExpression(alias.child)} AS ${alias.name}${alias.exprId.id}"
      case divide: Divide =>
        s"${parseAggregateExpression(divide.left)}/${parseAggregateExpression(divide.right)}"
      case multiply: Multiply =>
        s"${parseAggregateExpression(multiply.left)}*${parseAggregateExpression(multiply.right)}"
      case subtract: Subtract =>
        s"${parseAggregateExpression(subtract.left)}-${parseAggregateExpression(subtract.right)}"
      case declarativeAgg: DeclarativeAggregate =>
        declarativeAgg match {
          case sum: Sum => s"SUM(${parseAggregateExpression(sum.child)})"
          case avg: Average => s"AVG(${parseAggregateExpression(avg.child)})"
          case count: Count => s"COUNT(${parseAggregateExpression(count.children(0))})"
        }
      case _ =>
        // Fall back to the first child (e.g. wrapper nodes) or to the raw
        // string rendering for leaves.
        if (expression.children.size > 0)
          parseAggregateExpression(expression.children(0))
        else
          expression.toString()
    }
  }

  /**
   * Builds the trailing GROUP BY / ORDER BY clauses by scanning the
   * single-child spine of the root logical plan for Aggregate and Sort nodes.
   */
  private def getAggregations(): String = {
    var root = info.rootLogicalPlan
    var groupBy = ""
    var orderBy = ""
    while (root.children.size < 2 && root.children.size > 0) {
      root match {
        case agg: Aggregate =>
          if (agg.groupingExpressions.size > 0) {
            groupBy = "GROUP BY " + agg.groupingExpressions
              .map(exp => parseAttributeReference(exp))
              .reduceLeft(_ + ", " + _)
          }
        case sort: Sort =>
          orderBy = "ORDER BY " + sort.order.map(attr => parseAttributeReference(attr.child)).reduceLeft(_ + ", " + _)
        case _ =>
      }
      root = root.children(0)
    }
    var aggString = groupBy
    if (!orderBy.isEmpty) {
      if (!aggString.isEmpty) { aggString += "\\n" }
      aggString += orderBy
    }
    aggString
  }

  /** Collects the projected attributes along the single-child spine of a Spark plan. */
  private def getSparkPlanProjections(plan: LogicalPlan): mutable.HashSet[String] = {
    val projections = new mutable.HashSet[String]()
    if (plan.children.size == 1) {
      plan.output.foreach(att => projections.add(att.toString()))
      getSparkPlanProjections(plan.children(0)).foreach(projections.add)
    }
    projections
  }

  /** Projections of the query's root logical plan. */
  private def getProjections(plan: DPJoinPlan): mutable.HashSet[String] = {
    getSparkPlanProjections(plan.info.asInstanceOf[MQueryInfo].rootLogicalPlan)
  }

  /**
   * Renders a Catalyst predicate as a SQL condition. Each comparison operand
   * may be an attribute, a CAST of an attribute, or a literal (string literals
   * are single-quoted) — see [[extractKey]].
   *
   * @throws UnsupportedOperationException for operators that are not handled
   */
  private def makeCondition(expr: Expression): String = {
    expr match {
      case eq: EqualTo => s"${extractKey(eq.left)} = ${extractKey(eq.right)}"
      case lt: LessThan => s"${extractKey(lt.left)} < ${extractKey(lt.right)}"
      case gt: GreaterThan => s"${extractKey(gt.left)} > ${extractKey(gt.right)}"
      case ltoet: LessThanOrEqual => s"${extractKey(ltoet.left)} <= ${extractKey(ltoet.right)}"
      case gtoet: GreaterThanOrEqual => s"${extractKey(gtoet.left)} >= ${extractKey(gtoet.right)}"
      case and: And => makeCondition(and.left) + " AND " + makeCondition(and.right)
      case or: Or => makeCondition(or.left) + " OR " + makeCondition(or.right)
      case notNull: IsNotNull =>
        s"${extractKey(notNull.child)} IS NOT NULL"
      case in: In =>
        val list = s"(${in.list.map(_.sql).reduceLeft(_ + ", " + _)})"
        s"${extractKey(in.value)} IN $list"
      case _ => throw new UnsupportedOperationException(s"Operator: ${expr}")
    }
  }

  /** Renders an attribute as a lower-cased identifier, dropping the '#' of its expr id. */
  private def parseAttributeReference(ar: Expression): String = {
    s"${ar.toString.replace("#", "").toLowerCase}"
  }

  /** Renders one comparison operand: attribute, CAST of an attribute, or literal. */
  private def extractKey(expr: Expression): String = {
    expr match {
      case ar: AttributeReference => parseAttributeReference(ar)
      case cast: Cast => parseAttributeReference(cast.child)
      case literal: Literal =>
        literal.dataType match {
          case StringType => s"""'${literal.value}'"""
          case _ => literal.value.toString
        }
    }
  }

  /** Collects the ids of all join conditions appearing in the given subplan. */
  private def findJoinKeys(plan: DPJoinPlan): mutable.HashSet[Integer] = {
    val hashSet = new mutable.HashSet[Integer]()
    plan match {
      case join: Join =>
        join.vars.foreach(x => hashSet.add(x))
        findJoinKeys(plan.left).foreach(hashSet.add)
        findJoinKeys(plan.right).foreach(hashSet.add)
      case _ =>
    }
    hashSet
  }

  /** Collects the (temporary) table names referenced by the given subplan. */
  private def findTableNames(plan: DPJoinPlan): mutable.HashSet[String] = {
    val names = new mutable.HashSet[String]()
    plan match {
      case join: MuSQLEJoin =>
        findTableNames(join.left).foreach(names.add)
        findTableNames(join.right).foreach(names.add)
      case move: MuSQLEMove =>
        names.add(move.tmpName)
      case scan: MuSQLEScan =>
        // Validation only: matchTableName throws when the scan's relation does
        // not map to a registered table; the resolved name is not used here.
        matchTableName(scan.table.asInstanceOf[SparkPlanVertex].plan, this.info)
        names.add(scan.tmpName)
    }
    names
  }

  /** Collects the filters attached to the scans of the given subplan. */
  private def findFiltersInSubQuery(plan: DPJoinPlan): mutable.HashSet[Filter] = {
    val hashSet = new mutable.HashSet[Filter]()
    plan match {
      case join: MuSQLEJoin =>
        findFiltersInSubQuery(plan.left).foreach(hashSet.add)
        findFiltersInSubQuery(plan.right).foreach(hashSet.add)
      case scan: MuSQLEScan =>
        val filter = scan.vertex.filter
        if (filter != null) {
          hashSet.add(filter)
        }
      case _: Move =>
      case _ => throw new Exception()
    }
    hashSet
  }

  /**
   * Finds the registered table all of whose attribute names occur in the given
   * relation's attribute set.
   *
   * @throws Exception when no registered table matches
   */
  def matchTableName(logicalRelation: LogicalRelation, info: MQueryInfo): String = {
    val candidateTableAttributes = logicalRelation.attributeMap.map(_._2.name).toSeq
    info.planToTableName.foreach { plan =>
      val tmpTableAttributes = plan._1.attributeMap.map(_._2.name)
      if (tmpTableAttributes.forall(candidateTableAttributes.contains)) {
        return plan._2
      }
    }
    throw new Exception("Cannot find matching table")
  }
}
| gsvic/MuSQLE | src/main/scala/gr/cslab/ece/ntua/musqle/sql/SQLCodeGen.scala | Scala | apache-2.0 | 13,297 |
package x
object Main:
  /** Entry point: expands the project-defined `Macro.genOp` macro with argument 10. */
  // NOTE(review): Macro.genOp is declared elsewhere in this project; the
  // behavior of the expansion is not visible from this file.
  def main(args: Array[String]):Unit =
    Macro.genOp(10)
| lampepfl/dotty | tests/pos-macros/i14137/Test_2.scala | Scala | apache-2.0 | 84 |
package graphx
import org.apache.spark.graphx._
import scala.reflect.ClassTag
/** Strongly connected components algorithm implementation. */
object StronglyConnectedComponents {
  /**
   * Compute the strongly connected component (SCC) of each vertex and return a graph with the
   * vertex value containing the lowest vertex id in the SCC containing that vertex.
   *
   * Source: https://github.com/apache/spark/blob/master/graphx/src/main/scala/org/apache/spark/graphx/lib/StronglyConnectedComponents.scala
   *
   * @param graph   the input graph; its vertex attributes are discarded
   * @param numIter upper bound on the number of outer iterations to run
   * @tparam VD vertex attribute type of the input graph (unused in the result)
   * @tparam ED edge attribute type, carried through to the result
   * @return a graph whose vertex attribute is the smallest vertex id in that
   *         vertex's SCC (the SCC "color")
   */
  def apply[VD: ClassTag, ED: ClassTag] (graph: Graph[VD, ED], numIter: Int): Graph[VertexId, ED] = {
    // the graph we update with final SCC ids, and the graph we return at the end
    var sccGraph = graph.mapVertices { case (vid, _) => vid }
    // graph we are going to work with in our iterations
    // vertex attribute: (current SCC color, finalized?)
    var sccWorkGraph = graph.mapVertices { case (vid, _) => (vid, false) }.cache()

    var numVertices = sccWorkGraph.numVertices
    var iter = 0
    while (sccWorkGraph.numVertices > 0 && iter < numIter) {
      // record the number of supersteps
      iter += 1
      // remove trivial SCCs from both sccGraph and sccWorkGraph
      do {
        numVertices = sccWorkGraph.numVertices
        // identify the vertices without incoming or outgoing edges
        sccWorkGraph = sccWorkGraph.outerJoinVertices(sccWorkGraph.outDegrees) {
          (vid, data, degreeOpt) => if (degreeOpt.isDefined) data else (vid, true)
        }.outerJoinVertices(sccWorkGraph.inDegrees) {
          (vid, data, degreeOpt) => if (degreeOpt.isDefined) data else (vid, true)
        }.cache()

        // get the colors of all trivial SCCs
        val finalVertices = sccWorkGraph.vertices
          .filter { case (vid, (scc, isFinal)) => isFinal }
          .mapValues { (vid, data) => data._1 }

        // write the colors of the trivial SCCs to sccGraph
        sccGraph = sccGraph.outerJoinVertices(finalVertices) {
          (vid, scc, opt) => opt.getOrElse(scc)
        }

        // only keep vertices that are not trivial SCCs
        sccWorkGraph = sccWorkGraph.subgraph(vpred = (vid, data) => !data._2).cache()
      } while (sccWorkGraph.numVertices < numVertices) // repeat until no trivial SCCs are left

      // reset the color of each vertex to itself
      sccWorkGraph = sccWorkGraph.mapVertices { case (vid, (color, isFinal)) => (vid, isFinal) }

      // collect min of all my neighbor's scc values, update if it's smaller than mine
      // then notify any neighbors with scc values larger than mine
      sccWorkGraph = Pregel[(VertexId, Boolean), ED, VertexId](
        sccWorkGraph, Long.MaxValue, activeDirection = EdgeDirection.Out)(
        (vid, myScc, neighborScc) => (math.min(myScc._1, neighborScc), myScc._2),
        e => {
          if (e.srcAttr._1 < e.dstAttr._1) {
            Iterator((e.dstId, e.srcAttr._1))
          } else {
            Iterator()
          }
        },
        (vid1, vid2) => math.min(vid1, vid2))

      // start at root of SCCs. Traverse values in reverse, notify all my neighbors
      // do not propagate if colors do not match!
      sccWorkGraph = Pregel[(VertexId, Boolean), ED, Boolean](
        sccWorkGraph, false, activeDirection = EdgeDirection.In)(
        // vertex is final if it is the root of a color
        // or it has the same color as a neighbor that is final
        (vid, myScc, existsSameColorFinalNeighbor) => {
          val isColorRoot = vid == myScc._1
          (myScc._1, myScc._2 || isColorRoot || existsSameColorFinalNeighbor)
        },
        // activate neighbor if they are not final, you are, and you have the same color
        e => {
          val sameColor = e.dstAttr._1 == e.srcAttr._1
          val onlyDstIsFinal = e.dstAttr._2 && !e.srcAttr._2
          if (sameColor && onlyDstIsFinal) {
            Iterator((e.srcId, true))
          } else {
            Iterator()
          }
        },
        (final1, final2) => final1 || final2)
    } // end of main loop
    sccGraph
  }
}
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp
import org.parboiled2._
import scala.util.{ Failure, Success }
/**
 * Parse Emacs Lisp into an `Sexp`. Other lisp variants may
 * require tweaking, e.g. Scheme's nil, infinity, NaN, etc.
 */
object SexpParser {
  /**
   * Parses a string into a single [[Sexp]].
   *
   * @param desc the Emacs Lisp source text
   * @return the parsed s-expression
   * @throws Exception if the input cannot be parsed; for parboiled2 parse
   *                   errors the message includes the formatted rule trace
   */
  def parse(desc: String): Sexp = {
    val parser = new SexpParser(desc)
    parser.SexpP.run() match {
      case Success(d) =>
        d
      case Failure(error: ParseError) =>
        // Format parboiled2's structured parse error, including rule traces,
        // into a readable message.
        val msg = parser.formatError(error, new ErrorFormatter(showTraces = true))
        throw new Exception("Failed to parse sexp: " + msg)
      case Failure(other) =>
        throw new Exception("Failed to parse sexp: ", other)
    }
  }

  // https://www.gnu.org/software/emacs/manual/html_node/elisp/Basic-Char-Syntax.html
  // https://www.gnu.org/software/emacs/manual/html_node/elisp/Syntax-for-Strings.html
  // Not supported: https://www.gnu.org/software/emacs/manual/html_node/elisp/Non_002dASCII-in-Strings.html
  // Emacs Lisp string escape sequences mapped to the characters they denote.
  private[sexp] val specialChars = Map[String, String](
    "\"" -> "\"",
    "a" -> "\u0007",
    "b" -> "\b",
    "t" -> "\t",
    "n" -> "\n",
    "v" -> "\u000b",
    "f" -> "\f",
    "r" -> "\r",
    "e" -> "\u001b",
    "s" -> " ",
    "d" -> "\u007f",
    "\\" -> "\\"
  )

  // Symbol used to expand the reader shorthand 'x into (quote x).
  val SexpQuote = SexpSymbol("quote")

  // Character classes shared by the parser rules in the companion class.
  val SymbolsPredicate = CharPredicate("+-*/_~!@$%^&=:<>{}")
  val NormalCharPredicate = CharPredicate.Printable -- "\"\\"
  val WhiteSpacePredicate = CharPredicate(" \n\r\t\f")
  val NotNewLinePredicate = CharPredicate.Printable -- '\n'
  val SymbolStartCharPredicate = CharPredicate.AlphaNum ++ SymbolsPredicate
  val SymbolBodyCharPredicate = SymbolStartCharPredicate ++ "."
  val PlusMinusPredicate = CharPredicate("+-")
  val ExpPredicate = CharPredicate("eE")
  val QuoteBackslash = CharPredicate("\"\\")
  val QuoteSlashBackSlash = QuoteBackslash ++ "/"
  val NCCharPredicate = CharPredicate.All -- "\"\\"
}
/**
 * Parse Emacs Lisp into an `Sexp`. Other lisp variants may
 * require tweaking, e.g. Scheme's nil, infinity, NaN, etc.
 */
class SexpParser(val input: ParserInput) extends Parser with StringBuilding {
  import SexpParser._

  // Top-level rule: a single s-expression.
  // NOTE: parboiled2 alternatives are ordered — atoms are tried first.
  private def SexpP: Rule1[Sexp] = rule {
    SexpAtomP | SexpListP | SexpEmptyList | SexpConsP | SexpQuotedP
  }

  // Dotted pair: (a . b)
  private def SexpConsP: Rule1[SexpCons] = rule {
    LeftBrace ~ SexpP ~ Whitespace ~ '.' ~ Whitespace ~ SexpP ~ RightBrace ~> {
      (x: Sexp, y: Sexp) => SexpCons(x, y)
    }
  }

  // Proper list: (a b c ...), requires at least one element
  // (the empty list is handled by SexpEmptyList).
  private def SexpListP: Rule1[Sexp] = rule {
    LeftBrace ~ SexpP ~ zeroOrMore(Whitespace ~ SexpP) ~ RightBrace ~> {
      (head: Sexp, tail: Seq[Sexp]) => { SexpList(head :: tail.toList) }
    }
  }

  private def SexpAtomP: Rule1[SexpAtom] = rule {
    SexpCharP | SexpStringP | SexpNaNP | SexpNumberP | SexpSymbolP
  }

  // Character literal: ?x
  private def SexpCharP: Rule1[SexpChar] = rule {
    '?' ~ NormalChar ~> { SexpChar }
  }

  // Double-quoted string, built incrementally via StringBuilding's sb buffer.
  def SexpStringP = rule { '"' ~ clearSB() ~ CharactersSB ~ '"' ~ push(SexpString(sb.toString)) }

  def CharactersSB = rule { zeroOrMore(NormalCharSB | '\\' ~ EscapedCharSB) }

  def NormalCharSB = rule { NCCharPredicate ~ appendSB() }

  // Backslash escape sequences, per Emacs Lisp string syntax.
  def EscapedCharSB = rule(
    QuoteSlashBackSlash ~ appendSB()
    | '\"' ~ appendSB('\"')
    | 'b' ~ appendSB('\b')
    | 's' ~ appendSB(' ')
    | 'f' ~ appendSB('\f')
    | 'n' ~ appendSB('\n')
    | 'r' ~ appendSB('\r')
    | 't' ~ appendSB('\t')
    | ' ' ~ appendSB("") // special emacs magic for comments \<space< and \<newline> are removed
    | '\n' ~ appendSB("")
    | 'a' ~ appendSB('\u0007') // bell
    | 'v' ~ appendSB('\u000b') // vertical tab
    | 'e' ~ appendSB('\u001b') // escape
    | 'd' ~ appendSB('\u007f') // DEL
  )

  // Numeric literal: optional sign, integer part, optional fraction/exponent.
  def SexpNumberP = rule {
    capture(Integer ~ optional(Frac) ~ optional(Exp)) ~> { s: String => SexpNumber(BigDecimal(s)) }
  }

  import CharPredicate.{ Digit, Digit19 }

  def Integer = rule {
    optional('-') ~ (Digit19 ~ Digits | Digit)
  }

  def Digits = rule {
    oneOrMore(Digit)
  }

  def Frac = rule {
    '.' ~ Digits
  }

  def Exp = rule {
    ExpPredicate ~ optional(PlusMinusPredicate) ~ Digits
  }

  // Emacs' spellings of the IEEE special values.
  private def SexpNaNP: Rule1[SexpAtom] = rule {
    "-1.0e+INF" ~ push(SexpNegInf) |
      "1.0e+INF" ~ push(SexpPosInf) |
      optional('-') ~ "0.0e+NaN" ~ push(SexpNaN)
  }

  // Reader quote: 'x expands to (quote . (x ...)).
  private def SexpQuotedP: Rule1[Sexp] = rule {
    '\'' ~ SexpP ~> { v: Sexp => SexpCons(SexpQuote, v) }
  }

  private def SexpSymbolP: Rule1[SexpAtom] = rule {
    // ? allowed at the end of symbol names
    capture(oneOrMore(SymbolStartCharPredicate) ~ zeroOrMore(SymbolBodyCharPredicate) ~ optional('?')) ~> { sym: String =>
      // The symbol nil is the empty list / false in Emacs Lisp.
      if (sym == "nil") SexpNil
      else SexpSymbol(sym)
    }
  }

  private def SexpEmptyList: Rule1[SexpNil.type] = rule {
    LeftBrace ~ RightBrace ~ push(SexpNil)
  }

  private def NormalChar: Rule1[Char] = rule {
    NormalCharPredicate ~ push(lastChar)
  }

  // Whitespace also swallows ;-comments, which run to end of line.
  private def Whitespace: Rule0 = rule {
    zeroOrMore(Comment | WhiteSpacePredicate)
  }

  private def Comment: Rule0 = rule {
    ';' ~ zeroOrMore(NotNewLinePredicate) ~ ("\n" | EOI)
  }

  private def LeftBrace: Rule0 = rule {
    Whitespace ~ '(' ~ Whitespace
  }

  private def RightBrace: Rule0 = rule {
    Whitespace ~ ')' ~ Whitespace
  }
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/s-express/src/main/scala/org/ensime/sexp/SexpParser.scala | Scala | apache-2.0 | 5,318 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.refspec
import java.lang.reflect.{Method, Modifier, InvocationTargetException}
import org.scalatest.{Suite, Finders, Resources}
import RefSpec.equalIfRequiredCompactify
import RefSpec.isTestMethod
/**
* Facilitates a “behavior-driven” style of development (BDD), in which tests
* are methods, optionally nested inside singleton objects defining textual scopes.
*
* <table><tr><td class="usage">
* <strong>Recommended Usage</strong>:
* Class <code>RefSpec</code> allows you to define tests as methods, which saves one function literal per test compared to style classes that represent tests as functions.
* Fewer function literals translates into faster compile times and fewer generated class files, which can help minimize build times.
* As a result, using <code>RefSpec</code> can be a good choice in large projects where build times are a concern as well as when generating large numbers of
* tests programatically via static code generators.
* </td></tr></table>
*
* <p>
* Here's an example <code>RefSpec</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec
*
* import org.scalatest.RefSpec
*
* class SetSpec extends RefSpec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* assertThrows[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* A <code>RefSpec</code> can contain <em>scopes</em> and tests. You define a scope
* with a nested singleton object, and a test with a method. The names of both <em>scope objects</em> and <em>test methods</em>
* must be expressed in back ticks and contain at least one space character.
 * </p>
*
* <p>
* A space placed in backticks is encoded by the Scala compiler as <code>$u0020</code>, as
* illustrated here:
* </p>
*
* <pre class="stREPL">
* scala> def `an example` = ()
* an$u0020example: Unit
* </pre>
*
* <p>
* <code>RefSpec</code> uses reflection to discover scope objects and test methods.
* During discovery, <code>RefSpec</code> will consider any nested singleton object whose name
* includes <code>$u0020</code> a scope object, and any method whose name includes <code>$u0020</code> a test method.
* It will ignore any singleton objects or methods that do not include a <code>$u0020</code> character. Thus, <code>RefSpec</code> would
* not consider the following singleton object a scope object:
* </p>
*
* <pre class="stHighlight">
* object `Set` { // Not discovered, because no space character
* }
* </pre>
*
* <p>
* You can make such a scope discoverable by placing a space at the end, like this:
* </p>
*
* <pre class="stHighlight">
* object `Set ` { // Discovered, because of the trailing space character
* }
* </pre>
*
* <p>
* Rather than performing this discovery during construction, when instance variables used by scope objects may as yet be uninitialized,
* <code>RefSpec</code> performs discovery lazily, the first time a method needing the results of discovery is invoked.
* For example, methods <code>run</code>, <code>runTests</code>, <code>tags</code>, <code>expectedTestCount</code>,
* <code>runTest</code>, and <code>testNames</code> all ensure that scopes and tests have already been discovered prior to doing anything
* else. Discovery is performed, and the results recorded, only once for each <code>RefSpec</code> instance.
* </p>
*
* <p>
* A scope names, or gives more information about, the <em>subject</em> (class or other entity) you are specifying
* and testing. In the previous example, <code>`A Set`</code>
* is the subject under specification and test. With each test name you provide a string (the <em>test text</em>) that specifies
* one bit of behavior of the subject, and a block of code (the body of the test method) that verifies that behavior.
* </p>
*
* <p>
* When you execute a <code>RefSpec</code>, it will send <a href="../events/Formatter.html"><code>Formatter</code></a>s in the events it sends to the
* <a href="../Reporter.html"><code>Reporter</code></a>. ScalaTest's built-in reporters will report these events in such a way
* that the output is easy to read as an informal specification of the <em>subject</em> being tested.
* For example, were you to run <code>SetSpec</code> from within the Scala interpreter:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* </pre>
*
* <p>
* You would see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stGreen"> - should have size 0</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <p>
* Or, to run just the test named <code>A Set when empty should have size 0</code>, you could pass that test's name, or any unique substring of the
* name, such as <code>"size 0"</code> or even just <code>"0"</code>. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSuite, "size 0")
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stGreen"> - should have size 0</span>
* </pre>
*
* <p>
* You can also pass to <code>execute</code> a <a href="../ConfigMap.html"><em>config map</em></a> of key-value
* pairs, which will be passed down into suites and tests, as well as other parameters that configure the run itself.
* For more information on running in the Scala interpreter, see the documentation for the
* <a href="../Shell.html">ScalaTest shell</a>.
* </p>
*
* <p>
* The <code>execute</code> method invokes a <code>run</code> method that takes two
* parameters. This <code>run</code> method, which actually executes the suite, will usually be invoked by a test runner, such
* as <a href="../run$.html"><code>run</code></a>, <a href="../tools/Runner$.html"><code>tools.Runner</code></a>, a build tool, or an IDE.
* </p>
*
* <p>
* The test methods shown in this example are parameterless. This is recommended even for test methods with obvious side effects. In production code
* you would normally declare no-arg, side-effecting methods as <em>empty-paren</em> methods, and call them with
* empty parentheses, to make it more obvious to readers of the code that they have a side effect. Whether or not a test method has
* a side effect, however, is a less important distinction than it is for methods in production code. Moreover, test methods are not
* normally invoked directly by client code, but rather through reflection by running the <code>Suite</code> that contains them, so a
* lack of parentheses on an invocation of a side-effecting test method would not normally appear in any client code. Given the empty
* parentheses do not add much value in the test methods case, the recommended style is to simply always leave them off.
* </p>
*
* <p>
* <em>Note: The approach of using backticks around test method names to make it easier to write descriptive test names was
* inspired by the <a href="http://github.com/SimpleFinance/simplespec" target="_blank"><code>SimpleSpec</code></a> test framework, originally created by Coda Hale.</em>
* </p>
*
* <a name="ignoredTests"></a><h2>Ignored tests</h2>
*
* <p>
* To support the common use case of temporarily disabling a test in a <code>RefSpec</code>, with the
* good intention of resurrecting the test at a later time, you can annotate the test method with <code>@Ignore</code>.
* For example, to temporarily disable the test method with the name <code>`should have size zero"</code>, just annotate
* it with <code>@Ignore</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.ignore
*
* import org.scalatest._
*
* class SetSpec extends RefSpec {
*
* object `A Set` {
* object `when empty` {
* @Ignore def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* assertThrows[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* If you run this version of <code>SetSpec</code> with:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* </pre>
*
* <p>
* It will run only the second test and report that the first test was ignored:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stYellow"> - should have size 0 !!! IGNORED !!!</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <p>
* If you wish to temporarily ignore an entire suite of tests, you can annotate the test class with <code>@Ignore</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.ignoreall
*
* import org.scalatest._
*
* @Ignore
* class SetSpec extends RefSpec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* assertThrows[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* When you mark a test class with a tag annotation, ScalaTest will mark each test defined in that class with that tag.
* Thus, marking the <code>SetSpec</code> in the above example with the <code>@Ignore</code> tag annotation means that both tests
* in the class will be ignored. If you run the above <code>SetSpec</code> in the Scala interpreter, you'll see:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* <span class="stGreen">SetSpec:
* A Set
* when empty</span>
* <span class="stYellow"> - should have size 0 !!! IGNORED !!!</span>
* <span class="stYellow"> - should produce NoSuchElementException when head is invoked !!! IGNORED !!!</span>
* </pre>
*
* <p>
* Note that marking a test class as ignored won't prevent it from being discovered by ScalaTest. Ignored classes
* will be discovered and run, and all their tests will be reported as ignored. This is intended to keep the ignored
* class visible, to encourage the developers to eventually fix and “un-ignore” it. If you want to
* prevent a class from being discovered at all, use the <a href="../DoNotDiscover.html"><code>DoNotDiscover</code></a> annotation instead.
* </p>
*
*
* <a name="informers"></a><h2>Informers</h2>
*
* <p>
 * One of the parameters to <code>RefSpec</code>'s <code>run</code> method is a <code>Reporter</code>, which
* will collect and report information about the running suite of tests.
* Information about suites and tests that were run, whether tests succeeded or failed,
* and tests that were ignored will be passed to the <code>Reporter</code> as the suite runs.
* Most often the reporting done by default by <code>RefSpec</code>'s methods will be sufficient, but
* occasionally you may wish to provide custom information to the <code>Reporter</code> from a test.
* For this purpose, an <a href="../Informer.html"><code>Informer</code></a> that will forward information to the current <code>Reporter</code>
* is provided via the <code>info</code> parameterless method.
* You can pass the extra information to the <code>Informer</code> via one of its <code>apply</code> methods.
* The <code>Informer</code> will then pass the information to the <code>Reporter</code> via an <a href="../events/InfoProvided.html"><code>InfoProvided</code></a> event.
* Here's an example in which the <code>Informer</code> returned by <code>info</code> is used implicitly by the
* <code>Given</code>, <code>When</code>, and <code>Then</code> methods of trait <a href="../GivenWhenThen.html"><code>GivenWhenThen</code></a>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.info
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends RefSpec with GivenWhenThen {
*
* object `A mutable Set` {
* def `should allow an element to be added` {
* Given("an empty mutable Set")
* val set = mutable.Set.empty[String]
*
* When("an element is added")
* set += "clarity"
*
* Then("the Set should have size 1")
* assert(set.size === 1)
*
* And("the Set should contain the added element")
* assert(set.contains("clarity"))
*
* info("That's all folks!")
* }
* }
* }
* </pre>
*
 * <p>
 * If you run this <code>RefSpec</code> from the interpreter, you will see the following output:
 * </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* <span class="stGreen">A mutable Set
* - should allow an element to be added
* + Given an empty mutable Set
* + When an element is added
* + Then the Set should have size 1
* + And the Set should contain the added element
* + That's all folks! </span>
* </pre>
*
* <a name="documenters"></a><h2>Documenters</h2>
*
* <p>
* <code>RefSpec</code> also provides a <code>markup</code> method that returns a <a href="../Documenter.html"><code>Documenter</code></a>, which allows you to send
* to the <code>Reporter</code> text formatted in <a href="http://daringfireball.net/projects/markdown/" target="_blank">Markdown syntax</a>.
* You can pass the extra information to the <code>Documenter</code> via its <code>apply</code> method.
* The <code>Documenter</code> will then pass the information to the <code>Reporter</code> via an <a href="../events/MarkupProvided.html"><code>MarkupProvided</code></a> event.
* </p>
*
* <p>
* Here's an example <code>RefSpec</code> that uses <code>markup</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.markup
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends RefSpec with GivenWhenThen {
*
* markup { """
*
* Mutable Set
* -----------
*
* A set is a collection that contains no duplicate elements.
*
* To implement a concrete mutable set, you need to provide implementations
* of the following methods:
*
* def contains(elem: A): Boolean
* def iterator: Iterator[A]
* def += (elem: A): this.type
* def -= (elem: A): this.type
*
* If you wish that methods like `take`,
* `drop`, `filter` return the same kind of set,
* you should also override:
*
* def empty: This
*
* It is also good idea to override methods `foreach` and
* `size` for efficiency.
*
* """ }
*
* object `A mutable Set` {
* def `should allow an element to be added` {
* Given("an empty mutable Set")
* val set = mutable.Set.empty[String]
*
* When("an element is added")
* set += "clarity"
*
* Then("the Set should have size 1")
* assert(set.size === 1)
*
* And("the Set should contain the added element")
* assert(set.contains("clarity"))
*
* markup("This test finished with a **bold** statement!")
* }
* }
* }
* </pre>
*
* <p>
* Although all of ScalaTest's built-in reporters will display the markup text in some form,
* the HTML reporter will format the markup information into HTML. Thus, the main purpose of <code>markup</code> is to
* add nicely formatted text to HTML reports. Here's what the above <code>SetSpec</code> would look like in the HTML reporter:
* </p>
*
* <img class="stScreenShot" src="../../../lib/spec.gif">
*
* <a name="notifiersAlerters"></a><h2>Notifiers and alerters</h2>
*
* <p>
* ScalaTest records text passed to <code>info</code> and <code>markup</code> during tests, and sends the recorded text in the <code>recordedEvents</code> field of
* test completion events like <code>TestSucceeded</code> and <code>TestFailed</code>. This allows string reporters (like the standard out reporter) to show
* <code>info</code> and <code>markup</code> text <em>after</em> the test name in a color determined by the outcome of the test. For example, if the test fails, string
* reporters will show the <code>info</code> and <code>markup</code> text in red. If a test succeeds, string reporters will show the <code>info</code>
* and <code>markup</code> text in green. While this approach helps the readability of reports, it means that you can't use <code>info</code> to get status
* updates from long running tests.
* </p>
*
* <p>
* To get immediate (<em>i.e.</em>, non-recorded) notifications from tests, you can use <code>note</code> (a <a href="../Notifier.html"><code>Notifier</code></a>) and <code>alert</code>
* (an <a href="../Alerter.html"><code>Alerter</code></a>). Here's an example showing the differences:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.note
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends RefSpec {
*
* object `A mutable Set` {
* def `should allow an element to be added` {
*
* info("info is recorded")
* markup("markup is *also* recorded")
* note("notes are sent immediately")
* alert("alerts are also sent immediately")
*
* val set = mutable.Set.empty[String]
* set += "clarity"
* assert(set.size === 1)
* assert(set.contains("clarity"))
* }
* }
* }
* </pre>
*
* <p>
* Because <code>note</code> and <code>alert</code> information is sent immediately, it will appear <em>before</em> the test name in string reporters, and its color will
* be unrelated to the ultimate outcome of the test: <code>note</code> text will always appear in green, <code>alert</code> text will always appear in yellow.
* Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* <span class="stGreen">SetSpec:
* A mutable Set
* + notes are sent immediately</span>
* <span class="stYellow">+ alerts are also sent immediately</span>
* <span class="stGreen">- should allow an element to be added
* + info is recorded
* + markup is *also* recorded</span>
* </pre>
*
* <p>
* Another example is <a href="../tools/Runner$.html#slowpokeNotifications">slowpoke notifications</a>.
* If you find a test is taking a long time to complete, but you're not sure which test, you can enable
* slowpoke notifications. ScalaTest will use an <code>Alerter</code> to fire an event whenever a test has been running
* longer than a specified amount of time.
* </p>
*
* <p>
* In summary, use <code>info</code> and <code>markup</code> for text that should form part of the specification output. Use
* <code>note</code> and <code>alert</code> to send status notifications. (Because the HTML reporter is intended to produce a
* readable, printable specification, <code>info</code> and <code>markup</code> text will appear in the HTML report, but
* <code>note</code> and <code>alert</code> text will not.)
* </p>
*
* <a name="pendingTests"></a><h2>Pending tests</h2>
*
* <p>
* A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
* pending tests is to facilitate a style of testing in which documentation of behavior is sketched
* out before tests are written to verify that behavior (and often, before the behavior of
* the system being tested is itself implemented). Such sketches form a kind of specification of
* what tests and functionality to implement later.
* </p>
*
* <p>
* To support this style of testing, a test can be given a name that specifies one
* bit of behavior required by the system being tested. The test can also include some code that
* sends more information about the behavior to the reporter when the tests run. At the end of the test,
* it can call method <code>pending</code>, which will cause it to complete abruptly with <a href="../exceptions/TestPendingException.html"><code>TestPendingException</code></a>.
* </p>
*
* <p>
* Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
* sent to the reporter when running the test can appear in the report of a test run.
* (The code of a pending test is executed just like any other test.) However, because the test completes abruptly
* with <code>TestPendingException</code>, the test will be reported as pending, to indicate
* the actual test, and possibly the functionality, has not yet been implemented.
* </p>
*
* <p>
* You can mark a test as pending in <code>RefSpec</code> by using "<code>{ pending }</code>" as the body of the test method,
* like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.pending
*
* import org.scalatest._
*
* class SetSpec extends RefSpec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` { pending }
*
* def `should produce NoSuchElementException when head is invoked` {
* assertThrows[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* (Note: “<code>pending</code>” is the body of the test. Thus the test contains just one statement, an invocation
* of the <code>pending</code> method, which throws <code>TestPendingException</code>.)
* If you run this version of <code>SetSpec</code> with:
* </p>
*
* <pre class="stREPL">
* scala> org.scalatest.run(new SetSpec)
* </pre>
*
* <p>
* It will run both tests, but report that test "<code>should have size 0</code>" is pending. You'll see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stYellow"> - should have size 0 (pending)</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <a name="taggingTests"></a><h2>Tagging tests</h2>
*
* <p>
* A <code>RefSpec</code>'s tests may be classified into groups by <em>tagging</em> them with string names. When executing
* a <code>RefSpec</code>, groups of tests can optionally be included and/or excluded. In this
* trait's implementation, tags are indicated by annotations attached to the test method. To
* create a new tag type to use in <code>RefSpec</code>s, simply define a new Java annotation that itself is annotated with
* the <code>org.scalatest.TagAnnotation</code> annotation.
* (Currently, for annotations to be
* visible in Scala programs via Java reflection, the annotations themselves must be written in Java.) For example,
* to create tags named <code>SlowTest</code> and <code>DbTest</code>, you would
* write in Java:
* </p>
*
* <pre>
* package org.scalatest.examples.spec.tagging;
* import java.lang.annotation.*;
* import org.scalatest.TagAnnotation;
*
* @TagAnnotation
* @Retention(RetentionPolicy.RUNTIME)
* @Target({ElementType.METHOD, ElementType.TYPE})
* public @interface SlowTest {}
*
* @TagAnnotation
* @Retention(RetentionPolicy.RUNTIME)
* @Target({ElementType.METHOD, ElementType.TYPE})
* public @interface DbTest {}
* </pre>
*
* <p>
* Given these annotations, you could tag <code>RefSpec</code> tests like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.tagging
*
* import org.scalatest.RefSpec
*
* class SetSpec extends RefSpec {
*
* object `A Set` {
* object `when empty` {
* @SlowTest
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* @SlowTest @DbTest
* def `should produce NoSuchElementException when head is invoked` {
* assertThrows[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* The <code>run</code> method takes a <a href="../Filter.html"><code>Filter</code></a>, whose constructor takes an optional
* <code>Set[String]</code> called <code>tagsToInclude</code> and a <code>Set[String]</code> called
* <code>tagsToExclude</code>. If <code>tagsToInclude</code> is <code>None</code>, all tests will be run
 * except those with tags listed in the
* <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests
* with tags mentioned in the <code>tagsToInclude</code> set, and not mentioned in <code>tagsToExclude</code>,
* will be run.
* </p>
*
* <p>
* A tag annotation also allows you to tag all the tests of a <code>RefSpec</code> in
* one stroke by annotating the class. For more information and examples, see the
* <a href="../Tag.html">documentation for class <code>Tag</code></a>.
* </p>
*
* <a name="sharedFixtures"></a>
* <h2>Shared fixtures</h2>
*
* <p>
* A test <em>fixture</em> is composed of the objects and other artifacts (files, sockets, database
* connections, <em>etc.</em>) tests use to do their work.
* When multiple tests need to work with the same fixtures, it is important to try and avoid
* duplicating the fixture code across those tests. The more code duplication you have in your
* tests, the greater drag the tests will have on refactoring the actual production code.
* </p>
*
* <p>
* ScalaTest recommends three techniques to eliminate such code duplication:
* </p>
*
* <ul>
* <li>Refactor using Scala</li>
* <li>Override <code>withFixture</code></li>
* <li>Mix in a <em>before-and-after</em> trait</li>
* </ul>
*
* <p>Each technique is geared towards helping you reduce code duplication without introducing
* instance <code>var</code>s, shared mutable objects, or other dependencies between tests. Eliminating shared
* mutable state across tests will make your test code easier to reason about and more amenable for parallel
* test execution.</p><p>The following sections
* describe these techniques, including explaining the recommended usage
* for each. But first, here's a table summarizing the options:</p>
*
* <table style="border-collapse: collapse; border: 1px solid black">
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Refactor using Scala when different tests need different fixtures.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#getFixtureMethods">get-fixture methods</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* The <em>extract method</em> refactor helps you create a fresh instances of mutable fixture objects in each test
* that needs them, but doesn't help you clean them up when you're done.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#fixtureContextObjects">fixture-context objects</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* By placing fixture methods and fields into traits, you can easily give each test just the newly created
* fixtures it needs by mixing together traits. Use this technique when you need <em>different combinations
* of mutable fixture objects in different tests</em>, and don't need to clean up after.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#loanFixtureMethods">loan-fixture methods</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
 * Factor out duplicate code with the <em>loan pattern</em> when different tests need different fixtures <em>that must be cleaned up afterwards</em>.
* </td>
* </tr>
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Override <code>withFixture</code> when most or all tests need the same fixture.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#withFixtureNoArgTest">
* <code>withFixture(NoArgTest)</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* <p>
* The recommended default approach when most or all tests need the same fixture treatment. This general technique
* allows you, for example, to perform side effects at the beginning and end of all or most tests,
* transform the outcome of tests, retry tests, make decisions based on test names, tags, or other test data.
* Use this technique unless:
* </p>
* <dl>
* <dd style="display: list-item; list-style-type: disc; margin-left: 1.2em;">Different tests need different fixtures (refactor using Scala instead)</dd>
* <dd style="display: list-item; list-style-type: disc; margin-left: 1.2em;">An exception in fixture code should abort the suite, not fail the test (use a <em>before-and-after</em> trait instead)</dd>
* <dd style="display: list-item; list-style-type: disc; margin-left: 1.2em;">You have objects to pass into tests (override <code>withFixture(<em>One</em>ArgTest)</code> instead)</dd>
* </dl>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#withFixtureOneArgTest">
* <code>withFixture(OneArgTest)</code>
* </a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use when you want to pass the same fixture object or objects as a parameter into all or most tests.
* </td>
* </tr>
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Mix in a before-and-after trait when you want an aborted suite, not a failed test, if the fixture code fails.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#beforeAndAfter"><code>BeforeAndAfter</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use this boilerplate-buster when you need to perform the same side-effects before and/or after tests, rather than at the beginning or end of tests.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#composingFixtures"><code>BeforeAndAfterEach</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use when you want to <em>stack traits</em> that perform the same side-effects before and/or after tests, rather than at the beginning or end of tests.
* </td>
* </tr>
*
* </table>
*
* <a name="getFixtureMethods"></a>
* <h4>Calling get-fixture methods</h4>
*
* <p>
* If you need to create the same mutable fixture objects in multiple tests, and don't need to clean them up after using them, the simplest approach is to write one or
* more <em>get-fixture</em> methods. A get-fixture method returns a new instance of a needed fixture object (or a holder object containing
* multiple fixture objects) each time it is called. You can call a get-fixture method at the beginning of each
* test that needs the fixture, storing the returned object or objects in local variables. Here's an example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.getfixture
*
* import org.scalatest.RefSpec
* import collection.mutable.ListBuffer
*
* class ExampleSpec extends RefSpec {
*
* class Fixture {
* val builder = new StringBuilder("ScalaTest is ")
* val buffer = new ListBuffer[String]
* }
*
* def fixture = new Fixture
*
* object `Testing ` {
* def `should be easy` {
* val f = fixture
* f.builder.append("easy!")
* assert(f.builder.toString === "ScalaTest is easy!")
* assert(f.buffer.isEmpty)
* f.buffer += "sweet"
* }
*
* def `should be fun` {
* val f = fixture
* f.builder.append("fun!")
* assert(f.builder.toString === "ScalaTest is fun!")
* assert(f.buffer.isEmpty)
* }
* }
* }
* </pre>
*
* <p>
* The “<code>f.</code>” in front of each use of a fixture object provides a visual indication of which objects
 * are part of the fixture, but if you prefer, you can import the members with &ldquo;<code>import f._</code>&rdquo; and use the names directly.
* </p>
*
* <p>
* If you need to configure fixture objects differently in different tests, you can pass configuration into the get-fixture method. For example, you could pass
* in an initial value for a mutable fixture object as a parameter to the get-fixture method.
* </p>
*
* <a name="fixtureContextObjects"></a>
* <h4>Instantiating fixture-context objects </h4>
*
* <p>
* An alternate technique that is especially useful when different tests need different combinations of fixture objects is to define the fixture objects as instance variables
* of <em>fixture-context objects</em> whose instantiation forms the body of tests. Like get-fixture methods, fixture-context objects are only
* appropriate if you don't need to clean up the fixtures after using them.
* </p>
*
 * <p>
 * To use this technique, you define instance variables initialized with fixture objects in traits and/or classes, then in each test instantiate an object that
* contains just the fixture objects needed by the test. Traits allow you to mix together just the fixture objects needed by each test, whereas classes
* allow you to pass data in via a constructor to configure the fixture objects. Here's an example in which fixture objects are partitioned into two traits
* and each test just mixes together the traits it needs:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.fixturecontext
*
* import collection.mutable.ListBuffer
* import org.scalatest.RefSpec
*
* class ExampleSpec extends RefSpec {
*
* trait Builder {
* val builder = new StringBuilder("ScalaTest is ")
* }
*
* trait Buffer {
* val buffer = ListBuffer("ScalaTest", "is")
* }
*
* object `Testing ` {
* // This test needs the StringBuilder fixture
* def `should be productive` {
* new Builder {
* builder.append("productive!")
* assert(builder.toString === "ScalaTest is productive!")
* }
* }
* }
*
* object `Test code` {
* // This test needs the ListBuffer[String] fixture
* def `should be readable` {
* new Buffer {
* buffer += ("readable!")
* assert(buffer === List("ScalaTest", "is", "readable!"))
* }
* }
*
* // This test needs both the StringBuilder and ListBuffer
* def `should be clear and concise` {
* new Builder with Buffer {
* builder.append("clear!")
* buffer += ("concise!")
* assert(builder.toString === "ScalaTest is clear!")
* assert(buffer === List("ScalaTest", "is", "concise!"))
* }
* }
* }
* }
* </pre>
*
* <a name="withFixtureNoArgTest"></a>
* <h4>Overriding <code>withFixture(NoArgTest)</code></h4>
*
* <p>
* Although the get-fixture method and fixture-context object approaches take care of setting up a fixture at the beginning of each
* test, they don't address the problem of cleaning up a fixture at the end of the test. If you just need to perform a side-effect at the beginning or end of
* a test, and don't need to actually pass any fixture objects into the test, you can override <code>withFixture(NoArgTest)</code>, one of ScalaTest's
* lifecycle methods defined in trait <a href="../Suite.html#lifecycle-methods"><code>Suite</code></a>.
* </p>
*
* <p>
* Trait <code>Suite</code>'s implementation of <code>runTest</code> passes a no-arg test function to <code>withFixture(NoArgTest)</code>. It is <code>withFixture</code>'s
* responsibility to invoke that test function. <code>Suite</code>'s implementation of <code>withFixture</code> simply
* invokes the function, like this:
* </p>
*
* <pre class="stHighlight">
* // Default implementation in trait Suite
* protected def withFixture(test: NoArgTest) = {
* test()
* }
* </pre>
*
* <p>
* You can, therefore, override <code>withFixture</code> to perform setup before and/or cleanup after invoking the test function. If
* you have cleanup to perform, you should invoke the test function inside a <code>try</code> block and perform the cleanup in
* a <code>finally</code> clause, in case an exception propagates back through <code>withFixture</code>. (If a test fails because of an exception,
* the test function invoked by withFixture will result in a [[org.scalatest.Failed <code>Failed</code>]] wrapping the exception. Nevertheless,
* best practice is to perform cleanup in a finally clause just in case an exception occurs.)
* </p>
*
* <p>
* The <code>withFixture</code> method is designed to be stacked, and to enable this, you should always call the <code>super</code> implementation
* of <code>withFixture</code>, and let it invoke the test function rather than invoking the test function directly. In other words, instead of writing
* “<code>test()</code>”, you should write “<code>super.withFixture(test)</code>”, like this:
* </p>
*
* <pre class="stHighlight">
* // Your implementation
* override def withFixture(test: NoArgTest) = {
* // Perform setup
* try super.withFixture(test) // Invoke the test function
* finally {
* // Perform cleanup
* }
* }
* </pre>
*
* <p>
 * Here's an example in which <code>withFixture(NoArgTest)</code> is used to take a snapshot of the working directory if a test fails,
 * and send that information to the reporter:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.noargtest
*
* import java.io.File
* import org.scalatest._
*
* class ExampleSpec extends RefSpec {
*
* override def withFixture(test: NoArgTest) = {
*
* super.withFixture(test) match {
* case failed: Failed =>
* val currDir = new File(".")
* val fileNames = currDir.list()
* info("Dir snapshot: " + fileNames.mkString(", "))
* failed
* case other => other
* }
* }
*
* object `This test` {
* def `should succeed` {
* assert(1 + 1 === 2)
* }
*
* def `should fail` {
* assert(1 + 1 === 3)
* }
* }
* }
* </pre>
*
* <p>
 * Running this version of <code>ExampleSpec</code> in the interpreter in a directory with two files, <code>hello.txt</code> and <code>world.txt</code>
* would give the following output:
* </p>
*
* <pre class="stREPL">
 * scala&gt; org.scalatest.run(new ExampleSpec)
 * <span class="stGreen">ExampleSpec:
* This test</span>
* <span class="stRed">- should fail *** FAILED ***
* 2 did not equal 3 (<console>:33)
* + Dir snapshot: hello.txt, world.txt </span>
* - should succeed
* </pre>
*
* <p>
* Note that the <a href="../Suite$NoArgTest.html"><code>NoArgTest</code></a> passed to <code>withFixture</code>, in addition to
* an <code>apply</code> method that executes the test, also includes the test name and the <a href="../ConfigMap.html">config
* map</a> passed to <code>runTest</code>. Thus you can also use the test name and configuration objects in your <code>withFixture</code>
* implementation.
* </p>
*
* <a name="loanFixtureMethods"></a>
* <h4>Calling loan-fixture methods</h4>
*
* <p>
* If you need to both pass a fixture object into a test <em>and</em> perform cleanup at the end of the test, you'll need to use the <em>loan pattern</em>.
* If different tests need different fixtures that require cleanup, you can implement the loan pattern directly by writing <em>loan-fixture</em> methods.
* A loan-fixture method takes a function whose body forms part or all of a test's code. It creates a fixture, passes it to the test code by invoking the
* function, then cleans up the fixture after the function returns.
* </p>
*
* <p>
* The following example shows three tests that use two fixtures, a database and a file. Both require cleanup after, so each is provided via a
* loan-fixture method. (In this example, the database is simulated with a <code>StringBuffer</code>.)
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.loanfixture
*
* import java.util.concurrent.ConcurrentHashMap
*
* object DbServer { // Simulating a database server
* type Db = StringBuffer
* private val databases = new ConcurrentHashMap[String, Db]
* def createDb(name: String): Db = {
* val db = new StringBuffer
* databases.put(name, db)
* db
* }
* def removeDb(name: String) {
* databases.remove(name)
* }
* }
*
* import org.scalatest.RefSpec
* import DbServer._
* import java.util.UUID.randomUUID
* import java.io._
*
* class ExampleSpec extends RefSpec {
*
* def withDatabase(testCode: Db => Any) {
* val dbName = randomUUID.toString
* val db = createDb(dbName) // create the fixture
* try {
* db.append("ScalaTest is ") // perform setup
* testCode(db) // "loan" the fixture to the test
* }
* finally removeDb(dbName) // clean up the fixture
* }
*
* def withFile(testCode: (File, FileWriter) => Any) {
* val file = File.createTempFile("hello", "world") // create the fixture
* val writer = new FileWriter(file)
* try {
* writer.write("ScalaTest is ") // set up the fixture
* testCode(file, writer) // "loan" the fixture to the test
* }
* finally writer.close() // clean up the fixture
* }
*
* object `Testing ` {
* // This test needs the file fixture
* def `should be productive` {
* withFile { (file, writer) =>
* writer.write("productive!")
* writer.flush()
* assert(file.length === 24)
* }
* }
* }
*
* object `Test code` {
* // This test needs the database fixture
* def `should be readable` {
* withDatabase { db =>
* db.append("readable!")
* assert(db.toString === "ScalaTest is readable!")
* }
* }
*
* // This test needs both the file and the database
* def `should be clear and concise` {
* withDatabase { db =>
* withFile { (file, writer) => // loan-fixture methods compose
* db.append("clear!")
* writer.write("concise!")
* writer.flush()
* assert(db.toString === "ScalaTest is clear!")
* assert(file.length === 21)
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* As demonstrated by the last test, loan-fixture methods compose. Not only do loan-fixture methods allow you to
* give each test the fixture it needs, they allow you to give a test multiple fixtures and clean everything up afterwards.
* </p>
*
* <p>
* Also demonstrated in this example is the technique of giving each test its own "fixture sandbox" to play in. When your fixtures
* involve external side-effects, like creating files or databases, it is a good idea to give each file or database a unique name as is
* done in this example. This keeps tests completely isolated, allowing you to run them in parallel if desired.
* </p>
*
* <a name="withFixtureOneArgTest"></a>
* <h4>Overriding <code>withFixture(OneArgTest)</code></h4>
*
* <p>
 * <code>fixture.Spec</code> is deprecated; please use <code>fixture.FunSpec</code> instead.
 * </p>
 *
* <a name="beforeAndAfter"></a>
* <h4>Mixing in <code>BeforeAndAfter</code></h4>
*
* <p>
* In all the shared fixture examples shown so far, the activities of creating, setting up, and cleaning up the fixture objects have been
* performed <em>during</em> the test. This means that if an exception occurs during any of these activities, it will be reported as a test failure.
* Sometimes, however, you may want setup to happen <em>before</em> the test starts, and cleanup <em>after</em> the test has completed, so that if an
* exception occurs during setup or cleanup, the entire suite aborts and no more tests are attempted. The simplest way to accomplish this in ScalaTest is
* to mix in trait <a href="../BeforeAndAfter.html"><code>BeforeAndAfter</code></a>. With this trait you can denote a bit of code to run before each test
 * with <code>before</code> and/or after each test with <code>after</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.beforeandafter
*
* import org.scalatest.RefSpec
* import org.scalatest.BeforeAndAfter
* import collection.mutable.ListBuffer
*
* class ExampleSpec extends RefSpec with BeforeAndAfter {
*
* val builder = new StringBuilder
* val buffer = new ListBuffer[String]
*
* before {
* builder.append("ScalaTest is ")
* }
*
* after {
* builder.clear()
* buffer.clear()
* }
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* }
* </pre>
*
* <p>
* Note that the only way <code>before</code> and <code>after</code> code can communicate with test code is via some side-effecting mechanism, commonly by
* reassigning instance <code>var</code>s or by changing the state of mutable objects held from instance <code>val</code>s (as in this example). If using
* instance <code>var</code>s or mutable objects held from instance <code>val</code>s you wouldn't be able to run tests in parallel in the same instance
* of the test class unless you synchronized access to the shared, mutable state. This is why ScalaTest's <code>ParallelTestExecution</code> trait extends
* <a href="../OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. By running each test in its own instance of the class, each test has its own copy of the instance variables, so you
* don't need to synchronize. If you mixed <code>ParallelTestExecution</code> into the <code>ExampleSuite</code> above, the tests would run in parallel just fine
* without any synchronization needed on the mutable <code>StringBuilder</code> and <code>ListBuffer[String]</code> objects.
* </p>
*
* <p>
* Although <code>BeforeAndAfter</code> provides a minimal-boilerplate way to execute code before and after tests, it isn't designed to enable stackable
* traits, because the order of execution would be non-obvious. If you want to factor out before and after code that is common to multiple test suites, you
 * should use trait <code>BeforeAndAfterEach</code> instead, as shown in the next section,
 * <a href="#composingFixtures">composing fixtures by stacking traits</a>.
* </p>
*
* <a name="composingFixtures"></a><h2>Composing fixtures by stacking traits</h2>
*
* <p>
* In larger projects, teams often end up with several different fixtures that test classes need in different combinations,
* and possibly initialized (and cleaned up) in different orders. A good way to accomplish this in ScalaTest is to factor the individual
* fixtures into traits that can be composed using the <em>stackable trait</em> pattern. This can be done, for example, by placing
* <code>withFixture</code> methods in several traits, each of which call <code>super.withFixture</code>. Here's an example in
* which the <code>StringBuilder</code> and <code>ListBuffer[String]</code> fixtures used in the previous examples have been
* factored out into two <em>stackable fixture traits</em> named <code>Builder</code> and <code>Buffer</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.composingwithfixture
*
* import org.scalatest._
* import collection.mutable.ListBuffer
*
* trait Builder extends TestSuiteMixin { this: TestSuite =>
*
* val builder = new StringBuilder
*
* abstract override def withFixture(test: NoArgTest) = {
* builder.append("ScalaTest is ")
* try super.withFixture(test) // To be stackable, must call super.withFixture
* finally builder.clear()
* }
* }
*
* trait Buffer extends TestSuiteMixin { this: TestSuite =>
*
* val buffer = new ListBuffer[String]
*
* abstract override def withFixture(test: NoArgTest) = {
* try super.withFixture(test) // To be stackable, must call super.withFixture
* finally buffer.clear()
* }
* }
*
* class ExampleSpec extends RefSpec with Builder with Buffer {
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* }
* </pre>
*
* <p>
* By mixing in both the <code>Builder</code> and <code>Buffer</code> traits, <code>ExampleSpec</code> gets both fixtures, which will be
* initialized before each test and cleaned up after. The order the traits are mixed together determines the order of execution.
* In this case, <code>Builder</code> is “super” to <code>Buffer</code>. If you wanted <code>Buffer</code> to be “super”
* to <code>Builder</code>, you need only switch the order you mix them together, like this:
* </p>
*
* <pre class="stHighlight">
* class Example2Spec extends RefSpec with Buffer with Builder
* </pre>
*
* <p>
* And if you only need one fixture you mix in only that trait:
* </p>
*
* <pre class="stHighlight">
* class Example3Spec extends RefSpec with Builder
* </pre>
*
* <p>
* Another way to create stackable fixture traits is by extending the <a href="../BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a>
* and/or <a href="../BeforeAndAfterAll.html"><code>BeforeAndAfterAll</code></a> traits.
* <code>BeforeAndAfterEach</code> has a <code>beforeEach</code> method that will be run before each test (like JUnit's <code>setUp</code>),
* and an <code>afterEach</code> method that will be run after (like JUnit's <code>tearDown</code>).
* Similarly, <code>BeforeAndAfterAll</code> has a <code>beforeAll</code> method that will be run before all tests,
* and an <code>afterAll</code> method that will be run after all tests. Here's what the previously shown example would look like if it
* were rewritten to use the <code>BeforeAndAfterEach</code> methods instead of <code>withFixture</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.composingbeforeandaftereach
*
* import org.scalatest._
* import org.scalatest.BeforeAndAfterEach
* import collection.mutable.ListBuffer
*
* trait Builder extends BeforeAndAfterEach { this: Suite =>
*
* val builder = new StringBuilder
*
* override def beforeEach() {
* builder.append("ScalaTest is ")
* super.beforeEach() // To be stackable, must call super.beforeEach
* }
*
* override def afterEach() {
* try super.afterEach() // To be stackable, must call super.afterEach
* finally builder.clear()
* }
* }
*
* trait Buffer extends BeforeAndAfterEach { this: Suite =>
*
* val buffer = new ListBuffer[String]
*
* override def afterEach() {
* try super.afterEach() // To be stackable, must call super.afterEach
* finally buffer.clear()
* }
* }
*
* class ExampleSpec extends RefSpec with Builder with Buffer {
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* }
* </pre>
*
* <p>
* To get the same ordering as <code>withFixture</code>, place your <code>super.beforeEach</code> call at the end of each
* <code>beforeEach</code> method, and the <code>super.afterEach</code> call at the beginning of each <code>afterEach</code>
* method, as shown in the previous example. It is a good idea to invoke <code>super.afterEach</code> in a <code>try</code>
* block and perform cleanup in a <code>finally</code> clause, as shown in the previous example, because this ensures the
* cleanup code is performed even if <code>super.afterEach</code> throws an exception.
* </p>
*
* <p>
* The difference between stacking traits that extend <code>BeforeAndAfterEach</code> versus traits that implement <code>withFixture</code> is
* that setup and cleanup code happens before and after the test in <code>BeforeAndAfterEach</code>, but at the beginning and
* end of the test in <code>withFixture</code>. Thus if a <code>withFixture</code> method completes abruptly with an exception, it is
* considered a failed test. By contrast, if any of the <code>beforeEach</code> or <code>afterEach</code> methods of <code>BeforeAndAfterEach</code>
* complete abruptly, it is considered an aborted suite, which will result in a <a href="../events/SuiteAborted.html"><code>SuiteAborted</code></a> event.
* </p>
*
* <a name="sharedTests"></a><h2>Shared tests</h2>
*
* <p>
* Because <code>RefSpec</code> represents tests as methods, you cannot share or otherwise dynamically generate tests. Instead, use static code generation
* if you want to generate tests in a <code>RefSpec</code>. In other words, write a program that statically generates the entire source file of
* a <code>RefSpec</code> subclass.
* </p>
*
* @author Bill Venners
*/
@Finders(Array("org.scalatest.finders.SpecFinder"))
class RefSpec extends RefSpecLike {

  /**
   * Returns a user-friendly string for this suite: the simple name of the class
   * (possibly simplified further by removing dollar signs added by the Scala
   * interpreter) and, when this suite contains nested suites, the result of
   * invoking <code>toString</code> on each nested suite, separated by commas
   * and surrounded by parentheses.
   *
   * @return a user-friendly string for this suite
   */
  override def toString: String = {
    // Formatting is delegated to the shared Suite helper; the None argument
    // reproduces the original call — TODO confirm its meaning (appears to be an
    // optional name override).
    Suite.suiteToString(None, this)
  }
}
private[scalatest] object RefSpec {
  /**
   * Decides whether a reflected method represents a test in a <code>RefSpec</code>.
   *
   * A test method must be a zero-parameter instance method whose name contains at
   * least one encoded space, and must not be a synthetic outer-instance accessor
   * or a compiler-generated nested method.
   */
  def isTestMethod(m: Method): Boolean = {
    // Static methods can never be tests; tests are invoked on a suite instance.
    val isInstanceMethod = !Modifier.isStatic(m.getModifiers())
    val hasNoParams = m.getParameterTypes.isEmpty
    // name must have at least one encoded space: "$u0020"
    val includesEncodedSpace = m.getName.indexOf("$u0020") >= 0
    // Synthetic accessor the compiler adds so inner classes can reach the outer instance.
    val isOuterMethod = m.getName.endsWith("$$outer")
    // Compiler-generated nested method names carrying a numeric suffix.
    // NOTE(review): the digit class is [1-9]+, so a suffix containing '0'
    // (e.g. "$10") would not match — confirm whether that is intentional.
    val isNestedMethod = m.getName.matches(".+\\\\$\\\\$.+\\\\$[1-9]+")
    //val isOuterMethod = m.getName.endsWith("$$$outer")
    // def maybe(b: Boolean) = if (b) "" else "!"
    // println("m.getName: " + m.getName + ": " + maybe(isInstanceMethod) + "isInstanceMethod, " + maybe(hasNoParams) + "hasNoParams, " + maybe(includesEncodedSpace) + "includesEncodedSpace")
    isInstanceMethod && hasNoParams && includesEncodedSpace && !isOuterMethod && !isNestedMethod
  }
  import java.security.MessageDigest
  import scala.io.Codec
  // The following compactify code is written based on scala compiler source code at:
  // https://github.com/scala/scala/blob/master/src/reflect/scala/reflect/internal/StdNames.scala#L47
  // Marker the Scala compiler inserts around the MD5 digest when it compactifies long names.
  private val compactifiedMarker = "$$$$"
  /**
   * Returns true if <code>compactified</code> equals <code>value</code>, either
   * directly or after applying the Scala compiler's name-compactification scheme
   * (prefix + marker + MD5 hex digest + marker + suffix) to <code>value</code>.
   */
  def equalIfRequiredCompactify(value: String, compactified: String): Boolean = {
    if (compactified.matches(".+\\\\$\\\\$\\\\$\\\\$.+\\\\$\\\\$\\\\$\\\\$.+")) {
      // Recover the unhashed prefix and suffix surrounding the embedded digest.
      val firstDolarIdx = compactified.indexOf("$$$$")
      val lastDolarIdx = compactified.lastIndexOf("$$$$")
      val prefix = compactified.substring(0, firstDolarIdx)
      val suffix = compactified.substring(lastDolarIdx + 4)
      val lastIndexOfDot = value.lastIndexOf(".")
      // NOTE(review): this drops the final character of value before taking the
      // segment after the last '.' — presumably mirroring the referenced compiler
      // code; confirm against StdNames.compactify before changing.
      val toHash =
        if (lastIndexOfDot >= 0)
          value.substring(0, value.length - 1).substring(value.lastIndexOf(".") + 1)
        else
          value
      // Digest the UTF-8 bytes with MD5 and render each byte as hex (no zero padding).
      val bytes = Codec.toUTF8(toHash)
      val md5 = MessageDigest.getInstance("MD5")
      md5.update(bytes)
      val md5chars = (md5.digest() map (b => (b & 0xFF).toHexString)).mkString
      // Rebuild the expected compactified form and compare it to the candidate.
      (prefix + compactifiedMarker + md5chars + compactifiedMarker + suffix) == compactified
    }
    else
      value == compactified
  }
}
| scalatest/scalatest | jvm/refspec/src/main/scala/org/scalatest/refspec/RefSpec.scala | Scala | apache-2.0 | 58,361 |
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import events.TestFailed
class FixtureFreeSpecSpec extends org.scalatest.Spec with PrivateMethodTester with SharedHelpers {
describe("A FixtureFreeSpec") {
it("should return the test names in order of registration from testNames") {
val a = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"Something" - {
"should do that" in { fixture => ()
}
"should do this" in { fixture =>
}
}
}
expect(List("Something should do that", "Something should do this")) {
a.testNames.iterator.toList
}
val b = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
}
expect(List[String]()) {
b.testNames.iterator.toList
}
val c = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"Something" - {
"should do this" in { fixture =>
}
"should do that" in { fixture =>
}
}
}
expect(List("Something should do this", "Something should do that")) {
c.testNames.iterator.toList
}
}
it("should throw DuplicateTestNameException if a duplicate test name registration is attempted") {
intercept[DuplicateTestNameException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"should test this" in { fixture => }
"should test this" in { fixture => }
}
}
intercept[DuplicateTestNameException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"should test this" in { fixture => }
"should test this" ignore { fixture => }
}
}
intercept[DuplicateTestNameException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"should test this" ignore { fixture => }
"should test this" ignore { fixture => }
}
}
intercept[DuplicateTestNameException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"should test this" ignore { fixture => }
"should test this" in { fixture => }
}
}
}
it("should pass in the fixture to every test method") {
val a = new FixtureFreeSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest) {
test(hello)
}
"Something" - {
"should do this" in { fixture =>
assert(fixture === hello)
}
"should do that" in { fixture =>
assert(fixture === hello)
}
}
}
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
assert(!rep.eventsReceived.exists(_.isInstanceOf[TestFailed]))
}
it("should throw NullPointerException if a null test tag is provided") {
// it
intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(null) in { fixture => }
}
}
val caught = intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(mytags.SlowAsMolasses, null) in { fixture => }
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) in { fixture => }
}
}
// ignore
intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(null) ignore { fixture => }
}
}
val caught2 = intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(mytags.SlowAsMolasses, null) ignore { fixture => }
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"hi" taggedAs(mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) ignore { fixture => }
}
}
}
it("should return a correct tags map from the tags method") {
val a = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" ignore { fixture => }
"test that" in { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.Ignore"))) {
a.tags
}
val b = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" in { fixture => }
"test that" ignore { fixture => }
}
expect(Map("test that" -> Set("org.scalatest.Ignore"))) {
b.tags
}
val c = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" ignore { fixture => }
"test that" ignore { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.Ignore"), "test that" -> Set("org.scalatest.Ignore"))) {
c.tags
}
val d = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses) in { fixture => }
"test that" taggedAs(mytags.SlowAsMolasses) ignore { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses"), "test that" -> Set("org.scalatest.Ignore", "org.scalatest.SlowAsMolasses"))) {
d.tags
}
val e = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" in { fixture => }
"test that" in { fixture => }
}
expect(Map()) {
e.tags
}
val f = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) in { fixture => }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
f.tags
}
val g = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) in { fixture => }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
g.tags
}
}
it("should return a correct tags map from the tags method using is (pending)") {
val a = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" ignore { fixture => }
"test that" is (pending)
}
expect(Map("test this" -> Set("org.scalatest.Ignore"))) {
a.tags
}
val b = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" is (pending)
"test that" ignore { fixture => }
}
expect(Map("test that" -> Set("org.scalatest.Ignore"))) {
b.tags
}
val c = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" ignore { fixture => }
"test that" ignore { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.Ignore"), "test that" -> Set("org.scalatest.Ignore"))) {
c.tags
}
val d = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) ignore { fixture => }
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses"), "test that" -> Set("org.scalatest.Ignore", "org.scalatest.SlowAsMolasses"))) {
d.tags
}
val e = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" is (pending)
"test that" is (pending)
}
expect(Map()) {
e.tags
}
val f = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) is (pending)
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
f.tags
}
val g = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) is (pending)
}
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
g.tags
}
}
class TestWasCalledSuite extends FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"run this" in { fixture => theTestThisCalled = true }
"run that, maybe" in { fixture => theTestThatCalled = true }
}
it("should execute all tests when run is called with testName None") {
val b = new TestWasCalledSuite
b.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(b.theTestThisCalled)
assert(b.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
val a = new TestWasCalledSuite
a.run(Some("run this"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
it("should report as ignored, and not run, tests marked ignored") {
val a = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" in { fixture => theTestThisCalled = true }
"test that" in { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
val b = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" ignore { fixture => theTestThisCalled = true }
"test that" in { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, repB, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.theTestThisCalled)
assert(b.theTestThatCalled)
val c = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" in { fixture => theTestThisCalled = true }
"test that" ignore { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, repC, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.theTestThisCalled)
assert(!c.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
val d = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" ignore { fixture => theTestThisCalled = true }
"test that" ignore { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, repD, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.theTestThisCalled)
assert(!d.theTestThatCalled)
}
it("should run a test marked as ignored if run is invoked with that testName") {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
val e = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" ignore { fixture => theTestThisCalled = true }
"test that" in { fixture => theTestThatCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(Some("test this"), repE, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repE.testIgnoredReceived)
assert(e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
it("should run only those tests selected by the tags to include and exclude sets") {
// Nothing is excluded
val a = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThisCalled = true }
"test that" in { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThisCalled = true }
"test that" in { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, repB, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, repB, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
// SlowAsMolasses is included. both tests should be included but one ignored
val d = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
"test this" taggedAs(mytags.SlowAsMolasses) ignore { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, repD, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
"test the other" in { fixture => theTestTheOtherCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, repE, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
"test the other" in { fixture => theTestTheOtherCalled = true }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, repF, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
"test the other" ignore { fixture => theTestTheOtherCalled = true }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, repG, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
"test the other" in { fixture => theTestTheOtherCalled = true }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, repH, new Stopper {}, Filter(None, Set("org.scalatest.FastAsLight")), Map(), None, new Tracker)
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded
val i = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { fixture => theTestThatCalled = true }
"test the other" in { fixture => theTestTheOtherCalled = true }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, repI, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) ignore { fixture => theTestThatCalled = true }
"test the other" in { fixture => theTestTheOtherCalled = true }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, repJ, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(!repI.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
val k = new FixtureFreeSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { fixture => theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) ignore { fixture => theTestThatCalled = true }
"test the other" ignore { fixture => theTestTheOtherCalled = true }
}
val repK = new TestIgnoredTrackingReporter
k.run(None, repK, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repK.testIgnoredReceived)
assert(!k.theTestThisCalled)
assert(!k.theTestThatCalled)
assert(!k.theTestTheOtherCalled)
}
    // Verifies FixtureFreeSpec.expectedTestCount under various Filter
    // combinations: no filter, ignored tests, tagsToInclude, tagsToExclude,
    // and aggregation across a Suites container.
    it("should return the correct test count from its expectedTestCount method") {
      // Two plain tests: both counted with an empty Filter.
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        "test this" in { fixture => }
        "test that" in { fixture => }
      }
      assert(a.expectedTestCount(Filter()) === 2)
      // One ignored test: only the runnable test counts.
      val b = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        "test this" ignore { fixture => }
        "test that" in { fixture => }
      }
      assert(b.expectedTestCount(Filter()) === 1)
      // Tag filtering: include-only FastAsLight vs exclude FastAsLight.
      val c = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        "test this" taggedAs(mytags.FastAsLight) in { fixture => }
        "test that" in { fixture => }
      }
      assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
      // A test may carry several tags; exclusion wins over inclusion.
      val d = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        "test this" taggedAs(mytags.FastAsLight, mytags.SlowAsMolasses) in { fixture => }
        "test that" taggedAs(mytags.SlowAsMolasses) in { fixture => }
        "test the other thing" in { fixture => }
      }
      assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
      assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
      assert(d.expectedTestCount(Filter()) === 3)
      // Same as d but with the untagged test ignored.
      val e = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        "test this" taggedAs(mytags.FastAsLight, mytags.SlowAsMolasses) in { fixture => }
        "test that" taggedAs(mytags.SlowAsMolasses) in { fixture => }
        "test the other thing" ignore { fixture => }
      }
      assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
      assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
      assert(e.expectedTestCount(Filter()) === 2)
      // A Suites container sums the counts of its nested suites: 2+1+2+3+2.
      val f = new Suites(a, b, c, d, e)
      assert(f.expectedTestCount(Filter()) === 10)
    }
    // Both `is (pending)` and a body ending in `pending` must fire
    // TestPending events; a body that runs assertions first still counts.
    it("should generate a TestPending message when the test body is (pending)") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        "should do this" is (pending)
        "should do that" in { fixture =>
          assert(fixture === hello)
        }
        "should do something else" in { fixture =>
          assert(fixture === hello)
          pending
        }
      }
      val rep = new EventRecordingReporter
      a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
      val tp = rep.testPendingEventsReceived
      // "should do this" and "should do something else" are pending.
      assert(tp.size === 2)
    }
    // Non-fatal throwables (AssertionError, Error, Throwable) are reported
    // as test failures rather than aborting the suite.
    it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
            "known in JDK 1.5, excluding AssertionError") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        "This FreeSpec" - {
          "should throw AssertionError" in { s => throw new AssertionError }
          "should throw plain old Error" in { s => throw new Error }
          "should throw Throwable" in { s => throw new Throwable }
        }
      }
      val rep = new EventRecordingReporter
      a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
      val tf = rep.testFailedEventsReceived
      assert(tf.size === 3)
    }
    // Fatal VM errors (e.g. OutOfMemoryError) must NOT be swallowed as test
    // failures: they propagate out of run and abort the suite.
    it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
            "AssertionError, causing Suites and Runs to abort.") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        "This FreeSpec" - {
          "should throw AssertionError" in { s => throw new OutOfMemoryError }
        }
      }
      intercept[OutOfMemoryError] {
        a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      }
    }
    // GivenWhenThen info calls inside a pending test must be reported with
    // aboutAPendingTest == Some(true); the one description-level event
    // ("A FreeSpec") is exempt from the check.
    it("should send InfoProvided events with aboutAPendingTest set to true for info " +
            "calls made from a test that is pending") {
      val a = new FixtureFreeSpec with GivenWhenThen {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        "A FreeSpec" - {
          "should do something" in { s =>
            given("two integers")
            when("one is subracted from the other")
            then("the result is the difference between the two numbers")
            pending
          }
        }
      }
      val rep = new EventRecordingReporter
      a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
      val ip = rep.infoProvidedEventsReceived
      // 1 description event + 3 given/when/then events.
      assert(ip.size === 4)
      for (event <- ip) {
        assert(event.message == "A FreeSpec" || event.aboutAPendingTest.isDefined && event.aboutAPendingTest.get)
      }
    }
    // Mirror case: info calls from a test that completes normally must carry
    // aboutAPendingTest == Some(false).
    it("should send InfoProvided events with aboutAPendingTest set to false for info " +
            "calls made from a test that is not pending") {
      val a = new FixtureFreeSpec with GivenWhenThen {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        "A FreeSpec" - {
          "should do something" in { s =>
            given("two integers")
            when("one is subracted from the other")
            then("the result is the difference between the two numbers")
            assert(1 + 1 === 2)
          }
        }
      }
      val rep = new EventRecordingReporter
      a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
      val ip = rep.infoProvidedEventsReceived
      assert(ip.size === 4)
      for (event <- ip) {
        assert(event.message == "A FreeSpec" || event.aboutAPendingTest.isDefined && !event.aboutAPendingTest.get)
      }
    }
    // A suite may mix no-arg test functions (`() => ...`) with one-arg
    // fixture functions (`s => ...`); both registration forms must run.
    it("should allow both tests that take fixtures and tests that don't") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) {
          test("Hello, world!")
        }
        var takesNoArgsInvoked = false
        var takesAFixtureInvoked = false
        "A FreeSpec" - {
          "should take no args" in { () => takesNoArgsInvoked = true }
          "should take a fixture" in { s => takesAFixtureInvoked = true }
        }
      }
      a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(a.testNames.size === 2, a.testNames)
      assert(a.takesNoArgsInvoked)
      assert(a.takesAFixtureInvoked)
    }
    // Test bodies whose last expression is non-Unit (here Boolean) must
    // still be accepted and executed.
    it("should work with test functions whose inferred result type is not Unit") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) {
          test("Hello, world!")
        }
        var takesNoArgsInvoked = false
        var takesAFixtureInvoked = false
        "A FreeSpec" - {
          "should take no args" in { () => takesNoArgsInvoked = true; true }
          "should take a fixture" in { s => takesAFixtureInvoked = true; true }
        }
      }
      assert(!a.takesNoArgsInvoked)
      assert(!a.takesAFixtureInvoked)
      a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(a.testNames.size === 2, a.testNames)
      assert(a.takesNoArgsInvoked)
      assert(a.takesAFixtureInvoked)
    }
    // Ignored tests with non-Unit bodies register, emit TestIgnored events,
    // and never execute their bodies.
    it("should work with ignored tests whose inferred result type is not Unit") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        var takeNoArgsInvoked = false
        var takeAFixtureInvoked = false
        "A FreeSpec" - {
          "should take no args" ignore { () => takeNoArgsInvoked = true; "hi" }
          "should take a fixture" ignore { s => takeAFixtureInvoked = true; 42 }
        }
      }
      assert(!a.takeNoArgsInvoked)
      assert(!a.takeAFixtureInvoked)
      val reporter = new EventRecordingReporter
      a.run(None, reporter, new Stopper {}, Filter(), Map(), None, new Tracker)
      assert(reporter.testIgnoredEventsReceived.size === 2)
      assert(!a.takeNoArgsInvoked)
      assert(!a.takeAFixtureInvoked)
    }
    // Dispatch rules between the two withFixture overloads: no-arg test
    // functions go to withFixture(NoArgTest), one-arg functions to
    // withFixture(OneArgTest).
    it("should pass a NoArgTest to withFixture for tests that take no fixture") {
      class MySpec extends FixtureFreeSpec {
        type FixtureParam = String
        var aNoArgTestWasPassed = false
        var aOneArgTestWasPassed = false
        override def withFixture(test: NoArgTest) {
          aNoArgTestWasPassed = true
        }
        def withFixture(test: OneArgTest) {
          aOneArgTestWasPassed = true
        }
        "do something" in { () =>
          assert(1 + 1 === 2)
        }
      }
      val s = new MySpec
      s.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(s.aNoArgTestWasPassed)
      assert(!s.aOneArgTestWasPassed)
    }
    // Converse of the previous test: a fixture-taking test must use the
    // OneArgTest overload only.
    it("should not pass a NoArgTest to withFixture for tests that take a Fixture") {
      class MySpec extends FixtureFreeSpec {
        type FixtureParam = String
        var aNoArgTestWasPassed = false
        var aOneArgTestWasPassed = false
        override def withFixture(test: NoArgTest) {
          aNoArgTestWasPassed = true
        }
        def withFixture(test: OneArgTest) {
          aOneArgTestWasPassed = true
        }
        "do something" in { fixture =>
          assert(1 + 1 === 2)
        }
      }
      val s = new MySpec
      s.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(!s.aNoArgTestWasPassed)
      assert(s.aOneArgTestWasPassed)
    }
    // The default withFixture(NoArgTest) must actually invoke the supplied
    // no-arg test function.
    it("should pass a NoArgTest that invokes the no-arg test when the " +
            "NoArgTest's no-arg apply method is invoked") {
      class MySpec extends FixtureFreeSpec {
        type FixtureParam = String
        var theNoArgTestWasInvoked = false
        def withFixture(test: OneArgTest) {
          // Shouldn't be called, but just in case don't invoke a OneArgTest
        }
        "do something" in { () =>
          theNoArgTestWasInvoked = true
        }
      }
      val s = new MySpec
      s.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(s.theNoArgTestWasInvoked)
    }
    // OneArgTest.name must carry the registered test name.
    it("should pass the correct test name in the OneArgTest passed to withFixture") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        var correctTestNameWasPassed = false
        def withFixture(test: OneArgTest) {
          correctTestNameWasPassed = test.name == "do something"
          test("hi")
        }
        "do something" in { fixture => }
      }
      a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
      assert(a.correctTestNameWasPassed)
    }
    // OneArgTest.configMap must carry the config map given to run.
    it("should pass the correct config map in the OneArgTest passed to withFixture") {
      val a = new FixtureFreeSpec {
        type FixtureParam = String
        var correctConfigMapWasPassed = false
        def withFixture(test: OneArgTest) {
          correctConfigMapWasPassed = (test.configMap == Map("hi" -> 7))
          test("hi")
        }
        "do something" in { fixture => }
      }
      a.run(None, SilentReporter, new Stopper {}, Filter(), Map("hi" -> 7), None, new Tracker())
      assert(a.correctConfigMapWasPassed)
    }
    // Registering describe/in/ignore clauses from INSIDE a running test is a
    // nesting-rule violation; each case below must surface as a
    // TestFailedException for the offending test rather than registering
    // anything new.
    describe("(when a nesting rule has been violated)") {
      // describe nested inside a running test.
      it("should, if they call a describe from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "in the wrong place, at the wrong time" - {
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // describe containing a nested test, inside a running test.
      it("should, if they call a describe with a nested it from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "in the wrong place, at the wrong time" - {
              "should never run" in { fixture =>
                assert(1 === 1)
              }
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // bare nested test registration inside a running test.
      it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "should never run" in { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // tagged nested test registration inside a running test.
      it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "should never run" taggedAs(mytags.SlowAsMolasses) in { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // describe containing a nested ignore, inside a running test.
      it("should, if they call a describe with a nested ignore from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "in the wrong place, at the wrong time" - {
              "should never run" ignore { fixture =>
                assert(1 === 1)
              }
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // bare nested ignore inside a running test.
      it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "should never run" ignore { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
      // tagged nested ignore inside a running test.
      it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FixtureFreeSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          "should blow up" in { fixture =>
            "should never run" taggedAs(mytags.SlowAsMolasses) ignore { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "should blow up")
      }
    }
}
}
| JimCallahan/Graphics | external/scalatest/src/test/scala/org/scalatest/fixture/FixtureFreeSpecSpec.scala | Scala | apache-2.0 | 41,221 |
/**
*
* SecretKeyTest
* Ledger wallet
*
* Created by Pierre Pollastri on 16/10/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.core.crypto
import android.test.InstrumentationTestCase
import co.ledger.wallet.core.security.Keystore
class SecretKeyTest extends InstrumentationTestCase {
  /** Stores a secret under the fixed alias "TheAlias" via [[SecretKey.create]].
    * Passes when creation completes without throwing.
    */
  def testShouldStoreSecretKeyAndRetrieveIt(): Unit = {
    implicit val context = getInstrumentation.getContext
    val secretBytes = "This is a secret".getBytes
    SecretKey.create(context, Keystore.defaultInstance, "TheAlias", secretBytes)
  }
}
| LedgerHQ/ledger-wallet-android | app/src/androidTest/scala/co/ledger/wallet/core/crypto/SecretKeyTest.scala | Scala | mit | 1,656 |
package models.admin.reports
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.EitherT._
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import scala.collection.immutable.ListMap
import net.liftweb.json._
import io.megam.util.Time
import org.joda.time.{DateTime, Period}
import org.joda.time.format.DateTimeFormat
import models.Constants.{JSON_CLAZ, REPORTSCLAZ, REPORT_SALES}
import models.admin.{ReportInput, ReportResult}
//Reporter needs to extends trait Reporter or else you'll only get a NoOpReport
//Every report will have 3 steps (build, aggregate, and generate report data)
//Reporter needs to extends trait Reporter or else you'll only get a NoOpReport
//Every report will have 3 steps (build, aggregate, and generate report data)
class Sales(ri: ReportInput) extends Reporter {

  // Builds the sales report for every org over the input date range:
  // fetch assemblies + billed histories, aggregate, and wrap the rows
  // into a ReportResult.
  def report: ValidationNel[Throwable, Option[ReportResult]] = {
    for {
      abt <- build(ri.start_date, ri.end_date) leftMap { err: NonEmptyList[Throwable] ⇒ err }
      sal <- aggregate(abt).successNel
    } yield {
      ReportResult(REPORT_SALES, sal.map(_.map(_.toKeyList)), REPORTSCLAZ, Time.now.toString).some
    }
  }

  // Fetches the raw inputs (assemblies, billed histories) for the date range.
  def build(startdate: String, enddate: String): ValidationNel[Throwable,Tuple2[Seq[models.tosca.AssemblyResult],
    Seq[models.billing.BilledhistoriesResult]]] = {
    for {
      a <- (models.tosca.Assembly.findByDateRange(startdate, enddate) leftMap { err: NonEmptyList[Throwable] ⇒ err })
      b <- (models.billing.Billedhistories.findByDateRange(startdate, enddate) leftMap { err: NonEmptyList[Throwable] ⇒ err })
    } yield {
      (a, b)
    }
  }

  // Same as `report`, but scoped to a single account (email) and org.
  def reportFor(email: String, org: String): ValidationNel[Throwable, Option[ReportResult]] = {
    for {
      abt <- buildFor(email, org, ri.start_date, ri.end_date) leftMap { err: NonEmptyList[Throwable] ⇒ err }
      sal <- aggregate(abt).successNel
    } yield {
      ReportResult(REPORT_SALES, sal.map(_.map(_.toKeyList)), REPORTSCLAZ, Time.now.toString).some
    }
  }

  // Account/org-scoped variant of `build`.
  // NOTE(review): billed histories are looked up by email only, not org —
  // confirm this is intentional.
  def buildFor(email: String, org: String, startdate: String, enddate: String): ValidationNel[Throwable,Tuple2[Seq[models.tosca.AssemblyResult],
    Seq[models.billing.BilledhistoriesResult]]] = {
    for {
      a <- (models.tosca.Assembly.findByDateRangeFor(email, org, startdate, enddate) leftMap { err: NonEmptyList[Throwable] ⇒ err })
      b <- (models.billing.Billedhistories.findByDateRangeFor(email, startdate, enddate) leftMap { err: NonEmptyList[Throwable] ⇒ err })
    } yield {
      (a, b)
    }
  }

  // Groups billed histories per assembly id, then joins them onto the
  // assembly list to produce one SalesResult row per assembly.
  def aggregate(abt: Tuple2[Seq[models.tosca.AssemblyResult], Seq[models.billing.BilledhistoriesResult]]) = {
    for {
      ba <- (abt._2.groupBy(_.assembly_id).map { case (k,v) => (k -> BillingAggregate(k,v)) }).some
      sa <- SalesAggregate(abt._1, ba).some
    } yield {
      sa.aggregate
    }
  }
}
// Joins each assembly onto its billing aggregate (keyed by assembly id),
// producing one SalesResult row per assembly. Assemblies without billing
// data get a row with empty dates and cost.
case class SalesAggregate(als: Seq[models.tosca.AssemblyResult], bh: Map[String, BillingAggregate]) {
  lazy val aggregate: Seq[SalesResult] = als.map { al =>
    bh.get(al.id)
      .map(bhi => SalesResult(al.id, al.name, al.status, al.state, bhi.start_date, bhi.end_date, bhi.sum.toString))
      .getOrElse(SalesResult(al.id, al.name, al.status, al.state, "", "", ""))
  }
}
// Aggregates the billed histories of one assembly (`aid`): the overall
// billing window [start_date, end_date] and the total billed amount.
// Dates are compared lexicographically as strings, which matches
// chronological order for ISO-style timestamps.
// Precondition: `b` is non-empty (callers build instances via groupBy).
case class BillingAggregate(aid: String, b: Seq[models.billing.BilledhistoriesResult]) {
  private lazy val start_dates = b.map(_.start_date.toString)
  // Earliest billing start (equivalent to the old sortBy-identity + head).
  lazy val start_date = start_dates.min
  private lazy val end_dates = b.map(_.end_date.toString)
  // Latest billing end. The previous code took the HEAD of an ascending
  // sort (i.e. the earliest end date), which under-reported the window.
  lazy val end_date = end_dates.max
  // Total billed amount; unparseable amounts count as 0.0 instead of failing.
  lazy val sum = (b.map { x => scala.util.Try(x.billing_amount.toDouble).toOption.getOrElse(0.0) }).sum
  override def toString() = "[" + aid + " sales from " + start_date + " to " + end_date + " is:" + sum + "]"
}
// One row of the sales report for a single assembly: billing window, cost
// and runtime hours, rendered as a key/value list for charting.
case class SalesResult(asm_id: String, asm_name: String, status: String, state: String, startdate: String, enddate: String, cost: String) {

  // Keys emitted in the rendered key/value list.
  val X = "x"
  val Y = "y"
  val NAME = "name"
  val STATUS = "status"
  val START_DATE = "start_date"
  val END_DATE = "end_date"
  val NUMBER_OF_HOURS = "number_of_hours"

  // Null-safe emptiness check (Option(null) => None => true).
  def isEmpty(x: String) = Option(x).forall(_.isEmpty)

  // A missing start or end date means the hour count is forced to zero.
  lazy val shouldZero = isEmpty(startdate) || isEmpty(enddate)

  // Chart label of the form "Month-Day" (e.g. "March-12"); falls back to the
  // raw value when no start date is present. Uses the null-safe isEmpty
  // (the old `startdate.length > 0` would NPE on a null start date).
  lazy val shortenedDate = {
    if (!isEmpty(startdate)) {
      val dt = DateTime.parse(startdate)
      dt.monthOfYear.getAsText + "-" + dt.dayOfMonth.getAsText
    } else { startdate }
  }

  // Whole hours between start and end date, "0" when either is missing.
  // Both bounds are now parsed the same way (the end date previously went
  // through `new DateTime(enddate)` while the start used DateTime.parse).
  lazy val calculateHours = {
    if (shouldZero) { "0" } else {
      val hoursObject = org.joda.time.Hours.hoursBetween(
        DateTime.parse(startdate), DateTime.parse(enddate))
      hoursObject.getHours.toString
    }
  }

  // NOTE(review): `state` is carried but never emitted here — confirm
  // whether it should appear in the key list.
  def toKeyList: models.tosca.KeyValueList = models.tosca.KeyValueList(
    ListMap((X -> shortenedDate),
      (Y -> cost),
      (NAME -> asm_name),
      (NUMBER_OF_HOURS -> calculateHours),
      (STATUS -> status)))
}
| VirtEngine/gateway | app/models/admin/reports/Sales.scala | Scala | mit | 4,975 |
package com.agecomp
// Base type for all ECS components. Carries a mutable id assigned by the
// framework; -1 marks a component not yet registered.
abstract class Component {
  private var _id: Int = -1
  def id: Int = _id
  def id_=(value: Int): Unit = { _id = value }
  // Cleanup hook for subclasses; does nothing by default.
  def destroy = {}
}
// Marker component type for input-related entities; adds no state.
class InputComponent extends Component
// Marker component type for output-related entities; adds no state.
class OutputComponent extends Component
// Component holding a display label and a unique name; when no unique name
// is supplied the label itself is used as the unique name.
class EntityLabel(val label: String, var unique: String = "") extends Component {
  if (unique == "") unique = label
}
| abdielbrilhante/agecomp | src/main/scala/com/agecomp/Component.scala | Scala | mit | 358 |
package com.example
import com.example.actors.SystemActor
import com.example.models._
import akka.actor.ActorSystem
// Entry point of the places service: owns a dedicated actor system and a
// single top-level SystemActor that receives all incoming locations.
class PlacesService {
  // NOTE(review): the actor system is never shut down here — confirm
  // lifecycle ownership if multiple PlacesService instances are created.
  val system = ActorSystem("place-service-system")
  val systemActor = system.actorOf(SystemActor.props, "systemActor")
  // Fire-and-forget: hands the location to the system actor (tell, no reply).
  def processLocation(location: Location) = {
    systemActor ! location
  }
}
object PlacesService {
  // Convenience factory: PlacesService() instead of new PlacesService().
  def apply() = new PlacesService()
}
| divanvisagie/lazy-places | src/main/scala/com/example/services/PlacesService.scala | Scala | apache-2.0 | 404 |
package ml.wolfe.nlp.io
import scala.collection.mutable.ArrayBuffer
/**
* Created by narad on 8/1/14.
*/
/** A node of a parsed protobuf-text-style tree.
  * `search` flattens the node and all of its descendants, pre-order.
  */
abstract class ProtoNode {
  def search: Seq[ProtoNode]
}

/** Leaf node: a single `field: value` line. */
case class ProtoValueNode(field: String, value: String) extends ProtoNode {
  def search: Seq[ProtoNode] = Seq(this)
}

/** Interior node: a `field { ... }` block holding child nodes. */
case class ProtoParentNode(field: String, value: Seq[ProtoNode]) extends ProtoNode {
  // Pre-order: this node first, then each child subtree in order.
  def search: Seq[ProtoNode] = this +: value.flatMap(_.search)
}
// Recursive-descent parser for a protobuf-text-like format:
//   field: value
//   field { ...nested fields... }
// NOTE(review): the ldelim/rdelim constructor parameters are never consulted;
// the patterns and lookAhead hardcode '{' / '}'. Confirm whether they should
// actually drive the delimiters.
class ProtoReader(ldelim: String = "(", rdelim: String = ")") {

  // Matches a leaf line "field: value".
  val VALUE_PATTERN = """ *([^:]+): (.*)""".r
  // Matches the opening line of a nested message, "field {".
  val PARENT_PATTERN = """ *([^ ]+) \\{""".r

  // Parses `text` into a sequence of sibling nodes, recursing into each
  // "field {" block. Lines that match neither pattern are skipped.
  def parse(text: String): Seq[ProtoNode] = {
    var lines = text.split("\\n")
    val nodes = new ArrayBuffer[ProtoNode]
    while (lines.nonEmpty) {
      lines.head match {
        case VALUE_PATTERN(field, value) => {
          nodes += ProtoValueNode(field, value)
          lines = lines.tail
        }
        case PARENT_PATTERN(field) => {
          val name = lines.head.split("\\\\{").head.trim
          // Collect the block's interior lines and parse them recursively;
          // then skip past the interior plus its closing brace.
          val inner = lookAhead(lines)
          nodes += ProtoParentNode(name, parse(inner.mkString("\\n")))
          lines = lines.slice(inner.size+1, lines.size)
        }
        case _ => lines = lines.tail
      }
    }
    nodes.toSeq
  }

  // Given lines whose head opens a block, returns the interior lines up to
  // (but excluding) the matching closing brace, tracking nesting depth.
  def lookAhead(lines: Array[String]): Array[String] = {
    var count = 1
    var i = 1
    while (i < lines.size && count > 0) {
      if (lines(i).contains("{")) count += 1
      if (lines(i).contains("}")) count -= 1
      i += 1
    }
    lines.slice(1, i - 1)
  }
}
}
// Manual smoke test: parses a small nested sample and prints the resulting
// tree plus the flattened (pre-order) node list.
object ProtoReader {
  def main(args: Array[String]): Unit = {
    val sreader = new ProtoReader(ldelim = "{", rdelim = "}")
    val str =
      """test {
        f1: v1
        f2: v2
        inner {
          if1: iv1
          deeper {
            df1: dv1
          }
        }
       }
      """.stripMargin
    println("Begin...")
    sreader.parse(str).foreach { n =>
      println(n)
    }
    println("\\nNodes:")
    println(sreader.parse(str).head.search.mkString("\\n"))
  }
}
}
//
// def readfromFile(filename: String): Iterator[String] = {
// val text = scala.io.Source.fromFile(filename).getLines().mkString(" ")
// Iterator.continually(readNext(text)).takeWhile(_ != None)
// }
//
// def readfromFile[T](filename: String, parseString: String => T): Iterator[T] = {
// val text = scala.io.Source.fromFile(filename).getLines().mkString(" ")
// Iterator.continually(parseString(readNext(text))).takeWhile(_ != null)
// }
//
// def readFromString(str: String): Iterator[ProtoNode] = {
// Iterator.continually(parseSExp(str)).takeWhile(_ != null)
// }
// def parseSExp(text: String): SExpNode = {
// println(text.replaceAll("\\n", " "))
// var stack = List[Int]()
// var level = 0
// var processed = 0
// var ntext = ""
// var children = new ArrayBuffer[SExpNode]
// while (processed < text.size) {
// println(processed)
// val letter = text.substring(processed, processed+1)
// if (letter == ldelim) {
// level += 1
// if (processed == 0) {
// println("starting")
// }
// else {
// val child = parseSExp(text.substring(processed))
// println("child: " + child.text)
// children += child
// // stack = processed +: stack
// // level += 1
// }
// }
// else if (letter == rdelim) {
// SExpNode(ntext, children.toList)
// }
// processed += 1
// }
// null // SExpNode(ntext, children)
// }
// def parseSExp(text: String): SExpNode = {
// var count = 0
// println("size = " + text.size)
// println("c: " + count)
// val start = processed
// if (start >= text.size)
// return null
// var letter = text.substring(processed, processed+1)
// while (processed < text.size) {
// println(processed + " : " + count)
// processed += 1
// letter = text.substring(processed-1, processed)
// if (letter == ldelim) {
// count += 1
// }
// else if (letter == rdelim) {
// count -= 1
// if (count == 0) {
// return text.substring(start, processed).trim
// }
// }
// }
// null
// }
// case clas | wolfe-pack/wolfe | wolfe-nlp/src/main/scala/ml/wolfe/nlp/io/SExpressionReader.scala | Scala | apache-2.0 | 4,236 |
/*
* Copyright University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.mesh
import scalismo.ScalismoTestSuite
import scalismo.common.{PointId, UnstructuredPoints}
import scalismo.geometry.{_3D, EuclideanVector3D, Point, Point3D}
import scalismo.transformations.{Rotation, Translation}
import scalismo.utils.Random
class TetrahedralMeshTest extends ScalismoTestSuite {
implicit val rng = Random(42L)
def createTetrahedronsInUnitCube(): TetrahedralMesh3D = {
// points around unit cube
val points = IndexedSeq(Point(0, 0, 0),
Point(1, 0, 0),
Point(1, 1, 0),
Point(0, 1, 0),
Point(0, 0, 1),
Point(1, 0, 1),
Point(1, 1, 1),
Point(0, 1, 1))
val domain = UnstructuredPoints(points)
// cells covering the complete cube
implicit def intToPointId(i: Int): PointId = PointId(i)
val cells = IndexedSeq(TetrahedralCell(0, 2, 7, 3),
TetrahedralCell(0, 2, 5, 1),
TetrahedralCell(2, 5, 7, 6),
TetrahedralCell(0, 5, 7, 4),
TetrahedralCell(0, 2, 5, 7))
val list = TetrahedralList(cells)
TetrahedralMesh3D(domain, list)
}
def createTetrahedronsTwoUnitCubes(): TetrahedralMesh3D = {
// points around unit cube
val points = IndexedSeq(
Point(0, 0, 0),
Point(1, 0, 0),
Point(1, 1, 0),
Point(0, 1, 0),
Point(0, 0, 1),
Point(1, 0, 1),
Point(1, 1, 1),
Point(0, 1, 1),
Point(0, 0, 2),
Point(1, 0, 2),
Point(1, 1, 2),
Point(0, 1, 2)
)
val domain = UnstructuredPoints(points)
// cells covering the complete cube
implicit def intToPointId(i: Int): PointId = PointId(i)
val cells = IndexedSeq(
TetrahedralCell(0, 2, 7, 3),
TetrahedralCell(0, 2, 5, 1),
TetrahedralCell(2, 5, 7, 6),
TetrahedralCell(0, 5, 7, 4),
TetrahedralCell(0, 2, 5, 7),
TetrahedralCell(4, 6, 11, 7),
TetrahedralCell(4, 6, 9, 5),
TetrahedralCell(6, 9, 11, 10),
TetrahedralCell(4, 9, 11, 8),
TetrahedralCell(4, 6, 9, 11)
)
val list = TetrahedralList(cells)
TetrahedralMesh3D(domain, list)
}
def createRandomTetrahedralMesh(): TetrahedralMesh3D = {
val rng = Random(42L)
val N = 200
val points = IndexedSeq.fill(N)(
Point(rng.scalaRandom.nextGaussian() * 2,
rng.scalaRandom.nextGaussian() * 1000,
rng.scalaRandom.nextGaussian() * 1000000)
)
val domain = UnstructuredPoints(points)
implicit def intToPointId(i: Int): PointId = PointId(i)
val T = 200
val indices = rng.scalaRandom.shuffle((0 until N).toIndexedSeq).take(4)
val cells = IndexedSeq.fill(T)(TetrahedralCell(indices(0), indices(1), indices(2), indices(3)))
val list = TetrahedralList(cells)
TetrahedralMesh3D(domain, list)
}
describe("a tetrahedral mesh") {
    // Volume must be invariant under rigid transforms: for 20 random
    // rotation+translation mappings of the unit cube partition, the four
    // congruent corner tetrahedra share one volume, the central one fills
    // the remainder, and the total stays 1.
    it("should calculate the correct volume of a tetrahedral mesh and its tetrahedrals") {
      val epsilonVolume = 1.0e-8
      for (i <- 0 until 20) {
        val t = Translation(
          EuclideanVector3D(rng.scalaRandom.nextGaussian() * 50,
                            rng.scalaRandom.nextGaussian() * 50,
                            rng.scalaRandom.nextGaussian() * 50)
        )
        val R = Rotation(rng.scalaRandom.nextGaussian() * Math.PI,
                         rng.scalaRandom.nextGaussian() * Math.PI,
                         rng.scalaRandom.nextGaussian() * Math.PI,
                         Point3D.origin)
        def mapping(pt: Point[_3D]) = R(t(pt))
        val tetrahedron = createTetrahedronsInUnitCube().transform(mapping)
        val ref = tetrahedron.computeTetrahedronVolume(tetrahedron.cells(0))
        tetrahedron.computeTetrahedronVolume(tetrahedron.cells(1)) should be(ref +- epsilonVolume)
        tetrahedron.computeTetrahedronVolume(tetrahedron.cells(2)) should be(ref +- epsilonVolume)
        tetrahedron.computeTetrahedronVolume(tetrahedron.cells(3)) should be(ref +- epsilonVolume)
        tetrahedron.computeTetrahedronVolume(tetrahedron.cells(4)) should be((1.0 - 4.0 * ref) +- 1.0e-8)
        tetrahedron.volume should be(1.0 +- epsilonVolume)
      }
    }
    // Construction with zero cells must not throw.
    it("can have an empty cell list") {
      val pts = IndexedSeq(Point(0.0, 0.0, 0.0), Point(1.0, 1.0, 1.0), Point(1.0, 1.0, 5.0), Point(1.0, -1.0, 5.0))
      val cells = IndexedSeq[TetrahedralCell]()
      try {
        TetrahedralMesh3D(UnstructuredPoints(pts), TetrahedralList(cells)) // would throw exception on fail
      } catch {
        case e: Exception => fail("It should be possible to create tetrahedralMesh with an empty cell list")
      }
    }
    // Point adjacency in the 5-tetrahedron unit-cube partition, checked for
    // a corner on the central tetrahedron (0), a corner on a single
    // tetrahedron (1), and another central corner (2).
    it("should return the correct adjacent points for a point") {
      val tetrahedron = createTetrahedronsInUnitCube()
      {
        val pid = PointId(0)
        val shouldReturn = Seq(1, 2, 3, 4, 5, 7).sorted
        val neighbours = tetrahedron.tetrahedralization.adjacentPointsForPoint(pid).map(_.id).sorted
        assert(shouldReturn.size == neighbours.size)
        assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
      }
      {
        val pid = PointId(1)
        val shouldReturn = Seq(0, 2, 5).sorted
        val neighbours = tetrahedron.tetrahedralization.adjacentPointsForPoint(pid).map(_.id).sorted
        assert(shouldReturn.size == neighbours.size)
        assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
      }
      {
        val pid = PointId(2)
        val shouldReturn = Seq(0, 1, 3, 5, 6, 7).sorted
        val neighbours = tetrahedron.tetrahedralization.adjacentPointsForPoint(pid).map(_.id).sorted
        assert(shouldReturn.size == neighbours.size)
        assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
      }
    }
it("should return the correct adjacent tetrahedral cells for a point") {
val tetrahedron = createTetrahedronsInUnitCube()
{
val pid = PointId(0)
val shouldReturn = Seq(0, 1, 3, 4).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForPoint(pid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
{
val pid = PointId(1)
val shouldReturn = Seq(1).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForPoint(pid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
{
val pid = PointId(2)
val shouldReturn = Seq(0, 1, 2, 4).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForPoint(pid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
}
it("should return the correct adjacent tetrahedrons for a tetrahedron") {
val tetrahedron = createTetrahedronsTwoUnitCubes()
{
val tid = TetrahedronId(0)
val shouldReturn = Seq(1, 2, 3, 4, 5).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForTetrahedron(tid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
{
val tid = TetrahedronId(1)
val shouldReturn = Seq(0, 2, 3, 4, 6).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForTetrahedron(tid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
{
val tid = TetrahedronId(4)
val shouldReturn = Seq(0, 1, 2, 3, 5, 6).sorted
val neighbours = tetrahedron.tetrahedralization.adjacentTetrahedronsForTetrahedron(tid).map(_.id).sorted
assert(shouldReturn.size == neighbours.size)
assert(shouldReturn.zip(neighbours).forall { case (a, b) => a == b })
}
}
it("should return only sampled points within a tetrahedron when trying to sample uniformly in it") {
val mesh = createRandomTetrahedralMesh()
for (tetrahedron <- mesh.tetrahedrons) {
for (_ <- 0 until 20) {
val point = mesh.samplePointInTetrahedralCell(tetrahedron)
require(mesh.isInsideTetrahedralCell(point, tetrahedron))
}
}
}
it("should not throw an error when sampling points uniformly in a tetrahedron") {
val tetrahedron = createTetrahedronsInUnitCube()
try {
for (i <- 0 until 1000) {
tetrahedron.samplePointInTetrahedralCell(tetrahedron.tetrahedralization.tetrahedron(TetrahedronId(0)))
}
} catch {
case e: Exception =>
fail("It should be possible to sample points in a tetrahedron without throwing an exception")
}
}
}
}
| unibas-gravis/scalismo | src/test/scala/scalismo/mesh/TetrahedralMeshTest.scala | Scala | apache-2.0 | 9,741 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter03
/** Teaching example: `val` vs `var` declarations and type inference in Scala. */
object Variables {
  // a `val` is an immutable binding: reassigning it is a compile-time error
  val immutable: String = "Scala" // immutable can not be reassigned
  // a `var` is a mutable binding and may be reassigned any number of times
  var language: String = "Java"
  language = "Scala" // var can be reassigned
  // explicit type annotations are optional: `age` is inferred as Int
  val age = 35
  // mixing Int and Double (`.5`) widens the whole expression to Double
  var maxHeartRate = 210 - age * .5 // maxHeartRate: Double = 191.5
}
| tobyweston/learn-scala-java-devs | src/main/scala/s4j/scala/chapter03/Variables.scala | Scala | apache-2.0 | 912 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors
package scalastream
package sinks
// Java
import java.nio.ByteBuffer
// Amazon
import com.amazonaws.AmazonServiceException
import com.amazonaws.auth.{
BasicAWSCredentials,
ClasspathPropertiesFileCredentialsProvider
}
// Scalazon (for Kinesis interaction)
import io.github.cloudify.scala.aws.kinesis.Client
import io.github.cloudify.scala.aws.kinesis.Client.ImplicitExecution._
import io.github.cloudify.scala.aws.kinesis.Definitions.{
Stream,
PutResult,
Record
}
import io.github.cloudify.scala.aws.kinesis.KinesisDsl._
import io.github.cloudify.scala.aws.auth.CredentialsProvider.InstanceProfile
// Config
import com.typesafe.config.Config
// Concurrent libraries
import scala.concurrent.{Future,Await,TimeoutException}
import scala.concurrent.duration._
// Logging
import org.slf4j.LoggerFactory
// Mutable data structures
import scala.collection.mutable.StringBuilder
import scala.collection.mutable.MutableList
import scala.util.{Success, Failure}
// Snowplow
import scalastream._
import thrift.SnowplowRawEvent
/**
 * Kinesis Sink for the Scala collector.
 *
 * On construction this sink eagerly:
 *  - builds a fixed thread pool sized by `config.threadpoolSize`,
 *  - creates a Kinesis client from the configured credentials, and
 *  - blocks (via Await) until the configured output stream exists and is active,
 *    creating it if necessary.
 */
class KinesisSink(config: CollectorConfig) extends AbstractSink {
  private lazy val log = LoggerFactory.getLogger(getClass())
  import log.{error, debug, info, trace}
  // Execution context backing all Kinesis futures; a dedicated fixed-size pool
  // so collector request handling is not starved by sink I/O.
  implicit lazy val ec = {
    info("Creating thread pool of size " + config.threadpoolSize)
    val executorService = java.util.concurrent.Executors.newFixedThreadPool(config.threadpoolSize)
    concurrent.ExecutionContext.fromExecutorService(executorService)
  }
  // Create a Kinesis client for stream interactions.
  private implicit val kinesis = createKinesisClient
  // The output stream for enriched events.
  // NOTE(review): initialising this field blocks the constructor until the
  // stream is active (see createAndLoadStream).
  private val enrichedStream = createAndLoadStream()
  /** Checks if a stream exists and is active.
   *
   * @param name the name of the stream to look up
   * @param timeout maximum seconds to wait for the describe call
   * @return true iff the stream exists and is in the ACTIVE state
   *
   * NOTE(review): if the stream does not exist at all, the describe future
   * presumably fails and Await.result throws rather than returning false —
   * verify against the Scalazon client's behaviour. Also uses an early
   * `return`, which is non-idiomatic Scala.
   */
  def streamExists(name: String, timeout: Int = 60): Boolean = {
    val streamDescribeFuture = for {
      s <- Kinesis.stream(name).describe
    } yield s
    val description =
      Await.result(streamDescribeFuture, Duration(timeout, SECONDS))
    if (description.isActive) {
      info(s"Stream $name exists and is active")
      return true
    }
    info(s"Stream $name doesn't exist or is not active")
    false
  }
  /** Returns the configured stream, creating it first (and blocking until it
   *  becomes ACTIVE) if it does not already exist.
   *
   * @param timeout maximum seconds to wait for creation and for activation
   */
  def createAndLoadStream(timeout: Int = 60): Stream = {
    val name = config.streamName
    val size = config.streamSize
    if (streamExists(name)) {
      Kinesis.stream(name)
    } else {
      info(s"Creating stream $name of size $size")
      val createStream = for {
        s <- Kinesis.streams.create(name)
      } yield s
      try {
        val stream = Await.result(createStream, Duration(timeout, SECONDS))
        info(s"Successfully created stream $name. Waiting until it's active")
        // waitActive polls until the stream reaches ACTIVE, retrying up to `timeout` times
        Await.result(stream.waitActive.retrying(timeout),
          Duration(timeout, SECONDS))
        info(s"Stream $name active")
        stream
      } catch {
        case _: TimeoutException =>
          throw new RuntimeException("Error: Timed out")
      }
    }
  }
  /**
   * Creates a new Kinesis client from provided AWS access key and secret
   * key. If both are set to "cpf", then authenticate using the classpath
   * properties file.
   *
   * @return the initialized AmazonKinesisClient
   */
  private def createKinesisClient: Client = {
    val accessKey = config.awsAccessKey
    val secretKey = config.awsSecretKey
    // "cpf"/"iam" are sentinel values selecting a credentials provider; both
    // keys must agree on the mechanism.
    if (isCpf(accessKey) && isCpf(secretKey)) {
      Client.fromCredentials(new ClasspathPropertiesFileCredentialsProvider())
    } else if (isCpf(accessKey) || isCpf(secretKey)) {
      throw new RuntimeException("access-key and secret-key must both be set to 'cpf', or neither of them")
    } else if (isIam(accessKey) && isIam(secretKey)) {
      Client.fromCredentials(InstanceProfile)
    } else if (isIam(accessKey) || isIam(secretKey)) {
      throw new RuntimeException("access-key and secret-key must both be set to 'iam', or neither of them")
    } else {
      Client.fromCredentials(accessKey, secretKey)
    }
  }
  /** Asynchronously writes a serialized SnowplowRawEvent to the Kinesis stream,
   *  logging success/failure when the put future completes.
   *
   * @param event the raw event to serialize and write
   * @param key the partition key for the Kinesis record
   *
   * NOTE(review): returns `null` — presumably the AbstractSink contract allows
   * a "no synchronous response" result; confirm before changing. Failures are
   * only logged, never retried or surfaced to the caller.
   */
  def storeRawEvent(event: SnowplowRawEvent, key: String) = {
    info(s"Writing Thrift record to Kinesis: ${event.toString}")
    val putData = for {
      p <- enrichedStream.put(
        ByteBuffer.wrap(serializeEvent(event)),
        key
      )
    } yield p
    putData onComplete {
      case Success(result) => {
        info(s"Writing successful.")
        info(s"  + ShardId: ${result.shardId}")
        info(s"  + SequenceNumber: ${result.sequenceNumber}")
      }
      case Failure(f) => {
        error(s"Writing failed.")
        error(s"  + " + f.getMessage)
      }
    }
    null
  }
  /**
   * Is the access/secret key set to the special value "cpf" i.e. use
   * the classpath properties file for credentials.
   *
   * @param key The key to check
   * @return true if key is cpf, false otherwise
   */
  private def isCpf(key: String): Boolean = (key == "cpf")
  /**
   * Is the access/secret key set to the special value "iam" i.e. use
   * the IAM role to get credentials.
   *
   * @param key The key to check
   * @return true if key is iam, false otherwise
   */
  private def isIam(key: String): Boolean = (key == "iam")
}
| pkallos/snowplow | 2-collectors/scala-stream-collector/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/sinks/KinesisSink.scala | Scala | apache-2.0 | 5,911 |
package dotty.tools
package dotc
import core._
import Contexts._
import Periods._
import Symbols._
import Scopes._
import Names.Name
import Denotations.Denotation
import typer.Typer
import typer.ImportInfo.withRootImports
import Decorators._
import io.{AbstractFile, VirtualFile}
import Phases.unfusedPhases
import util._
import reporting.{Suppression, Action}
import reporting.Diagnostic
import reporting.Diagnostic.Warning
import rewrites.Rewrites
import profile.Profiler
import printing.XprintMode
import typer.ImplicitRunInfo
import config.Feature
import StdNames.nme
import java.io.{BufferedWriter, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import scala.collection.mutable
import scala.util.control.NonFatal
import scala.io.Codec
/** A compiler run. Exports various methods to compile source files.
 *
 *  A Run owns the list of compilation units being compiled, the run-local
 *  context (`runContext`), `@nowarn` suppression bookkeeping, and the loop
 *  that drives all compiler phases over the units.
 */
class Run(comp: Compiler, ictx: Context) extends ImplicitRunInfo with ConstraintRunInfo {

  /** Default timeout to stop looking for further implicit suggestions, in ms.
   *  This is usually for the first import suggestion; subsequent suggestions
   *  may get smaller timeouts. @see ImportSuggestions.reduceTimeBudget
   */
  private var myImportSuggestionBudget: Int =
    Int.MinValue // sentinel value; means whatever is set in command line option

  def importSuggestionBudget =
    if myImportSuggestionBudget == Int.MinValue then ictx.settings.XimportSuggestionTimeout.value
    else myImportSuggestionBudget

  def importSuggestionBudget_=(x: Int) =
    myImportSuggestionBudget = x

  /** If this variable is set to `true`, some core typer operations will
   *  return immediately. Currently these early abort operations are
   *  `Typer.typed` and `Implicits.typedImplicit`.
   */
  @volatile var isCancelled = false

  // true while compileUnits() is executing; used by lateCompile to decide
  // whether to defer type checking to the end of the run
  private var compiling = false

  private var myUnits: List[CompilationUnit] = Nil
  private var myUnitsCached: List[CompilationUnit] = Nil
  // cached derivation of `units` -> source files; recomputed in `files` when stale
  private var myFiles: Set[AbstractFile] = _

  // `@nowarn` annotations by source file, populated during typer
  private val mySuppressions: mutable.LinkedHashMap[SourceFile, mutable.ListBuffer[Suppression]] = mutable.LinkedHashMap.empty
  // source files whose `@nowarn` annotations are processed
  private val mySuppressionsComplete: mutable.Set[SourceFile] = mutable.Set.empty
  // warnings issued before a source file's `@nowarn` annotations are processed, suspended so that `@nowarn` can filter them
  private val mySuspendedMessages: mutable.LinkedHashMap[SourceFile, mutable.LinkedHashSet[Warning]] = mutable.LinkedHashMap.empty

  /** Bookkeeping for `@nowarn` suppression: warnings are suspended per source
   *  file until that file's annotations are known, then filtered or reported.
   */
  object suppressions:
    // When the REPL creates a new run (ReplDriver.compile), parsing is already done in the old context, with the
    // previous Run. Parser warnings were suspended in the old run and need to be copied over so they are not lost.
    // Same as scala/scala/commit/79ca1408c7.
    def initSuspendedMessages(oldRun: Run | Null) = if oldRun != null then
      mySuspendedMessages.clear()
      mySuspendedMessages ++= oldRun.mySuspendedMessages

    def suppressionsComplete(source: SourceFile) = source == NoSource || mySuppressionsComplete(source)

    def addSuspendedMessage(warning: Warning) =
      mySuspendedMessages.getOrElseUpdate(warning.pos.source, mutable.LinkedHashSet.empty) += warning

    // Decide what to do with a diagnostic: the first matching suppression wins
    // and is marked used (for -Wunused:nowarn accounting).
    def nowarnAction(dia: Diagnostic): Action.Warning.type | Action.Verbose.type | Action.Silent.type =
      mySuppressions.getOrElse(dia.pos.source, Nil).find(_.matches(dia)) match {
        case Some(s) =>
          s.markUsed()
          if (s.verbose) Action.Verbose
          else Action.Silent
        case _ =>
          Action.Warning
      }

    def addSuppression(sup: Suppression): Unit =
      val source = sup.annotPos.source
      mySuppressions.getOrElseUpdate(source, mutable.ListBuffer.empty) += sup

    def reportSuspendedMessages(source: SourceFile)(using Context): Unit = {
      // sort suppressions. they are not added in any particular order because of lazy type completion
      for (sups <- mySuppressions.get(source))
        mySuppressions(source) = sups.sortBy(sup => 0 - sup.start)
      mySuppressionsComplete += source
      mySuspendedMessages.remove(source).foreach(_.foreach(ctx.reporter.issueIfNotSuppressed))
    }

    def runFinished(hasErrors: Boolean): Unit =
      // report suspended messages (in case the run finished before typer)
      mySuspendedMessages.keysIterator.toList.foreach(reportSuspendedMessages)
      // report unused nowarns only if all phases are done
      if !hasErrors && ctx.settings.WunusedHas.nowarn then
        for {
          source <- mySuppressions.keysIterator.toList
          sups   <- mySuppressions.remove(source)
          sup    <- sups.reverse
        } if (!sup.used)
          report.warning("@nowarn annotation does not suppress any warnings", sup.annotPos)

  /** The compilation units currently being compiled, this may return different
   *  results over time.
   */
  def units: List[CompilationUnit] = myUnits

  private def units_=(us: List[CompilationUnit]): Unit =
    myUnits = us

  // units whose compilation was suspended (e.g. waiting on macro classloading)
  var suspendedUnits: mutable.ListBuffer[CompilationUnit] = mutable.ListBuffer()

  // Report an error if no unit made progress but some are still suspended —
  // this indicates a cyclic macro dependency.
  def checkSuspendedUnits(newUnits: List[CompilationUnit])(using Context): Unit =
    if newUnits.isEmpty && suspendedUnits.nonEmpty && !ctx.reporter.errorsReported then
      val where =
        if suspendedUnits.size == 1 then i"in ${suspendedUnits.head}."
        else i"""among
                 |
                 |  ${suspendedUnits.toList}%, %
                 |"""
      val enableXprintSuspensionHint =
        if ctx.settings.XprintSuspension.value then ""
        else "\n\nCompiling with -Xprint-suspension gives more information."
      report.error(em"""Cyclic macro dependencies $where
                       |Compilation stopped since no further progress can be made.
                       |
                       |To fix this, place macros in one set of files and their callers in another.$enableXprintSuspensionHint""")

  /** The files currently being compiled (active or suspended).
   *  This may return different results over time.
   *  These files do not have to be source files since it's possible to compile
   *  from TASTY.
   */
  def files: Set[AbstractFile] = {
    if (myUnits ne myUnitsCached) {
      myUnitsCached = myUnits
      myFiles = (myUnits ++ suspendedUnits).map(_.source.file).toSet
    }
    myFiles
  }

  /** The source files of all late entered symbols, as a set */
  private var lateFiles = mutable.Set[AbstractFile]()

  /** A cache for static references to packages and classes */
  val staticRefs = util.EqHashMap[Name, Denotation](initialCapacity = 1024)

  /** Actions that need to be performed at the end of the current compilation run */
  private var finalizeActions = mutable.ListBuffer[() => Unit]()

  def compile(files: List[AbstractFile]): Unit =
    try
      val sources = files.map(runContext.getSource(_))
      compileSources(sources)
    catch
      case NonFatal(ex) =>
        if units.nonEmpty then report.echo(i"exception occurred while compiling $units%, %")
        else report.echo(s"exception occurred while compiling ${files.map(_.name).mkString(", ")}")
        throw ex

  /** TODO: There's a fundamental design problem here: We assemble phases using `fusePhases`
   *  when we first build the compiler. But we modify them with -Yskip, -Ystop
   *  on each run. That modification needs to either transform the tree structure,
   *  or we need to assemble phases on each run, and take -Yskip, -Ystop into
   *  account. I think the latter would be preferable.
   */
  def compileSources(sources: List[SourceFile]): Unit =
    if (sources forall (_.exists)) {
      units = sources.map(CompilationUnit(_))
      compileUnits()
    }

  def compileUnits(us: List[CompilationUnit]): Unit = {
    units = us
    compileUnits()
  }

  def compileUnits(us: List[CompilationUnit], ctx: Context): Unit = {
    units = us
    compileUnits()(using ctx)
  }

  /** Drive all (fused) phases over the current `units`, printing trees and
   *  recording stats/profiles as requested by settings, then run finalize
   *  actions and report suppression results.
   */
  private def compileUnits()(using Context) = Stats.maybeMonitored {
    if (!ctx.mode.is(Mode.Interactive)) // IDEs might have multi-threaded access, accesses are synchronized
      ctx.base.checkSingleThreaded()
    compiling = true

    // If testing pickler, make sure to stop after pickling phase:
    val stopAfter =
      if (ctx.settings.YtestPickler.value) List("pickler")
      else ctx.settings.YstopAfter.value
    val pluginPlan = ctx.base.addPluginPhases(ctx.base.phasePlan)
    val phases = ctx.base.fusePhases(pluginPlan,
      ctx.settings.Yskip.value, ctx.settings.YstopBefore.value, stopAfter, ctx.settings.Ycheck.value)
    ctx.base.usePhases(phases)

    def runPhases(using Context) = {
      var lastPrintedTree: PrintedTree = NoPrintedTree
      val profiler = ctx.profiler
      for (phase <- ctx.base.allPhases)
        if (phase.isRunnable)
          Stats.trackTime(s"$phase ms ") {
            val start = System.currentTimeMillis
            val profileBefore = profiler.beforePhase(phase)
            units = phase.runOn(units)
            profiler.afterPhase(phase, profileBefore)
            if (ctx.settings.Xprint.value.containsPhase(phase))
              for (unit <- units)
                lastPrintedTree =
                  printTree(lastPrintedTree)(using ctx.fresh.setPhase(phase.next).setCompilationUnit(unit))
            report.informTime(s"$phase ", start)
            Stats.record(s"total trees at end of $phase", ast.Trees.ntrees)
            for (unit <- units)
              Stats.record(s"retained typed trees at end of $phase", unit.tpdTree.treeSize)
            ctx.typerState.gc()
          }
      profiler.finished()
    }

    val runCtx = ctx.fresh
    runCtx.setProfiler(Profiler())
    unfusedPhases.foreach(_.initContext(runCtx))
    runPhases(using runCtx)
    if (!ctx.reporter.hasErrors)
      Rewrites.writeBack()
    suppressions.runFinished(hasErrors = ctx.reporter.hasErrors)
    // finalizeActions may grow while running (lateCompile can append), so
    // consume from the front instead of iterating
    while (finalizeActions.nonEmpty) {
      val action = finalizeActions.remove(0)
      action()
    }
    compiling = false
  }

  /** Enter top-level definitions of classes and objects contained in source file `file`.
   *  The newly added symbols replace any previously entered symbols.
   *  If `typeCheck = true`, also run typer on the compilation unit, and set
   *  `rootTreeOrProvider`.
   */
  def lateCompile(file: AbstractFile, typeCheck: Boolean)(using Context): Unit =
    if (!files.contains(file) && !lateFiles.contains(file)) {
      lateFiles += file

      val unit = CompilationUnit(ctx.getSource(file))
      val unitCtx = runContext.fresh
        .setCompilationUnit(unit)
        .withRootImports

      // If a run is in progress, type checking is deferred to the end of the
      // run via finalizeActions; otherwise it happens immediately.
      def process()(using Context) =
        ctx.typer.lateEnterUnit(doTypeCheck =>
          if typeCheck then
            if compiling then finalizeActions += doTypeCheck
            else doTypeCheck()
        )

      process()(using unitCtx)
    }

  // Memo of the last tree printed by -Xprint, used to report "unchanged since
  // <phase>" instead of re-printing identical trees.
  private sealed trait PrintedTree
  private /*final*/ case class SomePrintedTree(phase: String, tree: String) extends PrintedTree
  private object NoPrintedTree extends PrintedTree

  private def printTree(last: PrintedTree)(using Context): PrintedTree = {
    val unit = ctx.compilationUnit
    val prevPhase = ctx.phase.prev // can be a mini-phase
    val fusedPhase = ctx.base.fusedContaining(prevPhase)
    val echoHeader = f"[[syntax trees at end of $fusedPhase%25s]] // ${unit.source}"
    val tree = if ctx.isAfterTyper then unit.tpdTree else unit.untpdTree
    val treeString = tree.show(using ctx.withProperty(XprintMode, Some(())))

    last match {
      case SomePrintedTree(phase, lastTreeString) if lastTreeString == treeString =>
        report.echo(s"$echoHeader: unchanged since $phase")
        last
      case SomePrintedTree(phase, lastTreeString) if ctx.settings.XprintDiff.value || ctx.settings.XprintDiffDel.value =>
        val diff = DiffUtil.mkColoredCodeDiff(treeString, lastTreeString, ctx.settings.XprintDiffDel.value)
        report.echo(s"$echoHeader\n$diff\n")
        SomePrintedTree(fusedPhase.phaseName, treeString)
      case _ =>
        report.echo(s"$echoHeader\n$treeString\n")
        SomePrintedTree(fusedPhase.phaseName, treeString)
    }
  }

  /** Compile in-memory source strings by wrapping each in a uniquely named
   *  VirtualFile (so repeated calls never collide) and compiling as usual.
   */
  def compileFromStrings(scalaSources: List[String], javaSources: List[String] = Nil): Unit = {
    def sourceFile(source: String, isJava: Boolean): SourceFile = {
      val uuid = java.util.UUID.randomUUID().toString
      val ext = if (isJava) ".java" else ".scala"
      val virtualFile = new VirtualFile(s"compileFromString-$uuid.$ext")
      val writer = new BufferedWriter(new OutputStreamWriter(virtualFile.output, StandardCharsets.UTF_8.nn.name)) // buffering is still advised by javadoc
      writer.write(source)
      writer.close()
      new SourceFile(virtualFile, Codec.UTF8)
    }
    val sources =
      scalaSources.map(sourceFile(_, isJava = false)) ++
        javaSources.map(sourceFile(_, isJava = true))

    compileSources(sources)
  }

  /** Print summary of warnings and errors encountered */
  def printSummary(): Unit = {
    printMaxConstraint()
    val r = runContext.reporter
    r.summarizeUnreportedWarnings
    r.printSummary
  }

  override def reset(): Unit = {
    super[ImplicitRunInfo].reset()
    super[ConstraintRunInfo].reset()
    myCtx = null
    myUnits = Nil
    myUnitsCached = Nil
  }

  /** Produces the following contexts, from outermost to innermost
   *
   *    bootStrap:  A context with next available runId and a scope consisting of
   *                the RootPackage _root_
   *    start       A context with RootClass as owner and the necessary initializations
   *                for type checking.
   *    imports     For each element of RootImports, an import context
   */
  protected def rootContext(using Context): Context = {
    ctx.initialize()
    ctx.base.setPhasePlan(comp.phases)
    val rootScope = new MutableScope(0)
    val bootstrap = ctx.fresh
      .setPeriod(Period(comp.nextRunId, FirstPhaseId))
      .setScope(rootScope)
    rootScope.enter(ctx.definitions.RootPackage)(using bootstrap)
    var start = bootstrap.fresh
      .setOwner(defn.RootClass)
      .setTyper(new Typer)
      .addMode(Mode.ImplicitsEnabled)
      .setTyperState(ctx.typerState.fresh(ctx.reporter))
    if ctx.settings.YexplicitNulls.value && !Feature.enabledBySetting(nme.unsafeNulls) then
      start = start.addMode(Mode.SafeNulls)
    ctx.initialize()(using start) // re-initialize the base context with start

    // `this` must be unchecked for safe initialization because by being passed to setRun during
    // initialization, it is not yet considered fully initialized by the initialization checker
    start.setRun(this: @unchecked)
  }

  private var myCtx: Context | Null = rootContext(using ictx)

  /** The context created for this run */
  given runContext[Dummy_so_its_a_def]: Context = myCtx.nn
  assert(runContext.runId <= Periods.MaxPossibleRunId)
}
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/Run.scala | Scala | apache-2.0 | 14,883 |
package org.clulab.dynet
import java.io.PrintWriter
import edu.cmu.dynet.{Expression, ExpressionVector, ParameterCollection}
import org.clulab.struct.Counter
import org.clulab.utils.Configured
import org.clulab.dynet.Utils._
import org.clulab.fatdynet.utils.Synchronizer
import scala.collection.mutable.ArrayBuffer
/**
 * A sequence of layers that implements a complete NN architecture for sequence modeling:
 * an optional embedding (initial) layer, zero or more intermediate (e.g. RNN) layers,
 * and an optional task-specific final layer.
 */
class Layers (val initialLayer: Option[InitialLayer],
              val intermediateLayers: IndexedSeq[IntermediateLayer],
              val finalLayer: Option[FinalLayer]) extends Saveable {

  /** Output dimension of the deepest configured layer, or None if no layer is configured. */
  def outDim: Option[Int] = {
    if(finalLayer.nonEmpty) {
      return Some(finalLayer.get.outDim)
    }

    if(intermediateLayers.nonEmpty) {
      return Some(intermediateLayers.last.outDim)
    }

    if(initialLayer.nonEmpty) {
      return Some(initialLayer.get.outDim)
    }

    None
  }

  override def toString: String = {
    val sb = new StringBuilder
    var started = false

    if(initialLayer.nonEmpty) {
      sb.append("initial = " + initialLayer.get)
      started = true
    }

    for(i <- intermediateLayers.indices) {
      if(started) sb.append(" ")
      sb.append(s"intermediate (${i + 1}) = " + intermediateLayers(i))
      started = true
    }

    if(finalLayer.nonEmpty) {
      if(started) sb.append(" ")
      sb.append("final = " + finalLayer.get)
    }

    sb.toString()
  }

  def isEmpty: Boolean = initialLayer.isEmpty && intermediateLayers.isEmpty && finalLayer.isEmpty
  def nonEmpty: Boolean = ! isEmpty

  /** Runs the full forward pass starting from raw words; requires an initial layer.
   *
   * @param sentence the annotated input sentence
   * @param constEmbeddings constant (non-trainable) embedding parameters
   * @param doDropout apply dropout (training mode) or not (inference)
   * @return one expression per token (task scores if a final layer is present)
   * @throws RuntimeException if this Layers has no initial layer
   */
  protected def forward(sentence: AnnotatedSentence,
                        constEmbeddings: ConstEmbeddingParameters,
                        doDropout: Boolean): ExpressionVector = {
    if(initialLayer.isEmpty) {
      throw new RuntimeException(s"ERROR: you can't call forward() on a Layers object that does not have an initial layer: $toString!")
    }

    var states = initialLayer.get.forward(sentence, constEmbeddings, doDropout)

    for (i <- intermediateLayers.indices) {
      states = intermediateLayers(i).forward(states, doDropout)
    }

    if(finalLayer.nonEmpty) {
      states = finalLayer.get.forward(states, sentence.headPositions, doDropout)
    }

    states
  }

  /** Runs the forward pass starting from precomputed states (e.g. from shared
   *  layers); this Layers must NOT have an initial layer.
   *
   * @param inStates output states of the upstream (shared) layers
   * @param headPositions optional syntactic head positions, for dual tasks
   * @param doDropout apply dropout (training mode) or not (inference)
   * @throws RuntimeException if this Layers has an initial layer
   */
  protected def forwardFrom(inStates: ExpressionVector,
                            headPositions: Option[IndexedSeq[Int]],
                            doDropout: Boolean): ExpressionVector = {
    if(initialLayer.nonEmpty) {
      throw new RuntimeException(s"ERROR: you can't call forwardFrom() on a Layers object that has an initial layer: $toString!")
    }

    var states = inStates

    for (i <- intermediateLayers.indices) {
      states = intermediateLayers(i).forward(states, doDropout)
    }

    if(finalLayer.nonEmpty) {
      states = finalLayer.get.forward(states, headPositions, doDropout)
    }

    states
  }

  /** Serializes this architecture in the x2i text format: a hasInitial flag,
   *  the intermediate-layer count, and a hasFinal flag, each followed by the
   *  corresponding layer's own serialization. Must mirror Layers.loadX2i.
   */
  override def saveX2i(printWriter: PrintWriter): Unit = {
    if(initialLayer.nonEmpty) {
      save(printWriter, 1, "hasInitial")
      initialLayer.get.saveX2i(printWriter)
    } else {
      save(printWriter, 0, "hasInitial")
    }

    save(printWriter, intermediateLayers.length, "intermediateCount")
    for(il <- intermediateLayers) {
      il.saveX2i(printWriter)
    }

    if(finalLayer.nonEmpty) {
      save(printWriter, 1, "hasFinal")
      finalLayer.get.saveX2i(printWriter)
    } else {
      save(printWriter, 0, "hasFinal")
    }
  }
}
object Layers {
def apply(config: Configured,
paramPrefix: String,
parameters: ParameterCollection,
wordCounter: Counter[String],
labelCounterOpt: Option[Counter[String]],
isDual: Boolean,
providedInputSize: Option[Int]): Layers = {
val initialLayer = EmbeddingLayer.initialize(config, paramPrefix + ".initial", parameters, wordCounter)
var inputSize =
if(initialLayer.nonEmpty) {
Some(initialLayer.get.outDim)
} else if(providedInputSize.nonEmpty) {
providedInputSize
} else {
None
}
val intermediateLayers = new ArrayBuffer[IntermediateLayer]()
var done = false
for(i <- 1 to MAX_INTERMEDIATE_LAYERS if ! done) {
if(inputSize.isEmpty) {
throw new RuntimeException("ERROR: trying to construct an intermediate layer without a known input size!")
}
val intermediateLayer = RnnLayer.initialize(config, paramPrefix + s".intermediate$i", parameters, inputSize.get)
if(intermediateLayer.nonEmpty) {
intermediateLayers += intermediateLayer.get
inputSize = Some(intermediateLayer.get.outDim)
} else {
done = true
}
}
val finalLayer =
if(labelCounterOpt.nonEmpty) {
if(inputSize.isEmpty) {
throw new RuntimeException("ERROR: trying to construct a final layer without a known input size!")
}
ForwardLayer.initialize(config, paramPrefix + ".final", parameters,
labelCounterOpt.get, isDual, inputSize.get)
} else {
None
}
new Layers(initialLayer, intermediateLayers, finalLayer)
}
val MAX_INTERMEDIATE_LAYERS = 10
def loadX2i(parameters: ParameterCollection, lines: BufferedIterator[String]): Layers = {
val byLineIntBuilder = new ByLineIntBuilder()
val hasInitial = byLineIntBuilder.build(lines, "hasInitial")
val initialLayer =
if(hasInitial == 1) {
val layer = EmbeddingLayer.load(parameters, lines)
//println("loaded initial layer!")
Some(layer)
} else {
None
}
val intermediateLayers = new ArrayBuffer[IntermediateLayer]()
val intermCount = byLineIntBuilder.build(lines, "intermediateCount")
for(_ <- 0 until intermCount) {
val il = RnnLayer.load(parameters, lines)
//println("loaded one intermediate layer!")
intermediateLayers += il
}
val hasFinal = byLineIntBuilder.build(lines, "hasFinal")
val finalLayer =
if(hasFinal == 1) {
val layer = ForwardLayer.load(parameters, lines)
//println("loaded final layer!")
Some(layer)
} else {
None
}
new Layers(initialLayer, intermediateLayers, finalLayer)
}
def predictJointly(layers: IndexedSeq[Layers],
sentence: AnnotatedSentence,
constEmbeddings: ConstEmbeddingParameters): IndexedSeq[IndexedSeq[String]] = {
val labelsPerTask = new ArrayBuffer[IndexedSeq[String]]()
// DyNet's computation graph is a static variable, so this block must be synchronized
Synchronizer.withComputationGraph("Layers.predictJointly()") {
// layers(0) contains the shared layers
if (layers(0).nonEmpty) {
val sharedStates = layers(0).forward(sentence, constEmbeddings, doDropout = false)
for (i <- 1 until layers.length) {
val states = layers(i).forwardFrom(sharedStates, sentence.headPositions, doDropout = false)
val emissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(states)
val labels = layers(i).finalLayer.get.inference(emissionScores)
labelsPerTask += labels
}
}
// no shared layer
else {
for (i <- 1 until layers.length) {
val states = layers(i).forward(sentence, constEmbeddings, doDropout = false)
val emissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(states)
val labels = layers(i).finalLayer.get.inference(emissionScores)
labelsPerTask += labels
}
}
}
labelsPerTask
}
private def forwardForTask(layers: IndexedSeq[Layers],
taskId: Int,
sentence: AnnotatedSentence,
constEmbeddings: ConstEmbeddingParameters,
doDropout: Boolean): ExpressionVector = {
//
// make sure this code is:
// (a) called inside a synchronized block, and
// (b) called after the computational graph is renewed (see predict below for correct usage)
//
val states = {
// layers(0) contains the shared layers
if (layers(0).nonEmpty) {
val sharedStates = layers(0).forward(sentence, constEmbeddings, doDropout)
layers(taskId + 1).forwardFrom(sharedStates, sentence.headPositions, doDropout)
}
// no shared layer
else {
layers(taskId + 1).forward(sentence, constEmbeddings, doDropout)
}
}
states
}
def predict(layers: IndexedSeq[Layers],
taskId: Int,
sentence: AnnotatedSentence,
constEmbeddings: ConstEmbeddingParameters): IndexedSeq[String] = {
val labelsForTask =
// DyNet's computation graph is a static variable, so this block must be synchronized.
Synchronizer.withComputationGraph("Layers.predict()") {
val states = forwardForTask(layers, taskId, sentence, constEmbeddings, doDropout = false)
val emissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(states)
val out = layers(taskId + 1).finalLayer.get.inference(emissionScores)
out
}
labelsForTask
}
def predictWithScores(layers: IndexedSeq[Layers],
taskId: Int,
sentence: AnnotatedSentence,
constEmbeddings: ConstEmbeddingParameters): IndexedSeq[IndexedSeq[(String, Float)]] = {
val labelsForTask =
// DyNet's computation graph is a static variable, so this block must be synchronized
Synchronizer.withComputationGraph("Layers.predictWithScores()") {
val states = forwardForTask(layers, taskId, sentence, constEmbeddings, doDropout = false)
val emissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(states)
val out = layers(taskId + 1).finalLayer.get.inferenceWithScores(emissionScores)
out
}
labelsForTask
}
  /**
    * Performs dependency parsing over the given sentence.
    * Assumes layers(1) predicts relative head positions (as strings) and
    * layers(2) predicts dependency labels, both on top of the shared layers
    * in layers(0), which must be non-empty.
    *
    * @return one (head, label) pair per word, where head == -1 marks the root
    */
  def parse(layers: IndexedSeq[Layers],
            sentence: AnnotatedSentence,
            constEmbeddings: ConstEmbeddingParameters): IndexedSeq[(Int, String)] = {
    val headsAndLabels =
      // DyNet's computation graph is a static variable, so this block must be synchronized
      Synchronizer.withComputationGraph("Layers.parse()") {
        //
        // first get the output of the layers that are shared between the two tasks
        //
        assert(layers(0).nonEmpty)
        val sharedStates = layers(0).forward(sentence, constEmbeddings, doDropout = false)
        //
        // now predict the heads (first task)
        //
        val headStates = layers(1).forwardFrom(sharedStates, None, doDropout = false)
        val headEmissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(headStates)
        val headScores = layers(1).finalLayer.get.inferenceWithScores(headEmissionScores)
        // store the head values here
        val heads = new ArrayBuffer[Int]()
        for(wi <- headScores.indices) {
          val predictionsForThisWord = headScores(wi)
          // pick the prediction with the highest score, which is within the boundaries of the current sentence
          // (predictions are assumed sorted by descending score — TODO confirm against inferenceWithScores)
          var done = false
          for(hi <- predictionsForThisWord.indices if ! done) {
            try {
              // head predictions are *relative* offsets encoded as strings
              val relativeHead = predictionsForThisWord(hi)._1.toInt
              if (relativeHead == 0) { // this is the root
                heads += -1
                done = true
              } else {
                val headPosition = wi + relativeHead
                if (headPosition >= 0 && headPosition < sentence.size) {
                  heads += headPosition
                  done = true
                }
              }
            } catch {
              // some valid predictions may not be integers, e.g., "<STOP>" may be predicted by the sequence model
              // (done is already false here; the assignment simply keeps scanning lower-scored predictions)
              case e: NumberFormatException => done = false
            }
          }
          if(! done) {
            // we should not be here, but let's be safe
            // if nothing good was found, assume root
            heads += -1
          }
        }
        //
        // next, predict the labels using the predicted heads
        //
        val labelStates = layers(2).forwardFrom(sharedStates, Some(heads), doDropout = false)
        val emissionScores: Array[Array[Float]] = Utils.emissionScoresToArrays(labelStates)
        val labels = layers(2).finalLayer.get.inference(emissionScores)
        assert(labels.size == heads.size)
        heads.zip(labels)
      }
    headsAndLabels
  }
def loss(layers: IndexedSeq[Layers],
taskId: Int,
sentence: AnnotatedSentence,
goldLabels: IndexedSeq[String]): Expression = {
val constEmbeddings = ConstEmbeddingsGlove.mkConstLookupParams(sentence.words)
val states = forwardForTask(layers, taskId, sentence, constEmbeddings, doDropout = true) // use dropout during training!
layers(taskId + 1).finalLayer.get.loss(states, goldLabels)
}
}
| sistanlp/processors | main/src/main/scala/org/clulab/dynet/Layers.scala | Scala | apache-2.0 | 12,949 |
package first.actors
import akka.actor.{ActorLogging, Actor, ActorRef, Props}
/**
* Created by szekai on 20/08/2014.
*/
//case class Word(word:String, count:Int)
// Signals that a mapped/reduced result should be flushed to the aggregator.
case class Result()
// Carries the (word, count) pairs produced by the map phase.
case class MapData(dataList: List[(String, Int)])
// Carries the per-word counts produced by the reduce phase.
case class ReduceData(reduceDataMap: Map[String,Int])
/**
  * Entry-point actor of the map/reduce pipeline: forwards raw text to the
  * map actor and Result requests to the aggregate actor.
  */
class MasterActor extends Actor with ActorLogging {

  // Child actors forming the map -> reduce -> aggregate pipeline
  // (created in dependency order).
  val aggregateActor: ActorRef = context.actorOf(Props[AggregateActor], name = "aggregate")
  val reduceActor: ActorRef = context.actorOf(Props(classOf[ReduceActor], aggregateActor), name = "reduce")
  val mapActor: ActorRef = context.actorOf(Props(classOf[MapActor], reduceActor), name = "map")

  override def preStart() {
    log.info("Starting MasterActor instance hashcode # {}", this.hashCode())
  }

  override def postStop() {
    log.info("Stopping MasterActor instance hashcode # {}", this.hashCode())
  }

  def receive: Receive = {
    case text: String => mapActor ! text
    case result: Result => aggregateActor ! result
  }
}
| szekai/akka-example | RemoteMapRed/src/main/scala/first.actors/MasterActor.scala | Scala | apache-2.0 | 992 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.scheduler.states
import java.util.Date
import org.schedoscope.Schedoscope
import org.schedoscope.dsl.View
import org.schedoscope.dsl.transformations.Checksum.defaultDigest
import org.schedoscope.scheduler.messages.MaterializeViewMode._
/**
  * Pure implementation of the view scheduling state machine: given a view's
  * current scheduling state and an incoming event (materialize request,
  * dependency report, transformation outcome, invalidation), it computes the
  * resulting state together with the set of actions the scheduler must perform.
  * No side effects are performed here; actions are returned as data.
  */
class ViewSchedulingStateMachineImpl extends ViewSchedulingStateMachine {

  // Decides the state a view enters upon a materialize request:
  //  - SET_ONLY: mark materialized immediately, without transforming;
  //  - no dependencies (or TRANSFORM_ONLY): skip waiting for dependencies
  //    and decide directly via leaveWaitingState;
  //  - otherwise: wait for all dependencies and ask them to materialize.
  def toWaitingTransformingOrMaterialize(view: View, lastTransformationChecksum: String, lastTransformationTimestamp: Long, listener: PartyInterestedInViewSchedulingStateChange, materializationMode: MaterializeViewMode, currentTime: Long) = {
    if (materializationMode == SET_ONLY)
      ResultingViewSchedulingState(
        Materialized(
          view,
          view.transformation().checksum,
          currentTime,
          false,
          false), {
          // only rewrite the checksum when it actually changed
          if (lastTransformationChecksum != view.transformation().checksum)
            Set(WriteTransformationCheckum(view))
          else
            Set()
        } ++ Set(
          WriteTransformationTimestamp(view, currentTime),
          TouchSuccessFlag(view),
          ReportMaterialized(
            view,
            Set(listener),
            currentTime,
            false,
            false)))
    else if (view.dependencies.isEmpty || materializationMode == TRANSFORM_ONLY)
      // no dependencies to wait for: behave as if one dependency had already
      // returned data (dependenciesFreshness = 1l forces a freshness check)
      leaveWaitingState(
        Waiting(view,
          lastTransformationChecksum,
          lastTransformationTimestamp,
          Set(),
          Set(listener),
          materializationMode,
          oneDependencyReturnedData = true,
          withErrors = false,
          incomplete = false,
          1l),
        setIncomplete = false,
        setError = false,
        currentTime)
    else {
      val dependencies = view.dependencies.toSet
      ResultingViewSchedulingState(
        Waiting(view,
          lastTransformationChecksum,
          lastTransformationTimestamp,
          dependenciesMaterializing = dependencies,
          listenersWaitingForMaterialize = Set(listener),
          materializationMode = materializationMode,
          oneDependencyReturnedData = false,
          withErrors = false,
          incomplete = false,
          dependenciesFreshness = 0l),
        // fan out a materialize request to every dependency
        dependencies.map {
          Materialize(_, materializationMode)
        }.toSet)
    }
  }

  // Decides what happens once the last dependency has reported back:
  //  - materialize-once views with existing data stay materialized as-is;
  //  - if at least one dependency returned data, transform when the data is
  //    stale (older than the freshest dependency) or the transformation logic
  //    changed (checksum mismatch), otherwise report the existing data;
  //  - if no dependency returned data, the view has no data either.
  def leaveWaitingState(currentState: Waiting, setIncomplete: Boolean, setError: Boolean, currentTime: Long) = currentState match {
    case Waiting(
      view,
      lastTransformationChecksum,
      lastTransformationTimestamp,
      dependenciesMaterializing,
      listenersWaitingForMaterialize,
      materializationMode,
      oneDependencyReturnedData,
      withErrors,
      incomplete,
      dependenciesFreshness) =>

      if (view.isMaterializeOnce && lastTransformationTimestamp > 0)
        // materialize-once views never recompute once data exists
        ResultingViewSchedulingState(
          Materialized(
            view,
            lastTransformationChecksum,
            lastTransformationTimestamp,
            withErrors = withErrors,
            incomplete = incomplete),
          Set(
            ReportMaterialized(
              view,
              listenersWaitingForMaterialize,
              lastTransformationTimestamp,
              withErrors = withErrors,
              incomplete = incomplete)) ++ {
            if (materializationMode == RESET_TRANSFORMATION_CHECKSUMS)
              Set(WriteTransformationCheckum(view))
            else
              Set()
          })
      else if (oneDependencyReturnedData) {
        // recompute when the data is stale or the transformation changed
        if (lastTransformationTimestamp < dependenciesFreshness || (materializationMode != RESET_TRANSFORMATION_CHECKSUMS && lastTransformationChecksum != view.transformation().checksum)) {
          if (materializationMode == RESET_TRANSFORMATION_CHECKSUMS_AND_TIMESTAMPS)
            // pretend the transformation already happened: just refresh metadata
            ResultingViewSchedulingState(
              Materialized(
                view,
                view.transformation().checksum,
                currentTime,
                withErrors = withErrors | setError,
                incomplete = incomplete | setIncomplete), {
                if (lastTransformationChecksum != view.transformation().checksum)
                  Set(WriteTransformationCheckum(view))
                else
                  Set()
              } ++ Set(
                WriteTransformationTimestamp(view, currentTime),
                ReportMaterialized(
                  view,
                  listenersWaitingForMaterialize,
                  currentTime,
                  withErrors = withErrors | setError,
                  incomplete = incomplete | setIncomplete)))
          else
            ResultingViewSchedulingState(
              Transforming(
                view,
                lastTransformationChecksum,
                listenersWaitingForMaterialize,
                materializationMode,
                withErrors = withErrors,
                incomplete = incomplete,
                0),
              Set(Transform(view)))
        }
        else
          // existing data is up to date: report it without transforming
          ResultingViewSchedulingState(
            Materialized(
              view,
              lastTransformationChecksum,
              lastTransformationTimestamp,
              withErrors = withErrors | setError,
              incomplete = incomplete | setIncomplete),
            Set(
              ReportMaterialized(
                view,
                listenersWaitingForMaterialize,
                lastTransformationTimestamp,
                withErrors = withErrors | setError,
                incomplete = incomplete | setIncomplete)) ++ {
              if (materializationMode == RESET_TRANSFORMATION_CHECKSUMS)
                Set(WriteTransformationCheckum(view))
              else
                Set()
            })
      } else
        // no dependency has any data, so neither does this view
        ResultingViewSchedulingState(
          NoData(view),
          Set(
            ReportNoDataAvailable(view, listenersWaitingForMaterialize)))
  }

  // Handles a materialize request in any state. States already in progress
  // (Waiting / Transforming / Retrying) just register the new listener;
  // Failed only reports the failure; all other states (re)start the
  // materialization process.
  def materialize(
    currentState: ViewSchedulingState,
    listener: PartyInterestedInViewSchedulingStateChange,
    materializationMode: MaterializeViewMode = DEFAULT,
    currentTime: Long = new Date().getTime) = currentState match {

    case CreatedByViewManager(view) =>
      toWaitingTransformingOrMaterialize(view, defaultDigest, 0, listener, materializationMode, currentTime)

    case ReadFromSchemaManager(view, lastTransformationChecksum, lastTransformationTimestamp) =>
      toWaitingTransformingOrMaterialize(view, lastTransformationChecksum, lastTransformationTimestamp, listener, materializationMode, currentTime)

    case NoData(view) =>
      toWaitingTransformingOrMaterialize(view, defaultDigest, 0, listener, materializationMode, currentTime)

    case Invalidated(view) =>
      toWaitingTransformingOrMaterialize(view, defaultDigest, 0, listener, materializationMode, currentTime)

    case Materialized(view, lastTransformationChecksum, lastTransformationTimestamp, _, _) =>
      toWaitingTransformingOrMaterialize(view, lastTransformationChecksum, lastTransformationTimestamp, listener, materializationMode, currentTime)

    case Failed(view) =>
      ResultingViewSchedulingState(
        Failed(view),
        Set(ReportFailed(view, Set(listener))))

    case Waiting(
      view,
      lastTransformationChecksum,
      lastTransformationTimestamp,
      dependenciesMaterializing,
      listenersWaitingForMaterialize,
      currentMaterializationMode,
      oneDependencyReturnedData,
      withErrors,
      incomplete,
      dependenciesFreshness) =>
      // already waiting: just add the new listener
      ResultingViewSchedulingState(
        Waiting(
          view,
          lastTransformationChecksum,
          lastTransformationTimestamp,
          dependenciesMaterializing,
          listenersWaitingForMaterialize + listener,
          currentMaterializationMode,
          oneDependencyReturnedData,
          withErrors = withErrors,
          incomplete = incomplete,
          dependenciesFreshness), Set())

    case Retrying(
      view,
      lastTransformationChecksum,
      listenersWaitingForMaterialize,
      currentMaterializationMode,
      withErrors,
      incomplete,
      retry) =>
      // already retrying: just add the new listener
      ResultingViewSchedulingState(
        Retrying(
          view,
          lastTransformationChecksum,
          listenersWaitingForMaterialize + listener,
          currentMaterializationMode,
          withErrors = withErrors,
          incomplete = incomplete,
          retry), Set())

    case Transforming(
      view,
      lastTransformationChecksum,
      listenersWaitingForMaterialize,
      currentMaterializationMode,
      withErrors,
      incomplete,
      retry) =>
      // already transforming: just add the new listener
      ResultingViewSchedulingState(
        Transforming(
          view,
          lastTransformationChecksum,
          listenersWaitingForMaterialize + listener,
          currentMaterializationMode,
          withErrors = withErrors,
          incomplete = incomplete,
          retry), Set())
  }

  // Moves a retrying view back into Transforming, incrementing the retry
  // counter and issuing a new Transform action.
  def retry(currentState: Retrying) = currentState match {
    case Retrying(
      view,
      lastTransformationChecksum,
      listenersWaitingForMaterialize,
      currentMaterializationMode,
      withErrors,
      incomplete,
      retry) =>
      ResultingViewSchedulingState(
        Transforming(
          view,
          lastTransformationChecksum,
          listenersWaitingForMaterialize,
          currentMaterializationMode,
          withErrors = withErrors,
          incomplete = incomplete,
          retry + 1),
        Set(Transform(view)))
  }

  // Invalidation is refused while a view is actively being scheduled
  // (Waiting / Transforming / Retrying); otherwise the view is invalidated.
  def invalidate(currentState: ViewSchedulingState, issuer: PartyInterestedInViewSchedulingStateChange) = currentState match {

    case waiting: Waiting => ResultingViewSchedulingState(
      waiting, Set(ReportNotInvalidated(waiting.view, Set(issuer))))

    case transforming: Transforming => ResultingViewSchedulingState(
      transforming, Set(ReportNotInvalidated(transforming.view, Set(issuer))))

    case retrying: Retrying => ResultingViewSchedulingState(
      retrying, Set(ReportNotInvalidated(retrying.view, Set(issuer))))

    case _ => ResultingViewSchedulingState(
      Invalidated(currentState.view),
      Set(
        ReportInvalidated(currentState.view, Set(issuer))))
  }

  // A dependency reported it has no data. If it was the last outstanding
  // dependency, leave the waiting state (marking the result incomplete);
  // otherwise keep waiting with the dependency removed.
  def noDataAvailable(currentState: Waiting, reportingDependency: View, currentTime: Long = new Date().getTime) = currentState match {
    case Waiting(
      view,
      lastTransformationChecksum,
      lastTransformationTimestamp,
      dependenciesMaterializing,
      listenersWaitingForMaterialize,
      materializationMode,
      oneDependencyReturnedData,
      withErrors,
      incomplete,
      dependenciesFreshness) =>

      if (dependenciesMaterializing.size == 1 && dependenciesMaterializing.contains(reportingDependency))
        leaveWaitingState(currentState, setIncomplete = true, setError = false, currentTime)
      else
        ResultingViewSchedulingState(
          Waiting(
            view,
            lastTransformationChecksum,
            lastTransformationTimestamp,
            dependenciesMaterializing - reportingDependency,
            listenersWaitingForMaterialize,
            materializationMode,
            oneDependencyReturnedData,
            withErrors = withErrors,
            incomplete = true,
            dependenciesFreshness), Set())
  }

  // A dependency reported a failure. Same structure as noDataAvailable, but
  // the resulting state is additionally flagged withErrors.
  def failed(currentState: Waiting, reportingDependency: View, currentTime: Long = new Date().getTime) = currentState match {
    case Waiting(
      view,
      lastTransformationChecksum,
      lastTransformationTimestamp,
      dependenciesMaterializing,
      listenersWaitingForMaterialize,
      materializationMode,
      oneDependencyReturnedData,
      withErrors,
      incomplete,
      dependenciesFreshness) =>

      if (dependenciesMaterializing.size == 1 && dependenciesMaterializing.contains(reportingDependency))
        leaveWaitingState(currentState, setIncomplete = true, setError = true, currentTime)
      else
        ResultingViewSchedulingState(
          Waiting(
            view,
            lastTransformationChecksum,
            lastTransformationTimestamp,
            dependenciesMaterializing - reportingDependency,
            listenersWaitingForMaterialize,
            materializationMode,
            oneDependencyReturnedData,
            withErrors = true,
            incomplete = true,
            dependenciesFreshness), Set())
  }

  // A dependency reported successful materialization. Track the freshest
  // dependency timestamp; once the last dependency has reported, decide how
  // to leave the waiting state.
  def materialized(currentState: Waiting, reportingDependency: View, transformationTimestamp: Long, withErrors: Boolean, incomplete: Boolean, currentTime: Long = new Date().getTime) = currentState match {
    case Waiting(
      view,
      lastTransformationChecksum,
      lastTransformationTimestamp,
      dependenciesMaterializing,
      listenersWaitingForMaterialize,
      materializationMode,
      oneDependencyReturnedData,
      currentWithErrors,
      currentIncomplete,
      dependenciesFreshness) =>

      val updatedWaitingState = Waiting(
        view,
        lastTransformationChecksum,
        lastTransformationTimestamp,
        dependenciesMaterializing - reportingDependency,
        listenersWaitingForMaterialize,
        materializationMode,
        oneDependencyReturnedData = true,
        withErrors = withErrors || currentWithErrors,
        incomplete = incomplete || currentIncomplete,
        Math.max(dependenciesFreshness, transformationTimestamp))

      if (dependenciesMaterializing.size == 1 && dependenciesMaterializing.contains(reportingDependency))
        leaveWaitingState(updatedWaitingState, setIncomplete = false, setError = false, currentTime)
      else
        ResultingViewSchedulingState(updatedWaitingState, Set())
  }

  // The transformation finished. A non-empty target folder means the view is
  // materialized (write checksum/timestamp, touch the success flag, notify
  // listeners); an empty folder means the transformation produced no data.
  def transformationSucceeded(currentState: Transforming, folderEmpty: Boolean, currentTime: Long = new Date().getTime) = currentState match {
    case Transforming(
      view,
      lastTransformationChecksum,
      listenersWaitingForMaterialize,
      materializationMode,
      withErrors,
      incomplete,
      retry) =>

      if (!folderEmpty)
        ResultingViewSchedulingState(
          Materialized(
            view,
            view.transformation().checksum,
            currentTime,
            withErrors = withErrors,
            incomplete = incomplete), {
            if (materializationMode == RESET_TRANSFORMATION_CHECKSUMS || lastTransformationChecksum != view.transformation().checksum)
              Set(WriteTransformationCheckum(view))
            else
              Set()
          } ++ Set(
            WriteTransformationTimestamp(view, currentTime),
            TouchSuccessFlag(view),
            ReportMaterialized(
              view,
              listenersWaitingForMaterialize,
              currentTime,
              withErrors = withErrors,
              incomplete = incomplete)))
      else
        ResultingViewSchedulingState(
          NoData(view),
          Set(
            ReportNoDataAvailable(view, listenersWaitingForMaterialize)))
  }

  // The transformation failed. Retry until maxRetries is exhausted, then move
  // to Failed and notify all listeners.
  def transformationFailed(currentState: Transforming, maxRetries: Int = Schedoscope.settings.retries, currentTime: Long = new Date().getTime) = currentState match {
    case Transforming(
      view,
      lastTransformationChecksum,
      listenersWaitingForMaterialize,
      materializationMode,
      withErrors,
      incomplete,
      retry) =>

      if (retry < maxRetries)
        ResultingViewSchedulingState(
          Retrying(
            view,
            lastTransformationChecksum,
            listenersWaitingForMaterialize,
            materializationMode,
            withErrors = withErrors,
            incomplete = incomplete,
            retry + 1),
          Set())
      else
        ResultingViewSchedulingState(
          Failed(view),
          Set(ReportFailed(view, listenersWaitingForMaterialize)))
  }
}
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views
import iht.views.html.sign_out
/** Verifies the rendered content of the sign-out page. */
class SignOutViewTest extends ViewTestHelper {

  lazy val signOutView: sign_out = app.injector.instanceOf[sign_out]

  // Renders the sign-out page to an HTML string.
  def view: String = signOutView()(createFakeRequest(), messages).toString

  "Sign out view" must {

    "have no message keys in html" in {
      noMessageKeysShouldBePresent(view)
    }

    "have the correct title" in {
      val renderedHeaders = asDocument(view).getElementsByTag("h1")
      renderedHeaders.size mustBe 1
      renderedHeaders.get(0).text() mustBe messagesApi("iht.signedOut")
    }

    "have the correct message" in {
      messagesShouldBePresent(view, messagesApi("page.iht.sign-out.p1"))
    }
  }
}
| hmrc/iht-frontend | test/iht/views/SignOutViewTest.scala | Scala | apache-2.0 | 1,295 |
package com.karumi.shot.base64
import java.io.{ByteArrayOutputStream, File}
import java.nio.charset.StandardCharsets
import java.util.Base64

import javax.imageio.ImageIO

import org.apache.commons.io.Charsets

import scala.util.control.NonFatal
/** Encodes image files as Base64 strings (after re-encoding them as PNG). */
class Base64Encoder {

  /**
    * Reads the image at `filePath`, re-encodes it as PNG, and returns its
    * Base64 representation.
    *
    * @param filePath path to an image file readable by ImageIO
    * @return Some(base64 string) on success; None if the file does not exist,
    *         is not a decodable image, or any I/O error occurs
    */
  def base64FromFile(filePath: String): Option[String] = {
    val outputStream = new ByteArrayOutputStream()
    try {
      // ImageIO.read returns null (rather than throwing) when no registered
      // reader can decode the input, so check explicitly.
      val bufferedImage = ImageIO.read(new File(filePath))
      if (bufferedImage == null) {
        None
      } else {
        ImageIO.write(bufferedImage, "png", outputStream)
        val encodedBytes = Base64.getEncoder.encode(outputStream.toByteArray)
        Some(new String(encodedBytes, StandardCharsets.UTF_8))
      }
    } catch {
      // NonFatal instead of Exception: never swallow VM errors et al.
      case NonFatal(_) => None
    } finally {
      outputStream.close()
    }
  }
}
| Karumi/Shot | core/src/main/scala/com/karumi/shot/base64/Base64Encoder.scala | Scala | apache-2.0 | 876 |
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sandius.rembulan.test.fragments
import net.sandius.rembulan.Table
import net.sandius.rembulan.runtime.LuaFunction
import net.sandius.rembulan.test.{FragmentBundle, FragmentExpectations, OneLiners}
object BasicLibFragments extends FragmentBundle with FragmentExpectations with OneLiners {
in (BasicContext) {
about ("type") {
program ("return type(nil)") succeedsWith "nil"
program ("return type(true)") succeedsWith "boolean"
program ("return type(false)") succeedsWith "boolean"
program ("return type(0)") succeedsWith "number"
program ("return type(0.1)") succeedsWith "number"
program ("return type(\\"\\")") succeedsWith "string"
program ("return type(\\"hello\\")") succeedsWith "string"
program ("return type(\\"2\\")") succeedsWith "string"
program ("return type(\\"0.2\\")") succeedsWith "string"
program ("return type(function() end)") succeedsWith "function"
program ("return type(type)") succeedsWith "function"
program ("return type({})") succeedsWith "table"
program ("return type()") failsWith "bad argument #1 to 'type' (value expected)"
}
about ("print") {
fragment ("print retrieves tostring once") {
"""local n = 0
|
|local tos = tostring
|local function cf(x)
| return function(y) return '['..x..'|'..tos(y)..']' end
|end
|setmetatable(_ENV, {__index=function(t,k) n = n + 1; return cf(n) end})
|
|tostring = nil
|
|print(nil, 10, "x")
|return n
"""
} in BasicContext succeedsWith(1)
}
about ("tostring") {
program ("return tostring(nil)") succeedsWith "nil"
program ("return tostring(true)") succeedsWith "true"
program ("return tostring(false)") succeedsWith "false"
program ("return tostring(0)") succeedsWith "0"
program ("return tostring(-0)") succeedsWith "0"
program ("return tostring(0.0)") succeedsWith "0.0"
program ("return tostring(-0.0)") succeedsWith "-0.0"
program ("return tostring(\\"\\")") succeedsWith ""
program ("return tostring(\\"1\\")") succeedsWith "1"
program ("return tostring(\\"1.00\\")") succeedsWith "1.00"
program ("return tostring(1 / 0)") succeedsWith "inf"
program ("return tostring(-1 / 0)") succeedsWith "-inf"
program ("return tostring(0 / 0)") succeedsWith "nan"
program ("return tostring(function() end)") succeedsWith (stringStartingWith("function: "))
program ("return tostring(tostring)") succeedsWith (stringStartingWith("function: "))
program ("return tostring({})") succeedsWith (stringStartingWith("table: "))
program ("return tostring()") failsWith "bad argument #1 to 'tostring' (value expected)"
}
about ("_VERSION") {
program ("return _VERSION") succeedsWith "Lua 5.3"
}
about ("tonumber") {
program ("return tonumber(nil)") succeedsWith null
program ("return tonumber(1)") succeedsWith 1
program ("return tonumber(0.3)") succeedsWith 0.3
program ("return tonumber(0)") succeedsWith 0
program ("return tonumber(0.0)") succeedsWith 0.0
program ("return tonumber(\\"x\\")") succeedsWith null
program ("return tonumber(\\"2\\")") succeedsWith 2
program ("return tonumber(\\"0.4\\")") succeedsWith 0.4
program ("return tonumber(\\"3.0\\")") succeedsWith 3.0
program ("return tonumber({})") succeedsWith null
program ("tonumber(1, \\"x\\")") failsWith "bad argument #2 to 'tonumber' (number expected, got string)"
program ("tonumber(\\"1\\", 1)") failsWith "bad argument #2 to 'tonumber' (base out of range)"
program ("tonumber(\\"1\\", 37)") failsWith "bad argument #2 to 'tonumber' (base out of range)"
program ("tonumber(1, 1)") failsWith "bad argument #1 to 'tonumber' (string expected, got number)"
program ("tonumber(nil, 10)") failsWith "bad argument #1 to 'tonumber' (string expected, got nil)"
program ("tonumber(nil, 1)") failsWith "bad argument #1 to 'tonumber' (string expected, got nil)"
program ("return tonumber(\\"-AbCd\\", 14)") succeedsWith -29777
program ("return tonumber(\\"+Hello\\", 36)") succeedsWith 29234652
program ("return tonumber(\\" spaces \\", 36)") succeedsWith 1735525972
program ("return tonumber(\\"spaces\\", 36)") succeedsWith 1735525972
program ("return tonumber(\\"A0\\", 10)") succeedsWith null
program ("return tonumber(\\"99\\", 9)") succeedsWith null
program ("return tonumber(\\"zzz\\", 36)") succeedsWith 46655
program ("return tonumber(1 / 0, 36)") failsWith "bad argument #1 to 'tonumber' (string expected, got number)"
program ("return tonumber(0 / 0, 36)") failsWith "bad argument #1 to 'tonumber' (string expected, got number)"
program ("return tonumber(0.2, 10)") failsWith "bad argument #1 to 'tonumber' (string expected, got number)"
program ("return tonumber()") failsWith "bad argument #1 to 'tonumber' (value expected)"
}
about ("getmetatable") {
program ("return getmetatable(nil)") succeedsWith null
program ("return getmetatable(true)") succeedsWith null
program ("return getmetatable(0)") succeedsWith null
program ("return getmetatable(\\"hurray\\")") succeedsWith null // defined by the string library
program ("getmetatable()") failsWith "bad argument #1 to 'getmetatable' (value expected)"
}
about ("setmetatable") {
program ("setmetatable(0, nil)") failsWith "bad argument #1 to 'setmetatable' (table expected, got number)"
program ("setmetatable({}, 0)") failsWith "bad argument #2 to 'setmetatable' (nil or table expected)"
program ("setmetatable({})") failsWith "bad argument #2 to 'setmetatable' (nil or table expected)"
val SetMetatableReturnsItsFirstArgument = fragment("setmetatable returns its first argument") {
"""local x = {}
|local y = setmetatable(x, {})
|return x == y, x == {}
"""
}
SetMetatableReturnsItsFirstArgument in thisContext succeedsWith (true, false)
val SetMetatableAndGetMetatable = fragment("setmetatable and getmetatable") {
"""local t = {}
|local mt0 = getmetatable(t)
|local mt1 = {}
|setmetatable(t, mt1)
|local mt2 = getmetatable(t)
|setmetatable(t, nil)
|local mt3 = getmetatable(t)
|return mt0 == nil, mt2 == mt1, mt2 == {}, mt3 == nil
"""
}
SetMetatableAndGetMetatable in thisContext succeedsWith (true, true, false, true)
program (
"""mt = {}
|t = {}
|setmetatable(t, mt)
|mt.__metatable = 'hello'
|return getmetatable(t)
""") succeedsWith "hello"
}
about ("pcall") {
program ("return pcall(nil)") succeedsWith (false, "attempt to call a nil value")
program ("return pcall(function() end)") succeedsWith true
program ("return pcall(pcall)") succeedsWith (false, "bad argument #1 to 'pcall' (value expected)")
program ("return pcall(pcall, pcall, pcall)") succeedsWith (true, true, false, "bad argument #1 to 'pcall' (value expected)")
program ("pcall()") failsWith "bad argument #1 to 'pcall' (value expected)"
val PCallHonoursTheCallMetamethod = fragment ("pcall honours the __call metamethod") {
"""function callable()
| local mt = {}
| mt.__call = function() return 42 end
| local t = {}
| setmetatable(t, mt)
| return t
|end
|
|x = callable()
|return pcall(x)
"""
}
PCallHonoursTheCallMetamethod in thisContext succeedsWith (true, 42)
val PCallCatchesErrorInACallMetamethod = fragment ("pcall catches error in a __call metamethod") {
"""function callable()
| local mt = {}
| mt.__call = function() error('kaboom') end
| local t = {}
| setmetatable(t, mt)
| return t
|end
|
|x = callable()
|return pcall(x)
"""
}
// FIXME: the error object should actually be "stdin:3: kaboom"
PCallCatchesErrorInACallMetamethod in thisContext succeedsWith (false, "kaboom")
}
about ("xpcall") {
program ("xpcall()") failsWith "bad argument #2 to 'xpcall' (function expected, got no value)"
program ("return xpcall(nil)") failsWith "bad argument #2 to 'xpcall' (function expected, got no value)"
program ("return xpcall(function() end)") failsWith "bad argument #2 to 'xpcall' (function expected, got no value)"
program ("return xpcall(nil, nil)") failsWith "bad argument #2 to 'xpcall' (function expected, got nil)"
program ("return xpcall(nil, function(...) return ... end)") succeedsWith (false, "attempt to call a nil value")
program ("return xpcall(xpcall, pcall)") succeedsWith (false, false)
program ("return xpcall(pcall, xpcall)") succeedsWith (false, "error in error handling")
program (
"""count = 0
|function handle(eo)
| count = count + 1
| error(eo)
|end
|xpcall(nil, handle)
|return count
""") succeedsWith 220
}
about ("error") {
program ("return error()") failsWithLuaError (null)
program ("return error(nil)") failsWithLuaError (null)
program ("error(1)") failsWithLuaError (java.lang.Long.valueOf(1))
program ("error(1.0)") failsWithLuaError (java.lang.Double.valueOf(1.0))
program ("error(\\"boom\\")") failsWithLuaError "boom"
program ("return pcall(error)") succeedsWith (false, null)
}
about ("assert") {
program ("assert(nil)") failsWith "assertion failed!"
program ("assert(false, 'boom')") failsWith "boom"
program ("return assert(true)") succeedsWith true
program ("return assert(1, false, 'x')") succeedsWith (1, false, "x")
program ("assert()") failsWith "bad argument #1 to 'assert' (value expected)"
program ("assert(pcall(error, 'boom'))") failsWith "boom"
}
about ("rawequal") {
program ("return rawequal()") failsWith "bad argument #1 to 'rawequal' (value expected)"
program ("return rawequal(nil)") failsWith "bad argument #2 to 'rawequal' (value expected)"
program ("return rawequal(nil, nil)") succeedsWith true
program ("return rawequal(0, 0)") succeedsWith true
program ("return rawequal(0 / 0, 0 / 0)") succeedsWith false
// TODO: add tests for values that do have the __eq metamethod
}
about ("rawget") {
program ("rawget()") failsWith "bad argument #1 to 'rawget' (table expected, got no value)"
program ("rawget(nil)") failsWith "bad argument #1 to 'rawget' (table expected, got nil)"
program ("rawget('x')") failsWith "bad argument #1 to 'rawget' (table expected, got string)"
program (
"""x = {}
|x.hello = 'world'
|return rawget(x, 'hello')
""") succeedsWith "world"
// TODO: add tests for values that do have the __index metamethod
}
about ("rawset") {
program ("rawset()") failsWith "bad argument #1 to 'rawset' (table expected, got no value)"
program ("rawset(nil)") failsWith "bad argument #1 to 'rawset' (table expected, got nil)"
program ("rawset('x')") failsWith "bad argument #1 to 'rawset' (table expected, got string)"
program ("rawset({}, nil)") failsWith "bad argument #3 to 'rawset' (value expected)"
program (
"""x = {}
|rawset(x, 'hello', 'world')
|return x.hello
""") succeedsWith "world"
program ("rawset({}, nil, 1)") failsWith "table index is nil"
program ("rawset({}, 0 / 0, 1)") failsWith "table index is NaN"
program (
"""x = {}
|y = rawset(x, 0, 'hi')
|return x == y
""") succeedsWith true
// TODO: add tests for values that do have the __newindex metamethod
}
about ("rawlen") {
program ("rawlen()") failsWith "bad argument #1 to 'rawlen' (table or string expected)"
program ("rawlen(1)") failsWith "bad argument #1 to 'rawlen' (table or string expected)"
program ("return rawlen('x')") succeedsWith 1
program ("return rawlen({'x', 1, true})") succeedsWith 3
// TODO: add tests for values that do have the __len metamethod
}
about ("select") {
program ("select()") failsWith "bad argument #1 to 'select' (number expected, got no value)"
program ("select('x')") failsWith "bad argument #1 to 'select' (number expected, got string)"
program ("select(' #')") failsWith "bad argument #1 to 'select' (number expected, got string)"
program ("select(' # ')") failsWith "bad argument #1 to 'select' (number expected, got string)"
program ("return select('#')") succeedsWith 0
program ("return select('#', nil)") succeedsWith 1
program ("return select('#', 1, 2, 3, 4, 5)") succeedsWith 5
program ("return select('+1', true, false)") succeedsWith (true, false)
program ("return select('-1', true, false)") succeedsWith (false)
program ("return select(7, true, false)") succeedsWith ()
program ("select(0, true, false)") failsWith "bad argument #1 to 'select' (index out of range)"
program ("select(-3, true, false)") failsWith "bad argument #1 to 'select' (index out of range)"
program ("select(1.5, true, false)") failsWith "bad argument #1 to 'select' (number has no integer representation)"
program ("return select(1, 1, 2, 3, 4, 5)") succeedsWith (1, 2, 3, 4, 5)
program ("return select(-1, 1, 2, 3, 4, 5)") succeedsWith (5)
program ("return select(2, 1, 2, 3, 4, 5)") succeedsWith (2, 3, 4, 5)
program ("return select(3, 1, 2, 3, 4, 5)") succeedsWith (3, 4, 5)
program ("return select(-2, 1, 2, 3, 4, 5)") succeedsWith (4, 5)
program ("return select(-3, 1, 2, 3, 4, 5)") succeedsWith (3, 4, 5)
}
about ("load") {
program ("load()") failsWith "bad argument #1 to 'load' (function expected, got no value)"
program ("load({})") failsWith "bad argument #1 to 'load' (function expected, got table)"
program ("load(nil)") failsWith "bad argument #1 to 'load' (function expected, got nil)"
program ("return load(42)") succeedsWith (null, classOf[String])
program ("return load(42, 42, 42)") succeedsWith (null, "attempt to load a text chunk (mode is '42')")
program ("return load('return nil', nil)") succeedsWith (classOf[LuaFunction])
program ("return load('return 1 + 2')()") succeedsWith (3)
program ("return load('return x', nil, 't', {x = 10})()") succeedsWith (10)
program ("local x = 20; return load('return x')()") succeedsWith (null)
program ("x = 20; return load('return x')()") succeedsWith (20)
program ("local e = load('return _ENV')(); return _ENV, e, _ENV == e") succeedsWith (classOf[Table], classOf[Table], true)
// error reporting: FIXME: actual loader error messages are different from PUC-Lua!
program ("""return load("x(")""") succeedsWith (null, "[string \\"x(\\"]:?: function call expected at line 1")
program ("""return load("'")""") succeedsWith (null, "[string \\"'\\"]:?: Lexical error at line 1, column 2. Encountered: <EOF> after : \\"\\"")
program ("""return load('"')""") succeedsWith (null, "[string \\"\\"\\"]:?: Lexical error at line 1, column 2. Encountered: <EOF> after : \\"\\"")
program (
"""local n = 5
|local i = 0
|local function f()
| i = i + 1
| if i == 1 then return "return "
| elseif i < n+1 then return (i-1) .. ","
| elseif i == n+1 then return (i-1)
| else return nil
| end
|end
|return load(f)()
""") succeedsWith (1, 2, 3, 4, 5)
}
}
}
| mjanicek/rembulan | rembulan-tests/src/test/scala/net/sandius/rembulan/test/fragments/BasicLibFragments.scala | Scala | apache-2.0 | 16,667 |
package models.topics
import scala.collection._
/** Mutable accumulator of [[Topic]] entries for the detailed-topics view. */
class DetailedTopics {

  /** Backing buffer holding every topic added so far (insertion order preserved). */
  val topics: mutable.ListBuffer[Topic] = new mutable.ListBuffer()

  /** Returns the underlying (mutable) buffer of topics. */
  def getTopics : mutable.ListBuffer[Topic] = topics

  /** Appends `topic` and returns the updated buffer, mirroring the behaviour of `+=`. */
  def add(topic: Topic): mutable.ListBuffer[Topic] = {
    topics.append(topic)
    topics
  }
} | alanlavintman/kafka-admin | app/models/topics/DetailedTopics.scala | Scala | mit | 246 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.docgen.cookbook
import org.junit.Test
import org.junit.Assert._
import org.neo4j.cypher.docgen.DocumentingTestBase
import org.junit.Ignore
import org.neo4j.graphdb.Node
//modeled after a mailing list suggestion in
//http://groups.google.com/group/neo4j/browse_thread/thread/57dbec6e904fca42
/**
 * Documentation test: from a "meal" graph, find all leaf ingredients by
 * following variable-length `composed_of` paths from what Peter eats.
 * Modeled after a mailing list suggestion:
 * http://groups.google.com/group/neo4j/browse_thread/thread/57dbec6e904fca42
 */
class MealTestIgnored extends DocumentingTestBase {
  /** Graph fixture: Peter eats two things; Burger decomposes into ingredients. */
  def graphDescription = List(
    "Peter eats Potatoes",
    "Peter eats Burger",
    "Burger composed_of Meat",
    "Burger composed_of Bread",
    "Bread composed_of Salt",
    "Bread composed_of Flour",
    "Bread composed_of Cereals")

  /** Node properties: per-ingredient weights intended for the weight calculation below. */
  override val properties = Map(
    "Potatoes" -> Map("weight" -> 10),
    "Meat" -> Map("weight" -> 40),
    "Salt" -> Map("weight" -> 10),
    "Flour" -> Map("weight" -> 30),
    "Cereals" -> Map("weight" -> 80))

  //TODO:
  //Rels props:
  //burger--meat //quantity=2
  //burger--bread //quantity=2
  //bread--salt //quantity=2
  //bread--flour //quantity=1
  //bread--cereals //quantity=3
  //peter.weight = (potatoes.weight) + ( (meal.weight*composed_of.quantity) + ( (salt.weight*composed_of.quantity) + (flour.weight*composed_of.quantity) (cereal.weight*composed_of.quantity) ) )

  def section = "cookbook"

  @Test def weightedMeal() {
    testQuery(
      title = "Longest Paths -- find the leaf ingredients",
      text = """From the root, find the paths to all the leaf ingredients in order to return the paths for the weight calculation""",
      queryText = "" +
        "START me=node(%Peter%) " +
        "MATCH me-[:eats]->meal, " +
        "path=meal-[r:composed_of*0..]->ingredient " +
        "WHERE not(ingredient --> ())" +
        "RETURN ingredient.name ",
      // Bug fix: the previous text ("The activity stream for Jane is returned.")
      // was copy-pasted from an unrelated example and did not describe this query.
      returns = "The names of all the leaf ingredients are returned.",
      (p) => assertEquals(List(Map("ingredient.name" -> "Potatoes"),
        Map("ingredient.name" -> "Meat"),
        Map("ingredient.name" -> "Salt"),
        Map("ingredient.name" -> "Flour"),
        Map("ingredient.name" -> "Cereals")), p.toList))
  }
}
| dksaputra/community | cypher/src/test/scala/org/neo4j/cypher/docgen/cookbook/MealTestIgnored.scala | Scala | gpl-3.0 | 2,848 |
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.internal
import java.util
import akka.annotation.InternalApi
import com.typesafe.config.{Config, ConfigObject}
import scala.annotation.tailrec
import scala.jdk.CollectionConverters._
import scala.concurrent.duration.Duration
import akka.util.JavaDurationConverters._
/**
* INTERNAL API
*
* Converts a [[com.typesafe.config.Config]] section to a Map for use with Kafka Consumer or Producer.
*/
/**
 * INTERNAL API
 *
 * Converts a [[com.typesafe.config.Config]] section to a Map for use with
 * Kafka Consumer or Producer clients.
 */
@InternalApi private[kafka] object ConfigSettings {

  /**
   * Flattens the given config section into `fully.qualified.key -> string value`
   * pairs, descending into nested objects until only leaf keys remain.
   */
  def parseKafkaClientsProperties(config: Config): Map[String, String] = {
    val root = config.root

    // Worklist traversal: pop a key; if its value is a nested object, enqueue
    // its children as dotted paths, otherwise record the key as a leaf.
    @tailrec
    def leafKeys(pending: List[String], acc: Set[String]): Set[String] =
      pending match {
        case Nil => acc
        case key :: rest =>
          root.toConfig.getAnyRef(key) match {
            case nested: util.Map[_, _] =>
              val children = nested.keySet().asScala.toList.map(child => key + "." + child)
              leafKeys(rest ::: children, acc)
            case _ =>
              leafKeys(rest, acc + key)
          }
      }

    val keys = leafKeys(root.keySet().asScala.toList, Set.empty[String])
    keys.iterator.map(key => key -> config.getString(key)).toMap
  }

  /** Reads a duration at `path`, treating the literal string "infinite" as `Duration.Inf`. */
  def getPotentiallyInfiniteDuration(underlying: Config, path: String): Duration =
    if (underlying.getString(path) == "infinite") Duration.Inf
    else underlying.getDuration(path).asScala
}
| softwaremill/reactive-kafka | core/src/main/scala/akka/kafka/internal/ConfigSettings.scala | Scala | apache-2.0 | 1,651 |
package scala.meta.tests
package ast
import org.scalatest._
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
/** Test-suite base wiring the scala-reflect runtime universe into scalameta's AST reflection. */
class AstSuite extends FunSuite {
  // Early-initializer syntax: `u` and `mirror` are assigned before the
  // Reflection trait's body runs, since that trait reads them during init.
  object AstReflection extends {
    val u: ru.type = ru
    val mirror: u.Mirror = u.runtimeMirror(classOf[scala.meta.Tree].getClassLoader)
  } with org.scalameta.ast.Reflection
  // Shorthand for looking up a type's symbol in the runtime universe.
  def symbolOf[T: TypeTag]: TypeSymbol = ru.symbolOf[T]
}
| beni55/scalameta | scalameta/trees/src/test/scala/scala/meta/tests/ast/AstSuite.scala | Scala | bsd-3-clause | 419 |
package be.mygod.content
import android.content.{ServiceConnection, ComponentName}
import android.os.IBinder
import be.mygod.app.ServicePlus
import be.mygod.os.BinderPlus
/**
 * [[ServiceConnection]] that exposes the bound [[ServicePlus]] as an `Option`.
 *
 * `service` is `None` before the first connection and after a disconnect,
 * and `Some(...)` while bound.
 *
 * @author Mygod
 */
class ServicePlusConnection[S <: ServicePlus] extends ServiceConnection {
  // Bug fix: was `= _`, which default-initializes the Option *reference* to
  // null, so reading `service` before the first callback threw NPEs instead
  // of behaving like "not connected".
  var service: Option[S] = None
  def onServiceDisconnected(name: ComponentName): Unit = service = None
  // Unwraps the BinderPlus to recover the concrete service instance.
  def onServiceConnected(name: ComponentName, binder: IBinder): Unit =
    service = Some(binder.asInstanceOf[BinderPlus].service.asInstanceOf[S])
}
| Mygod/mygod-lib-android | src/main/scala/be/mygod/content/ServicePlusConnection.scala | Scala | gpl-3.0 | 524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.yarn
import org.apache.hadoop.yarn.api.records.Container
import org.joda.time.Period
import org.joda.time.format.{ DateTimeFormatter, ISODateTimeFormat, ISOPeriodFormat, PeriodFormatter }
// Shared default Joda-Time formatters used by YarnContainer's *Str helpers.
// (The "Formater" spelling is kept: these vals are part of the public API.)
object YarnContainerUtils {
  val dateFormater = ISODateTimeFormat.dateTime
  val periodFormater = ISOPeriodFormat.standard
}
/**
 * Wrapper around a YARN [[Container]] exposing its identifying information,
 * the wall-clock time this wrapper was created, and a live up-time reading.
 */
class YarnContainer(container: Container) {
  val id = container.getId()
  val nodeId = container.getNodeId()
  val nodeHttpAddress = container.getNodeHttpAddress()
  val resource = container.getResource()
  val priority = container.getPriority()
  val containerToken = container.getContainerToken()

  /** Epoch millis at which this wrapper was constructed. */
  val startTime = System.currentTimeMillis()

  /** Renders [[startTime]] with the given formatter (ISO date-time by default). */
  def startTimeStr(dtFormatter: Option[DateTimeFormatter] = None) =
    dtFormatter.getOrElse(YarnContainerUtils.dateFormater).print(startTime)

  // Bug fix: `upTime` was a `val` captured at construction time (the same
  // instant as `startTime`), so the reported up-time was permanently ~0.
  // As a `def` it is re-evaluated on each access and grows with the clock.
  def upTime = System.currentTimeMillis()

  /** Renders the period elapsed since [[startTime]] (ISO period format by default). */
  def upTimeStr(periodFormatter: Option[PeriodFormatter] = None) =
    periodFormatter.getOrElse(YarnContainerUtils.periodFormater).print(new Period(startTime, upTime))
}
| bharathkk/samza | samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnContainer.scala | Scala | apache-2.0 | 1,928 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.mod
import com.castlebravostudios.rayguns.blocks.TileEntities
import com.castlebravostudios.rayguns.entities.Entities
import com.castlebravostudios.rayguns.entities.effects.Effects
import com.castlebravostudios.rayguns.utils.Extensions.ItemExtensions
import net.minecraft.creativetab.CreativeTabs
import net.minecraft.item.ItemStack
import net.minecraft.util.ResourceLocation
import cpw.mods.fml.common.Mod
import cpw.mods.fml.common.Mod.EventHandler
import cpw.mods.fml.common.SidedProxy
import cpw.mods.fml.common.event.FMLInitializationEvent
import cpw.mods.fml.common.event.FMLPostInitializationEvent
import cpw.mods.fml.common.event.FMLPreInitializationEvent
import cpw.mods.fml.common.network.NetworkRegistry
import org.apache.logging.log4j.Logger
import com.castlebravostudios.rayguns.items.RaygunsItems
import com.castlebravostudios.rayguns.items.RaygunsBlocks
import com.castlebravostudios.rayguns.items.frames.FireflyFrame
import net.minecraft.item.Item
import cpw.mods.fml.common.event.FMLInterModComms
import net.minecraft.nbt.NBTTagCompound
/**
 * Forge mod entry point for Modular Rayguns. Registration order in `load`
 * follows the FML lifecycle: items/blocks/entities/effects first, then GUI
 * handler, recipes, client renderers/textures, and finally IMC messages.
 */
@Mod(modid="mod_ModularRayguns", version="1.0-alpha2", modLanguage="scala", useMetadata=true)
object ModularRayguns {
  // Set once in preInit from the FML-provided mod log.
  private var _logger : Logger = _
  def logger : Logger = _logger
  // FML injects the side-appropriate proxy instance into this var.
  @SidedProxy(clientSide="com.castlebravostudios.rayguns.mod.ClientProxy",
    serverSide="com.castlebravostudios.rayguns.mod.CommonProxy")
  var proxy : CommonProxy = null
  // Pre-init phase: capture the logger and load config from the suggested file.
  @EventHandler
  def preInit( event : FMLPreInitializationEvent ) : Unit = {
    _logger = event.getModLog()
    Config.load( event.getSuggestedConfigurationFile() )
  }
  @EventHandler
  def postInit( event : FMLPostInitializationEvent ) : Unit = Unit
  // Init phase: register all game content, then notify Waila and TheGuide via IMC.
  @EventHandler
  def load( event : FMLInitializationEvent ) : Unit = {
    RaygunsItems.registerItems
    RaygunsBlocks.registerBlocks
    Entities.registerEntities
    TileEntities.registerTileEntities
    Effects.registerEffects
    NetworkRegistry.INSTANCE.registerGuiHandler(ModularRayguns, proxy)
    Config.recipeLibrary.registerRecipes()
    proxy.registerRenderers()
    proxy.loadTextures()
    FMLInterModComms.sendMessage("Waila", "register",
      "com.castlebravostudios.rayguns.plugins.waila.RaygunsWailaModule.register" )
    val guideMessage = new NBTTagCompound
    guideMessage.setString("name", "Modular Rayguns")
    guideMessage.setString("location", "rayguns:doc/ModularRayguns.md")
    FMLInterModComms.sendMessage("mod_TheGuide", "RegisterIndexFile",
      guideMessage )
  }
  // Creative tab for the mod's items.
  // NOTE(review): `FireflyFrame.item.get` assumes the item is always
  // registered before the tab icon is requested — confirm against init order.
  val raygunsTab = new CreativeTabs("tabRayguns") {
    override def getTabIconItem : Item =
      FireflyFrame.item.get
  }
  /** Builds a ResourceLocation in this mod's "rayguns" namespace. */
  def texture( path : String ) : ResourceLocation =
    new ResourceLocation( "rayguns", path )
}
| Redattack34/ModularRayguns | src/main/scala/com/castlebravostudios/rayguns/mod/ModularRayguns.scala | Scala | bsd-3-clause | 4,347 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.testkit
import akka.Done
import akka.persistence.query.Offset
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.lightbend.internal.broker.TaggedOffsetTopicProducer
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactory
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactoryProvider
import com.lightbend.lagom.scaladsl.api.Descriptor.TopicCall
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceSupport.ScalaMethodTopic
import com.lightbend.lagom.scaladsl.api.broker.Topic.TopicId
import com.lightbend.lagom.scaladsl.api.broker.Message
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.server.LagomServer
import scala.concurrent.Future
/**
 * Cake component that installs the in-memory test [[TopicFactory]] as the
 * topic publisher. Fails fast if another topic publisher is already mixed in.
 */
trait TestTopicComponents extends TopicFactoryProvider {
  def lagomServer: LagomServer
  def materializer: Materializer
  override def optionalTopicFactory: Option[TopicFactory] = Some(topicFactory)
  // Refuse to silently shadow a real broker implementation mixed into the cake.
  override def topicPublisherName: Option[String] = super.topicPublisherName match {
    case Some(other) =>
      sys.error(
        s"Cannot provide the test topic factory as the default topic publisher since a default topic publisher has already been mixed into this cake: $other"
      )
    case None => Some("test")
  }
  lazy val topicFactory: TopicFactory = new TestTopicFactory(lagomServer)(materializer)
}
/**
 * Topic factory for tests: resolves a topic call against the server's own
 * service binding and wraps the service's `TaggedOffsetTopicProducer` in a
 * [[TestTopic]]. Only Scala-method-backed, tagged-offset topics are supported.
 */
private[lagom] class TestTopicFactory(lagomServer: LagomServer)(implicit materializer: Materializer)
    extends TopicFactory {
  // Topic id -> the service instance that declares it, built once from the binding.
  private val topics: Map[TopicId, Service] = {
    val binding = lagomServer.serviceBinding
    binding.descriptor.topics.map { topic =>
      topic.topicId -> binding.service.asInstanceOf[Service]
    }.toMap
  }
  override def create[Message](topicCall: TopicCall[Message]): Topic[Message] =
    topics.get(topicCall.topicId) match {
      case Some(service) =>
        topicCall.topicHolder match {
          case method: ScalaMethodTopic[Message] =>
            // Invoke the service's topic method reflectively to obtain the producer.
            method.method.invoke(service) match {
              case topicProducer: TaggedOffsetTopicProducer[Message, _] =>
                new TestTopic(topicCall, topicProducer)(materializer)
              case _ =>
                throw new IllegalArgumentException(
                  s"Testkit does not know how to handle the topic type for ${topicCall.topicId}"
                )
            }
          case _ =>
            throw new IllegalArgumentException(s"Testkit does not know how to handle topic ${topicCall.topicId}")
        }
      case None =>
        throw new IllegalArgumentException(s"${topicCall.topicId} hasn't been resolved")
    }
}
/**
 * In-memory [[Topic]] implementation: streams events from the producer's
 * read-side streams (all tags, from `Offset.noOffset`) and round-trips each
 * event through the topic's message serializer, as a broker would.
 */
private[lagom] class TestTopic[Payload, Event <: AggregateEvent[Event]](
    topicCall: TopicCall[Payload],
    topicProducer: TaggedOffsetTopicProducer[Payload, Event]
)(implicit materializer: Materializer)
    extends Topic[Payload] {
  override def topicId: TopicId = topicCall.topicId
  override def subscribe: Subscriber[Payload] = new TestSubscriber[Payload](identity)
  // `transform` lets withMetadata wrap payloads in Message without a new class.
  private class TestSubscriber[WrappedPayload](transform: Payload => WrappedPayload)
      extends Subscriber[WrappedPayload] {
    // Group ids are irrelevant in-memory; subscriber is returned unchanged.
    override def withGroupId(groupId: String): Subscriber[WrappedPayload] = this
    override def withMetadata = new TestSubscriber[Message[WrappedPayload]](transform.andThen(Message.apply))
    override def atMostOnceSource: Source[WrappedPayload, _] = {
      val serializer = topicCall.messageSerializer
      // Merge every tag's stream, then serialize + deserialize each event to
      // exercise the same codec path a real broker round-trip would.
      Source(topicProducer.tags)
        .flatMapMerge(topicProducer.tags.size, { tag =>
          topicProducer.readSideStream.apply(tag, Offset.noOffset).map(_._1)
        })
        .map { evt =>
          serializer.serializerForRequest.serialize(evt)
        }
        .map { bytes =>
          serializer.deserializer(serializer.acceptResponseProtocols.head).deserialize(bytes)
        }
        .map(transform)
    }
    override def atLeastOnce(flow: Flow[WrappedPayload, Done, _]): Future[Done] =
      atMostOnceSource.via(flow).runWith(Sink.ignore)
  }
}
| ignasi35/lagom | testkit/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/testkit/TestTopicComponents.scala | Scala | apache-2.0 | 4,326 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.typesystem.builders
import com.google.common.collect.ImmutableList
import org.apache.atlas.typesystem.TypesDef
import org.apache.atlas.typesystem.types._
import org.apache.atlas.typesystem.types.utils.TypesUtil
import scala.collection.mutable.ArrayBuffer
import scala.language.{dynamics, implicitConversions, postfixOps}
import scala.util.DynamicVariable
import com.google.common.collect.ImmutableSet
/**
 * Companion holding the DSL vocabulary: attribute options (required, unique,
 * ...), primitive type-name aliases, and the mutable [[Attr]] builder that
 * registers itself into the current building [[Context]].
 */
object TypesBuilder {

  // Accumulators for each kind of type definition being built; currentTypeAttrs
  // points at the attribute buffer of the type currently under construction.
  case class Context(enums : ArrayBuffer[EnumTypeDefinition],
                     structs : ArrayBuffer[StructTypeDefinition],
                     classes : ArrayBuffer[HierarchicalTypeDefinition[ClassType]],
                     traits : ArrayBuffer[HierarchicalTypeDefinition[TraitType]],
                     currentTypeAttrs : ArrayBuffer[Attr] = null)

  // Marker values matched by identity (backquoted patterns) in Attr.`~`.
  class AttrOption()
  class ReverseAttributeName(val rNm : String) extends AttrOption
  class MultiplicityOption(val lower: Int, val upper: Int, val isUnique: Boolean) extends AttrOption

  val required = new AttrOption()
  val optional = new AttrOption()
  val collection = new AttrOption()
  val set = new AttrOption()
  val composite = new AttrOption()
  val unique = new AttrOption()
  val indexed = new AttrOption()

  def reverseAttributeName(rNm : String) = new ReverseAttributeName(rNm)
  // NOTE(review): "multiplicty" is a typo but part of the public DSL surface; kept.
  def multiplicty(lower: Int, upper: Int, isUnique: Boolean) = new MultiplicityOption(lower, upper, isUnique)

  // String aliases for the primitive type names used in attribute declarations.
  val boolean = DataTypes.BOOLEAN_TYPE.getName
  val byte = DataTypes.BYTE_TYPE.getName
  val short = DataTypes.SHORT_TYPE.getName
  val int = DataTypes.INT_TYPE.getName
  val long = DataTypes.LONG_TYPE.getName
  val float = DataTypes.FLOAT_TYPE.getName
  val double = DataTypes.DOUBLE_TYPE.getName
  val bigint = DataTypes.BIGINTEGER_TYPE.getName
  val bigdecimal = DataTypes.BIGDECIMAL_TYPE.getName
  val date = DataTypes.DATE_TYPE.getName
  val string = DataTypes.STRING_TYPE.getName

  /** Array-of-`t` type name. */
  def array(t : String) : String = {
    DataTypes.arrayTypeName(t)
  }
  /** Map type name from key and value type names. */
  def map(kt : String, vt : String) : String = {
    DataTypes.mapTypeName(kt, vt)
  }

  /**
   * Mutable attribute builder. Constructing one registers it in the enclosing
   * context (`ctx.currentTypeAttrs += this`); `~` then applies type + options.
   */
  class Attr(ctx : Context, val name : String) {
    private var dataTypeName : String = DataTypes.BOOLEAN_TYPE.getName
    private var multiplicity: Multiplicity = Multiplicity.OPTIONAL
    private var isComposite: Boolean = false
    private var reverseAttributeName: String = null
    private var isUnique: Boolean = false
    private var isIndexable: Boolean = false
    // Side effect on construction: self-register with the type being built.
    ctx.currentTypeAttrs += this
    def getDef : AttributeDefinition =
      new AttributeDefinition(name, dataTypeName,
        multiplicity, isComposite, isUnique, isIndexable, reverseAttributeName)
    // DSL operator: "attrName" ~ (typeName, option*); returns this for chaining.
    def `~`(dT : String, options : AttrOption*) : Attr = {
      dataTypeName = dT
      options.foreach { o =>
        o match {
          case `required` => {multiplicity = Multiplicity.REQUIRED}
          case `optional` => {multiplicity = Multiplicity.OPTIONAL}
          case `collection` => {multiplicity = Multiplicity.COLLECTION}
          case `set` => {multiplicity = Multiplicity.SET}
          case `composite` => {isComposite = true}
          case `unique` => {isUnique = true}
          case `indexed` => {isIndexable = true}
          case m : MultiplicityOption => {multiplicity = new Multiplicity(m.lower, m.upper, m.isUnique)}
          case r : ReverseAttributeName => {reverseAttributeName = r.rNm}
          case _ => ()
        }
      }
      this
    }
  }
}
/**
 * DSL front-end for declaring Atlas type definitions. A `types { ... }` block
 * runs the user's declarations, which append into the thread-scoped
 * [[TypesBuilder.Context]] held in the DynamicVariable `context`.
 */
class TypesBuilder {
  import org.apache.atlas.typesystem.builders.TypesBuilder.{Attr, Context}

  // Re-export the companion's option markers and type-name aliases so that
  // `import builder._` brings the full DSL vocabulary into scope.
  val required = TypesBuilder.required
  val optional = TypesBuilder.optional
  val collection = TypesBuilder.collection
  val set = TypesBuilder.set
  val composite = TypesBuilder.composite
  val unique = TypesBuilder.unique
  val indexed = TypesBuilder.indexed
  def multiplicty = TypesBuilder.multiplicty _
  def reverseAttributeName = TypesBuilder.reverseAttributeName _
  val boolean = TypesBuilder.boolean
  val byte = TypesBuilder.byte
  val short = TypesBuilder.short
  val int = TypesBuilder.int
  val long = TypesBuilder.long
  val float = TypesBuilder.float
  val double = TypesBuilder.double
  val bigint = TypesBuilder.bigint
  val bigdecimal = TypesBuilder.bigdecimal
  val date = TypesBuilder.date
  val string = TypesBuilder.string
  def array = TypesBuilder.array _
  def map = TypesBuilder.map _

  // Holds the accumulating buffers; withValue scopes currentTypeAttrs per type.
  val context = new DynamicVariable[Context](Context(new ArrayBuffer(),
    new ArrayBuffer(),
    new ArrayBuffer(),
    new ArrayBuffer()))

  // Lets a bare string start an attribute declaration: "name" ~ (int, required).
  implicit def strToAttr(s : String) = new Attr(context.value, s)

  /** Runs the declarations in `f` and snapshots the accumulated definitions. */
  def types(f : => Unit ) : TypesDef = {
    f
    TypesDef(context.value.enums.toSeq,
      context.value.structs.toSeq,
      context.value.traits.toSeq,
      context.value.classes.toSeq)
  }

  /** Declares a class type; attributes created inside `f` attach to it. */
  def _class(name : String, superTypes : List[String] = List())(f : => Unit): Unit = {
    val attrs = new ArrayBuffer[Attr]()
    context.withValue(context.value.copy(currentTypeAttrs = attrs)){f}
    context.value.classes +=
      TypesUtil.createClassTypeDef(name, ImmutableSet.copyOf[String](superTypes.toArray), attrs.map(_.getDef):_*)
  }

  /** Declares a trait type; attributes created inside `f` attach to it. */
  def _trait(name : String, superTypes : List[String] = List())(f : => Unit): Unit = {
    val attrs = new ArrayBuffer[Attr]()
    context.withValue(context.value.copy(currentTypeAttrs = attrs)){f}
    context.value.traits +=
      TypesUtil.createTraitTypeDef(name, ImmutableSet.copyOf[String](superTypes.toArray), attrs.map(_.getDef):_*)
    val v = context.value
    v.traits.size
  }

  /** Declares a struct type; attributes created inside `f` attach to it. */
  def struct(name : String)(f : => Unit): Unit = {
    val attrs = new ArrayBuffer[Attr]()
    context.withValue(context.value.copy(currentTypeAttrs = attrs)){f}
    context.value.structs +=
      new StructTypeDefinition(name, attrs.map(_.getDef).toArray)
  }

  /** Declares an enum type whose values are ordinally numbered from 0. */
  def enum(name : String, values : String*) : Unit = {
    val enums = values.zipWithIndex.map{ case (v, i) =>
      new EnumValue(v,i)
    }
    context.value.enums +=
      TypesUtil.createEnumTypeDef(name, enums:_*)
  }
}
| jnhagelberg/incubator-atlas | typesystem/src/main/scala/org/apache/atlas/typesystem/builders/TypesBuilder.scala | Scala | apache-2.0 | 6,787 |
package sqlpt.ast.expressions
import sqlpt._, column._, Column._, AggregationFuncs._, Type._
/**
 * A source of rows with column set `Cols`. Joins produce a [[Joined2]];
 * the non-inner sides are wrapped in [[Outer]] so their columns become
 * [[Nullabled]] in the result type.
 */
sealed trait Rows[Cols <: Product] {
  def cols: Cols
  def join[OtherCols <: Product](right: Rows[OtherCols])(on: (Cols, OtherCols) => Column[Bool]) =
    Joined2[Cols, OtherCols, Cols, OtherCols](this, right, Seq(on(this.cols, right.cols)), Seq(JoinMode.Inner))
  // TODO: Remove direct call to `Outer`.
  def leftJoin[OtherCols <: Product](right: Rows[OtherCols])(on: (Cols, OtherCols) => Column[Bool]) =
    Joined2[Cols, Nullabled[OtherCols], Cols, OtherCols](this, Outer(right), Seq(on(this.cols, right.cols)), Seq(JoinMode.Left))
  // TODO: Remove direct call to `Outer`.
  def rightJoin[OtherCols <: Product](right: Rows[OtherCols])(on: (Cols, OtherCols) => Column[Bool]) =
    Joined2[Nullabled[Cols], OtherCols, Cols, OtherCols](Outer(this), right, Seq(on(this.cols, right.cols)), Seq(JoinMode.Right))
}
/** A row source with accumulated WHERE predicates, awaiting projection. */
case class Filtered[Src <: Product](source: Rows[Src], sourceFilters: Set[Column[Bool]]) {

  /** Adds one more predicate over the source columns to the filter set. */
  def where(f: Src => Column[Bool]): Filtered[Src] = {
    val predicate = f(source.cols)
    copy(sourceFilters = sourceFilters + predicate)
  }

  /** Projects the chosen columns, carrying the accumulated filters along. */
  def select[Cols <: Product : Columns](p: Src => Cols): SimpleSelection[Cols, Src] = {
    val projection = p(source.cols)
    SimpleSelection(projection, source, sourceFilters)
  }

  /** Like `select`, but the resulting selection is marked DISTINCT. */
  def selectDistinct[Cols <: Product : Columns](p: Src => Cols): SimpleSelection[Cols, Src] =
    select(p).distinct

  /** Begins a GROUP BY over the given grouping columns. */
  def groupBy[GrpCols <: Product : Columns](selectGroupingCols: Src => GrpCols) =
    Grouped[GrpCols, Src](selectGroupingCols(source.cols), source, sourceFilters)
}
// TODO: Can the type parameter be made covariant?
/** A complete SELECT (simple or aggregated); selections of the same shape can be UNION ALLed. */
sealed trait Selection[Cols <: Product] extends Rows[Cols] {
  def cols: Cols
  def unionAll(other: Selection[Cols]) =
    UnionAll(Seq(this, other))
}
/** A plain SELECT: projected columns, a source, its filters, and an optional DISTINCT flag. */
case class SimpleSelection[Cols <: Product, Src <: Product](
  cols: Cols,
  source: Rows[Src],
  filters: Set[Column[Bool]], // TODO: Does this need to be a Set? We can AND.
  isDistinct: Boolean = false
) extends Selection[Cols] {
  /** Returns a copy of this selection with DISTINCT enabled. */
  def distinct: SimpleSelection[Cols, Src] =
    copy(isDistinct = true)
}
/**
 * Wraps a column set from the outer side of a join: selecting a column via
 * `apply` re-types it as nullable. The asInstanceOf is an unchecked cast —
 * Column's runtime representation is assumed identical for T and Nullable[T].
 */
case class Nullabled[Cols <: Product](cols: Cols) {
  def apply[T <: Type](f: Cols => Column[T]) =
    f(cols).asInstanceOf[Column[Nullable[T]]]
}
/** Marks a join side as outer: its columns are exposed through [[Nullabled]]. */
case class Outer[Cols <: Product] private (rows: Rows[Cols]) extends Rows[Nullabled[Cols]] {
  override def cols = Nullabled(rows.cols)
}
/** The SQL join kind recorded per join step (one entry per `on` condition). */
sealed trait JoinMode
object JoinMode {
  case object Inner extends JoinMode
  case object Left extends JoinMode
  case object Right extends JoinMode
}
/** Common shape of JoinedN: parallel sequences of join conditions, modes, and sources. */
trait BaseJoined {
  def ons: Seq[Column[Bool]] // TODO: Define type alias 'JoinCondition' for 'Column[Bool]'.
  def joinModes: Seq[JoinMode]
  def sourceSeq: Seq[Rows[_ <: Product]]
  // Idempotently wraps a source in Outer. Both branches use unchecked casts
  // to the caller-chosen Inner parameter; callers must pass the matching type.
  protected def outer[Inner <: Product](rows: Rows[_ <: Product]): Outer[Inner] = rows match {
    case alreadyOuter: Outer[_] => alreadyOuter.asInstanceOf[Outer[Inner]]
    case nonOuter => Outer(nonOuter).asInstanceOf[Outer[Inner]]
  }
}
/**
 * Two joined sources. ColsN are the exposed column types (possibly Nullabled);
 * InnerN remember the un-wrapped types so a later rightJoin can re-wrap the
 * accumulated left-hand sides via `outer`.
 */
case class Joined2
  [Cols1 <: Product, Cols2 <: Product, Inner1 <: Product, Inner2 <: Product]
  (rows1: Rows[Cols1], rows2: Rows[Cols2], ons: Seq[Column[Bool]], joinModes: Seq[JoinMode])
  extends Rows[(Cols1, Cols2)]
  with BaseJoined
{
  override def cols =
    (rows1.cols, rows2.cols)
  override def sourceSeq =
    Seq(rows1, rows2)
  def join[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, NewCols) => Column[Bool]) =
    Joined3[Cols1, Cols2, NewCols, Inner1, Inner2, NewCols](
      rows1, rows2, newRows, ons :+ on(rows1.cols, rows2.cols, newRows.cols), joinModes :+ JoinMode.Inner)
  // Left join: only the new (right-hand) source becomes nullable.
  def leftJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, NewCols) => Column[Bool]) =
    Joined3[Cols1, Cols2, Nullabled[NewCols], Inner1, Inner2, NewCols](
      rows1, rows2, outer(newRows), ons :+ on(rows1.cols, rows2.cols, newRows.cols), joinModes :+ JoinMode.Left)
  // Right join: all previously joined sources become nullable instead.
  def rightJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, NewCols) => Column[Bool]) =
    Joined3[Nullabled[Inner1], Nullabled[Inner2], NewCols, Inner1, Inner2, NewCols](
      outer(rows1), outer(rows2), newRows, ons :+ on(rows1.cols, rows2.cols, newRows.cols), joinModes :+ JoinMode.Right)
}
/** Three joined sources; same Cols/Inner scheme as [[Joined2]]. */
case class Joined3
  [Cols1 <: Product, Cols2 <: Product, Cols3 <: Product, Inner1 <: Product, Inner2 <: Product, Inner3 <: Product]
  (rows1: Rows[Cols1], rows2: Rows[Cols2], rows3: Rows[Cols3], ons: Seq[Column[Bool]], joinModes: Seq[JoinMode])
  extends Rows[(Cols1, Cols2, Cols3)]
  with BaseJoined
{
  override def cols =
    (rows1.cols, rows2.cols, rows3.cols)
  override def sourceSeq =
    Seq(rows1, rows2, rows3)
  def join[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, NewCols) => Column[Bool]) =
    Joined4[Cols1, Cols2, Cols3, NewCols, Inner1, Inner2, Inner3, NewCols](
      rows1, rows2, rows3, newRows, ons :+ on(rows1.cols, rows2.cols, rows3.cols, newRows.cols), joinModes :+ JoinMode.Inner)
  // Left join: only the new source becomes nullable.
  def leftJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, NewCols) => Column[Bool]) =
    Joined4[Cols1, Cols2, Cols3, Nullabled[NewCols], Inner1, Inner2, Inner3, NewCols](
      rows1, rows2, rows3, outer(newRows), ons :+ on(rows1.cols, rows2.cols, rows3.cols, newRows.cols), joinModes :+ JoinMode.Left)
  // Right join: all previously joined sources become nullable instead.
  def rightJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, NewCols) => Column[Bool]) =
    Joined4[Nullabled[Inner1], Nullabled[Inner2], Nullabled[Inner3], NewCols, Inner1, Inner2, Inner3, NewCols](
      outer(rows1), outer(rows2), outer(rows3), newRows, ons :+ on(rows1.cols, rows2.cols, rows3.cols, newRows.cols), joinModes :+ JoinMode.Right)
}
/** Four joined sources; same Cols/Inner scheme as [[Joined2]]. */
case class Joined4
  [Cols1 <: Product, Cols2 <: Product, Cols3 <: Product, Cols4 <: Product, Inner1 <: Product, Inner2 <: Product, Inner3 <: Product, Inner4 <: Product]
  (rows1: Rows[Cols1], rows2: Rows[Cols2], rows3: Rows[Cols3], rows4: Rows[Cols4], ons: Seq[Column[Bool]], joinModes: Seq[JoinMode])
  extends Rows[(Cols1, Cols2, Cols3, Cols4)]
  with BaseJoined
{
  override def cols =
    (rows1.cols, rows2.cols, rows3.cols, rows4.cols)
  override def sourceSeq =
    Seq(rows1, rows2, rows3, rows4)
  def join[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, Cols4, NewCols) => Column[Bool]) =
    Joined5[Cols1, Cols2, Cols3, Cols4, NewCols, Inner1, Inner2, Inner3, Inner4, NewCols](
      rows1, rows2, rows3, rows4, newRows, ons :+ on(rows1.cols, rows2.cols, rows3.cols, rows4.cols, newRows.cols), joinModes :+ JoinMode.Inner)
  // Left join: only the new source becomes nullable.
  def leftJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, Cols4, NewCols) => Column[Bool]) =
    Joined5[Cols1, Cols2, Cols3, Cols4, Nullabled[NewCols], Inner1, Inner2, Inner3, Inner4, NewCols](
      rows1, rows2, rows3, rows4, outer(newRows), ons :+ on(rows1.cols, rows2.cols, rows3.cols, rows4.cols, newRows.cols), joinModes :+ JoinMode.Left)
  // Right join: all previously joined sources become nullable instead.
  def rightJoin[NewCols <: Product](newRows: Rows[NewCols])(on: (Cols1, Cols2, Cols3, Cols4, NewCols) => Column[Bool]) =
    Joined5[Nullabled[Inner1], Nullabled[Inner2], Nullabled[Inner3], Nullabled[Inner4], NewCols, Inner1, Inner2, Inner3, Inner4, NewCols](
      outer(rows1), outer(rows2), outer(rows3), outer(rows4), newRows, ons :+ on(rows1.cols, rows2.cols, rows3.cols, rows4.cols, newRows.cols), joinModes :+ JoinMode.Right)
}
/** Five-way join of row sources. Unlike the lower arities, no further
  * join/leftJoin/rightJoin methods are defined at this arity in this file.
  * `ons` holds one join condition per join performed and `joinModes` the
  * matching join kind for each condition.
  */
case class Joined5
  [Cols1 <: Product, Cols2 <: Product, Cols3 <: Product, Cols4 <: Product, Cols5 <: Product, Inner1 <: Product, Inner2 <: Product, Inner3 <: Product, Inner4 <: Product, Inner5 <: Product]
  (rows1: Rows[Cols1], rows2: Rows[Cols2], rows3: Rows[Cols3], rows4: Rows[Cols4], rows5: Rows[Cols5], ons: Seq[Column[Bool]], joinModes: Seq[JoinMode])
  extends Rows[(Cols1, Cols2, Cols3, Cols4, Cols5)]
  with BaseJoined
{
  override def cols =
    (rows1.cols, rows2.cols, rows3.cols, rows4.cols, rows5.cols)

  override def sourceSeq =
    Seq(rows1, rows2, rows3, rows4, rows5)
}
case class Table[Cols <: Product](name: String, cols: Cols) extends Rows[Cols]
/** Result of grouping `source` by `groupingCols`.
  *
  * `sourceFilters` are the pre-grouping (WHERE-level) predicates carried over
  * from the source. Aggregate expressions can only be produced through the
  * [[Aggregator]] handed to `select`.
  */
case class Grouped[GrpCols <: Product, Src <: Product](
  groupingCols: GrpCols,
  source: Rows[Src],
  sourceFilters: Set[Column[Bool]]
) {
  /** Factory for aggregate expressions over the grouped source's columns. */
  class Aggregator {
    // COUNT over a column of any type.
    def count(s: Src => Column[_ <: Type]) =
      Count(s(source.cols))

    // SUM over a numeric column.
    def sum(s: Src => Column[Num]) =
      Sum(s(source.cols))

    // MAX over a column of any type T.
    def max[T <: Type](s: Src => Column[T]) =
      Max(s(source.cols))
  }

  /** Projects grouping columns and aggregates into an aggregate selection
    * (SELECT ... GROUP BY), with no HAVING predicates yet.
    */
  def select[Cols <: Product : Columns](f: (GrpCols, Aggregator) => Cols) =
    AggrSelection[Cols, GrpCols, Src](f(groupingCols, new Aggregator), groupingCols, source, sourceFilters, Set.empty)
}
// TODO: Do these support 'DISTINCT' in SQL?
/** SELECT over a grouped source: `sourceFilters` are WHERE-level predicates
  * and `groupFilters` are HAVING-level predicates.
  */
case class AggrSelection[Cols <: Product, GrpCols <: Product, Src <: Product](
  cols: Cols,
  groupingCols: GrpCols,
  source: Rows[Src],
  sourceFilters: Set[Column[Bool]],
  groupFilters: Set[Column[Bool]]
) extends Selection[Cols] {
  /** Adds a HAVING predicate over the selected (aggregated) columns. */
  def having(f: Cols => Column[Bool]) =
    copy(groupFilters = groupFilters + f(cols))
}
/** UNION ALL of several selections sharing the same column tuple type.
  * Private constructor: presumably built through a combinator elsewhere — confirm.
  */
case class UnionAll[Cols <: Product] private (selects: Seq[Selection[Cols]]) extends Rows[Cols] {
  override def cols = selects.head.cols // TODO: Does this make sense?
}
| pmellati/SQLpt | sqlpt/src/main/scala/sqlpt/ast/expressions/expressions.scala | Scala | apache-2.0 | 9,102 |
package com.whiteprompt
import java.util.UUID
import com.whiteprompt.domain.TaskEntity
import com.whiteprompt.persistence.TaskRepository
import scala.collection.mutable
import scala.concurrent.ExecutionContext
/** Shared test fixtures: two known tasks plus an id guaranteed not to be stored. */
trait TestData {
  val taskEntity1 = TaskEntity(UUID.fromString("c698cafa-de48-428d-a13c-949ab893384f"), "Foo name", "Foo description")
  val taskEntity2 = TaskEntity(UUID.fromString("f92bd520-758f-46ff-b3b8-16c503e08777"), "Bar name", "Bar description")
  val nonExistentTaskId = UUID.fromString("cc5909cc-711f-4a0e-bb29-4109cf0f899d")

  /** An in-memory TaskRepository pre-populated with the two fixture tasks. */
  def taskRepository()(implicit context: ExecutionContext) = new TaskRepository {
    implicit lazy val ec = context
    val store = new mutable.HashMap[UUID, TaskEntity]()
    // Seeds the backing map with the fixtures; run once at construction below.
    def init(): Unit = {
      store += taskEntity1.id -> taskEntity1
      store += taskEntity2.id -> taskEntity2
    }
    init()
  }
} | whiteprompt/akka-http-vs-play | core/src/test/scala/com/whiteprompt/TestData.scala | Scala | apache-2.0 | 867 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
import kantan.codecs.CodecCompanion
/** Companion for `BsonDocument` codecs, mixing in the generated instances and
  * kantan's `CodecCompanion` helpers for summoning/creating codecs.
  */
object BsonDocumentCodec
    extends GeneratedBsonDocumentCodecs with CodecCompanion[BsonDocument, MongoError.Decode, codecs.type]
trait BsonDocumentCodecInstances extends BsonDocumentDecoderInstances with BsonDocumentEncoderInstances
| nrinaudo/kantan.mongodb | core/src/main/scala/kantan/mongodb/BsonDocumentCodec.scala | Scala | apache-2.0 | 897 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box
import org.joda.time.LocalDate
import org.scalatest.{Matchers, WordSpec}
/** Verifies that each Ct* box trait exposes its underlying value through the
  * generic accessors (`asInt`, `asBoolean`, `asLocalDate`). Note the
  * BigDecimal cases assert truncation of 26.8 to Some(26).
  */
class TypesSpec extends WordSpec with Matchers {
  "CtOptionalInteger" should {
    "asInt will return the value " in {
      val expectedVal = Some(26)
      class TestBox extends CtBoxIdentifier("Test") with CtOptionalInteger {
        override def value: Option[Int] = expectedVal
      }
      new TestBox().asInt shouldBe expectedVal
    }
  }
  "CtOptionalBigDecimal" should {
    "asInt will return the value " in {
      val expectedVal = Some(26)
      class TestBox extends CtBoxIdentifier("Test") with CtOptionalBigDecimal{
        override def value: Option[BigDecimal] = Some(BigDecimal(26.8))
      }
      new TestBox().asInt shouldBe expectedVal
    }
  }
  "CtInteger" should {
    "asInt will return the value " in {
      val expectedVal = Some(26)
      class TestBox extends CtBoxIdentifier("Test") with CtInteger {
        override def value: Int = 26
      }
      new TestBox().asInt shouldBe expectedVal
    }
  }
  "CtBigDecimal" should {
    "asInt will return the value " in {
      val expectedVal = Some(26)
      class TestBox extends CtBoxIdentifier("Test") with CtBigDecimal{
        override def value: BigDecimal = BigDecimal(26.8)
      }
      new TestBox().asInt shouldBe expectedVal
    }
  }
  "CtBoolean" should {
    "asBoolean will return the value " in {
      val expectedVal = Some(true)
      class TestBox extends CtBoxIdentifier("Test") with CtBoolean{
        override def value: Boolean = true
      }
      new TestBox().asBoolean shouldBe expectedVal
    }
  }
  "CtOptionalBoolean" should {
    "asBoolean will return the value " in {
      val expectedVal = Some(true)
      class TestBox extends CtBoxIdentifier("Test") with CtOptionalBoolean{
        override def value: Option[Boolean] = Some(true)
      }
      new TestBox().asBoolean shouldBe expectedVal
    }
  }
  "CtDate" should {
    "asLocalDate will return the value " in {
      val expectedVal = Some(new LocalDate(2015,1,1))
      class TestBox extends CtBoxIdentifier("Test") with CtDate{
        override def value: LocalDate = new LocalDate(2015,1,1)
      }
      new TestBox().asLocalDate shouldBe expectedVal
    }
  }
  "CtOptionalDate" should {
    "asLocalDate will return the value " in {
      val expectedVal = Some(new LocalDate(2015,1,1))
      class TestBox extends CtBoxIdentifier("Test") with CtOptionalDate{
        override def value: Option[LocalDate] = Some(new LocalDate(2015,1,1))
      }
      new TestBox().asLocalDate shouldBe expectedVal
    }
  }
}
| ahudspith-equalexperts/ct-calculations | src/test/scala/uk/gov/hmrc/ct/box/TypesSpec.scala | Scala | apache-2.0 | 3,259 |
package konstructs.shard
import java.util.UUID
import konstructs.api.{Position, Block, BlockTypeId, Direction, Rotation, Orientation, LightLevel, Colour, Health}
import konstructs.Db
/** Decoded per-block fields of the packed 7-byte shard format
  * (see the companion object for the exact byte layout).
  *
  * @param w         block type index ("w" id), 16 bits
  * @param health    current health, 11 bits
  * @param direction orientation direction encoding, 3 bits
  * @param rotation  orientation rotation encoding, 2 bits
  * @param ambient   ambient light level, 4 bits
  * @param red       light colour red component, 4 bits
  * @param green     light colour green component, 4 bits
  * @param blue      light colour blue component, 4 bits
  * @param light     emitted light level, 4 bits
  */
case class BlockData(w: Int,
                     health: Int,
                     direction: Int,
                     rotation: Int,
                     ambient: Int,
                     red: Int,
                     green: Int,
                     blue: Int,
                     light: Int) {

  /** Serialises this block's fields into `data` at block index `i`. */
  def write(data: Array[Byte], i: Int) {
    BlockData.write(data, i, w, health, direction, rotation, ambient, red, green, blue, light)
  }

  /** Builds an API `Block` from these fields; light and colour fields are
    * not part of `Block` and are dropped here.
    */
  def block(id: UUID, blockTypeId: BlockTypeId) =
    new Block(id, blockTypeId, Health.get(health), Orientation.get(direction, rotation))
}
/** Packed 7-byte-per-block codec used by shard storage.
  *
  * Byte layout for block index `i` (offsets relative to `i * Size`):
  *   bytes 0-1: w, 16 bits little-endian
  *   byte  2:   health, low 8 bits
  *   byte  3:   bits 0-2 health high 3 bits, bits 3-4 rotation, bits 5-7 direction
  *   byte  4:   bits 0-3 ambient light, bits 4-7 red
  *   byte  5:   bits 0-3 green, bits 4-7 blue
  *   byte  6:   bits 0-3 emitted light level (high nibble unused)
  */
object BlockData {
  // Bytes per block in the packed representation.
  val Size = 7

  def w(data: Array[Byte], i: Int): Int =
    (data(i * Size) & 0xFF) + ((data(i * Size + 1) & 0xFF) << 8)

  def hp(data: Array[Byte], i: Int): Int =
    (data(i * Size + 2) & 0xFF) + ((data(i * Size + 3) & 0x07) << 8)

  def direction(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 3) & 0xE0) >> 5)

  def rotation(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 3) & 0x18) >> 3)

  def ambientLight(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 4) & 0x0F))

  def red(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 4) & 0xF0) >> 4)

  def green(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 5) & 0x0F))

  def blue(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 5) & 0xF0) >> 4)

  def light(data: Array[Byte], i: Int): Int =
    ((data(i * Size + 6) & 0x0F))

  /** Decodes the block at index `i` of `data` into a [[BlockData]]. */
  def apply(data: Array[Byte], i: Int): BlockData = {
    apply(w(data, i),
          hp(data, i),
          direction(data, i),
          rotation(data, i),
          ambientLight(data, i),
          red(data, i),
          green(data, i),
          blue(data, i),
          light(data, i))
  }

  /** Builds a [[BlockData]] from an API `Block` plus light/colour information. */
  def apply(w: Int, block: Block, ambient: Int, colour: Colour, level: LightLevel): BlockData = {
    apply(
      w,
      block.getHealth.getHealth,
      block.getOrientation.getDirection.getEncoding,
      block.getOrientation.getRotation.getEncoding,
      ambient,
      colour.getRed,
      colour.getGreen,
      colour.getBlue,
      level.getLevel
    )
  }

  /** Encodes all fields of one block at index `i` of `data`. */
  def write(data: Array[Byte],
            i: Int,
            w: Int,
            health: Int,
            direction: Int,
            rotation: Int,
            ambient: Int,
            red: Int,
            green: Int,
            blue: Int,
            light: Int) {
    writeW(data, i, w)
    writeHealthAndOrientation(data, i, health, direction, rotation)
    writeLight(data, i, ambient, red, green, blue, light)
  }

  /** Encodes one block from an API `Block` plus light/colour information. */
  def write(data: Array[Byte],
            i: Int,
            w: Int,
            block: Block,
            ambientLight: LightLevel,
            colour: Colour,
            light: LightLevel) {
    write(data,
          i,
          w,
          block.getHealth.getHealth,
          block.getOrientation.getDirection.getEncoding,
          block.getOrientation.getRotation.getEncoding,
          ambientLight.getLevel,
          colour.getRed,
          colour.getGreen,
          colour.getBlue,
          light.getLevel)
  }

  // w: little-endian 16 bits into bytes 0-1.
  def writeW(data: Array[Byte], i: Int, w: Int) {
    data(i * Size) = (w & 0xFF).toByte
    data(i * Size + 1) = ((w >> 8) & 0xFF).toByte
  }

  // health low byte into byte 2; byte 3 packs direction (bits 5-7),
  // rotation (bits 3-4) and health's high 3 bits (bits 0-2).
  def writeHealthAndOrientation(data: Array[Byte], i: Int, health: Int, direction: Int, rotation: Int) {
    data(i * Size + 2) = (health & 0xFF).toByte
    val b = (((direction << 5) & 0xE0) + ((rotation << 3) & 0x18) + ((health >> 8) & 0x07)).toByte
    data(i * Size + 3) = b
  }

  // byte 4: ambient (low nibble) + red (high nibble);
  // byte 5: green (low nibble) + blue (high nibble);
  // byte 6: emitted light level (low nibble).
  def writeLight(data: Array[Byte], i: Int, ambient: Int, red: Int, green: Int, blue: Int, light: Int) {
    val b4 = ((ambient & 0x0F) + ((red << 4) & 0xF0)).toByte
    data(i * Size + 4) = b4
    val b5 = ((green & 0x0F) + ((blue << 4) & 0xF0)).toByte
    data(i * Size + 5) = b5
    data(i * Size + 6) = (light & 0x0F).toByte
  }
}
| konstructs/server | src/main/scala/konstructs/shard/BlockData.scala | Scala | mit | 4,012 |
package se.blea.flexiconf
/** Values needed to determine whether a possible directive matches an actual one. */
case class MaybeDirective(private[flexiconf] val name: String,
                          private[flexiconf] val arguments: Seq[Argument] = Seq.empty,
                          private[flexiconf] val hasBlock: Boolean = false) {

  /** Returns true if the provided Directive matches this MaybeDirective:
    * same name, same argument kinds in order, and same block requirement.
    */
  private[flexiconf] def matches(directive: DirectiveDefinition): Boolean = {
    val sameName = directive.name == name
    val sameArgumentKinds = directive.parameters.map(_.kind) == arguments.map(_.kind)
    val sameBlockRequirement = directive.requiresBlock == hasBlock
    sameName && sameArgumentKinds && sameBlockRequirement
  }

  /** Returns true if the provided Directive doesn't match this MaybeDirective. */
  private[flexiconf] def doesNotMatch(directive: DirectiveDefinition): Boolean =
    !matches(directive)

  override def toString: String = {
    val argumentPart =
      if (arguments.isEmpty) ""
      else arguments.map(a => s"<${a.value}>:${a.kind}").mkString(" ", " ", "")
    val blockPart = if (hasBlock) " {}" else ""
    s"$name$argumentPart$blockPart"
  }
}
| thetristan/flexiconf | flexiconf-core/src/main/scala/se/blea/flexiconf/MaybeDirective.scala | Scala | mit | 1,299 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js CLI **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2014, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.cli
import org.scalajs.core.ir.ScalaJSVersions
import org.scalajs.core.tools.sem._
import org.scalajs.core.tools.javascript.OutputMode
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.logging._
import org.scalajs.core.tools.classpath._
import org.scalajs.core.tools.classpath.builder._
import CheckedBehavior.Compliant
import org.scalajs.core.tools.optimizer.{
ScalaJSOptimizer,
ScalaJSClosureOptimizer,
ParIncOptimizer
}
import scala.collection.immutable.Seq
import java.io.File
import java.net.URI
/** Command-line front end for the Scala.js linker: resolves a Scala.js
  * classpath and emits a single JavaScript file, either fast-optimized or
  * fully optimized with the Google Closure Compiler.
  */
object Scalajsld {

  /** Parsed command-line configuration (defaults match the option help texts). */
  case class Options(
    cp: Seq[File] = Seq.empty,
    output: File = null,
    jsoutput: Option[File] = None,
    semantics: Semantics = Semantics.Defaults,
    outputMode: OutputMode = OutputMode.ECMAScript51Isolated,
    noOpt: Boolean = false,
    fullOpt: Boolean = false,
    prettyPrint: Boolean = false,
    sourceMap: Boolean = false,
    relativizeSourceMap: Option[URI] = None,
    bypassLinkingErrors: Boolean = false,
    checkIR: Boolean = false,
    stdLib: Option[File] = None,
    logLevel: Level = Level.Info)

  // Lets scopt parse an OutputMode argument by its exact toString name.
  implicit object OutputModeRead extends scopt.Read[OutputMode] {
    val arity = 1
    val reads = { (s: String) =>
      OutputMode.All.find(_.toString() == s).getOrElse(
        throw new IllegalArgumentException(s"$s is not a valid output mode"))
    }
  }

  def main(args: Array[String]): Unit = {
    val parser = new scopt.OptionParser[Options]("scalajsld") {
      head("scalajsld", ScalaJSVersions.current)
      arg[File]("<value> ...")
        .unbounded()
        .action { (x, c) => c.copy(cp = c.cp :+ x) }
        .text("Entries of Scala.js classpath to link")
      opt[File]('o', "output")
        .valueName("<file>")
        .required()
        .action { (x, c) => c.copy(output = x) }
        .text("Output file of linker (required)")
      opt[File]("jsoutput")
        .valueName("<file>")
        .abbr("jo")
        .action { (x, c) => c.copy(jsoutput = Some(x)) }
        .text("Concatenate all JavaScript library dependencies to this file")
      opt[Unit]('f', "fastOpt")
        .action { (_, c) => c.copy(noOpt = false, fullOpt = false) }
        .text("Optimize code (this is the default)")
      opt[Unit]('n', "noOpt")
        .action { (_, c) => c.copy(noOpt = true, fullOpt = false) }
        .text("Don't optimize code")
      opt[Unit]('u', "fullOpt")
        .action { (_, c) => c.copy(noOpt = false, fullOpt = true) }
        .text("Fully optimize code (uses Google Closure Compiler)")
      opt[Unit]('p', "prettyPrint")
        .action { (_, c) => c.copy(prettyPrint = true) }
        .text("Pretty print full opted code (meaningful with -u)")
      opt[Unit]('s', "sourceMap")
        .action { (_, c) => c.copy(sourceMap = true) }
        .text("Produce a source map for the produced code")
      opt[Unit]("compliantAsInstanceOfs")
        .action { (_, c) => c.copy(semantics =
          c.semantics.withAsInstanceOfs(Compliant))
        }
        .text("Use compliant asInstanceOfs")
      opt[OutputMode]('m', "outputMode")
        .action { (mode, c) => c.copy(outputMode = mode) }
        .text("Output mode " + OutputMode.All.mkString("(", ", ", ")"))
      opt[Unit]('b', "bypassLinkingErrors")
        .action { (_, c) => c.copy(bypassLinkingErrors = true) }
        .text("Only warn if there are linking errors")
      opt[Unit]('c', "checkIR")
        .action { (_, c) => c.copy(checkIR = true) }
        .text("Check IR before optimizing")
      opt[File]('r', "relativizeSourceMap")
        .valueName("<path>")
        .action { (x, c) => c.copy(relativizeSourceMap = Some(x.toURI)) }
        .text("Relativize source map with respect to given path (meaningful with -s)")
      opt[Unit]("noStdlib")
        .action { (_, c) => c.copy(stdLib = None) }
        .text("Don't automatically include Scala.js standard library")
      opt[File]("stdlib")
        .valueName("<scala.js stdlib jar>")
        .hidden()
        .action { (x, c) => c.copy(stdLib = Some(x)) }
        .text("Location of Scala.js standard library. This is set by the " +
          "runner script and automatically prepended to the classpath. " +
          "Use --noStdlib to not include it.")
      opt[Unit]('d', "debug")
        .action { (_, c) => c.copy(logLevel = Level.Debug) }
        .text("Debug mode: Show full log")
      opt[Unit]('q', "quiet")
        .action { (_, c) => c.copy(logLevel = Level.Warn) }
        .text("Only show warnings & errors")
      opt[Unit]("really-quiet")
        .abbr("qq")
        .action { (_, c) => c.copy(logLevel = Level.Error) }
        .text("Only show errors")
      version("version")
        .abbr("v")
        .text("Show scalajsld version")
      help("help")
        .abbr("h")
        .text("prints this usage text")

      override def showUsageOnError = true
    }

    for (options <- parser.parse(args, Options())) {
      // The stdlib (unless suppressed) is prepended to the user classpath.
      val cpFiles = options.stdLib.toList ++ options.cp
      // Load and resolve classpath
      val cp = PartialClasspathBuilder.build(cpFiles).resolve()

      // Write JS dependencies if requested
      for (jsout <- options.jsoutput)
        IO.concatFiles(WritableFileVirtualJSFile(jsout), cp.jsLibs.map(_.lib))

      // Link Scala.js code
      val outFile = WritableFileVirtualJSFile(options.output)
      if (options.fullOpt)
        fullOpt(cp, outFile, options)
      else
        fastOpt(cp, outFile, options)
    }
  }

  /** Fully optimizes with the Closure Compiler, on top of the optimized semantics. */
  private def fullOpt(cp: IRClasspath,
      output: WritableVirtualJSFile, options: Options) = {
    import ScalaJSClosureOptimizer._
    val semantics = options.semantics.optimized
    new ScalaJSClosureOptimizer().optimizeCP(
      newScalaJSOptimizer(semantics, options.outputMode), cp,
      Config(
        output = output,
        wantSourceMap = options.sourceMap,
        relativizeSourceMapBase = options.relativizeSourceMap,
        bypassLinkingErrors = options.bypassLinkingErrors,
        checkIR = options.checkIR,
        prettyPrint = options.prettyPrint),
      newLogger(options))
  }

  /** Fast optimization (or none at all when `noOpt` is set). */
  private def fastOpt(cp: IRClasspath,
      output: WritableVirtualJSFile, options: Options) = {
    import ScalaJSOptimizer._
    newScalaJSOptimizer(options.semantics, options.outputMode).optimizeCP(cp,
      Config(
        output = output,
        wantSourceMap = options.sourceMap,
        bypassLinkingErrors = options.bypassLinkingErrors,
        checkIR = options.checkIR,
        disableOptimizer = options.noOpt,
        relativizeSourceMapBase = options.relativizeSourceMap),
      newLogger(options))
  }

  private def newLogger(options: Options) =
    new ScalaConsoleLogger(options.logLevel)

  private def newScalaJSOptimizer(semantics: Semantics, outputMode: OutputMode) =
    new ScalaJSOptimizer(semantics, outputMode, ParIncOptimizer.factory)
}
| matthughes/scala-js | cli/src/main/scala/org/scalajs/cli/Scalajsld.scala | Scala | bsd-3-clause | 7,396 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api
import org.joda.time.format.DateTimeFormat
import org.joda.time.DateTimeZone
/**
* Contains standard HTTP constants.
* For example:
* {{{
* val text = ContentTypes.TEXT
* val ok = Status.OK
* val accept = HeaderNames.ACCEPT
* }}}
*/
package object http {
  /** HTTP date formatter, compliant to RFC 1123 (e.g. "Sun, 06 Nov 1994 08:49:37 GMT").
    * Locale is pinned to English and the zone to GMT, since HTTP dates must not
    * vary with the server's default locale or time zone.
    */
  val dateFormat = DateTimeFormat.forPattern("EEE, dd MMM yyyy HH:mm:ss 'GMT'").withLocale(java.util.Locale.ENGLISH).withZone(DateTimeZone.forID("GMT"))
} | jyotikamboj/container | pf-framework/src/play/src/main/scala/play/api/http/package.scala | Scala | mit | 562 |
/*
* Conversion.scala
* Represents DNA, which is an Element[Nucleotide].
*
* Created By: Brian Ruttenberg (bruttenberg@cra.com)
* Creation Date: Oct 1, 2012
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.example.dosage
import com.cra.figaro.language._
import com.cra.figaro.library.atomic.discrete._
import com.cra.figaro.example.dosage.Conversion._
/**
 * Represents DNA, which is an Element[Nucleotide]. Given an input AminoAcidSequence,
 * this represents the distribution over all possible Nucleotide sequences that
 * are possible (since an AA -> Nucleotide is a one to many mapping).
 * Implemented as a non-caching chain from the amino-acid element through
 * [[DNA.genFcn]].
 */
class DNA(name: Name[NucleotideSequence], arg1: Element[AminoAcidSequence], collection: ElementCollection)
  extends NonCachingChain[AminoAcidSequence, NucleotideSequence](name, arg1, DNA.genFcn, collection)
object DNA {

  /** Chain function mapping an amino-acid sequence to its nucleotide distribution. */
  def genFcn = (n: AminoAcidSequence) => genDNA(n)

  /** Element over the nucleotides a single symbol may stand for: uniform when
    * ambiguous, constant otherwise.
    */
  def nucToElement(n: String) = {
    val candidates = symbolToN(n)
    if (candidates.length > 1) Uniform(candidates: _*) else Constant(candidates(0))
  }

  /** Element over the concrete codon strings that can encode the given codon patterns. */
  def codonToElement(c: List[String]): Element[String] = {
    val alternatives = c.map { pattern =>
      val perSymbol = pattern.map(ch => nucToElement(ch.toString))
      Apply(Inject(perSymbol: _*), (parts: List[String]) => parts.reduce(_ + _))
    }
    if (alternatives.size > 1) Uniform(alternatives: _*) else alternatives.head
  }

  /** Distribution over nucleotide sequences encoding the whole amino-acid sequence. */
  def genDNA(seq: AminoAcidSequence): Element[NucleotideSequence] = {
    val codonElements = seq.seq.map(aa => codonToElement(aaToComp(aa.toString).toList))
    Apply(Inject(codonElements: _*),
      (parts: List[String]) => parts.foldLeft(NucleotideSequence(""))(_ + NucleotideSequence(_)))
  }

  def apply(arg: Element[AminoAcidSequence])(implicit name: Name[NucleotideSequence], collection: ElementCollection) =
    new DNA(name, arg, collection)
}
| wkretschmer/figaro | FigaroExamples/src/main/scala/com/cra/figaro/example/dosage/DNA.scala | Scala | bsd-3-clause | 1,939 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.join
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.trustedanalytics.sparktk.frame.{ SchemaHelper, FrameSchema }
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import scala.language.implicitConversions
/**
*
* This is a wrapper to encapsulate methods that may need to be serialized to executed on Spark worker nodes.
* If you don't know what this means please read about Closure Mishap
* [[http://ampcamp.berkeley.edu/wp-content/uploads/2012/06/matei-zaharia-part-1-amp-camp-2012-spark-intro.pdf]]
* and Task Serialization
* [[http://stackoverflow.com/questions/22592811/scala-spark-task-not-serializable-java-io-notserializableexceptionon-when]]
*/
object JoinRddFunctions extends Serializable {

  // Makes the broadcast-join operations available on any RddJoinParam.
  implicit def joinRddToBroadcastJoinRddFunctions(joinParam: RddJoinParam): BroadcastJoinRddFunctions =
    new BroadcastJoinRddFunctions(joinParam)

  /**
   * Perform inner join
   *
   * Inner joins return all rows with matching keys in the first and second data frame.
   *
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @param useBroadcast If one of your tables is small enough to fit in the memory of a single machine, you can use a broadcast join.
   *                     Specify which table to broadcast (left or right). Default is None.
   * @return Joined RDD
   */
  def innerJoin(left: RddJoinParam,
                right: RddJoinParam,
                useBroadcast: Option[String]): FrameRdd = {
    val joinedRdd = if (useBroadcast == Some("left") || useBroadcast == Some("right")) {
      left.innerBroadcastJoin(right, useBroadcast)
    }
    else {
      val leftFrame = left.frame
      val rightFrame = right.frame

      // Alias columns before using spark data frame join
      val (aliasedLeftFrame, aliasedRightFrame) = SchemaHelper.resolveColumnNamesConflictForJoin(leftFrame,
        rightFrame,
        leftFrame.schema.copySubset(left.joinColumns).columns.toList)

      val leftDf = aliasedLeftFrame.toDataFrame
      val rightDf = aliasedRightFrame.toDataFrame

      // If the join columns have same names in both the frames, specify the columns explicitly while
      // calling dataframe join to not have duplicate columns - cause once one has duplicate columns,
      // one can't select one out of those 2 columns. Else use expression maker.
      val joinedFrame = right.joinColumns.sorted.equals(left.joinColumns.sorted) match {
        case true => leftDf.join(rightDf, left.joinColumns)
        case false =>
          val expression = expressionMaker(leftDf, rightDf, left.joinColumns, right.joinColumns)
          leftDf.join(rightDf, expression)
      }

      // Get RDD out of the dataframe with frame columns in order
      val columnsInJoinedFrame = aliasedLeftFrame.schema.columnNames ++
        aliasedRightFrame.schema.columnNames.filterNot(right.joinColumns.contains(_))
      joinedFrame.selectExpr(columnsInJoinedFrame: _*).rdd
    }

    createJoinedFrame(joinedRdd, left, right, "inner")
  }

  /**
   * expression maker helps for generating conditions to check when join invoked with composite keys
   *
   * @param leftFrame left data frame
   * @param rightFrame right data frame
   * @param leftJoinCols list of left frame column names used in join
   * @param rightJoinCols list of right frame column name used in join
   * @return combined equality condition (col1L == col1R && col2L == col2R && ...)
   */
  def expressionMaker(leftFrame: DataFrame, rightFrame: DataFrame, leftJoinCols: Seq[String], rightJoinCols: Seq[String]): Column = {
    // Pairs up the left/right key columns positionally.
    val columnsTuple = leftJoinCols.zip(rightJoinCols)
    def makeExpression(leftCol: String, rightCol: String): Column = {
      leftFrame(leftCol).equalTo(rightFrame(rightCol))
    }
    // NOTE: reduce assumes at least one join column pair exists.
    val expression = columnsTuple.map { case (lc, rc) => makeExpression(lc, rc) }.reduce(_ && _)
    expression
  }

  /**
   * Perform left-outer join
   *
   * Left-outer join or Left join return all the rows in the first data-frame, and matching rows in the second data frame.
   *
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @param useBroadcastRight If right table is small enough to fit in the memory of a single machine, you can set useBroadcastRight to True to perform broadcast join.
   *                          Default is False.
   * @return Joined RDD
   */
  def leftJoin(left: RddJoinParam,
               right: RddJoinParam,
               useBroadcastRight: Boolean): FrameRdd = {
    val joinedRdd = if (useBroadcastRight) {
      left.leftJoinBroadcastingRightTable(right)
    }
    else {
      val leftFrame = left.frame.toDataFrame
      val rightFrame = right.frame.toDataFrame
      val expression = expressionMaker(leftFrame, rightFrame, left.joinColumns, right.joinColumns)
      val joinedFrame = leftFrame.join(rightFrame,
        expression,
        joinType = "left"
      )
      joinedFrame.rdd
    }
    createJoinedFrame(joinedRdd, left, right, "left")
  }

  /**
   * Perform right-outer join
   *
   * Right-outer join or Right Join return all the rows in the second data-frame, and matching rows in the first data frame.
   *
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @param useBroadcastLeft If left table is small enough to fit in the memory of a single machine, you can set useBroadcastLeft to True to perform broadcast join.
   *                         Default is False.
   * @return Joined RDD
   */
  def rightJoin(left: RddJoinParam,
                right: RddJoinParam,
                useBroadcastLeft: Boolean): FrameRdd = {
    val joinedRdd = if (useBroadcastLeft) {
      left.rightJoinBroadcastingLeftTable(right)
    }
    else {
      val leftFrame = left.frame.toDataFrame
      val rightFrame = right.frame.toDataFrame
      val expression = expressionMaker(leftFrame, rightFrame, left.joinColumns, right.joinColumns)
      val joinedFrame = leftFrame.join(rightFrame,
        expression,
        joinType = "right"
      )
      joinedFrame.rdd
    }
    createJoinedFrame(joinedRdd, left, right, "right")
  }

  /**
   * Perform full-outer join
   *
   * Full-outer joins return both matching, and non-matching rows in the first and second data frame.
   * Broadcast join is not supported.
   *
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @return Joined RDD
   */
  def outerJoin(left: RddJoinParam, right: RddJoinParam): FrameRdd = {
    val leftFrame = left.frame.toDataFrame
    val rightFrame = right.frame.toDataFrame
    val expression = expressionMaker(leftFrame, rightFrame, left.joinColumns, right.joinColumns)
    val joinedFrame = leftFrame.join(rightFrame,
      expression,
      joinType = "fullouter"
    )
    createJoinedFrame(joinedFrame.rdd, left, right, "outer")
  }

  /**
   * Merge joined columns for full outer join
   *
   * Replaces null values in left join column with value in right join column
   *
   * @param joinedRdd Joined RDD
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @return Merged RDD
   */
  def mergeJoinColumns(joinedRdd: RDD[Row],
                       left: RddJoinParam,
                       right: RddJoinParam): RDD[Row] = {
    val leftSchema = left.frame.frameSchema
    val rightSchema = right.frame.frameSchema

    val leftJoinIndices = leftSchema.columnIndices(left.joinColumns)
    // Right columns sit after all left columns in the joined row, hence the offset.
    val rightJoinIndices = rightSchema.columnIndices(right.joinColumns).map(rightindex => rightindex + leftSchema.columns.size)

    joinedRdd.map(row => {
      val rowArray = row.toSeq.toArray
      leftJoinIndices.zip(rightJoinIndices).foreach {
        case (leftIndex, rightIndex) => {
          // Null on the left means the row matched only on the right frame;
          // copy the key value over from the right join column.
          if (row.get(leftIndex) == null) {
            rowArray(leftIndex) = row.get(rightIndex)
          }
        }
      }
      new GenericRow(rowArray)
    })
  }

  /**
   * Create joined frame
   *
   * The duplicate join column in the joined RDD is dropped in the joined frame.
   *
   * @param joinedRdd Joined RDD
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @param how join method
   * @return Joined frame
   */
  def createJoinedFrame(joinedRdd: RDD[Row],
                        left: RddJoinParam,
                        right: RddJoinParam,
                        how: String): FrameRdd = {
    how match {
      case "outer" => {
        // Full-outer rows may have the key only on the right side; merge first.
        val mergedRdd = mergeJoinColumns(joinedRdd, left, right)
        dropJoinColumn(mergedRdd, left, right, how)
      }
      case _ => {
        dropJoinColumn(joinedRdd, left, right, how)
      }
    }
  }

  /**
   * Drop duplicate data in right join column
   *
   * Used for inner, left-outer, and full-outer joins
   *
   * @param joinedRdd Joined RDD
   * @param left join parameter for first data frame
   * @param right join parameter for second data frame
   * @param how Join method
   * @return Joined frame
   */
  def dropJoinColumn(joinedRdd: RDD[Row],
                     left: RddJoinParam,
                     right: RddJoinParam,
                     how: String): FrameRdd = {
    val leftSchema = left.frame.frameSchema
    val rightSchema = right.frame.frameSchema
    // Create new schema
    if (how == "inner") {
      // Inner joins were performed with `usingColumns`, so the joined RDD
      // already contains a single copy of the key columns.
      val newRightSchema = rightSchema.dropColumns(right.joinColumns.toList)
      val newSchema = FrameSchema(SchemaHelper.join(leftSchema.columns, newRightSchema.columns).toVector)
      new FrameRdd(newSchema, joinedRdd)
    }
    else {
      val newSchema = FrameSchema(SchemaHelper.join(leftSchema.columns, rightSchema.columns).toVector)
      val frameRdd = new FrameRdd(newSchema, joinedRdd)
      // For left/outer joins drop the right-side key copies; for right joins
      // drop the left-side key copies.
      val colIndices = if (how == "left" || how == "outer") {
        rightSchema.columnIndices(right.joinColumns).map(rightIndex => leftSchema.columns.size + rightIndex)
      }
      else {
        leftSchema.columnIndices(left.joinColumns)
      }
      val rightColNames = colIndices.map(colIndex => newSchema.column(colIndex).name)
      frameRdd.dropColumns(rightColNames.toList)
    }
  }
}
| skavulya/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/join/JoinRddFunctions.scala | Scala | apache-2.0 | 11,012 |
package com.kakao.shaded.jackson.module.scala.deser
import com.kakao.shaded.jackson.module.scala.modifiers.SetTypeModifierModule
import com.kakao.shaded.jackson.databind.deser.{ValueInstantiator, ContextualDeserializer, Deserializers}
import com.kakao.shaded.jackson.databind.deser.std.{StdValueInstantiator, CollectionDeserializer, ContainerDeserializerBase}
import com.kakao.shaded.jackson.databind._
import com.kakao.shaded.jackson.databind.jsontype.TypeDeserializer
import com.kakao.shaded.jackson.core.JsonParser
import scala.collection._
import com.kakao.shaded.jackson.databind.`type`.CollectionLikeType
import java.util.AbstractCollection
import com.kakao.shaded.jackson.module.scala.introspect.OrderingLocator
import java.lang.Object
import scala.collection.generic.SortedSetFactory
import scala.Some
import scala.collection.immutable
import scala.language.postfixOps
/** Adapts a Scala SortedSet builder to the java.util.Collection interface that
  * Jackson's CollectionDeserializer appends deserialized elements into.
  * Only `add` is meaningful; the remaining members are stubs.
  */
private class SortedSetBuilderWrapper[E](val builder: mutable.Builder[E, _ <: collection.SortedSet[E]]) extends AbstractCollection[E] {
  override def add(e: E) = { builder += e; true }

  // Required by AbstractCollection, but the deserializer doesn't care about them.
  def iterator() = null
  def size() = 0
}
private object SortedSetDeserializer {
  // Given an element Ordering, produces a builder for some SortedSet implementation.
  type BuilderFactory = (Ordering[AnyRef]) => mutable.Builder[AnyRef, SortedSet[AnyRef]]

  // Reflectively looks up a class; None when it doesn't exist in the running Scala version.
  def lookupClass(s: String): Option[Class[_]] = try {
    Some(Predef.getClass.getClassLoader.loadClass(s))
  } catch {
    case e: ClassNotFoundException => None
  }

  // Loads the companion object of `s` and adapts its `newBuilder` into a BuilderFactory.
  // Assumes the companion is a SortedSetFactory; throws if the class is absent.
  def lookupBuilder(s: String): BuilderFactory = {
    val moduleClass = lookupClass(s + "$").get
    val module = moduleClass.getField("MODULE$").get(null).asInstanceOf[SortedSetFactory[SortedSet]]
    (o) => module.newBuilder(o)
  }

  def classAndBuilder(s: String): Option[(Class[_], BuilderFactory)] = {
    lookupClass(s).map(c => c -> lookupBuilder(s))
  }

  // Registered SortedSet types; LinkedHashMap preserves insertion order, and
  // builderFor picks the FIRST assignable entry, so more specific types go first.
  val BUILDERS = {
    val builder = mutable.LinkedHashMap.newBuilder[Class[_], BuilderFactory]

    // These were added in 2.10. We want to support them, but can't statically reference them, because
    // the 2.9 library doesn't include them, and multi-target builds are awkward.
    classAndBuilder("scala.collection.mutable.TreeSet").foreach(builder +=)
    classAndBuilder("scala.collection.mutable.SortedSet").foreach(builder +=)

    builder += (classOf[immutable.TreeSet[_]] -> (immutable.TreeSet.newBuilder[AnyRef](_)))
    builder += (classOf[SortedSet[_]] -> (SortedSet.newBuilder[AnyRef](_)))

    builder.result()
  }

  /** Builder for the first registered SortedSet class assignable from `cls`,
    * using the element Ordering located for `valueType`; throws
    * IllegalArgumentException for unsupported target types.
    */
  def builderFor(cls: Class[_], valueType: JavaType): mutable.Builder[AnyRef, SortedSet[AnyRef]] = {
    val ordering = OrderingLocator.locate(valueType)
    val found: Option[BuilderFactory] = BUILDERS.find(_._1.isAssignableFrom(cls)).map(_._2)
    if (found.isDefined) found.get(ordering)
    else throw new IllegalArgumentException(cls.getCanonicalName + " is not a supported SortedSet")
  }
}
// ValueInstantiator that hands Jackson a SortedSetBuilderWrapper as the "collection" to populate,
// pre-configured with the correct Scala builder and element Ordering for the target set type.
private class SortedSetInstantiator(config: DeserializationConfig, collectionType: Class[_], valueType: JavaType)
  extends StdValueInstantiator(config, collectionType) {
  override def canCreateUsingDefault = true
  // Invoked by CollectionDeserializer for every JSON array being deserialized.
  override def createUsingDefault(ctxt: DeserializationContext) =
    new SortedSetBuilderWrapper[AnyRef](SortedSetDeserializer.builderFor(collectionType, valueType))
}
// Deserializer for Scala SortedSet subtypes. Delegates the actual JSON array handling to
// Jackson's CollectionDeserializer and unwraps the Scala builder's result afterwards.
private class SortedSetDeserializer(collectionType: JavaType, containerDeserializer: CollectionDeserializer)
  extends ContainerDeserializerBase[collection.SortedSet[_]](collectionType)
  with ContextualDeserializer
{
  // Convenience constructor used by the resolver: wires up the CollectionDeserializer delegate.
  def this(collectionType: JavaType, valueDeser: JsonDeserializer[Object], valueTypeDeser: TypeDeserializer, valueInstantiator: ValueInstantiator) =
    this(collectionType, new CollectionDeserializer(collectionType, valueDeser, valueTypeDeser, valueInstantiator))
  // Re-wrap the contextualized delegate so element deserializers pick up property context.
  def createContextual(ctxt: DeserializationContext, property: BeanProperty) = {
    val newDelegate = containerDeserializer.createContextual(ctxt, property)
    new SortedSetDeserializer(collectionType, newDelegate)
  }
  override def getContentType = containerDeserializer.getContentType
  override def getContentDeserializer = containerDeserializer.getContentDeserializer
  // The delegate always populates the SortedSetBuilderWrapper produced by SortedSetInstantiator,
  // so this match is exhaustive in practice; a MatchError here would indicate a wiring bug.
  override def deserialize(jp: JsonParser, ctxt: DeserializationContext): collection.SortedSet[_] =
    containerDeserializer.deserialize(jp, ctxt) match {
      case wrapper: SortedSetBuilderWrapper[_] => wrapper.builder.result()
    }
}
// Registers SortedSetDeserializer for any collection-like type whose raw class is a
// scala.collection.SortedSet; returns null so Jackson falls through for other types.
private object SortedSetDeserializerResolver extends Deserializers.Base {
  private final val SORTED_SET = classOf[collection.SortedSet[_]]
  override def findCollectionLikeDeserializer(collectionType: CollectionLikeType,
                                              config: DeserializationConfig,
                                              beanDesc: BeanDescription,
                                              elementTypeDeserializer: TypeDeserializer,
                                              elementDeserializer: JsonDeserializer[_]): JsonDeserializer[_] = {
    val rawClass = collectionType.getRawClass
    if (!SORTED_SET.isAssignableFrom(rawClass)) null
    else {
      val deser = elementDeserializer.asInstanceOf[JsonDeserializer[AnyRef]]
      val instantiator = new SortedSetInstantiator(config, rawClass, collectionType.getContentType)
      new SortedSetDeserializer(collectionType, deser, elementTypeDeserializer, instantiator)
    }
  }
}
// Jackson module mixin adding SortedSet deserialization support on top of the set type modifier.
trait SortedSetDeserializerModule extends SetTypeModifierModule {
  this += (_ addDeserializers SortedSetDeserializerResolver)
}
| kakao/mango | mango-shaded/src/main/scala/com/kakao/shaded/jackson/module/scala/deser/SortedSetDeserializerModule.scala | Scala | apache-2.0 | 5,476 |
package dotty.tools.dotc
import org.junit.Test
import org.junit.Ignore
import org.junit.Assert._
import dotty.tools.io._
import dotty.tools.dotc.util.ClasspathFromClassloader
import scala.quoted._
import scala.tasty.inspector._
import java.io.File.pathSeparator
import java.io.File.separator
/**
 * Checks that the bootstrapped standard library's TASTy files can be inspected and recompiled,
 * and that the two blacklists stay minimal and consistent.
 */
class BootstrappedStdLibTASYyTest:
  import BootstrappedStdLibTASYyTest._

  /** Test that we can load trees from TASTy */
  @Test def testTastyInspector: Unit =
    loadWithTastyInspector(loadBlacklisted)

  /** Test that we can load and compile trees from TASTy in a Jar */
  @Test def testFromTastyInJar: Unit =
    compileFromTastyInJar(loadBlacklisted.union(compileBlacklisted))

  /** Test that we can load and compile trees from TASTy */
  @Test def testFromTasty: Unit =
    compileFromTasty(loadBlacklisted.union(compileBlacklisted))

  /** Both blacklists must be free of duplicate entries. */
  @Test def blacklistNoDuplicates =
    def testDup(name: String, list: List[String], set: Set[String]) =
      assert(list.size == set.size,
        list.diff(set.toSeq).mkString(s"`$name` has duplicate entries:\n  ", "\n  ", "\n\n"))
    testDup("loadBlacklist", loadBlacklist, loadBlacklisted)
    testDup("compileBlacklist", compileBlacklist, compileBlacklisted)

  /** An entry blacklisted for loading must not be repeated in the compile blacklist. */
  @Test def blacklistsNoIntersection =
    val intersection = loadBlacklisted & compileBlacklisted
    assert(intersection.isEmpty,
      intersection.mkString(
        "`compileBlacklist` contains names that are already in `loadBlacklist`: \n  ", "\n  ", "\n\n"))

  /** Every blacklisted entry must refer to a TASTy file that actually exists. */
  @Test def blacklistsOnlyContainsClassesThatExist =
    val scalaLibTastyPathsSet = scalaLibTastyPaths.toSet
    assert(loadBlacklisted.diff(scalaLibTastyPathsSet).isEmpty,
      loadBlacklisted.diff(scalaLibTastyPathsSet).mkString(
        "`loadBlacklisted` contains names that are not in `scalaLibTastyPaths`: \n  ", "\n  ", "\n\n"))
    // Fixed copy-paste error: this message previously referred to `loadBlacklisted`.
    assert(compileBlacklisted.diff(scalaLibTastyPathsSet).isEmpty,
      compileBlacklisted.diff(scalaLibTastyPathsSet).mkString(
        "`compileBlacklisted` contains names that are not in `scalaLibTastyPaths`: \n  ", "\n  ", "\n\n"))

  /** Exploratory (ignored) test: flags entries of `loadBlacklist` that now load successfully. */
  @Ignore
  @Test def testLoadBacklistIsMinimal =
    var shouldBeWhitelisted = List.empty[String]
    val size = loadBlacklisted.size
    for (notBlacklisted, i) <- loadBlacklist.zipWithIndex do
      val blacklist = loadBlacklisted - notBlacklisted
      println(s"Trying without $notBlacklisted in the blacklist (${i+1}/$size)")
      try {
        loadWithTastyInspector(blacklist)
        shouldBeWhitelisted = notBlacklisted :: shouldBeWhitelisted
      }
      catch {
        // Any failure means the entry is still needed in the blacklist.
        case ex: Throwable => // ok
      }
    assert(shouldBeWhitelisted.isEmpty,
      shouldBeWhitelisted.mkString("Some classes do not need to be blacklisted in `loadBlacklisted`\n  ", "\n  ", "\n\n"))

  /** Exploratory (ignored) test: flags entries of `compileBlacklist` that now compile successfully. */
  @Ignore
  @Test def testCompileBlacklistIsMinimal =
    var shouldBeWhitelisted = List.empty[String]
    val size = compileBlacklisted.size
    val blacklist0 = loadBlacklisted.union(compileBlacklisted)
    for (notBlacklisted, i) <- compileBlacklist.zipWithIndex do
      val blacklist = blacklist0 - notBlacklisted
      println(s"Trying without $notBlacklisted in the blacklist (${i+1}/$size)")
      try {
        compileFromTastyInJar(blacklist)
        shouldBeWhitelisted = notBlacklisted :: shouldBeWhitelisted
      }
      catch {
        // Any failure means the entry is still needed in the blacklist.
        case ex: Throwable => // ok
      }
    assert(shouldBeWhitelisted.isEmpty,
      shouldBeWhitelisted.mkString("Some classes do not need to be blacklisted in `compileBlacklisted`\n  ", "\n  ", "\n\n"))

end BootstrappedStdLibTASYyTest
object BootstrappedStdLibTASYyTest:
  // Path to the bootstrapped scala-library jar, provided by the build via a system property.
  def scalaLibJarPath = System.getProperty("dotty.scala.library")
  def scalaLibClassesPath =
    java.nio.file.Paths.get(scalaLibJarPath).getParent.resolve("classes").normalize
  // All .tasty files under the classes directory, as paths relative to that directory.
  val scalaLibTastyPaths =
    new Directory(scalaLibClassesPath).deepFiles
      .filter(_.`extension` == "tasty")
      .map(_.normalize.path.stripPrefix(scalaLibClassesPath.toString + "/"))
      .toList
  // Load every non-blacklisted TASTy file and fully traverse its tree via the inspector.
  def loadWithTastyInspector(blacklisted: Set[String]): Unit =
    val inspector = new scala.tasty.inspector.Inspector {
      def inspect(using Quotes)(tastys: List[Tasty[quotes.type]]): Unit =
        for tasty <- tastys do
          tasty.ast.show(using quotes.reflect.Printer.TreeStructure) // Check that we can traverse the full tree
        ()
    }
    val tastyFiles = scalaLibTastyPaths.filterNot(blacklisted)
    val isSuccess = TastyInspector.inspectTastyFiles(tastyFiles.map(x => scalaLibClassesPath.resolve(x).toString))(inspector)
    assert(isSuccess, "Errors reported while loading from TASTy")
  // Recompile from TASTy inside the jar, excluding blacklisted entries via -Yfrom-tasty-ignore-list.
  def compileFromTastyInJar(blacklisted: Set[String]): Unit = {
    val driver = new dotty.tools.dotc.Driver
    val yFromTastyBlacklist =
      blacklisted.mkString("-Yfrom-tasty-ignore-list:", ",", "")
    val args = Array(
      "-classpath", ClasspathFromClassloader(getClass.getClassLoader),
      "-from-tasty",
      "-nowarn",
      yFromTastyBlacklist,
      scalaLibJarPath,
    )
    val reporter = driver.process(args)
    assert(reporter.errorCount == 0, "Errors while re-compiling")
  }
  // Recompile from individual TASTy files, passing only the non-blacklisted files.
  def compileFromTasty(blacklisted: Set[String]): Unit = {
    val driver = new dotty.tools.dotc.Driver
    val tastyFiles = scalaLibTastyPaths.filterNot(blacklisted)
    val args = Array(
      "-classpath", ClasspathFromClassloader(getClass.getClassLoader),
      "-from-tasty",
      "-nowarn",
    ) ++ tastyFiles.map(x => scalaLibClassesPath.resolve(x).toString)
    val reporter = driver.process(args)
    assert(reporter.errorCount == 0, "Errors while re-compiling")
  }
  /** List of tasty files that cannot be loaded from TASTy */
  def loadBlacklist = List[String](
    // No issues :)
  )
  /** List of tasty files that cannot be recompiled from TASTy */
  def compileBlacklist = List[String](
    // See #10048
    // failed: java.lang.AssertionError: assertion failed: class Boolean
    //   at dotty.tools.backend.jvm.BCodeHelpers$BCInnerClassGen.assertClassNotArrayNotPrimitive(BCodeHelpers.scala:247)
    //   at dotty.tools.backend.jvm.BCodeHelpers$BCInnerClassGen.getClassBTypeAndRegisterInnerClass(BCodeHelpers.scala:265)
    //   at dotty.tools.backend.jvm.BCodeHelpers$BCInnerClassGen.getClassBTypeAndRegisterInnerClass$(BCodeHelpers.scala:210)
    //   at dotty.tools.backend.jvm.BCodeSkelBuilder$PlainSkelBuilder.getClassBTypeAndRegisterInnerClass(BCodeSkelBuilder.scala:62)
    //   at dotty.tools.backend.jvm.BCodeHelpers$BCInnerClassGen.internalName(BCodeHelpers.scala:237)
    "scala/Array.tasty",
    "scala/Boolean.tasty",
    "scala/Byte.tasty",
    "scala/Char.tasty",
    "scala/Double.tasty",
    "scala/Float.tasty",
    "scala/Int.tasty",
    "scala/Long.tasty",
    "scala/Short.tasty",
    "scala/Unit.tasty",
  ).map(_.replace("/", separator))
  /** Set of tasty files that cannot be loaded from TASTy */
  def loadBlacklisted = loadBlacklist.toSet
  /** Set of tasty files that cannot be recompiled from TASTy */
  def compileBlacklisted = compileBlacklist.toSet
end BootstrappedStdLibTASYyTest
| dotty-staging/dotty | stdlib-bootstrapped-tasty-tests/test/BootstrappedStdLibTASYyTest.scala | Scala | apache-2.0 | 7,062 |
package dataframe
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/** Demonstrates the many forms of DataFrame `select`. */
object Select {

  /** Row type for the example data set. */
  case class Cust(id: Integer, name: String, sales: Double, discount: Double, state: String)

  def main(args: Array[String]) {
    // Local session with four worker threads — enough for this small demo.
    val spark = SparkSession.builder()
      .appName("DataFrame-Select")
      .master("local[4]")
      .getOrCreate()

    import spark.implicits._

    // Build an RDD over four partitions and convert it to a DataFrame.
    val customers = Seq(
      Cust(1, "Widget Co", 120000.00, 0.00, "AZ"),
      Cust(2, "Acme Widgets", 410500.00, 500.00, "CA"),
      Cust(3, "Widgetry", 410500.00, 200.00, "CA"),
      Cust(4, "Widgets R Us", 410500.00, 0.0, "CA"),
      Cust(5, "Ye Olde Widgete", 500.00, 0.0, "MA")
    )
    val df = spark.sparkContext.parallelize(customers, 4).toDF()

    println("*** use * to select() all columns")
    df.select("*").show()

    println("*** select multiple columns")
    df.select("id", "discount").show()

    println("*** use apply() on DataFrame to create column objects, and select though them")
    df.select(df("id"), df("discount")).show()

    println("*** use as() on Column to rename")
    df.select(df("id").as("Customer ID"), df("discount").as("Total Discount")).show()

    println("*** $ as shorthand to obtain Column")
    df.select($"id".as("Customer ID"), $"discount".as("Total Discount")).show()

    println("*** use DSL to manipulate values")
    df.select(($"discount" * 2).as("Double Discount")).show()
    df.select(($"sales" - $"discount").as("After Discount")).show()

    println("*** use * to select() all columns and add more")
    df.select(df("*"), $"id".as("newID")).show()

    println("*** use lit() to add a literal column")
    df.select($"id", $"name", lit(42).as("FortyTwo")).show()

    println("*** use array() to combine multiple results into a single array column")
    df.select($"id", array($"name", $"state", lit("hello")).as("Stuff")).show()

    println("*** use rand() to add random numbers between 0.0 and 1.0 inclusive ")
    df.select($"id", rand().as("r")).show()
  }
}
| chocolateBlack/LearningSpark | src/main/scala/dataframe/Select.scala | Scala | mit | 2,248 |
package jigg.pipeline
/*
Copyright 2013-2015 Takafumi Sakakibara and Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Properties
import scala.xml.Node
import org.scalatest._
import com.atilika.kuromoji.{TokenBase, TokenizerBase}
import com.atilika.kuromoji.ipadic.{Token=>IToken, Tokenizer=>ITokenizer}
class KuromojiAnnotatorSpec extends FlatSpec with Matchers {
  // Token ids must be derived from the enclosing sentence id ("<sentenceId>_<index>").
  "Annotator" should "assign token id using sentence id" in {
    val annotator = KuromojiAnnotator.fromProps("kuromoji", new Properties)
    val sentence = <sentence id="a">あ</sentence>
    val annotated = annotator newSentenceAnnotation sentence
    val tokenId = annotated \\\\ "token" \\@ "id"
    tokenId should be ("a_0")
  }
  // In tokenize-only mode, tokens are produced but no POS information is attached.
  "TokenAnnotator" should "segment into tokens" in {
    val annotator = KuromojiAnnotator.fromProps("kuromoji[tokenize]", new Properties)
    val sentence = <sentence id = "a">あ</sentence>
    val annotated = annotator newSentenceAnnotation sentence
    val token = annotated \\\\ "token"
    token \\@ "form" should be ("あ")
    token \\@ "pos" should be ("")
  }
  // POS mode annotates pre-tokenized input in place, preserving unrelated attributes.
  "POSAnnotator" should "assign POS tags" in {
    val annotator = KuromojiAnnotator.fromProps("kuromoji[pos]", new Properties)
    val sentence = <sentence id = "a">
      <tokens>
        <token id="a_0" form="あ" characterOffsetBegin="0" characterOffsetEnd="1" dummy="a"/>
      </tokens>
    </sentence>
    val annotated = annotator newSentenceAnnotation sentence
    val token = annotated \\\\ "token"
    token \\@ "pos" should not be ("")
    token \\@ "dummy" should be ("a") // not removed (overriden)
  }
}
| mynlp/jigg | src/test/scala/jigg/pipeline/KuromojiAnnotatorSpec.scala | Scala | apache-2.0 | 2,102 |
/*
* Copyright 2016 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.bruneli.scalaopt.core.variable
import com.github.bruneli.scalaopt.core.RealVectorType
import com.github.bruneli.scalaopt.core.linalg.FromToDoubleConversions.ToDouble
import com.github.bruneli.scalaopt.core.linalg.SimpleDenseVector
/**
* Feature/Predicate/Input variable for a regression model
*
* @author bruneli
*/
case class Input(x: Double) extends AnyVal with ToDouble // value class: avoids boxing the wrapped Double
/**
* Vector of input features
*/
// Dense vector of input features backed by a raw Array[Double].
class Inputs(raw: Array[Double])
  extends SimpleDenseVector[Input](raw)(ConversionsFromDouble.InputFromDouble)
object Inputs {
def apply(inputs: Double*): Inputs = {
new Inputs(inputs.toArray)
}
def apply(vector: RealVectorType): Inputs = {
val coordinates = new Array[Double](vector.length)
for (i <- coordinates.indices) {
coordinates(i) = vector.coordinate(i)
}
new Inputs(coordinates)
}
} | bruneli/scalaopt | core/src/main/scala/com/github/bruneli/scalaopt/core/variable/Input.scala | Scala | apache-2.0 | 1,467 |
package com.cevaris.dedup
import java.io.{File, FileNotFoundException}
import com.cevaris.dedup.exceptions.MissingSourceFile
import com.cevaris.dedup.io.{FilesMap, MD5Mapper, MOV, MP4}
import com.twitter.app.App
import com.twitter.logging._
import scala.util.{Failure, Success, Try}
object DedupApp extends App with Logging {

  /** Resolve a path string to an existing file; Failure(FileNotFoundException) otherwise. */
  def file(s: String): Try[File] = Try {
    val candidate = new File(s)
    if (candidate.exists()) candidate
    else throw new FileNotFoundException(s)
  }

  /** Map supported media files (MOV, MP4) under `f` by their MD5 digest. */
  def md5Mapper(f: File) = FilesMap(f, MD5Mapper, List(MOV, MP4))

  val sourceFlag = flag("file", "", "Source file/directory")

  def main() {
    // A missing source flag is a usage error: fail fast.
    val sourceFileStr = sourceFlag.getWithDefault.map(_.trim).getOrElse(throw MissingSourceFile)

    file(sourceFileStr) match {
      case Failure(e) =>
        log.error(e, s"Failed reading file: ${sourceFileStr}")
      case Success(f) =>
        log.info(md5Mapper(f).toString)
    }
  }
}
| cevaris/dedup | src/main/scala/com/cevaris/dedup/DedupApp.scala | Scala | apache-2.0 | 984 |
/*
* Copyright 2015 Textocat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.textocat.textokit.shaltef.mappings.pattern
import com.textocat.textokit.commons.cas.FSUtils
import com.textocat.textokit.morph.fs.Wordform
import scala.collection.JavaConversions.asScalaSet
/**
* @author Rinat Gareev
*/
private[pattern] trait GrammemeExtractor {
  // Ids of the grammemes that belong to the target grammatical category
  // (subclasses provide the concrete set).
  protected val gramIds: Set[String]
  // Return the single grammeme of the target category carried by the wordform,
  // or null when the wordform has no grammems or none belong to the category.
  // Note: the java Set from FSUtils.toSet is adapted via the asScalaSet implicit conversion.
  protected def extractGrammeme(wf: Wordform): String = {
    wf.getGrammems match {
      case null => null
      case triggerGramsFsArr =>
        val allTriggerGrams = FSUtils.toSet(triggerGramsFsArr)
        val triggerGrams = gramIds.intersect(allTriggerGrams)
        if (triggerGrams.isEmpty) null
        else {
          // More than one grammeme of the same category is unexpected: warn and keep the first.
          val result = triggerGrams.head
          if (triggerGrams.size > 1)
            packageLogger.warn("Too much grammems sharing the same category: %s".format(triggerGrams))
          result
        }
    }
  }
} | textocat/textokit-core | Textokit.ShalTeF/src/main/scala/com/textocat/textokit/shaltef/mappings/pattern/GrammemeExtractor.scala | Scala | apache-2.0 | 1,534 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.submission
import org.orbeon.oxf.util.XPath
import org.orbeon.oxf.xml.TransformerUtils
import org.orbeon.oxf.xml.dom4j.Dom4jUtils
import org.orbeon.saxon.dom4j.DocumentWrapper
import org.orbeon.saxon.om.{VirtualNode, NodeInfo}
import collection.JavaConverters._
import collection.mutable
import org.dom4j._
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.analysis.model.ValidationLevels._
import org.orbeon.oxf.xforms.control.XFormsSingleNodeControl
import org.orbeon.oxf.xforms.event.ListenersTrait
import org.orbeon.oxf.xforms.{InstanceData, XFormsContainingDocument}
import org.orbeon.oxf.xforms.model.BindNode
// Base class for XForms model submissions; mixes in event-listener registration support.
abstract class XFormsModelSubmissionBase extends ListenersTrait
object XFormsModelSubmissionBase {
import Private._
// Prepare XML for submission
//
// - re-root if `ref` points to an element other than the root element
// - annotate with `xxf:id` if requested
// - prune non-relevant nodes if requested
// - annotate with alerts if requested
def prepareXML(xfcd: XFormsContainingDocument, ref: NodeInfo, prune: Boolean, annotateWith: String): Document =
ref match {
case virtualNode: VirtualNode ⇒
// "A node from the instance data is selected, based on attributes on the submission
// element. The indicated node and all nodes for which it is an ancestor are considered for
// the remainder of the submit process. "
val copy =
virtualNode.getUnderlyingNode match {
case e: Element ⇒ Dom4jUtils.createDocumentCopyParentNamespaces(e)
case n: Node ⇒ Dom4jUtils.createDocumentCopyElement(n.getDocument.getRootElement)
case _ ⇒ throw new IllegalStateException
}
val annotationTokens = stringToSet(annotateWith)
// Annotate ids before pruning so that it is easier for other code (Form Runner) to infer the same ids
if (annotationTokens("id"))
annotateWithHashes(copy)
// "Any node which is considered not relevant as defined in 6.1.4 is removed."
if (prune)
pruneNonRelevantNodes(copy)
annotateWithAlerts(xfcd, copy, annotationTokens collect LevelByName)
copy
// Submitting read-only instance backed by TinyTree (no MIPs to check)
// TODO: What about re-rooting and annotations?
case nodeInfo if ref.getNodeKind == org.w3c.dom.Node.ELEMENT_NODE ⇒
TransformerUtils.tinyTreeToDom4j(ref)
case nodeInfo ⇒
TransformerUtils.tinyTreeToDom4j(ref.getRoot)
}
def pruneNonRelevantNodes(doc: Document): Unit =
Iterator.iterateWhileDefined(findNextNodeToDetach(doc)) foreach (_.detach())
def annotateWithHashes(doc: Document): Unit = {
val wrapper = new DocumentWrapper(doc, null, XPath.GlobalConfiguration)
var annotated = false
doc.accept(new VisitorSupport() {
override def visit(element: Element): Unit = {
val hash = SubmissionUtils.dataNodeHash(wrapper.wrap(element))
element.addAttribute(QName.get("id", XXFORMS_NAMESPACE_SHORT), hash)
annotated = true
}
})
if (annotated)
addRootElementNamespace(doc)
}
// Annotate elements which have failed constraints with an xxf:error, xxf:warning or xxf:info attribute containing
// the alert message. Only the levels passed in `annotate` are handled.
def annotateWithAlerts(
xfcd : XFormsContainingDocument,
doc : Document,
levelsToAnnotate : Set[ValidationLevel]
): Unit =
if (levelsToAnnotate.nonEmpty) {
val elementsToAnnotate = mutable.Map[ValidationLevel, mutable.Map[Set[String], Element]]()
// Iterate data to gather elements with failed constraints
doc.accept(new VisitorSupport() {
override def visit(element: Element): Unit = {
val failedValidations = BindNode.failedValidationsForAllLevelsPrioritizeRequired(element)
for (level ← levelsToAnnotate) {
// NOTE: Annotate all levels specified. If we decide to store only one level of validation
// in bind nodes, then we would have to change this to take the highest level only and ignore
// the other levels.
val failedValidationsForLevel = failedValidations.getOrElse(level, Nil)
if (failedValidationsForLevel.nonEmpty) {
val map = elementsToAnnotate.getOrElseUpdate(level, mutable.Map[Set[String], Element]())
map += (failedValidationsForLevel map (_.id) toSet) → element
}
}
}
})
if (elementsToAnnotate.nonEmpty) {
val controls = xfcd.getControls.getCurrentControlTree.getEffectiveIdsToControls.asScala
val relevantLevels = elementsToAnnotate.keySet
def controlsIterator =
controls.iterator collect {
case (_, control: XFormsSingleNodeControl)
if control.isRelevant && control.alertLevel.toList.toSet.subsetOf(relevantLevels) ⇒ control
}
var annotated = false
def annotateElementIfPossible(control: XFormsSingleNodeControl) = {
// NOTE: We check on the whole set of constraint ids. Since the control reads in all the failed
// constraints for the level, the sets of ids must match.
for {
level ← control.alertLevel
controlAlert ← Option(control.getAlert)
failedValidationsIds = (control.failedValidations map (_.id) toSet)
elementsMap ← elementsToAnnotate.get(level)
element ← elementsMap.get(failedValidationsIds)
qName = QName.get(level.name, XXFORMS_NAMESPACE_SHORT)
} locally {
// There can be an existing attribute if more than one control bind to the same element
Option(element.attribute(qName)) match {
case Some(existing) ⇒ existing.setValue(existing.getValue + controlAlert)
case None ⇒ element.addAttribute(qName, controlAlert)
}
annotated = true
}
}
// Iterate all controls with warnings and try to annotate the associated element nodes
controlsIterator foreach annotateElementIfPossible
// If there is any annotation, make sure the attribute's namespace prefix is in scope on the root
// element
if (annotated)
addRootElementNamespace(doc)
}
}
import XFormsSubmissionUtils._
def defaultSerialization(xformsMethod: String): Option[String] =
nonEmptyOrNone(xformsMethod) collect {
case "multipart-post" ⇒ "multipart/related"
case "form-data-post" ⇒ "multipart/form-data"
case "urlencoded-post" ⇒ "application/x-www-form-urlencoded"
case method if isPost(method) || isPut(method) ⇒ "application/xml"
case method if isGet(method) || isDelete(method) ⇒ "application/x-www-form-urlencoded"
}
  // Explicitly requested serialization when non-blank, otherwise the method's default.
  def requestedSerialization(xformsSerialization: String, xformsMethod: String) =
    nonEmptyOrNone(xformsSerialization) orElse defaultSerialization(xformsMethod)
  // Java-friendly variant: returns null instead of None.
  def getRequestedSerializationOrNull(xformsSerialization: String, xformsMethod: String) =
    requestedSerialization(xformsSerialization, xformsMethod).orNull
  private object Private {
    // Dedicated Breaks instance so break() cannot interfere with client code.
    val processBreaks = new scala.util.control.Breaks
    import processBreaks._
    // Find the first non-relevant element or attribute in document order, if any.
    // Uses break() to stop the dom4j visitor as soon as a match is found.
    def findNextNodeToDetach(doc: Document) = {
      var nodeToDetach: Node = null
      tryBreakable[Option[Node]] {
        doc.accept(
          new VisitorSupport {
            override def visit(element: Element) =
              checkInstanceData(element)
            override def visit(attribute: Attribute) =
              checkInstanceData(attribute)
            private def checkInstanceData(node: Node) =
              if (! InstanceData.getInheritedRelevant(node)) {
                nodeToDetach = node
                break()
              }
          }
        )
        None
      } catchBreak {
        Some(nodeToDetach)
      }
    }
    // Declare the xxf prefix on the root element so annotation attributes resolve.
    def addRootElementNamespace(doc: Document) =
      doc.getRootElement.addNamespace(XXFORMS_NAMESPACE_SHORT.getPrefix, XXFORMS_NAMESPACE_SHORT.getURI)
  }
} | wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/submission/XFormsModelSubmissionBase.scala | Scala | lgpl-2.1 | 9,064 |
// Copyright (C) 2015 IBM Corp. All Rights Reserved.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.ibm.watson.developer_cloud.utils
import spray.json._
import spray.json.DefaultJsonProtocol._
object VCAPServicesProtocol extends DefaultJsonProtocol {
  implicit val credentialsFormat = jsonFormat(VCAPCredentials, "url", "username", "password")
  implicit val serviceFormat = jsonFormat(VCAPService, "name", "label", "plan", "tags", "credentials")
  // Custom format for the VCAPProperties wrapper: serializes the inner map directly as a
  // JSON object keyed by service type, rather than wrapping it in a "properties" field.
  implicit val vcapPropertiesFormat = new JsonFormat[VCAPProperties] {
    def write(m: VCAPProperties) = JsObject {
      m.properties.map { case (k: String, v: List[VCAPService]) =>
        k.toJson match {
          case JsString(x) => x -> v.toJson
          // Defensive: String keys serialize to JsString, so this branch is not expected to fire.
          case x => throw new SerializationException("Map key must be formatted as JsString, not '" + x + "'")
        }
      }
    }
    def read(value: JsValue) : VCAPProperties = {
      val map = value match {
        case x: JsObject => x.fields.map { case (k,v) =>
          (JsString(k).convertTo[String], v.convertTo[List[VCAPService]])
        }
        // Anything other than a JSON object cannot represent the services map.
        case x => deserializationError("Expected Map as JsObject, but got " + x)
      }
      VCAPProperties(map)
    }
  }
}
// Credentials block of a bound VCAP service instance.
case class VCAPCredentials(url: String, username: String, password: String)
// A single service entry as it appears under a VCAP_SERVICES key.
case class VCAPService(name : String, label: String, plan: String, tags: Option[List[String]], credentials: VCAPCredentials)
// Top-level VCAP_SERVICES document: service type name -> bound instances.
case class VCAPProperties(properties: Map[String, List[VCAPService]])
| kane77/watson-scala-wrapper | src/main/scala/com/ibm/watson/developer_cloud/utils/VCAPUtils.scala | Scala | apache-2.0 | 2,098 |
package es.juanc.katas.cardgame
import es.juanc.katas.UnitSpec
/**
 * TODO list:
 * - entorno configurado para ejecutar las especificaciones
 * - dada 1 carta a cada jugador gana el jugador uno: "Win player one: 1 to 0"
 * - dada 1 carta a cada jugador empatan: "Tie"
 * - dada 1 carta a cada jugador gana el jugador dos: "Win player two: 1 to 0"
 * - dadas 2 cartas a cada jugador gana el jugador dos: "Win player two: 2 to 0"
 * - dadas 3 cartas a cada jugador gana el jugador dos: "Win player two: 2 to 1"
 * - ...
 */
class CardGameSpec extends UnitSpec {
  // Smoke test: verifies the spec environment itself runs.
  "Spec" must "pass test" in {
    "" shouldBe empty
  }
  // Table-driven cases covering win/tie outcomes for decks of growing size (English results).
  "dado N carta to cada jugador" must "se obtiene el resultado parametrizado, con resultado en ingles" in {
    val values =
      Table(
        ("deck1", "deck2", "expected"),
        (List("12"), List("1"), "Win player one: 1 to 0"),
        (List("1"), List("12"), "Win player two: 1 to 0"),
        (List("12", "11"), List("2", "1"), "Win player one: 2 to 0"),
        (List("1", "2"), List("12", "11"), "Win player two: 2 to 0"),
        (List("1", "12", "2"), List("12", "11", "10"), "Win player two: 2 to 1"),
        (List("12", "11", "10"), List("1", "12", "2"), "Win player one: 2 to 1"),
        (List("12", "11", "10", "2"), List("1", "12", "2", "10"), "Tie"),
        (List("11", "1", "10", "2", "1"), List("11", "1", "10", "10", "5"), "Win player two: 5 to 3")
      )
    forAll(values) { (deck1: List[String], deck2: List[String], expected: String) =>
      CardGameInEn.whoWins(deck1, deck2) shouldBe expected
    }
  }
  // Same game, Spanish-localized result message.
  "dado 1 carta to cada jugador" must "gana el jugador uno, con resultado en español" in {
    CardGameInEs.whoWins(List("12"), List("1")) should equal("Gana el jugador uno: 1 a 0")
  }
}
| juancsch/katas | scala/CardGame/src/test/scala/es/juanc/katas/cardgame/CardGameSpec.scala | Scala | unlicense | 2,022 |
package name.abhijitsarkar.scala.scalaimpatient.inheritance
/**
* Q8: Compile the `Person` and `SecretAgent` classes in Section 8.10, "Overriding Fields" and analyze the class files
* with `javap`. How many `name` fields are there? How many `name` getter methods are there? What do they get?
*/
/* Has a private final 'name' and public 'name' and 'toString' methods. */
class Person(val name: String) {
override def toString = getClass.getName + "[name=" + name + "]"
} | abhijitsarkar/scala-impatient | src/main/scala/name/abhijitsarkar/scala/scalaimpatient/inheritance/Person.scala | Scala | gpl-3.0 | 475 |
package com.josephpconley.swagger2postman.utils
/** Small utility helpers. */
object ConversionUtils {
  /** A freshly generated random UUID in canonical 36-character string form. */
  def genUUID: String = java.util.UUID.randomUUID.toString
}
| hasithalakmal/RIP | RIP_Test/swagger2postman-master/src/main/scala/com/josephpconley/swagger2postman/utils/ConversionUtils.scala | Scala | apache-2.0 | 128 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package connectors.launchpadgateway.exchangeobjects.in
import org.joda.time.{ DateTime, LocalDate }
import play.api.libs.json.JodaWrites._ // This is needed for DateTime serialization
import play.api.libs.json.JodaReads._ // This is needed for DateTime serialization
import play.api.libs.json.Json
import reactivemongo.bson.{ BSONDocument, BSONHandler, Macros }
case class QuestionCallbackRequest(received: DateTime, candidateId: String, customCandidateId: String, interviewId: Int,
customInterviewId: Option[String], customInviteId: String, deadline: LocalDate,
questionNumber: String)
object QuestionCallbackRequest {
// Should match LaunchpadTestsCallback case class
val key = "question"
implicit val questionCallbackFormat = Json.format[QuestionCallbackRequest]
import repositories.BSONDateTimeHandler
import repositories.BSONLocalDateHandler
implicit val bsonHandler: BSONHandler[BSONDocument, QuestionCallbackRequest] = Macros.handler[QuestionCallbackRequest]
}
| hmrc/fset-faststream | app/connectors/launchpadgateway/exchangeobjects/in/QuestionCallbackRequest.scala | Scala | apache-2.0 | 1,592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.carbondata.vectorreader
import org.apache.spark.sql.{CarbonToSparkAdapter, Row}
import org.apache.spark.sql.execution.strategy.CarbonDataSourceScan
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class VectorReaderTestCase extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("DROP TABLE IF EXISTS vectorreader")
// clean data folder
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql("CREATE TABLE vectorreader (ID Int, date Timestamp, country String, name String, " +
"phonetype String,serialname String, salary Int) STORED AS carbondata")
sql(s"LOAD DATA INPATH '$resourcesPath/source.csv' INTO TABLE vectorreader")
}
test("test vector reader") {
sqlContext.setConf("carbon.enable.vector.reader", "true")
val plan = sql(
"""select * from vectorreader""".stripMargin).queryExecution.executedPlan
var batchReader = false
plan.collect {
case _: CarbonDataSourceScan => batchReader = true
}
assert(batchReader, "batch reader should exist when carbon.enable.vector.reader is true")
}
test("test without vector reader") {
sqlContext.setConf("carbon.enable.vector.reader", "false")
val plan = sql(
"""select * from vectorreader""".stripMargin).queryExecution.executedPlan
var rowReader = false
plan.collect {
case scan: CarbonDataSourceScan => rowReader = !CarbonToSparkAdapter
.supportsBatchOrColumnar(scan)
}
assert(rowReader, "row reader should exist by default")
}
test("test vector reader for random measure selection") {
sqlContext.setConf("carbon.enable.vector.reader", "true")
checkAnswer(sql("""select salary, ID from vectorreader where ID = 94""".stripMargin),
Seq(Row(15093, 94)))
}
override def afterAll {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
sqlContext.setConf(
"carbon.enable.vector.reader", CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
sql("DROP TABLE IF EXISTS vectorreader")
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/spark/carbondata/vectorreader/VectorReaderTestCase.scala | Scala | apache-2.0 | 3,163 |
package hex
//Not sure to get the exact purpose of this thing
//Ok reserve design, Valid?
//Two last def a really hard to read
//import format.pgn.{ Parser, Reader, Tag }
case class Replay(setup: Game, moves: List[Move], state: Game) {
lazy val chronoMoves = moves.reverse
def addMove(move: Move) = copy(
moves = move :: moves,
state = state(move))
def moveAtPly(ply: Int): Option[Move] = chronoMoves lift (ply - 1)
}
object Replay {
def apply(game: Game) = new Replay(game, Nil, game)
/*def apply(
moveStrs: List[String],
initialFen: Option[String],
variant: hex.variant.Variant): Valid[Replay] =
moveStrs.some.filter(_.nonEmpty) toValid "[replay] pgn is empty" flatMap { nonEmptyMoves =>
Reader.moves(
nonEmptyMoves,
List(
initialFen map { fen => Tag(_.FEN, fen) },
variant.some.filterNot(_.standard) map { v => Tag(_.Variant, v.name) }
).flatten)
}
def boards(moveStrs: List[String], initialFen: Option[String]): Valid[List[Board]] = {
val sit = initialFen.flatMap(format.Forsyth.<<) | Situation(hex.variant.Standard)
val init = sit -> List(sit.board)
Parser moves(moveStrs, sit.board.variant) flatMap { sans =>
sans.foldLeft[Valid[(Situation, List[Board])]](init.success) {
case (scalaz.Success((sit, boards)), san) =>
san(sit) map { move =>
val after = move.afterWithLastMove
Situation(after, !sit.color) -> (after :: boards)
}
case (x, _) => x
}
}
}.map(_._2.reverse)*/
}
| ThomasCabaret/scalahex | src/main/scala/Replay.scala | Scala | mit | 1,568 |
def flatMap[A,B](ma: F[A])(f: A => F[B]) =
join(map(ma)(f))
def compose[A,B,C](f: A => F[B], g: B => F[C]): A => F[C] =
a => join(map(f(a))(g))
| ud3sh/coursework | functional-programming-in-scala-textbook/answerkey/monads/13.answer.scala | Scala | unlicense | 149 |
package org.jetbrains.plugins.scala.lang.psi.api.statements
import javax.swing.Icon
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes.kVAL
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlock
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
/**
* @author Alexander Podkhalyuzin
*/
trait ScValue extends ScValueOrVariable {
override protected def keywordElementType: IElementType = kVAL
def hasExplicitType: Boolean = typeElement.isDefined
override protected def isSimilarMemberForNavigation(member: ScMember, isStrict: Boolean): Boolean = member match {
case other: ScValue => super.isSimilarMemberForNavigation(other, isStrict)
case _ => false
}
override def getIcon(flags: Int): Icon = {
var parent = getParent
while (parent != null) {
parent match {
case _: ScExtendsBlock => return Icons.FIELD_VAL
case _: ScBlock => return Icons.VAL
case _ => parent = parent.getParent
}
}
null
}
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScValue.scala | Scala | apache-2.0 | 1,175 |
package com.vwo.oldmonk
package object deduplication extends ProvidesDeduplication with ProvidesAvoidRepeats
| wingify/Oldmonk | src/main/scala/com/vwo/oldmonk/deduplication/package.scala | Scala | gpl-3.0 | 110 |
package jigg.nlp.ccg.lexicon
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
trait BunsetsuBase {
def offset: Int
def wordSeq: Seq[Word]
def posSeq: Seq[PoS]
def shuji: Int
def gokei: Int
def includesParens: Boolean
def includesPuncs: Boolean
def word(i: Int): Word = if (i < wordSeq.size) wordSeq(i) else wordSeq.last
def pos(i: Int): PoS = if (i < posSeq.size) posSeq(i) else posSeq.last
def size = wordSeq.size
def cabochaStr = (0 until size).map { i => word(i).v + "\\t" + pos(i).v }.mkString("\\n")
def str = wordSeq.map(_.v).mkString
def posStr = posSeq.map(_.v).mkString("|")
}
// TODO: Currently the settings below are hard-coded.
// Do we have to support other PoS systems as well by abstracting these strings?
case class Bunsetsu(
override val offset: Int,
override val wordSeq: Seq[Word],
override val posSeq: Seq[PoS]
) extends BunsetsuBase {
val shuji = posSeq.lastIndexWhere { p => p.first.v != "記号" && p.first.v != "助詞" && p.first.v != "接尾辞" }
val gokei = posSeq.lastIndexWhere { _.first.v != "記号" }
val includesParens = posSeq.indexWhere { _.second.v.startsWith("記号-括弧") } != -1
val includesPuncs = posSeq.lastIndexWhere { p => p.second.v == "記号-読点" || p.second.v == "記号-句点" } != -1
}
trait BunsetsuSeq extends hasSize {
def bunsetsuSeq: Seq[Bunsetsu]
def apply(i: Int) = bunsetsuSeq(i)
def size = bunsetsuSeq.size
}
case class BunsetsuSentence(override val bunsetsuSeq: Seq[Bunsetsu]) extends BunsetsuSeq {
def parseWithCCGDerivation(derivation: Derivation): ParsedBunsetsuSentence = {
val subTreeHeadMap: Array[Array[Int]] = derivation.map.map { _.map { _ => -1 } }
val headSeq = Array.fill(derivation.map.size)(-1)
def fillHeadsBottomup(root: Point) = {
derivation.foreachPointBottomup({ p => derivation.get(p) match {
case Some(AppliedRule(BinaryChildrenPoints(left, right), _)) =>
def head(p: Point): Int = subTreeHeadMap(p.x)(p.y)
subTreeHeadMap(p.x)(p.y) = head(right) // cache value for parent computations
headSeq(head(left)) = head(right)
case Some(AppliedRule(NoneChildPoint(), _)) => subTreeHeadMap(p.x)(p.y) = p.x
case _ =>
} }, root)
}
derivation.roots.foreach { r => fillHeadsBottomup(r) }
val word2bunsetsuIdx: Array[Int] = (0 until size).flatMap { i => this(i).wordSeq.map { _ => i } }.toArray
val bunsetsuDepsSeq: Seq[Seq[Int]] = (0 until size).map { head =>
val headBunsetsu = this(head)
def inHeadRange(word: Int): Boolean = word >= headBunsetsu.offset && word < headBunsetsu.offset + headBunsetsu.size
def containsLinkToHead(idx: Int): Boolean = {
val bunsetsu = this(idx)
(bunsetsu.offset until bunsetsu.offset + bunsetsu.size).indexWhere { i => inHeadRange(headSeq(i)) } != -1
}
(0 until head).filter { containsLinkToHead(_) }
}
val bunsetsuHeadSeq = Array.fill(size)(-1)
bunsetsuDepsSeq.zipWithIndex.foreach { case (deps, head) => deps.foreach { d =>
val existingHead = bunsetsuHeadSeq(d) // sometimes, d already have a head in another position
if (existingHead != -1) bunsetsuHeadSeq(d) = Math.min(existingHead, head)
else bunsetsuHeadSeq(d) = head
} }
ParsedBunsetsuSentence(bunsetsuSeq, bunsetsuHeadSeq)
}
}
case class ParsedBunsetsuSentence(
override val bunsetsuSeq: Seq[Bunsetsu],
val headSeq: Seq[Int]) extends BunsetsuSeq {
def head(i: Int) = headSeq(i)
def renderInCabocha: String = headSeq.zipWithIndex.zip(bunsetsuSeq).map { case ((h, i), bunsetsu) =>
val depStr = "* " + i + " " + h + "D"
depStr + "\\n" + bunsetsu.cabochaStr
}.mkString("\\n") + "\\nEOS"
def renderInCoNLL: String = headSeq.zipWithIndex.zip(bunsetsuSeq).map { case ((h, i), bunsetsu) =>
"%d\\t%s\\t_\\t_\\t%s\\t_\\t%d\\t_\\t_\\t_".format(i + 1, bunsetsu.str, bunsetsu.posStr, h + 1)
}.mkString("\\n") + "\\n"
}
| sakabar/jigg | src/main/scala/jigg/nlp/ccg/lexicon/Bunsetsu.scala | Scala | apache-2.0 | 4,454 |
package views.html.climate
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one or more *
* contributor license agreements. See the NOTICE file distributed with *
* this work for additional information regarding copyright ownership. *
* The ASF licenses this file to You under the Apache License, Version 2.0 *
* (the "License"); you may not use this file except in compliance with *
* the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
*******************************************************************************/
object aboutProject extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template1[String,play.api.templates.HtmlFormat.Appendable] {
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one or more *
* contributor license agreements. See the NOTICE file distributed with *
* this work for additional information regarding copyright ownership. *
* The ASF licenses this file to You under the Apache License, Version 2.0 *
* (the "License"); you may not use this file except in compliance with *
* the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
*******************************************************************************/
def apply/*18.2*/(message: String):play.api.templates.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*18.19*/("""
"""),_display_(Seq[Any](/*20.2*/main("About Project")/*20.23*/ {_display_(Seq[Any](format.raw/*20.25*/("""
<h1>About Project</h1>
<div class="jumbotron">
<p>This project aims to develop an online collaborative working environment, where scientists can not only efficiently perform their
climate data analyses but also effectively share their analysis tools, datasets, and results with others. This project is based on a tight collaboration between Jet Propulsion
Laboratory (JPL) and Carnegie Mellon University (CMU).</p>
<p>JPL has developed a technology that wraps existing climate data analysis tools into web services, with programmable interfaces that are universally accessible from the Internet.
Based on the technology, JPL has developed a collection of web services that enable multi-aspect physics-based and phenomenon-oriented climate model performance evaluation and
diagnosis through the comprehensive and synergistic use of multiple observational data, reanalysis data, and model outputs. The system is called Climate Model Diagnostic Analyzer (CMDA).</p>
<p>CMU has developed a technology to model software usage behaviors into social networks, and based on semantics-oriented analytics to assist Earth scientists in designing data
analysis procedures. Using the technology, CMU has developed a semantic model for CMDA services in order to capture various hidden relationships, including semantic relationships
and usage relationships in CMDA services, to help users not only learn available datasets and web services, but also learn how to use them and design climate analytics workflows
faster than before. CMU has also developed a provenance model to record and track scientists’ activities and behaviors using CMDA services.</p>
<p>One of the key outcomes of this project will be a repository of climate data analytics web services, equipped with mechanisms to organize and manage these climate analytics
services and help share and reuse the services properly. Our vision is that the climate data analytics web services can be shared, organized, searched, and recommended like photos
and videos on YouTube and Shutterfly. We intend to build the web service repository with a full understanding of the needs of the Earth science researchers, and make it easy for the
next-generation Earth scientists to contribute to a pool of data analytics tools, build communities, and form collaborative relationships.</p>
</div>
""")))})))}
}
def render(message:String): play.api.templates.HtmlFormat.Appendable = apply(message)
def f:((String) => play.api.templates.HtmlFormat.Appendable) = (message) => apply(message)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Thu Apr 07 14:52:42 PDT 2016
SOURCE: /home/dimitris/CMU/SA&D/Project/ApacheCMDA-Frontend/app/views/climate/aboutProject.scala.html
HASH: 7cb317fa19429d31e30582c2c4e2ec9270f50fa4
MATRIX: 3184->1206|3296->1223|3334->1226|3364->1247|3404->1249
LINES: 56->18|59->18|61->20|61->20|61->20
-- GENERATED --
*/
| dsarlis/SAD-Spring-2016-Project-Team4 | ApacheCMDA-Frontend/target/scala-2.10/src_managed/main/views/html/climate/aboutProject.template.scala | Scala | mit | 6,561 |
package edu.cmu.lti.nlp.amr.GraphDecoder
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.FastFeatureVector._
import java.lang.Math.abs
import java.lang.Math.log
import java.lang.Math.exp
import java.lang.Math.random
import java.lang.Math.floor
import java.lang.Math.min
import java.lang.Math.max
import scala.io.Source
import scala.util.matching.Regex
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.PriorityQueue
import Double.{NegativeInfinity => minusInfty}
case class GraphObj(graph: Graph,
nodes: Array[Node], // usually 'nodes' is graph.nodes.filter(_.name != None).toArray
features: Features) {
// GraphObj is an object to keep track of the connectivity of the graph as edges are added to the graph.
// It is code that was factored out of Alg2. It is now also used in Alg1.
// The two main functions are:
// def connected : Boolean Determines if the graph is connected
// def addEdge() Adds an edge to the graph, keeping track of connectivity
// Each node is numbered by its index in 'nodes'
// Each set is numbered by its index in 'setArray'
// 'set' contains the index of the set that each node is assigned to
// At the start each node is in its own set
var set: Array[Int] = nodes.zipWithIndex.map(_._2).toArray
var setArray: Array[Set[Int]] = nodes.zipWithIndex.map(x => Set(x._2)).toArray
var score: Double = 0.0
var feats: FeatureVector = FeatureVector(features.weights.labelset)
val edgeWeights : Array[Array[Array[(String, Double)]]] = computeWeightMatrix
def largestWeight : Double = {
val largest = try {
edgeWeights.map(x => x.map(y => y.map(z => abs(z._2)).filter(z => z < 1000000000).max).max).max // filter to less than 100000000 because we don't want to include infinite ramp weights
} catch {
case e: java.lang.UnsupportedOperationException => {
logger(0, "No largest weight.")
1.0 // if we are passed a graph with no edges or if no edges pass our filter
}
}
logger(0, "Largest weight = " + largest)
return largest
}
def getSet(nodeIndex : Int) : Set[Int] = { setArray(set(nodeIndex)) }
def connected : Boolean = if(set.size > 0) { getSet(0).size == set.size } else { true }
def addEdge(node1: Node, index1: Int, node2: Node, index2: Int, label: String, weight: Double, addRelation: Boolean = true) {
if (!node1.relations.exists(x => ((x._1 == label) && (x._2.id == node2.id))) || !addRelation) { // Prevent adding an edge twice
logger(1, "Adding edge ("+node1.concept+", "+label +", "+node2.concept + ") with weight "+weight.toString)
if (addRelation) {
node1.relations = (label, node2) :: node1.relations
}
feats += features.localFeatures(node1, node2, label)
score += weight
}
//logger(1, "set = " + set.toList)
//logger(1, "nodes = " + nodes.map(x => x.concept).toList)
//logger(1, "setArray = " + setArray.toList)
if (set(index1) != set(index2)) { // If different sets, then merge them
//logger(1, "Merging sets")
getSet(index1) ++= getSet(index2)
val set2 = getSet(index2)
for (index <- set2) {
set(index) = set(index1)
}
set2.clear()
}
//logger(1, "set = " + set.toList)
//logger(1, "nodes = " + nodes.map(x => x.concept).toList)
//logger(1, "setArray = " + setArray.toList)
}
def localScore(nodeIndex1: Int, nodeIndex2: Int, label: Int) : Double = {
return edgeWeights(nodeIndex1)(nodeIndex2)(label)._2
}
private def computeWeightMatrix : Array[Array[Array[(String, Double)]]] = {
val edgeWeights : Array[Array[Array[(String, Double)]]] = nodes.map(x => Array.fill(0)(Array.fill(0)("",0.0)))
for (i <- 0 until nodes.size) {
edgeWeights(i) = nodes.map(x => Array.fill(0)(("",0.0)))
for (j <- 0 until nodes.size) {
if (i == j) {
edgeWeights(i)(j) = Array((":self", 0.0)) // we won't add this to the queue anyway, so it's ok
} else {
edgeWeights(i)(j) = Array.fill(features.weights.labelset.size)(("", 0.0))
val feats = features.localFeatures(nodes(i), nodes(j))
features.weights.iterateOverLabels2(feats,
x => edgeWeights(i)(j)(x.labelIndex) = (features.weights.labelset(x.labelIndex), x.value))
}
}
}
return edgeWeights
}
def log {
logger(1, "set = " + set.toList)
logger(1, "nodes = " + nodes.map(x => x.concept).toList)
logger(1, "setArray = " + setArray.toList)
}
logger(1, "Adding edges already there")
val nodeIds : Array[String] = nodes.map(_.id).toArray
for { (node1, index1) <- nodes.zipWithIndex
(label, node2) <- node1.relations } {
if (nodeIds.indexWhere(_ == node2.id) != -1) {
val index2 = nodeIds.indexWhere(_ == node2.id)
addEdge(node1, index1, node2, index2, label, features.localScore(node1, node2, label), addRelation=false)
} else {
feats += features.localFeatures(node1, node2, label)
score += features.localScore(node1, node2, label)
}
}
}
| jflanigan/jamr | src/GraphDecoder/GraphObj.scala | Scala | bsd-2-clause | 5,586 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.dsl
/**
* A trait for entities that have a name, e.g., views, fields, and parameters.
*/
trait Named {
/**
* The base name of the Named entity.
*/
def namingBase: String
/**
* The base name of the Named entity converted to a database-friendly format, i.e., using only lowercase / underscore.
*/
def n = Named.camelToLowerUnderscore(namingBase)
}
/**
* Helpers for Named entities
*/
object Named {
/**
* converts camel case to lower_case / underscore format.
*/
def camelToLowerUnderscore(name: String) = {
val formattedName = new StringBuffer()
for (c <- name) {
if (c >= 'A' && c <= 'Z')
formattedName.append("_" + c.toString.toLowerCase())
else
formattedName.append(c)
}
formattedName.toString().replaceAll("^_", "")
}
} | utzwestermann/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/dsl/Named.scala | Scala | apache-2.0 | 1,455 |
package scala.collection.concurrent
import java.util.concurrent.Executors
import scala.concurrent.duration.TimeUnit
class ConcurrentMapTester[K, V](map: Map[K, V]) {
def runTasks(executionTimeout: Long, unit: TimeUnit)(tasks: (Map[K, V] => Unit)*): Unit = {
val exec = Executors.newCachedThreadPool()
for (task <- tasks) exec.execute(() => task(map))
exec.shutdown()
exec.awaitTermination(executionTimeout, unit)
}
@throws[AssertionError]
def assertContainsEntry(k: K, v: V): Unit = {
val value = map.get(k)
assert(value.isDefined, s"map does not contain key '$k'")
assert(value.contains(v), s"key '$k' is mapped to '${value.get}', not to '$v'")
}
@throws[AssertionError]
def assertExistsEntry(k: K, p: V => Boolean): Unit = {
val value = map.get(k)
assert(value.isDefined, s"map does not contain key '$k'")
assert(value.exists(p), s"key '$k' is mapped to '${value.get}', which does not match the predicate")
}
@throws[AssertionError]
def assertDoesNotContain(k: K): Unit = {
val value = map.get(k)
assert(value.isEmpty, s"key '$k' is not empty and is mapped to '${value.get}'")
}
}
| scala/scala | test/junit/scala/collection/concurrent/ConcurrentMapTester.scala | Scala | apache-2.0 | 1,158 |
// Pattern-match test for quoted-expression patterns ('{ ... }) on a staged Int.
// NOTE(review): looks like a compiler exhaustivity regression test -- quoted
// patterns are never statically exhaustive, so the wildcard case is required;
// all branches deliberately have empty bodies.
class Foo {
  // Matches a quoted Int expression against the literal quotes 1 and 2;
  // the method only exercises the pattern matcher and returns Unit.
  def foo(x: quoted.Expr[Int])(using scala.quoted.Quotes): Unit = x match {
    case '{ 1 } =>
    case '{ 2 } =>
    case _ =>
  }
}
| lampepfl/dotty | tests/patmat/i6255.scala | Scala | apache-2.0 | 146 |
package fish.philwants
object Util {
  /** Full-string pattern for a plausible email address: a non-empty local part
    * (no '@' or whitespace), an '@', and a domain that contains at least one dot.
    *
    * Fixes the previous pattern `[^@]+@[^\.].{2,3}`, which required the whole
    * domain to be exactly 3-4 characters, so ordinary addresses such as
    * "user@gmail.com" were never recognized and passed through unchanged.
    */
  val emailregex = """[^@\s]+@[^@\s]+\.[^@\s]+"""

  /** Returns the local part (the text before the first '@') when `email`
    * matches [[emailregex]]; otherwise returns the input unchanged so that
    * plain usernames are passed through as-is.
    */
  def emailToUsername(email: String): String = {
    if (email.matches(emailregex)) email.substring(0, email.indexOf('@'))
    else email
  }
}
| philwantsfish/shard | src/main/scala/fish/philwants/Util.scala | Scala | mit | 225 |
package scalan.collections
import scalan._
import scala.reflect.runtime.universe._
import scala.collection.mutable.Map
import scalan.staged.BaseExp
import scalan.util.{Covariant, Invariant};
/**
 * Mutable-map operations for the staged DSL. This trait only declares the
 * interface; MapOpsStd supplies a direct sequential implementation and
 * MapOpsExp builds IR nodes for staged evaluation.
 */
trait MapOps extends Base { self: Scalan =>
  /** Shorthand for a staged reference to a map. */
  type MM[K, V] = Rep[MMap[K, V]]
  /** Staged mutable map from K to V. */
  trait MMap[K, V] extends Def[MMap[K,V]] {
    implicit def elemKey: Elem[K]
    implicit def elemValue: Elem[V]
    val selfType = mMapElement(elemKey, elemValue)
    /** Merge of two maps; in the sequential implementation entries of `that`
      * win on duplicate keys (see MapOpsStd). */
    def union(that: MM[K, V]): MM[K, V]
    /** Removes from this map every key that is present in `that`. */
    def difference(that: MM[K, V]): MM[K, V]
    /** Inner join on keys: pairs the values for keys present in both maps. */
    def join[V2:Elem](that: MM[K, V2]): MM[K, (V, V2)]
    /** Outer merge: values under a shared key are combined with `f`; keys
      * unique to either side keep their value (see MapOpsStd). */
    def reduce(that: MM[K, V], f:Rep[((V, V))=>V]): MM[K, V]
    def isEmpty: Rep[Boolean] = (size === 0)
    def contains(k: Rep[K]): Rep[Boolean]
    /** Value under `key`; behavior for a missing key is implementation-defined. */
    def apply(key: Rep[K]): Rep[V]
    /** By-name convenience overload: lifts `otherwise` into a staged thunk. */
    def getOrElse(key: Rep[K], otherwise: => Rep[V]): Rep[V] = getOrElseBy(key, fun { _: Rep[Unit] => otherwise })
    def getOrElseBy(key: Rep[K], otherwise: Rep[Unit => V]): Rep[V]
    /** Applies `exists` to the value under `key` when present, otherwise
      * evaluates the `otherwise` thunk. */
    def mapValueIfExists[T: Elem](key: Rep[K], exists: Rep[V] => Rep[T], otherwise: () => Rep[T]): Rep[T] =
      mapValueIfExistsBy(key, fun(exists), fun { _: Rep[Unit] => otherwise() })
    def mapValueIfExistsBy[T](key: Rep[K], exists: Rep[V => T], otherwise: Rep[Unit => T]): Rep[T]
    /** In-place insert/overwrite of `key -> value`. */
    def update(key: Rep[K], value: Rep[V]): Rep[Unit]
    /** Convenience overload of mapValuesBy lifting the Scala function with `fun`. */
    def mapValues[T:Elem](f: Rep[V] => Rep[T]): MM[K, T] = mapValuesBy(fun(f))
    def mapValuesBy[T:Elem](f: Rep[V => T]): MM[K, T]
    def keys: Arr[K]
    def values: Arr[V]
    def toArray: Arr[(K,V)]
    def size: Rep[Int]
  }
  /** Factory methods delegating to the context-specific constructors below. */
  object MMap {
    def empty[K: Elem, V: Elem] = emptyMap[K, V]
    def create[K: Elem, V: Elem](count:Rep[Int], f:Rep[Int=>(K,V)]) = createMap[K, V](count,f)
    def make[K: Elem, V: Elem](name:Rep[String]) = makeMap[K, V](name)
    def fromArray[K: Elem, V: Elem](arr: Arr[(K, V)]) = mapFromArray(arr)
  }
  /** Type descriptor (Elem) for MMap[K, V]; K is invariant, V is covariant. */
  case class MMapElem[K, V](eKey: Elem[K], eValue: Elem[V]) extends Elem[MMap[K, V]] {
    // Reflection tag assembled from the key/value tags.
    lazy val tag = {
      implicit val kt = eKey.tag
      implicit val vt = eValue.tag
      weakTypeTag[MMap[K, V]]
    }
    protected def getDefaultRep = emptyMap[K, V](eKey, eValue)
    lazy val typeArgs = TypeArgs("K" -> (eKey -> Invariant), "V" -> (eValue -> Covariant))
  }
  implicit def mMapElement[K, V](implicit eKey: Elem[K], eValue: Elem[V]): MMapElem[K, V] = new MMapElem(eKey, eValue)
  // Downcasts a generic Elem[MMap[K, V]] to MMapElem to reach eKey/eValue.
  def extendMMapElement[K, V](implicit elem: Elem[MMap[K, V]]) = elem.asInstanceOf[MMapElem[K, V]]
  // Context-specific primitives, implemented by MapOpsStd / MapOpsExp.
  implicit def resolveMMap[K: Elem, V: Elem](map: MM[K, V]): MMap[K, V]
  def emptyMap[K: Elem, V: Elem]: MM[K, V]
  def mapFromArray[K: Elem, V: Elem](arr: Arr[(K, V)]): MM[K, V]
  def createMap[K: Elem, V: Elem](count: Rep[Int], f: Rep[Int=>(K,V)]): MM[K, V]
  def makeMap[K: Elem, V: Elem](name: Rep[String]): MM[K, V]
}
/**
 * Sequential (unstaged) realization of the map DSL: an MMap is simply a
 * wrapper around a mutable scala Map and every operation runs eagerly.
 */
trait MapOpsStd extends MapOps { self: ScalanStd =>
  case class SeqMMap[K, V](val impl: Map[K, V])(implicit val elemKey: Elem[K], val elemValue: Elem[V]) extends MMap[K, V] {
    // Recover the backing mutable map of another MMap; in the sequential
    // context every MMap must be a SeqMMap, anything else is a fatal error.
    private def underlyingOf[A,B](that: MMap[A, B]) = that match {
      case seq: SeqMMap[A, B] => seq.impl
      case _ => !!!(s"$that implements MMap in sequential context but is not SeqMap")
    }

    // Entries of `that` override entries of this map on key clashes.
    def union(that: MM[K, V]): MM[K, V] = impl ++ underlyingOf(that)

    def difference(that: MM[K, V]): MM[K, V] = impl -- underlyingOf(that).keys

    // Inner join: keep only keys present on both sides, pairing their values.
    def join[V2:Elem](that: MM[K, V2]): MM[K, (V, V2)] = {
      val rhs = underlyingOf(that)
      val joined = Map.empty[K, (V, V2)]
      impl.foreach { case (k, v) =>
        rhs.get(k) match {
          case Some(v2) => joined(k) = (v, v2)
          case None =>
        }
      }
      joined
    }

    // Outer merge: keys on both sides are combined with f; keys present on
    // only one side keep their value unchanged.
    def reduce(that: MM[K, V], f:Rep[((V,V))=>V]): MM[K, V] = {
      val rhs = underlyingOf(that)
      val merged = Map.empty[K, V]
      impl.foreach { case (k, v) =>
        merged(k) = rhs.get(k) match {
          case Some(other) => f((v, other))
          case None => v
        }
      }
      rhs.foreach { case (k, v) =>
        if (!impl.contains(k)) merged(k) = v
      }
      merged
    }

    def contains(key: Rep[K]): Rep[Boolean] = impl.contains(key)
    def apply(key: Rep[K]): Rep[V] = impl(key)

    // Map.getOrElse takes its default by-name, so `otherwise` only runs on a miss.
    def getOrElseBy(key: Rep[K], otherwise: Rep[Unit => V]): Rep[V] =
      impl.getOrElse(key, otherwise(()))

    def mapValueIfExistsBy[T](key: Rep[K], exists: Rep[V => T], otherwise: Rep[Unit => T]): Rep[T] =
      impl.get(key) match {
        case Some(v) => exists(v)
        case None => otherwise(())
      }

    def update(key: Rep[K], value: Rep[V]): Rep[Unit] = { impl(key) = value; () }
    def keys: Arr[K] = impl.keys.toArray(elemKey.classTag)
    def values: Arr[V] = impl.values.toArray(elemValue.classTag)
    def toArray: Arr[(K, V)] = impl.toArray
    def size: Rep[Int] = impl.size

    // Fresh map with every value transformed by f; keys are unchanged.
    def mapValuesBy[T:Elem](f: Rep[V => T]): MM[K, T] = {
      val transformed = Map.empty[K, T]
      impl.foreach { case (k, v) => transformed(k) = f(v) }
      transformed
    }
  }

  // Any mutable scala Map is usable wherever an MMap is expected.
  implicit def extendMap[K:Elem,V:Elem](m: Map[K,V]): MMap[K,V] = new SeqMMap(m)
  // In the sequential context Rep[A] = A, so resolution is the identity.
  implicit def resolveMMap[K: Elem, V: Elem](map: MM[K, V]): MMap[K, V] = map

  def emptyMap[K: Elem, V: Elem]: MM[K, V] = Map.empty[K, V]
  def mapFromArray[K: Elem, V: Elem](arr: Arr[(K, V)]): MM[K, V] = Map(arr: _*)

  // Builds a map from `count` generated (key, value) pairs; later pairs win
  // when the generator produces duplicate keys.
  def createMap[K: Elem, V: Elem](count:Rep[Int], f:Rep[Int=>(K,V)]): MM[K, V] = {
    val result = Map.empty[K, V]
    (0 until count).foreach { i =>
      val (k, v) = f(i)
      result(k) = v
    }
    result
  }

  // The `name` is only meaningful in staged contexts; sequentially it is ignored.
  def makeMap[K: Elem, V: Elem](name: Rep[String]): MM[K, V] = Map.empty[K, V]
}
trait MapOpsExp extends MapOps with BaseExp { self: ScalanExp =>
  /**
   * Staged implementation of MMap: every operation constructs an IR node
   * (a Def) instead of computing a value; the nodes are interpreted or
   * compiled later by the staging framework.
   */
  abstract class MMapDef[K, V](implicit val elemKey: Elem[K], val elemValue: Elem[V]) extends MMap[K, V] {
    def union(that: MM[K, V]): MM[K, V] = MapUnion(this, that)
    def difference(that: MM[K, V]): MM[K, V] = MapDifference(this, that)
    def join[V2:Elem](that: MM[K, V2]): MM[K, (V, V2)] = MapJoin(this, that)
    def reduce(that: MM[K, V], f:Rep[((V,V))=>V]): MM[K, V] = MapReduce(this, that, f)
    def contains(key: Rep[K]): Rep[Boolean] = MapContains(this, key)
    def apply(key: Rep[K]): Rep[V] = MapApply(this, key)
    def getOrElseBy(key: Rep[K], otherwise: Rep[Unit => V]): Rep[V] = {
      // NOTE(review): passes `self` where the other operations pass `this` --
      // presumably intentional (MapGetOrElse takes the staged MM reference),
      // but worth confirming.
      MapGetOrElse(self, key, otherwise)
    }
    def mapValueIfExistsBy[T](key: Rep[K], exists:Rep[V => T], otherwise: Rep[Unit => T]): Rep[T] = {
      // The result Elem is recovered from the range of the `otherwise` thunk.
      implicit val eT: Elem[T] = otherwise.elem.eRange
      MapMapValueIfExists(this, key, exists, otherwise)
    }
    def update(key: Rep[K], value: Rep[V]): Rep[Unit] = MapUpdate(this, key, value)
    def size: Rep[Int] = MapSize(this)
    def keys: Arr[K] = MapKeys(this)
    def values: Arr[V] = MapValues(this)
    def toArray: Arr[(K, V)] = MapToArray(this)
    def mapValuesBy[T:Elem](f: Rep[V => T]): MM[K, T] = MapTransformValues[K,V,T](this, f)
  }
  // def emptyMap[K: Elem, V: Elem]: PM[K, V] = EmptyMap[K, V]()
  // Smart constructors for the IR nodes below. The empty map is encoded as a
  // zero-element generator node; a dedicated EmptyMap node exists further down
  // but this constructor does not use it (see the commented-out line above).
  def emptyMap[K: Elem, V: Elem]: MM[K, V] = MapUsingFunc(0, fun { i => (element[K].defaultRepValue, element[V].defaultRepValue) })
  def mapFromArray[K: Elem, V: Elem](arr: Arr[(K, V)]) = MapFromArray(arr)
  def createMap[K: Elem, V: Elem](count:Rep[Int], f:Rep[Int=>(K,V)]) = MapUsingFunc(count, f)
  def makeMap[K: Elem, V: Elem](name: Rep[String]): MM[K, V] = MakeMap[K,V](name)
case class AppendMultiMap[K, V](map: Rep[MMap[K, ArrayBuffer[V]]], key: Rep[K], value: Rep[V])(implicit elemKey: Elem[K], val eV: Elem[V])
extends MMapDef[K,ArrayBuffer[V]]
case class EmptyMap[K, V]()(implicit eK: Elem[K], eV: Elem[V]) extends MMapDef[K, V] {
override def equals(other:Any) = {
other match {
case that:EmptyMap[_,_] => (this.selfType equals that.selfType)
case _ => false
}
}
}
case class MapFromArray[K, V](arr: Arr[(K, V)])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MapUsingFunc[K, V](count: Rep[Int], f:Rep[Int => (K,V)])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MakeMap[K, V](ctx: Rep[String])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MapUnion[K, V](left: MM[K, V], right: MM[K, V])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MapDifference[K, V](left: MM[K, V], right: MM[K, V])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MapJoin[K, V1, V2](left: MM[K, V1], right: MM[K, V2])(implicit elemKey: Elem[K], val elemV1: Elem[V1], val elemV2: Elem[V2]) extends MMapDef[K, (V1, V2)]
case class MapReduce[K, V](left: MM[K, V], right: MM[K, V], f:Rep[((V, V)) => V])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
case class MapContains[K, V](map: MM[K, V], key: Rep[K])(implicit val eK: Elem[K], val eV: Elem[V]) extends BaseDef[Boolean]
case class MapApply[K, V](map: MM[K, V], key: Rep[K])(implicit val eK: Elem[K], val eV: Elem[V]) extends BaseDef[V]
case class MapGetOrElse[K, V]
(map: MM[K, V], key: Rep[K], otherwise: Rep[Unit=>V])
(implicit val eK: Elem[K] = extendMMapElement(map.elem).eKey,
override val selfType: Elem[V] = extendMMapElement(map.elem).eValue) extends BaseDef[V]
case class MapMapValueIfExists[K, V, T](map: MM[K, V], key: Rep[K], ifExists: Rep[V => T], otherwise: Rep[Unit=>T])(implicit val eK: Elem[K], val eV: Elem[V], selfType: Elem[T]) extends BaseDef[T]
case class MapUpdate[K, V](map: MM[K, V], key: Rep[K], value: Rep[V])(implicit val eK: Elem[K], val eV: Elem[V]) extends BaseDef[Unit]
case class MapSize[K, V](map: MM[K, V])(implicit val eK: Elem[K], val eV: Elem[V]) extends BaseDef[Int]
case class MapToArray[K, V](map: MM[K, V])(implicit val eK: Elem[K], val eV: Elem[V]) extends ArrayDef[(K, V)]
case class MapKeys[K, V](map: MM[K, V])(implicit val eK: Elem[K], val eV: Elem[V]) extends ArrayDef[K]
case class MapValues[K, V](map: MM[K, V])(implicit val eK: Elem[K], val eV: Elem[V]) extends ArrayDef[V]
case class MapTransformValues[K, V, T](map: MM[K, V], f: Rep[V => T])(implicit elemKey: Elem[K], val eV: Elem[V], val eT: Elem[T]) extends MMapDef[K, T]
case class VarMM[K, V](map: MM[K, V])(implicit elemKey: Elem[K], elemValue: Elem[V]) extends MMapDef[K, V]
  // Resolves a staged map symbol back to an MMap: either the MMapDef node that defines it,
  // or a VarMM wrapper when only the bare symbol (e.g. a lambda-bound variable) is available.
  implicit def resolveMMap[K: Elem, V: Elem](sym: MM[K, V]): MMap[K, V] = sym match {
    case Def(d: MMapDef[_, _]) => d.asInstanceOf[MMap[K, V]]
    case s: Exp[_] => {
      val pmElem = s.elem.asInstanceOf[MMapElem[K, V]]
      VarMM(sym)(pmElem.eKey, pmElem.eValue)
    }
    case _ => ???("cannot resolve MMap", sym)
  }
  // Graph rewriting rules for map nodes: replace a node with a cheaper equivalent when the
  // structure of its inputs allows it. Falls through to super.rewriteDef otherwise.
  override def rewriteDef[T](d: Def[T]) = d match {
    // Rebuilding a map by zipping its own keys with its own values yields the map itself.
    case MapFromArray(arr @ Def(ArrayZip(Def(MapKeys(m1)), Def(MapValues(m2))))) if (m1 == m2) => m1
    // This rule is only valid if the array has distinct values for the keys
    //    case MapKeys(Def(d@MapFromArray(arr: Rep[Array[(k,v)]]))) => {
    //      implicit val eK = d.elemKey.asElem[k]
    //      implicit val eV = d.elemValue.asElem[v]
    //      implicit val eKV = PairElem(eK,eV)
    //      array_map(arr, fun({a: Rep[(k,v)] => a._1})(toLazyElem(eKV), eK))(eK)
    //    }
    // The values of a map built from an array are the second components of the array.
    case MapValues(Def(d@MapFromArray(arr: Rep[Array[(k,v)]]))) => {
      implicit val eK = d.elemKey.asElem[k]
      implicit val eV = d.elemValue.asElem[v]
      implicit val eKV = PairElem(eK,eV)
      array_map(arr, fun({a: Rep[(k,v)] => a._2})(toLazyElem(eKV), eV))(eV)
    }
    /* TODO: uncomment when flatten will be supported
    case MapUnion(Def(m1Def: MapFromArray[k,v]), Def(m2Def: MapFromArray[_,_])) => {
      implicit val eK = m2Def.elemKey.asElem[k]
      implicit val eV = m2Def.elemValue.asElem[v]
      implicit val eKV = PairElem(eK,eV)
      MMap.fromArray(array_concat(m1Def.arr,m2Def.arr.asRep[Array[(k,v)]])(eKV))(eK,eV)
    }*/
    // Union of a function-generated map with an array-backed one: if the generated map is
    // empty (count == 0) the union is the array-backed map; otherwise concatenate both key
    // and value sequences and rebuild a single array-backed map.
    case MapUnion(Def(MapUsingFunc(count, func)), m2@Def(m2Def: MapFromArray[k,v])) => {
      (count == toRep(0)) match {
        case true => m2
        case _ =>
          implicit val eK = m2Def.elemKey.asElem[k]
          implicit val eV = m2Def.elemValue.asElem[v]
          implicit val eKV = PairElem(eK,eV)
          val keys1 = array_map(m2Def.arr, fun({a: Rep[(k,v)]=>a._1})(toLazyElem(eKV), eK))(eK)
          val vals1 = array_map(m2Def.arr, fun({a: Rep[(k,v)]=>a._2})(toLazyElem(eKV), eV))(eV)
          val keys2 = SArray.rangeFrom0(count).map {i: Rep[Int] => func(i)._1}(eK)
          val vals2 = SArray.rangeFrom0(count).map {i: Rep[Int] => func(i)._2}(eV)
          val keys = array_concat(keys1, keys2)(eK)
          val vals = array_concat(vals1, vals2)(eV)
          MMap.fromArray(array_zip(keys,vals))(eK,eV)
      }
    }
    case _ =>
      super.rewriteDef(d)
  }
}
| scalan/scalan | core/src/main/scala/scalan/collections/MapOps.scala | Scala | apache-2.0 | 12,440 |
package com.github.alixba.vast
/** A VAST `Creative` element: the creative payload together with its optional identifiers. */
trait Creative extends VASTElement {
  /** The concrete creative payload. */
  def element: CreativeElement
  /** Optional `id` attribute of this creative. */
  def id: Option[String]
  /** Optional `sequence` attribute — presumably the ordering among creatives; confirm against the VAST spec. */
  def sequence: Option[Int]
  /** Optional `AdID` attribute. */
  def AdID: Option[String]
}
/** Marker trait for the payload carried by a [[Creative]]. */
trait CreativeElement extends VASTElement
| AlixBa/vast | src/main/scala/com/github/alixba/vast/Creative.scala | Scala | mit | 230 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import minitest.TestSuite
import monix.execution.Ack.Continue
import monix.execution.schedulers.TestScheduler
import monix.execution.exceptions.DummyException
import monix.reactive.{Observable, Observer}
import scala.concurrent.Promise
object MiscFailedSuite extends TestSuite[TestScheduler] {
  def setup() = TestScheduler()
  def tearDown(s: TestScheduler) = {
    assert(s.state.tasks.isEmpty, "TestScheduler should have no pending tasks")
  }
  test("should complete") { implicit s =>
    // `.failed` on a source that never errors emits nothing and just completes.
    var emitted = 0
    var completed = false
    Observable
      .now(1)
      .failed
      .unsafeSubscribeFn(new Observer[Throwable] {
        def onNext(elem: Throwable) = {
          emitted += 1
          Continue
        }
        def onError(ex: Throwable) = ()
        def onComplete() = completed = true
      })
    assertEquals(emitted, 0)
    assert(completed)
  }
  test("should signal error without back-pressure applied") { implicit s =>
    // The error must be emitted and the stream completed before the downstream
    // acknowledges the emitted item (the promise is completed only afterwards).
    var completed = false
    var emittedError: Throwable = null
    val ack = Promise[Continue.type]()
    Observable
      .raiseError(DummyException("dummy"))
      .failed
      .unsafeSubscribeFn(new Observer[Throwable] {
        def onNext(elem: Throwable) = {
          emittedError = elem
          ack.future
        }
        def onError(ex: Throwable) = ()
        def onComplete() = completed = true
      })
    assertEquals(emittedError, DummyException("dummy"))
    assert(completed)
    ack.success(Continue)
    s.tick()
  }
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/MiscFailedSuite.scala | Scala | apache-2.0 | 2,212 |
package coursier.publish.fileset
import java.nio.charset.StandardCharsets
import java.time.{Instant, ZoneOffset}
import coursier.publish
import coursier.publish.{Content, Pom}
import coursier.publish.dir.DirContent
import coursier.core.{ModuleName, Organization, Version}
import coursier.maven.MavenRepository
import coursier.publish.Pom.{Developer, License}
import coursier.publish.download.Download
import coursier.publish.download.logger.DownloadLogger
import coursier.util.Task
import scala.xml.{Elem, XML}
/** A subset of a [[FileSet]], with particular semantic.
*/
sealed abstract class Group extends Product with Serializable {
  /** [[FileSet]] corresponding to this [[Group]]
    */
  def fileSet: FileSet
  /** Organization (Maven group ID) this [[Group]] belongs to. */
  def organization: Organization
  /** Changes any reference to the first coordinates to the second ones.
    *
    * Coordinates can be our coordinates, or those of dependencies, exclusions, …
    *
    * @param now last-modified time for any file edited in the process
    */
  def transform(
    map: Map[(Organization, ModuleName), (Organization, ModuleName)],
    now: Instant
  ): Task[Group]
  /** Changes version references of the given modules, each mapped from its first version
    * to its second one.
    *
    * @param now last-modified time for any file edited in the process
    */
  def transformVersion(
    map: Map[(Organization, ModuleName), (String, String)],
    now: Instant
  ): Task[Group]
  /** Ensure the files of this [[Group]] are ordered (POMs last for [[Group.Module]], etc.) */
  def ordered: Group
}
object Group {
/** Subset of a [[FileSet]] corresponding to a particular module.
*
* That is to the files of a particular - published - version of a given module.
*/
final case class Module(
organization: Organization,
name: ModuleName,
version: String,
snapshotVersioning: Option[String],
files: DirContent
) extends Group {
def module: coursier.core.Module =
coursier.core.Module(organization, name, Map.empty)
def baseDir: Seq[String] =
organization.value.split('.').toSeq ++ Seq(name.value, version)
def fileSet: FileSet = {
val dirPath = Path(baseDir)
FileSet(
files.elements.map {
case (n, c) =>
(dirPath / n) -> c
}
)
}
    // Strips the leading `<name>-<version>` (or `<name>-<snapshotVersioning>`) prefix from
    // every file name, keeping only the suffix (".pom", "-sources.jar", …).
    // maven-metadata.xml files are left untouched. Inverse of `updateFileNames`.
    private def stripPrefixes: Module = {
      val prefix = s"${name.value}-${snapshotVersioning.getOrElse(version)}"
      val updatedContent = DirContent(
        files.elements.map {
          case (n, c) =>
            val newName =
              if (n == "maven-metadata.xml" || n.startsWith("maven-metadata.xml."))
                n
              else {
                // All non-metadata files are expected to carry the module prefix.
                assert(n.startsWith(prefix), s"nope for $n.startsWith($prefix)")
                n.stripPrefix(prefix)
              }
            (newName, c)
        }
      )
      copy(files = updatedContent)
    }
private def updateFileNames: Module = {
val newPrefix = s"${name.value}-${snapshotVersioning.getOrElse(version)}"
val updatedContent = DirContent(
files.elements.collect {
case (n, c) =>
val newName =
if (n == "maven-metadata.xml" || n.startsWith("maven-metadata.xml."))
n
else
s"$newPrefix$n"
(newName, c)
}
)
copy(files = updatedContent)
}
private def updateOrgNameVer(
org: Option[Organization],
name: Option[ModuleName],
version: Option[String]
): Module = {
val base =
version match {
case Some(v) if !v.endsWith("SNAPSHOT") =>
clearSnapshotVersioning
case _ =>
this
}
base
.stripPrefixes
.copy(
organization = org.getOrElse(base.organization),
name = name.getOrElse(base.name),
version = version.getOrElse(base.version)
)
.updateFileNames
}
/** Adjust the organization / name / version.
*
* Possibly changing those in POM or maven-metadata.xml files.
*/
def updateMetadata(
org: Option[Organization],
name: Option[ModuleName],
version: Option[String],
licenses: Option[Seq[License]],
developers: Option[Seq[Developer]],
homePage: Option[String],
gitDomainPath: Option[(String, String)],
distMgmtRepo: Option[(String, String, String)],
now: Instant
): Task[Module] =
if (
org.isEmpty && name.isEmpty && version.isEmpty && licenses.isEmpty && developers.isEmpty && homePage.isEmpty && gitDomainPath.isEmpty
)
Task.point(this)
else
updateOrgNameVer(org, name, version)
.updatePom(now, licenses, developers, homePage, gitDomainPath, distMgmtRepo)
.flatMap(_.updateMavenMetadata(now))
    /** This [[Module]] without its maven-metadata.xml entry, if it had one. */
    def removeMavenMetadata: Module =
      copy(
        files = files.remove("maven-metadata.xml")
      )
    /** Reverts snapshot-versioned file names to plain `<name>-<version>` ones and drops the
      * associated maven-metadata.xml. No-op when no snapshot versioning is set.
      */
    def clearSnapshotVersioning: Module =
      if (snapshotVersioning.isEmpty)
        this
      else
        stripPrefixes
          .removeMavenMetadata
          .copy(snapshotVersioning = None)
          .updateFileNames
def transform(
map: Map[(Organization, ModuleName), (Organization, ModuleName)],
now: Instant
): Task[Module] = {
val base = map.get((organization, name)) match {
case None => Task.point(this)
case Some(to) =>
updateMetadata(Some(to._1), Some(to._2), None, None, None, None, None, None, now)
}
base.flatMap { m =>
m.transformPom(now) { elem =>
map.foldLeft(elem) {
case (acc, (from, to)) =>
Pom.transformDependency(acc, from, to)
}
}
}
}
def transformVersion(
map: Map[(Organization, ModuleName), (String, String)],
now: Instant
): Task[Module] =
transformPom(now) { elem =>
map.foldLeft(elem) {
case (acc, ((org, name), (fromVer, toVer))) =>
Pom.transformDependencyVersion(acc, org, name, fromVer, toVer)
}
}
private def pomFileName: String =
s"${name.value}-${snapshotVersioning.getOrElse(version)}.pom"
/** The POM file of this [[Module]], if any.
*/
def pomOpt: Option[(String, Content)] = {
val fileName = pomFileName
files
.elements
.collectFirst {
case (`fileName`, c) =>
(fileName, c)
}
}
def dependenciesOpt: Task[Seq[coursier.core.Module]] =
pomOpt match {
case None => Task.point(Nil)
case Some((_, content)) =>
content.contentTask.flatMap { b =>
val s = new String(b, StandardCharsets.UTF_8)
coursier.maven.MavenRepository.parseRawPomSax(s) match {
case Left(e) =>
Task.fail(new Exception(s"Error parsing POM: $e"))
case Right(proj) =>
Task.point(proj.dependencies.map(_._2.module))
}
}
}
/** Adjust the POM of this [[Module]], so that it contains the same org / name / version as this
* [[Module]].
*
* Calling this method, or running its [[Task]], doesn't write anything on disk. The new POM
* stays in memory (via [[Content.InMemory]]). The returned [[Module]] only lives in memory.
* The only effect here is possibly reading stuff on disk.
*
* @param now:
* if the POM is edited, its last modified time.
*/
def updatePom(
now: Instant,
licenses: Option[Seq[License]],
developers: Option[Seq[Developer]],
homePage: Option[String],
gitDomainPath: Option[(String, String)],
distMgmtRepo: Option[(String, String, String)]
): Task[Module] =
transformPom(now) { elem =>
var elem0 = elem
elem0 = Pom.overrideOrganization(organization, elem0)
elem0 = Pom.overrideModuleName(name, elem0)
elem0 = Pom.overrideVersion(version, elem0)
for (l <- licenses)
elem0 = Pom.overrideLicenses(l, elem0)
for (l <- developers)
elem0 = Pom.overrideDevelopers(l, elem0)
for (h <- homePage)
elem0 = Pom.overrideHomepage(h, elem0)
for ((domain, path) <- gitDomainPath)
elem0 = Pom.overrideScm(domain, path, elem0)
for ((id, name, url) <- distMgmtRepo)
elem0 = Pom.overrideDistributionManagementRepository(id, name, url, elem0)
elem0
}
def transformPom(now: Instant)(f: Elem => Elem): Task[Module] =
pomOpt match {
case None =>
Task.fail(
new Exception(s"No POM found (files: ${files.elements.map(_._1).mkString(", ")})")
)
case Some((fileName, c)) =>
c.contentTask.map { pomBytes =>
val elem = f(XML.loadString(new String(pomBytes, StandardCharsets.UTF_8)))
val pomContent0 =
Content.InMemory(now, Pom.print(elem).getBytes(StandardCharsets.UTF_8))
val updatedContent = files.update(fileName, pomContent0)
copy(files = updatedContent)
}
}
/** Adds a maven-metadata.xml file to this module if it doesn't have one already.
* @param now:
* last modified time of the added maven-metadata.xml, if one is indeed added.
*/
def addMavenMetadata(now: Instant): Module = {
val mavenMetadataFound = files
.elements
.exists(_._1 == "maven-metadata.xml")
if (mavenMetadataFound)
this
else {
val updatedContent = {
val b = {
val content = coursier.publish.MavenMetadata.create(
organization,
name,
None,
None,
Nil,
now
)
coursier.publish.MavenMetadata.print(content).getBytes(StandardCharsets.UTF_8)
}
files.update("maven-metadata.xml", Content.InMemory(now, b))
}
copy(files = updatedContent)
}
}
def mavenMetadataContentOpt = files
.elements
.find(_._1 == "maven-metadata.xml")
.map(_._2)
/** Updates the maven-metadata.xml file of this [[Module]], so that it contains the same org /
* name.
* @param now:
* if maven-metadata.xml is edited, its last modified time.
*/
def updateMavenMetadata(now: Instant): Task[Module] =
mavenMetadataContentOpt match {
case None =>
Task.point(this)
case Some(content) =>
content.contentTask.map { b =>
val updatedMetadataBytes = {
val elem = XML.loadString(new String(b, StandardCharsets.UTF_8))
val newContent = coursier.publish.MavenMetadata.update(
elem,
Some(organization),
Some(name),
None,
None,
Nil,
Some(now.atOffset(ZoneOffset.UTC).toLocalDateTime)
)
coursier.publish.MavenMetadata.print(newContent).getBytes(StandardCharsets.UTF_8)
}
val updatedContent =
files.update("maven-metadata.xml", Content.InMemory(now, updatedMetadataBytes))
copy(files = updatedContent)
}
}
def addSnapshotVersioning(now: Instant, ignoreExtensions: Set[String]): Task[Module] = {
assert(version.endsWith("-SNAPSHOT") || version.endsWith(".SNAPSHOT"))
val versionPrefix = version.stripSuffix("SNAPSHOT").dropRight(1)
val initialFilePrefix = s"${name.value}-${snapshotVersioning.getOrElse(version)}"
def updatedVersion(buildNumber: Int) =
s"$versionPrefix-${now.atOffset(ZoneOffset.UTC).toLocalDateTime.format(publish.MavenMetadata.timestampPattern)}-$buildNumber"
def artifacts(buildNumber: Int) = {
val updatedVersion0 = updatedVersion(buildNumber)
files.elements.collect {
case (n, _) if n.startsWith(initialFilePrefix + ".") =>
if (ignoreExtensions.exists(e => n.endsWith("." + e)))
Nil
else
Seq((
None,
n.stripPrefix(initialFilePrefix + "."),
updatedVersion0,
now.atOffset(ZoneOffset.UTC).toLocalDateTime
))
case (n, _) if n.startsWith(initialFilePrefix + "-") =>
val suffix = n.stripPrefix(initialFilePrefix + "-")
val idx = suffix.indexOf('.')
if (idx < 0)
???
else if (ignoreExtensions.exists(e => n.endsWith("." + e)))
Nil
else {
val classifier = suffix.take(idx)
val ext = suffix.drop(idx + 1)
Seq((
Some(classifier),
ext,
updatedVersion0,
now.atOffset(ZoneOffset.UTC).toLocalDateTime
))
}
case (n, _) if n.startsWith("maven-metadata.xml.") =>
Nil
case ("maven-metadata.xml", _) =>
Nil
case (other, _) =>
// unrecognized file…
???
}.flatten
}
def files0(buildNumber: Int) = {
val updatedVersion0 = updatedVersion(buildNumber)
val updatedFilePrefix = s"${name.value}-$updatedVersion0"
DirContent(
files.elements.map {
case (n, c)
if n.startsWith(initialFilePrefix + ".") || n.startsWith(initialFilePrefix + "-") =>
(updatedFilePrefix + n.stripPrefix(initialFilePrefix), c)
case t =>
t
}
)
}
val content = mavenMetadataContentOpt match {
case None =>
Task.point {
val buildNumber = 1
buildNumber -> publish.MavenMetadata.createSnapshotVersioning(
organization,
name,
version,
(now.atOffset(ZoneOffset.UTC).toLocalDateTime, buildNumber),
now,
artifacts(buildNumber)
)
}
case Some(c) =>
c.contentTask.map { b =>
val elem = XML.loadString(new String(b, StandardCharsets.UTF_8))
val latestSnapshotParams =
publish.MavenMetadata.currentSnapshotVersioning(elem).getOrElse {
???
}
val latestSnapshotVer =
s"$versionPrefix-${latestSnapshotParams._2.atOffset(ZoneOffset.UTC).toLocalDateTime.format(publish.MavenMetadata.timestampPattern)}-${latestSnapshotParams._1}"
if (snapshotVersioning.contains(latestSnapshotVer))
latestSnapshotParams._1 -> elem // kind of meh, this is in case the source already has snapshot ver, and the dest hasn't, so the current maven metadata only comes from the source
else {
val buildNumber = latestSnapshotParams._1 + 1
buildNumber -> publish.MavenMetadata.updateSnapshotVersioning(
elem,
None,
None,
Some(version),
Some((now.atOffset(ZoneOffset.UTC).toLocalDateTime, buildNumber)),
Some(now.atZone(ZoneOffset.UTC).toLocalDateTime),
artifacts(buildNumber)
)
}
}
}
content.map {
case (buildNumber, elem) =>
val b = publish.MavenMetadata.print(elem).getBytes(StandardCharsets.UTF_8)
val files1 = files0(buildNumber).update("maven-metadata.xml", Content.InMemory(now, b))
copy(
snapshotVersioning = Some(updatedVersion(buildNumber)),
files = files1
)
}
}
def ordered: Module = {
// POM file last
// checksum before underlying file
// signatures before underlying file
val pomFileName0 = pomFileName
val (pomFiles, other) = files.elements.partition {
case (n, _) =>
n == pomFileName0 || n.startsWith(pomFileName0 + ".")
}
val sortedFiles = DirContent((pomFiles.sortBy(_._1) ++ other.sortBy(_._1)).reverse)
copy(files = sortedFiles)
}
}
/** Subset of a [[FileSet]] corresponding to maven-metadata.xml files.
*
* This correspond to the maven-metadata.xml file under org/name/maven-metadata.xml, not the ones
* that can be found under org/name/version/maven-metadata.xml (these are in [[Module]]).
*/
final case class MavenMetadata(
organization: Organization,
name: ModuleName,
files: DirContent
) extends Group {
def module: coursier.core.Module =
coursier.core.Module(organization, name, Map.empty)
def fileSet: FileSet = {
val dirPath = Path(organization.value.split('.').toSeq ++ Seq(name.value))
FileSet(
files.elements.map {
case (n, c) =>
(dirPath / n) -> c
}
)
}
def xmlOpt: Option[Content] = {
val fileName = "maven-metadata.xml"
files
.elements
.collectFirst {
case (`fileName`, c) =>
c
}
}
def updateContent(
org: Option[Organization],
name: Option[ModuleName],
latest: Option[String],
release: Option[String],
addVersions: Seq[String],
now: Instant
): Task[MavenMetadata] =
xmlOpt match {
case None =>
Task.point(this)
case Some(c) =>
c.contentTask.map { b =>
val elem = XML.loadString(new String(b, StandardCharsets.UTF_8))
val updated = coursier.publish.MavenMetadata.update(
elem,
org,
name,
latest,
release,
addVersions,
Some(now.atOffset(ZoneOffset.UTC).toLocalDateTime)
)
val b0 = coursier.publish.MavenMetadata.print(updated)
.getBytes(StandardCharsets.UTF_8)
val c0 = Content.InMemory(now, b0)
copy(
files = files.update("maven-metadata.xml", c0)
)
}
}
def transform(
map: Map[(Organization, ModuleName), (Organization, ModuleName)],
now: Instant
): Task[MavenMetadata] =
map.get((organization, name)) match {
case Some(to) if to != (organization, name) =>
updateContent(
Some(to._1).filter(_ != organization),
Some(to._2).filter(_ != name),
None,
None,
Nil,
now
).map { m =>
m.copy(
organization = to._1,
name = to._2
)
}
case _ =>
Task.point(this)
}
def transformVersion(
map: Map[(Organization, ModuleName), (String, String)],
now: Instant
): Task[MavenMetadata] =
Task.point(this)
def ordered: MavenMetadata = {
// reverse alphabetical order should be enough here (will put checksums and signatures before underlying files)
val sortedFiles = DirContent(files.elements.sortBy(_._1).reverse)
copy(files = sortedFiles)
}
}
/** Identify the [[Group]] s each file of the passed [[FileSet]] correspond to.
*/
def split(fs: FileSet): Seq[Group] = {
val byDir = fs.elements.groupBy(_._1.dropLast)
// FIXME Plenty of unhandled errors here
byDir.toSeq.map {
case (dir, elements) =>
val canBeMavenMetadata =
elements.exists(_._1.elements.lastOption.contains("maven-metadata.xml")) &&
!elements.exists(_._1.elements.lastOption.exists(_.endsWith(".pom")))
dir.elements.reverse match {
case Seq(ver, strName, reverseOrg @ _*) if reverseOrg.nonEmpty && !canBeMavenMetadata =>
val org = Organization(reverseOrg.reverse.mkString("."))
val name = ModuleName(strName)
val snapshotVersioningOpt =
if (ver.endsWith("SNAPSHOT"))
Some(elements.map(_._1.elements.last).filter(_.endsWith(".pom")))
.filter(_.nonEmpty)
.map(_.minBy(_.length))
.filter(_.startsWith(s"${name.value}-"))
.map(_.stripPrefix(s"${name.value}-").stripSuffix(".pom"))
.filter(_ != ver)
else
None
val fileNamePrefixes = {
val p = s"${name.value}-${snapshotVersioningOpt.getOrElse(ver)}"
Set(".", "-").map(p + _)
}
def recognized(p: Path): Boolean =
p.elements.lastOption.exists(n => fileNamePrefixes.exists(n.startsWith)) ||
p.elements.lastOption.contains("maven-metadata.xml") ||
p.elements.lastOption.exists(_.startsWith("maven-metadata.xml."))
if (elements.forall(t => recognized(t._1))) {
val strippedDir = elements.map {
case (p, c) =>
p.elements.last -> c
}
Module(org, name, ver, snapshotVersioningOpt, DirContent(strippedDir))
}
else
throw new Exception(
s"Unrecognized files: ${elements.filter(t => !recognized(t._1)).map(_._1.repr).mkString(", ")}"
)
case Seq(strName, reverseOrg @ _*) if reverseOrg.nonEmpty && canBeMavenMetadata =>
val org = Organization(reverseOrg.reverse.mkString("."))
val name = ModuleName(strName)
def recognized(p: Path): Boolean =
p.elements.lastOption.contains("maven-metadata.xml") ||
p.elements.lastOption.exists(_.startsWith("maven-metadata.xml."))
if (elements.forall(t => recognized(t._1))) {
val strippedDir = elements.map {
case (p, c) =>
p.elements.last -> c
}
MavenMetadata(org, name, DirContent(strippedDir))
}
else
sys.error(
s"Unrecognized: ${dir.elements} (${elements.filter(t => !recognized(t._1))})"
)
case _ =>
???
}
}
}
/** Merge [[Group]] s as a [[FileSet]].
*
* Can be "left" if some duplicated [[Module]] s or [[MavenMetadata]] s are found.
*/
def merge(groups: Seq[Group]): Either[String, FileSet] = {
val duplicatedModules = groups
.collect { case m: Module => m }
.groupBy(m => (m.organization, m.name, m.version))
.filter(_._2.lengthCompare(1) > 0)
.iterator
.toMap
val duplicatedMeta = groups
.collect { case m: MavenMetadata => m }
.groupBy(m => (m.organization, m.name))
.filter(_._2.lengthCompare(1) > 0)
.iterator
.toMap
if (duplicatedModules.isEmpty && duplicatedMeta.isEmpty)
Right(groups.foldLeft(FileSet.empty)(_ ++ _.fileSet))
else
???
}
  /** Like [[merge]], but without the duplicate checks (later entries may shadow earlier ones). */
  private[coursier] def mergeUnsafe(groups: Seq[Group]): FileSet =
    FileSet(groups.flatMap(_.fileSet.elements))
/** Ensure all [[Module]] s in the passed `groups` have a corresponding [[MavenMetadata]] group.
*
* @param now:
* if new files are created, their last-modified time.
*/
def addOrUpdateMavenMetadata(groups: Seq[Group], now: Instant): Task[Seq[Group]] = {
val modules = groups
.collect { case m: Group.Module => m }
.groupBy(m => (m.organization, m.name))
val meta = groups
.collect { case m: Group.MavenMetadata => m }
.groupBy(m => (m.organization, m.name))
.mapValues {
case Seq(md) => md
case l => ???
}
.iterator
.toMap
val a = for ((k @ (org, name), m) <- modules.toSeq) yield {
val versions = m.map(_.version)
val latest = versions.map(Version(_)).max.repr
val releaseOpt = Some(versions.filter(publish.MavenMetadata.isReleaseVersion).map(Version(_)))
.filter(_.nonEmpty)
.map(_.max.repr)
meta.get(k) match {
case None =>
val elem = publish.MavenMetadata.create(
org,
name,
Some(latest),
releaseOpt,
versions,
now
)
val b = publish.MavenMetadata.print(elem).getBytes(StandardCharsets.UTF_8)
val content = DirContent(Seq(
"maven-metadata.xml" -> Content.InMemory(now, b)
))
Seq(Task.point(k -> Group.MavenMetadata(org, name, content)))
case Some(md) =>
Seq(md.updateContent(
None,
None,
Some(latest),
releaseOpt,
versions,
now
).map(k -> _))
}
}
Task.gather.gather(a.flatten)
.map(l => modules.values.toSeq.flatten ++ (meta ++ l.toMap).values.toSeq)
}
  /** Fetches the per-module maven-metadata.xml of each passed (organization, name) from
    * `repository`, returning one [[MavenMetadata]] group per file actually found
    * (missing files are silently skipped).
    */
  def downloadMavenMetadata(
    orgNames: Seq[(Organization, ModuleName)],
    download: Download,
    repository: MavenRepository,
    logger: DownloadLogger
  ): Task[Seq[MavenMetadata]] = {
    val root = repository.root + "/"
    Task.gather.gather {
      orgNames.map {
        case (org, name) =>
          val url = root + s"${org.value.split('.').mkString("/")}/${name.value}/maven-metadata.xml"
          download.downloadIfExists(url, repository.authentication, logger).map(_.map {
            case (lastModifiedOpt, b) =>
              // download and verify checksums too?
              MavenMetadata(
                org,
                name,
                DirContent(
                  Seq(
                    "maven-metadata.xml" -> Content.InMemory(
                      lastModifiedOpt.getOrElse(Instant.EPOCH),
                      b
                    )
                  )
                )
              )
          })
      }
    }.map(_.flatten)
  }
  /** Fetches the version-directory maven-metadata.xml of module `m` from `repository`,
    * storing it among `m`'s files when found; returns `m` unchanged otherwise.
    */
  def downloadSnapshotVersioningMetadata(
    m: Module,
    download: Download,
    repository: MavenRepository,
    logger: DownloadLogger
  ): Task[Module] = {
    // assert(m.snapshotVersioning.isEmpty)
    val root = repository.root + "/"
    val url = root + s"${m.baseDir.mkString("/")}/maven-metadata.xml"
    download.downloadIfExists(url, repository.authentication, logger).map {
      case Some((lastModifiedOpt, b)) =>
        m.copy(
          files = m.files.update(
            "maven-metadata.xml",
            Content.InMemory(lastModifiedOpt.getOrElse(Instant.EPOCH), b)
          )
        )
      case None =>
        m
    }
  }
def mergeMavenMetadata(
groups: Seq[MavenMetadata],
now: Instant
): Task[Seq[MavenMetadata]] = {
val tasks = groups
.groupBy(m => (m.organization, m.name))
.valuesIterator
.map { l =>
val (dontKnow, withContent) = l.partition(_.xmlOpt.isEmpty)
// dontKnow should be empty anyway…
val merged = withContent match {
case Seq() => sys.error("can't possibly happen")
case Seq(m) => Task.point(m)
case Seq(m, others @ _*) =>
m.xmlOpt.get.contentTask.flatMap { b =>
val mainElem = XML.loadString(new String(b, StandardCharsets.UTF_8))
others.foldLeft(Task.point(mainElem)) {
case (mainElemTask, m0) =>
for {
mainElem0 <- mainElemTask
b <- m0.xmlOpt.get.contentTask
} yield {
val elem = XML.loadString(new String(b, StandardCharsets.UTF_8))
val info = publish.MavenMetadata.info(elem)
publish.MavenMetadata.update(
mainElem0,
None,
None,
info.latest,
info.release,
info.versions,
info.lastUpdated
)
}
}.map { elem =>
val b = publish.MavenMetadata.print(elem).getBytes(StandardCharsets.UTF_8)
m.copy(
files = m.files.update("maven-metadata.xml", Content.InMemory(now, b))
)
}
}
}
merged.map(dontKnow :+ _)
}
.toSeq
Task.gather.gather(tasks).map(_.flatten)
}
}
| coursier/coursier | modules/publish/src/main/scala/coursier/publish/fileset/Group.scala | Scala | apache-2.0 | 27,984 |
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package al.strategies
import ml.Pattern
import ml.classifiers.Learner
import ml.models.Model
// Density-weighted label-utility active-learning strategy: selects the unlabeled instance
// maximizing (1 - margin), weighted by its average similarity to the unlabeled pool
// (exponent beta) and penalized by its similarity to the labeled set (exponent alpha).
case class DensityWeightedLabelUtility2(learner: Learner, pool: Seq[Pattern], distance_name: String, alpha: Double = 1, beta: Double = 1, debug: Boolean = false)
  extends StrategyWithLearnerAndMapsLU with MarginMeasure {
  override val toString = "Density Weighted LU a" + alpha + " b" + beta + " (" + distance_name + ")"
  // LaTeX abbreviation used in reports.
  val abr = "\\\\textbf{LU" + distance_name.take(3) + "}"
  //+ beta
  // Strategy id: base code per distance measure, shifted by the (alpha, beta) configuration.
  // Only the (1, 1) and (0.5, 0.5) configurations are allowed.
  val id = if (alpha == 1 && beta == 1 || alpha == 0.5 && beta == 0.5) distance_name match {
    case "eucl" => 36 + (100 * (1 - alpha)).toInt
    case "cheb" => 38 + (100 * (1 - alpha)).toInt
    case "maha" => 39 + (100 * (1 - alpha)).toInt
    case "manh" => 37 + (100 * (1 - alpha)).toInt
  } else throw new Error("Parametros inesperados para DWLU.")
  // Picks the unlabeled pattern with the best density-weighted utility score.
  protected def next(mapU: => Map[Pattern, Double], mapsL: => Seq[Map[Pattern, Double]], current_model: Model, unlabeled: Seq[Pattern], labeled: Seq[Pattern], hist: Seq[Int]) = {
    val us = unlabeled.size
    val selected = unlabeled maxBy { x =>
      // Average similarity of x to the unlabeled pool.
      val similarityU = mapU(x) / us
      val similaritiesL = simL(mapsL, x, hist)
      (1 - margin(current_model)(x)) * math.pow(similarityU, beta) / math.pow(similaritiesL, alpha)
    }
    selected
  }
  // Similarity of `patt` to the labeled set: product over classes of the per-class average
  // similarity, each raised to that class's share of the labeled instances (hist).
  def simL(mapsL: => Seq[Map[Pattern, Double]], patt: Pattern, hist: Seq[Int]) = {
    val tot = hist.sum
    mapsL.zipWithIndex.map { case (m, lab) =>
      val n = hist(lab).toDouble
      val p = n / tot
      math.pow(m(patt) / n, p)
    }.product
  }
}
| active-learning/active-learning-scala | src/main/scala/al/strategies/DensityWeightedLabelUtility2.scala | Scala | gpl-2.0 | 2,359 |
import de.tototec.sbuild._
import de.tototec.sbuild.ant._
import de.tototec.sbuild.ant.tasks._
@version("0.7.1")
@classpath(
"mvn:org.sbuild:org.sbuild.plugins.sbuildplugin:0.3.0",
"mvn:org.apache.ant:ant:1.8.4",
"mvn:org.sbuild:org.sbuild.plugins.mavendeploy:0.1.0"
)
/** SBuild build script for the SBuild HTTP plugin.
  *
  * Registering targets and configuring plugins are side effects performed in the
  * constructor body against the implicit [[Project]]; the order of the statements
  * below therefore matters.
  */
class SBuild(implicit _project: Project) {

  // Artifact coordinates and layout of this plugin build.
  val namespace = "org.sbuild.plugins.http"
  val version = "0.0.9000"
  val url = "https://github.com/SBuild-org/sbuild-http-plugin"
  val sourcesJar = s"target/${namespace}-${version}-sources.jar"
  val sourcesDir = "src/main/scala"
  // Version of SBuild used to compile the plugin vs. the minimal version
  // required at runtime (recorded in the manifest below).
  val sbuildVersion = "0.7.9010.0-8-0-M1"
  val targetSBuildVersion = "0.7.9013"
  // val sbuildBaseDir = Prop("SBUILD_BASE_DIR", "../..")

  // Aggregate target: building "all" produces the plugin jar and the sources jar.
  Target("phony:all") dependsOn "jar" ~ sourcesJar // ~ "test"

  import org.sbuild.plugins.sbuildplugin._

  val scalaVersion = "2.11.0"
  val scalaBinVersion = "2.11"

  // Pins the exact SBuild + Scala toolchain classpaths used to build the plugin.
  val sbuildPluginVersion = new SBuildVersion {
    override val version: String = sbuildVersion
    override val sbuildClasspath: TargetRefs =
      s"http://sbuild.org/uploads/sbuild/${sbuildVersion}/org.sbuild-${sbuildVersion}.jar"
    override val scalaClasspath: TargetRefs =
      s"mvn:org.scala-lang:scala-library:${scalaVersion}" ~
        s"mvn:org.scala-lang:scala-reflect:${scalaVersion}" ~
        s"mvn:org.scala-lang.modules:scala-xml_${scalaBinVersion}:1.0.1"
    override val scalaCompilerClasspath: TargetRefs =
      s"mvn:org.scala-lang:scala-library:${scalaVersion}" ~
        s"mvn:org.scala-lang:scala-reflect:${scalaVersion}" ~
        s"mvn:org.scala-lang:scala-compiler:${scalaVersion}"
    override val scalaTestClasspath: TargetRefs =
      s"mvn:org.scalatest:scalatest_${scalaBinVersion}:2.1.3"
  }

  // Configures this build as an SBuild plugin build (main class, version, manifest).
  Plugin[SBuildPlugin] configure {
    _.copy(
      pluginClass = s"${namespace}.Http",
      pluginVersion = version,
      deps = Seq(),
      // testDeps = Seq(s"http://sbuild.org/uploads/sbuild/${sbuildVersion}/org.sbuild.runner-${sbuildVersion}.jar"),
      sbuildVersion = sbuildPluginVersion,
      // require the new plugin api change
      manifest = Map("SBuild-Version" -> targetSBuildVersion)
    )
  }

  import org.sbuild.plugins.mavendeploy._

  // Maven Central (Sonatype OSS) deployment coordinates and artifacts.
  // "javadoc" points at a fake (license-only) jar built by the target below,
  // because Central requires a javadoc artifact.
  Plugin[MavenDeploy] configure {
    _.copy(
      groupId = "org.sbuild",
      artifactId = namespace,
      version = version,
      artifactName = Some("SBuild HTTP Plugin"),
      description = Some("An SBuild Plugin that provides HTTP support and a HTTP Scheme Handler."),
      repository = Repository.SonatypeOss,
      scm = Option(Scm(url = url, connection = url)),
      developers = Seq(Developer(id = "TobiasRoeser", name = "Tobias Roeser", email = "le.petit.fou@web.de")),
      gpg = true,
      licenses = Seq(License.Apache20),
      url = Some(url),
      files = Map(
        "jar" -> s"target/${namespace}-${version}.jar",
        "sources" -> s"target/${namespace}-${version}-sources.jar",
        "javadoc" -> "target/fake.jar"
      )
    )
  }

  // Packages all sources plus the license into the sources jar.
  Target(sourcesJar) dependsOn s"scan:${sourcesDir}" ~ "LICENSE.txt" exec { ctx: TargetContext =>
    AntZip(destFile = ctx.targetFile.get, fileSets = Seq(
      AntFileSet(dir = Path(sourcesDir)),
      AntFileSet(file = Path("LICENSE.txt"))
    ))
  }

  // Placeholder javadoc jar (contains only the license) to satisfy Maven Central.
  Target("target/fake.jar") dependsOn "LICENSE.txt" exec { ctx: TargetContext =>
    import de.tototec.sbuild.ant._
    tasks.AntJar(destFile = ctx.targetFile.get, fileSet = AntFileSet(file = "LICENSE.txt".files.head))
  }
}
| SBuild-org/sbuild-http-plugin | org.sbuild.plugins.http/SBuild.scala | Scala | apache-2.0 | 3,405 |
package nest.sparkle.datastream
import scala.concurrent.Future
import scala.reflect.runtime.universe._
/** Asynchronous result of reducing a data stream.
  *
  * Besides the reduced data, it carries state to propagate between reduction
  * runs; passing state along lets multiple streams be stitched together into
  * one logical reduction.
  *
  * @tparam K key type of the reduced stream
  * @tparam V value type of the reduced stream
  * @tparam S type of the state handed to the next reduction run
  */
trait ReductionResult[K,V,S] {
  // The reduced data; values are Option[V] — presumably None marks periods
  // that reduced to nothing. TODO confirm against DataStream's contract.
  def reducedStream: DataStream[K,Option[V]]
  // Completes with the state to seed the next reduction run, or None if none.
  def finishState: Future[Option[S]]
}
object ReductionResult {

  /** A reduction result that only wraps a stream; no state is passed between
    * reduction runs (the state type is `Unit` and always completes finished). */
  def simple[K,V](stream:DataStream[K,Option[V]]): ReductionResult[K,V,Unit] =
    SimpleReductionResult(stream)

  /** A reduction result that carries only state and produces no data
    * (its stream is empty); used to thread `oldState` to the next run. */
  def stateOnly[K:TypeTag,V:TypeTag,S](oldState:Option[S]): ReductionResult[K,V,S] =
    StateOnlyResult(oldState)

  private case class SimpleReductionResult[K,V](override val reducedStream:DataStream[K,Option[V]])
      extends ReductionResult[K,V,Unit] {
    // Some(()) is the unit *value*. The previous `Some(Unit)` referenced the
    // Unit companion object and only compiled via implicit value discarding
    // (a warning in modern scalac).
    def finishState = Future.successful(Some(()))
  }

  private case class StateOnlyResult[K: TypeTag, V: TypeTag,S](oldState:Option[S]) extends ReductionResult[K,V,S] {
    def reducedStream = DataStream.empty[K,Option[V]]
    def finishState = Future.successful(oldState)
  }
}
| mighdoll/sparkle | sparkle/src/main/scala/nest/sparkle/datastream/ReductionResult.scala | Scala | apache-2.0 | 1,235 |
package org.jetbrains.plugins.scala.project.settings
import java.awt._
import java.awt.event.MouseEvent
import java.util
import java.util.Objects
import com.intellij.icons.AllIcons
import com.intellij.openapi.actionSystem.{ActionManager, AnActionEvent, ShortcutSet}
import com.intellij.openapi.module.{Module, ModuleManager}
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.popup.JBPopupFactory
import com.intellij.openapi.ui.{InputValidatorEx, Messages, Splitter}
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.util.{Comparing, Key}
import com.intellij.ui._
import com.intellij.ui.awt.RelativePoint
import com.intellij.ui.treeStructure.Tree
import com.intellij.util.ui.tree.TreeUtil
import com.intellij.util.ui.{EditableTreeModel, JBUI}
import javax.swing._
import javax.swing.event.TreeSelectionEvent
import javax.swing.tree.{DefaultMutableTreeNode, DefaultTreeModel, TreePath}
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions.OptionExt
import org.jetbrains.plugins.scala.project.settings.ScalaCompilerProfilesPanel._
import scala.jdk.CollectionConverters._
/**
* NOTE: This was initially almost the exact clone of [[com.intellij.compiler.options.AnnotationProcessorsPanel]]
* later converted to Scala with some refactorings
*/
class ScalaCompilerProfilesPanel(val myProject: Project) extends JPanel(new BorderLayout) {

  // The implicit profile every module belongs to unless explicitly moved elsewhere.
  private val myDefaultProfile = new ScalaCompilerSettingsProfile("") // TODO: make immutable?
  // All custom (non-default) profiles currently shown in the tree.
  private var myModuleProfiles: Seq[ScalaCompilerSettingsProfile] = Seq.empty
  // Module name -> Module for the whole project. If several modules share a name,
  // only the first is kept.
  private val myAllModulesMap = ModuleManager.getInstance(myProject).getModules
    .groupBy(_.getName)
    .view.mapValues(_.head).toMap
  // Left pane: profiles with their assigned modules as children.
  private val myTree = new Tree(new MyTreeModel)
  private val mySettingsPanel = new ScalaCompilerSettingsPanel // right panel
  // Profile whose settings are currently shown on the right (null until first selection).
  private var mySelectedProfile: ScalaCompilerSettingsProfile = _
  initPanel()

  // Builds the two-pane UI: profile/module tree (left) and settings editor (right).
  private def initPanel(): Unit = {
    val splitter = new Splitter(false, 0.3f)
    add(splitter, BorderLayout.CENTER)
    val treePanel = ToolbarDecorator.createDecorator(myTree)
      .addExtraAction(new MoveToAction)
      .createPanel
    splitter.setFirstComponent(treePanel)
    myTree.setRootVisible(false)
    myTree.setCellRenderer(new MyCellRenderer)
    myTree.addTreeSelectionListener(onNodeSelected)
    val settingsComponent = mySettingsPanel.getComponent
    settingsComponent.setBorder(JBUI.Borders.emptyLeft(6))
    splitter.setSecondComponent(settingsComponent)
    val search = new TreeSpeedSearch(myTree)
    search.setComparator(new SpeedSearchComparator(false))
  }

  // On tree selection change: persist the edits of the previously shown profile,
  // then switch the settings panel to the newly selected one.
  private def onNodeSelected(__ : TreeSelectionEvent): Unit =
    for {
      selectedNodeProfile <- getSelectedProfileNode(myTree).map(_.profile)
      if selectedNodeProfile != mySelectedProfile
    } {
      if (mySelectedProfile != null) {
        mySettingsPanel.saveTo(mySelectedProfile)
      }
      mySelectedProfile = selectedNodeProfile
      mySettingsPanel.setProfile(selectedNodeProfile)
    }

  // Resolves the selection to a profile node; selecting a module resolves to
  // its parent profile.
  private def getSelectedProfileNode(tree: Tree): Option[ProfileNode] =
    Option(tree.getSelectionPath).flatMap { path =>
      val node = path.getLastPathComponent match {
        case moduleNode: MyModuleNode => moduleNode.getParent
        case n => n
      }
      Option(node).filterByType[ProfileNode]
    }

  // Returns the default profile, first flushing pending UI edits if it is the
  // one currently being edited.
  def getDefaultProfile: ScalaCompilerSettingsProfile = {
    val selectedProfile = mySelectedProfile
    if (myDefaultProfile == selectedProfile)
      mySettingsPanel.saveTo(selectedProfile)
    myDefaultProfile
  }

  // Returns the custom profiles, first flushing pending UI edits if a custom
  // profile is the one currently being edited.
  def getModuleProfiles: Seq[ScalaCompilerSettingsProfile] = {
    val selectedProfile = mySelectedProfile
    if (myDefaultProfile != selectedProfile)
      mySettingsPanel.saveTo(selectedProfile)
    myModuleProfiles
  }

  // Loads the panel from the given profiles (copies them so edits stay local
  // until applied), rebuilds the tree and restores the selection.
  def initProfiles(defaultProfile: ScalaCompilerSettingsProfile, moduleProfiles: Seq[ScalaCompilerSettingsProfile]): Unit = {
    myDefaultProfile.initFrom(defaultProfile)
    myModuleProfiles = moduleProfiles.map { profile =>
      val copy = new ScalaCompilerSettingsProfile("") // TODO: make immutable
      copy.initFrom(profile)
      copy
    }
    val root = myTree.getModel.getRoot.asInstanceOf[RootNode]
    root.sync()
    preselectProfile(root)
  }

  // Selects the temporarily-requested profile (see companion object) if any,
  // otherwise the default profile; clears the temporary marker once used.
  private def preselectProfile(root: RootNode): Unit = {
    val tempSelectProfile = getTemporarySelectProfile(myProject).flatMap(findProfileNodeWithName(root, _))
    val nodeToSelect = tempSelectProfile.orElse(Option(TreeUtil.findNodeWithObject(root, myDefaultProfile)))
    nodeToSelect.foreach { node =>
      TreeUtil.selectNode(myTree, node)
      clearTemporarySelectProfile(myProject)
    }
  }

  private def findProfileNodeWithName(root: RootNode, profileName: String): Option[DefaultMutableTreeNode] =
    Option(TreeUtil.findNode(root, {
      case node: ProfileNode => node.profile.getName == profileName
      case _ => false
    }))

  // Tree model wired to the toolbar decorator: add creates a new named profile,
  // remove deletes a custom profile (the default profile cannot be removed).
  private class MyTreeModel() extends DefaultTreeModel(new RootNode) with EditableTreeModel {
    override def addNode(parentOrNeighbour: TreePath): TreePath = {
      val newProfileName = readProfileNameInDialog()
      newProfileName.foreach(createNewProfile)
      null
    }
    private def createNewProfile(newProfileName: String): Unit = {
      val profile = new ScalaCompilerSettingsProfile(newProfileName)
      myModuleProfiles = myModuleProfiles :+ profile
      getRoot.asInstanceOf[DataSynchronizable].sync()
      val node = TreeUtil.findNodeWithObject(getRoot.asInstanceOf[DefaultMutableTreeNode], profile)
      if (node != null) {
        TreeUtil.selectNode(myTree, node)
      }
    }
    // Asks the user for a profile name; returns None when the dialog is cancelled.
    private def readProfileNameInDialog(): Option[String] = {
      val result = Messages.showInputDialog(myProject,
        ScalaBundle.message("scala.compiler.profiles.panel.profile.name"),
        ScalaBundle.message("scala.compiler.profiles.panel.create.new.profile"),
        null, "",
        new ProfileNameValidator
      )
      Option(result)
    }
    // Rejects empty names and names colliding with the default or an existing profile.
    private class ProfileNameValidator extends InputValidatorEx {
      override def checkInput(inputString: String): Boolean = {
        if (StringUtil.isEmpty(inputString)) return false
        if (Objects.equals(inputString, myDefaultProfile.getName)) return false
        !myModuleProfiles.exists(p => Objects.equals(inputString, p.getName))
      }
      override def canClose(inputString: String): Boolean = checkInput(inputString)
      override def getErrorText(inputString: String): String = {
        if (checkInput(inputString)) return null
        if (StringUtil.isEmpty(inputString)) {
          ScalaBundle.message("scala.compiler.profiles.panel.profile.should.not.be.empty")
        } else {
          ScalaBundle.message("scala.compiler.profiles.panel.profile.already.exists", inputString)
        }
      }
    }
    override def removeNode(nodePath: TreePath): Unit = {
      nodePath.getLastPathComponent match {
        case node: ProfileNode =>
          val nodeProfile = node.profile
          // Only custom profiles are removable; fall back to selecting the default.
          if (nodeProfile != myDefaultProfile) {
            if (mySelectedProfile == nodeProfile)
              mySelectedProfile = null
            myModuleProfiles = myModuleProfiles.filter(_ != nodeProfile)
            getRoot.asInstanceOf[DataSynchronizable].sync()
            val foundNode = TreeUtil.findNodeWithObject(getRoot.asInstanceOf[DefaultMutableTreeNode], myDefaultProfile)
            if (foundNode != null) {
              TreeUtil.selectNode(myTree, foundNode)
            }
          }
        case _ =>
      }
    }
    override def removeNodes(path: util.Collection[_ <: TreePath]): Unit = ()
    override def moveNodeTo(parentOrNeighbour: TreePath): Unit = ()
  }

  // Toolbar action: moves the selected module(s) to another profile chosen from a popup.
  private class MoveToAction
    extends AnActionButton(ScalaBundle.message("scala.compiler.profiles.panel.move.to"), AllIcons.Actions.Forward) {
    override def getShortcut: ShortcutSet =
      ActionManager.getInstance.getAction("Move").getShortcutSet
    // Enabled only when a module node is selected and there is somewhere to move it.
    override def isEnabled: Boolean =
      myTree.getSelectionPath match {
        case null => false
        case entry => entry.getLastPathComponent.isInstanceOf[MyModuleNode] && myModuleProfiles.nonEmpty
      }
    override def actionPerformed(e: AnActionEvent): Unit = {
      val selectionPath = myTree.getSelectionPath
      if (selectionPath == null) return
      val moduleNode = selectionPath.getLastPathComponent.asInstanceOf[MyModuleNode]
      val moduleProfile = {
        val profileNode = moduleNode.getParent.asInstanceOf[ProfileNode]
        profileNode.profile
      }
      // Offer every profile except the one the module is already in.
      val otherProfiles = allProfiles.filter(_ != moduleProfile).toList
      val popup = JBPopupFactory.getInstance
        .createPopupChooserBuilder(otherProfiles.asJava)
        .setTitle(ScalaBundle.message("scala.compiler.profiles.panel.move.to"))
        .setItemChosenCallback { profile =>
          if (profile != null) {
            onProfileSelected(moduleNode, moduleProfile, profile)
          }
        }
        .createPopup
      val point = relativePoint(e)
      popup.show(point)
    }
    private def allProfiles: Seq[ScalaCompilerSettingsProfile] = Seq(myDefaultProfile) ++ myModuleProfiles
    // Reassigns every selected module from `nodeProfile` to `selectedProfile`
    // (membership in the default profile is implicit, so it is never stored),
    // then rebuilds the tree and restores selection on the moved module.
    private def onProfileSelected(moduleNode: MyModuleNode, nodeProfile: ScalaCompilerSettingsProfile, selectedProfile: ScalaCompilerSettingsProfile): Unit = {
      val selectedNodes = Option(myTree.getSelectionPaths).getOrElse(Array())
      val selectedModules = selectedNodes.map(_.getLastPathComponent).collect { case n: MyModuleNode => n.module }
      selectedModules
        .foreach { module =>
          if (nodeProfile != myDefaultProfile) {
            nodeProfile.removeModuleName(module.getName)
          }
          if (selectedProfile != myDefaultProfile) {
            selectedProfile.addModuleName(module.getName)
          }
        }
      val root = myTree.getModel.getRoot.asInstanceOf[RootNode]
      root.sync()
      val node1 = TreeUtil.findNodeWithObject(root, moduleNode.module)
      if (node1 != null) {
        TreeUtil.selectNode(myTree, node1)
      }
    }
    // Popup anchor: near the mouse for mouse events, near the selection otherwise.
    private def relativePoint(e: AnActionEvent): RelativePoint = {
      val point =
        if (e.getInputEvent.isInstanceOf[MouseEvent]) getPreferredPopupPoint
        else TreeUtil.getPointForSelection(myTree)
      if (point != null) point
      else TreeUtil.getPointForSelection(myTree)
    }
  }

  // Invisible root; sync() rebuilds its children as default profile + custom profiles.
  private class RootNode extends DefaultMutableTreeNode with DataSynchronizable {
    override def sync(): this.type = {
      val newKids = Seq(new ProfileNode(myDefaultProfile, this, true).sync()) ++
        myModuleProfiles.map(p => new ProfileNode(p, this, false).sync())
      children = new util.Vector(newKids.asJava)
      myTree.getModel.asInstanceOf[DefaultTreeModel].reload()
      expand(myTree)
      this
    }
  }

  // Profile node; sync() rebuilds its module children. The default profile owns
  // every module not claimed by a custom profile.
  private class ProfileNode(val profile: ScalaCompilerSettingsProfile, parent: RootNode, isDefault: Boolean)
    extends DefaultMutableTreeNode(profile)
      with DataSynchronizable {
    setParent(parent)
    override def sync(): this.type = {
      val nodeModules: Seq[Module] = if (isDefault) {
        val nonDefaultProfileModules = myModuleProfiles.flatMap(_.moduleNames).toSet
        myAllModulesMap.toSeq.collect { case (key, value) if !nonDefaultProfileModules.contains(key) => value }
      } else {
        profile.moduleNames.flatMap(myAllModulesMap.get)
      }
      val newChildren = nodeModules
        .sortBy(_.getName)
        .map(m => new MyModuleNode(m, this))
        .asJava
      children = new util.Vector(newChildren)
      this
    }
  }

  // Leaf node for a single module.
  private class MyModuleNode(val module: Module, parent: ProfileNode) extends DefaultMutableTreeNode(module) {
    setParent(parent)
    setAllowsChildren(false)
  }

  // Renders profile nodes as plain text and module nodes with the module icon.
  private class MyCellRenderer extends ColoredTreeCellRenderer {
    override def customizeCellRenderer(tree: JTree, value: Any, selected: Boolean, expanded: Boolean, leaf: Boolean, row: Int, hasFocus: Boolean): Unit =
      value match {
        case node: ProfileNode =>
          append(node.profile.getName)
        case node: MyModuleNode =>
          val module = node.getUserObject.asInstanceOf[Module]
          setIcon(AllIcons.Nodes.Module)
          //noinspection ReferencePassedToNls
          append(module.getName)
        case _ =>
      }
  }
}
object ScalaCompilerProfilesPanel {
  // Project-scoped marker naming the profile to preselect the next time the
  // settings panel opens. Deliberately not persisted: it is cleared as soon as
  // it has been read.
  private val SELECTED_PROFILE_NAME = new Key[String]("SelectedScalaCompilerProfileName")

  private def clearTemporarySelectProfile(project: Project): Unit =
    project.putUserData(ScalaCompilerProfilesPanel.SELECTED_PROFILE_NAME, null)

  private def getTemporarySelectProfile(project: Project): Option[String] =
    Option(project.getUserData(ScalaCompilerProfilesPanel.SELECTED_PROFILE_NAME))

  /** Runs `body` with `profileName` (if any) stored as the profile to preselect,
    * always clearing the marker afterwards. */
  def withTemporarySelectedProfile[T](project: Project, profileName: Option[String])(body: => T): T =
    try {
      profileName.foreach(project.putUserData(SELECTED_PROFILE_NAME, _))
      body
    } finally {
      project.putUserData(SELECTED_PROFILE_NAME, null)
    }

  // Expands rows until a fixed point: expanding a node can reveal new rows,
  // so keep going until the row count stops changing.
  private def expand(tree: JTree): Unit = {
    var previousRowCount = -1
    var currentRowCount = tree.getRowCount
    while (currentRowCount != previousRowCount) {
      (0 until currentRowCount).foreach(tree.expandRow)
      previousRowCount = currentRowCount
      currentRowCount = tree.getRowCount
    }
  }

  // A tree node that can rebuild its children from the current model state.
  private trait DataSynchronizable {
    def sync(): this.type
  }
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/project/settings/ScalaCompilerProfilesPanel.scala | Scala | apache-2.0 | 13,596 |
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
/** Specs for Sentence.tokenize: each expected Token carries the surface form,
  * an optional POS tag (parsed from "word/TAG"), and the token's character
  * offset within the input string. */
class SentenceSpec extends FunSpec with ShouldMatchers {
  describe("Sentence") {
    it("should tokenize an untagged sentence"){
      val s = "I saw a man with a dog ."
      val tokens = Sentence.tokenize(s)
      // No tags present, so every Token has tag None; offsets are positions in `s`.
      tokens should be === List[Token](
        Token("I", None, 0),
        Token("saw", None, 2),
        Token("a", None, 6),
        Token("man", None, 8),
        Token("with", None, 12),
        Token("a", None, 17),
        Token("dog", None, 19),
        Token(".", None, 23))
    }
    it("should tokenize a tagged sentence"){
      val s = "I/PRP saw/VBD a/DT man/NN with/IN a/DT dog/NN ./."
      val tokens = Sentence.tokenize(s)
      // Every token is tagged; offsets are the word's position in the *tagged* string.
      tokens should be === List[Token](
        Token("I", Some("PRP"), 0),
        Token("saw", Some("VBD"), 6),
        Token("a", Some("DT"), 14),
        Token("man", Some("NN"), 19),
        Token("with", Some("IN"), 26),
        Token("a", Some("DT"), 34),
        Token("dog", Some("NN"), 39),
        Token(".", Some("."), 46))
    }
    it("should tokenize a partially tagged sentence") {
      val s = "I saw/VBD a/DT man with/IN a/DT dog ./."
      val tokens = Sentence.tokenize(s)
      // Tags are per-token optional: untagged words get None, tagged words keep their tag.
      tokens should be === List[Token](
        Token("I", None, 0),
        Token("saw", Some("VBD"), 2),
        Token("a", Some("DT"), 10),
        Token("man", None, 15),
        Token("with", Some("IN"), 19),
        Token("a", Some("DT"), 27),
        Token("dog", None, 32),
        Token(".", Some("."), 36))
    }
  }
}
| cordarei/scala-parser-server | src/test/scala/SentenceSpec.scala | Scala | gpl-3.0 | 1,564 |
/*
* Copyright 2017 FOLIO Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.folio_sec.example.domain.simplebank.scala_api.customer
import com.folio_sec.example.domain.simplebank.scala_api.customer_account.CustomerAccountList
import com.folio_sec.example.domain.simplebank.{ CustomerList => JavaCustomerList }
import com.folio_sec.example.domain.simplebank.{ Customer => JavaCustomer }
import com.folio_sec.reladomo.scala_api.TransactionalList
/** Scala-side wrapper over the Reladomo-generated [[JavaCustomerList]].
  *
  * The `withX` builders never mutate `underlying` immediately: each one returns
  * a copy of this wrapper with the corresponding setter call queued in
  * `newValueAppliers`, to be applied later by the transactional machinery.
  */
case class CustomerList(underlying: JavaCustomerList, override val newValueAppliers: Seq[() => Unit] = Seq.empty)
    extends TransactionalList[Customer, JavaCustomer] {

  override def toScalaObject(mithraTxObject: JavaCustomer): Customer = Customer(mithraTxObject)

  // Related accounts, resolved lazily through the underlying relationship.
  lazy val accounts = CustomerAccountList(underlying.getAccounts)

  // Returns a copy of this list with one more pending setter invocation queued.
  private def queueing(applier: () => Unit): CustomerList =
    copy(newValueAppliers = newValueAppliers :+ applier)

  def withCustomerId(customerId: Int) = queueing(() => underlying.setCustomerId(customerId))

  def withFirstName(firstName: String) = queueing(() => underlying.setFirstName(firstName))

  def withLastName(lastName: String) = queueing(() => underlying.setLastName(lastName))

  def withCountry(country: String) = queueing(() => underlying.setCountry(country))
}
| folio-sec/reladomo-scala | reladomo-scala-common/src/test/scala/com/folio_sec/example/domain/simplebank/scala_api/customer/CustomerList.scala | Scala | apache-2.0 | 2,130 |
package org.http4s
import cats.{Applicative, Monoid, Semigroup}
import cats.data.Kleisli
import cats.effect.Sync
import cats.implicits._
@deprecated("Deprecated in favor of Kleisli", "0.18")
object Service {

  /** Wraps the total function `f` as a `Service`. `f` must handle every
    * request it receives; for a `PartialFunction`, use `apply` instead. */
  def lift[F[_], A, B](f: A => F[B]): Service[F, A, B] =
    Kleisli(f)

  /** Wraps the partial function `pf` as a `Service`. Requests outside `pf`'s
    * domain are answered with the `Monoid` zero of `B`, lifted into `F`. */
  def apply[F[_], A, B: Monoid](pf: PartialFunction[A, F[B]])(
      implicit F: Applicative[F]): Service[F, A, B] =
    lift { req =>
      // Zero response built eagerly on each request, used only when pf is undefined.
      val zero = F.pure(Monoid[B].empty)
      pf.applyOrElse(req, (_: A) => zero)
    }

  /** A [[Service]] that answers every request with the effect `b`. */
  def const[F[_], A, B](b: F[B]): Service[F, A, B] =
    Kleisli((_: A) => b)

  /** A [[Service]] that answers every request by delaying the evaluation of `b`. */
  def constVal[F[_], A, B](b: => B)(implicit F: Sync[F]): Service[F, A, B] =
    Kleisli((_: A) => F.delay(b))

  /** Chains two services through the `Semigroup` on `F[B]`: the combined
    * service runs `service` and then `fallback` for each request. */
  def withFallback[F[_], A, B](fallback: Service[F, A, B])(service: Service[F, A, B])(
      implicit M: Semigroup[F[B]]): Service[F, A, B] =
    service |+| fallback

  /** A service that always responds with the zero of `B`. */
  def empty[F[_]: Sync, A, B: Monoid]: Service[F, A, B] =
    constVal(Monoid[B].empty)
}
| aeons/http4s | core/src/main/scala/org/http4s/Service.scala | Scala | apache-2.0 | 1,461 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import scala.collection.GenTraversable
import scala.collection.mutable.Buffer
import scala.collection.mutable.ListBuffer
// SKIP-SCALATESTJS,NATIVE-START
import org.scalatest.CompatParColls.Converters._
// SKIP-SCALATESTJS,NATIVE-END
class ChainSpec extends UnitSpec {
"A Chain" can "be constructed with one element" in {
val onesie = Chain(3)
onesie.length shouldBe 1
onesie(0) shouldBe 3
}
it can "be constructed with many elements" in {
val twosie = Chain(2, 3)
twosie.length shouldBe 2
twosie(0) shouldBe 2
twosie(1) shouldBe 3
val threesie = Chain(1, 2, 3)
threesie.length shouldBe 3
threesie(0) shouldBe 1
threesie(1) shouldBe 2
threesie(2) shouldBe 3
}
it can "be constructed from a GenTraversable via the from method on Chain singleton" in {
Chain.from(List.empty[String]) shouldBe None
Chain.from(List("1")) shouldBe Some(Chain("1"))
Chain.from(List(1, 2, 3)) shouldBe Some(Chain(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-START
Chain.from(List.empty[String].par) shouldBe None
Chain.from(List("1").par) shouldBe Some(Chain("1"))
Chain.from(List(1, 2, 3).par) shouldBe Some(Chain(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-END
}
it can "be constructed with null elements" in {
noException should be thrownBy Chain("hi", null, "ho")
noException should be thrownBy Chain(null)
noException should be thrownBy Chain("ho", null)
}
it can "be constructed using cons-End style" in {
0 :: 1 :: End shouldBe Chain(0, 1)
0 :: 1 :: 2 :: End shouldBe Chain(0, 1, 2)
"zero" :: "one" :: "two" :: End shouldBe Chain("zero", "one", "two")
}
it can "be deconstructed with Chain" in {
Chain(1) match {
case Chain(x) => x shouldEqual 1
case _ => fail()
}
Chain("hi") match {
case Chain(s) => s shouldEqual "hi"
case _ => fail()
}
}
it can "be deconstructed with Many" in {
Chain(1, 2, 3) match {
case Chain(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Chain("hi", "there") match {
case Chain(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Chain(1, 2, 3, 4, 5) match {
case Chain(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
}
it can "be deconstructed with Every" in {
Chain(1, 2, 3) match {
case Chain(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Chain("hi", "there") match {
case Chain(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Chain(1, 2, 3, 4, 5) match {
case Chain(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, _*) => x shouldEqual 1
case _ => fail()
}
Chain("hi") match {
case Chain(s) => s shouldEqual "hi"
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Chain("hi", "there") match {
case Chain(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Chain(1, 2, 3, 4, 5) match {
case Chain(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Chain(1, 2, 3) match {
case Chain(x, _*) => x shouldEqual 1
case _ => fail()
}
}
it should "have an apply method" in {
Chain(1, 2, 3)(0) shouldEqual 1
Chain(1, 2, 3)(1) shouldEqual 2
Chain("hi")(0) shouldEqual "hi"
Chain(7, 8, 9)(2) shouldEqual 9
the [IndexOutOfBoundsException] thrownBy {
Chain(1, 2, 3)(3)
} should have message "3"
}
it should "have a length method" in {
Chain(1).length shouldBe 1
Chain(1, 2).length shouldBe 2
Chain(1, 2, 3, 4, 5).length shouldBe 5
}
it should "have a ++ method that takes another Chain" in {
Chain(1, 2, 3) ++ Chain(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Chain(4, 5) shouldEqual Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) ++ Chain(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
}
it should "have a ++ method that takes an Every" in {
Chain(1, 2, 3) ++ One(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Every(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Every(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
Chain(1, 2, 3) ++ One(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ One(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Every(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Every(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ One(4) shouldEqual Chain(1, 2, 3, 4)
}
it should "have a ++ method that takes a GenTraversableOnce" in {
Chain(1, 2, 3) ++ List(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Vector(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
Chain(1, 2, 3) ++ GenTraversable(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ++ Set(4, 5) shouldEqual Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) ++ Set(4, 5).iterator shouldEqual Chain(1, 2, 3, 4, 5)
}
it should "have a +: method" in {
0 +: Chain(1) shouldBe Chain(0, 1)
0 +: Chain(1, 2) shouldBe Chain(0, 1, 2)
"zero" +: Chain("one", "two") shouldBe Chain("zero", "one", "two")
}
it should "have a :: method" in {
0 :: Chain(1) shouldBe Chain(0, 1)
0 :: Chain(1, 2) shouldBe Chain(0, 1, 2)
"zero" :: Chain("one", "two") shouldBe Chain("zero", "one", "two")
}
it should "have a ::: method that takes another Chain" in {
Chain(1, 2, 3) ::: Chain(4) shouldEqual Chain(1, 2, 3, 4)
Chain(1, 2, 3) ::: Chain(4, 5) shouldEqual Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) ::: Chain(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
}
it should "have a ::: method that takes an Every" in {
One(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Every(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Every(1, 2, 3) ::: Chain(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
One(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
One(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Every(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Every(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
One(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
}
it should "have a ::: method that takes a GenTraversableOnce" in {
List(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Vector(1, 2, 3) ::: Chain(4, 5, 6) shouldEqual Chain(1, 2, 3, 4, 5, 6)
GenTraversable(1) ::: Chain(2, 3, 4) shouldEqual Chain(1, 2, 3, 4)
Set(1, 2) ::: Chain(3, 4, 5) shouldEqual Chain(1, 2, 3, 4, 5)
Set(1, 2).iterator ::: Chain(3, 4, 5) shouldEqual Chain(1, 2, 3, 4, 5)
}
it should "implement PartialFunction[Int, T]" in {
val pf1: PartialFunction[Int, Int] = Chain(1)
pf1.isDefinedAt(0) shouldBe true
pf1.isDefinedAt(1) shouldBe false
}
it should "have a /: method" in {
(0 /: Chain(1))(_ + _) shouldBe 1
(1 /: Chain(1))(_ + _) shouldBe 2
(0 /: Chain(1, 2, 3))(_ + _) shouldBe 6
(1 /: Chain(1, 2, 3))(_ + _) shouldBe 7
}
it should "have a :+ method" in {
Chain(1) :+ 2 shouldBe Chain(1, 2)
Chain(1, 2) :+ 3 shouldBe Chain(1, 2, 3)
}
it should "have a :\\\\ method" in {
(Chain(1) :\\ 0)(_ + _) shouldBe 1
(Chain(1) :\\ 1)(_ + _) shouldBe 2
(Chain(1, 2, 3) :\\ 0)(_ + _) shouldBe 6
(Chain(1, 2, 3) :\\ 1)(_ + _) shouldBe 7
}
it should "have 3 addString methods" in {
Chain("hi").addString(new StringBuilder) shouldBe new StringBuilder("hi")
Chain(1, 2, 3).addString(new StringBuilder) shouldBe new StringBuilder("123")
Chain("hi").addString(new StringBuilder, "#") shouldBe new StringBuilder("hi")
Chain(1, 2, 3).addString(new StringBuilder, "#") shouldBe new StringBuilder("1#2#3")
Chain(1, 2, 3).addString(new StringBuilder, ", ") shouldBe new StringBuilder("1, 2, 3")
Chain("hi").addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<hi>")
Chain(1, 2, 3).addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<1#2#3>")
Chain(1, 2, 3).addString(new StringBuilder, " ( ", ", ", " ) ") shouldBe new StringBuilder(" ( 1, 2, 3 ) ")
}
it should "have an andThen method (inherited from PartialFunction)" in {
val pf1 = Chain(1) andThen (_ + 1)
pf1(0) shouldEqual 2
val pf2 = Chain(1, 2, 3) andThen (_ + 1)
pf2(0) shouldEqual 2
pf2(1) shouldEqual 3
pf2(2) shouldEqual 4
}
it should "have an applyOrElse method (inherited from PartialFunction)" in {
Chain(1, 2, 3).applyOrElse(0, (_: Int) * -1) shouldEqual 1
Chain(1, 2, 3).applyOrElse(1, (_: Int) * -1) shouldEqual 2
Chain(1, 2, 3).applyOrElse(2, (_: Int) * -1) shouldEqual 3
Chain(1, 2, 3).applyOrElse(3, (_: Int) * -1) shouldEqual -3
Chain(1, 2, 3).applyOrElse(4, (_: Int) * -1) shouldEqual -4
}
it should "have an canEqual method" is pending
// it should "have an charAt method" is pending
// Could have an implicit conversion from Every[Char] to CharSequence like
// there is for Seq in Predef.
/*
scala> Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).collect { case i if i > 10 == 0 => i / 2 }
res1: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an collectFirst method" in {
Chain(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) collectFirst { case i if i > 10 => i / 2 } shouldBe None
Chain(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) collectFirst { case i if i > 10 => i / 2 } shouldBe Some(5)
}
/*
scala> Vector(1).combinations(2).toVector
res2: Vector[scala.collection.immutable.Vector[Int]] = Vector()
*/
/*
companion method not relevant. Has an empty and other GenTraverable stuff.
*/
it should "have an compose method, inherited from PartialFunction" in {
val fn: Int => Int = Chain(1, 2, 3).compose((_: Int) + 1)
fn(-1) shouldBe 1
fn(0) shouldBe 2
fn(1) shouldBe 3
}
// contains / containsSlice. The SKIP-DOTTY markers bracket lines excluded when building
// for Dotty/Scala 3 (see the linked dotty issue); the //DOTTY-ONLY line is the
// substitute. The re-run with a case-insensitive NormalizingEquality in scope asserts
// the SAME expected values, showing `contains` does not consult the implicit Equality.
it should "have a contains method" in {
val e = Chain(1, 2, 3)
e.contains(-1) shouldBe false
e.contains(0) shouldBe false
e.contains(1) shouldBe true
e.contains(2) shouldBe true
e.contains(3) shouldBe true
e.contains(4) shouldBe false
val es = Chain("one", "two", "three")
es.contains("one") shouldBe true
es.contains("ONE") shouldBe false
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.contains("one") shouldBe true;
es.contains("ONE") shouldBe false
// SKIP-DOTTY-END
}
// Decided to just overload one for GenSeq and one for Every. Could have done
// what that has a Slicing nature, but that's a bit too fancy pants.
// Note: an empty slice is always contained (matches GenSeq semantics).
it should "have a containsSlice method that takes GenSeq" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.containsSlice(List(2, 3)) shouldBe true
chain.containsSlice(List(2, 3, 5)) shouldBe false
chain.containsSlice(List.empty) shouldBe true
chain.containsSlice(Vector(2, 3)) shouldBe true
chain.containsSlice(Vector(2, 3, 5)) shouldBe false
chain.containsSlice(Vector.empty) shouldBe true
chain.containsSlice(ListBuffer(2, 3)) shouldBe true
chain.containsSlice(ListBuffer(2, 3, 5)) shouldBe false
chain.containsSlice(ListBuffer.empty) shouldBe true
}
it should "have a containsSlice method that takes an Every" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.containsSlice(Every(2, 3)) shouldBe true
chain.containsSlice(Every(2, 3, 5)) shouldBe false
chain.containsSlice(Every(3)) shouldBe true
}
it should "have a containsSlice method that takes a Chain" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.containsSlice(Chain(2, 3)) shouldBe true
chain.containsSlice(Chain(2, 3, 5)) shouldBe false
chain.containsSlice(Chain(3)) shouldBe true
}
// copyToArray overloads: (arr), (arr, start), (arr, start, len) — untouched slots keep
// their original -1 fill value.
it should "have 3 copyToArray methods" in {
val arr1 = Array.fill(5)(-1)
Chain(1, 2, 3, 4, 5).copyToArray(arr1)
arr1 shouldEqual Array(1, 2, 3, 4, 5)
val arr2 = Array.fill(5)(-1)
Chain(1, 2, 3, 4, 5).copyToArray(arr2, 1)
arr2 shouldEqual Array(-1, 1, 2, 3, 4)
val arr3 = Array.fill(5)(-1)
Chain(1, 2, 3, 4, 5).copyToArray(arr3, 1, 2)
arr3 shouldEqual Array(-1, 1, 2, -1, -1)
}
// copyToBuffer appends to the end of the destination buffer.
it should "have a copyToBuffer method" in {
val buf = ListBuffer.fill(3)(-1)
Chain(1, 2, 3, 4, 5).copyToBuffer(buf)
buf shouldEqual Buffer(-1, -1, -1, 1, 2, 3, 4, 5)
}
// corresponds: pairwise predicate; false when lengths differ (shorter or longer).
it should "have a corresponds method that takes a GenSeq" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.corresponds(List(2, 4, 6, 8, 10))(_ * 2 == _) shouldBe true
chain.corresponds(List(2, 4, 6, 8, 11))(_ * 2 == _) shouldBe false
chain.corresponds(List(2, 4, 6, 8))(_ * 2 == _) shouldBe false
chain.corresponds(List(2, 4, 6, 8, 10, 12))(_ * 2 == _) shouldBe false
}
it should "have a corresponds method that takes an Every" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.corresponds(Many(2, 4, 6, 8, 10))(_ * 2 == _) shouldBe true
chain.corresponds(Many(2, 4, 6, 8, 11))(_ * 2 == _) shouldBe false
chain.corresponds(Many(2, 4, 6, 8))(_ * 2 == _) shouldBe false
chain.corresponds(Many(2, 4, 6, 8, 10, 12))(_ * 2 == _) shouldBe false
}
it should "have a corresponds method that takes a Chain" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.corresponds(Chain(2, 4, 6, 8, 10))(_ * 2 == _) shouldBe true
chain.corresponds(Chain(2, 4, 6, 8, 11))(_ * 2 == _) shouldBe false
chain.corresponds(Chain(2, 4, 6, 8))(_ * 2 == _) shouldBe false
chain.corresponds(Chain(2, 4, 6, 8, 10, 12))(_ * 2 == _) shouldBe false
}
it should "have a count method" in {
val chain = Chain(1, 2, 3, 4, 5)
chain.count(_ > 10) shouldBe 0
chain.count(_ % 2 == 0) shouldBe 2
chain.count(_ % 2 == 1) shouldBe 3
}
/*
it should not have a diff method
scala> Vector(1, 2, 3).diff(Vector(1, 2, 3))
res0: scala.collection.immutable.Vector[Int] = Vector()
*/
// distinct preserves first-occurrence order.
it should "have a distinct method" in {
Chain(1, 2, 3).distinct shouldBe Chain(1, 2, 3)
Chain(1).distinct shouldBe Chain(1)
Chain(1, 2, 1, 1).distinct shouldBe Chain(1, 2)
Chain(1, 1, 1).distinct shouldBe Chain(1)
}
/*
it should not have an drop method
scala> Vector(1, 2, 3).drop(3)
res1: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropRight method
scala> Vector(1, 2, 3).dropRight(3)
res0: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropWhile method
scala> Vector(1, 2, 3).dropWhile(_ < 10)
res2: scala.collection.immutable.Vector[Int] = Vector()
*/
// endsWith overloads: GenSeq, Every, and Chain suffixes.
it should "have an endsWith method that takes a GenSeq" in {
Chain(1).endsWith(List(1)) shouldBe true
Chain(1).endsWith(List(1, 2)) shouldBe false
Chain(1, 2).endsWith(List(1, 2)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(List(1, 2)) shouldBe false
Chain(1, 2, 3, 4, 5).endsWith(List(5)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(List(3, 4, 5)) shouldBe true
}
it should "have an endsWith method that takes an Every" in {
Chain(1).endsWith(Every(1)) shouldBe true
Chain(1).endsWith(Every(1, 2)) shouldBe false
Chain(1, 2).endsWith(Every(1, 2)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(Every(1, 2)) shouldBe false
Chain(1, 2, 3, 4, 5).endsWith(Every(5)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(Every(3, 4, 5)) shouldBe true
}
it should "have an endsWith method that takes a Chain" in {
Chain(1).endsWith(Chain(1)) shouldBe true
Chain(1).endsWith(Chain(1, 2)) shouldBe false
Chain(1, 2).endsWith(Chain(1, 2)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(Chain(1, 2)) shouldBe false
Chain(1, 2, 3, 4, 5).endsWith(Chain(5)) shouldBe true
Chain(1, 2, 3, 4, 5).endsWith(Chain(3, 4, 5)) shouldBe true
}
// Structural equality on element sequences.
it should "have an equals method" in {
Chain(1) shouldEqual Chain(1)
Chain(1) should not equal Chain(2)
Chain(1, 2) should not equal Chain(2, 3)
}
it should "have an exists method" in {
Chain(1, 2, 3).exists(_ == 2) shouldBe true
Chain(1, 2, 3).exists(_ == 5) shouldBe false
}
/*
it should not have a filter method
scala> Vector(1, 2, 3).filter(_ > 10)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a filterNot method
scala> Vector(1, 2, 3).filterNot(_ < 10)
res13: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a find method" in {
Chain(1, 2, 3).find(_ == 5) shouldBe None
Chain(1, 2, 3).find(_ == 2) shouldBe Some(2)
}
// flatMap keeps Chain non-emptiness; also checks the for-comprehension desugaring.
it should "have a flatMap method" in {
Chain(1, 2, 3) flatMap (i => Chain(i + 1)) shouldBe Chain(2, 3, 4)
val ss = Chain("hi", "ho")
val is = Chain(1, 2, 3)
(for (s <- ss; i <- is) yield (s, i)) shouldBe
Chain(
("hi",1), ("hi",2), ("hi",3), ("ho",1), ("ho",2), ("ho",3)
)
Chain(5) flatMap (i => Chain(i + 3)) shouldBe Chain(8)
Chain(8) flatMap (i => Chain(i.toString)) shouldBe Chain("8")
}
/*
Can only flatten Chains
scala> Vector(Set.empty[Int], Set.empty[Int]).flatten
res17: scala.collection.immutable.Vector[Int] = Vector()
*/
// TODO: Actually it would make sense to flatten Everys too
it should "have a flatten method that works on nested Chains" in {
Chain(Chain(1, 2, 3), Chain(1, 2, 3)).flatten shouldBe Chain(1, 2, 3, 1, 2, 3)
Chain(Chain(1)).flatten shouldBe Chain(1)
}
// Chains nested inside ordinary collections flatten into those collections.
// SKIP-SCALATESTJS,NATIVE markers exclude the .par line on JS/Native builds.
it can "be flattened when in a GenTraversableOnce" in {
Vector(Chain(1, 2, 3), Chain(1, 2, 3)).flatten shouldBe Vector(1, 2, 3, 1, 2, 3)
List(Chain(1, 2, 3), Chain(1, 2, 3)).flatten shouldBe List(1, 2, 3, 1, 2, 3)
List(Chain(1, 2, 3), Chain(1, 2, 3)).toIterator.flatten.toStream shouldBe List(1, 2, 3, 1, 2, 3).toIterator.toStream
// SKIP-SCALATESTJS,NATIVE-START
List(Chain(1, 2, 3), Chain(1, 2, 3)).par.flatten shouldBe List(1, 2, 3, 1, 2, 3).par
// SKIP-SCALATESTJS,NATIVE-END
}
// fold with both additive (z = 0) and multiplicative (z = 1) monoids.
it should "have a fold method" in {
Chain(1).fold(0)(_ + _) shouldBe 1
Chain(1).fold(1)(_ * _) shouldBe 1
Chain(2).fold(0)(_ + _) shouldBe 2
Chain(2).fold(1)(_ * _) shouldBe 2
Chain(3).fold(0)(_ + _) shouldBe 3
Chain(3).fold(1)(_ * _) shouldBe 3
Chain(1, 2, 3).fold(0)(_ + _) shouldBe 6
Chain(1, 2, 3).fold(1)(_ * _) shouldBe 6
Chain(1, 2, 3, 4, 5).fold(0)(_ + _) shouldBe 15
Chain(1, 2, 3, 4, 5).fold(1)(_ * _) shouldBe 120
}
it should "have a foldLeft method" in {
Chain(1).foldLeft(0)(_ + _) shouldBe 1
Chain(1).foldLeft(1)(_ + _) shouldBe 2
Chain(1, 2, 3).foldLeft(0)(_ + _) shouldBe 6
Chain(1, 2, 3).foldLeft(1)(_ + _) shouldBe 7
}
it should "have a foldRight method" in {
Chain(1).foldRight(0)(_ + _) shouldBe 1
Chain(1).foldRight(1)(_ + _) shouldBe 2
Chain(1, 2, 3).foldRight(0)(_ + _) shouldBe 6
Chain(1, 2, 3).foldRight(1)(_ + _) shouldBe 7
}
it should "have a forall method" in {
Chain(1, 2, 3, 4, 5).forall(_ > 0) shouldBe true
Chain(1, 2, 3, 4, 5).forall(_ < 0) shouldBe false
}
// foreach via method call, for-loop sugar, and a single-element Chain; num accumulates
// across all three phases (6, then 12, then 60).
it should "have a foreach method" in {
var num = 0
Chain(1, 2, 3) foreach (num += _)
num shouldBe 6
for (i <- Chain(1, 2, 3))
num += i
num shouldBe 12
Chain(5) foreach (num *= _)
num shouldBe 60
}
// groupBy: each group is itself a (non-empty) Chain.
it should "have a groupBy method" in {
Chain(1, 2, 3, 4, 5).groupBy(_ % 2) shouldBe Map(1 -> Chain(1, 3, 5), 0 -> Chain(2, 4))
Chain(1, 2, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> Chain(1, 3, 3, 3), 0 -> Chain(2))
Chain(1, 1, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> Chain(1, 1, 3, 3, 3))
Chain(1, 2, 3, 5, 7).groupBy(_ % 2) shouldBe Map(1 -> Chain(1, 3, 5, 7), 0 -> Chain(2))
}
// grouped: fixed-size windows (last may be short); size 0 is rejected.
it should "have a grouped method" in {
Chain(1, 2, 3).grouped(2).toList shouldBe List(Chain(1, 2), Chain(3))
Chain(1, 2, 3).grouped(1).toList shouldBe List(Chain(1), Chain(2), Chain(3))
an [IllegalArgumentException] should be thrownBy { Chain(1, 2, 3).grouped(0).toList }
Chain(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(2).toList shouldBe List(Chain(1, 2), Chain(3, 4), Chain(5, 6), Chain(7, 8), Chain(9, 10))
Chain(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(3).toList shouldBe List(Chain(1, 2, 3), Chain(4, 5, 6), Chain(7, 8, 9), Chain(10))
Chain(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(4).toList shouldBe List(Chain(1, 2, 3, 4), Chain(5, 6, 7, 8), Chain(9, 10))
Chain(1).grouped(2).toList shouldBe List(Chain(1))
Chain(1).grouped(1).toList shouldBe List(Chain(1))
}
it should "have a hasDefiniteSize method" in {
Chain(1).hasDefiniteSize shouldBe true
Chain(1, 2).hasDefiniteSize shouldBe true
}
// hashCode must agree with equals for equal Chains.
it should "have a hashCode method" in {
Chain(1).hashCode shouldEqual Chain(1).hashCode
Chain(1, 2).hashCode shouldEqual Chain(1, 2).hashCode
}
// head/headOption are always safe: a Chain is never empty.
it should "have a head method" in {
Chain("hi").head shouldBe "hi"
Chain(1, 2, 3).head shouldBe 1
}
it should "have a headOption method" in {
Chain("hi").headOption shouldBe Some("hi")
Chain(1, 2, 3).headOption shouldBe Some(1)
}
// indexOf overloads: (elem) and (elem, from). The re-run with a case-insensitive
// Equality in scope asserts unchanged results (implicit Equality not consulted).
it should "have 2 indexOf methods" in {
Chain(1, 2, 3, 4, 5).indexOf(3) shouldBe 2
Chain(1, 2, 3, 4, 5).indexOf(1) shouldBe 0
Chain(1, 2, 3, 4, 5).indexOf(1, 2) shouldBe -1
Chain(1, 2, 3, 4, 5).indexOf(6) shouldBe -1
Chain(1, 2, 3, 4, 5).indexOf(5, 3) shouldBe 4
val es = Chain("one", "two", "three")
es.indexOf("one") shouldBe 0
es.indexOf("one", 1) shouldBe -1
es.indexOf("ONE") shouldBe -1
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.indexOf("one") shouldBe 0;
es.indexOf("ONE") shouldBe -1
// SKIP-DOTTY-END
}
// indexOfSlice overloads ((slice) and (slice, from)) for GenSeq, Every, and Chain
// arguments; results are checked against List's indexOfSlice as the reference.
it should "have 2 indexOfSlice methods that take a GenSeq" in {
Chain(1, 2, 3, 4, 5).indexOfSlice(List(2, 3)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3))
Chain(1, 2, 3, 4, 5).indexOfSlice(List(2, 3), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(List(5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(5))
Chain(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 0) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 0)
Chain(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 1)
Chain(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), -1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), -1)
Chain(1, 2, 3, 4, 5).indexOfSlice(List.empty) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty)
Chain(1, 2, 3, 4, 5).indexOfSlice(List.empty, 6) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty, 6)
Chain(1, 2, 3, 4, 5).indexOfSlice(List.empty, 4) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty, 4)
val es = Chain("one", "two", "three", "four", "five")
val el = List("one", "two", "three", "four", "five")
es.indexOfSlice(List("one", "two")) shouldBe el.indexOfSlice(List("one", "two"))
es.indexOfSlice(List("one", "two"), 1) shouldBe el.indexOfSlice(List("one", "two"), 1)
es.indexOfSlice(List("ONE", "TWO")) shouldBe el.indexOfSlice(List("ONE", "TWO"))
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.indexOfSlice(List("one", "two")) shouldBe 0
es.indexOfSlice(List("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
it should "have 2 indexOfSlice methods that take an Every" in {
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3))
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 0) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 0)
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 1)
Chain(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), -1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), -1)
val es = Chain("one", "two", "three", "four", "five")
val el = List("one", "two", "three", "four", "five")
es.indexOfSlice(Every("one", "two")) shouldBe el.indexOfSlice(Every("one", "two"))
es.indexOfSlice(Every("one", "two"), 1) shouldBe el.indexOfSlice(Every("one", "two"), 1)
es.indexOfSlice(Every("ONE", "TWO")) shouldBe el.indexOfSlice(Every("ONE", "TWO"))
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.indexOfSlice(Every("one", "two")) shouldBe 0;
es.indexOfSlice(Every("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
it should "have 2 indexOfSlice methods that take a Chain" in {
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(2, 3)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3))
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(2, 3), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(2, 3, 5), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5), 3)
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(2, 3, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(1, 2, 3, 4, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5))
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(1, 2, 3, 4, 5), 0) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 0)
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(1, 2, 3, 4, 5), 1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 1)
Chain(1, 2, 3, 4, 5).indexOfSlice(Chain(1, 2, 3, 4, 5), -1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), -1)
val es = Chain("one", "two", "three", "four", "five")
val el = Chain("one", "two", "three", "four", "five")
es.indexOfSlice(Chain("one", "two")) shouldBe el.indexOfSlice(List("one", "two"))
es.indexOfSlice(Chain("one", "two"), 1) shouldBe el.indexOfSlice(List("one", "two"), 1)
es.indexOfSlice(Chain("ONE", "TWO")) shouldBe el.indexOfSlice(List("ONE", "TWO"))
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.indexOfSlice(Chain("one", "two")) shouldBe 0;
es.indexOfSlice(Chain("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
// indexWhere overloads: (pred) and (pred, from).
it should "have 2 indexWhere methods" in {
Chain(1, 2, 3, 4, 5).indexWhere(_ == 3) shouldBe 2
Chain(1, 2, 3, 4, 5).indexWhere(_ == 1) shouldBe 0
Chain(1, 2, 3, 4, 5).indexWhere(_ == 1, 2) shouldBe -1
Chain(1, 2, 3, 4, 5).indexWhere(_ == 6) shouldBe -1
Chain(1, 2, 3, 4, 5).indexWhere(_ == 5, 3) shouldBe 4
}
it should "have an indices method" in {
Chain(1).indices shouldBe List(1).indices
Chain(1, 2, 3).indices shouldBe (0 to 2)
Chain(1, 2, 3, 4, 5).indices shouldBe (0 to 4)
}
/*
it should not have an init method
scala> Vector(1).init
res30: scala.collection.immutable.Vector[Int] = Vector()
it should "have an inits method" is pending
scala> Vector(1).inits.toList
res32: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should "have an intersect method" is pending
scala> Vector(1, 2, 3) intersect Vector(4, 5)
res33: scala.collection.immutable.Vector[Int] = Vector()
*/
// isDefinedAt: valid indices are 0 until length (PartialFunction view of Chain).
it should "have an isDefinedAt method, inherited from PartialFunction" in {
Chain(1).isDefinedAt(0) shouldBe true
Chain(1).isDefinedAt(1) shouldBe false
Chain(1, 2, 3).isDefinedAt(1) shouldBe true
Chain(1, 2, 3).isDefinedAt(2) shouldBe true
Chain(1, 2, 3).isDefinedAt(3) shouldBe false
Chain(1, 2, 3).isDefinedAt(0) shouldBe true
Chain(1, 2, 3).isDefinedAt(-1) shouldBe false
}
// A Chain can never be empty.
it should "have an isEmpty method" in {
Chain("hi").isEmpty shouldBe false
Chain(1, 2, 3).isEmpty shouldBe false
}
it should "have an isTraversableAgain method" in {
Chain("hi").isTraversableAgain shouldBe true
Chain(1, 2, 3).isTraversableAgain shouldBe true
}
it should "have an iterator method" in {
Chain("hi").iterator.toList shouldBe List("hi")
Chain(1, 2, 3).iterator.toList shouldBe List(1, 2, 3)
}
// last is always safe: a Chain is never empty.
it should "have a last method" in {
Chain("hi").last shouldBe "hi"
Chain(1, 2, 3).last shouldBe 3
}
// lastIndexOf overloads: (elem) and (elem, end). The re-run with a case-insensitive
// Equality in scope asserts unchanged results (implicit Equality not consulted).
it should "have 2 lastIndexOf methods" in {
Chain(1, 2, 3, 4, 5).lastIndexOf(2) shouldBe 1
Chain(1, 2, 3, 4, 5, 1).lastIndexOf(1) shouldBe 5
Chain(1, 2, 3, 4, 5).lastIndexOf(0) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOf(5) shouldBe 4
Chain(1, 2, 3, 3, 5).lastIndexOf(3) shouldBe 3
Chain(1).lastIndexOf(1) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOf(2, 3) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOf(2, 0) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOf(2, 1) shouldBe 1
// Consistency fix: `es` was previously built with Every(...), so the string assertions
// exercised Every.lastIndexOf rather than Chain's. Use Chain like the sibling tests.
val es = Chain("one", "two", "three")
es.lastIndexOf("one") shouldBe 0
es.lastIndexOf("two") shouldBe 1
es.lastIndexOf("three") shouldBe 2
es.lastIndexOf("three", 1) shouldBe -1
es.lastIndexOf("ONE") shouldBe -1
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.lastIndexOf("one") shouldBe 0
es.lastIndexOf("ONE") shouldBe -1
// SKIP-DOTTY-END
}
// lastIndexOfSlice overloads ((slice) and (slice, end)) for GenSeq, Every, and Chain.
// Note the empty-slice convention: it "matches" just past the last element (index 5
// for a 5-element Chain), or at the end bound when one is given.
it should "have 2 lastIndexOfSlice methods that take a GenSeq" in {
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3)) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3), 3) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3, 5), 3) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3, 5)) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(5)) shouldBe 4
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5)) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), 0) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), 1) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), -1) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty) shouldBe 5
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty, 6) shouldBe 5
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty, 4) shouldBe 4
val es = Chain("one", "two", "three", "four", "five")
es.lastIndexOfSlice(List("one", "two")) shouldBe 0;
es.lastIndexOfSlice(List("two", "three"), 0) shouldBe -1
es.lastIndexOfSlice(List("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.lastIndexOfSlice(List("one", "two")) shouldBe 0
es.lastIndexOfSlice(List("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
it should "have 2 lastIndexOfSlice methods that take an Every" in {
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3)) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3), 3) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3, 5), 3) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3, 5)) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(5)) shouldBe 4
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5)) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), 0) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), 1) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), -1) shouldBe -1
val es = Chain("one", "two", "three", "four", "five")
es.lastIndexOfSlice(Every("one", "two")) shouldBe 0
es.lastIndexOfSlice(Every("two", "three"), 0) shouldBe -1
es.lastIndexOfSlice(Every("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.lastIndexOfSlice(Every("one", "two")) shouldBe 0
es.lastIndexOfSlice(Every("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
it should "have 2 lastIndexOfSlice methods that take a Chain" in {
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(2, 3)) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(2, 3), 3) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(2, 3, 5), 3) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(2, 3, 5)) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(5)) shouldBe 4
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(1, 2, 3, 4, 5)) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(1, 2, 3, 4, 5), 0) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(1, 2, 3, 4, 5), 1) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexOfSlice(Chain(1, 2, 3, 4, 5), -1) shouldBe -1
val es = Chain("one", "two", "three", "four", "five")
es.lastIndexOfSlice(Chain("one", "two")) shouldBe 0;
es.lastIndexOfSlice(Chain("two", "three"), 0) shouldBe -1
es.lastIndexOfSlice(Chain("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.lastIndexOfSlice(Chain("one", "two")) shouldBe 0
es.lastIndexOfSlice(Chain("ONE", "TWO")) shouldBe -1
// SKIP-DOTTY-END
}
// lastIndexWhere overloads: (pred) and (pred, end).
it should "have 2 lastIndexWhere methods" in {
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 2) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 0) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 5) shouldBe 4
Chain(1, 2, 3, 3, 5).lastIndexWhere(_ == 3) shouldBe 3
Chain(1).lastIndexWhere(_ == 1) shouldBe 0
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 3) shouldBe 1
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 0) shouldBe -1
Chain(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 1) shouldBe 1
}
it should "have an lastOption method" in {
Chain("hi").lastOption shouldBe Some("hi")
Chain(1, 2, 3).lastOption shouldBe Some(3)
}
// lengthCompare: sign of (length - n).
it should "have an lengthCompare method" in {
Chain("hi").lengthCompare(0) should be > 0
Chain("hi").lengthCompare(1) shouldEqual 0
Chain("hi").lengthCompare(2) should be < 0
Chain(1, 2, 3).lengthCompare(0) should be > 0
Chain(1, 2, 3).lengthCompare(1) should be > 0
Chain(1, 2, 3).lengthCompare(2) should be > 0
Chain(1, 2, 3).lengthCompare(3) shouldEqual 0
Chain(1, 2, 3).lengthCompare(4) should be < 0
}
// lift turns the index-to-element PartialFunction into a total Int => Option[T].
it should "have an inherited lift method" in {
val liftedOne = Chain("hi").lift
liftedOne(0) shouldBe Some("hi")
liftedOne(1) shouldBe None
liftedOne(-1) shouldBe None
val liftedMany = Chain(1, 2, 3).lift
liftedMany(0) shouldBe Some(1)
liftedMany(1) shouldBe Some(2)
liftedMany(2) shouldBe Some(3)
liftedMany(3) shouldBe None
liftedMany(-1) shouldBe None
}
it should "have a map method" in {
Chain(1, 2, 3) map (_ + 1) shouldBe Chain(2, 3, 4)
(for (ele <- Chain(1, 2, 3)) yield ele * 2) shouldBe Chain(2, 4, 6)
Chain(5) map (_ + 3) shouldBe Chain(8)
Chain(8) map (_.toString) shouldBe Chain("8")
}
// max/min are total on Chain (never empty); strings use lexicographic Ordering.
it should "have a max method" in {
Chain(1, 2, 3, 4, 5).max shouldBe 5
Chain(1).max shouldBe 1
Chain(-1).max shouldBe -1
Chain("aaa", "ccc", "bbb").max shouldBe "ccc"
}
it should "have a maxBy method" in {
Chain(1, 2, 3, 4, 5).maxBy(_.abs) shouldBe 5
Chain(1, 2, 3, 4, -5).maxBy(_.abs) shouldBe -5
}
it should "have a min method" in {
Chain(1, 2, 3, 4, 5).min shouldBe 1
Chain(1).min shouldBe 1
Chain(-1).min shouldBe -1
Chain("aaa", "ccc", "bbb").min shouldBe "aaa"
}
it should "have a minBy method" in {
Chain(1, 2, 3, 4, 5).minBy(_.abs) shouldBe 1
Chain(-1, -2, 3, 4, 5).minBy(_.abs) shouldBe -1
}
// mkString overloads: (), (sep), (start, sep, end). The no-arg form is excluded on
// Dotty (see linked issue).
it should "have a mkString method" in {
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6705
Chain("hi").mkString shouldBe "hi"
Chain(1, 2, 3).mkString shouldBe "123"
// SKIP-DOTTY-END
Chain("hi").mkString("#") shouldBe "hi"
Chain(1, 2, 3).mkString("#") shouldBe "1#2#3"
Chain(1, 2, 3).mkString(", ") shouldBe "1, 2, 3"
Chain("hi").mkString("<", "#", ">") shouldBe "<hi>"
Chain(1, 2, 3).mkString("<", "#", ">") shouldBe "<1#2#3>"
Chain(1, 2, 3).mkString(" ( ", ", ", " ) ") shouldBe " ( 1, 2, 3 ) "
}
it should "have an nonEmpty method" in {
Chain("hi").nonEmpty shouldBe true
Chain(1, 2, 3).nonEmpty shouldBe true
}
// orElse: fall back to the negating PartialFunction outside the Chain's index range.
it should "have an orElse method, inherited from PartialFunction" in {
val pf: PartialFunction[Int, Int] = { case i => -i }
val f = Chain(1, 2, 3) orElse pf
f(0) shouldBe 1
f(1) shouldBe 2
f(2) shouldBe 3
f(3) shouldBe -3
f(-1) shouldBe 1
}
// padTo: no-op when the target length is <= current length.
it should "have a padTo method" in {
Chain(1).padTo(0, -1) shouldBe Chain(1)
Chain(1).padTo(1, -1) shouldBe Chain(1)
Chain(1).padTo(2, -1) shouldBe Chain(1, -1)
Chain(1).padTo(3, -1) shouldBe Chain(1, -1, -1)
Chain(1, 2, 3).padTo(3, -1) shouldBe Chain(1, 2, 3)
Chain(1, 2, 3).padTo(4, -1) shouldBe Chain(1, 2, 3, -1)
Chain(1, 2, 3).padTo(5, -1) shouldBe Chain(1, 2, 3, -1, -1)
}
// it should not have a par method, because I don't want to support that. If the user
// needs a parallel collection, they can use a parallel collection: chain.toVector.par...
/*
it should not have an partition method
scala> Vector(1, 2, 3, 4, 5).partition(_ > 10)
res10: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
// patch(from, that, replaced): replace `replaced` elements starting at `from`;
// out-of-range `from` clamps to the ends.
it should "have a patch method" in {
Chain(1, 2, 3, 4, 5).patch(2, Chain(-3, -4), 2) shouldBe Chain(1, 2, -3, -4, 5)
Chain(1, 2, 3, 4, 5).patch(2, Chain(-3, -4), 5) shouldBe Chain(1, 2, -3, -4)
Chain(1, 2, 3, 4, 5).patch(2, Chain(-3, -4), 1) shouldBe Chain(1, 2, -3, -4, 4, 5)
Chain(1, 2, 3, 4, 5).patch(4, Chain(-3, -4), 2) shouldBe Chain(1, 2, 3, 4, -3, -4)
Chain(1, 2, 3, 4, 5).patch(5, Chain(-3, -4), 2) shouldBe Chain(1, 2, 3, 4, 5, -3, -4)
Chain(1, 2, 3, 4, 5).patch(6, Chain(-3, -4), 2) shouldBe Chain(1, 2, 3, 4, 5, -3, -4)
Chain(1, 2, 3, 4, 5).patch(0, Chain(-3, -4), 2) shouldBe Chain(-3, -4, 3, 4, 5)
Chain(1, 2, 3, 4, 5).patch(0, Chain(-3, -4), 3) shouldBe Chain(-3, -4, 4, 5)
}
it should "have a permutations method" in {
Chain(1, 2, 3).permutations.toStream shouldBe Stream(Chain(1, 2, 3), Chain(1, 3, 2), Chain(2, 1, 3), Chain(2, 3, 1), Chain(3, 1, 2), Chain(3, 2, 1))
Chain(1).permutations.toStream shouldBe Stream(Chain(1))
Chain(1, 2).permutations.toStream shouldBe Stream(Chain(1, 2), Chain(2, 1))
}
// prefixLength: length of the longest prefix satisfying the predicate.
it should "have a prefixLength method" in {
Chain(1, 2, 3, 4, 5).prefixLength(_ == 1) shouldBe 1
Chain(1, 2, 3, 4, 5).prefixLength(_ == 2) shouldBe 0
Chain(1, 2, 3, 4, 5).prefixLength(_ <= 2) shouldBe 2
Chain(1, 2, 3, 4, 5).prefixLength(_ <= 10) shouldBe 5
Chain(1, 2, 3, 4, 5).prefixLength(_ <= 4) shouldBe 4
}
it should "have a product method" in {
Chain(1, 2, 3).product shouldBe 6
Chain(3).product shouldBe 3
Chain(3, 4, 5).product shouldBe 60
Chain(3, 4, 5).product shouldBe 60
Chain(3.1, 4.2, 5.3).product shouldBe 69.006
}
// reduce family is total on Chain (never empty), so the Option variants always
// return Some.
it should "have a reduce method" in {
Chain(1, 2, 3, 4, 5).reduce(_ + _) shouldBe 15
Chain(1, 2, 3, 4, 5).reduce(_ * _) shouldBe 120
Chain(5).reduce(_ + _) shouldBe 5
Chain(5).reduce(_ * _) shouldBe 5
}
it should "have a reduceLeft method" in {
Chain(1).reduceLeft(_ + _) shouldBe 1
Chain(1).reduceLeft(_ * _) shouldBe 1
Chain(1, 2, 3).reduceLeft(_ + _) shouldBe 6
Chain(1, 2, 3).reduceLeft(_ * _) shouldBe 6
Chain(1, 2, 3, 4, 5).reduceLeft(_ * _) shouldBe 120
}
it should "have a reduceLeftOption method" in {
Chain(1).reduceLeftOption(_ + _) shouldBe Some(1)
Chain(1).reduceLeftOption(_ * _) shouldBe Some(1)
Chain(1, 2, 3).reduceLeftOption(_ + _) shouldBe Some(6)
Chain(1, 2, 3).reduceLeftOption(_ * _) shouldBe Some(6)
Chain(1, 2, 3, 4, 5).reduceLeftOption(_ * _) shouldBe Some(120)
}
it should "have a reduceOption method" in {
Chain(1, 2, 3, 4, 5).reduceOption(_ + _) shouldBe Some(15)
Chain(1, 2, 3, 4, 5).reduceOption(_ * _) shouldBe Some(120)
Chain(5).reduceOption(_ + _) shouldBe Some(5)
Chain(5).reduceOption(_ * _) shouldBe Some(5)
}
// reduceRight is total on Chain (never empty).
it should "have a reduceRight method" in {
// Consistency fix: the first assertion previously used `One(1)` (an Every), so it
// exercised Every.reduceRight in a Chain spec; use Chain(1) like the sibling tests.
Chain(1).reduceRight(_ + _) shouldBe 1
Chain(1).reduceRight(_ * _) shouldBe 1
Chain(1, 2, 3).reduceRight(_ + _) shouldBe 6
Chain(1, 2, 3).reduceRight(_ * _) shouldBe 6
Chain(1, 2, 3, 4, 5).reduceRight(_ * _) shouldBe 120
}
// reduceRightOption always returns Some on Chain (never empty).
it should "have a reduceRightOption method" in {
Chain(1).reduceRightOption(_ + _) shouldBe Some(1)
Chain(1).reduceRightOption(_ * _) shouldBe Some(1)
Chain(1, 2, 3).reduceRightOption(_ + _) shouldBe Some(6)
Chain(1, 2, 3).reduceRightOption(_ * _) shouldBe Some(6)
Chain(1, 2, 3, 4, 5).reduceRightOption(_ * _) shouldBe Some(120)
}
it should "have a reverse method" in {
Chain(33).reverse shouldBe Chain(33)
Chain(33, 34, 35).reverse shouldBe Chain(35, 34, 33)
}
it should "have a reverseIterator method" in {
Chain(3).reverseIterator.toStream shouldBe Stream(3)
// Fix: this previously compared a List against a Stream (it passed only via Scala's
// cross-type Seq equality); compare like with like for clarity.
Chain(1, 2, 3).reverseIterator.toList shouldBe List(3, 2, 1)
}
// reverseMap: map then reverse (single pass in Seq semantics).
it should "have a reverseMap method" in {
Chain(3).reverseMap(_ + 1) shouldBe Chain(4)
Chain(1, 2, 3).reverseMap(_ + 1) shouldBe Chain(4, 3, 2)
}
it should "have a runWith method, inherited from PartialFunction" in {
// Fixed per the TODO that stood here: the test previously called List(...).runWith and
// so exercised List, not Chain. Chain is a PartialFunction from index to element (see
// the applyOrElse, compose, isDefinedAt, lift, and orElse tests in this spec), so
// runWith applies the side-effecting action for defined indices and returns whether
// the index was defined.
var x = 0
val f = Chain(1, 2, 3).runWith(x += _)
f(0) shouldBe true
x shouldBe 1
f(1) shouldBe true
x shouldBe 3
f(2) shouldBe true
x shouldBe 6
f(3) shouldBe false
var y = 0
val g = Chain(3).runWith(y += _)
g(0) shouldBe true
y shouldBe 3
g(0) shouldBe true
y shouldBe 6
}
it should "have a sameElements method that takes a GenIterable" in {
Chain(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 5)) shouldBe true
Chain(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 5, 6)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 4)) shouldBe false
Chain(3).sameElements(List(1, 2, 3, 4, 5)) shouldBe false
Chain(3).sameElements(List(1)) shouldBe false
Chain(3).sameElements(List(3)) shouldBe true
}
it should "have a sameElements method that takes an Every" in {
Chain(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 5)) shouldBe true
Chain(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 5, 6)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 4)) shouldBe false
Chain(3).sameElements(Every(1, 2, 3, 4, 5)) shouldBe false
Chain(3).sameElements(Every(1)) shouldBe false
Chain(3).sameElements(Every(3)) shouldBe true
}
it should "have a sameElements method that takes a Chain" in {
Chain(1, 2, 3, 4, 5).sameElements(Chain(1, 2, 3, 4, 5)) shouldBe true
Chain(1, 2, 3, 4, 5).sameElements(Chain(1, 2, 3, 4)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(Chain(1, 2, 3, 4, 5, 6)) shouldBe false
Chain(1, 2, 3, 4, 5).sameElements(Chain(1, 2, 3, 4, 4)) shouldBe false
Chain(3).sameElements(Chain(1, 2, 3, 4, 5)) shouldBe false
Chain(3).sameElements(Chain(1)) shouldBe false
Chain(3).sameElements(Chain(3)) shouldBe true
}
it should "have a scan method" in {
Chain(1).scan(0)(_ + _) shouldBe Chain(0, 1)
Chain(1, 2, 3).scan(0)(_ + _) shouldBe Chain(0, 1, 3, 6)
Chain(1, 2, 3).scan("z")(_.toString + _.toString) shouldBe Chain("z", "z1", "z12", "z123")
Chain(0).scan("z")(_.toString + _.toString) shouldBe Chain("z", "z0")
}
it should "have a scanLeft method" in {
Chain(1).scanLeft(0)(_ + _) shouldBe Chain(0, 1)
Chain(1, 2, 3).scanLeft(0)(_ + _) shouldBe Chain(0, 1, 3, 6)
Chain(1, 2, 3).scanLeft("z")(_ + _) shouldBe Chain("z", "z1", "z12", "z123")
Chain(0).scanLeft("z")(_ + _) shouldBe Chain("z", "z0")
}
it should "have a scanRight method" in {
Chain(1).scanRight(0)(_ + _) shouldBe Chain(1, 0)
Chain(1, 2, 3).scanRight(0)(_ + _) shouldBe Chain(6, 5, 3, 0)
Chain(1, 2, 3).scanRight("z")(_ + _) shouldBe Chain("123z", "23z", "3z", "z")
Chain(0).scanRight("z")(_ + _) shouldBe Chain("0z", "z")
}
it should "have a segmentLength method" in {
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 7, 0) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ == 7, 0) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 0) shouldBe 10
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 1, 0) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 10) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 8) shouldBe 2
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ < 3, 0) shouldBe 2
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ < 5, 0) shouldBe 4
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 0) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 5) shouldBe 5
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 4) shouldBe 0
Chain(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 6) shouldBe 4
}
// it should "have a seq method" is pending
it should "have a size method" in {
Chain(5).size shouldBe 1
Chain(1, 2, 3).size shouldBe 3
}
/*
it should not have a slice method
scala> Vector(3).slice(0, 0)
res83: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3, 4, 5).slice(2, 1)
res84: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have 2 sliding methods" in {
Chain(1).sliding(1).toList shouldBe List(Chain(1))
Chain(1).sliding(2).toList shouldBe List(Chain(1))
Chain(1, 2, 3).sliding(2).toList shouldBe List(Chain(1, 2), Chain(2, 3))
Chain(1, 2, 3).sliding(1).toList shouldBe List(Chain(1), Chain(2), Chain(3))
Chain(1, 2, 3).sliding(3).toList shouldBe List(Chain(1, 2, 3))
Chain(1, 2, 3, 4, 5).sliding(3).toList shouldBe List(Chain(1, 2, 3), Chain(2, 3, 4), Chain(3, 4, 5))
Chain(1, 2, 3, 4, 5).sliding(2).toList shouldBe List(Chain(1, 2), Chain(2, 3), Chain(3, 4), Chain(4, 5))
Chain(1, 2, 3, 4, 5).sliding(1).toList shouldBe List(Chain(1), Chain(2), Chain(3), Chain(4), Chain(5))
Chain(1, 2, 3, 4, 5).sliding(4).toList shouldBe List(Chain(1, 2, 3, 4), Chain(2, 3, 4, 5))
Chain(1, 2, 3, 4, 5).sliding(5).toList shouldBe List(Chain(1, 2, 3, 4, 5))
Chain(1).sliding(1, 1).toList shouldBe List(Chain(1))
Chain(1).sliding(1, 2).toList shouldBe List(Chain(1))
Chain(1, 2, 3).sliding(1, 1).toList shouldBe List(Chain(1), Chain(2), Chain(3))
Chain(1, 2, 3).sliding(2, 1).toList shouldBe List(Chain(1, 2), Chain(2, 3))
Chain(1, 2, 3).sliding(2, 2).toList shouldBe List(Chain(1, 2), Chain(3))
Chain(1, 2, 3).sliding(3, 2).toList shouldBe List(Chain(1, 2, 3))
Chain(1, 2, 3).sliding(3, 1).toList shouldBe List(Chain(1, 2, 3))
Chain(1, 2, 3, 4, 5).sliding(3, 1).toList shouldBe List(Chain(1, 2, 3), Chain(2, 3, 4), Chain(3, 4, 5))
Chain(1, 2, 3, 4, 5).sliding(2, 2).toList shouldBe List(Chain(1, 2), Chain(3, 4), Chain(5))
Chain(1, 2, 3, 4, 5).sliding(2, 3).toList shouldBe List(Chain(1, 2), Chain(4, 5))
Chain(1, 2, 3, 4, 5).sliding(2, 4).toList shouldBe List(Chain(1, 2), Chain(5))
Chain(1, 2, 3, 4, 5).sliding(3, 1).toList shouldBe List(Chain(1, 2, 3), Chain(2, 3, 4), Chain(3, 4, 5))
Chain(1, 2, 3, 4, 5).sliding(3, 2).toList shouldBe List(Chain(1, 2, 3), Chain(3, 4, 5))
Chain(1, 2, 3, 4, 5).sliding(3, 3).toList shouldBe List(Chain(1, 2, 3), Chain(4, 5))
Chain(1, 2, 3, 4, 5).sliding(3, 4).toList shouldBe List(Chain(1, 2, 3), Chain(5))
}
it should "have a sortBy method" in {
val regFun: String => Int = {
case "one" => 1
case "two" => 2
case "three" => 3
case "four" => 4
case "five" => 5
case "-one" => -1
case "-two" => -2
case "-three" => -3
case "-four" => -4
case "-five" => -5
}
val absFun: String => Int = {
case "one" => 1
case "two" => 2
case "three" => 3
case "four" => 4
case "five" => 5
case "-one" => 1
case "-two" => 2
case "-three" => 3
case "-four" => 4
case "-five" => 5
}
Chain("five", "four", "three", "two", "one").sortBy(regFun) shouldBe Chain("one", "two", "three", "four", "five")
Chain("two", "one", "four", "five", "three").sortBy(regFun) shouldBe Chain("one", "two", "three", "four", "five")
Chain("two", "-one", "four", "-five", "-three").sortBy(regFun) shouldBe Chain("-five", "-three", "-one", "two", "four")
Chain("two", "-one", "four", "-five", "-three").sortBy(absFun) shouldBe Chain("-one", "two", "-three", "four", "-five")
}
it should "have a sortWith method" in {
Chain(1, 2, 3, 4, 5).sortWith(_ > _) shouldBe Chain(5, 4, 3, 2, 1)
Chain(2, 1, 4, 5, 3).sortWith(_ > _) shouldBe Chain(5, 4, 3, 2, 1)
Chain(2, -1, 4, -5, -3).sortWith(_.abs > _.abs) shouldBe Chain(-5, 4, -3, 2, -1)
Chain(2, -1, 4, -5, -3).sortWith(_.abs < _.abs) shouldBe Chain(-1, 2, -3, 4, -5)
}
it should "have a sorted method" in {
Chain(1, 2, 3, 4, 5).sorted shouldBe Chain(1, 2, 3, 4, 5)
Chain(5, 4, 3, 2, 1).sorted shouldBe Chain(1, 2, 3, 4, 5)
Chain(2, 1, 4, 5, 3).sorted shouldBe Chain(1, 2, 3, 4, 5)
}
/*
it should not have a span method
scala> Vector(1, 2, 3, 4, 5).span(_ > 10)
res105: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
it should not have a splitAt method
scala> Vector(1, 2, 3, 4, 5).splitAt(0)
res106: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have 2 startsWith methods that take a GenSeq" in {
Chain(1, 2, 3).startsWith(List(1)) shouldBe true
Chain(1, 2, 3).startsWith(List(1, 2)) shouldBe true
Chain(1, 2, 3).startsWith(List(1, 2, 3)) shouldBe true
Chain(1, 2, 3).startsWith(List(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(List(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(List(1)) shouldBe true
Chain(1).startsWith(List(2)) shouldBe false
Chain(1).startsWith(List(1), 0) shouldBe true
Chain(1).startsWith(List(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(List(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(List(1), 2) shouldBe false
Chain(1, 2, 3).startsWith(List(2), 2) shouldBe false
Chain(1, 2, 3).startsWith(List(2), 1) shouldBe true
Chain(1, 2, 3).startsWith(List(2, 3), 1) shouldBe true
Chain(1, 2, 3).startsWith(List(1, 2, 3), 1) shouldBe false
Chain(1, 2, 3).startsWith(List(1, 2, 3), 0) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(List(3, 4), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(List(3, 4, 5), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(List(3, 4, 5, 6), 2) shouldBe false
}
it should "have 2 startsWith methods that take an Every" in {
Chain(1, 2, 3).startsWith(Every(1)) shouldBe true
Chain(1, 2, 3).startsWith(Every(1, 2)) shouldBe true
Chain(1, 2, 3).startsWith(Every(1, 2, 3)) shouldBe true
Chain(1, 2, 3).startsWith(Every(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(Every(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(Every(1)) shouldBe true
Chain(1).startsWith(Every(2)) shouldBe false
Chain(1).startsWith(Every(1), 0) shouldBe true
Chain(1).startsWith(Every(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(Every(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(Every(1), 2) shouldBe false
Chain(1, 2, 3).startsWith(Every(2), 2) shouldBe false
Chain(1, 2, 3).startsWith(Every(2), 1) shouldBe true
Chain(1, 2, 3).startsWith(Every(2, 3), 1) shouldBe true
Chain(1, 2, 3).startsWith(Every(1, 2, 3), 1) shouldBe false
Chain(1, 2, 3).startsWith(Every(1, 2, 3), 0) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Every(3, 4), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Every(3, 4, 5), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Every(3, 4, 5, 6), 2) shouldBe false
}
it should "have 2 startsWith methods that take a Chain" in {
Chain(1, 2, 3).startsWith(Chain(1)) shouldBe true
Chain(1, 2, 3).startsWith(Chain(1, 2)) shouldBe true
Chain(1, 2, 3).startsWith(Chain(1, 2, 3)) shouldBe true
Chain(1, 2, 3).startsWith(Chain(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(Chain(1, 2, 3, 4)) shouldBe false
Chain(1).startsWith(Chain(1)) shouldBe true
Chain(1).startsWith(Chain(2)) shouldBe false
Chain(1).startsWith(Chain(1), 0) shouldBe true
Chain(1).startsWith(Chain(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(Chain(1), 1) shouldBe false
Chain(1, 2, 3).startsWith(Chain(1), 2) shouldBe false
Chain(1, 2, 3).startsWith(Chain(2), 2) shouldBe false
Chain(1, 2, 3).startsWith(Chain(2), 1) shouldBe true
Chain(1, 2, 3).startsWith(Chain(2, 3), 1) shouldBe true
Chain(1, 2, 3).startsWith(Chain(1, 2, 3), 1) shouldBe false
Chain(1, 2, 3).startsWith(Chain(1, 2, 3), 0) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Chain(3, 4), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Chain(3, 4, 5), 2) shouldBe true
Chain(1, 2, 3, 4, 5).startsWith(Chain(3, 4, 5, 6), 2) shouldBe false
}
it should "have a stringPrefix method" in {
Chain(1).stringPrefix shouldBe "NonEmptyList"
Chain(1, 2, 3).stringPrefix shouldBe "NonEmptyList"
}
it should "have a sum method" in {
Chain(1).sum shouldBe 1
Chain(5).sum shouldBe 5
Chain(1, 2, 3).sum shouldBe 6
Chain(1, 2, 3, 4, 5).sum shouldBe 15
Chain(1.1, 2.2, 3.3).sum shouldBe 6.6
}
/*
it should not have a tail method
scala> Vector(1).tail
res7: scala.collection.immutable.Vector[Int] = Vector()
it should not have a tails method
scala> Vector(1).tails.toList
res8: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should not have a take method
scala> Vector(1).take(0)
res10: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(0)
res11: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(-1)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeRight method
scala> Vector(1).takeRight(1)
res13: scala.collection.immutable.Vector[Int] = Vector(1)
scala> Vector(1).takeRight(0)
res14: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).takeRight(0)
res15: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeWhile method
scala> Vector(1, 2, 3).takeWhile(_ > 10)
res17: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1).takeWhile(_ > 10)
res18: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a to method" in {
import org.scalactic.ColCompatHelper.Factory._
Chain(1).to(List) shouldBe List(1)
Chain(1, 2, 3).to(List) shouldBe List(1, 2, 3)
Chain(1, 2, 3).to(scala.collection.mutable.ListBuffer) shouldBe ListBuffer(1, 2, 3)
Chain(1, 2, 3).to(Vector) shouldBe Vector(1, 2, 3)
}
it should "have a toArray method" in {
Chain(1, 2, 3).toArray should === (Array(1, 2, 3))
Chain("a", "b").toArray should === (Array("a", "b"))
Chain(1).toArray should === (Array(1))
}
it should "have a toBuffer method" in {
Chain(1, 2, 3).toBuffer should === (Buffer(1, 2, 3))
Chain("a", "b").toBuffer should === (Buffer("a", "b"))
Chain(1).toBuffer should === (Buffer(1))
}
it should "have a toIndexedSeq method" in {
Chain(1, 2, 3).toIndexedSeq should === (IndexedSeq(1, 2, 3))
Chain("a", "b").toIndexedSeq should === (IndexedSeq("a", "b"))
Chain(1).toIndexedSeq should === (IndexedSeq(1))
}
it should "have a toIterable method" in {
Chain(1, 2, 3).toIterable should === (Iterable(1, 2, 3))
Chain("a", "b").toIterable should === (Iterable("a", "b"))
Chain(1).toIterable should === (Iterable(1))
}
it should "have a toIterator method" in {
Chain(1, 2, 3).toIterator.toList should === (Iterator(1, 2, 3).toList)
Chain("a", "b").toIterator.toList should === (Iterator("a", "b").toList)
Chain(1).toIterator.toList should === (Iterator(1).toList)
Chain(1, 2, 3).toIterator shouldBe an [Iterator[_]]
Chain("a", "b").toIterator shouldBe an [Iterator[_]]
Chain(1).toIterator shouldBe an [Iterator[_]]
}
it should "have a toList method" in {
Chain(1, 2, 3).toList should === (List(1, 2, 3))
Chain("a", "b").toList should === (List("a", "b"))
Chain(1).toList should === (List(1))
}
it should "have a toMap method" in {
Chain("1" -> 1, "2" -> 2, "3" -> 3).toMap should === (Map("1" -> 1, "2" -> 2, "3" -> 3))
Chain('A' -> "a", 'B' -> "b").toMap should === (Map('A' -> "a", 'B' -> "b"))
Chain("1" -> 1).toMap should === (Map("1" -> 1))
}
it should "have a toSeq method" in {
Chain(1, 2, 3).toSeq should === (Seq(1, 2, 3))
Chain("a", "b").toSeq should === (Seq("a", "b"))
Chain(1).toSeq should === (Seq(1))
}
it should "have a toSet method" in {
Chain(1, 2, 3).toSet should === (Set(1, 2, 3))
Chain("a", "b").toSet should === (Set("a", "b"))
Chain(1).toSet should === (Set(1))
}
it should "have a toStream method" in {
Chain(1, 2, 3).toStream should === (Stream(1, 2, 3))
Chain("a", "b").toStream should === (Stream("a", "b"))
Chain(1).toStream should === (Stream(1))
}
it should "have a toString method" in {
Chain(1, 2, 3).toString should === ("NonEmptyList(1, 2, 3)")
Chain(1, 2, 3).toString should === ("NonEmptyList(1, 2, 3)")
Chain(1).toString should === ("NonEmptyList(1)")
}
it should "have a toVector method" in {
Chain(1, 2, 3).toVector should === (Vector(1, 2, 3))
Chain("a", "b").toVector should === (Vector("a", "b"))
Chain(1).toVector should === (Vector(1))
}
it should "have a transpose method" in {
Chain(Chain(1, 2, 3), Chain(4, 5, 6), Chain(7, 8, 9)).transpose shouldBe Chain(Chain(1, 4, 7), Chain(2, 5, 8), Chain(3, 6, 9))
Chain(Chain(1, 2), Chain(3, 4), Chain(5, 6), Chain(7, 8)).transpose shouldBe Chain(Chain(1, 3, 5, 7), Chain(2, 4, 6, 8))
Chain(Chain(1, 2), Chain(3, 4), Chain(5, 6), Chain(7, 8)).transpose.transpose shouldBe Chain(Chain(1, 2), Chain(3, 4), Chain(5, 6), Chain(7, 8))
Chain(Chain(1, 2, 3), Chain(4, 5, 6), Chain(7, 8, 9)).transpose.transpose shouldBe Chain(Chain(1, 2, 3), Chain(4, 5, 6), Chain(7, 8, 9))
}
it should "have a union method that takes a GenSeq" in {
Chain(1) union List(1) shouldBe Chain(1, 1)
Chain(1) union List(1, 2) shouldBe Chain(1, 1, 2)
Chain(1, 2) union List(1, 2) shouldBe Chain(1, 2, 1, 2)
Chain(1, 2) union List(1) shouldBe Chain(1, 2, 1)
Chain(1, 2) union List(3, 4, 5) shouldBe Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) union List(3, 4, 5) shouldBe Chain(1, 2, 3, 3, 4, 5)
}
it should "have a union method that takes an Every" in {
Chain(1) union Every(1) shouldBe Chain(1, 1)
Chain(1) union Every(1, 2) shouldBe Chain(1, 1, 2)
Chain(1, 2) union Every(1, 2) shouldBe Chain(1, 2, 1, 2)
Chain(1, 2) union Every(1) shouldBe Chain(1, 2, 1)
Chain(1, 2) union Every(3, 4, 5) shouldBe Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) union Every(3, 4, 5) shouldBe Chain(1, 2, 3, 3, 4, 5)
}
it should "have a union method that takes a Chain" in {
Chain(1) union Chain(1) shouldBe Chain(1, 1)
Chain(1) union Chain(1, 2) shouldBe Chain(1, 1, 2)
Chain(1, 2) union Chain(1, 2) shouldBe Chain(1, 2, 1, 2)
Chain(1, 2) union Chain(1) shouldBe Chain(1, 2, 1)
Chain(1, 2) union Chain(3, 4, 5) shouldBe Chain(1, 2, 3, 4, 5)
Chain(1, 2, 3) union Chain(3, 4, 5) shouldBe Chain(1, 2, 3, 3, 4, 5)
}
it should "have an unzip method" in {
Chain((1, 2)).unzip shouldBe (Chain(1),Chain(2))
Chain((1, 2), (3, 4)).unzip shouldBe (Chain(1, 3), Chain(2, 4))
Chain((1, 2), (3, 4), (5, 6)).unzip shouldBe (Chain(1, 3, 5), Chain(2, 4, 6))
}
it should "have an unzip3 method" in {
Chain((1, 2, 3)).unzip3 shouldBe (Chain(1), Chain(2), Chain(3))
Chain((1, 2, 3), (4, 5, 6)).unzip3 shouldBe (Chain(1, 4), Chain(2, 5), Chain(3, 6))
Chain((1, 2, 3), (4, 5, 6), (7, 8, 9)).unzip3 shouldBe (Chain(1, 4, 7), Chain(2, 5, 8), Chain(3, 6, 9))
}
it should "have an updated method" in {
Chain(1).updated(0, 2) shouldBe Chain(2)
an [IndexOutOfBoundsException] should be thrownBy { Chain(1).updated(1, 2) }
Chain(1, 1, 1).updated(1, 2) shouldBe Chain(1, 2, 1)
Chain(1, 1, 1).updated(2, 2) shouldBe Chain(1, 1, 2)
Chain(1, 1, 1).updated(0, 2) shouldBe Chain(2, 1, 1)
}
/*
it should not have 2 view methods, because I don't want to support views in Every
*/
/*
it should not have a zip method
scala> List(1) zip Nil
res0: List[(Int, Nothing)] = List()
*/
it should "have a zipAll method that takes an Iterable" in {
// Empty on right
Chain(1).zipAll(Nil, -1, -2) shouldBe Chain((1, -2))
Chain(1, 2).zipAll(Nil, -1, -2) shouldBe Chain((1, -2), (2, -2))
// Same length
Chain(1).zipAll(List(1), -1, -2) shouldBe Chain((1, 1))
Chain(1, 2).zipAll(List(1, 2), -1, -2) shouldBe Chain((1, 1), (2, 2))
// Non-empty, longer on right
Chain(1).zipAll(List(10, 20), -1, -2) shouldBe Chain((1,10), (-1,20))
Chain(1, 2).zipAll(List(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (-1,30))
// Non-empty, shorter on right
Chain(1, 2, 3).zipAll(List(10, 20), -1, -2) shouldBe Chain((1,10), (2,20), (3,-2))
Chain(1, 2, 3, 4).zipAll(List(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (3,30), (4,-2))
}
it should "have a zipAll method that takes an Every" in {
// Same length
Chain(1).zipAll(Every(1), -1, -2) shouldBe Chain((1, 1))
Chain(1, 2).zipAll(Every(1, 2), -1, -2) shouldBe Chain((1, 1), (2, 2))
// Non-empty, longer on right
Chain(1).zipAll(Every(10, 20), -1, -2) shouldBe Chain((1,10), (-1,20))
Chain(1, 2).zipAll(Every(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (-1,30))
// Non-empty, shorter on right
Chain(1, 2, 3).zipAll(Every(10, 20), -1, -2) shouldBe Chain((1,10), (2,20), (3,-2))
Chain(1, 2, 3, 4).zipAll(Every(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (3,30), (4,-2))
}
it should "have a zipAll method that takes a Chain" in {
// Same length
Chain(1).zipAll(Chain(1), -1, -2) shouldBe Chain((1, 1))
Chain(1, 2).zipAll(Chain(1, 2), -1, -2) shouldBe Chain((1, 1), (2, 2))
// Non-empty, longer on right
Chain(1).zipAll(Chain(10, 20), -1, -2) shouldBe Chain((1,10), (-1,20))
Chain(1, 2).zipAll(Chain(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (-1,30))
// Non-empty, shorter on right
Chain(1, 2, 3).zipAll(Chain(10, 20), -1, -2) shouldBe Chain((1,10), (2,20), (3,-2))
Chain(1, 2, 3, 4).zipAll(Chain(10, 20, 30), -1, -2) shouldBe Chain((1,10), (2,20), (3,30), (4,-2))
}
it should "have a zipWithIndex method" in {
Chain(99).zipWithIndex shouldBe Chain((99,0))
Chain(1, 2, 3, 4, 5).zipWithIndex shouldBe Chain((1,0), (2,1), (3,2), (4,3), (5,4))
}
"End" should "have a pretty toString" in {
End.toString shouldBe "End"
}
}
| dotty-staging/scalatest | scalactic-test/src/test/scala/org/scalactic/ChainSpec.scala | Scala | apache-2.0 | 66,270 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.index
import java.util.Map.Entry
import com.google.common.collect.{ImmutableSet, ImmutableSortedSet}
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.mock.MockConnector
import org.apache.accumulo.core.conf.Property
import org.apache.accumulo.core.data.{Key, Range, Value}
import org.apache.hadoop.io.Text
import org.geotools.filter.identity.FeatureIdImpl
import org.locationtech.geomesa.accumulo.data._
import org.locationtech.geomesa.accumulo.index.legacy.attribute.{AttributeIndexV2, AttributeIndexV3, AttributeIndexV4}
import org.locationtech.geomesa.accumulo.index.legacy.id.RecordIndexV1
import org.locationtech.geomesa.accumulo.index.legacy.z2.{Z2IndexV1, Z2IndexV2}
import org.locationtech.geomesa.accumulo.index.legacy.z3.{Z3IndexV1, Z3IndexV2, Z3IndexV3}
import org.locationtech.geomesa.accumulo.util.GeoMesaBatchWriterConfig
import org.locationtech.geomesa.accumulo.{AccumuloFeatureIndexType, AccumuloIndexManagerType, AccumuloVersion}
import org.locationtech.geomesa.features.SerializationOption.SerializationOptions
import org.locationtech.geomesa.features.{SerializationType, SimpleFeatureDeserializers}
import org.locationtech.geomesa.security.SecurityUtils
import org.locationtech.geomesa.utils.index.IndexMode.IndexMode
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.util.Try
import scala.util.control.NonFatal
object AccumuloFeatureIndex extends AccumuloIndexManagerType with LazyLogging {
  // Column families used within the index tables: F = fully serialized
  // feature, I = reduced "index" entries, B = pre-computed BIN-encoded
  // entries, A = attribute entries.
  val FullColumnFamily = new Text("F")
  val IndexColumnFamily = new Text("I")
  val BinColumnFamily = new Text("B")
  val AttributeColumnFamily = new Text("A")
  val EmptyColumnQualifier = new Text()
  // Every known version of each index family, newest first
  private val SpatialIndices = Seq(Z2Index, XZ2Index, Z2IndexV2, Z2IndexV1)
  private val SpatioTemporalIndices = Seq(Z3Index, XZ3Index, Z3IndexV3, Z3IndexV2, Z3IndexV1)
  private val AttributeIndices = Seq(AttributeIndex, AttributeIndexV4, AttributeIndexV3, AttributeIndexV2)
  private val RecordIndices = Seq(RecordIndex, RecordIndexV1)
  // note: keep in priority order for running full table scans
  // before changing the order, consider the effect of feature validation in
  // org.locationtech.geomesa.index.geotools.GeoMesaFeatureWriter
  override val AllIndices: Seq[AccumuloFeatureIndex] =
    SpatioTemporalIndices ++ SpatialIndices ++ RecordIndices ++ AttributeIndices
  // index versions written for newly created schemas
  override val CurrentIndices: Seq[AccumuloFeatureIndex] =
    Seq(Z3Index, XZ3Index, Z2Index, XZ2Index, RecordIndex, AttributeIndex)
  // the overrides below only narrow the generic manager's return types to the
  // Accumulo-specific index type
  override def indices(sft: SimpleFeatureType, mode: IndexMode): Seq[AccumuloFeatureIndex] =
    super.indices(sft, mode).asInstanceOf[Seq[AccumuloFeatureIndex]]
  override def index(identifier: String): AccumuloFeatureIndex =
    super.index(identifier).asInstanceOf[AccumuloFeatureIndex]
  override def lookup: Map[(String, Int), AccumuloFeatureIndex] =
    super.lookup.asInstanceOf[Map[(String, Int), AccumuloFeatureIndex]]
  // Names of the indices making up each table scheme
  // NOTE(review): presumably used to group indices sharing a table layout —
  // verify against callers
  object Schemes {
    val Z3TableScheme: List[String] = List(AttributeIndex, RecordIndex, Z3Index, XZ3Index).map(_.name)
    val Z2TableScheme: List[String] = List(AttributeIndex, RecordIndex, Z2Index, XZ2Index).map(_.name)
  }
  /**
   * Look up the existing index that could be replaced by the new index, if any.
   *
   * Matches only within the same index family (spatial, spatio-temporal or
   * attribute); record indices are never considered replaceable.
   *
   * @param index new index
   * @param existing list of existing indices
   * @return first existing index from the same family as the new one, if any
   */
  def replaces(index: AccumuloFeatureIndexType,
               existing: Seq[AccumuloFeatureIndexType]): Option[AccumuloFeatureIndexType] = {
    if (SpatialIndices.contains(index)) {
      existing.find(SpatialIndices.contains)
    } else if (SpatioTemporalIndices.contains(index)) {
      existing.find(SpatioTemporalIndices.contains)
    } else if (AttributeIndices.contains(index)) {
      existing.find(AttributeIndices.contains)
    } else {
      None
    }
  }
  /**
   * Maps a simple feature type to a set of default indices based on
   * attributes and when it was created (schema version).
   *
   * Note that schema version has been deprecated and this method should not be called
   * except when transitioning from schema version to per-index versions.
   *
   * @param sft simple feature type
   * @return legacy index implementations supported by the feature type
   */
  def getDefaultIndices(sft: SimpleFeatureType): Seq[AccumuloFeatureIndex] = {
    import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
    lazy val docs =
      "http://www.geomesa.org/documentation/user/jobs.html#updating-existing-data-to-the-latest-index-format"
    val version = sft.getSchemaVersion
    val indices = if (version > 8) {
      // note: version 9 was never in a release
      Seq(Z3IndexV3, XZ3Index, Z2IndexV2, XZ2Index, RecordIndex, AttributeIndexV3)
    } else if (version == 8) {
      Seq(Z3IndexV2, Z2IndexV1, RecordIndexV1, AttributeIndexV2)
    } else if (version > 5) {
      logger.warn("The GeoHash index is no longer supported. Some queries may take longer than normal. To " +
        s"update your data to a newer format, see $docs")
      // this branch only admits versions 6 and 7, so the match is exhaustive
      version match {
        case 7 => Seq(Z3IndexV2, RecordIndexV1, AttributeIndexV2)
        case 6 => Seq(Z3IndexV1, RecordIndexV1, AttributeIndexV2)
      }
    } else {
      throw new NotImplementedError("This schema format is no longer supported. Please use " +
        s"GeoMesa 1.2.6+ to update you data to a newer format. For more information, see $docs")
    }
    indices.filter(_.supports(sft))
  }
  // Copies the Accumulo column visibility (if present) onto the feature's
  // security user data
  def applyVisibility(sf: SimpleFeature, key: Key): Unit = {
    val visibility = key.getColumnVisibility
    if (visibility.getLength > 0) {
      SecurityUtils.setFeatureVisibility(sf, visibility.toString)
    }
  }
}
trait AccumuloFeatureIndex extends AccumuloFeatureIndexType {
  import AccumuloFeatureIndex.{AttributeColumnFamily, BinColumnFamily, FullColumnFamily}
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
  /**
   * Indicates whether the ID for each feature is serialized with the feature or in the row
   *
   * @return true if the id is part of the serialized value, false if it must
   *         be decoded from the row key
   */
  def serializedWithId: Boolean
  // whether this index writes pre-computed BIN-encoded entries (stored under
  // their own column family / locality group)
  protected def hasPrecomputedBins: Boolean
  /**
   * Creates the backing Accumulo table if needed and configures its splits,
   * locality groups and block cache
   */
  override def configure(sft: SimpleFeatureType, ds: AccumuloDataStore): Unit = {
    import scala.collection.JavaConversions._
    super.configure(sft, ds)
    val table = getTableName(sft.getTypeName, ds)
    // create table if needed
    AccumuloVersion.ensureTableExists(ds.connector, table)
    // create splits - only the ones not already present on the table
    val splitsToAdd = getSplits(sft).map(new Text(_)).toSet -- ds.tableOps.listSplits(table).toSet
    if (splitsToAdd.nonEmpty) {
      // noinspection RedundantCollectionConversion
      ds.tableOps.addSplits(table, ImmutableSortedSet.copyOf(splitsToAdd.toIterable))
    }
    // create locality groups - one group per column family, so features, bins
    // and attributes can be scanned independently
    val cfs = if (hasPrecomputedBins) {
      Seq(FullColumnFamily, BinColumnFamily, AttributeColumnFamily)
    } else {
      Seq(FullColumnFamily, AttributeColumnFamily)
    }
    val localityGroups = cfs.map(cf => (cf.toString, ImmutableSet.of(cf))).toMap
    ds.tableOps.setLocalityGroups(table, localityGroups)
    // enable block cache
    ds.tableOps.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey, "true")
  }
  /**
   * Removes this feature type's data. For a shared table only the rows under
   * the type's sharing prefix are deleted; otherwise the table is dropped
   */
  override def delete(sft: SimpleFeatureType, ds: AccumuloDataStore, shared: Boolean): Unit = {
    import scala.collection.JavaConversions._
    val table = getTableName(sft.getTypeName, ds)
    if (ds.tableOps.exists(table)) {
      if (shared) {
        val auths = ds.config.authProvider.getAuthorizations
        val config = GeoMesaBatchWriterConfig().setMaxWriteThreads(ds.config.writeThreads)
        val prefix = new Text(sft.getTableSharingPrefix)
        val deleter = ds.connector.createBatchDeleter(table, auths, ds.config.queryThreads, config)
        try {
          // delete the contiguous row range starting with this type's prefix
          deleter.setRanges(Seq(new Range(prefix, true, Range.followingPrefix(prefix), false)))
          deleter.delete()
        } finally {
          deleter.close()
        }
      } else {
        // we need to synchronize deleting of tables in mock accumulo as it's not thread safe
        if (ds.connector.isInstanceOf[MockConnector]) {
          ds.connector.synchronized(ds.tableOps.delete(table))
        } else {
          ds.tableOps.delete(table)
        }
      }
    }
  }
  // back compatibility check for old metadata keys - falls back to the legacy
  // metadata entry when the current key is missing, rethrowing the original
  // error if neither is present
  abstract override def getTableName(typeName: String, ds: AccumuloDataStore): String = {
    lazy val oldKey = this match {
      case i if i.name == RecordIndex.name => "tables.record.name"
      case i if i.name == AttributeIndex.name => "tables.idx.attr.name"
      case i => s"tables.${i.name}.name"
    }
    Try(super.getTableName(typeName, ds)).recoverWith {
      case NonFatal(e) => Try(ds.metadata.read(typeName, oldKey).getOrElse(throw e))
    }.get
  }
  /**
   * Turns accumulo results into simple features
   *
   * @param sft simple feature type
   * @param returnSft return simple feature type (transform, etc)
   * @return function from a scanned key/value entry to a simple feature
   */
  private [index] def entriesToFeatures(sft: SimpleFeatureType,
                                        returnSft: SimpleFeatureType): (Entry[Key, Value]) => SimpleFeature = {
    // Perform a projecting decode of the simple feature
    if (serializedWithId) {
      // id is part of the serialized value - just deserialize
      val deserializer = SimpleFeatureDeserializers(returnSft, SerializationType.KRYO)
      (kv: Entry[Key, Value]) => {
        val sf = deserializer.deserialize(kv.getValue.get)
        AccumuloFeatureIndex.applyVisibility(sf, kv.getKey)
        sf
      }
    } else {
      // id must be recovered from the row key and set back on the feature
      val getId = getIdFromRow(sft)
      val deserializer = SimpleFeatureDeserializers(returnSft, SerializationType.KRYO, SerializationOptions.withoutId)
      (kv: Entry[Key, Value]) => {
        val sf = deserializer.deserialize(kv.getValue.get)
        val row = kv.getKey.getRow
        sf.getIdentifier.asInstanceOf[FeatureIdImpl].setID(getId(row.getBytes, 0, row.getLength))
        AccumuloFeatureIndex.applyVisibility(sf, kv.getKey)
        sf
      }
    }
  }
}
| spandanagrawal/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/AccumuloFeatureIndex.scala | Scala | apache-2.0 | 10,463 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import org.apache.kylin.common.KylinConfig
import org.apache.kylin.engine.spark.utils.RestService
import org.apache.spark.internal.Logging
import java.io.IOException
import scala.collection.mutable
import scala.util.parsing.json.JSON._
object StandaloneAppClient extends Logging {
  // Prefix embedded in build-job application names
  // NOTE(review): not referenced in the visible portion of this file —
  // confirm usage before removing
  private val JOB_STEP_PREFIX = "job_step_"
  // appId -> (appName, state, starttime)
  private val cachedKylinJobMap: mutable.Map[String, (String, String, Long)] = new mutable.LinkedHashMap[String, (String, String, Long)]()
  // wall-clock time of the last refresh attempt of cachedKylinJobMap
  private var jobInfoUpdateTime = System.currentTimeMillis()
  // cache limits (5 days / 30000 entries)
  // NOTE(review): neither limit appears to be enforced in this file — verify
  private val cacheTtl = 3600 * 1000 * 24 * 5
  private val cacheMaxSize = 30000
  // spark standalone master REST endpoint returning cluster state as JSON
  private val masterUrlJson: String = KylinConfig.getInstanceFromEnv.getSparkStandaloneMasterWebUI + "/json"
  // connect/read timeouts of 10s each
  private val restService: RestService = new RestService(10000, 10000)
  /**
   * @see org.apache.spark.deploy.master.ApplicationInfo
   * @return Kylin's Build Job 's ApplicationInfo; the cache is refreshed from
   *         the standalone master at most once every 10 seconds
   */
  def getRunningJobs: mutable.Map[String, (String, String, Long)] = cachedKylinJobMap.synchronized {
    val currMills = System.currentTimeMillis
    // refresh when the cache is empty or stale; REST failures are logged and
    // the previous (possibly stale) cache is returned instead
    if (cachedKylinJobMap.isEmpty || currMills - jobInfoUpdateTime >= 10000) {
      logDebug("Updating app status ...")
      try {
        val realResp = restService.getRequest(masterUrlJson)
        parseApplicationState(realResp)
      } catch {
        case ioe: IOException => logError("Can not connect to standalone master service.", ioe)
        case e: Exception => logError("Error .", e)
      }
      // timestamp advances even on failure, which throttles retry attempts
      jobInfoUpdateTime = currMills
    }
    cachedKylinJobMap
  }
def getAppState(stepId: String): String = {
getRunningJobs
val doNothing: PartialFunction[(String, String, Long), (String, String, Long)] = {
case x => x
}
val res: Iterable[(String, String, Long)] = cachedKylinJobMap.values.filter(app => app._1.contains(stepId)).collect(doNothing)
res.size match {
case 0 => "SUBMITTED"
case 1 => res.head._2
case _ =>
// find the recent submitted application
res.maxBy(x => x._3)._2
}
}
def getAppUrl(appId: String, standaloneMaster: String): String = {
var sparkUI = KylinConfig.getInstanceFromEnv.getSparkStandaloneMasterWebUI
if (sparkUI.isEmpty) {
sparkUI = "http://" + getMasterHost(standaloneMaster) + ":8080/"
logWarning("Parameter 'kylin.engine.spark.standalone.master.httpUrl' is not configured. Use " +
sparkUI + " as the spark standalone Web UI address.")
}
if (!sparkUI.endsWith("/")) {
sparkUI = sparkUI + "/"
}
val sparkApp = sparkUI + "app/?appId="
sparkApp + appId
}
def getMasterHost(master: String): String = {
master.split("(://|:)").tail.head
}
def parseApplicationState(responseStr: String): Unit = {
val curr = System.currentTimeMillis()
var respJson = Map.empty[String, Any]
val tree = parseFull(responseStr)
respJson = tree match {
case Some(map: Map[String, Any]) => map
}
val app1 = respJson.getOrElse("completedapps", Array())
val completedApps = app1.asInstanceOf[List[Map[String, Any]]]
for (app <- completedApps) {
val name: String = app.getOrElse("name", "").asInstanceOf[String]
val id: String = app.getOrElse("id", "").asInstanceOf[String]
val state: String = app.getOrElse("state", "").asInstanceOf[String]
val startTime: Double = app.getOrElse("starttime", "0").asInstanceOf[Double]
if (name.contains(JOB_STEP_PREFIX)) {
cachedKylinJobMap(id) = (name, state, startTime.toLong)
}
}
// Clean too old jobs
if (cachedKylinJobMap.size > cacheMaxSize) {
for (id <- cachedKylinJobMap.keys) {
val app = cachedKylinJobMap.get(id)
if (app.isDefined && curr - app.get._3 > cacheTtl) {
cachedKylinJobMap.remove(id)
}
}
}
}
} | apache/kylin | kylin-spark-project/kylin-spark-engine/src/main/scala/org/apache/spark/deploy/StandaloneAppClient.scala | Scala | apache-2.0 | 4,715 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.util.concurrent._
import java.util.concurrent.atomic._
import locks.ReentrantLock
import collection._
import kafka.cluster._
import kafka.utils._
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import java.net.InetAddress
import org.I0Itec.zkclient.{IZkDataListener, IZkStateListener, IZkChildListener, ZkClient}
import org.apache.zookeeper.Watcher.Event.KeeperState
import java.util.UUID
import kafka.serializer._
import kafka.utils.ZkUtils._
import kafka.utils.Utils.inLock
import kafka.common._
import com.yammer.metrics.core.Gauge
import kafka.metrics._
import scala.Some
/**
* This class handles the consumers interaction with zookeeper
*
* Directories:
* 1. Consumer id registry:
* /consumers/[group_id]/ids[consumer_id] -> topic1,...topicN
* A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode
* and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone.
* A consumer subscribes to event changes of the consumer id registry within its group.
*
* The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential
* ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out
* whether the creation of a sequential znode has succeeded or not. More details can be found at
* (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling)
*
* 2. Broker node registry:
* /brokers/[0...N] --> { "host" : "host:port",
* "topics" : {"topic1": ["partition1" ... "partitionN"], ...,
* "topicN": ["partition1" ... "partitionN"] } }
 * This is a list of all present brokers. A unique logical node id is configured on each broker node. A broker
* node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode
* is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that
* the broker serves, (3) a list of logical partitions assigned to each topic on the broker.
* A consumer subscribes to event changes of the broker node registry.
*
* 3. Partition owner registry:
* /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id
 * This stores the mapping between broker partitions and consumers. Each partition is owned by a unique consumer
* within a consumer group. The mapping is reestablished after each rebalancing.
*
* 4. Consumer offset tracking:
* /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value
* Each consumer tracks the offset of the latest message consumed for each partition.
*
*/
private[kafka] object ZookeeperConsumerConnector {
  // Sentinel chunk pushed onto every consumer queue at shutdown so blocked stream iterators wake up.
  val shutdownCommand: FetchedDataChunk = new FetchedDataChunk(null, null, -1L)
}
private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
val enableFetcher: Boolean) // for testing only
extends ConsumerConnector with Logging with KafkaMetricsGroup {
private val isShuttingDown = new AtomicBoolean(false)
private val rebalanceLock = new Object
private var fetcher: Option[ConsumerFetcherManager] = None
private var zkClient: ZkClient = null
private var topicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]
private var checkpointedOffsets = new Pool[TopicAndPartition, Long]
private val topicThreadIdAndQueues = new Pool[(String,String), BlockingQueue[FetchedDataChunk]]
private val scheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "kafka-consumer-scheduler-")
private val messageStreamCreated = new AtomicBoolean(false)
private var sessionExpirationListener: ZKSessionExpireListener = null
private var topicPartitionChangeListener: ZKTopicPartitionChangeListener = null
private var loadBalancerListener: ZKRebalancerListener = null
private var wildcardTopicWatcher: ZookeeperTopicEventWatcher = null
val consumerIdString = {
var consumerUuid : String = null
config.consumerId match {
case Some(consumerId) // for testing only
=> consumerUuid = consumerId
case None // generate unique consumerId automatically
=> val uuid = UUID.randomUUID()
consumerUuid = "%s-%d-%s".format(
InetAddress.getLocalHost.getHostName, System.currentTimeMillis,
uuid.getMostSignificantBits().toHexString.substring(0,8))
}
config.groupId + "_" + consumerUuid
}
this.logIdent = "[" + consumerIdString + "], "
connectZk()
createFetcher()
if (config.autoCommitEnable) {
scheduler.startup
info("starting auto committer every " + config.autoCommitIntervalMs + " ms")
scheduler.schedule("kafka-consumer-autocommit",
autoCommit,
delay = config.autoCommitIntervalMs,
period = config.autoCommitIntervalMs,
unit = TimeUnit.MILLISECONDS)
}
KafkaMetricsReporter.startReporters(config.props)
def this(config: ConsumerConfig) = this(config, true)
def createMessageStreams(topicCountMap: Map[String,Int]): Map[String, List[KafkaStream[Array[Byte],Array[Byte]]]] =
createMessageStreams(topicCountMap, new DefaultDecoder(), new DefaultDecoder())
def createMessageStreams[K,V](topicCountMap: Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String, List[KafkaStream[K,V]]] = {
if (messageStreamCreated.getAndSet(true))
throw new RuntimeException(this.getClass.getSimpleName +
" can create message streams at most once")
consume(topicCountMap, keyDecoder, valueDecoder)
}
def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K] = new DefaultDecoder(),
valueDecoder: Decoder[V] = new DefaultDecoder()) = {
val wildcardStreamsHandler = new WildcardStreamsHandler[K,V](topicFilter, numStreams, keyDecoder, valueDecoder)
wildcardStreamsHandler.streams
}
private def createFetcher() {
if (enableFetcher)
fetcher = Some(new ConsumerFetcherManager(consumerIdString, config, zkClient))
}
private def connectZk() {
info("Connecting to zookeeper instance at " + config.zkConnect)
zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, ZKStringSerializer)
}
def shutdown() {
rebalanceLock synchronized {
val canShutdown = isShuttingDown.compareAndSet(false, true);
if (canShutdown) {
info("ZKConsumerConnector shutting down")
if (wildcardTopicWatcher != null)
wildcardTopicWatcher.shutdown()
try {
if (config.autoCommitEnable)
scheduler.shutdown()
fetcher match {
case Some(f) => f.stopConnections
case None =>
}
sendShutdownToAllQueues()
if (config.autoCommitEnable)
commitOffsets()
if (zkClient != null) {
zkClient.close()
zkClient = null
}
} catch {
case e: Throwable =>
fatal("error during consumer connector shutdown", e)
}
info("ZKConsumerConnector shut down completed")
}
}
}
def consume[K, V](topicCountMap: scala.collection.Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String,List[KafkaStream[K,V]]] = {
debug("entering consume ")
if (topicCountMap == null)
throw new RuntimeException("topicCountMap is null")
val topicCount = TopicCount.constructTopicCount(consumerIdString, topicCountMap)
val topicThreadIds = topicCount.getConsumerThreadIdsPerTopic
// make a list of (queue,stream) pairs, one pair for each threadId
val queuesAndStreams = topicThreadIds.values.map(threadIdSet =>
threadIdSet.map(_ => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](
queue, config.consumerTimeoutMs, keyDecoder, valueDecoder, config.clientId)
(queue, stream)
})
).flatten.toList
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, topicCount)
reinitializeConsumer(topicCount, queuesAndStreams)
loadBalancerListener.kafkaMessageAndMetadataStreams.asInstanceOf[Map[String, List[KafkaStream[K,V]]]]
}
// this API is used by unit tests only
def getTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]] = topicRegistry
private def registerConsumerInZK(dirs: ZKGroupDirs, consumerIdString: String, topicCount: TopicCount) {
info("begin registering consumer " + consumerIdString + " in ZK")
val timestamp = SystemTime.milliseconds.toString
val consumerRegistrationInfo = Json.encode(Map("version" -> 1, "subscription" -> topicCount.getTopicCountMap, "pattern" -> topicCount.pattern,
"timestamp" -> timestamp))
createEphemeralPathExpectConflictHandleZKBug(zkClient, dirs.consumerRegistryDir + "/" + consumerIdString, consumerRegistrationInfo, null,
(consumerZKString, consumer) => true, config.zkSessionTimeoutMs)
info("end registering consumer " + consumerIdString + " in ZK")
}
private def sendShutdownToAllQueues() = {
for (queue <- topicThreadIdAndQueues.values) {
debug("Clearing up queue")
queue.clear()
queue.put(ZookeeperConsumerConnector.shutdownCommand)
debug("Cleared queue and sent shutdown command")
}
}
def autoCommit() {
trace("auto committing")
try {
commitOffsets()
}
catch {
case t: Throwable =>
// log it and let it go
error("exception during autoCommit: ", t)
}
}
def commitOffsets() {
if (zkClient == null) {
error("zk client is null. Cannot commit offsets")
return
}
for ((topic, infos) <- topicRegistry) {
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
for (info <- infos.values) {
val newOffset = info.getConsumeOffset
if (newOffset != checkpointedOffsets.get(TopicAndPartition(topic, info.partitionId))) {
try {
updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" + info.partitionId, newOffset.toString)
checkpointedOffsets.put(TopicAndPartition(topic, info.partitionId), newOffset)
} catch {
case t: Throwable =>
// log it and let it go
warn("exception during commitOffsets", t)
}
debug("Committed offset " + newOffset + " for topic " + info)
}
}
}
}
class ZKSessionExpireListener(val dirs: ZKGroupDirs,
val consumerIdString: String,
val topicCount: TopicCount,
val loadBalancerListener: ZKRebalancerListener)
extends IZkStateListener {
@throws(classOf[Exception])
def handleStateChanged(state: KeeperState) {
// do nothing, since zkclient will do reconnect for us.
}
/**
* Called after the zookeeper session has expired and a new session has been created. You would have to re-create
* any ephemeral nodes here.
*
* @throws Exception
* On any error.
*/
@throws(classOf[Exception])
def handleNewSession() {
/**
* When we get a SessionExpired event, we lost all ephemeral nodes and zkclient has reestablished a
* connection for us. We need to release the ownership of the current consumer and re-register this
* consumer in the consumer registry and trigger a rebalance.
*/
info("ZK expired; release old broker parition ownership; re-register consumer " + consumerIdString)
loadBalancerListener.resetState()
registerConsumerInZK(dirs, consumerIdString, topicCount)
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
// There is no need to resubscribe to child and state changes.
// The child change watchers will be set inside rebalance when we read the children list.
}
}
class ZKTopicPartitionChangeListener(val loadBalancerListener: ZKRebalancerListener)
extends IZkDataListener {
def handleDataChange(dataPath : String, data: Object) {
try {
info("Topic info for path " + dataPath + " changed to " + data.toString + ", triggering rebalance")
// queue up the rebalance event
loadBalancerListener.rebalanceEventTriggered()
// There is no need to re-subscribe the watcher since it will be automatically
// re-registered upon firing of this event by zkClient
} catch {
case e: Throwable => error("Error while handling topic partition change for data path " + dataPath, e )
}
}
@throws(classOf[Exception])
def handleDataDeleted(dataPath : String) {
// TODO: This need to be implemented when we support delete topic
warn("Topic for path " + dataPath + " gets deleted, which should not happen at this time")
}
}
class ZKRebalancerListener(val group: String, val consumerIdString: String,
val kafkaMessageAndMetadataStreams: mutable.Map[String,List[KafkaStream[_,_]]])
extends IZkChildListener {
private var isWatcherTriggered = false
private val lock = new ReentrantLock
private val cond = lock.newCondition()
private val watcherExecutorThread = new Thread(consumerIdString + "_watcher_executor") {
override def run() {
info("starting watcher executor thread for consumer " + consumerIdString)
var doRebalance = false
while (!isShuttingDown.get) {
try {
lock.lock()
try {
if (!isWatcherTriggered)
cond.await(1000, TimeUnit.MILLISECONDS) // wake up periodically so that it can check the shutdown flag
} finally {
doRebalance = isWatcherTriggered
isWatcherTriggered = false
lock.unlock()
}
if (doRebalance)
syncedRebalance
} catch {
case t: Throwable => error("error during syncedRebalance", t)
}
}
info("stopping watcher executor thread for consumer " + consumerIdString)
}
}
watcherExecutorThread.start()
@throws(classOf[Exception])
def handleChildChange(parentPath : String, curChilds : java.util.List[String]) {
rebalanceEventTriggered()
}
def rebalanceEventTriggered() {
inLock(lock) {
isWatcherTriggered = true
cond.signalAll()
}
}
private def deletePartitionOwnershipFromZK(topic: String, partition: Int) {
val topicDirs = new ZKGroupTopicDirs(group, topic)
val znode = topicDirs.consumerOwnerDir + "/" + partition
deletePath(zkClient, znode)
debug("Consumer " + consumerIdString + " releasing " + znode)
}
private def releasePartitionOwnership(localTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]])= {
info("Releasing partition ownership")
for ((topic, infos) <- localTopicRegistry) {
for(partition <- infos.keys)
deletePartitionOwnershipFromZK(topic, partition)
localTopicRegistry.remove(topic)
}
}
def resetState() {
topicRegistry.clear
}
def syncedRebalance() {
rebalanceLock synchronized {
if(isShuttingDown.get()) {
return
} else {
for (i <- 0 until config.rebalanceMaxRetries) {
info("begin rebalancing consumer " + consumerIdString + " try #" + i)
var done = false
var cluster: Cluster = null
try {
cluster = getCluster(zkClient)
done = rebalance(cluster)
} catch {
case e: Throwable =>
/** occasionally, we may hit a ZK exception because the ZK state is changing while we are iterating.
* For example, a ZK node can disappear between the time we get all children and the time we try to get
* the value of a child. Just let this go since another rebalance will be triggered.
**/
info("exception during rebalance ", e)
}
info("end rebalancing consumer " + consumerIdString + " try #" + i)
if (done) {
return
} else {
/* Here the cache is at a risk of being stale. To take future rebalancing decisions correctly, we should
* clear the cache */
info("Rebalancing attempt failed. Clearing the cache before the next rebalancing operation is triggered")
}
// stop all fetchers and clear all the queues to avoid data duplication
closeFetchersForQueues(cluster, kafkaMessageAndMetadataStreams, topicThreadIdAndQueues.map(q => q._2))
Thread.sleep(config.rebalanceBackoffMs)
}
}
}
throw new ConsumerRebalanceFailedException(consumerIdString + " can't rebalance after " + config.rebalanceMaxRetries +" retries")
}
private def rebalance(cluster: Cluster): Boolean = {
val myTopicThreadIdsMap = TopicCount.constructTopicCount(group, consumerIdString, zkClient).getConsumerThreadIdsPerTopic
val consumersPerTopicMap = getConsumersPerTopic(zkClient, group)
val brokers = getAllBrokersInCluster(zkClient)
if (brokers.size == 0) {
// This can happen in a rare case when there are no brokers available in the cluster when the consumer is started.
// We log an warning and register for child changes on brokers/id so that rebalance can be triggered when the brokers
// are up.
warn("no brokers found when trying to rebalance.")
zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, loadBalancerListener)
true
}
else {
val partitionsAssignmentPerTopicMap = getPartitionAssignmentForTopics(zkClient, myTopicThreadIdsMap.keySet.toSeq)
val partitionsPerTopicMap = partitionsAssignmentPerTopicMap.map(p => (p._1, p._2.keySet.toSeq.sorted))
/**
* fetchers must be stopped to avoid data duplication, since if the current
* rebalancing attempt fails, the partitions that are released could be owned by another consumer.
* But if we don't stop the fetchers first, this consumer would continue returning data for released
* partitions in parallel. So, not stopping the fetchers leads to duplicate data.
*/
closeFetchers(cluster, kafkaMessageAndMetadataStreams, myTopicThreadIdsMap)
releasePartitionOwnership(topicRegistry)
var partitionOwnershipDecision = new collection.mutable.HashMap[(String, Int), String]()
val currentTopicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]
for ((topic, consumerThreadIdSet) <- myTopicThreadIdsMap) {
currentTopicRegistry.put(topic, new Pool[Int, PartitionTopicInfo])
val topicDirs = new ZKGroupTopicDirs(group, topic)
val curConsumers = consumersPerTopicMap.get(topic).get
val curPartitions: Seq[Int] = partitionsPerTopicMap.get(topic).get
val nPartsPerConsumer = curPartitions.size / curConsumers.size
val nConsumersWithExtraPart = curPartitions.size % curConsumers.size
info("Consumer " + consumerIdString + " rebalancing the following partitions: " + curPartitions +
" for topic " + topic + " with consumers: " + curConsumers)
for (consumerThreadId <- consumerThreadIdSet) {
val myConsumerPosition = curConsumers.indexOf(consumerThreadId)
assert(myConsumerPosition >= 0)
val startPart = nPartsPerConsumer*myConsumerPosition + myConsumerPosition.min(nConsumersWithExtraPart)
val nParts = nPartsPerConsumer + (if (myConsumerPosition + 1 > nConsumersWithExtraPart) 0 else 1)
/**
* Range-partition the sorted partitions to consumers for better locality.
* The first few consumers pick up an extra partition, if any.
*/
if (nParts <= 0)
warn("No broker partitions consumed by consumer thread " + consumerThreadId + " for topic " + topic)
else {
for (i <- startPart until startPart + nParts) {
val partition = curPartitions(i)
info(consumerThreadId + " attempting to claim partition " + partition)
addPartitionTopicInfo(currentTopicRegistry, topicDirs, partition, topic, consumerThreadId)
// record the partition ownership decision
partitionOwnershipDecision += ((topic, partition) -> consumerThreadId)
}
}
}
}
/**
* move the partition ownership here, since that can be used to indicate a truly successful rebalancing attempt
* A rebalancing attempt is completed successfully only after the fetchers have been started correctly
*/
if(reflectPartitionOwnershipDecision(partitionOwnershipDecision.toMap)) {
info("Updating the cache")
debug("Partitions per topic cache " + partitionsPerTopicMap)
debug("Consumers per topic cache " + consumersPerTopicMap)
topicRegistry = currentTopicRegistry
updateFetcher(cluster)
true
} else {
false
}
}
}
private def closeFetchersForQueues(cluster: Cluster,
messageStreams: Map[String,List[KafkaStream[_,_]]],
queuesToBeCleared: Iterable[BlockingQueue[FetchedDataChunk]]) {
val allPartitionInfos = topicRegistry.values.map(p => p.values).flatten
fetcher match {
case Some(f) =>
f.stopConnections
clearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, messageStreams)
info("Committing all offsets after clearing the fetcher queues")
/**
* here, we need to commit offsets before stopping the consumer from returning any more messages
* from the current data chunk. Since partition ownership is not yet released, this commit offsets
* call will ensure that the offsets committed now will be used by the next consumer thread owning the partition
* for the current data chunk. Since the fetchers are already shutdown and this is the last chunk to be iterated
* by the consumer, there will be no more messages returned by this iterator until the rebalancing finishes
* successfully and the fetchers restart to fetch more data chunks
**/
if (config.autoCommitEnable)
commitOffsets
case None =>
}
}
private def clearFetcherQueues(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster,
queuesTobeCleared: Iterable[BlockingQueue[FetchedDataChunk]],
messageStreams: Map[String,List[KafkaStream[_,_]]]) {
// Clear all but the currently iterated upon chunk in the consumer thread's queue
queuesTobeCleared.foreach(_.clear)
info("Cleared all relevant queues for this fetcher")
// Also clear the currently iterated upon chunk in the consumer threads
if(messageStreams != null)
messageStreams.foreach(_._2.foreach(s => s.clear()))
info("Cleared the data chunks in all the consumer message iterators")
}
private def closeFetchers(cluster: Cluster, messageStreams: Map[String,List[KafkaStream[_,_]]],
relevantTopicThreadIdsMap: Map[String, Set[String]]) {
// only clear the fetcher queues for certain topic partitions that *might* no longer be served by this consumer
// after this rebalancing attempt
val queuesTobeCleared = topicThreadIdAndQueues.filter(q => relevantTopicThreadIdsMap.contains(q._1._1)).map(q => q._2)
closeFetchersForQueues(cluster, messageStreams, queuesTobeCleared)
}
private def updateFetcher(cluster: Cluster) {
// update partitions for fetcher
var allPartitionInfos : List[PartitionTopicInfo] = Nil
for (partitionInfos <- topicRegistry.values)
for (partition <- partitionInfos.values)
allPartitionInfos ::= partition
info("Consumer " + consumerIdString + " selected partitions : " +
allPartitionInfos.sortWith((s,t) => s.partitionId < t.partitionId).map(_.toString).mkString(","))
fetcher match {
case Some(f) =>
f.startConnections(allPartitionInfos, cluster)
case None =>
}
}
private def reflectPartitionOwnershipDecision(partitionOwnershipDecision: Map[(String, Int), String]): Boolean = {
var successfullyOwnedPartitions : List[(String, Int)] = Nil
val partitionOwnershipSuccessful = partitionOwnershipDecision.map { partitionOwner =>
val topic = partitionOwner._1._1
val partition = partitionOwner._1._2
val consumerThreadId = partitionOwner._2
val partitionOwnerPath = getConsumerPartitionOwnerPath(group, topic, partition)
try {
createEphemeralPathExpectConflict(zkClient, partitionOwnerPath, consumerThreadId)
info(consumerThreadId + " successfully owned partition " + partition + " for topic " + topic)
successfullyOwnedPartitions ::= (topic, partition)
true
} catch {
case e: ZkNodeExistsException =>
// The node hasn't been deleted by the original owner. So wait a bit and retry.
info("waiting for the partition ownership to be deleted: " + partition)
false
case e2: Throwable => throw e2
}
}
val hasPartitionOwnershipFailed = partitionOwnershipSuccessful.foldLeft(0)((sum, decision) => sum + (if(decision) 0 else 1))
/* even if one of the partition ownership attempt has failed, return false */
if(hasPartitionOwnershipFailed > 0) {
// remove all paths that we have owned in ZK
successfullyOwnedPartitions.foreach(topicAndPartition => deletePartitionOwnershipFromZK(topicAndPartition._1, topicAndPartition._2))
false
}
else true
}
private def addPartitionTopicInfo(currentTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]],
topicDirs: ZKGroupTopicDirs, partition: Int,
topic: String, consumerThreadId: String) {
val partTopicInfoMap = currentTopicRegistry.get(topic)
val znode = topicDirs.consumerOffsetDir + "/" + partition
val offsetString = readDataMaybeNull(zkClient, znode)._1
// If first time starting a consumer, set the initial offset to -1
val offset =
offsetString match {
case Some(offsetStr) => offsetStr.toLong
case None => PartitionTopicInfo.InvalidOffset
}
val queue = topicThreadIdAndQueues.get((topic, consumerThreadId))
val consumedOffset = new AtomicLong(offset)
val fetchedOffset = new AtomicLong(offset)
val partTopicInfo = new PartitionTopicInfo(topic,
partition,
queue,
consumedOffset,
fetchedOffset,
new AtomicInteger(config.fetchMessageMaxBytes),
config.clientId)
partTopicInfoMap.put(partition, partTopicInfo)
debug(partTopicInfo + " selected new offset " + offset)
checkpointedOffsets.put(TopicAndPartition(topic, partition), offset)
}
}
private def reinitializeConsumer[K,V](
topicCount: TopicCount,
queuesAndStreams: List[(LinkedBlockingQueue[FetchedDataChunk],KafkaStream[K,V])]) {
val dirs = new ZKGroupDirs(config.groupId)
// listener to consumer and partition changes
if (loadBalancerListener == null) {
val topicStreamsMap = new mutable.HashMap[String,List[KafkaStream[K,V]]]
loadBalancerListener = new ZKRebalancerListener(
config.groupId, consumerIdString, topicStreamsMap.asInstanceOf[scala.collection.mutable.Map[String, List[KafkaStream[_,_]]]])
}
// create listener for session expired event if not exist yet
if (sessionExpirationListener == null)
sessionExpirationListener = new ZKSessionExpireListener(
dirs, consumerIdString, topicCount, loadBalancerListener)
// create listener for topic partition change event if not exist yet
if (topicPartitionChangeListener == null)
topicPartitionChangeListener = new ZKTopicPartitionChangeListener(loadBalancerListener)
val topicStreamsMap = loadBalancerListener.kafkaMessageAndMetadataStreams
// map of {topic -> Set(thread-1, thread-2, ...)}
val consumerThreadIdsPerTopic: Map[String, Set[String]] =
topicCount.getConsumerThreadIdsPerTopic
val allQueuesAndStreams = topicCount match {
case wildTopicCount: WildcardTopicCount =>
/*
* Wild-card consumption streams share the same queues, so we need to
* duplicate the list for the subsequent zip operation.
*/
(1 to consumerThreadIdsPerTopic.keySet.size).flatMap(_ => queuesAndStreams).toList
case statTopicCount: StaticTopicCount =>
queuesAndStreams
}
val topicThreadIds = consumerThreadIdsPerTopic.map {
case(topic, threadIds) =>
threadIds.map((topic, _))
}.flatten
require(topicThreadIds.size == allQueuesAndStreams.size,
"Mismatch between thread ID count (%d) and queue count (%d)"
.format(topicThreadIds.size, allQueuesAndStreams.size))
val threadQueueStreamPairs = topicThreadIds.zip(allQueuesAndStreams)
threadQueueStreamPairs.foreach(e => {
val topicThreadId = e._1
val q = e._2._1
topicThreadIdAndQueues.put(topicThreadId, q)
debug("Adding topicThreadId %s and queue %s to topicThreadIdAndQueues data structure".format(topicThreadId, q.toString))
newGauge(
config.clientId + "-" + config.groupId + "-" + topicThreadId._1 + "-" + topicThreadId._2 + "-FetchQueueSize",
new Gauge[Int] {
def value = q.size
}
)
})
val groupedByTopic = threadQueueStreamPairs.groupBy(_._1._1)
groupedByTopic.foreach(e => {
val topic = e._1
val streams = e._2.map(_._2._2).toList
topicStreamsMap += (topic -> streams)
debug("adding topic %s and %d streams to map.".format(topic, streams.size))
})
// listener to consumer and partition changes
zkClient.subscribeStateChanges(sessionExpirationListener)
zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener)
topicStreamsMap.foreach { topicAndStreams =>
// register on broker partition path changes
val topicPath = BrokerTopicsPath + "/" + topicAndStreams._1
zkClient.subscribeDataChanges(topicPath, topicPartitionChangeListener)
}
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
}
class WildcardStreamsHandler[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K],
valueDecoder: Decoder[V])
extends TopicEventHandler[String] {
if (messageStreamCreated.getAndSet(true))
throw new RuntimeException("Each consumer connector can create " +
"message streams by filter at most once.")
private val wildcardQueuesAndStreams = (1 to numStreams)
.map(e => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](queue,
config.consumerTimeoutMs,
keyDecoder,
valueDecoder,
config.clientId)
(queue, stream)
}).toList
// bootstrap with existing topics
private var wildcardTopics =
getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
.filter(topicFilter.isTopicAllowed)
private val wildcardTopicCount = TopicCount.constructTopicCount(
consumerIdString, topicFilter, numStreams, zkClient)
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, wildcardTopicCount)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
/*
* Topic events will trigger subsequent synced rebalances.
*/
info("Creating topic event watcher for topics " + topicFilter)
wildcardTopicWatcher = new ZookeeperTopicEventWatcher(zkClient, this)
def handleTopicEvent(allTopics: Seq[String]) {
debug("Handling topic event")
val updatedTopics = allTopics.filter(topicFilter.isTopicAllowed)
val addedTopics = updatedTopics filterNot (wildcardTopics contains)
if (addedTopics.nonEmpty)
info("Topic event: added topics = %s"
.format(addedTopics))
/*
* TODO: Deleted topics are interesting (and will not be a concern until
* 0.8 release). We may need to remove these topics from the rebalance
* listener's map in reinitializeConsumer.
*/
val deletedTopics = wildcardTopics filterNot (updatedTopics contains)
if (deletedTopics.nonEmpty)
info("Topic event: deleted topics = %s"
.format(deletedTopics))
wildcardTopics = updatedTopics
info("Topics to consume = %s".format(wildcardTopics))
if (addedTopics.nonEmpty || deletedTopics.nonEmpty)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
}
  /** The `KafkaStream` half of each (queue, stream) pair created at
    * construction in `wildcardQueuesAndStreams` — one stream per requested
    * `numStreams`. */
  def streams: Seq[KafkaStream[K,V]] =
    wildcardQueuesAndStreams.map(_._2)
}
}
| unix1986/universe | tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala | Scala | bsd-2-clause | 35,333 |
package com.github.jonfreedman.timeseries.matchers
import com.github.jonfreedman.timeseries.calculation.TemporalCalculator
import com.github.jonfreedman.timeseries.calculation.TemporalCalculator.Observation
import org.hamcrest.{Description, Matcher, TypeSafeMatcher}
/**
 * Hamcrest matcher for [[TemporalCalculator.Observation]] values: an
 * observation matches when its time value and its value each satisfy the
 * corresponding component matcher.
 *
 * @author jon
 */
class TemporalObservationMatcher[T, R] private(timeValueMatcher: Matcher[_ <: T], valueMatcher: Matcher[_ <: R])(implicit ev: T <:< Comparable[_ >: T]) extends TypeSafeMatcher[TemporalCalculator.Observation[T, R]] {

  // Short-circuits via &&: the value matcher is only consulted when the
  // observation's time value already matched.
  override def matchesSafely(item: Observation[T, R]): Boolean =
    timeValueMatcher.matches(item.getTimeValue) && valueMatcher.matches(item.getValue)

  // Renders "Observation[timeValue: <tv> value: <v>]", delegating the <tv>
  // and <v> parts to the component matchers. Description.appendText returns
  // the same Description instance, which allows the calls to be chained.
  override def describeTo(description: Description): Unit = {
    timeValueMatcher.describeTo(description.appendText("Observation[timeValue: "))
    valueMatcher.describeTo(description.appendText(" value: "))
    description.appendText("]")
    ()
  }
}
}
/** Factory companion mirroring Hamcrest's static-factory-method idiom. */
object TemporalObservationMatcher {

  /** Builds a matcher that accepts observations whose time value satisfies
    * `timeValueMatcher` and whose value satisfies `valueMatcher`. */
  def observation[T, R](timeValueMatcher: Matcher[_ <: T], valueMatcher: Matcher[_ <: R])(implicit ev: T <:< Comparable[_ >: T]): TemporalObservationMatcher[T, R] =
    new TemporalObservationMatcher(timeValueMatcher, valueMatcher)
}
| jonfreedman/timeseries | src/test/scala/com/github/jonfreedman/timeseries/matchers/TemporalObservationMatcher.scala | Scala | gpl-3.0 | 1,213 |
package x /*caret*/
import a._
class f
class g( ) extends k {
def foo( ) = return true
}
/*
*/ | ilinum/intellij-scala | testdata/keywordCompletion/generatedTests/autoTest_10.scala | Scala | apache-2.0 | 107 |
/*
*
* ____ __ ____________ ______
* / __/______ _/ /__ /_ __/ _/ //_/_ /
* _\\ \\/ __/ _ `/ / _ `// / _/ // ,< / /_
* /___/\\__/\\_,_/_/\\_,_//_/ /___/_/|_| /___/
*
* A PGF/TIKZ plot library for Scala.
*
*/
package scalatikz.pgf.plots.enums
import enumeratum._
import scala.collection.immutable._
/** A plot mark (marker shape). `entryName` is the underlying mark name
  * string — presumably the value emitted as the pgfplots `mark=` option
  * (TODO confirm against the emitter) — and doubles as the `toString` form. */
sealed abstract class Mark(override val entryName: String) extends EnumEntry {
  override def toString: String = entryName
}
/** Enumeration of the available plot marks. Each entry carries the exact
  * mark name string (e.g. "halfcircle*", "10-pointed star"); the former
  * compile-time string concatenations are written as single literals, which
  * yield identical values. Source order is preserved so `findValues` keeps
  * the same entry ordering. */
object Mark extends Enum[Mark] {

  val values: IndexedSeq[Mark] = findValues

  // Simple marks
  case object NONE extends Mark("none")
  case object X extends Mark("x")
  case object DASH extends Mark("-")
  case object DOT extends Mark("*")

  // Circles
  case object CIRCLE extends Mark("o")
  case object HALF_CIRCLE extends Mark("halfcircle")
  case object HALF_CIRCLE_FILLED extends Mark("halfcircle*")

  // Stars, plus and times variants
  case object STAR extends Mark("star")
  case object TEN_POINTED_STAR extends Mark("10-pointed star")
  case object PLUS extends Mark("+")
  case object O_PLUS extends Mark("oplus")
  case object O_PLUS_FILLED extends Mark("oplus*")
  case object O_TIMES extends Mark("otimes")
  case object O_TIMES_FILLED extends Mark("otimes*")
  case object ASTERISK extends Mark("asterisk")

  // Triangles and diamonds
  case object TRIANGLE extends Mark("triangle")
  case object TRIANGLE_FILLED extends Mark("triangle*")
  case object DIAMOND extends Mark("diamond")
  case object DIAMOND_FILLED extends Mark("diamond*")
  case object HALF_DIAMOND extends Mark("halfdiamond*")

  // Squares
  case object SQUARE extends Mark("square")
  case object SQUARE_FILLED extends Mark("square*")
  case object HALF_SQUARE extends Mark("halfsquare*")
  case object HALF_SQUARE_LEFT extends Mark("halfsquare left*")
  case object HALF_SQUARE_RIGHT extends Mark("halfsquare right*")

  // Other shapes
  case object PENTAGON extends Mark("pentagon")
  case object PENTAGON_FILLED extends Mark("pentagon*")
  case object CUBE extends Mark("cube")
  case object CUBE_FILLED extends Mark("cube*")
  case object BALL extends Mark("ball")
}
| vagmcs/ScalaTIKZ | src/main/scala/scalatikz/pgf/plots/enums/Mark.scala | Scala | lgpl-3.0 | 2,036 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.javautils.j2s
import java.util.List
/**
 * Adapts a mutable `java.util.List` as a Scala `RandomAccessSeq.Mutable`,
 * layering write support on top of the read-side [[JListWrapper]].
 */
trait JMutableListWrapper[T] extends RandomAccessSeq.Mutable[T] with JListWrapper[T] {
  // The wrapped Java collection must be (a subtype of) java.util.List.
  type Wrapped <: List[T]
  /** In-place element replacement; delegates to `java.util.List#set`. */
  def update(n: Int, elem: T): Unit =
    underlying.set(n, elem)
  // Qualified super explicitly selects RandomAccessSeq.Mutable's elements
  // implementation over the one inherited via JListWrapper.
  override def elements: Iterator[T] =
    super[Mutable].elements
}
| jorgeortiz85/scala-javautils | src/main/scala/org/scala_tools/javautils/j2s/JMutableListWrapper.scala | Scala | apache-2.0 | 912 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of code snippets matching specific criteria, giving a surface-level overview of the dataset's contents rather than any deeper analysis.