Dataset columns:

  code        stringlengths   5 .. 1M
  repo_name   stringlengths   5 .. 109
  path        stringlengths   6 .. 208
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int64           5 .. 1M
package guiobjects

import gui.{Tooltip, TopRight}

/**
 * ScoreBoard lists players from first to last in terms of points.
 */
object ScoreBoard extends Tooltip() {

  setPoint(TopRight)
  setSize(200, 10)

  private val background = createTexture()
  background.setAllPoints()
  background.setVertexColor(198 / 255.0, 195 / 255.0, 214 / 255.0)

  def setPlayerNames(points: Map[String, Int]): Unit = {
    setHeight(points.size * 25)
    clearLines()
    points.toList.sortBy(_._2).reverse.foreach({ case (player, point) =>
      addDoubleLine(
        player, point.toString,
        24 / 255.0, 77 / 255.0, 30 / 255.0, 20,
        24 / 255.0, 77 / 255.0, 30 / 255.0, 20
      )
    })
  }
}
sherpal/oh-hell-card-game
gameplaying/src/main/scala/guiobjects/ScoreBoard.scala
Scala
mit
702
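A minimal usage sketch for the scoreboard above (not part of the dataset row); it assumes the surrounding GUI framework has already initialised the tooltip, and the player names and scores are placeholders.

import guiobjects.ScoreBoard

// Hypothetical call site refreshing the scoreboard with current point totals.
ScoreBoard.setPlayerNames(Map("Alice" -> 12, "Bob" -> 9, "Carol" -> 15))
// Lines are rendered from the highest to the lowest score: Carol, Alice, Bob.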
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package de.hpi.ingestion.graphframes.models

import java.util.UUID

/**
 * Contains an extracted subgraph of the original subject graph
 * @param id Unique identifier for this graph
 * @param graphtype Type of graph (can be used to differentiate different job's output)
 * @param nodes List of the UUIDs of the nodes contained in the graph
 * @param nodenames List of the names of the nodes contained in the graph
 * @param size Number of nodes in this graph
 */
case class ResultGraph(
  id: UUID,
  graphtype: String,
  nodes: List[UUID],
  nodenames: List[String],
  size: Int
)

/**
 * Companion object of ResultGraph case class
 */
object ResultGraph {
  /**
   * Alternative constructor for easier creation of ResultGraph instances
   * @param graphType Type of graph (can be used to differentiate different job's output)
   * @param nodes List of the UUIDs of the nodes contained in the graph
   * @param nodeNames List of the names of the nodes contained in the graph
   * @return ResultGraph instance that contains the given nodes data
   */
  def apply(graphType: String, nodes: List[UUID], nodeNames: List[String]): ResultGraph = {
    ResultGraph(UUID.randomUUID(), graphType, nodes, nodeNames, nodes.length)
  }
}
bpn1/ingestion
src/main/scala/de/hpi/ingestion/graphframes/models/ResultGraph.scala
Scala
apache-2.0
1,878
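A short, hedged construction example for the companion apply above; the node UUIDs, names, and graph type string are placeholders.

import java.util.UUID
import de.hpi.ingestion.graphframes.models.ResultGraph

// Placeholder node data; in the ingestion pipeline these come from the subject graph.
val nodeIds = List(UUID.randomUUID(), UUID.randomUUID())
val nodeNames = List("node-a", "node-b")

// The alternative constructor generates the graph id and derives size from the node list.
val graph = ResultGraph("connected-component", nodeIds, nodeNames)
assert(graph.size == nodeIds.length)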
/*
 * Copyright 2013 Twitter Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.twitter.storehaus.algebra

import com.twitter.algebird.Semigroup
import com.twitter.storehaus.{ CollectionOps, UnpivotedStore }
import com.twitter.util.{Future, Time}

/**
 * MergeableStore enrichment which presents a MergeableStore[K, V]
 * over top of a packed MergeableStore[OuterK, Map[InnerK, V]].
 *
 * @author Sam Ritchie
 */
class UnpivotedMergeableStore[-K, OuterK, InnerK, V: Semigroup](
  store: MergeableStore[OuterK, Map[InnerK, V]])(split: K => (OuterK, InnerK))
    extends UnpivotedStore[K, OuterK, InnerK, V](store)(split) with MergeableStore[K, V] {

  override def semigroup: Semigroup[V] = implicitly[Semigroup[V]]

  override def merge(pair: (K, V)): Future[Option[V]] = {
    val (k, v) = pair
    val (outerK, innerK) = split(k)
    store.merge(outerK -> Map(innerK -> v))
      .map { _.flatMap { inner => inner.get(innerK) } }
  }

  override def multiMerge[K1 <: K](kvs: Map[K1, V]): Map[K1, Future[Option[V]]] = {
    val pivoted: Map[OuterK, Map[InnerK, V]] =
      CollectionOps.pivotMap[K1, OuterK, InnerK, V](kvs)(split)
    val ret: Map[OuterK, Future[Option[Map[InnerK, V]]]] = store.multiMerge(pivoted)
    kvs.map { case (k, _) =>
      val (outerK, innerK) = split(k)
      k -> ret(outerK).map(_.flatMap { innerM => innerM.get(innerK) })
    }
  }

  override def close(t: Time): Future[Unit] = store.close(t)
}
twitter/storehaus
storehaus-algebra/src/main/scala/com/twitter/storehaus/algebra/UnpivotedMergeableStore.scala
Scala
apache-2.0
1,965
class C:
  type T >: String <: Any

class D extends C:
  class T // error
som-snytt/dotty
tests/neg/override-inner-class.scala
Scala
apache-2.0
75
package ohnosequences.sbt.nice.release import ohnosequences.sbt.nice._ import sbt._, Keys._ import ohnosequences.sbt.GithubRelease import VersionSettings.autoImport._ import com.markatta.sbttaglist.TagListPlugin.autoImport._ import scala.collection.immutable.SortedSet import java.nio.file.Files import AssemblySettings.autoImport._ import Git._ case object tasks { /* Asks user for the confirmation to continue */ private def confirmOrAbort(msg: String): Unit = { SimpleReader.readLine(s"\\n${msg} (y/n) ").map(_.toLowerCase) match { case Some("y") => {} // go on case Some("n") => sys.error("Aborted by user.") case _ => { println("Didn't understand your answer. Try again.") confirmOrAbort(msg) } } } private def confirmOptional(msg: String)(taskDef: DefTask[Unit]): DefTask[Unit] = Def.taskDyn { SimpleReader.readLine(s"\\n${msg} (y/n) ").map(_.toLowerCase) match { case Some("y") => taskDef // go on case Some("n") => Def.task {} case _ => { println("Didn't understand your answer. Try again.") confirmOptional(msg)(taskDef) } } } private def announce(msg: String): DefTask[Unit] = Def.task { val log = streams.value.log log.info("") log.info(msg) log.info("") } /* We try to check as much as possible _before_ making any release-related changes. If these checks are not passed, it doesn't make sense to start release process at all */ def prepareRelease(releaseVersion: Version): DefTask[Unit] = Def.sequential( clean, announce("Checking git repository..."), checkGit(releaseVersion), GithubRelease.defs.ghreleaseGetRepo, announce("Checking code notes..."), checkCodeNotes, announce("Checking project dependencies..."), checkDependencies, update, announce("Running non-release tests..."), test.in(Test), announce("Preparing release notes and creating git tag..."), prepareReleaseNotesAndTag(releaseVersion) ) def checkGit(releaseVersion: Version): DefTask[Unit] = Def.task { val log = streams.value.log val git = Git.task.value if (git.hasChanges) { log.error("You have uncommited changes. Commit or stash them first.") sys.error("Git repository is not clean.") } val loaded = gitVersion.value val actual = git.version if (loaded != actual) { log.error(s"Current version ${loaded} is outdated (should be ${actual}). Reload and start release process again.") sys.error("Outdated version setting.") } // TODO: probably remote name should be configurable val remoteName = git.currentRemote.getOrElse(origin) log.info(s"Updating remote [${remoteName}].") if (git.silent.remote("update", remoteName).exitCode != 0) { log.error(s"Remote [${remoteName}] is not set or is not accessible. Check your git-config or internet connection.") sys.error("Remote repository is not accessible.") } val tagName = s"v${releaseVersion}" if (git.silent.tagList(tagName) contains tagName) { log.error(s"Git tag ${tagName} already exists. You cannot release this version.") sys.error("Git tag already exists.") } val current: String = git.currentBranch.getOrElse(HEAD) val upstream: String = git.currentUpstream.getOrElse { sys.error("Couldn't get current branch upstream.") } val commitsBehind: Int = git.commitsNumber(s"${current}..${upstream}").getOrElse { sys.error("Couldn't compare current branch with its upstream.") } if (commitsBehind > 0) { log.error(s"Local branch [${current}] is ${commitsBehind} commits behind [${upstream}]. 
You need to pull the changes.") sys.error("Local branch state is outdated.") } else { log.info(s"Local branch [${current}] is up to date with its remote upstream.") } } def checkCodeNotes: DefTask[Unit] = Def.task { // NOTE: this task outputs the list val list = tagList.value if (list.flatMap{ _._2 }.nonEmpty) { confirmOrAbort("Are you sure you want to continue without fixing it?") } } /* Returns the list of dependencies with changing/snapshot versions */ def snapshotDependencies: DefTask[Seq[ModuleID]] = Def.task { libraryDependencies.value.filter { mod => mod.isChanging || mod.revision.endsWith("-SNAPSHOT") } } /* Almost the same as the task `dependencyUpdates`, but it outputs result as a warning and asks for a confirmation if needed */ def checkDependencies: DefTask[Unit] = Def.taskDyn { import com.timushev.sbt.updates._, versions.{ Version => UpdVer }, UpdatesKeys._ val log = streams.value.log val snapshots: Seq[ModuleID] = snapshotDependencies.value if (snapshots.nonEmpty) { log.error(s"You cannot start release process with snapshot dependencies:") snapshots.foreach { mod => log.error(s" - ${mod}") } log.error("Update dependencies, commit and run release process again.") sys.error("Project has unstable dependencies.") } else Def.task { val updatesData: Map[ModuleID, SortedSet[UpdVer]] = dependencyUpdatesData.value if (updatesData.nonEmpty) { log.warn( Reporter.dependencyUpdatesReport(projectID.value, updatesData) ) confirmOrAbort("Are you sure you want to continue with outdated dependencies?") } else log.info("All dependencies seem to be up to date.") } } /* This generates scalatest tags for marking tests (for now just release-only tests) */ def generateTestTags: DefTask[Seq[File]] = Def.task { val file = sourceManaged.in(Test).value / "test" / "releaseOnlyTag.scala" lazy val parts = keys.releaseOnlyTestTag.value.split('.') lazy val pkg = parts.init.mkString(".") lazy val obj = parts.last IO.write(file, s""" |package ${pkg} | |case object ${obj} extends org.scalatest.Tag("${pkg}.${obj}") |""".stripMargin ) Seq(file) } /* This task checks the precense of release notes file and renames it if needed */ def prepareReleaseNotes(releaseVersion: Version): DefTask[File] = Def.task { val log = streams.value.log val git = Git.task.value val notesDir = baseDirectory.value / "notes" // TODO: these could be configurable val alternativeNames = Set("Changelog", "Next") val acceptableExtensions = Set("markdown", "md") val notesFinder: PathFinder = (notesDir * "*") filter { file => (acceptableExtensions contains file.ext) && ( (file.base == releaseVersion.toString) || (alternativeNames.map(_.toLowerCase) contains file.base.toLowerCase) ) } val finalMessage = "Write release notes, commit and run release process again." notesFinder.get match { case Nil => { val acceptableNames = { alternativeNames.map(_+".md") + s"${releaseVersion}.markdown" } log.error(s"""No release notes found. 
Place them in the notes/ directory with one of the following names: ${acceptableNames.mkString("'", "', '", "'")}.""") log.error(finalMessage) sys.error("Absent release notes.") } case Seq(notesFile) => { val notes = IO.read(notesFile) val notesPath = notesFile.relPath(baseDirectory.value) if (notes.isEmpty) { log.error(s"Notes file [${notesPath}] is empty.") log.error(finalMessage) sys.error("Empty release notes.") } else { log.info(s"Taking release notes from the [${notesPath}] file:") println(notes) confirmOrAbort("Do you want to proceed with these release notes?") // Either take the version-named file or rename the changelog-file and commit it val versionFile = baseDirectory.value / "notes" / s"${releaseVersion}.markdown" if (notesFile.absPath != versionFile.absPath) { log.info(s"Renaming [${notesPath}] to [${versionFile}]...") git.mv(notesFile, versionFile).critical git.commit(s"Release notes for v${releaseVersion}")().critical } // TODO: (optionally) symlink notes/latest.md (useful for bintray) versionFile } } case multipleFiles => { log.error("You have several release notes files:") multipleFiles.foreach { f => log.error(s" - notes/${f.name}") } log.error("Please, leave only one of them, commit and run release process again.") sys.error("Multiple release notes.") } } } def prepareReleaseNotesAndTag(releaseVersion: Version): DefTask[Unit] = Def.task { val log = streams.value.log val git = Git.task.value val notesFile = prepareReleaseNotes(releaseVersion).value git.createTag(notesFile, releaseVersion) log.info(s"Created git tag [v${releaseVersion}].") } /* After release is prepared this sequence is going to actually make the release (publish, etc.) */ def makeRelease(releaseVersion: Version): DefTask[Unit] = Def.taskDyn { val log = streams.value.log val git = Git.task.value if (git.version != releaseVersion) { log.error(s"This task should be run after ${keys.prepareRelease.key.label} and reload.") log.error(s" Versions don't coincide: git version is [${git.version}], should be [${releaseVersion}].") sys.error("Outdated version setting.") } Def.sequential( announce("Running release tests..."), publishFatArtifactIfNeeded, test.in(keys.Release), announce("Publishing release artifacts..."), publish, announce("Publishing release on Github..."), pushHeadAndTag, GithubRelease.defs.githubRelease(s"v${releaseVersion}"), announce("Release has successfully finished!"), confirmOptional( "Do you want to generate and publish API docs to gh-pages?" 
)(publishApiDocs(latest = true)) ) } /* This task pushes current branch and tag to the remote */ def pushHeadAndTag: DefTask[Unit] = Def.task { val git = Git.task.value val tagName = s"v${git.version}" val remoteName = git.currentRemote.getOrElse(origin) // We call .get on Try to fail the task if push was unsuccessful git.push(remoteName)(HEAD, tagName).critical } def publishFatArtifactIfNeeded: DefTask[Unit] = Def.taskDyn { if (keys.publishFatArtifact.in(keys.Release).value) Def.task { fatArtifactUpload.value } else Def.task { streams.value.log.info("Skipping fat-jar publishing.") } } /* This task - clones `gh-pages` branch in a temporary directory (or offers to create it) - generates api docs with the standard sbt task `docs` - copies it to `docs/api/<version>` - symlinks `docs/api/latest` to it - commits and pushes `gh-pages` branch */ // TODO: destination (gh-pages) could be configurable, probably with a help of sbt-site def publishApiDocs(latest: Boolean): DefTask[Unit] = Def.task { val log = streams.value.log val git = Git(baseDirectory.value, log) val generatedDocs = doc.in(Compile).value val remoteName = git.currentRemote.getOrElse(origin) val url = git.remoteUrl(remoteName).getOrElse { sys.error(s"Couldn't get remote [${remoteName}] url") } val ghpagesDir = IO.createTemporaryDirectory val ghpagesGit = Git(ghpagesDir, log) val gh_pages = "gh-pages" log.info(s"\\nCloning gh-pages branch to the temporary directory ${ghpagesDir}") if (git.silent.clone("--branch", gh_pages, "--single-branch", url, ghpagesDir.getPath).exitCode != 0) { log.warn("Couldn't clone gh-pages branch, probably this repo doesn't have it yet.") confirmOrAbort("Do you want to create gh-pages branch automatically?") log.debug(s"Cloning this repo to the temporary directory ${ghpagesDir}") git.silent.clone(git.workingDir.getPath, ghpagesDir.getPath).critical log.debug(s"Creating an orphan branch") ghpagesGit.silent.checkout("--orphan", gh_pages).critical ghpagesGit.rm("-rf", ".").critical log.info(s"Successfully created gh-pages branch") } val destBase = ghpagesDir / "docs" / "api" val destVer = destBase / version.value log.info(s"Copying ${generatedDocs.relPath(git.workingDir)} to <gh-pages>/${destVer.relPath(ghpagesDir)}") if (destVer.exists) IO.delete(destVer) IO.copyDirectory(generatedDocs, destVer, overwrite = true) if (! ghpagesGit.hasChangesOrUntracked) { // If there are no changes we don't do anything else log.warn("No changes to commit and publish") } else { ghpagesGit.stageAndCommit(s"API docs v${git.version}")(destVer) log.debug(s"Committed ${destVer}") if (latest) { // NOTE: .nojekyll file is needed for symlinks (see https://github.com/isaacs/github/issues/553) val _nojekyll = ghpagesDir / ".nojekyll" if (! _nojekyll.exists) { log.info(s"Adding .nojekyll file") IO.write(_nojekyll, "") ghpagesGit.stageAndCommit("Added .nojekyll file for symlinks")(_nojekyll) } val destLatest = destBase / "latest" log.info(s"Symlinking ${destLatest.relPath(ghpagesDir)} to ${destVer.relPath(ghpagesDir)}") Files.deleteIfExists(destLatest.absPath) Files.createSymbolicLink(destLatest.absPath, destVer.relPath(destLatest.getParentFile)) ghpagesGit.stageAndCommit(s"Symlinked ${git.version} as latest")(destLatest) } log.info("Publishing API docs...") ghpagesGit.push(url)(gh_pages).critical } } }
ohnosequences/nice-sbt-settings
src/main/scala/release/tasks.scala
Scala
agpl-3.0
13,620
import scala.tools.partest.ReplTest

object Test extends ReplTest {
  def code = """def f(x: => Int): Int = x
f _
"""

  // replace indylambda function names by <function1>
  override def eval() = {
    val lines = super.eval
    val r = """\$\$Lambda.*""".r
    lines.map(l => r.replaceAllIn(l, "<function1>"))
  }
}
felixmulder/scala
test/files/run/t6434.scala
Scala
bsd-3-clause
318
package actors

//#messages
case class TransformationJob(text: String)
case class TransformationResult(text: String)
case class JobFailed(reason: String, job: TransformationJob)
case object BackendRegistration
//#messages
luzhuomi/playakkahbase
scala/playAkka/app/actors/TransformationMessages.scala
Scala
gpl-2.0
222
package org.http4s
package headers

object `X-Powered-By` extends HeaderKey.Default
ZizhengTai/http4s
core/src/main/scala/org/http4s/headers/X-Powered-By.scala
Scala
apache-2.0
85
package mathParser.algebra

import mathParser.MathParser
import org.scalatest.{FunSuite, Matchers}
import spire.math.Complex

import mathParser.implicits._

class ParseComplexSpec extends FunSuite with Matchers {
  val lang = MathParser.complexLanguage
  import lang.{optimize, parse, constantNode}

  test("parse complex literals") {
    parse("i").map(optimize) shouldBe Some(constantNode(Complex(0, 1)))
    parse("5*i").map(optimize) shouldBe Some(constantNode(Complex(0, 5)))
    parse("i + i").map(optimize) shouldBe Some(constantNode(Complex(0, 2)))
    parse("i*i").map(optimize) shouldBe Some(constantNode(Complex(-1, 0)))
    parse("-i").map(optimize) shouldBe Some(constantNode(Complex(0, -1)))
    parse("1+i").map(optimize) shouldBe Some(constantNode(Complex(1, 1)))
    parse("1-i").map(optimize) shouldBe Some(constantNode(Complex(1, -1)))
  }
}
gregor-i/mathParser
math-parser/src/test/scala/mathParser/algebra/ParseComplexSpec.scala
Scala
mit
861
/** * Copyright (c) 2013 Saddle Development Team * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ package org.saddle.util import scala.{ specialized => spec } import org.saddle._ import org.saddle.scalar._ /** * Machinery for concatenating arrays of differing types, with NA-handling */ private[saddle] trait LowPriorityConcatImplicits { implicit def waa[T <: Any] = new Concat.Promoter[T, T, T](Concat.id, Concat.id) } /** * Provides a way to append two arrays of possibly different types together by * intelligently promoting primitive types where possible. * * Key method is Concat.append(array1, array2) */ object Concat extends LowPriorityConcatImplicits { val sy = ScalarTagByte val sc = ScalarTagChar val ss = ScalarTagShort val si = ScalarTagInt val sl = ScalarTagLong val sf = ScalarTagFloat val sd = ScalarTagDouble val sr = new ScalarTagAny[AnyRef] /** * Existence of an instance of this class yields a way to promote instances of the first * two types to an instance of the third type. Concrete instances are values held within * the Concat object. * * @param promoteA A function to take A to C * @param promoteB A functino to take B to C * @tparam A The first type to promote * @tparam B The second type to promote * @tparam C The joint promotion type */ case class Promoter[@spec(Boolean, Byte, Int, Long, Double) -A, @spec(Boolean, Byte, Int, Long, Double) -B, @spec(Boolean, Byte, Int, Long, Double) +C](promoteA: A => C, promoteB: B => C) implicit def id[T](a: T): T = a // boolean promoting implicit val promoteBY: Boolean => Byte = if (_) 1 else 0 implicit val promoteBC: Boolean => Char = if (_) 1 else 0 implicit val promoteBS: Boolean => Short = if (_) 1 else 0 implicit val promoteBI: Boolean => Int = if (_) 1 else 0 implicit val promoteBL: Boolean => Long = if (_) 1L else 0L implicit val promoteBF: Boolean => Float = if (_) 1f else 0f implicit val promoteBD: Boolean => Double = if (_) 1d else 0d implicit val promoteBA: Boolean => AnyRef = Boolean.box _ implicit object wbb extends Promoter[Boolean, Boolean, Boolean](id, id) implicit object wyb extends Promoter[Boolean, Byte, Byte](promoteBY, id) implicit object wby extends Promoter[Byte, Boolean, Byte](id, promoteBY) implicit object wcb extends Promoter[Boolean, Char, Char](promoteBC, id) implicit object wbc extends Promoter[Char, Boolean, Char](id, promoteBC) implicit object wsb extends Promoter[Boolean, Short, Short](promoteBS, id) implicit object wbs extends Promoter[Short, Boolean, Short](id, promoteBS) implicit object wib extends Promoter[Boolean, Int, Int](promoteBI, id) implicit object wbi extends Promoter[Int, Boolean, Int](id, promoteBI) implicit object wlb extends Promoter[Boolean, Long, Long](promoteBL, id) implicit object wbl extends Promoter[Long, Boolean, Long](id, promoteBL) implicit object wfb extends Promoter[Boolean, Float, Float](promoteBF, id) implicit object wbf extends Promoter[Float, Boolean, Float](id, promoteBF) implicit object wdb extends Promoter[Boolean, Double, Double](promoteBD, id) implicit object wbd extends Promoter[Double, 
Boolean, Double](id, promoteBD) implicit object wrb extends Promoter[Boolean, AnyRef, AnyRef](promoteBA, id) implicit object wbr extends Promoter[AnyRef, Boolean, AnyRef](id, promoteBA) // byte promoting implicit val promoteYC: Byte => Char = (y: Byte) => if (sy.isMissing(y)) sc.missing else y.toChar implicit val promoteYS: Byte => Short = (y: Byte) => if (sy.isMissing(y)) ss.missing else y.toShort implicit val promoteYI: Byte => Int = (y: Byte) => if (sy.isMissing(y)) si.missing else y.toInt implicit val promoteYL: Byte => Long = (y: Byte) => if (sy.isMissing(y)) sl.missing else y.toLong implicit val promoteYF: Byte => Float = (y: Byte) => if (sy.isMissing(y)) sf.missing else y.toFloat implicit val promoteYD: Byte => Double = (y: Byte) => if (sy.isMissing(y)) sd.missing else y.toDouble implicit val promoteYR: Byte => AnyRef = (y: Byte) => if (sy.isMissing(y)) sr.missing else Byte.box(y) implicit object wyy extends Promoter[Byte, Byte, Byte](id, id) implicit object wcy extends Promoter[Byte, Char, Char](promoteYC, id) implicit object wyc extends Promoter[Char, Byte, Char](id, promoteYC) implicit object wsy extends Promoter[Byte, Short, Short](promoteYS, id) implicit object wys extends Promoter[Short, Byte, Short](id, promoteYS) implicit object wiy extends Promoter[Byte, Int, Int](promoteYI, id) implicit object wyi extends Promoter[Int, Byte, Int](id, promoteYI) implicit object wly extends Promoter[Byte, Long, Long](promoteYL, id) implicit object wyl extends Promoter[Long, Byte, Long](id, promoteYL) implicit object wfy extends Promoter[Byte, Float, Float](promoteYF, id) implicit object wyf extends Promoter[Float, Byte, Float](id, promoteYF) implicit object wdy extends Promoter[Byte, Double, Double](promoteYD, id) implicit object wyd extends Promoter[Double, Byte, Double](id, promoteYD) implicit object wry extends Promoter[Byte, AnyRef, AnyRef](promoteYR, id) implicit object wyr extends Promoter[AnyRef, Byte, AnyRef](id, promoteYR) // char promoting implicit val promoteCS: Char => Short = (y: Char) => if (sc.isMissing(y)) ss.missing else y.toShort implicit val promoteCI: Char => Int = (y: Char) => if (sc.isMissing(y)) si.missing else y.toInt implicit val promoteCL: Char => Long = (y: Char) => if (sc.isMissing(y)) sl.missing else y.toLong implicit val promoteCF: Char => Float = (y: Char) => if (sc.isMissing(y)) sf.missing else y.toFloat implicit val promoteCD: Char => Double = (y: Char) => if (sc.isMissing(y)) sd.missing else y.toDouble implicit val promoteCR: Char => AnyRef = (y: Char) => if (sc.isMissing(y)) sr.missing else Char.box(y) implicit object wcc extends Promoter[Char, Char, Char](id, id) implicit object wsc extends Promoter[Char, Short, Short](promoteCS, id) implicit object wcs extends Promoter[Short, Char, Short](id, promoteCS) implicit object wic extends Promoter[Char, Int, Int](promoteCI, id) implicit object wci extends Promoter[Int, Char, Int](id, promoteCI) implicit object wlc extends Promoter[Char, Long, Long](promoteCL, id) implicit object wcl extends Promoter[Long, Char, Long](id, promoteCL) implicit object wfc extends Promoter[Char, Float, Float](promoteCF, id) implicit object wcf extends Promoter[Float, Char, Float](id, promoteCF) implicit object wdc extends Promoter[Char, Double, Double](promoteCD, id) implicit object wcd extends Promoter[Double, Char, Double](id, promoteCD) implicit object wrc extends Promoter[Char, AnyRef, AnyRef](promoteCR, id) implicit object wcr extends Promoter[AnyRef, Char, AnyRef](id, promoteCR) // short promoting implicit val promoteSI: Short 
=> Int = (y: Short) => if (ss.isMissing(y)) si.missing else y.toInt implicit val promoteSL: Short => Long = (y: Short) => if (ss.isMissing(y)) sl.missing else y.toLong implicit val promoteSF: Short => Float = (y: Short) => if (ss.isMissing(y)) sf.missing else y.toFloat implicit val promoteSD: Short => Double = (y: Short) => if (ss.isMissing(y)) sd.missing else y.toDouble implicit val promoteSR: Short => AnyRef = (y: Short) => if (ss.isMissing(y)) sr.missing else Short.box(y) implicit object wss extends Promoter[Short, Short, Short](id, id) implicit object wis extends Promoter[Short, Int, Int](promoteSI, id) implicit object wsi extends Promoter[Int, Short, Int](id, promoteSI) implicit object wls extends Promoter[Short, Long, Long](promoteSL, id) implicit object wsl extends Promoter[Long, Short, Long](id, promoteSL) implicit object wfs extends Promoter[Short, Float, Float](promoteSF, id) implicit object wsf extends Promoter[Float, Short, Float](id, promoteSF) implicit object wds extends Promoter[Short, Double, Double](promoteSD, id) implicit object wsd extends Promoter[Double, Short, Double](id, promoteSD) implicit object wrs extends Promoter[Short, AnyRef, AnyRef](promoteSR, id) implicit object wsr extends Promoter[AnyRef, Short, AnyRef](id, promoteSR) // int promoting implicit val promoteIL: Int => Long = (i: Int) => if (si.isMissing(i)) sl.missing else i.toLong implicit val promoteIF: Int => Float = (i: Int) => if (si.isMissing(i)) sf.missing else i.toFloat implicit val promoteID: Int => Double = (i: Int) => if (si.isMissing(i)) sd.missing else i.toDouble implicit val promoteIR: Int => AnyRef = (i: Int) => if (si.isMissing(i)) sr.missing else Int.box(i) implicit object wii extends Promoter[Int, Int, Int](id, id) implicit object wfi extends Promoter[Int, Float, Float](promoteIF, id) implicit object wif extends Promoter[Float, Int, Float](id, promoteIF) implicit object wli extends Promoter[Int, Long, Long](promoteIL, id) implicit object wil extends Promoter[Long, Int, Long](id, promoteIL) implicit object wdi extends Promoter[Int, Double, Double](promoteID, id) implicit object wid extends Promoter[Double, Int, Double](id, promoteID) implicit object wri extends Promoter[Int, AnyRef, AnyRef](promoteIR, id) implicit object wir extends Promoter[AnyRef, Int, AnyRef](id, promoteIR) // float promoting implicit val promoteFD: Float => Double = (i: Float) => if (sf.isMissing(i)) sd.missing else i.toDouble implicit val promoteFR: Float => AnyRef = (i: Float) => if (sf.isMissing(i)) sr.missing else Float.box(i) implicit object wff extends Promoter[Float, Float, Float](id, id) implicit object wdf extends Promoter[Float, Double, Double](promoteFD, id) implicit object wfd extends Promoter[Double, Float, Double](id, promoteFD) implicit object wrf extends Promoter[Float, AnyRef, AnyRef](promoteFR, id) implicit object wfr extends Promoter[AnyRef, Float, AnyRef](id, promoteFR) // long promoting implicit val promoteLD: Long => Double = (l: Long) => if (sl.isMissing(l)) sd.missing else l.toDouble implicit val promoteLR: Long => AnyRef = (l: Long) => if (sl.isMissing(l)) sr.missing else Long.box(l) implicit object wll extends Promoter[Long, Long, Long](id, id) implicit object wdl extends Promoter[Long, Double, Double](promoteLD, id) implicit object wld extends Promoter[Double, Long, Double](id, promoteLD) implicit object wrl extends Promoter[Long, AnyRef, AnyRef](promoteLR, id) implicit object wlr extends Promoter[AnyRef, Long, AnyRef](id, promoteLR) // double promoting implicit val promoteDR: Double => AnyRef = 
(d: Double) => if (sd.isMissing(d)) sr.missing else Double.box(d) implicit object wdd extends Promoter[Double, Double, Double](id, id) implicit object wrd extends Promoter[Double, AnyRef, AnyRef](promoteDR, id) implicit object wdr extends Promoter[AnyRef, Double, AnyRef](id, promoteDR) // AnyRef promotes to itself implicit def wrr[T <: AnyRef] = new Promoter[T, T, T](id, id) /** * Append two arrays of possibly different types together by intelligently * promoting primitive types where possible. * * @param a1 First array * @param a2 Second array * @param wd Evidence of instance of Promoter for involved types * @param mc Evidence of ST for result type * @tparam A First array type * @tparam B Second array type * @tparam C Result array type */ def append[@spec(Boolean, Byte, Int, Long, Double) A, @spec(Boolean, Byte, Int, Long, Double) B, @spec(Boolean, Byte, Int, Long, Double) C](a1: Array[A], a2: Array[B])( implicit wd: Promoter[A, B, C], mc: ST[C]): Array[C] = { val result = array.empty[C](a1.length + a2.length) var i = 0 while(i < a1.length) { result(i) = wd.promoteA(a1(i)) i += 1 } var j = 0 while(j < a2.length) { result(i) = wd.promoteB(a2(j)) i += 1 j += 1 } result } }
jyt109/saddle
saddle-core/src/main/scala/org/saddle/util/Concat.scala
Scala
apache-2.0
12,459
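A hedged sketch of the promotion behaviour documented in Concat above: appending an Array[Int] to an Array[Double] yields an Array[Double]. It assumes the ScalarTag (ST) instances from org.saddle._ are in implicit scope at the call site.

import org.saddle._
import org.saddle.util.Concat

val ints: Array[Int] = Array(1, 2, 3)
val doubles: Array[Double] = Array(4.0, 5.0)

// Int values are promoted to Double via the Promoter[Int, Double, Double] instance.
val combined: Array[Double] = Concat.append(ints, doubles)
// combined == Array(1.0, 2.0, 3.0, 4.0, 5.0)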
package dundertext.ui.editor

import org.scalajs.dom
import org.scalajs.dom.html

class EditorsPanel(tr: html.TableRow) {

  val td: html.Element = dom.document.createElement("td").asInstanceOf[html.Element]
  tr.appendChild(td)
  td.className = "dt-editor"

  val left: EditorPanel = new EditorPanel(td)
}
dundertext/dundertext
ui/src/main/scala/dundertext/ui/editor/EditorsPanel.scala
Scala
gpl-3.0
306
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.api

import java.util.Properties
import java.util.concurrent.ExecutionException

import kafka.log.LogConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.errors.{InvalidTimestampException, SerializationException}
import org.apache.kafka.common.record.TimestampType
import org.junit.Assert._
import org.junit.Test

class PlaintextProducerSendTest extends BaseProducerSendTest {

  @Test(expected = classOf[SerializationException])
  def testWrongSerializer() {
    val producerProps = new Properties()
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    val producer = registerProducer(new KafkaProducer(producerProps))
    val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, new Integer(0), "key".getBytes, "value".getBytes)
    producer.send(record)
  }

  @Test
  def testBatchSizeZero() {
    val producerProps = new Properties()
    producerProps.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "0")
    val producer = createProducer(brokerList = brokerList, lingerMs = Long.MaxValue, props = Some(producerProps))
    sendAndVerify(producer)
  }

  @Test
  def testSendCompressedMessageWithLogAppendTime() {
    val producerProps = new Properties()
    producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip")
    val producer = createProducer(brokerList = brokerList, lingerMs = Long.MaxValue, props = Some(producerProps))
    sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME)
  }

  @Test
  def testSendNonCompressedMessageWithLogAppendTime() {
    val producer = createProducer(brokerList = brokerList, lingerMs = Long.MaxValue)
    sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME)
  }

  /**
   * testAutoCreateTopic
   *
   * The topic should be created upon sending the first message
   */
  @Test
  def testAutoCreateTopic() {
    val producer = createProducer(brokerList, retries = 5)
    try {
      // Send a message to auto-create the topic
      val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes)
      assertEquals("Should have offset 0", 0L, producer.send(record).get.offset)

      // double check that the topic is created with leader elected
      TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0)
    } finally {
      producer.close()
    }
  }

  @Test
  def testSendWithInvalidCreateTime() {
    val topicProps = new Properties()
    topicProps.setProperty(LogConfig.MessageTimestampDifferenceMaxMsProp, "1000")
    TestUtils.createTopic(zkUtils, topic, 1, 2, servers, topicProps)

    val producer = createProducer(brokerList = brokerList)
    try {
      producer.send(new ProducerRecord(topic, 0, System.currentTimeMillis() - 1001, "key".getBytes, "value".getBytes)).get()
      fail("Should throw CorruptedRecordException")
    } catch {
      case e: ExecutionException => assertTrue(e.getCause.isInstanceOf[InvalidTimestampException])
    } finally {
      producer.close()
    }

    // Test compressed messages.
    val producerProps = new Properties()
    producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip")
    val compressedProducer = createProducer(brokerList = brokerList, props = Some(producerProps))
    try {
      compressedProducer.send(new ProducerRecord(topic, 0, System.currentTimeMillis() - 1001, "key".getBytes, "value".getBytes)).get()
      fail("Should throw CorruptedRecordException")
    } catch {
      case e: ExecutionException => assertTrue(e.getCause.isInstanceOf[InvalidTimestampException])
    } finally {
      compressedProducer.close()
    }
  }
}
wangcy6/storm_app
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala
Scala
apache-2.0
4,746
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.knockdata.spark.highcharts

object Init {
  val allHighchartUrls = List(
    "http://code.highcharts.com/highcharts.js",
    "http://code.highcharts.com/highcharts-more.js",
    "http://code.highcharts.com/highcharts-3d.js",
    "http://code.highcharts.com/adapters/standalone-framework.js",
    "http://code.highcharts.com/modules/annotations.js",
    "http://code.highcharts.com/modules/boost.js",
    "http://code.highcharts.com/modules/broken-axis.js",
    "http://code.highcharts.com/modules/canvas-tools.js",
    "http://code.highcharts.com/modules/data.js",
    "http://code.highcharts.com/modules/exporting.js",
    "http://code.highcharts.com/modules/drilldown.js",
    "http://code.highcharts.com/modules/funnel.js",
    "http://code.highcharts.com/modules/heatmap.js",
    "http://code.highcharts.com/modules/no-data-to-display.js",
    "http://code.highcharts.com/modules/offline-exporting.js",
    "http://code.highcharts.com/modules/solid-gauge.js",
    "http://code.highcharts.com/modules/treemap.js",
    "http://code.highcharts.com/maps/modules/map.js",
    "http://code.highcharts.com/mapdata/countries/us/us-all.js")

  // from name to url, name like "map", "us-all"
  val highchartName2Url = allHighchartUrls.map { url =>
    val name = url.stripSuffix(".js").split("/").last
    name -> url
  }.toMap

  /**
   * add one or a few Highcharts modules to the Zeppelin notebook
   *
   * if no parameters are provided, all modules are added
   */
  def init(highcharts: String*): Unit = {
    val highchartUrls =
      if (highcharts.isEmpty)
        allHighchartUrls
      else {
        highcharts.map(name => highchartName2Url(name))
      }

    val jq = "$"

    // load js modules
    val loaders = highchartUrls.map { url =>
      s"""
         |$jq.getScript("$url")
         |  .done(function( script, textStatus ) {
         |    console.log( "load $url " + textStatus );
         |  })
         |  .fail(function(jqxhr, settings, exception ) {
         |    console.log("load $url " + exception);
         |  });
         |""".stripMargin
    }

    // wrap everything in %angular
    val template =
      s"""
         |%angular
         |<script type="text/javascript">
         |
         |$jq(function () {
         |
         |${loaders.mkString("\n")}
         |
         |});
         |
         |</script>
      """.stripMargin

    println(template)
  }
}
knockdata/spark-highcharts
src/main/scala/com/knockdata/spark/highcharts/Init.scala
Scala
apache-2.0
3,215
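A hedged usage sketch for the Init helper above, as called from a Zeppelin paragraph; module names are the script file basenames listed in allHighchartUrls.

import com.knockdata.spark.highcharts.Init

Init.init()                          // print the %angular snippet that loads every module
Init.init("highcharts-more", "map")  // load only the named modules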
package rps

import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar

trait ComputerAiGestureChooserMock extends MockitoSugar {

  /**
   * builds a gesture chooser which produces the given gestures
   */
  def computerGestureChooserWhichReturns(gestures: Gesture*): ComputerAiGestureChooser = {
    val aiGestureChooser = mock[ComputerAiGestureChooser]
    when(aiGestureChooser.nextGesture()).thenReturn(gestures.head, gestures.tail: _*)
    aiGestureChooser
  }
}
fkoehler/rock-paper-scissors-scala
src/test/scala/rps/ComputerAiGestureChooserMock.scala
Scala
apache-2.0
481
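A hypothetical ScalaTest usage sketch for the mock trait above; the Gesture values (Rock, Paper) are assumptions, since the Gesture type itself is not part of this file.

package rps

import org.scalatest.FunSuite

class ScriptedGestureSpec extends FunSuite with ComputerAiGestureChooserMock {
  test("computer plays the scripted gestures in order") {
    // Rock and Paper are assumed members of the Gesture type.
    val chooser = computerGestureChooserWhichReturns(Rock, Paper)
    assert(chooser.nextGesture() == Rock)
    assert(chooser.nextGesture() == Paper)
  }
}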
package com.avsystem.commons
package serialization.cbor

import com.avsystem.commons.jiop.JFactory
import com.avsystem.commons.serialization.GenCodec.OOOFieldsObjectCodec
import com.avsystem.commons.serialization._

import scala.collection.compat._

trait CborOptimizedCodecs {
  /**
   * Creates a `GenCodec` for map type that leverages CBOR's ability to write object keys of arbitrary type.
   * Map keys are not written as strings but are serialized into raw CBOR instead.
   * Therefore, this codec requires a `GenCodec` for key type rather than `GenKeyCodec` required by standard
   * map codec.
   *
   * If the underlying `Input` or `Output` is not a CBOR input/output and there is no `GenKeyCodec`
   * for key type then a fallback `GenKeyCodec` is used that converts keys into strings by taking HEX representation
   * of their CBOR serialization. If the key type has a `GenKeyCodec` then this `GenCodec` behaves exactly the same
   * as the standard one for non-CBOR inputs/outputs.
   */
  implicit def cborMapCodec[M[X, Y] <: BMap[X, Y], K: GenCodec : OptGenKeyCodec, V: GenCodec](
    implicit fac: Factory[(K, V), M[K, V]]
  ): GenObjectCodec[M[K, V]] =
    mkMapCodec(implicit keyCodec => GenCodec.mapCodec[M, K, V])

  implicit def cborJMapCodec[M[X, Y] <: JMap[X, Y], K: GenCodec : OptGenKeyCodec, V: GenCodec](
    implicit fac: JFactory[(K, V), M[K, V]]
  ): GenObjectCodec[M[K, V]] =
    mkMapCodec(implicit keyCodec => GenCodec.jMapCodec[M, K, V])

  private def mkMapCodec[M[X, Y] <: AnyRef, K: GenCodec : OptGenKeyCodec, V: GenCodec](
    mkStdCodec: GenKeyCodec[K] => GenObjectCodec[M[K, V]]
  )(implicit fac: Factory[(K, V), M[K, V]]
  ): GenObjectCodec[M[K, V]] = {
    val hexKeysStdCodec = mkStdCodec(new GenKeyCodec[K] {
      def read(key: String): K = CborInput.readRawCbor[K](RawCbor.fromHex(key))
      def write(value: K): String = CborOutput.writeRawCbor[K](value).toString
    })
    val regularStdCodec = OptGenKeyCodec[K].keyCodec.map(mkStdCodec).getOrElse(hexKeysStdCodec)

    val cborKeyCodec = new CborKeyCodec {
      def writeFieldKey(fieldName: String, output: CborOutput): Unit =
        output.writeRawCbor(RawCbor.fromHex(fieldName))
      def readFieldKey(input: CborInput): String =
        input.readRawCbor().toString
    }

    new GenObjectCodec[M[K, V]] {
      override def readObject(input: ObjectInput): M[K, V] =
        if (input.customEvent(ForceCborKeyCodec, cborKeyCodec)) hexKeysStdCodec.readObject(input)
        else regularStdCodec.readObject(input)

      override def writeObject(output: ObjectOutput, value: M[K, V]): Unit =
        if (output.customEvent(ForceCborKeyCodec, cborKeyCodec)) hexKeysStdCodec.writeObject(output, value)
        else regularStdCodec.writeObject(output, value)
    }
  }
}
object CborOptimizedCodecs extends CborOptimizedCodecs

class CborRawKeysCodec[T](stdObjectCodec: GenObjectCodec[T], keyCodec: CborKeyCodec) extends GenObjectCodec[T] {
  def readObject(input: ObjectInput): T = {
    input.customEvent(ForceCborKeyCodec, keyCodec)
    stdObjectCodec.readObject(input)
  }

  def writeObject(output: ObjectOutput, value: T): Unit = {
    output.customEvent(ForceCborKeyCodec, keyCodec)
    stdObjectCodec.writeObject(output, value)
  }
}

class OOOFieldCborRawKeysCodec[T](stdObjectCodec: OOOFieldsObjectCodec[T], keyCodec: CborKeyCodec) extends OOOFieldsObjectCodec[T] {
  def readObject(input: ObjectInput, outOfOrderFields: FieldValues): T = {
    input.customEvent(ForceCborKeyCodec, keyCodec)
    stdObjectCodec.readObject(input, outOfOrderFields)
  }

  def writeFields(output: ObjectOutput, value: T): Unit = {
    output.customEvent(ForceCborKeyCodec, keyCodec)
    stdObjectCodec.writeFields(output, value)
  }

  def size(value: T): Int = stdObjectCodec.size(value)
  def nullable: Boolean = stdObjectCodec.nullable
}

/**
 * Makes sure that [[CborRawKeysCodec]] for map types behaves exactly the same for non-CBOR inputs/outputs as
 * standard `GenCodec` when the key type has a `GenKeyCodec`. However, when `GenKeyCodec` is not available for key
 * type, [[CborRawKeysCodec]] can still work with non-CBOR inputs/outputs by serializing keys into CBOR and taking
 * their HEX representation as standard string keys.
 */
case class OptGenKeyCodec[K](keyCodec: Opt[GenKeyCodec[K]])
object OptGenKeyCodec extends OptGenKeyCodecLowPriority {
  def apply[K](implicit optGenKeyCodec: OptGenKeyCodec[K]): OptGenKeyCodec[K] = optGenKeyCodec

  implicit def fromKeyCodec[K: GenKeyCodec]: OptGenKeyCodec[K] = OptGenKeyCodec(Opt(GenKeyCodec[K]))
}
trait OptGenKeyCodecLowPriority { this: OptGenKeyCodec.type =>
  implicit def noKeyCodec[K]: OptGenKeyCodec[K] = OptGenKeyCodec(Opt.Empty)
}
AVSystem/scala-commons
commons-core/src/main/scala/com/avsystem/commons/serialization/cbor/CborOptimizedCodecs.scala
Scala
mit
4,749
package eu.ace_design.island.map.processes

import eu.ace_design.island.geom.{Face, Registry}
import eu.ace_design.island.map._
import eu.ace_design.island.util.{LogSilos, Logger}

/**
 * A face is considered as an ocean one if it is a water face connected to the borders of the map. Lakes are faces
 * identified as water but not as ocean.
 *
 * Pre-conditions:
 *  - Faces touching the edge of the map are identified as "IsBorder(true)"
 *  - Faces are identified as "IsWater(b)", with b in {true, false}.
 *
 * Post-conditions:
 *  - Water faces connected to the border by a path are annotated as "WaterKind(OCEAN)"
 *  - Water faces which are not ocean ones are annotated as "WaterKind(LAKE)"
 */
object IdentifyLakesAndOcean extends Process {
  import ExistingWaterKind.{OCEAN, LAKE}

  override def apply(m: IslandMap): IslandMap = {
    info("Annotating faces")
    val borders = getRefs(m, IsBorder())
    val oceans = propagate(borders, m.faceProps, m, IsWater())
    val water = getRefs(m, IsWater())
    val lakes = water diff oceans
    debug("Faces tagged as ocean: " + oceans.toSeq.sorted.mkString("(", ",", ")"))
    debug("Faces tagged as lake: " + lakes.toSeq.sorted.mkString("(", ",", ")"))
    val fProps = m.faceProps bulkAdd (oceans -> WaterKind(OCEAN)) bulkAdd (lakes -> WaterKind(LAKE))
    m.copy(faceProps = fProps)
  }

  /**
   * Returns the set of references to the faces holding a given property
   * @param m the map containing the faces
   * @param prop the property one is looking for
   * @return the set of integer references for such faces
   */
  private def getRefs(m: IslandMap, prop: Property[_]): Set[Int] =
    m.findFacesWith(Set(prop)) map { f => m.faceRef(f) }

  /**
   * Compute the transitive closure of a given property, based on an initial set of faces. If a face satisfies a given
   * property, its neighbors are then transitively subject to consideration
   * @param init the initial set of faces to investigate
   * @param props the PropertySet associated to the faces
   * @param m the IslandMap we are propagating in (used to retrieve neighbors of a given face)
   * @param p the property to propagate transitively.
   * @return a set of references representing the transitive closure of 'p', starting with init.
   */
  private def propagate(init: Set[Int], props: PropertySet, m: IslandMap, p: Property[_]): Set[Int] = {
    // internal function used to compute the transitive closure in an accumulator
    def loop(candidates: Set[Int], acc: Set[Int]): Set[Int] = candidates.headOption match {
      case None => acc // no more candidate, the accumulator is the result
      case Some(c) => props.check(c, p) match {
        case false => loop(candidates.tail, acc) // does not satisfy 'p' => forget about it
        case true => {
          val newAcc = acc + c // satisfies 'p' => must be included in the accumulator
          val subjects = m.face(c).neighbors.get diff newAcc // investigate neighbors not already in the accumulator
          loop(candidates.tail ++ subjects, newAcc) // continue the computation on these new subjects
        }
      }
    }
    loop(init, Set())
  }
}
ace-design/island
engine/src/main/scala/eu/ace_design/island/map/processes/IdentifyLakesAndOcean.scala
Scala
lgpl-3.0
3,207
package models

import java.util.UUID

import org.joda.time.{DateTime, LocalDate, LocalTime}
import play.api.libs.functional.syntax.{unlift, _}
import play.api.libs.json.{JsPath, Json, Reads, Writes}
import utils.date.DateTimeJsonFormatter._

trait ReportCardRescheduledLike extends UniqueEntity

case class ReportCardRescheduled(
  date: LocalDate,
  start: LocalTime,
  end: LocalTime,
  room: UUID,
  reason: Option[String] = None,
  lastModified: DateTime,
  id: UUID = UUID.randomUUID
) extends ReportCardRescheduledLike

case class ReportCardRescheduledProtocol(
  reportCardEntry: UUID,
  date: LocalDate,
  start: LocalTime,
  end: LocalTime,
  room: UUID,
  reason: Option[String] = None
)

case class ReportCardRescheduledAtom(
  date: LocalDate,
  start: LocalTime,
  end: LocalTime,
  room: Room,
  reason: Option[String],
  lastModified: DateTime,
  id: UUID
) extends ReportCardRescheduledLike

object ReportCardRescheduledLike {

  implicit val writes: Writes[ReportCardRescheduledLike] = {
    case normal: ReportCardRescheduled => Json.toJson(normal)(ReportCardRescheduled.writes)
    case atom: ReportCardRescheduledAtom => Json.toJson(atom)(ReportCardRescheduledAtom.writes)
  }
}

object ReportCardRescheduled {
  implicit val writes: Writes[ReportCardRescheduled] = Json.writes[ReportCardRescheduled]
}

object ReportCardRescheduledProtocol {
  implicit val reads: Reads[ReportCardRescheduledProtocol] = Json.reads[ReportCardRescheduledProtocol]
}

object ReportCardRescheduledAtom {

  implicit val writes: Writes[ReportCardRescheduledAtom] = (
    (JsPath \ "date").write[LocalDate] and
      (JsPath \ "start").write[LocalTime] and
      (JsPath \ "end").write[LocalTime] and
      (JsPath \ "room").write[Room](Room.writes) and
      (JsPath \ "reason").writeNullable[String] and
      (JsPath \ "lastModified").write[DateTime] and
      (JsPath \ "id").write[UUID]
    )(unlift(ReportCardRescheduledAtom.unapply))
}
THK-ADV/lwm-reloaded
app/models/ReportCardRescheduled.scala
Scala
mit
1,938
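A hedged serialization sketch for the model above, assuming the joda-time JSON formatters from DateTimeJsonFormatter are in scope as they are in the model file; the room UUID and reason are placeholders.

import java.util.UUID
import org.joda.time.{DateTime, LocalDate, LocalTime}
import play.api.libs.json.Json
import models.ReportCardRescheduled

val rescheduled = ReportCardRescheduled(
  date = LocalDate.now,
  start = LocalTime.now,
  end = LocalTime.now.plusHours(2),
  room = UUID.randomUUID,          // placeholder room reference
  reason = Some("room conflict"),
  lastModified = DateTime.now
)

// Uses the Writes derived via Json.writes in the companion object.
val json = Json.toJson(rescheduled)(ReportCardRescheduled.writes)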
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package impl

import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubBase, StubElement}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock

import scala.collection.immutable.ArraySeq

/**
 * @author ilyas
 */
class ScExtendsBlockStubImpl(parent: StubElement[_ <: PsiElement],
                             elementType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
                             override val baseClasses: ArraySeq[String])
  extends StubBase[ScExtendsBlock](parent, elementType) with ScExtendsBlockStub
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScExtendsBlockStubImpl.scala
Scala
apache-2.0
681
package org.bitcoins.core.serializers

import org.bitcoins.core.currency.Satoshis
import org.bitcoins.core.gen.CurrencyUnitGenerator
import org.scalacheck.{Prop, Properties}

/**
 * Created by chris on 6/23/16.
 */
class RawSatoshisSerializerSpec extends Properties("RawSatoshiSerializerSpec") {

  property("Symmetrical serialization") =
    Prop.forAll(CurrencyUnitGenerator.satoshis) { satoshis =>
      Satoshis(satoshis.hex) == satoshis
    }
}
SuredBits/bitcoin-s-sidechains
src/test/scala/org/bitcoins/core/serializers/RawSatoshisSerializerSpec.scala
Scala
mit
454
/***********************************************************************
 * Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.filter.function

import org.geotools.filter.FunctionExpressionImpl
import org.geotools.filter.capability.FunctionNameImpl
import org.opengis.feature.simple.SimpleFeature

class FastProperty extends FunctionExpressionImpl(FastProperty.Name) {

  private var idx: Int = -1

  def this(i: Int) = {
    this()
    idx = i
  }

  override def evaluate(o: AnyRef): AnyRef = {
    if (idx == -1) {
      idx = getExpression(0).evaluate(null).asInstanceOf[Long].toInt
    }
    o.asInstanceOf[SimpleFeature].getAttribute(idx)
  }
}

object FastProperty {
  val Name = new FunctionNameImpl(
    "fastproperty",
    FunctionNameImpl.parameter("propertyValue", classOf[Object]),
    FunctionNameImpl.parameter("propertyIndex", classOf[Integer])
  )
}
locationtech/geomesa
geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/function/FastProperty.scala
Scala
apache-2.0
1,238
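A hedged GeoTools sketch of how such a filter function is typically instantiated and evaluated; the factory import path varies across GeoTools versions, and the attribute index used here is a placeholder.

import org.geotools.factory.CommonFactoryFinder
import org.opengis.feature.simple.SimpleFeature

val ff = CommonFactoryFinder.getFilterFactory2()

// "fastproperty" resolves an attribute by index; 2 is a placeholder index.
val expr = ff.function("fastproperty", ff.literal(2))

// Evaluating against a feature returns feature.getAttribute(2), with the index cached after the first call.
def attributeOf(feature: SimpleFeature): AnyRef = expr.evaluate(feature)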
package com.datastax.spark.connector.sql import org.scalatest.BeforeAndAfterEach import scala.concurrent.Future import org.apache.spark.Logging import org.apache.spark.sql.SaveMode._ import org.apache.spark.sql.cassandra.{ AnalyzedPredicates, CassandraPredicateRules, CassandraSourceRelation, TableRef } import org.apache.spark.sql.{DataFrame, SQLContext} import com.datastax.spark.connector.SparkCassandraITFlatSpecBase import com.datastax.spark.connector.cql.{TableDef, CassandraConnector} class CassandraDataSourceSpec extends SparkCassandraITFlatSpecBase with Logging with BeforeAndAfterEach { useCassandraConfig(Seq("cassandra-default.yaml.template")) useSparkConf(defaultConf) val conn = CassandraConnector(defaultConf) conn.withSessionDo { session => createKeyspace(session) awaitAll( Future { session.execute(s"""CREATE TABLE $ks.test1 (a INT, b INT, c INT, d INT, e INT, f INT, g INT, h INT, PRIMARY KEY ((a, b, c), d , e, f))""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 1, 1, 1, 1)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 2, 1, 1, 2)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 1, 1, 2, 1)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 2, 1, 2, 2)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 1, 2, 1, 1)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 2, 2, 1, 2)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 1, 2, 2, 1)""") session.execute(s"""INSERT INTO $ks.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 2, 2, 2, 2)""") }, Future { session.execute(s"CREATE TABLE $ks.test_insert (a INT PRIMARY KEY, b INT)") }, Future { session.execute(s"CREATE TABLE $ks.test_insert1 (a INT PRIMARY KEY, b INT)") }, Future { session.execute(s"CREATE TABLE $ks.test_insert2 (a INT PRIMARY KEY, b INT)") session.execute(s"INSERT INTO $ks.test_insert2 (a, b) VALUES (3,4)") session.execute(s"INSERT INTO $ks.test_insert2 (a, b) VALUES (5,6)") }, Future { session.execute( s""" |CREATE TABLE $ks.df_test( | customer_id int, | uri text, | browser text, | epoch bigint, | PRIMARY KEY (customer_id, epoch, uri) |)""".stripMargin.replaceAll("\\n", " ")) }, Future { session.execute( s""" |CREATE TABLE $ks.df_test2( | customer_id int, | uri text, | browser text, | epoch bigint, | PRIMARY KEY (customer_id, epoch) |)""".stripMargin.replaceAll("\\n", " ")) } ) } val sqlContext: SQLContext = new SQLContext(sc) def pushDown: Boolean = true override def beforeAll() { createTempTable(ks, "test1", "tmpTable") } override def afterAll() { super.afterAll() sqlContext.dropTempTable("tmpTable") } override def afterEach(): Unit ={ sc.setLocalProperty(CassandraSourceRelation.AdditionalCassandraPushDownRulesParam.name, null) } def createTempTable(keyspace: String, table: String, tmpTable: String) = { sqlContext.sql( s""" |CREATE TEMPORARY TABLE $tmpTable |USING org.apache.spark.sql.cassandra |OPTIONS ( | table "$table", | keyspace "$keyspace", | pushdown "$pushDown") """.stripMargin.replaceAll("\\n", " ")) } def cassandraTable(tableRef: TableRef) : DataFrame = { sqlContext.baseRelationToDataFrame(CassandraSourceRelation(tableRef, sqlContext)) } it should "allow to select all rows" in { val result = cassandraTable(TableRef("test1", ks)).select("a").collect() result should have length 8 result.head should have length 1 } it should 
"allow to register as a temp table" in { cassandraTable(TableRef("test1", ks)).registerTempTable("test1") val temp = sqlContext.sql("SELECT * from test1").select("b").collect() temp should have length 8 temp.head should have length 1 sqlContext.dropTempTable("test1") } it should "allow to insert data into a cassandra table" in { createTempTable(ks, "test_insert", "insertTable") sqlContext.sql("SELECT * FROM insertTable").collect() should have length 0 sqlContext.sql("INSERT OVERWRITE TABLE insertTable SELECT a, b FROM tmpTable") sqlContext.sql("SELECT * FROM insertTable").collect() should have length 1 sqlContext.dropTempTable("insertTable") } it should "allow to save data to a cassandra table" in { sqlContext.sql("SELECT a, b from tmpTable") .write .format("org.apache.spark.sql.cassandra") .mode(ErrorIfExists) .options(Map("table" -> "test_insert1", "keyspace" -> ks)) .save() cassandraTable(TableRef("test_insert1", ks)).collect() should have length 1 val message = intercept[UnsupportedOperationException] { sqlContext.sql("SELECT a, b from tmpTable") .write .format("org.apache.spark.sql.cassandra") .mode(ErrorIfExists) .options(Map("table" -> "test_insert1", "keyspace" -> ks)) .save() }.getMessage assert( message.contains("Writing to a non-empty Cassandra Table is not allowed."), "We should complain that 'Writing to a non-empty Cassandra table is not allowed.'") } it should "allow to overwrite a cassandra table" in { sqlContext.sql("SELECT a, b from tmpTable") .write .format("org.apache.spark.sql.cassandra") .mode(Overwrite) .options(Map("table" -> "test_insert2", "keyspace" -> ks)) .save() createTempTable(ks, "test_insert2", "insertTable2") sqlContext.sql("SELECT * FROM insertTable2").collect() should have length 1 sqlContext.dropTempTable("insertTable2") } it should "allow to filter a table" in { sqlContext.sql("SELECT a, b FROM tmpTable WHERE a=1 and b=2 and c=1 and e=1").collect() should have length 2 } it should "allow to filter a table with a function for a column alias" in { sqlContext.sql("SELECT * FROM (SELECT (a + b + c) AS x, d FROM tmpTable) " + "AS tmpTable1 WHERE x= 3").collect() should have length 4 } it should "allow to filter a table with alias" in { sqlContext.sql("SELECT * FROM (SELECT a AS a1, b AS b1, c AS c1, d AS d1, e AS e1" + " FROM tmpTable) AS tmpTable1 WHERE a1=1 and b1=2 and c1=1 and e1=1 ").collect() should have length 2 } it should "be able to save DF with reversed order columns to a Cassandra table" in { val test_df = Test(1400820884, "http://foobar", "Firefox", 123242) import sqlContext.implicits._ val df = sc.parallelize(Seq(test_df)).toDF df.write .format("org.apache.spark.sql.cassandra") .mode(Overwrite) .options(Map("table" -> "df_test", "keyspace" -> ks)) .save() cassandraTable(TableRef("df_test", ks)).collect() should have length 1 } it should "be able to save DF with partial columns to a Cassandra table" in { val test_df = TestPartialColumns(1400820884, "Firefox", 123242) import sqlContext.implicits._ val df = sc.parallelize(Seq(test_df)).toDF df.write .format("org.apache.spark.sql.cassandra") .mode(Overwrite) .options(Map("table" -> "df_test2", "keyspace" -> ks)) .save() cassandraTable(TableRef("df_test2", ks)).collect() should have length 1 } it should "apply user custom predicates which erase basic pushdowns" in { sc.setLocalProperty( CassandraSourceRelation.AdditionalCassandraPushDownRulesParam.name, "com.datastax.spark.connector.sql.PushdownNothing") val df = sqlContext .read .format("org.apache.spark.sql.cassandra") .options(Map("keyspace" -> 
ks, "table" -> "test1")) .load().filter("a=1 and b=2 and c=1 and e=1") val qp = df.queryExecution.executedPlan.toString qp should include ("Filter (") // Should have a Spark Filter Step println(qp) } it should "apply user custom predicates in the order they are specified" in { sc.setLocalProperty( CassandraSourceRelation.AdditionalCassandraPushDownRulesParam.name, "com.datastax.spark.connector.sql.PushdownNothing,com.datastax.spark.connector.sql.PushdownEverything") val df = sqlContext .read .format("org.apache.spark.sql.cassandra") .options(Map("keyspace" -> ks, "table" -> "test1")) .load().filter("a=1 and b=2 and c=1 and e=1") val qp = df.queryExecution.executedPlan.toString qp should not include ("Filter (") // No Spark Filter Step println(qp) } } case class Test(val epoch:Long, val uri:String, val browser:String, val customer_id:Int) case class TestPartialColumns(val epoch:Long, val browser:String, val customer_id:Int) object PushdownEverything extends CassandraPredicateRules { override def apply( predicates: AnalyzedPredicates, tableDef: TableDef): AnalyzedPredicates = { AnalyzedPredicates(predicates.handledByCassandra ++ predicates.handledBySpark, Set.empty) } } object PushdownNothing extends CassandraPredicateRules { override def apply( predicates: AnalyzedPredicates, tableDef: TableDef): AnalyzedPredicates = { AnalyzedPredicates(Set.empty, predicates.handledByCassandra ++ predicates.handledBySpark) } }
jimenefe/spark-cassandra-connector
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/sql/CassandraDataSourceSpec.scala
Scala
apache-2.0
9,648
package com.socrata.util.`implicits-impl`

import java.util.Comparator

class ComparatorOrdering[T](comparator: Comparator[T]) extends Ordering[T] {
  def compare(x: T, y: T) = comparator.compare(x, y)
}

class ComparableOrdering[T <: Comparable[T]] extends Ordering[T] {
  def compare(x: T, y: T) = x.compareTo(y)
}

object ComparableOrderingImpl extends ComparableOrdering[Nothing] // Evil type erasure exploit hack
socrata-platform/socrata-utils
src/main/scala/com/socrata/util/implicits-impl/JavaComparisonCompat.scala
Scala
apache-2.0
418
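As an illustrative aside (not part of the socrata-utils file above), here is a minimal sketch of how the two Ordering adapters might be used; the case-insensitive comparator and the sample values are made up for the example.

import java.util.Comparator
import com.socrata.util.`implicits-impl`.{ComparatorOrdering, ComparableOrdering}

object OrderingAdaptersSketch extends App {
  // A plain Java Comparator (hypothetical): order strings ignoring case.
  val caseInsensitive: Comparator[String] = new Comparator[String] {
    def compare(a: String, b: String): Int = a.compareToIgnoreCase(b)
  }

  // Wrap the Comparator as a Scala Ordering and sort with it.
  val byCase: Ordering[String] = new ComparatorOrdering(caseInsensitive)
  println(List("b", "A", "c").sorted(byCase)) // List(A, b, c)

  // Comparable-based Java types can be adapted the same way.
  val byValue: Ordering[java.lang.Integer] = new ComparableOrdering[java.lang.Integer]
  println(List[java.lang.Integer](Integer.valueOf(3), Integer.valueOf(1), Integer.valueOf(2)).sorted(byValue)) // List(1, 2, 3)
}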
package de.endrullis.sta

import java.awt.Color
import utils.ColorUtils

/**
 * Color variable.
 *
 * @author Stefan Endrullis &lt;stefan@endrullis.de&gt;
 */
object ColorVar {
  def apply[TM](startColor: Color): ColorVar[TM] = new ColorVar[TM](startColor, VarState(startColor))
}

case class ColorVar[TM](startColor: Color, state: VarState[Color]) extends AbstractVar[Color, TM, ColorVar[TM]] {
  protected def copyWithState(state: VarState[Color]) = copy(state = state)

  /** Cross fades to the given color. */
  def changeTo(color2: Color) = new ChangeTo(color2)

  class ChangeTo(color2: Color) {
    /** In the given duration. */
    def in(duration: Double)(implicit timeMap: TimeMap[TM]) = change(ColorUtils.fadeOver(state.lastValue, color2)).in(duration)

    /** In the given duration. */
    def in(timeMap: TimeMap[TM], duration: Double) = change(ColorUtils.fadeOver(state.lastValue, color2)).in(duration)(timeMap)
  }

  override def toStringGenerator = map(c => c.getRed+","+c.getGreen+","+c.getBlue)
}
xylo/scala-tikz-animations
src/de/endrullis/sta/ColorVar.scala
Scala
apache-2.0
1,026
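A small, self-contained sketch of the cross-fade idea behind `changeTo` above; this is not the library's `ColorUtils.fadeOver`, just an assumed linear interpolation between two `java.awt.Color` values for illustration.

import java.awt.Color

object ColorFadeSketch extends App {
  // Linear interpolation between two colors at position t in [0, 1].
  // Illustrative only; the real library delegates to ColorUtils.fadeOver.
  def fade(from: Color, to: Color)(t: Double): Color = {
    def mix(a: Int, b: Int): Int = math.round(a + (b - a) * t).toInt
    new Color(mix(from.getRed, to.getRed), mix(from.getGreen, to.getGreen), mix(from.getBlue, to.getBlue))
  }

  val halfway = fade(Color.RED, Color.BLUE)(0.5)
  println(s"${halfway.getRed},${halfway.getGreen},${halfway.getBlue}") // prints 128,0,128
}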
package org.jetbrains.plugins.scala.lang.resolve2

/**
 * @author Alexander Podkhalyuzin
 */
class OverloadingTest extends ResolveTestBase {
  override def folderPath: String = {
    super.folderPath + "overloading/"
  }

  def testAmbiguos(): Unit = doTest()
  def testCanConformsWeak(): Unit = doTest()
  def testCannotChoose(): Unit = doTest()
  def testCantConformsWeak(): Unit = doTest()
  def testDefault(): Unit = doTest()
  def testDefaultIgnored(): Unit = doTest()
  def testDerivedMoreSpecific(): Unit = doTest()
  def testDifferent(): Unit = doTest()
  def testFewWeakConforms(): Unit = doTest()
  def testImplicitApplied(): Unit = doTest()
  def testImplicitIgnored(): Unit = doTest()
  def testImplicitVSLiteralNarrowing(): Unit = doTest()
  def testImplicitVSValueDiscarding(): Unit = doTest()
  def testImplicitVSWeak(): Unit = doTest()
  def testMoreSpecificRight(): Unit = doTest()
  def testNaming(): Unit = doTest()
  def testNoLiteralNarrowing(): Unit = doTest()
  def testNoOveloadingScope(): Unit = doTest()
  def testNoVAlueDiscarding(): Unit = doTest()
  def testObjectFunction(): Unit = doTest()
  def testSameScoreForMoreSpecific(): Unit = doTest()
  def testSimple(): Unit = doTest()
  def testTooMuchImplicits(): Unit = doTest()
  def testWeakResolve(): Unit = doTest()
  def testWrong(): Unit = doTest()

  //SCL-12375
  def testApplyFromImplicit(): Unit = doTest()

  //SCL-12452
  def testApplyFromImplicit2(): Unit = doTest()
}
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolve2/OverloadingTest.scala
Scala
apache-2.0
1,461
/* Copyright 2013 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.mikegagnon.decl import org.junit.runner.RunWith import org.scalatest.FlatSpec import org.scalatest.junit.JUnitRunner import org.scalatest.matchers.ShouldMatchers import cascading.flow.FlowDef import com.twitter.algebird.Monoid import com.twitter.scalding._ import scala.collection.mutable.Buffer object TaskSetSpec { import Dsl._ import TDsl._ /** * Dummy features ************************************************************************************************/ case object UserId extends Feature[Long] case object ScreenName extends Feature[String] case object Bio extends Feature[String] // lists of UserIds. followers.size <= MaxFollowings case object Followers extends Feature[List[Long]] case object Followees extends Feature[List[Long]] case object Tweets extends FeatureWithReducer[List[String]] // The subset of tweets that are considered spam case object SpamTweets extends FeatureWithReducer[Set[String]] // the length of followers and followees, respectively case object NumFollowers extends Feature[Int] case object NumFollowees extends Feature[Int] // 1.0 if the user has a screenName, bio, and at least one tweet // 0.0 otherwise case object IsBusiness extends Feature[Double] // 1.0 if any of the user's tweets exactly match any keyword for Keywords // 0.0 otherwise case object IsSpam extends Feature[Double] // 1.0 if the user has at least one tweet, follower, and followee // 0.0 otherwise case object IsEngaged extends Feature[Double] // This feature is erroneous because it is non-reducible yet there are duplicate values for the // feature case object ErroneousFeature extends Feature[Double] // Error case: CycleFeature1 depends on CycleFeature2 and vice versa case object CycleFeature1 extends Feature[Double] case object CycleFeature2 extends Feature[Double] // No tasks compute this feature, so requesting this feature will result in an exception case object OrphanFeature extends Feature[Double] /** * Config values ************************************************************************************************/ case object MaxFollowings extends ConfigValue[Int] { override def init(args: Args) = args("MaxFollowings").toInt } case object Keywords extends ConfigValue[Set[String]] { override def init(args: Args) = args.list("Keywords").toSet } case object MaxTweetLen extends ConfigValue[Option[Int]] { override def init(args: Args) = args.optional("MaxTweetLen").map{ _.toInt } } /** * Load tasks ************************************************************************************************/ case object LoadUserTable extends LoadTask { override val inputConfig = Set[ConfigValue[_]]() override val outputFeatures = Set[FeatureBase[_]](UserId, ScreenName, Bio) override def load(config: ConfigMap)(implicit flowDef: FlowDef, mode: Mode) : TypedPipe[FeatureMap] = { TypedTsv[(Long, String, String)]("LoadUserTable.tsv") .map { case (userId, sn, bio) => FeatureMap( UserId -> userId, ScreenName -> sn, Bio -> bio) } } } case object LoadFollowings extends 
LoadTask { override val inputConfig = Set[ConfigValue[_]](MaxFollowings) override val outputFeatures = Set[FeatureBase[_]](UserId, Followees, Followers) override def load(config: ConfigMap)(implicit flowDef: FlowDef, mode: Mode) : TypedPipe[FeatureMap] = { val maxFollowings = config(MaxFollowings) TypedTsv[(Long, List[Long], List[Long])]("LoadFollowings.tsv") .map { case (userId, followees, followers) => FeatureMap( UserId -> userId, Followees -> followees.take(maxFollowings), Followers -> followers.take(maxFollowings)) } } } case object LoadTweets extends LoadTask { override val inputConfig = Set[ConfigValue[_]](MaxTweetLen) override val outputFeatures = Set[FeatureBase[_]](UserId, Tweets) override def load(config: ConfigMap)(implicit flowDef: FlowDef, mode: Mode) : TypedPipe[FeatureMap] = { val maxTweetLen = config(MaxTweetLen) TypedTsv[(Long, String)]("LoadTweets.tsv") .map { case (userId, tweet) => val tweetTrimmed = maxTweetLen match { case None => tweet case Some(len) => tweet.take(len) } FeatureMap( UserId -> userId, Tweets -> List(tweetTrimmed)) } } } case object LoadErroneousFeature extends LoadTask { override val inputConfig = Set[ConfigValue[_]]() override val outputFeatures = Set[FeatureBase[_]](UserId, ErroneousFeature) override def load(config: ConfigMap)(implicit flowDef: FlowDef, mode: Mode) : TypedPipe[FeatureMap] = { TypedTsv[(Long, Double)]("LoadErroneousFeature.tsv") .map { case (userId, num) => FeatureMap( UserId -> userId, ErroneousFeature -> num) } } } /** * Compute tasks ************************************************************************************************/ case object ComputeFollowCounts extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](Followees, Followers) override val outputFeatures = Set[FeatureBase[_]](NumFollowers, NumFollowees) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = { val numFollowees = features.get(Followees) .map{ followees => FeatureMap(NumFollowees -> followees.size) } .getOrElse(FeatureMap()) val numFollowers = features.get(Followers) .map{ followers => FeatureMap(NumFollowers -> followers.size) } .getOrElse(FeatureMap()) Monoid.plus(numFollowees, numFollowers) } } case object ComputeBusiness extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](Bio, ScreenName, Tweets) override val outputFeatures = Set[FeatureBase[_]](IsBusiness) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = { (features.get(Bio), features.get(ScreenName), features.get(Tweets)) match { case (Some(bio), Some(sn), Some(head :: tail)) => FeatureMap(IsBusiness -> 1.0) case _ => FeatureMap(IsBusiness -> 0.0) } } } case object ComputeIsSpam extends ComputeTask { override val inputConfig = Set[ConfigValue[_]](Keywords) override val inputFeatures = Set[FeatureBase[_]](Tweets) override val outputFeatures = Set[FeatureBase[_]](IsSpam) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = { features.get(Tweets) .map{ tweets => val matches = tweets.toSet & config(Keywords) if (matches.isEmpty) { FeatureMap(IsSpam -> 0.0) } else { FeatureMap(IsSpam -> 1.0) } } .getOrElse(FeatureMap(IsSpam -> 0.0)) } } class ComputeSpamTweets(keyword: String) extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](Tweets) override val outputFeatures = Set[FeatureBase[_]](SpamTweets) override def compute(features: 
FeatureMap, config: ConfigMap): FeatureMap = { features.get(Tweets) .map{ tweets => val spamTweets = tweets .toSet .filter(_ == keyword) FeatureMap(SpamTweets -> spamTweets) } .getOrElse(FeatureMap()) } } // these two tasks compute the same feature, but do it differently case object ComputeSpamTweets1 extends ComputeSpamTweets("spam1") case object ComputeSpamTweets2 extends ComputeSpamTweets("spam2") case object ComputeEngaged extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](Tweets, NumFollowers, NumFollowees) override val outputFeatures = Set[FeatureBase[_]](IsEngaged) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = { (features.get(Tweets), features.get(NumFollowers), features.get(NumFollowees)) match { case (Some(head :: tail), Some(numFollowers), Some(numFollowees)) => { if (numFollowers > 0 && numFollowees > 0) { FeatureMap(IsEngaged -> 1.0) } else { FeatureMap(IsEngaged -> 0.0) } } case _ => FeatureMap(IsEngaged -> 0.0) } } } case object ComputeCycle1 extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](CycleFeature2) override val outputFeatures = Set[FeatureBase[_]](CycleFeature1) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = FeatureMap() } case object ComputeCycle2 extends ComputeTask { override val inputConfig = Set[ConfigValue[_]]() override val inputFeatures = Set[FeatureBase[_]](CycleFeature1) override val outputFeatures = Set[FeatureBase[_]](CycleFeature2) override def compute(features: FeatureMap, config: ConfigMap): FeatureMap = FeatureMap() } /** * TaskSet ************************************************************************************************/ val dummyTasks = Set[Task]( LoadUserTable, LoadFollowings, LoadTweets, LoadErroneousFeature, ComputeFollowCounts, ComputeBusiness, ComputeIsSpam, ComputeEngaged, ComputeSpamTweets1, ComputeSpamTweets2, ComputeCycle1, ComputeCycle2) object Users extends TaskSet[Long] { override val groupByKey = UserId override val tasks = dummyTasks } /** * Dummy data ************************************************************************************************/ // userId, screenName, bio val userTableData: List[(Long, String, String)] = List( (1L, "foo", "bar"), (2L, "baz", "pickle")) // userId, followees, followers val followingsData: List[(Long, List[Long], List[Long])] = List( (1L, List(2L, 3L, 4L), List(4L, 5L, 6L)), (3L, List(1L, 2L), List(6L))) val tweetData: List[(Long, String)] = List( (1L, "spam1"), (1L, "spam2"), (1L, "c"), (2L, "spam2"), (2L, "pickle"), (3L, "bananas")) // This data is erroneous because user 1L has multiple records val erroneousFeatureData: List[(Long, Double)] = List( (1L, 1.0), (1L, 2.0), (2L, 3.0)) /** * Helper functions ************************************************************************************************/ def testJob[T: Manifest](jobName: String, args: Map[String, List[String]]): JobTest = { val jobTest = JobTest("com.mikegagnon.decl." 
+ jobName) .source(TypedTsv[(Long, String, String)]("LoadUserTable.tsv"), userTableData) .source(TypedTsv[(Long, List[Long], List[Long])]("LoadFollowings.tsv"), followingsData) .source(TypedTsv[(Long, String)]("LoadTweets.tsv"), tweetData) .source(TypedTsv[(Long, Double)]("LoadErroneousFeature.tsv"), erroneousFeatureData) args.foreach { case (key, value) => jobTest.arg(key, value) } jobTest } } /** * Scalding jobs **************************************************************************************************/ class ScreenNameJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, ScreenName) .write(TypedTsv[(Option[Long], Option[String])]("output.tsv")) } class TweetsJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, Tweets) .write(TypedTsv[(Option[Long], Option[List[String]])]("output.tsv")) } class FollowersScreenNameJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, Followers, ScreenName) .write(TypedTsv[(Option[Long], Option[List[Long]], Option[String])]("output.tsv")) } class ComputeNumFollowersJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, ScreenName, NumFollowers) .write(TypedTsv[(Option[Long], Option[String], Option[Int])]("output.tsv")) } class ComputeSpamTweetsJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, SpamTweets) .write(TypedTsv[(Option[Long], Option[Set[String]])]("output.tsv")) } class IsBusinessJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, IsBusiness) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } class IsSpamJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, IsSpam) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } class IsEngagedJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, IsEngaged) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } class ErroneousFeatureJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, ErroneousFeature) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } class ErroneousCycleJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, CycleFeature1) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } class ErroneousOrphanJob(args: Args) extends Job(args) { import TaskSetSpec._ Users.get(args, UserId, OrphanFeature) .write(TypedTsv[(Option[Long], Option[Double])]("output.tsv")) } /** * Tests **************************************************************************************************/ @RunWith(classOf[JUnitRunner]) class TaskSetSpec extends FlatSpec with ShouldMatchers { import Dsl._ import TaskSetSpec._ /** * TaskSet.initConfig ************************************************************************************************/ "TaskSet.initConfig" should "initialize MaxTweetLen to None when args is empty" in { TaskSet.initConfig( Args(""), loadTasks=Set(LoadTweets), computeTasks=Nil) should equal (ConfigMap(MaxTweetLen -> None)) } it should "initialize MaxTweetLen to correct value when args contains the value" in { TaskSet.initConfig( Args("--MaxTweetLen 140"), loadTasks=Set(LoadTweets), computeTasks=Nil) should equal (ConfigMap(MaxTweetLen -> Some(140))) } it should "initialize Keywords and MaxTweetLen to empty value when args is empty" in { TaskSet.initConfig( Args(""), loadTasks=Set(LoadTweets), computeTasks=List(ComputeIsSpam)) should equal(ConfigMap( MaxTweetLen -> None, 
Keywords -> Set[String]())) } it should "initialize Keywords to correct value when args contains the value" in { TaskSet.initConfig( Args("--Keywords a b c"), loadTasks=Set(LoadTweets), computeTasks=List(ComputeIsSpam)) should equal(ConfigMap( MaxTweetLen -> None, Keywords -> Set("a", "b", "c"))) } it should "throw exception if MaxFollowings is needed but args is empty" in { evaluating { TaskSet.initConfig( Args(""), loadTasks=Set(LoadFollowings), computeTasks=Nil) } should produce [RuntimeException] } it should "initialize MaxFollowings to correct value when args contains the value" in { TaskSet.initConfig( Args("--MaxFollowings 7"), loadTasks=Set(LoadFollowings), computeTasks=Nil) should equal(ConfigMap(MaxFollowings -> 7)) } /** * TaskSet.scheduleTasks ************************************************************************************************/ "TaskSet.scheduleTasks" should "schedule a single load task if that is all that is needed" in { TaskSet.scheduleTasks(dummyTasks, Set(ScreenName)) should equal ( Set(LoadUserTable), Nil) } it should "schedule a single load task if that is all that is needed for multiple features" in { TaskSet.scheduleTasks(dummyTasks, Set(ScreenName, Bio)) should equal ( Set(LoadUserTable), Nil) } it should "schedule load and compute tasks if a single computed feature is needed" in { TaskSet.scheduleTasks(dummyTasks, Set(IsBusiness)) should equal ( Set(LoadUserTable, LoadTweets), List(ComputeBusiness)) } it should "schedule __ordered__ compute tasks if one computed feature depends on another" in { TaskSet.scheduleTasks(dummyTasks, Set(IsEngaged)) should equal ( Set(LoadFollowings, LoadTweets), List(ComputeFollowCounts, ComputeEngaged)) } it should "schedule many tasks if many features are needed" in { val result = TaskSet.scheduleTasks(dummyTasks, Set(IsEngaged, IsBusiness)) val expectedLoadTasks = Set[LoadTask](LoadUserTable, LoadFollowings, LoadTweets) /** * computeFollowCounts, computeEngaged must be ordered relative to each other * computeBusiness can appear anywhere */ val possibleResults = Set[(Set[LoadTask], List[ComputeTask])] ( (expectedLoadTasks, List(ComputeBusiness, ComputeFollowCounts, ComputeEngaged)), (expectedLoadTasks, List(ComputeFollowCounts, ComputeBusiness, ComputeEngaged)), (expectedLoadTasks, List(ComputeFollowCounts, ComputeEngaged, ComputeBusiness))) assert(possibleResults.contains(result)) } /** * TaskSet.get ************************************************************************************************/ "ScreenNameJob" should "fetch two features from a single source" in { type OutputTuple = (Option[Long], Option[String]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some("foo")), (Some(2L), Some("baz"))) testJob("ScreenNameJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "TweetsJob" should "reduce multiple tweet records into a single Tweets feature and use a configuration value" in { type OutputTuple = (Option[Long], Option[List[String]]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some(List("s", "s", "c"))), (Some(2L), Some(List("s", "p"))), (Some(3L), Some(List("b")))) testJob( jobName ="TweetsJob", args = Map("MaxTweetLen" -> List("1"))) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "FollowersScreenNameJob" should "fetch and join three features from multiple sources" in { type OutputTuple = (Option[Long], Option[List[Long]], Option[String]) 
val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some(List(4L, 5L)), Some("foo")), (Some(2L), None, Some("baz")), (Some(3L), Some(List(6L)), None)) testJob( jobName = "FollowersScreenNameJob", args = Map("MaxFollowings" -> List("2"))) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "ComputeNumFollowersJob" should "compute a simple feature" in { type OutputTuple = (Option[Long], Option[String], Option[Int]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some("foo"), Some(3)), (Some(2L), Some("baz"), None), (Some(3L), None, Some(1))) testJob( jobName = "ComputeNumFollowersJob", args = Map("MaxFollowings" -> List("10"))) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "ComputeSpamTweetsJob" should ("yield a single feature that is computed by two distinct compute tasks, " + "and reduced via the feature's semigroup") in { type OutputTuple = (Option[Long], Option[Set[String]]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some(Set("spam1", "spam2"))), (Some(2L), Some(Set("spam2"))), (Some(3L), Some(Set()))) testJob(jobName = "ComputeSpamTweetsJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "IsBusinessJob" should "compute a feature that is derived from multiple sources" in { type OutputTuple = (Option[Long], Option[Double]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some(1.0)), (Some(2L), Some(1.0)), (Some(3L), Some(0.0))) testJob(jobName = "IsBusinessJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } "IsEngagedJob" should "compute a feature that as function of other computed features and a loaded feature" in { type OutputTuple = (Option[Long], Option[Double]) val expectedOutput: Set[OutputTuple] = Set( (Some(1L), Some(1.0)), (Some(2L), Some(0.0)), (Some(3L), Some(1.0))) testJob( jobName = "IsEngagedJob", args = Map("MaxFollowings" -> List("10"))) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (expectedOutput) } .run .finish } /** * calls to TaskSet.get that raise an exception. * TODO: is there a way to silence the error messages caused by these test cases? 
************************************************************************************************/ "ErroneousFeatureJob" should "throw a FlowException caused by a DuplicateFeatureException exception" in { type OutputTuple = (Option[Long], Option[Double]) evaluating { testJob( jobName = "ErroneousFeatureJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (Set()) } .run .finish } should produce [cascading.flow.FlowException] } "ErroneousCycleJob" should "throw an InvocationTargetException caused by a CycleException exception" in { type OutputTuple = (Option[Long], Option[Double]) evaluating { testJob( jobName = "ErroneousCycleJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (Set()) } .run .finish } should produce [java.lang.reflect.InvocationTargetException] } "ComputeNumFollowersJob" should "throw an InvocationTargetException if MaxFollowings isn't specified" in { type OutputTuple = (Option[Long], Option[String], Option[Int]) evaluating { testJob( jobName = "ComputeNumFollowersJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (Set()) } .run .finish } should produce [java.lang.reflect.InvocationTargetException] } "ErroneousOrphanJob" should "throw an InvocationTargetException caused by a NoLoadTasksException exception" in { type OutputTuple = (Option[Long], Option[Double]) evaluating { testJob( jobName = "ErroneousOrphanJob", args = Map()) .sink[OutputTuple](TypedTsv[OutputTuple]("output.tsv")) { buf => buf.toSet should equal (Set()) } .run .finish } should produce [java.lang.reflect.InvocationTargetException] } }
mikegagnon/decl
src/test/scala/com/mikegagnon/decl/TaskSetSpec.scala
Scala
apache-2.0
24,124
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.joins import java.io._ import com.esotericsoftware.kryo.{Kryo, KryoSerializable} import com.esotericsoftware.kryo.io.{Input, Output} import org.apache.spark.{SparkConf, SparkEnv, SparkException} import org.apache.spark.internal.config.{BUFFER_PAGESIZE, MEMORY_OFFHEAP_ENABLED} import org.apache.spark.memory._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical.BroadcastMode import org.apache.spark.sql.types.LongType import org.apache.spark.unsafe.Platform import org.apache.spark.unsafe.map.BytesToBytesMap import org.apache.spark.util.{KnownSizeEstimation, Utils} /** * Interface for a hashed relation by some key. Use [[HashedRelation.apply]] to create a concrete * object. */ private[execution] sealed trait HashedRelation extends KnownSizeEstimation { /** * Returns matched rows. * * Returns null if there is no matched rows. */ def get(key: InternalRow): Iterator[InternalRow] /** * Returns matched rows for a key that has only one column with LongType. * * Returns null if there is no matched rows. */ def get(key: Long): Iterator[InternalRow] = { throw new UnsupportedOperationException } /** * Returns the matched single row. */ def getValue(key: InternalRow): InternalRow /** * Returns the matched single row with key that have only one column of LongType. */ def getValue(key: Long): InternalRow = { throw new UnsupportedOperationException } /** * Returns an iterator for key index and matched rows. * * Returns null if there is no matched rows. */ def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = { throw new UnsupportedOperationException } /** * Returns key index and matched single row. * This is for unique key case. * * Returns null if there is no matched rows. */ def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = { throw new UnsupportedOperationException } /** * Returns an iterator for keys index and rows of InternalRow type. */ def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = { throw new UnsupportedOperationException } /** * Returns the maximum number of allowed keys index. */ def maxNumKeysIndex: Int = { throw new UnsupportedOperationException } /** * Returns true iff all the keys are unique. */ def keyIsUnique: Boolean /** * Returns an iterator for keys of InternalRow type. */ def keys(): Iterator[InternalRow] /** * Returns a read-only copy of this, to be safely used in current thread. */ def asReadOnlyCopy(): HashedRelation /** * Release any used resources. */ def close(): Unit } private[execution] object HashedRelation { /** * Create a HashedRelation from an Iterator of InternalRow. * * @param allowsNullKey Allow NULL keys in HashedRelation. 
* This is used for full outer join in `ShuffledHashJoinExec` only. */ def apply( input: Iterator[InternalRow], key: Seq[Expression], sizeEstimate: Int = 64, taskMemoryManager: TaskMemoryManager = null, isNullAware: Boolean = false, allowsNullKey: Boolean = false): HashedRelation = { val mm = Option(taskMemoryManager).getOrElse { new TaskMemoryManager( new UnifiedMemoryManager( new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"), Long.MaxValue, Long.MaxValue / 2, 1), 0) } if (!input.hasNext && !allowsNullKey) { EmptyHashedRelation } else if (key.length == 1 && key.head.dataType == LongType && !allowsNullKey) { // NOTE: LongHashedRelation does not support NULL keys. LongHashedRelation(input, key, sizeEstimate, mm, isNullAware) } else { UnsafeHashedRelation(input, key, sizeEstimate, mm, isNullAware, allowsNullKey) } } } /** * A wrapper for key index and value in InternalRow type. * Designed to be instantiated once per thread and reused. */ private[execution] class ValueRowWithKeyIndex { private var keyIndex: Int = _ private var value: InternalRow = _ /** Updates this ValueRowWithKeyIndex by updating its key index. Returns itself. */ def withNewKeyIndex(newKeyIndex: Int): ValueRowWithKeyIndex = { keyIndex = newKeyIndex this } /** Updates this ValueRowWithKeyIndex by updating its value. Returns itself. */ def withNewValue(newValue: InternalRow): ValueRowWithKeyIndex = { value = newValue this } /** Updates this ValueRowWithKeyIndex. Returns itself. */ def update(newKeyIndex: Int, newValue: InternalRow): ValueRowWithKeyIndex = { keyIndex = newKeyIndex value = newValue this } def getKeyIndex: Int = { keyIndex } def getValue: InternalRow = { value } } /** * A HashedRelation for UnsafeRow, which is backed BytesToBytesMap. * * It's serialized in the following format: * [number of keys] * [size of key] [size of value] [key bytes] [bytes for value] */ private[joins] class UnsafeHashedRelation( private var numKeys: Int, private var numFields: Int, private var binaryMap: BytesToBytesMap) extends HashedRelation with Externalizable with KryoSerializable { private[joins] def this() = this(0, 0, null) // Needed for serialization override def keyIsUnique: Boolean = binaryMap.numKeys() == binaryMap.numValues() override def asReadOnlyCopy(): UnsafeHashedRelation = { new UnsafeHashedRelation(numKeys, numFields, binaryMap) } override def estimatedSize: Long = binaryMap.getTotalMemoryConsumption // re-used in get()/getValue()/getWithKeyIndex()/getValueWithKeyIndex()/valuesWithKeyIndex() var resultRow = new UnsafeRow(numFields) // re-used in getWithKeyIndex()/getValueWithKeyIndex()/valuesWithKeyIndex() var valueRowWithKeyIndex = new ValueRowWithKeyIndex override def get(key: InternalRow): Iterator[InternalRow] = { val unsafeKey = key.asInstanceOf[UnsafeRow] val map = binaryMap // avoid the compiler error val loc = new map.Location // this could be allocated in stack binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset, unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode()) if (loc.isDefined) { new Iterator[UnsafeRow] { private var _hasNext = true override def hasNext: Boolean = _hasNext override def next(): UnsafeRow = { resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength) _hasNext = loc.nextValue() resultRow } } } else { null } } def getValue(key: InternalRow): InternalRow = { val unsafeKey = key.asInstanceOf[UnsafeRow] val map = binaryMap // avoid the compiler error val loc = new map.Location // this could be allocated in stack 
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset, unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode()) if (loc.isDefined) { resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength) resultRow } else { null } } override def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = { val unsafeKey = key.asInstanceOf[UnsafeRow] val map = binaryMap // avoid the compiler error val loc = new map.Location // this could be allocated in stack binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset, unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode()) if (loc.isDefined) { valueRowWithKeyIndex.withNewKeyIndex(loc.getKeyIndex) new Iterator[ValueRowWithKeyIndex] { private var _hasNext = true override def hasNext: Boolean = _hasNext override def next(): ValueRowWithKeyIndex = { resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength) _hasNext = loc.nextValue() valueRowWithKeyIndex.withNewValue(resultRow) } } } else { null } } override def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = { val unsafeKey = key.asInstanceOf[UnsafeRow] val map = binaryMap // avoid the compiler error val loc = new map.Location // this could be allocated in stack binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset, unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode()) if (loc.isDefined) { resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength) valueRowWithKeyIndex.update(loc.getKeyIndex, resultRow) } else { null } } override def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = { val iter = binaryMap.iteratorWithKeyIndex() new Iterator[ValueRowWithKeyIndex] { override def hasNext: Boolean = iter.hasNext override def next(): ValueRowWithKeyIndex = { if (!hasNext) { throw new NoSuchElementException("End of the iterator") } val loc = iter.next() resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength) valueRowWithKeyIndex.update(loc.getKeyIndex, resultRow) } } } override def maxNumKeysIndex: Int = { binaryMap.maxNumKeysIndex } override def keys(): Iterator[InternalRow] = { val iter = binaryMap.iterator() new Iterator[InternalRow] { val unsafeRow = new UnsafeRow(numKeys) override def hasNext: Boolean = { iter.hasNext } override def next(): InternalRow = { if (!hasNext) { throw new NoSuchElementException("End of the iterator") } else { val loc = iter.next() unsafeRow.pointTo(loc.getKeyBase, loc.getKeyOffset, loc.getKeyLength) unsafeRow } } } } override def close(): Unit = { binaryMap.free() } override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException { write(out.writeInt, out.writeLong, out.write) } override def write(kryo: Kryo, out: Output): Unit = Utils.tryOrIOException { write(out.writeInt, out.writeLong, out.write) } private def write( writeInt: (Int) => Unit, writeLong: (Long) => Unit, writeBuffer: (Array[Byte], Int, Int) => Unit) : Unit = { writeInt(numFields) // TODO: move these into BytesToBytesMap writeLong(binaryMap.numKeys()) writeLong(binaryMap.numValues()) var buffer = new Array[Byte](64) def write(base: Object, offset: Long, length: Int): Unit = { if (buffer.length < length) { buffer = new Array[Byte](length) } Platform.copyMemory(base, offset, buffer, Platform.BYTE_ARRAY_OFFSET, length) writeBuffer(buffer, 0, length) } val iter = binaryMap.iterator() while (iter.hasNext) { val loc = iter.next() // [key size] [values size] [key bytes] [value bytes] writeInt(loc.getKeyLength) writeInt(loc.getValueLength) write(loc.getKeyBase, loc.getKeyOffset, 
loc.getKeyLength) write(loc.getValueBase, loc.getValueOffset, loc.getValueLength) } } override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException { read(() => in.readInt(), () => in.readLong(), in.readFully) } private def read( readInt: () => Int, readLong: () => Long, readBuffer: (Array[Byte], Int, Int) => Unit): Unit = { numFields = readInt() resultRow = new UnsafeRow(numFields) val nKeys = readLong() val nValues = readLong() // This is used in Broadcast, shared by multiple tasks, so we use on-heap memory // TODO(josh): This needs to be revisited before we merge this patch; making this change now // so that tests compile: val taskMemoryManager = new TaskMemoryManager( new UnifiedMemoryManager( new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"), Long.MaxValue, Long.MaxValue / 2, 1), 0) val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes) .getOrElse(new SparkConf().get(BUFFER_PAGESIZE).getOrElse(16L * 1024 * 1024)) // TODO(josh): We won't need this dummy memory manager after future refactorings; revisit // during code review binaryMap = new BytesToBytesMap( taskMemoryManager, (nKeys * 1.5 + 1).toInt, // reduce hash collision pageSizeBytes) var i = 0 var keyBuffer = new Array[Byte](1024) var valuesBuffer = new Array[Byte](1024) while (i < nValues) { val keySize = readInt() val valuesSize = readInt() if (keySize > keyBuffer.length) { keyBuffer = new Array[Byte](keySize) } readBuffer(keyBuffer, 0, keySize) if (valuesSize > valuesBuffer.length) { valuesBuffer = new Array[Byte](valuesSize) } readBuffer(valuesBuffer, 0, valuesSize) val loc = binaryMap.lookup(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize) val putSuceeded = loc.append(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize, valuesBuffer, Platform.BYTE_ARRAY_OFFSET, valuesSize) if (!putSuceeded) { binaryMap.free() throw new IOException("Could not allocate memory to grow BytesToBytesMap") } i += 1 } } override def read(kryo: Kryo, in: Input): Unit = Utils.tryOrIOException { read(() => in.readInt(), () => in.readLong(), in.readBytes) } } private[joins] object UnsafeHashedRelation { def apply( input: Iterator[InternalRow], key: Seq[Expression], sizeEstimate: Int, taskMemoryManager: TaskMemoryManager, isNullAware: Boolean = false, allowsNullKey: Boolean = false): HashedRelation = { require(!(isNullAware && allowsNullKey), "isNullAware and allowsNullKey cannot be enabled at same time") val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes) .getOrElse(new SparkConf().get(BUFFER_PAGESIZE).getOrElse(16L * 1024 * 1024)) val binaryMap = new BytesToBytesMap( taskMemoryManager, // Only 70% of the slots can be used before growing, more capacity help to reduce collision (sizeEstimate * 1.5 + 1).toInt, pageSizeBytes) // Create a mapping of buildKeys -> rows val keyGenerator = UnsafeProjection.create(key) var numFields = 0 while (input.hasNext) { val row = input.next().asInstanceOf[UnsafeRow] numFields = row.numFields() val key = keyGenerator(row) if (!key.anyNull || allowsNullKey) { val loc = binaryMap.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes) val success = loc.append( key.getBaseObject, key.getBaseOffset, key.getSizeInBytes, row.getBaseObject, row.getBaseOffset, row.getSizeInBytes) if (!success) { binaryMap.free() // scalastyle:off throwerror throw new SparkOutOfMemoryError("There is not enough memory to build hash map") // scalastyle:on throwerror } } else if (isNullAware) { return HashedRelationWithAllNullKeys } } new UnsafeHashedRelation(key.size, numFields, 
binaryMap) } } /** * An append-only hash map mapping from key of Long to UnsafeRow. * * The underlying bytes of all values (UnsafeRows) are packed together as a single byte array * (`page`) in this format: * * [bytes of row1][address1][bytes of row2][address1] ... * * address1 (8 bytes) is the offset and size of next value for the same key as row1, any key * could have multiple values. the address at the end of last value for every key is 0. * * The keys and addresses of their values could be stored in two modes: * * 1) sparse mode: the keys and addresses are stored in `array` as: * * [key1][address1][key2][address2]...[] * * address1 (Long) is the offset (in `page`) and size of the value for key1. The position of key1 * is determined by `key1 % cap`. Quadratic probing with triangular numbers is used to address * hash collision. * * 2) dense mode: all the addresses are packed into a single array of long, as: * * [address1] [address2] ... * * address1 (Long) is the offset (in `page`) and size of the value for key1, the position is * determined by `key1 - minKey`. * * The map is created as sparse mode, then key-value could be appended into it. Once finish * appending, caller could call optimize() to try to turn the map into dense mode, which is faster * to probe. * * see http://java-performance.info/implementing-world-fastest-java-int-to-int-hash-map/ */ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, capacity: Int) extends MemoryConsumer(mm) with Externalizable with KryoSerializable { // Whether the keys are stored in dense mode or not. private var isDense = false // The minimum key private var minKey = Long.MaxValue // The maximum key private var maxKey = Long.MinValue // The array to store the key and offset of UnsafeRow in the page. // // Sparse mode: [key1] [offset1 | size1] [key2] [offset | size2] ... // Dense mode: [offset1 | size1] [offset2 | size2] private var array: Array[Long] = null private var mask: Int = 0 // The page to store all bytes of UnsafeRow and the pointer to next rows. // [row1][pointer1] [row2][pointer2] private var page: Array[Long] = null // Current write cursor in the page. private var cursor: Long = Platform.LONG_ARRAY_OFFSET // The number of bits for size in address private val SIZE_BITS = 28 private val SIZE_MASK = 0xfffffff // The total number of values of all keys. private var numValues = 0L // The number of unique keys. private var numKeys = 0L // needed by serializer def this() = { this( new TaskMemoryManager( new UnifiedMemoryManager( new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"), Long.MaxValue, Long.MaxValue / 2, 1), 0), 0) } private def ensureAcquireMemory(size: Long): Unit = { // do not support spilling val got = acquireMemory(size) if (got < size) { freeMemory(got) throw new SparkException(s"Can't acquire $size bytes memory to build hash relation, " + s"got $got bytes") } } private def init(): Unit = { if (mm != null) { require(capacity < 512000000, "Cannot broadcast 512 million or more rows") var n = 1 while (n < capacity) n *= 2 ensureAcquireMemory(n * 2L * 8 + (1 << 20)) array = new Array[Long](n * 2) mask = n * 2 - 2 page = new Array[Long](1 << 17) // 1M bytes } } init() def spill(size: Long, trigger: MemoryConsumer): Long = 0L /** * Returns whether all the keys are unique. */ def keyIsUnique: Boolean = numKeys == numValues /** * Returns total memory consumption. 
*/ def getTotalMemoryConsumption: Long = array.length * 8L + page.length * 8L /** * Returns the first slot of array that store the keys (sparse mode). */ private def firstSlot(key: Long): Int = { val h = key * 0x9E3779B9L (h ^ (h >> 32)).toInt & mask } /** * Returns the next probe in the array. */ private def nextSlot(pos: Int): Int = (pos + 2) & mask private[this] def toAddress(offset: Long, size: Int): Long = { ((offset - Platform.LONG_ARRAY_OFFSET) << SIZE_BITS) | size } private[this] def toOffset(address: Long): Long = { (address >>> SIZE_BITS) + Platform.LONG_ARRAY_OFFSET } private[this] def toSize(address: Long): Int = { (address & SIZE_MASK).toInt } private def getRow(address: Long, resultRow: UnsafeRow): UnsafeRow = { resultRow.pointTo(page, toOffset(address), toSize(address)) resultRow } /** * Returns the single UnsafeRow for given key, or null if not found. */ def getValue(key: Long, resultRow: UnsafeRow): UnsafeRow = { if (isDense) { if (key >= minKey && key <= maxKey) { val value = array((key - minKey).toInt) if (value > 0) { return getRow(value, resultRow) } } } else { var pos = firstSlot(key) while (array(pos + 1) != 0) { if (array(pos) == key) { return getRow(array(pos + 1), resultRow) } pos = nextSlot(pos) } } null } /** * Returns an iterator of UnsafeRow for multiple linked values. */ private def valueIter(address: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = { new Iterator[UnsafeRow] { var addr = address override def hasNext: Boolean = addr != 0 override def next(): UnsafeRow = { val offset = toOffset(addr) val size = toSize(addr) resultRow.pointTo(page, offset, size) addr = Platform.getLong(page, offset + size) resultRow } } } /** * Returns an iterator for all the values for the given key, or null if no value found. */ def get(key: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = { if (isDense) { if (key >= minKey && key <= maxKey) { val value = array((key - minKey).toInt) if (value > 0) { return valueIter(value, resultRow) } } } else { var pos = firstSlot(key) while (array(pos + 1) != 0) { if (array(pos) == key) { return valueIter(array(pos + 1), resultRow) } pos = nextSlot(pos) } } null } /** * Builds an iterator on a sparse array. */ def keys(): Iterator[InternalRow] = { val row = new GenericInternalRow(1) // a) in dense mode the array stores the address // => (k, v) = (minKey + index, array(index)) // b) in sparse mode the array stores both the key and the address // => (k, v) = (array(index), array(index+1)) new Iterator[InternalRow] { // cursor that indicates the position of the next key which was not read by a next() call var pos = 0 // when we iterate in dense mode we need to jump two positions at a time val step = if (isDense) 0 else 1 override def hasNext: Boolean = { // go to the next key if the current key slot is empty while (pos + step < array.length) { if (array(pos + step) > 0) { return true } pos += step + 1 } false } override def next(): InternalRow = { if (!hasNext) { throw new NoSuchElementException("End of the iterator") } else { // the key is retrieved based on the map mode val ret = if (isDense) minKey + pos else array(pos) // advance the cursor to the next index pos += step + 1 row.setLong(0, ret) row } } } } /** * Appends the key and row into this map. 
*/ def append(key: Long, row: UnsafeRow): Unit = { val sizeInBytes = row.getSizeInBytes if (sizeInBytes >= (1 << SIZE_BITS)) { throw new UnsupportedOperationException("Does not support row that is larger than 256M") } if (key < minKey) { minKey = key } if (key > maxKey) { maxKey = key } grow(row.getSizeInBytes) // copy the bytes of UnsafeRow val offset = cursor Platform.copyMemory(row.getBaseObject, row.getBaseOffset, page, cursor, row.getSizeInBytes) cursor += row.getSizeInBytes Platform.putLong(page, cursor, 0) cursor += 8 numValues += 1 updateIndex(key, toAddress(offset, row.getSizeInBytes)) } /** * Update the address in array for given key. */ private def updateIndex(key: Long, address: Long): Unit = { var pos = firstSlot(key) assert(numKeys < array.length / 2) while (array(pos) != key && array(pos + 1) != 0) { pos = nextSlot(pos) } if (array(pos + 1) == 0) { // this is the first value for this key, put the address in array. array(pos) = key array(pos + 1) = address numKeys += 1 if (numKeys * 4 > array.length) { // reach half of the capacity if (array.length < (1 << 30)) { // Cannot allocate an array with 2G elements growArray() } else if (numKeys > array.length / 2 * 0.75) { // The fill ratio should be less than 0.75 throw new UnsupportedOperationException( "Cannot build HashedRelation with more than 1/3 billions unique keys") } } } else { // there are some values for this key, put the address in the front of them. val pointer = toOffset(address) + toSize(address) Platform.putLong(page, pointer, array(pos + 1)) array(pos + 1) = address } } private def grow(inputRowSize: Int): Unit = { // There is 8 bytes for the pointer to next value val neededNumWords = (cursor - Platform.LONG_ARRAY_OFFSET + 8 + inputRowSize + 7) / 8 if (neededNumWords > page.length) { if (neededNumWords > (1 << 30)) { throw new UnsupportedOperationException( "Can not build a HashedRelation that is larger than 8G") } val newNumWords = math.max(neededNumWords, math.min(page.length * 2, 1 << 30)) ensureAcquireMemory(newNumWords * 8L) val newPage = new Array[Long](newNumWords.toInt) Platform.copyMemory(page, Platform.LONG_ARRAY_OFFSET, newPage, Platform.LONG_ARRAY_OFFSET, cursor - Platform.LONG_ARRAY_OFFSET) val used = page.length page = newPage freeMemory(used * 8L) } } private def growArray(): Unit = { var old_array = array val n = array.length numKeys = 0 ensureAcquireMemory(n * 2 * 8L) array = new Array[Long](n * 2) mask = n * 2 - 2 var i = 0 while (i < old_array.length) { if (old_array(i + 1) > 0) { updateIndex(old_array(i), old_array(i + 1)) } i += 2 } old_array = null // release the reference to old array freeMemory(n * 8L) } /** * Try to turn the map into dense mode, which is faster to probe. */ def optimize(): Unit = { val range = maxKey - minKey // Convert to dense mode if it does not require more memory or could fit within L1 cache // SPARK-16740: Make sure range doesn't overflow if minKey has a large negative value if (range >= 0 && (range < array.length || range < 1024)) { try { ensureAcquireMemory((range + 1) * 8L) } catch { case e: SparkException => // there is no enough memory to convert return } val denseArray = new Array[Long]((range + 1).toInt) var i = 0 while (i < array.length) { if (array(i + 1) > 0) { val idx = (array(i) - minKey).toInt denseArray(idx) = array(i + 1) } i += 2 } val old_length = array.length array = denseArray isDense = true freeMemory(old_length * 8L) } } /** * Free all the memory acquired by this map. 
*/ def free(): Unit = { if (page != null) { freeMemory(page.length * 8L) page = null } if (array != null) { freeMemory(array.length * 8L) array = null } } private def writeLongArray( writeBuffer: (Array[Byte], Int, Int) => Unit, arr: Array[Long], len: Int): Unit = { val buffer = new Array[Byte](4 << 10) var offset: Long = Platform.LONG_ARRAY_OFFSET val end = len * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { val size = Math.min(buffer.length, end - offset) Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size) writeBuffer(buffer, 0, size.toInt) offset += size } } private def write( writeBoolean: (Boolean) => Unit, writeLong: (Long) => Unit, writeBuffer: (Array[Byte], Int, Int) => Unit): Unit = { writeBoolean(isDense) writeLong(minKey) writeLong(maxKey) writeLong(numKeys) writeLong(numValues) writeLong(array.length) writeLongArray(writeBuffer, array, array.length) val used = ((cursor - Platform.LONG_ARRAY_OFFSET) / 8).toInt writeLong(used) writeLongArray(writeBuffer, page, used) } override def writeExternal(output: ObjectOutput): Unit = { write(output.writeBoolean, output.writeLong, output.write) } override def write(kryo: Kryo, out: Output): Unit = { write(out.writeBoolean, out.writeLong, out.write) } private def readLongArray( readBuffer: (Array[Byte], Int, Int) => Unit, length: Int): Array[Long] = { val array = new Array[Long](length) val buffer = new Array[Byte](4 << 10) var offset: Long = Platform.LONG_ARRAY_OFFSET val end = length * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { val size = Math.min(buffer.length, end - offset) readBuffer(buffer, 0, size.toInt) Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size) offset += size } array } private def read( readBoolean: () => Boolean, readLong: () => Long, readBuffer: (Array[Byte], Int, Int) => Unit): Unit = { isDense = readBoolean() minKey = readLong() maxKey = readLong() numKeys = readLong() numValues = readLong() val length = readLong().toInt mask = length - 2 array = readLongArray(readBuffer, length) val pageLength = readLong().toInt page = readLongArray(readBuffer, pageLength) // Restore cursor variable to make this map able to be serialized again on executors. 
cursor = pageLength * 8 + Platform.LONG_ARRAY_OFFSET } override def readExternal(in: ObjectInput): Unit = { read(() => in.readBoolean(), () => in.readLong(), in.readFully) } override def read(kryo: Kryo, in: Input): Unit = { read(() => in.readBoolean(), () => in.readLong(), in.readBytes) } } class LongHashedRelation( private var nFields: Int, private var map: LongToUnsafeRowMap) extends HashedRelation with Externalizable { private var resultRow: UnsafeRow = new UnsafeRow(nFields) // Needed for serialization (it is public to make Java serialization work) def this() = this(0, null) override def asReadOnlyCopy(): LongHashedRelation = new LongHashedRelation(nFields, map) override def estimatedSize: Long = map.getTotalMemoryConsumption override def get(key: InternalRow): Iterator[InternalRow] = { if (key.isNullAt(0)) { null } else { get(key.getLong(0)) } } override def getValue(key: InternalRow): InternalRow = { if (key.isNullAt(0)) { null } else { getValue(key.getLong(0)) } } override def get(key: Long): Iterator[InternalRow] = map.get(key, resultRow) override def getValue(key: Long): InternalRow = map.getValue(key, resultRow) override def keyIsUnique: Boolean = map.keyIsUnique override def close(): Unit = { map.free() } override def writeExternal(out: ObjectOutput): Unit = { out.writeInt(nFields) out.writeObject(map) } override def readExternal(in: ObjectInput): Unit = { nFields = in.readInt() resultRow = new UnsafeRow(nFields) map = in.readObject().asInstanceOf[LongToUnsafeRowMap] } /** * Returns an iterator for keys of InternalRow type. */ override def keys(): Iterator[InternalRow] = map.keys() override def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = { throw new UnsupportedOperationException } override def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = { throw new UnsupportedOperationException } override def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = { throw new UnsupportedOperationException } override def maxNumKeysIndex: Int = { throw new UnsupportedOperationException } } /** * Create hashed relation with key that is long. */ private[joins] object LongHashedRelation { def apply( input: Iterator[InternalRow], key: Seq[Expression], sizeEstimate: Int, taskMemoryManager: TaskMemoryManager, isNullAware: Boolean = false): HashedRelation = { val map = new LongToUnsafeRowMap(taskMemoryManager, sizeEstimate) val keyGenerator = UnsafeProjection.create(key) // Create a mapping of key -> rows var numFields = 0 while (input.hasNext) { val unsafeRow = input.next().asInstanceOf[UnsafeRow] numFields = unsafeRow.numFields() val rowKey = keyGenerator(unsafeRow) if (!rowKey.isNullAt(0)) { val key = rowKey.getLong(0) map.append(key, unsafeRow) } else if (isNullAware) { return HashedRelationWithAllNullKeys } } map.optimize() new LongHashedRelation(numFields, map) } } /** * A special HashedRelation indicating that it's built from a empty input:Iterator[InternalRow]. * get & getValue will return null just like * empty LongHashedRelation or empty UnsafeHashedRelation does. 
*/ case object EmptyHashedRelation extends HashedRelation { override def get(key: Long): Iterator[InternalRow] = null override def get(key: InternalRow): Iterator[InternalRow] = null override def getValue(key: Long): InternalRow = null override def getValue(key: InternalRow): InternalRow = null override def asReadOnlyCopy(): EmptyHashedRelation.type = this override def keyIsUnique: Boolean = true override def keys(): Iterator[InternalRow] = { throw new UnsupportedOperationException } override def close(): Unit = {} override def estimatedSize: Long = 0 } /** * A special HashedRelation indicating that it's built from a non-empty input:Iterator[InternalRow] * with all the keys to be null. */ case object HashedRelationWithAllNullKeys extends HashedRelation { override def get(key: InternalRow): Iterator[InternalRow] = { throw new UnsupportedOperationException } override def getValue(key: InternalRow): InternalRow = { throw new UnsupportedOperationException } override def asReadOnlyCopy(): HashedRelationWithAllNullKeys.type = this override def keyIsUnique: Boolean = true override def keys(): Iterator[InternalRow] = { throw new UnsupportedOperationException } override def close(): Unit = {} override def estimatedSize: Long = 0 } /** The HashedRelationBroadcastMode requires that rows are broadcasted as a HashedRelation. */ case class HashedRelationBroadcastMode(key: Seq[Expression], isNullAware: Boolean = false) extends BroadcastMode { override def transform(rows: Array[InternalRow]): HashedRelation = { transform(rows.iterator, Some(rows.length)) } override def transform( rows: Iterator[InternalRow], sizeHint: Option[Long]): HashedRelation = { sizeHint match { case Some(numRows) => HashedRelation(rows, canonicalized.key, numRows.toInt, isNullAware = isNullAware) case None => HashedRelation(rows, canonicalized.key, isNullAware = isNullAware) } } override lazy val canonicalized: HashedRelationBroadcastMode = { this.copy(key = key.map(_.canonicalized)) } }
rednaxelafx/apache-spark
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
Scala
apache-2.0
35,848
package uk.gov.dvla.vehicles.presentation.common.views

object ValtechRadioView {
  final val KeeperType_Private = "Private"
  final val KeeperType_Business = "Business"
}
dvla/vehicles-presentation-common
common-test/app/uk/gov/dvla/vehicles/presentation/common/views/ValtechRadioView.scala
Scala
mit
172
package freecli
package argument

import dsl._

object implicits extends AllImplicits

trait AllImplicits
  extends ArgumentDslImplicits
  with ArgumentFieldImplicits
  with MergerImplicits
pavlosgi/freecli
core/src/main/scala/freecli/argument/implicits.scala
Scala
apache-2.0
188
/* * This file is a part of the "sur la plaque" toolkit for cycling * data analytics and visualization. * * Copyright (c) 2013--2014 William C. Benton and Red Hat, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.freevariable.surlaplaque.data; import com.github.nscala_time.time.Imports._ sealed case class Coordinates(lat: Double, lon: Double) extends Ordered[Coordinates] { import scala.math.Ordered.orderingToOrdered import com.freevariable.surlaplaque.util.RWDistance.{distance => rw_distance} /** Approximate distance between this and other in meters */ def distance(other:Coordinates) = rw_distance((lat, lon), (other.lat, other.lon)) /** Ordering based on longitude then latitude */ def compare(other: Coordinates) = (this.lon, this.lat) compare (other.lon, other.lat) /** Ordering based on latitude (then longitude, if necessary) */ def compare_lat(other: Coordinates) = (this.lat, this.lon) compare (other.lat, other.lon) } sealed case class Trackpoint(timestamp: Long, latlong: Coordinates, altitude: Double, watts: Double, speed: Option[Double], distance: Option[Double], heartrate: Option[Double], cadence: Option[Double], activity: Option[String] ) extends Ordered[Trackpoint] { import scala.math.Ordered.orderingToOrdered import Timestamp.{stringify => stringify_ts} val timestring = stringify_ts(timestamp) def elevDelta(other: Trackpoint) = other.altitude - altitude def timeDelta(other: Trackpoint) = (other.timestamp - timestamp).toDouble / 1000 def distanceDelta(other: Trackpoint) = (other.latlong.distance(latlong)) def kphBetween(other:Trackpoint) = ((other.latlong.distance(latlong)) / timeDelta(other)) * 3600 def gradeBetween(other:Trackpoint) = { val rise = elevDelta(other) // rise is in meters val run = distanceDelta(other) * 10 // run is in km, but we want to get a percentage grade rise/run } // NB: this ordering is meant to make convex hull calculation easier def compare(other: Trackpoint) = (this.latlong.lon, this.latlong.lat, this.timestamp, this.altitude, this.watts) compare (other.latlong.lon, other.latlong.lat, other.timestamp, other.altitude, other.watts) } object Timestamp { def stringify(ts: Long) = ts.toDateTime.toString() } object Trackpoint { def apply(ts_string: String, latlong: Coordinates, altitude: Double, watts: Double, activity: Option[String] = None, speed: Option[Double] = None, distance: Option[Double] = None, cadence: Option[Double] = None, heartrate: Option[Double] = None) = new Trackpoint(timestamp=ts_string.toDateTime.getMillis(), latlong=latlong, altitude=altitude, watts=watts, activity=activity, speed=speed, distance=distance, cadence=cadence, heartrate=heartrate) }
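// --- Editor-added illustrative sketch, not part of the original file. ---
// Shows how the Coordinates/Trackpoint helpers above might be combined; the
// timestamps, coordinates, altitude and power values below are made up.
object TrackpointUsageSketch {
  val a = Trackpoint("2014-06-01T10:00:00Z", Coordinates(45.0, 6.0), 1200.0, 250.0)
  val b = Trackpoint("2014-06-01T10:00:10Z", Coordinates(45.0005, 6.0), 1204.0, 260.0)

  val speed = a.kphBetween(b)   // speed derived from distanceDelta over the 10 s gap
  val grade = a.gradeBetween(b) // percentage grade from the elevation and distance deltas
}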
willb/sur-la-plaque
analysis/src/main/scala/com/freevariable/surlaplaque/data/trackpoint.scala
Scala
apache-2.0
3,304
package gitbucket.core.api import gitbucket.core.util.RepositoryName import org.json4s.jackson.JsonMethods.parse import org.json4s._ import org.scalatest.FunSuite import java.util.{Calendar, TimeZone, Date} class JsonFormatSpec extends FunSuite { val date1 = { val d = Calendar.getInstance(TimeZone.getTimeZone("UTC")) d.set(2011,3,14,16,0,49) d.getTime } def date(date:String): Date = { val f = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'") f.setTimeZone(TimeZone.getTimeZone("UTC")) f.parse(date) } val sha1 = "6dcb09b5b57875f334f61aebed695e2e4193db5e" val repo1Name = RepositoryName("octocat/Hello-World") implicit val context = JsonFormat.Context("http://gitbucket.exmple.com") val apiUser = ApiUser( login = "octocat", email = "octocat@example.com", `type` = "User", site_admin = false, created_at = date1) val apiUserJson = """{ "login":"octocat", "email":"octocat@example.com", "type":"User", "site_admin":false, "created_at":"2011-04-14T16:00:49Z", "url":"http://gitbucket.exmple.com/api/v3/users/octocat", "html_url":"http://gitbucket.exmple.com/octocat", "avatar_url":"http://gitbucket.exmple.com/octocat/_avatar" }""" val repository = ApiRepository( name = repo1Name.name, full_name = repo1Name.fullName, description = "This your first repo!", watchers = 0, forks = 0, `private` = false, default_branch = "master", owner = apiUser)(urlIsHtmlUrl = false) val repositoryJson = s"""{ "name" : "Hello-World", "full_name" : "octocat/Hello-World", "description" : "This your first repo!", "watchers" : 0, "forks" : 0, "private" : false, "default_branch" : "master", "owner" : $apiUserJson, "forks_count" : 0, "watchers_count" : 0, "url" : "${context.baseUrl}/api/v3/repos/octocat/Hello-World", "http_url" : "${context.baseUrl}/git/octocat/Hello-World.git", "clone_url" : "${context.baseUrl}/git/octocat/Hello-World.git", "html_url" : "${context.baseUrl}/octocat/Hello-World" }""" val apiCommitStatus = ApiCommitStatus( created_at = date1, updated_at = date1, state = "success", target_url = Some("https://ci.example.com/1000/output"), description = Some("Build has completed successfully"), id = 1, context = "Default", creator = apiUser )(sha1, repo1Name) val apiCommitStatusJson = s"""{ "created_at":"2011-04-14T16:00:49Z", "updated_at":"2011-04-14T16:00:49Z", "state":"success", "target_url":"https://ci.example.com/1000/output", "description":"Build has completed successfully", "id":1, "context":"Default", "creator":$apiUserJson, "url": "http://gitbucket.exmple.com/api/v3/repos/octocat/Hello-World/commits/6dcb09b5b57875f334f61aebed695e2e4193db5e/statuses" }""" val apiPushCommit = ApiCommit( id = "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", message = "Update README.md", timestamp = date1, added = Nil, removed = Nil, modified = List("README.md"), author = ApiPersonIdent("baxterthehacker","baxterthehacker@users.noreply.github.com",date1), committer = ApiPersonIdent("baxterthehacker","baxterthehacker@users.noreply.github.com",date1))(RepositoryName("baxterthehacker", "public-repo"), true) val apiPushCommitJson = s"""{ "id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", // "distinct": true, "message": "Update README.md", "timestamp": "2011-04-14T16:00:49Z", "url": "http://gitbucket.exmple.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", "author": { "name": "baxterthehacker", "email": "baxterthehacker@users.noreply.github.com", // "username": "baxterthehacker", "date" : "2011-04-14T16:00:49Z" }, "committer": { "name": "baxterthehacker", "email": 
"baxterthehacker@users.noreply.github.com", // "username": "baxterthehacker", "date" : "2011-04-14T16:00:49Z" }, "added": [ ], "removed": [ ], "modified": [ "README.md" ] }""" val apiComment = ApiComment( id =1, user = apiUser, body= "Me too", created_at= date1, updated_at= date1)(RepositoryName("octocat","Hello-World"), 100, false) val apiCommentJson = s"""{ "id": 1, "body": "Me too", "user": $apiUserJson, "html_url" : "${context.baseUrl}/octocat/Hello-World/issues/100#comment-1", "created_at": "2011-04-14T16:00:49Z", "updated_at": "2011-04-14T16:00:49Z" }""" val apiCommentPR = ApiComment( id =1, user = apiUser, body= "Me too", created_at= date1, updated_at= date1)(RepositoryName("octocat","Hello-World"), 100, true) val apiCommentPRJson = s"""{ "id": 1, "body": "Me too", "user": $apiUserJson, "html_url" : "${context.baseUrl}/octocat/Hello-World/pull/100#comment-1", "created_at": "2011-04-14T16:00:49Z", "updated_at": "2011-04-14T16:00:49Z" }""" val apiPersonIdent = ApiPersonIdent("Monalisa Octocat","support@example.com",date1) val apiPersonIdentJson = """ { "name": "Monalisa Octocat", "email": "support@example.com", "date": "2011-04-14T16:00:49Z" }""" val apiCommitListItem = ApiCommitListItem( sha = sha1, commit = ApiCommitListItem.Commit( message = "Fix all the bugs", author = apiPersonIdent, committer = apiPersonIdent )(sha1, repo1Name), author = Some(apiUser), committer= Some(apiUser), parents= Seq(ApiCommitListItem.Parent("6dcb09b5b57875f334f61aebed695e2e4193db5e")(repo1Name)))(repo1Name) val apiCommitListItemJson = s"""{ "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/commits/6dcb09b5b57875f334f61aebed695e2e4193db5e", "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", "commit": { "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/git/commits/6dcb09b5b57875f334f61aebed695e2e4193db5e", "author": $apiPersonIdentJson, "committer": $apiPersonIdentJson, "message": "Fix all the bugs" }, "author": $apiUserJson, "committer": $apiUserJson, "parents": [ { "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/commits/6dcb09b5b57875f334f61aebed695e2e4193db5e", "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e" } ] }""" val apiCombinedCommitStatus = ApiCombinedCommitStatus( state = "success", sha = sha1, total_count = 2, statuses = List(apiCommitStatus), repository = repository) val apiCombinedCommitStatusJson = s"""{ "state": "success", "sha": "$sha1", "total_count": 2, "statuses": [ $apiCommitStatusJson ], "repository": $repositoryJson, "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/commits/$sha1/status" }""" val apiLabel = ApiLabel( name = "bug", color = "f29513")(RepositoryName("octocat","Hello-World")) val apiLabelJson = s"""{ "name": "bug", "color": "f29513", "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/labels/bug" }""" val apiIssue = ApiIssue( number = 1347, title = "Found a bug", user = apiUser, state = "open", body = "I'm having a problem with this.", created_at = date1, updated_at = date1)(RepositoryName("octocat","Hello-World"), false) val apiIssueJson = s"""{ "number": 1347, "state": "open", "title": "Found a bug", "body": "I'm having a problem with this.", "user": $apiUserJson, "comments_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/issues/1347/comments", "html_url": "${context.baseUrl}/octocat/Hello-World/issues/1347", "created_at": "2011-04-14T16:00:49Z", "updated_at": "2011-04-14T16:00:49Z" }""" val apiIssuePR = ApiIssue( number = 1347, title = "Found a bug", user = apiUser, state = "open", body = "I'm having a 
problem with this.", created_at = date1, updated_at = date1)(RepositoryName("octocat","Hello-World"), true) val apiIssuePRJson = s"""{ "number": 1347, "state": "open", "title": "Found a bug", "body": "I'm having a problem with this.", "user": $apiUserJson, "comments_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/issues/1347/comments", "html_url": "${context.baseUrl}/octocat/Hello-World/pull/1347", "pull_request": { "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/pulls/1347", "html_url": "${context.baseUrl}/octocat/Hello-World/pull/1347" // "diff_url": "${context.baseUrl}/octocat/Hello-World/pull/1347.diff", // "patch_url": "${context.baseUrl}/octocat/Hello-World/pull/1347.patch" }, "created_at": "2011-04-14T16:00:49Z", "updated_at": "2011-04-14T16:00:49Z" }""" val apiPullRequest = ApiPullRequest( number = 1347, updated_at = date1, created_at = date1, head = ApiPullRequest.Commit( sha = sha1, ref = "new-topic", repo = repository)("octocat"), base = ApiPullRequest.Commit( sha = sha1, ref = "master", repo = repository)("octocat"), mergeable = None, merged = false, merged_at = Some(date1), merged_by = Some(apiUser), title = "new-feature", body = "Please pull these awesome changes", user = apiUser ) val apiPullRequestJson = s"""{ "number": 1347, "updated_at": "2011-04-14T16:00:49Z", "created_at": "2011-04-14T16:00:49Z", // "closed_at": "2011-04-14T16:00:49Z", "head": { "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", "ref": "new-topic", "repo": $repositoryJson, "label": "new-topic", "user": $apiUserJson }, "base": { "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", "ref": "master", "repo": $repositoryJson, "label": "master", "user": $apiUserJson }, // "merge_commit_sha": "e5bd3914e2e596debea16f433f57875b5b90bcd6", // "mergeable": true, "merged": false, "merged_at": "2011-04-14T16:00:49Z", "merged_by": $apiUserJson, "title": "new-feature", "body": "Please pull these awesome changes", "user": $apiUserJson, "html_url": "${context.baseUrl}/octocat/Hello-World/pull/1347", "url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/pulls/1347", "commits_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/pulls/1347/commits", "review_comments_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/pulls/1347/comments", "review_comment_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/pulls/comments/{number}", "comments_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/issues/1347/comments", "statuses_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e" // "diff_url": "${context.baseUrl}/octocat/Hello-World/pull/1347.diff", // "patch_url": "${context.baseUrl}/octocat/Hello-World/pull/1347.patch", // "issue_url": "${context.baseUrl}/api/v3/repos/octocat/Hello-World/issues/1347", // "state": "open", // "comments": 10, // "commits": 3, // "additions": 100, // "deletions": 3, // "changed_files": 5 }""" // https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent val apiPullRequestReviewComment = ApiPullRequestReviewComment( id = 29724692, // "diff_hunk": "@@ -1 +1 @@\\n-# public-repo", path = "README.md", // "position": 1, // "original_position": 1, commit_id = "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", // "original_commit_id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", user = apiUser, body = "Maybe you should use more emojji on this line.", created_at = date("2015-05-05T23:40:27Z"), updated_at = date("2015-05-05T23:40:27Z") 
)(RepositoryName("baxterthehacker/public-repo"), 1) val apiPullRequestReviewCommentJson = s"""{ "url": "http://gitbucket.exmple.com/api/v3/repos/baxterthehacker/public-repo/pulls/comments/29724692", "id": 29724692, // "diff_hunk": "@@ -1 +1 @@\\\\n-# public-repo", "path": "README.md", // "position": 1, // "original_position": 1, "commit_id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", // "original_commit_id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c", "user": $apiUserJson, "body": "Maybe you should use more emojji on this line.", "created_at": "2015-05-05T23:40:27Z", "updated_at": "2015-05-05T23:40:27Z", "html_url": "http://gitbucket.exmple.com/baxterthehacker/public-repo/pull/1#discussion_r29724692", "pull_request_url": "http://gitbucket.exmple.com/api/v3/repos/baxterthehacker/public-repo/pulls/1", "_links": { "self": { "href": "http://gitbucket.exmple.com/api/v3/repos/baxterthehacker/public-repo/pulls/comments/29724692" }, "html": { "href": "http://gitbucket.exmple.com/baxterthehacker/public-repo/pull/1#discussion_r29724692" }, "pull_request": { "href": "http://gitbucket.exmple.com/api/v3/repos/baxterthehacker/public-repo/pulls/1" } } }""" val apiBranchProtection = ApiBranchProtection(true, Some(ApiBranchProtection.Status(ApiBranchProtection.Everyone, Seq("continuous-integration/travis-ci")))) val apiBranchProtectionJson = """{ "enabled": true, "required_status_checks": { "enforcement_level": "everyone", "contexts": [ "continuous-integration/travis-ci" ] } }""" def assertJson(resultJson: String, expectJson: String) = { import java.util.regex.Pattern val json2 = Pattern.compile("""^\\s*//.*$""", Pattern.MULTILINE).matcher(expectJson).replaceAll("") val js2 = try { parse(json2) } catch { case e: com.fasterxml.jackson.core.JsonParseException => { val p = java.lang.Math.max(e.getLocation.getCharOffset() - 10, 0).toInt val message = json2.substring(p, java.lang.Math.min(p + 100, json2.length)) throw new com.fasterxml.jackson.core.JsonParseException(message + e.getMessage, e.getLocation) } } val js1 = parse(resultJson) assert(js1 === js2) } test("apiUser") { assertJson(JsonFormat(apiUser), apiUserJson) } test("repository") { assertJson(JsonFormat(repository), repositoryJson) } test("apiPushCommit") { assertJson(JsonFormat(apiPushCommit), apiPushCommitJson) } test("apiComment") { assertJson(JsonFormat(apiComment), apiCommentJson) assertJson(JsonFormat(apiCommentPR), apiCommentPRJson) } test("apiCommitListItem") { assertJson(JsonFormat(apiCommitListItem), apiCommitListItemJson) } test("apiCommitStatus") { assertJson(JsonFormat(apiCommitStatus), apiCommitStatusJson) } test("apiCombinedCommitStatus") { assertJson(JsonFormat(apiCombinedCommitStatus), apiCombinedCommitStatusJson) } test("apiLabel") { assertJson(JsonFormat(apiLabel), apiLabelJson) } test("apiIssue") { assertJson(JsonFormat(apiIssue), apiIssueJson) assertJson(JsonFormat(apiIssuePR), apiIssuePRJson) } test("apiPullRequest") { assertJson(JsonFormat(apiPullRequest), apiPullRequestJson) } test("apiPullRequestReviewComment") { assertJson(JsonFormat(apiPullRequestReviewComment), apiPullRequestReviewCommentJson) } test("apiBranchProtection") { assertJson(JsonFormat(apiBranchProtection), apiBranchProtectionJson) } }
zhoffice/gitbucket
src/test/scala/gitbucket/core/api/JsonFormatSpec.scala
Scala
apache-2.0
15,672
package cmdreader.std

import types._
import util._
import cmdreader._

class OGe extends CommandOperator {
  override def getName(): String = "ge"
  override def isValidArg0(n: Int): Boolean = n == 2
  override def apply(args: Array[Type]): Type = {
    try {
      MathUtil.rel(args(0), args(1), !_.lt(_))
    } catch {
      case uo: UnsupportedOperationException => new TError(1)
    }
  }
  override def getOpAlias(): String = ">="
  override def isUnary(): Boolean = false
  def getPrecedence(): Int = PStandard.RELATION
  def isReversed(): Boolean = false
  def hasAssignmentEquiv(): Boolean = false
  def getDoubleBase(): Option[Type] = None
}
bluebear94/bag
src/main/scala/cmdreader/std/OGe.scala
Scala
gpl-3.0
655
/*
 * Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package com.lightbend.lagom.internal.javadsl.testkit

import java.util.concurrent.CompletionStage

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.javadsl.Flow
import akka.stream.javadsl.Source
import akka.stream.scaladsl.{ Flow => ScalaFlow }
import com.lightbend.lagom.internal.testkit.InternalSubscriberStub
import com.lightbend.lagom.javadsl.api.broker.Message
import com.lightbend.lagom.javadsl.api.broker.Subscriber
import com.lightbend.lagom.javadsl.api.broker.Topic

import scala.compat.java8.FutureConverters.toJava

private[lagom] class TopicStub[T](val topicId: Topic.TopicId, topicBuffer: ActorRef)(
    implicit materializer: Materializer
) extends Topic[T] {

  // TODO: use ServiceInfo's name as a default value.
  def subscribe = new SubscriberStub("default", topicBuffer, _.getPayload)

  class SubscriberStub[SubscriberPayload](
      groupId: String,
      topicBuffer: ActorRef,
      transform: Message[T] => SubscriberPayload
  )(implicit materializer: Materializer)
      extends InternalSubscriberStub[T, Message](groupId, topicBuffer)(materializer)
      with Subscriber[SubscriberPayload] {

    override def withGroupId(groupId: String): Subscriber[SubscriberPayload] =
      new SubscriberStub(groupId, topicBuffer, transform)

    override def withMetadata(): Subscriber[Message[SubscriberPayload]] =
      new SubscriberStub[Message[SubscriberPayload]](groupId, topicBuffer, msg => msg.withPayload(transform(msg)))

    override def atMostOnceSource(): Source[SubscriberPayload, _] =
      super.mostOnceSource.map(transform).asJava

    override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): CompletionStage[Done] =
      toJava(super.leastOnce(ScalaFlow[Message[T]].map(transform).via(flow.asScala)))
  }
}
rcavalcanti/lagom
testkit/javadsl/src/main/scala/com/lightbend/lagom/internal/javadsl/testkit/TopicStub.scala
Scala
apache-2.0
1,875
package com.eigengo.lift.analysis.exercise.rt

/**
 * Accelerometer data groups ``values`` at the given ``samplingRate``
 * @param samplingRate the sampling rate in Hz
 * @param values the values
 */
case class AccelerometerData(samplingRate: Int, values: List[AccelerometerValue])

/**
 * Accelerometer data
 * @param x the x
 * @param y the y
 * @param z the z
 */
case class AccelerometerValue(x: Int, y: Int, z: Int)
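// --- Editor-added illustrative sketch, not part of the original file. ---
// A short burst of samples at a hypothetical 100 Hz sampling rate; the axis
// values are made up.
object AccelerometerDataSketch {
  val burst = AccelerometerData(
    samplingRate = 100,
    values = List(
      AccelerometerValue(x = 10, y = -5, z = 980),
      AccelerometerValue(x = 12, y = -4, z = 978)))
}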
lachatak/lift
analysis/exercise-rt-protocol/src/main/scala/com/eigengo/lift/analysis/exercise/rt/AccelerometerData.scala
Scala
apache-2.0
421
package lila.study import play.api.libs.json._ import reactivemongo.api.bson._ import scala.concurrent.duration._ import lila.common.Future import lila.db.AsyncColl import lila.db.dsl._ import lila.user.User case class StudyTopic(value: String) extends AnyVal with StringValue object StudyTopic { val minLength = 2 val maxLength = 50 def fromStr(str: String): Option[StudyTopic] = str.trim match { case s if s.lengthIs >= minLength && s.lengthIs <= maxLength => StudyTopic(s).some case _ => none } implicit val topicIso = lila.common.Iso.string[StudyTopic](StudyTopic.apply, _.value) } case class StudyTopics(value: List[StudyTopic]) extends AnyVal { def diff(other: StudyTopics) = StudyTopics { value.toSet.diff(other.value.toSet).toList } def ++(other: StudyTopics) = StudyTopics { value.toSet.++(other.value.toSet).toList } } object StudyTopics { val empty = StudyTopics(Nil) def fromStrs(strs: Seq[String]) = StudyTopics { strs.view .flatMap(StudyTopic.fromStr) .take(64) .toList .distinct } } final private class StudyTopicRepo(val coll: AsyncColl) final private class StudyUserTopicRepo(val coll: AsyncColl) final class StudyTopicApi(topicRepo: StudyTopicRepo, userTopicRepo: StudyUserTopicRepo, studyRepo: StudyRepo)( implicit ec: scala.concurrent.ExecutionContext, system: akka.actor.ActorSystem ) { import BSONHandlers.{ StudyTopicBSONHandler, StudyTopicsBSONHandler } def byId(str: String): Fu[Option[StudyTopic]] = topicRepo.coll(_.byId[Bdoc](str)) dmap { _ flatMap docTopic } def findLike(str: String, myId: Option[User.ID], nb: Int = 10): Fu[StudyTopics] = { (str.lengthIs >= 2) ?? { val favsFu: Fu[List[StudyTopic]] = myId.?? { userId => userTopics(userId).map { _.value.filter(_.value startsWith str) take nb } } favsFu flatMap { favs => topicRepo .coll { _.find($doc("_id".$startsWith(java.util.regex.Pattern.quote(str), "i"))) .sort($sort.naturalAsc) .cursor[Bdoc](readPref) .list(nb - favs.size) } .dmap { _ flatMap docTopic } .dmap { favs ::: _ } } } } dmap StudyTopics.apply def userTopics(userId: User.ID): Fu[StudyTopics] = userTopicRepo.coll(_.byId(userId)).dmap { _.flatMap(_.getAsOpt[StudyTopics]("topics")) | StudyTopics.empty } private case class TagifyTopic(value: String) implicit private val TagifyTopicReads = Json.reads[TagifyTopic] def userTopics(user: User, json: String): Funit = { val topics = if (json.trim.isEmpty) StudyTopics.empty else Json.parse(json).validate[List[TagifyTopic]] match { case JsSuccess(topics, _) => StudyTopics fromStrs topics.map(_.value) case _ => StudyTopics.empty } userTopicRepo.coll { _.update.one( $id(user.id), $set("topics" -> topics), upsert = true ) }.void } def userTopicsAdd(userId: User.ID, topics: StudyTopics): Funit = topics.value.nonEmpty ?? 
userTopicRepo.coll { _.update .one( $id(userId), $addToSet("topics" -> $doc("$each" -> topics)), upsert = true ) }.void def popular(nb: Int): Fu[StudyTopics] = topicRepo .coll { _.find($empty) .sort($sort.naturalAsc) .cursor[Bdoc]() .list(nb) } .dmap { _ flatMap docTopic } .dmap(StudyTopics.apply) private def docTopic(doc: Bdoc): Option[StudyTopic] = doc.getAsOpt[StudyTopic]("_id") private val recomputeWorkQueue = new lila.hub.AsyncActorSequencer( maxSize = 1, timeout = 61 seconds, name = "studyTopicAggregation", logging = false ) def recompute(): Unit = recomputeWorkQueue(Future.makeItLast(60 seconds)(recomputeNow)).recover { case _: lila.hub.BoundedAsyncActor.EnqueueException => () case e: Exception => logger.warn("Can't recompute study topics!", e) }.unit private def recomputeNow: Funit = studyRepo.coll { _.aggregateWith[Bdoc]() { framework => import framework._ List( Match( $doc( "topics" $exists true, "visibility" -> "public" ) ), Project($doc("topics" -> true, "_id" -> false)), UnwindField("topics"), SortByFieldCount("topics"), Project($doc("_id" -> true)), Out(topicRepo.coll.name.value) ) }.headOption }.void }
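// --- Editor-added illustrative sketch, not part of the original file. ---
// How raw user input might be turned into topics with the helpers above; the
// input strings are made up. Entries shorter than minLength are dropped and
// duplicates are removed by StudyTopics.fromStrs.
object StudyTopicUsageSketch {
  val one: Option[StudyTopic] = StudyTopic.fromStr("  endgame ") // Some(StudyTopic("endgame"))
  val many: StudyTopics       = StudyTopics.fromStrs(Seq("endgame", "x", "opening", "endgame"))
}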
luanlv/lila
modules/study/src/main/StudyTopic.scala
Scala
mit
4,798
package java.time

case class ZoneId(code: String) {
}

object ZoneId {
  val SHORT_IDS = Map(
    "ACT" -> "Australia/Darwin",
    "AET" -> "Australia/Sydney",
    "AGT" -> "America/Argentina/Buenos_Aires",
    "ART" -> "Africa/Cairo",
    "AST" -> "America/Anchorage",
    "BET" -> "America/Sao_Paulo",
    "BST" -> "Asia/Dhaka",
    "CAT" -> "Africa/Harare",
    "CNT" -> "America/St_Johns",
    "CST" -> "America/Chicago",
    "CTT" -> "Asia/Shanghai",
    "EAT" -> "Africa/Addis_Ababa",
    "ECT" -> "Europe/Paris",
    "IET" -> "America/Indiana/Indianapolis",
    "IST" -> "Asia/Kolkata",
    "JST" -> "Asia/Tokyo",
    "MIT" -> "Pacific/Apia",
    "NET" -> "Asia/Yerevan",
    "NST" -> "Pacific/Auckland",
    "PLT" -> "Asia/Karachi",
    "PNT" -> "America/Phoenix",
    "PRT" -> "America/Puerto_Rico",
    "PST" -> "America/Los_Angeles",
    "SST" -> "Pacific/Guadalcanal",
    "VST" -> "Asia/Ho_Chi_Minh",
    "EST" -> "-05:00",
    "MST" -> "-07:00",
    "HST" -> "-10:00")

  def systemDefault = ZoneId("ECT")
}
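// --- Editor-added illustrative sketch, not part of the original file. ---
// Resolving a short zone id through the lookup table above.
object ZoneIdSketch {
  val losAngeles: String = ZoneId.SHORT_IDS("PST") // "America/Los_Angeles"
  val default: ZoneId    = ZoneId.systemDefault    // ZoneId("ECT") in this stub
}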
7thsense/play-json-extra
scalajs-joda-time/src/main/scala/java/time/ZoneId.scala
Scala
apache-2.0
1,019
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package models.notifications

import models.confirmation.Currency

case class ReminderDetails(paymentAmount: Currency, referenceNumber: String)
hmrc/amls-frontend
app/models/notifications/ReminderDetails.scala
Scala
apache-2.0
748
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.codegen.calls import org.apache.flink.table.planner.codegen.CodeGenUtils.{BINARY_STRING, qualifyMethod} import org.apache.flink.table.planner.codegen.GenerateUtils.generateCallIfArgsNotNull import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, GeneratedExpression} import org.apache.flink.table.types.logical.LogicalType import java.lang.reflect.Method import java.util.TimeZone class MethodCallGen(method: Method) extends CallGenerator { override def generate( ctx: CodeGeneratorContext, operands: Seq[GeneratedExpression], returnType: LogicalType): GeneratedExpression = { generateCallIfArgsNotNull(ctx, returnType, operands, !method.getReturnType.isPrimitive) { originalTerms => { val terms = originalTerms.zip(method.getParameterTypes).map { case (term, clazz) => // convert the BinaryString parameter to String if the method parameter accept String if (clazz == classOf[String]) { s"$term.toString()" } else { term } } // generate method invoke code and adapt when it's a time zone related function val call = if (terms.length + 1 == method.getParameterCount && method.getParameterTypes()(terms.length) == classOf[TimeZone]) { // insert the zoneID parameters for timestamp functions val timeZone = ctx.addReusableTimeZone() s""" |${qualifyMethod(method)}(${terms.mkString(", ")}, $timeZone) """.stripMargin } else { s""" |${qualifyMethod(method)}(${terms.mkString(", ")}) """.stripMargin } // convert String to BinaryString if the return type is String if (method.getReturnType == classOf[String]) { s"$BINARY_STRING.fromString($call)" } else { call } } } } }
gyfora/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/calls/MethodCallGen.scala
Scala
apache-2.0
2,736
package spatutorial.client.components

import spatutorial.components.JQuery

import scala.language.implicitConversions
import scala.scalajs.js

/**
 * Common Bootstrap components for scalajs-react
 */
object Bootstrap {

  implicit def jq2bootstrap(jq: JQuery): BootstrapJQuery = jq.asInstanceOf[BootstrapJQuery]

  // shorthand for styles
  @inline private def bss = GlobalStyles.bootstrapStyles

  trait BootstrapJQuery extends JQuery {
    def modal(action: String): BootstrapJQuery = js.native
    def modal(options: js.Any): BootstrapJQuery = js.native
  }

  // Common Bootstrap contextual styles
  object CommonStyle extends Enumeration {
    val default, primary, success, info, warning, danger = Value
  }

  //removed a bunch of stuff moving to scala-js.react 0.10
}
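// --- Editor-added illustrative sketch, not part of the original file. ---
// The implicit conversion above lets any JQuery handle call the Bootstrap modal
// API; `dialog` is a hypothetical JQuery reference supplied by the caller.
object BootstrapUsageSketch {
  import Bootstrap._

  def showModal(dialog: JQuery): BootstrapJQuery = dialog.modal("show")
}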
read-write-web/rww-scala-js
src/main/scala/spatutorial/client/components/Bootstrap.scala
Scala
apache-2.0
778
package scala.reflect.quasiquotes import org.scalacheck._, Prop._, Gen._, Arbitrary._ import scala.tools.reflect.{ToolBox, ToolBoxError} import scala.reflect.runtime.currentMirror import scala.reflect.runtime.universe._, Flag._, internal.reificationSupport.setSymbol abstract class QuasiquoteProperties(name: String) extends Properties(name) with ArbitraryTreesAndNames with Helpers trait Helpers { /** Runs a code block and returns proof confirmation * if no exception has been thrown while executing code * block. This is useful for simple one-off tests. */ def test[T](block: => T) = Prop { params => block Result(Prop.Proof) } object simplify extends Transformer { object SimplifiedName { val st = scala.reflect.runtime.universe.asInstanceOf[scala.reflect.internal.SymbolTable] val FreshName = new st.FreshNameExtractor def unapply[T <: Name](name: T): Option[T] = name.asInstanceOf[st.Name] match { case FreshName(prefix) => Some((if (name.isTermName) TermName(prefix) else TypeName(prefix)).asInstanceOf[T]) case x => throw new MatchError(x) } } override def transform(tree: Tree): Tree = tree match { case Ident(SimplifiedName(name)) => Ident(name) case ValDef(mods, SimplifiedName(name), tpt, rhs) => ValDef(mods, name, transform(tpt), transform(rhs)) case Bind(SimplifiedName(name), rhs) => Bind(name, rhs) case _ => super.transform(tree) } def apply(tree: Tree): Tree = transform(tree) } implicit class TestSimilarTree(tree1: Tree) { def ≈(tree2: Tree) = simplify(tree1).equalsStructure(simplify(tree2)) } implicit class TestSimilarListTree(lst: List[Tree]) { def ≈(other: List[Tree]) = (lst.length == other.length) && lst.zip(other).forall { case (t1, t2) => t1 ≈ t2 } } implicit class TestSimilarListListTree(lst: List[List[Tree]]) { def ≈(other: List[List[Tree]]) = (lst.length == other.length) && lst.zip(other).forall { case (l1, l2) => l1 ≈ l2 } } implicit class TestSimilarName(name: Name) { def ≈(other: Name) = name == other } implicit class TestSimilarMods(mods: Modifiers) { def ≈(other: Modifiers) = (mods.flags == other.flags) && (mods.privateWithin ≈ other.privateWithin) && (mods.annotations ≈ other.annotations) } def assertThrows[T <: AnyRef](f: => Any)(implicit manifest: Manifest[T]): Unit = { val clazz = manifest.runtimeClass.asInstanceOf[Class[T]] val thrown = try { f false } catch { case u: Throwable => if (!clazz.isAssignableFrom(u.getClass)) assert(false, s"wrong exception: expected ${clazz.getName} but was ${u.getClass.getName}") true } if(!thrown) assert(false, "exception wasn't thrown") } def assertEqAst(tree: Tree, code: String) = assert(eqAst(tree, code), s"""quasiquote tree != parse(code) tree |quasiquote: $tree |parse tree: ${parse(code)} |code (str): $code""".stripMargin) def eqAst(tree: Tree, code: String) = tree ≈ parse(code) val toolbox = currentMirror.mkToolBox() val parse = toolbox.parse(_) val compile = toolbox.compile(_) val eval = toolbox.eval(_) def typecheck(tree: Tree) = toolbox.typecheck(tree) def typecheckTyp(tree: Tree) = { val q"type $_ = $res" = typecheck(q"type T = $tree") res } def typecheckPat(tree: Tree) = { val q"$_ match { case $res => }" = typecheck(q"((): Any) match { case $tree => }") res } def fails(msg: String, block: String) = { def result(ok: Boolean, description: String = "") = { val status = if (ok) Prop.Proof else Prop.False val labels = if (description != "") Set(description) else Set.empty[String] Prop { new Prop.Result(status, Nil, Set.empty, labels) } } try { compile(parse(s""" object Wrapper extends Helpers { import scala.reflect.runtime.universe._ 
$block } """)) result(false, "given code doesn't fail to typecheck") } catch { case ToolBoxError(emsg, _) => if (!emsg.contains(msg)) result(false, s"error message '${emsg}' is not the same as expected '$msg'") else result(true) } } val scalapkg = setSymbol(Ident(TermName("scala")), definitions.ScalaPackage) }
scala/scala
test/scalacheck/scala/reflect/quasiquotes/QuasiquoteProperties.scala
Scala
apache-2.0
4,468
package fpscala.errorhandling

case class Person(name: Name, age: Age)
sealed class Name(val value: String)
sealed class Age(val value: Int)

object Person {
  def map2[A, B, C](a: Either[List[String], A], b: Either[List[String], B])(f: (A, B) => C): Either[List[String], C] = {
    a match {
      case Left(es) => b match {
        case Left(bs) => Left(es ++ bs)
        case _ => Left(es)
      }
      case Right(aa) => b match {
        case Left(bs) => Left(bs)
        case Right(bb) => Right(f(aa, bb))
      }
    }
  }

  def mkName(name: String): Either[List[String], Name] =
    if (name == "" || name == null) Left(List("Name is empty."))
    else Right(new Name(name))

  def mkAge(age: Int): Either[List[String], Age] =
    if (age < 0) Left(List("Age is out of range."))
    else Right(new Age(age))

  def mkPerson(name: String, age: Int): Either[List[String], Person] =
    map2(mkName(name), mkAge(age))(Person(_, _))
}
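// --- Editor-added illustrative sketch, not part of the original file. ---
// map2 accumulates errors from both sides, so an invalid name and an invalid
// age are reported together; the sample inputs are made up.
object PersonValidationSketch {
  val ok  = Person.mkPerson("Alice", 30) // Right(Person(...))
  val bad = Person.mkPerson("", -1)      // Left(List("Name is empty.", "Age is out of range."))
}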
hnfmr/fpscala
Person.scala
Scala
mit
942
package x7c1.linen.repository.source.unread

import x7c1.linen.database.struct.HasSourceId
import x7c1.linen.repository.source.unread.selector.SourceRowSelector
import x7c1.wheat.modern.database.selector.SelectorProvidable

sealed trait SourceRowContent

object SourceRowContent {
  implicit object providable
    extends SelectorProvidable[SourceRowContent, SourceRowSelector]
}

case class UnreadSource(
  id: Long,
  url: String,
  title: String,
  description: String,
  rating: Int,
  accountId: Long,
  latestEntryId: Long,
  latestEntryCreatedAt: Int,
  startEntryId: Option[Long],
  startEntryCreatedAt: Option[Int]
) extends SourceRowContent

case class SourceFooterContent() extends SourceRowContent

object UnreadSource {
  implicit object id extends HasSourceId[UnreadSource] {
    override def toId = _.id
  }
}
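// --- Editor-added illustrative sketch, not part of the original file. ---
// Constructing one unread-source row; every value below is made up.
object UnreadSourceSketch {
  val sample: SourceRowContent = UnreadSource(
    id = 1L,
    url = "http://example.com/feed",
    title = "Example feed",
    description = "A made-up feed for illustration",
    rating = 3,
    accountId = 10L,
    latestEntryId = 100L,
    latestEntryCreatedAt = 1451606400,
    startEntryId = None,
    startEntryCreatedAt = None)
}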
x7c1/Linen
linen-repository/src/main/scala/x7c1/linen/repository/source/unread/SourceRowContent.scala
Scala
mit
821
/*
 * Copyright 2013 Twitter Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.twitter.storehaus

import com.twitter.util.Future
import com.twitter.concurrent.AsyncSemaphore

/**
 * Ever wished you could do a multiGet for 10,000 keys, but spread out over several multiGets?
 * Use the BatchedReadableStore.
 *
 * @param self the store to fetch values from
 * @param maxMultiGetSize a multiGet to `store` will fetch values for at most `maxMultiGetSize` keys
 * @param maxConcurrentMultiGets the maximum number of multigets to concurrently issue
 */
class BatchedReadableStore[K, V](
  override protected val self: ReadableStore[K, V],
  maxMultiGetSize: Int,
  maxConcurrentMultiGets: Int)
  (implicit fc: FutureCollector) extends ReadableStoreProxy[K, V] {

  protected val connectionLock = new AsyncSemaphore(maxConcurrentMultiGets)

  override def multiGet[K1 <: K](keys: Set[K1]): Map[K1, Future[Option[V]]] =
    keys
      .grouped(maxMultiGetSize)
      .map { keyBatch: Set[K1] =>
        // mapCollect the result of the multiget so we can release the permit at the end
        val batchResult: Future[Map[K1, Option[V]]] =
          connectionLock
            .acquire()
            .flatMap { permit =>
              FutureOps.mapCollect(self.multiGet(keyBatch))
                .ensure { permit.release() }
            }

        // now undo the mapCollect to yield a Map of future
        FutureOps.liftValues(keyBatch, batchResult)
      }
      .reduceOption(_ ++ _)
      .getOrElse(Map.empty)
}
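// --- Editor-added illustrative sketch, not part of the original file. ---
// Wrapping an existing store so a large multiGet is split into batches of at
// most 100 keys with at most 4 batches in flight; `backing` and the numbers
// are hypothetical choices made by the caller.
object BatchedReadableStoreSketch {
  def batched[K, V](backing: ReadableStore[K, V])(implicit fc: FutureCollector): ReadableStore[K, V] =
    new BatchedReadableStore(backing, maxMultiGetSize = 100, maxConcurrentMultiGets = 4)
}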
twitter/storehaus
storehaus-core/src/main/scala/com/twitter/storehaus/BatchedReadableStore.scala
Scala
apache-2.0
2,029
package spark.streaming.util import spark.SparkContext import spark.SparkContext._ import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} import scala.collection.JavaConversions.mapAsScalaMap object RawTextHelper { /** * Splits lines and counts the words in them using specialized object-to-long hashmap * (to avoid boxing-unboxing overhead of Long in java/scala HashMap) */ def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = { val map = new OLMap[String] var i = 0 var j = 0 while (iter.hasNext) { val s = iter.next() i = 0 while (i < s.length) { j = i while (j < s.length && s.charAt(j) != ' ') { j += 1 } if (j > i) { val w = s.substring(i, j) val c = map.getLong(w) map.put(w, c + 1) } i = j while (i < s.length && s.charAt(i) == ' ') { i += 1 } } } map.toIterator.map{case (k, v) => (k, v)} } /** * Gets the top k words in terms of word counts. Assumes that each word exists only once * in the `data` iterator (that is, the counts have been reduced). */ def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = { val taken = new Array[(String, Long)](k) var i = 0 var len = 0 var done = false var value: (String, Long) = null var swap: (String, Long) = null var count = 0 while(data.hasNext) { value = data.next if (value != null) { count += 1 if (len == 0) { taken(0) = value len = 1 } else if (len < k || value._2 > taken(len - 1)._2) { if (len < k) { len += 1 } taken(len - 1) = value i = len - 1 while(i > 0 && taken(i - 1)._2 < taken(i)._2) { swap = taken(i) taken(i) = taken(i-1) taken(i - 1) = swap i -= 1 } } } } return taken.toIterator } /** * Warms up the SparkContext in master and slave by running tasks to force JIT kick in * before real workload starts. */ def warmUp(sc: SparkContext) { for(i <- 0 to 1) { sc.parallelize(1 to 200000, 1000) .map(_ % 1331).map(_.toString) .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) .count() } } def add(v1: Long, v2: Long) = (v1 + v2) def subtract(v1: Long, v2: Long) = (v1 - v2) def max(v1: Long, v2: Long) = math.max(v1, v2) }
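// --- Editor-added illustrative sketch, not part of the original file. ---
// topK over already-reduced (word, count) pairs; the sample counts are made up.
object RawTextHelperSketch {
  val counts = Iterator("spark" -> 5L, "streaming" -> 3L, "raw" -> 9L)
  val top2   = RawTextHelper.topK(counts, 2).toList // List(("raw", 9), ("spark", 5))
}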
koeninger/spark
streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala
Scala
bsd-3-clause
2,573
/* * Copyright 2011-2017 Chris de Vreeze * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package eu.cdevreeze.yaidom.queryapitests import scala.reflect.ClassTag import eu.cdevreeze.yaidom import eu.cdevreeze.yaidom.core.EName import eu.cdevreeze.yaidom.core.Path import eu.cdevreeze.yaidom.parse.DocumentParserUsingStax import eu.cdevreeze.yaidom.queryapi.ClarkNodes import eu.cdevreeze.yaidom.queryapi.ElemWithPath import eu.cdevreeze.yaidom.queryapi.TransformableElemApi import eu.cdevreeze.yaidom.queryapi.UpdatableElemApi import eu.cdevreeze.yaidom.simple.Elem import org.scalatest.funsuite.AnyFunSuite /** * Update test, using different yaidom methods of updating. * * @author Chris de Vreeze */ abstract class AbstractUpdateTest extends AnyFunSuite { // Note below how we prevent the following error: "inferred type ... contains type selection from volatile type ..." // We do this by type E no longer extending type N. type E <: ClarkNodes.Elem.Aux[N, E] with UpdatableElemApi.Aux[N, E] with TransformableElemApi.Aux[N, E] type N >: E <: ClarkNodes.Node implicit val clsTag: ClassTag[E] // Does not compile in a Scala 2.13.0-M3 build, due to regression: // "inferred type ... contains type selection from volatile type ..." // Below, we update the measure elements, replacing the unprefixed measures with prefixed ones (using prefix xbrli) test("testTransformElems") { val newRootElem = rootElem transformElems { case e if e.resolvedName == EName(XbrliNs, "measure") => updateMeasure(e) case e => e } checkElemAfterMeasureUpdate(newRootElem) } test("testTransformElemsToNodeSeq") { val newRootElem = rootElem transformElemsToNodeSeq { case e if e.resolvedName == EName(XbrliNs, "measure") => Vector(updateMeasure(e)) case e => Vector(e) } checkElemAfterMeasureUpdate(newRootElem) } test("testTransformElemsOrSelfToNodeSeq") { val newRootElems = rootElem transformElemsOrSelfToNodeSeq { case e if e.resolvedName == EName(XbrliNs, "measure") => Vector(updateMeasure(e)) case e => Vector(e) } assertResult(1) { newRootElems.size } checkElemAfterMeasureUpdate(newRootElems.head.asInstanceOf[E]) } // Below, we update the unit IDs and references for unit U-Monetary. // There are many of those, so updates take time. 
test("testTransformElemsForUnitUpdate") { val newRootElem = rootElem transformElems { case e if e.resolvedName == EName(XbrliNs, "unit") => updateUnitId(e) case e if e.attributeOption(EName("unitRef")).contains("U-Monetary") => updateUnitRef(e) case e => e } checkElemAfterUnitUpdate(newRootElem) } test("testTransformElemsOrSelfForUnitUpdate") { val newRootElem = rootElem transformElemsOrSelf { case e => updateUnitRef(updateUnitId(e)) } checkElemAfterUnitUpdate(newRootElem) } test("testUpdateElemsWithNodeSeqAgainForUnitUpdate") { val pathAwareClarkElem = ElemWithPath(rootElem) val elems = pathAwareClarkElem filterElems { case e if e.elem.resolvedName == EName(XbrliNs, "unit") => true case e if e.elem.attributeOption(EName("unitRef")).contains("U-Monetary") => true case e => false } val paths: Set[Path] = elems.map(_.path).toSet val newRootElem = rootElem.updateElemsOrSelf(paths) { case (e, p) if e.resolvedName == EName(XbrliNs, "unit") => updateUnitId(e) case (e, p) if e.attributeOption(EName("unitRef")).contains("U-Monetary") => updateUnitRef(e) case (e, p) => e } checkElemAfterUnitUpdate(newRootElem) } // Below, we update the context IDs and references for context D-2007-PPE-Other. // There are only a few of those, so updates are fast. test("testTransformElemsForContextUpdate") { val newRootElem = rootElem transformElems { case e if e.resolvedName == EName(XbrliNs, "context") => updateContextId(e) case e if localNamesForContextUpdate.contains(e.localName) => updateContextRef(e) case e => e } checkElemAfterContextUpdate(newRootElem) } test("testTransformElemsOrSelfForContextUpdate") { val newRootElem = rootElem transformElemsOrSelf { case e => updateContextRef(updateContextId(e)) } checkElemAfterContextUpdate(newRootElem) } test("testUpdateElemsWithNodeSeqAgainForContextUpdate") { val pathAwareClarkElem = ElemWithPath(rootElem) val elems = pathAwareClarkElem filterElems { case e if e.elem.resolvedName == EName(XbrliNs, "context") => true case e if localNamesForContextUpdate.contains(e.elem.localName) => true case e => false } val paths: Set[Path] = elems.map(_.path).toSet val newRootElem = rootElem.updateElemsOrSelf(paths) { case (e, p) if e.resolvedName == EName(XbrliNs, "context") => updateContextId(e) case (e, p) if e.attributeOption(EName("contextRef")).contains("D-2007-PPE-Other") => updateContextRef(e) case (e, p) => e } checkElemAfterContextUpdate(newRootElem) } // Update the value of a specific fact test("testUpdateFact") { val gaapNs = "http://xasb.org/gaap" val elems = ElemWithPath(rootElem) filterChildElems { e => e.elem.resolvedName == EName(gaapNs, "AverageNumberEmployees") && e.elem.attributeOption(EName("contextRef")).contains("D-2003") && e.elem.attributeOption(EName("unitRef")).contains("U-Pure") } val paths = elems.map(_.path) assertResult(List(Path.from(EName(gaapNs, "AverageNumberEmployees") -> 2))) { paths } val newRootElem = rootElem.updateElemsOrSelf(paths.toSet) { (e, p) => require(e.resolvedName == EName(gaapNs, "AverageNumberEmployees")) updateFactValue(e) } assertResult(Set("100", "200", "235", "240", "250", "300")) { newRootElem.filterChildElems(_.resolvedName == EName(gaapNs, "AverageNumberEmployees")).map(_.text).toSet } } // Helper methods private def checkElemAfterMeasureUpdate(elm: E): Unit = { assertResult(resolvedExpectedRootElemAfterMeasureUpdate) { yaidom.resolved.Elem.from(elm) } } private def checkElemAfterUnitUpdate(elm: E): Unit = { assertResult(resolvedExpectedRootElemAfterUnitUpdate) { yaidom.resolved.Elem.from(elm) } } private def 
checkElemAfterContextUpdate(elm: E): Unit = { assertResult(resolvedExpectedRootElemAfterContextUpdate) { yaidom.resolved.Elem.from(elm) } } private val rootElem: E = { val docParser = DocumentParserUsingStax.newInstance() val uri = classOf[AbstractUpdateTest].getResource("sample-xbrl-instance.xml").toURI val doc = docParser.parse(uri) fromSimpleElem(doc.documentElement) } private val XbrliNs = "http://www.xbrl.org/2003/instance" protected val localNamesForContextUpdate: Set[String] = Set( "PropertyPlantAndEquipmentMeasurementBasis", "PropertyPlantAndEquipmentDepreciationMethod", "PropertyPlantAndEquipmentEstimatedUsefulLife") private val resolvedExpectedRootElemAfterMeasureUpdate: yaidom.resolved.Elem = yaidom.resolved.Elem.from(rootElem.transformElems(updateMeasure)) assert(resolvedExpectedRootElemAfterMeasureUpdate != yaidom.resolved.Elem.from(rootElem)) private val resolvedExpectedRootElemAfterUnitUpdate: yaidom.resolved.Elem = yaidom.resolved.Elem.from(rootElem.transformElems((updateUnitId _) andThen (updateUnitRef _))) assert(resolvedExpectedRootElemAfterUnitUpdate != yaidom.resolved.Elem.from(rootElem)) private val resolvedExpectedRootElemAfterContextUpdate: yaidom.resolved.Elem = yaidom.resolved.Elem.from(rootElem.transformElems((updateContextId _) andThen (updateContextRef _))) assert(resolvedExpectedRootElemAfterContextUpdate != yaidom.resolved.Elem.from(rootElem)) protected def fromSimpleElem(e: Elem): E protected def updateMeasure(e: E): E protected def updateUnitId(e: E): E protected def updateUnitRef(e: E): E protected def updateContextId(e: E): E protected def updateContextRef(e: E): E protected def reorderSegmentChildren(e: E): E protected def updateFactValue(e: E): E }
dvreeze/yaidom
jvm/src/test/scala/eu/cdevreeze/yaidom/queryapitests/AbstractUpdateTest.scala
Scala
apache-2.0
8,750
package com.scalegun.aws
package lambda

import com.amazonaws.services.lambda._
import com.amazonaws.services.lambda.model._
import com.amazonaws._

import scala.collection.JavaConverters._
import java.util._
import java.nio._
import java.nio.charset._

trait Requests {

  val region: String
  val clientConfiguration: ClientConfiguration

  lazy val lambda: AWSLambda = {
    import Conversions._
    val client = new AWSLambdaClient(clientConfiguration)
    client.setRegion(region)
    client
  }

  lazy val functions = lambda.listFunctions.getFunctions.asScala.map(_.getFunctionName)

  def createFunction(
    functionName: String,
    description: String,
    s3Bucket: String,
    s3Key: String,
    roleArn: ARN,
    handler: String = "process",
    memorySize: Int = 128,
    timeout: Int = 3,
    publish: Boolean = true) = {

    if (functions.contains(functionName)) {
      updateFunctionCode(
        functionName = functionName,
        s3Bucket = s3Bucket,
        s3Key = s3Key,
        publish = publish)
    } else {
      lambda.createFunction(new CreateFunctionRequest {
        withFunctionName(functionName)
        withDescription(description)
        withRole(roleArn.get)
        withHandler(handler)
        withMemorySize(memorySize)
        withTimeout(timeout)
        withPublish(publish)
        withRuntime("java8")
        withCode(new FunctionCode {
          withS3Bucket(s3Bucket)
          withS3Key(s3Key)
        })
      })
    }

    functionName
  }

  def updateFunctionCode(
    functionName: String,
    s3Bucket: String,
    s3Key: String,
    publish: Boolean) = {

    lambda.updateFunctionCode(new UpdateFunctionCodeRequest {
      withFunctionName(functionName)
      withS3Bucket(s3Bucket)
      withS3Key(s3Key)
    })

    functionName
  }
}
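// --- Editor-added illustrative sketch, not part of the original file. ---
// A concrete Requests instance; the region and client configuration below are
// placeholder choices, and no AWS call happens until the lazy `lambda` or
// `functions` members are first used.
object RequestsSketch {
  val requests = new Requests {
    val region = "us-east-1"
    val clientConfiguration = new ClientConfiguration()
  }
}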
scalegun/aws-utils
src/main/scala/com/scalegun/aws/lambda/Requests.scala
Scala
mit
1,818
package org.jetbrains.plugins.scala package format import org.jetbrains.plugins.scala.base.SimpleTestCase import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText import org.junit.Assert._ /** * Pavel Fatin */ class FormattedStringFormatterTest extends SimpleTestCase { def testEmpty() { assertEquals(call("", ""), format()) } def testText() { assertEquals(call("foo", ""), format(Text("foo"))) } def testEscapeChar() { assertEquals(call("\\\\n", ""), format(Text("\\n"))) } def testSlash() { assertEquals(call("\\\\\\\\", ""), format(Text("\\\\"))) } def testPlainExpression() { assertEquals(call("%s", "foo"), format(Injection(exp("foo"), None))) } def testExpressionWithDispensableFormat() { assertEquals(call("%d", "foo"), format(Injection(exp("foo"), Some(Specifier(null, "%d"))))) } def testExpressionWithMadatoryFormat() { assertEquals(call("%2d", "foo"), format(Injection(exp("foo"), Some(Specifier(null, "%2d"))))) } def testPlainLiteral() { assertEquals(call("123", ""), format(Injection(exp("123"), None))) } def testLiteralWithDispensableFormat() { assertEquals(call("%d", "123"), format(Injection(exp("123"), Some(Specifier(null, "%d"))))) } def testLiteralWithMadatoryFormat() { assertEquals(call("%2d", "123"), format(Injection(exp("123"), Some(Specifier(null, "%2d"))))) } def testPlainComplexExpression() { assertEquals(call("%s", "foo.bar"), format(Injection(exp("foo.bar"), None))) } def testComplexExpressionWithDispensableFormat() { assertEquals(call("%d", "foo.bar"), format(Injection(exp("foo.bar"), Some(Specifier(null, "%d"))))) } def testComplexExpressionWithMadatoryFormat() { assertEquals(call("%2d", "foo.bar"), format(Injection(exp("foo.bar"), Some(Specifier(null, "%2d"))))) } def testPlainBlockExpression() { assertEquals(call("%s", "foo.bar"), format(Injection(exp("{foo.bar}"), None))) } def testBlockExpressionWithDispensableFormat() { assertEquals(call("%d", "foo.bar"), format(Injection(exp("{foo.bar}"), Some(Specifier(null, "%d"))))) } def testBlockExpressionWithMadatoryFormat() { assertEquals(call("%2d", "foo.bar"), format(Injection(exp("{foo.bar}"), Some(Specifier(null, "%2d"))))) } def testPlainComplexBlockExpression() { assertEquals(call("%s", "{null; foo.bar}"), format(Injection(exp("{null; foo.bar}"), None))) } def testComplexBlockExpressionWithDispensableFormat() { assertEquals(call("%d", "{null; foo.bar}"), format(Injection(exp("{null; foo.bar}"), Some(Specifier(null, "%d"))))) } def testComplexBlockExpressionWithMadatoryFormat() { assertEquals(call("%2d", "{null; foo.bar}"), format(Injection(exp("{null; foo.bar}"), Some(Specifier(null, "%2d"))))) } def testMixedParts() { assertEquals(call("foo %s bar %s", "a, b"), format(Text("foo "), Injection(exp("a"), None), Text(" bar "), Injection(exp("b"), None))) } def testLiterals() { assertEquals(call("foo", ""), format(Injection(exp('"' + "foo" + '"'), None))) assertEquals(call("123", ""), format(Injection(exp("123L"), None))) assertEquals(call("true", ""), format(Injection(exp("true"), None))) } def testOther() { assertEquals(call("", ""), format(UnboundExpression(exp("foo")))) } private def format(parts: StringPart*): String = { FormattedStringFormatter.format(parts) } def call(formatter: String, arguments: String) = '"' + formatter + '"' + ".format(%s)".format(arguments) private def exp(s: String): ScExpression = { createExpressionFromText(s) } }
ilinum/intellij-scala
test/org/jetbrains/plugins/scala/format/FormattedStringFormatterTest.scala
Scala
apache-2.0
3,700
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.plan.stream.sql.agg import org.apache.flink.api.common.time.Time import org.apache.flink.api.scala._ import org.apache.flink.table.api._ import org.apache.flink.table.api.config.OptimizerConfigOptions import org.apache.flink.table.planner.plan.rules.physical.stream.IncrementalAggregateRule import org.apache.flink.table.planner.utils.{AggregatePhaseStrategy, StreamTableTestUtil, TableTestBase} import org.junit.runner.RunWith import org.junit.runners.Parameterized import org.junit.{Before, Test} import java.util @RunWith(classOf[Parameterized]) class DistinctAggregateTest( splitDistinctAggEnabled: Boolean, aggPhaseEnforcer: AggregatePhaseStrategy) extends TableTestBase { protected val util: StreamTableTestUtil = streamTestUtil() util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c) @Before def before(): Unit = { util.tableEnv.getConfig.setIdleStateRetentionTime(Time.hours(1), Time.hours(2)) util.enableMiniBatch() util.tableEnv.getConfig.getConfiguration.setString( OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, aggPhaseEnforcer.toString) util.tableEnv.getConfig.getConfiguration.setBoolean( OptimizerConfigOptions.TABLE_OPTIMIZER_DISTINCT_AGG_SPLIT_ENABLED, splitDistinctAggEnabled) // disable incremental agg util.tableEnv.getConfig.getConfiguration.setBoolean( IncrementalAggregateRule.TABLE_OPTIMIZER_INCREMENTAL_AGG_ENABLED, false) } @Test def testSingleDistinctAgg(): Unit = { util.verifyExecPlan("SELECT COUNT(DISTINCT c) FROM MyTable") } @Test def testMultiDistinctAggs(): Unit = { util.verifyExecPlan("SELECT COUNT(DISTINCT a), SUM(DISTINCT b) FROM MyTable") } @Test def testSingleMaxWithDistinctAgg(): Unit = { val sqlQuery = """ |SELECT a, COUNT(DISTINCT b), MAX(c) |FROM MyTable |GROUP BY a """.stripMargin util.verifyExecPlan(sqlQuery) } @Test def testSingleFirstValueWithDistinctAgg(): Unit = { // FIRST_VALUE is not mergeable, so the final plan does not contain local agg util.verifyExecPlan("SELECT a, FIRST_VALUE(c), COUNT(DISTINCT b) FROM MyTable GROUP BY a") } @Test def testSingleLastValueWithDistinctAgg(): Unit = { // LAST_VALUE is not mergeable, so the final plan does not contain local agg util.verifyExecPlan("SELECT a, LAST_VALUE(c), COUNT(DISTINCT b) FROM MyTable GROUP BY a") } @Test def testSingleListAggWithDistinctAgg(): Unit = { util.verifyExecPlan("SELECT a, LISTAGG(c), COUNT(DISTINCT b) FROM MyTable GROUP BY a") } @Test def testSingleDistinctAggWithAllNonDistinctAgg(): Unit = { val sqlQuery = """ |SELECT a, COUNT(DISTINCT c), SUM(b), AVG(b), MAX(b), MIN(b), COUNT(b), COUNT(*) |FROM MyTable |GROUP BY a """.stripMargin util.verifyExecPlan(sqlQuery) } @Test def testTwoDistinctAggregateWithNonDistinctAgg(): Unit = { util.verifyExecPlan( "SELECT c, SUM(DISTINCT 
a), SUM(a), COUNT(DISTINCT b) FROM MyTable GROUP BY c") } @Test def testSingleDistinctAggWithGroupBy(): Unit = { util.verifyExecPlan("SELECT a, COUNT(DISTINCT c) FROM MyTable GROUP BY a") } @Test def testSingleDistinctAggWithAndNonDistinctAggOnSameColumn(): Unit = { util.verifyExecPlan("SELECT a, COUNT(DISTINCT b), SUM(b), AVG(b) FROM MyTable GROUP BY a") } @Test def testSomeColumnsBothInDistinctAggAndGroupBy(): Unit = { // TODO: the COUNT(DISTINCT a) can be optimized to literal 1 util.verifyExecPlan("SELECT a, COUNT(DISTINCT a), COUNT(b) FROM MyTable GROUP BY a") } @Test def testAggWithFilterClause(): Unit = { val sqlQuery = s""" |SELECT | a, | COUNT(DISTINCT b) FILTER (WHERE NOT b = 2), | SUM(b) FILTER (WHERE NOT b = 5), | SUM(b) FILTER (WHERE NOT b = 2) |FROM MyTable |GROUP BY a """.stripMargin util.verifyExecPlan(sqlQuery) } @Test def testMultiGroupBys(): Unit = { val sqlQuery = s""" |SELECT | c, MIN(b), MAX(b), SUM(b), COUNT(*), COUNT(DISTINCT a) |FROM( | SELECT | a, AVG(b) as b, MAX(c) as c | FROM MyTable | GROUP BY a |) GROUP BY c """.stripMargin util.verifyExecPlan(sqlQuery) } @Test def testSingleDistinctWithRetraction(): Unit = { val sqlQuery = """ |SELECT a, COUNT(DISTINCT b), COUNT(1) |FROM ( | SELECT c, AVG(a) as a, AVG(b) as b | FROM MyTable | GROUP BY c |) GROUP BY a """.stripMargin util.verifyRelPlan(sqlQuery, ExplainDetail.CHANGELOG_MODE) } @Test def testSumCountWithSingleDistinctAndRetraction(): Unit = { val sqlQuery = s""" |SELECT | b, SUM(b1), COUNT(DISTINCT b1), COUNT(1) |FROM( | SELECT | a, COUNT(b) as b, MAX(b) as b1 | FROM MyTable | GROUP BY a |) GROUP BY b """.stripMargin util.verifyRelPlan(sqlQuery, ExplainDetail.CHANGELOG_MODE) } @Test def testMinMaxWithRetraction(): Unit = { val sqlQuery = s""" |SELECT | c, MIN(b), MAX(b), SUM(b), COUNT(*), COUNT(DISTINCT a) |FROM( | SELECT | a, AVG(b) as b, MAX(c) as c | FROM MyTable | GROUP BY a |) GROUP BY c """.stripMargin util.verifyRelPlan(sqlQuery, ExplainDetail.CHANGELOG_MODE) } @Test def testFirstValueLastValueWithRetraction(): Unit = { val sqlQuery = s""" |SELECT | b, FIRST_VALUE(c), LAST_VALUE(c), COUNT(DISTINCT c) |FROM( | SELECT | a, COUNT(DISTINCT b) as b, MAX(b) as c | FROM MyTable | GROUP BY a |) GROUP BY b """.stripMargin util.verifyRelPlan(sqlQuery, ExplainDetail.CHANGELOG_MODE) } } object DistinctAggregateTest { @Parameterized.Parameters(name = "splitDistinctAggEnabled={0}, aggPhaseEnforcer={1}") def parameters(): util.Collection[Array[Any]] = { util.Arrays.asList( Array(true, AggregatePhaseStrategy.ONE_PHASE), Array(true, AggregatePhaseStrategy.TWO_PHASE), Array(false, AggregatePhaseStrategy.ONE_PHASE), Array(false, AggregatePhaseStrategy.TWO_PHASE) ) } }
apache/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/agg/DistinctAggregateTest.scala
Scala
apache-2.0
7193
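For context on what the `TABLE_OPTIMIZER_DISTINCT_AGG_SPLIT_ENABLED` switch in the test above toggles: when the split is enabled, the planner rewrites a distinct aggregate into a two-level aggregation keyed by a hash bucket of the distinct column, spreading the hot distinct state across many keys. A hand-written sketch of that rewrite for the first test query follows; the bucket count and exact form are illustrative, not the planner's literal output.

// Illustrative rewrite of "SELECT COUNT(DISTINCT c) FROM MyTable" under split distinct
// aggregation (the bucket count 1024 is chosen for illustration only).
val splitDistinctSketch =
  """
    |SELECT SUM(cnt) FROM (
    |  SELECT COUNT(DISTINCT c) AS cnt
    |  FROM MyTable
    |  GROUP BY MOD(HASH_CODE(c), 1024)
    |) t
  """.stripMargin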
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.crypto import java.security.SecureRandom import com.typesafe.config.ConfigFactory import org.apache.commons.codec.binary.Base64 import org.mockito.MockitoSugar import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import collection.JavaConverters._ class CryptoGCMWithKeysFromConfigSpec extends AnyWordSpecLike with Matchers with MockitoSugar { private val keybytes = new Array[Byte](16 * 2) private val previousKeybytes1 = new Array[Byte](16 * 2) private val previousKeybytes2 = new Array[Byte](16 * 2) val rand = new SecureRandom() rand.nextBytes(keybytes) rand.nextBytes(previousKeybytes1) rand.nextBytes(previousKeybytes2) private val baseConfigKey = "crypto.spec" private object CurrentKey { val configKey = baseConfigKey + ".key" val encryptionKey = Base64.encodeBase64String(keybytes) val plainMessage = PlainText("this is my message") val plainByteMessage = PlainBytes("this is a bunch of bytes".getBytes) val plainByteMessageResponse = PlainText("this is a bunch of bytes") } private object PreviousKey1 { val encryptionKey = Base64.encodeBase64String(previousKeybytes1) val plainMessage = PlainText("this is the first plain message") val plainByteMessage = PlainBytes("this is the first bunch of bytes".getBytes) val plainByteMessageResponse = PlainText("this is the first bunch of bytes") } private object PreviousKey2 { val encryptionKey = Base64.encodeBase64String(previousKeybytes2) val plainMessage = PlainText("this is the second plain message") val plainByteMessage = PlainBytes("this is the second bunch of bytes".getBytes) val plainByteMessageResponse = PlainText("this is the second bunch of bytes") } private object PreviousKeys { val configKey = baseConfigKey + ".previousKeys" val encryptionKeys = Seq(PreviousKey1.encryptionKey, PreviousKey2.encryptionKey) } "Constructing a CompositeCryptoWithKeysFromConfig with a current key, but no previous keys configured" should { val config = ConfigFactory.parseMap( Map( CurrentKey.configKey -> CurrentKey.encryptionKey ).asJava ) "return a properly initialised, functional AuthenticatedEncryption object that works with the current key only" in { val crypto = new CryptoGCMWithKeysFromConfig(baseConfigKey, config) crypto.decrypt(crypto.encrypt(CurrentKey.plainMessage)) shouldBe CurrentKey.plainMessage crypto.decrypt(crypto.encrypt(CurrentKey.plainByteMessage)) shouldBe CurrentKey.plainByteMessageResponse val previousKey1Crypto = CompositeSymmetricCrypto.aesGCM(PreviousKey1.encryptionKey, Seq.empty) val encryptedWithPreviousKey1 = crypto.encrypt(PreviousKey1.plainMessage, previousKey1Crypto) intercept[SecurityException] { crypto.decrypt(encryptedWithPreviousKey1) } } } "Constructing a CryptoGCMWithKeysFromConfig with a current key and empty previous keys" should { val config = ConfigFactory.parseMap( Map( CurrentKey.configKey -> CurrentKey.encryptionKey, PreviousKeys.configKey -> List.empty.asJava ).asJava ) "return a properly 
initialised, functional AuthenticatedEncryption object that works with the current key only" in { val crypto = new CryptoGCMWithKeysFromConfig(baseConfigKey, config) crypto.decrypt(crypto.encrypt(CurrentKey.plainMessage)) shouldBe CurrentKey.plainMessage crypto.decrypt(crypto.encrypt(CurrentKey.plainByteMessage)) shouldBe CurrentKey.plainByteMessageResponse val previousKey1Crypto = CompositeSymmetricCrypto.aesGCM(PreviousKey1.encryptionKey, Seq.empty) val encryptedWithPreviousKey1 = crypto.encrypt(PreviousKey1.plainMessage, previousKey1Crypto) intercept[SecurityException] { crypto.decrypt(encryptedWithPreviousKey1) } } } "Constructing a CompositeCryptoWithKeysFromConfig with both current and previous keys" should { val config = ConfigFactory.parseMap( Map( CurrentKey.configKey -> CurrentKey.encryptionKey, PreviousKeys.configKey -> PreviousKeys.encryptionKeys.asJava ).asJava ) "allows decrypting payloads that were encrypted using previous keys" in { val crypto = new CryptoGCMWithKeysFromConfig(baseConfigKey, config) val previousKey1Crypto = CompositeSymmetricCrypto.aesGCM(PreviousKey1.encryptionKey, Seq.empty) val encryptedWithPreviousKey1 = crypto.encrypt(PreviousKey1.plainMessage, previousKey1Crypto) val encryptedBytesWithPreviousKey1 = crypto.encrypt(PreviousKey1.plainByteMessage, previousKey1Crypto) crypto.decrypt(encryptedWithPreviousKey1) shouldBe PreviousKey1.plainMessage crypto.decrypt(encryptedBytesWithPreviousKey1) shouldBe PreviousKey1.plainByteMessageResponse val previousKey2Crypto = CompositeSymmetricCrypto.aesGCM(PreviousKey2.encryptionKey, Seq.empty) val encryptedWithPreviousKey2 = crypto.encrypt(PreviousKey2.plainMessage, previousKey2Crypto) val encryptedBytesWithPreviousKey2 = crypto.encrypt(PreviousKey2.plainByteMessage, previousKey2Crypto) crypto.decrypt(encryptedWithPreviousKey2) shouldBe PreviousKey2.plainMessage crypto.decrypt(encryptedBytesWithPreviousKey2) shouldBe PreviousKey2.plainByteMessageResponse } } "Constructing a CompositeCryptoWithKeysFromConfig without current or previous keys" should { "throw a SecurityException on construction" in { intercept[SecurityException] { new CryptoGCMWithKeysFromConfig(baseConfigKey, ConfigFactory.empty()) } } } "Constructing a CompositeCryptoWithKeysFromConfig without a current key, but with previous keys" should { "throw a SecurityException on construction" in { val config = ConfigFactory.parseMap( Map( PreviousKeys.configKey -> PreviousKeys.encryptionKeys.asJava ).asJava ) intercept[SecurityException] { new CryptoGCMWithKeysFromConfig(baseConfigKey, config) } } } "Constructing a CryptoGCMWithKeysFromConfig with an invalid key" should { "throw a SecurityException if the current key is too short" in { val keyWithInvalidNumberOfBits = "ZGVmZ2hpamtsbW4K" val config = ConfigFactory.parseMap( Map( CurrentKey.configKey -> keyWithInvalidNumberOfBits, PreviousKeys.configKey -> PreviousKeys.encryptionKeys.asJava ).asJava ) intercept[SecurityException] { new CryptoGCMWithKeysFromConfig(baseConfigKey, config) } } "throw a SecurityException if the current key length is not 128 bits" in { val keyWithInvalidKeySize = "defgh£jklmn" val config = ConfigFactory.parseMap( Map( CurrentKey.configKey -> keyWithInvalidKeySize, PreviousKeys.configKey -> PreviousKeys.encryptionKeys.asJava ).asJava ) intercept[SecurityException] { new CryptoGCMWithKeysFromConfig(baseConfigKey, config) } } } }
hmrc/crypto
src/test/scala/uk/gov/hmrc/crypto/CryptoGCMWithKeysFromConfigSpec.scala
Scala
apache-2.0
7858
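The CryptoGCMWithKeysFromConfig spec above builds its configuration programmatically; in an application the same shape would normally live in HOCON. A minimal sketch of that configuration, parsed with the same Typesafe Config API the spec already uses (the key values are placeholders, not real keys):

import com.typesafe.config.ConfigFactory

// Placeholder keys only: real values are base64-encoded AES keys of a valid length.
val cryptoConfig = ConfigFactory.parseString(
  """
    |crypto.spec {
    |  key          = "<base64 current key>"
    |  previousKeys = ["<base64 retired key 1>", "<base64 retired key 2>"]
    |}
  """.stripMargin)
// A crypto built from this config encrypts with `key` but can still decrypt payloads
// written with any entry in `previousKeys`, which is exactly what the spec exercises.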
package com.codahale.jerkson.ser

import org.codehaus.jackson.JsonGenerator
import org.codehaus.jackson.map.{SerializerProvider, JsonSerializer}
import org.codehaus.jackson.map.annotate.JsonCachable

@JsonCachable
class OptionSerializer extends JsonSerializer[Option[_]] {
  def serialize(value: Option[_], json: JsonGenerator, provider: SerializerProvider) {
    provider.defaultSerializeValue(value.orNull, json)
  }
}
cphylabs/jerkson-old
src/main/scala/com/codahale/jerkson/ser/OptionSerializer.scala
Scala
mit
437
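The OptionSerializer above leans entirely on `Option.orNull`; a tiny self-contained illustration of that behaviour (not part of the jerkson sources) makes the effect explicit: `Some(x)` is serialized as the underlying value and `None` as JSON null.

// orNull collapses the Option before Jackson ever sees it.
val some: Option[String] = Some("ready")
val none: Option[String] = None

assert(some.orNull == "ready") // delegated serialization sees the plain value
assert(none.orNull == null)    // delegated serialization sees null -> JSON null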
package org.akka.essentials.future.example

import akka.actor.ActorSystem
import akka.actor.Props

case class Order(userId: Int, orderNo: Int, amount: Float, noOfItems: Int)
case class Address(userId: Int, fullName: String, address1: String, address2: String)
case class OrderHistory(order: Order, address: Address)

object TestActorSystem {
  def main(args: Array[String]): Unit = {
    val _system = ActorSystem("FutureUsageExample")
    val processOrder = _system.actorOf(Props[ProcessOrderActor])
    processOrder ! 456
    Thread.sleep(5000)
    _system.shutdown
  }
}
rokumar7/trial
ActorMessagingExample/src/main/scala/org/akka/essentials/future/example/TestActorSystem.scala
Scala
unlicense
574
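The `ProcessOrderActor` referenced by the example above lives in another file; a hypothetical sketch of how it might combine futures is given below, assuming an Akka 2.1+/Scala 2.10-style `scala.concurrent.Future`. The lookups returning `Order` and `Address` are stand-ins, not the original code.

import akka.actor.Actor
import scala.concurrent.Future

class ProcessOrderActorSketch extends Actor {
  import context.dispatcher // ExecutionContext for the futures below

  def receive = {
    case userId: Int =>
      // fetch the order and the address concurrently, then zip them into an OrderHistory
      val orderFuture   = Future(Order(userId, orderNo = 101, amount = 126.50f, noOfItems = 3))
      val addressFuture = Future(Address(userId, "Full Name", "Address line 1", "Address line 2"))
      val historyFuture = for {
        order   <- orderFuture
        address <- addressFuture
      } yield OrderHistory(order, address)
      historyFuture.foreach(history => println("Order history: " + history))
  }
}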
package org.tensorframes.impl import org.apache.spark.sql.Row import org.apache.spark.sql.types.StructType import org.tensorframes.{ColumnInformation, Logging, NodePath, Shape} import org.{tensorflow => tf} import scala.collection.mutable /** * Converts data between the C++ runtime of TensorFlow and the Spark runtime. * * Current (only) implementation exports each row and copies it back into a C++ buffer.* * This implementation uses the official Java Tensorflow API (experimental). */ object TFDataOps extends Logging { /** * Performs size checks and resolutions, and converts the data from the row format to the C++ * buffers. * * @param it * @param struct the structure of the block. It should contain all the extra meta-data required by * TensorFrames. * @param requestedTFCols: the columns that will be fed into TF * @return pairs of plaholder path -> input tensor */ def convert( it: Array[Row], struct: StructType, requestedTFCols: Array[(NodePath, Int)]): Seq[(String, tf.Tensor[_])] = { // This is a very simple and very inefficient implementation. It should be kept // as is for correctness checks. val convertersWithPaths = requestedTFCols.map { case (npath, idx) => val f = struct.fields(idx) // Extract and check the shape val ci = ColumnInformation(f).stf.getOrElse { throw new Exception(s"Could not column information for column $f") } val leadDim = ci.shape.dims.headOption.getOrElse { throw new Exception(s"Column $f found to be scalar, but its dimensions should be >= 1") } .toInt if (leadDim != Shape.Unknown && leadDim != it.length) { throw new Exception(s"Lead dimension for column $f (found to be $leadDim)" + s" is not compatible with a block of size ${it.length}. " + s"Expected block structure: $struct, meta info = $ci") } val conv = SupportedOperations.opsFor(ci.dataType).tfConverter(ci.shape.tail, it.length) conv.reserve() npath -> conv } val converters = convertersWithPaths.map(_._2) // The indexes requested by tensorflow val requestedTFColIdxs = requestedTFCols.map(_._2) DataOps.convertFast0(it, converters, requestedTFColIdxs) convertersWithPaths.map { case (npath, conv) => npath -> conv.tensor2() } } /** * Converts a single row at a time. * * @param r the row to convert * @param blockStruct the structure of the block that produced this row * @param requestedTFCols the requested columns * @return */ def convert( r: Row, blockStruct: StructType, requestedTFCols: Array[(NodePath, Int)]): Seq[(String, tf.Tensor[_])] = { // This is a very simple and very inefficient implementation. It should be kept // as is for correctness checks. The columnar implementation is meant to be more // efficient. logDebug(s"Calling convert on one with struct: $blockStruct") val elts = requestedTFCols.map { case (npath, idx) => val f = blockStruct.fields(idx) // Extract and check the shape val ci = ColumnInformation(f).stf.getOrElse { throw new Exception(s"Could not column information for column $f") } assert(ci.shape.numDims >= 1, s"Column $f found to be a scala, but its dimensions should be >= 1") // Special case: if the cell shape has undefined size in its first argument, we // still accept it and attempt to get it from the shape. This is to support rows // with different vector sizes. All other dimensions must match, although this // could be relaxed in the future as well. It is harder to check. 
val cellShape = { val givenCellShape = ci.shape.tail if (givenCellShape.dims.headOption == Some(Shape.Unknown)) { r.get(idx) match { case s: Array[_] => givenCellShape.tail.prepend(s.length.toLong) case s: Seq[_] => givenCellShape.tail.prepend(s.length.toLong) case _ => givenCellShape } } else { givenCellShape } } assert(!cellShape.hasUnknown, s"The computed shape for the cell $idx (field $f) is $cellShape, which has unknowns") val conv = SupportedOperations.opsFor(ci.dataType).tfConverter(cellShape, 1) conv.reserve() conv.append(r, idx) npath -> conv.tensor2() } elts } /** * (Slow) implementation that takes data in C++ and puts it back into SQL rows, following * the structure provided and merging back all the columns from the input. * * @param tv * @param tf_struct the structure of the block represented in TF * @return an iterator that lazily computes the rows back. */ // Note that doing it this way is very inefficient, but columnar implementation should prevent all this // data copying in most cases. // TODO PERF: the current code allocates a new row for each of the rows returned. // Instead of doing that, it could allocate once the memory and reuse the same rows and objects. def convertBack( tv: Seq[tf.Tensor[_]], tf_struct: StructType, input: Array[Row], input_struct: StructType, appendInput: Boolean): Iterator[Row] = { // The structures should already have been validated. // Output has all the TF columns first, and then the other columns logDebug(s"convertBack: ${input.length} input rows, tv=$tv tf_struct=$tf_struct input_struct=$input_struct " + s"append=$appendInput") val tfSizesAndIters = for ((field, t) <- tf_struct.fields.zip(tv).toSeq) yield { val info = ColumnInformation(field).stf.getOrElse { throw new Exception(s"Missing info in field $field") } // logTrace(s"convertBack: $field $info") // Drop the first cell, this is a block. val expLength = if (appendInput) { Some(input.length) } else { None } val (numRows, iter) = getColumn(t, info.dataType, info.shape.tail, expLength) numRows -> iter } val tfSizes = tfSizesAndIters.map(_._1) val tfNumRows: Int = tfSizes.distinct match { case Seq(x) => x case Seq() => throw new Exception(s"Output cannot be empty. tf_struct=$tf_struct") case _ => throw new Exception(s"Multiple number of rows detected. tf_struct=$tf_struct," + s" tfSizes = $tfSizes") } assert((!appendInput) || tfNumRows == input.length, s"Incompatible sizes detected: appendInput=$appendInput, tf num rows = $tfNumRows, " + s"input num rows = ${input.length}") val tfIters = tfSizesAndIters.map(_._2.iterator).toArray val outputSchema = if (appendInput) { StructType(tf_struct.fields ++ input_struct.fields) } else { StructType(tf_struct.fields) } val res: Iterator[Row] = DataOps.convertBackFast0(input, tfIters, tfNumRows, input_struct, outputSchema) res } /** * Extracts the content of a column as objects amenable to SQL. * * @param t * @param scalaType the scalar type of the tensor * @param cellShape the shape of each cell of data * @param expectedNumRows the expected number of rows in the output. Depending on the shape * (which may have unknowns) and the expected number of rows (which may * also be unknown), this function will try to compute both the physical * shape and the actual number of rows based on the size of the * flattened tensor. 
* @return the number of rows and an iterable over the rows */ private def getColumn( t: tf.Tensor[_], scalaType: ScalarType, cellShape: Shape, expectedNumRows: Option[Int], fastPath: Boolean = true): (Int, Iterable[Any]) = { val allDataBuffer: mutable.WrappedArray[_] = SupportedOperations.opsFor(scalaType).convertTensor(t) val numData = allDataBuffer.size // Infer if necessary the reshaping size. val (inferredNumRows, inferredShape) = DataOps.inferPhysicalShape(numData, cellShape, expectedNumRows) val reshapeShape = inferredShape.prepend(inferredNumRows) val res = if (fastPath) { DataOps.getColumnFast0(reshapeShape, scalaType, allDataBuffer) } else { DataOps.reshapeIter(allDataBuffer.asInstanceOf[mutable.WrappedArray[Any]], inferredShape.dims.toList) } inferredNumRows -> res } }
tjhunter/tensorframes
src/main/scala/org/tensorframes/impl/TFDataOps.scala
Scala
apache-2.0
8506
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package org.apache.toree.kernel.protocol.v5.client.socket import java.util.UUID import akka.actor.{ActorRef, ActorSystem, Props} import akka.testkit.{TestProbe, ImplicitSender, TestKit} import org.apache.toree.communication.ZMQMessage import org.apache.toree.communication.security.SecurityActorType import org.apache.toree.kernel.protocol.v5._ import org.apache.toree.kernel.protocol.v5.client.ActorLoader import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest import org.scalatestplus.mockito.MockitoSugar import org.scalatest.{Matchers, FunSpecLike} import org.mockito.Mockito._ import org.mockito.Matchers._ import play.api.libs.json.Json class ShellClientSpec extends TestKit(ActorSystem("ShellActorSpec")) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar { private val SignatureEnabled = true describe("ShellClientActor") { val socketFactory = mock[SocketFactory] val mockActorLoader = mock[ActorLoader] val probe : TestProbe = TestProbe() when(socketFactory.ShellClient( any(classOf[ActorSystem]), any(classOf[ActorRef]) )).thenReturn(probe.ref) val signatureManagerProbe = TestProbe() doReturn(system.actorSelection(signatureManagerProbe.ref.path.toString)) .when(mockActorLoader).load(SecurityActorType.SignatureManager) val shellClient = system.actorOf(Props( classOf[ShellClient], socketFactory, mockActorLoader, SignatureEnabled )) describe("send execute request") { it("should send execute request") { val request = ExecuteRequest( "foo", false, true, UserExpressions(), true ) val header = Header( UUID.randomUUID().toString, "spark", UUID.randomUUID().toString, MessageType.Incoming.ExecuteRequest.toString, "5.0" ) val kernelMessage = KernelMessage( Seq[Array[Byte]](), "", header, HeaderBuilder.empty, Metadata(), Json.toJson(request).toString ) shellClient ! kernelMessage // Echo back the kernel message sent to have a signature injected signatureManagerProbe.expectMsgClass(classOf[KernelMessage]) signatureManagerProbe.reply(kernelMessage) probe.expectMsgClass(classOf[ZMQMessage]) } } } }
lresende/incubator-toree
client/src/test/scala/org/apache/toree/kernel/protocol/v5/client/socket/ShellClientSpec.scala
Scala
apache-2.0
3105
/*
 * Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
 * Licensed under the Apache License, Version 2.0 (see LICENSE).
 */
package org.maproulette.framework.psql

import java.sql.SQLException

import org.scalatestplus.play.PlaySpec

/**
  * @author mcuthbert
  */
class GroupingSpec extends PlaySpec {
  "Grouping" should {
    "not generate sql if no strings provided" in {
      Grouping().sql() mustEqual ""
    }

    "generate the correct group by value" in {
      (Grouping > "test").sql() mustEqual "GROUP BY test"
    }

    "generate multiple groups correctly" in {
      (Grouping > ("test1", "test2")).sql() mustEqual "GROUP BY test1,test2"
    }

    "fail if provided an invalid column name" in {
      intercept[SQLException] {
        (Grouping > "$%invalud.name").sql()
      }
    }
  }
}
mgcuthbert/maproulette2
test/org/maproulette/framework/psql/GroupingSpec.scala
Scala
apache-2.0
823
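The spec above pins down the DSL's observable behaviour; a minimal self-contained sketch of a builder with the same shape (this is not MapRoulette's actual `Grouping` implementation, just an illustration of the contract the tests describe) could look like:

import java.sql.SQLException

// Renders "GROUP BY col1,col2" and rejects column names that are not plain identifiers.
case class SketchGrouping(columns: Seq[String] = Seq.empty) {
  private val validColumn = "^[A-Za-z_][A-Za-z0-9_.]*$".r

  def sql(): String =
    if (columns.isEmpty) ""
    else {
      columns.foreach { c =>
        if (validColumn.findFirstIn(c).isEmpty)
          throw new SQLException(s"Invalid column name '$c'")
      }
      columns.mkString("GROUP BY ", ",", "")
    }
}

object SketchGrouping {
  def >(columns: String*): SketchGrouping = SketchGrouping(columns)
}

// (SketchGrouping > "test").sql()             == "GROUP BY test"
// (SketchGrouping > ("test1", "test2")).sql() == "GROUP BY test1,test2"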
import org.scalatest._
import org.apache.log4j.Level

import scala.util.Random

class SparkSpec extends UnitTestSpec with SparkLocalMode with BeforeAndAfter {
  before {
    setLoggingLevel(Level.ERROR)
    initSparkContext
  }

  after {
    resetSparkContext
  }

  "A Spark" should "reduce RDD" in {
    val n = 10
    val vector = for (i <- 0 until n) yield Random.nextInt(n)
    val randoms = sc.parallelize(vector)
    randoms.reduce(_+_) should equal (vector.sum)
  }

  it should "map RDD" in {
    val lines = Array("simple", "lines", "for", "Spark")
    val lined = sc.parallelize(lines).map(_.length)
    lined.reduce(_+_) should equal (lines.map(_.length).sum)
  }
}
sadikovi/sbt-multi-project-example
foo/src/test/scala/SparkSpec.scala
Scala
mit
753
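`UnitTestSpec` and `SparkLocalMode` are helper traits not shown in this file. A minimal sketch of what a `SparkLocalMode` trait with the members the spec calls (`sc`, `initSparkContext`, `resetSparkContext`, `setLoggingLevel`) might look like, under the assumption that it simply manages a local-mode `SparkContext`:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.log4j.{Level, Logger}

trait SparkLocalMode {
  @transient var sc: SparkContext = _

  def initSparkContext: Unit = {
    // two local worker threads are enough for these tests
    val conf = new SparkConf().setMaster("local[2]").setAppName("unit-tests")
    sc = new SparkContext(conf)
  }

  def resetSparkContext: Unit = {
    if (sc != null) sc.stop()
    sc = null
  }

  def setLoggingLevel(level: Level): Unit =
    Logger.getRootLogger.setLevel(level)
}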
package com.sksamuel.elastic4s.anaylzers

trait AnalyzerDsl {

  def stopAnalyzer(name: String): StopAnalyzerDefinition =
    StopAnalyzerDefinition(name)

  def standardAnalyzer(name: String): StandardAnalyzerDefinition =
    StandardAnalyzerDefinition(name)

  def patternAnalyzer(name: String, regex: String): PatternAnalyzerDefinition =
    PatternAnalyzerDefinition(name, regex)

  def snowballAnalyzer(name: String): SnowballAnalyzerDefinition =
    SnowballAnalyzerDefinition(name)
}
beni55/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/anaylzers/AnalyzerDsl.scala
Scala
apache-2.0
471
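Using the DSL above is just a matter of mixing the trait in and naming the analyzers; a small example built only from the factory signatures shown (the surrounding index-creation plumbing is elided, and the analyzer names are illustrative):

object MyAnalyzers extends AnalyzerDsl {
  val names = standardAnalyzer("names")
  val tags  = patternAnalyzer("tags", regex = ",")
  val stems = snowballAnalyzer("stemmed_body")
  val stops = stopAnalyzer("no_stopwords")
}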
package yuuto.yuutolib.block

import java.util.List

import net.minecraft.inventory.Container
import net.minecraft.entity.player.InventoryPlayer
import net.minecraft.inventory.IInventory
import net.minecraft.inventory.Slot
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack

/**
 * @author Jacob
 */
abstract class ContainerAlt(val inventory: IInventory, val playerInventory: InventoryPlayer) extends Container {
  var playerInvStart: Int = 0
  var playerInvEnd: Int = 0

  def init() {
    bindPlayerInventory(bindInventorySlots())
    bindOtherSlots()
  }

  def bindInventorySlots(): Array[Int]

  def bindPlayerInventory(pos: Array[Int]) {
    playerInvStart = this.inventorySlots.size()
    var i = 0
    var j = 0
    for (i <- 0 until 3) {
      for (j <- 0 until 9) {
        addSlotToContainer(new Slot(playerInventory, j + i * 9 + 9, pos(0) + j * 18, pos(1) + i * 18))
      }
    }
    i = 0
    for (i <- 0 until 9) {
      addSlotToContainer(new Slot(playerInventory, i, pos(0) + i * 18, pos(1) + 58))
    }
    playerInvEnd = this.inventorySlots.size()
  }

  def bindOtherSlots() = {}

  override def transferStackInSlot(player: EntityPlayer, slot: Int): ItemStack = {
    var stack: ItemStack = null
    val slotObject: Slot = inventorySlots.get(slot).asInstanceOf[Slot]
    // null checks and checks if the item can be stacked (maxStackSize > 1)
    if (slotObject != null && slotObject.getHasStack()) {
      val stackInSlot: ItemStack = slotObject.getStack()
      stack = stackInSlot.copy()
      // merges the item into the player inventory since it is in the tile entity
      if (slot < playerInvStart) {
        if (!this.mergeItemStack(stackInSlot, playerInvStart, playerInvEnd, true)) {
          return null
        }
      }
      // places it into the tile entity if possible since it is in the player inventory
      else if (!this.mergeItemStack(stackInSlot, 0, playerInvStart, false)) {
        return null
      }
      if (stackInSlot.stackSize == 0) {
        slotObject.putStack(null)
      } else {
        slotObject.onSlotChanged()
      }
      if (stackInSlot.stackSize == stack.stackSize) {
        return null
      }
      slotObject.onPickupFromSlot(player, stackInSlot)
    }
    return stack
  }
}
AnimeniacYuuto/YuutoLib
src/main/scala/yuuto/yuutolib/block/ContainerAlt.scala
Scala
gpl-3.0
2752
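A hypothetical concrete container built on the helper above: `bindInventorySlots` registers the machine's own slots and returns the `(x, y)` offset at which the player inventory should be laid out. The slot layout, GUI coordinates, and `canInteractWith` body are assumptions for illustration, not code from this mod.

class ContainerExampleMachine(inv: IInventory, playerInv: InventoryPlayer)
    extends ContainerAlt(inv, playerInv) {

  def bindInventorySlots(): Array[Int] = {
    // one machine slot at (80, 35), roughly where a furnace input sits
    addSlotToContainer(new Slot(inventory, 0, 80, 35))
    // player inventory starts at (8, 84) in a standard 176x166 GUI texture
    Array(8, 84)
  }

  override def canInteractWith(player: EntityPlayer): Boolean =
    inventory.isUseableByPlayer(player)

  init()
}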
package com.ereisman.esurient.etl.format

import java.io.OutputStream
import java.sql.ResultSet

/**
 * Pluggable interface for formatting each record from a ResultSet for output to HDFS.
 */
trait EtlOutputFormatter {
  def formatRecord(resultSet: ResultSet, toHdfsFile: OutputStream): Long = { 0L }
}
initialcontext/esurient
src/main/scala/com/ereisman/esurient/etl/format/EtlOutputFormatter.scala
Scala
apache-2.0
302
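A hypothetical implementation of the trait above, writing one tab-separated line per record with plain JDBC calls; the delimiter, null marker, charset, and the choice to return the number of bytes written are assumptions, since the trait itself only fixes the signature.

import java.io.OutputStream
import java.nio.charset.StandardCharsets
import java.sql.ResultSet

class TsvOutputFormatter extends EtlOutputFormatter {
  override def formatRecord(resultSet: ResultSet, toHdfsFile: OutputStream): Long = {
    val columnCount = resultSet.getMetaData.getColumnCount
    // read every column of the current row as a string, mapping SQL NULL to "\N"
    val line = (1 to columnCount)
      .map(i => Option(resultSet.getString(i)).getOrElse("\\N"))
      .mkString("", "\t", "\n")
    val bytes = line.getBytes(StandardCharsets.UTF_8)
    toHdfsFile.write(bytes)
    bytes.length.toLong
  }
}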
// Copyright (C) 2017 Calin Cruceru <calin.cruceru@stud.acs.upb.ro>.
//
// See the LICENCE file distributed with this work for additional
// information regarding copyright ownership.

package org.symnet
package models.iptables
package extensions.tcp

// project
import core.{BaseParsers, Match}

object SynMatch extends BaseParsers {
  import ParserMP.monadPlusSyntax._

  def parser: Parser[Match] =
    for {
      _  <- spacesParser
      n1 <- optional(parseChar('!') >> someSpacesParser)
      _  <- parseString("--syn")
    } yield Match.maybeNegated(TcpFlagsMatch(Set("SYN", "RST", "ACK", "FIN"), Set("SYN")), n1)
}
calincru/iptables-sefl
src/main/scala/org/symnet/models/iptables/extensions/tcp/SynMatch.scala
Scala
mit
698
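For readers unfamiliar with the flag sets passed to `TcpFlagsMatch` above: iptables' `--syn` is shorthand for `--tcp-flags SYN,RST,ACK,FIN SYN`, i.e. of the four masked flags only SYN may be set. A tiny self-contained check with the same semantics (independent of this project's classes):

def matchesSyn(setFlags: Set[String]): Boolean = {
  val mask       = Set("SYN", "RST", "ACK", "FIN") // flags we examine
  val comparison = Set("SYN")                      // the only masked flag allowed to be set
  (setFlags intersect mask) == comparison
}

// matchesSyn(Set("SYN"))        == true   -- new connection attempt
// matchesSyn(Set("SYN", "ACK")) == false  -- reply segment, not matched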
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cogx import scala.language.reflectiveCalls import org.scalatest.junit.JUnitRunner import org.scalatest.FunSuite import org.scalatest.MustMatchers import org.junit.runner.RunWith import cogx.reference._ import cogx.helper.ScalarFieldBuilderInterface import cogx.helper.MatrixFieldBuilderInterface import cogx.helper.VectorFieldBuilderInterface import cogx.api.ImplicitConversions /** Test code for VectorFields. */ @RunWith(classOf[JUnitRunner]) class VectorFieldSpec extends FunSuite with MustMatchers with ImplicitConversions with ScalarFieldBuilderInterface with MatrixFieldBuilderInterface with VectorFieldBuilderInterface { val VectorSize = 3 val VectorShape = Shape(VectorSize) val Optimize = true /** Test combining a dynamic vector field and a constant. */ test("vector field / constant") { def vecGreater(v: Vector, thresh: Float) = v.map(x => if (x > thresh) 1f else 0f) def vecGreaterEq(v: Vector, thresh: Float) = v.map(x => if (x >= thresh) 1f else 0f) def vecLess(v: Vector, thresh: Float) = v.map(x => if (x < thresh) 1f else 0f) def vecLessEq(v: Vector, thresh: Float) = v.map(x => if (x <= thresh) 1f else 0f) def vecEq(v: Vector, thresh: Float) = v.map(x => if (x == thresh) 1f else 0f) def vecNotEq(v: Vector, thresh: Float) = v.map(x => if (x != thresh) 1f else 0f) def doTest(vectorLen: Int): Unit = { val vectorShape = Shape(vectorLen) val Size = 5 val R = 3.21f val initField = RefVectorField.random(Size, Size, vectorShape) // Tests that involve equality should involve a threshold that matches // at least one element. Here are some elements: val elem1 = initField.read(0,0).read(0) val elem2 = initField.read(Size-1,0).read(0) val elem3 = initField.read(Size-1,Size-1).read(0) val elem4 = initField.read(0,0).read(vectorLen-1) val graph = new ComputeGraph(Optimize) with RefTestInterface { val a = TestVectorField(initField) val sum = a + R val diff = a - R val product = a * R val quotient = a / R val maxa = max(a, 0.5f) val mina = min(a, 0.5f) val power = pow(a, 3.0f) val pown = pow(a, 3) // Constant appears first in mutator expression val constSum = R + a val constDiff = R - a val constProduct = R * a val constQuotient = R / a val greater = a > 0.5f val greaterEq = a >= elem1 val less = a < 0.5f val lessEq = a <= elem2 val eq3 = a === elem3 val nEq3 = a !=== elem4 // Above tests are all on 2D inputs. 
Test 0D, 1D and 3D: val a0D = TestVectorField(RefVectorField.random(vectorShape)) val a1D = TestVectorField(RefVectorField.random(Size, vectorShape)) val a3D = TestVectorField(RefVectorField.random(Size, Size, Size, vectorShape)) val sum0D = a0D + R val sum1D = a1D + R val sum3D = a3D + R probe(sum, a, diff, product, quotient, maxa, mina, power, pown, constSum, constDiff, constProduct, constQuotient, greater, greaterEq, less, lessEq, eq3, nEq3, sum0D, a0D, sum1D, a1D, sum3D, a3D) } import graph._ withRelease { step require(readVector(sum) == (readVector(graph.a) + R)) require(readVector(diff) == (readVector(graph.a) - R)) require(readVector(product) == (readVector(graph.a) * R)) require(readVector(quotient) ~== (readVector(graph.a) / R)) require(readVector(maxa) ~== readVector(graph.a).map(v => v.map(_ max 0.5f))) require(readVector(mina) ~== readVector(graph.a).map(v => v.map(_ min 0.5f))) require(readVector(power) ~== (readVector(graph.a) :* readVector(graph.a) :* readVector(graph.a))) require(readVector(pown) ~== (readVector(graph.a) :* readVector(graph.a) :* readVector(graph.a))) require(readVector(constSum) == (readVector(graph.a) + R)) require(readVector(constDiff) == (-readVector(graph.a) + R)) require(readVector(constProduct) == (readVector(graph.a) * R)) require(readVector(constQuotient) ~== (readVector(graph.a).reciprocal * R)) require(readVector(greater) == readVector(graph.a).map(vecGreater(_, 0.5f))) require(readVector(greaterEq) == readVector(graph.a).map(vecGreaterEq(_, elem1))) require(readVector(less) == readVector(graph.a).map(vecLess(_, 0.5f))) require(readVector(lessEq) == readVector(graph.a).map(vecLessEq(_, elem2))) require(readVector(eq3) == readVector(graph.a).map(vecEq(_, elem3))) require(readVector(nEq3) == readVector(graph.a).map(vecNotEq(_, elem4))) require(readVector(sum0D) == (readVector(a0D) + R)) require(readVector(sum1D) == (readVector(a1D) + R)) require(readVector(sum3D) == (readVector(a3D) + R)) } } // The implementation of pown has length-specific code for all the "SmallTensor" types. // We test all these vector lengths, plus the non-SmallTensor value 5. val vectorLengths = Seq(2,3,4,5,8,16) for (vectorLength <- vectorLengths) doTest(vectorLength) } /** Test combining a dynamic vector field and a vector constant. */ test("vector field / vector constant") { val Size = 5 val R = Vector.random(VectorSize) val initField = RefVectorField.random(Size, Size, VectorShape) // Tests that involve equality should involve a threshold that matches // at least one element. Here are some elements: val elem1 = initField.read(0,0) val elem2 = initField.read(Size-1,0) val elem3 = initField.read(Size-1,Size-1) val elem4 = initField.read(0,0) val graph = new ComputeGraph(Optimize) with RefTestInterface { val a = TestVectorField(initField) val sum = a + R val diff = a - R val product = a * R val quotient = a / R val maxa = max(a, R) val mina = min(a, R) // Constant appears first in mutator expression val constSum = R + a val constDiff = R - a val constProduct = R * a val constQuotient = R / a val greater = a > R val greaterEq = a >= elem1 val less = a < R val lessEq = a <= elem2 val eq3 = a === elem3 val nEq3 = a !=== elem4 // Above tests are all on 2D inputs. 
Test 0D, 1D and 3D: val a0D = TestVectorField(RefVectorField.random(VectorShape)) val a1D = TestVectorField(RefVectorField.random(Size, VectorShape)) val a3D = TestVectorField(RefVectorField.random(Size, Size, Size, VectorShape)) val sum0D = a0D + R val sum1D = a1D + R val sum3D = a3D + R probe(sum, a, diff, product, quotient, maxa, mina, constSum, constDiff, constProduct, constQuotient, greater, greaterEq, less, lessEq, eq3, nEq3, sum0D, a0D, sum1D, a1D, sum3D, a3D) } import graph._ withRelease { step require(readVector(sum) == (readVector(graph.a) + R)) require(readVector(diff) == (readVector(graph.a) - R)) require(readVector(product) == (readVector(graph.a) :* R)) require(readVector(quotient) ~== (readVector(graph.a) :/ R)) require(readVector(maxa) ~== readVector(graph.a).map(vecMax(_, R))) require(readVector(mina) ~== readVector(graph.a).map(vecMin(_, R))) require(readVector(constSum) == (readVector(graph.a) + R)) require(readVector(constDiff) == (-readVector(graph.a) + R)) require(readVector(constProduct) == (readVector(graph.a) :* R)) require(readVector(constQuotient) ~== (readVector(graph.a).reciprocal :* R)) require(readVector(greater) == readVector(graph.a).map(vecGreater(_, R))) require(readVector(greaterEq) == readVector(graph.a).map(vecGreaterEq(_, elem1))) require(readVector(less) == readVector(graph.a).map(vecLess(_, R))) require(readVector(lessEq) == readVector(graph.a).map(vecLessEq(_, elem2))) require(readVector(eq3) == readVector(graph.a).map(vecEq(_, elem3))) require(readVector(nEq3) == readVector(graph.a).map(vecNotEq(_, elem4))) require(readVector(sum0D) == (readVector(a0D) + R)) require(readVector(sum1D) == (readVector(a1D) + R)) require(readVector(sum3D) == (readVector(a3D) + R)) } def vecMax(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) >= thresh(i)) v(i) else thresh(i)) def vecMin(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) <= thresh(i)) v(i) else thresh(i)) def vecGreater(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) > thresh(i)) 1f else 0f) def vecGreaterEq(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) >= thresh(i)) 1f else 0f) def vecLess(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) < thresh(i)) 1f else 0f) def vecLessEq(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) <= thresh(i)) 1f else 0f) def vecEq(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) == thresh(i)) 1f else 0f) def vecNotEq(v: Vector, thresh: Vector) = Vector(thresh.length, (i) => if (v(i) != thresh(i)) 1f else 0f) } /** Test combining two dynamic vector fields. */ test("vector field / field") { //val Size = 125 val Size = 4 val Constant = 0.123f val ConstantVector = Vector(1.53f, -2.38f, 0.81f) val BigConstantVector = Vector(1.03f, -2.68f, 10.81f, 7.4f, -9.23f) val graph = new ComputeGraph(Optimize) with RefTestInterface { val a = TestVectorField(RefVectorField.random(Size, Size, VectorShape)) val b = TestVectorField(RefVectorField.random(Size, Size, VectorShape) + 0.1f) val s = TestScalarField(RefScalarField.random(Size, Size) + 0.1f) val sum = a + b val diff = a - b val product = a * b val quotient = a / b val sumS = a + s val diffS = a - s val productS = a * s val quotientS = a / s /** The scalar field can appear as the first operand. 
*/ val sSum = s + a val sDiff = s - a val sProduct = s * a val sQuotient = s / a val greater = a > b val greaterEq = a >= b val less = a < b val lessEq = a <= b val eq1 = a === a val eq2 = a === b val nEq1 = a !=== a val nEq2 = a !=== b /** A 0-dimensional scalar field, can be combined with any other. */ val c = TestScalarField(RefScalarField(Constant)) val sum0D = a + c val diff0D = a - c val product0D = a * c val quotient0D = a / c /** A 0-dimensional vector field, can be combined with any other. */ val v = TestVectorField(RefVectorField(ConstantVector)) val sum0DVector = a + v val diff0DVector = a - v val product0DVector = a * v val quotient0DVector = a / v /** Repeat vectorfield op 0Dvectorfield with big tensor fields */ val BigVectorShape = Shape(5) val bigA = TestVectorField(RefVectorField.random(Size, Size, BigVectorShape)) val bigV = TestVectorField(RefVectorField(BigConstantVector)) val bigSum0DVector = bigA + bigV val bigDiff0DVector = bigA - bigV val bigProduct0DVector = bigA * bigV val bigQuotient0DVector = bigA / bigV /** Repeat 0D tests with the 0D field appearing as the first operand */ val sum0D2 = c + a val diff0D2 = c - a val product0D2 = c * a val quotient0D2 = c / a /** A 0-dimensional vector field, can be combined with any other. */ val sum0DVector2 = v + a val diff0DVector2 = v - a val product0DVector2 = v * a val quotient0DVector2 = v / a /** Repeat vectorfield op 0Dvectorfield with big tensor fields */ val bigSum0DVector2 = bigV + bigA val bigDiff0DVector2 = bigV - bigA val bigProduct0DVector2 = bigV * bigA val bigQuotient0DVector2 = bigV / bigA probe(sum, a, b, diff, product, quotient, sumS, s, diffS, productS, quotientS, sSum, sDiff, sProduct, sQuotient, greater, greaterEq, less, lessEq, eq1, eq2, nEq1, nEq2, sum0D, diff0D, product0D, quotient0D, sum0DVector, diff0DVector, product0DVector, quotient0DVector, bigSum0DVector, bigA, bigDiff0DVector, bigProduct0DVector, bigQuotient0DVector, sum0D2, diff0D2, product0D2, quotient0D2, sum0DVector2, diff0DVector2, product0DVector2, quotient0DVector2, bigSum0DVector2, bigDiff0DVector2, bigProduct0DVector2, bigQuotient0DVector2) } import graph._ withRelease { step require(readVector(sum) == (readVector(graph.a) + readVector(b))) require(readVector(diff) == (readVector(graph.a) - readVector(b))) require(readVector(product) == (readVector(graph.a) :* readVector(b))) require(readVector(quotient) ~== (readVector(graph.a) :/ readVector(b))) require(readVector(sumS) == (readVector(graph.a) + readScalar(s))) require(readVector(diffS) == (readVector(graph.a) - readScalar(s))) require(readVector(productS) == (readVector(graph.a) :* readScalar(s))) require(readVector(quotientS) ~== (readVector(graph.a) :/ readScalar(s))) require(readVector(sSum) == (readVector(graph.a) + readScalar(s))) require(readVector(sDiff) == (-readVector(graph.a) + readScalar(s))) require(readVector(sProduct) == (readVector(graph.a) :* readScalar(s))) require(readVector(sQuotient) ~== (readVector(graph.a).reciprocal :* readScalar(s))) val aa = readVector(graph.a) val bb = readVector(b) require(readVector(greater) == aa.combine(bb, greaterThan _)) require(readVector(greaterEq) == aa.combine(bb, greaterThanEq _)) require(readVector(less) == aa.combine(bb, lessThan _)) require(readVector(lessEq) == aa.combine(bb, lessThanEq _)) require(readVector(eq1) == aa.combine(aa, Eq _)) require(readVector(eq2) == aa.combine(bb, Eq _)) require(readVector(nEq1) == aa.combine(aa, notEq _)) require(readVector(nEq2) == aa.combine(bb, notEq _)) require(readVector(sum0D) == 
(readVector(graph.a) + Constant)) require(readVector(diff0D) == (readVector(graph.a) - Constant)) require(readVector(product0D) == (readVector(graph.a) * Constant)) require(readVector(quotient0D) ~== (readVector(graph.a) / Constant)) require(readVector(sum0DVector) == readVector(graph.a).map(_ + ConstantVector)) require(readVector(diff0DVector) == readVector(graph.a).map(_ - ConstantVector)) require(readVector(product0DVector) == readVector(graph.a).map(_ :* ConstantVector)) require(readVector(quotient0DVector) ~== readVector(graph.a).map(_ :/ ConstantVector)) require(readVector(bigSum0DVector) == readVector(bigA).map(_ + BigConstantVector)) require(readVector(bigDiff0DVector) == readVector(bigA).map(_ - BigConstantVector)) require(readVector(bigProduct0DVector) == readVector(bigA).map(_ :* BigConstantVector)) require(readVector(bigQuotient0DVector) ~== readVector(bigA).map(_ :/ BigConstantVector)) require(readVector(sum0D2) == (readVector(graph.a) + Constant)) require(readVector(diff0D2) == (readVector(graph.a)*(-1f) + Constant)) require(readVector(product0D2) == (readVector(graph.a) * Constant)) require(readVector(quotient0D2) ~== (readVector(graph.a).reciprocal * Constant)) require(readVector(sum0DVector2) == readVector(graph.a).map(ConstantVector + _)) require(readVector(diff0DVector2) == readVector(graph.a).map(ConstantVector - _)) require(readVector(product0DVector2) == readVector(graph.a).map(ConstantVector :* _)) require(readVector(quotient0DVector2) ~== readVector(graph.a).map(ConstantVector :/ _)) require(readVector(bigSum0DVector2) == readVector(bigA).map(BigConstantVector + _)) require(readVector(bigDiff0DVector2) == readVector(bigA).map(BigConstantVector - _)) require(readVector(bigProduct0DVector2) == readVector(bigA).map(BigConstantVector :* _)) require(readVector(bigQuotient0DVector2) ~== readVector(bigA).map(BigConstantVector :/ _)) } def greaterThan(a: Vector, b: Vector): Vector = compare(a, b, _ > _) def greaterThanEq(a: Vector, b: Vector): Vector = compare(a, b, _ >= _) def lessThan(a: Vector, b: Vector): Vector = compare(a, b, _ < _) def lessThanEq(a: Vector, b: Vector): Vector = compare(a, b, _ <= _) def Eq(a: Vector, b: Vector): Vector = compare(a, b, _ == _) def notEq(a: Vector, b: Vector): Vector = compare(a, b, _ != _) def compare(a: Vector, b: Vector, f: (Float, Float) => Boolean): Vector = { val result = new Vector(a.length) for (i <- 0 until result.length) result(i) = if (f(a(i), b(i))) 1f else 0f result } } /** Test applying unary operations on dynamic vector fields. 
*/ test("vector field / unary") { val Size = 5 val graph = new ComputeGraph(Optimize) with RefTestInterface { val a = TestVectorField( RefVectorField.random(Size, Size, VectorShape) - 0.5f) val b = TestVectorField( RefVectorField.random(Size, Size, VectorShape) + 0.5f) val aAbs = abs(a) val aAcos = acos(a) val aAsin = asin(a) val aCos = cos(a) val aCosh = cosh(a) val aExp = exp(a) val bLog = log(b) val aSignum = signum(a) val aSin = sin(a) val aSinh = sinh(a) val aSq = sq(a) val aSqrt = sqrt(b) val aTan = tan(a) val aTanh = tanh(a) val aNegative = -a probe(aAbs, a, aAcos, aAsin, aCos, aCosh, aExp, bLog, b, aSignum, aSin, aSinh, aSq, aSqrt, aTan, aTanh, aNegative) } //val Subsize = (Size + 1) / 2 //val subsample = new DynamicVectorField(Subsize, Subsize, VectorShape) { // this <== a.subsample //} import graph._ withRelease { step require(readVector(aAbs) == readVector(graph.a).map(v => v.map(_.abs))) require(readVector(aAcos) ~== readVector(graph.a).map(v => v.map(e => math.acos(e).toFloat))) require(readVector(aAsin) ~== readVector(graph.a).map(v => v.map(e => math.asin(e).toFloat))) require(readVector(aCos) ~== readVector(graph.a).map(v => v.map(e => math.cos(e).toFloat))) require(readVector(aCosh) ~== readVector(graph.a).map(v => v.map(e => math.cosh(e).toFloat))) require(readVector(aExp) ~== readVector(graph.a).map(v => v.map(e => math.exp(e).toFloat))) require(readVector(bLog) ~== readVector(graph.b).map(v => v.map(e => math.log(e).toFloat))) require(readVector(aSignum) ~== readVector(graph.a).map(v => v.map(e => if (e < 0) -1f else if (e > 0) 1f else 0f))) require(readVector(aSin) ~== readVector(graph.a).map(v => v.map(e => math.sin(e).toFloat))) require(readVector(aSinh) ~== readVector(graph.a).map(v => v.map(e => math.sinh(e).toFloat))) require(readVector(aSq) ~== readVector(graph.a).map(v => v.map(e => e * e))) require(readVector(aSqrt) ~== readVector(graph.b).map(v => v.map(e => math.sqrt(e).toFloat))) require(readVector(aTan) ~== readVector(graph.a).map(v => v.map(e => math.tan(e).toFloat))) require(readVector(aTanh) ~== readVector(graph.a).map(v => v.map(e => math.tanh(e).toFloat))) require(readVector(aNegative) == readVector(graph.a) * -1f) } } /** Test the shift and shiftCyclic operators. */ test("vector field / shift") { val InRows = 9 val InColumns = 22 val rowShift = Array(3, -1, 2, 7, -5) val columnShift = Array(4, 6, 0, -8, -2) // Test vectors of length 1, 2, 3, 4, 5 val NumTests = 5 val inputImage = Array.tabulate(NumTests) { i => RefVectorField(InRows, InColumns, (row, col) => new Vector(i+1).randomize) } // Use Matrix.shift() on tensorSlices to create expected result // Matix.shift uses opposite sense! 
val expectedImage = Array.tabulate(NumTests) { i => { val slices = Array.tabulate(i+1) { j => RefScalarField(inputImage(i).sliceTensor(j).toTensor[Matrix].shift(rowShift(i), columnShift(i))) } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(InRows, InColumns, (row, col) => new Vector(first.read(row, col))) else first.stackTensor(rest : _*) }} // Use Matrix.shift() on tensorSlices to create expected result val expectedCyclicImage = Array.tabulate(NumTests) { i => { val slices = Array.tabulate(i+1) { j => RefScalarField(inputImage(i).sliceTensor(j).toTensor[Matrix].shiftCyclic(rowShift(i), columnShift(i))) } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(InRows, InColumns, (row, col) => new Vector(first.read(row, col))) else first.stackTensor(rest : _*) }} val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = Array.tabulate(NumTests) { i => TestVectorField(inputImage(i)) } val shifted = Array.tabulate(NumTests) { i => shift(in(i), rowShift(i), columnShift(i)) } val shiftedCyclic = Array.tabulate(NumTests) { i => shiftCyclic(in(i), rowShift(i), columnShift(i)) } probe(shifted: _*) probe(shiftedCyclic: _*) } import graph._ withRelease { step for (i <- 0 until NumTests) { require(readVector(shifted(i)) == expectedImage(i)) require(readVector(shiftedCyclic(i)) == expectedCyclicImage(i)) } } } /** Test FFT: 1D, 2D, 3D. * * Note: these tests merely test that the inverse FFT undoes what the * forward FFT does, not that the FFT actually calculates the frequency- * space version of its input. XXX * */ test("vector field / fft") { def testShape(fieldShape: Shape, vectorSize: Int) { // Offsets added to help relative error of ~== val VectorShape = Shape(vectorSize) val field = RefVectorField.random(fieldShape, VectorShape) + 0.1f val graph = new ComputeGraph(Optimize) with RefTestInterface { val space = TestVectorField(field) val recovered = realPart(fftInverse(fft(space))) probe(recovered) } import graph._ withRelease { step require(readVector(recovered) ~== field) } } def test1D(columns: Int, vectorSize: Int) = testShape(Shape(columns), vectorSize) def test2D(rows: Int, columns: Int, vectorSize: Int) = testShape(Shape(rows, columns), vectorSize) def test3D(layers: Int, rows: Int, columns: Int, vectorSize: Int) = testShape(Shape(layers, rows, columns), vectorSize) def runtest(vectorSize: Int) { test1D(256, vectorSize) test2D(64, 4, vectorSize) test3D(8, 2, 32, vectorSize) } val vectorSizes = Seq(2,3,4,5) vectorSizes.foreach(t => runtest(t)) } /** Test FFT: 1D, 2D, 3D. * * Note: these tests merely test that the inverse FFT undoes what the * forward FFT does, not that the FFT actually calculates the frequency- * space version of its input. 
XXX * */ test("vector field / fftRI (split real/imaginary)") { def testShape(fieldShape: Shape, vectorSize: Int) { // Offsets added to help relative error of ~== val VectorShape = Shape(vectorSize) val field = RefVectorField.random(fieldShape, VectorShape) + 0.1f val graph = new ComputeGraph(Optimize) with RefTestInterface { val space = TestVectorField(field) val (freqR, freqI) = fftRI(space) val recovered = fftInverseRI(freqR, freqI)._1 probe(recovered) } import graph._ withRelease { step require(readVector(recovered) ~== field) } } def test1D(columns: Int, vectorSize: Int) = testShape(Shape(columns), vectorSize) def test2D(rows: Int, columns: Int, vectorSize: Int) = testShape(Shape(rows, columns), vectorSize) def test3D(layers: Int, rows: Int, columns: Int, vectorSize: Int) = testShape(Shape(layers, rows, columns), vectorSize) def runtest(vectorSize: Int) { test1D(128, vectorSize) test2D(512, 2, vectorSize) test3D(1, 4, 16, vectorSize) } val vectorSizes = Seq(2,3,4,5) vectorSizes.foreach(t => runtest(t)) } test("vector field / dot") { val Rows = 13 val Columns = 37 // Test vectors of length 1, 2, 3, 4, 5 val NumTests = 5 val fieldA = Array.tabulate(NumTests) { i => RefVectorField(Rows, Columns, (row, col) => new Vector(i+1).randomize) } val fieldB = Array.tabulate(NumTests) { i => RefVectorField(Rows, Columns, (row, col) => new Vector(i+1).randomize) } // The 2nd operand of 'dot' is allowed to be a 0D field val fieldB_0D = Array.tabulate(NumTests) { i => RefVectorField(new Vector(i+1).randomize) } val graph = new ComputeGraph(Optimize) with RefTestInterface { val A = Array.tabulate(NumTests) { i => TestVectorField(fieldA(i)) } val B = Array.tabulate(NumTests) { i => TestVectorField(fieldB(i)) } val B_0D = Array.tabulate(NumTests) { i => TestVectorField(fieldB_0D(i)) } val C = Array.tabulate(NumTests) { i => dot(A(i), B(i)) } val C_0D = Array.tabulate(NumTests) { i => dot(A(i), B_0D(i)) } // Concatenate all the arrays and then probe all the elements probe((A ++ B ++ C ++ B_0D ++ C_0D): _*) } import graph._ withRelease { step // Check A dot B, where B is a 2D field for (i <- 0 until NumTests) { val a = readVector(A(i)) val b = readVector(B(i)) val c = readScalar(C(i)) for (row <- 0 until Rows; col <- 0 until Columns) { val aVector = a.read(row, col) val bVector = b.read(row, col) val dotProduct = aVector dot bVector require(c.read(row, col) ~== dotProduct) } } // Check A dot B, where B is a 0D field for (i <- 0 until NumTests) { val a = readVector(A(i)) val bVector = readVector(B_0D(i)).read() val c = readScalar(C_0D(i)) for (row <- 0 until Rows; col <- 0 until Columns) { val aVector = a.read(row, col) val dotProduct = aVector dot bVector require(c.read(row, col) ~== dotProduct) } } } } test("vector field / cross dot") { val Size = 5 val graph = new ComputeGraph(Optimize) with RefTestInterface { val s = TestScalarField(RefScalarField.random(VectorSize)) val w = TestVectorField(RefVectorField.random(Size, Size, VectorShape)) val z = crossDot(w, s) probe(w, s, z) } import graph._ withRelease { step for (row <- 0 until Size; col <- 0 until Size) { val wVector = readVector(w).read(row, col) val sVector = readScalar(s).toTensor[Vector] val dotProduct = wVector dot sVector require(readScalar(z).read(row, col) ~== dotProduct) } } } test("vector field / reverse cross dot") { val Rows = 5 val Columns = 7 val graph = new ComputeGraph(Optimize) with RefTestInterface { val z = TestScalarField(RefScalarField.random(Rows, Columns)) val w = TestVectorField(RefVectorField.random(Rows, Columns, 
VectorShape)) val x = reverseCrossDot(w, z) probe(w, z, x) } import graph._ withRelease { step var sum = new Vector(VectorSize) for (r <- 0 until Rows; c <- 0 until Columns) sum += readVector(w).read(r, c) * readScalar(z).read(r, c) for (r <- 0 until VectorSize) require(readScalar(x).read(r) ~== sum(r)) } } /** Test the expand operator. */ test("vector field / expand border") { // 2D test parameters val InRows = 9 val InColumns = 22 val OutRows = 13 val OutColumns = 37 // 1D test parameters val InColumns_1D = 7 val OutColumns_1D = 11 // Test vectors of length 1, 2, 3, 4, 5 val NumTests = 5 val inputImage = Array.tabulate(NumTests) { i => RefVectorField(InRows, InColumns, (row, col) => new Vector(i+1).randomize) } // Use Matrix.expand() on tensorSlices to create expected result val expectedImage = Array.tabulate(NumTests) { i => { val slices = Array.tabulate(i+1) { j => RefScalarField(inputImage(i).sliceTensor(j).toTensor[Matrix]. expand(OutRows, OutColumns, borderFill = true)) } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(OutRows, OutColumns, (row, col) => new Vector(first.read(row, col))) else first.stackTensor(rest : _*) }} // 1D test inputs and expected outputs val inputImage_1D = Array.tabulate(NumTests) { i => RefVectorField(InColumns_1D, (col) => new Vector(i+1).randomize) } // Use Vector.expand() on tensorSlices to create expected result val expectedImage_1D = Array.tabulate(NumTests) { i => { val slices = Array.tabulate(i+1) { j => RefScalarField(inputImage_1D(i).sliceTensor(j).toTensor[Vector]. expand(OutColumns_1D, borderFill = true)) } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(OutColumns_1D, (col) => new Vector(first.read(col))) else first.stackTensor(rest : _*) }} val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = Array.tabulate(NumTests) { i => TestVectorField(inputImage(i)) } val expanded = Array.tabulate(NumTests) { i => expand(in(i), BorderClamp, OutRows, OutColumns) } val in_1D = Array.tabulate(NumTests) { i => TestVectorField(inputImage_1D(i)) } val expanded_1D = Array.tabulate(NumTests) { i => expand(in_1D(i), BorderClamp, OutColumns_1D) } // Concatenate all the arrays and then probe all the elements probe((expanded ++ expanded_1D): _*) } import graph._ withRelease { step for (i <- 0 until NumTests) require(readVector(expanded(i)) == expectedImage(i)) for (i <- 0 until NumTests) require(readVector(expanded_1D(i)) == expectedImage_1D(i)) } } /** Test the (Int) operator. */ test("vector field / slice") { val image = RefVectorField(3, 3, (row, col) => new Vector(Array[Float](row, col))) val expect0 = RefVectorField(3, (col) => new Vector(Array[Float](0, col))) val expect1 = RefVectorField(3, (col) => new Vector(Array[Float](1, col))) val expect2 = RefVectorField(3, (col) => new Vector(Array[Float](2, col))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val field1 = TestVectorField(image) val row0 = field1(0) val row1 = field1(1) val row2 = field1(2) probe(row0, row1, row2) } import graph._ withRelease { step require(readVector(row0) == expect0) require(readVector(row1) == expect1) require(readVector(row2) == expect2) } } /** Test the (0D-ScalarField) operator (slice). 
*/ test("vector field / slice point") { // 1D input val rand = new cogx.utilities.Random val shape1D = Shape(17) val vectorShape = Shape(4) val image1D = RefVectorField.random(shape1D, vectorShape) // 2D input val shape2D = Shape(5, 7) val vectorShape2 = Shape(5) val image2D = RefVectorField.random(shape2D, vectorShape2) // 3D input val shape3D = Shape(3, 9, 11) val vectorShape3 = Shape(2) val image3D = RefVectorField.random(shape2D, vectorShape3) val sliceToExtract1D = shape1D(0) * rand.nextFloat val sliceToExtract2D = shape2D(0) * rand.nextFloat val sliceToExtract3D = shape3D(0) * rand.nextFloat val graph = new ComputeGraph(Optimize) with RefTestInterface { val field1D = TestVectorField(image1D) val indexField1D = TestScalarField(RefScalarField(sliceToExtract1D)) val slicedField0D = field1D(indexField1D) val field2D = TestVectorField(image2D) val indexField2D = TestScalarField(RefScalarField(sliceToExtract2D)) val slicedField1D = field2D(indexField2D) val field3D = TestVectorField(image3D) val indexField3D = TestScalarField(RefScalarField(sliceToExtract3D)) val slicedField2D = field3D(indexField3D) probe(slicedField0D, slicedField1D, slicedField2D) } import graph._ withRelease { step require(readVector(slicedField0D) == image1D.slice(sliceToExtract1D.toInt)) require(readVector(slicedField1D) == image2D.slice(sliceToExtract2D.toInt)) require(readVector(slicedField2D) == image3D.slice(sliceToExtract3D.toInt)) } } /** Test the stack operator. */ test("vector field / stack") { val vectorShape = Shape(11) val field0 = RefVectorField.random(5, 7, vectorShape) val field1 = RefVectorField.random(5, 7, vectorShape) val field2 = RefVectorField.random(5, 7, vectorShape) val expected = RefVectorField(3, 5, 7, (layer, row, col) => { if (layer == 0) field0.read(row, col) else if (layer == 1) field1.read(row, col) else field2.read(row, col) } ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val f0 = TestVectorField(field0) val f1 = TestVectorField(field1) val f2 = TestVectorField(field2) val fArray = Array(f0, f1, f2) val stack0 = stack(f0, f1, f2) val stack1 = stack(fArray) probe(stack0, stack1) } import graph._ withRelease { step require(readVector(stack0) == expected) require(readVector(stack1) == expected) } } /** Test the tensors(Int) operator. This should technically be in the * ScalarFieldGeneratorSpec, since the output is a ScalarField. 
XXX */ test("vector field / tensor slice") { val Rows = 3 val Columns = 3 val image = RefVectorField(Rows, Columns, (row, col) => new Vector(Array[Float](row, col, row + col))) val expect0 = RefScalarField(Rows, Columns, (row, col) => row) val expect1 = RefScalarField(Rows, Columns, (row, col) => col) val expect2 = RefScalarField(Rows, Columns, (row, col) => row + col) val bigImage = RefVectorField(Rows, Columns, (row, col) => new Vector(Array[Float](row, col, row - col, row + col, row * col))) val bigExpect0 = RefScalarField(Rows, Columns, (row, col) => row) val bigExpect1 = RefScalarField(Rows, Columns, (row, col) => col) val bigExpect2 = RefScalarField(Rows, Columns, (row, col) => row - col) val bigExpect3 = RefScalarField(Rows, Columns, (row, col) => row + col) val bigExpect4 = RefScalarField(Rows, Columns, (row, col) => row * col) val graph = new ComputeGraph(Optimize) with RefTestInterface { val field = TestVectorField(image) val slice0 = vectorElement(field, 0) val slice1 = vectorElement(field, 1) val slice2 = vectorElement(field, 2) val bigField = TestVectorField(bigImage) val bigSlice0 = vectorElement(bigField, 0) val bigSlice1 = vectorElement(bigField, 1) val bigSlice2 = vectorElement(bigField, 2) val bigSlice3 = vectorElement(bigField, 3) val bigSlice4 = vectorElement(bigField, 4) probe(slice0, slice1, slice2, bigSlice0, bigSlice1, bigSlice2, bigSlice3, bigSlice4) } import graph._ withRelease { step require(readScalar(slice0) == expect0) require(readScalar(slice1) == expect1) require(readScalar(slice2) == expect2) require(readScalar(bigSlice0) == bigExpect0) require(readScalar(bigSlice1) == bigExpect1) require(readScalar(bigSlice2) == bigExpect2) require(readScalar(bigSlice3) == bigExpect3) require(readScalar(bigSlice4) == bigExpect4) } } /** Test the stackTensors operator. */ test("vector field / tensor stack") { val Rows = 3 val Columns = 3 val VectorLength = 2 val MatrixRows = 3 val image0 = RefVectorField(Rows, Columns, (row, col) => new Vector(Array[Float](row, col))) val image1 = RefVectorField(Rows, Columns, (row, col) => new Vector(Array[Float](col, row))) val image2 = RefVectorField(Rows, Columns, (row, col) => new Vector(Array[Float](col * row, row + 5))) val expected = RefMatrixField(Rows, Columns, (row, col) => Matrix( Array[Float](row, col), Array[Float](col, row), Array[Float](col * row, row + 5) ) ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val field0 = TestVectorField(image0) val field1 = TestVectorField(image1) val field2 = TestVectorField(image2) val stackedField = matrixField(field0, field1, field2) probe(stackedField) } import graph._ withRelease { step require(readMatrix(stackedField) == expected) } } /** Test the tensor reduction operators. 
*/ test("vector field / tensor reduce") { val Rows = 3 val Columns = 3 def expectedReducedSum(field: RefVectorField) = RefScalarField(Rows, Columns, (row, col) => field.read(row, col).toArray.reduceLeft(_ + _)) def expectedReducedMin(field: RefVectorField) = RefScalarField(Rows, Columns, (row, col) => field.read(row, col).toArray.reduceLeft(_ min _)) def expectedReducedMax(field: RefVectorField) = RefScalarField(Rows, Columns, (row, col) => field.read(row, col).toArray.reduceLeft(_ max _)) // Test vectors of length 1, 2, 3, 4, 5 val field = Array.tabulate(5) { i => RefVectorField(Rows, Columns, (row, col) => new Vector(i+1).randomize) } val graph = new ComputeGraph(Optimize) with RefTestInterface { val v = Array.tabulate(5) { i => TestVectorField(field(i)) } val vSum = Array.tabulate(5) { i => reduceSum(v(i)) } val vMax = Array.tabulate(5) { i => reduceMax(v(i)) } val vMin = Array.tabulate(5) { i => reduceMin(v(i)) } // Concatenate all the arrays and then probe all the elements probe((vSum ++ vMax ++ vMin): _*) } import graph._ withRelease { step for (i <- 0 until 5) { // GPU sum done as binary tree, not reduceLeft, so answer is approx. require(readScalar(vSum(i)) ~== expectedReducedSum(field(i))) require(readScalar(vMax(i)) == expectedReducedMax(field(i))) require(readScalar(vMin(i)) == expectedReducedMin(field(i))) } } } /** Test the tensor reduction operators. */ test("vector field / tensor reduce sum precision") { // This tests Cog's ability to perform large reductions fairly precisely. // The test adds the value (2^12 + 1) to itself 2^13 times, so the result is: // // (2^12 + 1) * 2^13 = 2^25 + 2^13 // // The value is representable by a single precision float with its 23-bit // mantissa exactly, but the result will not be obtained if a single accumulator // is used. val VectorLen = 8192 val VectorVal = 4097L val ExpectedSum = VectorLen * VectorVal val graph = new ComputeGraph(Optimize) { val v = VectorField(Vector(VectorLen, (c) => VectorVal)) val vSum = reduceSum(v) // Concatenate all the arrays and then probe all the elements probe(vSum) } import graph._ withRelease { step val actualSum = read(vSum).asInstanceOf[ScalarFieldReader].read().toLong require(actualSum == ExpectedSum, s"Expecting tensor sum $ExpectedSum, saw $actualSum") } } /** Test the tensor reduction operators. */ test("vector field / tensor block reduce sum precision") { // This tests Cog's ability to perform large reductions fairly precisely. // The test adds the value (2^12 + 1) to itself (2^13 + 2^3) times, so the result is: // // (2^12 + 1) * (2^13 + 2^3) = 2^25 + 2^15 + 2^13 + 2^3 // // The value is representable by a single precision float with its 23-bit // mantissa exactly, but the result will not be obtained if a single accumulator // is used. val VectorLen = 8192 + 8 val VectorVal = 4097L val OutputLen = 2 val ExpectedSum = VectorLen * VectorVal val graph = new ComputeGraph(Optimize) { val v = VectorField(Vector(VectorLen*OutputLen, (c) => VectorVal)) val vSum = blockReduceSum(v, VectorLen) // Concatenate all the arrays and then probe all the elements probe(vSum) } import graph._ withRelease { step val reader = read(vSum).asInstanceOf[VectorFieldReader] val outVector = new Vector(OutputLen) reader.read(outVector) for (i <- 0 until OutputLen) { val actualSum = outVector(i).toLong require(actualSum == ExpectedSum, s"Expecting tensor sum $ExpectedSum, saw $actualSum") } } } /** Test the block tensor reduction operators. 
*/ test("vector field / block tensor reduce") { val Rows = 3 val Columns = 3 // Test reduction factors of 2, 3, 4 and 5 val numTests = 4 def outVLen(testNum: Int) = testNum + 2 def factor(testNum: Int) = testNum + 2 def inVLen(testNum: Int) = outVLen(testNum) * factor(testNum) def expectedBlockReducedSum(testNum: Int, field: RefVectorField) = RefVectorField(Rows, Columns, (row, col) => Vector(outVLen(testNum), i => field.read(row, col). subvector(i*factor(testNum) until (i+1)*factor(testNum)).toArray.reduceLeft(_ + _))) def expectedBlockReducedMax(testNum: Int, field: RefVectorField) = RefVectorField(Rows, Columns, (row, col) => Vector(outVLen(testNum), i => field.read(row, col). subvector(i*factor(testNum) until (i+1)*factor(testNum)).toArray.reduceLeft(_ max _))) def expectedBlockReducedMin(testNum: Int, field: RefVectorField) = RefVectorField(Rows, Columns, (row, col) => Vector(outVLen(testNum), i => field.read(row, col). subvector(i*factor(testNum) until (i+1)*factor(testNum)).toArray.reduceLeft(_ min _))) val field = Array.tabulate(numTests) { i => RefVectorField(Rows, Columns, (row, col) => new Vector(inVLen(i)).randomize) } val graph = new ComputeGraph(Optimize) with RefTestInterface { val v = Array.tabulate(numTests) { i => TestVectorField(field(i)) } val vSum = Array.tabulate(numTests) { i => blockReduceSum(v(i), factor(i)) } val vMax = Array.tabulate(numTests) { i => blockReduceMax(v(i), factor(i)) } val vMin = Array.tabulate(numTests) { i => blockReduceMin(v(i), factor(i)) } // Concatenate all the arrays and then probe all the elements probe((vSum ++ vMax ++ vMin): _*) } import graph._ withRelease { step for (i <- 0 until numTests) { // GPU sum done as binary tree, not reduceLeft, so answer is approx. require(readVector(vSum(i)) ~== expectedBlockReducedSum(i, field(i))) require(readVector(vMax(i)) == expectedBlockReducedMax(i, field(i))) require(readVector(vMin(i)) == expectedBlockReducedMin(i, field(i))) } } } /** Test the fieldReduceSum operator. 
*/ test("vector field / field reduce sum") { val Rows = 3 val Columns = 3 val VectorLength = 5 val field = RefVectorField(Rows, Columns, (row, col) => new Vector(VectorLength).randomize) val sum = new Vector(VectorLength) for (r <- 0 until Rows; c <- 0 until Columns) sum += field.read(r, c) val expectedReducedSum = RefVectorField(sum) // Try another case: 0D field val VectorLength0 = 4 val sum0 = new Vector(VectorLength0).randomize val field0 = RefVectorField(sum0) val expectedReducedSum0 = RefVectorField(sum0) val VectorLength0_2 = 5 val sum0_2 = new Vector(VectorLength0_2).randomize val field0_2 = RefVectorField(sum0_2) val expectedReducedSum0_2 = RefVectorField(sum0_2) // Try another 2 cases: 1D fields // Vector fields of length 1 are important to check: they can exercise unique code paths val Rows1 = 511 val VectorLength1 = 1 val field1 = RefVectorField(Rows1, (row) => new Vector(VectorLength1).randomize) val sum1 = new Vector(VectorLength1) for (r <- 0 until Rows1) sum1 += field1.read(r) val expectedReducedSum1 = RefVectorField(sum1) val Rows2 = 257 val VectorLength2 = 13 val field2 = RefVectorField(Rows2, (row) => new Vector(VectorLength2).randomize) val sum2 = new Vector(VectorLength2) for (r <- 0 until Rows2) sum2 += field2.read(r) val expectedReducedSum2 = RefVectorField(sum2) // Try another case: 2D Tensor Field val Rows3 = 371 val Columns3 = 113 val VectorLength3 = 2 val field3 = RefVectorField(Rows3, Columns3, (row, col) => new Vector(VectorLength3).randomize) val sum3 = new Vector(VectorLength3) for (r <- 0 until Rows3; c <- 0 until Columns3) sum3 += field3.read(r, c) val expectedReducedSum3 = RefVectorField(sum3) // Try another case: 3D vector field val Layers4 = 5 val Rows4 = 3 val Columns4 = 155 val VectorShape4 = Shape(4) val field4 = RefVectorField.random(Layers4, Rows4, Columns4, VectorShape4) val sum4 = new Vector(VectorShape4(0)) for (l <- 0 until Layers4; r <- 0 until Rows4; c <- 0 until Columns4) sum4 += field4.read(l, r, c) val expectedReducedSum4 = RefVectorField(sum4) // Try another case: 3D Tensor Field val Layers5 = 7 val Rows5 = 17 val Columns5 = 17 val VectorShape5 = Shape(3) val field5 = RefVectorField.random(Layers5, Rows5, Columns5, VectorShape5) val sum5 = new Vector(VectorShape5(0)) for (l <- 0 until Layers5; r <- 0 until Rows5; c <- 0 until Columns5) sum5 += field5.read(l, r, c) val expectedReducedSum5 = RefVectorField(sum5) val graph = new ComputeGraph(Optimize) with RefTestInterface { val v = TestVectorField(field) val vSum = fieldReduceSum(v) val v0 = TestVectorField(field0) val vSum0 = fieldReduceSum(v0) val v0_2 = TestVectorField(field0_2) val vSum0_2 = fieldReduceSum(v0_2) val v1 = TestVectorField(field1) val vSum1 = fieldReduceSum(v1) val v2 = TestVectorField(field2) val vSum2 = fieldReduceSum(v2) val v3 = TestVectorField(field3) val vSum3 = fieldReduceSum(v3) val input4 = TestVectorField(field4) val vSum4 = fieldReduceSum(input4) val input5 = TestVectorField(field5) val vSum5 = fieldReduceSum(input5) probe(vSum, vSum0, vSum0_2, vSum1, vSum2, vSum3, vSum4, vSum5) } import graph._ withRelease { step require(readVector(vSum) ~== expectedReducedSum) require(readVector(vSum0) ~== expectedReducedSum0) require(readVector(vSum0_2) ~== expectedReducedSum0_2) require(readVector(vSum1) ~== expectedReducedSum1) require(readVector(vSum2) ~== expectedReducedSum2) require(readVector(vSum3) ~== expectedReducedSum3) require(readVector(vSum4) ~== expectedReducedSum4) require(readVector(vSum5) ~== expectedReducedSum5) } } /** Test the fieldReduceMax operator. 
*/ test("vector field / field reduce max") { val Rows = 3 val Columns = 3 val VectorLength = 5 val field = RefVectorField(Rows, Columns, (row, col) => new Vector(VectorLength).randomize) // Create a vector that is the component-wise maximum of two vectors def vectorMax(a: Vector, b: Vector) = Vector(a.length, (i) => math.max(a(i),b(i))) // Create a RefVectorField that is the fieldReduceMax of a RefVectorField def refVectorFieldMax(field: RefVectorField) = RefVectorField(field.reduce(vectorMax(_ , _))) val expectedReducedMax = refVectorFieldMax(field) // Try another case: 0D field val VectorLength0 = 4 val max0 = new Vector(VectorLength0).randomize val field0 = RefVectorField(max0) val expectedReducedMax0 = RefVectorField(max0) val VectorLength0_2 = 5 val max0_2 = new Vector(VectorLength0_2).randomize val field0_2 = RefVectorField(max0_2) val expectedReducedMax0_2 = RefVectorField(max0_2) // Try another 2 cases: 1D fields // Vector fields of length 1 are important to check: they can exercise unique code paths val Rows1 = 511 val VectorLength1 = 1 val field1 = RefVectorField(Rows1, (row) => new Vector(VectorLength1).randomize) val expectedReducedMax1 = refVectorFieldMax(field1) val Rows2 = 257 val VectorLength2 = 13 val field2 = RefVectorField(Rows2, (row) => new Vector(VectorLength2).randomize) val expectedReducedMax2 = refVectorFieldMax(field2) // Try another case: 2D Tensor Field val Rows3 = 371 val Columns3 = 113 val VectorLength3 = 2 val field3 = RefVectorField(Rows3, Columns3, (row, col) => new Vector(VectorLength3).randomize) val expectedReducedMax3 = refVectorFieldMax(field3) // Try another case: 3D vector field val Layers4 = 5 val Rows4 = 3 val Columns4 = 155 val VectorShape4 = Shape(4) val field4 = RefVectorField.random(Layers4, Rows4, Columns4, VectorShape4) val expectedReducedMax4 = refVectorFieldMax(field4) // Try another case: 3D Tensor Field val Layers5 = 7 val Rows5 = 17 val Columns5 = 17 val VectorShape5 = Shape(3) val field5 = RefVectorField.random(Layers5, Rows5, Columns5, VectorShape5) val expectedReducedMax5 = refVectorFieldMax(field5) val graph = new ComputeGraph(Optimize) with RefTestInterface { val v = TestVectorField(field) val vSum = fieldReduceMax(v) val v0 = TestVectorField(field0) val vSum0 = fieldReduceMax(v0) val v0_2 = TestVectorField(field0_2) val vSum0_2 = fieldReduceMax(v0_2) val v1 = TestVectorField(field1) val vSum1 = fieldReduceMax(v1) val v2 = TestVectorField(field2) val vSum2 = fieldReduceMax(v2) val v3 = TestVectorField(field3) val vSum3 = fieldReduceMax(v3) val input4 = TestVectorField(field4) val vSum4 = fieldReduceMax(input4) val input5 = TestVectorField(field5) val vSum5 = fieldReduceMax(input5) probe(vSum, vSum0, vSum0_2, vSum1, vSum2, vSum3, vSum4, vSum5) } import graph._ withRelease { step require(readVector(vSum) ~== expectedReducedMax) require(readVector(vSum0) ~== expectedReducedMax0) require(readVector(vSum0_2) ~== expectedReducedMax0_2) require(readVector(vSum1) ~== expectedReducedMax1) require(readVector(vSum2) ~== expectedReducedMax2) require(readVector(vSum3) ~== expectedReducedMax3) require(readVector(vSum4) ~== expectedReducedMax4) require(readVector(vSum5) ~== expectedReducedMax5) } } /** Test the fieldReduceMin operator. 
*/ test("vector field / field reduce min") { val Rows = 3 val Columns = 3 val VectorLength = 5 val field = RefVectorField(Rows, Columns, (row, col) => new Vector(VectorLength).randomize) // Create a vector that is the component-wise minimum of two vectors def vectorMin(a: Vector, b: Vector) = Vector(a.length, (i) => math.min(a(i),b(i))) // Create a RefVectorField that is the fieldReduceMin of a RefVectorField def refVectorFieldMin(field: RefVectorField) = RefVectorField(field.reduce(vectorMin(_ , _))) val expectedReducedMin = refVectorFieldMin(field) // Try another case: 0D field val VectorLength0 = 4 val min0 = new Vector(VectorLength0).randomize val field0 = RefVectorField(min0) val expectedReducedMin0 = RefVectorField(min0) val VectorLength0_2 = 5 val min0_2 = new Vector(VectorLength0_2).randomize val field0_2 = RefVectorField(min0_2) val expectedReducedMin0_2 = RefVectorField(min0_2) // Try another 2 cases: 1D fields // Vector fields of length 1 are important to check: they can exercise unique code paths val Rows1 = 511 val VectorLength1 = 1 val field1 = RefVectorField(Rows1, (row) => new Vector(VectorLength1).randomize) val expectedReducedMin1 = refVectorFieldMin(field1) val Rows2 = 257 val VectorLength2 = 13 val field2 = RefVectorField(Rows2, (row) => new Vector(VectorLength2).randomize) val expectedReducedMin2 = refVectorFieldMin(field2) // Try another case: 2D Tensor Field val Rows3 = 371 val Columns3 = 113 val VectorLength3 = 2 val field3 = RefVectorField(Rows3, Columns3, (row, col) => new Vector(VectorLength3).randomize) val expectedReducedMin3 = refVectorFieldMin(field3) // Try another case: 3D vector field val Layers4 = 5 val Rows4 = 3 val Columns4 = 155 val VectorShape4 = Shape(4) val field4 = RefVectorField.random(Layers4, Rows4, Columns4, VectorShape4) val expectedReducedMin4 = refVectorFieldMin(field4) // Try another case: 3D Tensor Field val Layers5 = 7 val Rows5 = 17 val Columns5 = 17 val VectorShape5 = Shape(3) val field5 = RefVectorField.random(Layers5, Rows5, Columns5, VectorShape5) val expectedReducedMin5 = refVectorFieldMin(field5) val graph = new ComputeGraph(Optimize) with RefTestInterface { val v = TestVectorField(field) val vSum = fieldReduceMin(v) val v0 = TestVectorField(field0) val vSum0 = fieldReduceMin(v0) val v0_2 = TestVectorField(field0_2) val vSum0_2 = fieldReduceMin(v0_2) val v1 = TestVectorField(field1) val vSum1 = fieldReduceMin(v1) val v2 = TestVectorField(field2) val vSum2 = fieldReduceMin(v2) val v3 = TestVectorField(field3) val vSum3 = fieldReduceMin(v3) val input4 = TestVectorField(field4) val vSum4 = fieldReduceMin(input4) val input5 = TestVectorField(field5) val vSum5 = fieldReduceMin(input5) probe(vSum, vSum0, vSum0_2, vSum1, vSum2, vSum3, vSum4, vSum5) } import graph._ withRelease { step require(readVector(vSum) ~== expectedReducedMin) require(readVector(vSum0) ~== expectedReducedMin0) require(readVector(vSum0_2) ~== expectedReducedMin0_2) require(readVector(vSum1) ~== expectedReducedMin1) require(readVector(vSum2) ~== expectedReducedMin2) require(readVector(vSum3) ~== expectedReducedMin3) require(readVector(vSum4) ~== expectedReducedMin4) require(readVector(vSum5) ~== expectedReducedMin5) } } /** Test the winnerTakeAll operator. 
*/ test("vector field / winner take all") { // 2D test parameters, will generate 2-kernel chained reduction val Rows = 11 val Columns = 42 // 1D test parameters val Columns_1D = 7 // Test vectors of length 1, 2, 3, 4, 5 val NumTests = 5 val input = Array.tabulate(NumTests) { i => RefVectorField(Rows, Columns, (row, col) => new Vector(i+1).randomize) } // Use RefScalarField.winnerTakeAll on tensorSlices to create expected result val expectedOutput = Array.tabulate(NumTests) { i => { val slices = Array.tabulate[RefScalarField](i+1) { j => input(i).sliceTensor(j).winnerTakeAll } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(Rows, Columns, (row, col) => new Vector(first.read(row, col))) else first.stackTensor(rest : _*) }} // 1D test inputs and expected outputs val input_1D = Array.tabulate(NumTests) { i => RefVectorField(Columns_1D, (col) => new Vector(i+1).randomize) } // Use RefScalarField.winnerTakeAll on tensorSlices to create expected result val expectedOutput_1D = Array.tabulate(NumTests) { i => { val slices = Array.tabulate(i+1) { j => input_1D(i).sliceTensor(j).winnerTakeAll } val first = slices(0) val rest = Array.tabulate(slices.length - 1) { i => slices(i+1) } if (i == 0) RefVectorField(Columns_1D, (col) => new Vector(first.read(col))) else first.stackTensor(rest : _*) }} val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = Array.tabulate(NumTests) { i => TestVectorField(input(i)) } val wta = Array.tabulate(NumTests) { i => winnerTakeAll(in(i)) } val in_1D = Array.tabulate(NumTests) { i => TestVectorField(input_1D(i)) } val wta_1D = Array.tabulate(NumTests) { i => winnerTakeAll(in_1D(i)) } probe((wta ++ wta_1D): _*) } import graph._ withRelease { step for (i <- 0 until NumTests) require(readVector(wta(i)) == expectedOutput(i)) for (i <- 0 until NumTests) require(readVector(wta_1D(i)) == expectedOutput_1D(i)) } } /** Test the field reduction operators. 
Only sum defined for vectors */ test("vector field / max position") { val Layers = 5 val Rows = 4 val Columns = 7 // normL2 used to create functions that max-out at a specified point // 1D input field val correctWinner1 = new Vector(3f) val init1 = RefScalarField(Columns, (c) => 1.5f - (new Vector(c.toFloat) - correctWinner1).normL2) val correctWinner2 = new Vector(2f, 5f) val init2 = RefScalarField(Rows, Columns, (r, c) => 3.5f - (new Vector(r.toFloat, c.toFloat) - correctWinner2).normL2) val correctWinner3 = new Vector(3f, 2f, 6f) val init3 = RefScalarField(Layers, Rows, Columns, (l, r, c) => -(new Vector(l.toFloat, r.toFloat, c.toFloat) - correctWinner3).normL2) val graph = new ComputeGraph(Optimize) with RefTestInterface { val input1 = TestScalarField(init1) val winner1 = maxPosition(input1) // 2D input field val input2 = TestScalarField(init2) val winner2 = maxPosition(input2) // 3D input field val input3 = TestScalarField(init3) val winner3 = maxPosition(input3) probe(winner1, winner2, winner3) } import graph._ withRelease { step require(readVector(winner1).read() == correctWinner1) require(readVector(winner2).read() == correctWinner2) require(readVector(winner3).read() == correctWinner3) } } test("vector field / commutative") { val graph = new ComputeGraph(Optimize) with RefTestInterface { val field = TestVectorField(RefVectorField.random(10, 10, Shape(5))) val plus = field + 1 val plusReverse = 1 + field val minus = 1 - field val minusReverse = -field + 1 val times = 2 * field val timesReverse = field * 2 probe(plus, plusReverse, minus, minusReverse, times, timesReverse) } import graph._ withRelease { step require(readVector(plus) == readVector(plusReverse)) require(readVector(minus) == readVector(minusReverse)) require(readVector(times) == readVector(timesReverse)) } } test("vector field / transpose") { // Try vectors of length 1, 4 and 5 val field1 = RefVectorField.random(100, 100, Shape(1)) val transposed1 = RefMatrixField(100, 100, (row, col) => field1.read(row, col).transpose) val field4 = RefVectorField.random(100, 100, Shape(4)) val transposed4 = RefMatrixField(100, 100, (row, col) => field4.read(row, col).transpose) val field5 = RefVectorField.random(100, 100, Shape(5)) val transposed5 = RefMatrixField(100, 100, (row, col) => field5.read(row, col).transpose) val graph = new ComputeGraph(Optimize) with RefTestInterface { val v1 = TestVectorField(field1) val m1 = transposeVectors(v1) val v4= TestVectorField(field4) val m4 = transposeVectors(v4) val v5 = TestVectorField(field5) val m5 = transposeVectors(v5) probe(m1, m4, m5) } import graph._ withRelease { step require(readMatrix(m1) == transposed1) require(readMatrix(m4) == transposed4) require(readMatrix(m5) == transposed5) } } /* test("vector field / push") { // 1D VectorField Stack, 0D VectorField Slice def point1D(col: Int) = new Vector(col, 2 * col + 1) val Columns_1D = 3 val stack_1D = RefVectorField(Columns_1D, point1D _) val stack0_1D = RefVectorField(point1D(0)) val stack1_1D = RefVectorField(point1D(1)) val stack2_1D = RefVectorField(point1D(2)) val slice_1D = RefVectorField(point1D(5)) // 2D VectorField Stack, 1D VectorField Slice def point2D(row: Int, col: Int) = new Vector(row + col, row * col) val Rows_2D = 4 val Columns_2D = 5 val stack_2D = RefVectorField(Rows_2D, Columns_2D, point2D _) val stack0_2D = RefVectorField(Columns_2D, point2D(0, _)) val stack1_2D = RefVectorField(Columns_2D, point2D(1, _)) val stack2_2D = RefVectorField(Columns_2D, point2D(2, _)) val stack3_2D = RefVectorField(Columns_2D, point2D(3, 
_)) val slice_2D = RefVectorField(Columns_2D, point2D(5, _)) // 3D VectorField Stack, 2D VectorField Slice def point3D(depth: Int, row: Int, col: Int) = new Vector(depth + row, row + col, row * col, col - depth, col * depth) val Depth_3D = 3 val Rows_3D = 4 val Columns_3D = 5 val stack_3D = RefVectorField(Depth_3D, Rows_3D, Columns_3D, point3D _) val stack0_3D = RefVectorField(Rows_3D, Columns_3D, point3D(0, _, _)) val stack1_3D = RefVectorField(Rows_3D, Columns_3D, point3D(1, _, _)) val stack2_3D = RefVectorField(Rows_3D, Columns_3D, point3D(2, _, _)) val slice_3D = RefVectorField(Rows_3D, Columns_3D, point3D(5, _, _)) val graph = new ComputeGraph(Optimize) with RefTestInterface2 { val a_1D = TestVectorField(stack_1D) val b_1D = a_1D push TestVectorField(slice_1D) val a_2D = TestVectorField(stack_2D) val b_2D = a_2D push TestVectorField(slice_2D) val a_3D = TestVectorField(stack_3D) val b_3D = a_3D push TestVectorField(slice_3D) } import graph._ withRelease { step // check 1D stack require(readVector(a_1D).slice(0) == stack0_1D) require(readVector(a_1D).slice(1) == stack1_1D) require(readVector(a_1D).slice(2) == stack2_1D) require(readVector(b_1D).slice(0) == slice_1D) require(readVector(b_1D).slice(1) == stack0_1D) require(readVector(b_1D).slice(2) == stack1_1D) // check 2D stack require(readVector(a_2D).slice(0) == stack0_2D) require(readVector(a_2D).slice(1) == stack1_2D) require(readVector(a_2D).slice(2) == stack2_2D) require(readVector(a_2D).slice(3) == stack3_2D) require(readVector(b_2D).slice(0) == slice_2D) require(readVector(b_2D).slice(1) == stack0_2D) require(readVector(b_2D).slice(2) == stack1_2D) require(readVector(b_2D).slice(3) == stack2_2D) // check 3D stack require(readVector(a_3D).slice(0) == stack0_3D) require(readVector(a_3D).slice(1) == stack1_3D) require(readVector(a_3D).slice(2) == stack2_3D) require(readVector(b_3D).slice(0) == slice_3D) require(readVector(b_3D).slice(1) == stack0_3D) require(readVector(b_3D).slice(2) == stack1_3D) } } */ test("vector field / warp2D") { // For this test, we assume that warp has been tested for // scalar fields. 
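    // The reference is therefore built from the framework itself: warping the vector field
    // should equal warping each component (vectorElement) separately and restacking the
    // results with vectorField.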
val Rows = 4 val Columns = 5 val VectorLength = 3 val VectorShape = Shape(VectorLength) val input = RefVectorField.random(Rows, Columns, VectorShape) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inputField = TestVectorField(input) // Translation by a 0D vector field val vectorField0D = TestVectorField(RefVectorField(new Vector(0.5f, -1.5f))) val translated0D = warp(inputField, vectorField0D) val expected0D = vectorField( warp(vectorElement(inputField, 0), vectorField0D), warp(vectorElement(inputField, 1), vectorField0D), warp(vectorElement(inputField, 2), vectorField0D) ) // Translation by a 2D vector field val vectorField2D = TestVectorField(RefVectorField.random(Rows, Columns, Shape(2))) val translated2D = warp(inputField, vectorField2D) val expected2D = vectorField( warp(vectorElement(inputField, 0), vectorField2D), warp(vectorElement(inputField, 1), vectorField2D), warp(vectorElement(inputField, 2), vectorField2D) ) // Translation of a TensorField by a 2D vector field val inputTensor = RefVectorField.random(Rows, Columns, Shape(2)) val inputTensorField = TestVectorField(inputTensor) val vectorField2D2 = TestVectorField(RefVectorField.random(Rows, Columns, Shape(2))) val translatedTensor2D = warp(inputTensorField, vectorField2D2) val expected2D2 = vectorField( warp(vectorElement(inputTensorField, 0), vectorField2D2), warp(vectorElement(inputTensorField, 1), vectorField2D2) ) // Test implicit trimming that occurs when guide field is smaller than input val TrimmedRows = Rows - 1 val TrimmedColumns = Columns - 2 val trimmedGuide = trim(vectorField2D2, Shape(TrimmedRows, TrimmedColumns)) val translatedTrimmed = warp(inputTensorField, trimmedGuide) val expectedTrimmed = vectorField( warp(vectorElement(inputTensorField, 0), trimmedGuide), warp(vectorElement(inputTensorField, 1), trimmedGuide) ) probe(translated0D, expected0D, translated2D, expected2D, translatedTensor2D, expected2D2, translatedTrimmed, expectedTrimmed) } import graph._ withRelease { step require(readVector(translated0D) ~== readVector(expected0D)) require(readVector(translated2D) ~== readVector(expected2D)) require(readVector(translatedTensor2D) ~== readVector(expected2D2)) require(readVector(translatedTrimmed) ~== readVector(expectedTrimmed)) } } test("vector field / subfield") { // For this test, we assume that subfield has been tested for // scalar fields. val Rows = 4 val Columns = 5 val VectorLength = 3 val VectorShape = Shape(VectorLength) val OutputRows = 3 val OutputCols = 3 val OutputShape = Shape(OutputRows, OutputCols) val input = RefVectorField.random(Rows, Columns, VectorShape) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inputField = TestVectorField(input) // guide field is a 0D vector field val vectorField0D = TestVectorField(RefVectorField(new Vector(0.5f, -1.5f))) val windowed = subfield(inputField, vectorField0D, OutputShape) val expected = vectorField( subfield(vectorElement(inputField, 0), vectorField0D, OutputShape), subfield(vectorElement(inputField, 1), vectorField0D, OutputShape), subfield(vectorElement(inputField, 2), vectorField0D, OutputShape)) // Repeat with a vector length that cannot be handled in SmallTensorAddressing mode. 
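    // (Vectors of length 5 presumably no longer fit the small-tensor types, so this exercises
    // the big-tensor code path rather than the one used for the length-3 case above.)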
val BigTensorVectorLength = 5 val BigVectorShape = Shape(BigTensorVectorLength) val bigInput = RefVectorField.random(Rows, Columns, BigVectorShape) val bigInputField = TestVectorField(bigInput) val bigWindowed = subfield(bigInputField, vectorField0D, OutputShape) val bigExpected = vectorField( subfield(vectorElement(bigInputField, 0), vectorField0D, OutputShape), subfield(vectorElement(bigInputField, 1), vectorField0D, OutputShape), subfield(vectorElement(bigInputField, 2), vectorField0D, OutputShape), subfield(vectorElement(bigInputField, 3), vectorField0D, OutputShape), subfield(vectorElement(bigInputField, 4), vectorField0D, OutputShape) ) // Repeat with a 1D input field val OneDColumns = 7 val oneDinput = RefVectorField.random(OneDColumns, VectorShape) val oneDinputField = TestVectorField(oneDinput) val OneDOutputCols = 5 val oneDOutputShape = Shape(OneDOutputCols) val guideField0D = TestVectorField(RefVectorField(new Vector(2.5f))) val oneDWindowed = subfield(oneDinputField, guideField0D, oneDOutputShape) val oneDExpected0D = vectorField( subfield(vectorElement(oneDinputField, 0), guideField0D, oneDOutputShape), subfield(vectorElement(oneDinputField, 1), guideField0D, oneDOutputShape), subfield(vectorElement(oneDinputField, 2), guideField0D, oneDOutputShape)) probe(windowed, expected, bigWindowed, bigExpected, oneDWindowed, oneDExpected0D) } import graph._ withRelease { step require(readVector(windowed) ~== readVector(expected)) require(readVector(bigWindowed) ~== readVector(bigExpected)) require(readVector(oneDWindowed) ~== readVector(oneDExpected0D)) } } test("vector field / subsample") { val m0 = Matrix( Array(1f, 2f, 3f, 4f), Array(5f, 6f, 7f, 8f), Array(0f, 2f, 4f, 6f) ) val m1 = Matrix( Array(-1f, -2f, -3f, -4f), Array(-5f, -6f, -7f, -8f), Array( 0f, -2f, -4f, -6f) ) val m2 = Matrix( Array(-11f, -12f, -13f, -14f), Array(-15f, -16f, -17f, -18f), Array( -10f, -12f, -14f, -16f) ) val sub0 = Matrix( Array(1f, 3f), Array(0f, 4f) ) val sub1 = Matrix( Array(-1f, -3f), Array( 0f, -4f) ) val sub2 = Matrix( Array(-11f, -13f), Array( -10f, -14f) ) val expect = RefVectorField(2, 2, (r, c) => new Vector(sub0(r, c), sub1(r, c))) val expect3 = RefVectorField(2, 2, (r, c) => new Vector(sub0(r, c), sub1(r, c), sub2(r, c))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val vField = TestVectorField(RefVectorField(3, 4, (r, c) => new Vector(m0(r, c), m1(r, c)))) val vSub = downsample(vField, 2,0) val vField3 = TestVectorField(RefVectorField(3, 4, (r, c) => new Vector(m0(r, c), m1(r, c), m2(r, c)))) val vSub3 = downsample(vField3, 2,0) probe(vSub, vSub3) } import graph._ withRelease { step require(readVector(vSub) == expect) require(readVector(vSub3) == expect3) } } test("vector field / upsample") { val data0 = Matrix( Array( Array(1f, 7f, 5f), Array(-2f, 0f, 4f), Array(4f, -7f, 3f) ) ) val data1 = data0 * 2f val data2 = data1 * 3.5f val data3 = data2 * 7.1f val data4 = data3 * 1.9f val input = RefVectorField(3, 3, (r, c) => new Vector(data0(r, c), data1(r, c))) val expectedData0 = Matrix( Array( Array( 1f, 0f, 7f, 0f, 5f, 0f), Array( 0f, 0f, 0f, 0f, 0f, 0f), Array(-2f, 0f, 0f, 0f, 4f, 0f), Array( 0f, 0f, 0f, 0f, 0f, 0f), Array( 4f, 0f,-7f, 0f, 3f, 0f), Array( 0f, 0f, 0f, 0f, 0f, 0f) ) ) val expectedData1 = expectedData0 * 2f val expectedData2 = expectedData1 * 3.5f val expectedData3 = expectedData2 * 7.1f val expectedData4 = expectedData3 * 1.9f val expected = RefVectorField(6, 6, (r, c) => new Vector(expectedData0(r, c), expectedData1(r, c))) val input2 = RefVectorField(3, 3, (r, 
c) => new Vector(data0(r, c), data1(r, c), data2(r,c))) val expected2 = RefVectorField(6, 6, (r, c) => new Vector(expectedData0(r, c), expectedData1(r, c), expectedData2(r, c))) val input3 = RefVectorField(3, 3, (r, c) => new Vector(data0(r, c), data1(r, c), data2(r,c), data3(r,c), data4(r,c))) val expected3 = RefVectorField(6, 6, (r, c) => new Vector(expectedData0(r, c), expectedData1(r, c), expectedData2(r, c), expectedData3(r, c), expectedData4(r, c))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inField = TestVectorField(input) val outField = upsample(inField) // A second test where input isn't a TensorField val inField2 = TestVectorField(input2) val outField2 = upsample(inField2) // A third test where input is a BigTensorField val inField3 = TestVectorField(input3) val outField3 = upsample(inField3) probe(outField, outField2, outField3) } import graph._ withRelease { step require(readVector(outField) == expected) require(readVector(outField2) == expected2) require(readVector(outField3) == expected3) } } test("vector field / supersample") { val data0 = Matrix( Array( Array(1f, 7f, 5f), Array(-2f, 0f, 4f), Array(4f, -7f, 3f) ) ) val data1 = data0 * 2f val input = RefVectorField(3, 3, (r, c) => new Vector(data0(r, c), data1(r, c))) val expectedData0 = Matrix( Array( Array( 1f, 1f, 7f, 7f, 5f, 5f), Array( 1f, 1f, 7f, 7f, 5f, 5f), Array(-2f,-2f, 0f, 0f, 4f, 4f), Array(-2f,-2f, 0f, 0f, 4f, 4f), Array( 4f, 4f,-7f,-7f, 3f, 3f), Array( 4f, 4f,-7f,-7f, 3f, 3f) ) ) val expectedData1 = expectedData0 * 2f val expected = RefVectorField(6, 6, (r, c) => new Vector(expectedData0(r, c), expectedData1(r, c))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inField = TestVectorField(input) val outField = supersample(inField) probe(outField) } import graph._ withRelease { step require(readVector(outField) == expected) } } test("vector field / divergence") { val data0 = Matrix( Array( Array(1f, 7f, 5f), Array(-2f, 0f, 4f), Array(4f, -7f, 3f) ) ) val data1 = data0 * -2 val div = data0.backwardDivergence(data1) val vectorField = RefVectorField(3, 3, (r, c) => new Vector(data0(r, c), data1(r, c))) val expectedDivergence = RefScalarField(3, 3, (r, c) => div(r, c)) val graph = new ComputeGraph(Optimize) with RefTestInterface { val input = TestVectorField(vectorField) val divergence = backwardDivergence(input) probe(divergence) } import graph._ withRelease { step require(readScalar(divergence) ~== expectedDivergence) } } test("vector field / trim") { val input0 = Matrix( Array( 0f, 1f, 2f, 3f, 4f), Array(10f, 11f, 12f, 13f, 14f), Array(20f, 21f, 22f, 23f, 24f), Array(30f, 31f, 32f, 33f, 34f) ) val input1 = Matrix( Array(20f, 21f, 22f, 23f, 24f), Array( 0f, 1f, 2f, 3f, 4f), Array(30f, 31f, 32f, 33f, 34f), Array(10f, 11f, 12f, 13f, 14f) ) val input2 = Matrix( Array(21f, 22f, 23f, 24f, 25f), Array(10f, 11f, 12f, 13f, 14f), Array(32f, 32f, 33f, 36f, 39f), Array(12f, 18f, 13f, 12f, 17f) ) val expected0 = Matrix( Array( 0f, 1f, 2f, 3f), Array(10f, 11f, 12f, 13f) ) val expected1 = Matrix( Array(20f, 21f, 22f, 23f), Array( 0f, 1f, 2f, 3f) ) val expected2 = Matrix( Array(21f, 22f, 23f, 24f), Array(10f, 11f, 12f, 13f) ) val expected = RefVectorField(2, 4, (r, c) => new Vector(expected0(r, c), expected1(r, c))) val outShape = Shape(2, 4) val vectorShape = Shape(2) val expectedB = RefVectorField(2, 4, (r, c) => new Vector(expected0(r, c), expected1(r, c), expected2(r, c))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = TestVectorField(RefVectorField(4, 5, (r, 
c) => new Vector(input0(r, c), input1(r, c)))) val out = trim(in, outShape) val autoTrimmedOut = VectorField(outShape, vectorShape) autoTrimmedOut <== in // A second test where the field is not a tensor field (invokes different code // path in HyperKernel code generation. val inB = TestVectorField(RefVectorField(4, 5, (r, c) => new Vector(input0(r, c), input1(r, c), input2(r, c)))) val outB = trim(inB, outShape) probe(out, autoTrimmedOut, outB) } import graph._ withRelease { step require(readVector(out) == expected) require(readVector(autoTrimmedOut) == expected) require(readVector(outB) == expectedB) } } test("vector field / apply") { val input0 = Matrix( Array( 0f, 1f, 2f, 3f, 4f), Array(10f, 11f, 12f, 13f, 14f), Array(20f, 21f, 22f, 23f, 24f), Array(30f, 31f, 32f, 33f, 34f) ) val input1 = Matrix( Array(20f, 21f, 22f, 23f, 24f), Array( 0f, 1f, 2f, 3f, 4f), Array(30f, 31f, 32f, 33f, 34f), Array(10f, 11f, 12f, 13f, 14f) ) val input2 = Matrix( Array(21f, 22f, 23f, 24f, 25f), Array(10f, 11f, 12f, 13f, 14f), Array(32f, 32f, 33f, 36f, 39f), Array(12f, 18f, 13f, 12f, 17f) ) val input3 = Matrix( Array(32f, 32f, 33f, 36f, 39f), Array(21f, 22f, 23f, 24f, 25f), Array(10f, 11f, 12f, 13f, 14f), Array(12f, 18f, 13f, 12f, 17f) ) val input4 = Matrix( Array(21f, 22f, 23f, 24f, 25f), Array(32f, 32f, 33f, 36f, 39f), Array(12f, 18f, 13f, 12f, 17f), Array(10f, 11f, 12f, 13f, 14f) ) val expected0 = Matrix( Array(12f, 13f), Array(22f, 23f) ) val expected1 = Matrix( Array( 2f, 3f), Array(32f, 33f) ) val expected2 = Matrix( Array(12f, 13f), Array(33f, 36f) ) val expected3 = Matrix( Array(23f, 24f), Array(12f, 13f) ) val expected4 = Matrix( Array(33f, 36f), Array(13f, 12f) ) val expectedShort = RefVectorField(2, 2, (r, c) => new Vector(expected0(r, c), expected1(r, c), expected2(r,c))) val expectedLong = RefVectorField(2, 2, (r, c) => new Vector(expected0(r, c), expected1(r, c), expected2(r,c), expected3(r,c), expected4(r,c))) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inShort = TestVectorField(RefVectorField(4, 5, (r, c) => new Vector(input0(r, c), input1(r, c), input2(r,c)))) val outShort = inShort(1 to 2, 2 to 3) val inLong = TestVectorField(RefVectorField(4, 5, (r, c) => new Vector(input0(r, c), input1(r, c), input2(r,c), input3(r,c), input4(r,c)))) val outLong = inLong(1 to 2, 2 to 3) probe(outShort, outLong) } import graph._ withRelease { step require(readVector(outShort) == expectedShort) readVector(outLong).print require(readVector(outLong) == expectedLong) } } /* test("vector field / multiplex") { val Rows = 11 val Columns = 13 val VectorShape = Shape(5) val field0 = RefVectorField.random(Rows, Columns, VectorShape) val field1 = RefVectorField.random(Rows, Columns, VectorShape) val field2 = RefVectorField.random(Rows, Columns, VectorShape) val field3 = RefVectorField.random(Rows, Columns, VectorShape) val fields = Array(field0, field1, field2, field3) val graph = new ComputeGraph(Optimize) with RefTestInterface2 { val fieldArray = Array.tabulate(fields.length) { i => TestVectorField(fields(i)) } val indexField = TestScalarField(RefScalarField(2f)) val outArray = Array.tabulate(fields.length) { i => TestVectorField(fields(i) * 2) } // Test selection (reading) val selectArray = fieldArray.select(indexField) // Test writing outArray.insert(indexField) <== field0 } import graph._ withRelease { step require(readVector(selectArray) == field2) require(readVector(outArray(0)) == field0 * 2) require(readVector(outArray(1)) == field1 * 2) require(readVector(outArray(2)) == field0) 
require(readVector(outArray(3)) == field3 * 2) } } test("vector field / multiplex 2D") { val Rows = 11 val Columns = 13 val VectorShape = Shape(5) val field0 = RefVectorField.random(Rows, Columns, VectorShape) val field1 = RefVectorField.random(Rows, Columns, VectorShape) val field2 = RefVectorField.random(Rows, Columns, VectorShape) val field3 = RefVectorField.random(Rows, Columns, VectorShape) val field4 = RefVectorField.random(Rows, Columns, VectorShape) val field5 = RefVectorField.random(Rows, Columns, VectorShape) val fields = Array( Array(field0, field1, field2), Array(field3, field4, field5) ) val fieldsRows = fields.length val fieldsColumns = fields(0).length val graph = new ComputeGraph(Optimize) with RefTestInterface2 { val fieldArray = Array.tabulate(fields.length, fields(0).length) { (i, j) => TestVectorField(fields(i)(j)) } val indexField1 = TestScalarField(RefScalarField(1f)) val indexField2 = TestScalarField(RefScalarField(0f)) val outArray = Array.tabulate(fieldsRows, fieldsColumns) { (i, j) => TestVectorField(fields(i)(j) * 2) } // Test selection (reading) val selectArray = fieldArray.select(indexField1, indexField2) // Test writing outArray.insert(indexField1, indexField2) <== field0 } import graph._ withRelease { step require(readVector(selectArray) == field3) require(readVector(outArray(0)(0)) == field0 * 2) require(readVector(outArray(0)(1)) == field1 * 2) require(readVector(outArray(0)(2)) == field2 * 2) require(readVector(outArray(1)(0)) == field0) require(readVector(outArray(1)(1)) == field4 * 2) require(readVector(outArray(1)(2)) == field5 * 2) } } */ test("vector field / non max supression") { val Rows = 4 val Cols = 5 // hit all 3 cases: border, corner, interior /* val input0 = Matrix( Array( 0f, 1f, 13f, 15f, 4f), Array(10f, 22f, 12f, 13f, 14f), Array(20f, 20f, 22f, 20f, 19f), Array(30f, 16f, 19f, 33f, 34f) ) val input1 = Matrix( Array(10f, 22f, 12f, 13f, 14f), Array(30f, 16f, 19f, 33f, 34f), Array(20f, 20f, 22f, 20f, 19f), Array( 0f, 1f, 13f, 15f, 4f) ) val input2 = Matrix( Array(10f, 22f, 12f, 13f, 14f), Array( 0f, 1f, 13f, 15f, 4f), Array(20f, 20f, 22f, 20f, 19f), Array(30f, 16f, 19f, 33f, 34f) ) val input3 = Matrix( Array(10f, 22f, 12f, 13f, 14f), Array( 0f, 22f, 13f, 15f, 4f), Array(20f, 20f, 22f, 20f, 19f), Array(30f, 16f, 19f, 33f, 34f) ) val input4= Matrix( Array(10f, 22f, 12f, 13f, 14f), Array( 0f, 1f, 13f, 15f, 4f), Array(20f, 20f, 22f, 20f, 19f), Array(30f, 16f, 33f, 33f, 34f) ) */ val expectedOut0 = Matrix( Array( 0f, 0f, 0f, 15f, 0f), Array( 0f, 22f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array(30f, 0f, 0f, 0f, 34f) ) val expectedOut1 = Matrix( Array( 0f, 0f, 0f, 0f, 0f), Array(30f, 0f, 0f, 0f, 34f), Array( 0f, 0f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f) ) val expectedOut2 = Matrix( Array( 0f, 22f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array(30f, 0f, 0f, 0f, 34f) ) val expectedOut3 = Matrix( Array( 0f, 22f, 0f, 0f, 0f), Array( 0f, 22f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array(30f, 0f, 0f, 0f, 34f) ) val expectedOut4 = Matrix( Array( 0f, 22f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array( 0f, 0f, 0f, 0f, 0f), Array(30f, 0f, 33f, 0f, 34f) ) val expectedOutput = RefVectorField(Rows, Cols, (r, c) => new Vector(expectedOut0(r, c), expectedOut1(r, c), expectedOut2(r, c), expectedOut3(r, c), expectedOut4(r, c))) val expectedOutput2 = RefVectorField(Rows, Cols, (r, c) => new Vector(expectedOut0(r, c), expectedOut1(r, c), expectedOut2(r, c), expectedOut3(r, c))) val graph = new ComputeGraph(Optimize) with 
RefTestInterface { val in = TestVectorField(expectedOutput) val out = nonMaximumSuppression(in) val in2 = TestVectorField(expectedOutput2) val out2 = nonMaximumSuppression(in2) probe(out, out2) } import graph._ withRelease { step require(readVector(out) == expectedOutput) require(readVector(out2) == expectedOutput2) } } test("vector field / local max position") { val neighborhood = Matrix( Array( Array(1f, 1f, 0f), Array(1f, 1f, 1f), Array(0f, 1f, 1f) ) ) val input = RefScalarField(Matrix( Array(1.0f, 1.1f, 0.0f, 2.1f, 3.3f), Array(0.0f, 5.0f, 3.0f, 1.2f, 6.0f), Array(1.0f, 2.0f, 0.0f, 2.1f, 0.0f), Array(1.1f, 1.2f, 1.0f, 2.0f, 2.3f), Array(3.0f, 1.1f, 4.0f, 3.0f, 1.0f) )) val expectedOutRow = Matrix( Array( 1f, 1f, 1f, 1f, 1f), Array( 0f, 0f, 0f, 0f, 0f), Array( 0f, -1f, -1f, -1f, -1f), Array( 1f, 1f, 1f, 1f, 0f), Array( 0f, 0f, 0f, 0f, 0f) ) val expectedOutCol = Matrix( Array( 1f, 0f, 0f, 1f, 0f), Array( 1f, 0f, -1f, 1f, 0f), Array( 1f, 0f, -1f, -1f, 0f), Array( 0f, 1f, 0f, 0f, 0f), Array( 0f, 1f, 0f, -1f, -1f) ) val expectedOutput = RefVectorField(5, 5, (r, c) => new Vector(expectedOutRow(r, c), expectedOutCol(r, c)) ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inField = TestScalarField(input) val outField = localMaxPosition(inField, neighborhood) probe(outField) } import graph._ withRelease { step require(readVector(outField) == expectedOutput) } } test("vector field / local min position") { val neighborhood = Matrix( Array( Array(1f, 1f, 0f), Array(1f, 1f, 1f), Array(0f, 1f, 1f) ) ) val input = RefScalarField(Matrix( Array(1.0f, 1.1f, 0.0f, 2.1f, 3.3f), Array(0.0f, 5.0f, 3.0f, 1.2f, 6.0f), Array(1.0f, 2.0f, 0.1f, 2.1f, 0.2f), Array(1.1f, 1.2f, 1.0f, 2.0f, 2.3f), Array(3.0f, 1.3f, 4.0f, 3.0f, 0.9f) )) val expectedOutRow = Matrix( Array( 1f, 0f, 0f, 0f, 0f), Array( 0f, 0f, -1f, -1f, 1f), Array(-1f, -1f, 0f, 0f, 0f), Array(-1f, -1f, -1f, -1f, -1f), Array(-1f, -1f, -1f, 0f, 0f) ) val expectedOutCol = Matrix( Array( 0f, 1f, 0f, -1f, -1f), Array( 0f, -1f, 0f, -1f, 0f), Array( 0f, -1f, 0f, -1f, 0f), Array( 0f, -1f, 0f, -1f, 0f), Array( 0f, -1f, 0f, 1f, 0f) ) val expectedOutput = RefVectorField(5, 5, (r, c) => new Vector(expectedOutRow(r, c), expectedOutCol(r, c)) ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val inField = TestScalarField(input) val outField = localMinPosition(inField, neighborhood) probe(outField) } import graph._ withRelease { step require(readVector(outField) == expectedOutput) } } /** Test the subfields operator on a 2D MatrixField producing a field * of a smaller shape based on using only valid subfields. 
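   * Each valid subfield becomes one component of the output vectors, ordered row-major over
   * the subfield origins.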
*/ test("vector field / small subfields") { val SubfieldSize = 3 // Input scalar field val input = Matrix( Array(1f, 2f, 3f, 4f), Array(5f, 6f, 7f, 8f), Array(9f, 8f, 7f, 6f), Array(3f, 4f, 5f, 6f), Array(2f, 4f, 6f, 8f) ) // Expected output subfields (layers of vector field) val out00 = Matrix( Array(1f, 2f, 3f), Array(5f, 6f, 7f), Array(9f, 8f, 7f) ) val out01 = Matrix( Array(2f, 3f, 4f), Array(6f, 7f, 8f), Array(8f, 7f, 6f) ) val out10 = Matrix( Array(5f, 6f, 7f), Array(9f, 8f, 7f), Array(3f, 4f, 5f) ) val out11 = Matrix( Array(6f, 7f, 8f), Array(8f, 7f, 6f), Array(4f, 5f, 6f) ) val out20 = Matrix( Array(9f, 8f, 7f), Array(3f, 4f, 5f), Array(2f, 4f, 6f) ) val out21 = Matrix( Array(8f, 7f, 6f), Array(4f, 5f, 6f), Array(4f, 6f, 8f) ) val expectedOutput = RefVectorField(SubfieldSize, SubfieldSize, (r, c) => new Vector( out00(r, c), out01(r, c), out10(r, c), out11(r, c), out20(r, c), out21(r, c) ) ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = TestScalarField(input) val out = subfields(in, SubfieldSize) probe(out) } import graph._ withRelease { step require(readVector(out) == expectedOutput) } } /** Test the subfields operator on a 2D MatrixField producing a field * of a smaller shape based on using only valid subfields. */ test("vector field / big subfields") { val Rows = 19 val Columns = 21 val SubfieldSize = 15 val SubfieldRows = Rows - SubfieldSize + 1 val SubfieldColumns = Columns - SubfieldSize + 1 val Subfields = SubfieldRows * SubfieldColumns // Input scalar field val input = Matrix.random(Rows, Columns) // Subfields val subfieldsx = Array.tabulate(SubfieldRows, SubfieldColumns) { (r, c) => new Matrix(SubfieldSize, SubfieldSize) { for (row <- 0 until rows; col <- 0 until columns) { this(row, col) = input(row + r, col + c) } } } val layers = new Array[Matrix](Subfields) var index = 0 for (r <- 0 until SubfieldRows; c <- 0 until SubfieldColumns) { layers(index) = subfieldsx(r)(c) index += 1 } // Subfields stacked into vector field (expected output) val expectedOut = RefVectorField(SubfieldSize, SubfieldSize, (r, c) => { val v = new Vector(Subfields) for (i <- 0 until Subfields) v(i) = layers(i)(r, c) v } ) val graph = new ComputeGraph(Optimize) with RefTestInterface { val in = TestScalarField(input) val out = subfields(in, SubfieldSize) probe(out) } import graph._ withRelease { step require(readVector(out) == expectedOut) } } }
hpe-cct/cct-core
src/test/scala/cogx/VectorFieldSpec.scala
Scala
apache-2.0
92,585
package codechicken.microblock import net.minecraft.world.World import net.minecraft.entity.player.EntityPlayer import net.minecraft.util.MovingObjectPosition import codechicken.lib.vec.Vector3 import codechicken.lib.vec.BlockCoord import codechicken.lib.vec.Rotation import codechicken.multipart.ControlKeyModifer._ import net.minecraft.item.ItemStack import codechicken.multipart.TileMultipart import codechicken.lib.raytracer.ExtendedMOP abstract class ExecutablePlacement(val pos:BlockCoord, val part:Microblock) { def place(world:World, player:EntityPlayer, item:ItemStack) def consume(world:World, player:EntityPlayer, item:ItemStack) } class AdditionPlacement($pos:BlockCoord, $part:Microblock) extends ExecutablePlacement($pos, $part) { def place(world:World, player:EntityPlayer, item:ItemStack) { TileMultipart.addPart(world, pos, part) } def consume(world:World, player:EntityPlayer, item:ItemStack) { item.stackSize-=1 } } class ExpandingPlacement($pos:BlockCoord, $part:Microblock, opart:Microblock) extends ExecutablePlacement($pos, $part) { def place(world:World, player:EntityPlayer, item:ItemStack) { opart.shape = part.shape opart.tile.notifyPartChange(opart) opart.sendShapeUpdate() } def consume(world:World, player:EntityPlayer, item:ItemStack) { item.stackSize-=1 } } abstract class PlacementProperties { def opposite(slot:Int, side:Int):Int def sneakOpposite(slot:Int, side:Int) = true def expand(slot:Int, side:Int) = true def microClass:MicroblockClass def placementGrid:PlacementGrid def customPlacement(pmt:MicroblockPlacement):ExecutablePlacement = null } object MicroblockPlacement { def apply(player:EntityPlayer, hit:MovingObjectPosition, size:Int, material:Int, checkMaterial:Boolean, pp:PlacementProperties):ExecutablePlacement = new MicroblockPlacement(player, hit, size, material, checkMaterial, pp)() } class MicroblockPlacement(val player:EntityPlayer, val hit:MovingObjectPosition, val size:Int, val material:Int, val checkMaterial:Boolean, val pp:PlacementProperties) { val world = player.worldObj val mcrClass = pp.microClass val pos = new BlockCoord(hit.blockX, hit.blockY, hit.blockZ) val vhit = new Vector3(hit.hitVec).add(-pos.x, -pos.y, -pos.z) val gtile = TileMultipart.getOrConvertTile2(world, pos) val htile = gtile._1 val slot = pp.placementGrid.getHitSlot(vhit, hit.sideHit) val oslot = pp.opposite(slot, hit.sideHit) val d = getHitDepth(vhit, hit.sideHit) val useOppMod = pp.sneakOpposite(slot, hit.sideHit) val oppMod = player.isControlDown val internal = d < 1 && htile != null val doExpand = internal && !gtile._2 && !player.isSneaking && !(oppMod && useOppMod) && pp.expand(slot, hit.sideHit) val side = hit.sideHit def apply():ExecutablePlacement = { val customPlacement = pp.customPlacement(this) if(customPlacement != null) return customPlacement if(slot < 0) return null if(doExpand) { val hpart = htile.partList(ExtendedMOP.getData[(Int, _)](hit)._1) if(hpart.getType == mcrClass.getName) { val mpart = hpart.asInstanceOf[CommonMicroblock] if(mpart.material == material && mpart.getSize + size < 8) return expand(mpart) } } if(internal) { if(d < 0.5 || !useOppMod) { val ret = internalPlacement(htile, slot) if(ret != null) { if(!useOppMod || !oppMod) return ret else return internalPlacement(htile, oslot) } } if(useOppMod && !oppMod) return internalPlacement(htile, oslot) else return externalPlacement(slot) } if(!useOppMod || !oppMod) return externalPlacement(slot) else return externalPlacement(oslot) } def expand(mpart:CommonMicroblock):ExecutablePlacement = expand(mpart, 
create(mpart.getSize+size, mpart.getSlot, mpart.material))

    /** Expand an existing microblock in place, if nothing collides and the tile permits the replacement. */
    def expand(mpart:Microblock, npart:Microblock):ExecutablePlacement =
    {
        val pos = new BlockCoord(mpart.tile)
        if(TileMultipart.checkNoEntityCollision(world, pos, npart) && mpart.tile.canReplacePart(mpart, npart))
            return new ExpandingPlacement(pos, npart, mpart)
        return null
    }

    def internalPlacement(htile:TileMultipart, slot:Int):ExecutablePlacement = internalPlacement(htile, create(size, slot, material))

    /** Placement that adds the new part to the multipart tile that was hit. */
    def internalPlacement(htile:TileMultipart, npart:Microblock):ExecutablePlacement =
    {
        val pos = new BlockCoord(htile)
        if(TileMultipart.checkNoEntityCollision(world, pos, npart) && htile.canAddPart(npart))
            return new AdditionPlacement(pos, npart)
        return null
    }

    def externalPlacement(slot:Int):ExecutablePlacement = externalPlacement(create(size, slot, material))

    /** Placement that adds the new part to the block space adjacent to the hit face. */
    def externalPlacement(npart:Microblock):ExecutablePlacement =
    {
        val pos = this.pos.copy().offset(side)
        if(TileMultipart.canPlacePart(world, pos, npart))
            return new AdditionPlacement(pos, npart)
        return null
    }

    /** Depth of the hit surface within the block space: 1 when the hit lies on the outer
        boundary of the block, less than 1 when the hit surface is recessed inside it. */
    def getHitDepth(vhit:Vector3, side:Int):Double =
        vhit.copy.scalarProject(Rotation.axes(side)) + (side%2^1)

    /** Construct a microblock part with the given size, slot and material. */
    def create(size:Int, slot:Int, material:Int) =
    {
        val part = mcrClass.create(world.isRemote, material)
        part.setShape(size, slot)
        part
    }
}
kenzierocks/ForgeMultipart
src/codechicken/microblock/MicroblockPlacement.scala
Scala
lgpl-2.1
5,764
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler.cluster.mesos import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ import scala.concurrent.duration._ import scala.reflect.ClassTag import org.apache.mesos.{Protos, Scheduler, SchedulerDriver} import org.apache.mesos.Protos._ import org.mockito.Matchers import org.mockito.Matchers._ import org.mockito.Mockito._ import org.scalatest.BeforeAndAfter import org.scalatest.concurrent.ScalaFutures import org.scalatest.mockito.MockitoSugar import org.apache.spark.{LocalSparkContext, SecurityManager, SparkConf, SparkContext, SparkFunSuite} import org.apache.spark.deploy.mesos.config._ import org.apache.spark.internal.config._ import org.apache.spark.network.shuffle.mesos.MesosExternalShuffleClient import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef} import org.apache.spark.scheduler.TaskSchedulerImpl import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.{RegisterExecutor, RemoveExecutor} import org.apache.spark.scheduler.cluster.mesos.Utils._ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar with BeforeAndAfter with ScalaFutures { private var sparkConf: SparkConf = _ private var driver: SchedulerDriver = _ private var taskScheduler: TaskSchedulerImpl = _ private var backend: MesosCoarseGrainedSchedulerBackend = _ private var externalShuffleClient: MesosExternalShuffleClient = _ private var driverEndpoint: RpcEndpointRef = _ @volatile private var stopCalled = false // All 'requests' to the scheduler run immediately on the same thread, so // demand that all futures have their value available immediately. 
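  // (A zero-length timeout makes futureValue fail immediately if a result is not already present.)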
implicit override val patienceConfig = PatienceConfig(timeout = Duration(0, TimeUnit.SECONDS)) test("mesos supports killing and limiting executors") { setBackend() sparkConf.set("spark.driver.host", "driverHost") sparkConf.set("spark.driver.port", "1234") val minMem = backend.executorMemory(sc) val minCpu = 4 val offers = List(Resources(minMem, minCpu)) // launches a task on a valid offer offerResources(offers) verifyTaskLaunched(driver, "o1") // kills executors assert(backend.doRequestTotalExecutors(0).futureValue) assert(backend.doKillExecutors(Seq("0")).futureValue) val taskID0 = createTaskId("0") verify(driver, times(1)).killTask(taskID0) // doesn't launch a new task when requested executors == 0 offerResources(offers, 2) verifyDeclinedOffer(driver, createOfferId("o2")) // Launches a new task when requested executors is positive backend.doRequestTotalExecutors(2) offerResources(offers, 2) verifyTaskLaunched(driver, "o2") } test("mesos supports killing and relaunching tasks with executors") { setBackend() // launches a task on a valid offer val minMem = backend.executorMemory(sc) + 1024 val minCpu = 4 val offer1 = Resources(minMem, minCpu) val offer2 = Resources(minMem, 1) offerResources(List(offer1, offer2)) verifyTaskLaunched(driver, "o1") // accounts for a killed task val status = createTaskStatus("0", "s1", TaskState.TASK_KILLED) backend.statusUpdate(driver, status) verify(driver, times(1)).reviveOffers() // Launches a new task on a valid offer from the same slave offerResources(List(offer2)) verifyTaskLaunched(driver, "o2") } test("mesos supports spark.executor.cores") { val executorCores = 4 setBackend(Map("spark.executor.cores" -> executorCores.toString)) val executorMemory = backend.executorMemory(sc) val offers = List(Resources(executorMemory * 2, executorCores + 1)) offerResources(offers) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == executorCores) } test("mesos supports unset spark.executor.cores") { setBackend() val executorMemory = backend.executorMemory(sc) val offerCores = 10 offerResources(List(Resources(executorMemory * 2, offerCores))) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == offerCores) } test("mesos does not acquire more than spark.cores.max") { val maxCores = 10 setBackend(Map("spark.cores.max" -> maxCores.toString)) val executorMemory = backend.executorMemory(sc) offerResources(List(Resources(executorMemory, maxCores + 1))) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == maxCores) } test("mesos does not acquire gpus if not specified") { setBackend() val executorMemory = backend.executorMemory(sc) offerResources(List(Resources(executorMemory, 1, 1))) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) val gpus = backend.getResource(taskInfos.head.getResourcesList, "gpus") assert(gpus == 0.0) } test("mesos does not acquire more than spark.mesos.gpus.max") { val maxGpus = 5 setBackend(Map("spark.mesos.gpus.max" -> maxGpus.toString)) val executorMemory = backend.executorMemory(sc) offerResources(List(Resources(executorMemory, 1, maxGpus + 1))) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) val gpus = backend.getResource(taskInfos.head.getResourcesList, 
"gpus") assert(gpus == maxGpus) } test("mesos declines offers that violate attribute constraints") { setBackend(Map("spark.mesos.constraints" -> "x:true")) offerResources(List(Resources(backend.executorMemory(sc), 4))) verifyDeclinedOffer(driver, createOfferId("o1"), true) } test("mesos declines offers with a filter when reached spark.cores.max") { val maxCores = 3 setBackend(Map("spark.cores.max" -> maxCores.toString)) val executorMemory = backend.executorMemory(sc) offerResources(List( Resources(executorMemory, maxCores + 1), Resources(executorMemory, maxCores + 1))) verifyTaskLaunched(driver, "o1") verifyDeclinedOffer(driver, createOfferId("o2"), true) } test("mesos declines offers with a filter when maxCores not a multiple of executor.cores") { val maxCores = 4 val executorCores = 3 setBackend(Map( "spark.cores.max" -> maxCores.toString, "spark.executor.cores" -> executorCores.toString )) val executorMemory = backend.executorMemory(sc) offerResources(List( Resources(executorMemory, maxCores + 1), Resources(executorMemory, maxCores + 1) )) verifyTaskLaunched(driver, "o1") verifyDeclinedOffer(driver, createOfferId("o2"), true) } test("mesos declines offers with a filter when reached spark.cores.max with executor.cores") { val maxCores = 4 val executorCores = 2 setBackend(Map( "spark.cores.max" -> maxCores.toString, "spark.executor.cores" -> executorCores.toString )) val executorMemory = backend.executorMemory(sc) offerResources(List( Resources(executorMemory, maxCores + 1), Resources(executorMemory, maxCores + 1), Resources(executorMemory, maxCores + 1) )) verifyTaskLaunched(driver, "o1") verifyTaskLaunched(driver, "o2") verifyDeclinedOffer(driver, createOfferId("o3"), true) } test("mesos assigns tasks round-robin on offers") { val executorCores = 4 val maxCores = executorCores * 2 setBackend(Map("spark.executor.cores" -> executorCores.toString, "spark.cores.max" -> maxCores.toString)) val executorMemory = backend.executorMemory(sc) offerResources(List( Resources(executorMemory * 2, executorCores * 2), Resources(executorMemory * 2, executorCores * 2))) verifyTaskLaunched(driver, "o1") verifyTaskLaunched(driver, "o2") } test("mesos creates multiple executors on a single slave") { val executorCores = 4 setBackend(Map("spark.executor.cores" -> executorCores.toString)) // offer with room for two executors val executorMemory = backend.executorMemory(sc) offerResources(List(Resources(executorMemory * 2, executorCores * 2))) // verify two executors were started on a single offer val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 2) } test("mesos doesn't register twice with the same shuffle service") { setBackend(Map("spark.shuffle.service.enabled" -> "true")) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) verifyTaskLaunched(driver, "o1") val offer2 = createOffer("o2", "s1", mem, cpu) backend.resourceOffers(driver, List(offer2).asJava) verifyTaskLaunched(driver, "o2") val status1 = createTaskStatus("0", "s1", TaskState.TASK_RUNNING) backend.statusUpdate(driver, status1) val status2 = createTaskStatus("1", "s1", TaskState.TASK_RUNNING) backend.statusUpdate(driver, status2) verify(externalShuffleClient, times(1)) .registerDriverWithShuffleService(anyString, anyInt, anyLong, anyLong) } test("Port offer decline when there is no appropriate range") { setBackend(Map(BLOCK_MANAGER_PORT.key -> "30100")) val offeredPorts = (31100L, 31200L) val (mem, cpu) = 
(backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu, Some(offeredPorts)) backend.resourceOffers(driver, List(offer1).asJava) verify(driver, times(1)).declineOffer(offer1.getId) } test("Port offer accepted when ephemeral ports are used") { setBackend() val offeredPorts = (31100L, 31200L) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu, Some(offeredPorts)) backend.resourceOffers(driver, List(offer1).asJava) verifyTaskLaunched(driver, "o1") } test("Port offer accepted with user defined port numbers") { val port = 30100 setBackend(Map(BLOCK_MANAGER_PORT.key -> s"$port")) val offeredPorts = (30000L, 31000L) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu, Some(offeredPorts)) backend.resourceOffers(driver, List(offer1).asJava) val taskInfo = verifyTaskLaunched(driver, "o1") val taskPortResources = taskInfo.head.getResourcesList.asScala. find(r => r.getType == Value.Type.RANGES && r.getName == "ports") val isPortInOffer = (r: Resource) => { r.getRanges().getRangeList .asScala.exists(range => range.getBegin == port && range.getEnd == port) } assert(taskPortResources.exists(isPortInOffer)) } test("mesos kills an executor when told") { setBackend() val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) verifyTaskLaunched(driver, "o1") backend.doKillExecutors(List("0")) verify(driver, times(1)).killTask(createTaskId("0")) } test("weburi is set in created scheduler driver") { initializeSparkConf() sc = new SparkContext(sparkConf) val taskScheduler = mock[TaskSchedulerImpl] when(taskScheduler.sc).thenReturn(sc) val driver = mock[SchedulerDriver] when(driver.start()).thenReturn(Protos.Status.DRIVER_RUNNING) val securityManager = mock[SecurityManager] val backend = new MesosCoarseGrainedSchedulerBackend( taskScheduler, sc, "master", securityManager) { override protected def createSchedulerDriver( masterUrl: String, scheduler: Scheduler, sparkUser: String, appName: String, conf: SparkConf, webuiUrl: Option[String] = None, checkpoint: Option[Boolean] = None, failoverTimeout: Option[Double] = None, frameworkId: Option[String] = None): SchedulerDriver = { markRegistered() assert(webuiUrl.isDefined) assert(webuiUrl.get.equals("http://webui")) driver } } backend.start() } test("failover timeout is set in created scheduler driver") { val failoverTimeoutIn = 3600.0 initializeSparkConf(Map(DRIVER_FAILOVER_TIMEOUT.key -> failoverTimeoutIn.toString)) sc = new SparkContext(sparkConf) val taskScheduler = mock[TaskSchedulerImpl] when(taskScheduler.sc).thenReturn(sc) val driver = mock[SchedulerDriver] when(driver.start()).thenReturn(Protos.Status.DRIVER_RUNNING) val securityManager = mock[SecurityManager] val backend = new MesosCoarseGrainedSchedulerBackend( taskScheduler, sc, "master", securityManager) { override protected def createSchedulerDriver( masterUrl: String, scheduler: Scheduler, sparkUser: String, appName: String, conf: SparkConf, webuiUrl: Option[String] = None, checkpoint: Option[Boolean] = None, failoverTimeout: Option[Double] = None, frameworkId: Option[String] = None): SchedulerDriver = { markRegistered() assert(failoverTimeout.isDefined) assert(failoverTimeout.get.equals(failoverTimeoutIn)) driver } } backend.start() } test("honors unset spark.mesos.containerizer") { setBackend(Map("spark.mesos.executor.docker.image" -> "test")) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = 
createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.head.getContainer.getType == ContainerInfo.Type.DOCKER) } test("honors spark.mesos.containerizer=\"mesos\"") { setBackend(Map( "spark.mesos.executor.docker.image" -> "test", "spark.mesos.containerizer" -> "mesos")) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.head.getContainer.getType == ContainerInfo.Type.MESOS) } test("docker settings are reflected in created tasks") { setBackend(Map( "spark.mesos.executor.docker.image" -> "some_image", "spark.mesos.executor.docker.forcePullImage" -> "true", "spark.mesos.executor.docker.volumes" -> "/host_vol:/container_vol:ro", "spark.mesos.executor.docker.portmaps" -> "8080:80:tcp" )) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val launchedTasks = verifyTaskLaunched(driver, "o1") assert(launchedTasks.size == 1) val containerInfo = launchedTasks.head.getContainer assert(containerInfo.getType == ContainerInfo.Type.DOCKER) val volumes = containerInfo.getVolumesList.asScala assert(volumes.size == 1) val volume = volumes.head assert(volume.getHostPath == "/host_vol") assert(volume.getContainerPath == "/container_vol") assert(volume.getMode == Volume.Mode.RO) val dockerInfo = containerInfo.getDocker val portMappings = dockerInfo.getPortMappingsList.asScala assert(portMappings.size == 1) val portMapping = portMappings.head assert(portMapping.getHostPort == 8080) assert(portMapping.getContainerPort == 80) assert(portMapping.getProtocol == "tcp") } test("force-pull-image option is disabled by default") { setBackend(Map( "spark.mesos.executor.docker.image" -> "some_image" )) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val launchedTasks = verifyTaskLaunched(driver, "o1") assert(launchedTasks.size == 1) val containerInfo = launchedTasks.head.getContainer assert(containerInfo.getType == ContainerInfo.Type.DOCKER) val dockerInfo = containerInfo.getDocker assert(dockerInfo.getImage == "some_image") assert(!dockerInfo.getForcePullImage) } test("mesos supports spark.executor.uri") { val url = "spark.spark.spark.com" setBackend(Map( "spark.executor.uri" -> url ), null) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val launchedTasks = verifyTaskLaunched(driver, "o1") assert(launchedTasks.head.getCommand.getUrisList.asScala(0).getValue == url) } test("mesos supports setting fetcher cache") { val url = "spark.spark.spark.com" setBackend(Map( "spark.mesos.fetcherCache.enable" -> "true", "spark.executor.uri" -> url ), null) val offers = List(Resources(backend.executorMemory(sc), 1)) offerResources(offers) val launchedTasks = verifyTaskLaunched(driver, "o1") val uris = launchedTasks.head.getCommand.getUrisList assert(uris.size() == 1) assert(uris.asScala.head.getCache) } test("mesos supports disabling fetcher cache") { val url = "spark.spark.spark.com" setBackend(Map( "spark.mesos.fetcherCache.enable" -> "false", "spark.executor.uri" -> url ), null) val offers = List(Resources(backend.executorMemory(sc), 1)) offerResources(offers) val launchedTasks = 
verifyTaskLaunched(driver, "o1") val uris = launchedTasks.head.getCommand.getUrisList assert(uris.size() == 1) assert(!uris.asScala.head.getCache) } test("mesos sets task name to spark.app.name") { setBackend() val offers = List(Resources(backend.executorMemory(sc), 1)) offerResources(offers) val launchedTasks = verifyTaskLaunched(driver, "o1") // Add " 0" to the taskName to match the executor number that is appended assert(launchedTasks.head.getName == "test-mesos-dynamic-alloc 0") } test("mesos sets configurable labels on tasks") { val taskLabelsString = "mesos:test,label:test" setBackend(Map( "spark.mesos.task.labels" -> taskLabelsString )) // Build up the labels val taskLabels = Protos.Labels.newBuilder() .addLabels(Protos.Label.newBuilder() .setKey("mesos").setValue("test").build()) .addLabels(Protos.Label.newBuilder() .setKey("label").setValue("test").build()) .build() val offers = List(Resources(backend.executorMemory(sc), 1)) offerResources(offers) val launchedTasks = verifyTaskLaunched(driver, "o1") val labels = launchedTasks.head.getLabels assert(launchedTasks.head.getLabels.equals(taskLabels)) } test("mesos supports spark.mesos.network.name and spark.mesos.network.labels") { setBackend(Map( "spark.mesos.network.name" -> "test-network-name", "spark.mesos.network.labels" -> "key1:val1,key2:val2" )) val (mem, cpu) = (backend.executorMemory(sc), 4) val offer1 = createOffer("o1", "s1", mem, cpu) backend.resourceOffers(driver, List(offer1).asJava) val launchedTasks = verifyTaskLaunched(driver, "o1") val networkInfos = launchedTasks.head.getContainer.getNetworkInfosList assert(networkInfos.size == 1) assert(networkInfos.get(0).getName == "test-network-name") assert(networkInfos.get(0).getLabels.getLabels(0).getKey == "key1") assert(networkInfos.get(0).getLabels.getLabels(0).getValue == "val1") assert(networkInfos.get(0).getLabels.getLabels(1).getKey == "key2") assert(networkInfos.get(0).getLabels.getLabels(1).getValue == "val2") } test("supports spark.scheduler.minRegisteredResourcesRatio") { val expectedCores = 1 setBackend(Map( "spark.cores.max" -> expectedCores.toString, "spark.scheduler.minRegisteredResourcesRatio" -> "1.0")) val offers = List(Resources(backend.executorMemory(sc), expectedCores)) offerResources(offers) val launchedTasks = verifyTaskLaunched(driver, "o1") assert(!backend.isReady) registerMockExecutor(launchedTasks(0).getTaskId.getValue, "s1", expectedCores) assert(backend.isReady) } test("supports data locality with dynamic allocation") { setBackend(Map( "spark.dynamicAllocation.enabled" -> "true", "spark.dynamicAllocation.testing" -> "true", "spark.locality.wait" -> "1s")) assert(backend.getExecutorIds().isEmpty) backend.requestTotalExecutors(2, 2, Map("hosts10" -> 1, "hosts11" -> 1)) // Offer non-local resources, which should be rejected offerResourcesAndVerify(1, false) offerResourcesAndVerify(2, false) // Offer local resource offerResourcesAndVerify(10, true) // Wait longer than spark.locality.wait Thread.sleep(2000) // Offer non-local resource, which should be accepted offerResourcesAndVerify(1, true) // Update total executors backend.requestTotalExecutors(3, 3, Map("hosts10" -> 1, "hosts11" -> 1, "hosts12" -> 1)) // Offer non-local resources, which should be rejected offerResourcesAndVerify(3, false) // Wait longer than spark.locality.wait Thread.sleep(2000) // Update total executors backend.requestTotalExecutors(4, 4, Map("hosts10" -> 1, "hosts11" -> 1, "hosts12" -> 1, "hosts13" -> 1)) // Offer non-local resources, which should be rejected 
offerResourcesAndVerify(3, false) // Offer local resource offerResourcesAndVerify(13, true) // Wait longer than spark.locality.wait Thread.sleep(2000) // Offer non-local resource, which should be accepted offerResourcesAndVerify(2, true) } private case class Resources(mem: Int, cpus: Int, gpus: Int = 0) private def registerMockExecutor(executorId: String, slaveId: String, cores: Integer) = { val mockEndpointRef = mock[RpcEndpointRef] val mockAddress = mock[RpcAddress] val message = RegisterExecutor(executorId, mockEndpointRef, slaveId, cores, Map.empty) backend.driverEndpoint.askSync[Boolean](message) } private def verifyDeclinedOffer(driver: SchedulerDriver, offerId: OfferID, filter: Boolean = false): Unit = { if (filter) { verify(driver, times(1)).declineOffer(Matchers.eq(offerId), anyObject[Filters]) } else { verify(driver, times(1)).declineOffer(Matchers.eq(offerId)) } } private def offerResources(offers: List[Resources], startId: Int = 1): Unit = { val mesosOffers = offers.zipWithIndex.map {case (offer, i) => createOffer(s"o${i + startId}", s"s${i + startId}", offer.mem, offer.cpus, None, offer.gpus)} backend.resourceOffers(driver, mesosOffers.asJava) } private def offerResourcesAndVerify(id: Int, expectAccept: Boolean): Unit = { offerResources(List(Resources(backend.executorMemory(sc), 1)), id) if (expectAccept) { val numExecutors = backend.getExecutorIds().size val launchedTasks = verifyTaskLaunched(driver, s"o$id") assert(s"s$id" == launchedTasks.head.getSlaveId.getValue) registerMockExecutor(launchedTasks.head.getTaskId.getValue, s"s$id", 1) assert(backend.getExecutorIds().size == numExecutors + 1) } else { verifyTaskNotLaunched(driver, s"o$id") } } private def createTaskStatus(taskId: String, slaveId: String, state: TaskState): TaskStatus = { TaskStatus.newBuilder() .setTaskId(TaskID.newBuilder().setValue(taskId).build()) .setSlaveId(SlaveID.newBuilder().setValue(slaveId).build()) .setState(state) .build } private def createSchedulerBackend( taskScheduler: TaskSchedulerImpl, driver: SchedulerDriver, shuffleClient: MesosExternalShuffleClient) = { val securityManager = mock[SecurityManager] val backend = new MesosCoarseGrainedSchedulerBackend( taskScheduler, sc, "master", securityManager) { override protected def createSchedulerDriver( masterUrl: String, scheduler: Scheduler, sparkUser: String, appName: String, conf: SparkConf, webuiUrl: Option[String] = None, checkpoint: Option[Boolean] = None, failoverTimeout: Option[Double] = None, frameworkId: Option[String] = None): SchedulerDriver = driver override protected def getShuffleClient(): MesosExternalShuffleClient = shuffleClient // override to avoid race condition with the driver thread on `mesosDriver` override def startScheduler(newDriver: SchedulerDriver): Unit = {} override def stopExecutors(): Unit = { stopCalled = true } } backend.start() backend.registered(driver, Utils.TEST_FRAMEWORK_ID, Utils.TEST_MASTER_INFO) backend } private def initializeSparkConf( sparkConfVars: Map[String, String] = null, home: String = "/path"): Unit = { sparkConf = (new SparkConf) .setMaster("local[*]") .setAppName("test-mesos-dynamic-alloc") .set("spark.mesos.driver.webui.url", "http://webui") if (home != null) { sparkConf.setSparkHome(home) } if (sparkConfVars != null) { sparkConf.setAll(sparkConfVars) } } private def setBackend(sparkConfVars: Map[String, String] = null, home: String = "/path") { initializeSparkConf(sparkConfVars, home) sc = new SparkContext(sparkConf) driver = mock[SchedulerDriver] 
    when(driver.start()).thenReturn(Protos.Status.DRIVER_RUNNING)
    taskScheduler = mock[TaskSchedulerImpl]
    when(taskScheduler.sc).thenReturn(sc)
    externalShuffleClient = mock[MesosExternalShuffleClient]
    backend = createSchedulerBackend(taskScheduler, driver, externalShuffleClient)
  }
}
aray/spark
resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala
Scala
apache-2.0
26,574
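The MesosCoarseGrainedSchedulerBackendSuite above drives the backend entirely through synthetic offers and a mocked SchedulerDriver. Below is a sketch of one more test written in the same style; it mirrors the existing "mesos supports spark.executor.cores" test with smaller, illustrative core counts and is not part of the original suite.

  // Illustrative sketch of a test body that could sit inside the suite above.
  // The numbers are examples only; setBackend/offerResources/verifyTaskLaunched
  // are the suite's own helpers shown earlier.
  test("illustrative: spark.executor.cores = 2 claims only two cpus from a larger offer") {
    val executorCores = 2
    setBackend(Map("spark.executor.cores" -> executorCores.toString))

    val executorMemory = backend.executorMemory(sc)
    // Memory for two executors, but only one spare core beyond a single executor's need.
    offerResources(List(Resources(executorMemory * 2, executorCores + 1)))

    val taskInfos = verifyTaskLaunched(driver, "o1")
    assert(taskInfos.length == 1)
    assert(backend.getResource(taskInfos.head.getResourcesList, "cpus") == executorCores)
  }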
package ch08_stack

class BrowserDemo(var currentPageOpt: Option[String], val backStack: StackDemo[String],
                  val forwardStack: StackDemo[String]) {

  def this() = this(None, new StackDemo[String], new StackDemo[String])

  def open(page: String) = {
    currentPageOpt.foreach(backStack.push)
    forwardStack.clear()
    currentPageOpt = Some(page)
  }

  def canGoBack(): Boolean = backStack.size > 0

  def goBack(): Unit = {
    backStack.pop().foreach(page => {
      forwardStack.push(currentPageOpt.get)
      currentPageOpt = Some(page.data)
    })
  }

  def canGoForward(): Boolean = forwardStack.size > 0

  def goForward(): Unit = {
    forwardStack.pop().foreach(page => {
      backStack.push(currentPageOpt.get)
      currentPageOpt = Some(page.data)
    })
  }
}
wangzheng0822/algo
scala/src/main/scala/ch08_stack/BrowserDemo.scala
Scala
apache-2.0
797
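A minimal usage sketch for the BrowserDemo above. It assumes the StackDemo[String] companion from the same ch08_stack package is on the classpath; the page names and the example object itself are placeholders.

import ch08_stack.BrowserDemo

object BrowserDemoApp {
  def main(args: Array[String]): Unit = {
    val browser = new BrowserDemo()
    browser.open("a.com")
    browser.open("b.com")
    browser.open("c.com")           // back stack now holds a.com, b.com; current page is c.com
    if (browser.canGoBack()) browser.goBack()
    println(browser.currentPageOpt) // Some(b.com)
    if (browser.canGoForward()) browser.goForward()
    println(browser.currentPageOpt) // Some(c.com)
  }
}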
package com.ximalaya.ratel.server

import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.Output
import com.ximalaya.ratel.common.{MessageType, RpcRequest}
import io.netty.buffer.ByteBuf
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.MessageToByteEncoder

/**
  * Created by think on 2017/7/22.
  */
class ResponseEncoder(val kryo: Kryo) extends MessageToByteEncoder[RpcRequest] {
  private val INIT_MESSAGE_SIZE: Int = 64 * 1024

  override def encode(channelHandlerContext: ChannelHandlerContext, i: RpcRequest, byteBuf: ByteBuf): Unit = {
    def decode(kryo: Kryo, message: AnyRef): ByteBuf = {
      val output = new Output(INIT_MESSAGE_SIZE, Int.MaxValue)
      kryo.writeObject(output, message)
      val bytes = output.toBytes
      val buf = channelHandlerContext.alloc().buffer(bytes.length + 12)
      buf.writeLong(bytes.length + 4)
      buf.writeInt(MessageType.NORM.id)
      buf.writeBytes(bytes, 0, bytes.length)
      buf
    }
    byteBuf.writeBytes(decode(kryo, i))
  }
}
dongjiaqiang/Ratel
ratel-rpc/src/main/scala/com/ximalaya/ratel/server/ResponseEncoder.scala
Scala
mit
1,040
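A minimal sketch of how an encoder like the ResponseEncoder above is typically installed in a Netty pipeline. The initializer class and handler name are illustrative, not part of the original project; only the standard Netty and Kryo APIs shown are assumed.

import com.esotericsoftware.kryo.Kryo
import com.ximalaya.ratel.server.ResponseEncoder
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel

class ServerChannelInitializer extends ChannelInitializer[SocketChannel] {
  override def initChannel(ch: SocketChannel): Unit = {
    // One Kryo instance per channel, since Kryo instances are not thread-safe.
    val kryo = new Kryo()
    ch.pipeline().addLast("encoder", new ResponseEncoder(kryo))
    // ... a matching decoder and the business handlers would follow here
  }
}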
package com.letstalkdata.scalinear

import scala.reflect.ClassTag

object Vector {
  /**
   * Creates a new Vector that contains the supplied values.
   * @param values the values use to fill the Vector.
   * @tparam T the type of object to be stored in the Vector.
   * @return a new Vector that contains the supplied values.
   */
  def apply[T:ClassTag](values:T*) = {
    new Vector(values.toArray)
  }

  /**
   * Creates a new Vector padded with zeros.
   * @param length the number of zeros to include
   * @tparam T the numeric type that 0 should be cast to
   * @return a new Vector padded with zeros
   */
  def zeros[T:ClassTag](length:Int)(implicit num:Numeric[T]):Vector[T] = {
    new Vector(Array.fill(length) { num.zero })
  }

  /**
   * Creates a new Vector padded with ones.
   * @param length the number of ones to include
   * @tparam T the numeric type that 1 should be cast to
   * @return a new Vector padded with ones
   */
  def ones[T:ClassTag](length:Int)(implicit num:Numeric[T]):Vector[T] = {
    new Vector(Array.fill(length) { num.one })
  }
}

/**
 * A one-dimensional matrix that contains items of a given type T.
 *
 * Although any type of object can be stored in a Vector, numbers make the
 * most sense.
 *
 * Author: Phillip Johnson
 * Date: 7/6/15
 */
class Vector[T] private(private val values:Array[T]) {
  /**
   * Returns the item at the given index.
   * @param index the position whose value will be retrieved.
   * @return the item at the given index.
   */
  def apply(index:Int):T = values(index)

  /**
   * Returns the number of items in the Vector.
   * @return the number of items in the Vector.
   */
  def length:Int = values.length

  /**
   * Returns the Vector as an Array
   * @return the Vector as an Array
   */
  def asArray:Array[T] = values

  /**
   * Returns `true` if this Vector is opposite to the given Vector else `false`.
   *
   * The Vector <em>u</em> is opposite to the Vector <em>v</em> if <em>u</em><sub>1</sub> = -<em>v</em><sub>1</sub>, <em>u</em><sub>2</sub> = -<em>v</em><sub>2</sub>, ..., <em>u</em><sub>n</sub> = -<em>v</em><sub>n</sub>
   *
   * @param that the Vector to compare to this Vector.
   * @return `true` if this Vector is opposite to the given Vector else `false`.
   */
  def isOpposite[S >: T](that:Vector[T])(implicit num:Numeric[S]):Boolean = {
    val thisVals = this.values.zipWithIndex
    this.length == that.length && thisVals.forall(p => p._1 == num.negate(that(p._2)))
  }

  /**
   * Returns the sum of this Vector and a given Vector.
   *
   * This operation adds the elements at each index.
   * For example: `Vector(1,2,3) + Vector(2,2,2)` yields `Vector(3,4,5)`
   *
   * @param that the Vector to add
   * @return the sum of this Vector and a given Vector.
   */
  def +[S >: T:ClassTag](that:Vector[T])(implicit num:Numeric[S]):Vector[S] = {
    require(this.length == that.length, "Vectors must be of same length.")
    val added:Array[S] = this.values.zip(that.values).map(p => num.plus(p._1, p._2))
    new Vector(added)
  }

  /**
   * Returns the sum of this Vector's elements and a given number.
   *
   * Adds the given number to every element in the Vector.
   * For example: `Vector(1,2,3) + 2` yields `Vector(3,4,5)`
   *
   * @param n the number to add
   * @return the resulting Vector
   */
  def +[S >: T:ClassTag](n:T)(implicit num:Numeric[S]):Vector[S] = {
    new Vector(this.values.map(m => num.plus(m, n)))
  }

  /**
   * Returns the difference of this Vector and a given Vector.
   *
   * This operation subtracts the elements at each index.
   * For example: `Vector(1,2,3) - Vector(2,2,2)` yields `Vector(-1,0,1)`
   *
   * @param that the Vector to subtract
   * @return the resulting Vector
   */
  def -[S >: T:ClassTag](that:Vector[T])(implicit num:Numeric[S]):Vector[S] = {
    require(this.length == that.length, "Vectors must be of same length.")
    val minused:Array[S] = this.values.zip(that.values).map(p => num.minus(p._1, p._2))
    new Vector(minused)
  }

  /**
   * Returns the difference of this Vector's elements and a given number.
   *
   * Subtracts the number from every element in the Vector.
   * For example: `Vector(1,2,3) - 2` yields `Vector(-1,0,1)`
   *
   * @param n the number to subtract
   * @return the difference of this Vector's elements and a given number.
   */
  def -[S >: T:ClassTag](n:T)(implicit num:Numeric[S]):Vector[S] = {
    new Vector(this.values.map(m => num.minus(m, n)))
  }

  /**
   * Returns a copy of this Vector scaled by a magnitude.
   *
   * This operation multiplies every element in the Vector by the given magnitude.
   * For example: `Vector(1,2,3) * 2` yields `Vector(2,4,6)`.
   *
   * @param r the number to scale by
   * @return the resulting Vector
   */
  def scaleBy[S >: T:ClassTag](r:S)(implicit num:Numeric[S]):Vector[S] = {
    new Vector(values.map(n => num.times(n, r)))
  }

  /**
   * Returns the quotient of this Vector's elements and a given number.
   *
   * This operation divides every element in the Vector by the given number. For example
   * `Vector(2.0, 4.0, 6.0) / 2` yields `Vector(1.0, 2.0, 3.0)`
   *
   * @param n the number to divide by
   * @return the resulting Vector
   */
  def /[S >: T:ClassTag](n:T)(implicit num:Numeric[S]):Vector[S] = num match {
    case num: Fractional[T] => new Vector(this.values.map(m => num.div(m, n)))
    case num: Integral[T] => new Vector(this.values.map(m => num.quot(m, n)))
    case _ => throw new IllegalArgumentException("Unable to divide type.")
  }

  /**
   * Returns the dot product of the Vectors.
   *
   * Alias for [[dot]].
   *
   * @param that the other Vector to dot product multiply by
   * @return the dot product of the Vectors.
   */
  def ∙[S >: T:ClassTag](that:Vector[T])(implicit num:Numeric[S]):S = {
    dot[S](that)
  }

  /**
   * Returns the dot product of the Vectors.
   * @param that the other Vector to dot product multiply by
   * @return the dot product of the Vectors.
   */
  def dot[S >: T:ClassTag](that:Vector[T])(implicit num:Numeric[S]):S = {
    require(this.length == that.length, "Vectors must be of same length.")
    this.values.zip(that.values).map(p => num.times(p._1, p._2)).sum
  }

  /**
   * Updates the value in-place at a given index.
   * @param index the index to update
   * @param value the new value
   */
  def update(index:Int, value:T):Unit = {
    values.update(index, value)
  }

  /**
   * Returns a new Vector with the value appended
   * @param x the value to append
   * @return a new Vector with the value appended
   */
  def append[S >: T:ClassTag](x: T):Vector[S] = {
    new Vector(values :+ x)
  }

  /**
   * Returns a new Vector extended by the supplied Vector
   * @param xs the Vector to append
   * @return a new Vector extended by the supplied Vector
   */
  def extend[S >: T:ClassTag](xs:Vector[T]):Vector[S] = {
    new Vector(values ++ xs.values)
  }

  override def toString = values mkString("[", ", ", "]")

  def canEqual(other: Any): Boolean = other.isInstanceOf[Vector[T]]

  override def equals(other: Any): Boolean = other match {
    case that: Vector[T] =>
      (that canEqual this) &&
        values.sameElements(that.values)
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(values)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}
phdoerfler/scalinear
src/main/scala/com/letstalkdata/scalinear/Vector.scala
Scala
mit
7,356
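A minimal usage sketch for the scalinear Vector above; the printed results follow directly from the operator semantics documented in the class, and the example object is illustrative.

import com.letstalkdata.scalinear.Vector

object VectorExample extends App {
  val u = Vector(1, 2, 3)
  val v = Vector(2, 2, 2)

  println(u + v)                // [3, 4, 5]
  println(u - 2)                // [-1, 0, 1]
  println(u scaleBy 2)          // [2, 4, 6]
  println(u dot v)              // 12
  println(Vector.zeros[Int](3)) // [0, 0, 0]
}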
/* Copyright (C) 2016 Luis Rodero-Merino
 *
 * This file is part of MxCompanions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
package mxcompanions.circuitbreaker

/**State 'monad'.
 * CREDITS: This implementation is based on the one defined in
 * [[https://www.manning.com/books/functional-programming-in-scala
 * "Functional Programming in Scala" book]] (Chapter 6).
 */
case class State[S,A](run: S => (A, S)) {

  def map[B](f: A => B): State[S,B] = State { st =>
    val (a, st2) = run(st)
    (f(a), st2)
  }

  def flatMap[B](f: A => State[S,B]): State[S,B] = State { st =>
    val (a, st2) = run(st)
    f(a).run(st2)
  }

  def get: State[S,S] = State{ st => (st, st) }

  def set(s: S): State[S, Unit] = State { _ => ((), s) }

  def modify(f: S => S): State[S, Unit] = for {
    s <- get
    _ <- set(f(s))
  } yield()

  def map2[B,Z](sb: State[S,B])(f: (A,B) => Z): State[S,Z] = State { st =>
    val (a, st2) = run(st)
    val (b, st3) = sb.run(st2)
    (f(a,b), st3)
  }

  def unit(a: A): State[S,A] = State pure a
}

object State {

  def pure[S,A](a: A): State[S,A] = State { (st:S) => (a, st) }

  def map[S,A,B](sa: State[S,A])(f: A => B): State[S,B] = sa map f

  def map2[S,A,B,Z](sa: State[S,A], sb: State[S,B])(f: (A,B) => Z): State[S,Z] = sa.map2(sb)(f)

  def flatMap[S,A,B](sa: State[S,A])(f: A => State[S,B]): State[S,B] = sa flatMap f

  def ap[S,A,B](sf: State[S,(A) => B])(sa: State[S,A]): State[S,B] = State { st =>
    val (f, st2) = sf.run(st)
    (sa map f).run(st2)
  }

  /**Trying to get our state 'monad' aligned with monix
   * proper monads.
   */
  import monix.types.Monad
  def stateMonad[S] = new Monad.Instance[({type f[x] = State[S,x]})#f] {
    override def pure[A](a: A) = State pure a
    override def map[A,B](sa: State[S,A])(f: A => B): State[S,B] = State.map(sa)(f)
    override def map2[A,B,Z](sa: State[S,A], sb: State[S,B])(f: (A,B) => Z): State[S,Z] = State.map2(sa,sb)(f)
    override def flatMap[A,B](sa: State[S,A])(f: A => State[S,B]): State[S,B] = State.flatMap(sa)(f)
    override def ap[A,B](sf: State[S,(A) => B])(sa: State[S,A]): State[S,B] = State.ap(sf)(sa)
  }
}
lrodero/mxcompanions
src/main/scala/mxcompanions/circuitbreaker/StateMonad.scala
Scala
gpl-2.0
2,859
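A minimal usage sketch for the State 'monad' above: a counter threaded through a for-comprehension. The example object and values are illustrative and not part of MxCompanions.

import mxcompanions.circuitbreaker.State

object StateExample {
  // Return the current counter value and advance the state by one.
  val tick: State[Int, Int] = State { n => (n, n + 1) }

  // map/flatMap on State make for-comprehensions thread the counter automatically.
  val threeTicks: State[Int, (Int, Int, Int)] = for {
    a <- tick
    b <- tick
    c <- tick
  } yield (a, b, c)

  def main(args: Array[String]): Unit = {
    println(threeTicks.run(0)) // ((0,1,2),3): the observed values plus the final state
  }
}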
package top.spoofer.hbrdd.config

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration

class HbRddConfig(config: Configuration) extends Serializable {
  def getHbaseConfig = HBaseConfiguration.create(config)
}

object HbRddConfig {
  type configOption = (String, String)
  private[HbRddConfig] case class HbaseOption(name: String, value: String)

  def apply(config: Configuration): HbRddConfig = new HbRddConfig(config)

  def apply(configs: configOption*): HbRddConfig = {
    val hbConfig = HBaseConfiguration.create()

    for {
      option <- configs
      hbOption = HbaseOption(option._1, option._2) // the extra case class is only used to make the intent clearer
    } hbConfig.set(hbOption.name, hbOption.value)

    this.apply(hbConfig)
  }

  def apply(configs: { def rootDir: String; def quorum: String }): HbRddConfig = {
    apply(
      "hbase.rootdir" -> configs.rootDir,
      "hbase.zookeeper.quorum" -> configs.quorum
    )
  }

  def apply(configs: Map[String, String]): HbRddConfig = {
    val hbConfig = HBaseConfiguration.create()
    configs.keys foreach { name => hbConfig.set(name, configs(name)) }

    this.apply(hbConfig)
  }

  def apply(configs: TraversableOnce[configOption]): HbRddConfig = {
    val hbConfig = HBaseConfiguration.create()
    configs foreach { option =>
      val hbOption = HbaseOption(option._1, option._2)
      hbConfig.set(hbOption.name, hbOption.value)
    }

    this.apply(hbConfig)
  }
}
TopSpoofer/hbrdd
src/main/scala/top/spoofer/hbrdd/config/HbRddConfig.scala
Scala
apache-2.0
1,494
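A minimal usage sketch for HbRddConfig's overloaded constructors. The HDFS and ZooKeeper addresses and the HbSettings helper are placeholders, not part of the hbrdd project.

import top.spoofer.hbrdd.config.HbRddConfig

object HbRddConfigExample {
  def main(args: Array[String]): Unit = {
    // Either pass options as (name, value) pairs...
    val byPairs = HbRddConfig(
      "hbase.rootdir" -> "hdfs://namenode:8020/hbase",
      "hbase.zookeeper.quorum" -> "zk1,zk2,zk3"
    )

    // ...or use the structural-type overload with any object exposing rootDir/quorum.
    case class HbSettings(rootDir: String, quorum: String)
    val byStruct = HbRddConfig(HbSettings("hdfs://namenode:8020/hbase", "zk1,zk2,zk3"))

    println(byPairs.getHbaseConfig.get("hbase.zookeeper.quorum"))
    println(byStruct.getHbaseConfig.get("hbase.rootdir"))
  }
}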
/*
Copyright (c) 2012-2014, Université de Lorraine, Nancy, France
Copyright (c) 2014-2015, Christophe Calvès
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of Université de Lorraine nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

package zk.examples

/**
 * Created by christophe on 28/02/14.
 */
object COOL {

  abstract class Type
  case class ClassType(name : String) extends Type
  case object SelfType extends Type

  case class TypeDeclaration(name : String, theType : Type) extends Feature

  case class Class(name : Type, inherits : Option[Type], features : List[Feature])

  type Params = List[TypeDeclaration]

  abstract class Feature
  case class Var(name : String, theType : Type) extends Feature
  case class Method(name : String, params : Params, theType : Type) extends Feature

  abstract class Expr
  case object Self extends Expr
  case class New(theType : Type, params : Params) extends Expr
  case class Dot(expression : Expr, attribute : String, params : Option[Params]) extends Expr
  case class Seq(expressions : List[Expr]) extends Expr

  // Conformance
}
christophe-calves/zk.scala
scala/src/main/scala/zk/examples/COOL.scala
Scala
bsd-3-clause
2,536
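A minimal sketch that builds a small class declaration with the COOL AST above; the class and member names are illustrative and not part of the original project.

import zk.examples.COOL._

object COOLExample {
  // A class "Counter" inheriting from "Object", with one attribute and one method.
  val counter = Class(
    ClassType("Counter"),
    inherits = Some(ClassType("Object")),
    features = List(
      Var("count", ClassType("Int")),
      Method("incr", List(TypeDeclaration("by", ClassType("Int"))), SelfType)
    )
  )

  def main(args: Array[String]): Unit = println(counter)
}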
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.concurrent.TimeUnit import scala.collection.mutable import scala.util.control.ControlThrowable import com.codahale.metrics.{Gauge, MetricRegistry} import org.apache.spark.internal.Logging import org.apache.spark.internal.config.{DYN_ALLOCATION_MAX_EXECUTORS, DYN_ALLOCATION_MIN_EXECUTORS} import org.apache.spark.metrics.source.Source import org.apache.spark.scheduler._ import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils} /** * An agent that dynamically allocates and removes executors based on the workload. * * The ExecutorAllocationManager maintains a moving target number of executors which is periodically * synced to the cluster manager. The target starts at a configured initial value and changes with * the number of pending and running tasks. * * Decreasing the target number of executors happens when the current target is more than needed to * handle the current load. The target number of executors is always truncated to the number of * executors that could run all current running and pending tasks at once. * * Increasing the target number of executors happens in response to backlogged tasks waiting to be * scheduled. If the scheduler queue is not drained in N seconds, then new executors are added. If * the queue persists for another M seconds, then more executors are added and so on. The number * added in each round increases exponentially from the previous round until an upper bound has been * reached. The upper bound is based both on a configured property and on the current number of * running and pending tasks, as described above. * * The rationale for the exponential increase is twofold: (1) Executors should be added slowly * in the beginning in case the number of extra executors needed turns out to be small. Otherwise, * we may add more executors than we need just to remove them later. (2) Executors should be added * quickly over time in case the maximum number of executors is very high. Otherwise, it will take * a long time to ramp up under heavy workloads. * * The remove policy is simpler: If an executor has been idle for K seconds, meaning it has not * been scheduled to run any tasks, then it is removed. * * There is no retry logic in either case because we make the assumption that the cluster manager * will eventually fulfill all requests it receives asynchronously. 
* * The relevant Spark properties include the following: * * spark.dynamicAllocation.enabled - Whether this feature is enabled * spark.dynamicAllocation.minExecutors - Lower bound on the number of executors * spark.dynamicAllocation.maxExecutors - Upper bound on the number of executors * spark.dynamicAllocation.initialExecutors - Number of executors to start with * * spark.dynamicAllocation.schedulerBacklogTimeout (M) - * If there are backlogged tasks for this duration, add new executors * * spark.dynamicAllocation.sustainedSchedulerBacklogTimeout (N) - * If the backlog is sustained for this duration, add more executors * This is used only after the initial backlog timeout is exceeded * * spark.dynamicAllocation.executorIdleTimeout (K) - * If an executor has been idle for this duration, remove it */ private[spark] class ExecutorAllocationManager( client: ExecutorAllocationClient, listenerBus: LiveListenerBus, conf: SparkConf) extends Logging { allocationManager => import ExecutorAllocationManager._ // Lower and upper bounds on the number of executors. private val minNumExecutors = conf.get(DYN_ALLOCATION_MIN_EXECUTORS) private val maxNumExecutors = conf.get(DYN_ALLOCATION_MAX_EXECUTORS) private val initialNumExecutors = Utils.getDynamicAllocationInitialExecutors(conf) // How long there must be backlogged tasks for before an addition is triggered (seconds) private val schedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.schedulerBacklogTimeout", "1s") // Same as above, but used only after `schedulerBacklogTimeoutS` is exceeded private val sustainedSchedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", s"${schedulerBacklogTimeoutS}s") // How long an executor must be idle for before it is removed (seconds) private val executorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.executorIdleTimeout", "60s") private val cachedExecutorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.cachedExecutorIdleTimeout", s"${Integer.MAX_VALUE}s") // During testing, the methods to actually kill and add executors are mocked out private val testing = conf.getBoolean("spark.dynamicAllocation.testing", false) // TODO: The default value of 1 for spark.executor.cores works right now because dynamic // allocation is only supported for YARN and the default number of cores per executor in YARN is // 1, but it might need to be attained differently for different cluster managers private val tasksPerExecutor = conf.getInt("spark.executor.cores", 1) / conf.getInt("spark.task.cpus", 1) validateSettings() // Number of executors to add in the next round private var numExecutorsToAdd = 1 // The desired number of executors at this moment in time. If all our executors were to die, this // is the number of executors we would immediately want from the cluster manager. 
private var numExecutorsTarget = initialNumExecutors // Executors that have been requested to be removed but have not been killed yet private val executorsPendingToRemove = new mutable.HashSet[String] // All known executors private val executorIds = new mutable.HashSet[String] // A timestamp of when an addition should be triggered, or NOT_SET if it is not set // This is set when pending tasks are added but not scheduled yet private var addTime: Long = NOT_SET // A timestamp for each executor of when the executor should be removed, indexed by the ID // This is set when an executor is no longer running a task, or when it first registers private val removeTimes = new mutable.HashMap[String, Long] // Polling loop interval (ms) private val intervalMillis: Long = 100 // Clock used to schedule when executors should be added and removed private var clock: Clock = new SystemClock() // Listener for Spark events that impact the allocation policy private val listener = new ExecutorAllocationListener // Executor that handles the scheduling task. private val executor = ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-dynamic-executor-allocation") // Metric source for ExecutorAllocationManager to expose internal status to MetricsSystem. val executorAllocationManagerSource = new ExecutorAllocationManagerSource // Whether we are still waiting for the initial set of executors to be allocated. // While this is true, we will not cancel outstanding executor requests. This is // set to false when: // (1) a stage is submitted, or // (2) an executor idle timeout has elapsed. @volatile private var initializing: Boolean = true // Number of locality aware tasks, used for executor placement. private var localityAwareTasks = 0 // Host to possible task running on it, used for executor placement. private var hostToLocalTaskCount: Map[String, Int] = Map.empty /** * Verify that the settings specified through the config are valid. * If not, throw an appropriate exception. */ private def validateSettings(): Unit = { if (minNumExecutors < 0 || maxNumExecutors < 0) { throw new SparkException("spark.dynamicAllocation.{min/max}Executors must be positive!") } if (maxNumExecutors == 0) { throw new SparkException("spark.dynamicAllocation.maxExecutors cannot be 0!") } if (minNumExecutors > maxNumExecutors) { throw new SparkException(s"spark.dynamicAllocation.minExecutors ($minNumExecutors) must " + s"be less than or equal to spark.dynamicAllocation.maxExecutors ($maxNumExecutors)!") } if (schedulerBacklogTimeoutS <= 0) { throw new SparkException("spark.dynamicAllocation.schedulerBacklogTimeout must be > 0!") } if (sustainedSchedulerBacklogTimeoutS <= 0) { throw new SparkException( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout must be > 0!") } if (executorIdleTimeoutS <= 0) { throw new SparkException("spark.dynamicAllocation.executorIdleTimeout must be > 0!") } // Require external shuffle service for dynamic allocation // Otherwise, we may lose shuffle files when killing executors if (!conf.getBoolean("spark.shuffle.service.enabled", false) && !testing) { throw new SparkException("Dynamic allocation of executors requires the external " + "shuffle service. You may enable this through spark.shuffle.service.enabled.") } if (tasksPerExecutor == 0) { throw new SparkException("spark.executor.cores must not be less than spark.task.cpus.") } } /** * Use a different clock for this allocation manager. This is mainly used for testing. 
*/ def setClock(newClock: Clock): Unit = { clock = newClock } /** * Register for scheduler callbacks to decide when to add and remove executors, and start * the scheduling task. */ def start(): Unit = { listenerBus.addListener(listener) val scheduleTask = new Runnable() { override def run(): Unit = { try { schedule() } catch { case ct: ControlThrowable => throw ct case t: Throwable => logWarning(s"Uncaught exception in thread ${Thread.currentThread().getName}", t) } } } executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } /** * Stop the allocation manager. */ def stop(): Unit = { executor.shutdown() executor.awaitTermination(10, TimeUnit.SECONDS) } /** * Reset the allocation manager to the initial state. Currently this will only be called in * yarn-client mode when AM re-registers after a failure. */ def reset(): Unit = synchronized { initializing = true numExecutorsTarget = initialNumExecutors numExecutorsToAdd = 1 executorsPendingToRemove.clear() removeTimes.clear() } /** * The maximum number of executors we would need under the current load to satisfy all running * and pending tasks, rounded up. */ private def maxNumExecutorsNeeded(): Int = { val numRunningOrPendingTasks = listener.totalPendingTasks + listener.totalRunningTasks (numRunningOrPendingTasks + tasksPerExecutor - 1) / tasksPerExecutor } /** * This is called at a fixed interval to regulate the number of pending executor requests * and number of executors running. * * First, adjust our requested executors based on the add time and our current needs. * Then, if the remove time for an existing executor has expired, kill the executor. * * This is factored out into its own method for testing. */ private def schedule(): Unit = synchronized { val now = clock.getTimeMillis updateAndSyncNumExecutorsTarget(now) removeTimes.retain { case (executorId, expireTime) => val expired = now >= expireTime if (expired) { initializing = false removeExecutor(executorId) } !expired } } /** * Updates our target number of executors and syncs the result with the cluster manager. * * Check to see whether our existing allocation and the requests we've made previously exceed our * current needs. If so, truncate our target and let the cluster manager know so that it can * cancel pending requests that are unneeded. * * If not, and the add time has expired, see if we can request new executors and refresh the add * time. * * @return the delta in the target number of executors. 
*/ private def updateAndSyncNumExecutorsTarget(now: Long): Int = synchronized { val maxNeeded = maxNumExecutorsNeeded if (initializing) { // Do not change our target while we are still initializing, // Otherwise the first job may have to ramp up unnecessarily 0 } else if (maxNeeded < numExecutorsTarget) { // The target number exceeds the number we actually need, so stop adding new // executors and inform the cluster manager to cancel the extra pending requests val oldNumExecutorsTarget = numExecutorsTarget numExecutorsTarget = math.max(maxNeeded, minNumExecutors) numExecutorsToAdd = 1 // If the new target has not changed, avoid sending a message to the cluster manager if (numExecutorsTarget < oldNumExecutorsTarget) { client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) logDebug(s"Lowering target number of executors to $numExecutorsTarget (previously " + s"$oldNumExecutorsTarget) because not all requested executors are actually needed") } numExecutorsTarget - oldNumExecutorsTarget } else if (addTime != NOT_SET && now >= addTime) { val delta = addExecutors(maxNeeded) logDebug(s"Starting timer to add more executors (to " + s"expire in $sustainedSchedulerBacklogTimeoutS seconds)") addTime += sustainedSchedulerBacklogTimeoutS * 1000 delta } else { 0 } } /** * Request a number of executors from the cluster manager. * If the cap on the number of executors is reached, give up and reset the * number of executors to add next round instead of continuing to double it. * * @param maxNumExecutorsNeeded the maximum number of executors all currently running or pending * tasks could fill * @return the number of additional executors actually requested. */ private def addExecutors(maxNumExecutorsNeeded: Int): Int = { // Do not request more executors if it would put our target over the upper bound if (numExecutorsTarget >= maxNumExecutors) { logDebug(s"Not adding executors because our current target total " + s"is already $numExecutorsTarget (limit $maxNumExecutors)") numExecutorsToAdd = 1 return 0 } val oldNumExecutorsTarget = numExecutorsTarget // There's no point in wasting time ramping up to the number of executors we already have, so // make sure our target is at least as much as our current allocation: numExecutorsTarget = math.max(numExecutorsTarget, executorIds.size) // Boost our target with the number to add for this round: numExecutorsTarget += numExecutorsToAdd // Ensure that our target doesn't exceed what we need at the present moment: numExecutorsTarget = math.min(numExecutorsTarget, maxNumExecutorsNeeded) // Ensure that our target fits within configured bounds: numExecutorsTarget = math.max(math.min(numExecutorsTarget, maxNumExecutors), minNumExecutors) val delta = numExecutorsTarget - oldNumExecutorsTarget // If our target has not changed, do not send a message // to the cluster manager and reset our exponential growth if (delta == 0) { numExecutorsToAdd = 1 return 0 } val addRequestAcknowledged = testing || client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) if (addRequestAcknowledged) { val executorsString = "executor" + { if (delta > 1) "s" else "" } logInfo(s"Requesting $delta new $executorsString because tasks are backlogged" + s" (new desired total will be $numExecutorsTarget)") numExecutorsToAdd = if (delta == numExecutorsToAdd) { numExecutorsToAdd * 2 } else { 1 } delta } else { logWarning( s"Unable to reach the cluster manager to request $numExecutorsTarget total executors!") numExecutorsTarget = 
oldNumExecutorsTarget 0 } } /** * Request the cluster manager to remove the given executor. * Return whether the request is received. */ private def removeExecutor(executorId: String): Boolean = synchronized { // Do not kill the executor if we are not aware of it (should never happen) if (!executorIds.contains(executorId)) { logWarning(s"Attempted to remove unknown executor $executorId!") return false } // Do not kill the executor again if it is already pending to be killed (should never happen) if (executorsPendingToRemove.contains(executorId)) { logWarning(s"Attempted to remove executor $executorId " + s"when it is already pending to be removed!") return false } // Do not kill the executor if we have already reached the lower bound val numExistingExecutors = executorIds.size - executorsPendingToRemove.size if (numExistingExecutors - 1 < minNumExecutors) { logDebug(s"Not removing idle executor $executorId because there are only " + s"$numExistingExecutors executor(s) left (limit $minNumExecutors)") return false } // Send a request to the backend to kill this executor val removeRequestAcknowledged = testing || client.killExecutor(executorId) if (removeRequestAcknowledged) { logInfo(s"Removing executor $executorId because it has been idle for " + s"$executorIdleTimeoutS seconds (new desired total will be ${numExistingExecutors - 1})") executorsPendingToRemove.add(executorId) true } else { logWarning(s"Unable to reach the cluster manager to kill executor $executorId," + s"or no executor eligible to kill!") false } } /** * Callback invoked when the specified executor has been added. */ private def onExecutorAdded(executorId: String): Unit = synchronized { if (!executorIds.contains(executorId)) { executorIds.add(executorId) // If an executor (call this executor X) is not removed because the lower bound // has been reached, it will no longer be marked as idle. When new executors join, // however, we are no longer at the lower bound, and so we must mark executor X // as idle again so as not to forget that it is a candidate for removal. (see SPARK-4951) executorIds.filter(listener.isExecutorIdle).foreach(onExecutorIdle) logInfo(s"New executor $executorId has registered (new total is ${executorIds.size})") } else { logWarning(s"Duplicate executor $executorId has registered") } } /** * Callback invoked when the specified executor has been removed. */ private def onExecutorRemoved(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { executorIds.remove(executorId) removeTimes.remove(executorId) logInfo(s"Existing executor $executorId has been removed (new total is ${executorIds.size})") if (executorsPendingToRemove.contains(executorId)) { executorsPendingToRemove.remove(executorId) logDebug(s"Executor $executorId is no longer pending to " + s"be removed (${executorsPendingToRemove.size} left)") } } else { logWarning(s"Unknown executor $executorId has been removed!") } } /** * Callback invoked when the scheduler receives new pending tasks. * This sets a time in the future that decides when executors should be added * if it is not already set. */ private def onSchedulerBacklogged(): Unit = synchronized { if (addTime == NOT_SET) { logDebug(s"Starting timer to add executors because pending tasks " + s"are building up (to expire in $schedulerBacklogTimeoutS seconds)") addTime = clock.getTimeMillis + schedulerBacklogTimeoutS * 1000 } } /** * Callback invoked when the scheduler queue is drained. * This resets all variables used for adding executors. 
*/ private def onSchedulerQueueEmpty(): Unit = synchronized { logDebug("Clearing timer to add executors because there are no more pending tasks") addTime = NOT_SET numExecutorsToAdd = 1 } /** * Callback invoked when the specified executor is no longer running any tasks. * This sets a time in the future that decides when this executor should be removed if * the executor is not already marked as idle. */ private def onExecutorIdle(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { if (!removeTimes.contains(executorId) && !executorsPendingToRemove.contains(executorId)) { // Note that it is not necessary to query the executors since all the cached // blocks we are concerned with are reported to the driver. Note that this // does not include broadcast blocks. val hasCachedBlocks = SparkEnv.get.blockManager.master.hasCachedBlocks(executorId) val now = clock.getTimeMillis() val timeout = { if (hasCachedBlocks) { // Use a different timeout if the executor has cached blocks. now + cachedExecutorIdleTimeoutS * 1000 } else { now + executorIdleTimeoutS * 1000 } } val realTimeout = if (timeout <= 0) Long.MaxValue else timeout // overflow removeTimes(executorId) = realTimeout logDebug(s"Starting idle timer for $executorId because there are no more tasks " + s"scheduled to run on the executor (to expire in ${(realTimeout - now)/1000} seconds)") } } else { logWarning(s"Attempted to mark unknown executor $executorId idle") } } /** * Callback invoked when the specified executor is now running a task. * This resets all variables used for removing this executor. */ private def onExecutorBusy(executorId: String): Unit = synchronized { logDebug(s"Clearing idle timer for $executorId because it is now running a task") removeTimes.remove(executorId) } /** * A listener that notifies the given allocation manager of when to add and remove executors. * * This class is intentionally conservative in its assumptions about the relative ordering * and consistency of events returned by the listener. For simplicity, it does not account * for speculated tasks. */ private class ExecutorAllocationListener extends SparkListener { private val stageIdToNumTasks = new mutable.HashMap[Int, Int] private val stageIdToTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]] private val executorIdToTaskIds = new mutable.HashMap[String, mutable.HashSet[Long]] // Number of tasks currently running on the cluster. Should be 0 when no stages are active. private var numRunningTasks: Int = _ // stageId to tuple (the number of task with locality preferences, a map where each pair is a // node and the number of tasks that would like to be scheduled on that node) map, // maintain the executor placement hints for each stage Id used by resource framework to better // place the executors. 
private val stageIdToExecutorPlacementHints = new mutable.HashMap[Int, (Int, Map[String, Int])] override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { initializing = false val stageId = stageSubmitted.stageInfo.stageId val numTasks = stageSubmitted.stageInfo.numTasks allocationManager.synchronized { stageIdToNumTasks(stageId) = numTasks allocationManager.onSchedulerBacklogged() // Compute the number of tasks requested by the stage on each host var numTasksPending = 0 val hostToLocalTaskCountPerStage = new mutable.HashMap[String, Int]() stageSubmitted.stageInfo.taskLocalityPreferences.foreach { locality => if (!locality.isEmpty) { numTasksPending += 1 locality.foreach { location => val count = hostToLocalTaskCountPerStage.getOrElse(location.host, 0) + 1 hostToLocalTaskCountPerStage(location.host) = count } } } stageIdToExecutorPlacementHints.put(stageId, (numTasksPending, hostToLocalTaskCountPerStage.toMap)) // Update the executor placement hints updateExecutorPlacementHints() } } override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { val stageId = stageCompleted.stageInfo.stageId allocationManager.synchronized { stageIdToNumTasks -= stageId stageIdToTaskIndices -= stageId stageIdToExecutorPlacementHints -= stageId // Update the executor placement hints updateExecutorPlacementHints() // If this is the last stage with pending tasks, mark the scheduler queue as empty // This is needed in case the stage is aborted for any reason if (stageIdToNumTasks.isEmpty) { allocationManager.onSchedulerQueueEmpty() if (numRunningTasks != 0) { logWarning("No stages are running, but numRunningTasks != 0") numRunningTasks = 0 } } } } override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { val stageId = taskStart.stageId val taskId = taskStart.taskInfo.taskId val taskIndex = taskStart.taskInfo.index val executorId = taskStart.taskInfo.executorId allocationManager.synchronized { numRunningTasks += 1 // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. (see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } // If this is the last pending task, mark the scheduler queue as empty stageIdToTaskIndices.getOrElseUpdate(stageId, new mutable.HashSet[Int]) += taskIndex if (totalPendingTasks() == 0) { allocationManager.onSchedulerQueueEmpty() } // Mark the executor on which this task is scheduled as busy executorIdToTaskIds.getOrElseUpdate(executorId, new mutable.HashSet[Long]) += taskId allocationManager.onExecutorBusy(executorId) } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { val executorId = taskEnd.taskInfo.executorId val taskId = taskEnd.taskInfo.taskId val taskIndex = taskEnd.taskInfo.index val stageId = taskEnd.stageId allocationManager.synchronized { numRunningTasks -= 1 // If the executor is no longer running any scheduled tasks, mark it as idle if (executorIdToTaskIds.contains(executorId)) { executorIdToTaskIds(executorId) -= taskId if (executorIdToTaskIds(executorId).isEmpty) { executorIdToTaskIds -= executorId allocationManager.onExecutorIdle(executorId) } } // If the task failed, we expect it to be resubmitted later. 
To ensure we have // enough resources to run the resubmitted task, we need to mark the scheduler // as backlogged again if it's not already marked as such (SPARK-8366) if (taskEnd.reason != Success) { if (totalPendingTasks() == 0) { allocationManager.onSchedulerBacklogged() } stageIdToTaskIndices.get(stageId).foreach { _.remove(taskIndex) } } } } override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { val executorId = executorAdded.executorId if (executorId != SparkContext.DRIVER_IDENTIFIER) { // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. (see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } } } override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { allocationManager.onExecutorRemoved(executorRemoved.executorId) } /** * An estimate of the total number of pending tasks remaining for currently running stages. Does * not account for tasks which may have failed and been resubmitted. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. */ def totalPendingTasks(): Int = { stageIdToNumTasks.map { case (stageId, numTasks) => numTasks - stageIdToTaskIndices.get(stageId).map(_.size).getOrElse(0) }.sum } /** * The number of tasks currently running across all stages. */ def totalRunningTasks(): Int = numRunningTasks /** * Return true if an executor is not currently running a task, and false otherwise. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. */ def isExecutorIdle(executorId: String): Boolean = { !executorIdToTaskIds.contains(executorId) } /** * Update the Executor placement hints (the number of tasks with locality preferences, * a map where each pair is a node and the number of tasks that would like to be scheduled * on that node). * * These hints are updated when stages arrive and complete, so are not up-to-date at task * granularity within stages. */ def updateExecutorPlacementHints(): Unit = { var localityAwareTasks = 0 val localityToCount = new mutable.HashMap[String, Int]() stageIdToExecutorPlacementHints.values.foreach { case (numTasksPending, localities) => localityAwareTasks += numTasksPending localities.foreach { case (hostname, count) => val updatedCount = localityToCount.getOrElse(hostname, 0) + count localityToCount(hostname) = updatedCount } } allocationManager.localityAwareTasks = localityAwareTasks allocationManager.hostToLocalTaskCount = localityToCount.toMap } } /** * Metric source for ExecutorAllocationManager to expose its internal executor allocation * status to MetricsSystem. * Note: These metrics heavily rely on the internal implementation of * ExecutorAllocationManager, metrics or value of metrics will be changed when internal * implementation is changed, so these metrics are not stable across Spark version. 
*/ private[spark] class ExecutorAllocationManagerSource extends Source { val sourceName = "ExecutorAllocationManager" val metricRegistry = new MetricRegistry() private def registerGauge[T](name: String, value: => T, defaultValue: T): Unit = { metricRegistry.register(MetricRegistry.name("executors", name), new Gauge[T] { override def getValue: T = synchronized { Option(value).getOrElse(defaultValue) } }) } registerGauge("numberExecutorsToAdd", numExecutorsToAdd, 0) registerGauge("numberExecutorsPendingToRemove", executorsPendingToRemove.size, 0) registerGauge("numberAllExecutors", executorIds.size, 0) registerGauge("numberTargetExecutors", numExecutorsTarget, 0) registerGauge("numberMaxNeededExecutors", maxNumExecutorsNeeded(), 0) } } private object ExecutorAllocationManager { val NOT_SET = Long.MaxValue }
gioenn/xSpark
core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
Scala
apache-2.0
32,356
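The totalPendingTasks bookkeeping in the listener above reduces to simple map arithmetic: for each stage, the pending count is the number of submitted tasks minus the task indices already started. A minimal, self-contained sketch of that computation with made-up stage data (not taken from Spark):

import scala.collection.mutable

object PendingTasksSketch extends App {
  // hypothetical bookkeeping: stage 0 has 4 tasks (2 already started), stage 1 has 2 tasks (none started)
  val stageIdToNumTasks = mutable.HashMap(0 -> 4, 1 -> 2)
  val stageIdToTaskIndices = mutable.HashMap(0 -> mutable.HashSet(0, 1))

  // mirrors ExecutorAllocationListener.totalPendingTasks()
  def totalPendingTasks(): Int =
    stageIdToNumTasks.map { case (stageId, numTasks) =>
      numTasks - stageIdToTaskIndices.get(stageId).map(_.size).getOrElse(0)
    }.sum

  println(totalPendingTasks()) // (4 - 2) + (2 - 0) = 4
}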
/*********************************************************************** * Copyright (c) 2013-2016 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.accumulo.util import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.{Executors, TimeUnit} import com.google.common.collect.Queues import com.typesafe.scalalogging.LazyLogging import org.apache.accumulo.core.client.{BatchScanner, ScannerBase} import org.apache.accumulo.core.data.{Key, Range => AccRange, Value} import scala.collection.JavaConversions._ class BatchMultiScanner(in: ScannerBase, out: BatchScanner, joinFn: java.util.Map.Entry[Key, Value] => AccRange, batchSize: Int = 32768) extends Iterable[java.util.Map.Entry[Key, Value]] with AutoCloseable with LazyLogging { if(batchSize < 1) { throw new IllegalArgumentException(f"Illegal batchSize($batchSize%d). Value must be > 0") } logger.trace(f"Creating BatchMultiScanner with batchSize $batchSize%d") type KVEntry = java.util.Map.Entry[Key, Value] val inExecutor = Executors.newSingleThreadExecutor() val outExecutor = Executors.newSingleThreadExecutor() val inQ = Queues.newLinkedBlockingQueue[KVEntry](batchSize) val outQ = Queues.newArrayBlockingQueue[KVEntry](batchSize) val inDone = new AtomicBoolean(false) val outDone = new AtomicBoolean(false) inExecutor.submit(new Runnable { override def run(): Unit = { try { in.iterator().foreach(inQ.put) } finally { inDone.set(true) } } }) def mightHaveAnother = !inDone.get || !inQ.isEmpty outExecutor.submit(new Runnable { override def run(): Unit = { try { while (mightHaveAnother) { val entry = inQ.poll(5, TimeUnit.MILLISECONDS) if (entry != null) { val entries = new collection.mutable.ListBuffer[KVEntry]() inQ.drainTo(entries) val ranges = (List(entry) ++ entries).map(joinFn) out.setRanges(ranges) out.iterator().foreach(outQ.put) } } } catch { case _: InterruptedException => } finally { outDone.set(true) } } }) override def close() { if (!inExecutor.isShutdown) inExecutor.shutdownNow() if (!outExecutor.isShutdown) outExecutor.shutdownNow() in.close() out.close() } override def iterator: Iterator[KVEntry] = new Iterator[KVEntry] { var prefetch: KVEntry = null // Indicate there MAY be one more in the outQ but not for sure def mightHaveAnother = !outDone.get || !outQ.isEmpty def prefetchIfNull() = { if (prefetch == null) { // loop while we might have another and we haven't set prefetch while (mightHaveAnother && prefetch == null) { prefetch = outQ.poll } } } // must attempt a prefetch since we don't know whether or not the outQ // will actually be filled with an item (filters may not match and the // in scanner may never return a range) override def hasNext(): Boolean = { prefetchIfNull() prefetch != null } override def next(): KVEntry = { prefetchIfNull() val ret = prefetch prefetch = null ret } } }
mdzimmerman/geomesa
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/util/BatchMultiScanner.scala
Scala
apache-2.0
3,630
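The iterator at the end of BatchMultiScanner is a general pattern: a background producer fills a blocking queue and flips a done flag, and the consuming iterator must keep polling as long as either signal says more data may still arrive. A stripped-down sketch of just that pattern with Accumulo removed (the names here are illustrative, not part of GeoMesa):

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicBoolean

// `done` is set by the producer after its last put; the queue may still hold items at that point
class QueueBackedIterator[A](queue: LinkedBlockingQueue[A], done: AtomicBoolean) extends Iterator[A] {
  private var prefetch: Option[A] = None

  private def mightHaveAnother: Boolean = !done.get || !queue.isEmpty

  // poll until we either hold an element or both signals say the stream is exhausted
  private def prefetchIfEmpty(): Unit =
    while (prefetch.isEmpty && mightHaveAnother) {
      prefetch = Option(queue.poll())
    }

  override def hasNext: Boolean = { prefetchIfEmpty(); prefetch.isDefined }

  override def next(): A = {
    prefetchIfEmpty()
    val result = prefetch.getOrElse(throw new NoSuchElementException("next on empty iterator"))
    prefetch = None
    result
  }
}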
package models.analysis.events import models.analysis.ActorStamp import org.joda.time.DateTime import play.api.libs.json.{Format, Json} /** * Analysis' performed on sample objects may e.g. be part of a larger study. In * some of these cases it's desirable for the result(s) to have restrictions on * visibility for a set duration of time. Only when these restrictions are lifted, * typically when the study is published, will the results be publicly available. */ case class Restriction( requester: String, expirationDate: DateTime, reason: String, caseNumbers: Option[Seq[String]] = None, registeredStamp: Option[ActorStamp] = None, cancelledStamp: Option[ActorStamp] = None, cancelledReason: Option[String] = None ) object Restriction { implicit val f: Format[Restriction] = Json.format[Restriction] }
kpmeen/musit
service_management/app/models/analysis/events/Restriction.scala
Scala
gpl-2.0
844
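Since the companion only derives a play-json Format, using Restriction is a plain round trip. A small sketch, assuming the same implicit Joda DateTime (de)serializers that the derived format relies on are in scope, and with made-up field values; the optional stamps keep their None defaults:

import models.analysis.events.Restriction
import org.joda.time.DateTime
import play.api.libs.json.Json

object RestrictionJsonExample extends App {
  // hypothetical restriction awaiting publication of the study
  val restriction = Restriction(
    requester = "researcher-1",
    expirationDate = DateTime.now().plusMonths(6),
    reason = "results embargoed until the study is published"
  )

  val js = Json.toJson(restriction)       // uses the implicit Format from the companion
  val parsed = js.validate[Restriction]   // JsSuccess(restriction, ...) if nothing was lost
  println(js)
  println(parsed)
}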
package edu.osu.cse.groenkeb.logic.model case class ModelException(msg: String) extends Exception(msg)
bgroenks96/PropLogic
modelvf/src/main/scala/edu/osu/cse/groenkeb/logic/model/ModelException.scala
Scala
mit
103
package scala2ch.models.stream import akka.stream.{ActorMaterializer} import akka.stream.scaladsl._ import org.joda.time.DateTime import scalikejdbc._ import scala2ch.models.{ThreadTag, Thread, Tag} object ThreadFlow { case class Create(ownerId: Long, title: String, createdAt: DateTime) case class Delete(id: Long, ownerId: Long) case class Result(thread: Thread, tags: List[Tag]) } trait ThreadFlow { import ThreadFlow._ object TagStream extends TagFlow object ThreadTagStream extends ThreadTagFlow def selectFlow(implicit session: DBSession) = Flow[Long].map { id => import ThreadTag._ import Tag._ Thread.find(id) match { case Some(thread) => { val tags = sql"SELECT ${t.id}, ${t.name}, ${t.createdAt} FROM ${Tag as t} LEFT JOIN ${ThreadTag as tt} ON ${tt.threadId} = ${t.id} WHERE ${tt.threadId} = $id" .map(rs => Tag(rs.long(1), rs.string(2), rs.jodaDateTime(3))).list().apply() Some(Result(thread, tags)) } case None => None } } def createFlow(implicit session: DBSession) = Flow[Create].map { case Create(ownerId, title, createdAt) => Thread.create(ownerId, title, createdAt)(session) } def deleteFlow(implicit session: DBSession) = Flow[Delete].map { req => sql"""DELETE FROM ${Thread.table} WHERE ${Thread.column.id} = ${req.id} AND ${Thread.column.ownerId} = ${req.ownerId}""".update.apply() } def select(id: Long)(implicit session: DBSession, materializer: ActorMaterializer) = Source.single(id).via(selectFlow).runWith(Sink.head) def createWithTag(param: Create, tags: List[TagFlow.Create])(implicit session: DBSession, materializer: ActorMaterializer) = { FlowGraph.closed(Sink.head[ThreadTagFlow.Result]) { implicit builder => sink => import FlowGraph.Implicits._ val tagIn = Source.single(tags) val threadIn = Source.single(param) val zip = builder.add(Zip[Seq[Tag], Thread]()) val tagFlow = builder.add(TagStream.createFlow) val threadFlow = builder.add(createFlow) val threadTagFlow = builder.add(ThreadTagStream.createFlow) tagIn ~> tagFlow ~> zip.in0 threadIn ~> threadFlow ~> zip.in1 zip.out ~> threadTagFlow.inlet threadTagFlow.outlet ~> sink }.run() } def deleteThread(id: Long, userId: Long)(implicit session: DBSession, materializer: ActorMaterializer) = Source.single(Delete(id, userId)).via(deleteFlow).runWith(Sink.head) }
TanUkkii007/scala2ch
src/main/scala/scala2ch/models/stream/ThreadFlow.scala
Scala
mit
2,441
package nomad.strat.service.rest import nomad.strat.service.persistence.entities.Dummy1 import spray.json.DefaultJsonProtocol // Collector of all json format we'd like to use on REST interface // Can be Slick mapped case classes too. object JsonProtocol extends DefaultJsonProtocol { implicit val dummy1Format = jsonFormat2(Dummy1) }
NomadHu/first_steps
service/src/main/scala/nomad/strat/service/rest/JsonProtocol.scala
Scala
apache-2.0
342
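jsonFormat2 above derives a RootJsonFormat from the case class's two-argument apply. The same mechanism, shown on a hypothetical case class (Sample stands in for Dummy1, whose fields live in the persistence layer), round-trips like this:

import spray.json._
import spray.json.DefaultJsonProtocol._

object JsonFormatExample extends App {
  // hypothetical two-field case class used only for illustration
  case class Sample(id: Long, name: String)
  implicit val sampleFormat: RootJsonFormat[Sample] = jsonFormat2(Sample)

  val js = Sample(1L, "demo").toJson      // {"id":1,"name":"demo"}
  val back = js.convertTo[Sample]
  println(js.compactPrint)
  println(back)
}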
package domala.internal.macros.reflect.mock case class MockEmbeddable(value1: Int, value2: String)
bakenezumi/domala
core/src/test/scala/domala/internal/macros/reflect/mock/MockEmbeddable.scala
Scala
apache-2.0
100
/** * Copyright: Copyright (C) 2016, Jaguar Land Rover * License: MPL-2.0 */ package org.genivi.sota.db import slick.driver.MySQLDriver.api._ /** * Some database operators are shared between the core and the * resolver. */ object Operators { /** * The database layer, Slick, doesn't know that MariaDB supports * regex search. * * @see {@link http://slick.typesafe.com/docs/} * @see {@link https://mariadb.com/kb/en/mariadb/regexp/} */ val regex = SimpleBinaryOperator[Boolean]("REGEXP") implicit class QueryReg[QTable, Row, S[_]](baseQuery: Query[QTable, Row, S]) { def regexFilter(reg: Option[String])(fieldsFn: (QTable => Rep[_])*): Query[QTable, Row, S] = { reg match { case Some(r) => baseQuery.filter { table => fieldsFn.foldLeft(false.bind) { case (acc, rep) => acc || regex(rep(table), r) } } case None => baseQuery } } } }
PDXostc/rvi_sota_server
common/src/main/scala/org/genivi/sota/db/Operators.scala
Scala
mpl-2.0
946
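regexFilter is meant to be called on any Slick query, folding the optional pattern across one or more columns with OR, or leaving the query untouched when no pattern is given. A hedged usage sketch against a hypothetical table (Devices is illustrative and not part of the project; the operator itself relies on MariaDB's REGEXP as noted above):

import slick.driver.MySQLDriver.api._
import org.genivi.sota.db.Operators._

class Devices(tag: Tag) extends Table[(Long, String)](tag, "Device") {
  def id = column[Long]("id", O.PrimaryKey)
  def deviceName = column[String]("device_name")
  def * = (id, deviceName)
}

object RegexFilterExample {
  val devices = TableQuery[Devices]

  // Some(pattern): keep rows whose name matches; None: return the base query unchanged
  def search(pattern: Option[String]) =
    devices.regexFilter(pattern)(_.deviceName)
}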
trait C[+T <: C[T, U], -U <: C[T, U]] { } trait HasY { type Y } // This works in scalac. trait Foo1[-X] { def bar[Y <: X](y: Y) = y } // A variant of Foo1 using a dependent method type (doesn't work using // scalac) trait Foo2[-X] { def bar(x: HasY { type Y <: X })(y: x.Y) = y } // This works in scalac. trait Foo3[+X] { def bar[Y >: X](y: Y) = y } // A variant of Foo3 using a dependent method type (doesn't work // using scalac) trait Foo4[+X] { def bar(x: HasY { type Y >: X })(y: x.Y) = y }
som-snytt/dotty
tests/pos/variances.scala
Scala
apache-2.0
501
/* * Copyright 2018 data Artisans GmbH, 2019 Ververica GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.ververica.flinktraining.solutions.datastream_scala.broadcast import java.util.Map.Entry import com.ververica.flinktraining.exercises.datastream_java.datatypes.TaxiRide import com.ververica.flinktraining.exercises.datastream_java.sources.TaxiRideSource import com.ververica.flinktraining.exercises.datastream_java.utils.ExerciseBase import com.ververica.flinktraining.exercises.datastream_java.utils.ExerciseBase.{printOrTest, rideSourceOrTest} import org.apache.flink.api.common.state.{MapStateDescriptor, ValueState, ValueStateDescriptor} import org.apache.flink.api.java.utils.ParameterTool import org.apache.flink.streaming.api.TimeCharacteristic import org.apache.flink.streaming.api.functions.KeyedProcessFunction import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, createTypeInformation} import org.apache.flink.util.Collector import scala.collection.JavaConversions._ import scala.util.Random /** * Reference solution for the "Nearest Future Taxi" exercise of the Flink training * (http://training.ververica.com). * * Given a location that is broadcast, the goal of this exercise is to watch the stream of * taxi rides and report on taxis that complete rides closest to the requested location. * The application should be able to handle simultaneous queries. * * Parameters: * -input path-to-input-file * * Use * * nc -lk 9999 * * (or nc -l -p 9999, depending on your version of netcat) * to establish a socket stream from stdin on port 9999. * On Windows you can use ncat from https://nmap.org/ncat/. 
* * Some good locations: * * -74, 41 (Near, but outside the city to the NNW) * -73.7781, 40.6413 (JFK Airport) * -73.977664, 40.761484 (Museum of Modern Art) */ case class Query(queryId: Long, longitude: Float, latitude: Float) object Query { def apply(longitude: Float, latitude: Float): Query = new Query(Random.nextLong, longitude, latitude) } object NearestTaxiSolution { val queryDescriptor = new MapStateDescriptor[Long, Query]("queries", createTypeInformation[Long], createTypeInformation[Query]) def main(args: Array[String]): Unit = { // parse parameters val params = ParameterTool.fromArgs(args) val ridesFile = params.get("input", ExerciseBase.pathToRideData) val maxEventDelay = 60 // events are out of order by at most 60 seconds val servingSpeedFactor = 600 // 10 minutes worth of events are served every second // set up streaming execution environment val env = StreamExecutionEnvironment.getExecutionEnvironment env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) env.setParallelism(ExerciseBase.parallelism) val rides = env.addSource(new TaxiRideSource(ridesFile, maxEventDelay, servingSpeedFactor)) val splitQuery = (msg: String) => { val parts = msg.split(",\\s*").map(_.toFloat) Query(parts(0), parts(1)) } // add a socket source val queryStream = env.socketTextStream("localhost", 9999) .map(splitQuery) .broadcast(queryDescriptor) val reports = rides .keyBy(_.taxiId) .connect(queryStream) .process(new QueryFunction) val nearest = reports // key by the queryId .keyBy(_._1) // the minimum, for each query, by distance .minBy(2) nearest.print() env.execute("Nearest Available Taxi") } // Note that in order to have consistent results after a restore from a checkpoint, the // behavior of this method must be deterministic, and NOT depend on characteristics of an // individual sub-task. class QueryFunction extends KeyedBroadcastProcessFunction[Long, TaxiRide, Query, (Long, Long, Float)]{ override def processElement(ride: TaxiRide, readOnlyContext: KeyedBroadcastProcessFunction[Long, TaxiRide, Query, (Long, Long, Float)]#ReadOnlyContext, out: Collector[(Long, Long, Float)]): Unit = if (!ride.isStart) for (entry: Entry[Long, Query] <- readOnlyContext.getBroadcastState(queryDescriptor).immutableEntries()) { val q = entry.getValue val dist = ride.getEuclideanDistance(q.longitude, q.latitude).toFloat out.collect((entry.getKey, ride.taxiId, dist)) } override def processBroadcastElement(query: Query, context: KeyedBroadcastProcessFunction[Long, TaxiRide, Query, (Long, Long, Float)]#Context, out: Collector[(Long, Long, Float)]): Unit = { println("New query: " + query) context.getBroadcastState(queryDescriptor).put(query.queryId, query) } } }
dataArtisans/flink-training-exercises
src/main/scala/com/ververica/flinktraining/solutions/datastream_scala/broadcast/NearestTaxiSolution.scala
Scala
apache-2.0
5,372
/* * Scala.js (https://www.scala-js.org/) * * Copyright EPFL. * * Licensed under Apache License 2.0 * (https://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package scala.scalajs.js.annotation import scala.annotation.meta._ /** Specifies the JavaScript name of an entity. * * @see [[http://www.scala-js.org/doc/calling-javascript.html Calling JavaScript from Scala.js]] */ @field @getter @setter class JSName private () extends scala.annotation.StaticAnnotation { def this(name: String) = this() def this(symbol: scala.scalajs.js.Symbol) = this() }
scala-js/scala-js
library/src/main/scala/scala/scalajs/js/annotation/JSName.scala
Scala
apache-2.0
676
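A typical use of @JSName is in a native facade, to expose a JavaScript member whose name is not a legal (or desirable) Scala identifier. A small illustrative facade; the Counter type and its JS property names are made up:

import scala.scalajs.js
import scala.scalajs.js.annotation.JSName

// hypothetical facade over a JS object whose member names are not valid Scala identifiers
@js.native
trait Counter extends js.Object {
  @JSName("current-value")
  def currentValue: Int = js.native

  @JSName("reset!")
  def reset(): Unit = js.native
}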
package games.input case class Position(x: Int, y: Int) private[games] class BiMap[R, T](entries: (R, T)*) { private val map = entries.toMap private val reverseMap = entries.map { case (a, b) => (b, a) }.toMap def getForLocal(loc: R): Option[T] = map.get(loc) def getForRemote(rem: T): Option[R] = reverseMap.get(rem) }
joelross/scalajs-games
demo/shared/src/main/scala/games/input/Input.scala
Scala
bsd-3-clause
330
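Because BiMap is package-private to games, it is used from code inside that package to translate identifiers in both directions. A small sketch; the key codes and names are invented:

package games.input

object KeyTranslation {
  // hypothetical mapping between local key codes and their remote names
  private val keys = new BiMap[Int, String](65 -> "KeyA", 66 -> "KeyB")

  def toRemote(code: Int): Option[String] = keys.getForLocal(code)  // 65 -> Some("KeyA")
  def toLocal(name: String): Option[Int] = keys.getForRemote(name)  // "KeyB" -> Some(66)
}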
package silky.audit import java.util.{Date, TimeZone} import clairvoyance.ProducesCapturedInputsAndOutputs import clairvoyance.plugins.SequenceDiagram import clairvoyance.scalatest.ClairvoyantContext import clairvoyance.scalatest.tags.skipInteractions import org.scalatest.MustMatchers import org.scalatest.refspec.RefSpec import silky.audit.Formatter._ import scala.io.Source import scala.xml.parsing.ConstructingParser class SequenceDiagramSpec extends RefSpec with MustMatchers with ClairvoyantContext with SequenceDiagram { override def capturedInputsAndOutputs: Seq[ProducesCapturedInputsAndOutputs] = Seq(this) object `Audit messages` { interestingGivens += ("initial value" -> "1234") @skipInteractions def `can be rendered as a sequence diagram` { parse(fromString( """ |1970-01-01 00:00:01,000 Begin: 123 |Message-Id: 4babe38093 |From: Bar |To: Foo | |End: 123 | |1970-01-01 00:00:03,000 Begin: 456 |Message-Id: 4babe38095 |From: Foo |To: Bar | |<foo>value: 1234</foo> | |End: 456 | |1970-01-01 00:00:05,000 Begin: 789 |Message-Id: 4babe38097 |From: Bar |To: Foo | |<bar>value: 5678, was: 1234</bar> | |End: 789 |""")) mustBe Stream( AuditMessage(from = "Bar", to = "Foo", timestamp = new Date(1000L), id = "4babe38093", payload = ""), AuditMessage(from = "Foo", to = "Bar", timestamp = new Date(3000L), id = "4babe38095", payload = "<foo>value: 1234</foo>"), AuditMessage(from = "Bar", to = "Foo", timestamp = new Date(5000L), id = "4babe38097", payload = "<bar>value: 5678, was: 1234</bar>") ) } } private def parse(source: Source): Stream[AuditMessage] = { val parser = new Parser parser.dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")) val stream = parser.parse(source) stream.foreach { message => captureValue(s"${classify(message.payload)} from ${message.from} to ${message.to}" -> message.payload) } stream } private def fromString(string: String): Source = Source.fromString(withoutMargin(string).trim) private def classify(payload: AnyRef): String = payload match { case "" => "Unknown" case s: String => ConstructingParser.fromSource(Source.fromString(s), preserveWS = false).document().docElem.label.capitalize } }
PILTT/silky
src/test/scala/silky/audit/SequenceDiagramSpec.scala
Scala
apache-2.0
2,503
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package mocks.connectors import mocks.Mock import org.mockito.stubbing.OngoingStubbing import org.scalatest.Suite import play.api.libs.json.JsValue import router.connectors.PropertyConnector import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.Future trait MockPropertyConnector extends Mock { _: Suite => val mockPropertyConnector = mock[PropertyConnector] object MockPropertyConnector { def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = { when(mockPropertyConnector.get(eqTo(uri))(any[HeaderCarrier]())) } def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = { when(mockPropertyConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]())) } } override protected def beforeEach(): Unit = { super.beforeEach() reset(mockPropertyConnector) } }
hmrc/self-assessment-api
test/mocks/connectors/MockPropertyConnector.scala
Scala
apache-2.0
1,537
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ package scaps.api sealed trait Definition { def name: String def shortName: String = EntityName.splitName(name).last def module: Module def withModule(m: Module): this.type = (this match { case t: TypeDef => t.copy(module = m) case v: ValueDef => v.copy(module = m) case v: ViewDef => v.copy(module = m) }).asInstanceOf[this.type] } case class TypeDef( name: String, typeParameters: List[TypeParameter], comment: String = "", module: Module = Module.Unknown, typeFrequency: Map[Variance, Float] = Map()) extends Definition { override def toString() = { val params = typeParameters match { case Nil => "" case ps => ps.mkString("[", ", ", "]") } s"$name$params" } def isFunction = typeParameters.length > 0 && name == TypeRef.Function.name(typeParameters.length - 1) def frequency(v: Variance) = typeFrequency.get(v).getOrElse(0f) def toType: TypeRef = TypeRef(name, Covariant, typeParameters.map(p => TypeRef(p.name, p.variance, Nil, true))) } case class ValueDef( name: String, typeParameters: List[TypeParameter], tpe: TypeRef, comment: DocComment, flags: Set[ValueDef.Flag] = Set(), source: Source = UnknownSource, module: Module = Module.Unknown, docLink: Option[String] = None) extends Definition { override def toString() = { val c = comment match { case DocComment.empty => "" case _ => s"$comment\\n" } val mods = flags.map(_.name).mkString(" ") val params = typeParameters match { case Nil => "" case ps => ps.mkString("[", ", ", "]") } s"$c$mods $name$params: $tpe" } /** * A unique description of the value including its name and type. */ def signature: String = signature(false) def signature(withImplicits: Boolean): String = { val params = typeParameters match { case Nil => "" case ps => ps.mkString("[", ", ", "]") } s"$name$params: ${tpe.signature(withImplicits)}" } lazy val group = ValueDef(EntityName.splitName(name).last, Nil, tpe.curried.structure, DocComment.empty, Set(), UnknownSource, Module.Unknown, None) def withoutComment = copy(comment = DocComment.empty) def isOverride = flags(ValueDef.Overrides) def isImplicit = flags(ValueDef.Implicit) def isStatic = flags(ValueDef.Static) } object ValueDef { sealed trait Flag { def name: String } case object Overrides extends Flag { val name = "overrides" } case object Implicit extends Flag { val name = "implicit" } case object Static extends Flag { val name = "static" } } case class TypeParameter( name: String, variance: Variance, lowerBound: TypeRef = TypeRef.Nothing(Covariant), upperBound: TypeRef = TypeRef.Any(Contravariant)) { import TypeRef._ assert(lowerBound.variance == Covariant) assert(upperBound.variance == Contravariant) override def toString() = { val lbound = if (lowerBound == Nothing(Covariant)) "" else s" >: ${lowerBound.withVariance(Invariant)}" val ubound = if (upperBound == Any(Contravariant)) "" else s" <: ${upperBound.withVariance(Invariant)}" s"$name$lbound$ubound" } } case class ViewDef(from: TypeRef, to: TypeRef, definingEntityName: String = "", module: Module = Module.Unknown) extends Definition { def name = s"$definingEntityName:$fromKey:$toKey" def fromKey = ViewDef.key(from) def toKey = ViewDef.key(to) def apply(t: TypeRef): Option[TypeRef] = { findParamMap(t).map { paramMap => paramMap.foldLeft(to) { (t, paramWithArg) => t(paramWithArg._1, paramWithArg._2) } } } def findParamMap(t: TypeRef): Option[List[(String, TypeRef)]] = 
findParamMap(List(from), List(t)) def findParamMap(from: List[TypeRef], t: List[TypeRef]): Option[List[(String, TypeRef)]] = from.zip(t).foldLeft(Option(List[(String, TypeRef)]())) { (paramMapOpt, fromWithT) => paramMapOpt.flatMap { paramOpt => val (f, t) = fromWithT if (f.isTypeParam) Some((f.name -> t) :: paramOpt) else if (f.name == t.name && f.variance == t.variance) findParamMap(f.args, t.args).map(_ ::: paramOpt) else None } } lazy val retainedInformation: Double = { if (from.isTypeParam) { 1d } else { val fromParts = from.toList val toParts = to.toList val fromParams = fromParts.filter(_.isTypeParam) val droppedParams = fromParams.count(p => !toParts.exists(_.name == p.name)) (fromParts.size - droppedParams).toDouble / fromParts.size } } def compose(rhs: ViewDef): Option[ViewDef] = { rhs(to).map { newTo => ViewDef(from, newTo) } } } object ViewDef { def key(tpe: TypeRef) = tpe.withArgsAsParams.renameTypeParams(_ => "_").annotatedSignature def bidirectional(from: TypeRef, to: TypeRef, definingEntityName: String) = List( ViewDef(from, to, definingEntityName), ViewDef(to.withVariance(to.variance.flip), from.withVariance(from.variance.flip), definingEntityName)) }
scala-search/scaps
api/shared/src/main/scala/scaps/api/Definitions.scala
Scala
mpl-2.0
5,316
package pl.touk.nussknacker.engine.canonize import cats.data.{NonEmptyList, ValidatedNel} import cats.instances.list._ import cats.syntax.traverse._ import pl.touk.nussknacker.engine.canonicalgraph._ import pl.touk.nussknacker.engine.graph._ import pl.touk.nussknacker.engine.graph.node.{BranchEnd, BranchEndData} object ProcessCanonizer { import MaybeArtificial.applicative import cats.syntax.apply._ def canonize(process: EspProcess): CanonicalProcess = { CanonicalProcess( process.metaData, NodeCanonizer.canonize(process.roots.head), process.roots.tail.map(NodeCanonizer.canonize) ) } def uncanonizeUnsafe(canonicalProcess: CanonicalProcess): EspProcess = uncanonize(canonicalProcess).valueOr(err => throw new IllegalArgumentException(err.toList.mkString("Unmarshalling errors: ", ", ", ""))) def uncanonize(canonicalProcess: CanonicalProcess): ValidatedNel[ProcessUncanonizationError, EspProcess] = uncanonizeArtificial(canonicalProcess).toValidNel def uncanonizeArtificial(canonicalProcess: CanonicalProcess): MaybeArtificial[EspProcess] = { val branches: MaybeArtificial[NonEmptyList[pl.touk.nussknacker.engine.graph.node.SourceNode]] = canonicalProcess.allStartNodes.map(uncanonizeSource).sequence branches.map(bList => EspProcess(canonicalProcess.metaData, bList)) } private def uncanonizeSource(canonicalNode: List[canonicalnode.CanonicalNode]): MaybeArtificial[node.SourceNode] = canonicalNode match { case (a@canonicalnode.FlatNode(data: node.StartingNodeData)) :: tail => uncanonize(a, tail).map(node.SourceNode(data, _)) case other :: _ => MaybeArtificial.artificialSource(InvalidRootNode(other.id)) case _ => MaybeArtificial.artificialSource(EmptyProcess) } private def uncanonize(previous: canonicalnode.CanonicalNode, canonicalNode: List[canonicalnode.CanonicalNode]): MaybeArtificial[node.SubsequentNode] = canonicalNode match { case canonicalnode.FlatNode(data: node.BranchEndData) :: Nil => new MaybeArtificial(node.BranchEnd(data), Nil) case canonicalnode.FlatNode(data: node.EndingNodeData) :: Nil => new MaybeArtificial(node.EndingNode(data), Nil) case (a@canonicalnode.FlatNode(data: node.OneOutputSubsequentNodeData)) :: tail => uncanonize(a, tail).map(node.OneOutputSubsequentNode(data, _)) case (a@canonicalnode.FilterNode(data, nextFalse)) :: tail if nextFalse.isEmpty => uncanonize(a, tail).map(node.FilterNode(data, _, None)) case (a@canonicalnode.FilterNode(data, nextFalse)) :: tail => (uncanonize(a, tail), uncanonize(a, nextFalse)).mapN { (nextTrue, nextFalseV) => node.FilterNode(data, nextTrue, Some(nextFalseV)) } case (a@canonicalnode.SwitchNode(data, Nil, defaultNext)) :: Nil => MaybeArtificial.artificialSink(InvalidTailOfBranch(data.id)) case (a@canonicalnode.SwitchNode(data, nexts, defaultNext)) :: Nil if defaultNext.isEmpty => nexts.map { casee => uncanonize(a, casee.nodes).map(node.Case(casee.expression, _)) }.sequence[MaybeArtificial, node.Case].map(node.SwitchNode(data, _, None)) case (a@canonicalnode.SwitchNode(data, nexts, defaultNext)) :: Nil => val unFlattenNexts = nexts.map { casee => uncanonize(a, casee.nodes).map(node.Case(casee.expression, _)) }.sequence[MaybeArtificial, node.Case] (unFlattenNexts, uncanonize(a, defaultNext)).mapN { (nextsV, defaultNextV) => node.SwitchNode(data, nextsV, Some(defaultNextV)) } case (a@canonicalnode.SplitNode(bare, Nil)) :: Nil => MaybeArtificial.artificialSink(InvalidTailOfBranch(bare.id)) case (a@canonicalnode.SplitNode(bare, nexts)) :: Nil => nexts.map(uncanonize(a, _)).sequence[MaybeArtificial, node.SubsequentNode].map { uncanonized => 
node.SplitNode(bare, uncanonized) } case invalidHead :: _ => MaybeArtificial.artificialSink(InvalidTailOfBranch(invalidHead.id)) case Nil => MaybeArtificial.artificialSink(InvalidTailOfBranch(previous.id)) } } object NodeCanonizer { def canonize(n: node.Node): List[canonicalnode.CanonicalNode] = n match { case oneOut: node.OneOutputNode => canonicalnode.FlatNode(oneOut.data) :: canonize(oneOut.next) case node.FilterNode(data, nextTrue, nextFalse) => canonicalnode.FilterNode(data, nextFalse.toList.flatMap(canonize)) :: canonize(nextTrue) case node.SwitchNode(data, nexts, defaultNext) => canonicalnode.SwitchNode( data = data, nexts = nexts.map { next => canonicalnode.Case(next.expression, canonize(next.node)) }, defaultNext = defaultNext.toList.flatMap(canonize) ) :: Nil case ending: node.EndingNode => canonicalnode.FlatNode(ending.data) :: Nil case node.SplitNode(bare, nexts) => canonicalnode.SplitNode(bare, nexts.map(canonize)) :: Nil case node.SubprocessNode(input, nexts) => canonicalnode.Subprocess(input, nexts.mapValues(canonize)) :: Nil case BranchEnd(e:BranchEndData) => canonicalnode.FlatNode(e) :: Nil } }
TouK/nussknacker
scenario-api/src/main/scala/pl/touk/nussknacker/engine/canonize/ProcessCanonizer.scala
Scala
apache-2.0
5,284
/* * Copyright 2013-2015 Websudos, Limited. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.websudos.phantom.builder.query import com.websudos.phantom.CassandraTable import com.websudos.phantom.builder.{QueryBuilder, Unspecified, ConsistencyBound} import com.websudos.phantom.connectors.KeySpace class TruncateQuery[ Table <: CassandraTable[Table, _], Record, Status <: ConsistencyBound ](table: Table, val qb: CQLQuery) extends ExecutableStatement object TruncateQuery { type Default[T <: CassandraTable[T, _], R] = TruncateQuery[T, R, Unspecified] def apply[T <: CassandraTable[T, _], R](table: T)(implicit keySpace: KeySpace): TruncateQuery.Default[T, R] = { new TruncateQuery(table, QueryBuilder.truncate(QueryBuilder.keyspace(keySpace.name, table.tableName).queryString)) } }
dan-mi-sun/phantom
phantom-dsl/src/main/scala/com/websudos/phantom/builder/query/TruncateQuery.scala
Scala
bsd-2-clause
2,217
package com.twitter.server.util import com.twitter.finagle.stats.{BroadcastStatsReceiver, StatsReceiver} import com.twitter.util.Try import java.lang.management.ManagementFactory import java.lang.reflect.Method import scala.collection.mutable object JvmStats { import com.twitter.conversions.string._ import scala.collection.JavaConverters._ // set used for keeping track of jvm gauges (otherwise only weakly referenced) private[this] val gauges = mutable.Set.empty[Any] def register(statsReceiver: StatsReceiver) = { val stats = statsReceiver.scope("jvm") val mem = ManagementFactory.getMemoryMXBean() def heap = mem.getHeapMemoryUsage() val heapStats = stats.scope("heap") gauges.add(heapStats.addGauge("committed") { heap.getCommitted() }) gauges.add(heapStats.addGauge("max") { heap.getMax() }) gauges.add(heapStats.addGauge("used") { heap.getUsed() }) def nonHeap = mem.getNonHeapMemoryUsage() val nonHeapStats = stats.scope("nonheap") gauges.add(nonHeapStats.addGauge("committed") { nonHeap.getCommitted() }) gauges.add(nonHeapStats.addGauge("max") { nonHeap.getMax() }) gauges.add(nonHeapStats.addGauge("used") { nonHeap.getUsed() }) val threads = ManagementFactory.getThreadMXBean() val threadStats = stats.scope("thread") gauges.add(threadStats.addGauge("daemon_count") { threads.getDaemonThreadCount().toLong }) gauges.add(threadStats.addGauge("count") { threads.getThreadCount().toLong }) gauges.add(threadStats.addGauge("peak_count") { threads.getPeakThreadCount().toLong }) val runtime = ManagementFactory.getRuntimeMXBean() gauges.add(stats.addGauge("start_time") { runtime.getStartTime() }) gauges.add(stats.addGauge("uptime") { runtime.getUptime() }) val os = ManagementFactory.getOperatingSystemMXBean() gauges.add(stats.addGauge("num_cpus") { os.getAvailableProcessors().toLong }) os match { case unix: com.sun.management.UnixOperatingSystemMXBean => gauges.add(stats.addGauge("fd_count") { unix.getOpenFileDescriptorCount }) gauges.add(stats.addGauge("fd_limit") { unix.getMaxFileDescriptorCount }) case _ => } val compilation = ManagementFactory.getCompilationMXBean() val compilationStats = stats.scope("compilation") gauges.add(compilationStats.addGauge("time_msec") { compilation.getTotalCompilationTime() }) val classes = ManagementFactory.getClassLoadingMXBean() val classLoadingStats = stats.scope("classes") gauges.add(classLoadingStats.addGauge("total_loaded") { classes.getTotalLoadedClassCount() }) gauges.add(classLoadingStats.addGauge("total_unloaded") { classes.getUnloadedClassCount() }) gauges.add(classLoadingStats.addGauge("current_loaded") { classes.getLoadedClassCount().toLong }) val memPool = ManagementFactory.getMemoryPoolMXBeans.asScala val memStats = stats.scope("mem") val currentMem = memStats.scope("current") // TODO: Refactor postGCStats when we confirmed that no one is using this stats anymore // val postGCStats = memStats.scope("postGC") val postGCMem = memStats.scope("postGC") val postGCStats = BroadcastStatsReceiver(Seq(stats.scope("postGC"), postGCMem)) memPool foreach { pool => val name = pool.getName.regexSub("""[^\w]""".r) { m => "_" } if (pool.getCollectionUsage != null) { def usage = pool.getCollectionUsage // this is a snapshot, we can't reuse the value gauges.add(postGCStats.addGauge(name, "used") { usage.getUsed }) gauges.add(postGCStats.addGauge(name, "max") { usage.getMax }) } if (pool.getUsage != null) { def usage = pool.getUsage // this is a snapshot, we can't reuse the value gauges.add(currentMem.addGauge(name, "used") { usage.getUsed }) 
gauges.add(currentMem.addGauge(name, "max") { usage.getMax }) } } gauges.add(postGCStats.addGauge("used") { memPool flatMap(p => Option(p.getCollectionUsage)) map(_.getUsed) sum }) gauges.add(currentMem.addGauge("used") { memPool flatMap(p => Option(p.getUsage)) map(_.getUsed) sum }) // `BufferPoolMXBean` and `ManagementFactory.getPlatfromMXBeans` are introduced in Java 1.7. // Use reflection to add these gauges so we can still compile with 1.6 val bufferPoolStats = memStats.scope("buffer") for { bufferPoolMXBean <- Try[Class[_]] { ClassLoader.getSystemClassLoader.loadClass("java.lang.management.BufferPoolMXBean") } getPlatformMXBeans <- classOf[ManagementFactory].getMethods.find { m => m.getName == "getPlatformMXBeans" && m.getParameterTypes.length == 1 } pool <- getPlatformMXBeans.invoke(null /* static method */, bufferPoolMXBean) .asInstanceOf[java.util.List[_]].asScala } { val name = bufferPoolMXBean.getMethod("getName").invoke(pool).asInstanceOf[String] val getCount: Method = bufferPoolMXBean.getMethod("getCount") gauges.add(bufferPoolStats.addGauge(name, "count") { getCount.invoke(pool).asInstanceOf[Long] }) val getMemoryUsed: Method = bufferPoolMXBean.getMethod("getMemoryUsed") gauges.add(bufferPoolStats.addGauge(name, "used") { getMemoryUsed.invoke(pool).asInstanceOf[Long] }) val getTotalCapacity: Method = bufferPoolMXBean.getMethod("getTotalCapacity") gauges.add(bufferPoolStats.addGauge(name, "max") { getTotalCapacity.invoke(pool).asInstanceOf[Long] }) } val gcPool = ManagementFactory.getGarbageCollectorMXBeans.asScala val gcStats = stats.scope("gc") gcPool foreach { gc => val name = gc.getName.regexSub("""[^\w]""".r) { m => "_" } gauges.add(gcStats.addGauge(name, "cycles") { gc.getCollectionCount }) gauges.add(gcStats.addGauge(name, "msec") { gc.getCollectionTime }) } // note, these could be -1 if the collector doesn't have support for it. gauges.add(gcStats.addGauge("cycles") { gcPool map(_.getCollectionCount) filter(_ > 0) sum }) gauges.add(gcStats.addGauge("msec") { gcPool map(_.getCollectionTime) filter(_ > 0) sum }) } }
nshkrob/twitter-server
src/main/scala/com/twitter/server/util/JvmStats.scala
Scala
apache-2.0
6,088
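Everything JvmStats exports comes from the standard java.lang.management MXBeans; the gauges just wrap those getters. Reading the same values directly, without a StatsReceiver, looks like this (plain JDK API, no twitter-server types involved):

import java.lang.management.ManagementFactory
import scala.collection.JavaConverters._

object MxBeanExample extends App {
  val heap = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage
  println(s"heap: used=${heap.getUsed} committed=${heap.getCommitted} max=${heap.getMax}")

  val threads = ManagementFactory.getThreadMXBean
  println(s"threads: count=${threads.getThreadCount} daemons=${threads.getDaemonThreadCount}")

  // same beans the gc.* gauges read; counts can be -1 if the collector has no support
  ManagementFactory.getGarbageCollectorMXBeans.asScala.foreach { gc =>
    println(s"gc ${gc.getName}: cycles=${gc.getCollectionCount} msec=${gc.getCollectionTime}")
  }
}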
package mesosphere.marathon.core.appinfo.impl import mesosphere.marathon.Protos.MarathonTask import mesosphere.marathon.core.appinfo.{ EnrichedTask, TaskCounts, AppInfo } import mesosphere.marathon.state._ import mesosphere.marathon.upgrade.DeploymentManager.DeploymentStepInfo import mesosphere.marathon.upgrade.{ DeploymentStep, DeploymentPlan } import mesosphere.marathon.{ MarathonSchedulerService, MarathonSpec } import mesosphere.marathon.health.{ Health, HealthCounts, MarathonHealthCheckManager, HealthCheckManager } import mesosphere.marathon.tasks.TaskTracker import mesosphere.util.Mockito import org.apache.mesos.Protos import org.scalatest.{ Matchers, GivenWhenThen } import scala.collection.immutable.Seq import scala.concurrent.Future class AppInfoBaseDataTest extends MarathonSpec with GivenWhenThen with Mockito with Matchers { import org.scalatest.concurrent.ScalaFutures._ class Fixture { lazy val taskTracker = mock[TaskTracker] lazy val healthCheckManager = mock[HealthCheckManager] lazy val marathonSchedulerService = mock[MarathonSchedulerService] lazy val taskFailureRepository = mock[TaskFailureRepository] lazy val baseData = new AppInfoBaseData( taskTracker, healthCheckManager, marathonSchedulerService, taskFailureRepository ) def verifyNoMoreInteractions(): Unit = { noMoreInteractions(taskTracker) noMoreInteractions(healthCheckManager) noMoreInteractions(marathonSchedulerService) noMoreInteractions(taskFailureRepository) } } val app = AppDefinition(PathId("/test")) val other = AppDefinition(PathId("/other")) test("not embedding anything results in no calls") { val f = new Fixture When("getting AppInfos without embeds") val appInfo = f.baseData.appInfoFuture(app, Set.empty).futureValue Then("we get an empty appInfo") appInfo should be(AppInfo(app)) And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting tasks retrieves tasks from taskTracker and health infos") { val f = new Fixture Given("three tasks in the task tracker") val running1 = MarathonTask .newBuilder() .setId("task1") .setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_RUNNING).buildPartial()) .buildPartial() val running2 = running1.toBuilder.setId("task2").buildPartial() val running3 = running1.toBuilder.setId("task3").buildPartial() f.taskTracker.get(app.id) returns Set(running1, running2, running3) val alive = Health("task2", lastSuccess = Some(Timestamp(1))) val unhealthy = Health("task3", lastFailure = Some(Timestamp(1))) f.healthCheckManager.statuses(app.id) returns Future.successful( Map( running1.getId -> Seq.empty, running2.getId -> Seq(alive), running3.getId -> Seq(unhealthy) ) ) When("requesting AppInfos with tasks") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Tasks)).futureValue Then("we get a tasks object in the appInfo") appInfo.maybeTasks should not be (empty) appInfo.maybeTasks.get.map(_.appId.toString) should have size (3) appInfo.maybeTasks.get.map(_.task.getId).toSet should be (Set("task1", "task2", "task3")) appInfo should be(AppInfo(app, maybeTasks = Some( Seq( EnrichedTask(app.id, running1, Seq.empty), EnrichedTask(app.id, running2, Seq(alive)), EnrichedTask(app.id, running3, Seq(unhealthy)) ) ))) And("the taskTracker should have been called") verify(f.taskTracker, times(1)).get(app.id) And("the healthCheckManager as well") verify(f.healthCheckManager, times(1)).statuses(app.id) And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting task counts only retrieves tasks from taskTracker and health counts") { 
val f = new Fixture Given("one staged and two running tasks in the taskTracker") val staged = MarathonTask .newBuilder() .setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_STAGING).buildPartial()) .buildPartial() val running = MarathonTask .newBuilder() .setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_RUNNING).buildPartial()) .buildPartial() val running2 = running.toBuilder.setId("some other").buildPartial() f.taskTracker.get(app.id) returns Set(staged, running, running2) f.healthCheckManager.healthCounts(app.id) returns Future.successful( HealthCounts(healthy = 3, unknown = 4, unhealthy = 5) ) When("requesting AppInfos with counts") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Counts)).futureValue Then("we get counts object in the appInfo") appInfo should be(AppInfo(app, maybeCounts = Some( TaskCounts(tasksStaged = 1, tasksRunning = 2, tasksHealthy = 3, tasksUnhealthy = 5) ))) And("the taskTracker should have been called") verify(f.taskTracker, times(1)).get(app.id) And("the healthCheckManager as well") verify(f.healthCheckManager, times(1)).healthCounts(app.id) And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting deployments does not request anything else") { val f = new Fixture Given("One related and one unrelated deployment") val emptyGroup = Group.empty val relatedDeployment = DeploymentPlan(emptyGroup, emptyGroup.copy(apps = Set(app))) val unrelatedDeployment = DeploymentPlan(emptyGroup, emptyGroup.copy(apps = Set(other))) f.marathonSchedulerService.listRunningDeployments() returns Future.successful(Seq[DeploymentStepInfo]( DeploymentStepInfo(relatedDeployment, DeploymentStep(Seq.empty), 1), DeploymentStepInfo(unrelatedDeployment, DeploymentStep(Seq.empty), 1) )) When("Getting AppInfos without counts") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Deployments)).futureValue Then("we get an counts in the appInfo") appInfo should be(AppInfo(app, maybeDeployments = Some( Seq(Identifiable(relatedDeployment.id)) ))) And("the marathonSchedulerService should have been called to retrieve the deployments") verify(f.marathonSchedulerService, times(1)).listRunningDeployments() And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting deployments does work if no deployments are running") { val f = new Fixture Given("No deployments") f.marathonSchedulerService.listRunningDeployments() returns Future.successful( Seq.empty[DeploymentStepInfo] ) When("Getting AppInfos with deployments") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Deployments)).futureValue Then("we get an empty list of deployments") appInfo should be(AppInfo(app, maybeDeployments = Some( Seq.empty ))) And("the marathonSchedulerService should have been called to retrieve the deployments") verify(f.marathonSchedulerService, times(1)).listRunningDeployments() And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting lastTaskFailure when one exists") { val f = new Fixture Given("One last taskFailure") f.taskFailureRepository.current(app.id) returns Future.successful(Some(TaskFailureTestHelper.taskFailure)) When("Getting AppInfos with last task failures") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure)).futureValue Then("we get the failure in the app info") appInfo should be(AppInfo(app, maybeLastTaskFailure = Some( TaskFailureTestHelper.taskFailure ))) And("the taskFailureRepository should have been called to retrieve the failure") 
verify(f.taskFailureRepository, times(1)).current(app.id) And("we have no more interactions") f.verifyNoMoreInteractions() } test("requesting lastTaskFailure when None exist") { val f = new Fixture Given("no taskFailure") f.taskFailureRepository.current(app.id) returns Future.successful(None) When("Getting AppInfos with last task failures") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure)).futureValue Then("we get no failure in the app info") appInfo should be(AppInfo(app)) And("the taskFailureRepository should have been called to retrieve the failure") verify(f.taskFailureRepository, times(1)).current(app.id) And("we have no more interactions") f.verifyNoMoreInteractions() } test("Combining embed options work") { val f = new Fixture Given("One last taskFailure and no deployments") f.taskFailureRepository.current(app.id) returns Future.successful(Some(TaskFailureTestHelper.taskFailure)) f.marathonSchedulerService.listRunningDeployments() returns Future.successful( Seq.empty[DeploymentStepInfo] ) When("Getting AppInfos with last task failures and deployments") val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure, AppInfo.Embed.Deployments)).futureValue Then("we get the failure in the app info") appInfo should be(AppInfo( app, maybeLastTaskFailure = Some(TaskFailureTestHelper.taskFailure), maybeDeployments = Some(Seq.empty) )) And("the taskFailureRepository should have been called to retrieve the failure") verify(f.taskFailureRepository, times(1)).current(app.id) And("the marathonSchedulerService should have been called to retrieve the deployments") verify(f.marathonSchedulerService, times(1)).listRunningDeployments() And("we have no more interactions") f.verifyNoMoreInteractions() } }
cgvarela/marathon
src/test/scala/mesosphere/marathon/core/appinfo/impl/AppInfoBaseDataTest.scala
Scala
apache-2.0
9,731
package akka.ainterface.remote import akka.actor.ActorRef import akka.ainterface.NodeName import akka.ainterface.remote.transport.TcpConnectionProtocol import akka.ainterface.remote.transport.TcpConnectionProtocol.ReadSuccess import akka.util.ByteString import java.nio.charset.StandardCharsets import java.security.MessageDigest import scodec.{Decoder, Encoder} package object handshake { private[handshake] val DigestLength = 16 private[handshake] def write[A](connection: ActorRef, x: A) (implicit encoder: Encoder[HandshakeMessage[A]]): Unit = { connection ! TcpConnectionProtocol.Write(HandshakeMessage(x)) } private[handshake] def read[A](connection: ActorRef, self: ActorRef) (implicit decoder: Decoder[HandshakeMessage[A]]): Unit = { connection ! TcpConnectionProtocol.Read[HandshakeMessage[A]](self, keeps = false) } private[handshake] object ReadHandshake { def unapply(result: ReadSuccess): Option[Any] = result.obj match { case HandshakeMessage(x) => Some(x) case _ => None } } /** * Generates a digest. */ def genDigest(challenge: Int, cookie: String): ByteString = { val challengeNumber: Long = challenge & 0xffffffffL val challengeString = challengeNumber.toString val md5 = MessageDigest.getInstance("MD5") md5.update(cookie.getBytes(StandardCharsets.ISO_8859_1)) md5.update(challengeString.getBytes(StandardCharsets.ISO_8859_1)) val digest = md5.digest() assert(digest.length == DigestLength) ByteString(digest) } /** * Currently always hidden. */ private[this] def publishOnNode(node: NodeName): DFlags = DFlags.hidden private[handshake] def makeThisFlags(node: NodeName): DFlags = { makeThisFlags(isHidden = false, node) } private[handshake] def makeThisFlags(isHidden: Boolean, remoteNode: NodeName): DFlags = { isHidden match { case true => DFlags.hidden case false => publishOnNode(remoteNode) } } private[handshake] def adjustFlags(thisFlags: DFlags, otherFlags: DFlags): (DFlags, DFlags) = { if (thisFlags.published && otherFlags.published) { (thisFlags, otherFlags) } else { (thisFlags.hide, otherFlags.hide) } } private[handshake] def checkDFlagXnc(otherFlags: DFlags): Boolean = { otherFlags.acceptsExtendedReferences && otherFlags.acceptsExtendedPidsPorts } private[handshake] def isAllowed(otherNodeName: NodeName, allowed: Option[Set[NodeName]]): Boolean = { allowed match { case None => true case Some(as) => as.contains(otherNodeName) } } }
ainterface/ainterface
ainterface/src/main/scala/akka/ainterface/remote/handshake/package.scala
Scala
apache-2.0
2,708
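genDigest is the heart of the challenge/response: each side proves knowledge of the shared cookie by returning MD5(cookie ++ challenge.toString). A sketch of checking a received digest against a locally computed one; the cookie and challenge values are invented:

import akka.ainterface.remote.handshake.genDigest
import akka.util.ByteString

object DigestCheckExample extends App {
  val cookie = "MONSTER"        // hypothetical shared cookie
  val challenge = 0x7ab3c1d9    // hypothetical challenge sent to the peer

  val expected: ByteString = genDigest(challenge, cookie)

  // the peer's reply is accepted only if it computed the same 16-byte digest
  def accept(received: ByteString): Boolean = received == expected

  println(accept(genDigest(challenge, cookie))) // true
}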
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //package cogdebugger.ui.fieldvisualizations.complex // //import libcog._ //import scala.swing._ //import cogx.platform.cpumemory.ComplexFieldMemory // ///** Stand-alone testing of a field viewer. // * // * @author Greg Snider // */ //object TestComplexFieldSuperPanel extends SimpleSwingApplication { // // Create a simple complex field // val Rows = 8 // val Columns = 8 // val complexFieldPolar = ComplexFieldMemory(Rows, Columns, // (r, c) => Complex.polar(r, c * 2 * math.Pi.toFloat / Columns) // ) // val complexFieldCartesian = ComplexFieldMemory(Rows, Columns, // (r, c) => Complex(r - Rows / 2, c - Columns / 2) // ) // val stickTensorField = ComplexFieldMemory(Rows, Columns, (row, col) => { // val magnitude = row // val orientation = col * (math.Pi.toFloat / Columns) // // We double the orientation to encode it in a complex number. // Complex.polar(magnitude, orientation * 2) // }) // // lazy val top = new MainFrame { // title = "Test ComplexFieldSuperPanel" // // // Select field to view // //val complexField = complexFieldCartesian // //val complexField = complexFieldPolar // val complexField = stickTensorField // // contents = new BoxPanel(Orientation.Horizontal) { // contents += new ComplexFieldSuperPanel(complexField.fieldType) { // update(null, complexField, 0L) // } // } // minimumSize = new Dimension(250, 100) // } //}
hpe-cct/cct-core
src/test/scala/cogdebugger/ui/fieldvisualizations/complex/TestComplexFieldSuperPanel.scala
Scala
apache-2.0
2,049
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.api.python.PythonEvalType import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules._ import org.apache.spark.sql.types.{IntegerType, StringType} import org.apache.spark.unsafe.types.CalendarInterval class FilterPushdownSuite extends PlanTest { object Optimize extends RuleExecutor[LogicalPlan] { val batches = Batch("Subqueries", Once, EliminateSubqueryAliases) :: Batch("Filter Pushdown", FixedPoint(10), CombineFilters, PushPredicateThroughNonJoin, BooleanSimplification, PushPredicateThroughJoin, CollapseProject) :: Batch("Push extra predicate through join", FixedPoint(10), PushExtraPredicateThroughJoin, PushDownPredicates) :: Nil } val attrA = 'a.int val attrB = 'b.int val attrC = 'c.int val attrD = 'd.int val testRelation = LocalRelation(attrA, attrB, attrC) val testRelation1 = LocalRelation(attrD) val simpleDisjunctivePredicate = ("x.a".attr > 3) && ("y.a".attr > 13) || ("x.a".attr > 1) && ("y.a".attr > 11) val expectedPredicatePushDownResult = { val left = testRelation.where(('a > 3 || 'a > 1)).subquery('x) val right = testRelation.where('a > 13 || 'a > 11).subquery('y) left.join(right, condition = Some("x.b".attr === "y.b".attr && (("x.a".attr > 3) && ("y.a".attr > 13) || ("x.a".attr > 1) && ("y.a".attr > 11)))).analyze } // This test already passes. test("eliminate subqueries") { val originalQuery = testRelation .subquery('y) .select('a) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .select('a.attr) .analyze comparePlans(optimized, correctAnswer) } // After this line is unimplemented. 
test("simple push down") { val originalQuery = testRelation .select('a) .where('a === 1) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 1) .select('a) .analyze comparePlans(optimized, correctAnswer) } test("combine redundant filters") { val originalQuery = testRelation .where('a === 1 && 'b === 1) .where('a === 1 && 'c === 1) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 1 && 'b === 1 && 'c === 1) .analyze comparePlans(optimized, correctAnswer) } test("do not combine non-deterministic filters even if they are identical") { val originalQuery = testRelation .where(Rand(0) > 0.1 && 'a === 1) .where(Rand(0) > 0.1 && 'a === 1).analyze val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("SPARK-16164: Filter pushdown should keep the ordering in the logical plan") { val originalQuery = testRelation .where('a === 1) .select('a, 'b) .where('b === 1) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 1 && 'b === 1) .select('a, 'b) .analyze // We can not use comparePlans here because it normalized the plan. assert(optimized == correctAnswer) } test("SPARK-16994: filter should not be pushed through limit") { val originalQuery = testRelation.limit(10).where('a === 1).analyze val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("can't push without rewrite") { val originalQuery = testRelation .select('a + 'b as 'e) .where('e === 1) .analyze val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a + 'b === 1) .select('a + 'b as 'e) .analyze comparePlans(optimized, correctAnswer) } test("nondeterministic: can always push down filter through project with deterministic field") { val originalQuery = testRelation .select('a) .where(Rand(10) > 5 || 'a > 5) .analyze val optimized = Optimize.execute(originalQuery) val correctAnswer = testRelation .where(Rand(10) > 5 || 'a > 5) .select('a) .analyze comparePlans(optimized, correctAnswer) } test("nondeterministic: can't push down filter through project with nondeterministic field") { val originalQuery = testRelation .select(Rand(10).as('rand), 'a) .where('a > 5) .analyze val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("nondeterministic: can't push down filter through aggregate with nondeterministic field") { val originalQuery = testRelation .groupBy('a)('a, Rand(10).as('rand)) .where('a > 5) .analyze val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("nondeterministic: push down part of filter through aggregate with deterministic field") { val originalQuery = testRelation .groupBy('a)('a) .where('a > 5 && Rand(10) > 5) .analyze val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a > 5) .groupBy('a)('a) .where(Rand(10) > 5) .analyze comparePlans(optimized, correctAnswer) } test("filters: combines filters") { val originalQuery = testRelation .select('a) .where('a === 1) .where('a === 2) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 1 && 'a === 2) .select('a).analyze comparePlans(optimized, correctAnswer) } test("joins: push to either side") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y) .where("x.b".attr === 1) .where("y.b".attr === 2) } val 
optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 1) val right = testRelation.where('b === 2) val correctAnswer = left.join(right).analyze comparePlans(optimized, correctAnswer) } test("joins: push to one side") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y) .where("x.b".attr === 1) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 1) val right = testRelation val correctAnswer = left.join(right).analyze comparePlans(optimized, correctAnswer) } test("joins: do not push down non-deterministic filters into join condition") { val x = testRelation.subquery('x) val y = testRelation1.subquery('y) val originalQuery = x.join(y).where(Rand(10) > 5.0).analyze val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("joins: push to one side after transformCondition") { val x = testRelation.subquery('x) val y = testRelation1.subquery('y) val originalQuery = { x.join(y) .where(("x.a".attr === 1 && "y.d".attr === "x.b".attr) || ("x.a".attr === 1 && "y.d".attr === "x.c".attr)) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a === 1) val right = testRelation1 val correctAnswer = left.join(right, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze comparePlans(optimized, correctAnswer) } test("joins: rewrite filter to push to either side") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y) .where("x.b".attr === 1 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 1) val right = testRelation.where('b === 2) val correctAnswer = left.join(right).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left semi join") { val x = testRelation.subquery('x) val y = testRelation1.subquery('y) val originalQuery = { x.join(y, LeftSemi, Option("x.a".attr === "y.d".attr && "x.b".attr >= 1 && "y.d".attr >= 2)) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b >= 1) val right = testRelation1.where('d >= 2) val correctAnswer = left.join(right, LeftSemi, Option("a".attr === "d".attr)).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left outer join #1") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, LeftOuter) .where("x.b".attr === 1 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 1) val correctAnswer = left.join(y, LeftOuter).where("y.b".attr === 2).analyze comparePlans(optimized, correctAnswer) } test("joins: push down right outer join #1") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, RightOuter) .where("x.b".attr === 1 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val right = testRelation.where('b === 2).subquery('d) val correctAnswer = x.join(right, RightOuter).where("x.b".attr === 1).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left outer join #2") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, LeftOuter, Some("x.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 2).subquery('d) val correctAnswer = 
left.join(y, LeftOuter, Some("d.b".attr === 1)).where("y.b".attr === 2).analyze comparePlans(optimized, correctAnswer) } test("joins: push down right outer join #2") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, RightOuter, Some("y.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val right = testRelation.where('b === 2).subquery('d) val correctAnswer = x.join(right, RightOuter, Some("d.b".attr === 1)).where("x.b".attr === 2).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left outer join #3") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, LeftOuter, Some("y.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 2).subquery('l) val right = testRelation.where('b === 1).subquery('r) val correctAnswer = left.join(right, LeftOuter).where("r.b".attr === 2).analyze comparePlans(optimized, correctAnswer) } test("joins: push down right outer join #3") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, RightOuter, Some("y.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2) } val optimized = Optimize.execute(originalQuery.analyze) val right = testRelation.where('b === 2).subquery('r) val correctAnswer = x.join(right, RightOuter, Some("r.b".attr === 1)).where("x.b".attr === 2).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left outer join #4") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, LeftOuter, Some("y.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 2).subquery('l) val right = testRelation.where('b === 1).subquery('r) val correctAnswer = left.join(right, LeftOuter).where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze comparePlans(optimized, correctAnswer) } test("joins: push down right outer join #4") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, RightOuter, Some("y.b".attr === 1)) .where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.subquery('l) val right = testRelation.where('b === 2).subquery('r) val correctAnswer = left.join(right, RightOuter, Some("r.b".attr === 1)). where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze comparePlans(optimized, correctAnswer) } test("joins: push down left outer join #5") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, LeftOuter, Some("y.b".attr === 1 && "x.a".attr === 3)) .where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('b === 2).subquery('l) val right = testRelation.where('b === 1).subquery('r) val correctAnswer = left.join(right, LeftOuter, Some("l.a".attr===3)). 
where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze comparePlans(optimized, correctAnswer) } test("joins: push down right outer join #5") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, RightOuter, Some("y.b".attr === 1 && "x.a".attr === 3)) .where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a === 3).subquery('l) val right = testRelation.where('b === 2).subquery('r) val correctAnswer = left.join(right, RightOuter, Some("r.b".attr === 1)). where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze comparePlans(optimized, correctAnswer) } test("joins: can't push down") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y, condition = Some("x.b".attr === "y.b".attr)) } val optimized = Optimize.execute(originalQuery.analyze) comparePlans(originalQuery.analyze, optimized) } test("joins: conjunctive predicates") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y) .where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) && ("y.a".attr === 1)) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a === 1).subquery('x) val right = testRelation.where('a === 1).subquery('y) val correctAnswer = left.join(right, condition = Some("x.b".attr === "y.b".attr)) .analyze comparePlans(optimized, correctAnswer) } test("joins: conjunctive predicates #2") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = { x.join(y) .where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1)) } val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a === 1).subquery('x) val right = testRelation.subquery('y) val correctAnswer = left.join(right, condition = Some("x.b".attr === "y.b".attr)) .analyze comparePlans(optimized, correctAnswer) } test("joins: conjunctive predicates #3") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val z = testRelation.subquery('z) val originalQuery = { z.join(x.join(y)) .where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) && ("z.a".attr >= 3) && ("z.a".attr === "x.b".attr)) } val optimized = Optimize.execute(originalQuery.analyze) val lleft = testRelation.where('a >= 3).subquery('z) val left = testRelation.where('a === 1).subquery('x) val right = testRelation.subquery('y) val correctAnswer = lleft.join( left.join(right, condition = Some("x.b".attr === "y.b".attr)), condition = Some("z.a".attr === "x.b".attr)) .analyze comparePlans(optimized, correctAnswer) } test("joins: push down where clause into left anti join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, LeftAnti, Some("x.b".attr === "y.b".attr)) .where("x.a".attr > 10) .analyze val optimized = Optimize.execute(originalQuery) val correctAnswer = x.where("x.a".attr > 10) .join(y, LeftAnti, Some("x.b".attr === "y.b".attr)) .analyze comparePlans(optimized, correctAnswer) } test("joins: only push down join conditions to the right of a left anti join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, LeftAnti, Some("x.b".attr === "y.b".attr && "y.a".attr > 10 && "x.a".attr > 10)).analyze val optimized = Optimize.execute(originalQuery) val correctAnswer = x.join( y.where("y.a".attr > 10), LeftAnti, Some("x.b".attr === "y.b".attr && "x.a".attr > 10)) 
.analyze comparePlans(optimized, correctAnswer) } test("joins: only push down join conditions to the right of an existence join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val fillerVal = 'val.boolean val originalQuery = x.join(y, ExistenceJoin(fillerVal), Some("x.a".attr > 1 && "y.b".attr > 2)).analyze val optimized = Optimize.execute(originalQuery) val correctAnswer = x.join( y.where("y.b".attr > 2), ExistenceJoin(fillerVal), Some("x.a".attr > 1)) .analyze comparePlans(optimized, correctAnswer) } val testRelationWithArrayType = LocalRelation('a.int, 'b.int, 'c_arr.array(IntegerType)) test("generate: predicate referenced no generated column") { val originalQuery = { testRelationWithArrayType .generate(Explode('c_arr), alias = Some("arr")) .where(('b >= 5) && ('a > 6)) } val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = { testRelationWithArrayType .where(('b >= 5) && ('a > 6)) .generate(Explode('c_arr), alias = Some("arr")).analyze } comparePlans(optimized, correctAnswer) } test("generate: non-deterministic predicate referenced no generated column") { val originalQuery = { testRelationWithArrayType .generate(Explode('c_arr), alias = Some("arr")) .where(('b >= 5) && ('a + Rand(10).as("rnd") > 6) && ('col > 6)) } val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = { testRelationWithArrayType .where('b >= 5) .generate(Explode('c_arr), alias = Some("arr")) .where('a + Rand(10).as("rnd") > 6 && 'col > 6) .analyze } comparePlans(optimized, correctAnswer) } test("generate: part of conjuncts referenced generated column") { val generator = Explode('c_arr) val originalQuery = { testRelationWithArrayType .generate(generator, alias = Some("arr"), outputNames = Seq("c")) .where(('b >= 5) && ('c > 6)) } val optimized = Optimize.execute(originalQuery.analyze) val referenceResult = { testRelationWithArrayType .where('b >= 5) .generate(generator, alias = Some("arr"), outputNames = Seq("c")) .where('c > 6).analyze } // Since newly generated columns get different ids every time being analyzed // e.g. comparePlans(originalQuery.analyze, originalQuery.analyze) fails. // So we check operators manually here. 
// Filter("c" > 6) assertResult(classOf[Filter])(optimized.getClass) assertResult(1)(optimized.asInstanceOf[Filter].condition.references.size) assertResult("c") { optimized.asInstanceOf[Filter].condition.references.toSeq(0).name } // the rest part comparePlans(optimized.children(0), referenceResult.children(0)) } test("generate: all conjuncts referenced generated column") { val originalQuery = { testRelationWithArrayType .generate(Explode('c_arr), alias = Some("arr")) .where(('col > 6) || ('b > 5)).analyze } val optimized = Optimize.execute(originalQuery) comparePlans(optimized, originalQuery) } test("aggregate: push down filter when filter on group by expression") { val originalQuery = testRelation .groupBy('a)('a, count('b) as 'c) .select('a, 'c) .where('a === 2) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 2) .groupBy('a)('a, count('b) as 'c) .analyze comparePlans(optimized, correctAnswer) } test("aggregate: don't push down filter when filter not on group by expression") { val originalQuery = testRelation .select('a, 'b) .groupBy('a)('a, count('b) as 'c) .where('c === 2L) val optimized = Optimize.execute(originalQuery.analyze) comparePlans(optimized, originalQuery.analyze) } test("aggregate: push down filters partially which are subset of group by expressions") { val originalQuery = testRelation .select('a, 'b) .groupBy('a)('a, count('b) as 'c) .where('c === 2L && 'a === 3) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a === 3) .select('a, 'b) .groupBy('a)('a, count('b) as 'c) .where('c === 2L) .analyze comparePlans(optimized, correctAnswer) } test("aggregate: push down filters with alias") { val originalQuery = testRelation .select('a, 'b) .groupBy('a)(('a + 1) as 'aa, count('b) as 'c) .where(('c === 2L || 'aa > 4) && 'aa < 3) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where('a + 1 < 3) .select('a, 'b) .groupBy('a)(('a + 1) as 'aa, count('b) as 'c) .where('c === 2L || 'aa > 4) .analyze comparePlans(optimized, correctAnswer) } test("aggregate: push down filters with literal") { val originalQuery = testRelation .select('a, 'b) .groupBy('a)('a, count('b) as 'c, "s" as 'd) .where('c === 2L && 'd === "s") val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .where("s" === "s") .select('a, 'b) .groupBy('a)('a, count('b) as 'c, "s" as 'd) .where('c === 2L) .analyze comparePlans(optimized, correctAnswer) } test("aggregate: don't push down filters that are nondeterministic") { val originalQuery = testRelation .select('a, 'b) .groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd")) .where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .select('a, 'b) .groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd")) .where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5) .analyze comparePlans(optimized, correctAnswer) } test("SPARK-17712: aggregate: don't push down filters that are data-independent") { val originalQuery = LocalRelation.apply(testRelation.output, Seq.empty) .select('a, 'b) .groupBy('a)(count('a)) .where(false) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = testRelation .select('a, 'b) .groupBy('a)(count('a)) .where(false) .analyze comparePlans(optimized, correctAnswer) } test("aggregate: don't push filters if the aggregate has no 
grouping expressions") { val originalQuery = LocalRelation.apply(testRelation.output, Seq.empty) .select('a, 'b) .groupBy()(count(1)) .where(false) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = originalQuery.analyze comparePlans(optimized, correctAnswer) } test("union") { val testRelation2 = LocalRelation('d.int, 'e.int, 'f.int) val originalQuery = Union(Seq(testRelation, testRelation2)) .where('a === 2L && 'b + Rand(10).as("rnd") === 3 && 'c > 5L) val optimized = Optimize.execute(originalQuery.analyze) val correctAnswer = Union(Seq( testRelation.where('a === 2L && 'c > 5L), testRelation2.where('d === 2L && 'f > 5L))) .where('b + Rand(10).as("rnd") === 3) .analyze comparePlans(optimized, correctAnswer) } test("expand") { val agg = testRelation .groupBy(Cube(Seq(Seq('a), Seq('b))))('a, 'b, sum('c)) .analyze .asInstanceOf[Aggregate] val a = agg.output(0) val b = agg.output(1) val query = agg.where(a > 1 && b > 2) val optimized = Optimize.execute(query) val correctedAnswer = agg.copy(child = agg.child.where(a > 1 && b > 2)).analyze comparePlans(optimized, correctedAnswer) } test("predicate subquery: push down simple") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val z = LocalRelation('a.int, 'b.int, 'c.int).subquery('z) val query = x .join(y, Inner, Option("x.a".attr === "y.a".attr)) .where(Exists(z.where("x.a".attr === "z.a".attr))) .analyze val answer = x .where(Exists(z.where("x.a".attr === "z.a".attr))) .join(y, Inner, Option("x.a".attr === "y.a".attr)) .analyze val optimized = Optimize.execute(Optimize.execute(query)) comparePlans(optimized, answer) } test("predicate subquery: push down complex") { val w = testRelation.subquery('w) val x = testRelation.subquery('x) val y = testRelation.subquery('y) val z = LocalRelation('a.int, 'b.int, 'c.int).subquery('z) val query = w .join(x, Inner, Option("w.a".attr === "x.a".attr)) .join(y, LeftOuter, Option("x.a".attr === "y.a".attr)) .where(Exists(z.where("w.a".attr === "z.a".attr))) .analyze val answer = w .where(Exists(z.where("w.a".attr === "z.a".attr))) .join(x, Inner, Option("w.a".attr === "x.a".attr)) .join(y, LeftOuter, Option("x.a".attr === "y.a".attr)) .analyze val optimized = Optimize.execute(Optimize.execute(query)) comparePlans(optimized, answer) } test("SPARK-20094: don't push predicate with IN subquery into join condition") { val x = testRelation.subquery('x) val z = testRelation.subquery('z) val w = testRelation1.subquery('w) val queryPlan = x .join(z) .where(("x.b".attr === "z.b".attr) && ("x.a".attr > 1 || "z.c".attr.in(ListQuery(w.select("w.d".attr))))) .analyze val expectedPlan = x .join(z, Inner, Some("x.b".attr === "z.b".attr)) .where("x.a".attr > 1 || "z.c".attr.in(ListQuery(w.select("w.d".attr)))) .analyze val optimized = Optimize.execute(queryPlan) comparePlans(optimized, expectedPlan) } test("Window: predicate push down -- basic") { val winExpr = windowExpr(count('b), windowSpec('a :: Nil, 'b.asc :: Nil, UnspecifiedFrame)) val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a > 1) val correctAnswer = testRelation .where('a > 1).select('a, 'b, 'c) .window(winExpr.as('window) :: Nil, 'a :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: predicate push down -- predicates with compound predicate using only one column") { val winExpr = windowExpr(count('b), windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)) val originalQuery = 
testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a * 3 > 15) val correctAnswer = testRelation .where('a * 3 > 15).select('a, 'b, 'c) .window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: predicate push down -- multi window expressions with the same window spec") { val winSpec = windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame) val winExpr1 = windowExpr(count('b), winSpec) val winExpr2 = windowExpr(sum('b), winSpec) val originalQuery = testRelation .select('a, 'b, 'c, winExpr1.as('window1), winExpr2.as('window2)).where('a > 1) val correctAnswer = testRelation .where('a > 1).select('a, 'b, 'c) .window(winExpr1.as('window1) :: winExpr2.as('window2) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window1, 'window2).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: predicate push down -- multi window specification - 1") { // order by clauses are different between winSpec1 and winSpec2 val winSpec1 = windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame) val winExpr1 = windowExpr(count('b), winSpec1) val winSpec2 = windowSpec('a.attr :: 'b.attr :: Nil, 'a.asc :: Nil, UnspecifiedFrame) val winExpr2 = windowExpr(count('b), winSpec2) val originalQuery = testRelation .select('a, 'b, 'c, winExpr1.as('window1), winExpr2.as('window2)).where('a > 1) val correctAnswer1 = testRelation .where('a > 1).select('a, 'b, 'c) .window(winExpr1.as('window1) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .window(winExpr2.as('window2) :: Nil, 'a.attr :: 'b.attr :: Nil, 'a.asc :: Nil) .select('a, 'b, 'c, 'window1, 'window2).analyze val correctAnswer2 = testRelation .where('a > 1).select('a, 'b, 'c) .window(winExpr2.as('window2) :: Nil, 'a.attr :: 'b.attr :: Nil, 'a.asc :: Nil) .window(winExpr1.as('window1) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window1, 'window2).analyze // When Analyzer adding Window operators after grouping the extracted Window Expressions // based on their Partition and Order Specs, the order of Window operators is // non-deterministic. 
Thus, we have two correct plans val optimizedQuery = Optimize.execute(originalQuery.analyze) try { comparePlans(optimizedQuery, correctAnswer1) } catch { case ae: Throwable => comparePlans(optimizedQuery, correctAnswer2) } } test("Window: predicate push down -- multi window specification - 2") { // partitioning clauses are different between winSpec1 and winSpec2 val winSpec1 = windowSpec('a.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame) val winExpr1 = windowExpr(count('b), winSpec1) val winSpec2 = windowSpec('b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame) val winExpr2 = windowExpr(count('a), winSpec2) val originalQuery = testRelation .select('a, winExpr1.as('window1), 'b, 'c, winExpr2.as('window2)).where('b > 1) val correctAnswer1 = testRelation.select('a, 'b, 'c) .window(winExpr1.as('window1) :: Nil, 'a.attr :: Nil, 'b.asc :: Nil) .where('b > 1) .window(winExpr2.as('window2) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil) .select('a, 'window1, 'b, 'c, 'window2).analyze val correctAnswer2 = testRelation.select('a, 'b, 'c) .window(winExpr2.as('window2) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil) .window(winExpr1.as('window1) :: Nil, 'a.attr :: Nil, 'b.asc :: Nil) .where('b > 1) .select('a, 'window1, 'b, 'c, 'window2).analyze val optimizedQuery = Optimize.execute(originalQuery.analyze) // When Analyzer adding Window operators after grouping the extracted Window Expressions // based on their Partition and Order Specs, the order of Window operators is // non-deterministic. Thus, we have two correct plans try { comparePlans(optimizedQuery, correctAnswer1) } catch { case ae: Throwable => comparePlans(optimizedQuery, correctAnswer2) } } test("Window: predicate push down -- predicates with multiple partitioning columns") { val winExpr = windowExpr(count('b), windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)) val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1) val correctAnswer = testRelation .where('a + 'b > 1).select('a, 'b, 'c) .window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } // complex predicates with the same references but the same expressions // Todo: in Analyzer, to enable it, we need to convert the expression in conditions // to the alias that is defined as the same expression ignore("Window: predicate push down -- complex predicate with the same expressions") { val winSpec = windowSpec( partitionSpec = 'a.attr + 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExpr = windowExpr(count('b), winSpec) val winSpecAnalyzed = windowSpec( partitionSpec = '_w0.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed) val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1) val correctAnswer = testRelation .where('a + 'b > 1).select('a, 'b, 'c, ('a + 'b).as("_w0")) .window(winExprAnalyzed.as('window) :: Nil, '_w0 :: Nil, 'b.asc :: Nil) .select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: no predicate push down -- predicates are not from partitioning keys") { val winSpec = windowSpec( partitionSpec = 'a.attr :: 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExpr = windowExpr(count('b), winSpec) // No push down: the predicate is c > 1, but the partitioning key is (a, b). 
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('c > 1) val correctAnswer = testRelation.select('a, 'b, 'c) .window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil) .where('c > 1).select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: no predicate push down -- partial compound partition key") { val winSpec = windowSpec( partitionSpec = 'a.attr + 'b.attr :: 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExpr = windowExpr(count('b), winSpec) // No push down: the predicate is a > 1, but the partitioning key is (a + b, b) val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a > 1) val winSpecAnalyzed = windowSpec( partitionSpec = '_w0.attr :: 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed) val correctAnswer = testRelation.select('a, 'b, 'c, ('a + 'b).as("_w0")) .window(winExprAnalyzed.as('window) :: Nil, '_w0 :: 'b.attr :: Nil, 'b.asc :: Nil) .where('a > 1).select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("Window: no predicate push down -- complex predicates containing non partitioning columns") { val winSpec = windowSpec(partitionSpec = 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExpr = windowExpr(count('b), winSpec) // No push down: the predicate is a + b > 1, but the partitioning key is b. val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1) val correctAnswer = testRelation .select('a, 'b, 'c) .window(winExpr.as('window) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil) .where('a + 'b > 1).select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } // complex predicates with the same references but different expressions test("Window: no predicate push down -- complex predicate with different expressions") { val winSpec = windowSpec( partitionSpec = 'a.attr + 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExpr = windowExpr(count('b), winSpec) val winSpecAnalyzed = windowSpec( partitionSpec = '_w0.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame) val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed) // No push down: the predicate is a + b > 1, but the partitioning key is a + b. val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a - 'b > 1) val correctAnswer = testRelation.select('a, 'b, 'c, ('a + 'b).as("_w0")) .window(winExprAnalyzed.as('window) :: Nil, '_w0 :: Nil, 'b.asc :: Nil) .where('a - 'b > 1).select('a, 'b, 'c, 'window).analyze comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } test("watermark pushdown: no pushdown on watermark attribute #1") { val interval = new CalendarInterval(2, 2, 2000L) val relation = LocalRelation(attrA, 'b.timestamp, attrC) // Verify that all conditions except the watermark touching condition are pushed down // by the optimizer and others are not. 
val originalQuery = EventTimeWatermark('b, interval, relation) .where('a === 5 && 'b === new java.sql.Timestamp(0) && 'c === 5) val correctAnswer = EventTimeWatermark( 'b, interval, relation.where('a === 5 && 'c === 5)) .where('b === new java.sql.Timestamp(0)) comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze) } test("watermark pushdown: no pushdown for nondeterministic filter") { val interval = new CalendarInterval(2, 2, 2000L) val relation = LocalRelation(attrA, attrB, 'c.timestamp) // Verify that all conditions except the watermark touching condition are pushed down // by the optimizer and others are not. val originalQuery = EventTimeWatermark('c, interval, relation) .where('a === 5 && 'b === Rand(10) && 'c === new java.sql.Timestamp(0)) val correctAnswer = EventTimeWatermark( 'c, interval, relation.where('a === 5)) .where('b === Rand(10) && 'c === new java.sql.Timestamp(0)) comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze, checkAnalysis = false) } test("watermark pushdown: full pushdown") { val interval = new CalendarInterval(2, 2, 2000L) val relation = LocalRelation(attrA, attrB, 'c.timestamp) // Verify that all conditions except the watermark touching condition are pushed down // by the optimizer and others are not. val originalQuery = EventTimeWatermark('c, interval, relation) .where('a === 5 && 'b === 10) val correctAnswer = EventTimeWatermark( 'c, interval, relation.where('a === 5 && 'b === 10)) comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze, checkAnalysis = false) } test("watermark pushdown: no pushdown on watermark attribute #2") { val interval = new CalendarInterval(2, 2, 2000L) val relation = LocalRelation('a.timestamp, attrB, attrC) val originalQuery = EventTimeWatermark('a, interval, relation) .where('a === new java.sql.Timestamp(0) && 'b === 10) val correctAnswer = EventTimeWatermark( 'a, interval, relation.where('b === 10)).where('a === new java.sql.Timestamp(0)) comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze, checkAnalysis = false) } test("push down predicate through expand") { val query = Filter('a > 1, Expand( Seq( Seq('a, 'b, 'c, Literal.create(null, StringType), 1), Seq('a, 'b, 'c, 'a, 2)), Seq('a, 'b, 'c), testRelation)).analyze val optimized = Optimize.execute(query) val expected = Expand( Seq( Seq('a, 'b, 'c, Literal.create(null, StringType), 1), Seq('a, 'b, 'c, 'a, 2)), Seq('a, 'b, 'c), Filter('a > 1, testRelation)).analyze comparePlans(optimized, expected) } test("SPARK-28345: PythonUDF predicate should be able to pushdown to join") { val pythonUDFJoinCond = { val pythonUDF = PythonUDF("pythonUDF", null, IntegerType, Seq(attrA), PythonEvalType.SQL_BATCHED_UDF, udfDeterministic = true) pythonUDF === attrD } val query = testRelation.join( testRelation1, joinType = Cross).where(pythonUDFJoinCond) val expected = testRelation.join( testRelation1, joinType = Cross, condition = Some(pythonUDFJoinCond)).analyze comparePlans(Optimize.execute(query.analyze), expected) } test("push down filter predicates through inner join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y).where(("x.b".attr === "y.b".attr) && (simpleDisjunctivePredicate)) val optimized = Optimize.execute(originalQuery.analyze) comparePlans(optimized, expectedPredicatePushDownResult) } test("push down join predicates through inner join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, condition = 
Some(("x.b".attr === "y.b".attr) && (simpleDisjunctivePredicate))) val optimized = Optimize.execute(originalQuery.analyze) comparePlans(optimized, expectedPredicatePushDownResult) } test("push down complex predicates through inner join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val joinCondition = (("x.b".attr === "y.b".attr) && ((("x.a".attr === 5) && ("y.a".attr >= 2) && ("y.a".attr <= 3)) || (("x.a".attr === 2) && ("y.a".attr >= 1) && ("y.a".attr <= 14)) || (("x.a".attr === 1) && ("y.a".attr >= 9) && ("y.a".attr <= 27)))) val originalQuery = x.join(y, condition = Some(joinCondition)) val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where( ('a === 5 || 'a === 2 || 'a === 1)).subquery('x) val right = testRelation.where( ('a >= 2 && 'a <= 3) || ('a >= 1 && 'a <= 14) || ('a >= 9 && 'a <= 27)).subquery('y) val correctAnswer = left.join(right, condition = Some(joinCondition)).analyze comparePlans(optimized, correctAnswer) } test("push down predicates(with NOT predicate) through inner join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, condition = Some(("x.b".attr === "y.b".attr) && Not(("x.a".attr > 3) && ("x.a".attr < 2 || ("y.a".attr > 13)) || ("x.a".attr > 1) && ("y.a".attr > 11)))) val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a <= 3 || 'a >= 2).subquery('x) val right = testRelation.subquery('y) val correctAnswer = left.join(right, condition = Some("x.b".attr === "y.b".attr && (("x.a".attr <= 3) || (("x.a".attr >= 2) && ("y.a".attr <= 13))) && (("x.a".attr <= 1) || ("y.a".attr <= 11)))) .analyze comparePlans(optimized, correctAnswer) } test("push down predicates through left join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, joinType = LeftOuter, condition = Some(("x.b".attr === "y.b".attr) && simpleDisjunctivePredicate)) val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.subquery('x) val right = testRelation.where('a > 13 || 'a > 11).subquery('y) val correctAnswer = left.join(right, joinType = LeftOuter, condition = Some("x.b".attr === "y.b".attr && (("x.a".attr > 3) && ("y.a".attr > 13) || ("x.a".attr > 1) && ("y.a".attr > 11)))) .analyze comparePlans(optimized, correctAnswer) } test("push down predicates through right join") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, joinType = RightOuter, condition = Some(("x.b".attr === "y.b".attr) && simpleDisjunctivePredicate)) val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.where('a > 3 || 'a > 1).subquery('x) val right = testRelation.subquery('y) val correctAnswer = left.join(right, joinType = RightOuter, condition = Some("x.b".attr === "y.b".attr && (("x.a".attr > 3) && ("y.a".attr > 13) || ("x.a".attr > 1) && ("y.a".attr > 11)))) .analyze comparePlans(optimized, correctAnswer) } test("SPARK-32302: avoid generating too many predicates") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val originalQuery = x.join(y, condition = Some(("x.b".attr === "y.b".attr) && ((("x.a".attr > 3) && ("x.a".attr < 13) && ("y.c".attr <= 5)) || (("y.a".attr > 2) && ("y.c".attr < 1))))) val optimized = Optimize.execute(originalQuery.analyze) val left = testRelation.subquery('x) val right = testRelation.where('c <= 5 || ('a > 2 && 'c < 1)).subquery('y) val correctAnswer = left.join(right, condition = Some("x.b".attr === 
"y.b".attr && ((("x.a".attr > 3) && ("x.a".attr < 13) && ("y.c".attr <= 5)) || (("y.a".attr > 2) && ("y.c".attr < 1))))).analyze comparePlans(optimized, correctAnswer) } test("push down predicate through multiple joins") { val x = testRelation.subquery('x) val y = testRelation.subquery('y) val z = testRelation.subquery('z) val xJoinY = x.join(y, condition = Some("x.b".attr === "y.b".attr)) val originalQuery = z.join(xJoinY, condition = Some("x.a".attr === "z.a".attr && simpleDisjunctivePredicate)) val optimized = Optimize.execute(originalQuery.analyze) val left = x.where('a > 3 || 'a > 1) val right = y.where('a > 13 || 'a > 11) val correctAnswer = z.join(left.join(right, condition = Some("x.b".attr === "y.b".attr && simpleDisjunctivePredicate)), condition = Some("x.a".attr === "z.a".attr)).analyze comparePlans(optimized, correctAnswer) } }
wangmiao1981/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
Scala
apache-2.0
48,504
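The suite above illustrates the standard Catalyst optimizer-testing recipe: wrap the rules of interest in a RuleExecutor, run it over an analyzed plan, and comparePlans against a hand-built expected plan. Below is a minimal, hedged sketch of that recipe using only constructs that appear in the suite; the class name is invented and the single-rule batch is a deliberate simplification, not the batch used above.

package org.apache.spark.sql.catalyst.optimizer

import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor

class MinimalPushdownSketchSuite extends PlanTest {
  // A reduced rule batch: just enough to push a Filter below a Project.
  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch("Filter Pushdown", FixedPoint(10), PushPredicateThroughNonJoin) :: Nil
  }

  val relation = LocalRelation('a.int, 'b.int)

  test("filter is pushed below project (sketch)") {
    val query = relation.select('a).where('a === 1)            // Filter sits above the Project
    val expected = relation.where('a === 1).select('a).analyze // Filter pushed below the Project
    comparePlans(Optimize.execute(query.analyze), expected)
  }
}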
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import com.typesafe.config.ConfigFactory

object ExternalUrls {
  val conf = ConfigFactory.load

  val env = conf.getString("cjww.environment")

  val diagnostics = conf.getString(s"$env.cjww-diagnostics.url")
  val login       = conf.getString(s"$env.cjww-auth-service.login")
  val register    = conf.getString(s"$env.cjww-auth-service.register")
}
chrisjwwalker/cjww-diagnostics
app/config/ExternalUrls.scala
Scala
apache-2.0
1,089
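ExternalUrls simply resolves environment-prefixed URLs from Typesafe Config when the object is first touched. A hedged sketch of how such an object might be consumed from a Play controller follows; the controller and action names are hypothetical and not part of this repository.

package controllers

import play.api.mvc._

// Hypothetical controller: redirects the browser to the login URL resolved by config.ExternalUrls.
class RedirectToLoginController extends Controller {
  def login: Action[AnyContent] = Action {
    Redirect(config.ExternalUrls.login)
  }
}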
package reopp.workers.strategies import reopp.workers.Node import reopp.common._ import Utils.addID /** * Created by IntelliJ IDEA. * User: jose * Date: 04/05/12 * Time: 14:22 * To change this template use File | Settings | File Templates. */ trait Strategy[S<:Solution[S],C<:Constraints[S,C],St<:Strategy[S,C, St]] { type Nd = Node[S,C] val owned = scala.collection.mutable.Set[Nd]() val fringe = scala.collection.mutable.Set[Nd]() var droppedFringe = Set[Nd]() var triedSol: Option[NoneSol] = None // managed by the workers // abstract methods // // Find the next nodes (from the fringe) to expand to. def nextNodes: Iterable[Nd] // // Find the initial nodes based on a prefered node "n". // def initNodes(n:Nd): Iterable[Nd] // Checks if it makes sense to search now for a solution. def canSolve: Boolean /** Merges the information from another traversal. Should be extended by subclasses. * Right now it only merges the previous buffer. */ def merge(s:St) { //println("merging strats: "+triedSol+" - "+s.triedSol) (triedSol,s.triedSol) match { case (None,Some(NoneSol(Some(_)))) => triedSol = s.triedSol case (Some(NoneSol(None)),Some(NoneSol(Some(_)))) => triedSol = s.triedSol case (Some(NoneSol(Some(b1))),Some(NoneSol(Some(b2)))) => b1.safeImport(b2) case _ => {} } } def solve(implicit builder:CBuilder[S,C]): OptionSol[S] = { solve(triedSol) match { case s:NoneSol => triedSol = Some(s) s case s => s } } // aux functions private def solve(tried:Option[NoneSol])(implicit builder:CBuilder[S,C]): OptionSol[S] = { // debug("solving - "+owned.mkString(",")) // var beh = new Behaviour[S,C](val ends: List[String],val uid: Int) if (owned.isEmpty) return NoneSol() // get first node and behaviour val fstNode = owned.head val fstConn = fstNode.connector //println("building neighbour constraints = border + sync") var c = neighbourConstr(fstNode,fstConn.getConstraints.withID(fstNode.uid))(builder) // collect the constraints + neighbour constraints of owned ports, // avoiding adding repeated neighbours (via "included") -- DROPPED (reopp.common neighbours of 2 nodes must be added 2x) for (n <- (owned - fstNode)) { c ++= n.connector.getConstraints.withID(n.hashCode()) c = neighbourConstr(n,c)(builder) } // println("solving: "+c) val res = c.solve(tried) //if (res.isDefined) println("-------------\nSolved:\n"+res.get) //else println("failed") res } private def neighbourConstr(node:Nd, basec:C)(builder:CBuilder[S,C]): C = { var c = basec // var i = included for (n <- node.getNeighbours) { // i += n // node connected to n! if (owned contains n) { c = sync(node,n,c)(builder) } // node makes border with possible sync region else { c = border(node,n,c)(builder) } } c } private def sync(n1:Nd,n2:Nd, basec: C)(implicit cbuilder: CBuilder[S,C]): C = { val uid1 = n1.uid //connector.getID val uid2 = n2.uid //connector.getID var res = basec for (ends <- n1.getConnectedEndsTo(n2)) if (n1 hasSourceEnd ends._1) { res ++= cbuilder.sync(addID(ends._2,uid2),addID(ends._1,uid1)) } for (ends <- n2.getConnectedEndsTo(n1)) if (n2 hasSourceEnd ends._1) { res ++= cbuilder.sync(addID(ends._2,uid2),addID(ends._1,uid1)) } // for ((e1,u1,e2,u2) <- n1.flowconn) // if (u2 == uid2) res ++= cbuilder.sync(e1,u1,e2,u2) // for ((e2,u2,e1,u1) <- n2.flowconn) // if (u1 == uid1) res ++= cbuilder.sync(e2,u2,e1,u1) res } // n1 owend, n2 not owned -> border n1.ends inters. 
n2.ends private def border(n1:Nd,n2:Nd, basec: C)(implicit cbuilder: CBuilder[S,C]): C = { val uid1 = n1.uid //connector.getID var res = basec if (n1 connectedTo n2) { //println(s"connected: $n1 -- $n2") for (end <- n1.getConnectedEndsTo(n2)) { //println("noflow at "+end._1) res ++= cbuilder.noflow(addID(end._1,uid1)) } } // println("added borded. New constraints: "+c.commands.mkString(",")) res } def register(nds:Iterable[Nd]) { owned ++= nds fringe --= nds for (nd <- nds; nb <- nd.getNeighbours) extendFringe(nb) } def register(nd:Nd) { owned += nd fringe -= nd for (n <- nd.getNeighbours) extendFringe(n) } private def extendFringe(n:Nd) { // for (n <- nd) if (!(owned contains n)) fringe += n } def dropFromFringe(nd:Nd) { fringe -= nd droppedFringe += nd } def restore2fringe(nd:Nd) { droppedFringe -= nd fringe += nd } def restore2fringe() { fringe ++= droppedFringe droppedFringe = Set() } protected def debug(s:String) { // println(s"str[${hashCode.toString.substring(5)}] $s") } } abstract class StrategyBuilder[S <: Solution[S], C <: Constraints[S, C], St <: Strategy[S, C, St]] { def apply: St }
joseproenca/ip-constraints
code/src/main/scala/reopp/workers/strategies/Strategy.scala
Scala
mit
5,059
package com.airbnb.aerosolve.training.pipeline import com.airbnb.aerosolve.core.{Example, FeatureVector, LabelDictionaryEntry} import com.airbnb.aerosolve.core.models.{FullRankLinearModel, LinearModel} import com.airbnb.aerosolve.core.transforms.Transformer import com.airbnb.aerosolve.core.util.FloatVector import com.google.common.collect.{ImmutableMap, ImmutableSet} import com.typesafe.config.ConfigFactory import org.apache.spark.sql.hive.HiveContext import org.apache.spark.sql.types.StructType import org.apache.spark.sql.{Row, SQLContext} import org.apache.spark.{SparkConf, SparkContext} import org.mockito.Matchers._ import org.mockito.Mockito._ import scala.language.implicitConversions import scala.reflect.ClassTag import scala.reflect.runtime.universe.TypeTag /* * Misc. utilities that may be useful for testing Spark pipelines. */ object PipelineTestingUtil { val transformer = { val config = """ |identity_transform { | transform : list | transforms : [ ] |} | |model_transforms { | context_transform : identity_transform | item_transform : identity_transform | combined_transform : identity_transform |} """.stripMargin new Transformer(ConfigFactory.parseString(config), "model_transforms") } // Simple full rank linear model with 2 label classes and 2 features val fullRankLinearModel = { val model = new FullRankLinearModel() model.setLabelToIndex(ImmutableMap.of("label1", 0, "label2", 1)) val labelDictEntry1 = new LabelDictionaryEntry() labelDictEntry1.setLabel("label1") labelDictEntry1.setCount(50) val labelDictEntry2 = new LabelDictionaryEntry() labelDictEntry2.setLabel("label2") labelDictEntry2.setCount(100) val labelDictionary = new java.util.ArrayList[LabelDictionaryEntry]() labelDictionary.add(labelDictEntry1) labelDictionary.add(labelDictEntry2) model.setLabelDictionary(labelDictionary) val floatVector1 = new FloatVector(Array(1.2f, 2.1f)) val floatVector2 = new FloatVector(Array(3.4f, -1.2f)) model.setWeightVector( ImmutableMap.of( "f", ImmutableMap.of("feature1", floatVector1, "feature2", floatVector2) ) ) model } // Simple linear model with 2 features val linearModel = { val model = new LinearModel() model.setWeights(ImmutableMap.of("s", ImmutableMap.of("feature1", 1.4f, "feature2", 1.3f))) model } val multiclassExample1 = { val example = new Example() val fv = new FeatureVector() fv.setFloatFeatures(ImmutableMap.of( "f", ImmutableMap.of("feature1", 1.2, "feature2", 5.6), "LABEL", ImmutableMap.of("label1", 10.0, "label2", 9.0) )) example.addToExample(fv) example } val multiclassExample2 = { val example = new Example() val fv = new FeatureVector() fv.setFloatFeatures(ImmutableMap.of( "f", ImmutableMap.of("feature1", 1.8, "feature2", -1.6), "LABEL", ImmutableMap.of("label1", 8.0, "label2", 4.0) )) example.addToExample(fv) example } val linearExample1 = { val example = new Example() val fv = new FeatureVector() fv.setFloatFeatures(ImmutableMap.of( "LABEL", ImmutableMap.of("", 3.5) )) fv.setStringFeatures(ImmutableMap.of( "s", ImmutableSet.of("feature1", "feature2") )) example.addToExample(fv) example } val linearExample2 = { val example = new Example() val fv = new FeatureVector() fv.setFloatFeatures(ImmutableMap.of( "LABEL", ImmutableMap.of("", -2.0) )) fv.setStringFeatures(ImmutableMap.of( "s", ImmutableSet.of("feature1") )) example.addToExample(fv) example } def generateSparkContext = { val sparkConf = new SparkConf() .setMaster("local[2]") .setAppName("PipelineTestingUtil") .set("spark.io.compression.codec", "lz4") new SparkContext(sparkConf) } /* * Wrapper that generates a local 
SparkContext for a test and then * cleans everything up after test completion. */ def withSparkContext[B](f: SparkContext => B): B = { val sc = generateSparkContext try { f(sc) } finally { sc.stop System.clearProperty("spark.master.port") } } /* * Create a mock HiveContext that responds to sql calls by returning * the argument results in a SchemaRDD. * * Works for any case class. */ def createFakeHiveContext[A <: Product: TypeTag : ClassTag]( sc: SparkContext, results: Seq[A]): HiveContext = { val mockHiveContext = mock(classOf[HiveContext]) val sqlContext = new SQLContext(sc) when(mockHiveContext.sql(anyString())).thenReturn(sqlContext.createDataFrame(results)) mockHiveContext } /* * Create a fake row and schema for a given case class. */ def createFakeRowAndSchema[A <: Product: TypeTag : ClassTag]( sc: SparkContext, result: A): (Row, StructType) = { val sqlContext = new SQLContext(sc) val sqlResult = sqlContext.createDataFrame(Seq(result)) (sqlResult.head(), sqlResult.schema) } }
airbnb/aerosolve
training/src/test/scala/com/airbnb/aerosolve/training/pipeline/PipelineTestingUtil.scala
Scala
apache-2.0
5,276
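The helpers above are intended to be called from Spark pipeline tests. The following usage sketch relies only on the signatures shown in the file; the FakeRow case class and the query string are invented for illustration.

import com.airbnb.aerosolve.training.pipeline.PipelineTestingUtil

// Hypothetical row type used only for this example.
case class FakeRow(id: Long, score: Double)

object PipelineTestingUtilUsageSketch {
  def main(args: Array[String]): Unit = {
    PipelineTestingUtil.withSparkContext { sc =>
      // Mock HiveContext whose sql() always returns the given rows as a DataFrame.
      val hiveContext = PipelineTestingUtil.createFakeHiveContext(sc, Seq(FakeRow(1L, 0.5)))
      val df = hiveContext.sql("SELECT * FROM any_table") // the query text is ignored by the mock
      assert(df.count() == 1)

      // Build a single Row plus its schema for schema-dependent code paths.
      val (row, schema) = PipelineTestingUtil.createFakeRowAndSchema(sc, FakeRow(2L, 0.9))
      assert(schema.fieldNames.contains("score"))
      assert(row.getLong(0) == 2L)
    }
  }
}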
package code
package model

import net.liftweb.mapper._

object Task extends Task with LongKeyedMetaMapper[Task]

class Task extends LongKeyedMapper[Task] {
  def getSingleton = Task
  def primaryKeyField = id

  object id extends MappedLongIndex[Task](this)
  object parent extends MappedLongForeignKey[Task, Task](this, Task)
  object name extends MappedString[Task](this, 140)
  object description extends MappedString[Task](this, 300)
  object active extends MappedBoolean[Task](this)
  object color extends MappedString[Task](this, 140)
  object specifiable extends MappedBoolean[Task](this)
  object selectable extends MappedBoolean[Task](this)
}
dodie/time-admin
src/main/scala/code/model/Task.scala
Scala
apache-2.0
652
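Task is a plain Lift Mapper entity, so it is used through the usual MetaMapper CRUD calls. The sketch below shows typical create/query usage under that assumption; the field values are invented and schema creation (Schemifier) is assumed to happen in the application's Boot.

import code.model.Task
import net.liftweb.mapper.By

object TaskUsageSketch {
  // Field application sets the value and returns the mapper, so calls chain; saveMe() persists and
  // returns the saved instance.
  def createSampleTask(): Task =
    Task.create
      .name("Write report")
      .description("Quarterly summary")
      .active(true)
      .color("#ff0000")
      .specifiable(true)
      .selectable(true)
      .saveMe()

  // Look up all tasks currently marked active.
  def activeTasks(): List[Task] = Task.findAll(By(Task.active, true))
}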
/* sbt -- Simple Build Tool
 * Copyright 2008, 2009 Mark Harrah */
package sbt

import jline.{Completor, ConsoleReader, History}
import java.io.{File, PrintWriter}
import complete.Parser

abstract class JLine extends LineReader {
  protected[this] val handleCONT: Boolean
  protected[this] val reader: ConsoleReader
  protected[this] val historyPath: Option[File]

  def readLine(prompt: String, mask: Option[Char] = None) =
    JLine.withJLine { unsynchronizedReadLine(prompt, mask) }

  private[this] def unsynchronizedReadLine(prompt: String, mask: Option[Char]) =
    readLineWithHistory(prompt, mask) match {
      case null => None
      case x => Some(x.trim)
    }

  private[this] def readLineWithHistory(prompt: String, mask: Option[Char]): String =
    historyPath match {
      case None => readLineDirect(prompt, mask)
      case Some(file) =>
        val h = reader.getHistory
        JLine.loadHistory(h, file)
        try { readLineDirect(prompt, mask) }
        finally { JLine.saveHistory(h, file) }
    }

  private[this] def readLineDirect(prompt: String, mask: Option[Char]): String =
    if (handleCONT)
      Signals.withHandler(() => resume(), signal = Signals.CONT)(() => readLineDirectRaw(prompt, mask))
    else
      readLineDirectRaw(prompt, mask)

  private[this] def readLineDirectRaw(prompt: String, mask: Option[Char]): String =
    mask match {
      case Some(m) => reader.readLine(prompt, m)
      case None => reader.readLine(prompt)
    }

  private[this] def resume() {
    jline.Terminal.resetTerminal
    JLine.terminal.disableEcho()
    reader.drawLine()
    reader.flushConsole()
  }
}

private object JLine {
  // When calling this, ensure that enableEcho has been or will be called.
  // getTerminal will initialize the terminal to disable echo.
  private def terminal = jline.Terminal.getTerminal

  private def withTerminal[T](f: jline.Terminal => T): T =
    synchronized {
      val t = terminal
      t.synchronized { f(t) }
    }

  /** For accessing the JLine Terminal object.
   * This ensures synchronized access as well as re-enabling echo after getting the Terminal. */
  def usingTerminal[T](f: jline.Terminal => T): T =
    withTerminal { t =>
      t.enableEcho()
      f(t)
    }

  def createReader() =
    usingTerminal { t =>
      val cr = new ConsoleReader
      cr.setBellEnabled(false)
      cr
    }

  def withJLine[T](action: => T): T =
    withTerminal { t =>
      t.disableEcho()
      try { action }
      finally { t.enableEcho() }
    }

  private[sbt] def loadHistory(h: History, file: File) {
    h.setMaxSize(MaxHistorySize)
    if (file.isFile) IO.reader(file)(h.load)
  }

  private[sbt] def saveHistory(h: History, file: File): Unit =
    Using.fileWriter()(file) { writer =>
      val out = new PrintWriter(writer, false)
      h.setOutput(out)
      h.flushBuffer()
      out.close()
      h.setOutput(null)
    }

  def simple(historyPath: Option[File], handleCONT: Boolean = HandleCONT): SimpleReader =
    new SimpleReader(historyPath, handleCONT)

  val MaxHistorySize = 500
  val HandleCONT = !java.lang.Boolean.getBoolean("sbt.disable.cont") && Signals.supported(Signals.CONT)
}

trait LineReader {
  def readLine(prompt: String, mask: Option[Char] = None): Option[String]
}

final class FullReader(val historyPath: Option[File], complete: Parser[_],
    val handleCONT: Boolean = JLine.HandleCONT) extends JLine {
  protected[this] val reader = {
    val cr = new ConsoleReader
    cr.setBellEnabled(false)
    sbt.complete.JLineCompletion.installCustomCompletor(cr, complete)
    cr
  }
}

class SimpleReader private[sbt] (val historyPath: Option[File], val handleCONT: Boolean) extends JLine {
  protected[this] val reader = JLine.createReader()
}

object SimpleReader extends SimpleReader(None, JLine.HandleCONT)
gilt/xsbt
util/complete/LineReader.scala
Scala
bsd-3-clause
3,630
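The readers above are all driven through LineReader.readLine, which returns None on EOF and Some(trimmed line) otherwise. A small usage sketch against the SimpleReader companion follows; the prompt strings are illustrative only.

import sbt.SimpleReader

object LineReaderUsageSketch {
  def main(args: Array[String]): Unit = {
    // Plain prompt: None on EOF, Some(trimmed input) otherwise.
    SimpleReader.readLine("sbt> ") match {
      case Some(command) => println("read command: " + command)
      case None          => println("no input (EOF)")
    }

    // Masked prompt, e.g. for passwords: each typed character echoes as '*'.
    val secret = SimpleReader.readLine("password: ", mask = Some('*'))
    println("got " + secret.map(_.length).getOrElse(0) + " characters")
  }
}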
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.platanios.tensorflow.api.learn

/**
  * @author Emmanouil Antonios Platanios
  */
package object hooks {
  private[api] trait API extends HookTrigger.API {
    type Hook = hooks.Hook
    type CheckpointSaver = hooks.CheckpointSaver
    type Evaluator[IT, IO, ID, IS, I, TT, TO, TD, TS, EI] = hooks.Evaluator[IT, IO, ID, IS, I, TT, TO, TD, TS, EI]
    type LossLogger = hooks.LossLogger
    type ModelDependentHook[IT, IO, ID, IS, I, TT, TO, TD, TS, EI] =
      hooks.ModelDependentHook[IT, IO, ID, IS, I, TT, TO, TD, TS, EI]
    type NaNChecker = hooks.NaNChecker
    type StepRateLogger = hooks.StepRateLogger
    type Stopper = hooks.Stopper
    type SummarySaver = hooks.SummarySaver
    type SummaryWriterHookAddOn = hooks.SummaryWriterHookAddOn
    type TensorBoardHook = hooks.TensorBoardHook
    type TensorLogger = hooks.TensorLogger
    type TimelineHook = hooks.TimelineHook
    type TriggeredHook = hooks.TriggeredHook

    val CheckpointSaver: hooks.CheckpointSaver.type = hooks.CheckpointSaver
    val Evaluator      : hooks.Evaluator.type       = hooks.Evaluator
    val LossLogger     : hooks.LossLogger.type      = hooks.LossLogger
    val NaNChecker     : hooks.NaNChecker.type      = hooks.NaNChecker
    val StepRateLogger : hooks.StepRateLogger.type  = hooks.StepRateLogger
    val Stopper        : hooks.Stopper.type         = hooks.Stopper
    val SummarySaver   : hooks.SummarySaver.type    = hooks.SummarySaver
    val TensorBoardHook: hooks.TensorBoardHook.type = hooks.TensorBoardHook
    val TensorLogger   : hooks.TensorLogger.type    = hooks.TensorLogger
    val TimelineHook   : hooks.TimelineHook.type    = hooks.TimelineHook
  }

  private[api] object API extends API
}
eaplatanios/tensorflow
tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/learn/hooks/package.scala
Scala
apache-2.0
2,350
/** * Copyright 2015 Mohiva Organisation (license at mohiva dot com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.mohiva.play.silhouette.api.actions import javax.inject.Inject import akka.actor.{ Actor, Props } import akka.testkit.TestProbe import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.actions.SecuredActionSpec._ import com.mohiva.play.silhouette.api.exceptions.{ NotAuthenticatedException, NotAuthorizedException } import com.mohiva.play.silhouette.api.services.{ AuthenticatorResult, AuthenticatorService, IdentityService } import net.codingwell.scalaguice.ScalaModule import org.specs2.control.NoLanguageFeatures import org.specs2.matcher.JsonMatchers import org.specs2.mock.Mockito import org.specs2.specification.Scope import play.api.i18n.{ Lang, Messages, MessagesApi } import play.api.inject.bind import play.api.inject.guice.GuiceApplicationBuilder import play.api.libs.concurrent.Akka import play.api.libs.concurrent.Execution.Implicits._ import play.api.libs.json.Json import play.api.mvc.Results._ import play.api.mvc._ import play.api.test.{ FakeRequest, PlaySpecification, WithApplication } import scala.concurrent.Future import scala.concurrent.duration._ import scala.language.postfixOps import scala.reflect.ClassTag /** * Test case for the [[com.mohiva.play.silhouette.api.actions.SecuredActionSpec]]. 
 */
class SecuredActionSpec extends PlaySpecification with Mockito with JsonMatchers with NoLanguageFeatures {

  "The `SecuredAction` action" should {
    "restrict access if no valid authenticator can be retrieved" in new InjectorContext {
      new WithApplication(app) with Context {
        withEvent[NotAuthenticatedEvent] {
          env.authenticatorService.retrieve(any) returns Future.successful(None)
          val result = controller.defaultAction(request)
          status(result) must equalTo(UNAUTHORIZED)
          contentAsString(result) must contain("global.not.authenticated")
          theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request))
        }
      }
    }

    "restrict access and discard authenticator if an invalid authenticator can be retrieved" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator.copy(isValid = false)))
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        withEvent[NotAuthenticatedEvent] {
          val result = controller.defaultAction(request)
          status(result) must equalTo(UNAUTHORIZED)
          contentAsString(result) must contain("global.not.authenticated")
          there was one(env.authenticatorService).discard(any, any)(any)
          theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request))
        }
      }
    }

    "restrict access and discard authenticator if no identity could be found for an authenticator" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(None)
        withEvent[NotAuthenticatedEvent] {
          val result = controller.defaultAction(request)
          status(result) must equalTo(UNAUTHORIZED)
          contentAsString(result) must contain("global.not.authenticated")
          there was one(env.authenticatorService).discard(any, any)(any)
          theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request))
        }
      }
    }

    "display local not-authenticated result if user isn't authenticated[authorization and error handler]" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(None)
        val result = controller.actionWithAuthorizationAndErrorHandler(request)
        status(result) must equalTo(UNAUTHORIZED)
        contentAsString(result) must contain("local.not.authenticated")
      }
    }

    "display local not-authenticated result if user isn't authenticated[error handler only]" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(None)
        val result = controller.actionWithErrorHandler(request)
        status(result) must equalTo(UNAUTHORIZED)
        contentAsString(result) must contain("local.not.authenticated")
      }
    }

    "display global not-authenticated result if user isn't authenticated" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(None)
        val result = controller.defaultAction(request)
        status(result) must equalTo(UNAUTHORIZED)
        contentAsString(result) must contain("global.not.authenticated")
      }
    }

    "restrict access and update authenticator if a user is authenticated but not authorized" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        authorization.isAuthorized(any, any)(any) returns Future.successful(false)
        withEvent[NotAuthorizedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(FORBIDDEN)
          contentAsString(result) must contain("global.not.authorized")
          there was one(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, NotAuthorizedEvent(identity, request))
        }
      }
    }

    "display local not-authorized result if user isn't authorized" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        authorization.isAuthorized(any, any)(any) returns Future.successful(false)
        val result = controller.actionWithAuthorizationAndErrorHandler(request)
        status(result) must equalTo(FORBIDDEN)
        contentAsString(result) must contain("local.not.authorized")
        there was one(env.authenticatorService).touch(any)
        there was one(env.authenticatorService).update(any, any)(any)
      }
    }

    "display global not-authorized result if user isn't authorized" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        authorization.isAuthorized(any, any)(any) returns Future.successful(false)
        val result = controller.actionWithAuthorization(request)
        status(result) must equalTo(FORBIDDEN)
        contentAsString(result) must contain("global.not.authorized")
        there was one(env.authenticatorService).touch(any)
        there was one(env.authenticatorService).update(any, any)(any)
      }
    }

    "invoke action without authorization if user is authenticated" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.defaultAction(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "invoke action with authorization if user is authenticated but not authorized" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "use next request provider in the chain if first isn't responsible" in new InjectorContext with WithRequestProvider {
      new WithApplication(app) with Context {
        tokenRequestProvider.authenticate(any) returns Future.successful(None)
        basicAuthRequestProvider.authenticate(any) returns Future.successful(Some(identity.loginInfo))
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.create(any)(any) returns Future.successful(authenticator)
        env.authenticatorService.init(any)(any) answers { p => Future.successful(p.asInstanceOf[FakeAuthenticator#Value]) }
        env.authenticatorService.embed(any, any[Result])(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).create(any)(any)
          there was one(env.authenticatorService).init(any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "update an initialized authenticator if it was touched" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "do not update an initialized authenticator if it was not touched" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Right(authenticator)
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).touch(any)
          there was no(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "init an uninitialized authenticator" in new InjectorContext with WithRequestProvider {
      new WithApplication(app) with Context {
        tokenRequestProvider.authenticate(any) returns Future.successful(Some(identity.loginInfo))
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.create(any)(any) returns Future.successful(authenticator)
        env.authenticatorService.init(any)(any) answers { p => Future.successful(p.asInstanceOf[FakeAuthenticator#Value]) }
        env.authenticatorService.embed(any, any[Result])(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.actionWithAuthorization(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("full.access")
          there was one(env.authenticatorService).create(any)(any)
          there was one(env.authenticatorService).init(any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "renew an initialized authenticator" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.renew(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.renewAction(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("renewed")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).renew(any, any)(any)
          there was no(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "renew an uninitialized authenticator" in new InjectorContext with WithRequestProvider {
      new WithApplication(app) with Context {
        tokenRequestProvider.authenticate(any) returns Future.successful(Some(identity.loginInfo))
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.create(any)(any) returns Future.successful(authenticator)
        env.authenticatorService.renew(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.renewAction(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("renewed")
          there was one(env.authenticatorService).create(any)(any)
          there was one(env.authenticatorService).renew(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "discard an initialized authenticator" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.discardAction(request)
          status(result) must equalTo(OK)
          contentAsString(result) must contain("discarded")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).discard(any, any)(any)
          there was no(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "discard an uninitialized authenticator" in new InjectorContext with WithRequestProvider {
      new WithApplication(app) with Context {
        tokenRequestProvider.authenticate(any) returns Future.successful(Some(identity.loginInfo))
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.create(any)(any) returns Future.successful(authenticator)
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.discardAction(request)
          status(result) must equalTo(OK)
          there was one(env.authenticatorService).create(any)(any)
          there was one(env.authenticatorService).discard(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request))
        }
      }
    }

    "handle an Ajax request" in new InjectorContext {
      new WithApplication(app) with Context {
        implicit val req = FakeRequest().withHeaders("Accept" -> "application/json")
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        withEvent[AuthenticatedEvent[FakeIdentity]] {
          val result = controller.defaultAction(req)
          status(result) must equalTo(OK)
          contentType(result) must beSome("application/json")
          contentAsString(result) must /("result" -> "full.access")
          there was one(env.authenticatorService).touch(any)
          there was one(env.authenticatorService).update(any, any)(any)
          theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, req))
        }
      }
    }
  }

  "The `SecureRequestHandler`" should {
    "return status 401 if authentication was not successful" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        val result = controller.defaultHandler(request)
        status(result) must equalTo(UNAUTHORIZED)
        there was no(env.authenticatorService).touch(any)
        there was no(env.authenticatorService).update(any, any)(any)
      }
    }

    "return the user if authentication was successful" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(Some(authenticator))
        env.authenticatorService.touch(any) returns Left(authenticator)
        env.authenticatorService.update(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity))
        val result = controller.defaultHandler(request)
        status(result) must equalTo(OK)
        contentAsString(result) must */("providerID" -> "test") and */("providerKey" -> "1")
        there was one(env.authenticatorService).touch(any)
        there was one(env.authenticatorService).update(any, any)(any)
      }
    }
  }

  "The `exceptionHandler` method of the SecuredErrorHandler" should {
    "translate an ForbiddenException into a 403 Forbidden result" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        val failed = Future.failed(new NotAuthorizedException("Access denied"))
        val result = controller.recover(failed)
        status(result) must equalTo(FORBIDDEN)
      }
    }

    "translate an UnauthorizedException into a 401 Unauthorized result" in new InjectorContext {
      new WithApplication(app) with Context {
        env.authenticatorService.retrieve(any) returns Future.successful(None)
        env.authenticatorService.discard(any, any)(any) answers { (a, m) =>
          Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result]))
        }
        val failed = Future.failed(new NotAuthenticatedException("Not authenticated"))
        val result = controller.recover(failed)
        status(result) must equalTo(UNAUTHORIZED)
      }
    }
  }

  /**
   * The injector context.
   */
  trait InjectorContext extends Scope {

    /**
     * The Silhouette environment.
     */
    lazy val env = Environment[SecuredEnv](
      mock[IdentityService[SecuredEnv#I]],
      mock[AuthenticatorService[SecuredEnv#A]],
      Seq(),
      new EventBus
    )

    /**
     * An authorization mock.
     */
    lazy val authorization = {
      val a = mock[Authorization[SecuredEnv#I, SecuredEnv#A]]
      a.isAuthorized(any, any)(any) returns Future.successful(true)
      a
    }

    /**
     * The guice application builder.
     */
    lazy val app = new GuiceApplicationBuilder()
      .bindings(new GuiceModule)
      .overrides(bind[SecuredErrorHandler].to[GlobalSecuredErrorHandler])
      .build()

    /**
     * The guice module.
     */
    class GuiceModule extends ScalaModule {
      def configure(): Unit = {
        bind[Environment[SecuredEnv]].toInstance(env)
        bind[Authorization[SecuredEnv#I, SecuredEnv#A]].toInstance(authorization)
        bind[Silhouette[SecuredEnv]].to[SilhouetteProvider[SecuredEnv]]
        bind[SecuredController]
      }
    }

    /**
     * The context.
     */
    trait Context {
      self: WithApplication =>

      /**
       * An identity.
       */
      lazy val identity = new FakeIdentity(LoginInfo("test", "1"))

      /**
       * An authenticator.
       */
      lazy val authenticator = new FakeAuthenticator(LoginInfo("test", "1"))

      /**
       * A fake request.
       */
      lazy implicit val request = FakeRequest()

      /**
       * The messages API.
       */
      lazy implicit val messagesApi = app.injector.instanceOf[MessagesApi]

      /**
       * The secured controller.
       */
      lazy implicit val controller = app.injector.instanceOf[SecuredController]

      /**
       * The messages for the current language.
       */
      lazy implicit val messages = Messages(Lang.defaultLang, messagesApi)

      /**
       * The Play actor system.
       */
      lazy implicit val system = Akka.system

      /**
       * The test probe.
       */
      lazy val theProbe = TestProbe()

      /**
       * Executes a block after event bus initialization, so that the event can be handled inside the given block.
       *
       * @param ct The class tag of the event.
       * @tparam T The type of the event to handle.
       * @return The result of the block.
       */
      def withEvent[T <: SilhouetteEvent](block: => Any)(implicit ct: ClassTag[T]) = {
        val listener = system.actorOf(Props(new Actor {
          def receive = {
            case e: T => theProbe.ref ! e
          }
        }))
        env.eventBus.subscribe(listener, ct.runtimeClass.asInstanceOf[Class[T]])
        block
      }
    }
  }

  /**
   * Adds some request providers in scope.
   *
   * We add two providers in scope to test the chaining of this providers.
   */
  trait WithRequestProvider {
    self: InjectorContext =>

    /**
     * A mock that simulates a token request provider.
     */
    lazy val tokenRequestProvider = mock[RequestProvider]

    /**
     * A mock that simulates a basic auth request provider.
     */
    lazy val basicAuthRequestProvider = mock[RequestProvider]

    /**
     * A non request provider.
     */
    lazy val nonRequestProvider = mock[RequestProvider]

    /**
     * The Silhouette environment.
     */
    override lazy val env = Environment[SecuredEnv](
      mock[IdentityService[FakeIdentity]],
      mock[AuthenticatorService[FakeAuthenticator]],
      Seq(
        tokenRequestProvider,
        basicAuthRequestProvider,
        nonRequestProvider
      ),
      new EventBus
    )
  }
}

/**
 * The companion object.
 */
object SecuredActionSpec {

  /**
   * The environment type.
   */
  trait SecuredEnv extends Env {
    type I = FakeIdentity
    type A = FakeAuthenticator
  }

  /**
   * A test identity.
   *
   * @param loginInfo The linked login info.
   */
  case class FakeIdentity(loginInfo: LoginInfo) extends Identity

  /**
   * A test authenticator.
   *
   * @param loginInfo The linked login info.
   */
  case class FakeAuthenticator(loginInfo: LoginInfo, isValid: Boolean = true) extends Authenticator

  /**
   * A simple authorization class.
   *
   * @param isAuthorized True if the access is authorized, false otherwise.
   */
  case class SimpleAuthorization(isAuthorized: Boolean = true) extends Authorization[FakeIdentity, FakeAuthenticator] {

    /**
     * Checks whether the user is authorized to execute an action or not.
     *
     * @param identity The current identity instance.
     * @param authenticator The current authenticator instance.
     * @param request The current request header.
     * @tparam B The type of the request body.
     * @return True if the user is authorized, false otherwise.
     */
    def isAuthorized[B](identity: FakeIdentity, authenticator: FakeAuthenticator)(
      implicit request: Request[B]): Future[Boolean] = {
      Future.successful(isAuthorized)
    }
  }

  /**
   * The global secured error handler.
   */
  class GlobalSecuredErrorHandler extends SecuredErrorHandler {

    /**
     * Called when a user is not authenticated.
     *
     * As defined by RFC 2616, the status code of the response should be 401 Unauthorized.
     *
     * @param request The request header.
     * @return The result to send to the client.
     */
    def onNotAuthenticated(implicit request: RequestHeader): Future[Result] = {
      Future.successful(Unauthorized("global.not.authenticated"))
    }

    /**
     * Called when a user is authenticated but not authorized.
     *
     * As defined by RFC 2616, the status code of the response should be 403 Forbidden.
     *
     * @param request The request header.
     * @return The result to send to the client.
     */
    def onNotAuthorized(implicit request: RequestHeader) = {
      Future.successful(Forbidden("global.not.authorized"))
    }
  }

  /**
   * A secured controller.
   *
   * @param silhouette The Silhouette stack.
   * @param authorization An authorization implementation.
   */
  class SecuredController @Inject() (
    silhouette: Silhouette[SecuredEnv],
    authorization: Authorization[FakeIdentity, FakeAuthenticator])
    extends Controller {

    /**
     * A local error handler.
     */
    lazy val errorHandler = new SecuredErrorHandler {
      override def onNotAuthenticated(implicit request: RequestHeader) = {
        Future.successful(Unauthorized("local.not.authenticated"))
      }
      override def onNotAuthorized(implicit request: RequestHeader) = {
        Future.successful(Forbidden("local.not.authorized"))
      }
    }

    /**
     * A secured action.
     *
     * @return The result to send to the client.
     */
    def defaultAction = silhouette.SecuredAction { implicit request =>
      render {
        case Accepts.Json() => Ok(Json.obj("result" -> "full.access"))
        case Accepts.Html() => Ok("full.access")
      }
    }

    /**
     * A secured action with an authorization and a custom error handler.
     *
     * @return The result to send to the client.
     */
    def actionWithAuthorizationAndErrorHandler = silhouette.SecuredAction(authorization)(errorHandler) { Ok }

    /**
     * A secured action with a custom error handler.
     *
     * @return The result to send to the client.
     */
    def actionWithErrorHandler = silhouette.SecuredAction(errorHandler) { Ok("full.access") }

    /**
     * A secured action with authorization.
     *
     * @return The result to send to the client.
     */
    def actionWithAuthorization = silhouette.SecuredAction(authorization) { Ok("full.access") }

    /**
     * A secured renew action.
     *
     * @return The result to send to the client.
     */
    def renewAction = silhouette.SecuredAction.async { implicit request =>
      silhouette.env.authenticatorService.renew(request.authenticator, Ok("renewed"))
    }

    /**
     * A secured discard action.
     *
     * @return The result to send to the client.
     */
    def discardAction = silhouette.SecuredAction.async { implicit request =>
      silhouette.env.authenticatorService.discard(request.authenticator, Ok("discarded"))
    }

    /**
     * A secured request handler.
     */
    def defaultHandler = Action.async { implicit request =>
      silhouette.SecuredRequestHandler { securedRequest =>
        Future.successful(HandlerResult(Ok, Some(securedRequest.identity)))
      }.map {
        case HandlerResult(r, Some(user)) => Ok(Json.toJson(user.loginInfo))
        case HandlerResult(r, None) => Unauthorized
      }
    }

    /**
     * Method to test the `exceptionHandler` method of the [[SecuredErrorHandler]].
     *
     * @param f The future to recover from.
     * @param request The request header.
     * @return The result to send to the client.
     */
    def recover(f: Future[Result])(implicit request: RequestHeader): Future[Result] = {
      f.recoverWith(silhouette.SecuredAction.requestHandler.errorHandler.exceptionHandler)
    }
  }
}
cemcatik/play-silhouette
silhouette/test/com/mohiva/play/silhouette/api/actions/SecuredActionSpec.scala
Scala
apache-2.0
33,792
package com.github.etacassiopeia.s99.list

/**
 * <h1>P24</h1>
 * Lotto: Draw N different random numbers from the set 1..M
 *
 * @author Mohsen Zainalpour
 * @version 1.0
 * @since 2/05/16
 */
object P24 {
  def main(args: Array[String]) {
    println(lotto(6, 49))
  }

  def lotto(n: Int, m: Int): List[Int] = {
    import P23.randomSelect
    // The set is 1..M inclusive; List.range excludes the upper bound, so use m + 1.
    val list = List.range(1, m + 1)
    randomSelect(n, list)
  }
}
EtaCassiopeia/S-99
src/main/scala/com/github/etacassiopeia/s99/list/P24.scala
Scala
apache-2.0
416
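
The lotto solution above delegates to P23.randomSelect, which is not included in this excerpt. As a rough sketch only (the actual P23 implementation in the repository may differ), a compatible randomSelect could look like the following; the object name P23Sketch, the helper removeAt, and the use of scala.util.Random are assumptions made for this illustration.

object P23Sketch {
  import scala.util.Random

  // Remove the element at index n, returning the remaining list together with the removed element.
  def removeAt[A](n: Int, xs: List[A]): (List[A], A) =
    (xs.take(n) ::: xs.drop(n + 1), xs(n))

  // Pick n elements from xs at random, without repetition.
  def randomSelect[A](n: Int, xs: List[A]): List[A] =
    if (n <= 0 || xs.isEmpty) Nil
    else {
      val (rest, picked) = removeAt(Random.nextInt(xs.length), xs)
      picked :: randomSelect(n - 1, rest)
    }
}

// Example: P23Sketch.randomSelect(6, List.range(1, 50)) draws six distinct numbers from 1..49.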
package com.datastax.spark.connector.sql

class CassandraPrunedScanSpec extends CassandraDataSourceSpec {
  override def pushDown = false
}
viirya/spark-cassandra-connector
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/sql/CassandraPrunedScanSpec.scala
Scala
apache-2.0
139
/*******************************************************************************
 * (C) Copyright 2015 Haifeng Li
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

package smile

/** Manifold learning finds a low-dimensional basis for describing
  * high-dimensional data. Manifold learning is a popular approach to nonlinear
  * dimensionality reduction. Algorithms for this task are based on the idea
  * that the dimensionality of many data sets is only artificially high; though
  * each data point consists of perhaps thousands of features, it may be
  * described as a function of only a few underlying parameters. That is, the
  * data points are actually samples from a low-dimensional manifold that is
  * embedded in a high-dimensional space. Manifold learning algorithms attempt
  * to uncover these parameters in order to find a low-dimensional representation
  * of the data.
  *
  * Some prominent approaches are locally linear embedding
  * (LLE), Hessian LLE, Laplacian eigenmaps, and LTSA. These techniques
  * construct a low-dimensional data representation using a cost function
  * that retains local properties of the data, and can be viewed as defining
  * a graph-based kernel for Kernel PCA. More recently, techniques have been
  * proposed that, instead of defining a fixed kernel, try to learn the kernel
  * using semidefinite programming. The most prominent example of such a
  * technique is maximum variance unfolding (MVU). The central idea of MVU
  * is to exactly preserve all pairwise distances between nearest neighbors
  * (in the inner product space), while maximizing the distances between points
  * that are not nearest neighbors.
  *
  * An alternative approach to neighborhood preservation is through the
  * minimization of a cost function that measures differences between
  * distances in the input and output spaces. Important examples of such
  * techniques include classical multidimensional scaling (which is identical
  * to PCA), Isomap (which uses geodesic distances in the data space), diffusion
  * maps (which uses diffusion distances in the data space), t-SNE (which
  * minimizes the divergence between distributions over pairs of points),
  * and curvilinear component analysis.
  *
  * @author Haifeng Li
  */
package object manifold extends Operators {

}
arehart13/smile
scala/src/main/scala/smile/manifold/package.scala
Scala
apache-2.0
2,906
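
The package documentation above groups LLE, Laplacian eigenmaps, Isomap and related methods around a shared first step: a k-nearest-neighbor graph over the samples. The sketch below only illustrates that shared step in plain Scala; it is not smile's actual API, and the names KnnGraphSketch and knnGraph are invented for this example.

object KnnGraphSketch {
  // Squared Euclidean distance between two points of equal dimension.
  private def dist2(a: Array[Double], b: Array[Double]): Double =
    a.zip(b).map { case (x, y) => (x - y) * (x - y) }.sum

  // For each sample, the indices of its k nearest neighbors (excluding itself).
  def knnGraph(data: Array[Array[Double]], k: Int): Array[Array[Int]] =
    data.indices.map { i =>
      data.indices
        .filter(_ != i)
        .sortBy(j => dist2(data(i), data(j)))
        .take(k)
        .toArray
    }.toArray
}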
package edu.rice.habanero.benchmarks.philosopher

import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import edu.rice.habanero.actors.{JumiActor, JumiActorState, JumiPool}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}

/**
 *
 * @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
 */
object PhilosopherJumiActorBenchmark {

  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new PhilosopherJumiActorBenchmark)
  }

  private final class PhilosopherJumiActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      PhilosopherConfig.parseArgs(args)
    }

    def printArgInfo() {
      PhilosopherConfig.printArgs()
    }

    def runIteration() {
      val counter = new AtomicLong(0)

      val arbitrator = new ArbitratorActor(PhilosopherConfig.N)
      arbitrator.start()

      val philosophers = Array.tabulate[JumiActor[AnyRef]](PhilosopherConfig.N)(i => {
        val loopActor = new PhilosopherActor(i, PhilosopherConfig.M, counter, arbitrator)
        loopActor.start()
        loopActor
      })

      philosophers.foreach(loopActor => {
        loopActor.send(StartMessage())
      })

      JumiActorState.awaitTermination()

      println(" Num retries: " + counter.get())
      track("Avg. Retry Count", counter.get())
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
      if (lastIteration) {
        JumiPool.shutdown()
      }
    }
  }

  case class StartMessage()
  case class ExitMessage()
  case class HungryMessage(philosopher: JumiActor[AnyRef], philosopherId: Int)
  case class DoneMessage(philosopherId: Int)
  case class EatMessage()
  case class DeniedMessage()

  private class PhilosopherActor(id: Int, rounds: Int, counter: AtomicLong, arbitrator: ArbitratorActor) extends JumiActor[AnyRef] {

    private val self = this
    private var localCounter = 0L
    private var roundsSoFar = 0

    private val myHungryMessage = HungryMessage(self, id)
    private val myDoneMessage = DoneMessage(id)

    override def process(msg: AnyRef) {
      msg match {
        case dm: DeniedMessage =>
          localCounter += 1
          arbitrator.send(myHungryMessage)

        case em: EatMessage =>
          roundsSoFar += 1
          counter.addAndGet(localCounter)
          arbitrator.send(myDoneMessage)
          if (roundsSoFar < rounds) {
            self.send(StartMessage())
          } else {
            arbitrator.send(ExitMessage())
            exit()
          }

        case sm: StartMessage =>
          arbitrator.send(myHungryMessage)
      }
    }
  }

  private class ArbitratorActor(numForks: Int) extends JumiActor[AnyRef] {

    private val forks = Array.tabulate(numForks)(i => new AtomicBoolean(false))
    private var numExitedPhilosophers = 0

    override def process(msg: AnyRef) {
      msg match {
        case hm: HungryMessage =>
          val leftFork = forks(hm.philosopherId)
          val rightFork = forks((hm.philosopherId + 1) % numForks)

          if (leftFork.get() || rightFork.get()) {
            // someone else has access to the fork
            hm.philosopher.send(DeniedMessage())
          } else {
            leftFork.set(true)
            rightFork.set(true)
            hm.philosopher.send(EatMessage())
          }

        case dm: DoneMessage =>
          val leftFork = forks(dm.philosopherId)
          val rightFork = forks((dm.philosopherId + 1) % numForks)
          leftFork.set(false)
          rightFork.set(false)

        case em: ExitMessage =>
          numExitedPhilosophers += 1
          if (numForks == numExitedPhilosophers) {
            exit()
          }
      }
    }
  }

}
shamsmahmood/savina
src/main/scala/edu/rice/habanero/benchmarks/philosopher/PhilosopherJumiActorBenchmark.scala
Scala
gpl-2.0
3,740
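
The benchmark above avoids deadlock by routing every fork request through a single arbitrator that either grants both forks at once or denies the request, so a philosopher never holds one fork while waiting for the other. The stand-alone sketch below distills that grant/deny rule without the Jumi actor framework; the names ArbitratorRuleSketch, ForkTable, tryAcquire, and release are invented for this illustration and are not part of the benchmark.

object ArbitratorRuleSketch {
  // Fork ownership table guarded by the arbitrator; true means the fork is in use.
  final class ForkTable(numForks: Int) {
    private val forks = Array.fill(numForks)(false)

    // Grant both forks of philosopher `id` atomically, or grant nothing (the caller must retry).
    def tryAcquire(id: Int): Boolean = synchronized {
      val left = id
      val right = (id + 1) % numForks
      if (forks(left) || forks(right)) false
      else {
        forks(left) = true
        forks(right) = true
        true
      }
    }

    // Release both forks after eating.
    def release(id: Int): Unit = synchronized {
      forks(id) = false
      forks((id + 1) % numForks) = false
    }
  }
}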