code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package net.akmorrow13.endive.utils
import net.akmorrow13.endive.EndiveFunSuite
import net.akmorrow13.endive.processing.{CellTypes, Chromosomes, TranscriptionFactors}
import org.bdgenomics.adam.models.ReferenceRegion
class FoldsSuite extends EndiveFunSuite {

  // training data of region and labels
  var labelPath = resourcePath("ARID3A.train.labels.head30.tsv")

  val tf = TranscriptionFactors.withName("ATF3")
  val NUM_CHROMOSOMES = Chromosomes.toVector.size
  val sequence = "ATTTTGGGGGAAAAA"

  /** Builds one `LabeledWindow` per (chromosome, cell type) pair, repeated
   *  `samples` times, all sharing the same dummy region and sequence. */
  private def generateWindows(numCellTypes: Int, samples: Int): Seq[LabeledWindow] = {
    (0 until samples).flatMap { _ =>
      (0 until NUM_CHROMOSOMES).flatMap { cr =>
        (0 until numCellTypes).map { cellType =>
          LabeledWindow(Window(tf, CellTypes.apply(cellType), ReferenceRegion("chr" + cr, 0, 100), sequence, 0, None, None), 0)
        }
      }
    }
  }

  sparkTest("Leave 1 out (test set is 1 cell type/chromsome pair)") {
    val NUM_CELL_TYPES = 4
    val NUM_SAMPLES = 100
    val CELL_TYPES_PER_FOLD = 1
    val CHROMOSOMES_PER_FOLD = 1

    val windows: Seq[LabeledWindow] = generateWindows(NUM_CELL_TYPES, NUM_SAMPLES)
    val windowsRDD = sc.parallelize(windows)

    /* One chromosome and one cell type held out per fold (leave 1 out) */
    val folds = EndiveUtils.generateFoldsRDD(windowsRDD.keyBy(r => (r.win.region.referenceName, r.win.cellType)), CELL_TYPES_PER_FOLD, CHROMOSOMES_PER_FOLD, 1)
    println("TOTAL FOLDS " + folds.size)
    for (i <- (0 until folds.size)) {
      println("FOLD " + i)
      val train = folds(i)._1.map(_._2)
      val test = folds(i)._2.map(_._2)
      println("TRAIN SIZE IS " + train.count())
      println("TEST SIZE IS " + test.count())
      // Distinct cell types / chromosomes present in the held-out fold.
      val cellTypesTest: Iterable[CellTypes.Value] = test.map(x => (x.win.cellType)).countByValue().keys
      val chromosomesTest: Iterable[String] = test.map(x => (x.win.getRegion.referenceName)).countByValue().keys
      println(cellTypesTest.size)
      println(chromosomesTest.size)
      assert(cellTypesTest.size == CELL_TYPES_PER_FOLD)
      assert(chromosomesTest.size == CHROMOSOMES_PER_FOLD)
      // The training fold must cover everything that was not held out.
      val cellTypesTrain: Iterable[CellTypes.Value] = train.map(x => (x.win.cellType)).countByValue().keys
      val chromosomesTrain: Iterable[String] = train.map(x => (x.win.getRegion.referenceName)).countByValue().keys
      assert(cellTypesTrain.size == NUM_CELL_TYPES - CELL_TYPES_PER_FOLD)
      assert(chromosomesTrain.size == NUM_CHROMOSOMES - CHROMOSOMES_PER_FOLD)
    }
  }

  sparkTest("Leave 3 out (test set is 3 cell type/chromsome pair)") {
    val NUM_CELL_TYPES = 10
    val NUM_SAMPLES = 100
    val CELL_TYPES_PER_FOLD = 3
    val CHROMOSOMES_PER_FOLD = 3

    val windows: Seq[LabeledWindow] = generateWindows(NUM_CELL_TYPES, NUM_SAMPLES)
    val windowsRDD = sc.parallelize(windows)

    /* Three chromosomes and three cell types held out per fold (leave 3 out) */
    val folds = EndiveUtils.generateFoldsRDD(windowsRDD.keyBy(r => (r.win.region.referenceName, r.win.cellType)), CELL_TYPES_PER_FOLD, CHROMOSOMES_PER_FOLD, 1)
    println("TOTAL FOLDS " + folds.size)
    for (i <- (0 until folds.size)) {
      println("FOLD " + i)
      val train = folds(i)._1.map(_._2)
      val test = folds(i)._2.map(_._2)
      println("TRAIN SIZE IS " + train.count())
      println("TEST SIZE IS " + test.count())
      // Distinct cell types / chromosomes present in the held-out fold.
      val cellTypesTest = test.map(x => (x.win.cellType)).countByValue().keys
      val chromosomesTest: Iterable[String] = test.map(x => (x.win.getRegion.referenceName)).countByValue().keys
      println(cellTypesTest.size)
      println(chromosomesTest.size)
      assert(cellTypesTest.size == CELL_TYPES_PER_FOLD)
      assert(chromosomesTest.size == CHROMOSOMES_PER_FOLD)
      // The training fold must cover everything that was not held out.
      val cellTypesTrain = train.map(x => (x.win.cellType)).countByValue().keys
      val chromosomesTrain = train.map(x => (x.win.getRegion.referenceName)).countByValue().keys
      assert(cellTypesTrain.size == NUM_CELL_TYPES - CELL_TYPES_PER_FOLD)
      assert(chromosomesTrain.size == NUM_CHROMOSOMES - CHROMOSOMES_PER_FOLD)
    }
  }
}
| akmorrow13/endive | src/test/scala/net/akmorrow13/endive/utils/FoldsSuite.scala | Scala | apache-2.0 | 4,825 |
package org.jetbrains.plugins.scala.externalLibraries.scalazDeriving
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.DependencyManagerBase._
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.base.libraryLoaders.IvyManagedLoader
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunctionDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.types.PhysicalMethodSignature
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.plugins.scala.{LatestScalaVersions, ScalaVersion}
import org.junit.Assert._
/**
* IntelliJ's equivalent of scalaz-deriving's built-in PresentationCompilerTest
*
* @author Sam Halliday
* @since 24/08/2017
*/
class ScalazDerivingTest_2_11 extends ScalaLightCodeInsightFixtureTestAdapter {

  // The pinned stalactite/simulacrum artifacts below target Scala 2.11 only.
  override protected def supportedIn(version: ScalaVersion): Boolean = version == LatestScalaVersions.Scala_2_11

  // stalactite provides @deriving; simulacrum provides the @typeclass macro
  // used by the fixtures.
  override def librariesLoaders = super.librariesLoaders :+ IvyManagedLoader(
    "com.fommil" %% "stalactite" % "0.0.5",
    "com.github.mpilquist" %% "simulacrum" % "0.11.0"
  )

  protected def folderPath: String = TestUtils.getTestDataPath

  /** Configures `text` as a Scala file, locates the type definition at the
   *  `<caret>` marker, and asserts that its companion (the synthetic "fake"
   *  companion for classes/traits, or the object itself for case objects)
   *  exposes an implicit def whose return type prints as `expectedType`. */
  def doTest(text: String, expectedType: String): Unit = {
    val cleaned = StringUtil.convertLineSeparators(text)
    val caretPos = cleaned.indexOf("<caret>")
    // The marker is stripped before the file is handed to the fixture.
    getFixture.configureByText("dummy.scala", cleaned.replace("<caret>", ""))
    val clazz = PsiTreeUtil.findElementOfClassAtOffset(
      getFile,
      caretPos,
      classOf[ScTypeDefinition],
      false
    )
    // @deriving injects members into a synthetic companion for classes and
    // traits; for objects the annotated definition is the companion itself.
    clazz
      .fakeCompanionModule
      .getOrElse(clazz.asInstanceOf[ScObject])
      .allMethods
      .collectFirst {
        case PhysicalMethodSignature(fun: ScFunctionDefinition, _) if fun.hasModifierProperty("implicit") => fun
      } match {
      case Some(method) =>
        method.returnType match {
          case Right(t) =>
            val tyText = t.presentableText(clazz)
            assertEquals(s"$tyText != $expectedType", expectedType, tyText)
          case Failure(cause) => fail(cause)
        }
      case None =>
        fail("no implicit def was generated")
    }
  }

  // @deriving on a case class should synthesize `implicit def ...: Wibble[Foo]`.
  def testClass(): Unit = {
    val fileText: String = """
package wibble
import stalactite.deriving
import simulacrum.typeclass
@typeclass trait Wibble[T] {}
object DerivedWibble {
def gen[T]: Wibble[T] = new Wibble[T] {}
}
@deriving(Wibble)
final case class <caret>Foo(string: String, int: Int)
"""
    doTest(fileText, "Wibble[Foo]")
  }

  // @deriving on a sealed trait should synthesize an instance for the trait.
  def testTrait(): Unit = {
    val fileText: String = """
package wibble
import stalactite.deriving
import simulacrum.typeclass
@typeclass trait Wibble[T] {}
object DerivedWibble {
def gen[T]: Wibble[T] = new Wibble[T] {}
}
@deriving(Wibble)
sealed trait <caret>Baz
@deriving(Wibble)
final case class Foo(string: String, int: Int) extends Baz
"""
    doTest(fileText, "Wibble[Baz]")
  }

  // @deriving on a case object should use the object's singleton type.
  def testObject(): Unit = {
    val fileText: String = """
package wibble
import stalactite.deriving
import simulacrum.typeclass
@typeclass trait Wibble[T] {}
object DerivedWibble {
def gen[T]: Wibble[T] = new Wibble[T] {}
}
@deriving(Wibble)
case object <caret>Caz
"""
    doTest(fileText, "Wibble[Caz.type]")
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/externalLibraries/scalazDeriving/ScalazDerivingTest_2_11.scala | Scala | apache-2.0 | 3,456 |
package test.scaladoc.template.owners
// Scaladoc test fixture (t6509): each abstract type below carries the
// `@template` doc tag, and the suite checks which owner the generated
// template documentation is attributed to. The `/** @template */` doc
// comments are the test subject — keep them exactly as written.
trait X {
  /** @template */
  type Symbol >: Null <: SymbolApi
  /** @template */
  type TypeSymbol >: Null <: Symbol with TypeSymbolApi
  /** @template */
  type TermSymbol >: Null <: Symbol with TermSymbolApi
  /** @template */
  type MethodSymbol >: Null <: TermSymbol with MethodSymbolApi
  // Self-typed API traits mixed into the abstract symbol types above.
  trait SymbolApi { this: Symbol => def x: Int}
  trait TermSymbolApi extends SymbolApi { this: TermSymbol => def y: Int}
  trait TypeSymbolApi extends SymbolApi { this: TypeSymbol => def z: Int}
  trait MethodSymbolApi extends TermSymbolApi { this: MethodSymbol => def t: Int }
}
// Subtype chain used to check template attribution across inheriting owners.
trait Y extends X
trait Z extends Y
trait T extends Z
| scala/scala | test/scaladoc/resources/t6509.scala | Scala | apache-2.0 | 673 |
import scala.quoted.*
object Macro {

  /** Entry point: rewrites the inlined list expression `x` at compile time
   *  and wraps it in code that prints the original source, the optimized
   *  source, and the runtime result before yielding that result. */
  inline def optimize[T](inline x: List[T]): List[T] = ${ Macro.impl[T]('x) }

  def impl[T: Type](x: Expr[List[T]])(using Quotes): Expr[List[T]] = {
    val res = optimize(x)
    '{
      val result = $res
      // `show` renders each tree back to source for the debug printout.
      val originalCode = ${Expr(x.show)}
      val optimizeCode = ${Expr(res.show)}
      println("Original: " + originalCode)
      println("Optimized: " + optimizeCode)
      println("Result: " + result)
      println()
      result
    }
  }

  /** Recursively fuses adjacent combinator calls on the quoted list:
   *  `ls.filter(f).filter(g)` becomes `ls.filter(x => f(x) && g(x))` and
   *  `ls.map(f).map(g)` becomes `ls.map(x => g(f(x)))`. Any expression that
   *  matches neither pattern is returned unchanged. */
  def optimize[T: Type](x: Expr[List[T]])(using Quotes): Expr[List[T]] = x match {
    case '{ ($ls: List[T]).filter($f).filter($g) } =>
      optimize('{ $ls.filter(x => $f(x) && $g(x)) })
    case '{ ($ls: List[u]).map[v]($f).map[T]($g) } =>
      optimize('{ $ls.map(x => $g($f(x))) })
    case _ => x
  }
}
| lampepfl/dotty | tests/run-macros/quote-matching-optimize-5/Macro_1.scala | Scala | apache-2.0 | 806 |
package net.scalytica.symbiotic.test.specs
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import net.scalytica.symbiotic.api.SymbioticResults.{NotFound, Ok}
import net.scalytica.symbiotic.api.repository.{FileRepository, FolderRepository}
import net.scalytica.symbiotic.api.types.CustomMetadataAttributes.Implicits._
import net.scalytica.symbiotic.api.types.CustomMetadataAttributes.{
JodaValue,
MetadataMap
}
import net.scalytica.symbiotic.api.types.ResourceParties.{
AllowedParty,
Org,
Owner
}
import net.scalytica.symbiotic.api.types._
import net.scalytica.symbiotic.test.generators.FileGenerator.file
import net.scalytica.symbiotic.test.generators._
import net.scalytica.symbiotic.test.utils.SymResValues
import net.scalytica.symbiotic.time.SymbioticDateTime._
import org.scalatest.Inspectors.forAll
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/** Contract specification for [[FileRepository]] implementations. Subclasses
 *  only supply the concrete `fileRepo`/`folderRepo`; the tests below run in
 *  order and share state (the `folderIds`/`fileIds` builders), so individual
 *  tests are NOT independent. */
abstract class FileRepositorySpec
    extends WordSpecLike
    with ScalaFutures
    with MustMatchers
    with OptionValues
    with SymResValues
    with PersistenceSpec {

  // scalastyle:off magic.number

  // Party that owns every file/folder created by this spec.
  val usrId1 = TestUserId.create()
  val orgId1 = TestOrgId.create()
  val ownerId = usrId1
  val owner = Owner(ownerId, Org)

  // A second, unrelated party used to verify access control.
  val usrId2 = TestUserId.create()
  val orgId2 = TestOrgId.create()
  val accessors = Seq(AllowedParty(usrId2), AllowedParty(orgId2))

  // `ctx` acts as the owner; `ctx2` acts as the non-owner party that must be
  // denied access throughout.
  implicit val ctx: TestContext = TestContext(usrId1, owner, Seq(owner.id))
  val ctx2 = TestContext(usrId2, owner, Seq(usrId2))

  implicit val actorSystem: ActorSystem = ActorSystem("file-repo-test")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Concrete repository implementations are provided by subclasses.
  val fileRepo: FileRepository
  val folderRepo: FolderRepository

  override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true

  // Root folder plus a 3-level "folder" hierarchy generated beneath it.
  val folders = {
    Seq(Folder(ownerId, Path.root)) ++ FolderGenerator.createFolders(
      owner = ownerId,
      baseName = "folder",
      depth = 3
    )
  }

  // Ids of persisted folders/files, accumulated in creation order as the
  // tests run; later tests index into `fileIds.result()` by position.
  val folderIds = Seq.newBuilder[FolderId]
  val fileIds = Seq.newBuilder[FileId]

  override def beforeAll(): Unit = {
    super.beforeAll()
    // Persist the folder tree up-front; the tests themselves only add files.
    folders.flatMap { f =>
      val r = Await.result(folderRepo.save(f), 5 seconds)
      r.toOption
    }.foreach(res => folderIds += res)
  }

  override def afterAll(): Unit = {
    materializer.shutdown()
    actorSystem.terminate()
    super.afterAll()
  }

  "The file repository" should {

    "successfully save a file" in {
      val f = file(owner, usrId1, "file1", folders(2).flattenPath)
      val res = fileRepo.save(f).futureValue
      res.map(fileIds += _)
      res.success mustBe true
    }

    "save another file" in {
      val f = file(owner, usrId1, "file2", folders(2).flattenPath)
      val res = fileRepo.save(f).futureValue
      res.map(fileIds += _)
      res.success mustBe true
    }

    "not be able to save a file in a folder without access" in {
      val f = file(owner, usrId1, "file3", folders(2).flattenPath)
      fileRepo.save(f)(ctx2, global).futureValue.failed mustBe true
    }

    "find the file with a specific name and path" in {
      val path = Some(folders(2).flattenPath)
      val res = fileRepo.find("file2", path).futureValue.value
      res.size mustBe 1
      res.headOption.value.filename mustBe "file2"
      res.headOption.value.fileType mustBe Some("application/pdf")
      res.headOption.value.metadata.path mustBe path
      res.headOption.value.metadata.version mustBe 1
      res.headOption.value.metadata.extraAttributes must not be empty
      // Every generated extra attribute must round-trip through the store.
      val ea = res.headOption.value.metadata.extraAttributes.value
      val expAttr = FileGenerator.extraAttributes.toSeq
      forAll(expAttr) {
        // Joda date values are compared via their string rendering.
        case (k: String, v: JodaValue) =>
          ea.get(k).map(_.toString) mustBe Some(v.toString)
        case kv =>
          ea.get(kv._1) mustBe Some(kv._2)
      }
    }

    "not return a file with a specific name and path without access" in {
      val path = Some(folders(2).flattenPath)
      fileRepo.find("file2", path)(ctx2, global).futureValue mustBe NotFound()
    }

    "save a new version of a file" in {
      // Re-using the FileId of "file1" makes this an explicit version 2.
      val f = file(
        owner = owner,
        by = usrId1,
        fname = "file1",
        folder = folders(2).flattenPath,
        fileId = fileIds.result().headOption,
        version = 2
      )
      val res = fileRepo.save(f).futureValue.value
      res mustBe fileIds.result().headOption.value
    }

    "not allow saving a new version without access" in {
      val f = file(
        owner = owner,
        by = usrId1,
        fname = "file1",
        folder = folders(2).flattenPath,
        fileId = fileIds.result().headOption,
        version = 3
      )
      fileRepo.save(f)(ctx2, global).futureValue.failed mustBe true
    }

    "find the latest version of a file" in {
      val path = Some(folders(2).flattenPath)
      val res = fileRepo.findLatest("file1", path).futureValue.value
      res.filename mustBe "file1"
      res.fileType mustBe Some("application/pdf")
      res.metadata.path mustBe path
      res.metadata.version mustBe 2
    }

    "update the metadata on the latest version of a file" in {
      val expDesc = "Updated metadata"
      val expExtAttrs = MetadataMap("addedKey" -> "set by update")
      val fid = fileIds.result().headOption.value
      val orig = fileRepo.findLatestBy(fid).futureValue.value
      val mod = orig.copy(
        metadata = orig.metadata.copy(
          description = Some(expDesc),
          extraAttributes = orig.metadata.extraAttributes.map(_ ++ expExtAttrs)
        )
      )
      // The update must not bump the version number.
      val res = fileRepo.updateMetadata(mod).futureValue.value
      res.id mustBe orig.id
      res.filename mustBe orig.filename
      res.metadata.version mustBe orig.metadata.version
      res.metadata.description mustBe Some(expDesc)
      res.metadata.extraAttributes.value.toSeq must contain(
        expExtAttrs.headOption.value
      )
    }

    "not return the latest version of a file without access" in {
      val path = Some(folders(2).flattenPath)
      fileRepo
        .findLatest("file1", path)(ctx2, global)
        .futureValue
        .failed mustBe true
    }

    "list all files at a given path" in {
      val fseq = Seq(
        file(owner, usrId1, "file3", folders(1).flattenPath),
        file(owner, usrId1, "file4", folders(3).flattenPath),
        file(owner, usrId1, "file5", folders(2).flattenPath)
      )
      // Add the files
      fseq.foreach { f =>
        fileIds += fileRepo.save(f).futureValue.value
      }
      // Only the latest version of each file at the path should be listed.
      val res = fileRepo.listFiles(folders(2).flattenPath).futureValue.value
      res.size mustBe 3
      res.map(_.filename) must contain only ("file1", "file2", "file5")
      res.find(_.filename == "file1").value.metadata.version mustBe 2
      res.find(_.filename == "file2").value.metadata.version mustBe 1
      res.find(_.filename == "file5").value.metadata.version mustBe 1
    }

    "not list all files at a given path without access" in {
      fileRepo
        .listFiles(folders(2).flattenPath)(ctx2, global)
        .futureValue
        .value mustBe empty
    }

    // NOTE: index 4 in fileIds.result() refers to "file5" added above.
    "not lock a file without access" in {
      val fid = fileIds.result()(4)
      fileRepo.lock(fid)(ctx2, global).futureValue mustBe NotFound()
    }

    "lock a file" in {
      val fid = fileIds.result()(4)
      fileRepo.lock(fid).futureValue match {
        case Ok(lock) =>
          lock.by mustBe usrId1
          lock.date.getDayOfYear mustBe now.getDayOfYear
        case wrong =>
          fail(s"Expected LockApplied[Option[Lock]], got ${wrong.getClass}")
      }
    }

    "return the user id of the lock owner on a locked file" in {
      val fid = fileIds.result()(4)
      fileRepo.locked(fid).futureValue mustBe Ok(Some(usrId1))
    }

    "not return any information about a locked file without access" in {
      val fid = fileIds.result()(4)
      fileRepo.locked(fid)(ctx2, global).futureValue mustBe NotFound()
    }

    "not unlock a file without access" in {
      val fid = fileIds.result()(4)
      fileRepo.unlock(fid)(ctx2, global).futureValue mustBe NotFound()
    }

    "unlock a file" in {
      val fid = fileIds.result()(4)
      fileRepo.unlock(fid).futureValue mustBe Ok(())
    }

    "return None if the file isn't locked" in {
      val fid = fileIds.result()(4)
      fileRepo.locked(fid).futureValue.value mustBe None
    }

    "not be allowed to move a file without access" in {
      val origPath = folders(1).flattenPath
      val destPath = folders(3).flattenPath
      fileRepo
        .move("file3", origPath, destPath)(ctx2, global)
        .futureValue mustBe NotFound()
    }

    "move a file" in {
      val origPath = folders(1).flattenPath
      val destPath = folders(3).flattenPath
      // destPath already holds "file4"; after the move it also has "file3".
      fileRepo.listFiles(destPath).futureValue.value.size mustBe 1
      val moved = fileRepo.move("file3", origPath, destPath).futureValue.value
      moved.metadata.path.value mustBe destPath
      moved.filename mustBe "file3"
      fileRepo.listFiles(destPath).futureValue.value.size mustBe 2
    }

    "successfully mark a file as deleted" in {
      val fid = fileIds.result()(4)
      fileRepo.markAsDeleted(fid).futureValue mustBe Ok(1)
    }

    "not return information about a deleted file" in {
      val fid = fileIds.result()(4)
      fileRepo.findLatestBy(fid).futureValue mustBe NotFound()
    }

    "entirely erase all versions of metadata and files for a given File" in {
      val fid = fileIds.result().headOption.value
      // Erasing the file should result in 2 removed versions
      fileRepo.eraseFile(fid).futureValue mustBe Ok(2)
      fileRepo.findLatestBy(fid).futureValue mustBe NotFound()
    }
  }
}
| kpmeen/symbiotic | symbiotic-testkit/src/main/scala/net/scalytica/symbiotic/test/specs/FileRepositorySpec.scala | Scala | apache-2.0 | 9,850 |
package com.karasiq.shadowcloud.actors.utils
import akka.actor.ActorRef
import scala.collection.mutable
object PendingOperation {
  /** Creates an empty pending-operation registry for keys of type `Key`. */
  def apply[Key <: AnyRef]: PendingOperation[Key] = new PendingOperation[Key]()
}
/** Registry of actors waiting for the completion of keyed operations.
 *  Not thread-safe: intended for use from within a single actor. */
class PendingOperation[Key <: AnyRef] {
  // key ⇒ the set of actors still waiting for that key's result
  val subscribers = mutable.AnyRefMap[Key, mutable.Set[ActorRef]]()

  /** Registers `actor` as a waiter for `key`. `ifFirst` runs only when `key`
   *  had no waiters yet — typically used to start the underlying operation
   *  exactly once. */
  def addWaiter(key: Key, actor: ActorRef, ifFirst: () ⇒ Unit = () ⇒ ()): Unit = {
    subscribers.get(key) match {
      case Some(actors) ⇒
        actors.add(actor)
      case None ⇒
        subscribers += key → mutable.Set(actor)
        ifFirst()
    }
  }

  /** Drops `actor` from every key it is waiting on, forgetting keys that are
   *  left without waiters. Key removal is deferred until after the loop:
   *  removing entries from a mutable map while iterating over it (as the
   *  previous implementation did) has undefined behaviour. */
  def removeWaiter(actor: ActorRef): Unit = {
    val emptiedKeys = List.newBuilder[Key]
    subscribers.foreach {
      case (key, actors) if actors.contains(actor) ⇒
        actors -= actor
        if (actors.isEmpty) emptiedKeys += key
      case _ ⇒ // actor is not waiting on this key
    }
    emptiedKeys.result().foreach(subscribers -= _)
  }

  /** Number of keys that currently have at least one waiter. */
  def count: Int = {
    subscribers.size
  }

  /** Sends `result` to every waiter of `key` and forgets the key. */
  def finish(key: Key, result: AnyRef)(implicit sender: ActorRef = ActorRef.noSender): Unit = {
    subscribers.remove(key).foreach(_.foreach(_ ! result))
  }

  /** Sends `f(key)` to every waiter of each pending key, then clears all. */
  def finishAll(f: Key => AnyRef)(implicit sender: ActorRef = ActorRef.noSender): Unit = {
    subscribers.foreach {
      case (key, actors) =>
        val result = f(key)
        actors.foreach(_ ! result)
    }
    subscribers.clear()
  }
}
| Karasiq/shadowcloud | utils/.jvm/src/main/scala/com/karasiq/shadowcloud/actors/utils/PendingOperation.scala | Scala | apache-2.0 | 1,299 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.netio.service.handle;
import scouter.lang.pack.MapPack
import scouter.lang.pack.ObjectPack
import scouter.lang.pack.Pack
import scouter.io.DataInputX
import scouter.io.DataOutputX
import scouter.net.RequestCmd
import scouter.net.TcpFlag
import scouter.server.core.AgentManager
import scouter.server.netio.AgentCall
import scouter.server.netio.service.anotation.ServiceHandler
class DumpService {

  /** Shared request flow for the simple dump commands: read the request
   *  parameters, resolve the target agent from `objHash`, forward `cmd` to
   *  it and, when the agent answers, relay the single response pack back to
   *  the client prefixed with a HasNEXT flag. A null response writes
   *  nothing back. */
  private def forwardToAgent(cmd: String, din: DataInputX, dout: DataOutputX): Unit = {
    val param = din.readMapPack();
    val objHash = param.getInt("objHash");
    val o = AgentManager.getAgent(objHash);
    val p = AgentCall.call(o, cmd, param);
    if (p != null) {
      dout.writeByte(TcpFlag.HasNEXT);
      dout.writePack(p);
    }
  }

  @ServiceHandler(RequestCmd.TRIGGER_ACTIVE_SERVICE_LIST)
  def triggerActiveServiceList(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.TRIGGER_ACTIVE_SERVICE_LIST, din, dout)
  }

  @ServiceHandler(RequestCmd.TRIGGER_HEAPHISTO)
  def triggerHeapHisto(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.TRIGGER_HEAPHISTO, din, dout)
  }

  @ServiceHandler(RequestCmd.TRIGGER_THREAD_DUMP)
  def triggerThreadDump(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.TRIGGER_THREAD_DUMP, din, dout)
  }

  @ServiceHandler(RequestCmd.TRIGGER_THREAD_LIST)
  def triggerThreadList(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.TRIGGER_THREAD_LIST, din, dout)
  }

  @ServiceHandler(RequestCmd.OBJECT_DUMP_FILE_LIST)
  def getDumpFileList(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.OBJECT_DUMP_FILE_LIST, din, dout)
  }

  /** Streams a dump file's contents: each HasNEXT-prefixed blob received
   *  from the agent is copied straight through to the client. */
  @ServiceHandler(RequestCmd.OBJECT_DUMP_FILE_DETAIL)
  def getDumpFileDetail(din: DataInputX, dout: DataOutputX, login: Boolean) {
    val param = din.readMapPack();
    val objHash = param.getInt("objHash");
    val o = AgentManager.getAgent(objHash);
    val handler = (_in: DataInputX, _out: DataOutputX) => {
      while (_in.readByte() == TcpFlag.HasNEXT) {
        dout.writeByte(TcpFlag.HasNEXT);
        dout.writeBlob(_in.readBlob());
      }
    }
    AgentCall.call(o, RequestCmd.OBJECT_DUMP_FILE_DETAIL, param, handler)
  }

  /** Fire-and-forget: asks the agent's JVM to run a GC; nothing is relayed
   *  back to the client. */
  @ServiceHandler(RequestCmd.OBJECT_SYSTEM_GC)
  def systemGc(din: DataInputX, dout: DataOutputX, login: Boolean) {
    val param = din.readMapPack();
    val objHash = param.getInt("objHash");
    val o = AgentManager.getAgent(objHash);
    AgentCall.call(o, RequestCmd.OBJECT_SYSTEM_GC, param);
  }

  @ServiceHandler(RequestCmd.DUMP_APACHE_STATUS)
  def dumpApacheStatus(din: DataInputX, dout: DataOutputX, login: Boolean) {
    forwardToAgent(RequestCmd.DUMP_APACHE_STATUS, din, dout)
  }
}
/* Copyright 2015 Alessandro Maria Rizzi
* Copyright 2016 Eugenio Gianniti
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package profiler
/** Makespan bounds for a profiled MapReduce job when run on
 *  `numContainers` containers, derived from the task statistics in
 *  `simulation`. */
case class Bounds(simulation: Simulation, numContainers: Int) {

  /** Relative error of [[upperBound]] against a measured `duration`. */
  def errorUpper(duration: Long): Double = {
    val delta = upperBound - duration
    delta.toDouble / duration.toDouble
  }

  /** Relative error of the midpoint estimate [[avg]] against a measured
   *  `duration`. */
  def error(duration: Long): Double = {
    val delta = avg - duration
    delta.toDouble / duration.toDouble
  }

  val slots: Double = numContainers.toDouble

  // Fraction of the slots usable by each phase — currently both phases may
  // use every slot.
  val mapRatio = 1.0d
  val reduceRatio = 1.0d
  val mapSlots = mapRatio * slots
  val reduceSlots = reduceRatio * slots

  // Per-phase task statistics, pulled lazily from the profiled simulation.
  lazy val avgMap = simulation avg MapTask
  lazy val avgReduce = simulation avg ReduceTask
  lazy val avgShuffle = simulation avg ShuffleTask
  lazy val maxMap = simulation max MapTask
  lazy val maxReduce = simulation max ReduceTask
  lazy val maxShuffle = simulation max ShuffleTask
  lazy val numMap = simulation numOf MapTask
  lazy val numReduce = simulation numOf ReduceTask

  // Upper bound: per-phase total work spread over that phase's slots, with a
  // correction term based on the largest observed task durations.
  lazy val upperBound = (avgMap * numMap - 2 * maxMap) / mapSlots +
    ((avgReduce + avgShuffle) * numReduce - 2 * (maxReduce + maxShuffle)) / reduceSlots +
    2 * (maxMap + maxReduce + maxShuffle)

  // Lower bound: perfectly balanced execution of all work across all slots.
  lazy val lowerBound = (avgMap * numMap + (avgReduce + avgShuffle) * numReduce) / slots

  // Midpoint of the two bounds, used as the point estimate.
  lazy val avg = (upperBound + lowerBound) / 2
}
| deib-polimi/Profiler | src/main/scala-2.11/profiler/Bounds.scala | Scala | apache-2.0 | 1,865 |
package org.scalajs.core.compiler.test.util
import language.implicitConversions
import scala.tools.nsc._
import scala.reflect.internal.util.SourceFile
import scala.util.control.ControlThrowable
import org.junit.Assert._
import org.scalajs.core.compiler.{ScalaJSPlugin, JSTreeExtractors}
import JSTreeExtractors.jse
import org.scalajs.core.ir
import ir.{Trees => js}
abstract class JSASTTest extends DirectTest {

  // Captured by the plugin callback after each successful compilation run.
  private var lastAST: JSAST = _

  /** Wraps the generated JS class definitions and offers small assertion
   *  helpers based on IR traversal. */
  class JSAST(val clDefs: List[js.Tree]) {
    type Pat = PartialFunction[js.Tree, Unit]

    /** Traverser that either searches for the first tree matching `pf`
     *  (`find`, aborting early via a control-flow exception) or visits every
     *  tree applying `pf` for its side effects (`traverse()`). */
    class PFTraverser(pf: Pat) extends ir.Traversers.Traverser {
      private case object Found extends ControlThrowable

      // true while `find` is running: the first match aborts the traversal.
      private[this] var finding = false

      /** Returns true iff some generated tree matches `pf`. */
      def find: Boolean = {
        finding = true
        try {
          clDefs.map(traverse)
          false
        } catch {
          case Found => true
        }
      }

      /** Visits every tree, applying `pf` for its side effects only. */
      def traverse(): Unit = {
        finding = false
        clDefs.map(traverse)
      }

      override def traverse(tree: js.Tree): Unit = {
        if (finding && pf.isDefinedAt(tree))
          throw Found
        if (!finding)
          pf.lift(tree)
        super.traverse(tree)
      }
    }

    /** Asserts that at least one generated tree matches `pf`. */
    def has(trgName: String)(pf: Pat): this.type = {
      val tr = new PFTraverser(pf)
      assertTrue(s"AST should have $trgName", tr.find)
      this
    }

    /** Asserts that no generated tree matches `pf`. */
    def hasNot(trgName: String)(pf: Pat): this.type = {
      val tr = new PFTraverser(pf)
      assertFalse(s"AST should not have $trgName", tr.find)
      this
    }

    /** Asserts that exactly `count` generated trees match `pf`. */
    def hasExactly(count: Int, trgName: String)(pf: Pat): this.type = {
      var actualCount = 0
      val tr = new PFTraverser(pf.andThen(_ => actualCount += 1))
      tr.traverse()
      assertEquals(s"AST has the wrong number of $trgName", count, actualCount)
      this
    }

    /** Applies `pf` to every generated tree, for custom inspection. */
    def traverse(pf: Pat): this.type = {
      val tr = new PFTraverser(pf)
      tr.traverse()
      this
    }

    /** Prints every generated class definition; debugging aid. */
    def show: this.type = {
      clDefs foreach println _
      this
    }
  }

  // Lets tests write a source snippet literal where a JSAST is expected.
  implicit def string2ast(str: String): JSAST = stringAST(str)

  override def newScalaJSPlugin(global: Global): ScalaJSPlugin = {
    new ScalaJSPlugin(global) {
      // Capture the generated AST instead of discarding it.
      override def generatedJSAST(cld: List[js.Tree]): Unit = {
        lastAST = new JSAST(cld)
      }
    }
  }

  /** Compiles `code` with the default global and returns the JS AST. */
  def stringAST(code: String): JSAST = stringAST(defaultGlobal)(code)

  def stringAST(global: Global)(code: String): JSAST = {
    if (!compileString(global)(code))
      throw new IllegalArgumentException("snippet did not compile")
    lastAST
  }

  /** Compiles `source` with the default global and returns the JS AST. */
  def sourceAST(source: SourceFile): JSAST = sourceAST(defaultGlobal)(source)

  def sourceAST(global: Global)(source: SourceFile): JSAST = {
    if (!compileSources(global)(source))
      throw new IllegalArgumentException("snippet did not compile")
    lastAST
  }
}
| xuwei-k/scala-js | compiler/src/test/scala/org/scalajs/core/compiler/test/util/JSASTTest.scala | Scala | bsd-3-clause | 2,796 |
package com.twitter.finagle.http2.exp.transport
import com.twitter.finagle.Status
import com.twitter.finagle.Status.Closed
import com.twitter.finagle.http.Message
import com.twitter.finagle.http.exp.{Multi, StreamTransport, StreamTransportProxy}
import com.twitter.util.{Future, Return, Try}
import scala.util.control.NonFatal
/**
* A Transport with close behavior suitable for single-use H2 pipelines.
*
* In the Netty MultiplexCodec world H2 streams each get their own channel.
* For now we're representing them each as their own Finagle `Transport` and
* as such they will all only do 1 dispatch. This allows us to reduce the
* complexity of the `HttpTransport` significantly to improve performance.
*/
private[finagle] final class Http2Transport[In <: Message, Out <: Message](
  self: StreamTransport[In, Out])
    extends StreamTransportProxy[In, Out](self)
    with (Try[Multi[Out]] => Unit) {

  // We start with 2 half streams (read and write) in the 'open' state. As the
  // read and write close they will decrement by 1, and at 0 both are closed.
  @volatile private[this] var count = 2

  // A respond handler for `read`. Extending Function1 lets `this` be passed
  // directly to `respond` (see `read()` below).
  def apply(mb: Try[Multi[Out]]): Unit = mb match {
    case Return(Multi(m, onFinish)) => observeMessage(m, onFinish)
    case _ => // do nothing
  }

  def read(): Future[Multi[Out]] = self.read().respond(this)

  def write(m: In): Future[Unit] =
    try {
      val f = self.write(m)
      observeMessage(m, f)
      f
    } catch {
      case NonFatal(e) => Future.exception(e)
    }

  // Once both half streams have completed, the transport reports Closed.
  override def status: Status = if (count == 0) Closed else self.status

  // Counts a half stream as done once its message has fully finished.
  // When `onFinish` is already satisfied we avoid registering a callback.
  private[this] def observeMessage(message: Message, onFinish: Future[Unit]): Unit = {
    if (onFinish.isDefined) endHalfStream()
    else
      onFinish.respond { _ =>
        endHalfStream()
      }
  }

  // Decrements the open half-stream count; whichever caller brings it to 0
  // is responsible for closing the underlying transport exactly once.
  private[this] def endHalfStream(): Unit = {
    val shouldClose = synchronized {
      val i = count
      if (i == 0) false // already done
      else {
        val next = i - 1
        count = next
        // if we're at 0, it's our job to close things down.
        next == 0
      }
    }
    if (shouldClose) self.close()
  }
}
| luciferous/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/exp/transport/Http2Transport.scala | Scala | apache-2.0 | 2,149 |
/*
* Copyright 2014 Joshua R. Rodgers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ========================================================================
*/
package com.theenginerd.core.client.model.builder
import net.minecraft.util.Vec3
// A surface normal. Components are not re-normalized here; the only
// producer in this file (Face.normal) normalizes before constructing one.
case class Normal(x: Float, y: Float, z: Float)
// A mesh vertex: 3D position plus optional (presumably texture UV —
// confirm against the renderer) coordinates.
case class Vertex(position: (Double, Double, Double), uv: Option[(Float, Float)])
/** A polygon face defined by an ordered sequence of vertices. */
class Face(val vertices: IndexedSeq[Vertex]) {

  /** Normal of this face, computed lazily from its first three vertices as
   *  the normalized cross product of the two edges leaving vertex 0. */
  lazy val normal = {
    val (ax, ay, az) = vertices(0).position
    val (bx, by, bz) = vertices(1).position
    val (cx, cy, cz) = vertices(2).position
    val edgeAB = Vec3.createVectorHelper(bx - ax, by - ay, bz - az)
    val edgeAC = Vec3.createVectorHelper(cx - ax, cy - ay, cz - az)
    val unit = (edgeAB crossProduct edgeAC).normalize
    Normal(unit.xCoord.toFloat, unit.yCoord.toFloat, unit.zCoord.toFloat)
  }
}
/** Companion factory for [[Face]]. */
object Face {
  /** Builds a face from an inline list of vertices. */
  def apply(vertices: Vertex*): Face = new Face(Vector(vertices: _*))
}
| Mr-Byte/Random-Redstone | core/src/main/scala/com/theenginerd/core/client/model/builder/Face.scala | Scala | apache-2.0 | 1,507 |
/*
* Copyright (c) 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package proxies.phases
import scala.reflect.runtime.universe
import scala.tools.reflect.ToolBox
import org.scalaide.logging.HasLogger
/**
 * Phase that runs the toolbox typechecker over the tree produced by the
 * previous phase and records the typed tree under this phase's name.
 */
case class TypeCheck(toolbox: ToolBox[universe.type])
  extends TransformationPhase[IsTypecheck]
  with HasLogger {

  override def transform(data: TransformationPhaseData): TransformationPhaseData = {
    val typedTree = toolbox.typecheck(data.tree)
    data.after(phaseName, typedTree)
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/proxies/phases/TypeCheck.scala | Scala | bsd-3-clause | 601 |
package org.geoscript.geocss
import org.geotools.{ styling => gt }
object OpenLayersStyle {
  /**
   * Serializes a stub OpenLayers style-map declaration to the given stream.
   *
   * NOTE(review): the `style` parameter is currently unused — the emitted
   * JavaScript always wraps the OpenLayers defaults around an empty override
   * object (`{}`); presumably a translation of `style` is intended here.
   *
   * The writer (and therefore `out`) is closed when this method returns,
   * including on failure, so callers must not reuse the stream afterwards.
   */
  def write(style: gt.Style, out: java.io.OutputStream): Unit = {
    val writer = new java.io.OutputStreamWriter(out)
    try {
      writer.write(
        "var styleMap = new OpenLayers.StyleMap(OpenLayers.Util.applyDefaults("
      )
      writer.write("{}")
      writer.write(",")
      writer.write("OpenLayers.Feature.Vector.style[\\"default\\"]")
      writer.write(");")
      writer.flush()
    } finally {
      // Previously the writer was only closed on the success path, leaking it
      // (and buffered output) if any write threw. Close unconditionally.
      writer.close()
    }
  }
}
| dwins/geoscript.scala | geocss/src/main/scala/org/geoscript/geocss/OpenLayersStyle.scala | Scala | mit | 484 |
// Fixture for sbt's source-dependency tests: D depends on C, which is
// defined in a sibling source file (not visible here).
class D extends C
// Entry point that forces D (and transitively C) to be compiled and linked.
object Hello extends App {
  new D
}
| dotty-staging/dotty | sbt-test/source-dependencies/transitive-class/D.scala | Scala | apache-2.0 | 55 |
package fpinscala.parallelism
import java.util.Date
import java.util.concurrent._
/**
 * A minimal parallelism library. A `Par[A]` is a *description* of a parallel
 * computation: a function that, given an `ExecutorService`, produces a
 * `Future[A]`. Nothing executes until `run` supplies the pool.
 */
object Par {
  type Par[A] = ExecutorService => Future[A]

  /** Executes the description `a` against the given thread pool. */
  def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)

  def unit[A](a: A): Par[A] = (es: ExecutorService) => UnitFuture(a) // `unit` is represented as a function that returns a `UnitFuture`, which is a simple implementation of `Future` that just wraps a constant value. It doesn't use the `ExecutorService` at all. It's always done and can't be cancelled. Its `get` method simply returns the value that we gave it.

  private case class UnitFuture[A](get: A) extends Future[A] {
    def isDone = true
    def get(timeout: Long, units: TimeUnit) = get // the timeout is ignored: the value is already available
    def isCancelled = false
    def cancel(evenIfRunning: Boolean): Boolean = false
  }

  def map2[A,B,C](a: Par[A], b: Par[B])(f: (A,B) => C): Par[C] = // `map2` doesn't evaluate the call to `f` in a separate logical thread, in accord with our design choice of having `fork` be the sole function in the API for controlling parallelism. We can always do `fork(map2(a,b)(f))` if we want the evaluation of `f` to occur in a separate thread.
    (es: ExecutorService) => {
      val af = a(es)
      val bf = b(es)
      UnitFuture(f(af.get, bf.get)) // This implementation of `map2` does _not_ respect timeouts. It simply passes the `ExecutorService` on to both `Par` values, waits for the results of the Futures `af` and `bf`, applies `f` to them, and wraps them in a `UnitFuture`. In order to respect timeouts, we'd need a new `Future` implementation that records the amount of time spent evaluating `af`, then subtracts that time from the available time allocated for evaluating `bf`.
    }

  def fork[A](a: => Par[A]): Par[A] = // This is the simplest and most natural implementation of `fork`, but there are some problems with it--for one, the outer `Callable` will block waiting for the "inner" task to complete. Since this blocking occupies a thread in our thread pool, or whatever resource backs the `ExecutorService`, this implies that we're losing out on some potential parallelism. Essentially, we're using two threads when one should suffice. This is a symptom of a more serious problem with the implementation, and we will discuss this later in the chapter.
    es => es.submit(new Callable[A] {
      def call = a(es).get
    })

  /** Lifts `f` to a function whose result is evaluated asynchronously. */
  def asyncF[A,B](f: A => B): A => Par[B] =
    a => fork(unit(f(a)))

  /** Derived from `map2` by pairing with a dummy unit value. */
  def map[A,B](pa: Par[A])(f: A => B): Par[B] =
    map2(pa, unit(()))((a,_) => f(a))

  def sortPar(parList: Par[List[Int]]) = map(parList)(_.sorted)

  /** Right fold of the list, without forking (fully sequential combination). */
  def sequence_simple[A](l: List[Par[A]]): Par[List[A]] =
    l.foldRight[Par[List[A]]](unit(List()))((h,t) => map2(h,t)(_ :: _))

  // This implementation forks the recursive step off to a new logical thread,
  // making it effectively tail-recursive. However, we are constructing
  // a right-nested parallel program, and we can get better performance by
  // dividing the list in half, and running both halves in parallel.
  // See `sequenceBalanced` below.
  def sequenceRight[A](as: List[Par[A]]): Par[List[A]] =
    as match {
      case Nil => unit(Nil)
      case h :: t => map2(h, fork(sequence(t)))(_ :: _)
    }

  // We define `sequenceBalanced` using `IndexedSeq`, which provides an
  // efficient function for splitting the sequence in half.
  def sequenceBalanced[A](as: IndexedSeq[Par[A]]): Par[IndexedSeq[A]] = fork {
    if (as.isEmpty) unit(Vector())
    else if (as.length == 1) map(as.head)(a => Vector(a))
    else {
      val (l,r) = as.splitAt(as.length/2)
      map2(sequenceBalanced(l), sequenceBalanced(r))(_ ++ _)
    }
  }

  def sequence[A](as: List[Par[A]]): Par[List[A]] =
    map(sequenceBalanced(as.toIndexedSeq))(_.toList)

  /** Filters in parallel: each element's predicate runs as its own forked task. */
  def parFilter[A](l: List[A])(f: A => Boolean): Par[List[A]] = {
    val pars: List[Par[List[A]]] =
      l map (asyncF((a: A) => if (f(a)) List(a) else List()))
    map(sequence(pars))(_.flatten) // convenience method on `List` for concatenating a list of lists
  }

  def equal[A](e: ExecutorService)(p: Par[A], p2: Par[A]): Boolean =
    p(e).get == p2(e).get

  /** Delays *construction* of `fa` until the resulting `Par` is actually run. */
  def delay[A](fa: => Par[A]): Par[A] =
    es => fa(es)

  def choice[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
    es =>
      if (run(es)(cond).get) t(es) // Notice we are blocking on the result of `cond`.
      else f(es)

  def choiceN[A](n: Par[Int])(choices: List[Par[A]]): Par[A] =
    es => {
      val ind = run(es)(n).get // Blocks until the index is available, then runs the chosen branch.
      run(es)(choices(ind))
    }

  def choiceViaChoiceN[A](a: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] =
    choiceN(map(a)(b => if (b) 0 else 1))(List(ifTrue, ifFalse))

  def choiceMap[K,V](key: Par[K])(choices: Map[K,Par[V]]): Par[V] =
    es => {
      val k = run(es)(key).get
      run(es)(choices(k))
    }

  def chooser[A,B](p: Par[A])(choices: A => Par[B]): Par[B] =
    es => {
      val k = run(es)(p).get
      run(es)(choices(k))
    }

  /* `chooser` is usually called `flatMap` or `bind`. */
  def flatMap[A,B](p: Par[A])(choices: A => Par[B]): Par[B] =
    es => {
      val k = run(es)(p).get
      run(es)(choices(k))
    }

  def choiceViaFlatMap[A](p: Par[Boolean])(f: Par[A], t: Par[A]): Par[A] =
    flatMap(p)(b => if (b) t else f)

  def choiceNViaFlatMap[A](p: Par[Int])(choices: List[Par[A]]): Par[A] =
    flatMap(p)(i => choices(i))

  // see nonblocking implementation in `Nonblocking.scala`
  def join[A](a: Par[Par[A]]): Par[A] =
    es => run(es)(run(es)(a).get())

  def joinViaFlatMap[A](a: Par[Par[A]]): Par[A] =
    flatMap(a)(x => x)

  def flatMapViaJoin[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
    join(map(p)(f))

  /* Gives us infix syntax for `Par`. */
  implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)

  // Placeholder for enrichment methods on `Par` values.
  class ParOps[A](p: Par[A]) {
  }
}
/** Usage examples for the `Par` API (plain recursive sum, not yet parallel). */
object Examples extends App {
  import Par._
  def sum(ints: IndexedSeq[Int]): Int = // `IndexedSeq` is a superclass of random-access sequences like `Vector` in the standard library. Unlike lists, these sequences provide an efficient `splitAt` method for dividing them into two parts at a particular index.
    if (ints.size <= 1)
      ints.headOption getOrElse 0 // `headOption` is a method defined on all collections in Scala. We saw this function in chapter 3.
    else {
      val (l,r) = ints.splitAt(ints.length/2) // Divide the sequence in half using the `splitAt` function.
      sum(l) + sum(r) // Recursively sum both halves and add the results together.
    }
} | pierangeloc/fpinscala | answers/src/main/scala/fpinscala/parallelism/Par.scala | Scala | mit | 6,505 |
/*
* Copyright (C) 2010 Lalit Pant <pant.lalit@gmail.com>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package story
import org.junit.Test
import org.junit.Assert._
/**
 * Tests for [[LinkListener]]: parsing of story links of the form
 * `http://localpage/<page>#<paragraph>` (page navigation) and
 * `http://runHandler/<name>/<data>` (handler invocation).
 */
class LinkListenerTest extends KojoTestBase {
  val ll = new LinkListener(StoryTeller.instance)

  // A page link without a paragraph anchor defaults to paragraph 1.
  @Test
  def test1 {
    assertEquals((2,1), ll.localpageLocation("http://localpage/2"))
  }

  // Host matching is case-insensitive.
  @Test
  def test2 {
    assertEquals((2,3), ll.localpageLocation("http://LOCALPAGE/2#3"))
  }

  // Multi-digit page and paragraph numbers.
  @Test
  def test3 {
    assertEquals((12,11), ll.localpageLocation("http://localpage/12#11"))
  }

  // A page reference that is neither numeric nor a known page name is rejected.
  @Test
  def test4 {
    val pg = Page(
      name = "",
      body =
        <body>
        </body>,
      code = {}
    )
    ll.setStory(Story(pg))
    try {
      ll.localpageLocation("http://localpage/a#11")
      fail("Invalid location not detected")
    }
    catch {
      case ex: IllegalArgumentException =>
        assertTrue(true)
    }
  }

  // A non-numeric paragraph anchor is rejected.
  @Test
  def test5 {
    try {
      ll.localpageLocation("http://localpage/5#x")
      fail("Invalid location not detected")
    }
    catch {
      case ex: IllegalArgumentException =>
        assertTrue(true)
    }
  }

  // Named pages resolve to their 1-based index in the story; paragraph anchors
  // address the increments of an IncrPage.
  @Test
  def test6 {
    val story = Story(
      Page(
        name = "pg1",
        body =
          <body>
          </body>,
        code = {}
      ),
      IncrPage(
        name = "pg2",
        style = "",
        body = List(
          Para(
            <p>
              Para1
            </p>
          ),
          Para(
            <p>
              Para2
            </p>
          )
        )
      ),
      Page(
        name = "pg3",
        body =
          <body>
          </body>,
        code = {}
      )
    )
    ll.setStory(story)
    assertEquals((1,1), ll.localpageLocation("http://localpage/pg1"))
    assertEquals((2,1), ll.localpageLocation("http://localpage/pg2"))
    assertEquals((2,1), ll.localpageLocation("http://localpage/pg2#1"))
    assertEquals((2,2), ll.localpageLocation("http://localpage/pg2#2"))
    assertEquals((3,1), ll.localpageLocation("http://localpage/pg3#1"))
    try {
      ll.localpageLocation("http://localpage/nopage")
      fail("Invalid location not detected")
    }
    catch {
      case ex: IllegalArgumentException =>
        assertTrue(true)
    }
  }

  // Handler links split into (handler name, payload).
  @Test
  def testHandlerData {
    assertEquals(("code","5"), ll.handlerData("http://runHandler/code/5"))
  }

  // Trailing whitespace in the payload is trimmed.
  @Test
  def testHandlerData2 {
    assertEquals(("code2","7"), ll.handlerData("http://runhandler/code2/7 "))
  }

  // A missing payload yields the empty string.
  @Test
  def testHandlerData3 {
    assertEquals(("code3",""), ll.handlerData("http://runhandler/code3"))
  }
}
| vnkmr7620/kojo | KojoEnv/test/unit/src/net/kogics/kojo/story/LinkListenerTest.scala | Scala | gpl-3.0 | 3,103 |
/**
* Copyright (C) 2012-2013 Vadim Bartko (vadim.bartko@nevilon.com).
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* See file LICENSE.txt for License information.
*/
package com.nevilon.nomad.logs
/**
 * Renders a sequence of rows as an ASCII table: a `+--+--+` separator line
 * above the first row, between the first row and the rest, and after the
 * last row, with every cell right-aligned to its column width.
 */
object Tabulator {

  /** Formats `table` as an ASCII grid; an empty table renders as "". */
  def format(table: Seq[Seq[Any]]): String =
    if (table.isEmpty) ""
    else {
      // Display width of every cell; null cells count as width 0.
      val widths = table.map(_.map(cell => if (cell == null) 0 else cell.toString.length))
      val columnWidths = widths.transpose.map(_.max)
      val renderedRows = table.map(formatRow(_, columnWidths))
      formatRows(rowSeparator(columnWidths), renderedRows)
    }

  /** Joins the head row, remaining rows and separator lines with newlines. */
  def formatRows(rowSeparator: String, rows: Seq[String]): String = {
    val lines = Seq(rowSeparator, rows.head, rowSeparator) ++ rows.tail :+ rowSeparator
    lines.mkString("\n")
  }

  /** Right-aligns each cell to its column width; zero-width columns render empty. */
  def formatRow(row: Seq[Any], colSizes: Seq[Int]): String = {
    val cells = row.zip(colSizes).map {
      case (_, 0)        => ""
      case (item, width) => ("%" + width + "s").format(item)
    }
    cells.mkString("|", "|", "|")
  }

  /** A `+--+--+` separator line sized to the column widths. */
  def rowSeparator(colSizes: Seq[Int]): String =
    colSizes.map("-" * _).mkString("+", "+", "+")
}
| hudvin/nomad | src/main/scala/com/nevilon/nomad/logs/Tabulator.scala | Scala | gpl-2.0 | 1,340 |
package geotrellis.util
import java.io.{File,FileInputStream}
import java.nio.ByteBuffer
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel.MapMode._
import scala.math.min
/** Helpers for rendering byte counts in human-readable units. */
object Units {
  // Unit suffixes from smallest to largest; each step is a factor of 1024.
  val bytesUnits = List("B", "K", "M", "G", "P")

  /**
   * Scales `n` bytes down by powers of 1024 until the amount drops below
   * 1024 (or the largest suffix is reached), returning (amount, suffix).
   */
  def bytes(n: Long): (Double, String) = {
    @annotation.tailrec
    def scale(amount: Double, suffixes: List[String]): (Double, String) =
      suffixes match {
        case Nil => sys.error("invalid units list")
        case last :: Nil => (amount, last)
        case suffix :: rest =>
          if (amount < 1024) (amount, suffix) else scale(amount / 1024, rest)
      }
    scale(n, bytesUnits)
  }
}
/**
 * Utility class for timing the execution time of a function.
 */
object Timer {
  /** Runs `thunk`, returning its result paired with the elapsed wall-clock millis. */
  def time[T](thunk: => T) = {
    val t0 = System.currentTimeMillis()
    val result = thunk
    val t1 = System.currentTimeMillis()
    (result, t1 - t0)
  }

  /** Runs `thunk`, printing "<result>: <ms> ms" and returning the result. */
  def run[T](thunk: => T) = {
    val (result, t) = time { thunk }
    printf("%s: %d ms\\n", result, t)
    result
  }

  /**
   * Runs `thunk`, printing "<label>: <ms> ms" where the label is built from
   * `fmt.format(args)`, and returns the result.
   */
  def log[T](fmt:String, args:Any*)(thunk: => T) = {
    val label = fmt.format(args:_*)
    val (result, t) = time { thunk }
    // BUGFIX: the previous code was `printf(label + ": %d ms\n".format(t))`,
    // which (a) applied `format` to the suffix only, leaving it with zero
    // arguments, and (b) re-interpreted any '%' in the label as a printf
    // conversion, throwing MissingFormatArgumentException. Pass the label as
    // a printf *argument*, mirroring `run` above.
    printf("%s: %d ms\\n", label, t)
    result
  }
}
/** Small filesystem helpers: whole-file reads and path-string manipulation. */
object Filesystem {
  /**
   * Reads the entire file at `path` into a byte array via a read-only memory
   * mapping, copying out at most `bs` bytes per iteration.
   */
  def slurp(path: String, bs: Int = 262144): Array[Byte] = {
    val file = new File(path)
    val stream = new FileInputStream(file)
    val length = file.length.toInt
    val mapped = stream.getChannel.map(READ_ONLY, 0, length)
    // The mapping remains readable after the stream is closed.
    stream.close

    val data = Array.ofDim[Byte](length)
    var offset = 0
    while (mapped.hasRemaining()) {
      val chunk = min(mapped.remaining(), bs)
      mapped.get(data, offset, chunk)
      offset += chunk
    }
    data
  }

  /**
   * Return the path string with the final extension removed.
   */
  def basename(p: String) = {
    val dot = p.lastIndexOf(".")
    if (dot == -1) p else p.substring(0, dot)
  }

  /** Splits a path into (everything before the last '.', everything after). */
  def split(p: String) = {
    val dot = p.lastIndexOf(".")
    if (dot == -1) (p, "") else (p.substring(0, dot), p.substring(dot + 1, p.length))
  }

  /** Like [[slurp]], but wraps the bytes in a ByteBuffer window at `pos`/`size`. */
  def slurpToBuffer(path: String, pos: Int, size: Int, bs: Int = 262144) =
    ByteBuffer.wrap(slurp(path, bs), pos, size)

  /** Joins path segments with the platform separator. */
  def join(parts: String*) = parts.mkString(File.separator)

  //def findFiles(f:File, r:Regex):Array[File] = {
  //  val these = f.listFiles
  //  val good = these.filter(f => r.findFirstIn(f.getName).isDefined)
  //  good ++ these.filter(_.isDirectory).flatMap(recursiveListFiles(_, r))
  //}
}
| Tjoene/thesis | Case_Programs/geotrellis-0.7.0/src/main/scala/geotrellis/util.scala | Scala | gpl-2.0 | 2,383 |
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render
import java.io.ByteArrayOutputStream
import laika.api.Render
import laika.factory.RenderResultProcessor
import laika.io.Input
import laika.io.Output.BinaryOutput
import laika.io.OutputProvider.OutputConfig
import laika.tree.Documents.DocumentTree
import laika.tree.Paths.Root
import org.scalatest.{FlatSpec, Matchers}
/**
 * Tests the XSL-FO output prepared for PDF rendering: title insertion,
 * table-of-content generation and PDF bookmark trees, each toggled
 * individually through `PDFConfig`.
 */
class FOforPDFSpec extends FlatSpec with Matchers {

  // Render-result processor that captures the generated XSL-FO as UTF-8 text
  // instead of producing an actual PDF.
  case class FOTest (config: Option[PDFConfig]) extends RenderResultProcessor[FOWriter] {
    val factory = XSLFO
    private val foForPDF = new FOforPDF(config)
    def process (tree: DocumentTree, render: (DocumentTree, OutputConfig) => Unit, output: BinaryOutput): Unit = {
      val fo = foForPDF.renderFO(tree, render)
      val out = output.asStream
      out.write(fo.getBytes("UTF-8"))
    }
  }

  // Builders for the expected FO snippets of the document tree under test.
  trait ResultModel {
    private lazy val defaultTemplate = Input.fromClasspath("/templates/default.template.fo", Root / "default.template.fo").asParserInput.source.toString
    def results (num: Int): String = (1 to num) map (result) reduce (_ + _)
    // Documents 1-2 live in the root tree, 3-4 in tree1, 5-6 in tree2.
    def idPrefix (num: Int): String = if (num > 4) "_tree2" else if (num > 2) "_tree1" else ""
    def result (num: Int): String = {
      s"""<fo:marker marker-class-name="chapter"><fo:block>Title $num & More</fo:block></fo:marker>
      |<fo:block id="${idPrefix(num)}_doc${num}_title-$num" font-family="sans-serif" font-size="16pt" font-weight="bold" keep-with-next="always" space-after="7mm" space-before="12mm">Title $num & More</fo:block>
      |<fo:block font-family="serif" font-size="10pt" space-after="3mm">Text $num</fo:block>""".stripMargin
    }
    def resultsWithDocTitle (num: Int): String = (1 to num) map (resultWithDocTitle) reduce (_ + _)
    def resultWithDocTitle (num: Int): String = {
      s"""<fo:block id="${idPrefix(num)}_doc${num}_">
      | <fo:marker marker-class-name="chapter"><fo:block>Title $num & More</fo:block></fo:marker>
      | <fo:block id="${idPrefix(num)}_doc${num}_title-$num" font-family="sans-serif" font-size="16pt" font-weight="bold" keep-with-next="always" space-after="7mm" space-before="12mm">Title $num & More</fo:block>
      |</fo:block>
      |<fo:block font-family="serif" font-size="10pt" space-after="3mm">Text $num</fo:block>""".stripMargin
    }
    def treeTitleResult (num: Int): String = {
      val idPrefix = if (num == 3) "_tree2" else if (num == 2) "_tree1" else ""
      s"""<fo:block id="${idPrefix}__title__" font-family="sans-serif" font-size="16pt" font-weight="bold" keep-with-next="always" space-after="7mm" space-before="12mm">Tree $num & More</fo:block>"""
    }
    def treeLinkResult (num: Int): String = {
      val idPrefix = if (num == 3) "_tree2" else if (num == 2) "_tree1" else ""
      s"""<fo:block id="${idPrefix}__title__"/>"""
    }
    def tocTitle: String = """<fo:marker marker-class-name="chapter"><fo:block>Contents</fo:block></fo:marker>
    |<fo:block font-family="sans-serif" font-size="16pt" keep-with-next="always" space-after="7mm" space-before="12mm">Contents</fo:block>
    |"""stripMargin
    def tocDocResult (num: Int): String = {
      val margin = if (num > 2) """ margin-left="4mm"""" else ""
      val fontsize = if (num > 2) "11pt" else "12pt"
      val spaceBefore = if (num > 2) "2mm" else "5mm"
      s"""<fo:block font-family="serif" font-size="$fontsize"$margin space-after="0mm" space-before="$spaceBefore" text-align-last="justify"><fo:basic-link color="#3956ac" internal-destination="${idPrefix(num)}_doc${num}_">Title $num & More<fo:leader leader-pattern="dots"></fo:leader><fo:page-number-citation ref-id="${idPrefix(num)}_doc${num}_" /></fo:basic-link></fo:block>""" + "\\n"
    }
    def tocTreeResult (num: Int): String =
      s"""<fo:block font-family="serif" font-size="12pt" space-after="0mm" space-before="5mm" text-align-last="justify"><fo:basic-link color="#3956ac" internal-destination="_tree${num}__title__">Tree ${num+1} & More<fo:leader leader-pattern="dots"></fo:leader><fo:page-number-citation ref-id="_tree${num}__title__" /></fo:basic-link></fo:block>""" + "\\n"
    def withDefaultTemplate(result: String, bookmarks: String = ""): String =
      defaultTemplate.replace("{{document.content}}", result).replace("{{document.fragments.bookmarks}}", bookmarks)
    def bookmarkTreeResult(treeNum: Int, docNum: Int): String = s""" <fo:bookmark internal-destination="_tree${treeNum}__title__">
    | <fo:bookmark-title>Tree ${treeNum + 1} & More</fo:bookmark-title>
    | <fo:bookmark internal-destination="_tree${treeNum}_doc${docNum}_">
    | <fo:bookmark-title>Title $docNum & More</fo:bookmark-title>
    | </fo:bookmark>
    | <fo:bookmark internal-destination="_tree${treeNum}_doc${docNum + 1}_">
    | <fo:bookmark-title>Title ${docNum + 1} & More</fo:bookmark-title>
    | </fo:bookmark>
    | </fo:bookmark>
    |""".stripMargin
    val bookmarkRootResult = """<fo:bookmark-tree>
    | <fo:bookmark internal-destination="_doc1_">
    | <fo:bookmark-title>Title 1 & More</fo:bookmark-title>
    | </fo:bookmark>
    | <fo:bookmark internal-destination="_doc2_">
    | <fo:bookmark-title>Title 2 & More</fo:bookmark-title>
    | </fo:bookmark>
    |""".stripMargin
    val closeBookmarks = "</fo:bookmark-tree>"
  }

  // Renders the TreeModel fixture with the configured PDFConfig and captures
  // the resulting FO text.
  trait Setup extends TreeModel with ResultModel {
    def config: Option[PDFConfig]
    def result: String = {
      val stream = new ByteArrayOutputStream
      Render as FOTest(config) from tree toStream stream
      stream.toString
    }
  }

  "The FOforPDF utility" should "render a tree with all structure elements disabled" in new Setup {
    val config = Some(PDFConfig(insertTitles = false, bookmarkDepth = 0, tocDepth = 0))
    result should be (withDefaultTemplate(results(6)))
  }

  it should "render a tree with inserted titles for documents and trees" in new Setup {
    val config = Some(PDFConfig(insertTitles = true, bookmarkDepth = 0, tocDepth = 0))
    result should be (withDefaultTemplate(treeTitleResult(1) + resultWithDocTitle(1) + resultWithDocTitle(2)
      + treeTitleResult(2) + resultWithDocTitle(3) + resultWithDocTitle(4)
      + treeTitleResult(3) + resultWithDocTitle(5) + resultWithDocTitle(6)))
  }

  it should "render a tree with a table of content" in new Setup {
    val config = Some(PDFConfig(insertTitles = false, bookmarkDepth = 0, tocDepth = Int.MaxValue, tocTitle = Some("Contents")))
    result should be (withDefaultTemplate(treeLinkResult(1) + tocTitle + tocDocResult(1) + tocDocResult(2)
      + tocTreeResult(1) + tocDocResult(3) + tocDocResult(4)
      + tocTreeResult(2) + tocDocResult(5) + tocDocResult(6).dropRight(1) + resultWithDocTitle(1) + resultWithDocTitle(2)
      + treeLinkResult(2) + resultWithDocTitle(3) + resultWithDocTitle(4)
      + treeLinkResult(3) + resultWithDocTitle(5) + resultWithDocTitle(6)))
  }

  it should "render a tree with bookmarks" in new Setup {
    val config = Some(PDFConfig(insertTitles = false, bookmarkDepth = Int.MaxValue, tocDepth = 0))
    result should be (withDefaultTemplate(treeLinkResult(1) + resultWithDocTitle(1) + resultWithDocTitle(2)
      + treeLinkResult(2) + resultWithDocTitle(3) + resultWithDocTitle(4)
      + treeLinkResult(3) + resultWithDocTitle(5) + resultWithDocTitle(6),
      bookmarkRootResult + bookmarkTreeResult(1,3) + bookmarkTreeResult(2,5).dropRight(1) + closeBookmarks))
  }

  it should "render a tree with all structure elements enabled" in new Setup {
    val config = Some(PDFConfig.default)
    result should be (withDefaultTemplate(
      treeTitleResult(1) + tocDocResult(1) + tocDocResult(2)
      + tocTreeResult(1) + tocDocResult(3) + tocDocResult(4)
      + tocTreeResult(2) + tocDocResult(5) + tocDocResult(6).dropRight(1)
      + resultWithDocTitle(1) + resultWithDocTitle(2)
      + treeTitleResult(2) + resultWithDocTitle(3) + resultWithDocTitle(4)
      + treeTitleResult(3) + resultWithDocTitle(5) + resultWithDocTitle(6),
      bookmarkRootResult + bookmarkTreeResult(1,3) + bookmarkTreeResult(2,5).dropRight(1) + closeBookmarks
    ))
  }

  it should "render a tree with all structure elements disabled by a tree configuration file" in new Setup {
    val config = None
    override val usePDFFileConfig = true
    result should be (withDefaultTemplate(results(6)))
  }
}
| amuramatsu/Laika | pdf/src/test/scala/laika/render/FOforPDFSpec.scala | Scala | apache-2.0 | 9,280 |
package me.aihe.dataframe.types
/**
* Created by aihe on 11/25/15.
*/
// Data type tag for sequence values, part of the DataType hierarchy.
class SeqType extends DataType

// Singleton instance, usable wherever a SeqType value is expected.
object SeqType extends SeqType
| AiHe/DataFrame | src/main/scala/me/aihe/dataframe/types/SeqType.scala | Scala | apache-2.0 | 140 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import java.util.Date
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions.{Attribute, Cast, Literal}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.util.quoteIdentifier
import org.apache.spark.sql.types.{StructField, StructType}
/**
 * A function defined in the catalog.
 *
 * @param identifier name of the function
 * @param className fully qualified class name of the function's implementation,
 *                  e.g. "org.apache.spark.util.MyFunc"
 * @param resources resource types and Uris used by the function
 */
case class CatalogFunction(
    identifier: FunctionIdentifier,
    className: String,
    resources: Seq[FunctionResource])
/**
 * Storage format, used to describe how a partition or a table is stored.
 *
 * @param locationUri location of the data (see the TODO below on its string form)
 * @param inputFormat fully qualified input format class name, if any
 * @param outputFormat fully qualified output format class name, if any
 * @param serde serialization/deserialization library class name, if any
 * @param compressed whether the data is compressed
 * @param properties additional serde/storage properties
 */
case class CatalogStorageFormat(
    // TODO(ekl) consider storing this field as java.net.URI for type safety. Note that this must
    // be converted to/from a hadoop Path object using new Path(new URI(locationUri)) and
    // path.toUri respectively before use as a filesystem path due to URI char escaping.
    locationUri: Option[String],
    inputFormat: Option[String],
    outputFormat: Option[String],
    serde: Option[String],
    compressed: Boolean,
    properties: Map[String, String]) {

  override def toString: String = {
    // Properties go through CatalogUtils.maskCredentials before display
    // (presumably to hide sensitive values -- see that method for specifics).
    val serdePropsToString = CatalogUtils.maskCredentials(properties) match {
      case props if props.isEmpty => ""
      case props => "Properties: " + props.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
    }
    // Empty fields are filtered out so the rendering only shows what is set.
    val output =
      Seq(locationUri.map("Location: " + _).getOrElse(""),
        inputFormat.map("InputFormat: " + _).getOrElse(""),
        outputFormat.map("OutputFormat: " + _).getOrElse(""),
        if (compressed) "Compressed" else "",
        serde.map("Serde: " + _).getOrElse(""),
        serdePropsToString)
    output.filter(_.nonEmpty).mkString("Storage(", ", ", ")")
  }
}
object CatalogStorageFormat {
  /** Empty storage format for default values and copies. */
  val empty = CatalogStorageFormat(locationUri = None, inputFormat = None,
    outputFormat = None, serde = None, compressed = false, properties = Map.empty)
}
/**
 * A partition (Hive style) defined in the catalog.
 *
 * @param spec partition spec values indexed by column name
 * @param storage storage format of the partition
 * @param parameters some parameters for the partition, for example, stats.
 */
case class CatalogTablePartition(
    spec: CatalogTypes.TablePartitionSpec,
    storage: CatalogStorageFormat,
    parameters: Map[String, String] = Map.empty) {

  override def toString: String = {
    val specString = spec.map { case (k, v) => s"$k=$v" }.mkString(", ")
    val output =
      Seq(
        s"Partition Values: [$specString]",
        s"$storage",
        s"Partition Parameters:{${parameters.map(p => p._1 + "=" + p._2).mkString(", ")}}")
    output.filter(_.nonEmpty).mkString("CatalogPartition(\\n\\t", "\\n\\t", ")")
  }

  /** Return the partition location, assuming it is specified. */
  def location: String = storage.locationUri.getOrElse {
    val specString = spec.map { case (k, v) => s"$k=$v" }.mkString(", ")
    throw new AnalysisException(s"Partition [$specString] did not specify locationUri")
  }

  /**
   * Given the partition schema, returns a row with that schema holding the partition values.
   */
  def toRow(partitionSchema: StructType): InternalRow = {
    InternalRow.fromSeq(partitionSchema.map { field =>
      // The default-partition placeholder maps back to a null column value.
      val partValue = if (spec(field.name) == ExternalCatalogUtils.DEFAULT_PARTITION_NAME) {
        null
      } else {
        spec(field.name)
      }
      // Cast the raw string value to the field's declared data type.
      Cast(Literal(partValue), field.dataType).eval()
    })
  }
}
/**
 * A container for bucketing information.
 * Bucketing is a technology for decomposing data sets into more manageable parts, and the number
 * of buckets is fixed so it does not fluctuate with data.
 *
 * @param numBuckets number of buckets.
 * @param bucketColumnNames the names of the columns that used to generate the bucket id.
 * @param sortColumnNames the names of the columns that used to sort data in each bucket.
 */
case class BucketSpec(
    numBuckets: Int,
    bucketColumnNames: Seq[String],
    sortColumnNames: Seq[String]) {
  // Reject non-positive bucket counts eagerly, at construction time.
  if (numBuckets <= 0) {
    throw new AnalysisException(s"Expected positive number of buckets, but got `$numBuckets`.")
  }

  override def toString: String = {
    val bucketPart = s"bucket columns: [${bucketColumnNames.mkString(", ")}]"
    val sortPart =
      if (sortColumnNames.isEmpty) ""
      else s", sort columns: [${sortColumnNames.mkString(", ")}]"
    s"$numBuckets buckets, $bucketPart$sortPart"
  }
}
/**
 * A table defined in the catalog.
 *
 * Note that Hive's metastore also tracks skewed columns. We should consider adding that in the
 * future once we have a better understanding of how we want to handle skewed columns.
 *
 * @param provider the name of the data source provider for this table, e.g. parquet, json, etc.
 *                 Can be None if this table is a View, should be "hive" for hive serde tables.
 * @param unsupportedFeatures is a list of string descriptions of features that are used by the
 *                            underlying table but not supported by Spark SQL yet.
 * @param tracksPartitionsInCatalog whether this table's partition metadata is stored in the
 *                                  catalog. If false, it is inferred automatically based on file
 *                                  structure.
 * @param schemaPreservesCase Whether or not the schema resolved for this table is case-sensitive.
 *                            When using a Hive Metastore, this flag is set to false if a case-
 *                            sensitive schema was unable to be read from the table properties.
 *                            Used to trigger case-sensitive schema inference at query time, when
 *                            configured.
 */
case class CatalogTable(
    identifier: TableIdentifier,
    tableType: CatalogTableType,
    storage: CatalogStorageFormat,
    schema: StructType,
    provider: Option[String] = None,
    partitionColumnNames: Seq[String] = Seq.empty,
    bucketSpec: Option[BucketSpec] = None,
    owner: String = "",
    createTime: Long = System.currentTimeMillis,
    lastAccessTime: Long = -1,
    properties: Map[String, String] = Map.empty,
    stats: Option[Statistics] = None,
    viewOriginalText: Option[String] = None,
    viewText: Option[String] = None,
    comment: Option[String] = None,
    unsupportedFeatures: Seq[String] = Seq.empty,
    tracksPartitionsInCatalog: Boolean = false,
    schemaPreservesCase: Boolean = true) {

  /** schema of this table's partition columns */
  def partitionSchema: StructType = StructType(schema.filter {
    c => partitionColumnNames.contains(c.name)
  })

  /** Return the database this table was specified to belong to, assuming it exists. */
  def database: String = identifier.database.getOrElse {
    throw new AnalysisException(s"table $identifier did not specify database")
  }

  /** Return the table location, assuming it is specified. */
  def location: String = storage.locationUri.getOrElse {
    throw new AnalysisException(s"table $identifier did not specify locationUri")
  }

  /** Return the fully qualified name of this table, assuming the database was specified. */
  def qualifiedName: String = identifier.unquotedString

  /** Syntactic sugar to update a field in `storage`. */
  def withNewStorage(
      locationUri: Option[String] = storage.locationUri,
      inputFormat: Option[String] = storage.inputFormat,
      outputFormat: Option[String] = storage.outputFormat,
      compressed: Boolean = false,
      serde: Option[String] = storage.serde,
      properties: Map[String, String] = storage.properties): CatalogTable = {
    copy(storage = CatalogStorageFormat(
      locationUri, inputFormat, outputFormat, serde, compressed, properties))
  }

  // Multi-line human-readable rendering; empty sections are filtered out.
  override def toString: String = {
    val tableProperties = properties.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
    val partitionColumns = partitionColumnNames.map(quoteIdentifier).mkString("[", ", ", "]")
    val bucketStrings = bucketSpec match {
      case Some(BucketSpec(numBuckets, bucketColumnNames, sortColumnNames)) =>
        val bucketColumnsString = bucketColumnNames.map(quoteIdentifier).mkString("[", ", ", "]")
        val sortColumnsString = sortColumnNames.map(quoteIdentifier).mkString("[", ", ", "]")
        Seq(
          s"Num Buckets: $numBuckets",
          if (bucketColumnNames.nonEmpty) s"Bucket Columns: $bucketColumnsString" else "",
          if (sortColumnNames.nonEmpty) s"Sort Columns: $sortColumnsString" else ""
        )
      case _ => Nil
    }
    val output =
      Seq(s"Table: ${identifier.quotedString}",
        if (owner.nonEmpty) s"Owner: $owner" else "",
        s"Created: ${new Date(createTime).toString}",
        s"Last Access: ${new Date(lastAccessTime).toString}",
        s"Type: ${tableType.name}",
        if (schema.nonEmpty) s"Schema: ${schema.mkString("[", ", ", "]")}" else "",
        if (provider.isDefined) s"Provider: ${provider.get}" else "",
        if (partitionColumnNames.nonEmpty) s"Partition Columns: $partitionColumns" else ""
      ) ++ bucketStrings ++ Seq(
        viewOriginalText.map("Original View: " + _).getOrElse(""),
        viewText.map("View: " + _).getOrElse(""),
        comment.map("Comment: " + _).getOrElse(""),
        if (properties.nonEmpty) s"Properties: $tableProperties" else "",
        if (stats.isDefined) s"Statistics: ${stats.get.simpleString}" else "",
        s"$storage",
        if (tracksPartitionsInCatalog) "Partition Provider: Catalog" else "")
    output.filter(_.nonEmpty).mkString("CatalogTable(\\n\\t", "\\n\\t", ")")
  }
}
// Enumeration-like type for the kind of a catalog table. The private
// constructor ensures only the three instances below can exist.
case class CatalogTableType private(name: String)
object CatalogTableType {
  val EXTERNAL = new CatalogTableType("EXTERNAL")
  val MANAGED = new CatalogTableType("MANAGED")
  val VIEW = new CatalogTableType("VIEW")
}
/**
 * A database defined in the catalog.
 *
 * @param name database name
 * @param description free-form description
 * @param locationUri default location of the database's data
 * @param properties additional database properties
 */
case class CatalogDatabase(
    name: String,
    description: String,
    locationUri: String,
    properties: Map[String, String])
// Shared type aliases for the catalog API.
object CatalogTypes {
  /**
   * Specifications of a table partition. Mapping column name to column value.
   */
  type TablePartitionSpec = Map[String, String]
}
/**
 * An interface that is implemented by logical plans to return the underlying catalog table.
 * If we can in the future consolidate SimpleCatalogRelation and MetastoreRelation, we should
 * probably remove this interface.
 */
trait CatalogRelation {
  /** The catalog table metadata this plan wraps. */
  def catalogTable: CatalogTable
  /** The attributes produced by this relation. */
  def output: Seq[Attribute]
}
/**
 * A [[LogicalPlan]] that wraps [[CatalogTable]].
 *
 * Note that in the future we should consolidate this and HiveCatalogRelation.
 *
 * @param databaseName the database the wrapped table must belong to
 * @param metadata the catalog table being wrapped
 */
case class SimpleCatalogRelation(
    databaseName: String,
    metadata: CatalogTable)
  extends LeafNode with CatalogRelation {

  override def catalogTable: CatalogTable = metadata

  // This relation is never considered resolved: the analyzer is expected to
  // replace it with a concrete relation before execution.
  override lazy val resolved: Boolean = false

  override val output: Seq[Attribute] = {
    val (partCols, dataCols) = metadata.schema.toAttributes
      // Since data can be dumped in randomly with no validation, everything is nullable.
      .map(_.withNullability(true).withQualifier(Some(metadata.identifier.table)))
      .partition { a =>
        metadata.partitionColumnNames.contains(a.name)
      }
    // Data columns come first, followed by partition columns.
    dataCols ++ partCols
  }

  require(
    // Idiomatic Option.contains instead of comparing against Some(...);
    // avoids allocating a Some on every instantiation and reads more clearly.
    metadata.identifier.database.contains(databaseName),
    "provided database does not match the one specified in the table definition")
}
| spark0001/spark2.1.1 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala | Scala | apache-2.0 | 12,567 |
/*
* Licensed to STRATIO (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The STRATIO (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.stratio.crossdata.server.actors
import com.codahale.metrics.Timer
import com.stratio.crossdata.common.utils.Metrics
/**
 * Trait to be able to time the operations inside an actor.
 */
trait TimeTracker {

  /**
   * Name of the timer.
   *
   * NOTE(review): the default body is `???`, which throws NotImplementedError
   * the first time `timerMetrics` (and therefore this val) is evaluated.
   * Mixers are clearly expected to override this with a real name; consider
   * making it an abstract `def` so the compiler enforces the override.
   */
  lazy val timerName: String = ???

  /**
   * Timer gauge.
   */
  // Lazy so that the registry lookup happens only after `timerName` has been
  // overridden by the concrete class mixing this trait in.
  lazy val timerMetrics: Timer = Metrics.getRegistry.timer(timerName)

  /**
   * Initialize the timer.
   *
   * @return the timing context; pass it to [[finishTimer]] to record the elapsed time.
   */
  def initTimer(): Timer.Context = timerMetrics.time()

  /**
   * Stop the timer and record the elapsed time in the underlying metric.
   *
   * @param context The timing context returned by [[initTimer]].
   */
  def finishTimer(context: Timer.Context) : Unit= {
    context.stop()
  }
}
| pfcoperez/crossdata | crossdata-server/src/main/scala/com/stratio/crossdata/server/actors/TimeTracker.scala | Scala | apache-2.0 | 1,459 |
package model
import scala.slick.driver.MySQLDriver.simple._
import scala.slick.ast.ColumnOption.{Default, NotNull}
/**
* CREATE TABLE IF NOT EXISTS `wordnet31_snapshot`.`synsets` (
`synsetid` INT(10) UNSIGNED NOT NULL DEFAULT '0',
`pos` ENUM('n','v','a','r','s') NULL DEFAULT NULL,
`lexdomainid` TINYINT(3) UNSIGNED NOT NULL DEFAULT '0',
`definition` MEDIUMTEXT NULL DEFAULT NULL,
PRIMARY KEY (`synsetid`),
INDEX `k_synsets_lexdomainid` (`lexdomainid` ASC),
CONSTRAINT `fk_synsets_lexdomainid`
FOREIGN KEY (`lexdomainid`)
REFERENCES `wordnet31_snapshot`.`lexdomains` (`lexdomainid`))
ENGINE = InnoDB
DEFAULT CHARACTER SET = utf8
*/
/** One row of the `synsets` table; column meanings are given by the DDL in the comment above. */
case class synsets(synsetid:Int, lexdomainid:Int, pos:String, definition:String)
// Slick table mapping for `synsets`; column types mirror the MySQL DDL above.
class _synsets(tag:Tag) extends Table[synsets](tag,"synsets"){
  def synsetid = column[Int]("synsetid", O.PrimaryKey, O.DBType("INT(10)"), NotNull, Default(0)) // This is the primary key column
  def lexdomainid = column[Int]("lexdomainid",NotNull,O.DBType("TINYINT(3)"),Default(0))
  // NOTE(review): `pos` and `definition` are declared NULLable in the DDL but
  // mapped here as plain String rather than Option[String] — confirm the data
  // never actually carries NULLs, otherwise reads will fail.
  def pos = column[String]("pos",O.DBType("ENUM('n','v','a','r','s')"))
  def definition = column[String]("definition",O.DBType("MEDIUMTEXT"))
  // Default projection mapping the four columns to/from the `synsets` case class.
  def * = (synsetid, lexdomainid, pos,definition) <> (synsets.tupled, synsets.unapply)
  val lexdomains = TableQuery[_lexdomains]
  // Foreign key to lexdomains(lexdomainid), matching `fk_synsets_lexdomainid` in the DDL.
  def fk_synsets_lexdomainid = foreignKey("fk_synsets_lexdomainid", lexdomainid, lexdomains)(_.lexdomainid)
}
| gaoyike/WordNet-Scala-Slick-Model | src/main/scala/model/synsets.scala | Scala | bsd-2-clause | 1,410 |
package com.sksamuel.scapegoat.inspections.style
import com.sksamuel.scapegoat.InspectionTest
/** @author Stephen Samuel */
class SimplifyBooleanExpressionTest extends InspectionTest {

  override val inspections = Seq(new SimplifyBooleanExpression)

  // The outer description previously read "incorrectly named exceptions",
  // a copy/paste leftover from another suite; it now names what is tested.
  "simplifiable boolean expression" - {
    "should report warning" in {
      // `b == false` can be simplified to `!b`, which the inspection should flag.
      // The snippet now uses proper `|` margins so stripMargin actually applies
      // (previously stripMargin was a no-op because no margin chars were present).
      val code =
        """object Test {
          |  val b = false
          |  val f = b == false
          |}
        """.stripMargin
      compileCodeSnippet(code)
      compiler.scapegoat.feedback.warnings.size shouldBe 1
    }
  }
}
| sksamuel/scalac-scapegoat-plugin | src/test/scala/com/sksamuel/scapegoat/inspections/style/SimplifyBooleanExpressionTest.scala | Scala | apache-2.0 | 596 |
package com.olegych.scastie
package balancer
import akka.NotUsed
import akka.actor.{Actor, ActorRef}
import akka.stream.scaladsl.Source
import com.olegych.scastie.api._
import com.olegych.scastie.util.GraphStageForwarder
import scala.collection.mutable.{Map => MMap, Queue => MQueue}
import scala.concurrent.duration.DurationLong
/** Request to subscribe to the stream of progress events for the given snippet. */
case class SubscribeProgress(snippetId: SnippetId)
/** Internal message, scheduled after a snippet finishes, to drop its per-snippet state. */
private case class Cleanup(snippetId: SnippetId)
/**
 * Buffers [[SnippetProgress]] events per snippet and replays them into an Akka
 * Streams source handed out to subscribers.
 *
 * State lives in two maps: `subscribers` holds the stream source plus, once
 * the graph stage has materialized, the forwarder actor to push into;
 * `queuedMessages` buffers progress events received before that forwarder
 * registers itself.
 */
class ProgressActor extends Actor {
  type ProgressSource = Source[SnippetProgress, NotUsed]

  // snippetId -> (source handed to subscribers, forwarder actor once materialized)
  private val subscribers = MMap.empty[SnippetId, (ProgressSource, Option[ActorRef])]
  // Progress events waiting until the forwarder actor is known.
  private val queuedMessages = MMap.empty[SnippetId, MQueue[SnippetProgress]]

  override def receive: Receive = {
    // A client asks for the event stream of a snippet.
    case SubscribeProgress(snippetId) =>
      val (source, _) = getOrCreateNewSubscriberInfo(snippetId, self)
      sender() ! source
    // A runner reports progress: buffer it, then flush if the forwarder is ready.
    case snippetProgress: SnippetProgress =>
      snippetProgress.snippetId.foreach { snippetId =>
        getOrCreateNewSubscriberInfo(snippetId, self)
        queuedMessages.getOrElseUpdate(snippetId, MQueue()).enqueue(snippetProgress)
        sendQueuedMessages(snippetId, self)
      }
    // The graph stage materialized and registers its forwarder actor.
    case (snippedId: SnippetId, graphStageForwarderActor: ActorRef) =>
      subscribers.get(snippedId).foreach(s => subscribers.update(snippedId, s.copy(_2 = Some(graphStageForwarderActor))))
      sendQueuedMessages(snippedId, self)
    // Scheduled after the final progress message: drop all state for the snippet.
    case Cleanup(snippetId) =>
      subscribers.remove(snippetId)
      queuedMessages.remove(snippetId)
  }

  /** Returns the subscriber entry for `snippetId`, creating the source lazily on first access. */
  private def getOrCreateNewSubscriberInfo(snippetId: SnippetId, self: ActorRef): (ProgressSource, Option[ActorRef]) = {
    subscribers.getOrElseUpdate(
      snippetId,
      Source.fromGraph(new GraphStageForwarder("outlet-graph-" + snippetId, self, snippetId)) -> None
    )
  }

  /**
   * Drains all queued messages for `snippetId` into the forwarder actor, if it
   * has registered yet. When the terminal message (`isDone`) is forwarded, a
   * [[Cleanup]] is scheduled 3 seconds later so state is not freed instantly.
   */
  private def sendQueuedMessages(snippetId: SnippetId, self: ActorRef): Unit =
    for {
      messageQueue <- queuedMessages.get(snippetId).toSeq
      (_, Some(graphStageForwarderActor)) <- subscribers.get(snippetId).toSeq
      message <- messageQueue.dequeueAll(_ => true)
    } yield {
      graphStageForwarderActor ! message
      if (message.isDone) context.system.scheduler.scheduleOnce(3.seconds, self, Cleanup(snippetId))(context.dispatcher)
    }
}
| scalacenter/scastie | balancer/src/main/scala/com.olegych.scastie.balancer/ProgressActor.scala | Scala | apache-2.0 | 2,250 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import controllers.Assets.Asset
import java.net.URLEncoder
import java.util.{ Optional, UUID }
import scala.annotation._
import scala.collection.JavaConverters._
import scala.compat.java8.OptionConverters._
import reflect.ClassTag
/**
* Binder for query string parameters.
*
* You can provide an implementation of `QueryStringBindable[A]` for any type `A` you want to be able to
* bind directly from the request query string.
*
* For example, if you have the following type to encode pagination:
*
* {{{
* /**
* * @param index Current page index
* * @param size Number of items in a page
* */
* case class Pager(index: Int, size: Int)
* }}}
*
* Play will create a `Pager(5, 42)` value from a query string looking like `/foo?p.index=5&p.size=42` if you define
* an instance of `QueryStringBindable[Pager]` available in the implicit scope.
*
* For example:
*
* {{{
* object Pager {
* implicit def queryStringBinder(implicit intBinder: QueryStringBindable[Int]) = new QueryStringBindable[Pager] {
* override def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, Pager]] = {
* for {
* index <- intBinder.bind(key + ".index", params)
* size <- intBinder.bind(key + ".size", params)
* } yield {
* (index, size) match {
* case (Right(index), Right(size)) => Right(Pager(index, size))
* case _ => Left("Unable to bind a Pager")
* }
* }
* }
* override def unbind(key: String, pager: Pager): String = {
* intBinder.unbind(key + ".index", pager.index) + "&" + intBinder.unbind(key + ".size", pager.size)
* }
* }
* }
* }}}
*
* To use it in a route, just write a type annotation aside the parameter you want to bind:
*
* {{{
* GET /foo controllers.foo(p: Pager)
* }}}
*/
@implicitNotFound(
  "No QueryString binder found for type ${A}. Try to implement an implicit QueryStringBindable for this type."
)
trait QueryStringBindable[A] {
  self =>

  /**
   * Bind a query string parameter.
   *
   * @param key Parameter key
   * @param params QueryString data
   * @return `None` if the parameter was not present in the query string data. Otherwise, returns `Some` of either
   * `Right` of the parameter value, or `Left` of an error message if the binding failed.
   */
  def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, A]]

  /**
   * Unbind a query string parameter.
   *
   * @param key Parameter key
   * @param value Parameter value.
   * @return a query string fragment containing the key and its value. E.g. "foo=42"
   */
  def unbind(key: String, value: A): String

  /**
   * Javascript function to unbind in the Javascript router.
   */
  def javascriptUnbind: String = """function(k,v) {return encodeURIComponent(k)+'='+encodeURIComponent(v)}"""

  /**
   * Transform this QueryStringBindable[A] to QueryStringBindable[B]
   *
   * The derived binder delegates to this one, converting with `toB` after a
   * successful bind and with `toA` before unbinding; the JavaScript unbind
   * function is forwarded unchanged.
   */
  def transform[B](toB: A => B, toA: B => A) = new QueryStringBindable[B] {
    def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, B]] = {
      self.bind(key, params).map(_.right.map(toB))
    }
    def unbind(key: String, value: B): String = self.unbind(key, toA(value))
    override def javascriptUnbind: String = self.javascriptUnbind
  }
}
/**
* Binder for URL path parameters.
*
* You can provide an implementation of `PathBindable[A]` for any type `A` you want to be able to
* bind directly from the request path.
*
* For example, given this class definition:
*
* {{{
* case class User(id: Int, name: String, age: Int)
* }}}
*
* You can define a binder retrieving a `User` instance from its id, useable like the following:
*
* {{{
* // In your routes:
* // GET /show/:user controllers.Application.show(user)
* // For example: /show/42
*
* class HomeController @Inject() (val controllerComponents: ControllerComponents) extends BaseController {
* def show(user: User) = Action {
* ...
* }
* }
* }}}
*
* The definition of binder can look like the following:
*
* {{{
* object User {
* implicit def pathBinder(implicit intBinder: PathBindable[Int]) = new PathBindable[User] {
* override def bind(key: String, value: String): Either[String, User] = {
* for {
* id <- intBinder.bind(key, value).right
* user <- User.findById(id).toRight("User not found").right
* } yield user
* }
* override def unbind(key: String, user: User): String = {
* intBinder.unbind(key, user.id)
* }
* }
* }
* }}}
*/
@implicitNotFound(
  "No URL path binder found for type ${A}. Try to implement an implicit PathBindable for this type."
)
trait PathBindable[A] {
  self =>

  /**
   * Bind an URL path parameter.
   *
   * @param key Parameter key
   * @param value The value as String (extracted from the URL path)
   * @return `Right` of the value or `Left` of an error message if the binding failed
   */
  def bind(key: String, value: String): Either[String, A]

  /**
   * Unbind a URL path parameter.
   *
   * @param key Parameter key
   * @param value Parameter value.
   * @return the raw path fragment representing the value
   */
  def unbind(key: String, value: A): String

  /**
   * Javascript function to unbind in the Javascript router.
   */
  def javascriptUnbind: String = """function(k,v) {return v}"""

  /**
   * Transform this PathBinding[A] to PathBinding[B]
   *
   * The derived binder delegates to this one, converting with `toB` after a
   * successful bind and with `toA` before unbinding.
   */
  def transform[B](toB: A => B, toA: B => A) = new PathBindable[B] {
    def bind(key: String, value: String): Either[String, B] = self.bind(key, value).right.map(toB)
    def unbind(key: String, value: B): String = self.unbind(key, toA(value))
    // Consistency fix: propagate the JavaScript unbinder exactly as
    // QueryStringBindable.transform does; previously transformed binders
    // silently fell back to the default identity unbind function.
    override def javascriptUnbind: String = self.javascriptUnbind
  }
}
/**
 * Transform a value to a Javascript literal.
 */
@implicitNotFound(
  "No JavaScript literal binder found for type ${A}. Try to implement an implicit JavascriptLiteral for this type."
)
trait JavascriptLiteral[A] {
  /**
   * Convert a value of A to a JavaScript literal.
   *
   * @param value the value to render
   * @return JavaScript source text for the literal (e.g. `"foo"`, `42`, `null`)
   */
  def to(value: A): String
}
/**
 * Default JavaScript literals converters.
 */
object JavascriptLiteral {

  /** Renders `null` as the JavaScript `null` literal and any other value via `toString`. */
  private def nullSafeValue(value: Any): String =
    Option(value).fold("null")(_.toString)

  /** Renders `null` as the JavaScript `null` literal and any other value as a double-quoted string. */
  private def nullSafeString(value: Any): String =
    Option(value).fold("null")(v => "\"" + v.toString + "\"")

  /** Convert a Scala String to a quoted JavaScript string (or JavaScript null). */
  implicit def literalString: JavascriptLiteral[String] = new JavascriptLiteral[String] {
    def to(value: String): String = nullSafeString(value)
  }

  /** Convert a Scala Int to a JavaScript number. */
  implicit def literalInt: JavascriptLiteral[Int] = new JavascriptLiteral[Int] {
    def to(value: Int): String = s"$value"
  }

  /** Convert a Java Integer to a JavaScript number (or JavaScript null). */
  implicit def literalJavaInteger: JavascriptLiteral[java.lang.Integer] = new JavascriptLiteral[java.lang.Integer] {
    def to(value: java.lang.Integer): String = nullSafeValue(value)
  }

  /** Convert a Scala Long to a JavaScript number. */
  implicit def literalLong: JavascriptLiteral[Long] = new JavascriptLiteral[Long] {
    def to(value: Long): String = s"$value"
  }

  /** Convert a Java Long to a JavaScript number (or JavaScript null). */
  implicit def literalJavaLong: JavascriptLiteral[java.lang.Long] = new JavascriptLiteral[java.lang.Long] {
    def to(value: java.lang.Long): String = nullSafeValue(value)
  }

  /** Convert a Scala Boolean to a JavaScript boolean. */
  implicit def literalBoolean: JavascriptLiteral[Boolean] = new JavascriptLiteral[Boolean] {
    def to(value: Boolean): String = s"$value"
  }

  /** Convert a Java Boolean to a JavaScript boolean (or JavaScript null). */
  implicit def literalJavaBoolean: JavascriptLiteral[java.lang.Boolean] = new JavascriptLiteral[java.lang.Boolean] {
    def to(value: java.lang.Boolean): String = nullSafeValue(value)
  }

  /** Convert a Scala Option to a JavaScript literal, rendering None as null. */
  implicit def literalOption[T](implicit jsl: JavascriptLiteral[T]): JavascriptLiteral[Option[T]] = new JavascriptLiteral[Option[T]] {
    def to(value: Option[T]): String = value.fold("null")(jsl.to)
  }

  /** Convert a Java Optional to a JavaScript literal, rendering an empty Optional as null. */
  implicit def literalJavaOption[T](implicit jsl: JavascriptLiteral[T]): JavascriptLiteral[Optional[T]] = new JavascriptLiteral[Optional[T]] {
    def to(value: Optional[T]): String = value.asScala.fold("null")(jsl.to)
  }

  /** Convert a Play Asset to a quoted JavaScript string. */
  implicit def literalAsset: JavascriptLiteral[Asset] = new JavascriptLiteral[Asset] {
    def to(value: Asset): String = nullSafeString(value.name)
  }

  /** Convert a java.util.UUID to a quoted JavaScript string (or JavaScript null). */
  implicit def literalUUID: JavascriptLiteral[UUID] = new JavascriptLiteral[UUID] {
    def to(value: UUID): String = nullSafeString(value)
  }
}
/**
 * Default binders for Query String
 */
object QueryStringBindable {

  /**
   * A helper class for creating QueryStringBindables to map the value of a single key
   *
   * @param parse a function to parse the param value
   * @param serialize a function to serialize and URL-encode the param value. Remember to encode arbitrary strings,
   *                  for example using URLEncoder.encode.
   * @param error a function for rendering an error message if an error occurs
   * @tparam A the type being parsed
   */
  class Parsing[A](parse: String => A, serialize: A => String, error: (String, Exception) => String)
    extends QueryStringBindable[A] {

    // Only the first occurrence of the key in the query string is considered.
    def bind(key: String, params: Map[String, Seq[String]]) = params.get(key).flatMap(_.headOption).map { p =>
      try {
        Right(parse(p))
      } catch {
        case e: Exception => Left(error(key, e))
      }
    }

    def unbind(key: String, value: A) = key + "=" + serialize(value)
  }

  /**
   * QueryString binder for String.
   */
  implicit def bindableString = new QueryStringBindable[String] {
    def bind(key: String, params: Map[String, Seq[String]]) = params.get(key).flatMap(_.headOption).map(Right(_)) // No need to URL decode from query string since netty already does that
    // Use an option here in case users call index(null) in the routes -- see #818
    def unbind(key: String, value: String) = key + "=" + URLEncoder.encode(Option(value).getOrElse(""), "utf-8")
  }

  /**
   * QueryString binder for Char.
   */
  implicit object bindableChar extends QueryStringBindable[Char] {
    def bind(key: String, params: Map[String, Seq[String]]) = params.get(key).flatMap(_.headOption).map { value =>
      // Error message fix: a Char is one *character*, not one "digit" — the
      // check accepts any single character, so the message now says so.
      if (value.length != 1) Left(s"Cannot parse parameter $key with value '$value' as Char: $key must be exactly one character in length.")
      else Right(value.charAt(0))
    }
    def unbind(key: String, value: Char) = key + "=" + value.toString
  }

  /**
   * QueryString binder for Int.
   */
  implicit object bindableInt extends Parsing[Int](
    _.toInt, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Int: %s".format(key, e.getMessage)
  )

  /**
   * QueryString binder for Integer.
   */
  implicit def bindableJavaInteger: QueryStringBindable[java.lang.Integer] =
    bindableInt.transform(i => i, i => i)

  /**
   * QueryString binder for Long.
   */
  implicit object bindableLong extends Parsing[Long](
    _.toLong, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Long: %s".format(key, e.getMessage)
  )

  /**
   * QueryString binder for Java Long.
   */
  implicit def bindableJavaLong: QueryStringBindable[java.lang.Long] =
    bindableLong.transform(l => l, l => l)

  /**
   * QueryString binder for Double.
   */
  implicit object bindableDouble extends Parsing[Double](
    _.toDouble, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Double: %s".format(key, e.getMessage)
  )

  /**
   * QueryString binder for Java Double.
   */
  implicit def bindableJavaDouble: QueryStringBindable[java.lang.Double] =
    bindableDouble.transform(d => d, d => d)

  /**
   * QueryString binder for Float.
   */
  implicit object bindableFloat extends Parsing[Float](
    _.toFloat, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Float: %s".format(key, e.getMessage)
  )

  /**
   * QueryString binder for Java Float.
   */
  implicit def bindableJavaFloat: QueryStringBindable[java.lang.Float] =
    bindableFloat.transform(f => f, f => f)

  /**
   * QueryString binder for Boolean.
   * Accepts "true"/"false" as well as 1/0.
   */
  implicit object bindableBoolean extends Parsing[Boolean](
    _.trim match {
      case "true" => true
      case "false" => false
      case b => b.toInt match {
        case 1 => true
        case 0 => false
      }
    }, _.toString,
    (key: String, e: Exception) => "Cannot parse parameter %s as Boolean: should be true, false, 0 or 1".format(key)
  ) {
    override def javascriptUnbind = """function(k,v){return k+'='+(!!v)}"""
  }

  /**
   * QueryString binder for Java Boolean.
   */
  implicit def bindableJavaBoolean: QueryStringBindable[java.lang.Boolean] =
    bindableBoolean.transform(b => b, b => b)

  /**
   * QueryString binder for java.util.UUID.
   */
  implicit object bindableUUID extends Parsing[UUID](
    UUID.fromString(_), _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as UUID: %s".format(key, e.getMessage)
  )

  /**
   * QueryString binder for Option.
   * An absent key binds to `Right(None)` rather than failing.
   */
  implicit def bindableOption[T: QueryStringBindable] = new QueryStringBindable[Option[T]] {
    def bind(key: String, params: Map[String, Seq[String]]) = {
      Some(
        implicitly[QueryStringBindable[T]].bind(key, params)
          .map(_.right.map(Some(_)))
          .getOrElse(Right(None)))
    }
    def unbind(key: String, value: Option[T]) = value.map(implicitly[QueryStringBindable[T]].unbind(key, _)).getOrElse("")
    override def javascriptUnbind = javascriptUnbindOption(implicitly[QueryStringBindable[T]].javascriptUnbind)
  }

  /**
   * QueryString binder for Java Optional.
   */
  implicit def bindableJavaOption[T: QueryStringBindable]: QueryStringBindable[Optional[T]] = new QueryStringBindable[Optional[T]] {
    def bind(key: String, params: Map[String, Seq[String]]) = {
      Some(
        implicitly[QueryStringBindable[T]].bind(key, params)
          .map(_.right.map(Optional.ofNullable[T]))
          .getOrElse(Right(Optional.empty[T])))
    }
    def unbind(key: String, value: Optional[T]) = {
      value.asScala.map(implicitly[QueryStringBindable[T]].unbind(key, _)).getOrElse("")
    }
    override def javascriptUnbind = javascriptUnbindOption(implicitly[QueryStringBindable[T]].javascriptUnbind)
  }

  // Wraps an element's JS unbinder so that null/undefined unbinds to "".
  private def javascriptUnbindOption(jsUnbindT: String) = "function(k,v){return v!=null?(" + jsUnbindT + ")(k,v):''}"

  /**
   * QueryString binder for Seq
   */
  implicit def bindableSeq[T: QueryStringBindable]: QueryStringBindable[Seq[T]] = new QueryStringBindable[Seq[T]] {
    def bind(key: String, params: Map[String, Seq[String]]) = bindSeq[T](key, params)
    def unbind(key: String, values: Seq[T]) = unbindSeq(key, values)
    override def javascriptUnbind = javascriptUnbindSeq(implicitly[QueryStringBindable[T]].javascriptUnbind)
  }

  /**
   * QueryString binder for List
   */
  implicit def bindableList[T: QueryStringBindable]: QueryStringBindable[List[T]] =
    bindableSeq[T].transform(_.toList, _.toSeq)

  /**
   * QueryString binder for java.util.List
   */
  implicit def bindableJavaList[T: QueryStringBindable]: QueryStringBindable[java.util.List[T]] = new QueryStringBindable[java.util.List[T]] {
    def bind(key: String, params: Map[String, Seq[String]]) = bindSeq[T](key, params).map(_.right.map(_.asJava))
    def unbind(key: String, values: java.util.List[T]) = unbindSeq(key, values.asScala)
    override def javascriptUnbind = javascriptUnbindSeq(implicitly[QueryStringBindable[T]].javascriptUnbind)
  }

  // Binds every occurrence of `key`; if any element fails, all element errors
  // are collected (in order) and joined into a single Left.
  private def bindSeq[T: QueryStringBindable](key: String, params: Map[String, Seq[String]]): Option[Either[String, Seq[T]]] = {
    @tailrec
    def collectResults(values: List[String], results: List[T]): Either[String, Seq[T]] = {
      values match {
        case Nil => Right(results.reverse) // to preserve the original order
        case head :: rest =>
          implicitly[QueryStringBindable[T]].bind(key, Map(key -> Seq(head))) match {
            case None => collectResults(rest, results)
            case Some(Right(result)) => collectResults(rest, result :: results)
            // On the first failure, switch to collecting error messages only.
            case Some(Left(err)) => collectErrs(rest, err :: Nil)
          }
      }
    }
    @tailrec
    def collectErrs(values: List[String], errs: List[String]): Left[String, Seq[T]] = {
      values match {
        case Nil => Left(errs.reverse.mkString("\n"))
        case head :: rest =>
          implicitly[QueryStringBindable[T]].bind(key, Map(key -> Seq(head))) match {
            case Some(Left(err)) => collectErrs(rest, err :: errs)
            case Some(Right(_)) | None => collectErrs(rest, errs)
          }
      }
    }
    params.get(key) match {
      case None => Some(Right(Nil))
      case Some(values) => Some(collectResults(values.toList, Nil))
    }
  }

  // Unbinds each element as its own key=value pair, joined with '&'.
  private def unbindSeq[T: QueryStringBindable](key: String, values: Iterable[T]): String = {
    (for (value <- values) yield {
      implicitly[QueryStringBindable[T]].unbind(key, value)
    }).mkString("&")
  }

  private def javascriptUnbindSeq(jsUnbindT: String) = "function(k,vs){var l=vs&&vs.length,r=[],i=0;for(;i<l;i++){r[i]=(" + jsUnbindT + ")(k,vs[i])}return r.join('&')}"

  /**
   * QueryString binder for QueryStringBindable.
   */
  implicit def javaQueryStringBindable[T <: play.mvc.QueryStringBindable[T]](implicit ct: ClassTag[T]) = new QueryStringBindable[T] {
    def bind(key: String, params: Map[String, Seq[String]]) = {
      try {
        // Java binders are instantiated reflectively and must have a no-arg constructor.
        val o = ct.runtimeClass.newInstance.asInstanceOf[T].bind(key, params.mapValues(_.toArray).asJava)
        if (o.isPresent) {
          Some(Right(o.get))
        } else {
          None
        }
      } catch {
        case e: Exception => Some(Left(e.getMessage))
      }
    }
    def unbind(key: String, value: T) = {
      value.unbind(key)
    }
    override def javascriptUnbind = Option(ct.runtimeClass.newInstance.asInstanceOf[T].javascriptUnbind())
      .getOrElse(super.javascriptUnbind)
  }
}
/**
 * Default binders for URL path part.
 */
object PathBindable {

  /**
   * A helper class for creating PathBindables to map the value of a path pattern/segment
   *
   * @param parse a function to parse the path value
   * @param serialize a function to serialize the path value to a string
   * @param error a function for rendering an error message if an error occurs
   * @tparam A the type being parsed
   */
  class Parsing[A](parse: String => A, serialize: A => String, error: (String, Exception) => String)
    extends PathBindable[A] {

    // added for bincompat
    @deprecated("Use constructor without codec", "2.6.2")
    private[mvc] def this(parse: String => A, serialize: A => String, error: (String, Exception) => String, codec: Codec) = {
      this(parse, serialize, error)
    }

    def bind(key: String, value: String): Either[String, A] = {
      try {
        Right(parse(value))
      } catch {
        case e: Exception => Left(error(key, e))
      }
    }

    def unbind(key: String, value: A): String = serialize(value)
  }

  /**
   * Path binder for String.
   */
  implicit object bindableString extends Parsing[String](
    (s: String) => s, (s: String) => s, (key: String, e: Exception) => "Cannot parse parameter %s as String: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Char.
   */
  implicit object bindableChar extends PathBindable[Char] {
    def bind(key: String, value: String) = {
      // Error message fix: a Char is one *character*, not one "digit" — the
      // check accepts any single character, so the message now says so.
      if (value.length != 1) Left(s"Cannot parse parameter $key with value '$value' as Char: $key must be exactly one character in length.")
      else Right(value.charAt(0))
    }
    def unbind(key: String, value: Char) = value.toString
  }

  /**
   * Path binder for Int.
   */
  implicit object bindableInt extends Parsing[Int](
    _.toInt, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Int: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Java Integer.
   */
  implicit def bindableJavaInteger: PathBindable[java.lang.Integer] =
    bindableInt.transform(i => i, i => i)

  /**
   * Path binder for Long.
   */
  implicit object bindableLong extends Parsing[Long](
    _.toLong, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Long: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Java Long.
   */
  implicit def bindableJavaLong: PathBindable[java.lang.Long] =
    bindableLong.transform(l => l, l => l)

  /**
   * Path binder for Double.
   */
  implicit object bindableDouble extends Parsing[Double](
    _.toDouble, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Double: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Java Double.
   */
  implicit def bindableJavaDouble: PathBindable[java.lang.Double] =
    bindableDouble.transform(d => d, d => d)

  /**
   * Path binder for Float.
   */
  implicit object bindableFloat extends Parsing[Float](
    _.toFloat, _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as Float: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Java Float.
   */
  implicit def bindableJavaFloat: PathBindable[java.lang.Float] =
    bindableFloat.transform(f => f, f => f)

  /**
   * Path binder for Boolean.
   * Accepts "true"/"false" as well as 1/0.
   */
  implicit object bindableBoolean extends Parsing[Boolean](
    _.trim match {
      case "true" => true
      case "false" => false
      case b => b.toInt match {
        case 1 => true
        case 0 => false
      }
    }, _.toString,
    (key: String, e: Exception) => "Cannot parse parameter %s as Boolean: should be true, false, 0 or 1".format(key)
  ) {
    override def javascriptUnbind = """function(k,v){return !!v}"""
  }

  /**
   * Path binder for Java Boolean.
   */
  implicit def bindableJavaBoolean: PathBindable[java.lang.Boolean] =
    bindableBoolean.transform(b => b, b => b)

  /**
   * Path binder for java.util.UUID.
   */
  implicit object bindableUUID extends Parsing[UUID](
    UUID.fromString(_), _.toString, (key: String, e: Exception) => "Cannot parse parameter %s as UUID: %s".format(key, e.getMessage)
  )

  /**
   * Path binder for Java PathBindable
   */
  implicit def javaPathBindable[T <: play.mvc.PathBindable[T]](implicit ct: ClassTag[T]): PathBindable[T] = new PathBindable[T] {
    def bind(key: String, value: String) = {
      try {
        // Java binders are instantiated reflectively and must have a no-arg constructor.
        Right(ct.runtimeClass.newInstance.asInstanceOf[T].bind(key, value))
      } catch {
        case e: Exception => Left(e.getMessage)
      }
    }
    def unbind(key: String, value: T) = {
      value.unbind(key)
    }
    override def javascriptUnbind = Option(ct.runtimeClass.newInstance.asInstanceOf[T].javascriptUnbind())
      .getOrElse(super.javascriptUnbind)
  }

  /**
   * This is used by the Java RouterBuilder DSL.
   */
  private[play] lazy val pathBindableRegister: Map[Class[_], PathBindable[_]] = {
    import scala.language.existentials
    def register[T](implicit pb: PathBindable[T], ct: ClassTag[T]) = ct.runtimeClass -> pb
    Map(
      register[String],
      register[java.lang.Integer],
      register[java.lang.Long],
      register[java.lang.Double],
      register[java.lang.Float],
      register[java.lang.Boolean],
      register[UUID]
    )
  }
}
| Shruti9520/playframework | framework/src/play/src/main/scala/play/api/mvc/Binders.scala | Scala | apache-2.0 | 24,042 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen
import org.apache.flink.api.common.functions.{FlatMapFunction, Function}
import org.apache.flink.api.dag.Transformation
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.dataformat.{BaseRow, BoxedWrapperRow}
import org.apache.flink.table.runtime.generated.GeneratedFunction
import org.apache.flink.table.runtime.operators.CodeGenOperatorFactory
import org.apache.flink.table.runtime.typeutils.BaseRowTypeInfo
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan.RelOptCluster
import org.apache.calcite.rex._
import scala.collection.JavaConversions._
object CalcCodeGenerator {
/**
 * Generates a code-generated one-input stream operator that applies the given
 * calc program (projection plus optional filter condition) to [[BaseRow]]
 * records, and wraps it in a [[CodeGenOperatorFactory]].
 *
 * @param retainHeader whether the generated projection copies the header of the
 *                     input row onto the output row
 */
private[flink] def generateCalcOperator(
    ctx: CodeGeneratorContext,
    cluster: RelOptCluster,
    inputTransform: Transformation[BaseRow],
    outputType: RowType,
    config: TableConfig,
    calcProgram: RexProgram,
    condition: Option[RexNode],
    retainHeader: Boolean = false,
    opName: String): CodeGenOperatorFactory[BaseRow] = {
  // Derive the logical input row type from the transformation's type info.
  val inputType = inputTransform.getOutputType.asInstanceOf[BaseRowTypeInfo].toRowType
  // filter out time attributes
  val inputTerm = CodeGenUtils.DEFAULT_INPUT1_TERM
  // Generate the per-record processing body (projection + filter).
  val processCode = generateProcessCode(
    ctx,
    inputType,
    outputType,
    classOf[BoxedWrapperRow],
    outputType.getFieldNames,
    config,
    calcProgram,
    condition,
    eagerInputUnboxingCode = true,
    retainHeader = retainHeader)
  // Wrap the body into a generated one-input stream operator.
  val genOperator =
    OperatorCodeGenerator.generateOneInputStreamOperator[BaseRow, BaseRow](
      ctx,
      opName,
      processCode,
      inputType,
      inputTerm = inputTerm,
      lazyInputUnboxingCode = true)
  new CodeGenOperatorFactory(genOperator)
}
/**
 * Generates a [[FlatMapFunction]] that applies the calc projection and optional
 * filter condition to each input [[BaseRow]], emitting results through the
 * collector (results are collected directly rather than via operator output).
 */
private[flink] def generateFunction[T <: Function](
    inputType: RowType,
    name: String,
    returnType: RowType,
    outRowClass: Class[_ <: BaseRow],
    calcProjection: RexProgram,
    calcCondition: Option[RexNode],
    config: TableConfig): GeneratedFunction[FlatMapFunction[BaseRow, BaseRow]] = {
  val ctx = CodeGeneratorContext(config)
  val inputTerm = CodeGenUtils.DEFAULT_INPUT1_TERM
  val collectorTerm = CodeGenUtils.DEFAULT_COLLECTOR_TERM
  // Generate the per-record processing body (projection + filter); unlike the
  // operator path, input unboxing is lazy and output goes straight to the collector.
  val processCode = generateProcessCode(
    ctx,
    inputType,
    returnType,
    outRowClass,
    returnType.getFieldNames,
    config,
    calcProjection,
    calcCondition,
    collectorTerm = collectorTerm,
    eagerInputUnboxingCode = false,
    outputDirectly = true
  )
  FunctionCodeGenerator.generateFunction(
    ctx,
    name,
    classOf[FlatMapFunction[BaseRow, BaseRow]],
    processCode,
    returnType,
    inputType,
    input1Term = inputTerm,
    collectorTerm = collectorTerm)
}
/**
  * Generates the per-record processing code for a calc (projection + optional
  * filter), shared by operator and function code generation.
  *
  * @param ctx                     code generator context collecting reusable members
  * @param inputType               row type of the input
  * @param outRowType              row type of the output
  * @param outRowClass             concrete [[BaseRow]] subclass for output rows
  * @param resultFieldNames        output field names
  *                                (NOTE(review): appears unused in this body — confirm)
  * @param config                  table configuration (null-check flag, etc.)
  * @param calcProgram             projection/condition program
  * @param condition               optional filter condition
  * @param inputTerm               term naming the input row in generated code
  * @param collectorTerm           term naming the collector in generated code
  * @param eagerInputUnboxingCode  whether input field unboxing code is emitted
  *                                up front (true for operators) or deferred
  * @param retainHeader            whether to copy the input row's header flag
  * @param outputDirectly          emit via `collectorTerm.collect(...)` instead
  *                                of the operator's collect helper
  * @return the generated processing code snippet
  */
private[flink] def generateProcessCode(
    ctx: CodeGeneratorContext,
    inputType: RowType,
    outRowType: RowType,
    outRowClass: Class[_ <: BaseRow],
    resultFieldNames: Seq[String],
    config: TableConfig,
    calcProgram: RexProgram,
    condition: Option[RexNode],
    inputTerm: String = CodeGenUtils.DEFAULT_INPUT1_TERM,
    collectorTerm: String = CodeGenUtils.DEFAULT_OPERATOR_COLLECTOR_TERM,
    eagerInputUnboxingCode: Boolean,
    retainHeader: Boolean = false,
    outputDirectly: Boolean = false): String = {
  // Expand local refs so each projection entry is a full expression.
  val projection = calcProgram.getProjectList.map(calcProgram.expandLocalRef)
  val exprGenerator = new ExprCodeGenerator(ctx, false)
    .bindInput(inputType, inputTerm = inputTerm)
  // Identity projection: same field count and every entry is input field i at
  // position i. In that case only the filter needs to run.
  val onlyFilter = projection.lengthCompare(inputType.getFieldCount) == 0 &&
    projection.zipWithIndex.forall { case (rexNode, index) =>
      rexNode.isInstanceOf[RexInputRef] && rexNode.asInstanceOf[RexInputRef].getIndex == index
    }

  // Emit through the user collector, or through the operator collect helper.
  def produceOutputCode(resultTerm: String) = if (outputDirectly) {
    s"$collectorTerm.collect($resultTerm);"
  } else {
    s"${OperatorCodeGenerator.generateCollect(resultTerm)}"
  }

  def produceProjectionCode = {
    // we cannot use for-loop optimization if projection contains other calculations
    // (for example "select id + 1 from T")
    val simpleProjection = projection.forall { rexNode => rexNode.isInstanceOf[RexInputRef] }
    val projectionExpression = if (simpleProjection) {
      // Pure field selection/reordering: generate an index-mapped copy.
      val inputMapping = projection.map(_.asInstanceOf[RexInputRef].getIndex).toArray
      ProjectionCodeGenerator.generateProjectionExpression(
        ctx, inputType, outRowType, inputMapping,
        outRowClass, inputTerm, nullCheck = config.getNullCheck)
    } else {
      // General case: generate one expression per projected field.
      val projectionExprs = projection.map(exprGenerator.generateExpression)
      exprGenerator.generateResultExpression(
        projectionExprs,
        outRowType,
        outRowClass)
    }
    val projectionExpressionCode = projectionExpression.code
    // Optionally carry the input row's header flag over to the output row.
    val header = if (retainHeader) {
      s"${projectionExpression.resultTerm}.setHeader($inputTerm.getHeader());"
    } else {
      ""
    }
    s"""
       |$header
       |$projectionExpressionCode
       |${produceOutputCode(projectionExpression.resultTerm)}
       |""".stripMargin
  }

  if (condition.isEmpty && onlyFilter) {
    // Neither a real projection nor a filter: the planner should have removed it.
    throw new TableException("This calc has no useful projection and no filter. " +
      "It should be removed by CalcRemoveRule.")
  } else if (condition.isEmpty) { // only projection
    val projectionCode = produceProjectionCode
    s"""
       |${if (eagerInputUnboxingCode) ctx.reuseInputUnboxingCode() else ""}
       |$projectionCode
       |""".stripMargin
  } else {
    val filterCondition = exprGenerator.generateExpression(condition.get)
    // only filter
    if (onlyFilter) {
      s"""
         |${if (eagerInputUnboxingCode) ctx.reuseInputUnboxingCode() else ""}
         |${filterCondition.code}
         |if (${filterCondition.resultTerm}) {
         |  ${produceOutputCode(inputTerm)}
         |}
         |""".stripMargin
    } else { // both filter and projection
      // Snapshot the unboxing code/exprs needed by the filter; anything the
      // projection adds afterwards must be emitted inside the if-block scope.
      val filterInputCode = ctx.reuseInputUnboxingCode()
      val filterInputSet = Set(ctx.reusableInputUnboxingExprs.keySet.toSeq: _*)
      // if any filter conditions, projection code will enter an new scope
      val projectionCode = produceProjectionCode
      // NOTE(review): this joins with the two-character sequence backslash-n
      // ("\\n"), not an actual newline — looks like it should be "\n"; confirm.
      val projectionInputCode = ctx.reusableInputUnboxingExprs
        .filter(entry => !filterInputSet.contains(entry._1))
        .values.map(_.code).mkString("\\n")
      s"""
         |${if (eagerInputUnboxingCode) filterInputCode else ""}
         |${filterCondition.code}
         |if (${filterCondition.resultTerm}) {
         |  ${if (eagerInputUnboxingCode) projectionInputCode else ""}
         |  $projectionCode
         |}
         |""".stripMargin
    }
  }
}
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/CalcCodeGenerator.scala | Scala | apache-2.0 | 7,685 |
package stormlantern.consul.client.helpers
import stormlantern.consul.client.dao.ServiceInstance
import stormlantern.consul.client.discovery.ServiceDefinition
/** Factory helpers for building [[ServiceInstance]] fixtures in tests. */
object ModelHelpers {

  /**
   * Builds a [[ServiceInstance]] for the given service id and name.
   * Node and service addresses are derived by suffixing "Address" to the
   * node and service names respectively.
   */
  def createService(id: String, name: String, port: Int = 666, node: String = "node", tags: Set[String] = Set.empty) = {
    val nodeAddress = s"${node}Address"
    val serviceAddress = s"${name}Address"
    ServiceInstance(
      node = node,
      address = nodeAddress,
      serviceId = id,
      serviceName = name,
      serviceTags = tags,
      serviceAddress = serviceAddress,
      servicePort = port
    )
  }

  /** Builds a [[ServiceInstance]] from a [[ServiceDefinition]], using its key as the id. */
  def createService(service: ServiceDefinition): ServiceInstance =
    createService(service.key, service.serviceName)
}
| dlouwers/reactive-consul | client/src/test/scala/stormlantern/consul/client/helpers/ModelHelpers.scala | Scala | mit | 621 |
///*
// * Licensed to the Apache Software Foundation (ASF) under one
// * or more contributor license agreements. See the NOTICE file
// * distributed with this work for additional information
// * regarding copyright ownership. The ASF licenses this file
// * to you under the Apache License, Version 2.0 (the
// * "License"); you may not use this file except in compliance
// * with the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing,
// * software distributed under the License is distributed on an
// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// * KIND, either express or implied. See the License for the
// * specific language governing permissions and limitations
// * under the License.
// */
//
//package org.apache.s2graph.rest.play.benchmark
//
//import play.api.test.{FakeApplication, PlaySpecification, WithApplication}
//
//import scala.annotation.tailrec
//import scala.util.Random
//
//class SamplingBenchmarkSpec extends BenchmarkCommon with PlaySpecification {
// "sample" should {
// implicit val app = FakeApplication()
//
// "sample benchmark" in new WithApplication(app) {
// @tailrec
// def randomInt(n: Int, range: Int, set: Set[Int] = Set.empty[Int]): Set[Int] = {
// if (set.size == n) set
// else randomInt(n, range, set + Random.nextInt(range))
// }
//
// // sample using random array
// def randomArraySample[T](num: Int, ls: List[T]): List[T] = {
// val randomNum = randomInt(num, ls.size)
// var sample = List.empty[T]
// var idx = 0
// ls.foreach { e =>
// if (randomNum.contains(idx)) sample = e :: sample
// idx += 1
// }
// sample
// }
//
// // sample using shuffle
// def shuffleSample[T](num: Int, ls: List[T]): List[T] = {
// Random.shuffle(ls).take(num)
// }
//
// // sample using random number generation
// def rngSample[T](num: Int, ls: List[T]): List[T] = {
// var sampled = List.empty[T]
// val N = ls.size // population
// var t = 0 // total input records dealt with
// var m = 0 // number of items selected so far
//
// while (m < num) {
// val u = Random.nextDouble()
// if ((N - t) * u < num - m) {
// sampled = ls(t) :: sampled
// m += 1
// }
// t += 1
// }
// sampled
// }
//
// // test data
// val testLimit = 10000
// val testNum = 10
// val testData = (0 to 1000).toList
//
// // dummy for warm-up
// (0 to testLimit) foreach { n =>
// randomArraySample(testNum, testData)
// shuffleSample(testNum, testData)
// rngSample(testNum, testData)
// }
//
// duration("Random Array Sampling") {
// (0 to testLimit) foreach { _ =>
// val sampled = randomArraySample(testNum, testData)
// }
// }
//
// duration("Shuffle Sampling") {
// (0 to testLimit) foreach { _ =>
// val sampled = shuffleSample(testNum, testData)
// }
// }
//
// duration("RNG Sampling") {
// (0 to testLimit) foreach { _ =>
// val sampled = rngSample(testNum, testData)
// }
// }
// true
// }
// }
//}
| daewon/incubator-s2graph | s2core/src/test/scala/org/apache/s2graph/core/benchmark/SamplingBenchmarkSpec.scala | Scala | apache-2.0 | 3,380 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.flink.api.java.functions.NullByteKeySelector
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.table.api.{StreamQueryConfig, StreamTableEnvironment}
import org.apache.flink.table.codegen.AggregationCodeGenerator
import org.apache.flink.table.plan.nodes.CommonAggregate
import org.apache.flink.table.plan.rules.datastream.DataStreamRetractionRules
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.CRowKeySelector
import org.apache.flink.table.runtime.aggregate.AggregateUtil.CalcitePair
import org.apache.flink.table.runtime.aggregate._
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.util.Logging
/**
*
* Flink RelNode for data stream unbounded group aggregate
*
* @param cluster Cluster of the RelNode, represent for an environment of related
* relational expressions during the optimization of a query.
* @param traitSet Trait set of the RelNode
* @param inputNode The input RelNode of aggregation
* @param namedAggregates List of calls to aggregate functions and their output field names
* @param inputSchema The type of the rows consumed by this RelNode
* @param schema The type of the rows emitted by this RelNode
* @param groupings The position (in the input Row) of the grouping keys
*/
class DataStreamGroupAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputNode: RelNode,
    namedAggregates: Seq[CalcitePair[AggregateCall, String]],
    schema: RowSchema,
    inputSchema: RowSchema,
    groupings: Array[Int])
  extends SingleRel(cluster, traitSet, inputNode)
    with CommonAggregate
    with DataStreamRel
    with Logging {

  override def deriveRowType() = schema.relDataType

  // Group aggregates update previously emitted results, so the input must
  // deliver updates as retractions and this node produces/consumes them.
  override def needsUpdatesAsRetraction = true

  override def producesUpdates = true

  override def consumesRetractions = true

  /** Positions (in the input row) of the grouping keys. */
  def getGroupings: Array[Int] = groupings

  override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
    new DataStreamGroupAggregate(
      cluster,
      traitSet,
      inputs.get(0),
      namedAggregates,
      schema,
      inputSchema,
      groupings)
  }

  override def toString: String = {
    s"Aggregate(${
      if (groupings.nonEmpty) {
        s"groupBy: (${groupingToString(inputSchema.relDataType, groupings)}), "
      } else {
        ""
      }
    }select:(${aggregationToString(
      inputSchema.relDataType, groupings, getRowType, namedAggregates, Nil)}))"
  }

  override def explainTerms(pw: RelWriter): RelWriter = {
    super.explainTerms(pw)
      .itemIf("groupBy", groupingToString(
        inputSchema.relDataType, groupings), groupings.nonEmpty)
      .item("select", aggregationToString(
        inputSchema.relDataType, groupings, getRowType, namedAggregates, Nil))
  }

  /**
   * Translates this node into a keyed (or, for global aggregation, a
   * parallelism-1 pseudo-keyed) process function over the input stream.
   */
  override def translateToPlan(
      tableEnv: StreamTableEnvironment,
      queryConfig: StreamQueryConfig): DataStream[CRow] = {

    // Unbounded grouped aggregation accumulates state forever unless a
    // retention interval is configured; warn the user if none is set.
    if (groupings.nonEmpty && queryConfig.getMinIdleStateRetentionTime < 0) {
      LOG.warn(
        "No state retention interval configured for a query which accumulates state. " +
        "Please provide a query configuration with valid retention interval to prevent excessive " +
        "state size. You may specify a retention time of 0 to not clean up the state.")
    }

    val inputDS = input.asInstanceOf[DataStreamRel].translateToPlan(tableEnv, queryConfig)

    val outRowType = CRowTypeInfo(schema.typeInfo)

    val generator = new AggregationCodeGenerator(
      tableEnv.getConfig,
      false,
      inputSchema.typeInfo,
      None)

    val aggString = aggregationToString(
      inputSchema.relDataType,
      groupings,
      getRowType,
      namedAggregates,
      Nil)

    val keyedAggOpName = s"groupBy: (${groupingToString(inputSchema.relDataType, groupings)}), " +
      s"select: ($aggString)"
    val nonKeyedAggOpName = s"select: ($aggString)"

    val processFunction = AggregateUtil.createGroupAggregateFunction(
      generator,
      namedAggregates,
      inputSchema.relDataType,
      inputSchema.fieldTypeInfos,
      groupings,
      queryConfig,
      DataStreamRetractionRules.isAccRetract(this),
      DataStreamRetractionRules.isAccRetract(getInput))

    val result: DataStream[CRow] =
    // grouped / keyed aggregation
      if (groupings.nonEmpty) {
        inputDS
          .keyBy(new CRowKeySelector(groupings, inputSchema.projectedTypeInfo(groupings)))
          .process(processFunction)
          .returns(outRowType)
          .name(keyedAggOpName)
          .asInstanceOf[DataStream[CRow]]
      }
      // global / non-keyed aggregation: key everything to a single constant
      // key and force parallelism 1 so a single instance sees all records.
      else {
        inputDS
          .keyBy(new NullByteKeySelector[CRow])
          .process(processFunction)
          .setParallelism(1)
          .setMaxParallelism(1)
          .returns(outRowType)
          .name(nonKeyedAggOpName)
          .asInstanceOf[DataStream[CRow]]
      }
    result
  }
}
| zimmermatt/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamGroupAggregate.scala | Scala | apache-2.0 | 6,058 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api
import com.mohiva.play.silhouette.api.services.{ AuthenticatorService, IdentityService }
import com.mohiva.play.silhouette.api.util.ExecutionContextProvider
import scala.concurrent.ExecutionContext
/**
* The environment type.
*
* Defines the [[Identity]] and [[Authenticator]] types for an environment. It is possible
* to implement as many types as needed. This has the advantage that an application isn't
* bound only to a single `Identity` -> `Authenticator` combination.
*
* To define a new environment type create a new trait with the appropriate [[Identity]] and
* [[Authenticator]] types:
*
* {{{
* trait SessionEnv extends Env {
* type I = User
* type A = SessionAuthenticator
* }
* trait JWTEnv extends Env {
* type I = User
* type A = JWTAuthenticator
* }
* }}}
*/
trait Env {
  // The identity (user) type handled by this environment.
  type I <: Identity
  // The authenticator type used to track the identity across requests.
  type A <: Authenticator
}
/**
* Provides the components needed to handle a secured request.
*
* It's possible to declare different environments for different environment types. The
* [[com.mohiva.play.silhouette.api.services.IdentityService]] and the
* [[com.mohiva.play.silhouette.api.services.AuthenticatorService]] are bound to the appropriate types
* defined in the environment type. But the [[EventBus]] and the list of [[RequestProvider]]
* instances can be defined as needed for every environment type.
*/
trait Environment[E <: Env] extends ExecutionContextProvider {

  /**
   * Gets the identity service implementation, typed to the environment's identity type `E#I`.
   *
   * @return The identity service implementation.
   */
  def identityService: IdentityService[E#I]

  /**
   * Gets the authenticator service implementation, typed to the environment's
   * authenticator type `E#A`.
   *
   * @return The authenticator service implementation.
   */
  def authenticatorService: AuthenticatorService[E#A]

  /**
   * Gets the list of request providers (may be empty if request-based
   * authentication is not used).
   *
   * @return The list of request providers.
   */
  def requestProviders: Seq[RequestProvider]

  /**
   * The event bus implementation used to publish authentication events.
   *
   * @return The event bus implementation.
   */
  def eventBus: EventBus
}
/**
* Companion object to easily create environment instances.
*
* {{{
* Environment[SessionEnv](...)
* Environment[JWTEnv](...)
* }}}
*/
object Environment {

  /**
   * Creates an [[Environment]] for the given environment type `E`.
   *
   * The explicit return type avoids leaking the anonymous refinement type
   * into the public API (the public `apply` previously had an inferred
   * structural result type).
   *
   * @param identityServiceImpl      the identity service to use
   * @param authenticatorServiceImpl the authenticator service to use
   * @param requestProvidersImpl     the request providers to use
   * @param eventBusImpl             the event bus to use
   * @param ec                       the execution context for async operations
   */
  def apply[E <: Env](
    identityServiceImpl: IdentityService[E#I],
    authenticatorServiceImpl: AuthenticatorService[E#A],
    requestProvidersImpl: Seq[RequestProvider],
    eventBusImpl: EventBus)(implicit ec: ExecutionContext): Environment[E] = new Environment[E] {
    val identityService = identityServiceImpl
    val authenticatorService = authenticatorServiceImpl
    val requestProviders = requestProvidersImpl
    val eventBus = eventBusImpl
    val executionContext = ec
  }
}
| akkie/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/Environment.scala | Scala | apache-2.0 | 3,368 |
package marge.map
/**
 * A total mapping from inputs of type `A` to outputs of type `B`.
 *
 * @tparam A the input type
 * @tparam B the output type
 */
trait InputMap[A,B] {
  /** Returns the output value associated with input `a`. */
  def apply(a: A): B
}
| mikiobraun/marge | src/main/scala/marge/map/InputMap.scala | Scala | mit | 124 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.words
import org.scalatest.matchers._
import scala.collection.GenTraversable
import org.scalatest.FailureMessages
import org.scalatest.UnquotedString
import org.scalautils.Equality
import scala.annotation.tailrec
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="../Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
class InOrderOnlyContainMatcher[T](right: GenTraversable[T], equality: Equality[T]) extends ContainMatcher[T] {

  // Advances `rightItr` until an element equal to `value` is found, appending
  // each consumed right-element to `processedList`. Throws if a duplicate
  // (per `equality`) is encountered in `right`. If `value` is never found,
  // returns the fully-consumed list (whose last element then differs from
  // `value`, which callers use to detect the no-match case).
  @tailrec
  private def findNext(value: T, rightItr: Iterator[T], processedList: IndexedSeq[T]): IndexedSeq[T] =
    if (rightItr.hasNext) {
      val nextRight = rightItr.next
      if (processedList.find(equality.areEqual(_, nextRight)).isDefined)
        throw new IllegalArgumentException(FailureMessages("inOrderOnlyDuplicate", nextRight))
      if (equality.areEqual(value, nextRight))
        processedList :+ nextRight
      else
        findNext(value, rightItr, processedList :+ nextRight)
    }
    else
      processedList

  // Walks `left`, requiring its elements to equal `currentRight` (repeats
  // allowed) or the next element(s) of `right` in order; returns false as
  // soon as a left element cannot be matched.
  @tailrec
  private def checkEqual(leftItr: Iterator[T], rightItr: Iterator[T], currentRight: T, processedList: IndexedSeq[T]): Boolean = {
    if (leftItr.hasNext) {
      val nextLeft = leftItr.next
      if (equality.areEqual(nextLeft, currentRight)) // The nextLeft is contained in right, let's continue next
        checkEqual(leftItr, rightItr, currentRight, processedList)
      else {
        val newProcessedList = findNext(nextLeft, rightItr, processedList)
        if (equality.areEqual(nextLeft, newProcessedList.last)) // The nextLeft is contained in right, let's continue next
          checkEqual(leftItr, rightItr, nextLeft, newProcessedList) // nextLeft will be the new currentRight
        else // The nextLeft is not in right, let's fail early
          false
      }
    }
    else // No more element in left, left contains only elements of right.
      true
  }

  /**
   * This method contains the matching code for inOrderOnly.
   *
   * NOTE(review): `rightItr.next` below is called unconditionally, so an empty
   * `right` throws NoSuchElementException instead of producing a MatchResult —
   * confirm whether callers guarantee non-empty `right`. Also, when `right`
   * has exactly one element the `else left.isEmpty` branch requires `left` to
   * be empty, which looks suspicious — confirm against the intended semantics.
   */
  def apply(left: GenTraversable[T]): MatchResult = {
    val rightItr = right.toIterator
    val rightFirst = rightItr.next
    MatchResult(
      if (rightItr.hasNext) checkEqual(left.toIterator, rightItr, rightFirst, IndexedSeq(rightFirst)) else left.isEmpty,
      FailureMessages("didNotContainInOrderOnlyElements", left, UnquotedString(right.mkString(", "))),
      FailureMessages("containedInOrderOnlyElements", left, UnquotedString(right.mkString(", ")))
    )
  }
}
| svn2github/scalatest | src/main/scala/org/scalatest/words/InOrderOnlyContainMatcher.scala | Scala | apache-2.0 | 3,152 |
package frameless
import org.apache.spark.sql.{Column, DataFrame, Dataset}
/** Implicit enrichments bridging vanilla Spark SQL types to their frameless counterparts. */
trait FramelessSyntax {

  implicit class ColumnSyntax(self: Column) {
    /** Wraps this untyped column as a typed column of `U` over rows of `T`. */
    def typedColumn[T, U: TypedEncoder]: TypedColumn[T, U] = new TypedColumn[T, U](self)
    /** Wraps this untyped column as a typed aggregate of `U` over rows of `T`. */
    def typedAggregate[T, U: TypedEncoder]: TypedAggregate[T, U] = new TypedAggregate[T, U](self)
  }

  implicit class DatasetSyntax[T: TypedEncoder](self: Dataset[T]) {
    /** Converts this Dataset to a TypedDataset of the same element type. */
    def typed: TypedDataset[T] = TypedDataset.create[T](self)
  }

  implicit class DataframeSyntax(self: DataFrame){
    /** Converts this DataFrame to a TypedDataset of `T` without schema validation (unsafe). */
    def unsafeTyped[T: TypedEncoder]: TypedDataset[T] = TypedDataset.createUnsafe(self)
  }
}
| adelbertc/frameless | dataset/src/main/scala/frameless/FramelessSyntax.scala | Scala | apache-2.0 | 619 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.http.test
import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
import org.slf4j.{Logger, LoggerFactory}
/**
 * Mixin that manages a WireMock server for a test suite: starts it before all
 * tests, optionally resets mappings/requests before each test, and stops it
 * after all tests.
 */
trait WireMockSupport extends BeforeAndAfterAll with BeforeAndAfterEach {
  this: Suite =>

  private val logger: Logger = LoggerFactory.getLogger(getClass)

  lazy val wireMockHost: String =
    // this has to match the configuration in `internalServiceHostPatterns`
    "localhost"

  lazy val wireMockPort: Int =
    // we lookup a port ourselves rather than using `wireMockConfig().dynamicPort()` since it's simpler to provide
    // it up front (rather than query the running server), and allow overriding.
    PortFinder.findFreePort(portRange = 6001 to 7000)

  lazy val wireMockRootDirectory: String =
    // wiremock doesn't look in the classpath, it uses src/test/resources by default.
    // since play projects use the non-standard `test/resources` we should attempt to identify the path
    // note, it may require `Test / fork := true` in sbt (or just override explicitly)
    // Split on the platform path separator (':' on Unix, ';' on Windows) rather
    // than a hard-coded ':' so this also works on Windows.
    System.getProperty("java.class.path").split(java.io.File.pathSeparator).head

  lazy val wireMockServer: WireMockServer =
    new WireMockServer(
      wireMockConfig()
        .port(wireMockPort)
        .withRootDirectory(wireMockRootDirectory)
    )

  lazy val wireMockUrl: String =
    s"http://$wireMockHost:$wireMockPort"

  /** If true (default) it will clear the wireMock settings before each test */
  lazy val resetWireMockMappings: Boolean = true
  lazy val resetWireMockRequests: Boolean = true

  /** Starts the server (no-op if already running) and configures static `WireMock` access. */
  def startWireMock(): Unit =
    if (!wireMockServer.isRunning) {
      wireMockServer.start()
      // this initialises static access to `WireMock` rather than calling functions on the wireMockServer instance itself
      WireMock.configureFor(wireMockHost, wireMockServer.port())
      logger.info(s"Started WireMock server on host: $wireMockHost, port: ${wireMockServer.port()}, rootDirectory: $wireMockRootDirectory")
    }

  /** Stops the server (no-op if not running). */
  def stopWireMock(): Unit =
    if (wireMockServer.isRunning) {
      wireMockServer.stop()
      logger.info(s"Stopped WireMock server on host: $wireMockHost, port: $wireMockPort")
    }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    startWireMock()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    if (resetWireMockMappings)
      wireMockServer.resetMappings()
    if (resetWireMockRequests)
      wireMockServer.resetRequests()
  }

  override protected def afterAll(): Unit = {
    stopWireMock()
    super.afterAll()
  }
}
| hmrc/http-verbs | http-verbs-test-common/src/main/scala/uk/gov/hmrc/http/test/WireMockSupport.scala | Scala | apache-2.0 | 3,332 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.step
import javax.servlet.FilterChain
import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.base.{ConnectedStep, Step, StepContext}
import scala.util.matching.Regex
/**
 * A step that matches the current URI segment against a regular expression.
 * On a match the URI level is advanced and, if `captureHeader` is set, the
 * matched segment is recorded as a request header under that name.
 */
class URI(id : String, label : String, val uri : Regex, val captureHeader : Option[String], next : Array[Step]) extends ConnectedStep(id, label, next) {

  def this (id : String, label : String, uri : Regex, next : Array[Step]) =
    this(id, label, uri, None, next)

  override val mismatchMessage : String = uri.toString

  /**
   * Returns the advanced context if the current URI segment matches `uri`,
   * or None if there is no segment left or the segment does not match.
   * (Rewritten as a single expression; the original accumulated the result
   * in a mutable `var`.)
   */
  override def checkStep(req : CheckerServletRequest, resp : CheckerServletResponse, chain : FilterChain, context : StepContext) : Option[StepContext] = {
    if (context.uriLevel < req.URISegment.size) {
      val segment = req.URISegment(context.uriLevel)
      segment match {
        case uri() =>
          captureHeader match {
            case None =>
              Some(context.copy(uriLevel = context.uriLevel + 1))
            case Some(header) =>
              Some(context.copy(uriLevel = context.uriLevel + 1,
                                requestHeaders = context.requestHeaders.addHeader(header, segment)))
          }
        case _ => None
      }
    } else {
      None
    }
  }
}
| tylerroyal/api-checker | core/src/main/scala/com/rackspace/com/papi/components/checker/step/URI.scala | Scala | apache-2.0 | 1,900 |
package dotty.tools.dotc.util
/** A class inheriting from Attachment.Container supports
* adding, removing and lookup of attachments. Attachments are typed key/value pairs.
*/
object Attachment {

  /** The class of keys for attachments yielding values of type V.
   *  Keys are compared by reference identity (`eq`), not by `equals`.
   */
  class Key[+V]

  /** An implementation trait for attachments.
   *  Clients should inherit from Container instead.
   *  Attachments are stored as a singly linked list of `Link` nodes hanging
   *  off `next`; all operations walk that list recursively.
   */
  trait LinkSource {
    private[Attachment] var next: Link[_]

    /** Optionally get attachment corresponding to `key` */
    final def getAttachment[V](key: Key[V]): Option[V] = {
      val nx = next
      if (nx == null) None
      // The cast is safe: a Link is only ever created pairing a Key[V] with a V.
      else if (nx.key eq key) Some(nx.value.asInstanceOf[V])
      else nx.getAttachment[V](key)
    }

    /** The attachment corresponding to `key`.
     *  @throws NoSuchElementException if no attachment with key exists
     */
    final def attachment[V](key: Key[V]): V = {
      val nx = next
      if (nx == null) throw new NoSuchElementException
      else if (nx.key eq key) nx.value.asInstanceOf[V]
      else nx.attachment(key)
    }

    /** The attachment corresponding to `key`, or `default`
     *  if no attachment with `key` exists.
     */
    final def attachmentOrElse[V](key: Key[V], default: V): V = {
      val nx = next
      if (nx == null) default
      else if (nx.key eq key) nx.value.asInstanceOf[V]
      else nx.attachmentOrElse(key, default)
    }

    /** Add attachment with given `key` and `value`.
     *  @return Optionally, the old attachment with given `key` if one existed before.
     *          The new attachment is added at the position of the old one, or at the end
     *          if no attachment with same `key` existed.
     */
    final def putAttachment[V](key: Key[V], value: V): Option[V] = {
      val nx = next
      if (nx == null) {
        // Reached the end: append a fresh link.
        next = new Link(key, value, null)
        None
      }
      else if (nx.key eq key) {
        // Replace in place by rebuilding the link, preserving the tail.
        next = new Link(key, value, nx.next)
        Some(nx.value.asInstanceOf[V])
      }
      else nx.putAttachment(key, value)
    }

    /** Remove attachment with given `key`, if it exists.
     *  @return Optionally, the removed attachment with given `key` if one existed before.
     */
    final def removeAttachment[V](key: Key[V]): Option[V] = {
      val nx = next
      if (nx == null)
        None
      else if (nx.key eq key) {
        // Unlink the matching node by splicing its tail into this source.
        next = nx.next
        Some(nx.value.asInstanceOf[V])
      }
      else nx.removeAttachment(key)
    }

    /** The list of all values attached to this container. */
    final def allAttachments: List[Any] = {
      val nx = next
      if (nx == null) Nil else nx.value :: nx.allAttachments
    }
  }

  /** A private, concrete implementation class linking attachments.
   *  Each node is itself a LinkSource so the recursive operations above can
   *  treat interior nodes and the container head uniformly.
   */
  private[Attachment] class Link[+V](val key: Key[V], val value: V, var next: Link[_])
    extends LinkSource

  /** A trait for objects that can contain attachments */
  trait Container extends LinkSource {
    private[Attachment] var next: Link[_] = null

    /** Prepends an attachment; asserts that no attachment with `key` exists yet. */
    final def pushAttachment[V](key: Key[V], value: V): Unit = {
      assert(!getAttachment(key).isDefined)
      next = new Link(key, value, next)
    }
  }
}
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
/* An example of using a Java annotation. See FindTests.scala */
/** Example test container; `testData` is excluded from test discovery via the
  * Java `@Ignore` annotation (see FindTests.scala).
  */
object Tests {

  @Ignore
  def testData = List(0, 1, -1, 5, -5)

  // Explicit `: Unit =` replaces the deprecated procedure syntax `def test1 { ... }`.
  def test1: Unit = {
    // A non-empty list equals its head consed onto its tail.
    assert(testData == (testData.head :: testData.tail))
  }

  def test2: Unit = {
    assert(testData.contains(testData.head))
  }
}
| peachyy/scalastu | java/Tests.scala | Scala | apache-2.0 | 1,083 |
package com.dividezero.stubby.core.service
import com.dividezero.stubby.core.service.model._
import com.dividezero.stubby.core.model._
import scala.collection.mutable.ListBuffer
import com.dividezero.stubby.core.js.ScriptWorld
import com.dividezero.stubby.core.js.Script
import com.typesafe.scalalogging.LazyLogging
import com.dividezero.stubby.core.util.JsonUtils
import com.dividezero.stubby.core.util.TimeLimit
// Thrown when a requested stub response or recorded request does not exist.
case class NotFoundException(message: String) extends RuntimeException(message)
/**
 * In-memory registry of stubbed exchanges and a log of received requests.
 *
 * All public methods synchronize on `this`; `findMatch` also notifies waiting
 * threads so that `findRequests(filter, timeout)` can block until a matching
 * request arrives.
 */
class StubService extends LazyLogging {

  val LOGGER = logger // make logging stand out...

  // Both buffers are prepended to, so index 0 is the most recent entry.
  val requests: ListBuffer[StubRequest] = new ListBuffer
  val responses: ListBuffer[StubServiceExchange] = new ListBuffer

  /** Registers a stubbed exchange; an equivalent previously-added stub is replaced. */
  def addResponse(exchange: StubExchange): Unit = this.synchronized {
    LOGGER.trace("Adding stubbed exchange: " + JsonUtils.prettyPrint(exchange))
    val internal = new StubServiceExchange(exchange)
    responses -= internal // remove existing stubbed request (ie, will never match anymore)
    internal +=: responses // ensure most recent matched first
  }

  /**
   * Records the request and returns the first stubbed response matching it
   * (applying the stub's script, if any), together with all match attempts.
   * Notifies threads blocked in `findRequests(filter, timeout)`.
   */
  def findMatch(request: StubRequest): StubServiceResult = this.synchronized {
    try {
      LOGGER.trace("Got request: " + JsonUtils.prettyPrint(request))
      request +=: requests // prepend
      val attempts = new ListBuffer[MatchResult]
      for (response <- responses) {
        val matchResult = response.matches(request)
        attempts += matchResult
        if (matchResult.matches) {
          LOGGER.info("Matched: " + request.path.get)
          val exchange = response.exchange
          return exchange.script match {
            case Some(script) => {
              val world = new ScriptWorld(request, exchange.response, exchange.delay) // creates deep copies of objects
              new Script(script).execute(world)
              val (scriptResponse, scriptDelay) = world.result
              new StubServiceResult(
                attempts.toList, Some(scriptResponse), scriptDelay)
            }
            case None => new StubServiceResult(
              attempts.toList, Some(exchange.response), exchange.delay)
          }
        }
      }
      LOGGER.info("Didn't match: " + request.path.get)
      this.notifyAll // inform any waiting threads that a new request has come in
      new StubServiceResult(Nil) // no match (empty list)
    } catch {
      case e: Exception =>
        throw new RuntimeException("Error matching request", e)
    }
  }

  /** Returns the stubbed response at `index` (0 = most recently added). */
  @throws[NotFoundException]("if index does not exist")
  def getResponse(index: Int): StubServiceExchange = this.synchronized {
    try {
      responses(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        throw new NotFoundException("Response does not exist: " + index)
    }
  }

  /** Deletes the stubbed response at `index`. */
  @throws[NotFoundException]("if index does not exist")
  def deleteResponse(index: Int) = this.synchronized {
    LOGGER.trace("Deleting response: " + index)
    try {
      responses.remove(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        // NotFoundException (a RuntimeException) for consistency with
        // getResponse/deleteRequest; previously threw a bare RuntimeException.
        throw new NotFoundException("Response does not exist: " + index)
    }
  }

  /** Deletes every stub whose request pattern matches the given exchange's request. */
  def deleteResponse(exchange: StubExchange) = this.synchronized {
    val toDelete = responses.filter(_.matches(exchange.request).matches)
    toDelete.foreach { it: StubServiceExchange =>
      responses.remove(responses.indexOf(it))
    }
  }

  /** Removes all stubbed responses. */
  def deleteResponses() = this.synchronized {
    LOGGER.trace("Deleting all responses")
    responses.clear
  }

  /** Returns the recorded request at `index` (0 = most recently received). */
  @throws[NotFoundException]("if index does not exist")
  def getRequest(index: Int): StubRequest = this.synchronized {
    try {
      requests(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        // Fixed message: previously said "Response does not exist".
        throw new NotFoundException("Request does not exist: " + index)
    }
  }

  /**
   * Blocks up to `timeout` millis for at least one recorded request matching
   * `filter`, returning the matches (empty if the timeout elapses first).
   */
  def findRequests(filter: StubRequest, timeout: Long): Traversable[StubRequest] = this.synchronized { // blocking call
    TimeLimit.retry(timeout) { remaining =>
      val result = findRequests(filter)
      if (result.isEmpty) {
        try {
          this.wait(remaining) // wait for a request to come in, or time to expire
        } catch {
          case e: InterruptedException =>
            throw new RuntimeException("Interrupted while waiting for request")
        }
        None // retry
      } else {
        Some(result) // found
      }
    }.getOrElse(Nil)
  }

  /** Returns all recorded requests matching `filter`. */
  def findRequests(filter: StubRequest): Traversable[StubRequest] = this.synchronized {
    val pattern = new RequestPattern(filter)
    requests.filter(r => pattern.matches(r).matches)
  }

  /** Deletes the recorded request at `index`. */
  @throws[NotFoundException]("if index does not exist")
  def deleteRequest(index: Int) = this.synchronized {
    LOGGER.trace("Deleting request: " + index)
    try {
      requests.remove(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        throw new NotFoundException("Request does not exist: " + index)
    }
  }

  /** Removes all recorded requests. */
  def deleteRequests() = this.synchronized {
    LOGGER.trace("Deleting all requests")
    requests.clear
  }
}
| themillhousegroup/http-stub-server-scala | core/src/main/scala/com/dividezero/stubby/core/service/StubService.scala | Scala | apache-2.0 | 5,158 |
package com.mesosphere.cosmos
import com.netaporter.uri.dsl._
import com.twitter.finagle.http.{RequestBuilder, Status}
import com.twitter.util.{Await, Return}
import org.scalatest.FreeSpec
/** Integration check that the admin-router client can perform a real HTTPS request.
  * Requires outbound network access (hits https://www.google.com).
  */
final class ServicesIntegrationSpec extends FreeSpec {
  "Services" - {
    "adminRouterClient should" - {
      "be able to connect to an https site" in {
        val url = "https://www.google.com"
        val Return(client) = Services.adminRouterClient(url)
        val request = RequestBuilder().url(url).buildGet()
        val response = Await.result(client(request))
        // Fixed: assertResult takes the *expected* value in its first parameter
        // list and the actual value in the second. The arguments were swapped,
        // which yielded misleading failure messages ("Expected <actual>, got Ok").
        assertResult(Status.Ok)(response.status)
      }
    }
  }
}
| movicha/cosmos | cosmos-server/src/it/scala/com/mesosphere/cosmos/ServicesIntegrationSpec.scala | Scala | apache-2.0 | 634 |
package delta
import scuff._
import scala.reflect.ClassTag
/**
 * Generic state projector: folds a stream of events into a state value.
 * @tparam S state type
 * @tparam EVT event type
 */
abstract class Projector[S >: Null, EVT: ClassTag] extends Serializable {
  /** Builds the initial state from the first relevant event. */
  def init(evt: EVT): S
  /** Folds a subsequent event into the existing state. */
  def next(state: S, evt: EVT): S
  type Transaction = delta.Transaction[_, _ >: EVT]
  /** Applies every `EVT` in the transaction on top of `state`
    * (starting from scratch via `init` when no state exists yet).
    */
  def apply(tx: Transaction, state: Option[S]): S = {
    val events = tx.events.iterator.collectAs[EVT]
    events.foldLeft(state.orNull) { (acc, evt) =>
      if (acc == null) init(evt) else next(acc, evt)
    }
  }
}
object Projector {
  /** Convenience entry point: resolves a projector per transaction and applies it. */
  def apply[ID, S >: Null, EVT: ClassTag](
    getProjector: Transaction[ID, _ >: EVT] => Projector[S, EVT])(
    tx: Transaction[ID, _ >: EVT], state: Option[S]): S = {
    val projector = getProjector(tx)
    projector(tx, state)
  }
}
| nilskp/delta | src/main/scala/delta/Projector.scala | Scala | mit | 845 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.calculations.SummaryCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/** CT computation box CP305 ("Qualifying charitable Donation"), an integer-valued box. */
case class CP305(value: Int) extends CtBoxIdentifier(name = "Qualifying charitable Donation") with CtInteger
object CP305 extends Calculated[CP305, ComputationsBoxRetriever] with SummaryCalculator {
  /** Derives CP305 from boxes CP301 and CP302 via the summary calculator. */
  override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CP305 = {
    val cp301 = fieldValueRetriever.retrieveCP301()
    val cp302 = fieldValueRetriever.retrieveCP302()
    calculateQualifyingCharitableDonations(cp301, cp302)
  }
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP305.scala | Scala | apache-2.0 | 1,253 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes.matching
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.graphdb.{Relationship, Node}
import collection.Set
/**
 * This class is responsible for keeping track of the already visited parts of the pattern, and the matched
 * entities corresponding to the pattern items.
 *
 * It's also used to emit the subgraph when the whole pattern has been matched (that's the toMap method)
 */
abstract class History {
  /** Drops pattern relationships that have already been traversed. */
  def removeSeen(relationships: Set[PatternRelationship]): Set[PatternRelationship] =
    relationships.filterNot(hasSeen)
  /** Drops graph relationships whose underlying entity has already been traversed. */
  def removeSeen(relationships: Seq[GraphRelationship]): Seq[GraphRelationship] = relationships.filterNot {
    case SingleGraphRelationship(r)          => hasSeen(r)
    case VariableLengthGraphRelationship(p)  => hasSeen(p)
  }
  def hasSeen(p: Any): Boolean
  def add(pair: MatchingPair): History
  val toMap: ExecutionContext
  def contains(p: MatchingPair): Boolean
}
/** History at the start of matching: nothing has been matched yet except
  * the relationships supplied up-front in `alreadySeen`.
  */
class InitialHistory(source: ExecutionContext, alreadySeen: Seq[Relationship]) extends History {
  def hasSeen(p: Any) = p match {
    case r: Relationship => alreadySeen.contains(r)
    case _               => false
  }
  def contains(p: MatchingPair) = false
  def add(pair: MatchingPair) = new AddedHistory(this, pair)
  val toMap = source
}
// History with one additional matched pair layered on top of a parent history;
// the chain of AddedHistory instances forms an immutable linked list back to
// the InitialHistory.
class AddedHistory(val parent : History, val pair : MatchingPair) extends History {
  // Seen if it matches the newest pair, or anything earlier in the chain.
  def hasSeen(p: Any) = pair.matches(p) || parent.hasSeen(p)
  def contains(p : MatchingPair) = pair == p || parent.contains(p)
  // Adding an already-recorded pair is a no-op (returns this history unchanged).
  def add(pair: MatchingPair) = if (contains(pair)) this else new AddedHistory(this,pair)
  // Materializes the whole chain into an ExecutionContext, computed at most once.
  lazy val toMap = {
    parent.toMap.newWith(toSeq(pair))
  }
  // Bindings contributed by a single matched pair.
  // NOTE(review): the case order appears significant — the VariableLength* cases
  // precede the plain PatternRelationship cases, presumably because
  // VariableLengthPatternRelationship is a subtype; confirm before reordering.
  def toSeq(p: MatchingPair) : Seq[(String,Any)] = {
    p match {
      case MatchingPair(pe: PatternNode, entity: Node) => Seq(pe.key -> entity)
      case MatchingPair(pe: PatternRelationship, entity: SingleGraphRelationship) => Seq(pe.key -> entity.rel)
      case MatchingPair(pe: VariableLengthPatternRelationship, null) => Seq(pe.key -> null) ++ pe.relIterable.map(_ -> null)
      case MatchingPair(pe: PatternRelationship, null) => Seq(pe.key -> null)
      case MatchingPair(pe: VariableLengthPatternRelationship, entity: VariableLengthGraphRelationship) => {
        relationshipIterable(pe, entity) match {
          case Some(aPair) => Seq(pe.key -> entity.path, aPair)
          case None => Seq(pe.key -> entity.path)
        }
      }
    }
  }
  private def relationshipIterable(pe: VariableLengthPatternRelationship, entity: VariableLengthGraphRelationship):Option[(String, Any)] = pe.relIterable.map(_->entity.relationships)
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/matching/History.scala | Scala | apache-2.0 | 3,616 |
package io.getquill.context.cassandra.norm
import io.getquill._
import io.getquill.context.cassandra.mirrorContext
/** Verifies that the Cassandra (CQL) query compiler rewrites entity and column
  * names according to the aliases declared via `schema(...)`, across actions
  * (insert/update/delete), maps, filters and sort clauses.
  */
class RenamePropertiesSpec extends Spec {
  import mirrorContext._
  // Entity with the table renamed to "test_entity" and columns s/i aliased.
  val e = quote {
    query[TestEntity].schema(_.entity("test_entity").columns(_.s -> "field_s", _.i -> "field_i"))
  }
  // Un-aliased query for contrast (no renaming applied).
  val f = quote {
    qr1.filter(t => t.i == 1)
  }
  "renames properties according to the entity aliases" - {
    "action" - {
      "insert" in {
        val q = quote {
          e.insert(lift(TestEntity("a", 1, 1L, None)))
        }
        mirrorContext.run(q).string mustEqual
          "INSERT INTO test_entity (field_s,field_i,l,o) VALUES (?, ?, ?, ?)"
      }
      "insert assigned" in {
        val q = quote {
          e.insert(_.i -> lift(1), _.l -> lift(1L), _.o -> lift(Option(1)), _.s -> lift("test"))
        }
        mirrorContext.run(q).string mustEqual
          "INSERT INTO test_entity (field_i,l,o,field_s) VALUES (?, ?, ?, ?)"
      }
      "update" in {
        val q = quote {
          e.filter(_.i == 999).update(lift(TestEntity("a", 1, 1L, None)))
        }
        mirrorContext.run(q).string mustEqual
          "UPDATE test_entity SET field_s = ?, field_i = ?, l = ?, o = ? WHERE field_i = 999"
      }
      "delete" in {
        val q: Quoted[Delete[TestEntity]] = quote {
          e.filter(_.i == 999).delete
        }
        mirrorContext.run(q).string mustEqual
          "DELETE FROM test_entity WHERE field_i = 999"
      }
    }
    "map" - {
      "body" in {
        val q = quote {
          e.map(t => (t.i, t.l))
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_i, l FROM test_entity"
      }
      // Aliases must survive an identity map before a filter.
      "transitive" in {
        val q = quote {
          e.map(t => t).filter(t => t.i == 1)
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_s, field_i, l, o FROM test_entity WHERE field_i = 1"
      }
    }
    "filter" - {
      "body" in {
        val q = quote {
          e.filter(t => t.i == 1)
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_s, field_i, l, o FROM test_entity WHERE field_i = 1"
      }
      "transitive" in {
        val q = quote {
          e.filter(t => t.l == 1).map(t => t.s)
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_s FROM test_entity WHERE l = 1"
      }
    }
    "sortBy" - {
      "body" in {
        val q = quote {
          e.sortBy(t => t.i)
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_s, field_i, l, o FROM test_entity ORDER BY field_i ASC"
      }
      "transitive" in {
        val q = quote {
          e.sortBy(t => t.l).map(t => t.s)
        }
        mirrorContext.run(q).string mustEqual
          "SELECT field_s FROM test_entity ORDER BY l ASC"
      }
    }
  }
}
| jcranky/quill | quill-cassandra/src/test/scala/io/getquill/context/cassandra/norm/RenamePropertiesSpec.scala | Scala | apache-2.0 | 2,838 |
package uk.ac.ncl.openlab.intake24.sql.tools.food.migrations
import org.rogach.scallop.ScallopConf
import uk.ac.ncl.openlab.intake24.sql.tools._
import anorm.SQL
/** One-off food DB migration (v18 -> v19): points guide images and their
  * objects at the corresponding image maps, and drops guide image objects
  * known to be out of range for 'Gcri' and 'Gyog'.
  */
object FoodV18_4_Update_Guide_Images extends App with MigrationRunner with WarningMessage {
  trait Options extends ScallopConf with DatabaseConfigurationOptions
  // Schema version this migration upgrades from.
  // Fixed: was written as `18l` — a lowercase 'l' long suffix, easily misread as `181`.
  // NOTE(review): this value is not referenced below (runMigration hardcodes 18/19) — confirm.
  val versionFrom = 18L
  val options = new ScallopConf(args) with Options
  options.verify()
  runMigration(18, 19, options) {
    implicit conn =>
      SQL("UPDATE guide_images SET image_map_id=id").executeUpdate()
      SQL("DELETE FROM guide_image_objects WHERE guide_image_id='Gcri' AND object_id > 6").execute()
      SQL("DELETE FROM guide_image_objects WHERE guide_image_id='Gyog' AND object_id > 12").execute()
      SQL("UPDATE guide_image_objects SET image_map_id=guide_image_id,image_map_object_id=object_id").executeUpdate()
  }
} | digitalinteraction/intake24 | DatabaseTools/src/main/scala/uk/ac/ncl/openlab/intake24/sql/tools/food/migrations/FoodV18_4_Update_Guide_Images.scala | Scala | apache-2.0 | 878 |
package com.wavesplatform.lang.v1.repl.node.http.response.model
// Subset of a node's block-info REST response consumed by the REPL.
// Hyphenated field names are backtick-quoted to match the JSON keys verbatim.
private[node] case class BlockInfoResponse(
    timestamp: Long,
    height: Int,
    `nxt-consensus`: NxtData,
    generator: ByteString,
    generatorPublicKey: ByteString,
    VRF: Option[ByteString] // optional — presumably absent on pre-VRF blocks; confirm against node API
)
// Nxt-style consensus section of the block response; backtick-quoted names
// mirror the hyphenated JSON keys exactly.
private[node] case class NxtData(
    `base-target`: Long,
    `generation-signature`: ByteString
)
| wavesplatform/Waves | repl/shared/src/main/scala/com/wavesplatform/lang/v1/repl/node/http/response/model/BlockInfoResponse.scala | Scala | mit | 355 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.io
import cats.data.{Chain, NonEmptyChain}
import cats.effect.IO
import laika.api.MarkupParser
import laika.ast.DocumentType._
import laika.ast.Path.Root
import laika.ast._
import laika.ast.sample.{ParagraphCompanionShortcuts, SampleTrees, TestSourceBuilders}
import laika.bundle._
import laika.config.ConfigException
import laika.format.{Markdown, ReStructuredText}
import laika.io.helper.InputBuilder
import laika.io.implicits._
import laika.io.model.{InputTree, InputTreeBuilder, ParsedTree}
import laika.io.runtime.ParserRuntime.{DuplicatePath, ParserErrors}
import laika.parse.Parser
import laika.parse.markup.DocumentParser.{InvalidDocument, InvalidDocuments}
import laika.parse.text.TextParsers
import laika.rewrite.nav.TargetFormats
import laika.rewrite.{DefaultTemplatePath, TemplateContext, TemplateRewriter}
import laika.theme.Theme
import munit.CatsEffectSuite
/** Exercises tree-level parsing from virtual (in-memory) inputs: directory
  * structures, title/cover documents, templates, style sheets, configuration,
  * static files, extension-bundle precedence and error aggregation.
  */
class TreeParserSpec
  extends CatsEffectSuite
    with ParagraphCompanionShortcuts
    with IOTreeAssertions
    with TestSourceBuilders
    with InputBuilder
    with ParserSetup {
  // Markup, template and configuration fixtures shared by the tests below.
  object Contents {
    val link = "[link](/foo)"
    val name = "foo"
    val name2 = "bar"
    val multiline: String =
      """aaa
        |
        |bbb""".stripMargin
    val directive = "aa @:foo(bar) bb"
    val template: String =
      """<div>
        |  ${cursor.currentDocument.content}
        |</div>""".stripMargin
    val template2: String =
      """<div>
        |xx${cursor.currentDocument.content}
        |</div>""".stripMargin
    val brokenTemplate: String =
      """<div>
        |${cursor.currentDocument.content} @:foo
        |</div>""".stripMargin
    val dynDoc = "${value}"
    val conf = "value: abc"
    val titleDocNameConf = "laika.titleDocuments.inputName = alternative-title"
    val order: String =
      """laika.navigationOrder: [
        |  lemon.md
        |  shapes
        |  cherry.md
        |  colors
        |  apple.md
        |  orange.md
        |]""".stripMargin
  }
  // Helpers producing the expected Document/tree values the assertions compare against.
  val defaultContent = Seq(p("foo"))
  def docResult (num: Int, content: Seq[Block] = defaultContent, path: Path = Root): Document =
    Document(path / s"doc-$num.md", RootElement(content))
  def docResult (name: String): Document = Document(Root / name, RootElement(defaultContent))
  def customDocResult(name: String, content: Seq[Block], path: Path = Root): Document =
    Document(path / name, RootElement(content))
  def applyTemplates (parsed: ParsedTree[IO]): DocumentTreeRoot =
    TemplateRewriter.applyTemplates(parsed.root, TemplateContext("html")).toOption.get
  def parsedTree (inputs: Seq[(Path, String)], f: InputTreeBuilder[IO] => InputTreeBuilder[IO] = identity): IO[DocumentTreeRoot] = defaultParser
    .use(_.fromInput(f(build(inputs))).parse)
    .map(applyTemplates)
  // Parser that accepts both Markdown and reStructuredText inputs.
  def mixedParsedTree (inputs: Seq[(Path, String)]): IO[DocumentTreeRoot] = {
    val parser = MarkupParser
      .of(Markdown)
      .parallel[IO]
      .withTheme(Theme.empty)
      .withAlternativeParser(MarkupParser.of(ReStructuredText))
      .build
    parser.use(_.fromInput(build(inputs)).parse).map(_.root)
  }
  def parsedWith (inputs: Seq[(Path, String)], bundle: ExtensionBundle): IO[DocumentTreeRoot] =
    parserWithBundle(bundle)
      .use(_.fromInput(build(inputs)).parse)
      .map(applyTemplates)
  def parsedTemplates (inputs: Seq[(Path, String)], bundle: ExtensionBundle): IO[Seq[TemplateRoot]] = {
    parserWithBundle(bundle)
      .use(_.fromInput(build(inputs)).parse)
      .flatMap { parsed =>
        IO.fromEither(DocumentCursor(Document(Root, RootElement.empty)).map(cursor =>
          parsed.root.tree.templates.map { tpl =>
            tpl.content.rewriteChildren(TemplateRewriter.rewriteRules(cursor))
          }
        ).left.map(ConfigException.apply))
      }
  }
  test("an empty tree") {
    parsedTree(Nil).assertEquals(DocumentTreeRoot(DocumentTree(Root, Nil)))
  }
  test("tree with a single document") {
    val inputs = Seq(
      Root / "name.md" -> Contents.name
    )
    val docResult = Document(Root / "name.md", RootElement(p("foo")))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, List(docResult)))
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("tree with multiple subtrees") {
    val inputs = Seq(
      Root / "doc-1.md" -> Contents.name,
      Root / "doc-2.md" -> Contents.name,
      Root / "tree-1" / "doc-3.md" -> Contents.name,
      Root / "tree-1" / "doc-4.md" -> Contents.name,
      Root / "tree-2" / "doc-5.md" -> Contents.name,
      Root / "tree-2" / "doc-6.md" -> Contents.name
    )
    val expected = SampleTrees.sixDocuments
      .docContent(defaultContent)
      .suffix("md")
      .build
    parsedTree(inputs).assertEquals(expected)
  }
  // Every invalid document must be reported, with source position markers.
  test("collect errors from multiple documents") {
    val inputs = Seq(
      Root / "doc-1.md" -> "[link1]",
      Root / "doc-2.md" -> "[link2]",
      Root / "tree-1" / "doc-3.md" -> "[link3]",
      Root / "tree-1" / "doc-4.md" -> "[link4]",
      Root / "tree-2" / "doc-5.md" -> "[link5]",
      Root / "tree-2" / "doc-6.md" -> "[link6]"
    )
    val invalidDocuments = inputs.map { case (path, markup) =>
      val msg = s"unresolved link id reference: link${markup.charAt(5)}"
      val invalidSpan = InvalidSpan(msg, source(markup, markup, path))
      InvalidDocument(path, invalidSpan)
    }
    val expectedError = InvalidDocuments(NonEmptyChain.fromChainUnsafe(Chain.fromSeq(invalidDocuments)))
    val expectedMessage =
      """/doc-1.md
        |
        |  [1]: unresolved link id reference: link1
        |
        |  [link1]
        |  ^
        |
        |/doc-2.md
        |
        |  [1]: unresolved link id reference: link2
        |
        |  [link2]
        |  ^
        |
        |/tree-1/doc-3.md
        |
        |  [1]: unresolved link id reference: link3
        |
        |  [link3]
        |  ^
        |
        |/tree-1/doc-4.md
        |
        |  [1]: unresolved link id reference: link4
        |
        |  [link4]
        |  ^
        |
        |/tree-2/doc-5.md
        |
        |  [1]: unresolved link id reference: link5
        |
        |  [link5]
        |  ^
        |
        |/tree-2/doc-6.md
        |
        |  [1]: unresolved link id reference: link6
        |
        |  [link6]
        |  ^""".stripMargin
    assertEquals(InvalidDocuments.format(expectedError.documents), expectedMessage)
    parsedTree(inputs).attempt.assertEquals(Left(expectedError))
  }
  test("report errors originating in templates with additional path info") {
    val markup = "text"
    val docPath = Root / "doc-1.md"
    val inputs = Seq(
      docPath -> markup,
      DefaultTemplatePath.forHTML -> Contents.brokenTemplate
    )
    val msg = "One or more errors processing directive 'foo': No template directive registered with name: foo"
    val invalidDocument = {
      val invalidSpan2 = InvalidSpan(msg, source("@:foo", Contents.brokenTemplate, DefaultTemplatePath.forHTML))
      InvalidDocument(docPath, invalidSpan2)
    }
    val expectedError = InvalidDocuments(NonEmptyChain(invalidDocument))
    val expectedMessage =
      s"""/doc-1.md
         |
         |  [/default.template.html:2]: $msg
         |
         |  $${cursor.currentDocument.content} @:foo
         |  ^""".stripMargin
    assertEquals(InvalidDocuments.format(expectedError.documents), expectedMessage)
    parsedTree(inputs).map(root =>
      InvalidDocuments.from(root, MessageFilter.Error).map(_.documents.head)
    ).assertEquals(Some(invalidDocument))
  }
  test("tree with a cover and a title document") {
    val inputs = Seq(
      Root / "doc-1.md" -> Contents.name,
      Root / "doc-2.md" -> Contents.name,
      Root / "README.md" -> Contents.name,
      Root / "cover.md" -> Contents.name
    )
    val treeResult = DocumentTreeRoot(
      DocumentTree(Root, List(docResult(1), docResult(2)), titleDocument = Some(docResult("README.md"))),
      coverDocument = Some(docResult("cover.md")),
    )
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("tree with a title document with a custom document name configuration") {
    val inputs = Seq(
      Root / "directory.conf" -> Contents.titleDocNameConf,
      Root / "doc-1.md" -> Contents.name,
      Root / "doc-2.md" -> Contents.name,
      Root / "alternative-title.md" -> Contents.name,
      Root / "cover.md" -> Contents.name
    )
    val treeResult = DocumentTreeRoot(
      DocumentTree(Root, List(docResult(1), docResult(2)), titleDocument = Some(docResult("alternative-title.md"))),
      coverDocument = Some(docResult("cover.md")),
    )
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("tree with a single template".ignore) {
    val inputs = Seq(
      Root / "main.template.html" -> Contents.name
    )
    val template = TemplateDocument(Root / "main.template.html", TemplateRoot("foo"))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, Nil, templates = List(template)))
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("fail with duplicate paths") {
    val inputs = Seq(
      Root / "doc1.md" -> Contents.name,
      Root / "doc2.md" -> Contents.name,
      Root / "doc2.md" -> Contents.name,
      Root / "sub" / "doc.md" -> Contents.name,
      Root / "sub" / "doc.md" -> Contents.name
    )
    defaultParser.use(_.fromInput(build(inputs)).parse).attempt.assertEquals(Left(
      ParserErrors(Set(DuplicatePath(Root / "doc2.md"), DuplicatePath(Root / "sub" / "doc.md")))
    ))
  }
  test("tree with a static document") {
    val inputs = Seq(
      Root / "omg.js" -> Contents.name
    )
    val staticDoc = StaticDocument(Root / "omg.js", TargetFormats.Selected("html"))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, Nil), staticDocuments = List(staticDoc))
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("tree with a provided path") {
    val providedPath = Root / "provided" / "ext.html"
    val inputs = Seq(
      Root / "doc-1.md" -> "[link](provided/ext.html)"
    )
    val target = InternalTarget(providedPath).relativeTo(Root / "doc-1.md")
    val expectedDocs = Seq(docResult(1, Seq(Paragraph(SpanLink.internal(providedPath)("link").withTarget(target)))))
    val expectedResult = DocumentTreeRoot(DocumentTree(Root, expectedDocs), staticDocuments = List(StaticDocument(providedPath)))
    parsedTree(inputs, _.addProvidedPath(providedPath)).assertEquals(expectedResult)
  }
  test("tree with all available file types and multiple markup formats") {
    val inputs = Seq(
      Root / "doc-1.md" -> Contents.link,
      Root / "doc-2.rst" -> Contents.link,
      Root / "mainA.template.html" -> Contents.name,
      Root / "tree-1" / "mainB.template.html" -> Contents.name,
      Root / "tree-1" / "doc-3.md" -> Contents.name,
      Root / "tree-1" / "doc-4.md" -> Contents.name,
      Root / "tree-2" / "doc-5.md" -> Contents.name,
      Root / "tree-2" / "doc-6.md" -> Contents.name,
      Root / "static-1" / "omg.js" -> Contents.name,
    )
    val linkResult = Seq(p(SpanLink.external("/foo")("link")))
    val rstResult = Seq(p("[link](/foo)"))
    val expected = SampleTrees.sixDocuments
      .staticDoc(Root / "static-1" / "omg.js", "html")
      .docContent(defaultContent)
      .suffix("md")
      .doc1.content(linkResult)
      .doc2.content(rstResult)
      .doc2.suffix("rst")
      .root.template("mainA.template.html", TemplateString("foo"))
      .tree1.template("mainB.template.html", TemplateString("foo"))
      .build
    mixedParsedTree(inputs).assertEquals(expected)
  }
  test("custom template engine".ignore) {
    val parser: Parser[TemplateRoot] = TextParsers.anyChars.map { str => TemplateRoot("$$" + str) }
    val inputs = Seq(
      Root / "main1.template.html" -> Contents.name,
      Root / "main2.template.html" -> Contents.name
    )
    def template (num: Int) = TemplateDocument(Root / s"main$num.template.html", TemplateRoot("$$foo"))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, Nil, templates = List(template(1), template(2))))
    parsedWith(inputs, BundleProvider.forTemplateParser(parser)).assertEquals(treeResult)
  }
  test("custom style sheet engine") {
    // Matches any path and derives the style-sheet kind from the file name.
    val customDocTypeMatcher: PartialFunction[Path, DocumentType] = {
      case path =>
        val Stylesheet = """.+\\.([a,b]+).css$""".r
        path.name match {
          case Stylesheet(kind) => StyleSheet(kind)
        }
    }
    def styleDecl (styleName: String, order: Int = 0) =
      StyleDeclaration(StylePredicate.ElementType("Type"), styleName -> "foo").increaseOrderBy(order)
    val parser: Parser[Set[StyleDeclaration]] = TextParsers.anyChars.map { n => Set(styleDecl(n)) }
    val inputs = Seq(
      Root / "main1.aaa.css" -> Contents.name,
      Root / "main2.bbb.css" -> Contents.name2,
      Root / "main3.aaa.css" -> Contents.name
    )
    val treeResult = DocumentTreeRoot(DocumentTree(Root, Nil), styles = Map(
      "aaa" -> StyleDeclarationSet(Set(Root / "main1.aaa.css", Root / "main3.aaa.css"), Set(styleDecl("foo"), styleDecl("foo", 1))),
      "bbb" -> StyleDeclarationSet(Set(Root / "main2.bbb.css"), Set(styleDecl("bar")))
    ))
    parsedWith(inputs, BundleProvider.forDocTypeMatcher(customDocTypeMatcher)
      .withBase(BundleProvider.forStyleSheetParser(parser))).assertEquals(treeResult)
  }
  test("template directive") {
    import laika.directive.Templates
    import Templates.dsl._
    val directive = Templates.create("foo") {
      attribute(0).as[String] map {
        TemplateString(_)
      }
    }
    val inputs = Seq(
      Root / "main1.template.html" -> Contents.directive,
      Root / "main2.template.html" -> Contents.directive
    )
    val template = TemplateRoot(TemplateString("aa "), TemplateString("bar"), TemplateString(" bb"))
    val result = Seq(template, template)
    parsedTemplates(inputs, BundleProvider.forTemplateDirective(directive)).assertEquals(result)
  }
  test("add indentation information if an embedded root is preceded by whitespace characters") {
    import laika.ast.EmbeddedRoot
    val inputs = Seq(
      DefaultTemplatePath.forHTML -> Contents.template,
      Root / "doc.md" -> Contents.multiline
    )
    val docResult = Document(Root / "doc.md", RootElement(TemplateRoot(
      TemplateString("<div>\\n  "),
      EmbeddedRoot(List(p("aaa"), p("bbb")), 2),
      TemplateString("\\n</div>")
    )))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, List(docResult)))
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("do not add indentation information if an embedded root is preceded by non-whitespace characters") {
    import laika.ast.EmbeddedRoot
    val inputs = Seq(
      DefaultTemplatePath.forHTML -> Contents.template2,
      Root / "doc.md" -> Contents.multiline
    )
    val docResult = Document(Root / "doc.md", RootElement(TemplateRoot(
      TemplateString("<div>\\nxx"),
      EmbeddedRoot(p("aaa"), p("bbb")),
      TemplateString("\\n</div>")
    )))
    val treeResult = DocumentTreeRoot(DocumentTree(Root, List(docResult)))
    parsedTree(inputs).assertEquals(treeResult)
  }
  test("custom navigation order") {
    val inputs = Seq(
      Root / "apple.md" -> Contents.name,
      Root / "orange.md" -> Contents.name,
      Root / "colors" / "green.md" -> Contents.name,
      Root / "lemon.md" -> Contents.name,
      Root / "shapes" / "circle.md" -> Contents.name,
      Root / "cherry.md" -> Contents.name,
      Root / "directory.conf" -> Contents.order,
    )
    defaultParser.use(_.fromInput(build(inputs)).parse).map {
      _.root.tree.content map (_.path.name)
    }.assertEquals(List("lemon.md", "shapes", "cherry.md", "colors", "apple.md", "orange.md"))
  }
  test("always move title documents to the front, even with a custom navigation order") {
    val inputs = Seq(
      Root / "apple.md" -> Contents.name,
      Root / "orange.md" -> Contents.name,
      Root / "colors" / "green.md" -> Contents.name,
      Root / "lemon.md" -> Contents.name,
      Root / "README.md" -> Contents.name,
      Root / "shapes" / "circle.md" -> Contents.name,
      Root / "cherry.md" -> Contents.name,
      Root / "directory.conf" -> Contents.order,
    )
    defaultParser.use(_.fromInput(build(inputs)).parse).map(_.root.tree).map { tree =>
      assertEquals(tree.titleDocument.map(_.path.basename), Some("README"))
      assertEquals(tree.content map (_.path.name), List("lemon.md", "shapes", "cherry.md", "colors", "apple.md", "orange.md"))
      assertEquals(tree.content map (_.position), List(
        TreePosition(Seq(1)),
        TreePosition(Seq(2)),
        TreePosition(Seq(3)),
        TreePosition(Seq(4)),
        TreePosition(Seq(5)),
        TreePosition(Seq(6)),
      ))
    }
  }
  // Span-parser fixtures for testing theme vs. app extension precedence.
  object CustomSpanParsers {
    import TextParsers._
    import laika.parse.implicits._
    case class DecoratedSpan (deco: Char, text: String) extends Span {
      val options: Options = NoOpt
      type Self = DecoratedSpan
      def withOptions (options: Options): DecoratedSpan = this
    }
    def spanFor (deco: Char): SpanParserBuilder = spanFor(deco, deco)
    def spanFor (deco: Char, overrideDeco: Char): SpanParserBuilder =
      SpanParser.standalone {
        (deco.toString ~> anyNot(' ')).map(DecoratedSpan(overrideDeco, _))
      }
    val input: InputTreeBuilder[IO] = InputTree[IO].addString("aaa +bbb ccc", Root / "doc.md")
    def parse (themeParsers: Seq[SpanParserBuilder] = Nil, appParsers: Seq[SpanParserBuilder] = Nil): IO[RootElement] =
      parserWithThemeAndBundle(
        BundleProvider.forMarkupParser(spanParsers = themeParsers, origin = BundleOrigin.Theme),
        BundleProvider.forMarkupParser(spanParsers = appParsers)
      )
      .use(_.fromInput(input).parse)
      .map(_.root.allDocuments.head.content)
  }
  test("use a span parser from a theme") {
    import CustomSpanParsers._
    val themeParsers = Seq(spanFor('+'))
    parse(themeParsers).assertEquals(RootElement(Paragraph(
      Text("aaa "),
      DecoratedSpan('+', "bbb"),
      Text(" ccc")
    )))
  }
  test("let a span parser from an app extension override a span parser from a theme") {
    import CustomSpanParsers._
    val themeParsers = Seq(spanFor('+'))
    val appParsers = Seq(spanFor('+', '!'))
    parse(themeParsers, appParsers).assertEquals(RootElement(Paragraph(
      Text("aaa "),
      DecoratedSpan('!', "bbb"),
      Text(" ccc")
    )))
  }
  // TODO - reactivate these tests for the removed sequential parser
  //
  //    "parse Markdown from an empty file" in {
  //      val filename = getClass.getResource("/emptyInput.md").getFile
  //      parser.fromFile(filename).parse.map(_.content).assertEquals(root())
  //    }
  //
  //    "parse Markdown from a java.io.InputStream instance, specifying the encoding explicitly" in {
  //      val input = """äää
  //                    |ööö
  //                    |üüü""".stripMargin
  //      val stream = IO(new ByteArrayInputStream(input.getBytes("ISO-8859-1")))
  //      parser.fromStream(stream)(Codec.ISO8859).parse.map(_.content).assertEquals(root(p(input)))
  //    }
  //
  //    "parse Markdown from a java.io.InputStream instance, specifying the encoding implicitly" in {
  //      val input = """äää
  //                    |ööö
  //                    |üüü""".stripMargin
  //      val stream = IO(new ByteArrayInputStream(input.getBytes("ISO-8859-1")))
  //      implicit val codec:Codec = Codec.ISO8859
  //      parser.fromStream(stream).parse.map(_.content).assertEquals(root(p(input)))
  //    }
}
| planet42/Laika | io/src/test/scala/laika/io/TreeParserSpec.scala | Scala | apache-2.0 | 20,203 |
package org.scalawiki.dto.history
import java.time.ZonedDateTime
import org.scalawiki.dto.Revision
import org.scalawiki.dto.filter.RevisionFilter
// Wraps the revision history of a wiki page and derives aggregate facts from it.
// NOTE(review): `updated` reads the head and `created` reads the last revision,
// which implies `revisions` is ordered newest-first — but `hasPageCreation`
// checks the *head* for the new-page marker; confirm the intended ordering.
class History(val revisions: Seq[Revision]) {
  // True when the newest visible revision is the page-creation revision.
  def hasPageCreation = revisions.headOption.exists(_.isNewPage)
  // Distinct user names among the revisions accepted by the filter.
  def users(revisionFilter: RevisionFilter): Set[String] = {
    val filtered = revisionFilter.apply(revisions)
    filtered.flatMap(_.user.flatMap(_.name)).toSet
  }
  // Total size change across the filtered range: the oldest revision's own
  // delta plus the size difference between the oldest and newest revisions.
  // None when the range is empty or any size information is missing.
  def delta(revisionFilter: RevisionFilter): Option[Long] = {
    val filtered = revisionFilter.apply(revisions)
    val sum = for (
      oldest <- filtered.lastOption;
      newest <- filtered.headOption;
      d1 <- delta(oldest);
      d2 <- delta(oldest, newest))
      yield d1 + d2
    sum
  }
  // Size change introduced by a single revision, relative to its parent.
  // A parentId of 0 marks page creation, so the revision's full size counts.
  def delta(revision: Revision): Option[Long] =
    revision.parentId.flatMap { parentId =>
      if (parentId == 0)
        revision.size
      else
        revisions.find(_.revId.contains(parentId)).flatMap {
          parent => delta(parent, revision)
        }
    }
  // Size difference between two revisions (to - from); None if either size is unknown.
  def delta(from: Revision, to: Revision): Option[Long] =
    for (fromSize <- from.size; toSize <- to.size) yield toSize - fromSize
  // Creation timestamp: the oldest revision, provided it has no real parent.
  def created: Option[ZonedDateTime] = revisions.lastOption.filter(_.parentId.forall(_ == 0)).flatMap(_.timestamp)
  // Timestamp of the newest revision.
  def updated: Option[ZonedDateTime] = revisions.headOption.flatMap(_.timestamp)
  def createdAfter(from: Option[ZonedDateTime]) = created.exists(rev => from.forall(rev.isAfter))
  // True when at least one revision passes the filter.
  def editedIn(revisionFilter: RevisionFilter) =
    revisionFilter.apply(revisions).nonEmpty
}
| intracer/scalawiki | scalawiki-core/src/main/scala/org/scalawiki/dto/history/History.scala | Scala | apache-2.0 | 1,532 |
package im.actor.push.controllers
import akka.util.Timeout
import com.spingo.op_rabbit.Message
import com.spingo.op_rabbit.Message.ConfirmResponse
import im.actor.push.broker.Rabbit
import play.api.libs.json.{JsValue, Json}
import play.api.mvc.Results._
import play.api.mvc._
import play.mvc.Controller
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.pattern.ask
class PushController extends Controller {
def send(channel: String) = Action { request =>
implicit val timeout = Timeout(5 seconds)
val received = (
Rabbit.rabbitControl ? Message.queue("ping",
queue = channel)
).mapTo[ConfirmResponse]
def res = Await.result(received, timeout.duration)
Ok(Json.obj(
"result" -> "ok",
"channel" -> channel
))
}
} | actorapp/actor-push | actor-push/app/im/actor/push/controllers/PushController.scala | Scala | apache-2.0 | 799 |
package com.twitter.finagle.http2.transport
import com.twitter.finagle.Stack
import com.twitter.finagle.netty4.http.exp._
import io.netty.channel.{Channel, ChannelInitializer, ChannelHandlerContext}
import io.netty.handler.codec.http2.{Http2Codec, Http2ServerDowngrader}
import io.netty.handler.ssl.{ApplicationProtocolNames, ApplicationProtocolNegotiationHandler}
/** Negotiates the application protocol via NPN/ALPN, falling back to
  * HTTP/1.1 (the constructor argument to the superclass) when the peer
  * negotiates nothing.
  */
private[http2] class NpnOrAlpnHandler(init: ChannelInitializer[Channel], params: Stack.Params)
  extends ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
  @throws(classOf[Exception])
  protected def configurePipeline(ctx: ChannelHandlerContext, protocol: String) {
    protocol match {
      case ApplicationProtocolNames.HTTP_2 =>
        // Http2 has been negotiated, replace the HttpCodec with an Http2Codec
        val initializer = new ChannelInitializer[Channel] {
          def initChannel(ch: Channel): Unit = {
            // Downgrader first, then the server pipeline, then the caller-supplied
            // initializer — presumably the downgrader translates h2 stream frames
            // into HTTP/1.1 message objects for the rest of the pipeline (per the
            // Netty handler's name); confirm against Netty docs before reordering.
            ch.pipeline.addLast(new Http2ServerDowngrader(false /*validateHeaders*/))
            initServer(params)(ch.pipeline)
            ch.pipeline.addLast(init)
          }
        }
        ctx.channel.config.setAutoRead(true)
        ctx.pipeline().replace("httpCodec", "http2Codec", new Http2Codec(true /* server */ , initializer))
      case ApplicationProtocolNames.HTTP_1_1 =>
        // The Http codec is already in the pipeline, so we are good!
      case _ =>
        throw new IllegalStateException("unknown protocol: " + protocol)
    }
  }
}
| BuoyantIO/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/transport/NpnOrAlpnHandler.scala | Scala | apache-2.0 | 1,476 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.metrics.reporters
import java.util.Map.Entry
import java.util.concurrent.TimeUnit
import com.codahale.metrics._
import org.apache.accumulo.core.client.mock.MockInstance
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.security.Authorizations
import org.apache.hadoop.io.Text
import org.junit.runner.RunWith
import org.locationtech.geomesa.metrics.reporters.AccumuloReporter.Keys
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
// Integration-style spec for AccumuloReporter: registers one metric of each
// Dropwizard type (gauge, counter, histogram, meter, timer), triggers a
// report, and verifies the rows / column families written to a mock Accumulo
// "metrics" table.
@RunWith(classOf[JUnitRunner])
class AccumuloReporterTest extends Specification {
  // All examples share one registry/reporter, so they must not interleave.
  sequential
  val registry = new MetricRegistry()
  // In-memory Accumulo instance; no external cluster required.
  val connector = new MockInstance("AccumuloReporterTest").getConnector("root", new PasswordToken(""))
  // Reporter under test: writes to table "metrics"; durations are converted
  // to milliseconds and rates to events-per-second.
  val reporter = AccumuloReporter.forRegistry(registry)
    .mock(true)
    .writeToTable("metrics")
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .build("AccumuloReporterTest", "", "root", "")

  /** Scans the "metrics" table for all entries whose row id starts with `prefix`. */
  def scan(prefix: String): List[Entry[Key, Value]] = {
    val scanner = connector.createScanner("metrics", new Authorizations)
    scanner.setRange(org.apache.accumulo.core.data.Range.prefix(prefix))
    try {
      scanner.toList
    } finally {
      scanner.close()
    }
  }

  /** Finds the entry stored under column family `cf` and parses its value as a Double. */
  def entry(entries: List[Entry[Key, Value]], cf: Text): Option[Double] =
    entries.find(_.getKey.getColumnFamily == cf).map(_.getValue.toString.toDouble)

  "AccumuloReporter" should {
    "report gauges" >> {
      val name = "mygauge"
      registry.register(name, new Gauge[String] {
        override def getValue: String = "value1"
      })
      reporter.report()
      reporter.flush()
      registry.remove(name)
      val entries = scan(name)
      // A gauge is written as a single cell carrying its raw value.
      entries must haveLength(1)
      entries.head.getKey.getRow.toString must startWith(name)
      entries.head.getKey.getColumnFamily mustEqual Keys.value
      entries.head.getValue.toString mustEqual "value1"
    }
    "report counters" >> {
      val name = "mycounter"
      val metric = registry.counter(name)
      metric.inc(10)
      reporter.report()
      reporter.flush()
      registry.remove(name)
      val entries = scan(name)
      // A counter is written as a single cell under the "count" family.
      entries must haveLength(1)
      entries.head.getKey.getColumnFamily mustEqual Keys.count
      entries.head.getValue.toString mustEqual "10"
    }
    "report histograms" >> {
      val name = "myhistogram"
      val metric = registry.histogram(name)
      // Feed values 0..9 so the expected statistics below are deterministic.
      (0 until 10).foreach(metric.update)
      reporter.report()
      reporter.flush()
      registry.remove(name)
      val entries = scan(name)
      // Histograms expand into 11 cells: count, min/max/mean/stddev and the
      // percentile columns, all sharing the metric name as row id.
      entries must haveLength(11)
      forall(entries)(_.getKey.getRow.toString mustEqual name)
      entry(entries, Keys.count) must beSome(10.0)
      entry(entries, Keys.max) must beSome(9.0)
      entry(entries, Keys.mean) must beSome(4.5)
      entry(entries, Keys.min) must beSome(0.0)
      entry(entries, Keys.stddev) must beSome(2.872281)
      entry(entries, Keys.p50) must beSome(5.0)
      entry(entries, Keys.p75) must beSome(7.0)
      entry(entries, Keys.p95) must beSome(9.0)
      entry(entries, Keys.p98) must beSome(9.0)
      entry(entries, Keys.p99) must beSome(9.0)
      entry(entries, Keys.p999) must beSome(9.0)
    }
    "report meters" >> {
      val name = "mymeter"
      // Manual clock makes the mean rate deterministic.
      var tick: Long = 0
      val clock = new Clock { override def getTick: Long = tick * 1000 } // tick is in nanos - we use millis
      val metric = registry.register(name, new Meter(clock))
      (0 until 10).foreach { i => tick += i; metric.mark() }
      reporter.report()
      reporter.flush()
      registry.remove(name)
      val entries = scan(name)
      // Meters produce count, mean/1m/5m/15m rates plus the rate-unit label.
      entries must haveLength(6)
      forall(entries)(_.getKey.getRow.toString mustEqual name)
      entry(entries, Keys.count) must beSome(10.0)
      entry(entries, Keys.mean_rate) must beSome(222222.222222)
      entry(entries, Keys.m1_rate) must beSome(0.0)
      entry(entries, Keys.m5_rate) must beSome(0.0)
      entry(entries, Keys.m15_rate) must beSome(0.0)
      entries.find(_.getKey.getColumnFamily == Keys.rate_unit).map(_.getValue.toString) must beSome("events/second")
    }
    "report timers" >> {
      val name = "mytimer"
      var tick: Long = 0
      val clock = new Clock { override def getTick: Long = tick * 1000 } // tick is in nanos - we use millis
      val metric = registry.register(name, new Timer(new ExponentiallyDecayingReservoir(), clock))
      // Each iteration times an interval of i "millis" (tick advances before
      // start and before stop), yielding durations 0..9 ms.
      (0 until 10).foreach { i =>
        tick += i
        val c = metric.time()
        tick += i
        c.stop()
      }
      reporter.report()
      reporter.flush()
      registry.remove(name)
      val entries = scan(name)
      // Timers combine histogram (duration) and meter (rate) columns plus the
      // two unit labels: 17 cells in total.
      entries must haveLength(17)
      forall(entries)(_.getKey.getRow.toString mustEqual name)
      entry(entries, Keys.count) must beSome(10.0)
      entry(entries, Keys.max) must beSome(0.009)
      entry(entries, Keys.mean) must beSome(0.0045)
      entry(entries, Keys.min) must beSome(0.0)
      entry(entries, Keys.stddev) must beSome(0.002872)
      entry(entries, Keys.p50) must beSome(0.005)
      entry(entries, Keys.p75) must beSome(0.007)
      entry(entries, Keys.p95) must beSome(0.009)
      entry(entries, Keys.p98) must beSome(0.009)
      entry(entries, Keys.p99) must beSome(0.009)
      entry(entries, Keys.p999) must beSome(0.009)
      entry(entries, Keys.mean_rate) must beSome(111111.111111)
      entry(entries, Keys.m1_rate) must beSome(0.0)
      entry(entries, Keys.m5_rate) must beSome(0.0)
      entry(entries, Keys.m15_rate) must beSome(0.0)
      entries.find(_.getKey.getColumnFamily == Keys.rate_unit).map(_.getValue.toString) must beSome("calls/second")
      entries.find(_.getKey.getColumnFamily == Keys.duration_unit).map(_.getValue.toString) must beSome("milliseconds")
    }
  }
  // Runs after all examples: shut down the reporter and release resources.
  step {
    reporter.stop()
  }
}
| elahrvivaz/geomesa | geomesa-metrics/src/test/scala/org/locationtech/geomesa/metrics/reporters/AccumuloReporterTest.scala | Scala | apache-2.0 | 6,428 |
package mesosphere.mesos
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.Protos.Constraint.Operator
import mesosphere.marathon.state.AppDefinition
import mesosphere.marathon.state.PathId._
import org.scalatest.Matchers
import scala.collection.immutable.Seq
// Unit tests for ResourceMatcher.matchResources: checks that an app's
// cpu/mem/disk/port requirements, constraints and accepted resource roles
// are matched (or rejected) against Mesos offers.
class ResourceMatcherTest extends MarathonSpec with Matchers {
  test("match with app.disk == 0, even if no disk resource is contained in the offer") {
    import scala.collection.JavaConverters._
    // Strip the disk resource from the basic offer to simulate an agent
    // advertising no disk at all; an app requesting disk = 0 must still match.
    val offerBuilder = makeBasicOffer()
    val diskResourceIndex = offerBuilder.getResourcesList.asScala.indexWhere(_.getName == "disk")
    offerBuilder.removeResources(diskResourceIndex)
    val offer = offerBuilder.build()
    offer.getResourcesList.asScala.find(_.getName == "disk") should be('empty)
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should not be empty
    val res = resOpt.get
    res.cpuRole should be("*")
    res.memRole should be("*")
    // No disk was requested, so no disk role is assigned.
    res.diskRole should be("")
    // check if we got 2 ports
    val range = res.ports.head.ranges.head
    (range.end - range.begin) should be (1)
  }
  test("match resources success") {
    // Happy path: default offer satisfies the app; all roles default to "*".
    val offer = makeBasicOffer().build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should not be empty
    val res = resOpt.get
    res.cpuRole should be("*")
    res.memRole should be("*")
    res.diskRole should be("")
    // check if we got 2 ports
    val range = res.ports.head.ranges.head
    (range.end - range.begin) should be (1)
  }
  test("match resources success with preserved roles") {
    // Offer resources carry the "marathon" role and the matcher is told to
    // accept it, so the matched roles reflect it.
    val offer = makeBasicOffer(role = "marathon").build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(
      offer, app,
      runningTasks = Set(), acceptedResourceRoles = Set("marathon"))
    resOpt should not be empty
    val res = resOpt.get
    res.cpuRole should be("marathon")
    res.memRole should be("marathon")
    res.diskRole should be("")
  }
  test("match resources failure because of incorrect roles") {
    // Offer resources belong to "marathon" but only "*" is accepted → no match.
    val offer = makeBasicOffer(role = "marathon").build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(
      offer, app,
      runningTasks = Set(), acceptedResourceRoles = Set("*"))
    resOpt should be ('empty)
  }
  test("match resources success with constraints") {
    // Hostname constraint matches the offer's hostname.
    val offer = makeBasicOffer(beginPort = 0, endPort = 0).setHostname("host1").build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      constraints = Set(
        Constraint.newBuilder
          .setField("hostname")
          .setOperator(Operator.LIKE)
          .setValue("host1")
          .build()
      )
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should not be empty
  }
  test("match resources fails on constraints") {
    // Hostname constraint requires "host2" but the offer is from "host1".
    val offer = makeBasicOffer(beginPort = 0, endPort = 0).setHostname("host1").build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      constraints = Set(
        Constraint.newBuilder
          .setField("hostname")
          .setOperator(Operator.LIKE)
          .setValue("host2")
          .build()
      )
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should be (empty)
  }
  test("match resources fail on cpu") {
    // Offer provides 0.1 cpus but the app needs 1.0.
    val offer = makeBasicOffer(cpus = 0.1).build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should be (empty)
  }
  test("match resources fail on mem") {
    // Offer provides 0.1 MB memory but the app needs 128 MB.
    val offer = makeBasicOffer(mem = 0.1).build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should be (empty)
  }
  test("match resources fail on disk") {
    // Offer provides 0.1 disk but the app needs 1.0.
    val offer = makeBasicOffer(disk = 0.1).build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 1.0,
      ports = Seq(0, 0)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should be (empty)
  }
  test("match resources fail on ports") {
    // Offer exposes only port 0 but the app requires fixed ports 1 and 2.
    val offer = makeBasicOffer(beginPort = 0, endPort = 0).build()
    val app = AppDefinition(
      id = "/test".toRootPath,
      cpus = 1.0,
      mem = 128.0,
      disk = 0.0,
      ports = Seq(1, 2)
    )
    val resOpt = ResourceMatcher.matchResources(offer, app, Set())
    resOpt should be (empty)
  }
}
| spacejam/marathon | src/test/scala/mesosphere/mesos/ResourceMatcherTest.scala | Scala | apache-2.0 | 5,346 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.crawler
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import com.typesafe.scalalogging.Logger
object CrawlerRatePrinter {
  /** Convenience factory mirroring the class constructor's curried shape. */
  def apply(crawlerId: String,
            printFrequency: Int,
            maxPrintRateMillis: Int)(logger: Logger): CrawlerRatePrinter = {
    new CrawlerRatePrinter(
      crawlerId = crawlerId,
      printFrequency = printFrequency,
      maxPrintRateMillis = maxPrintRateMillis
    )(logger)
  }
}
/**
 * Pass-through Akka Streams stage (Long in, same Long out) that periodically
 * logs the crawler's offset-consumption rate.
 *
 * A log line is emitted only when BOTH gates open: at least `printFrequency`
 * elements have passed since the last log line (cheap counter check), and at
 * least `maxPrintRateMillis` milliseconds have elapsed (clock check). The line
 * reports the overall average rate since stage creation plus a "current" rate
 * computed over a rolling window of roughly 15 seconds.
 */
class CrawlerRatePrinter(crawlerId: String,
                         printFrequency: Int,
                         maxPrintRateMillis: Int)(logger: Logger) extends GraphStage[FlowShape[Long, Long]] {
  val in = Inlet[Long]("CrawlerRatePrinter.in")
  val out = Outlet[Long]("CrawlerRatePrinter.out")
  override val shape = FlowShape.of(in, out)
  override def createLogic(attr: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      // Wall-clock time (ms) at stage creation; basis for the average rate.
      // Stored as Double so later divisions stay in floating point.
      private val startTime: Double = System.currentTimeMillis
      private var totalElementsGot: Long = 0L
      // Count / time at the moment of the last printed line, used to throttle
      // logging to at most once per maxPrintRateMillis.
      private var printedAtElementNo: Long = 0L
      private var printedAtTime: Double = startTime
      // Rolling ~15s window backing the "current" rate.
      private var localStartTime: Double = startTime
      private var localTotalElementsGot: Long = 0L
      private var localRate: Double = 0
      setHandler(
        in,
        new InHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            totalElementsGot += 1
            localTotalElementsGot += 1
            // Counter gate first: only touch the system clock every
            // printFrequency elements.
            if (totalElementsGot - printedAtElementNo >= printFrequency) {
              val currentTime = System.currentTimeMillis
              // Time gate: never log more often than maxPrintRateMillis.
              if (currentTime - printedAtTime > maxPrintRateMillis) {
                // Rates are offsets/second; times are in ms, hence * 1000.
                val rate = totalElementsGot / (currentTime - startTime) * 1000
                // Refresh the "current" rate once its window exceeds ~15s,
                // then restart the window.
                if (currentTime - localStartTime > 15000) {
                  localRate = localTotalElementsGot / (currentTime - localStartTime) * 1000
                  localTotalElementsGot = 0
                  localStartTime = currentTime
                }
                logger.info(s"$crawlerId Current offset is $elem. Total $totalElementsGot offsets already processed. " +
                  s"Read rate: avg: ${rate.formatted("%.2f")} current: ${localRate.formatted("%.2f")} offsets/second")
                printedAtElementNo = totalElementsGot
                printedAtTime = currentTime
              }
            }
            // Always forward the element unchanged — logging is a side effect.
            push(out, elem)
          }
        }
      )
      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          pull(in)
        }
      })
    }
}
| bryaakov/CM-Well | server/cmwell-bg/src/main/scala/cmwell/crawler/CrawlerRatePrinter.scala | Scala | apache-2.0 | 3,158 |
import java.util.{Calendar, GregorianCalendar}
/** Computes the moment exactly one gigasecond (10^9 seconds) after `initial`.
  * The input calendar is never mutated; `date` is an advanced copy of it. */
case class Gigasecond(initial: Calendar) {
  private val GigasecondInSeconds = 1000000000

  /** A copy of `initial`, moved forward by one gigasecond. */
  val date: Calendar = {
    // Calendar.clone returns Object, so the cast back to Calendar is required.
    val advanced = initial.clone().asInstanceOf[Calendar]
    advanced.add(Calendar.SECOND, GigasecondInSeconds)
    advanced
  }
}
| nlochschmidt/xscala | gigasecond/example.scala | Scala | mit | 215 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.collection.JavaConverters._
import scala.util.Random
import org.dmg.pmml.{OpType, PMML}
import org.dmg.pmml.regression.{RegressionModel => PMMLRegressionModel}
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.LinearDataGenerator
import org.apache.spark.sql.{DataFrame, Row}
class LinearRegressionSuite extends MLTest with DefaultReadWriteTest with PMMLReadWriteTest {
import testImplicits._
private val seed: Int = 42
@transient var datasetWithDenseFeature: DataFrame = _
@transient var datasetWithStrongNoise: DataFrame = _
@transient var datasetWithDenseFeatureWithoutIntercept: DataFrame = _
@transient var datasetWithSparseFeature: DataFrame = _
@transient var datasetWithWeight: DataFrame = _
@transient var datasetWithWeightConstantLabel: DataFrame = _
@transient var datasetWithWeightZeroLabel: DataFrame = _
@transient var datasetWithOutlier: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
datasetWithDenseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
datasetWithStrongNoise = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 100, seed, eps = 5.0), 2).map(_.asML).toDF()
/*
datasetWithDenseFeatureWithoutIntercept is not needed for correctness testing
but is useful for illustrating training model without intercept
*/
datasetWithDenseFeatureWithoutIntercept = sc.parallelize(
LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
val r = new Random(seed)
// When feature size is larger than 4096, normal optimizer is chosen
// as the solver of linear regression in the case of "auto" mode.
val featureSize = 4100
datasetWithSparseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Seq.fill(featureSize)(r.nextDouble()).toArray,
xMean = Seq.fill(featureSize)(r.nextDouble()).toArray,
xVariance = Seq.fill(featureSize)(r.nextDouble()).toArray, nPoints = 200,
seed, eps = 0.1, sparsity = 0.7), 2).map(_.asML).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
datasetWithWeight = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b.const <- c(17, 17, 17, 17)
w <- c(1, 2, 3, 4)
df.const.label <- as.data.frame(cbind(A, b.const))
*/
datasetWithWeightConstantLabel = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(17.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(17.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(17.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithWeightZeroLabel = sc.parallelize(Seq(
Instance(0.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(0.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(0.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(0.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithOutlier = {
val inlierData = LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 900, seed, eps = 0.1)
val outlierData = LinearDataGenerator.generateLinearInput(
intercept = -2.1, weights = Array(0.6, -1.2), xMean = Array(0.9, -1.3),
xVariance = Array(1.5, 0.8), nPoints = 100, seed, eps = 0.1)
sc.parallelize(inlierData ++ outlierData, 2).map(_.asML).toDF()
}
}
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy compared with R's glmnet package.
*/
ignore("export test data into CSV format") {
datasetWithDenseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithDenseFeature")
datasetWithDenseFeatureWithoutIntercept.rdd.map {
case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/LinearRegressionSuite/datasetWithDenseFeatureWithoutIntercept")
datasetWithSparseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithSparseFeature")
datasetWithOutlier.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithOutlier")
}
test("params") {
ParamsSuite.checkParams(new LinearRegression)
val model = new LinearRegressionModel("linearReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("linear regression: default params") {
val lir = new LinearRegression
assert(lir.getLabelCol === "label")
assert(lir.getFeaturesCol === "features")
assert(lir.getPredictionCol === "prediction")
assert(lir.getRegParam === 0.0)
assert(lir.getElasticNetParam === 0.0)
assert(lir.getFitIntercept)
assert(lir.getStandardization)
assert(lir.getSolver === "auto")
assert(lir.getLoss === "squaredError")
assert(lir.getEpsilon === 1.35)
val model = lir.fit(datasetWithDenseFeature)
MLTestingUtils.checkCopyAndUids(lir, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
model.transform(datasetWithDenseFeature)
.select("label", "prediction")
.collect()
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.intercept !== 0.0)
assert(model.scale === 1.0)
assert(model.hasParent)
val numFeatures = datasetWithDenseFeature.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
}
test("linear regression: can transform data with LinearRegressionModel") {
withClue("training related params like loss are only validated during fitting phase") {
val original = new LinearRegression().fit(datasetWithDenseFeature)
val deserialized = new LinearRegressionModel(uid = original.uid,
coefficients = original.coefficients,
intercept = original.intercept)
val output = deserialized.transform(datasetWithDenseFeature)
assert(output.collect().size > 0) // simple assertion to ensure no exception thrown
}
}
test("linear regression: illegal params") {
withClue("LinearRegression with huber loss only supports L2 regularization") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setElasticNetParam(0.5)
.fit(datasetWithDenseFeature)
}
}
withClue("LinearRegression with huber loss doesn't support normal solver") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setSolver("normal").fit(datasetWithDenseFeature)
}
}
}
test("linear regression handles singular matrices") {
// check for both constant columns with intercept (zero std) and collinear
val singularDataConstantColumn = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(1.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(1.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(1.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataConstantColumn)
// to make it clear that WLS did not solve analytically
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
val singularDataCollinearFeatures = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(10.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(14.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(22.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(26.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataCollinearFeatures)
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
}
test("linear regression with intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = new LinearRegression().setSolver(solver)
// The result should be the same regardless of standardization without regularization
val trainer2 = (new LinearRegression).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE, stringsAsFactors=FALSE)
features <- as.matrix(data.frame(as.numeric(data$V2), as.numeric(data$V3)))
label <- as.numeric(data$V1)
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.298698
as.numeric.data.V2. 4.700706
as.numeric.data.V3. 7.199082
*/
val interceptR = 6.298698
val coefficientsR = Vectors.dense(4.700706, 7.199082)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-3)
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setFitIntercept(false).setSolver(solver)
// Without regularization the results should be the same
val trainer2 = (new LinearRegression).setFitIntercept(false).setStandardization(false)
.setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val modelWithoutIntercept1 = trainer1.fit(datasetWithDenseFeatureWithoutIntercept)
val model2 = trainer2.fit(datasetWithDenseFeature)
val modelWithoutIntercept2 = trainer2.fit(datasetWithDenseFeatureWithoutIntercept)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.973403
as.numeric.data.V3. 5.284370
*/
val coefficientsR = Vectors.dense(6.973403, 5.284370)
assert(model1.intercept ~== 0 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR relTol 1E-2)
assert(model2.intercept ~== 0 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
/*
Then again with the data with no intercept:
> coefficientsWithoutIntercept
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data3.V2. 4.70011
as.numeric.data3.V3. 7.19943
*/
val coefficientsWithoutInterceptR = Vectors.dense(4.70011, 7.19943)
assert(modelWithoutIntercept1.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept1.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
assert(modelWithoutIntercept2.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept2.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
}
}
test("linear regression with intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver).setStandardization(false)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian",
alpha = 1.0, lambda = 0.57 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.242284
as.numeric.d1.V2. 4.019605
as.numeric.d1.V3. 6.679538
*/
val interceptR1 = 6.242284
val coefficientsR1 = Vectors.dense(4.019605, 6.679538)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.416948
as.numeric.data.V2. 3.893869
as.numeric.data.V3. 6.724286
*/
val interceptR2 = 6.416948
val coefficientsR2 = Vectors.dense(3.893869, 6.724286)
assert(model2.intercept ~== interceptR2 relTol 1E-3)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.272927
as.numeric.data.V3. 4.782604
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(6.272927, 4.782604)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.207817
as.numeric.data.V3. 4.775780
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(6.207817, 4.775780)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
// Ridge (glmnet alpha = 0) with intercept, with and without feature standardization.
// Expected intercept/coefficients come from R glmnet runs shown in the comments.
test("linear regression with intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.260103
as.numeric.d1.V2. 3.725522
as.numeric.d1.V3. 5.711203
*/
val interceptR1 = 5.260103
val coefficientsR1 = Vectors.dense(3.725522, 5.711203)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.790885
as.numeric.d1.V2. 3.432373
as.numeric.d1.V3. 5.919196
*/
val interceptR2 = 5.790885
val coefficientsR2 = Vectors.dense(3.432373, 5.919196)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
// Model predictions must equal the manual dot product with the fitted parameters.
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
// Same ridge settings as above but with fitIntercept=false; intercept must be exactly 0
// (checked with absTol since relTol is meaningless at 0).
test("linear regression without intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.493430
as.numeric.d1.V3. 4.223082
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.493430, 4.223082)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE, standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.244324
as.numeric.d1.V3. 4.203106
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.244324, 4.203106)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
// Elastic-net (glmnet alpha = 0.3, mixed L1/L2) with intercept, with and without
// standardization; expected values from R glmnet.
test("linear regression with intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.689855
as.numeric.d1.V2. 3.661181
as.numeric.d1.V3. 6.000274
*/
val interceptR1 = 5.689855
val coefficientsR1 = Vectors.dense(3.661181, 6.000274)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3, lambda = 1.6,
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.113890
as.numeric.d1.V2. 3.407021
as.numeric.d1.V3. 6.152512
*/
val interceptR2 = 6.113890
val coefficientsR2 = Vectors.dense(3.407021, 6.152512)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
// Elastic-net without intercept; intercept must be exactly 0 (absTol check).
test("linear regression without intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.643748
as.numeric.d1.V3. 4.331519
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.643748, 4.331519)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.455902
as.numeric.d1.V3. 4.312266
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.455902, 4.312266)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("prediction on single instance") {
  // Fit with all-default params; single-instance prediction must agree with the
  // batch transform path (checked by the shared helper).
  val model = new LinearRegression().fit(datasetWithDenseFeature)
  testPredictionModelSinglePrediction(model, datasetWithDenseFeature)
}
// Degenerate training data: a constant (or all-zero) label. Without an intercept the
// solution is a plain least-squares fit; with an intercept the model should return the
// label mean as intercept and zero coefficients. Expected values from R lm (see comment).
test("linear regression model with constant label") {
/*
R code:
for (formula in c(b.const ~ . -1, b.const ~ .)) {
model <- lm(formula, data=df.const.label, weights=w)
print(as.vector(coef(model)))
}
[1] -9.221298 3.394343
[1] 17 0 0
*/
val expected = Seq(
Vectors.dense(0.0, -9.221298, 3.394343),
Vectors.dense(17.0, 0.0, 0.0))
Seq("auto", "l-bfgs", "normal").foreach { solver =>
var idx = 0
for (fitIntercept <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightConstantLabel)
// actual1 packs (intercept, coef0, coef1) to compare against `expected(idx)`.
val actual1 = Vectors.dense(model1.intercept, model1.coefficients(0),
model1.coefficients(1))
assert(actual1 ~== expected(idx) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightConstantLabel.schema.fieldNames.toSet + model1.getPredictionCol)
.subsetOf(model1.summary.predictions.schema.fieldNames.toSet))
// All-zero labels must always yield the all-zero model, intercept or not.
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightZeroLabel)
val actual2 = Vectors.dense(model2.intercept, model2.coefficients(0),
model2.coefficients(1))
assert(actual2 ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightZeroLabel.schema.fieldNames.toSet + model2.getPredictionCol)
.subsetOf(model2.summary.predictions.schema.fieldNames.toSet))
idx += 1
}
}
}
test("regularized linear regression through origin with constant label") {
  // With fitIntercept=false and a non-zero regParam the problem is ill-defined,
  // so fitting must throw IllegalArgumentException for every solver and
  // standardization combination.
  for {
    solver <- Seq("auto", "l-bfgs", "normal")
    standardization <- Seq(false, true)
  } {
    val lr = new LinearRegression()
      .setFitIntercept(false)
      .setRegParam(0.1)
      .setStandardization(standardization)
      .setSolver(solver)
    intercept[IllegalArgumentException] {
      lr.fit(datasetWithWeightConstantLabel)
    }
  }
}
test("linear regression with l-bfgs when training is not needed") {
// When label is constant, l-bfgs solver returns results without training.
// There are two possibilities: If the label is non-zero but constant,
// and fitIntercept is true, then the model return yMean as intercept without training.
// If label is all zeros, then all coefficients are zero regardless of fitIntercept, so
// no training is needed.
for (fitIntercept <- Seq(false, true)) {
for (standardization <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightConstantLabel)
if (fitIntercept) {
// Short-circuited training leaves a single zero entry in the objective history.
assert(model1.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightZeroLabel)
assert(model2.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
}
}
// Exercises the training summary: prediction-column handling, residuals, MSE/MAE/r2/r2adj
// against R glm/lm output, and solver-specific objective-history behavior.
test("linear regression model training summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setPredictionCol("myPrediction")
val model = trainer.fit(datasetWithDenseFeature)
val trainerNoPredictionCol = trainer.setPredictionCol("")
val modelNoPredictionCol = trainerNoPredictionCol.fit(datasetWithDenseFeature)
// Training results for the model should be available
assert(model.hasSummary)
assert(modelNoPredictionCol.hasSummary)
// Schema should be a superset of the input dataset
assert((datasetWithDenseFeature.schema.fieldNames.toSet + model.getPredictionCol).subsetOf(
model.summary.predictions.schema.fieldNames.toSet))
// Validate that we re-insert a prediction column for evaluation
val modelNoPredictionColFieldNames
= modelNoPredictionCol.summary.predictions.schema.fieldNames
assert(datasetWithDenseFeature.schema.fieldNames.toSet.subsetOf(
modelNoPredictionColFieldNames.toSet))
assert(modelNoPredictionColFieldNames.exists(s => s.startsWith("prediction_")))
// Residuals in [[LinearRegressionResults]] should equal those manually computed
datasetWithDenseFeature.select("features", "label")
.rdd
.map { case Row(features: DenseVector, label: Double) =>
val prediction =
features(0) * model.coefficients(0) + features(1) * model.coefficients(1) +
model.intercept
label - prediction
}
.zip(model.summary.residuals.rdd.map(_.getDouble(0)))
.collect()
.foreach { case (manualResidual: Double, resultResidual: Double) =>
assert(manualResidual ~== resultResidual relTol 1E-5)
}
/*
# Use the following R code to generate model training results.
# path/part-00000 is the file generated by running LinearDataGenerator.generateLinearInput
# as described before the beforeAll() method.
d1 <- read.csv("path/part-00000", header=FALSE, stringsAsFactors=FALSE)
fit <- glm(V1 ~ V2 + V3, data = d1, family = "gaussian")
f1 <- data.frame(as.numeric(d1$V2), as.numeric(d1$V3))
names(f1)[1] = c("V2")
names(f1)[2] = c("V3")
predictions <- predict(fit, newdata=f1)
l1 <- as.numeric(d1$V1)
residuals <- l1 - predictions
> mean(residuals^2) # MSE
[1] 0.00985449
> mean(abs(residuals)) # MAD
[1] 0.07961668
> cor(predictions, l1)^2 # r^2
[1] 0.9998737
> summary(fit)
Call:
glm(formula = V1 ~ V2 + V3, family = "gaussian", data = d1)
Deviance Residuals:
Min 1Q Median 3Q Max
-0.47082 -0.06797 0.00002 0.06725 0.34635
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.3022157 0.0018600 3388 <2e-16 ***
V2 4.6982442 0.0011805 3980 <2e-16 ***
V3 7.1994344 0.0009044 7961 <2e-16 ***
# R code for r2adj
lm_fit <- lm(V1 ~ V2 + V3, data = d1)
summary(lm_fit)$adj.r.squared
[1] 0.9998736
---
....
*/
assert(model.summary.meanSquaredError ~== 0.00985449 relTol 1E-4)
assert(model.summary.meanAbsoluteError ~== 0.07961668 relTol 1E-4)
assert(model.summary.r2 ~== 0.9998737 relTol 1E-4)
assert(model.summary.r2adj ~== 0.9998736 relTol 1E-4)
// Normal solver uses "WeightedLeastSquares". If no regularization is applied or only L2
// regularization is applied, this algorithm uses a direct solver and does not generate an
// objective history because it does not run through iterations.
if (solver == "l-bfgs") {
// Objective function should be monotonically decreasing for linear regression
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
} else {
// To clarify that the normal solver is used here.
assert(model.summary.objectiveHistory.length == 1)
assert(model.summary.objectiveHistory(0) == 0.0)
// Normal solver exposes diagnostic statistics; values are from the R summary above.
// Note the se/t arrays are ordered (V2, V3, Intercept).
val devianceResidualsR = Array(-0.47082, 0.34635)
val seCoefR = Array(0.0011805, 0.0009044, 0.0018600)
val tValsR = Array(3980, 7961, 3388)
val pValsR = Array(0, 0, 0)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.tValues.map(_.round).zip(tValsR).foreach{ x => assert(x._1 === x._2) }
model.summary.pValues.map(_.round).zip(pValsR).foreach{ x => assert(x._1 === x._2) }
}
}
}
// Evaluating a model on its own training dataset must reproduce the training summary.
test("linear regression model testset evaluation summary") {
  Seq("auto", "l-bfgs", "normal").foreach { solver =>
    val trainer = new LinearRegression().setSolver(solver)
    val model = trainer.fit(datasetWithDenseFeature)
    // Evaluating on training dataset should yield results summary equal to training summary
    val testSummary = model.evaluate(datasetWithDenseFeature)
    assert(model.summary.meanSquaredError ~== testSummary.meanSquaredError relTol 1E-5)
    assert(model.summary.r2 ~== testSummary.r2 relTol 1E-5)
    // BUG FIX: the original piped the pairwise comparison through `forall` and
    // discarded the resulting Boolean, so residual mismatches could never fail
    // the test. Assert each residual pair explicitly instead.
    model.summary.residuals.select("residuals").collect()
      .zip(testSummary.residuals.select("residuals").collect())
      .foreach { case (Row(r1: Double), Row(r2: Double)) =>
        assert(r1 ~== r2 relTol 1E-5)
      }
  }
}
// Sample weights must behave sanely: scaling all weights, down-weighting outliers, and
// oversampling-vs-weighting equivalence, for both squared-error and huber losses.
test("linear regression with weighted samples") {
val sqlContext = spark.sqlContext
import sqlContext.implicits._
// numClasses = 0 marks this as a regression problem for the shared test helpers.
val numClasses = 0
def modelEquals(m1: LinearRegressionModel, m2: LinearRegressionModel): Unit = {
assert(m1.coefficients ~== m2.coefficients relTol 0.01)
assert(m1.intercept ~== m2.intercept relTol 0.01)
}
val testParams = Seq(
// (elasticNetParam, regParam, fitIntercept, standardization)
(0.0, 0.21, true, true),
(0.0, 0.21, true, false),
(0.0, 0.21, false, false),
(1.0, 0.21, true, true)
)
// For squaredError loss
for (solver <- Seq("auto", "l-bfgs", "normal");
(elasticNetParam, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setElasticNetParam(elasticNetParam)
.setSolver(solver)
.setMaxIter(1)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals, seed)
}
// For huber loss
for ((_, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setLoss("huber")
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setMaxIter(1)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals, seed)
}
}
test("linear regression model with l-bfgs with big feature datasets") {
// NOTE(review): solver is "auto" here; the test name suggests "auto" is expected to
// pick l-bfgs for the sparse, high-dimensional dataset — confirm against the
// solver-selection logic if this assertion ever starts failing.
val trainer = new LinearRegression().setSolver("auto")
val model = trainer.fit(datasetWithSparseFeature)
// Training results for the model should be available
assert(model.hasSummary)
// When LBFGS is used as optimizer, objective history can be restored.
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
// Normal-solver summaries on weighted data must match R glm diagnostics (coefficients,
// deviance residuals, standard errors, t- and p-values), with and without an intercept.
test("linear regression summary with weighted samples and intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ .", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ .", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.920 -1.358 -1.109 0.960
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 18.080 9.608 1.882 0.311
V1 6.080 5.556 1.094 0.471
V2 -0.600 1.960 -0.306 0.811
(Dispersion parameter for gaussian family taken to be 7.68)
Null deviance: 202.00 on 3 degrees of freedom
Residual deviance: 7.68 on 1 degrees of freedom
AIC: 18.783
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.fit(datasetWithWeight)
// Expected values from the R summary above; se/t/p arrays are ordered (V1, V2, Intercept).
val coefficientsR = Vectors.dense(Array(6.080, -0.600))
val interceptR = 18.080
val devianceResidualsR = Array(-1.358, 1.920)
val seCoefR = Array(5.556, 1.960, 9.608)
val tValsR = Array(1.094, -0.306, 1.882)
val pValsR = Array(0.471, 0.811, 0.311)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
// With L1 regularization the normal solver falls back to an iterative method, so a
// real (monotonically decreasing) objective history must be produced.
val modelWithL1 = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setRegParam(0.5)
.setElasticNetParam(1.0)
.fit(datasetWithWeight)
assert(modelWithL1.summary.objectiveHistory !== Array(0.0))
assert(
modelWithL1.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and w/o intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ . -1", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ . -1", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.950 2.344 -4.600 2.103
Coefficients:
Estimate Std. Error t value Pr(>|t|)
V1 -3.7271 2.9032 -1.284 0.3279
V2 3.0100 0.6022 4.998 0.0378 *
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for gaussian family taken to be 17.4376)
Null deviance: 5962.000 on 4 degrees of freedom
Residual deviance: 34.875 on 2 degrees of freedom
AIC: 22.835
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setFitIntercept(false)
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-3.7271, 3.0100))
val interceptR = 0.0
val devianceResidualsR = Array(-4.600, 2.344)
val seCoefR = Array(2.9032, 0.6022)
val tValsR = Array(-1.284, 4.998)
val pValsR = Array(0.3279, 0.0378)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
// Intercept is exactly 0.0 when fitIntercept=false, so strict equality is used.
assert(model.intercept === interceptR)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
}
test("read/write") {
  // A persisted-then-reloaded model must carry identical fitted parameters.
  def checkModelData(original: LinearRegressionModel, loaded: LinearRegressionModel): Unit = {
    assert(original.intercept === loaded.intercept)
    assert(original.coefficients === loaded.coefficients)
  }
  testEstimatorAndModelReadWrite(new LinearRegression(), datasetWithWeight,
    LinearRegressionSuite.allParamSettings, LinearRegressionSuite.allParamSettings,
    checkModelData)
}
// PMML export: the generated document must describe both features plus the label and
// carry the fitted coefficients in its regression table.
test("pmml export") {
val lr = new LinearRegression()
val model = lr.fit(datasetWithWeight)
def checkModel(pmml: PMML): Unit = {
val dd = pmml.getDataDictionary
// 2 features + 1 label field.
assert(dd.getNumberOfFields === 3)
val fields = dd.getDataFields.asScala
assert(fields(0).getName().toString === "field_0")
assert(fields(0).getOpType() == OpType.CONTINUOUS)
val pmmlRegressionModel = pmml.getModels().get(0).asInstanceOf[PMMLRegressionModel]
val pmmlPredictors = pmmlRegressionModel.getRegressionTables.get(0).getNumericPredictors
val pmmlWeights = pmmlPredictors.asScala.map(_.getCoefficient()).toList
assert(pmmlWeights(0) ~== model.coefficients(0) relTol 1E-3)
assert(pmmlWeights(1) ~== model.coefficients(1) relTol 1E-3)
}
testPMMLWrite(sc, model, checkModel)
}
// Label/weight columns of any NumericType must be accepted and give identical fits;
// non-numeric types must be rejected (handled inside the shared helper).
test("should support all NumericType labels and weights, and not support other types") {
for (solver <- Seq("auto", "l-bfgs", "normal")) {
val lr = new LinearRegression().setMaxIter(1).setSolver(solver)
MLTestingUtils.checkNumericTypes[LinearRegressionModel, LinearRegression](
lr, spark, isClassification = false) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients === actual.coefficients)
}
}
}
// Huber-loss fits are validated against scikit-learn's HuberRegressor (Python code in
// the comments). Without regularization, standardization must not change the solution.
test("linear regression (huber loss) with intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Using the following Python code to load the data and train the model using
scikit-learn package.
import pandas as pd
import numpy as np
from sklearn.linear_model import HuberRegressor
df = pd.read_csv("path", header = None)
X = df[df.columns[1:3]]
y = np.array(df[df.columns[0]])
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 4.68998007, 7.19429011])
>>> huber.intercept_
6.3002404351083037
>>> huber.scale_
0.077810159205220747
*/
val coefficientsPy = Vectors.dense(4.68998007, 7.19429011)
val interceptPy = 6.30024044
val scalePy = 0.07781016
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
assert(model1.intercept ~== interceptPy relTol 1E-3)
assert(model1.scale ~== scalePy relTol 1E-3)
// Without regularization, with or without standardization will converge to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept ~== interceptPy relTol 1E-3)
assert(model2.scale ~== scalePy relTol 1E-3)
}
test("linear regression (huber loss) without intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 6.71756703, 5.08873222])
>>> huber.intercept_
0.0
>>> huber.scale_
2.5560209922722317
*/
val coefficientsPy = Vectors.dense(6.71756703, 5.08873222)
val interceptPy = 0.0
val scalePy = 2.55602099
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
// fitIntercept=false forces the intercept to exactly 0.0, so strict equality is used.
assert(model1.intercept === interceptPy)
assert(model1.scale ~== scalePy relTol 1E-3)
// Without regularization, with or without standardization will converge to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept === interceptPy)
assert(model2.scale ~== scalePy relTol 1E-3)
}
// With L2 regularization, standardization changes the solution, so the two models are
// compared against two distinct scikit-learn runs (manually standardized vs raw X).
test("linear regression (huber loss) with intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually out of the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 1.97732633, 3.38816722])
>>> huber.intercept_
3.7527581430531227
>>> huber.scale_
3.787363673371801
*/
val coefficientsPy1 = Vectors.dense(1.97732633, 3.38816722)
val interceptPy1 = 3.75275814
val scalePy1 = 3.78736367
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept ~== interceptPy1 relTol 1E-2)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 1.73346444, 3.63746999])
>>> huber.intercept_
4.3017134790781739
>>> huber.scale_
3.6472742809286793
*/
val coefficientsPy2 = Vectors.dense(1.73346444, 3.63746999)
val interceptPy2 = 4.30171347
val scalePy2 = 3.64727428
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept ~== interceptPy2 relTol 1E-3)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("linear regression (huber loss) without intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually out of the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 2.59679008, 2.26973102])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5766311924091791
*/
val coefficientsPy1 = Vectors.dense(2.59679008, 2.26973102)
val interceptPy1 = 0.0
val scalePy1 = 4.57663119
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept === interceptPy1)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 2.28423908, 2.25196887])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5979643506051753
*/
val coefficientsPy2 = Vectors.dense(2.28423908, 2.25196887)
val interceptPy2 = 0.0
val scalePy2 = 4.59796435
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept === interceptPy2)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("huber loss model match squared error for large epsilon") {
  // As epsilon grows, huber loss approaches squared error, so a huber fit with a
  // huge epsilon should land on (almost) the least-squares solution.
  val huberModel = new LinearRegression().setLoss("huber").setEpsilon(1E5)
    .fit(datasetWithOutlier)
  val leastSquaresModel = new LinearRegression().fit(datasetWithOutlier)
  assert(huberModel.coefficients ~== leastSquaresModel.coefficients relTol 1E-3)
  assert(huberModel.intercept ~== leastSquaresModel.intercept relTol 1E-3)
}
}
object LinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"predictionCol" -> "myPrediction",
"regParam" -> 0.01,
"elasticNetParam" -> 0.1,
"maxIter" -> 2, // intentionally small
"fitIntercept" -> true,
"tol" -> 0.8, // loose tolerance so the tiny maxIter still "converges"
"standardization" -> false,
"solver" -> "l-bfgs"
)
}
| pgandhi999/spark | mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala | Scala | apache-2.0 | 52,682 |
package apsu.demo.rocks.components.geometry
/**
 * Marker component for entities such as bullets and enemy saucers
 * that should not wrap but rather travel across the screen once
 * and disappear
 *
 * @author david
 */
// NOTE(review): a field-less marker could likely be a `case object` (one shared
// instance, no per-entity allocation) — confirm the ECS requires distinct
// component instances before changing.
case class DeleteAtEdge()
| chronodm/apsu-demo-scala | src/main/scala/apsu/demo/rocks/components/geometry/DeleteAtEdge.scala | Scala | mit | 248 |
package org.openworm.trackercommons.minimal
// A fully self-contained WCON format reader
object Reader {
import fastparse.all._
// Begin full JSON parser
// W wraps a parser so it tolerates (and consumes) surrounding whitespace.
def W[A](p: P[A]) = CharsWhile(_.isWhitespace).? ~ p ~ CharsWhile(_.isWhitespace).?
val Null = P("null").map(_ => null)
val Bool = P("true").map(_ => true) | P("false").map(_ => false)
val Hex = CharIn("0123456789ABCDEFabcdef")
// Esc handles JSON backslash escapes, including \uXXXX code points.
val Esc = "\\\\" ~ (
P("b").map(_ => "\\b") |
P("f").map(_ => "\\f") |
P("n").map(_ => "\\n") |
P("r").map(_ => "\\r") |
P("t").map(_ => "\\t") |
CharIn("\\\\/\\"").! |
"u" ~ (Hex ~ Hex ~ Hex ~ Hex).!.map(h4 => java.lang.Integer.parseInt(h4, 16).toChar.toString)
)
val Str = "\\"" ~ (CharsWhile(c => c != '"' && c != '\\\\').! | Esc).rep.map(_.mkString) ~ "\\""
val Digits = CharsWhile(c => c >= '0' & c <= '9')
// Numbers are parsed as Float, not Double — downstream code pattern-matches on Float.
val Num = ("-".? ~ ("0" | Digits) ~ ("." ~ Digits).? ~ (CharIn("eE") ~ CharIn("+-").? ~ Digits).?).!.map(_.toFloat)
val Arr = P("[" ~ All.rep(sep = ",").map(_.toArray) ~ "]")
val KeyVal = P(W(Str ~ W(":") ~ All))
val Obj = "{" ~ KeyVal.rep(sep = ",").map(_.toMap) ~ "}"
// All: any JSON value, as Map / Array[Any] / Float / String / Boolean / null.
val All: P[Any] = W(Obj | Arr | Num | Str | Bool | Null)
// End of full JSON parser
// Begin of extraction of data from Json
/** Converts one parsed JSON "data" entry into a [[Worm]].
  *
  * Required keys: "id" (string or number), "t", "x", "y".  Optional origin
  * ("ox"/"oy") or centroid ("cx"/"cy") offsets are added onto the x/y
  * coordinates, one offset per timepoint.  All array shapes are validated
  * against the number of timepoints; the first problem found is reported as
  * `Left(message)`, otherwise a `Right(worm)` is returned.
  *
  * @param entry one object from the WCON "data" section (output of the JSON parser above)
  * @param index position of the entry in the data array (passed through to the Worm)
  */
def wormify(entry: Map[String, Any], index: Int): Either[String, Worm] = {
  // Reject entries missing any mandatory key, listing every missing one.
  val hasNot = Seq("id", "t", "x", "y").filterNot(entry contains _)
  if (hasNot.nonEmpty) return Left(hasNot.map(x => s"no $x").mkString("\\n"))
  // id may be a string or a number; integral floats print without a trailing ".0".
  val id = entry("id") match {
    case s: String => s
    case f: Float => if (f.toInt == f) f.toInt.toString else f.toString
    case _ => return Left(s"id is neither a string nor a number")
  }
  // t may be null (unknown time), a single number, or an array of numbers.
  val ts = entry("t") match {
    case null => Array(Float.NaN)
    case f: Float => Array(f)
    case a: Array[Any] =>
      val b = a.collect{ case null => Float.NaN; case f: Float => f }
      if (b.length != a.length) return Left(s"found non-numeric value in t")
      b
    case _ => return Left(s"t is neither a number or array of numbers")
  }
  // Per-timepoint x/y offsets from origin or centroid keys; default is 0 everywhere.
  val List(oxs, oys): List[Array[Float]] = List( List("ox", "cx"), List("oy", "cy") ).map{
    case q :: qq :: Nil => entry.getOrElse(q, entry.getOrElse(qq, null)) match {
      case null => Array.fill(ts.length)(0f)
      case f: Float => Array.fill(ts.length)(f)
      case a: Array[Any] =>
        val b = a.collect{ case null => Float.NaN; case f: Float => f }
        if (a.length != ts.length || b.length != ts.length) return Left("Origin/centroid size doesn't match timepoint length")
        b
    }
    case _ => throw new Exception("Implementation error. This pattern match should never fail.")
  }
  // x/y may be a scalar, a flat array (one value per point, or per timepoint),
  // or an array of arrays (points per timepoint); offsets are added in place.
  val List(xss, yss) = List(("x", oxs), ("y",oys)).map{ case (q,o) => entry(q) match {
    case null => Array(Array(Float.NaN))
    case f: Float => Array(Array(f + o(0)))
    case a: Array[Any] =>
      val b = a.collect{ case null => Float.NaN; case f: Float => f }
      if (b.length == a.length) {
        // Flat numeric array: many points at one time, or one point per timepoint.
        if (ts.length == 1) {
          for (i <- b.indices) b(i) += o(0)
          Array(b)
        }
        else {
          if (b.length != ts.length) return Left(s"mismatched sizes for t and $q")
          b.zipWithIndex.map{ case (f,i) => Array(f + o(i)) }
        }
      }
      else {
        // Nested array: one array of coordinates per timepoint.
        val c = a.collect{
          case null => Array(Float.NaN)
          case f: Float => Array(f)
          case xs: Array[Any] =>
            val ys = xs.collect{ case null => Float.NaN; case f: Float => f }
            // BUG FIX: these messages interpolate $q / ${ts(i)} and therefore need
            // the `s` interpolator (the sibling message above already uses it);
            // without it the literal text "$q" was emitted to the caller.
            if (xs.length != ys.length) return Left(s"found non-numeric value in $q")
            ys
        }
        if (c.length != a.length) return Left(s"found non-numeric value in $q")
        if (c.length != ts.length) return Left(s"mismatched sizes for t and $q")
        for {i <- c.indices; j <- c(i).indices} c(i)(j) += o(i)
        c
      }
  }}
  for (i <- xss.indices) if (xss(i).length != yss(i).length) return Left(s"x and y lengths fail to match at timepoint ${ts(i)}")
  Right(new Worm(id, ts, xss, yss, index))
}
// End of extraction of data from JSON
// Interpretation of JSON as WCON (not counting extraction of data)
  /** Interprets a string as WCON: parses it as JSON, validates that the mandatory
    * `units` object (with entries for t, x, and y) and a `data` entry are present,
    * converts each data entry to a worm via `wormify`, and merges worms sharing an id.
    * Returns `Left(message)` on any parse or validation failure.
    */
  def apply(wcon: String): Either[String, Array[Worm]] = All.parse{wcon} match {
    case rf: Parsed.Failure =>
      Left("Error parsing WCON: not valid JSON.\\nParse error:\\n" + rf.toString)
    case Parsed.Success(j, _) => j match {
      case m: Map[String @unchecked, Any @unchecked] =>
        // Structural validation of the top-level object before touching the data.
        if (!m.contains("units")) Left("Error parsing WCON: no units")
        else if (!m("units").isInstanceOf[Map[_,_]]) Left("Error parsing WCON: units is not an object")
        else if (!Seq("t", "x", "y").forall(v => m("units").asInstanceOf[Map[String, Any]] contains v))
          Left("Error parsing WCON: units object must specify units for t, x, and y")
        else if (!m.contains("data")) Left("Error parsing WCON: no data")
        else m("data") match {
          // A single data object: exactly one worm, given index 0.
          case e: Map[String @unchecked, Any @unchecked] => wormify(e, 0).right.map(worm => Array(worm))
          case js: Array[Any] =>
            // Every element of the data array must itself be an object.
            val es = js.collect{ case e: Map[String @unchecked, Any @unchecked] => e }
            if (js.length != es.length) Left("Error parsing WCON: data array contains things that are not objects")
            else {
              // Convert each entry; collect all failures so the message reports every bad entry at once.
              val perhapsWorms = es.zipWithIndex.map{ case (e,i) => wormify(e, i) -> i }
              val badWorms = perhapsWorms.collect{ case (Left(s), i) => s" Bad entry $i: $s" }.mkString("\\n")
              if (badWorms.nonEmpty) Left("Error parsing WCON: bad data entries\\n"+badWorms)
              else Right(
                // Worms with the same id are fragments of one track: merge them.
                perhapsWorms.collect{ case (Right(worm), _) => worm }.
                  groupBy(_.id).
                  map{ case (id, worms) => Worm merge worms }.
                  toArray
              )
            }
          case _ => Left("Error parsing WCON: data is neither an object nor an array of objects")
        }
      case _ => Left("Error parsing WCON: not a JSON object")
    }
  }
// End of interpretation of JSON as WCON
}
| Ichoran/tracker-commons | src/scala/src/minimal/MinimalReader.scala | Scala | mit | 6,086 |
package fr.laas.fape.anml.model
import fr.laas.fape.anml
import fr.laas.fape.anml.ANMLException
import fr.laas.fape.anml.parser.PType
import fr.laas.fape.anml.parser
import scala.collection.JavaConversions._
import scala.collection.mutable
/** Representation of an ANML function.
*
* @param name Name of the function. the function name can be of the form `functionName` if it is defined at the root of
* an anml problem or `typeName.scopedFunctionName` if it is defined in a type
* @param valueType Type of the function's value.
* @param argTypes Types of the arguments of the function in order.
* @param isConstant True if this function is defined as constant. False otherwise
*/
abstract class Function(val name:String, val valueType:Type, val argTypes:List[Type], val isConstant:Boolean) {

  /** Readable signature, e.g. `aFunction(TypeA,TypeB):ValueType`. */
  override def toString = s"$name(${argTypes.mkString(",")}):$valueType"

  /** Builds a version of this function defined as if it was scoped inside `containingType`.
    * E.g. for `AType aFunction(Object)`, scoping in `Container` yields
    * `AType Container.aFunction(Container, Object)`.
    *
    * @param containingType Type in which the function was declared.
    * @return The scoped version of the function.
    */
  def scoped(containingType : Type) : Function
}
/** Representation of an ANML symbolic function.
*
* @param name Name of the function. the function name can be of the form `functionName` if it is defined at the root of
* an anml problem or `typeName.scopedFunctionName` if it is defined in a type
* @param valueType Type of the function's value.
* @param argTypes Types of the arguments of the function in order.
* @param isConstant True if this function is defined as constant. False otherwise
*/
class SymFunction(name:String, valueType:Type, argTypes:List[Type], isConstant:Boolean)
  extends Function(name, valueType, argTypes, isConstant)
{
  /** Qualifies the name with the container type and prepends the container to the argument list. */
  def scoped(containingType : Type) : SymFunction = {
    val qualifiedName = s"$containingType.$name"
    new SymFunction(qualifiedName, valueType, containingType :: argTypes, isConstant)
  }
}
/** Representation of an ANML numeric function (on float or integer).
* See derived classes [[fr.laas.fape.anml.model.IntFunction]] and [[fr.laas.fape.anml.model.FloatFunction]]
* for concrete implementations.
*
* @param name Name of the function. the function name can be of the form `functionName` if it is defined at the root of
* an anml problem or `typeName.scopedFunctionName` if it is defined in a type
* @param valueType Type of the function's value.
* @param argTypes Types of the arguments of the function in order.
* @param isConstant True if this function is defined as constant. False otherwise
* @param resourceType Type of the resource which can be one of consumable, replenishable, reusable or producible.
* If it is an empty String, then it is a generic resource.
*/
abstract class NumFunction(name:String, valueType:NumericType, argTypes:List[Type], isConstant:Boolean, val resourceType:String)
  extends Function(name, valueType, argTypes, isConstant)
{

  /** Returns true if this function has a specific resource type in consumable, replenishable, reusable or producible.
    * If this is the case, you should check `resourceType` to see what it is.
    * An empty string denotes a generic resource (see the class doc).
    */
  def hasSpecificResourceType : Boolean = resourceType != ""
}
/** Representation of an ANML function on integers.
*
* @param name Name of the function. the function name can be of the form `functionName` if it is defined at the root of
* an anml problem or `typeName.scopedFunctionName` if it is defined in a type
* @param argTypes Types of the arguments of the function in order.
* @param isConstant True if this function is defined as constant. False otherwise
* @param minValue Minimum possible value of the function. It is set to `Integer.MIN_VALUE` if no lower bound is
* specified in the ANML model.
* @param maxValue Maximum possible value of the function. It is set to `Integer.MAX_VALUE` if no upper bound is
* specified in the ANML model.
* @param resourceType Type of the resource which can be one of consumable, replenishable, reusable or producible.
* If it is an empty String, then it is a generic resource.
*/
class IntFunction(name:String, argTypes:List[Type], isConstant:Boolean, val minValue:Int, val maxValue:Int, resourceType:String)
  extends NumFunction(name, TInteger, argTypes, isConstant, resourceType)
{
  // Rejects a degenerate upper bound. NOTE(review): presumably Int.MinValue is used
  // elsewhere as an "unset" sentinel — confirm against callers.
  require(maxValue > Int.MinValue)

  /** Scoped copy: name qualified by the container, container type prepended to the arguments. */
  def scoped(container : Type) : IntFunction =
    new IntFunction(s"$container.$name", container :: argTypes, isConstant, minValue, maxValue, resourceType)
}
/** Storage for functions found in an [[fr.laas.fape.anml.model.AnmlProblem]] */
class FunctionManager(val pb:AnmlProblem) {

  /**
   * Maps function name to definition.
   * Those functions have an implicit time parameter which is dealt with externally.
   */
  private val functions = mutable.Map[String, Function]()

  /** Converts a function from the parser model and adds it to this function manager. */
  def addFunction(f:anml.parser.Function) {
    addFunction(buildFunction(f))
  }

  /** Builds a Function from the output of the ANML parser. */
  private def buildFunction(f : anml.parser.Function) : Function = {
    // Resolves a parsed type name against the problem's declared types.
    def t(typeName: PType) = pb.instances.asType(typeName)
    f match {
      case parser.SymFunction(name, args, tipe, isConstant) =>
        new SymFunction(name, t(tipe), args.map(a => t(a.tipe)), isConstant)
      case parser.IntFunction(name, args, tipe, isConstant, min, max, Some(resourceType)) =>
        new IntFunction(name, args.map(a => t(a.tipe)), isConstant, min, max, resourceType)
      case parser.IntFunction(name, args, tipe, isConstant, min, max, None) =>
        // No explicit resource type: "" marks a generic resource (see NumFunction).
        new IntFunction(name, args.map(a => t(a.tipe)), isConstant, min, max, "")
      case _ =>
        // Robustness: unsupported declarations (e.g. float functions) previously
        // surfaced as an opaque MatchError; fail with a domain error instead.
        throw new ANMLException("Unsupported function declaration: "+f)
    }
  }

  /**
   * Adds a function declared in the scope of a type.
   *
   * For instance a function location(Loc a) declared in a type robot results in
   * a function Robot.location(Robot r, Loc a)
   *
   * @param scope Name of the type in which the function is declared
   * @param f function definition
   */
  def addScopedFunction(scope:Type, f:anml.parser.Function) {
    addFunction(buildFunction(f).scoped(scope))
  }

  /** Registers a function, rejecting duplicate names. */
  def addFunction(func : Function) {
    // Bug fix: `functions` is keyed by name, so duplicate detection must use
    // `func.name`; checking membership with the Function instance never matched.
    assert(!functions.contains(func.name), "Function "+func.name+" already exists.")
    functions(func.name) = func
  }

  /** Look up if there exists a function with this name.
   *
   * @param funcName Name of the function to look up.
   * @return True if such a function exists, False otherwise.
   */
  def isDefined(funcName:String) = functions.contains(funcName)

  /** Returns true if the given function name maps to a constant function.
   *
   * @param funcName Name of the function to look up.
   * @return True if the function exists and is constant, False otherwise.
   */
  // Bug fix: this previously only checked that the name was registered, which made
  // every known function look constant. It now checks the isConstant flag.
  def isConstantFunc(funcName:String) = functions.get(funcName).exists(_.isConstant)

  /** Returns all functions stored in this function manager (as a Java list). */
  def getAll : java.util.List[Function] = seqAsJavaList(functions.values.toList)

  /** Returns all functions stored in this function manager. */
  def all : Iterable[Function] = functions.values

  /** Finds the definition of the function with the given name.
   *
   * @param functionName Name of the function to look up.
   * @return The function definition. Throws an [[ANMLException]] if no such function can be found.
   */
  def get(functionName:String) : Function =
    functions.getOrElse(functionName,
      throw new ANMLException("Unknown function name: "+functionName))
}
| athy/fape | anml-parser/src/main/scala/fr/laas/fape/anml/model/FunctionManager.scala | Scala | bsd-2-clause | 7,700 |
package org.scaladebugger.test.events
/**
* Provides test of hitting a breakpoint event in a loop, used to verify
* events received AND that stopping the reception of an event results in
* not receiving it on the client anymore.
*
* @note Should place breakpoints on lines 13, 16, and 19.
*/
object LoopingEvent extends App { // NOTE(review): the header comment pins breakpoints to lines 13, 16 and 19 — line layout is load-bearing, verify after any edit
  var x = 0 // toggled between 0 and 1 on every pass of the loop
  while (true) {
    val newValue = if (x != 0) 0 else 1 // breakpoint target: computes the toggled value
    x = newValue // breakpoint target: publishes the toggled value
    // Release resources and make possible to interrupt
    Thread.sleep(1) // breakpoint target: sleeping also keeps the loop interruptible
  }
}
| ensime/scala-debugger | scala-debugger-test/src/main/scala/org/scaladebugger/test/events/LoopingEvent.scala | Scala | apache-2.0 | 504 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus
import com.twitter.storehaus.cache.MutableCache
import com.twitter.util.Future
import scala.collection.breakOut
/*
This will eagerly read/write to the backing gage when doing operations against
its underlying store.
It will not wait for the return of the network call to update the cache entry.
The cache itself is assumed to be backed by a thread safe implementation.
*/
class EagerWriteThroughCacheStore[K, V](
    store: Store[K, V], threadSafeCache: MutableCache[K, Future[Option[V]]]) extends Store[K, V] {

  /** Updates the cache immediately, then forwards the write to the backing store
    * without waiting for the network call to complete. */
  override def put(kv: (K, Option[V])): Future[Unit] = {
    val (key, value) = kv
    threadSafeCache += ((key, Future.value(value)))
    store.put(kv)
  }

  /** Serves a cache hit directly; otherwise reads through to the backing store. */
  override def get(k: K): Future[Option[V]] =
    threadSafeCache.get(k) match {
      case Some(cachedValue) => cachedValue
      case None => store.get(k)
    }

  /** Answers cached keys locally and fetches only the misses from the backing
    * store, eagerly inserting the fetched futures back into the cache. */
  override def multiGet[K1 <: K](keys: Set[K1]): Map[K1, Future[Option[V]]] = {
    val hits: Map[K1, Future[Option[V]]] =
      keys.iterator.flatMap(k => threadSafeCache.get(k).map(k -> _)).toMap
    val misses = store.multiGet(keys -- hits.keySet)
    misses.foreach { case (k, fetched) => threadSafeCache += ((k, fetched)) }
    hits ++ misses
  }

  /** Eagerly caches every written value, then forwards the whole batch to the store. */
  override def multiPut[K1 <: K](kvs: Map[K1, Option[V]]): Map[K1, Future[Unit]] = {
    kvs.foreach { case (k, optV) => threadSafeCache += ((k, Future.value(optV))) }
    store.multiPut(kvs)
  }
}
| twitter/storehaus | storehaus-core/src/main/scala/com/twitter/storehaus/EagerWriteThroughCacheStore.scala | Scala | apache-2.0 | 2,002 |
package com.massrelevance.dropwizard.scala.params.tests
import javax.ws.rs.WebApplicationException
import com.massrelevance.dropwizard.scala.params.IntParam
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
class IntParamTest extends FlatSpec with ShouldMatchers {

  "A valid int parameter" should "have an int value" in {
    IntParam("40").value should equal (40)
  }

  "An invalid int parameter" should "throws a WebApplicationException with an error message" in {
    // Parsing a non-numeric string must be rejected with a 400 response.
    val thrown = intercept[WebApplicationException] {
      IntParam("poop")
    }
    thrown.getResponse.getStatus should equal (400)
    thrown.getResponse.getEntity should equal ("Invalid parameter: poop (Must be an integer value.)")
  }
}
| torbjornvatn/dropwizard-scala | src/test/scala/com/massrelevance/dropwizard/scala/params/tests/IntParamTest.scala | Scala | apache-2.0 | 771 |
package ru.stachek66.nlp.mystem.parsing
import ru.stachek66.nlp.mystem.model._
/**
* alexeyev
* 31.08.14.
*/
object GrammarInfoParsing {

  /**
   * Parses a comma/equals-separated tag string into a structured [[GrammarInfo]],
   * grouping each recognized tag value under its grammar category.
   */
  def toGrammarInfo(commaSeparatedTags: String): GrammarInfo = {
    // Pair every tag with the enumeration it belongs to, then group values by enumeration.
    val valuesByEnum: Map[Enumeration, Array[Enumeration#Value]] =
      commaSeparatedTags
        .split("[,=]")
        .map { tag =>
          val enumeration: Enumeration = GrammarMapBuilder.tagToEnumMap(tag)
          enumeration -> enumeration.withName(tag)
        }
        .groupBy { case (enumeration, _) => enumeration }
        .mapValues(_.map { case (_, value) => value })
        .toMap

    // Set of parsed values for one grammar category; empty when the category was absent.
    def findByEnum[T <: scala.Enumeration](enumeration: T): Set[T#Value] =
      valuesByEnum.get(enumeration) match {
        case Some(values) => values.map(_.asInstanceOf[T#Value]).toSet
        case None         => Set.empty[T#Value]
      }

    GrammarInfo(
      pos = findByEnum(POS),
      tense = findByEnum(Tense),
      `case` = findByEnum(Case),
      number = findByEnum(Number),
      verbFormInfo = findByEnum(VerbForms),
      adjFormInfo = findByEnum(AdjectiveForms),
      gender = findByEnum(Gender),
      aspect = findByEnum(Aspect),
      voice = findByEnum(Voice),
      animacy = findByEnum(Animacy),
      other = findByEnum(Other)
    )
  }

  /** Serializes a [[GrammarInfo]] back into a comma-separated tag string. */
  def toStringRepresentation(gi: GrammarInfo): String = {
    val allValues = gi.`case` ++ gi.adjFormInfo ++ gi.animacy ++ gi.aspect ++ gi.gender ++
      gi.number ++ gi.pos ++ gi.other ++ gi.tense ++ gi.verbFormInfo ++ gi.voice
    allValues.mkString(",")
  }
}
| alexeyev/mystem-scala | src/main/scala/ru/stachek66/nlp/mystem/parsing/GrammarInfoParsing.scala | Scala | mit | 1,452 |
package com.mesosphere.cosmos.error
import com.mesosphere.cosmos.rpc.v1.model.ErrorResponse
import com.twitter.finagle.http.Status
/** Exception wrapping a [[CosmosError]] together with the HTTP status and headers
  * to use when rendering it as a response. When `causedBy` is present it becomes
  * the exception's cause; the message is taken from the wrapped error.
  */
final case class CosmosException(
  error: CosmosError,
  status: Status,
  headers: Map[String, String],
  causedBy: Option[Throwable]
) extends RuntimeException(error.message, causedBy.orNull) {

  /** Converts the wrapped error into its wire representation:
    * (simple class name of the error, its message, its optional data). */
  def errorResponse: ErrorResponse = {
    ErrorResponse(
      error.getClass.getSimpleName,
      error.message,
      error.data
    )
  }
}
object CosmosException {

  /** Wraps `error` with the default 400 Bad Request status, no headers and no cause. */
  def apply(error: CosmosError): CosmosException =
    new CosmosException(error, Status.BadRequest, Map.empty, None)

  /** Wraps `error` with the default 400 Bad Request status and a (possibly null) cause. */
  def apply(error: CosmosError, causedBy: Throwable): CosmosException =
    new CosmosException(error, Status.BadRequest, Map.empty, Option(causedBy))
}
| takirala/cosmos | cosmos-common/src/main/scala/com/mesosphere/cosmos/error/CosmosException.scala | Scala | apache-2.0 | 776 |
package engine
import utils._
import org.lwjgl.opengl.GL20._
import java.nio.FloatBuffer
import collection.mutable.HashMap
abstract class Shader (val source: String, glType: Int) {
  // Compile the GLSL source eagerly at construction time.
  val id = glCreateShader(glType)
  glShaderSource(id, source)
  glCompileShader(id)
  Renderer.checkGLError("Shader compilation")

  // Surface any compiler diagnostics on the console (empty log means success).
  val err = glGetShaderInfoLog(id, 1024).trim()
  if (!err.equals(""))
    Console.println("Shader ("+type2str+") compiled with error : " + err)

  /** Human-readable name of this shader's stage, used in diagnostics. */
  def type2str = glType match {
    case GL_VERTEX_SHADER => "vertex"
    case _                => "fragment"
  }
}
/** Shader compiled for the vertex stage (GL_VERTEX_SHADER); compilation happens in [[Shader]]. */
class VertexShader (source: String) extends Shader(source, GL_VERTEX_SHADER) {
}
/** Shader compiled for the fragment stage (GL_FRAGMENT_SHADER); compilation happens in [[Shader]]. */
class FragmentShader (source: String) extends Shader(source, GL_FRAGMENT_SHADER) {
}
class GLSLProgram (vShader: VertexShader, fShader: FragmentShader) {
  private val id = glCreateProgram()
  glAttachShader(id, vShader.id)
  glAttachShader(id, fShader.id)
  glLinkProgram(id)
  Renderer.checkGLError("Program linking")

  /** Linker diagnostics; empty when the program linked cleanly. */
  val err = glGetProgramInfoLog(id, 1024).trim()
  if (!err.equals(""))
    Console.println("Program compiled with error : " + err)

  // Location caches: failed lookups are cached as -1 so the GL query and the
  // error message happen only once per name.
  private val uniformLocs = new HashMap[String, Int]
  private val attribLocs = new HashMap[String, Int]

  /** Makes this program the active GL program. */
  def bind () : Unit = glUseProgram(id)

  /** Deactivates the program and disables the attribute arrays it enabled. */
  def unbind () {
    glUseProgram(0)
    for ((name, loc) <- attribLocs) {
      //TODO: not sure if this is really needed
      // Bug fix: skip cached failed lookups — calling glDisableVertexAttribArray
      // with -1 is invalid (the index parameter is unsigned, so -1 wraps to an
      // out-of-range value and raises GL_INVALID_VALUE).
      if (loc != -1)
        glDisableVertexAttribArray(loc)
    }
  }

  /** Returns the cached uniform location for `name`, or -1 if it does not exist. */
  def getUniformLocation (name: String) : Int = {
    uniformLocs.getOrElseUpdate(name, {
      val loc = glGetUniformLocation(id, name)
      if (loc == -1) {
        Console.println("Error : uniform location not found for '"+name+"'")
      }
      loc
    })
  }

  /** Returns the cached attribute location for `name`, or -1 if it does not exist. */
  def getAttribLocation (name: String) : Int = {
    attribLocs.getOrElseUpdate(name, {
      val loc = glGetAttribLocation(id, name)
      if (loc == -1) {
        Console.println("Error : attribute location not found for '"+name+"'")
      }
      loc
    })
  }

  /** Enables the named attribute and points it at `buff` (`size` components per vertex). */
  def setAttribPointer (name: String, size: Int, normalize: Boolean, buff: FloatBuffer) {
    val loc = getAttribLocation(name)
    if (loc != -1) {
      glEnableVertexAttribArray(loc)
      glVertexAttribPointer(loc, size, normalize, 0, buff)
    }
  }

  /** Sets a vec3 uniform; silently skipped when the uniform does not exist. */
  def setUniform (name: String, v: Vector3) {
    val loc = getUniformLocation(name)
    if (loc != -1)
      glUniform3f(loc, v.x, v.y, v.z)
  }

  /** Sets a float uniform; silently skipped when the uniform does not exist. */
  def setUniform (name: String, v: Float) {
    val loc = getUniformLocation(name)
    if (loc != -1)
      glUniform1f(loc, v)
  }

  //Associate the given sampler to the given texture unit
  // (glUniform1i with location -1 is silently ignored by GL, so no guard needed)
  def setSamplerUnit (samplerName: String, unit: Int) = glUniform1i(getUniformLocation(samplerName), unit)
}
// vim: set ts=2 sw=2 et:
| julienr/scalamd5 | src/main/scala/engine/shader.scala | Scala | bsd-2-clause | 2,673 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.net.InetAddress
import java.util.Locale
import java.util.concurrent.TimeUnit
import kafka.api.ApiVersion
import kafka.cluster.EndPoint
import kafka.metrics.KafkaMetricsGroup
import kafka.utils._
import com.yammer.metrics.core.Gauge
import org.I0Itec.zkclient.IZkStateListener
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.zookeeper.Watcher.Event.KeeperState
import scala.collection.mutable.Set
/**
* This class registers the broker in zookeeper to allow
* other brokers and consumers to detect failures. It uses an ephemeral znode with the path:
* /brokers/ids/[0...N] --> advertisedHost:advertisedPort
*
* Right now our definition of health is fairly naive. If we register in zk we are healthy, otherwise
* we are dead.
*/
class KafkaHealthcheck(brokerId: Int,
                       advertisedEndpoints: Seq[EndPoint],
                       zkUtils: ZkUtils,
                       rack: Option[String],
                       interBrokerProtocolVersion: ApiVersion) extends Logging {

  // Re-registers the broker when the ZK session is re-established and records
  // connection-state metrics.
  private[server] val sessionExpireListener = new SessionExpireListener

  /** Subscribes to ZooKeeper state changes and performs the initial registration. */
  def startup() {
    zkUtils.subscribeStateChanges(sessionExpireListener)
    register()
  }

  /**
   * Register this broker as "alive" in zookeeper
   */
  def register() {
    // "-1" is the default when no JMX remote port is configured.
    val jmxPort = System.getProperty("com.sun.management.jmxremote.port", "-1").toInt
    // Replace blank/null advertised hosts with the machine's canonical host name.
    val updatedEndpoints = advertisedEndpoints.map(endpoint =>
      if (endpoint.host == null || endpoint.host.trim.isEmpty)
        endpoint.copy(host = InetAddress.getLocalHost.getCanonicalHostName)
      else
        endpoint
    )

    // the default host and port are here for compatibility with older clients that only support PLAINTEXT
    // we choose the first plaintext port, if there is one
    // or we register an empty endpoint, which means that older clients will not be able to connect
    val plaintextEndpoint = updatedEndpoints.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).getOrElse(
      new EndPoint(null, -1, null, null))
    zkUtils.registerBrokerInZk(brokerId, plaintextEndpoint.host, plaintextEndpoint.port, updatedEndpoints, jmxPort, rack,
      interBrokerProtocolVersion)
  }

  /** Removes the listener's metrics. Does not delete the broker znode here. */
  def shutdown(): Unit = sessionExpireListener.shutdown()

  /**
   * When we get a SessionExpired event, it means that we have lost all ephemeral nodes and ZKClient has re-established
   * a connection for us. We need to re-register this broker in the broker registry. We rely on `handleStateChanged`
   * to record ZooKeeper connection state metrics.
   */
  class SessionExpireListener extends IZkStateListener with KafkaMetricsGroup {

    // Names of every metric created by this listener, so shutdown() can remove them.
    private val metricNames = Set[String]()

    // One meter per ZooKeeper KeeperState, counting state transitions per second.
    // NOTE: building this map registers the meters and records their names as a
    // side effect of the `map` below.
    private[server] val stateToMeterMap = {
      import KeeperState._
      val stateToEventTypeMap = Map(
        Disconnected -> "Disconnects",
        SyncConnected -> "SyncConnects",
        AuthFailed -> "AuthFailures",
        ConnectedReadOnly -> "ReadOnlyConnects",
        SaslAuthenticated -> "SaslAuthentications",
        Expired -> "Expires"
      )
      stateToEventTypeMap.map { case (state, eventType) =>
        val name = s"ZooKeeper${eventType}PerSec"
        metricNames += name
        state -> newMeter(name, eventType.toLowerCase(Locale.ROOT), TimeUnit.SECONDS)
      }
    }

    // Gauge exposing the current ZooKeeper session state as a string.
    private[server] val sessionStateGauge =
      newGauge("SessionState", new Gauge[String] {
        override def value: String =
          Option(zkUtils.zkConnection.getZookeeperState.toString).getOrElse("DISCONNECTED")
      })

    metricNames += "SessionState"

    @throws[Exception]
    override def handleStateChanged(state: KeeperState) {
      // Tick the per-state meter; unmapped states are ignored.
      stateToMeterMap.get(state).foreach(_.mark())
    }

    @throws[Exception]
    override def handleNewSession() {
      // A new session means all ephemeral nodes were lost: re-register the broker.
      info("re-registering broker info in ZK for broker " + brokerId)
      register()
      info("done re-registering broker")
      info("Subscribing to %s path to watch for new topics".format(ZkUtils.BrokerTopicsPath))
    }

    override def handleSessionEstablishmentError(error: Throwable) {
      fatal("Could not establish session with zookeeper", error)
    }

    /** Unregisters every metric this listener created. */
    def shutdown(): Unit = metricNames.foreach(removeMetric(_))

  }

}
| themarkypantz/kafka | core/src/main/scala/kafka/server/KafkaHealthcheck.scala | Scala | apache-2.0 | 5,029 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs
import java.io.{File, IOException}
import java.nio.file.Files
import java.time.temporal.ChronoUnit
import java.util.Collections
import com.typesafe.config.ConfigFactory
import org.apache.commons.io.FileUtils
import org.geotools.data.{DataStoreFinder, Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.common.PartitionScheme
import org.locationtech.geomesa.fs.storage.common.partitions.DateTimeScheme
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class FileSystemDataStoreTest extends Specification {

  // Run examples in order: later examples read the data written by "create a DS".
  sequential

  /** Builds (encoding name, feature type, 10 test features) for one storage format.
    * The type is partitioned daily on 'dtg'; features fall on 2017-06-05/06/07. */
  def createFormat(format: String): (String, SimpleFeatureType, Seq[SimpleFeature]) = {
    val sft = SimpleFeatureTypes.createType(format, "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
    PartitionScheme.addToSft(sft, new DateTimeScheme(DateTimeScheme.Formats.Daily.format, ChronoUnit.DAYS, 1, "dtg", false))
    val features = Seq.tabulate(10) { i =>
      ScalaSimpleFeature.create(sft, s"$i", s"test$i", 100 + i, s"2017-06-0${5 + (i % 3)}T04:03:02.0001Z", s"POINT(10 10.$i)")
    }
    (format, sft, features)
  }

  val formats = Seq("orc", "parquet").map(createFormat)

  // One temp directory per format; created in the first step, deleted in the last.
  val dirs = scala.collection.mutable.Map.empty[String, File]

  step {
    formats.foreach { case (f, _, _) => dirs.put(f, Files.createTempDirectory(s"fsds-test-$f").toFile) }
  }

  "FileSystemDataStore" should {
    "create a DS" >> {
      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        val ds = DataStoreFinder.getDataStore(Map(
          "fs.path" -> dir.getPath,
          "fs.encoding" -> format,
          "fs.config" -> "parquet.compression=gzip"))
        ds.createSchema(sft)
        // Write all features, preserving the provided feature ids.
        WithClose(ds.getFeatureWriterAppend(format, Transaction.AUTO_COMMIT)) { writer =>
          features.foreach { feature =>
            FeatureUtils.copyToWriter(writer, feature, useProvidedFid = true)
            writer.write()
          }
        }

        // metadata
        new File(dir, s"$format/metadata.json").exists() must beTrue

        val conf = ConfigFactory.parseFile(new File(dir, s"$format/metadata.json"))
        conf.hasPath("partitions") must beTrue
        // One daily partition per distinct dtg date, each holding a single data file.
        foreach(Seq("05", "06", "07")) { day =>
          val name = s"2017/06/$day"
          val partition = conf.getConfig("partitions").getStringList(name)
          partition.size() mustEqual 1
          partition.get(0).matches(s"W[0-9a-f]{32}\\\\.$format") must beTrue

          // Metadata, schema, and partition file checks
          new File(dir, s"$format/$name").exists() must beTrue
          new File(dir, s"$format/$name").isDirectory must beTrue
          new File(dir, s"$format/$name/${partition.get(0)}").exists() must beTrue
          new File(dir, s"$format/$name/${partition.get(0)}").isFile must beTrue
        }

        ds.getTypeNames must have size 1
        val fs = ds.getFeatureSource(format)
        fs must not(beNull)

        // Round-trip: querying everything returns exactly what was written.
        val results = SelfClosingIterator(fs.getFeatures(new Query(format)).features()).toList
        results must containTheSameElementsAs(features)
      }
    }

    "create a second ds with the same path" >> {
      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        // Load a new datastore to read metadata and stuff
        val ds = DataStoreFinder.getDataStore(Collections.singletonMap("fs.path", dir.getPath))
        ds.getTypeNames.toList must containTheSameElementsAs(Seq(format))
        val results = SelfClosingIterator(ds.getFeatureReader(new Query(format), Transaction.AUTO_COMMIT)).toList
        results must containTheSameElementsAs(features)
      }
    }

    "query with multiple threads" >> {
      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        // Load a new datastore to read metadata and stuff
        val ds = DataStoreFinder.getDataStore(Map("fs.path" -> dir.getPath, "fs.read-threads" -> "4"))
        ds.getTypeNames.toList must containTheSameElementsAs(Seq(format))
        val results = SelfClosingIterator(ds.getFeatureReader(new Query(format), Transaction.AUTO_COMMIT)).toList
        results must containTheSameElementsAs(features)
      }
    }

    "call create schema on existing type" >> {
      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        val ds = DataStoreFinder.getDataStore(Collections.singletonMap("fs.path", dir.getPath))
        // Re-creating an identical schema (same spec and partition scheme) must be a no-op.
        val sameSft = SimpleFeatureTypes.createType(format, "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
        PartitionScheme.addToSft(sameSft, PartitionScheme.extractFromSft(sft).get)
        ds.createSchema(sameSft) must not(throwA[Throwable])
      }
    }

    "reject schemas with reserved words" >> {
      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        // 'point' is a reserved word in the feature type spec and must be rejected.
        val reserved = SimpleFeatureTypes.createType("reserved", "dtg:Date,*point:Point:srid=4326")
        PartitionScheme.addToSft(reserved, PartitionScheme.extractFromSft(sft).get)
        val ds = DataStoreFinder.getDataStore(Map(
          "fs.path" -> dir.getPath,
          "fs.encoding" -> format,
          "fs.config" -> "parquet.compression=gzip"))
        ds.createSchema(reserved) must throwAn[IllegalArgumentException]
        ds.getSchema(reserved.getTypeName) must throwAn[IOException] // content data store schema does not exist
      }
    }

    "support transforms" >> {
      // Cross-product of filters and attribute transforms; all filters match every feature.
      val filters = Seq(
        "INCLUDE",
        s"name IN ${(0 until 10).mkString("('test", "','test", "')")}",
        "bbox(geom, 5, 5, 15, 15)",
        "dtg DURING 2017-06-05T04:03:00.0000Z/2017-06-07T04:04:00.0000Z",
        "dtg > '2017-06-05T04:03:00.0000Z' AND dtg < '2017-06-07T04:04:00.0000Z'",
        "dtg DURING 2017-06-05T04:03:00.0000Z/2017-06-07T04:04:00.0000Z and bbox(geom, 5, 5, 15, 15)"
      ).map(ECQL.toFilter)

      val transforms = Seq(null, Array("name"), Array("dtg", "geom"))

      foreach(formats) { case (format, sft, features) =>
        val dir = dirs(format)
        val ds = DataStoreFinder.getDataStore(Collections.singletonMap("fs.path", dir.getPath))
        filters.foreach { filter =>
          transforms.foreach { transform =>
            val query = new Query(format, filter, transform)
            val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
            results must haveLength(features.length)
            if (transform == null) {
              results must containTheSameElementsAs(features)
            } else {
              // Transformed results keep the ids but expose only the requested attributes.
              results.map(_.getID) must containTheSameElementsAs(features.map(_.getID))
              results.foreach { result =>
                result.getAttributeCount mustEqual transform.length
                val matched = features.find(_.getID == result.getID).get
                transform.foreach(t => result.getAttribute(t) mustEqual matched.getAttribute(t))
              }
            }
          }
        }
        ok
      }
    }
  }

  step {
    dirs.foreach { case (_, dir) => FileUtils.deleteDirectory(dir) }
  }
}
| ddseapy/geomesa | geomesa-fs/geomesa-fs-datastore/src/test/scala/org/locationtech/geomesa/fs/FileSystemDataStoreTest.scala | Scala | apache-2.0 | 8,037 |
// scalastyle:off line.size.limit
/*
* Ported by Alistair Johnson from
* https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/math/BigIntegerConstructorsTest.java
*/
// scalastyle:on line.size.limit
package org.scalajs.testsuite.javalib.math
import java.math.BigInteger
import java.util.Random
import org.scalajs.jasminetest.JasmineTest
object BigIntegerConstructorsTest extends JasmineTest {
describe("BigIntegerConstructorsTest") {
it("testConstructorBytesException") {
val aBytes = Array[Byte]()
expect(() => new BigInteger(aBytes)).toThrow()
}
it("testConstructorBytesNegative1") {
val aBytes = Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91)
val rBytes = Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91)
val aNumber = new BigInteger(aBytes)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = aNumber.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(aNumber.signum()).toEqual(-1)
}
it("testConstructorBytesNegative2") {
val aBytes = Array[Byte](-12, 56, 100)
val rBytes = Array[Byte](-12, 56, 100)
val aNumber = new BigInteger(aBytes)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = aNumber.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(aNumber.signum()).toEqual(-1)
}
// The original tests repeated an identical 8-line verification body in every
// `it`, each one allocating `Array.ofDim[Byte]` into a `var` that was
// immediately overwritten by `toByteArray()` (dead allocation, needless var).
// The checks are factored into three local helpers; every test keeps its
// original name and asserts exactly the same facts on exactly the same data.

/** Asserts `new BigInteger(aBytes)` (two's-complement constructor) serializes
 *  back to `rBytes` and has sign `signum`. */
def checkBytesCtor(aBytes: Array[Byte], rBytes: Array[Byte], signum: Int): Unit = {
  val aNumber = new BigInteger(aBytes)
  val resBytes = aNumber.toByteArray()
  for (i <- 0 until resBytes.length) {
    expect(resBytes(i)).toEqual(rBytes(i))
  }
  expect(aNumber.signum()).toEqual(signum)
}

/** Asserts the sign-magnitude constructor `new BigInteger(aSign, aBytes)`
 *  serializes back to `rBytes` and has sign `signum`. */
def checkSignBytesCtor(aSign: Int, aBytes: Array[Byte], rBytes: Array[Byte], signum: Int): Unit = {
  val aNumber = new BigInteger(aSign, aBytes)
  val resBytes = aNumber.toByteArray()
  for (i <- 0 until resBytes.length) {
    expect(resBytes(i)).toEqual(rBytes(i))
  }
  expect(aNumber.signum()).toEqual(signum)
}

/** Asserts the string constructor `new BigInteger(value, radix)` serializes
 *  to `rBytes` and has sign `signum`. `rBytes` is Array[Int] to match the
 *  literals used by the original tests. */
def checkStringCtor(value: String, radix: Int, rBytes: Array[Int], signum: Int): Unit = {
  val aNumber = new BigInteger(value, radix)
  val resBytes = aNumber.toByteArray()
  for (i <- 0 until resBytes.length) {
    expect(resBytes(i)).toEqual(rBytes(i))
  }
  expect(aNumber.signum()).toEqual(signum)
}

it("testConstructorBytesNegative3") {
  checkBytesCtor(Array[Byte](-128, -12, 56, 100),
      Array[Byte](-128, -12, 56, 100), -1)
}
it("testConstructorBytesNegative4") {
  checkBytesCtor(Array[Byte](-128, -12, 56, 100, -13, 56, 93, -78),
      Array[Byte](-128, -12, 56, 100, -13, 56, 93, -78), -1)
}
it("testConstructorBytesPositive") {
  checkBytesCtor(Array[Byte](127, 56, 100, -1, 14, 75, -24, -100),
      Array[Byte](127, 56, 100, -1, 14, 75, -24, -100), 1)
}
it("testConstructorBytesPositive1") {
  checkBytesCtor(Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91),
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91), 1)
}
it("testConstructorBytesPositive2") {
  checkBytesCtor(Array[Byte](12, 56, 100), Array[Byte](12, 56, 100), 1)
}
it("testConstructorBytesPositive3") {
  checkBytesCtor(Array[Byte](127, 56, 100, -1), Array[Byte](127, 56, 100, -1), 1)
}
it("testConstructorBytesZero") {
  // All-zero input normalizes to the single-byte representation of zero.
  checkBytesCtor(Array[Byte](0, 0, 0, -0, +0, 0, -0), Array[Byte](0), 0)
}
it("testConstructorPrime") {
  // Probable prime with an exact bit length.
  val bitLen = 25
  val aNumber = new BigInteger(bitLen, 80, new Random())
  expect(aNumber.bitLength()).toEqual(bitLen)
}
it("testConstructorPrime2") {
  // With bitLength == 2 the only prime candidates are 2 and 3.
  val bitLen = 2
  val aNumber = new BigInteger(bitLen, 80, new Random())
  expect(aNumber.bitLength()).toEqual(bitLen)
  val num = aNumber.intValue()
  expect(num == 2 || num == 3).toBeTruthy()
}
it("testConstructorRandom") {
  // Uniformly random value: bit length is bounded above, not exact.
  val bitLen = 75
  val aNumber = new BigInteger(bitLen, new Random())
  expect(aNumber.bitLength() <= bitLen).toBeTruthy()
}
it("testConstructorSignBytesException1") {
  // Sign must be -1, 0 or 1.
  expect(() => new BigInteger(3, Array[Byte](123, 45, -3, -76))).toThrow()
}
it("testConstructorSignBytesException2") {
  // Zero sign with a non-zero magnitude is inconsistent.
  expect(() => new BigInteger(0, Array[Byte](123, 45, -3, -76))).toThrow()
}
it("testConstructorSignBytesNegative1") {
  checkSignBytesCtor(-1,
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15),
      Array[Byte](-13, -57, -101, 1, 75, -90, -46, -92, -4, 15), -1)
}
it("testConstructorSignBytesNegative2") {
  checkSignBytesCtor(-1,
      Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15),
      Array[Byte](-1, 11, -57, -101, 1, 75, -90, -46, -92, -4, 15), -1)
}
it("testConstructorSignBytesNegative3") {
  checkSignBytesCtor(-1,
      Array[Byte](-12, 56, 100),
      Array[Byte](-1, 11, -57, -100), -1)
}
it("testConstructorSignBytesNegative4") {
  checkSignBytesCtor(-1,
      Array[Byte](127, 56, 100, -2),
      Array[Byte](-128, -57, -101, 2), -1)
}
it("testConstructorSignBytesNegative5") {
  checkSignBytesCtor(-1,
      Array[Byte](-127, 56, 100, -2),
      Array[Byte](-1, 126, -57, -101, 2), -1)
}
it("testConstructorSignBytesNegative6") {
  checkSignBytesCtor(-1,
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101),
      Array[Byte](-13, -57, -101, 1, 75, -90, -46, -92, -4, 14, -24, 101), -1)
}
it("testConstructorSignBytesNegative7") {
  checkSignBytesCtor(-1,
      Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101),
      Array[Byte](-1, 11, -57, -101, 1, 75, -90, -46, -92, -4, 14, -24, 101), -1)
}
it("testConstructorSignBytesPositive1") {
  checkSignBytesCtor(1,
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15),
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15), 1)
}
it("testConstructorSignBytesPositive2") {
  // High bit of the magnitude is set, so a leading 0 pad byte is required.
  checkSignBytesCtor(1,
      Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15),
      Array[Byte](0, -12, 56, 100, -2, -76, 89, 45, 91, 3, -15), 1)
}
it("testConstructorSignBytesPositive3") {
  checkSignBytesCtor(1,
      Array[Byte](-12, 56, 100),
      Array[Byte](0, -12, 56, 100), 1)
}
it("testConstructorSignBytesPositive4") {
  checkSignBytesCtor(1,
      Array[Byte](127, 56, 100, -2),
      Array[Byte](127, 56, 100, -2), 1)
}
it("testConstructorSignBytesPositive5") {
  checkSignBytesCtor(1,
      Array[Byte](-127, 56, 100, -2),
      Array[Byte](0, -127, 56, 100, -2), 1)
}
it("testConstructorSignBytesPositive6") {
  checkSignBytesCtor(1,
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101),
      Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101), 1)
}
it("testConstructorSignBytesPositive7") {
  checkSignBytesCtor(1,
      Array[Byte](-12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101),
      Array[Byte](0, -12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 23, -101), 1)
}
it("testConstructorSignBytesZero1") {
  // A zero magnitude yields zero regardless of the requested sign.
  checkSignBytesCtor(-1, Array[Byte](-0, 0, +0, 0, 0, 0, 0), Array[Byte](0), 0)
}
it("testConstructorSignBytesZero2") {
  checkSignBytesCtor(0, Array[Byte](-0, 0, +0, 0, 0, 0, 0), Array[Byte](0), 0)
}
it("testConstructorSignBytesZero3") {
  checkSignBytesCtor(1, Array[Byte](-0, 0, +0, 0, 0, 0, 0), Array[Byte](0), 0)
}
it("testConstructorSignBytesZeroNull1") {
  // An empty magnitude also yields zero regardless of the requested sign.
  checkSignBytesCtor(-1, Array[Byte](), Array[Byte](0), 0)
}
it("testConstructorSignBytesZeroNull2") {
  checkSignBytesCtor(0, Array[Byte](), Array[Byte](0), 0)
}
it("testConstructorSignBytesZeroNull3") {
  checkSignBytesCtor(1, Array[Byte](), Array[Byte](0), 0)
}
it("testConstructorStringException1") {
  // Radix outside Character.MIN_RADIX..Character.MAX_RADIX.
  expect(() => new BigInteger("9234853876401", 45)).toThrow()
}
it("testConstructorStringException2") {
  // Leading whitespace is not accepted.
  expect(() => new BigInteger(" 9234853876401", 10)).toThrow()
}
it("testConstructorStringException3") {
  // Characters that are digits in no radix.
  expect(() => new BigInteger("92348$*#78987", 34)).toThrow()
}
it("testConstructorStringException4") {
  // 'z' and 'v' are not valid digits in radix 20.
  expect(() => new BigInteger("98zv765hdsaiy", 20)).toThrow()
}
it("testConstructorStringRadix10") {
  checkStringCtor("987328901348934898", 10,
      Array(13, -77, -78, 103, -103, 97, 68, -14), 1)
}
it("testConstructorStringRadix10Negative") {
  // NOTE(review): name says radix 10 but the original test used radix 36;
  // kept as-is to preserve behavior.
  checkStringCtor("-234871376037", 36,
      Array(-4, 48, 71, 62, -76, 93, -105, 13), -1)
}
it("testConstructorStringRadix10Zero") {
  checkStringCtor("-00000000000000", 10, Array(0), 0)
}
it("testConstructorStringRadix16") {
  checkStringCtor("fe2340a8b5ce790", 16,
      Array(15, -30, 52, 10, -117, 92, -25, -112), 1)
}
it("testConstructorStringRadix2") {
  checkStringCtor("10101010101010101", 2, Array(1, 85, 85), 1)
}
it("testConstructorStringRadix36") {
  checkStringCtor("skdjgocvhdjfkl20jndjkf347ejg457", 36,
      Array(0, -12, -116, 112, -105, 12, -36, 66, 108, 66, -20,
          -37, -15, 108, -7, 52, -99, -109, -8, -45, -5), 1)
}
it("testConstructorStringRadix8") {
  checkStringCtor("76356237071623450", 8,
      Array(7, -50, -28, -8, -25, 39, 40), 1)
}
}
}
| CapeSepias/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/math/BigIntegerConstructorsTest.scala | Scala | bsd-3-clause | 18,979 |
package utils.parsers
import scala.annotation.tailrec
import scala.annotation.switch
import utils.CharReader
final object ParserUtilities {
// Control-flow exception carrying a jump target `n`. `fillInStackTrace` is
// overridden to return `this`, so no stack trace is captured, making the
// exception cheap enough to throw as ordinary control flow.
protected[parsers] class Jump(val n:Int) extends Exception {
override def fillInStackTrace = this
}
// Internal parser error carrying a message. Stack-trace capture is suppressed
// (see Jump above for the same pattern) since this is used for control flow.
protected[parsers] class Internal(val msg:String) extends Exception {
override def fillInStackTrace = this
}
// Singleton sentinel exception thrown to abort parsing; stateless, so a
// single shared instance with a suppressed stack trace suffices.
protected[parsers] object exit extends Exception {
override def fillInStackTrace = this
}
} | Y-P-/data-processing-binding | Utils/src/utils/parsers/ParserUtilities.scala | Scala | gpl-3.0 | 500 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.process.query
import com.vividsolutions.jts.geom.Geometry
import org.geotools.data.DataStoreFinder
import org.geotools.factory.Hints
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.filter.text.cql2.CQL
import org.joda.time.{DateTime, DateTimeZone}
import org.junit.runner.RunWith
import org.locationtech.geomesa.core.data.{AccumuloDataStore, AccumuloFeatureStore}
import org.locationtech.geomesa.core.index.Constants
import org.locationtech.geomesa.feature.AvroSimpleFeatureFactory
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
/**
 * Tests for [[QueryProcess]]: runs queries against a mock Accumulo data store
 * and verifies that parent filters, process filters, and spatial predicates
 * are applied correctly.
 *
 * Fixes over the original: the GeoTools `SimpleFeatureIterator` returned by
 * `results.features()` was never closed (feature iterators must be closed to
 * release resources), and `poly` was a `var` that was never reassigned.
 */
@RunWith(classOf[JUnitRunner])
class QueryProcessTest extends Specification {

  sequential

  val dtgField = org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD
  val geotimeAttributes = s"*geom:Geometry:srid=4326,$dtgField:Date"

  def createStore: AccumuloDataStore =
    // the specific parameter values should not matter, as we
    // are requesting a mock data store connection to Accumulo
    DataStoreFinder.getDataStore(Map(
      "instanceId" -> "mycloud",
      "zookeepers" -> "zoo1:2181,zoo2:2181,zoo3:2181",
      "user"       -> "myuser",
      "password"   -> "mypassword",
      "auths"      -> "A,B,C",
      "tableName"  -> "testwrite",
      "useMock"    -> "true",
      "featureEncoding" -> "avro")).asInstanceOf[AccumuloDataStore]

  val sftName = "geomesaQueryTestType"
  val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
  sft.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField

  val ds = createStore
  ds.createSchema(sft)
  val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

  // Fixture: two "types" (a, b) with four points each at (45,45)..(48,48),
  // all stamped with the same instant.
  val featureCollection = new DefaultFeatureCollection(sftName, sft)
  List("a", "b").foreach { name =>
    List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).foreach { case (i, lat) =>
      val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), name + i.toString)
      sf.setDefaultGeometry(WKTUtils.read(f"POINT($lat%d $lat%d)"))
      sf.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      sf.setAttribute("type", name)
      sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      featureCollection.add(sf)
    }
  }

  // write the features to the store
  val res = fs.addFeatures(featureCollection)

  "GeomesaQuery" should {

    "return things without a filter" in {
      val features = fs.getFeatures()
      val geomesaQuery = new QueryProcess
      val results = geomesaQuery.execute(features, null)

      val f = results.features()
      try {
        while (f.hasNext) {
          val sf = f.next
          sf.getAttribute("type") should beOneOf("a", "b")
        }
      } finally f.close() // feature iterators must always be closed

      results.size mustEqual 8
    }

    "respect a parent filter" in {
      val features = fs.getFeatures(CQL.toFilter("type = 'b'"))
      val geomesaQuery = new QueryProcess
      val results = geomesaQuery.execute(features, null)

      val f = results.features()
      try {
        while (f.hasNext) {
          val sf = f.next
          sf.getAttribute("type") mustEqual "b"
        }
      } finally f.close()

      results.size mustEqual 4
    }

    "be able to use its own filter" in {
      val features = fs.getFeatures(CQL.toFilter("type = 'b' OR type = 'a'"))
      val geomesaQuery = new QueryProcess
      // The process filter narrows the parent result down to type 'a'.
      val results = geomesaQuery.execute(features, CQL.toFilter("type = 'a'"))

      val f = results.features()
      try {
        while (f.hasNext) {
          val sf = f.next
          sf.getAttribute("type") mustEqual "a"
        }
      } finally f.close()

      results.size mustEqual 4
    }

    "properly query geometry" in {
      val features = fs.getFeatures()
      val geomesaQuery = new QueryProcess
      val results = geomesaQuery.execute(features, CQL.toFilter("bbox(geom, 45.0, 45.0, 46.0, 46.0)"))

      // was `var` in the original; never reassigned
      val poly = WKTUtils.read("POLYGON((45 45, 46 45, 46 46, 45 46, 45 45))")

      val f = results.features()
      try {
        while (f.hasNext) {
          val sf = f.next
          poly.intersects(sf.getDefaultGeometry.asInstanceOf[Geometry]) must beTrue
        }
      } finally f.close()

      results.size mustEqual 4
    }
  }
}
| jnh5y/geomesa | geomesa-core/src/test/scala/org/locationtech/geomesa/core/process/query/QueryProcessTest.scala | Scala | apache-2.0 | 4,948 |
package com.socrata.soql.types
import org.scalatest.FunSuite
import org.scalatest.MustMatchers
/** Sanity checks on the SoQLValue hierarchy and SoQLNumber equality. */
class SoQLValueTest extends FunSuite with MustMatchers {
  test("SoQLValue is 1:1 with SoQLType, and its typ fields are accurate") {
    import scala.reflect.runtime.universe._
    // Not sure why this is necessary, but without it, the
    // knownDirectSubclasses calls return the empty set (2.10.0)
    typeOf[SoQLValue].toString
    typeOf[SoQLType].toString
    val valueClasses = typeOf[SoQLValue].typeSymbol.asClass.knownDirectSubclasses
    // BUG FIX: this previously read typeOf[SoQLValue], comparing the value
    // hierarchy against itself and making the size check below vacuous. The
    // intent (per the test name and the `typ` check) is to compare against
    // the SoQLType hierarchy.
    val typeClasses = typeOf[SoQLType].typeSymbol.asClass.knownDirectSubclasses
    valueClasses must not be (Symbol("empty"))
    valueClasses.size must equal (typeClasses.size)
    // Each value class's `typ` member must return the same-named type.
    valueClasses.foreach { sym =>
      val companionType = sym.typeSignature.member(TermName("typ")).asMethod.returnType.typeSymbol
      companionType.name.toString must equal (sym.name.toString)
    }
  }

  test("SoQLNumber's equals ignores scale") {
    def bd(s: String) = new java.math.BigDecimal(s)
    SoQLNumber(bd("5.00")) must equal (SoQLNumber(bd("5")))
  }

  // fixed typo in the test name: was "ignores scala"
  test("SoQLNumber's hashCode ignores scale") {
    def bd(s: String) = new java.math.BigDecimal(s)
    SoQLNumber(bd("5.00")).hashCode must equal (SoQLNumber(bd("5")).hashCode)
  }
}
| socrata-platform/soql-reference | soql-types/src/test/scala/com/socrata/soql/types/SoQLValueTest.scala | Scala | apache-2.0 | 1,251 |
package diode
/**
* Provides a type class instance of ActionType for Any, allowing you to dispatch anything as an action.
*
* Bring to scope with `import diode.AnyAction._`
*/
object AnyAction {
// Evidence instance treating every value as a dispatchable action; importing
// this effectively opts out of compile-time action type checking.
implicit object aType extends ActionType[Any]
}
// A subscription function: given a listener for model changes it registers it
// and returns a thunk that unsubscribes the listener when invoked.
type Subscriber[A] = (ModelRO[A] => Unit) => () => Unit
| ochrons/diode | diode-core/shared/src/main/scala-3/diode/package.scala | Scala | mit | 313 |
/*******************************************************************************
* Copyright (c) 2016 Logimethods
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the MIT License (MIT)
* which accompanies this distribution, and is available at
* http://opensource.org/licenses/MIT
*******************************************************************************/
package com.logimethods.connector.gatling.to_nats
import akka.actor.{ActorRef, Props}
import io.gatling.core.Predef._
import io.gatling.core.action.builder.ActionBuilder
import scala.concurrent.duration._
import java.util.Properties
class NatsActionTest extends Simulation {
// Empty properties: the NATS connector falls back to its default connection
// settings; messages are published to the "TestingSubject" subject.
val properties = new Properties()
val natsProtocol = NatsProtocol(properties, "TestingSubject")
// Scenario: each virtual user publishes a single fixed message.
val natsScn = scenario("NATS call").exec(NatsBuilder("Hello from Gatling!"))
// Load profile: a constant 15 users/second for one minute (~900 messages).
setUp(
natsScn.inject(constantUsersPerSec(15) during (1 minute))
).protocols(natsProtocol)
} | Logimethods/nats-connector-gatling | src/test/scala/com/logimethods/connector/gatling/to_nats/NatsActionTest.scala | Scala | mit | 992 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.runtime.stream.sql.SplitAggregateITCase.PartialAggMode
import org.apache.flink.table.planner.runtime.utils.StreamingWithAggTestBase.{AggMode, LocalGlobalOff, LocalGlobalOn}
import org.apache.flink.table.planner.runtime.utils.StreamingWithMiniBatchTestBase.MiniBatchOn
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.{HEAP_BACKEND, ROCKSDB_BACKEND, StateBackendMode}
import org.apache.flink.table.planner.runtime.utils.{StreamingWithAggTestBase, TestingRetractSink}
import org.apache.flink.table.planner.utils.DateTimeTestUtil.{localDate, localDateTime, localTime => mLocalTime}
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import java.lang.{Integer => JInt, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import java.util
import scala.collection.JavaConversions._
import scala.collection.{Seq, mutable}
import scala.util.Random
/**
 * IT cases for distinct aggregations, parameterized over the
 * split-distinct-aggregate optimization ([[PartialAggMode]]), local/global
 * aggregation, and the state backend.
 *
 * Refactoring over the original: `before()` no longer branches just to pass a
 * boolean literal, and the repeated run-query / collect-retract / compare
 * pattern is factored into `verifySqlResult`. All SQL, expected results, and
 * sink parallelism settings are unchanged.
 */
@RunWith(classOf[Parameterized])
class SplitAggregateITCase(
    partialAggMode: PartialAggMode,
    aggMode: AggMode,
    backend: StateBackendMode)
  extends StreamingWithAggTestBase(aggMode, MiniBatchOn, backend) {

  @Before
  override def before(): Unit = {
    super.before()
    tEnv.getConfig.getConfiguration.setBoolean(
      OptimizerConfigOptions.TABLE_OPTIMIZER_DISTINCT_AGG_SPLIT_ENABLED,
      partialAggMode.isPartialAggEnabled)

    // Shared input table T: (a: Long, b: Int, c: String), with duplicate
    // rows and null strings to exercise distinct counting.
    val data = List(
      (1L, 1, "Hello 0"),
      (1L, 2, "Hello 1"),
      (2L, 3, "Hello 1"),
      (3L, 5, "Hello 1"),
      (2L, 3, "Hello 2"),
      (2L, 4, "Hello 3"),
      (2L, 4, null),
      (2L, 5, "Hello 4"),
      (3L, 5, "Hello 0"),
      (2L, 4, "Hello 3"),
      (4L, 5, "Hello 2"),
      (2L, 4, "Hello 3"),
      (4L, 5, null),
      (4L, 5, "Hello 3"),
      (2L, 2, "Hello 0"),
      (4L, 6, "Hello 1"))
    val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
    tEnv.registerTable("T", t)
  }

  /** Runs `sql`, drains it into a retract sink, and compares the sorted
   *  results against `expected`. `sinkParallelism`, when set, is applied to
   *  the sink (the original tests used parallelism 1 for global aggregates). */
  private def verifySqlResult(
      sql: String,
      expected: List[String],
      sinkParallelism: Option[Int] = None): Unit = {
    val sink = new TestingRetractSink
    val streamSink = tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
    sinkParallelism.foreach(streamSink.setParallelism)
    env.execute()
    assertEquals(expected.sorted, sink.getRetractResults.sorted)
  }

  @Test
  def testCountDistinct(): Unit = {
    val ids = List(
      1,
      2, 2,
      3, 3, 3,
      4, 4, 4, 4,
      5, 5, 5, 5, 5)

    val dateTimes = List(
      "1970-01-01 00:00:01",
      "1970-01-01 00:00:02", null,
      "1970-01-01 00:00:04", "1970-01-01 00:00:05", "1970-01-01 00:00:06",
      "1970-01-01 00:00:07", null, null, "1970-01-01 00:00:10",
      "1970-01-01 00:00:11", "1970-01-01 00:00:11", "1970-01-01 00:00:13",
      "1970-01-01 00:00:14", "1970-01-01 00:00:15")

    val dates = List(
      "1970-01-01",
      "1970-01-02", null,
      "1970-01-04", "1970-01-05", "1970-01-06",
      "1970-01-07", null, null, "1970-01-10",
      "1970-01-11", "1970-01-11", "1970-01-13", "1970-01-14", "1970-01-15")

    val times = List(
      "00:00:01",
      "00:00:02", null,
      "00:00:04", "00:00:05", "00:00:06",
      "00:00:07", null, null, "00:00:10",
      "00:00:11", "00:00:11", "00:00:13", "00:00:14", "00:00:15")

    val integers = List(
      "1",
      "2", null,
      "4", "5", "6",
      "7", null, null, "10",
      "11", "11", "13", "14", "15")

    val chars = List(
      "A",
      "B", null,
      "D", "E", "F",
      "H", null, null, "K",
      "L", "L", "N", "O", "P")

    // Build rows across many column types (nulls included) so COUNT(DISTINCT)
    // is exercised per type; shuffle to decouple from input order.
    val data = new mutable.MutableList[Row]
    for (i <- ids.indices) {
      val v = integers(i)
      val decimal = if (v == null) null else new JBigDecimal(v)
      val int = if (v == null) null else JInt.valueOf(v)
      val long = if (v == null) null else JLong.valueOf(v)
      data.+=(Row.of(
        Int.box(ids(i)), localDateTime(dateTimes(i)), localDate(dates(i)),
        mLocalTime(times(i)), decimal, int, long, chars(i)))
    }
    val inputs = Random.shuffle(data)

    val rowType = new RowTypeInfo(
      Types.INT, Types.LOCAL_DATE_TIME, Types.LOCAL_DATE, Types.LOCAL_TIME,
      Types.DECIMAL, Types.INT, Types.LONG, Types.STRING)

    val t = failingDataSource(inputs)(rowType).toTable(tEnv, 'id, 'a, 'b, 'c, 'd, 'e, 'f, 'g)
    tEnv.createTemporaryView("MyTable", t)

    verifySqlResult(
      s"""
         |SELECT
         | id,
         | count(distinct a),
         | count(distinct b),
         | count(distinct c),
         | count(distinct d),
         | count(distinct e),
         | count(distinct f),
         | count(distinct g)
         |FROM MyTable
         |GROUP BY id
       """.stripMargin,
      List(
        "1,1,1,1,1,1,1,1",
        "2,1,1,1,1,1,1,1",
        "3,3,3,3,3,3,3,3",
        "4,2,2,2,2,2,2,2",
        "5,4,4,4,4,4,4,4"))
  }

  @Test
  def testSingleDistinctAgg(): Unit = {
    verifySqlResult("SELECT COUNT(DISTINCT c) FROM T", List("5"), Some(1))
  }

  @Test
  def testMultiCountDistinctAgg(): Unit = {
    verifySqlResult("SELECT COUNT(DISTINCT b), COUNT(DISTINCT c) FROM T", List("6,5"), Some(1))
  }

  @Test
  def testSingleDistinctAggAndOneOrMultiNonDistinctAgg(): Unit = {
    verifySqlResult(
      "SELECT a, SUM(b), COUNT(DISTINCT c), avg(b) FROM T GROUP BY a",
      List("1,3,2,1", "2,29,5,3", "3,10,2,5", "4,21,3,5"))
  }

  @Test
  def testSingleDistinctAggWithGroupBy(): Unit = {
    verifySqlResult(
      "SELECT a, COUNT(DISTINCT c) FROM T GROUP BY a",
      List("1,2", "2,5", "3,2", "4,3"))
  }

  @Test
  def testSingleDistinctAggWithAndNonDistinctAggOnSameColumn(): Unit = {
    verifySqlResult(
      "SELECT a, COUNT(DISTINCT b), MAX(b), MIN(b) FROM T GROUP BY a",
      List("1,2,2,1", "2,4,5,2", "3,1,5,5", "4,2,6,5"))
  }

  @Test
  def testSomeColumnsBothInDistinctAggAndGroupBy(): Unit = {
    verifySqlResult(
      "SELECT a, COUNT(DISTINCT a), COUNT(b) FROM T GROUP BY a",
      List("1,1,2", "2,1,8", "3,1,2", "4,1,4"))
  }

  @Test
  def testAggWithFilterClause(): Unit = {
    verifySqlResult(
      s"""
         |SELECT
         |  a,
         |  COUNT(DISTINCT b) filter (where not b = 2),
         |  MAX(b) filter (where not b = 5),
         |  MIN(b) filter (where not b = 2)
         |FROM T
         |GROUP BY a
       """.stripMargin,
      List("1,1,2,1", "2,3,4,3", "3,1,null,5", "4,2,6,5"))
  }

  @Test
  def testMinMaxWithRetraction(): Unit = {
    verifySqlResult(
      s"""
         |SELECT
         |  c, MIN(b), MAX(b), COUNT(DISTINCT a)
         |FROM(
         |  SELECT
         |    a, COUNT(DISTINCT b) as b, MAX(b) as c
         |  FROM T
         |  GROUP BY a
         |) GROUP BY c
       """.stripMargin,
      List("2,2,2,1", "5,1,4,2", "6,2,2,1"))
  }

  @Test
  def testAggWithJoin(): Unit = {
    verifySqlResult(
      s"""
         |SELECT *
         |FROM(
         |  SELECT
         |    c, MIN(b) as b, MAX(b) as d, COUNT(DISTINCT a) as a
         |  FROM(
         |    SELECT
         |      a, COUNT(DISTINCT b) as b, MAX(b) as c
         |    FROM T
         |    GROUP BY a
         |  ) GROUP BY c
         |) as T1 JOIN T ON T1.b + 2 = T.a
       """.stripMargin,
      List(
        "2,2,2,1,4,5,Hello 2", "2,2,2,1,4,5,Hello 3", "2,2,2,1,4,5,null",
        "2,2,2,1,4,6,Hello 1", "5,1,4,2,3,5,Hello 0", "5,1,4,2,3,5,Hello 1",
        "6,2,2,1,4,5,Hello 2", "6,2,2,1,4,5,Hello 3", "6,2,2,1,4,5,null",
        "6,2,2,1,4,6,Hello 1"))
  }

  @Test
  def testUvWithRetraction(): Unit = {
    // 1000 rows: 10 distinct `a` values, 100 distinct `b` values.
    val data = (0 until 1000).map { i => (s"${i % 10}", s"${i % 100}", s"$i") }.toList
    val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
    tEnv.registerTable("src", t)

    verifySqlResult(
      s"""
         |SELECT
         |  a,
         |  COUNT(distinct b) as uv
         |FROM (
         |  SELECT a, b, last_value(c)
         |  FROM src
         |  GROUP BY a, b
         |) t
         |GROUP BY a
       """.stripMargin,
      List(
        "0,10", "1,10", "2,10", "3,10", "4,10",
        "5,10", "6,10", "7,10", "8,10", "9,10"))
  }

  @Test
  def testCountDistinctWithBinaryRowSource(): Unit = {
    // this case failed before, because of an object reuse problem
    val data = (0 until 100).map { i => ("1", "1", s"${i % 50}", "1") }.toList
    // use BinaryRowData source here for StringData reuse
    val t = failingBinaryRowSource(data).toTable(tEnv, 'a, 'b, 'c, 'd)
    tEnv.registerTable("src", t)

    verifySqlResult(
      s"""
         |SELECT
         |  a,
         |  b,
         |  COUNT(distinct c) as uv
         |FROM (
         |  SELECT
         |    a, b, c, d
         |  FROM
         |    src where b <> ''
         |  UNION ALL
         |  SELECT
         |    a, 'ALL' as b, c, d
         |  FROM
         |    src where b <> ''
         |) t
         |GROUP BY
         |  a, b
       """.stripMargin,
      List("1,1,50", "1,ALL,50"))
  }
}
object SplitAggregateITCase {
// Wrapper around the split-distinct-aggregate flag so the parameterized
// runner prints a readable "ON"/"OFF" in each generated test name.
case class PartialAggMode(isPartialAggEnabled: Boolean) {
override def toString: String = if (isPartialAggEnabled) "ON" else "OFF"
}
val PartialAggOn = PartialAggMode(isPartialAggEnabled = true)
val PartialAggOff = PartialAggMode(isPartialAggEnabled = false)
// Parameter matrix: only PartialAggOn is exercised here (PartialAggOff is
// kept available for manual runs), crossed with local/global aggregation
// and the two state backends.
@Parameterized.Parameters(name = "PartialAgg={0}, LocalGlobal={1}, StateBackend={2}")
def parameters(): util.Collection[Array[java.lang.Object]] = {
Seq[Array[AnyRef]](
Array(PartialAggOn, LocalGlobalOff, HEAP_BACKEND),
Array(PartialAggOn, LocalGlobalOn, HEAP_BACKEND),
Array(PartialAggOn, LocalGlobalOff, ROCKSDB_BACKEND),
Array(PartialAggOn, LocalGlobalOn, ROCKSDB_BACKEND))
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/SplitAggregateITCase.scala | Scala | apache-2.0 | 12,750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.util
import java.util.{List => JList}
import org.apache.flink.api.common.state.{MapState, MapStateDescriptor, ValueState, ValueStateDescriptor}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.typeutils.{ListTypeInfo, RowTypeInfo}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.table.api.StreamQueryConfig
import org.apache.flink.table.codegen.{Compiler, GeneratedAggregationsFunction}
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.util.Logging
import org.apache.flink.types.Row
import org.apache.flink.util.{Collector, Preconditions}
/**
* Process Function for ROW clause processing-time bounded OVER window
*
* @param genAggregations Generated aggregate helper function
* @param precedingOffset preceding offset
* @param aggregatesTypeInfo row type info of aggregation
* @param inputType row type info of input row
*/
class ProcTimeBoundedRowsOver(
genAggregations: GeneratedAggregationsFunction,
precedingOffset: Long,
aggregatesTypeInfo: RowTypeInfo,
inputType: TypeInformation[CRow],
queryConfig: StreamQueryConfig)
extends ProcessFunctionWithCleanupState[CRow, CRow](queryConfig)
with Compiler[GeneratedAggregations]
with Logging {
// A ROWS window must contain at least the current row plus a positive preceding count.
Preconditions.checkArgument(precedingOffset > 0)
// Current accumulator row for the running aggregates of this key.
private var accumulatorState: ValueState[Row] = _
// Buffered input rows, keyed by the processing time at which they arrived.
// Several rows can share one timestamp, hence the list value.
private var rowMapState: MapState[Long, JList[Row]] = _
// Reused output wrapper (always an accumulate message, change flag = true).
private var output: CRow = _
// Number of rows currently buffered; saturates at precedingOffset once the window is full.
private var counterState: ValueState[Long] = _
// Smallest (oldest) timestamp key currently present in rowMapState.
private var smallestTsState: ValueState[Long] = _
private var function: GeneratedAggregations = _
override def open(config: Configuration) {
LOG.debug(s"Compiling AggregateHelper: ${genAggregations.name} \\n\\n" +
s"Code:\\n${genAggregations.code}")
// Compile and instantiate the code-generated aggregation helper.
val clazz = compile(
getRuntimeContext.getUserCodeClassLoader,
genAggregations.name,
genAggregations.code)
LOG.debug("Instantiating AggregateHelper.")
function = clazz.newInstance()
function.open(getRuntimeContext)
output = new CRow(function.createOutputRow(), true)
// We keep the elements received in a Map state keyed
// by the ingestion time in the operator.
// we also keep counter of processed elements
// and timestamp of oldest element
val rowListTypeInfo: TypeInformation[JList[Row]] =
new ListTypeInfo[Row](inputType.asInstanceOf[CRowTypeInfo].rowType)
.asInstanceOf[TypeInformation[JList[Row]]]
val mapStateDescriptor: MapStateDescriptor[Long, JList[Row]] =
new MapStateDescriptor[Long, JList[Row]]("windowBufferMapState",
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]], rowListTypeInfo)
rowMapState = getRuntimeContext.getMapState(mapStateDescriptor)
val aggregationStateDescriptor: ValueStateDescriptor[Row] =
new ValueStateDescriptor[Row]("aggregationState", aggregatesTypeInfo)
accumulatorState = getRuntimeContext.getState(aggregationStateDescriptor)
val processedCountDescriptor : ValueStateDescriptor[Long] =
new ValueStateDescriptor[Long]("processedCountState", classOf[Long])
counterState = getRuntimeContext.getState(processedCountDescriptor)
val smallestTimestampDescriptor : ValueStateDescriptor[Long] =
new ValueStateDescriptor[Long]("smallestTSState", classOf[Long])
smallestTsState = getRuntimeContext.getState(smallestTimestampDescriptor)
// Arms the TTL-based state cleanup configured via queryConfig.
initCleanupTimeState("ProcTimeBoundedRowsOverCleanupTime")
}
override def processElement(
inputC: CRow,
ctx: ProcessFunction[CRow, CRow]#Context,
out: Collector[CRow]): Unit = {
val input = inputC.row
val currentTime = ctx.timerService.currentProcessingTime
// register state-cleanup timer
registerProcessingCleanupTimer(ctx, currentTime)
// initialize state for the processed element
var accumulators = accumulatorState.value
if (accumulators == null) {
accumulators = function.createAccumulators()
}
// get smallest timestamp (0L means "unset": first element for this key)
var smallestTs = smallestTsState.value
if (smallestTs == 0L) {
smallestTs = currentTime
smallestTsState.update(smallestTs)
}
// get previous counter value
var counter = counterState.value
// Window already holds precedingOffset rows: evict the oldest row before
// accumulating the new one, so the buffer size stays constant.
if (counter == precedingOffset) {
val retractList = rowMapState.get(smallestTs)
// get oldest element beyond buffer size
// and if oldest element exist, retract value
val retractRow = retractList.get(0)
function.retract(accumulators, retractRow)
retractList.remove(0)
// if reference timestamp list not empty, keep the list
if (!retractList.isEmpty) {
rowMapState.put(smallestTs, retractList)
} // if smallest timestamp list is empty, remove and find new smallest
else {
rowMapState.remove(smallestTs)
// Linear scan over remaining keys to find the new minimum timestamp.
val iter = rowMapState.keys.iterator
var currentTs: Long = 0L
var newSmallestTs: Long = Long.MaxValue
while (iter.hasNext) {
currentTs = iter.next
if (currentTs < newSmallestTs) {
newSmallestTs = currentTs
}
}
smallestTsState.update(newSmallestTs)
}
} // we update the counter only while buffer is getting filled
else {
// Buffer not yet full: one row added, none evicted, so the count grows.
counter += 1
counterState.update(counter)
}
// copy forwarded fields in output row
function.setForwardedFields(input, output.row)
// accumulate current row and set aggregate in output row
function.accumulate(accumulators, input)
function.setAggregationResults(accumulators, output.row)
// update map state, accumulator state, counter and timestamp
val currentTimeState = rowMapState.get(currentTime)
if (currentTimeState != null) {
// Another row already arrived at this exact processing time; append to its list.
currentTimeState.add(input)
rowMapState.put(currentTime, currentTimeState)
} else { // add new input
val newList = new util.ArrayList[Row]
newList.add(input)
rowMapState.put(currentTime, newList)
}
accumulatorState.update(accumulators)
out.collect(output)
}
override def onTimer(
timestamp: Long,
ctx: ProcessFunction[CRow, CRow]#OnTimerContext,
out: Collector[CRow]): Unit = {
// State-retention timer fired: drop all per-key state and reset the helper.
if (needToCleanupState(timestamp)) {
cleanupState(rowMapState, accumulatorState, counterState, smallestTsState)
function.cleanup()
}
}
override def close(): Unit = {
function.close()
}
}
| mylog00/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/ProcTimeBoundedRowsOver.scala | Scala | apache-2.0 | 7,394 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.ml.utils
import org.apache.spark.ml.util.{MLReadable, MLReader}
/**
 * Generic Spark ML [[MLReadable]] implementation for H2O pipeline stages.
 *
 * Subclassed (typically by companion objects) to provide `load`/`read` support;
 * all deserialization work is delegated to [[H2OReaderBase]].
 *
 * @tparam T type of the stage being read back
 */
class H2OParamsReadable[T] extends MLReadable[T] {
override def read: MLReader[T] = new H2OReaderBase[T]
}
| h2oai/sparkling-water | ml/src/main/scala/ai/h2o/sparkling/ml/utils/H2OParamsReadable.scala | Scala | apache-2.0 | 1,003 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.BalanceSheetTangibleAssetsCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{AbridgedAccountsBoxRetriever, Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box._
/**
 * Box AC131 — "Total net assets or liabilities (previous PoA)".
 * An optional integer money amount subject only to the standard money validation.
 */
case class AC131(value: Option[Int])
  extends CtBoxIdentifier(name = "Total net assets or liabilities (previous PoA)")
  with CtOptionalInteger
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators {

  // Only the generic money-range check applies to this box.
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] =
    collectErrors(validateMoney(value))
}
/**
 * Calculator for AC131. The formula depends on which accounts regime is in use:
 * abridged accounts derive the value from depreciation boxes, while full accounts
 * sum the AC131A–AC131E sub-boxes.
 */
object AC131 extends Calculated[AC131, Frs102AccountsBoxRetriever] with BalanceSheetTangibleAssetsCalculator {

  override def calculate(boxRetriever: Frs102AccountsBoxRetriever): AC131 =
    boxRetriever match {
      case abridged: AbridgedAccountsBoxRetriever =>
        // Abridged accounts: depreciation of tangible assets at the end of the period.
        calculateDepreciationOfTangibleAssetsAtEndOfThePeriod(
          abridged.ac128(),
          abridged.ac219(),
          abridged.ac130(),
          abridged.ac214())
      case full: FullAccountsBoxRetriever =>
        // Full accounts: aggregate of the five AC131 sub-boxes.
        calculateAC131(
          full.ac131A(),
          full.ac131B(),
          full.ac131C(),
          full.ac131D(),
          full.ac131E())
    }
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC131.scala | Scala | apache-2.0 | 1,839 |
/*
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*/
package org.locationtech.geomesa.accumulo.data
import java.util.Date
import com.vividsolutions.jts.geom.Point
import org.geotools.data._
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.cql2.CQL
import org.geotools.filter.text.ecql.ECQL
import org.geotools.util.Converters
import org.joda.time.{DateTime, DateTimeZone}
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithMultipleSfts
import org.locationtech.geomesa.accumulo.util.{CloseableIterator, SelfClosingIterator}
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes._
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Integration tests for query-time attribute transforms (projection + derived
// attributes) against an Accumulo-backed GeoMesa data store.
class AccumuloDataStoreTransformsTest extends Specification with TestWithMultipleSfts {
sequential
// Base feature types: one with name/dtg/geom, one with an extra 'attr' field.
val spec = "name:String,dtg:Date,*geom:Point:srid=4326"
val spec2 = "name:String,attr:String,dtg:Date,*geom:Point:srid=4326"
val name = "myname"
val date = Converters.convert("2012-01-01T00:00:00.000Z", classOf[Date])
val geom = Converters.convert("POINT(45 49)", classOf[Point])
val ff = CommonFactoryFinder.getFilterFactory2
def createFeature(sft: SimpleFeatureType) =
Seq(new ScalaSimpleFeature("fid-1", sft, Array(name, date, geom)))
def createFeature2(sft: SimpleFeatureType, attr: String) =
Seq(new ScalaSimpleFeature("fid-1", sft, Array(name, attr, date, geom)))
"AccumuloDataStore" should {
// Derived attributes computed from expressions, plus plain projections.
"handle transformations" >> {
val sft = createNewSchema(spec)
val sftName = sft.getTypeName
addFeatures(sft, createFeature(sft))
"with derived values" >> {
val query = new Query(sftName, Filter.INCLUDE,
Array("name", "derived=strConcat('hello',name)", "geom"))
// Let's read out what we wrote.
val results = ds.getFeatureSource(sftName).getFeatures(query)
"with the correct schema" >> {
val schema = SimpleFeatureTypes.encodeType(results.getSchema)
schema mustEqual s"name:String,*geom:Point:srid=4326:$OPT_INDEX=full:$OPT_INDEX_VALUE=true,derived:String"
}
"with the correct results" >> {
val features = results.features
features.hasNext must beTrue
val f = features.next()
DataUtilities.encodeFeature(f) mustEqual "fid-1=myname|POINT (45 49)|hellomyname"
}
}
"with dtg and geom" in {
// Projection to a subset of attributes: the dropped attribute must come back null.
val query = new Query(sftName, Filter.INCLUDE, List("dtg", "geom").toArray)
val results = SelfClosingIterator(CloseableIterator(ds.getFeatureSource(sftName).getFeatures(query).features())).toList
results must haveSize(1)
results.head.getAttribute("dtg") mustEqual date
results.head.getAttribute("geom") mustEqual geom
results.head.getAttribute("name") must beNull
}
"with setPropertyNames" in {
// Same projection expressed through Query.setPropertyNames instead of the constructor.
val filter = ff.bbox("geom", 44.0, 48.0, 46.0, 50.0, "EPSG:4326")
val query = new Query(sftName, filter)
query.setPropertyNames(Array("geom"))
val features = ds.getFeatureSource(sftName).getFeatures(query).features
val results = features.toList
"return exactly one result" >> {
results.size must equalTo(1)
}
"with correct fields" >> {
results.head.getAttribute("geom") mustEqual geom
results.head.getAttribute("dtg") must beNull
results.head.getAttribute("name") must beNull
}
}
}
// Same projection behaviour against a schema downgraded to GeoMesa version 2.
"handle back compatible transformations" >> {
val sft = createNewSchema(spec)
val sftName = sft.getTypeName
ds.setGeomesaVersion(sftName, 2)
addFeatures(sft, createFeature(sft))
val query = new Query(sftName, Filter.INCLUDE, List("dtg", "geom").toArray)
val results = SelfClosingIterator(CloseableIterator(ds.getFeatureSource(sftName).getFeatures(query).features())).toList
results must haveSize(1)
results.head.getAttribute("dtg") mustEqual date
results.head.getAttribute("geom") mustEqual geom
results.head.getAttribute("name") must beNull
}
// Transforms over the wider schema (spec2), including multi-field derived values.
"handle transformations" >> {
val sft = createNewSchema(spec2)
val sftName = sft.getTypeName
addFeatures(sft, createFeature2(sft, "v1"))
"across multiple fields" >> {
val query = new Query(sftName, Filter.INCLUDE,
Array("name", "derived=strConcat(attr,name)", "geom"))
// Let's read out what we wrote.
val results = ds.getFeatureSource(sftName).getFeatures(query)
"with the correct schema" >> {
SimpleFeatureTypes.encodeType(results.getSchema) mustEqual
s"name:String,*geom:Point:srid=4326:$OPT_INDEX=full:$OPT_INDEX_VALUE=true,derived:String"
}
"with the correct results" >> {
val features = results.features
features.hasNext must beTrue
val f = features.next()
DataUtilities.encodeFeature(f) mustEqual "fid-1=myname|POINT (45 49)|v1myname"
}
}
"to subtypes" >> {
val query = new Query(sftName, Filter.INCLUDE, Array("name", "geom"))
// Let's read out what we wrote.
val results = ds.getFeatureSource(sftName).getFeatures(query)
"with the correct schema" >> {
SimpleFeatureTypes.encodeType(results.getSchema) mustEqual
s"name:String,*geom:Point:srid=4326:$OPT_INDEX=full:$OPT_INDEX_VALUE=true"
}
"with the correct results" >> {
val features = results.features
features.hasNext must beTrue
val f = features.next()
DataUtilities.encodeFeature(f) mustEqual "fid-1=myname|POINT (45 49)"
}
}
"with filters on other attributes" >> {
// Filter references attributes (dtg) that are NOT part of the projection.
val filter = CQL.toFilter("bbox(geom,45,45,55,55) AND " +
"dtg BETWEEN '2011-01-01T00:00:00.000Z' AND '2013-01-02T00:00:00.000Z'")
val query = new Query(sftName, filter, Array("geom"))
// Let's read out what we wrote.
val features = ds.getFeatureSource(sftName).getFeatures(query).features
"return the data" >> {
features.hasNext must beTrue
}
"with correct results" >> {
val f = features.next()
DataUtilities.encodeFeature(f) mustEqual "fid-1=POINT (45 49)"
}
}
}
// Attributes flagged with index-value=true can be served straight from the index;
// verify they are transformed correctly regardless of requested attribute order.
"transform index value data correctly" in {
val sft = createNewSchema("trackId:String:index-value=true,label:String:index-value=true," +
"extraValue:String,score:Double:index-value=true,dtg:Date,geom:Point:srid=4326")
val sftName = sft.getTypeName
val baseDate = Converters.convert("2014-01-01T00:00:00.000Z", classOf[Date]).getTime
addFeatures(sft, {
(0 until 5).map { i =>
val sf = new ScalaSimpleFeature(s"f$i", sft)
sf.setAttribute(0, s"trk$i")
sf.setAttribute(1, s"label$i")
sf.setAttribute(2, "extra")
sf.setAttribute(3, new java.lang.Double(i))
sf.setAttribute(4, s"2014-01-01T0$i:00:00.000Z")
sf.setAttribute(5, s"POINT(5$i 50)")
sf
}
})
"with out of order attributes" >> {
val query = new Query(sftName, ECQL.toFilter("bbox(geom,49,49,60,60)"), Array("geom", "dtg", "label"))
val features =
SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList.sortBy(_.getID)
features must haveSize(5)
(0 until 5).foreach { i =>
features(i).getID mustEqual s"f$i"
features(i).getAttributeCount mustEqual 3
features(i).getAttribute("label") mustEqual s"label$i"
features(i).getAttribute("dtg").asInstanceOf[Date].getTime mustEqual baseDate + i * 60 *60 * 1000
features(i).getAttribute("geom") mustEqual WKTUtils.read(s"POINT(5$i 50)")
}
success
}
"with only date and geom" >> {
val query = new Query(sftName, ECQL.toFilter("bbox(geom,49,49,60,60)"), Array("geom", "dtg"))
val features =
SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList.sortBy(_.getID)
features must haveSize(5)
(0 until 5).foreach { i =>
features(i).getID mustEqual s"f$i"
features(i).getAttributeCount mustEqual 2
features(i).getAttribute("dtg").asInstanceOf[Date].getTime mustEqual baseDate + i * 60 *60 * 1000
features(i).getAttribute("geom") mustEqual WKTUtils.read(s"POINT(5$i 50)")
}
success
}
"with all attributes" >> {
val query = new Query(sftName, ECQL.toFilter("bbox(geom,49,49,60,60)"),
Array("geom", "dtg", "label", "score", "trackId"))
val features =
SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList.sortBy(_.getID)
features must haveSize(5)
(0 until 5).foreach { i =>
features(i).getID mustEqual s"f$i"
features(i).getAttributeCount mustEqual 5
features(i).getAttribute("label") mustEqual s"label$i"
features(i).getAttribute("trackId") mustEqual s"trk$i"
features(i).getAttribute("score") mustEqual i.toDouble
features(i).getAttribute("dtg").asInstanceOf[Date].getTime mustEqual baseDate + i * 60 *60 * 1000
features(i).getAttribute("geom") mustEqual WKTUtils.read(s"POINT(5$i 50)")
}
success
}
}
// Schema evolution: the type gains 'attr1' after some features were written;
// old features must read back with null for the new attribute.
"handle transformations to updated types" >> {
var sft = createNewSchema("dtg:Date,geom:Point:srid=4326")
val sftName = sft.getTypeName
addFeatures(sft, {
(0 until 10).filter(_ % 2 == 0).map { i =>
val sf = new ScalaSimpleFeature(s"f$i", sft)
sf.setAttribute(0, s"2014-01-01T0$i:00:00.000Z")
sf.setAttribute(1, s"POINT(5$i 50)")
sf
}
})
// Update the stored schema in place (bypassing updateSchema) and clear the cache.
sft = SimpleFeatureTypes.createType(sftName, "dtg:Date,geom:Point:srid=4326,attr1:String")
ds.metadata.insert(sftName, org.locationtech.geomesa.accumulo.data.ATTRIBUTES_KEY, SimpleFeatureTypes.encodeType(sft))
ds.metadata.expireCache(sftName)
addFeatures(sft, {
(0 until 10).filter(_ % 2 == 1).map { i =>
val sf = new ScalaSimpleFeature(s"f$i", sft)
sf.setAttribute(0, s"2014-01-01T0$i:00:00.000Z")
sf.setAttribute(1, s"POINT(5$i 50)")
sf.setAttribute(2, s"$i")
sf
}
})
ok
"for old attributes with new and old features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f1', 'f2')"), Array("geom", "dtg"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f1", "f2"))
features.sortBy(_.getID).map(_.getAttribute("geom").toString) mustEqual Seq("POINT (51 50)", "POINT (52 50)")
features.sortBy(_.getID).map(_.getAttribute("dtg")).map(new DateTime(_).withZone(DateTimeZone.UTC).getHourOfDay) mustEqual Seq(1, 2)
}
"for old attributes with new features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f1')"), Array("geom", "dtg"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f1"))
features.head.getAttribute("geom").toString mustEqual "POINT (51 50)"
new DateTime(features.head.getAttribute("dtg")).withZone(DateTimeZone.UTC).getHourOfDay mustEqual 1
}
"for old attributes with old features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f2')"), Array("geom", "dtg"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f2"))
features.head.getAttribute("geom").toString mustEqual "POINT (52 50)"
new DateTime(features.head.getAttribute("dtg")).withZone(DateTimeZone.UTC).getHourOfDay mustEqual 2
}
"for new attributes with new and old features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f1', 'f2')"), Array("geom", "attr1"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f1", "f2"))
features.sortBy(_.getID).map(_.getAttribute("geom").toString) mustEqual Seq("POINT (51 50)", "POINT (52 50)")
features.sortBy(_.getID).map(_.getAttribute("attr1")) mustEqual Seq("1", null)
}
"for new attributes with new features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f1')"), Array("geom", "attr1"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f1"))
features.head.getAttribute("geom").toString mustEqual "POINT (51 50)"
features.head.getAttribute("attr1") mustEqual "1"
}
"for new attributes with old features" >> {
val query = new Query(sftName, ECQL.toFilter("IN ('f2')"), Array("geom", "attr1"))
val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features).toList
features.map(_.getID) must containTheSameElementsAs(Seq("f2"))
features.head.getAttribute("geom").toString mustEqual "POINT (52 50)"
features.head.getAttribute("attr1") must beNull
}
}
}
}
| setumaven/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/data/AccumuloDataStoreTransformsTest.scala | Scala | apache-2.0 | 14,171 |
package com.arcusys.valamis.lesson.scorm.model.manifest
/**
 * A rule used mostly for keeping a learner from having access to an activity.
 * Such rules are continuously evaluated.
 * @param conditions Set of conditions that define whether the action will be applied or not
 * @param action An action to perform if conditions hit true
 */
class PreConditionRule(conditions: RuleConditionSet, val action: PreConditionAction.Value) extends ConditionRule(conditions)
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.cpp
import scala.collection.JavaConverters._
import de.ust.skill.ir.Field
import de.ust.skill.ir.restriction.FloatRangeRestriction
import de.ust.skill.ir.restriction.IntRangeRestriction
import de.ust.skill.ir.restriction.MonotoneRestriction
import de.ust.skill.ir.Type
import de.ust.skill.ir.UserType
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashSet
/**
* creates header and implementation for all type definitions
*
* @author Timm Felden
*/
trait TypesMaker extends GeneralOutputMaker {

  // Escaped, capitalized getter/setter suffix for a field (e.g. "get" + fieldName).
@inline private final def fieldName(implicit f : Field) : String = escaped(f.getName.capital())
  // Name of the generated C++ member variable backing a field.
@inline private final def localFieldName(implicit f : Field) : String = internalName(f)
abstract override def make {
super.make
makeHeader
makeSource
}
  // Emits one "TypesOfX.h" header per base type, containing the full class
  // hierarchy rooted at that base. The bodies below are C++ code templates;
  // do not reformat the interpolated strings — their whitespace is emitted verbatim.
private final def makeHeader {
// one header per base type
for(base <- IR.par if null == base.getSuperType) {
val out = files.open(s"TypesOf${name(base)}.h")
// NOTE(review): result of getSubTypes is discarded — looks like dead code;
// confirm the getter has no side effects before removing.
base.getSubTypes
// get all customizations in types below base, so that we can generate includes for them
val customIncludes = gatherCustomIncludes(base).toSet.toArray.sorted
//includes package
out.write(s"""${beginGuard(s"types_of_${name(base)}")}
#include <skill/api/types.h>
#include <skill/api/SkillException.h>
#include <cassert>
#include <vector>
#include <set>
#include <map>
${customIncludes.map(i⇒s"#include <$i>\\n").mkString}
namespace skill{
namespace internal {
template<class T>
class Book;
template<class T, class B>
class StoragePool;
}
}
${packageParts.mkString("namespace ", " {\\nnamespace ", " {")}
${
if(visitors.length>0) s"""
// predef visitor
namespace api {
class Visitor;
}
"""
else ""
}
// type predef for cyclic dependencies${
(for (t ← IR) yield s"""
class ${name(t)};""").mkString
}
// type predef known fields for friend declarations
namespace internal {${
(for (t ← IR if base == t.getBaseType; f <- t.getFields.asScala) yield s"""
class ${knownField(f)};""").mkString
}
}
// begin actual type defs
""")
      // Emit one C++ class (plus an _UnknownSubType companion) per IR type in this hierarchy.
for (t ← IR if base == t.getBaseType){
val fields = t.getAllFields.asScala.filter(!_.isConstant)
val relevantFields = fields.filter(!_.isIgnored)
val Name = name(t)
val SuperName = if (null != t.getSuperType()) name(t.getSuperType)
else "::skill::api::Object"
//class declaration
out.write(s"""
${
comment(t)
}class $Name : public $SuperName {
friend class ::skill::internal::Book<${name(t)}>;
friend class ::skill::internal::StoragePool<${name(t)},${name(t.getBaseType)}>;${
(for (f <- t.getFields.asScala) yield s"""
friend class internal::${knownField(f)};""").mkString
}
protected:
""")
// fields
out.write((for(f <- t.getFields.asScala if !f.isConstant)
yield s""" ${mapType(f.getType())} ${localFieldName(f)};
""").mkString)
// constructor
out.write(s"""
$Name() { }
$Name(::skill::SKilLID _skillID${
(for(f <- t.getAllFields.asScala if !f.isConstant()) yield s""",
${mapType(f.getType)} __${name(f)} = ${defaultValue(f)}""").mkString
}) {
this->id = _skillID;${
(for(f <- t.getAllFields.asScala if !f.isConstant()) yield s"""
this->${localFieldName(f)} = __${name(f)};""").mkString
}
}
public:
""")
// accept visitor
if(visited.contains(t.getSkillName)){
out.write(s"""
virtual void accept(api::Visitor *v);
""")
}
// reveal skill id
if(revealSkillID && null==t.getSuperType)
out.write("""
inline ::skill::SKilLID skillID() const { return this->id; }
""")
// show implemented interfaces
if(interfaceChecks){
        // subs: interfaces only checked below this type; supers: only implemented;
        // both: checked and implemented here, so the method can be non-virtual.
val subs = interfaceCheckMethods.getOrElse(t.getSkillName, HashSet())
val supers = interfaceCheckImplementations.getOrElse(t.getSkillName, HashSet())
val both = subs.intersect(supers)
subs --= both
supers --= both
out.write(subs.map(s ⇒ s"""
virtual bool is$s() const { return false; }
""").mkString)
out.write(supers.map(s ⇒ s"""
virtual bool is$s() const override { return true; }
""").mkString)
out.write(both.map(s ⇒ s"""
inline bool is$s() const { return true; }
""").mkString)
}
//${if(revealSkillID)"" else s"protected[${packageName}] "}final def getSkillID = skillID
// custom fields
val customizations = t.getCustomizations.asScala.filter(_.language.equals("cpp")).toArray
for(c <- customizations) {
val opts = c.getOptions.asScala.toMap
val default = opts.get("default").map(s ⇒ s" = ${s.get(0)}").getOrElse("")
out.write(s"""
${comment(c)}${c.`type`} ${name(c)}$default;
""")
}
///////////////////////
// getters & setters //
///////////////////////
for(f <- t.getFields.asScala) {
implicit val thisF = f;
        // Getter body: !ignore fields throw; constants return their literal value.
def makeGetterImplementation:String = {
if(f.isIgnored)
s"""throw ::skill::SkillException::IllegalAccessError("${name(f)} has ${if(f.hasIgnoredType)"a type with "else""}an !ignore hint");"""
else if(f.isConstant)
s"return (${mapType(f.getType)})0x${f.constantValue().toHexString};"
else
s"return $localFieldName;"
}
        // Setter body: restriction hints (@range, @monotone) become C++ asserts.
def makeSetterImplementation:String = {
if(f.isIgnored)
s"""throw ::skill::SkillException::IllegalAccessError("${name(f)} has ${if(f.hasIgnoredType)"a type with "else""}an !ignore hint");"""
else
s"${
f.getRestrictions.asScala.map {
//@range
case r:IntRangeRestriction ⇒
(r.getLow == Long.MinValue, r.getHigh == Long.MaxValue) match {
case (true, true) ⇒ ""
case (true, false) ⇒ s"assert(${name(f)} <= ${r.getHigh}L);"
case (false, true) ⇒ s"assert(${r.getLow}L <= ${name(f)});"
case (false, false) ⇒ s"assert(${r.getLow}L <= ${name(f)} && ${name(f)} <= ${r.getHigh}L);"
}
case r:FloatRangeRestriction if("f32".equals(f.getType.getName)) ⇒
s"assert(${r.getLowFloat}f <= ${name(f)} && ${name(f)} <= ${r.getHighFloat}f);"
case r:FloatRangeRestriction ⇒
s"assert(${r.getLowDouble} <= ${name(f)} && ${name(f)} <= ${r.getHighDouble});"
//@monotone modification check
case r:MonotoneRestriction ⇒ "assert(id == -1L); "
case _ ⇒ ""
}.mkString
}this->$localFieldName = ${name(f)};"
}
if(f.isConstant)
out.write(s"""
${comment(f)}inline ${mapType(f.getType)} get$fieldName() const {$makeGetterImplementation}
""")
else
out.write(s"""
${comment(f)}inline ${mapType(f.getType)} get$fieldName() const {$makeGetterImplementation}
${comment(f)}inline void set$fieldName(${mapType(f.getType)} ${name(f)}) {$makeSetterImplementation}
""")
}
out.write(s"""
/* override def prettyString : String = s"${name(t)}(#$$skillID${
(
for(f <- t.getAllFields.asScala)
yield if(f.isIgnored) s""", ${f.getName()}: <<ignored>>"""
else if (!f.isConstant) s""", ${if(f.isAuto)"auto "else""}${f.getName()}: $${${name(f)}}"""
else s""", const ${f.getName()}: ${f.constantValue()}"""
).mkString
})"*/
static const char *const typeName;
virtual const char *skillName() const { return typeName; }
virtual std::string toString() const { return std::string(typeName) + std::to_string(this->id); }
virtual void prettyString(std::ostream &os) const {
os << "${t.getName.capital}#" << id;
}
};
class ${name(t)}_UnknownSubType : public ${name(t)} {
const ::skill::internal::AbstractStoragePool *owner;
//! bulk allocation constructor
${name(t)}_UnknownSubType() { };
friend class ::skill::internal::Book<${name(t)}_UnknownSubType>;
//final override def prettyString : String = s"$$getTypeName#$$skillID"
public:
/**
* !internal use only!
*/
inline void byPassConstruction(::skill::SKilLID id, const ::skill::internal::AbstractStoragePool *owner) {
this->id = id;
this->owner = owner;
}
${name(t)}_UnknownSubType(::skill::SKilLID id) : owner(nullptr) {
throw ::skill::SkillException("one cannot create an unknown object without supllying a name");
}
virtual const char *skillName() const;
};
""");
}
// close name spaces
out.write(s"""${packageParts.map(_ ⇒ "}").mkString}
$endGuard""")
out.close()
}
}
  // Emits one "TypesOfX.cpp" per base type with typeName definitions, the
  // _UnknownSubType skillName lookup, and visitor accept implementations.
private final def makeSource {
// one file per base type
for(base <- IR if null == base.getSuperType) {
val out = files.open(s"TypesOf${name(base)}.cpp")
out.write(s"""#include "File.h"
#include "TypesOf${name(base)}.h"${
(for(t <- IR if base == t.getBaseType) yield s"""
const char *const $packageName::${name(t)}::typeName = "${t.getSkillName}";
const char *$packageName::${name(t)}_UnknownSubType::skillName() const {
return owner->name->c_str();
}${
if(visited.contains(t.getSkillName)) s"""
void $packageName::${name(t)}::accept($packageName::api::Visitor *v) {
v->visit(this);
}"""
else ""
}""").mkString
}
""")
out.close()
}
}
  // Recursively collects the cpp-language "include" customization options of a
  // type and all of its subtypes (may contain duplicates; caller dedupes).
private def gatherCustomIncludes(t : UserType) : Seq[String] = {
val x = t.getCustomizations.asScala.filter(_.language.equals("cpp")).flatMap{
case null ⇒ ArrayBuffer[String]()
case c ⇒ val inc = c.getOptions.get("include")
if(null!=inc) inc.asScala
else ArrayBuffer[String]()
}
x ++ t.getSubTypes.asScala.flatMap(gatherCustomIncludes)
}
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/cpp/TypesMaker.scala | Scala | bsd-3-clause | 10,408 |
package com.example
import org.specs2.mutable.Specification
import spray.testkit.Specs2RouteTest
import spray.http._
import StatusCodes._
/**
 * Route tests for `MyService`'s /reverse endpoint: the happy path, unhandled
 * paths, and the sealed-route error responses. Matchers are written uniformly
 * with specs2's `===` (the file previously mixed `must be_==` and `===`).
 */
class MyServiceSpec extends Specification with Specs2RouteTest with MyService {

  // The spray route test kit needs an actor-ref factory; reuse the test kit's system.
  def actorRefFactory = system

  "MyService" should {

    "return an empty response for GET requests to the /reverse?q path" in {
      Get("/reverse?q") ~> route ~> check {
        responseAs[String] === ""
      }
    }

    "return a reversed string response for GET requests to the /reverse?q=123 path" in {
      Get("/reverse?q=123") ~> route ~> check {
        responseAs[String] === "321"
      }
    }

    "leave GET requests to other paths unhandled" in {
      Get("/kermit") ~> route ~> check {
        handled must beFalse
      }
    }

    "return a NotFound error for Get requests without 'q' param to the /reverse path" in {
      // sealRoute turns rejections into real HTTP error responses.
      Get("/reverse") ~> sealRoute(route) ~> check {
        status === NotFound
        responseAs[String] === "Request is missing required query parameter 'q'"
      }
    }

    "return a MethodNotAllowed error for PUT requests to the /reverse path" in {
      Put("/reverse") ~> sealRoute(route) ~> check {
        status === MethodNotAllowed
        responseAs[String] === "HTTP method not allowed, supported methods: GET"
      }
    }
  }
}
| usr000/AkkaSprayPlayground | spray-service/src/test/scala/com/example/MyServiceSpec.scala | Scala | apache-2.0 | 1,333 |
package net.koofr.play.ws
import javax.xml.ws.Endpoint
import javax.xml.ws.spi.http.HttpContext
import play.api._
import net.koofr.play.ws.util.EndpointHelper
/**
 * Play plugin that publishes JAX-WS endpoints and stops them on shutdown.
 * Access to the endpoint list is serialized via `synchronized`.
 */
class WSPlugin(application: Application) extends Plugin {

  // Endpoints published so far, most recently registered first.
  private[this] var endpoints = Seq[Endpoint]()

  /** Creates a JAX-WS endpoint for `serviceImpl`, publishes it on `context`, and tracks it. */
  def register(serviceImpl: Object, context: HttpContext) = synchronized {
    val endpoint = EndpointHelper.create(serviceImpl)
    endpoint.publish(context)
    endpoints = endpoint +: endpoints
  }

  // NOTE(review): Play's `Plugin.onStop` takes no parameters, so this overload with an
  // `Application` argument does not override it and may never be invoked by Play —
  // TODO confirm against the Play version in use.
  def onStop(app: Application) = synchronized {
    endpoints foreach (_.stop())
  }
}
| koofr/play-jax-ws | app/net/koofr/play/ws/WSPlugin.scala | Scala | mit | 559 |
/**
* Copyright (C) 2014 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package controllers
import actions.ApiActions._
import actions.PageActions._
import com.debiki.core._
import com.debiki.core.Prelude._
import debiki._
import play.api.mvc
import DebikiHttp._
import Utils.{OkHtml, OkXml}
/** Serves the per-website terms-of-use and privacy-policy pages.
  *
  * Both pages are currently hardcoded; a later version should honor per-site
  * settings (e.g. `apiReq.siteSettings.termsOfUseUrl`) and redirect to any
  * custom page the site has configured instead of rendering the default one.
  */
object LegalController extends mvc.Controller {

  /** Renders the default terms-of-use page (no per-site customization yet). */
  def viewTermsOfUsePage() = GetAction { apiReq =>
    val pageBody = views.html.legal.termsOfUse(SiteTpi(apiReq)).body
    Ok(pageBody).as(HTML)
  }

  /** Renders the default privacy-policy page (no per-site customization yet). */
  def viewPrivacyPolicyPage() = GetAction { apiReq =>
    val pageBody = views.html.legal.privacyPolicy(SiteTpi(apiReq)).body
    Ok(pageBody).as(HTML)
  }
}
| debiki/debiki-server-old | app/controllers/LegalController.scala | Scala | agpl-3.0 | 1,794 |
package debop4s.data.slick3.tests
import debop4s.data.slick3.SlickContext
import debop4s.data.slick3.AbstractSlickFunSuite
import debop4s.data.slick3.TestDatabase._
import debop4s.data.slick3.TestDatabase.driver.api._
/**
 * Exercises miscellaneous relational-profile features of Slick 3: boolean
 * predicates, LIKE patterns (incl. escape characters), a sorting extension
 * method, CASE expressions, column casts, Option conversions, and the
 * initialization-order error reporting of column mappers.
 *
 * @author sunghyouk.bae@gmail.com
 */
class RelationalMiscFunSuite extends AbstractSlickFunSuite {

  test("is not and or") {
    class T(tag: Tag) extends Table[(String, String)](tag, "users") {
      def a = column[String]("a")
      def b = column[String]("b")
      def * = (a, b)
    }
    lazy val ts = TableQuery[T]

    // (Re)create the table and seed three rows.
    commit {
      DBIO.seq(ts.schema.drop.asTry,
        ts.schema.create,
        ts ++= Seq(("1", "a"), ("2", "a"), ("3", "b"))
      )
    }

    val q1 = for (t <- ts if t.a === "1" || t.a === "2") yield t
    readonly { q1.to[Set].result } shouldEqual Set(("1", "a"), ("2", "a"))

    val q2 = for (t <- ts if (t.a =!= "1") || (t.b =!= "a")) yield t
    readonly { q2.to[Set].result } shouldEqual Set(("2", "a"), ("3", "b"))

    // No need to test that the unexpected result is actually unexpected
    // now that the compiler prints a warning about it

    val q4 = for (t <- ts if t.a =!= "1" || t.b =!= "a") yield t
    readonly { q4.to[Set].result } shouldEqual Set(("2", "a"), ("3", "b"))

    commit { ts.schema.drop }
  }

  test("like") {
    class T1(tag: Tag) extends Table[String](tag, "t1_2") {
      def a = column[String]("a")
      def * = a
    }
    lazy val t1s = TableQuery[T1]

    commit {
      DBIO.seq(t1s.schema.drop.asTry,
        t1s.schema.create,
        t1s ++= Seq("foo", "bar", "foobar", "foo%")
      )
    }

    // Exact-text LIKE (no wildcard characters).
    val q1 = for {t1 <- t1s if t1.a like "foo"} yield t1.a
    readonly(q1.result) shouldEqual Seq("foo")

    // '%' matches any suffix, including the literal-'%' row.
    val q2 = for {t1 <- t1s if t1.a like "foo%"} yield t1.a
    readonly(q2.result) shouldEqual Seq("foo", "foobar", "foo%")

    readonly {
      // Escaped wildcard — only run on databases that support LIKE ... ESCAPE.
      ifCap(rcap.likeEscape) {
        val q3 = for {t1 <- t1s if t1.a.like("foo^%", '^')} yield t1.a
        q3.result.map(_ shouldEqual Seq("foo%"))
      }
    }

    commit { t1s.schema.drop }
  }

  test("sorting") {
    import slick.lifted.{Shape, ShapeLevel, Ordered}

    class T1(tag: Tag) extends Table[(String, String, String)](tag, "t1_3") {
      def a = column[String]("a")
      def b = column[String]("b")
      def c = column[String]("c")
      def * = (a, b, c)
    }
    lazy val ts = TableQuery[T1]

    // Extension method: sort a (key, value) query by key, then keep only values.
    implicit class TupledQueryExtensionMethods[E1, E2, U1, U2, C[_]](q: Query[(E1, E2), (U1, U2), C]) {
      def sortedValues(implicit ordered: (E1 => Ordered),
                       shape: Shape[FlatShapeLevel, E2, U2, E2]): Query[E2, U2, C] =
        q.sortBy(_._1).map(_._2)
    }

    commit {
      DBIO.seq(ts.schema.drop.asTry,
        ts.schema.create,
        ts ++= Seq(("a2", "b2", "c2"), ("a1", "b1", "c1"))
      )
    }

    val q1 = (
      for {
        t1 <- ts
      } yield t1.c ->(t1.a, t1.b)
      ).sortedValues
    // val q1 = ts.map(x => (x.c, (x.a, x.b))).sortBy(_._1).map(_._2)

    readonly { q1.result } shouldEqual Seq(("a1", "b1"), ("a2", "b2"))

    commit { ts.schema.drop }
  }

  test("conditional") {
    class T1(tag: Tag) extends Table[Int](tag, "conditional_t") {
      def a = column[Int]("a")
      def * = a
    }
    val ts = TableQuery[T1]

    commit {
      ts.schema.drop.asTry >>
      ts.schema.create >>
      (ts ++= Seq(1, 2, 3, 4))
    }

    // CASE with an ELSE branch: non-matching rows map to 0.
    val q1 = ts.map { t => (t.a, Case If (t.a < 3) Then 1 Else 0) }
    readonly { q1.to[Set].result } shouldEqual Set((1, 1), (2, 1), (3, 0), (4, 0))

    // CASE without an ELSE branch: non-matching rows map to None.
    val q2 = ts.map { t => (t.a, Case If (t.a < 3) Then 1) }
    readonly { q2.to[Set].result } shouldEqual Set((1, Some(1)), (2, Some(1)), (3, None), (4, None))

    // Multiple WHEN branches; the first matching condition wins.
    val q3 = ts.map { t => (t.a, Case If (t.a < 3) Then 1 If (t.a < 4) Then 2 Else 0) }
    readonly { q3.to[Set].result } shouldEqual Set((1, 1), (2, 1), (3, 2), (4, 0))

    commit { ts.schema.drop }
  }

  test("cast") {
    class T1(tag: Tag) extends Table[(String, Int)](tag, "cast_t") {
      def a = column[String]("a")
      def b = column[Int]("b")
      def * = (a, b)
    }
    val ts = TableQuery[T1]

    commit {
      ts.schema.drop.asTry >>
      ts.schema.create >>
      (ts ++= Seq(("foo", 1), ("bar", 2)))
    }

    // HINT: on MariaDB a VARCHAR(255) cast does not work either; the target type must be CHAR(255).
    val q1 = ts.map(t => t.a ++ (if (SlickContext.isMySQL) t.b.asColumnOfType[String]("CHAR(255)") else t.b.asColumnOf[String]))
    readonly { q1.to[Set].result } shouldEqual Set("foo1", "bar2")

    commit { ts.schema.drop }
  }

  test("option conversions") {
    class T1(tag: Tag) extends Table[(Int, Option[Int])](tag, "t1_optconv") {
      def a = column[Int]("a")
      def b = column[Option[Int]]("b")
      def * = (a, b)
    }
    val ts = TableQuery[T1]

    commit {
      ts.schema.drop.asTry >>
      ts.schema.create >>
      (ts ++= Seq((1, Some(10)), (2, None)))
    }

    // GetOrElse in ResultSetMapping on client side (is it, though???)
    val q1 = ts.map(t => (t.a, t.b.getOrElse(0)))

    // GetOrElse in query on the DB side
    val q2 = ts.map(t => (t.a, t.b.getOrElse(0) + 1))

    readonly {
      q1.to[Set].result.map { _ shouldEqual Set((1, 10), (2, 0)) } >>
      q2.to[Set].result.map { _ shouldEqual Set((1, 11), (2, 1)) }
    }

    commit { ts.schema.drop }
  }

  test("init errors") {
    case class Id(toInt: Int)
    case class Customer(id: Id)

    // Before making `shipped` and `toNode` in `TableQuery` lazy,
    // putting `Tables` before `A` caused a StackOverflowException
    object Tables {
      val as = TableQuery[A]
      implicit val idMapper = MappedColumnType.base[Id, Int](_.toInt, Id)
    }
    class A(tag: Tag) extends Table[Customer](tag, "init_a") {
      def id = column[Id]("id", O.PrimaryKey, O.AutoInc)(Tables.idMapper)
      import Tables.idMapper
      def * = id <>(Customer.apply, Customer.unapply)
    }
    Tables.as.schema

    // A deliberately-null column mapper must be reported as an initialization-order error.
    case class Id2(toInt: Int)
    implicit val id2Mapper = null.asInstanceOf[BaseColumnType[Id2]]
    class B(tag: Tag) extends Table[Id2](tag, "INIT_A") {
      def id = column[Id2]("ID", O.PrimaryKey, O.AutoInc)
      def * = id
    }
    val bs = TableQuery[B]
    try {
      bs.map(_.id)
      bs.schema
      // Reaching here means the expected NPE was not thrown — fail the test.
      ???
    } catch {
      case t: NullPointerException if (t.getMessage ne null) && (t.getMessage contains "initialization order") =>
      // This is the expected error message from RelationalTableComponent.Table.column
    }
    try {
      MappedColumnType.base[Id, Int](_.toInt, Id)(implicitly, null.asInstanceOf[BaseColumnType[Int]])
      // Reaching here means the expected NPE was not thrown — fail the test.
      ???
    } catch {
      case t: NullPointerException if (t.getMessage ne null) && (t.getMessage contains "initialization order") =>
      // This is the expected error message from RelationalTypesComponent.MappedColumnTypeFactory.assertNonNullType
    }
  }
}
| debop/debop4s | debop4s-data-slick3/src/test/scala/debop4s/data/slick3/tests/RelationalMiscFunSuite.scala | Scala | apache-2.0 | 6,920 |
package edu.msu.mi.knevol.universe
import akka.actor.{ActorRef, Actor}
import akka.pattern.ask
import akka.util.Timeout
import scala.collection.mutable.ListBuffer
import scala.collection.{mutable, BitSet}
import scala.concurrent.Future
import concurrent.duration._
import scala.util.Random
/**
* Created by josh on 10/1/15.
*/
/**
 * Actor wrapping a single boolean state node. It is wired to the actors
 * backing its input neighbors, recomputes its next state from their current
 * states on `Update`, and commits that value on `Advance`.
 */
class StateVar(node:Node) extends Actor {
  implicit val timeout = Timeout(5 seconds)
  import context.dispatcher

  // Actors backing this node's inputs; expected to mirror node.neighbors in order.
  var neighbors:Array[ActorRef] = Array.empty

  override def receive: Receive = {
    case Wire(actors:Seq[ActorRef]) =>
      neighbors = actors.toArray
      if (neighbors.length != node.neighbors.length) {
        println ("Problem on creation!")
      }
      sender ! "OK"

    case SetState(state: Boolean) =>
      node.currentState = Some(state)
      node.calculatedState = None
      sender ! "OK"

    case Advance =>
      // Commit the previously calculated state as the current state.
      node.currentState = Some(node.calculatedState.get)
      sender ! "OK"

    case Update =>
      // Capture the sender before the future completes on another thread.
      val s = sender()
      val f = Future.sequence(for (a<-neighbors.toList) yield a? StateRequest )
      f.foreach { results =>
        // Encode the neighbors' boolean states as a bit set indexed by neighbor position.
        val result = mutable.BitSet()
        for ((bit:Boolean, count) <- results.zipWithIndex if bit) {
          result += count
        }
        node.calculate(BitSet()++result)
        s ! node.calculatedState.get
      }
      // A neighbor ask timeout would otherwise vanish silently and leave the asker hanging.
      f.failed.foreach(t => println(s"$self state update failed: ${t.getMessage}"))

    case StateRequest =>
      sender ! node.currentState.get
  }
}
| jintrone/KnEvol | src/main/scala-2.12/edu/msu/mi/knevol/universe/StateVar.scala | Scala | mit | 1,558 |
package sds.classfile.attribute
/** Marker attribute flagging a class-file member as compiler-generated. */
class Synthetic extends AttributeInfo {
  override def toString: String = "Synthetic"
}
package com.raquo.dombuilder.jsdom.simple
import com.raquo.domtypes
import org.scalajs.dom
/** Builds `SimpleSvgTag` instances for the generic SVG tag definitions. */
trait SimpleSvgTagBuilder extends domtypes.generic.builders.SvgTagBuilder[SimpleSvgTag, dom.svg.Element] {

  /** Creates a tag object for `tagName`; `void` marks tags that take no children. */
  override def svgTag[Ref <: dom.svg.Element](tagName: String, void: Boolean): SimpleSvgTag[Ref] =
    new SimpleSvgTag[Ref](tagName, void)
}
| raquo/scala-dom-builder | js/src/main/scala/com/raquo/dombuilder/jsdom/simple/SimpleSvgTagBuilder.scala | Scala | mit | 344 |
/*
* Copyright 2014 OSSCube UK.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.osscube.spark.aerospike.rdd
import com.aerospike.client.cluster.Node
import com.aerospike.client.query.{Filter, RecordSet, Statement}
import com.aerospike.client.{AerospikeClient, Value}
import com.osscube.spark.aerospike._
import org.apache.spark._
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql._
import org.apache.spark.sql.types.StructType
import scala.collection.JavaConverters._
/**
 * Spark RDD over an Aerospike set. Each partition queries exactly one
 * Aerospike node; an optional secondary-index filter narrows the scan, and
 * when `attrs` is non-empty a server-side Lua UDF
 * ("spark_filters.multifilter") applies the remaining predicates.
 */
class AerospikeRDD(
                    @transient override val sc: SparkContext,
                    @transient override val aerospikeHosts: Array[Node],
                    val namespace: String,
                    val set: String,
                    val bins: Seq[String],
                    val filterType: AeroFilterType,
                    val filterBin: String,
                    val filterStringVal: String,
                    @transient override val filterVals: Seq[(Long, Long)],
                    val attrs: Seq[(SparkFilterType, String, String, Seq[(Long, Long)])] = Seq(),
                    val sch: StructType = null,
                    useUdfWithoutIndexQuery: Boolean = false
                  ) extends BaseAerospikeRDD(sc, aerospikeHosts, filterVals) {
  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[Row] = {
    val partition: AerospikePartition = split.asInstanceOf[AerospikePartition]

    // Build the statement for this partition's slice of the query.
    val stmt = new Statement()
    stmt.setNamespace(namespace)
    stmt.setSetName(set)
    stmt.setBinNames(bins: _*)

    // Translate the (single) index filter; Long/Range filters use this partition's range.
    val aeroFilter: Option[Filter] = filterType match {
      case FilterNone => None
      case FilterString => Some(Filter.equal(filterBin, filterStringVal))
      case FilterLong => Some(Filter.equal(filterBin, partition.startRange))
      case FilterRange => Some(Filter.range(filterBin, partition.startRange, partition.endRange))
    }
    aeroFilter map (f => stmt.setFilters(f))

    // The UDF handles predicates the index filter cannot; it is only attached when
    // there are extra predicates and either an index query exists or it is forced.
    val useUDF = attrs.length > 0 && (useUdfWithoutIndexQuery || aeroFilter.isDefined)
    if (useUDF) {
      // First UDF argument is the bin list; each following argument encodes one
      // predicate as [opCode, binName, operands...] (1=eq-string, 2=eq-long, 3=range, 4=in).
      var udfFilters: Array[Value] = Array(Value.get(bins.asJava))
      attrs.foreach {
        case (FilterString, s, stringVal, Seq((_, _))) => udfFilters = udfFilters :+ Value.get(Array(Value.get(1), Value.get(s), Value.get(stringVal)))
        case (FilterLong, s, _, Seq((longLower, _))) => udfFilters = udfFilters :+ Value.get(Array(Value.get(2), Value.get(s), Value.get(longLower)))
        case (FilterRange, s, _, Seq((longLower, longUpper))) => udfFilters = udfFilters :+ Value.get(Array(Value.get(3), Value.get(s), Value.get(longLower), Value.get(longUpper)))
        case (FilterIn, s, stringVal, Seq((longLower, _))) =>
          // IN-list values arrive packed into one string, separated by single quotes;
          // longLower == 0 marks a string-typed list, otherwise values are parsed as Long.
          udfFilters =
            if (longLower == 0L)
              udfFilters :+ Value.get(Array(Value.get(4), Value.get(s)) ++ stringVal.split("'").map(Value.get))
            else
              udfFilters :+ Value.get(Array(Value.get(4), Value.get(s)) ++ stringVal.split("'").map(_.toLong).map(Value.get))
      }
      stmt.setAggregateFunction("spark_filters", "multifilter", udfFilters: _*)
      println("UDF Filters applied: " + udfFilters.mkString(","))
    }

    // Query only the single node this partition was assigned to.
    val endpoint = partition.endpoint
    logInfo("RDD: " + split.index + ", Connecting to: " + endpoint._1)
    val client = new AerospikeClient(null, endpoint._1, endpoint._2)
    val res: RecordSet = client.queryNode(null, stmt, client.getNode(endpoint._3))

    // Release the record set and the client when the task ends.
    context.addTaskCompletionListener(context => {
      res.close();
      client.close()
    })
    res.iterator.asScala.map { rs =>
      if (!useUDF) {
        Row.fromSeq(bins.map(rs.record.bins.get(_)))
      }
      else {
        // UDF results come back wrapped in a "SUCCESS" bin keyed by 1-based bin position.
        // NOTE(review): the generic arguments in this pattern are erased — only the
        // HashMap class is checked at runtime; key/value casts happen lazily below.
        rs.record.bins.get("SUCCESS") match {
          case m: java.util.HashMap[Long, Any] =>
            Row.fromSeq(m.asScala.map { f =>
              if (checkType(f._1))
                f._2
              else
                // Integer-typed columns arrive as Long from the UDF; narrow them.
                f._2.asInstanceOf[java.lang.Long].intValue
            }.toSeq
            )
          case _ =>
            println("useUDF: " + useUDF)
            println("UDF params: " + attrs.mkString("-"))
            throw new Exception(rs.toString)
        }
      }
    }
  }

  /** True unless the bin at 1-based `position` is declared as "integer" in the schema. */
  def checkType(position: Long): Boolean = {
    val binName = bins(position.toInt - 1)
    sch(binName).dataType.typeName != "integer"
  }
}
/** Deprecated facade kept for source compatibility; all parsing now lives in `AqlParser`. */
@deprecated("use AqlParser instead")
object AerospikeRDD {

  /** Delegates to `AqlParser.removeDoubleSpaces`. */
  def removeDoubleSpaces(s: String): String = AqlParser.removeDoubleSpaces(s)

  /**
   * Parses an AQL SELECT statement via `AqlParser`.
   *
   * @param asql_statement ASQL statement to parse, select only
   * @param numPartitionsPerServerForRange number of partitions per Aerospike node
   * @return (namespace, set, bins, filterType, filterBin, filterVals, filterStringVal)
   */
  def parseSelect(asql_statement: String, numPartitionsPerServerForRange: Int): (String, String, Seq[String], FilterType, String, Seq[(Long, Long)], String) = {
    val parsed = AqlParser.parseSelect(asql_statement, numPartitionsPerServerForRange)
    parsed.toArray()
  }
}
/** Holds the components of a parsed AQL SELECT statement. */
class QueryParams(namespace: String, set: String, bins: Array[String],
                  filterType: AeroFilterType, filterBin: String, filterVals: Seq[(Long, Long)],
                  filterStringVal: String) {

  /** Flattens this holder into the legacy tuple representation. */
  @deprecated("use the object")
  def toArray(): (String, String, Seq[String], AeroFilterType, String, Seq[(Long, Long)], String) =
    (namespace, set, bins, filterType, filterBin, filterVals, filterStringVal)
}
| sasha-polev/aerospark | src/main/scala/com/osscube/spark/aerospike/rdd/AerospikeRDD.scala | Scala | apache-2.0 | 5,905 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.execution.datasources.DataSource
/** Verifies that data source short names and legacy class names resolve to the right providers. */
class ResolvedDataSourceSuite extends SparkFunSuite {

  private def getProvidingClass(name: String): Class[_] =
    DataSource(sparkSession = null, className = name).providingClass

  /** Asserts that every alias in `aliases` resolves to `expected`. */
  private def assertProvider(expected: Class[_], aliases: String*): Unit = {
    for (alias <- aliases) {
      assert(getProvidingClass(alias) === expected)
    }
  }

  test("jdbc") {
    assertProvider(
      classOf[org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider],
      "jdbc",
      "org.apache.spark.sql.execution.datasources.jdbc",
      "org.apache.spark.sql.jdbc")
  }

  test("json") {
    assertProvider(
      classOf[org.apache.spark.sql.execution.datasources.json.JsonFileFormat],
      "json",
      "org.apache.spark.sql.execution.datasources.json",
      "org.apache.spark.sql.json")
  }

  test("parquet") {
    assertProvider(
      classOf[org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat],
      "parquet",
      "org.apache.spark.sql.execution.datasources.parquet",
      "org.apache.spark.sql.parquet")
  }

  test("csv") {
    assertProvider(
      classOf[org.apache.spark.sql.execution.datasources.csv.CSVFileFormat],
      "csv",
      "com.databricks.spark.csv")
  }

  test("error message for unknown data sources") {
    val avroError = intercept[AnalysisException] {
      getProvidingClass("avro")
    }
    assert(avroError.getMessage.contains("Failed to find data source: avro."))

    val databricksAvroError = intercept[AnalysisException] {
      getProvidingClass("com.databricks.spark.avro")
    }
    assert(databricksAvroError.getMessage.contains(
      "Failed to find data source: com.databricks.spark.avro."))

    val unknownError = intercept[ClassNotFoundException] {
      getProvidingClass("asfdwefasdfasdf")
    }
    assert(unknownError.getMessage.contains("Failed to find data source: asfdwefasdfasdf."))
  }
}
| u2009cf/spark-radar | sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala | Scala | apache-2.0 | 3,520 |
package com.twitter.finagle.netty4.ssl.server
import com.twitter.finagle.netty4.ssl.Netty4SslConfigurations
import com.twitter.finagle.ssl.{
ApplicationProtocols,
Engine,
KeyCredentials,
SslConfigurationException
}
import com.twitter.finagle.ssl.server.{SslServerConfiguration, SslServerEngineFactory}
import com.twitter.util.Return
import com.twitter.util.security.{PrivateKeyFile, X509CertificateFile}
import io.netty.buffer.ByteBufAllocator
import io.netty.handler.ssl.{SslContext, SslContextBuilder}
import io.netty.handler.ssl.ApplicationProtocolConfig.Protocol
/**
 * Convenience functions for setting values on a Netty `SslContextBuilder`
 * which are applicable to server configurations and engines.
 */
private[finagle] object Netty4ServerSslConfigurations {

  /**
   * Configures the application protocols of the `SslContextBuilder`. This
   * method mutates the `SslContextBuilder`, and returns it as the result.
   *
   * @note This sets which application level protocol negotiation to
   * use NPN and ALPN.
   *
   * @note This also sets the `SelectorFailureBehavior` to NO_ADVERTISE,
   * and the `SelectedListenerFailureBehavior` to ACCEPT as those are the
   * only modes supported by both JDK and Native engines.
   */
  private def configureServerApplicationProtocols(
    builder: SslContextBuilder,
    applicationProtocols: ApplicationProtocols
  ): SslContextBuilder =
    Netty4SslConfigurations.configureApplicationProtocols(
      builder,
      applicationProtocols,
      Protocol.NPN_AND_ALPN
    )

  /**
   * Creates an `SslContextBuilder` for a server with the supplied `KeyCredentials`.
   *
   * @note `KeyCredentials` must be specified, using `Unspecified` is not supported.
   * @note An `SslConfigurationException` will be thrown if there is an issue loading
   * the certificate(s) or private key.
   *
   * @note Will not validate the validity for certificates when configured
   * with [[KeyCredentials.KeyManagerFactory]] in contrast to when
   * configured with [[KeyCredentials.CertAndKey]], [[KeyCredentials.CertsAndKey]],
   * or [[KeyCredentials.CertKeyAndChain]].
   */
  private def startServerWithKey(keyCredentials: KeyCredentials): SslContextBuilder = {
    // Each arm yields a Try[SslContextBuilder]; file-read failures are surfaced below.
    val builder = keyCredentials match {
      case KeyCredentials.Unspecified =>
        throw SslConfigurationException.notSupported(
          "KeyCredentials.Unspecified",
          "Netty4ServerEngineFactory"
        )
      case KeyCredentials.CertAndKey(certFile, keyFile) =>
        for {
          key <- new PrivateKeyFile(keyFile).readPrivateKey()
          cert <- new X509CertificateFile(certFile).readX509Certificate()
        } yield SslContextBuilder.forServer(key, cert)
      case KeyCredentials.CertsAndKey(certsFile, keyFile) =>
        for {
          key <- new PrivateKeyFile(keyFile).readPrivateKey()
          certs <- new X509CertificateFile(certsFile).readX509Certificates()
        } yield SslContextBuilder.forServer(key, certs: _*)
      case KeyCredentials.CertKeyAndChain(certFile, keyFile, chainFile) =>
        for {
          key <- new PrivateKeyFile(keyFile).readPrivateKey()
          cert <- new X509CertificateFile(certFile).readX509Certificate()
          chain <- new X509CertificateFile(chainFile).readX509Certificates()
          // Leaf certificate first, followed by the rest of the chain.
        } yield SslContextBuilder.forServer(key, cert +: chain: _*)
      case KeyCredentials.KeyManagerFactory(keyManagerFactory) =>
        Return(SslContextBuilder.forServer(keyManagerFactory))
    }
    // NOTE(review): unwrapTryContextBuilder presumably rethrows Try failures as
    // SslConfigurationException — see Netty4SslConfigurations; confirm if relying on it.
    Netty4SslConfigurations.unwrapTryContextBuilder(builder)
  }

  /**
   * Creates an `SslContext` based on the supplied `SslServerConfiguration`. This method uses
   * the `KeyCredentials`, `TrustCredentials`, and `ApplicationProtocols` from the provided
   * configuration, and forces the JDK provider if forceJdk is true.
   */
  def createServerContext(config: SslServerConfiguration, forceJdk: Boolean): SslContext = {
    // Apply settings in stages: key material, provider, trust, then protocols.
    val builder = startServerWithKey(config.keyCredentials)
    val withProvider = Netty4SslConfigurations.configureProvider(builder, forceJdk)
    val withTrust = Netty4SslConfigurations.configureTrust(withProvider, config.trustCredentials)
    val withAppProtocols = Netty4ServerSslConfigurations.configureServerApplicationProtocols(
      withTrust,
      config.applicationProtocols
    )
    withAppProtocols.build()
  }

  /**
   * Creates an `Engine` based on the supplied `SslContext` and `ByteBufAllocator`, and then
   * configures the underlying `SSLEngine` based on the supplied `SslServerConfiguration`.
   */
  def createServerEngine(
    config: SslServerConfiguration,
    context: SslContext,
    allocator: ByteBufAllocator
  ): Engine = {
    val engine = new Engine(context.newEngine(allocator))
    SslServerEngineFactory.configureEngine(engine, config)
    engine
  }
}
| luciferous/finagle | finagle-netty4/src/main/scala/com/twitter/finagle/netty4/ssl/server/Netty4ServerSslConfigurations.scala | Scala | apache-2.0 | 4,821 |
package io.surfkit.client
import io.surfkit.client.Chat.{ChatState, ChatEvents}
import io.surfkit.client.Friends._
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react._
import scala.scalajs.js.JSON
/**
* Created by suroot on 04/06/15.
*/
/** Marker trait for the chat feature (no members yet). */
trait Chat{

}
/**
 * scalajs-react components for the chat UI: a single entry, the recent-chat
 * list, one chat window (header, entries, input controls) and the window
 * container. State and callbacks are passed in via [[ChatState]]/[[ChatEvents]].
 */
object Chat{

  // Callback signatures shared by the chat components.
  type ChatToUnit = (io.surfkit.model.Chat.Chat) => Unit
  type SendChatMessage = (io.surfkit.model.Chat.Chat, String) => Unit
  type ChatMessageChange = (io.surfkit.model.Chat.Chat, String) => Unit
  type ChatCloseEvent = ChatToUnit
  type ChatShowAddFriends = ChatToUnit
  type ChatSelect = ChatToUnit

  /** Event handlers a chat window needs from its owner. */
  case class ChatEvents(onMessageChange:ChatMessageChange, onSendMessage:SendChatMessage, onChatClose:ChatCloseEvent, onShowAddFriends:ChatShowAddFriends)

  /** Full render state of one chat window; `ui` is a string of UI flags matched with `contains`. */
  case class ChatState(chat:io.surfkit.model.Chat.Chat, events:ChatEvents, ui:String, msg:String, friendState:FriendsState)

  object ChatUI{
    // NOTE(review): the trailing space looks intentional (flags are concatenated into
    // the `ui` string and tested with `contains`) — TODO confirm.
    final val ShowAddFriend = "addFriends "
  }

  /** Renders a single chat entry: avatar, author name, local timestamp and message text. */
  val ChatEntry= ReactComponentB[(io.surfkit.model.Chat.ChatEntry)]("ChatEntry")
    .render(props => {
      val (entry) = props
      // The entry payload is raw JSON; `msg` carries the message text.
      val dyn = JSON.parse(entry.json)
      val date = new scala.scalajs.js.Date()
      date.setTime(entry.timestamp.toDouble)
      <.div(^.className:="entry",
        <.div(
          <.img(^.className:="avatar",^.src:=entry.from.avatarUrl)
        ),
        <.div(
          <.span(^.className:="uname",entry.from.fullName),
          <.span(^.className:="time",date.toLocaleString),
          <.span(dyn.msg.toString)
        )
      )
    })
    .build

  /** Lists the latest entry of every non-empty chat; clicking one selects that chat. */
  val RecentChatList = ReactComponentB[(Seq[io.surfkit.model.Chat.Chat], ChatSelect)]("RecentChatList")
    .render(props => {
      val (chats, onChatSelect) = props
      <.div(^.className:="chat-recent",
        chats.filter(_.entries.length > 0).map{
          c =>
            <.div(^.onClick --> onChatSelect(c),
              ChatEntry( (c.entries.head) )
            )
        }
      )
    })
    .build

  // Fixed copy-paste bug: display name was "ChatEntry".
  /** Message input row: text field plus the send button. */
  val ChatControls= ReactComponentB[(ChatState)]("ChatControls")
    .render(props => {
      val (chatState) = props
      <.div(^.className:="cntrls input-group",
        <.span(^.className:="fa fa-ellipsis-v input-group-addon"),
        <.input(^.`type`:="text", ^.className := "form-control", ^.placeholder := "type message", ^.onChange ==> ((e:ReactEventI) => {chatState.events.onMessageChange(chatState.chat,e.target.value)}), ^.value := chatState.msg ),
        <.span(^.className:="fa fa-paper-plane input-group-addon",^.onClick --> chatState.events.onSendMessage(chatState.chat,chatState.msg))
      )
    })
    .build

  /** Scrollable list of a chat's entries; keeps the view pinned to the bottom on update. */
  val ChatEntryList = ReactComponentB[(ChatState)]("ChatEntryList")
    .render(props => {
      val (chatState) = props
      // TODO: only want loading when user scrolls to top..
      // TODO: need to fetch more entries
      <.div(^.className:="entries",
        <.div(^.className:="loading",
          <.i(^.className:="fa fa-circle-o-notch fa-spin")
        ),
        chatState.chat.entries.map(e => ChatEntry( (e) ))
      )
    }).componentWillUpdate( (self, prevProps, prevState) =>{
      val node = self.getDOMNode()
      val shouldScroll = (node.scrollTop + node.offsetHeight) == node.scrollHeight
      //self.modState() TODO: see if we can store the shouldScroll ?
    }).componentDidUpdate( (self, prevProps, prevState) =>{
      val node = self.getDOMNode()
      node.scrollTop = node.scrollHeight
    })
    .build

  /** One chat window: member header, optional friend selector, entry list and controls. */
  val Chat = ReactComponentB[(ChatState)]("Chat")
    .render(props => {
      val (chatState) = props
      // Header shows at most the first three member names.
      val names = chatState.chat.members.take(3).map(_.fullName).mkString(",")
      <.div(^.className:="chat",
        <.header(names,
          <.i(^.className:="fa fa-user-plus",^.onClick --> chatState.events.onShowAddFriends(chatState.chat) ),
          <.i(^.className:="fa fa-close",^.onClick --> chatState.events.onChatClose(chatState.chat) )
        ),
        if (chatState.ui.contains(ChatUI.ShowAddFriend))FriendSelector( (chatState.friendState, chatState.chat.members.toSet) ) else <.div() ,
        ChatEntryList( (chatState) ),
        ChatControls( (chatState) )
      )
    })
    .build

  /** Container rendering all open chat windows. */
  val Chats = ReactComponentB[(Seq[ChatState])]("Chats")
    .render(props => {
      val (chats) = props
      <.div(^.className:="chats",
        chats.map(c => Chat( (c) ))
      )
    })
    .build
}
| coreyauger/surfkit | client/src/main/scala/Chat.scala | Scala | mit | 4,334 |
package akka.persistence.pg.event
import java.time.OffsetDateTime
/** Mixed into persisted events that carry the moment they were created. */
trait Created {

  /** Creation timestamp of the event, including the timezone offset. */
  def created: OffsetDateTime
}
| kwark/akka-persistence-postgresql | modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/Created.scala | Scala | mit | 118 |
import sbt._
import Keys._
import Dependencies._
/**
 * Auto-enabled sbt plugin that adds the standard test-framework dependencies,
 * gated behind the `includeTestDependencies` setting (on by default).
 */
object NightlyPlugin extends AutoPlugin {
  override def trigger = allRequirements
  override def requires = plugins.JvmPlugin

  object autoImport {
    val includeTestDependencies = settingKey[Boolean]("Doesn't declare test dependencies.")

    /** Appends the test-scoped framework dependencies unless they are disabled. */
    def testDependencies = libraryDependencies ++= {
      if (includeTestDependencies.value)
        Seq(scalacheck, junit, scalatest, scalaVerify, hedgehog).map(_ % Test)
      else
        Nil
    }
  }
  import autoImport._

  override def buildSettings: Seq[Setting[_]] = Seq(
    includeTestDependencies := true
  )

  override def projectSettings: Seq[Setting[_]] = Seq(
    resolvers += Resolver.typesafeIvyRepo("releases").withName("typesafe-alt-project-releases")
  )
}
| sbt/sbt | project/NightlyPlugin.scala | Scala | apache-2.0 | 848 |
package com.mesosphere.cosmos.thirdparty.marathon.model
import io.lemonlabs.uri.dsl._
import org.scalatest.FreeSpec
/** Checks that relative and absolute Marathon app ids normalize to the same value. */
final class AppIdSpec extends FreeSpec {

  private[this] val relative: String = "cassandra/dcos"
  private[this] val absolute: String = s"/$relative"

  "AppId should" - {
    "consistently render" - {
      "absolute" in {
        assert(AppId(absolute).toString == absolute)
      }
      "relative" in {
        assert(AppId(relative).toString == absolute)
      }
    }
    "consistently construct" - {
      "absolute" in {
        assert(AppId(absolute) == AppId(absolute))
        assert(AppId(absolute).hashCode == AppId(absolute).hashCode)
      }
      "relative" in {
        assert(AppId(relative) == AppId(absolute))
        assert(AppId(relative).hashCode == AppId(absolute).hashCode)
      }
    }
    "generate uri" - {
      "absolute" in {
        assert(AppId(absolute).toUri == ("/cassandra" / "dcos"))
      }
      "relative" in {
        assert(AppId(relative).toUri == ("/cassandra" / "dcos"))
      }
    }
  }
}
| dcos/cosmos | cosmos-test-common/src/test/scala/com/mesosphere/cosmos/thirdparty/marathon/model/AppIdSpec.scala | Scala | apache-2.0 | 1,089 |
package test
import io.circe._
import io.circe.parser._
import io.circe.syntax._
import scoutagent._
import scoutagent.State._
import scoutagent.controller._
import environment._
import environment.anomaly._
import environment.element._
import environment.element.seed._
import environment.terrainmodification._
import environment.EnvironmentBuilder._
import test._
import operation._
import scoututil.Util._
import jsonhandler.Encoder._
import jsonhandler.Decoder._
import filemanager.FileManager._
import bestweights._
import weighttuning.WeightTuning._
import scala.collection.mutable.{Map => MutableMap}
import scala.collection.mutable.{ArrayBuffer => AB}
object MapWaterTesting {
def main(args: Array[String]): Unit = {
// Testing Setup
val testTemplate = "EASY"
val numEnvironments = 200
val numTestsPerEnvironment = 5
val memoryFileName = "MapWater"
val weightsSet = BestWeights.hybridLongRun
val agentSensors = List(
new ElevationSensor(false),
new WaterSensor(true))
val testEnvironments: Map[String,Int] = Map()
val testTemplates = Map(
testTemplate -> (numEnvironments, numTestsPerEnvironment)
)
val goalTemplate = new MapElementsTemplate(List("Water Depth"), None)
val controllers = Map(
"Random" -> new RandomController(),
"Heuristic" -> new FindHumanController(),
"SCOUt" -> new SCOUtController(memoryFileName, "json", false, weightsSet))
// Test Controllers
val testingSuite = new Test(
testEnvironments = testEnvironments,
testTemplates = testTemplates,
controllers = controllers,
sensors = agentSensors,
goalTemplate = goalTemplate,
maxActions = None,
verbose = true
)
testingSuite.run
// Collect and Save Test Metrics
// Random
val rResults: TestMetric = testingSuite.testMetrics("Random")
// Heuristic
val hResults: TestMetric = testingSuite.testMetrics("Heuristic")
// SCOUt
val sResults: TestMetric = testingSuite.testMetrics("SCOUt")
val jsonData = Json.obj(
(testTemplate, Json.obj(
("Number of Tests", Json.fromInt(numEnvironments * numTestsPerEnvironment)),
("Number of Environments Generated", Json.fromInt(numEnvironments)),
("Number of Tests Per Environment", Json.fromInt(numTestsPerEnvironment)),
("Random", Json.obj(
("Test Results", Json.obj(
("Goal Completion", Json.fromValues(rResults.runs.map(r => Json.fromDoubleOrNull(r.goalCompletion)))),
("Steps", Json.fromValues(rResults.runs.map(r => Json.fromInt(r.steps)))),
("Remaining Health", Json.fromValues(rResults.runs.map(r => Json.fromDoubleOrNull(r.remainingHealth)))),
("Remaining Energy", Json.fromValues(rResults.runs.map(r => Json.fromDoubleOrNull(r.remainingEnergy))))
)),
("Average Goal Completion", Json.fromDoubleOrNull(rResults.avgGoalCompletion)),
("Average Steps", Json.fromInt(rResults.avgActions)),
("Average Remaining Health", Json.fromDoubleOrNull(rResults.avgRemainingHealth)),
("Average Remaining Energy", Json.fromDoubleOrNull(rResults.avgRemainingEnergy))
)),
("Heuristic", Json.obj(
("Test Results", Json.obj(
("Goal Completion", Json.fromValues(hResults.runs.map(r => Json.fromDoubleOrNull(r.goalCompletion)))),
("Steps", Json.fromValues(hResults.runs.map(r => Json.fromInt(r.steps)))),
("Remaining Health", Json.fromValues(hResults.runs.map(r => Json.fromDoubleOrNull(r.remainingHealth)))),
("Remaining Energy", Json.fromValues(hResults.runs.map(r => Json.fromDoubleOrNull(r.remainingEnergy))))
)),
("Average Goal Completion", Json.fromDoubleOrNull(hResults.avgGoalCompletion)),
("Average Steps", Json.fromInt(hResults.avgActions)),
("Average Remaining Health", Json.fromDoubleOrNull(hResults.avgRemainingHealth)),
("Average Remaining Energy", Json.fromDoubleOrNull(hResults.avgRemainingEnergy))
)),
("SCOUt", Json.obj(
("Test Results", Json.obj(
("Goal Completion", Json.fromValues(sResults.runs.map(r => Json.fromDoubleOrNull(r.goalCompletion)))),
("Steps", Json.fromValues(sResults.runs.map(r => Json.fromInt(r.steps)))),
("Remaining Health", Json.fromValues(sResults.runs.map(r => Json.fromDoubleOrNull(r.remainingHealth)))),
("Remaining Energy", Json.fromValues(sResults.runs.map(r => Json.fromDoubleOrNull(r.remainingEnergy))))
)),
("Average Goal Completion", Json.fromDoubleOrNull(sResults.avgGoalCompletion)),
("Average Steps", Json.fromInt(sResults.avgActions)),
("Average Remaining Health", Json.fromDoubleOrNull(sResults.avgRemainingHealth)),
("Average Remaining Energy", Json.fromDoubleOrNull(sResults.avgRemainingEnergy))
))
))
)
val fileName = "Test-Results-" + testTemplate
val filePath = "src/main/scala/testing/Tests/Experiment1/MapWater/Results/"
saveJsonFile(fileName, filePath, jsonData)
}
}
| KeithCissell/SCOUt | app/src/main/scala/testing/Tests/Experiment1/MapWater/Testing.scala | Scala | mit | 5,146 |
package uk.co.morleydev.zander.client
import java.io.File
import uk.co.morleydev.zander.client.model.{Configuration, Arguments}
import uk.co.morleydev.zander.client.model.arg.BuildCompiler._
import uk.co.morleydev.zander.client.model.arg.BuildMode._
import uk.co.morleydev.zander.client.model.arg.{Branch, Project}
import uk.co.morleydev.zander.client.model.net.ProjectDto
import uk.co.morleydev.zander.client.model.store.{CacheDetails, ArtefactDetails, SourceVersion}
import scala.concurrent.Future
package object data {
/**
* Parse a set of arguments and returned the parsed arguments
*/
type ArgumentParser = (IndexedSeq[String] => Arguments)
/**
* Parse a set of arguments, and return either the parsed arguments or an error code
*/
type ArgumentParserWithErrorCodes = (IndexedSeq[String] => Either[Int, Arguments])
/**
* Maps build modes to build types for a build strategy (i.e BuildMode.Debug -> cmake Debug flag)
*/
type BuildModeBuildTypeMap = (BuildMode => String)
/**
* The project source build is responsible for the compilation of the project files using the makefiles
* generated by a source prebuild
*/
type BuildProjectSource = ((Project, BuildCompiler, BuildMode) => Unit)
/**
* Check if the artefact details exist for the specified Project, Compiler and Mode
*/
type CheckArtefactDetailsExist = ((Project, BuildCompiler, BuildMode) => Boolean)
/**
* Get the Generator flags to use for the specified compiler
*/
type CompilerGeneratorMap = (BuildCompiler => Seq[String])
/**
* Delete the artefact details for a project
*/
type DeleteProjectArtefactDetails = ((Project, BuildCompiler, BuildMode) => Unit)
/**
* Delete the artefacts for a project
*/
type DeleteProjectArtefacts = (Seq[String] => Unit)
/**
* Acquire the source for a given project, using information from the ProjectDto
*/
type DownloadProjectSource = ((Project, ProjectDto) => Unit)
/**
* Checkout a branch in the source for a given project
*/
type CheckoutProjectSource = ((Project, Branch) => Unit)
/**
* Get the ProjectDto for the Project for a given Compiler
*/
type GetProjectDto = ((Project, BuildCompiler) => Future[ProjectDto])
/**
* Get the project source version currently available for a given project
*/
type GetProjectSourceVersion = (Project => SourceVersion)
/**
* The project artefact install is responsible for the installation of artefacts from a store (i.e the project cache)
* to the local working directory
*/
type InstallProjectArtefact = ((Project, BuildCompiler, BuildMode, Branch) => Unit)
/**
* The project source install is responsible for the installation of project files created by the build to the cache
*/
type InstallProjectCache = ((Project, BuildCompiler, BuildMode) => Unit)
/**
* List the cache filenames for a project
*/
type ListProjectCacheFiles = ((Project, BuildCompiler, BuildMode) => Seq[String])
/**
* Load a configuration file, creating it if the configuration cannot be found
*/
type LoadOrCreateConfiguration = (String => Configuration)
/**
* Run the prebuild steps (e.g cmake)
*/
type PreBuildProjectSource = ((Project, BuildCompiler, BuildMode, Branch) => Unit)
/**
* Process a map of projects to artefact details (e.g remove duplicate files from the Artefact Details)
*/
type ProcessProjectArtefactDetailsMap = (Map[(Project, BuildCompiler, BuildMode), ArtefactDetails] => Map[(Project, BuildCompiler, BuildMode), ArtefactDetails])
/**
* Read the artefact details for a given project, compiler, mode combination
*/
type ReadProjectArtefactDetails = ((Project, BuildCompiler, BuildMode) => ArtefactDetails)
/**
* Read the cache details for a given project, compiler, mode combination
*/
type ReadProjectCacheDetails = ((Project, BuildCompiler, BuildMode, Branch) => CacheDetails)
/**
* Split a filename into a project, compiler and build mode ( e.g project.compiler.buildmode.json => (Project, Compiler, BuildMode) )
*/
type SplitFileNameToProjectDetails = (String => (Project, BuildCompiler, BuildMode))
/**
* Update the cached project source
*/
type UpdateProjectSource = ((Project, ProjectDto) => Unit)
/**
* Write a file containing the project artefact details for a project, with a compiler, build mode, source version and set of filenames
*/
type WriteProjectArtefactDetails = ((Project, BuildCompiler, BuildMode, SourceVersion, Seq[String]) => Unit)
/**
* Write project source details for a project, with a compiler, build mode and source version
*/
type WriteProjectSourceDetails = ((Project, BuildCompiler, BuildMode, Branch, SourceVersion) => Unit)
/**
* Factory used for building a native process to run, for example running CMake or Git
*/
type NativeProcessBuilderFactory = (Seq[String] => NativeProcessBuilder)
/**
* Gets the path to write/read for a project artefacts in the cache
*/
type GetArtefactsLocation = ((Project, BuildCompiler, BuildMode, Branch) => File)
/**
* Gets the path to write/read for a project source in the cache
*/
type GetSourceLocation = (Project => File)
}
| MorleyDev/zander.client | src/main/scala/uk/co/morleydev/zander/client/data/package.scala | Scala | mit | 5,236 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.network._
import kafka.utils._
import kafka.metrics.KafkaMetricsGroup
import java.util.concurrent.TimeUnit
import com.yammer.metrics.core.Meter
import org.apache.kafka.common.utils.Utils
/**
* A thread that answers kafka requests.
*/
class KafkaRequestHandler(id: Int,
brokerId: Int,
val aggregateIdleMeter: Meter,
val totalHandlerThreads: Int,
val requestChannel: RequestChannel,
apis: KafkaApis) extends Runnable with Logging {
this.logIdent = "[Kafka Request Handler " + id + " on Broker " + brokerId + "], "
def run() {
while(true) {
try {
var req : RequestChannel.Request = null
while (req == null) {
// We use a single meter for aggregate idle percentage for the thread pool.
// Since meter is calculated as total_recorded_value / time_window and
// time_window is independent of the number of threads, each recorded idle
// time should be discounted by # threads.
val startSelectTime = SystemTime.nanoseconds
req = requestChannel.receiveRequest(300)
val idleTime = SystemTime.nanoseconds - startSelectTime
aggregateIdleMeter.mark(idleTime / totalHandlerThreads)
}
if(req eq RequestChannel.AllDone) {
debug("Kafka request handler %d on broker %d received shut down command".format(
id, brokerId))
return
}
req.requestDequeueTimeMs = SystemTime.milliseconds
trace("Kafka request handler %d on broker %d handling request %s".format(id, brokerId, req))
apis.handle(req)
} catch {
case e: Throwable => error("Exception when handling request", e)
}
}
}
def shutdown(): Unit = requestChannel.sendRequest(RequestChannel.AllDone)
}
class KafkaRequestHandlerPool(val brokerId: Int,
val requestChannel: RequestChannel,
val apis: KafkaApis,
numThreads: Int) extends Logging with KafkaMetricsGroup {
/* a meter to track the average free capacity of the request handlers */
private val aggregateIdleMeter = newMeter("RequestHandlerAvgIdlePercent", "percent", TimeUnit.NANOSECONDS)
this.logIdent = "[Kafka Request Handler on Broker " + brokerId + "], "
val threads = new Array[Thread](numThreads)
val runnables = new Array[KafkaRequestHandler](numThreads)
for(i <- 0 until numThreads) {
runnables(i) = new KafkaRequestHandler(i, brokerId, aggregateIdleMeter, numThreads, requestChannel, apis)
threads(i) = Utils.daemonThread("kafka-request-handler-" + i, runnables(i))
threads(i).start()
}
def shutdown() {
info("shutting down")
for(handler <- runnables)
handler.shutdown
for(thread <- threads)
thread.join
info("shut down completely")
}
}
class BrokerTopicMetrics(name: Option[String]) extends KafkaMetricsGroup {
val tags: scala.collection.Map[String, String] = name match {
case None => scala.collection.Map.empty
case Some(topic) => Map("topic" -> topic)
}
val messagesInRate = newMeter("MessagesInPerSec", "messages", TimeUnit.SECONDS, tags)
val bytesInRate = newMeter("BytesInPerSec", "bytes", TimeUnit.SECONDS, tags)
val bytesOutRate = newMeter("BytesOutPerSec", "bytes", TimeUnit.SECONDS, tags)
val bytesRejectedRate = newMeter("BytesRejectedPerSec", "bytes", TimeUnit.SECONDS, tags)
val failedProduceRequestRate = newMeter("FailedProduceRequestsPerSec", "requests", TimeUnit.SECONDS, tags)
val failedFetchRequestRate = newMeter("FailedFetchRequestsPerSec", "requests", TimeUnit.SECONDS, tags)
val totalProduceRequestRate = newMeter("TotalProduceRequestsPerSec", "requests", TimeUnit.SECONDS, tags)
val totalFetchRequestRate = newMeter("TotalFetchRequestsPerSec", "requests", TimeUnit.SECONDS, tags)
}
object BrokerTopicStats extends Logging {
private val valueFactory = (k: String) => new BrokerTopicMetrics(Some(k))
private val stats = new Pool[String, BrokerTopicMetrics](Some(valueFactory))
private val allTopicsStats = new BrokerTopicMetrics(None)
def getBrokerAllTopicsStats(): BrokerTopicMetrics = allTopicsStats
def getBrokerTopicStats(topic: String): BrokerTopicMetrics = {
stats.getAndMaybePut(topic)
}
}
| eljefe6a/kafka | core/src/main/scala/kafka/server/KafkaRequestHandler.scala | Scala | apache-2.0 | 5,208 |
package com.github.havarunner.exception
class NonStaticInnerClassException(clazz: Class[_]) extends RuntimeException(
s"The class ${clazz.getName} must be static (HavaRunner does not support non-static inner classes)"
)
| havarunner/havarunner | src/main/scala/com/github/havarunner/exception/NonStaticInnerClassException.scala | Scala | mit | 223 |
package nl.lpdiy.pishake.model
case class SystemInfo(hardwareInfo: HardwareInfo, memoryInfo: MemoryInfo, operatingSystemInfo: OperatingSystemInfo, javaEnvironmentInfo: JavaEnvironmentInfo, networkInfo: NetworkInfo, codecInfo: CodecInfo, clockFrequencyInfo: ClockFrequencyInfo)
case class HardwareInfo(serial: String, cpuRevision: String, cpuArchitecture: String, cpuPart: String, cpuTemperature: Float, cpuVoltage: Float, modelName: String, processor: String, revision: String, hardFloatAbi: Boolean, boardType: String)
case class MemoryInfo(total: Long, used: Long, free: Long, shared: Long, buffers: Long, cached: Long, voltageSDRamC: Float, voltageSDRamI: Float, voltageSDRamP: Float)
case class OperatingSystemInfo(name: String, version: String, arch: String, firmwareBuild: String, firmwareDate: String)
case class JavaEnvironmentInfo(vendor: String, vendorUrl: String, version: String, virtualMachine: String, runtime: String)
case class NetworkInfo(hostname: String, ipAddresses: List[String], fqdns: List[String], nameservers: List[String])
case class CodecInfo(h264: Boolean, mpg2: Boolean, wvc1: Boolean)
case class ClockFrequencyInfo(arm: Long, core: Long, h264: Long, isp: Long, v3d: Long, uart: Long, pwm: Long, emmc: Long, pixel: Long, vec: Long, hdmi: Long, dpi: Long)
| dragoslav/pishake | common/src/main/scala/nl/lpdiy/pishake/model/SystemInfo.scala | Scala | apache-2.0 | 1,295 |
package com.github.sekruse.pagerank
import org.qcri.rheem.api.{PlanBuilder, _}
import org.qcri.rheem.api.graph._
import org.qcri.rheem.basic.RheemBasics
import org.qcri.rheem.core.api.{Configuration, RheemContext}
import org.qcri.rheem.core.plugin.Plugin
import org.qcri.rheem.graphchi.GraphChi
import org.qcri.rheem.java.Java
import org.qcri.rheem.spark.Spark
/**
* This is a Rheem implementation of the PageRank algorithm with some preprocessing.
*/
class PageRank(configuration: Configuration, plugins: Plugin*) {
/**
* Executes this instance.
*
* @param inputUrl URL to the first RDF NT file
* @param numIterations number of PageRank iterations to perform
* @return the page ranks
*/
def apply(inputUrl: String, numIterations: Int) = {
// Initialize.
val rheemCtx = new RheemContext(configuration)
plugins.foreach(rheemCtx.register)
implicit val planBuilder = new PlanBuilder(rheemCtx)
.withJobName(s"PageRank ($inputUrl, $numIterations iterations)")
.withUdfJarsOf(this.getClass)
// Read and parse the input file.
val edges = planBuilder
.readTextFile(inputUrl).withName("Load file")
.filter(!_.startsWith("#"), selectivity = 1.0).withName("Filter comments")
.map(PageRank.parseTriple).withName("Parse triples")
.map { case (s, p, o) => (s, o) }.withName("Discard predicate")
// Create vertex IDs.
val vertexIds = edges
.flatMap(edge => Seq(edge._1, edge._2)).withName("Extract vertices")
.distinct.withName("Distinct vertices")
.zipWithId.withName("Add vertex IDs")
// Encode the edges with the vertex IDs
type VertexId = org.qcri.rheem.basic.data.Tuple2[Vertex, String]
val idEdges = edges
.join[VertexId, String](_._1, vertexIds, _.field1).withName("Join source vertex IDs")
.map { linkAndVertexId =>
(linkAndVertexId.field1.field0, linkAndVertexId.field0._2)
}.withName("Set source vertex ID")
.join[VertexId, String](_._2, vertexIds, _.field1).withName("Join target vertex IDs")
.map(linkAndVertexId => new Edge(linkAndVertexId.field0._1, linkAndVertexId.field1.field0)).withName("Set target vertex ID")
// Run the PageRank.
// Note: org.qcri.rheem.api.graph._ must be imported for this to work.
val pageRanks = idEdges.pageRank(numIterations)
// Make the page ranks readable.
pageRanks
.join[VertexId, Long](_.field0, vertexIds, _.field0).withName("Join page ranks with vertex IDs")
.map(joinTuple => (joinTuple.field1.field1, joinTuple.field0.field1)).withName("Make page ranks readable")
.collect()
}
}
/**
* Companion for [[PageRank]].
*/
object PageRank {
def main(args: Array[String]) {
// Parse args.
if (args.isEmpty) {
println("Usage: <main class> <plugin(,plugin)*> <input file> <#iterations>")
sys.exit(1)
}
val plugins = parsePlugins(args(0))
val inputFile = args(1)
val numIterations = args(2).toInt
// Set up our wordcount app.
val configuration = new Configuration
val pageRank = new PageRank(configuration, plugins: _*)
// Run the wordcount.
val pageRanks = pageRank(inputFile, numIterations)
// Print results.
println(s"Found ${pageRanks.size} pageRanks:")
pageRanks.toSeq.sortBy(-_._2)
pageRanks.take(10).foreach(pr => println(f"${pr._1} has a page rank of ${pr._2 % .3f}"))
if (pageRanks.size > 10) println(s"${pageRanks.size - 10} more...")
}
/**
* Parse a comma-separated list of plugins.
*
* @param arg the list
* @return the [[Plugin]]s
*/
def parsePlugins(arg: String) = arg.split(",").map {
case "basic-graph" => RheemBasics.graphPlugin
case "java" => Java.basicPlugin
case "java-conversion" => Java.channelConversionPlugin
case "java-graph" => Java.graphPlugin
case "spark" => Spark.basicPlugin
case "spark-graph" => Spark.graphPlugin
case "graphchi" => GraphChi.plugin
case other: String => sys.error(s"Unknown plugin: $other")
}
/**
* Parse a NT file triple.
*
* @param raw a [[String]] that is expected to conform to the pattern `<subject> <predicate> <object>|<literal> .`
* @return the parsed triple
*/
def parseTriple(raw: String): (String, String, String) = {
// Find the first two spaces: Odds are that these are separate subject, predicated and object.
val firstSpacePos = raw.indexOf(' ')
val secondSpacePos = raw.indexOf(' ', firstSpacePos + 1)
// Find the end position.
var stopPos = raw.lastIndexOf('.')
while (raw.charAt(stopPos - 1) == ' ') stopPos -= 1
(raw.substring(0, firstSpacePos),
raw.substring(firstSpacePos + 1, secondSpacePos),
raw.substring(secondSpacePos + 1, stopPos))
}
}
| sekruse/rheem-examples | pagerank/src/main/scala/com/github/sekruse/pagerank/PageRank.scala | Scala | apache-2.0 | 4,779 |
println ("Calificación de algoritmos.")
println ("Ingresar los datos que se piden por favor.")
println (" ")
println ("Primer parcial: ")
val cal1 = readFloat()
println ("Segundo parcial: ")
val cal2 = readFloat()
println ("Tercer parcial: ")
val cal3 = readFloat()
val promedio = (cal1 + cal2 + cal3)/3
val porpromedio = promedio * 5.5
println (" ")
println ("El porcentaje del promedio es: " +porpromedio+ " %")
println (" ")
println ("Calificación del examen final: ")
val calfin = readFloat()
val porcalfin = calfin * 3
println (" ")
println ("El porcentaje del examen final es: " +porcalfin+ " %")
println (" ")
println ("Calificación del trabajo final: ")
val caltra = readFloat()
val porcaltra = caltra * 1.5
println (" ")
println ("El porcentaje del trabajo final es: " +porcaltra+ " %")
val fin = (porpromedio + porcalfin + porcaltra)
println (" ")
println ("El porcentaje final es: " +fin+ " %")
| CesMijangos04/poo1 | Calificacion.scala | Scala | mit | 941 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets that match specific criteria, offering a quick overview of the dataset's contents rather than an in-depth analysis.