code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.alanjz.microstrike.weapon
/** Marker trait for all firearm-type weapons. */
trait Gun extends Weapon
| spacenut/microstrike | src/com/alanjz/microstrike/weapon/Gun.scala | Scala | gpl-2.0 | 69 |
import leon.lang._
// Leon/InSynth synthesis test case: the `hole(0)` call below marks a synthesis
// hole the tool is expected to fill; this file is not meant to compile or run
// as ordinary Scala.
object LocalScope {

  // Red-black tree colors.
  sealed abstract class Color
  case class Red() extends Color
  case class Black() extends Color

  // Binary tree with colored nodes.
  sealed abstract class Tree
  case class Empty() extends Tree
  case class Node(color: Color, left: Tree, value: Int, right: Tree) extends Tree

  // Local re-implementation of Option specialized to Int.
  sealed abstract class OptionInt
  case class Some(v : Int) extends OptionInt
  case class None() extends OptionInt

  // Despite the name, the body always yields false for a Node; the interesting
  // part for the synthesizer is the scope visible at the hole.
  def blackBalanced(t : Tree) : Boolean = t match {
    case Node(_, l, v, r) => {
      // hide r
      val r: Int = 5 // deliberately shadows the pattern variable `r` bound above
      //val f: ((Int, Int) => Boolean) = (x: Int, y: Int) => false
      Some(5) match {
        case Some(newInt) => hole(0) // synthesis hole; match is intentionally non-exhaustive
      }
      false
    }
    case Empty() => true
  }
}
| ericpony/scala-examples | testcases/graveyard/insynth-synthesis-tests/LocalScope.scala | Scala | mit | 730 |
package org.oedura.scavrodemo.model
import org.apache.avro.Schema
import org.oedura.scavro.{AvroReader, AvroSerializeable, AvroMetadata}
import org.oedura.scavrodemo.idl.{LineItem => JLineItem}
/**
 * Immutable order line item that can be serialized via Scavro/Avro.
 *
 * Note: `toAvro` narrows `price` from Double to Float because the generated
 * Avro class stores a float, so precision beyond Float is lost on
 * serialization.
 */
case class LineItem(name: String, price: Double, quantity: Int) extends AvroSerializeable {
  // Avro-generated Java counterpart type used by the Scavro machinery.
  type J = JLineItem
  override def toAvro: JLineItem = new JLineItem(name, price.toFloat, quantity)
}
object LineItem {
  /**
   * Reader used by Scavro to deserialize [[LineItem]] values.
   *
   * Declared as a `val` with an explicit result type: implicit members should
   * always carry explicit types, and a single shared instance avoids
   * allocating a fresh reader on every implicit resolution (the previous
   * `implicit def` did exactly that).
   */
  implicit val reader: AvroReader[LineItem] = new AvroReader[LineItem] { override type J = JLineItem }

  /** Metadata binding the Scala case class to its Avro-generated Java class. */
  implicit val metadata: AvroMetadata[LineItem, JLineItem] = new AvroMetadata[LineItem, JLineItem] {
    override val avroClass: Class[JLineItem] = classOf[JLineItem]
    override val schema: Schema = JLineItem.getClassSchema
    // Conversion back from the Avro form; getName returns a CharSequence, so
    // it must be converted to String explicitly.
    override val fromAvro: (JLineItem) => LineItem = (j: JLineItem) => {
      LineItem(j.getName.toString, j.getPrice.doubleValue, j.getQuantity)
    }
  }
}
| oedura/scavro | demo/src/main/scala/org/oedura/scavrodemo/model/LineItem.scala | Scala | apache-2.0 | 879 |
/*
* RaisingVE.scala
* A flat variable elimination algorithm.
*
* Created By: Brian Ruttenberg (bruttenberg@cra.com)
* Creation Date: July 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured.algorithm.hybrid
import com.cra.figaro.language._
import com.cra.figaro.algorithm.factored.factors.SumProductSemiring
import com.cra.figaro.algorithm.structured._
import com.cra.figaro.algorithm.structured.strategy._
import com.cra.figaro.algorithm.structured.solver._
import com.cra.figaro.algorithm.structured.strategy.solve.ConstantStrategy
import com.cra.figaro.algorithm.structured.algorithm._
import com.cra.figaro.algorithm.structured.strategy.decompose._
import com.cra.figaro.algorithm.factored.factors.factory._
/**
 * A flat variable elimination algorithm that decomposes the problem with a
 * recursive raising strategy and solves it with marginal variable elimination.
 *
 * @param universe the universe containing the query targets
 * @param targets  the elements whose marginals are queried
 */
class RaisingVE(universe: Universe, targets: Element[_]*) extends StructuredProbQueryAlgorithm(universe, targets: _*) {

  val semiring = SumProductSemiring()

  // Explicit `: Unit =` replaces the deprecated procedure syntax `def run() { ... }`.
  def run(): Unit = {
    val strategy = DecompositionStrategy.recursiveRaisingStrategy(problem, new ConstantStrategy(marginalVariableElimination), RaisingStrategy.raiseIfGlobal, defaultRangeSizer, Lower, false)
    strategy.execute(initialComponents)
    // Combine all per-component solution factors into a single joint factor,
    // then marginalize it down to each query target.
    val joint = problem.solution.foldLeft(Factory.unit(semiring))(_.product(_))
    targets.foreach(t => marginalizeToTarget(t, joint))
  }
}
object RaisingVE {
  /** Create a structured variable elimination algorithm with the given query targets. */
  def apply(targets: Element[_]*) = {
    if (targets.isEmpty) throw new IllegalArgumentException("Cannot run VE with no targets")
    val distinctUniverses = targets.map(_.universe).toSet
    if (distinctUniverses.size > 1) throw new IllegalArgumentException("Cannot have targets in different universes")
    new RaisingVE(targets.head.universe, targets: _*)
  }

  /**
   * Use VE to compute the probability that the given element satisfies the given predicate.
   */
  def probability[T](target: Element[T], predicate: T => Boolean): Double = {
    val algorithm = RaisingVE(target)
    algorithm.start()
    val answer = algorithm.probability(target, predicate)
    algorithm.kill()
    answer
  }

  /**
   * Use VE to compute the probability that the given element has the given value.
   */
  def probability[T](target: Element[T], value: T): Double = {
    val matchesValue = (t: T) => t == value
    probability(target, matchesValue)
  }
}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/algorithm/hybrid/RaisingVE.scala | Scala | bsd-3-clause | 2,480 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.scalatest.Matchers
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Millis, Span}
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Tests._
import org.apache.spark.security.EncryptionFunSuite
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.util.io.ChunkedByteBuffer
// A class that is deliberately not Serializable, used to trigger task
// serialization failures in the tests below.
class NotSerializableClass
// A Throwable whose payload is not serializable, so the exception itself
// cannot be shipped back to the driver intact.
class NotSerializableExn(val notSer: NotSerializableClass) extends Throwable() {}
/**
 * End-to-end tests that run Spark jobs against a `local-cluster` master,
 * covering task-serialization failures, caching and replication at various
 * storage levels, fault tolerance across executor crashes, and cluster
 * configuration behavior.
 */
class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContext
  with EncryptionFunSuite with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  // Two workers with one core and 1024 MB of memory each.
  val clusterUrl = "local-cluster[2,1,1024]"

  test("task throws not serializable exception") {
    // Ensures that executors do not crash when an exn is not serializable. If executors crash,
    // this test will hang. Correct behavior is that executors don't crash but fail tasks
    // and the scheduler throws a SparkException.

    // numSlaves must be less than numPartitions
    val numSlaves = 3
    val numPartitions = 10

    sc = new SparkContext("local-cluster[%s,1,1024]".format(numSlaves), "test")
    val data = sc.parallelize(1 to 100, numPartitions).
      map(x => throw new NotSerializableExn(new NotSerializableClass))
    intercept[SparkException] {
      data.count()
    }
    resetSparkContext()
  }

  test("local-cluster format") {
    import SparkMasterRegex._

    // The master-URL regex must tolerate whitespace inside the brackets.
    val masterStrings = Seq(
      "local-cluster[2,1,1024]",
      "local-cluster[2 , 1 , 1024]",
      "local-cluster[2, 1, 1024]",
      "local-cluster[ 2, 1, 1024 ]"
    )

    masterStrings.foreach {
      case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
        assert(numSlaves.toInt == 2)
        assert(coresPerSlave.toInt == 1)
        assert(memoryPerSlave.toInt == 1024)
    }
  }

  test("simple groupByKey") {
    sc = new SparkContext(clusterUrl, "test")
    val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (2, 1)), 5)
    val groups = pairs.groupByKey(5).collect()
    assert(groups.size === 2)
    val valuesFor1 = groups.find(_._1 == 1).get._2
    assert(valuesFor1.toList.sorted === List(1, 2, 3))
    val valuesFor2 = groups.find(_._1 == 2).get._2
    assert(valuesFor2.toList.sorted === List(1))
  }

  test("groupByKey where map output sizes exceed maxMbInFlight") {
    val conf = new SparkConf().set(config.REDUCER_MAX_SIZE_IN_FLIGHT.key, "1m")
    sc = new SparkContext(clusterUrl, "test", conf)
    // This data should be around 20 MB, so even with 4 mappers and 2 reducers, each map output
    // file should be about 2.5 MB
    val pairs = sc.parallelize(1 to 2000, 4).map(x => (x % 16, new Array[Byte](10000)))
    val groups = pairs.groupByKey(2).map(x => (x._1, x._2.size)).collect()
    assert(groups.length === 16)
    assert(groups.map(_._2).sum === 2000)
  }

  test("accumulators") {
    sc = new SparkContext(clusterUrl, "test")
    val accum = sc.longAccumulator
    sc.parallelize(1 to 10, 10).foreach(x => accum.add(x))
    assert(accum.value === 55)
  }

  test("broadcast variables") {
    sc = new SparkContext(clusterUrl, "test")
    val array = new Array[Int](100)
    val bv = sc.broadcast(array)
    array(2) = 3 // Change the array -- this should not be seen on workers
    val rdd = sc.parallelize(1 to 10, 10)
    val sum = rdd.map(x => bv.value.sum).reduce(_ + _)
    assert(sum === 0)
  }

  test("repeatedly failing task") {
    sc = new SparkContext(clusterUrl, "test")
    val thrown = intercept[SparkException] {
      // scalastyle:off println
      sc.parallelize(1 to 10, 10).foreach(x => println(x / 0))
      // scalastyle:on println
    }
    assert(thrown.getClass === classOf[SparkException])
    // "failed 4 times" reflects the default task retry limit.
    assert(thrown.getMessage.contains("failed 4 times"))
  }

  test("repeatedly failing task that crashes JVM") {
    // Ensures that if a task fails in a way that crashes the JVM, the job eventually fails rather
    // than hanging due to retrying the failed task infinitely many times (eventually the
    // standalone scheduler will remove the application, causing the job to hang waiting to
    // reconnect to the master).
    sc = new SparkContext(clusterUrl, "test")
    failAfter(Span(100000, Millis)) {
      val thrown = intercept[SparkException] {
        // One of the tasks always fails.
        sc.parallelize(1 to 10, 2).foreach { x => if (x == 1) System.exit(42) }
      }
      assert(thrown.getClass === classOf[SparkException])
      assert(thrown.getMessage.contains("failed 4 times"))
    }
  }

  test("repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)") {
    // Ensures that if a task which causes the JVM to exit with a zero exit code will cause the
    // Spark job to eventually fail.
    sc = new SparkContext(clusterUrl, "test")
    failAfter(Span(100000, Millis)) {
      val thrown = intercept[SparkException] {
        sc.parallelize(1 to 1, 1).foreachPartition { _ => System.exit(0) }
      }
      assert(thrown.getClass === classOf[SparkException])
      assert(thrown.getMessage.contains("failed 4 times"))
    }
    // Check that the cluster is still usable:
    sc.parallelize(1 to 10).count()
  }

  // Registers a caching test under `testName`; when the storage level is
  // replicated, a second variant is registered that forces block replication
  // to be streamed instead of fetched fully into memory.
  private def testCaching(testName: String, conf: SparkConf, storageLevel: StorageLevel): Unit = {
    test(testName) {
      testCaching(conf, storageLevel)
    }
    if (storageLevel.replication > 1) {
      // also try with block replication as a stream
      val uploadStreamConf = new SparkConf()
      uploadStreamConf.setAll(conf.getAll)
      uploadStreamConf.set(config.MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM, 1L)
      test(s"$testName (with replication as stream)") {
        testCaching(uploadStreamConf, storageLevel)
      }
    }
  }

  // Body shared by all caching tests: persists an RDD at the given storage
  // level, then fetches and deserializes the first partition from every
  // replica location to verify the cached bytes.
  private def testCaching(conf: SparkConf, storageLevel: StorageLevel): Unit = {
    sc = new SparkContext(conf.setMaster(clusterUrl).setAppName("test"))
    TestUtils.waitUntilExecutorsUp(sc, 2, 30000)
    val data = sc.parallelize(1 to 1000, 10)
    val cachedData = data.persist(storageLevel)
    assert(cachedData.count === 1000)
    assert(sc.getRDDStorageInfo.filter(_.id == cachedData.id).map(_.numCachedPartitions).sum ===
      data.getNumPartitions)
    // Get all the locations of the first partition and try to fetch the partitions
    // from those locations.
    val blockIds = data.partitions.indices.map(index => RDDBlockId(data.id, index)).toArray
    val blockId = blockIds(0)
    val blockManager = SparkEnv.get.blockManager
    val blockTransfer = blockManager.blockTransferService
    val serializerManager = SparkEnv.get.serializerManager
    val locations = blockManager.master.getLocations(blockId)
    assert(locations.size === storageLevel.replication,
      s"; got ${locations.size} replicas instead of ${storageLevel.replication}")
    locations.foreach { cmId =>
      val bytes = blockTransfer.fetchBlockSync(cmId.host, cmId.port, cmId.executorId,
        blockId.toString, null)
      val deserialized = serializerManager.dataDeserializeStream(blockId,
        new ChunkedByteBuffer(bytes.nioByteBuffer()).toInputStream())(data.elementClassTag).toList
      // The first partition of (1 to 1000) split 10 ways holds 1 to 100.
      assert(deserialized === (1 to 100).toList)
    }
    // This will exercise the getRemoteValues code path:
    assert(blockIds.flatMap(id => blockManager.get[Int](id).get.data).toSet === (1 to 1000).toSet)
  }

  // Register the caching tests for every storage level, each wrapped in
  // encryptionTestHelper so they also run with IO encryption enabled.
  Seq(
    "caching" -> StorageLevel.MEMORY_ONLY,
    "caching on disk" -> StorageLevel.DISK_ONLY,
    "caching in memory, replicated" -> StorageLevel.MEMORY_ONLY_2,
    "caching in memory, serialized, replicated" -> StorageLevel.MEMORY_ONLY_SER_2,
    "caching on disk, replicated" -> StorageLevel.DISK_ONLY_2,
    "caching in memory and disk, replicated" -> StorageLevel.MEMORY_AND_DISK_2,
    "caching in memory and disk, serialized, replicated" -> StorageLevel.MEMORY_AND_DISK_SER_2
  ).foreach { case (testName, storageLevel) =>
    encryptionTestHelper(testName) { case (name, conf) =>
      testCaching(name, conf, storageLevel)
    }
  }

  test("compute without caching when no partitions fit in memory") {
    val size = 10000
    val conf = new SparkConf()
      .set(config.STORAGE_UNROLL_MEMORY_THRESHOLD, 1024L)
      .set(TEST_MEMORY, size.toLong / 2)
    sc = new SparkContext(clusterUrl, "test", conf)
    val data = sc.parallelize(1 to size, 2).persist(StorageLevel.MEMORY_ONLY)
    assert(data.count() === size)
    assert(data.count() === size)
    assert(data.count() === size)
    // ensure only a subset of partitions were cached
    val rddBlocks = sc.env.blockManager.master.getMatchingBlockIds(_.isRDD, askSlaves = true)
    assert(rddBlocks.size === 0, s"expected no RDD blocks, found ${rddBlocks.size}")
  }

  test("compute when only some partitions fit in memory") {
    val size = 10000
    val numPartitions = 20
    val conf = new SparkConf()
      .set(config.STORAGE_UNROLL_MEMORY_THRESHOLD, 1024L)
      .set(TEST_MEMORY, size.toLong)
    sc = new SparkContext(clusterUrl, "test", conf)
    val data = sc.parallelize(1 to size, numPartitions).persist(StorageLevel.MEMORY_ONLY)
    assert(data.count() === size)
    assert(data.count() === size)
    assert(data.count() === size)
    // ensure only a subset of partitions were cached
    val rddBlocks = sc.env.blockManager.master.getMatchingBlockIds(_.isRDD, askSlaves = true)
    assert(rddBlocks.size > 0, "no RDD blocks found")
    assert(rddBlocks.size < numPartitions, s"too many RDD blocks found, expected <$numPartitions")
  }

  test("passing environment variables to cluster") {
    sc = new SparkContext(clusterUrl, "test", null, Nil, Map("TEST_VAR" -> "TEST_VALUE"))
    val values = sc.parallelize(1 to 2, 2).map(x => System.getenv("TEST_VAR")).collect()
    assert(values.toSeq === Seq("TEST_VALUE", "TEST_VALUE"))
  }

  test("recover from node failures") {
    import DistributedSuite.{markNodeIfIdentity, failOnMarkedIdentity}
    DistributedSuite.amMaster = true
    sc = new SparkContext(clusterUrl, "test")
    val data = sc.parallelize(Seq(true, true), 2)
    assert(data.count === 2) // force executors to start
    assert(data.map(markNodeIfIdentity).collect.size === 2)
    assert(data.map(failOnMarkedIdentity).collect.size === 2)
  }

  test("recover from repeated node failures during shuffle-map") {
    import DistributedSuite.{markNodeIfIdentity, failOnMarkedIdentity}
    DistributedSuite.amMaster = true
    sc = new SparkContext(clusterUrl, "test")
    for (i <- 1 to 3) {
      val data = sc.parallelize(Seq(true, false), 2)
      assert(data.count === 2)
      assert(data.map(markNodeIfIdentity).collect.size === 2)
      assert(data.map(failOnMarkedIdentity).map(x => x -> x).groupByKey.count === 2)
    }
  }

  test("recover from repeated node failures during shuffle-reduce") {
    import DistributedSuite.{markNodeIfIdentity, failOnMarkedIdentity}
    DistributedSuite.amMaster = true
    sc = new SparkContext(clusterUrl, "test")
    for (i <- 1 to 3) {
      val data = sc.parallelize(Seq(true, true), 2)
      assert(data.count === 2)
      assert(data.map(markNodeIfIdentity).collect.size === 2)
      // This relies on mergeCombiners being used to perform the actual reduce for this
      // test to actually be testing what it claims.
      val grouped = data.map(x => x -> x).combineByKey(
        x => x,
        (x: Boolean, y: Boolean) => x,
        (x: Boolean, y: Boolean) => failOnMarkedIdentity(x)
      )
      assert(grouped.collect.size === 1)
    }
  }

  test("recover from node failures with replication") {
    import DistributedSuite.{markNodeIfIdentity, failOnMarkedIdentity}
    DistributedSuite.amMaster = true
    // Using more than two nodes so we don't have a symmetric communication pattern and might
    // cache a partially correct list of peers.
    sc = new SparkContext("local-cluster[3,1,1024]", "test")
    for (i <- 1 to 3) {
      val data = sc.parallelize(Seq(true, false, false, false), 4)
      data.persist(StorageLevel.MEMORY_ONLY_2)

      assert(data.count === 4)
      assert(data.map(markNodeIfIdentity).collect.size === 4)
      assert(data.map(failOnMarkedIdentity).collect.size === 4)

      // Create a new replicated RDD to make sure that cached peer information doesn't cause
      // problems.
      val data2 = sc.parallelize(Seq(true, true), 2).persist(StorageLevel.MEMORY_ONLY_2)
      assert(data2.count === 2)
    }
  }

  test("unpersist RDDs") {
    DistributedSuite.amMaster = true
    sc = new SparkContext("local-cluster[3,1,1024]", "test")
    val data = sc.parallelize(Seq(true, false, false, false), 4)
    data.persist(StorageLevel.MEMORY_ONLY_2)
    data.count
    assert(sc.persistentRdds.isEmpty === false)
    data.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    // Block removal is asynchronous; poll until storage info is empty.
    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
        // Do nothing. We might see exceptions because block manager
        // is racing this thread to remove entries from the driver.
      }
    }
  }
}
// Companion holding JVM-global state used by the node-failure tests above.
// These closures run inside executor JVMs, so the vars here are per-JVM.
object DistributedSuite {
  // Indicates whether this JVM is marked for failure.
  var mark = false

  // Set by test to remember if we are in the driver program so we can assert
  // that we are not.
  var amMaster = false

  // Act like an identity function, but if the argument is true, set mark to true.
  def markNodeIfIdentity(item: Boolean): Boolean = {
    if (item) {
      // This closure must execute on an executor, never on the driver.
      assert(!amMaster)
      mark = true
    }
    item
  }

  // Act like an identity function, but if mark was set to true previously, fail,
  // crashing the entire JVM.
  def failOnMarkedIdentity(item: Boolean): Boolean = {
    if (mark) {
      System.exit(42)
    }
    item
  }
}
| hhbyyh/spark | core/src/test/scala/org/apache/spark/DistributedSuite.scala | Scala | apache-2.0 | 14,754 |
/*
* Copyright (c) 2011 Sgine
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of 'Sgine' nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sgine.input
import event.{MouseReleaseEvent, MousePressEvent}
import org.powerscala.event.Listenable
import org.powerscala.{Enumerated, EnumEntry}
import org.powerscala.property.{PropertyParent, Property}
/**
* Mouse represents the singleton of the mouse object.
*
* @author Matt Hicks <mhicks@sgine.org>
*/
// Enum entry type for mouse event kinds; the instances live on the companion
// object below. `sealed` prevents new entries outside this file.
sealed class Mouse extends EnumEntry[Mouse]()(Mouse)
object Mouse extends Listenable with Enumerated[Mouse] with PropertyParent {
  // Top of the property hierarchy; the mouse singleton has no parent.
  val parent: PropertyParent = null

  // Update the mouse button state
  listeners.synchronous {
    case evt: MousePressEvent => evt.button._down = true
    case evt: MouseReleaseEvent => evt.button._down = false
  }

  // Current pointer coordinates.
  val x = Property[Int]("x", 0)
  val y = Property[Int]("y", 0)

  // One entry per kind of mouse event. NOTE(review): declaration order
  // presumably determines each entry's index via Enumerated — confirm before
  // reordering these.
  val Move = new Mouse
  val Press = new Mouse
  val Release = new Mouse
  val Drag = new Mouse
  val Over = new Mouse
  val Out = new Mouse
  val Click = new Mouse
  val Wheel = new Mouse
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import java.nio.file.{Path, Paths}
import java.util.UUID
import scala.collection.JavaConverters._
import scala.collection.mutable
import io.fabric8.kubernetes.api.model.NamespaceBuilder
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import org.scalatest.concurrent.Eventually
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.integrationtest.TestConstants._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.JARS
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI.UI_ENABLED
/**
 * Owns the Kubernetes namespace, client and baseline configuration used by a
 * Spark-on-Kubernetes integration test run.
 *
 * @param defaultClient client bound to the cluster default namespace; used to
 *                      create and delete the per-run test namespace.
 */
private[spark] class KubernetesTestComponents(defaultClient: DefaultKubernetesClient) {

  // Namespace may be supplied via system property; otherwise a random one
  // (UUID with dashes stripped) is generated for this run.
  val namespaceOption = Option(System.getProperty(CONFIG_KEY_KUBE_NAMESPACE))
  val hasUserSpecifiedNamespace = namespaceOption.isDefined
  val namespace = namespaceOption.getOrElse(UUID.randomUUID().toString.replaceAll("-", ""))
  val serviceAccountName =
    Option(System.getProperty(CONFIG_KEY_KUBE_SVC_ACCOUNT))
      .getOrElse("default")
  // Client scoped to the test namespace.
  val kubernetesClient = defaultClient.inNamespace(namespace)
  val clientConfig = kubernetesClient.getConfiguration

  // Creates the test namespace in the cluster.
  def createNamespace(): Unit = {
    defaultClient.namespaces.create(new NamespaceBuilder()
      .withNewMetadata()
      .withName(namespace)
      .endMetadata()
      .build())
  }

  // Deletes the test namespace and blocks until the cluster no longer lists
  // it (namespace deletion is asynchronous).
  def deleteNamespace(): Unit = {
    defaultClient.namespaces.withName(namespace).delete()
    Eventually.eventually(KubernetesSuite.TIMEOUT, KubernetesSuite.INTERVAL) {
      val namespaceList = defaultClient
        .namespaces()
        .list()
        .getItems
        .asScala
      require(!namespaceList.exists(_.getMetadata.getName == namespace))
    }
  }

  // Baseline spark-submit configuration pointing at this cluster/namespace.
  def newSparkAppConf(): SparkAppConf = {
    new SparkAppConf()
      .set("spark.master", s"k8s://${kubernetesClient.getMasterUrl}")
      .set("spark.kubernetes.namespace", namespace)
      .set("spark.executor.cores", "1")
      .set("spark.executor.instances", "1")
      .set("spark.app.name", "spark-test-app")
      .set(IS_TESTING.key, "false")
      .set(UI_ENABLED.key, "true")
      .set("spark.kubernetes.submission.waitAppCompletion", "false")
      .set("spark.kubernetes.authenticate.driver.serviceAccountName", serviceAccountName)
  }
}
/** Mutable builder for the key/value configuration passed to spark-submit. */
private[spark] class SparkAppConf {

  private val settings = mutable.Map.empty[String, String]

  /** Sets one configuration entry and returns this builder for chaining. */
  def set(key: String, value: String): SparkAppConf = {
    settings(key) = value
    this
  }

  /** Returns the value for `key`, or the empty string when unset. */
  def get(key: String): String = settings.getOrElse(key, "")

  def setJars(jars: Seq[String]): Unit = set(JARS.key, jars.mkString(","))

  override def toString: String = settings.toString

  /** Renders every entry as a `--conf key=value` argument pair. */
  def toStringArray: Iterable[String] =
    settings.toList.flatMap { case (key, value) => List("--conf", s"$key=$value") }

  def toSparkConf: SparkConf = new SparkConf().setAll(settings)
}
// Arguments describing the application under test: the main resource (jar or
// Python file), the main class (ignored for non-JVM apps) and its CLI args.
private[spark] case class SparkAppArguments(
  mainAppResource: String,
  mainClass: String,
  appArgs: Array[String])
private[spark] object SparkAppLauncher extends Logging {
  /**
   * Launches a Spark application in cluster deploy mode via the
   * `bin/spark-submit` script found under `sparkHomeDir`, waiting up to
   * `timeoutSecs` for the process to finish.
   *
   * @param appArguments main resource, main class and application arguments
   * @param appConf      configuration rendered as repeated `--conf` flags
   * @param timeoutSecs  maximum seconds to wait for spark-submit
   * @param sparkHomeDir Spark distribution root containing bin/spark-submit
   * @param isJVM        when true a `--class` flag is added; non-JVM
   *                     (Python) apps omit it
   * @param pyFiles      optional value for the `--py-files` flag
   * @param env          extra environment variables for the child process
   */
  def launch(
      appArguments: SparkAppArguments,
      appConf: SparkAppConf,
      timeoutSecs: Int,
      sparkHomeDir: Path,
      isJVM: Boolean,
      pyFiles: Option[String] = None,
      env: Map[String, String] = Map.empty[String, String]): Unit = {
    val sparkSubmitExecutable = sparkHomeDir.resolve(Paths.get("bin", "spark-submit"))
    logInfo(s"Launching a spark app with arguments $appArguments and conf $appConf")
    val preCommandLine = if (isJVM) {
      mutable.ArrayBuffer(sparkSubmitExecutable.toFile.getAbsolutePath,
        "--deploy-mode", "cluster",
        "--class", appArguments.mainClass,
        "--master", appConf.get("spark.master"))
    } else {
      mutable.ArrayBuffer(sparkSubmitExecutable.toFile.getAbsolutePath,
        "--deploy-mode", "cluster",
        "--master", appConf.get("spark.master"))
    }
    // Final order: [--py-files <files>] [--conf k=v ...] <main resource> [app args...]
    val commandLine =
      pyFiles.map(s => preCommandLine ++ Array("--py-files", s)).getOrElse(preCommandLine) ++
        appConf.toStringArray :+ appArguments.mainAppResource
    if (appArguments.appArgs.nonEmpty) {
      commandLine ++= appArguments.appArgs
    }
    logInfo(s"Launching a spark app with command line: ${commandLine.mkString(" ")}")
    ProcessUtils.executeProcess(commandLine.toArray, timeoutSecs, env = env)
  }
}
| shaneknapp/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/KubernetesTestComponents.scala | Scala | apache-2.0 | 5,096 |
package しきしま
// Abstract syntax tree for the しきしま language; identifiers are Japanese,
// with English glosses in the comments below.
class 抽象構文
// Expression (式): root of the expression hierarchy.
// NOTE(review): not `sealed`, so matches over 式 are not exhaustiveness-checked
// by the compiler — confirm whether that is intentional.
abstract class 式
// Constant (定数): a literal value of any type.
case class 定数(値: Any) extends 式
// Identifier (識別子): a name.
case class 識別子(名: String) extends 式
// Assignment (代入): target expression, member identifier, value expression.
case class 代入(対象: 式, 識別子: 識別子, 式: 式) extends 式
// Formal parameters (仮引数): a variadic list of identifiers.
case class 仮引数(識別子列: 識別子*) extends 式
// Function (関数): formal parameter list plus body expression.
case class 関数(仮引数: 仮引数, 本体: 式) extends 式
// Actual arguments (引数): a variadic list of expressions.
// NOTE(review): unlike the other nodes this does not extend 式 — presumably
// because an argument list is not itself an expression; confirm.
case class 引数(式列: 式*)
// Method call (操作): receiver, method identifier, argument list.
case class 操作(対象: 式, 識別子: 識別子, 引数: 引数) extends 式
// Conditional branch clause (分岐節): condition and result expression.
case class 分岐節(条件: 式, 式: 式) extends 式
// Conditional (分岐): an ordered sequence of branch clauses.
case class 分岐(分既節列: 分岐節*) extends 式
// Loop (繰返): condition and body.
case class 繰返(条件: 式, 本体: 式) extends 式
// Compound expression (複合式): a sequence of expressions.
case class 複合式(式列: 式*) extends 式
| ychubachi/shikishima | src/main/scala/しきしま/抽象構文.scala | Scala | mit | 845 |
package au.com.agiledigital.toolform.plugin
import au.com.agiledigital.toolform.app.ToolFormError
import cats.data.NonEmptyList
import com.monovore.decline._
/**
* Extension point to add new commands to toolform.
* Uses Java SPI - See [[java.util.ServiceLoader]] for details.
* Implement the trait and register the new implementation in
* META-INF/services/au.com.agiledigital.toolform.plugin.ToolFormCommandPlugin
* on the runtime classpath.
*/
trait ToolFormCommandPlugin {
  /** The command-line sub-command contributed by this plugin, yielding either
    * a non-empty list of errors or a result message. */
  def command: Opts[Either[NonEmptyList[ToolFormError], String]]
}
| agiledigital/toolform | src/main/scala/au/com/agiledigital/toolform/plugin/ToolFormCommandPlugin.scala | Scala | apache-2.0 | 557 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import kafka.common.{OffsetAndMetadata, TopicAndPartition}
import kafka.javaapi.consumer.ConsumerRebalanceListener
import scala.collection._
import kafka.utils.Logging
import kafka.serializer._
/**
* Main interface for consumer
*/
/**
 * Main interface for consumer
 */
trait ConsumerConnector {
  /**
   * Create a list of MessageStreams for each topic.
   *
   * @param topicCountMap a map of (topic, #streams) pair
   * @return a map of (topic, list of KafkaStream) pairs.
   * The number of items in the list is #streams. Each stream supports
   * an iterator over message/metadata pairs.
   */
  def createMessageStreams(topicCountMap: Map[String,Int]): Map[String, List[KafkaStream[Array[Byte],Array[Byte]]]]

  /**
   * Create a list of MessageStreams for each topic.
   *
   * @param topicCountMap a map of (topic, #streams) pair
   * @param keyDecoder Decoder to decode the key portion of the message
   * @param valueDecoder Decoder to decode the value portion of the message
   * @return a map of (topic, list of KafkaStream) pairs.
   * The number of items in the list is #streams. Each stream supports
   * an iterator over message/metadata pairs.
   */
  def createMessageStreams[K,V](topicCountMap: Map[String,Int],
                                keyDecoder: Decoder[K],
                                valueDecoder: Decoder[V])
    : Map[String,List[KafkaStream[K,V]]]

  /**
   * Create a list of message streams for all topics that match a given filter.
   *
   * @param topicFilter Either a Whitelist or Blacklist TopicFilter object.
   * @param numStreams Number of streams to return
   * @param keyDecoder Decoder to decode the key portion of the message
   * @param valueDecoder Decoder to decode the value portion of the message
   * @return a list of KafkaStream each of which provides an
   * iterator over message/metadata pairs over allowed topics.
   */
  def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter,
                                        numStreams: Int = 1,
                                        keyDecoder: Decoder[K] = new DefaultDecoder(),
                                        valueDecoder: Decoder[V] = new DefaultDecoder())
    : Seq[KafkaStream[K,V]]

  // Explicit `: Unit` result types below replace the implicit-Unit procedure
  // syntax, which is deprecated; callers and implementations are unaffected.

  /**
   * Commit the offsets of all broker partitions connected by this connector.
   */
  def commitOffsets(retryOnFailure: Boolean): Unit

  /**
   * KAFKA-1743: This method added for backward compatibility.
   */
  def commitOffsets: Unit

  /**
   * Commit offsets from an external offsets map.
   * @param offsetsToCommit the offsets to be committed.
   */
  def commitOffsets(offsetsToCommit: immutable.Map[TopicAndPartition, OffsetAndMetadata], retryOnFailure: Boolean): Unit

  /**
   * Wire in a consumer rebalance listener to be executed when consumer rebalance occurs.
   * @param listener The consumer rebalance listener to wire in
   */
  def setConsumerRebalanceListener(listener: ConsumerRebalanceListener): Unit

  /**
   * Shut down the connector
   */
  def shutdown(): Unit
}
object Consumer extends Logging {
  /**
   * Create a ConsumerConnector
   *
   * @param config at the minimum, need to specify the groupid of the consumer and the zookeeper
   * connection string zookeeper.connect.
   */
  def create(config: ConsumerConfig): ConsumerConnector =
    new ZookeeperConsumerConnector(config)

  /**
   * Create a ConsumerConnector
   *
   * @param config at the minimum, need to specify the groupid of the consumer and the zookeeper
   * connection string zookeeper.connect.
   */
  def createJavaConsumerConnector(config: ConsumerConfig): kafka.javaapi.consumer.ConsumerConnector =
    new kafka.javaapi.consumer.ZookeeperConsumerConnector(config)
}
| flange/drift-dev | kafka/00-kafka_2.11-0.10.1.0/libs/tmp/kafka/consumer/ConsumerConnector.scala | Scala | apache-2.0 | 4,668 |
/* Example based on discussion with Pierre Quinton.
This fails, as it should. See ../valid/LawsExample.scala for working version. */
import stainless.lang._
import stainless.annotation._
// Stainless verification benchmark that is EXPECTED to be reported invalid
// (see the header comment above); do not "fix" the logic.
object LawsExampleInvalid {
  abstract class A[T] {
    // A distinguished element of T.
    def a: T
    // An abstract binary operation on T.
    def f(x: T, y: T): T

    // Law: f is injective in its second argument.
    @law
    def uniqueRight(x: T, y: T, z: T): Boolean = {
      f(x,y) != f(x,z) || y == z
    }

    def something(x: T, y: T): Boolean = {
      require(f(x,y) == f(x,a))
      // NOTE(review): this call is evaluated only for its Boolean value and is
      // then discarded — presumably it does not instantiate the law as a
      // hypothesis here, which is why verification fails; confirm against the
      // valid variant referenced in the header.
      uniqueRight(x,y,a)
      y == this.a
    }.holds

    def somethingelse(y: T): Unit = {
      require(f(a,a) == f(a,y))
      assert(something(a,y))
    }.ensuring(y == a)
  }
}
| epfl-lara/stainless | frontends/benchmarks/verification/invalid/LawsExampleInvalid.scala | Scala | apache-2.0 | 643 |
package com.avsystem.commons
package spring
import java.lang.reflect.{Constructor, Method, Modifier}
import org.springframework.beans.factory.config.ConstructorArgumentValues.ValueHolder
import org.springframework.beans.factory.config.{BeanDefinition, BeanDefinitionHolder, ConfigurableListableBeanFactory}
import org.springframework.beans.factory.support._
import org.springframework.core.{ParameterNameDiscoverer, StandardReflectionParameterNameDiscoverer}
import scala.beans.BeanProperty
import scala.reflect.{ScalaLongSignature, ScalaSignature}
/**
 * Spring post-processor that injects Scala default parameter values into bean
 * definitions backed by Scala classes. For every constructor (or configured
 * factory-method) parameter not supplied explicitly in the bean definition,
 * the compiler-generated static `<name>$default$<i>` method is invoked and its
 * result registered as an indexed constructor argument.
 */
class ScalaDefaultValuesInjector extends BeanDefinitionRegistryPostProcessor {
  // Strategy for recovering parameter names, needed to detect arguments that
  // were supplied by name rather than by index.
  @BeanProperty var paramNameDiscoverer: ParameterNameDiscoverer =
    new StandardReflectionParameterNameDiscoverer
  // Prefer the thread context class loader; fall back to this class' loader.
  def classLoader: ClassLoader =
    Thread.currentThread.getContextClassLoader.opt getOrElse getClass.getClassLoader
  // Load without initializing — initialization happens at bean creation time.
  def loadClass(name: String): Class[_] = Class.forName(name, false, classLoader)
  def postProcessBeanDefinitionRegistry(registry: BeanDefinitionRegistry): Unit = {
    // Recursively walk every value reachable from a bean definition so nested
    // definitions (inner beans, holders, managed collections) get defaults too.
    def traverse(value: Any): Unit = value match {
      case bd: BeanDefinition =>
        bd.getConstructorArgumentValues.getGenericArgumentValues.asScala.foreach(traverse)
        bd.getConstructorArgumentValues.getIndexedArgumentValues.values.asScala.foreach(traverse)
        bd.getPropertyValues.getPropertyValueList.asScala.foreach(pv => traverse(pv.getValue))
        injectDefaultValues(bd)
      case bdw: BeanDefinitionHolder =>
        traverse(bdw.getBeanDefinition)
      case vh: ValueHolder =>
        traverse(vh.getValue)
      case ml: ManagedList[_] =>
        ml.asScala.foreach(traverse)
      case ms: ManagedSet[_] =>
        ms.asScala.foreach(traverse)
      case mm: ManagedMap[_, _] =>
        mm.asScala.foreach {
          case (k, v) =>
            traverse(k)
            traverse(v)
        }
      case _ =>
    }
    registry.getBeanDefinitionNames
      .foreach(n => traverse(registry.getBeanDefinition(n)))
  }
  // A class counts as "Scala" when it — or any enclosing class — carries the
  // pickled signature annotation emitted by scalac.
  private def isScalaClass(cls: Class[_]): Boolean = cls.getEnclosingClass match {
    case null => cls.getAnnotation(classOf[ScalaSignature]) != null ||
      cls.getAnnotation(classOf[ScalaLongSignature]) != null
    case encls => isScalaClass(encls)
  }
  private def injectDefaultValues(bd: BeanDefinition): Unit =
    bd.getBeanClassName.opt.map(loadClass)
      .recoverToOpt[ClassNotFoundException].flatten.filter(isScalaClass)
      .foreach { clazz =>
        // Constructor injection unless a factory method is configured.
        val usingConstructor = bd.getFactoryMethodName == null
        val factoryExecs =
          if (usingConstructor) clazz.getConstructors.toVector
          else clazz.getMethods.iterator.filter(_.getName == bd.getFactoryMethodName).toVector
        // "$lessinit$greater" is the JVM-encoded name of `<init>` used in the
        // synthetic default-value method names for constructors.
        val factorySymbolName =
          if (usingConstructor) "$lessinit$greater" else bd.getFactoryMethodName
        // Defaults can only be matched unambiguously to a single candidate.
        if (factoryExecs.size == 1) {
          val constrVals = bd.getConstructorArgumentValues
          val factoryExec = factoryExecs.head
          val paramNames = factoryExec match {
            case c: Constructor[_] => paramNameDiscoverer.getParameterNames(c)
            case m: Method => paramNameDiscoverer.getParameterNames(m)
          }
          (0 until factoryExec.getParameterCount).foreach { i =>
            // Synthetic static method holding the default for parameter i+1
            // (default-value methods are 1-based).
            def defaultValueMethod = clazz.getMethod(s"$factorySymbolName$$default$$${i + 1}")
              .recoverToOpt[NoSuchMethodException].filter(m => Modifier.isStatic(m.getModifiers))
            // Argument already supplied by name in the bean definition?
            def specifiedNamed = paramNames != null &&
              constrVals.getGenericArgumentValues.asScala.exists(_.getName == paramNames(i))
            // Argument already supplied by position?
            def specifiedIndexed =
              constrVals.getIndexedArgumentValues.get(i) != null
            if (!specifiedNamed && !specifiedIndexed) {
              defaultValueMethod.foreach { dvm =>
                constrVals.addIndexedArgumentValue(i, dvm.invoke(null))
              }
            }
          }
        }
      }
  // All work happens on bean definitions; nothing to do at the factory stage.
  def postProcessBeanFactory(beanFactory: ConfigurableListableBeanFactory): Unit = ()
}
| AVSystem/scala-commons | commons-spring/src/main/scala/com/avsystem/commons/spring/ScalaDefaultValuesInjector.scala | Scala | mit | 3,996 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File
import java.nio.charset.Charset
import com.google.common.io.Files
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Time, Seconds, StreamingContext}
import org.apache.spark.storage.StorageLevel
//import org.apache.spark.util.IntParam
import org.apache.spark.Logging
import org.apache.log4j.{Level, Logger}
/**
* Counts words in text encoded with UTF8 received from the network every second.
*
* Usage: WordCountStream <hostname> <port> <checkpoint-directory> <output-file>
* <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive
* data. <checkpoint-directory> directory to HDFS-compatible file system which checkpoint data
* <output-file> file to which the word counts will be appended
*
* <checkpoint-directory> and <output-file> must be absolute paths
*
* To run this on your local machine, you need to first run a Netcat server
*
* `$ nc -lk 9999`
*
* and run the example as
*
* `$ ./bin/run-example org.apache.spark.examples.streaming.WordCountStream \
* localhost 9999 ~/checkpoint/ ~/out`
*
* If the directory ~/checkpoint/ does not exist (e.g. running for the first time), it will create
* a new StreamingContext (will print "Creating new context" to the console). Otherwise, if
* checkpoint data exists in ~/checkpoint/, then it will create StreamingContext from
* the checkpoint data.
*
* Refer to the online documentation for more details.
*/
object WordCountStream {
  /**
   * Build a fresh StreamingContext reading from ip:port, checkpointing to
   * checkpointDirectory, with a batch interval of `interval` seconds. Only
   * called by StreamingContext.getOrCreate when no checkpoint data exists.
   */
  def createContext(ip: String, port: Int, outputPath: String, checkpointDirectory: String, interval: Int)
    : StreamingContext = {
    // If you do not see this printed, that means the StreamingContext has been loaded
    // from the new checkpoint
    val outputFile = new File(outputPath)
    if (outputFile.exists()) outputFile.delete()
    println("Creating new context")
    val sparkConf = new SparkConf().setAppName("WordCountStream")
    // Create the context with a batch size of `interval` seconds
    val ssc = new StreamingContext(sparkConf, Seconds(interval))
    ssc.checkpoint(checkpointDirectory)
    // Create a socket stream on target ip:port and count the
    // words in input stream of \n delimited text (eg. generated by 'nc')
    val lines = ssc.socketTextStream(ip, port, StorageLevel.MEMORY_AND_DISK_SER)
    val words = lines.flatMap(_.split(" "))
    val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
    // The foreachRDD below is the output operation that forces each batch to
    // be computed; the actual printing/file append is deliberately disabled,
    // leaving only the (unused) summary string construction.
    wordCounts.foreachRDD((rdd: RDD[(String, Int)], time: Time) => {
      val counts = "Counts at time " + time + " " + rdd.collect().mkString("[", ", ", "]")
      //println(counts)
      //println("Appending to " + outputFile.getAbsolutePath)
      //Files.append(counts + "\n", outputFile, Charset.defaultCharset())
    })
    ssc
  }
  /**
   * Entry point: validates arguments, then recovers the StreamingContext from
   * checkpoint data if present, otherwise creates a new one via createContext.
   */
  def main(args: Array[String]) {
    if (args.length != 5) {
      System.err.println("You arguments were " + args.mkString("[", ", ", "]"))
      System.err.println(
        """
          |Usage: WordCountStream <hostname> <port> <checkpoint-directory> <interval>
          |   <output-file>. <hostname> and <port> describe the TCP server that Spark
          |   Streaming would connect to receive data. <checkpoint-directory> directory to
          |   HDFS-compatible file system which checkpoint data <output-file> file to which the
          |   word counts will be appended
          |
          |In local mode, <master> should be 'local[n]' with n > 1
          |Both <checkpoint-directory> and <output-file> must be absolute paths
        """.stripMargin
      )
      System.exit(1)
    }
    StreamingExamples.setStreamingLogLevels()
    // IntParam extractors parse the numeric arguments; a malformed number will
    // make this pattern match fail with a MatchError.
    val Array(ip, IntParam(port), checkpointDirectory, outputPath, IntParam(interval)) = args
    // Recover from checkpoint if available, else build a new context.
    val ssc = StreamingContext.getOrCreate(checkpointDirectory,
      () => {
        createContext(ip, port, outputPath, checkpointDirectory, interval)
      })
    ssc.start()
    ssc.awaitTermination()
  }
}
object StreamingExamples extends Logging {

  /** Set reasonable logging levels for streaming if the user has not configured log4j. */
  def setStreamingLogLevels() {
    val userConfiguredLog4j = Logger.getRootLogger.getAllAppenders.hasMoreElements
    if (!userConfiguredLog4j) {
      // Log once through Spark's Logging trait so its default log4j setup is
      // initialized before we override the level.
      logInfo("Setting log level to [WARN] for streaming example." +
        " To override add a custom log4j.properties to the classpath.")
      Logger.getRootLogger.setLevel(Level.WARN)
    }
  }
}
object IntParam {
  /** Extractor: parses a base-10 integer string, yielding None when malformed. */
  def unapply(str: String): Option[Int] =
    try Some(str.toInt)
    catch { case _: NumberFormatException => None }
}
| huangqundl/MySparkApps | old/word_count_stream_recovery/src/main/scala/WordCountStream.scala | Scala | gpl-2.0 | 5,536 |
package scala.collection.scalameter.mutable.HashBag
import org.scalameter.api._
/** Benchmarks Bag#distinct against List#distinct via the HashBagBenchmark harness. */
object HashBag_distinct extends HashBagBenchmark {
  // Input sizes under test: 5000, 10000, ..., 50000.
  def sizes = Gen.range("size")(5000, 50000, 5000)
  // Label used when reporting results.
  def funName: String = "distinct"
  // Operation measured on the bag implementation.
  def fun(bag: Bag[BigInt]): Unit = bag.distinct
  // Baseline: the same operation on a plain List.
  def listFun(list: List[BigInt]): Unit = list.distinct
  runBenchmark()
}
| nicolasstucki/multisets | src/test/scala/scala/collection/scalameter/mutable/HashBag/HashBag_distinct.scala | Scala | bsd-3-clause | 349 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras
import com.intel.analytics.bigdl.nn.PoissonCriterion
class PoissonCriterionSpec extends KerasBaseSpec {
  // Cross-checks BigDL's PoissonCriterion against Keras' `poisson` loss on a
  // random 2x3 input/target pair; the harness compares both the loss value
  // and the gradient. The string below is executed as Python by the Keras
  // bridge — do not reformat it.
  "PoissonCriterion" should "be ok" in {
    val kerasCode =
      """
        |input_tensor = Input(shape=[3])
        |target_tensor = Input(shape=[3])
        |loss = poisson(target_tensor, input_tensor)
        |input = np.random.uniform(0, 1, [2, 3])
        |Y = np.random.uniform(0, 1, [2, 3])
      """.stripMargin
    val kld = new PoissonCriterion[Float]()
    checkOutputAndGradForLoss(kld, kerasCode)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/keras/PoissonCriterionSpec.scala | Scala | apache-2.0 | 1,153 |
package temportalist.esotericraft.galvanization.common.task.ai.world
import net.minecraft.block.Block
import net.minecraft.block.state.IBlockState
import net.minecraft.client.Minecraft
import net.minecraft.entity.EntityCreature
import net.minecraft.entity.player.{EntityPlayer, EntityPlayerMP}
import net.minecraft.init.Blocks
import net.minecraft.item.ItemSword
import net.minecraft.network.play.client.CPacketPlayerDigging
import net.minecraft.network.play.server.SPacketBlockChange
import net.minecraft.util.math.BlockPos
import net.minecraft.util.{EnumFacing, EnumHand}
import net.minecraft.world.WorldSettings.GameType
import net.minecraft.world.{World, WorldServer}
import net.minecraftforge.common.util.FakePlayer
import net.minecraftforge.common.{ForgeHooks, MinecraftForge}
import net.minecraftforge.event.world.BlockEvent
import net.minecraftforge.fml.common.eventhandler.SubscribeEvent
import net.minecraftforge.fml.common.gameevent.TickEvent.WorldTickEvent
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import temportalist.esotericraft.api.galvanize.ai.GalvanizeTask
import temportalist.esotericraft.api.init.Details
import temportalist.esotericraft.galvanization.common.task.ai.interfaces.IFakePlayer
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
*
* Created by TheTemportalist on 5/26/2016.
*
* @author TheTemportalist
*/
@GalvanizeTask(modid = Details.MOD_ID,
	name = "harvestTree",
	displayName = "Harvest (Tree)"
)
class TaskHarvestTree(
		pos: BlockPos, face: EnumFacing
) extends TaskHarvest(pos, face) with IFakePlayer {

	// This task only targets wood (log) blocks.
	override def isBlockValid(world: World, pos: BlockPos, state: IBlockState): Boolean = {
		state.getBlock.isWood(world, pos)
	}

	// Server-side only: swing the entity's arm and start the incremental tree
	// fell through a fake player (presumably so block-break events carry a
	// player context — TODO confirm against IFakePlayer).
	override def harvestState(world: World, pos: BlockPos, state: IBlockState,
			entity: EntityCreature): Unit = {
		world match {
			case worldServer: WorldServer =>
				val fakePlayer = this.getFakePlayer(worldServer)
				entity.swingArm(EnumHand.MAIN_HAND)
				this.harvestTree(world, pos, fakePlayer)
			case _ =>
		}
	}

	// Kick off the tick-driven breaker, felling 1 block per world tick.
	def harvestTree(world: World, pos: BlockPos, fakePlayer: FakePlayer): Unit = {
		new TaskHarvestTree.RunMe(world, pos, fakePlayer, 1)
	}

}
object TaskHarvestTree {

	/**
	 * Tick-driven breadth-first tree feller. Registers itself on the Forge
	 * event bus at construction and breaks up to `blocksPerTick` wood blocks
	 * per server world tick, queueing neighbouring wood for later ticks.
	 * Adapted from:
	 * https://github.com/SlimeKnights/TinkersConstruct/blob/f6dd1ea51486cfd1ae5b39ca8021e93cfa1413bb/src/main/java/slimeknights/tconstruct/tools/item/LumberAxe.java#L193
	 */
	class RunMe(
			private val world: World,
			private val start: BlockPos,
			private val player: EntityPlayer,
			private val blocksPerTick: Int
	) {
		// Pending positions to examine; seeded with the starting block.
		val blocksToBreak = mutable.Queue[BlockPos]()
		// NOTE(review): never appended to anywhere in this class — apparently
		// dead state; confirm before removing.
		val blocksBroken = ListBuffer[BlockPos]()
		this.blocksToBreak += this.start
		MinecraftForge.EVENT_BUS.register(this)
		// Deregister from the event bus; called once the queue is drained or
		// we detect a client-side tick.
		def finish(): Unit = {
			MinecraftForge.EVENT_BUS.unregister(this)
		}
		@SubscribeEvent
		def onWorldTick(event: WorldTickEvent): Unit = {
			if (event.side.isClient) {
				this.finish()
				return
			}
			var pos: BlockPos = null
			var state: IBlockState = null
			var blocksRemainingInTick = this.blocksPerTick
			// Non-wood positions are dequeued without consuming the per-tick
			// budget, so a tick may scan many positions but break few.
			while (blocksRemainingInTick > 0) {
				if (this.blocksToBreak.isEmpty) {
					this.finish()
					return
				}
				pos = this.blocksToBreak.dequeue()
				state = this.world.getBlockState(pos)
				if (state.getBlock.isWood(this.world, pos)) {
					// Get the neighbors
					for (face <- EnumFacing.HORIZONTALS) {
						val posNeighbor = pos.offset(face)
						if (!this.blocksToBreak.contains(posNeighbor))
							this.blocksToBreak += posNeighbor
					}
					// Support for Acacia Trees (diagonal wood): queue the full
					// 3x3 layer one block above (includes straight up).
					for {
						x <- 0 until 3
						z <- 0 until 3
					} {
						val posNeighbor = pos.add(-1 + x, 1, -1 + z)
						if (!this.blocksToBreak.contains(posNeighbor))
							this.blocksToBreak += posNeighbor
					}
					breakBlock(this.world, pos, this.player, stateIn = state)
					blocksRemainingInTick -= 1
				}
			}
		}
	}

	/**
	 * Break a single block as `player`, mirroring vanilla harvest logic:
	 * creative mode skips drops, server side fires the break event and
	 * produces drops/XP, client side plays the effect and notifies the server.
	 */
	def breakBlock(world: World, pos: BlockPos, player: EntityPlayer, stateIn: IBlockState = null): Unit = {
		val state = if (stateIn == null) world.getBlockState(pos) else stateIn
		val block = state.getBlock
		if (!ForgeHooks.canHarvestBlock(block, player, world, pos))
			return
		if (player.capabilities.isCreativeMode) {
			block.onBlockHarvested(world, pos, state, player)
			if (block.removedByPlayer(state, world, pos, player, false))
				block.onBlockDestroyedByPlayer(world, pos, state)
			// Keep the client's view of the block in sync.
			if (!world.isRemote)
				player.asInstanceOf[EntityPlayerMP].connection.sendPacket(
					new SPacketBlockChange(world, pos))
			return
		}
		if (!world.isRemote) {
			player match {
				case playerMP: EntityPlayerMP =>
					val xp = onBlockBreakEvent(world,
						playerMP.interactionManager.getGameType, playerMP, pos)
					// -1 signals the break event was cancelled.
					if (xp == -1) return
					block.onBlockHarvested(world, pos, state, player)
					if (block.removedByPlayer(state, world, pos, player, true)) {
						block.onBlockDestroyedByPlayer(world, pos, state)
						block.harvestBlock(world, player, pos, state,
							world.getTileEntity(pos), null)
						block.dropXpOnBlockBreak(world, pos, xp)
					}
					if (playerMP.connection != null)
						playerMP.connection.sendPacket(new SPacketBlockChange(world, pos))
				case _ =>
			}
		}
		else {
			// 2001 = block-break particles/sound effect.
			world.playEvent(2001, pos, Block.getStateId(state))
			if (block.removedByPlayer(state, world, pos, player, true)) {
				block.onBlockDestroyedByPlayer(world, pos, state)
			}
			sendUpdateDiggingPacket(pos)
		}
	}

	/**
	 * Fire the Forge BreakEvent for this break, pre-cancelling in the cases
	 * vanilla forbids (sword in creative, spectator, adventure without the
	 * right tool). Returns the XP to drop, or -1 if the event was cancelled.
	 */
	def onBlockBreakEvent(world: World, gameType: GameType, playerMP: EntityPlayerMP,
			pos: BlockPos): Int = {
		//ForgeHooks.onBlockBreakEvent(world, gameType, playerMP, pos)
		var preCancelEvent = false
		if (gameType.isCreative && playerMP.getHeldItemMainhand != null &&
				playerMP.getHeldItemMainhand.getItem.isInstanceOf[ItemSword]) {
			preCancelEvent = true
		}
		if (gameType.isAdventure) {
			if (gameType == GameType.SPECTATOR)
				preCancelEvent = true
			if (!playerMP.isAllowEdit) {
				val stack = playerMP.getHeldItemMainhand
				if (stack == null || !stack.canDestroy(world.getBlockState(pos).getBlock))
					preCancelEvent = true
			}
		}
		// Tell the client the block is air before firing the event; it will be
		// restored below if the event is cancelled.
		if (world.getTileEntity(pos) == null && playerMP.connection != null) {
			val packet = new SPacketBlockChange(world, pos)
			packet.blockState = Blocks.AIR.getDefaultState
			playerMP.connection.sendPacket(packet)
		}
		val state = world.getBlockState(pos)
		val event = new BlockEvent.BreakEvent(world, pos, state, playerMP)
		event.setCanceled(preCancelEvent)
		MinecraftForge.EVENT_BUS.post(event)
		if (event.isCanceled) {
			// Cancelled: resync the real block (and any tile entity) to the client.
			if (playerMP.connection != null)
				playerMP.connection.sendPacket(new SPacketBlockChange(world, pos))
			val tile = world.getTileEntity(pos)
			if (tile != null && playerMP.connection != null) {
				val pkt = tile.getUpdatePacket
				if (pkt != null)
					playerMP.connection.sendPacket(pkt)
			}
		}
		if (event.isCanceled) -1 else event.getExpToDrop
	}

	// Client side: tell the server we finished destroying the targeted block.
	@SideOnly(Side.CLIENT)
	def sendUpdateDiggingPacket(pos: BlockPos): Unit = {
		Minecraft.getMinecraft.getConnection.sendPacket(
			new CPacketPlayerDigging(CPacketPlayerDigging.Action.STOP_DESTROY_BLOCK,
				pos, Minecraft.getMinecraft.objectMouseOver.sideHit
			)
		)
	}

}
| TheTemportalist/EsoTeriCraft | src/main/scala/temportalist/esotericraft/galvanization/common/task/ai/world/TaskHarvestTree.scala | Scala | apache-2.0 | 7,156 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx
import org.apache.spark.{SparkContext, SparkFunSuite}
import org.apache.spark.graphx.Graph._
import org.apache.spark.graphx.PartitionStrategy._
import org.apache.spark.rdd._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
class GraphSuite extends SparkFunSuite with LocalSparkContext {
def starGraph(sc: SparkContext, n: Int): Graph[String, Int] = {
Graph.fromEdgeTuples(sc.parallelize((1 to n).map(x => (0: VertexId, x: VertexId)), 3), "v")
}
test("Graph.fromEdgeTuples") {
withSpark { sc =>
val ring = (0L to 100L).zip((1L to 99L) :+ 0L)
val doubleRing = ring ++ ring
val graph = Graph.fromEdgeTuples(sc.parallelize(doubleRing), 1)
assert(graph.edges.count() === doubleRing.size)
assert(graph.edges.collect().forall(e => e.attr == 1))
// uniqueEdges option should uniquify edges and store duplicate count in edge attributes
val uniqueGraph = Graph.fromEdgeTuples(sc.parallelize(doubleRing), 1, Some(RandomVertexCut))
assert(uniqueGraph.edges.count() === ring.size)
assert(uniqueGraph.edges.collect().forall(e => e.attr == 2))
}
}
test("Graph.fromEdges") {
withSpark { sc =>
val ring = (0L to 100L).zip((1L to 99L) :+ 0L).map { case (a, b) => Edge(a, b, 1) }
val graph = Graph.fromEdges(sc.parallelize(ring), 1.0F)
assert(graph.edges.count() === ring.size)
}
}
test("Graph.apply") {
withSpark { sc =>
val rawEdges = (0L to 98L).zip((1L to 99L) :+ 0L)
val edges: RDD[Edge[Int]] = sc.parallelize(rawEdges).map { case (s, t) => Edge(s, t, 1) }
val vertices: RDD[(VertexId, Boolean)] = sc.parallelize((0L until 10L).map(id => (id, true)))
val graph = Graph(vertices, edges, false)
assert( graph.edges.count() === rawEdges.size )
// Vertices not explicitly provided but referenced by edges should be created automatically
assert( graph.vertices.count() === 100)
graph.triplets.collect().map { et =>
assert((et.srcId < 10 && et.srcAttr) || (et.srcId >= 10 && !et.srcAttr))
assert((et.dstId < 10 && et.dstAttr) || (et.dstId >= 10 && !et.dstAttr))
}
}
}
test("triplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.triplets.map(et => (et.srcId, et.dstId, et.srcAttr, et.dstAttr)).collect().toSet
=== (1 to n).map(x => (0: VertexId, x: VertexId, "v", "v")).toSet)
}
}
test("partitionBy") {
withSpark { sc =>
def mkGraph(edges: List[(Long, Long)]): Graph[Int, Int] = {
Graph.fromEdgeTuples(sc.parallelize(edges, 2), 0)
}
def nonemptyParts(graph: Graph[Int, Int]): RDD[List[Edge[Int]]] = {
graph.edges.partitionsRDD.mapPartitions { iter =>
Iterator(iter.next()._2.iterator.toList)
}.filter(_.nonEmpty)
}
val identicalEdges = List((0L, 1L), (0L, 1L))
val canonicalEdges = List((0L, 1L), (1L, 0L))
val sameSrcEdges = List((0L, 1L), (0L, 2L))
// The two edges start out in different partitions
for (edges <- List(identicalEdges, canonicalEdges, sameSrcEdges)) {
assert(nonemptyParts(mkGraph(edges)).count === 2)
}
// partitionBy(RandomVertexCut) puts identical edges in the same partition
assert(nonemptyParts(mkGraph(identicalEdges).partitionBy(RandomVertexCut)).count === 1)
// partitionBy(EdgePartition1D) puts same-source edges in the same partition
assert(nonemptyParts(mkGraph(sameSrcEdges).partitionBy(EdgePartition1D)).count === 1)
// partitionBy(CanonicalRandomVertexCut) puts edges that are identical modulo direction into
// the same partition
assert(
nonemptyParts(mkGraph(canonicalEdges).partitionBy(CanonicalRandomVertexCut)).count === 1)
// partitionBy(EdgePartition2D) puts identical edges in the same partition
assert(nonemptyParts(mkGraph(identicalEdges).partitionBy(EdgePartition2D)).count === 1)
// partitionBy(EdgePartition2D) ensures that vertices need only be replicated to 2 * sqrt(p)
// partitions
val n = 100
val p = 100
val verts = 1 to n
val graph = Graph.fromEdgeTuples(sc.parallelize(verts.flatMap(x =>
verts.withFilter(y => y % x == 0).map(y => (x: VertexId, y: VertexId))), p), 0)
assert(graph.edges.partitions.length === p)
val partitionedGraph = graph.partitionBy(EdgePartition2D)
assert(graph.edges.partitions.length === p)
val bound = 2 * math.sqrt(p)
// Each vertex should be replicated to at most 2 * sqrt(p) partitions
val partitionSets = partitionedGraph.edges.partitionsRDD.mapPartitions { iter =>
val part = iter.next()._2
Iterator((part.iterator.flatMap(e => Iterator(e.srcId, e.dstId))).toSet)
}.collect
if (!verts.forall(id => partitionSets.count(_.contains(id)) <= bound)) {
val numFailures = verts.count(id => partitionSets.count(_.contains(id)) > bound)
val failure = verts.maxBy(id => partitionSets.count(_.contains(id)))
fail(("Replication bound test failed for %d/%d vertices. " +
"Example: vertex %d replicated to %d (> %f) partitions.").format(
numFailures, n, failure, partitionSets.count(_.contains(failure)), bound))
}
// This should not be true for the default hash partitioning
val partitionSetsUnpartitioned = graph.edges.partitionsRDD.mapPartitions { iter =>
val part = iter.next()._2
Iterator((part.iterator.flatMap(e => Iterator(e.srcId, e.dstId))).toSet)
}.collect
assert(verts.exists(id => partitionSetsUnpartitioned.count(_.contains(id)) > bound))
// Forming triplets view
val g = Graph(
sc.parallelize(List((0L, "a"), (1L, "b"), (2L, "c"))),
sc.parallelize(List(Edge(0L, 1L, 1), Edge(0L, 2L, 1)), 2))
assert(g.triplets.collect().map(_.toTuple).toSet ===
Set(((0L, "a"), (1L, "b"), 1), ((0L, "a"), (2L, "c"), 1)))
val gPart = g.partitionBy(EdgePartition2D)
assert(gPart.triplets.collect().map(_.toTuple).toSet ===
Set(((0L, "a"), (1L, "b"), 1), ((0L, "a"), (2L, "c"), 1)))
}
}
test("mapVertices") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
// mapVertices preserving type
val mappedVAttrs = star.mapVertices((vid, attr) => attr + "2")
assert(mappedVAttrs.vertices.collect().toSet === (0 to n).map(x => (x: VertexId, "v2")).toSet)
// mapVertices changing type
val mappedVAttrs2 = star.mapVertices((vid, attr) => attr.length)
assert(mappedVAttrs2.vertices.collect().toSet === (0 to n).map(x => (x: VertexId, 1)).toSet)
}
}
test("mapVertices changing type with same erased type") {
withSpark { sc =>
val vertices = sc.parallelize(Array[(Long, Option[java.lang.Integer])](
(1L, Some(1)),
(2L, Some(2)),
(3L, Some(3))
))
val edges = sc.parallelize(Array(
Edge(1L, 2L, 0),
Edge(2L, 3L, 0),
Edge(3L, 1L, 0)
))
val graph0 = Graph(vertices, edges)
// Trigger initial vertex replication
graph0.triplets.foreach(x => {})
// Change type of replicated vertices, but preserve erased type
val graph1 = graph0.mapVertices { case (vid, integerOpt) =>
integerOpt.map((x: java.lang.Integer) => x.toDouble: java.lang.Double)
}
// Access replicated vertices, exposing the erased type
val graph2 = graph1.mapTriplets(t => t.srcAttr.get)
assert(graph2.edges.map(_.attr).collect().toSet === Set[java.lang.Double](1.0, 2.0, 3.0))
}
}
test("mapEdges") {
withSpark { sc =>
val n = 3
val star = starGraph(sc, n)
val starWithEdgeAttrs = star.mapEdges(e => e.dstId)
val edges = starWithEdgeAttrs.edges.collect()
assert(edges.size === n)
assert(edges.toSet === (1 to n).map(x => Edge(0, x, x)).toSet)
}
}
test("mapTriplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.mapTriplets(et => et.srcAttr + et.dstAttr).edges.collect().toSet ===
(1L to n).map(x => Edge(0, x, "vv")).toSet)
}
}
test("reverse") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
assert(star.reverse.outDegrees.collect().toSet === (1 to n).map(x => (x: VertexId, 1)).toSet)
}
}
test("reverse with join elimination") {
withSpark { sc =>
val vertices: RDD[(VertexId, Int)] = sc.parallelize(Array((1L, 1), (2L, 2)))
val edges: RDD[Edge[Int]] = sc.parallelize(Array(Edge(1L, 2L, 0)))
val graph = Graph(vertices, edges).reverse
val result = graph.mapReduceTriplets[Int](et => Iterator((et.dstId, et.srcAttr)), _ + _)
assert(result.collect().toSet === Set((1L, 2)))
}
}
test("subgraph") {
withSpark { sc =>
// Create a star graph of 10 veritces.
val n = 10
val star = starGraph(sc, n)
// Take only vertices whose vids are even
val subgraph = star.subgraph(vpred = (vid, attr) => vid % 2 == 0)
// We should have 5 vertices.
assert(subgraph.vertices.collect().toSet === (0 to n by 2).map(x => (x, "v")).toSet)
// And 4 edges.
assert(subgraph.edges.map(_.copy()).collect().toSet ===
(2 to n by 2).map(x => Edge(0, x, 1)).toSet)
}
}
test("mask") {
withSpark { sc =>
val n = 5
val vertices = sc.parallelize((0 to n).map(x => (x: VertexId, x)))
val edges = sc.parallelize((1 to n).map(x => Edge(0, x, x)))
val graph: Graph[Int, Int] = Graph(vertices, edges).cache()
val subgraph = graph.subgraph(
e => e.dstId != 4L,
(vid, vdata) => vid != 3L
).mapVertices((vid, vdata) => -1).mapEdges(e => -1)
val projectedGraph = graph.mask(subgraph)
val v = projectedGraph.vertices.collect().toSet
assert(v === Set((0, 0), (1, 1), (2, 2), (4, 4), (5, 5)))
// the map is necessary because of object-reuse in the edge iterator
val e = projectedGraph.edges.map(e => Edge(e.srcId, e.dstId, e.attr)).collect().toSet
assert(e === Set(Edge(0, 1, 1), Edge(0, 2, 2), Edge(0, 5, 5)))
}
}
test("groupEdges") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n)
val doubleStar = Graph.fromEdgeTuples(
sc.parallelize((1 to n).flatMap(x =>
List((0: VertexId, x: VertexId), (0: VertexId, x: VertexId))), 1), "v")
val star2 = doubleStar.groupEdges { (a, b) => a}
assert(star2.edges.collect().toArray.sorted(Edge.lexicographicOrdering[Int]) ===
star.edges.collect().toArray.sorted(Edge.lexicographicOrdering[Int]))
assert(star2.vertices.collect().toSet === star.vertices.collect().toSet)
}
}
test("mapReduceTriplets") {
withSpark { sc =>
val n = 5
val star = starGraph(sc, n).mapVertices { (_, _) => 0 }.cache()
val starDeg = star.joinVertices(star.degrees){ (vid, oldV, deg) => deg }
val neighborDegreeSums = starDeg.mapReduceTriplets(
edge => Iterator((edge.srcId, edge.dstAttr), (edge.dstId, edge.srcAttr)),
(a: Int, b: Int) => a + b)
assert(neighborDegreeSums.collect().toSet === (0 to n).map(x => (x, n)).toSet)
// activeSetOpt
val allPairs = for (x <- 1 to n; y <- 1 to n) yield (x: VertexId, y: VertexId)
val complete = Graph.fromEdgeTuples(sc.parallelize(allPairs, 3), 0)
val vids = complete.mapVertices((vid, attr) => vid).cache()
val active = vids.vertices.filter { case (vid, attr) => attr % 2 == 0 }
val numEvenNeighbors = vids.mapReduceTriplets(et => {
// Map function should only run on edges with destination in the active set
if (et.dstId % 2 != 0) {
throw new Exception("map ran on edge with dst vid %d, which is odd".format(et.dstId))
}
Iterator((et.srcId, 1))
}, (a: Int, b: Int) => a + b, Some((active, EdgeDirection.In))).collect().toSet
assert(numEvenNeighbors === (1 to n).map(x => (x: VertexId, n / 2)).toSet)
// outerJoinVertices followed by mapReduceTriplets(activeSetOpt)
val ringEdges = sc.parallelize((0 until n).map(x => (x: VertexId, (x + 1) % n: VertexId)), 3)
val ring = Graph.fromEdgeTuples(ringEdges, 0) .mapVertices((vid, attr) => vid).cache()
val changed = ring.vertices.filter { case (vid, attr) => attr % 2 == 1 }.mapValues(-_).cache()
val changedGraph = ring.outerJoinVertices(changed) { (vid, old, newOpt) =>
newOpt.getOrElse(old)
}
val numOddNeighbors = changedGraph.mapReduceTriplets(et => {
// Map function should only run on edges with source in the active set
if (et.srcId % 2 != 1) {
throw new Exception("map ran on edge with src vid %d, which is even".format(et.dstId))
}
Iterator((et.dstId, 1))
}, (a: Int, b: Int) => a + b, Some(changed, EdgeDirection.Out)).collect().toSet
assert(numOddNeighbors === (2 to n by 2).map(x => (x: VertexId, 1)).toSet)
}
}
test("aggregateMessages") {
withSpark { sc =>
val n = 5
val agg = starGraph(sc, n).aggregateMessages[String](
ctx => {
if (ctx.dstAttr != null) {
throw new Exception(
"expected ctx.dstAttr to be null due to TripletFields, but it was " + ctx.dstAttr)
}
ctx.sendToDst(ctx.srcAttr)
}, _ + _, TripletFields.Src)
assert(agg.collect().toSet === (1 to n).map(x => (x: VertexId, "v")).toSet)
}
}
test("outerJoinVertices") {
withSpark { sc =>
val n = 5
val reverseStar = starGraph(sc, n).reverse.cache()
// outerJoinVertices changing type
val reverseStarDegrees = reverseStar.outerJoinVertices(reverseStar.outDegrees) {
(vid, a, bOpt) => bOpt.getOrElse(0)
}
val neighborDegreeSums = reverseStarDegrees.mapReduceTriplets(
et => Iterator((et.srcId, et.dstAttr), (et.dstId, et.srcAttr)),
(a: Int, b: Int) => a + b).collect().toSet
assert(neighborDegreeSums === Set((0: VertexId, n)) ++ (1 to n).map(x => (x: VertexId, 0)))
// outerJoinVertices preserving type
val messages = reverseStar.vertices.mapValues { (vid, attr) => vid.toString }
val newReverseStar =
reverseStar.outerJoinVertices(messages) { (vid, a, bOpt) => a + bOpt.getOrElse("") }
assert(newReverseStar.vertices.map(_._2).collect().toSet ===
(0 to n).map(x => "v%d".format(x)).toSet)
}
}
test("more edge partitions than vertex partitions") {
withSpark { sc =>
val verts = sc.parallelize(List((1: VertexId, "a"), (2: VertexId, "b")), 1)
val edges = sc.parallelize(List(Edge(1, 2, 0), Edge(2, 1, 0)), 2)
val graph = Graph(verts, edges)
val triplets = graph.triplets.map(et => (et.srcId, et.dstId, et.srcAttr, et.dstAttr))
.collect().toSet
assert(triplets ===
Set((1: VertexId, 2: VertexId, "a", "b"), (2: VertexId, 1: VertexId, "b", "a")))
}
}
test("checkpoint") {
val checkpointDir = Utils.createTempDir()
withSpark { sc =>
sc.setCheckpointDir(checkpointDir.getAbsolutePath)
val ring = (0L to 100L).zip((1L to 99L) :+ 0L).map { case (a, b) => Edge(a, b, 1)}
val rdd = sc.parallelize(ring)
val graph = Graph.fromEdges(rdd, 1.0F)
assert(!graph.isCheckpointed)
assert(graph.getCheckpointFiles.size === 0)
graph.checkpoint()
graph.edges.map(_.attr).count()
graph.vertices.map(_._2).count()
val edgesDependencies = graph.edges.partitionsRDD.dependencies
val verticesDependencies = graph.vertices.partitionsRDD.dependencies
assert(edgesDependencies.forall(_.rdd.isInstanceOf[CheckpointRDD[_]]))
assert(verticesDependencies.forall(_.rdd.isInstanceOf[CheckpointRDD[_]]))
assert(graph.isCheckpointed)
assert(graph.getCheckpointFiles.size === 2)
}
}
  test("cache, getStorageLevel") {
    // test to see if getStorageLevel returns correct value
    withSpark { sc =>
      val verts = sc.parallelize(List((1: VertexId, "a"), (2: VertexId, "b")), 1)
      val edges = sc.parallelize(List(Edge(1, 2, 0), Edge(2, 1, 0)), 2)
      // Request MEMORY_ONLY for both the vertex and the edge storage level.
      val graph = Graph(verts, edges, "", StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY)
      // Note: Before caching, graph.vertices is cached, but graph.edges is not (but graph.edges'
      // parent RDD is cached).
      graph.cache()
      // After an explicit cache() both views must report the requested level.
      assert(graph.vertices.getStorageLevel == StorageLevel.MEMORY_ONLY)
      assert(graph.edges.getStorageLevel == StorageLevel.MEMORY_ONLY)
    }
  }
  test("non-default number of edge partitions") {
    val n = 10
    val defaultParallelism = 3
    val numEdgePartitions = 4
    // The two values must differ, otherwise the test would pass trivially.
    assert(defaultParallelism != numEdgePartitions)
    val conf = new org.apache.spark.SparkConf()
      .set("spark.default.parallelism", defaultParallelism.toString)
    val sc = new SparkContext("local", "test", conf)
    try {
      // A star: n edges all pointing at vertex 0, explicitly stored in
      // numEdgePartitions partitions rather than spark.default.parallelism.
      val edges = sc.parallelize((1 to n).map(x => (x: VertexId, 0: VertexId)),
        numEdgePartitions)
      val graph = Graph.fromEdgeTuples(edges, 1)
      // Sum each destination's incoming source attributes; only vertex 0 has
      // in-edges, so the result must be exactly {(0, n)}.
      val neighborAttrSums = graph.mapReduceTriplets[Int](
        et => Iterator((et.dstId, et.srcAttr)), _ + _)
      assert(neighborAttrSums.collect().toSet === Set((0: VertexId, n)))
    } finally {
      sc.stop()
    }
  }
| practice-vishnoi/dev-spark-1 | graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala | Scala | apache-2.0 | 18,157 |
package com.keba.scala.bank.money
import java.util.Currency
/**
 * Immutable value object representing an amount of money in a specific
 * currency. Arithmetic is only defined between amounts of the same currency;
 * mixing currencies fails fast with an IllegalArgumentException (via require).
 *
 * Created by alexp on 29/05/16.
 */
class Money(val amount: BigDecimal, val currency: Currency) {
  /**
   * Adds supplied money to this instance, creating a new money
   * holding the result.
   * The supplied money must be in the same currency as this instance.
   *
   * @param inMoneyToAdd Money to add to this instance.
   * @return Money holding sum of this instance and supplied money.
   */
  def add(inMoneyToAdd: Money): Money = {
    require(inMoneyToAdd.currency == currency, "must add same currency money")
    val theSum = amount + inMoneyToAdd.amount
    new Money(theSum, currency)
  }

  /**
   * Subtracts supplied money from this instance, creating a new money
   * holding the result.
   * The supplied money must be in the same currency as this instance.
   *
   * @param inMoneyToSubtract Money to subtract from this instance.
   * @return Money holding the difference between this instance and
   *         supplied money.
   */
  def subtract(inMoneyToSubtract: Money): Money = {
    require(inMoneyToSubtract.currency == currency, "must subtract same currency money")
    val theDifference = amount - inMoneyToSubtract.amount
    new Money(theDifference, currency)
  }

  /**
   * Compares supplied object with object on which this method
   * is invoked upon.
   *
   * @param inObjectToCompare Object to compare with.
   * @return True if supplied object is an instance of this class and
   *         represents the same amount of money. False otherwise.
   */
  override def equals(inObjectToCompare: Any): Boolean = {
    inObjectToCompare match {
      case theMoneyToCompare: Money =>
        (theMoneyToCompare.amount == amount) && (theMoneyToCompare.currency == currency)
      case _ =>
        false
    }
  }

  /**
   * Hash code consistent with [[equals]]: two Money instances with equal
   * amount and currency hash identically, so Money can safely be used as a
   * key in hash-based collections. (The original class overrode equals
   * without hashCode, violating the equals/hashCode contract.)
   */
  override def hashCode: Int = 31 * amount.hashCode + currency.hashCode
}
| alexp82/ddd-banking-system | src/main/scala/com/keba/scala/bank/money/Money.scala | Scala | apache-2.0 | 1,910 |
/**
* Copyright (C) 2016 Hurence (bailet.thomas@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hurence.botsearch.trace
import java.util
import com.hurence.logisland.component.ProcessContext
import com.hurence.logisland.record.StandardRecord
import com.typesafe.scalalogging.slf4j.LazyLogging
/**
* Created by tom on 12/01/16.
*/
/**
 * Parser that emits logisland event records tagged with [[EVENT_TYPE]].
 *
 * Created by tom on 12/01/16.
 */
class NetworkTraceLogParser extends LazyLogging {

  // Event type stamped on every record this parser produces.
  val EVENT_TYPE = "logisland-trace"

  /**
   * Builds one event record of type [[EVENT_TYPE]].
   *
   * NOTE(review): the field-population code below is commented out, so the
   * emitted event currently carries no trace fields, and both parameters are
   * effectively unused — confirm whether this is intentional or unfinished.
   *
   * @param context processing context (currently unused)
   * @param records input records (currently unused)
   * @return a singleton collection holding the (empty) event
   */
  def process(context: ProcessContext, records: util.Collection[StandardRecord]): util.Collection[StandardRecord] = {

    val event = new StandardRecord(EVENT_TYPE)

    // build the event
   /* event.setField("ipSource", "String", trace.ipSource)
    event.setField("ipTarget", "String", trace.ipTarget)
    event.setField("avgUploadedBytes", "Float", trace.avgUploadedBytes)
    event.setField("avgDownloadedBytes", "Float", trace.avgDownloadedBytes)
    event.setField("avgTimeBetweenTwoFLows", "Float", trace.avgTimeBetweenTwoFLows)
    event.setField("mostSignificantFrequency", "Float", trace.mostSignificantFrequency)
    event.setField("flowsCount", "Integer", trace.flowsCount)
    event.setField("tags", "String", trace.tags)
    event.setField("centroid", "Integer", trace.centroid)
*/

    util.Collections.singletonList(event)
  }
}
| MiniPlayer/log-island | logisland-plugins/logisland-botsearch-plugin/src/main/scala/com/hurence/botsearch/trace/NetworkTraceLogParser.scala | Scala | apache-2.0 | 2,058 |
package fpinscala.ch13iomonad
import fpinscala.ch07parallelism.Nonblocking._
import java.util.concurrent.ExecutorService
/*
* `Task[A]` is a wrapper around `Free[Par, Either[Throwable, A]]`, with some
* convenience functions for handling exceptions.
*/
case class Task[A](get: IO[Either[Throwable, A]]) {

  /** Sequences `f` after this task; a Left (captured failure) short-circuits `f`. */
  def flatMap[B](f: A => Task[B]): Task[B] =
    Task(get.flatMap {
      case Left(e) => IO(Left(e))
      case Right(a) => f(a).get
    })

  /** Transforms a successful result, leaving captured failures untouched. */
  def map[B](f: A => B): Task[B] = flatMap(f andThen (Task.now))

  /* 'Catches' exceptions in the given task and returns them as values. */
  def attempt: Task[Either[Throwable,A]] =
    Task(get map {
      case Left(e) => Right(Left(e))
      case Right(a) => Right(Right(a))
    })

  /**
   * Recovers from failures for which `f` is defined; failures outside
   * `f`'s domain are re-raised unchanged via Task.fail.
   */
  def handle[B>:A](f: PartialFunction[Throwable,B]): Task[B] =
    attempt flatMap {
      case Left(e) => f.lift(e) map (Task.now) getOrElse Task.fail(e)
      case Right(a) => Task.now(a)
    }

  /** Falls back to `t2` when this task fails; otherwise keeps this result. */
  def or[B>:A](t2: Task[B]): Task[B] =
    Task(this.get flatMap {
      case Left(e) => t2.get
      case a => IO(a)
    })

  /** Runs the task on the implicit executor, re-throwing any captured failure. */
  def run(implicit E: ExecutorService): A = unsafePerformIO(get) match {
    case Left(e) => throw e
    case Right(a) => a
  }

  /** Runs the task, returning failures as a Left instead of throwing. */
  // NOTE(review): this catches Throwable, so fatal errors (OutOfMemoryError,
  // InterruptedException, ...) are converted into values too — confirm intent.
  def attemptRun(implicit E: ExecutorService): Either[Throwable,A] =
    try unsafePerformIO(get) catch { case t: Throwable => Left(t) }
}
object Task extends Monad[Task] {
  /** Lifts a by-name value, capturing any exception it throws as a Left. */
  def unit[A](a: => A) = Task(IO(Try(a)))

  def flatMap[A,B](a: Task[A])(f: A => Task[B]): Task[B] =
    a flatMap f

  /** A task that always fails with `e`. */
  def fail[A](e: Throwable): Task[A] = Task(IO(Left(e)))

  /** A task that immediately returns the already-computed value `a`. */
  def now[A](a: A): Task[A] = Task(Return(Right(a)))

  /** Defers construction of `a` by one trampoline step (stack safety). */
  def more[A](a: => Task[A]): Task[A] = Task.now(()) flatMap (_ => a)

  /** Lazily evaluates `a` only when the task is run. */
  def delay[A](a: => A): Task[A] = more(now(a))

  /** Runs the task via Par so it executes on the supplied executor. */
  def fork[A](a: => Task[A]): Task[A] =
    Task { par { Par.lazyUnit(()) } flatMap (_ => a.get) }

  /** Forks the lazy evaluation of a plain value. */
  def forkUnit[A](a: => A): Task[A] = fork(now(a))

  /** Evaluates `a`, converting thrown exceptions into Left values. */
  def Try[A](a: => A): Either[Throwable,A] =
    try Right(a) catch { case e: Throwable => Left(e) }
}
| hugocf/fpinscala | src/main/scala/fpinscala/ch13iomonad/Task.scala | Scala | mit | 1,969 |
package org.eigengo.sogx.config
import org.springframework.context.annotation.Bean
import org.springframework.integration.{SpringIntegration, MessageChannel}
import org.springframework.integration.gateway.GatewayProxyFactoryBean
import java.util.concurrent.Executor
/**
 * Spring Integration wiring for the recognition flow: declares the message
 * channels and the gateway that bridges method calls onto those channels.
 */
trait IntegrationConfig {
  import SpringIntegration.channels._
  import SpringIntegration.gateways._
  import SpringIntegration.messageflow._

  // Executor backing asynchronous gateway calls; supplied by the mixing class.
  def asyncExecutor(): Executor

  /** Channel carrying recognition requests into the flow. */
  @Bean
  def recogRequest(): MessageChannel = directChannel()

  /** Channel carrying recognition replies back to the gateway. */
  @Bean
  def recogResponse(): MessageChannel = directChannel()

  /** Channel for raw (unprocessed) recognition responses. */
  @Bean
  def rawRecogResponse(): MessageChannel = directChannel()

  /** Channel for raw byte-level recognition responses. */
  @Bean
  def rawBytesRecogResponse(): MessageChannel = directChannel()

  /**
   * Gateway proxy exposing RecogGateway.recogFrame as a request/reply
   * exchange over the channels above, executed on asyncExecutor().
   */
  @Bean
  def recogGateway(): GatewayProxyFactoryBean = {
    gatewayProxy[RecogGateway].
      withMethod(_.recogFrame, requestChannel = recogRequest(), replyChannel = recogResponse()).
      withAsyncExecutor(asyncExecutor())
  }
}
| eigengo/springone2gx2013 | jvm/src/scraps/IntegrationConfig.scala | Scala | apache-2.0 | 950 |
package kr.scala.experiments.tests.akka
import akka.actor.{Props, ActorSystem, Actor, ActorRef}
import kr.scala.experiments.tests.AbstractExperimentTest
/**
* CommuncationActors
* @author Sunghyouk Bae
*/
class PingPongTest extends AbstractExperimentTest {

  test("ping-pong test") {
    // Spin up a dedicated actor system with one Pong actor and one Ping
    // actor wired to it, then kick off the exchange.
    val system = ActorSystem("PingPongSystem")
    val pong = system.actorOf(Props[Pong], name = "pong")
    val ping = system.actorOf(Props(new Ping(pong)), name = "ping")
    ping ! StartMessage
    // NOTE(review): the send is fire-and-forget and the system is never
    // awaited or shut down, so the test method may return before the
    // exchange finishes — confirm this is acceptable for this experiment.
  }
}
/** Sent by Ping to request a pong reply. */
case object PingMessage
/** Sent by Pong in answer to a PingMessage. */
case object PongMessage
/** Kicks off the exchange (sent to Ping by the test). */
case object StartMessage
/** Tells the partner actor to terminate once the exchange is done. */
case object StopMessage
/**
 * Initiating side of the exchange. On StartMessage it prints the first
 * "ping" and messages `pong`; each PongMessage received is answered with
 * another PingMessage until 100 pings have been printed, after which the
 * partner is told to stop and this actor terminates itself.
 */
class Ping(pong: ActorRef) extends Actor {
  // Number of "ping" lines printed so far.
  var count = 0

  /** Records one ping round: prints the marker line and bumps the counter. */
  def incrementAndPrint() {
    println("ping")
    count += 1
  }

  override def receive: Receive = {
    case StartMessage =>
      incrementAndPrint()
      pong ! PingMessage
    case PongMessage =>
      incrementAndPrint()
      val finished = count > 99
      if (!finished) {
        sender ! PingMessage
      } else {
        // 100 pings reached: ask the partner to shut down, then stop.
        sender ! StopMessage
        println("ping stopped")
        context.stop(self)
      }
    case _ =>
      println("Ping got something unexpected.")
  }
}
/**
 * Responding side of the exchange: answers every PingMessage with a
 * PongMessage and terminates once told to stop.
 */
class Pong extends Actor {
  def receive = {
    case StopMessage =>
      // Partner is done; acknowledge on stdout and shut down.
      println("pong stopped")
      context.stop(self)
    case PingMessage =>
      println(" pong")
      sender ! PongMessage
    case _ =>
      println("Pong got something unexpected.")
  }
}
| debop/scala-experiments | src/test/scala/kr/scala/experiments/tests/akka/PingPongTest.scala | Scala | apache-2.0 | 1,539 |
import sbt._
import Keys._
/**
 * Multi-project sbt build: a root project plus sub-projects "a" and "b"
 * rooted at the directories of the same name.
 *
 * NOTE(review): the `Build` trait is the legacy sbt 0.13 project API,
 * deprecated in favour of build.sbt multi-project definitions.
 */
object B extends Build
{
	lazy val root = Project("root", file("."))
	lazy val a = Project("a", file("a"))
	lazy val b = Project("b", file("b"))
}
| olove/xsbt | sbt/src/sbt-test/project/build-deps/project/B.scala | Scala | bsd-3-clause | 175 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.embeddings
import cc.factorie.la.{DenseTensor1, WeightsMapAccumulator}
import cc.factorie.optimize.Example
import cc.factorie.util.DoubleAccumulator
import scala.collection.mutable
/**
 * Continuous-bag-of-words (CBOW) word2vec model trained with negative
 * sampling: for each target word, the surrounding context window is turned
 * into one positive example (label +1) plus `negative` examples against
 * randomly drawn words (label -1).
 */
class CBOWNegSamplingEmbeddingModel(override val opts: EmbeddingOpts) extends WordEmbeddingModel(opts) {
  val negative = opts.negative.value // negative samples per target word
  val window = opts.window.value // maximum context radius per side
  val rng = new util.Random(5) // fix the seed;
  val sample = opts.sample.value.toDouble // subsampling threshold; <= 0 disables it

  /**
   * Trains on one document: for every in-vocabulary position, builds a CBOW
   * (target, contexts) example and `negative` negative-sampled examples and
   * feeds them to the trainer.
   *
   * @param doc space-separated text of a single document
   * @return number of in-vocabulary words seen (counted before subsampling)
   */
  override def process(doc: String): Int = {
    // given a document, below line splits by space and converts each word to Int (by vocab.getId) and filters out words not in vocab
    var sen = doc.stripLineEnd.split(' ').map(word => vocab.getId(word)).filter(id => id != -1)
    val wordCount = sen.size

    // subsampling -> speed increase
    if (sample > 0)
      sen = sen.filter(id => subSample(id) != -1)

    val senLength = sen.size
    for (senPosition <- 0 until senLength) {
      val currWord = sen(senPosition)
      // b randomly shrinks the effective window, as in the original word2vec.
      val b = rng.nextInt(window)
      val contexts = new mutable.ArrayBuffer[Int]
      // make the contexts (skip the target position itself, a == window)
      for (a <- b until window * 2 + 1 - b) if (a != window) {
        val c = senPosition - window + a
        if (c >= 0 && c < senLength)
          contexts += sen(c)
      }
      // make the examples: one positive, then `negative` random negatives
      trainer.processExample(new CBOWNegSamplingExample(this, currWord, contexts, 1))
      (0 until negative).foreach(neg => trainer.processExample(new CBOWNegSamplingExample(this, currWord, List(vocab.getRandWordId), -1)))
    }
    return wordCount
  }

  /**
   * Frequency-based subsampling: keeps `word` with its precomputed keep
   * probability, returning -1 when the word should be dropped.
   */
  def subSample(word: Int): Int = {
    val prob = vocab.getSubSampleProb(word) // pre-computed to avoid sqrt call every time. Improvement of 10 secs on 100MB data ~ 15 MINs on 10GB
    val alpha = rng.nextInt(0xFFFF) / 0xFFFF.toDouble
    if (prob < alpha) { return -1 }
    else return word
  }
}
/**
 * One CBOW training example under negative sampling. `label` is +1 for a
 * true (word, contexts) pair and -1 for a negative sample; the context
 * vectors are summed into a single input vector before being scored against
 * the target word's embedding.
 */
class CBOWNegSamplingExample(model: WordEmbeddingModel, word: Int, contexts: Seq[Int], label: Int) extends Example {

  // to understand the gradient and objective refer to : http://arxiv.org/pdf/1310.4546.pdf
  def accumulateValueAndGradient(value: DoubleAccumulator, gradient: WeightsMapAccumulator): Unit = {
    val wordEmbedding = model.weights(word).value
    // Sum of all context word vectors (the CBOW input representation).
    val contextEmbedding = new DenseTensor1(model.D, 0)
    contexts.foreach(context => contextEmbedding.+=(model.weights(context).value))

    val score: Double = wordEmbedding.dot(contextEmbedding)
    val exp: Double = math.exp(-score) // TODO : pre-compute , costly operation
    var objective: Double = 0.0
    var factor: Double = 0.0
    // Positive example: objective = log sigmoid(score) = -log(1 + e^-score);
    // factor = d(objective)/d(score) = e^-score / (1 + e^-score).
    if (label == 1) {
      objective = -math.log1p(exp)
      factor = exp / (1 + exp)
    }
    // Negative example: objective = log sigmoid(-score) = -score - log(1 + e^-score);
    // factor = -1 / (1 + e^-score) = -sigmoid(score).
    if (label == -1) {
      objective = -score - math.log1p(exp)
      factor = -1 / (1 + exp)
    }
    if (value ne null) value.accumulate(objective)
    if (gradient ne null) {
      // d(score)/d(contextVector) = wordEmbedding and vice versa, each scaled by factor.
      contexts.foreach(context => gradient.accumulate(model.weights(context), wordEmbedding, factor))
      gradient.accumulate(model.weights(word), contextEmbedding, factor)
    }
  }
}
| strubell/factorie | src/main/scala/cc/factorie/app/nlp/embeddings/CBOWEmbeddingModel.scala | Scala | apache-2.0 | 3,820 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._
// Specs2
import org.specs2.mutable.Specification
import org.specs2.scalaz.ValidationMatchers
/**
* Tests Shredder
*/
class ShredderSpec extends Specification with ValidationMatchers {

  // fixSchema maps an Iglu schema URI to a legal Elasticsearch field name:
  // vendor dots/hyphens become underscores, the schema name is snake_cased,
  // and only the schema's MODEL (major) revision is kept.
  "The fixSchema method" should {
    "convert a snake_case schema to an Elasticsearch field name" in {
      val actual = Shredder.fixSchema("unstruct_event", "iglu:com.snowplowanalytics.snowplow/change_form/jsonschema/1-0-0")
      actual must beSuccessful("unstruct_event_com_snowplowanalytics_snowplow_change_form_1")
    }
    "convert a schema with a hyphen to an Elasticsearch field name" in {
      val actual = Shredder.fixSchema("unstruct_event", "iglu:com.hy-phen/evt/jsonschema/1-0-0")
      actual must beSuccessful("unstruct_event_com_hy_phen_evt_1")
    }
    "convert a PascalCase schema to an Elasticsearch field name" in {
      val actual = Shredder.fixSchema("contexts", "iglu:com.acme/PascalCaseContext/jsonschema/1-0-0")
      actual must beSuccessful("contexts_com_acme_pascal_case_context_1")
    }
    "convert a schema with consecutive capital letters to an Elasticsearch field name" in {
      val actual = Shredder.fixSchema("contexts", "iglu:com.acme/ContextUK/jsonschema/1-0-0")
      actual must beSuccessful("contexts_com_acme_context_uk_1")
    }
  }

  // parseUnstruct unwraps the self-describing envelope and keys the inner
  // data on the fixed-up schema name.
  "The parseUnstruct method" should {
    "fix up an unstructured event JSON" in {
      val actual = Shredder.parseUnstruct("""{
        "schema": "any",
        "data": {
          "schema": "iglu:com.snowplowanalytics.snowplow/social_interaction/jsonschema/1-0-0",
          "data": {
            "action": "like",
            "network": "fb"
          }
        }
      }""")
      val expected = JObject("unstruct_event_com_snowplowanalytics_snowplow_social_interaction_1" ->
        (("action" -> "like") ~ ("network" -> "fb")))
      actual must beSuccessful(expected)
    }
    "fail a malformed unstructured event JSON" in {
      // Missing inner schema and data fields should accumulate both errors.
      val actual = Shredder.parseUnstruct("""{
        "schema": "any",
        "data": {}
      }""")
      val expected = NonEmptyList(
        "Unstructured event JSON did not contain a stringly typed schema field",
        "Could not extract inner data field from unstructured event")
      actual must be failing(expected)
    }
  }

  // parseContexts groups contexts by fixed-up schema name, collecting the
  // data objects for duplicated schemas into one list.
  "The parseContexts method" should {
    "fix up a custom contexts JSON" in {
      val actual = Shredder.parseContexts("""{
        "schema": "any",
        "data": [
          {
            "schema": "iglu:com.acme/duplicated/jsonschema/20-0-5",
            "data": {
              "value": 1
            }
          },
          {
            "schema": "iglu:com.acme/duplicated/jsonschema/20-0-5",
            "data": {
              "value": 2
            }
          },
          {
            "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0",
            "data": {
              "type": "test"
            }
          }
        ]
      }""")
      val expected = ("contexts_com_acme_duplicated_20" -> List(("value" -> 2), ("value" -> 1))) ~
        ("contexts_com_acme_unduplicated_1" -> List(("type" -> "test")))
      actual must beSuccessful(expected)
    }
    "fail a malformed custom contexts JSON" in {
      // Each malformed entry contributes its own error to the NonEmptyList.
      val actual = Shredder.parseContexts("""{
        "schema": "any",
        "data": [
          {
            "schema": "failing",
            "data": {
              "value": 1
            }
          },
          {
            "data": {
              "value": 2
            }
          },
          {
            "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"
          }
        ]
      }""")
      val expected = NonEmptyList(
        "Could not extract inner data field from custom context",
        "Context JSON did not contain a stringly typed schema field",
        """Schema failing does not conform to regular expression %s""".format(Shredder.schemaPattern))
      actual must be failing(expected)
    }
  }
}
| jramos/snowplow | 4-storage/kinesis-elasticsearch-sink/src/test/scala/com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch/ShredderSpec.scala | Scala | apache-2.0 | 4,830 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.immutable.{HashSet => HSet}
import scala.collection.immutable.Queue
import scala.collection.mutable.{LinkedHashMap => LHMap}
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.test.SharedSparkSession
// Simple wrapper used as a Dataset element type in the suite below.
case class IntClass(value: Int)
// Wrappers around the supported sequence flavours.
case class SeqClass(s: Seq[Int])
case class ListClass(l: List[Int])
case class QueueClass(q: Queue[Int])
// Wrappers around immutable Map and mutable LinkedHashMap.
case class MapClass(m: Map[Int, Int])
case class LHMapClass(m: LHMap[Int, Int])
// Nested combinations of the wrappers above.
case class ComplexClass(seq: SeqClass, list: ListClass, queue: QueueClass)
case class ComplexMapClass(map: MapClass, lhmap: LHMapClass)
// Product values nested inside a map.
case class InnerData(name: String, value: Int)
case class NestedData(id: Int, param: Map[String, InnerData])
// Case class declared inside a package object, to exercise encoder support
// for definitions nested in package objects.
package object packageobject {
  case class PackageClass(value: Int)
}
class DatasetPrimitiveSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("toDS") {
val data = Seq(1, 2, 3, 4, 5, 6)
checkDataset(
data.toDS(),
data: _*)
}
test("as case class / collect") {
val ds = Seq(1, 2, 3).toDS().as[IntClass]
checkDataset(
ds,
IntClass(1), IntClass(2), IntClass(3))
assert(ds.collect().head == IntClass(1))
}
test("map") {
val ds = Seq(1, 2, 3).toDS()
checkDataset(
ds.map(_ + 1),
2, 3, 4)
}
test("mapPrimitive") {
val dsInt = Seq(1, 2, 3).toDS()
checkDataset(dsInt.map(_ > 1), false, true, true)
checkDataset(dsInt.map(_ + 1), 2, 3, 4)
checkDataset(dsInt.map(_ + 8589934592L), 8589934593L, 8589934594L, 8589934595L)
checkDataset(dsInt.map(_ + 1.1F), 2.1F, 3.1F, 4.1F)
checkDataset(dsInt.map(_ + 1.23D), 2.23D, 3.23D, 4.23D)
val dsLong = Seq(1L, 2L, 3L).toDS()
checkDataset(dsLong.map(_ > 1), false, true, true)
checkDataset(dsLong.map(e => (e + 1).toInt), 2, 3, 4)
checkDataset(dsLong.map(_ + 8589934592L), 8589934593L, 8589934594L, 8589934595L)
checkDataset(dsLong.map(_ + 1.1F), 2.1F, 3.1F, 4.1F)
checkDataset(dsLong.map(_ + 1.23D), 2.23D, 3.23D, 4.23D)
val dsFloat = Seq(1F, 2F, 3F).toDS()
checkDataset(dsFloat.map(_ > 1), false, true, true)
checkDataset(dsFloat.map(e => (e + 1).toInt), 2, 3, 4)
checkDataset(dsFloat.map(e => (e + 123456L).toLong), 123457L, 123458L, 123459L)
checkDataset(dsFloat.map(_ + 1.1F), 2.1F, 3.1F, 4.1F)
checkDataset(dsFloat.map(_ + 1.23D), 2.23D, 3.23D, 4.23D)
val dsDouble = Seq(1D, 2D, 3D).toDS()
checkDataset(dsDouble.map(_ > 1), false, true, true)
checkDataset(dsDouble.map(e => (e + 1).toInt), 2, 3, 4)
checkDataset(dsDouble.map(e => (e + 8589934592L).toLong),
8589934593L, 8589934594L, 8589934595L)
checkDataset(dsDouble.map(e => (e + 1.1F).toFloat), 2.1F, 3.1F, 4.1F)
checkDataset(dsDouble.map(_ + 1.23D), 2.23D, 3.23D, 4.23D)
val dsBoolean = Seq(true, false).toDS()
checkDataset(dsBoolean.map(e => !e), false, true)
}
test("mapPrimitiveArray") {
val dsInt = Seq(Array(1, 2), Array(3, 4)).toDS()
checkDataset(dsInt.map(e => e), Array(1, 2), Array(3, 4))
checkDataset(dsInt.map(e => null: Array[Int]), null, null)
val dsDouble = Seq(Array(1D, 2D), Array(3D, 4D)).toDS()
checkDataset(dsDouble.map(e => e), Array(1D, 2D), Array(3D, 4D))
checkDataset(dsDouble.map(e => null: Array[Double]), null, null)
}
test("filter") {
val ds = Seq(1, 2, 3, 4).toDS()
checkDataset(
ds.filter(_ % 2 == 0),
2, 4)
}
test("filterPrimitive") {
val dsInt = Seq(1, 2, 3).toDS()
checkDataset(dsInt.filter(_ > 1), 2, 3)
val dsLong = Seq(1L, 2L, 3L).toDS()
checkDataset(dsLong.filter(_ > 1), 2L, 3L)
val dsFloat = Seq(1F, 2F, 3F).toDS()
checkDataset(dsFloat.filter(_ > 1), 2F, 3F)
val dsDouble = Seq(1D, 2D, 3D).toDS()
checkDataset(dsDouble.filter(_ > 1), 2D, 3D)
val dsBoolean = Seq(true, false).toDS()
checkDataset(dsBoolean.filter(e => !e), false)
}
test("foreach") {
val ds = Seq(1, 2, 3).toDS()
val acc = sparkContext.longAccumulator
ds.foreach(acc.add(_))
assert(acc.value == 6)
}
test("foreachPartition") {
val ds = Seq(1, 2, 3).toDS()
val acc = sparkContext.longAccumulator
ds.foreachPartition((it: Iterator[Int]) => it.foreach(acc.add(_)))
assert(acc.value == 6)
}
test("reduce") {
val ds = Seq(1, 2, 3).toDS()
assert(ds.reduce(_ + _) == 6)
}
test("groupBy function, keys") {
val ds = Seq(1, 2, 3, 4, 5).toDS()
val grouped = ds.groupByKey(_ % 2)
checkDatasetUnorderly(
grouped.keys,
0, 1)
}
test("groupBy function, map") {
val ds = Seq(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11).toDS()
val grouped = ds.groupByKey(_ % 2)
val agged = grouped.mapGroups { case (g, iter) =>
val name = if (g == 0) "even" else "odd"
(name, iter.size)
}
checkDatasetUnorderly(
agged,
("even", 5), ("odd", 6))
}
test("groupBy function, flatMap") {
val ds = Seq("a", "b", "c", "xyz", "hello").toDS()
val grouped = ds.groupByKey(_.length)
val agged = grouped.flatMapGroups { case (g, iter) => Iterator(g.toString, iter.mkString) }
checkDatasetUnorderly(
agged,
"1", "abc", "3", "xyz", "5", "hello")
}
test("Arrays and Lists") {
checkDataset(Seq(Seq(1)).toDS(), Seq(1))
checkDataset(Seq(Seq(1.toLong)).toDS(), Seq(1.toLong))
checkDataset(Seq(Seq(1.toDouble)).toDS(), Seq(1.toDouble))
checkDataset(Seq(Seq(1.toFloat)).toDS(), Seq(1.toFloat))
checkDataset(Seq(Seq(1.toByte)).toDS(), Seq(1.toByte))
checkDataset(Seq(Seq(1.toShort)).toDS(), Seq(1.toShort))
checkDataset(Seq(Seq(true)).toDS(), Seq(true))
checkDataset(Seq(Seq("test")).toDS(), Seq("test"))
checkDataset(Seq(Seq(Tuple1(1))).toDS(), Seq(Tuple1(1)))
checkDataset(Seq(Array(1)).toDS(), Array(1))
checkDataset(Seq(Array(1.toLong)).toDS(), Array(1.toLong))
checkDataset(Seq(Array(1.toDouble)).toDS(), Array(1.toDouble))
checkDataset(Seq(Array(1.toFloat)).toDS(), Array(1.toFloat))
checkDataset(Seq(Array(1.toByte)).toDS(), Array(1.toByte))
checkDataset(Seq(Array(1.toShort)).toDS(), Array(1.toShort))
checkDataset(Seq(Array(true)).toDS(), Array(true))
checkDataset(Seq(Array("test")).toDS(), Array("test"))
checkDataset(Seq(Array(Tuple1(1))).toDS(), Array(Tuple1(1)))
}
test("arbitrary sequences") {
checkDataset(Seq(Queue(1)).toDS(), Queue(1))
checkDataset(Seq(Queue(1.toLong)).toDS(), Queue(1.toLong))
checkDataset(Seq(Queue(1.toDouble)).toDS(), Queue(1.toDouble))
checkDataset(Seq(Queue(1.toFloat)).toDS(), Queue(1.toFloat))
checkDataset(Seq(Queue(1.toByte)).toDS(), Queue(1.toByte))
checkDataset(Seq(Queue(1.toShort)).toDS(), Queue(1.toShort))
checkDataset(Seq(Queue(true)).toDS(), Queue(true))
checkDataset(Seq(Queue("test")).toDS(), Queue("test"))
checkDataset(Seq(Queue(Tuple1(1))).toDS(), Queue(Tuple1(1)))
checkDataset(Seq(ArrayBuffer(1)).toDS(), ArrayBuffer(1))
checkDataset(Seq(ArrayBuffer(1.toLong)).toDS(), ArrayBuffer(1.toLong))
checkDataset(Seq(ArrayBuffer(1.toDouble)).toDS(), ArrayBuffer(1.toDouble))
checkDataset(Seq(ArrayBuffer(1.toFloat)).toDS(), ArrayBuffer(1.toFloat))
checkDataset(Seq(ArrayBuffer(1.toByte)).toDS(), ArrayBuffer(1.toByte))
checkDataset(Seq(ArrayBuffer(1.toShort)).toDS(), ArrayBuffer(1.toShort))
checkDataset(Seq(ArrayBuffer(true)).toDS(), ArrayBuffer(true))
checkDataset(Seq(ArrayBuffer("test")).toDS(), ArrayBuffer("test"))
checkDataset(Seq(ArrayBuffer(Tuple1(1))).toDS(), ArrayBuffer(Tuple1(1)))
}
test("sequence and product combinations") {
// Case classes
checkDataset(Seq(SeqClass(Seq(1))).toDS(), SeqClass(Seq(1)))
checkDataset(Seq(Seq(SeqClass(Seq(1)))).toDS(), Seq(SeqClass(Seq(1))))
checkDataset(Seq(List(SeqClass(Seq(1)))).toDS(), List(SeqClass(Seq(1))))
checkDataset(Seq(Queue(SeqClass(Seq(1)))).toDS(), Queue(SeqClass(Seq(1))))
checkDataset(Seq(ListClass(List(1))).toDS(), ListClass(List(1)))
checkDataset(Seq(Seq(ListClass(List(1)))).toDS(), Seq(ListClass(List(1))))
checkDataset(Seq(List(ListClass(List(1)))).toDS(), List(ListClass(List(1))))
checkDataset(Seq(Queue(ListClass(List(1)))).toDS(), Queue(ListClass(List(1))))
checkDataset(Seq(QueueClass(Queue(1))).toDS(), QueueClass(Queue(1)))
checkDataset(Seq(Seq(QueueClass(Queue(1)))).toDS(), Seq(QueueClass(Queue(1))))
checkDataset(Seq(List(QueueClass(Queue(1)))).toDS(), List(QueueClass(Queue(1))))
checkDataset(Seq(Queue(QueueClass(Queue(1)))).toDS(), Queue(QueueClass(Queue(1))))
val complex = ComplexClass(SeqClass(Seq(1)), ListClass(List(2)), QueueClass(Queue(3)))
checkDataset(Seq(complex).toDS(), complex)
checkDataset(Seq(Seq(complex)).toDS(), Seq(complex))
checkDataset(Seq(List(complex)).toDS(), List(complex))
checkDataset(Seq(Queue(complex)).toDS(), Queue(complex))
// Tuples
checkDataset(Seq(Seq(1) -> Seq(2)).toDS(), Seq(1) -> Seq(2))
checkDataset(Seq(List(1) -> Queue(2)).toDS(), List(1) -> Queue(2))
checkDataset(Seq(List(Seq("test1") -> List(Queue("test2")))).toDS(),
List(Seq("test1") -> List(Queue("test2"))))
// Complex
checkDataset(Seq(ListClass(List(1)) -> Queue("test" -> SeqClass(Seq(2)))).toDS(),
ListClass(List(1)) -> Queue("test" -> SeqClass(Seq(2))))
}
test("arbitrary maps") {
checkDataset(Seq(Map(1 -> 2)).toDS(), Map(1 -> 2))
checkDataset(Seq(Map(1.toLong -> 2.toLong)).toDS(), Map(1.toLong -> 2.toLong))
checkDataset(Seq(Map(1.toDouble -> 2.toDouble)).toDS(), Map(1.toDouble -> 2.toDouble))
checkDataset(Seq(Map(1.toFloat -> 2.toFloat)).toDS(), Map(1.toFloat -> 2.toFloat))
checkDataset(Seq(Map(1.toByte -> 2.toByte)).toDS(), Map(1.toByte -> 2.toByte))
checkDataset(Seq(Map(1.toShort -> 2.toShort)).toDS(), Map(1.toShort -> 2.toShort))
checkDataset(Seq(Map(true -> false)).toDS(), Map(true -> false))
checkDataset(Seq(Map("test1" -> "test2")).toDS(), Map("test1" -> "test2"))
checkDataset(Seq(Map(Tuple1(1) -> Tuple1(2))).toDS(), Map(Tuple1(1) -> Tuple1(2)))
checkDataset(Seq(Map(1 -> Tuple1(2))).toDS(), Map(1 -> Tuple1(2)))
checkDataset(Seq(Map("test" -> 2.toLong)).toDS(), Map("test" -> 2.toLong))
checkDataset(Seq(LHMap(1 -> 2)).toDS(), LHMap(1 -> 2))
checkDataset(Seq(LHMap(1.toLong -> 2.toLong)).toDS(), LHMap(1.toLong -> 2.toLong))
checkDataset(Seq(LHMap(1.toDouble -> 2.toDouble)).toDS(), LHMap(1.toDouble -> 2.toDouble))
checkDataset(Seq(LHMap(1.toFloat -> 2.toFloat)).toDS(), LHMap(1.toFloat -> 2.toFloat))
checkDataset(Seq(LHMap(1.toByte -> 2.toByte)).toDS(), LHMap(1.toByte -> 2.toByte))
checkDataset(Seq(LHMap(1.toShort -> 2.toShort)).toDS(), LHMap(1.toShort -> 2.toShort))
checkDataset(Seq(LHMap(true -> false)).toDS(), LHMap(true -> false))
checkDataset(Seq(LHMap("test1" -> "test2")).toDS(), LHMap("test1" -> "test2"))
checkDataset(Seq(LHMap(Tuple1(1) -> Tuple1(2))).toDS(), LHMap(Tuple1(1) -> Tuple1(2)))
checkDataset(Seq(LHMap(1 -> Tuple1(2))).toDS(), LHMap(1 -> Tuple1(2)))
checkDataset(Seq(LHMap("test" -> 2.toLong)).toDS(), LHMap("test" -> 2.toLong))
}
test("SPARK-25817: map and product combinations") {
// Case classes
checkDataset(Seq(MapClass(Map(1 -> 2))).toDS(), MapClass(Map(1 -> 2)))
checkDataset(Seq(Map(1 -> MapClass(Map(2 -> 3)))).toDS(), Map(1 -> MapClass(Map(2 -> 3))))
checkDataset(Seq(Map(MapClass(Map(1 -> 2)) -> 3)).toDS(), Map(MapClass(Map(1 -> 2)) -> 3))
checkDataset(Seq(Map(MapClass(Map(1 -> 2)) -> MapClass(Map(3 -> 4)))).toDS(),
Map(MapClass(Map(1 -> 2)) -> MapClass(Map(3 -> 4))))
checkDataset(Seq(LHMap(1 -> MapClass(Map(2 -> 3)))).toDS(), LHMap(1 -> MapClass(Map(2 -> 3))))
checkDataset(Seq(LHMap(MapClass(Map(1 -> 2)) -> 3)).toDS(), LHMap(MapClass(Map(1 -> 2)) -> 3))
checkDataset(Seq(LHMap(MapClass(Map(1 -> 2)) -> MapClass(Map(3 -> 4)))).toDS(),
LHMap(MapClass(Map(1 -> 2)) -> MapClass(Map(3 -> 4))))
checkDataset(Seq(LHMapClass(LHMap(1 -> 2))).toDS(), LHMapClass(LHMap(1 -> 2)))
checkDataset(Seq(Map(1 -> LHMapClass(LHMap(2 -> 3)))).toDS(),
Map(1 -> LHMapClass(LHMap(2 -> 3))))
checkDataset(Seq(Map(LHMapClass(LHMap(1 -> 2)) -> 3)).toDS(),
Map(LHMapClass(LHMap(1 -> 2)) -> 3))
checkDataset(Seq(Map(LHMapClass(LHMap(1 -> 2)) -> LHMapClass(LHMap(3 -> 4)))).toDS(),
Map(LHMapClass(LHMap(1 -> 2)) -> LHMapClass(LHMap(3 -> 4))))
checkDataset(Seq(LHMap(1 -> LHMapClass(LHMap(2 -> 3)))).toDS(),
LHMap(1 -> LHMapClass(LHMap(2 -> 3))))
checkDataset(Seq(LHMap(LHMapClass(LHMap(1 -> 2)) -> 3)).toDS(),
LHMap(LHMapClass(LHMap(1 -> 2)) -> 3))
checkDataset(Seq(LHMap(LHMapClass(LHMap(1 -> 2)) -> LHMapClass(LHMap(3 -> 4)))).toDS(),
LHMap(LHMapClass(LHMap(1 -> 2)) -> LHMapClass(LHMap(3 -> 4))))
val complex = ComplexMapClass(MapClass(Map(1 -> 2)), LHMapClass(LHMap(3 -> 4)))
checkDataset(Seq(complex).toDS(), complex)
checkDataset(Seq(Map(1 -> complex)).toDS(), Map(1 -> complex))
checkDataset(Seq(Map(complex -> 5)).toDS(), Map(complex -> 5))
checkDataset(Seq(Map(complex -> complex)).toDS(), Map(complex -> complex))
checkDataset(Seq(LHMap(1 -> complex)).toDS(), LHMap(1 -> complex))
checkDataset(Seq(LHMap(complex -> 5)).toDS(), LHMap(complex -> 5))
checkDataset(Seq(LHMap(complex -> complex)).toDS(), LHMap(complex -> complex))
// Tuples
checkDataset(Seq(Map(1 -> 2) -> Map(3 -> 4)).toDS(), Map(1 -> 2) -> Map(3 -> 4))
checkDataset(Seq(LHMap(1 -> 2) -> Map(3 -> 4)).toDS(), LHMap(1 -> 2) -> Map(3 -> 4))
checkDataset(Seq(Map(1 -> 2) -> LHMap(3 -> 4)).toDS(), Map(1 -> 2) -> LHMap(3 -> 4))
checkDataset(Seq(LHMap(1 -> 2) -> LHMap(3 -> 4)).toDS(), LHMap(1 -> 2) -> LHMap(3 -> 4))
checkDataset(Seq(LHMap((Map("test1" -> 1) -> 2) -> (3 -> LHMap(4 -> "test2")))).toDS(),
LHMap((Map("test1" -> 1) -> 2) -> (3 -> LHMap(4 -> "test2"))))
// Complex
checkDataset(Seq(LHMapClass(LHMap(1 -> 2)) -> LHMap("test" -> MapClass(Map(3 -> 4)))).toDS(),
LHMapClass(LHMap(1 -> 2)) -> LHMap("test" -> MapClass(Map(3 -> 4))))
}
  test("arbitrary sets") {
    // Round-trips immutable Sets through the Dataset encoders for every primitive
    // element type, plus String and Tuple1 elements.
    checkDataset(Seq(Set(1, 2, 3, 4)).toDS(), Set(1, 2, 3, 4))
    checkDataset(Seq(Set(1.toLong, 2.toLong)).toDS(), Set(1.toLong, 2.toLong))
    checkDataset(Seq(Set(1.toDouble, 2.toDouble)).toDS(), Set(1.toDouble, 2.toDouble))
    checkDataset(Seq(Set(1.toFloat, 2.toFloat)).toDS(), Set(1.toFloat, 2.toFloat))
    checkDataset(Seq(Set(1.toByte, 2.toByte)).toDS(), Set(1.toByte, 2.toByte))
    checkDataset(Seq(Set(1.toShort, 2.toShort)).toDS(), Set(1.toShort, 2.toShort))
    checkDataset(Seq(Set(true, false)).toDS(), Set(true, false))
    checkDataset(Seq(Set("test1", "test2")).toDS(), Set("test1", "test2"))
    checkDataset(Seq(Set(Tuple1(1), Tuple1(2))).toDS(), Set(Tuple1(1), Tuple1(2)))
    // Same matrix for HSet (an alias defined in this file's import section --
    // presumably scala.collection.mutable.HashSet; confirm at the top of the file).
    checkDataset(Seq(HSet(1, 2)).toDS(), HSet(1, 2))
    checkDataset(Seq(HSet(1.toLong, 2.toLong)).toDS(), HSet(1.toLong, 2.toLong))
    checkDataset(Seq(HSet(1.toDouble, 2.toDouble)).toDS(), HSet(1.toDouble, 2.toDouble))
    checkDataset(Seq(HSet(1.toFloat, 2.toFloat)).toDS(), HSet(1.toFloat, 2.toFloat))
    checkDataset(Seq(HSet(1.toByte, 2.toByte)).toDS(), HSet(1.toByte, 2.toByte))
    checkDataset(Seq(HSet(1.toShort, 2.toShort)).toDS(), HSet(1.toShort, 2.toShort))
    checkDataset(Seq(HSet(true, false)).toDS(), HSet(true, false))
    checkDataset(Seq(HSet("test1", "test2")).toDS(), HSet("test1", "test2"))
    checkDataset(Seq(HSet(Tuple1(1), Tuple1(2))).toDS(), HSet(Tuple1(1), Tuple1(2)))
    // An array column containing nulls must decode into a Set that keeps the null element.
    checkDataset(Seq(Seq(Some(1), None), Seq(Some(2))).toDF("c").as[Set[Integer]],
      Seq(Set[Integer](1, null), Set[Integer](2)): _*)
  }
  test("nested sequences") {
    // Sequences nested inside sequences, including mixed concrete types (List of Queue).
    checkDataset(Seq(Seq(Seq(1))).toDS(), Seq(Seq(1)))
    checkDataset(Seq(List(Queue(1))).toDS(), List(Queue(1)))
  }
  test("nested maps") {
    // Maps nested both as values and as keys, mixing Map and LHMap implementations.
    checkDataset(Seq(Map(1 -> LHMap(2 -> 3))).toDS(), Map(1 -> LHMap(2 -> 3)))
    checkDataset(Seq(LHMap(Map(1 -> 2) -> 3)).toDS(), LHMap(Map(1 -> 2) -> 3))
  }
  test("nested set") {
    // Sets of sets, in both nesting orders of the two set implementations.
    checkDataset(Seq(Set(HSet(1, 2), HSet(3, 4))).toDS(), Set(HSet(1, 2), HSet(3, 4)))
    checkDataset(Seq(HSet(Set(1, 2), Set(3, 4))).toDS(), HSet(Set(1, 2), Set(3, 4)))
  }
  test("package objects") {
    // Case classes declared inside a package object must still be encodable.
    import packageobject._
    checkDataset(Seq(PackageClass(1)).toDS(), PackageClass(1))
  }
  test("SPARK-19104: Lambda variables in ExternalMapToCatalyst should be global") {
    // Regression test: encoding many rows each holding a map of case-class values
    // must not fail due to per-row lambda variable scoping in ExternalMapToCatalyst.
    val data = Seq.tabulate(10)(i => NestedData(1, Map("key" -> InnerData("name", i + 100))))
    val ds = spark.createDataset(data)
    checkDataset(ds, data: _*)
  }
  test("special floating point values") {
    import org.scalatest.exceptions.TestFailedException
    // Spark distinguishes -0.0 and 0.0
    // Each intercept below asserts that a -0.0 value does NOT compare equal to 0.0,
    // for both Double and Float, at top level and nested in tuples/rows/sequences.
    intercept[TestFailedException] {
      checkDataset(Seq(-0.0d).toDS(), 0.0d)
    }
    intercept[TestFailedException] {
      checkAnswer(Seq(-0.0d).toDF(), Row(0.0d))
    }
    intercept[TestFailedException] {
      checkDataset(Seq(-0.0f).toDS(), 0.0f)
    }
    intercept[TestFailedException] {
      checkAnswer(Seq(-0.0f).toDF(), Row(0.0f))
    }
    intercept[TestFailedException] {
      checkDataset(Seq(Tuple1(-0.0)).toDS(), Tuple1(0.0))
    }
    intercept[TestFailedException] {
      // A Tuple1 column shows up as a nested Row in DataFrame comparisons.
      checkAnswer(Seq(Tuple1(-0.0)).toDF(), Row(Row(0.0)))
    }
    intercept[TestFailedException] {
      checkDataset(Seq(Seq(-0.0)).toDS(), Seq(0.0))
    }
    intercept[TestFailedException] {
      checkAnswer(Seq(Seq(-0.0)).toDF(), Row(Seq(0.0)))
    }
    // Round-trips: -0.0, 0.0 and NaN must all survive encoding unchanged --
    // alone, inside arrays, and inside tuples.
    val floats = Seq[Float](-0.0f, 0.0f, Float.NaN)
    checkDataset(floats.toDS(), floats: _*)
    val arrayOfFloats = Seq[Array[Float]](Array(0.0f, -0.0f), Array(-0.0f, Float.NaN))
    checkDataset(arrayOfFloats.toDS(), arrayOfFloats: _*)
    val doubles = Seq[Double](-0.0d, 0.0d, Double.NaN)
    checkDataset(doubles.toDS(), doubles: _*)
    val arrayOfDoubles = Seq[Array[Double]](Array(0.0d, -0.0d), Array(-0.0d, Double.NaN))
    checkDataset(arrayOfDoubles.toDS(), arrayOfDoubles: _*)
    val tuples = Seq[(Float, Float, Double, Double)](
      (0.0f, -0.0f, 0.0d, -0.0d),
      (-0.0f, Float.NaN, -0.0d, Double.NaN))
    checkDataset(tuples.toDS(), tuples: _*)
    // NaN nested deep inside collection keys and values.
    val complex = Map(Array(Seq(Tuple1(Double.NaN))) -> Map(Tuple2(Float.NaN, null)))
    checkDataset(Seq(complex).toDS(), complex)
  }
}
| techaddict/spark | sql/core/src/test/scala/org/apache/spark/sql/DatasetPrimitiveSuite.scala | Scala | apache-2.0 | 19,121 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.doc.generation
import java.io.{File, PrintWriter}
import java.net.URLDecoder
import java.util.jar.JarFile
import ai.h2o.sparkling.utils.ScalaUtils.withResource
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
object Runner {

  /** Writes `content` to `<destinationDir>/<fileName>.rst`, creating the directory if needed. */
  private def writeResultToFile(content: String, fileName: String, destinationDir: String) = {
    val destinationDirFile = new File(destinationDir)
    destinationDirFile.mkdirs()
    val destinationFile = new File(destinationDirFile, s"$fileName.rst")
    withResource(new PrintWriter(destinationFile)) { outputStream =>
      outputStream.print(content)
    }
  }

  /**
   * Resolves the generated MOJO model class that corresponds to an algorithm or
   * feature transformer, e.g. H2OGBM -> ai.h2o.sparkling.ml.models.H2OGBMMOJOModel.
   * Returns None when no such class exists.
   */
  private def mojoModelClass(clazz: Class[_]): Option[Class[_]] =
    Try(Class.forName(s"ai.h2o.sparkling.ml.models.${clazz.getSimpleName}MOJOModel")).toOption

  /**
   * Entry point. args(0) is the output directory; its suffix selects which family
   * of .rst documents to generate (parameters, model details, or metrics).
   */
  def main(args: Array[String]): Unit = {
    val destinationDir = args(0)
    if (destinationDir.endsWith("parameters")) {
      generateParameterDocs(destinationDir)
    } else if (destinationDir.endsWith("model_details")) {
      generateModelDetailDocs(destinationDir)
    } else {
      generateMetricDocs(destinationDir)
    }
  }

  /** Generates the parameters TOC plus one parameters page per algorithm/transformer. */
  private def generateParameterDocs(destinationDir: String): Unit = {
    val algorithms = getParamClasses("ai.h2o.sparkling.ml.algos")
    val featureTransformers = getParamClasses("ai.h2o.sparkling.ml.features")
    writeResultToFile(ParametersTocTreeTemplate(algorithms, featureTransformers), "parameters", destinationDir)
    // Algorithms first, then feature transformers -- same order as the TOC.
    for (clazz <- algorithms ++ featureTransformers) {
      val content = ParametersTemplate(clazz, mojoModelClass(clazz))
      writeResultToFile(content, s"parameters_${clazz.getSimpleName}", destinationDir)
    }
  }

  /** Generates the model-details TOC plus one page per class that has a MOJO model. */
  private def generateModelDetailDocs(destinationDir: String): Unit = {
    val algorithms = getParamClasses("ai.h2o.sparkling.ml.algos")
    val featureTransformers = getParamClasses("ai.h2o.sparkling.ml.features")
    val algorithmMOJOModels = algorithms.toSeq.flatMap(mojoModelClass)
    val featureMOJOModels = featureTransformers.toSeq.flatMap(mojoModelClass)
    writeResultToFile(
      ModelDetailsTocTreeTemplate(algorithmMOJOModels, featureMOJOModels),
      "model_details",
      destinationDir)
    // Classes without a generated MOJO model are silently skipped.
    for {
      clazz <- algorithms ++ featureTransformers
      model <- mojoModelClass(clazz)
    } {
      writeResultToFile(
        ModelDetailsTemplate(clazz, model),
        s"model_details_${model.getSimpleName}",
        destinationDir)
    }
  }

  /** Generates the metrics TOC plus one page per metrics class. */
  private def generateMetricDocs(destinationDir: String): Unit = {
    val metricClasses = getParamClasses("ai.h2o.sparkling.ml.metrics")
    writeResultToFile(MetricsTocTreeTemplate(metricClasses), "metrics", destinationDir)
    for (metricClass <- metricClasses) {
      writeResultToFile(MetricsTemplate(metricClass), s"metrics_${metricClass.getSimpleName}", destinationDir)
    }
  }

  /**
   * Lists the classes directly inside `packageName`, whether the package is loaded
   * from a jar or from a directory on disk. Only classes with exactly two
   * constructors are kept (the convention the doc templates rely on -- confirm
   * against the generated API classes if this filter ever misbehaves).
   */
  private def getParamClasses(packageName: String): Array[Class[_]] = {
    val classLoader = Thread.currentThread.getContextClassLoader
    val path = packageName.replace('.', '/')
    val packageUrl = classLoader.getResource(path)
    val classes = new ArrayBuffer[Class[_]]
    if (packageUrl.getProtocol().equals("jar")) {
      val decodedUrl = URLDecoder.decode(packageUrl.getFile(), "UTF-8")
      // URL has the form "file:/path/to.jar!/package/path"; strip the scheme and entry.
      val jarFileName = decodedUrl.substring(5, decodedUrl.indexOf("!"))
      // withResource closes the jar handle; the previous version leaked it.
      withResource(new JarFile(jarFileName)) { jarFile =>
        val entries = jarFile.entries()
        while (entries.hasMoreElements) {
          val entryName = entries.nextElement().getName
          if (entryName.startsWith(path) && entryName.endsWith(".class")) {
            val name = entryName.substring(path.length + 1, entryName.length - 6)
            // Skip classes from sub-packages.
            if (!name.contains('/')) {
              classes.append(Class.forName(packageName + '.' + name))
            }
          }
        }
      }
    } else {
      val directory = new File(packageUrl.toURI)
      val files = directory.listFiles
      for (file <- files) {
        if (file.getName.endsWith(".class")) {
          classes.append(Class.forName(packageName + '.' + file.getName.substring(0, file.getName.length - 6)))
        }
      }
    }
    classes.filter(_.getConstructors().size == 2).toArray
  }
}
| h2oai/sparkling-water | doc/src/main/scala/ai/h2o/sparkling/doc/generation/Runner.scala | Scala | apache-2.0 | 6,146 |
package scalaz.stream
import org.scalacheck._
import org.scalacheck.Prop._
import scalaz.-\\/
import scodec.bits.ByteVector
import Process._
import compress._
import TestInstances._
class CompressSpec extends Properties("compress") {
  // Platform-default bytes of a string, wrapped without copying.
  def getBytes(s: String): ByteVector =
    ByteVector.view(s.getBytes)
  // Concatenates the chunks a process emitted back into a single ByteVector.
  def foldBytes(bytes: List[ByteVector]): ByteVector =
    bytes.fold(ByteVector.empty)(_ ++ _)
  // Empty input must produce empty output in both directions.
  property("deflate.empty input") = protect {
    Process[ByteVector]().pipe(deflate()).toList.isEmpty
  }
  property("inflate.empty input") = protect {
    Process[ByteVector]().pipe(inflate()).toList.isEmpty
  }
  // Round trip inside a single pipeline preserves the byte content
  // (chunk boundaries may differ, hence the fold before comparing).
  property("deflate |> inflate ~= id") = forAll { (input: List[ByteVector]) =>
    val inflated = emitAll(input).pipe(deflate()).pipe(inflate()).toList
    foldBytes(input) == foldBytes(inflated)
  }
  // Round trip across two separate pipelines.
  property("(de|in)flate") = forAll { (input: List[ByteVector]) =>
    val deflated = emitAll(input).pipe(deflate()).toList
    val inflated = emitAll(deflated).pipe(inflate()).toList
    foldBytes(input) == foldBytes(inflated)
  }
  // A tiny (32-byte) internal buffer forces multiple fill/drain cycles.
  property("(de|in)flate with small buffers") = forAll { (input: List[ByteVector]) =>
    val deflated = emitAll(input).pipe(deflate(0, false, 32)).toList
    val inflated = emitAll(deflated).pipe(inflate(false, 32)).toList
    foldBytes(input) == foldBytes(inflated)
  }
  // Feeding one byte at a time exercises the incremental paths of both codecs.
  property("(de|in)flate with single byte inputs") = forAll { (bs: ByteVector) =>
    val input = bs.grouped(1).toList
    val deflated = emitAll(input).pipe(deflate()).toList.flatMap(_.grouped(1))
    val inflated = emitAll(deflated).pipe(inflate()).toList
    foldBytes(input) == foldBytes(inflated)
  }
  // Compressible text at maximum level (9) must actually shrink.
  property("deflate.compresses input") = protect {
    val uncompressed = getBytes(
      """"
      |"A type system is a tractable syntactic method for proving the absence
      |of certain program behaviors by classifying phrases according to the
      |kinds of values they compute."
      |-- Pierce, Benjamin C. (2002). Types and Programming Languages""")
    val compressed = foldBytes(emit(uncompressed).pipe(deflate(9)).toList)
    compressed.length < uncompressed.length
  }
  // Garbage (non-deflate) input must surface as a DataFormatException on the left.
  property("inflate.uncompressed input") = protect {
    emit(getBytes("Hello")).pipe(inflate()).attempt().toList match {
      case List(-\\/(ex: java.util.zip.DataFormatException)) => true
      case _ => false
    }
  }
}
| drostron/scalaz-stream | src/test/scala/scalaz/stream/CompressSpec.scala | Scala | mit | 2,360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.akkastream.example
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
/**
* Stream example to find sum of elements
*/
object Test8 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println

  /** Single CLI flag: run on Gearpump's materializer instead of plain Akka Streams. */
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val parsedArgs = parse(args)
    implicit val system = ActorSystem("Test8", akkaConf)
    // Choose the stream materializer according to the "gearpump" flag.
    implicit val materializer: ActorMaterializer =
      if (parsedArgs.getBoolean("gearpump")) {
        GearpumpMaterializer()
      } else {
        ActorMaterializer(ActorMaterializerSettings(system).withAutoFusing(false))
      }
    implicit val ec = system.dispatcher

    // The source emits the integers 1..100; the sink folds them into their sum.
    val source: Source[Int, NotUsed] = Source(Stream.from(1).take(100))
    val sink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)
    val result: Future[Int] = source.runWith(sink)
    result.map(sum => println(s"Sum of stream elements => $sum"))
    // Keep the JVM alive until the actor system terminates (or an hour passes).
    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
}
| manuzhang/incubator-gearpump | experiments/akkastream/src/main/scala/org/apache/gearpump/akkastream/example/Test8.scala | Scala | apache-2.0 | 2,405 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cs.ucla.edu.bwaspark.datatype
class MemAlnRegArrayType {
  // Mutable, growable container of alignment regions -- presumably mirrors BWA's
  // C-side mem_alnreg_v vector (capacity + length + array); TODO confirm.
  var maxLength: Int = 0                  // capacity of `regs`
  var curLength: Int = 0                  // number of entries currently populated
  var regs: Array[MemAlnRegType] = _      // backing array; null until assigned elsewhere
}
| ytchen0323/cloud-scale-bwamem | src/main/scala/cs/ucla/edu/bwaspark/datatype/MemAlnRegArrayType.scala | Scala | apache-2.0 | 959 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.enhancement.BonusType
import io.truthencode.ddo.model.abilities.ActiveAbilities
import io.truthencode.ddo.model.attribute.Attribute
import io.truthencode.ddo.model.effect
import io.truthencode.ddo.model.effect.TriggerEvent
import io.truthencode.ddo.model.effect.features.{FeaturesImpl, GrantAbilityFeature}
import io.truthencode.ddo.support.requisite._
/**
* Icon Feat Spring Attack.png Spring Attack Passive Character suffers no penalty to his attack roll
* when meleeing and moving. You will also gain a 2% dodge bonus. NOT YET IMPLEMENTED: No attack
* penalty for melee while moving.
*
* Dodge, Mobility Dexterity 13 , Base Attack Bonus 4,
*/
protected[feats] trait SpringAttack
  extends FeatRequisiteImpl with Passive with RequiresAllOfFeat with AttributeRequisiteImpl
  with RequiresAllOfAttribute with RequiresBaB with FighterBonusFeat with MartialArtsFeat
  with FeaturesImpl with GrantAbilityFeature {
  self: GeneralFeat =>

  /** The active ability this feat grants. */
  override lazy val grantedAbility: ActiveAbilities = ActiveAbilities.SpringAttack
  override val grantBonusType: BonusType = BonusType.Feat
  override val abilityId: String = "SpringAttack"
  override val description: String =
    "Character suffers no penalty to his attack roll when meleeing and moving. You will also gain a 2% dodge bonus"
  // scalastyle:on
  /** Triggered as a special attack; switched off while the toggle is active. */
  override protected[this] val triggerOn: Seq[TriggerEvent] = Seq(TriggerEvent.SpecialAttack)
  override protected[this] val triggerOff: Seq[TriggerEvent] = Seq(TriggerEvent.WhileOn)
  override protected[this] val grantAbilityCategories: Seq[effect.EffectCategories.Value] =
    Seq(effect.EffectCategories.SpecialAttack)

  /** Spring Attack requires both the Dodge and Mobility feats. */
  override def allOfFeats: Seq[GeneralFeat] = Seq(GeneralFeat.Dodge, GeneralFeat.Mobility)

  /**
   * The Minimum Required Base Attack Bonus
   *
   * @return
   *   Minimum value allowed
   */
  // scalastyle:off magic.number
  override def requiresBaB: Int = 4

  /** Requires Dexterity 13. */
  override def allOfAttributes: Seq[(Attribute, Int)] = Seq(Attribute.Dexterity -> 13)
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/SpringAttack.scala | Scala | apache-2.0 | 2,757 |
package com.roundeights.tubeutil.static
import org.specs2.mutable._
import org.specs2.mock._
import scala.concurrent.ExecutionContext.Implicits.global
import java.io.File
class AssetLoaderTest extends Specification with Mockito {
    // Shared fixture: a mocked asset reader whose content hashes to a fixed value,
    // so hashed URLs are predictable (first 8 chars of the hash appear in the URL).
    val reader = mock[Asset.Reader]
    val hasher = mock[HashCache]
    hasher.hash( reader ) returns "ABCDEFGHIJKLMNOP"
    "An AssetLoader" should {
        // addHashes enables fingerprinted URLs: the 8-char hash prefix is spliced
        // in before the file extension.
        "Generate a URL with a hash" in {
            val loader = new AssetLoader(
                "asset", hasher, (asset: Asset) => {
                    asset must_== Asset("path/file.js")
                    Some(reader)
                }
            ).addHashes
            loader.url("/path/file.js") must_==
                Some("/asset/path/file.ABCDEFGH.js")
        }
        "Generate a URL without a hash when they are disabled" in {
            val loader = new AssetLoader(
                "asset", hasher,
                (asset: Asset) => {
                    asset must_== Asset("path/file.js")
                    Some(reader)
                }
            )
            loader.url("/path/file.js") must_== Some("/asset/path/file.js")
        }
        "Generate URLs with a host" in {
            val loader = new AssetLoader("asset", hasher, _ => Some(reader))
                .set( host = Some("http://example.com") )
            loader.url("/path/file.js") must_==
                Some("http://example.com/asset/path/file.js")
        }
        // "." and ".." segments in the prefix are resolved before building URLs.
        "Normalize the prefix" in {
            val loader = new AssetLoader(
                "/asset/./path/../",
                hasher, _ => Some(reader)
            ).addHashes
            loader.url("/path/file.js") must_==
                Some("/asset/path/file.ABCDEFGH.js")
        }
        "Return None if the file can't be found" in {
            new AssetLoader("asset", mock[HashCache], _ => None)
                .url("/path/file.js") must_== None
        }
        // The next two cases hit real files under src/test/resources, so the hash
        // is the file's actual content hash rather than the mocked one.
        "Find Dir based files" in {
            AssetLoader.fromDir("src/test/resources", "static").addHashes
                .url("test.txt") must_==
                Some("/static/test.76cce7d0.txt")
        }
        "Find Jar based files" in {
            AssetLoader.fromJar(classOf[AssetLoaderTest], "static").addHashes
                .url("test.txt") must_==
                Some("/static/test.76cce7d0.txt")
        }
        "Return the content of an asset" in {
            AssetLoader.fromDir( "src/test/resources", "static")
                .content("test.txt").map( _.mkString ) must_==
                Some("Some Content\\n")
        }
    }
}
| Nycto/TubeUtil | src/test/scala/static/LoaderTest.scala | Scala | mit | 2,592 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class StaticGraphSerialTest extends ModuleSerializationTest {
  /** Serializes a minimal one-node Graph and verifies it round-trips intact. */
  override def test(): Unit = {
    val linearNode = Linear[Float](10, 2).inputs()
    // The single Linear node serves as both input and output of the graph.
    val staticGraph = Graph[Float](linearNode, linearNode).setName("graph")
    val randomInput = Tensor[Float](10).apply1(_ => Random.nextFloat())
    runSerializationTest(staticGraph, randomInput)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/StaticGraphSpec.scala | Scala | apache-2.0 | 1,103 |
package controllers
import com.google.inject.Inject
import models.enums._
import play.api.i18n._
import play.api.mvc._
import scala.concurrent._
class Application @Inject()(postDao: dao.Post,
                            postTagDao: dao.PostTag,
                            localizedAction: LocalizedAction,
                            langs: Langs,
                            val controllerComponents: ControllerComponents)
                           (implicit val options: dao.Options,
                            implicit val blogrolls: dao.Blogroll,
                            implicit val ec: ExecutionContext) extends BaseController {

  /** Paginated front page. Pages are 1-based; page <= 0 redirects to the default page. */
  def index(lang: String, page: Int): Action[AnyContent] = localizedAction.async { implicit request =>
    if (page <= 0) {
      Future.successful(Redirect(routes.Application.index(lang)))
    } else {
      for {
        (posts, postCount) <- postDao.listByPage(page)
      } yield Ok(views.html.index(posts, page, postCount))
    }
  }

  /**
   * Renders a single blog post by slug, together with its tags.
   * Any failure (unknown slug, wrong post type) renders a 404.
   */
  def post(lang: String, slug: String): Action[AnyContent] = localizedAction.async { implicit request =>
    val post = for {
      post <- postDao.getBySlug(slug) if post.postType == PostTypes.Post
      tags <- postTagDao.getBySlugs(post.tags)
    } yield Ok(views.html.post(post, tags))
    post.recover { case _ => NotFound(views.html.notFound()) }
  }

  /** Renders a static page (entries of type Page) by slug; 404 on any failure. */
  def page(lang: String, slug: String): Action[AnyContent] = localizedAction.async { implicit request =>
    val post = for {
      post <- postDao.getBySlug(slug) if post.postType == PostTypes.Page
    } yield Ok(views.html.page(post))
    post.recover { case _ => NotFound(views.html.notFound()) }
  }

  /** Paginated list of posts carrying a tag. Pages <= 0 redirect to the tag's first page. */
  def tag(lang: String, slug: String, page: Int): Action[AnyContent] = localizedAction.async { implicit request =>
    if (page <= 0) {
      // Future.successful: the result is already computed, no task needs scheduling
      // (now consistent with `index`; this previously used Future(...)).
      Future.successful(Redirect(routes.Application.tag(lang, slug, 1)))
    } else {
      for {
        // `page` is known to be positive here, so the former
        // `if (page < 0) 1 else page` guard was dead code and has been removed.
        (posts, postCount) <- postDao.listByTag(slug, page)
        tag <- postTagDao.getBySlug(slug)
      } yield Ok(views.html.tag(tag, posts, page, postCount))
    }
  }

  /**
   * Redirects to the best UI language derived from the Accept-Language header.
   * Chinese is special-cased: zh-CN/zh-SG map to Simplified (zh-Hans), other zh
   * variants to Traditional (zh-Hant). Falls back to the first configured language.
   */
  def chooseLanguage: Action[AnyContent] = Action { implicit request =>
    request.acceptLanguages.map { lang =>
      if (lang.language == "zh") {
        Some(if (lang.country == "CN" || lang.country == "SG") "zh-Hans" else "zh-Hant")
      } else if (langs.availables.contains(lang)) {
        Some(lang.code)
      } else {
        None
      }
    }.find(_.isDefined).flatten match {
      case Some(code) => Redirect(routes.Application.index(code))
      case _ => Redirect(routes.Application.index(langs.availables.head.code))
    }
  }
} | terro/virblog | app/controllers/Application.scala | Scala | apache-2.0 | 2,575 |
package recfun
import common._
object Main {

  /** Prints rows 0..10 of Pascal's triangle, one row per line. */
  def main(args: Array[String]): Unit = {
    println("Pascal's Triangle")
    (0 to 10).foreach { row =>
      (0 to row).foreach(col => print(pascal(col, row) + " "))
      println()
    }
  }

  /**
   * Exercise 1: value at column `c` of row `r` of Pascal's triangle.
   * Edges of the triangle are 1; interior cells are the sum of the two cells above.
   */
  def pascal(c: Int, r: Int): Int = (c, r) match {
    case (0, _) => 1
    case (col, row) if col == row => 1
    case _ => pascal(c - 1, r - 1) + pascal(c, r - 1)
  }

  /**
   * Exercise 2: true iff the parentheses in `chars` are balanced.
   * Characters other than '(' and ')' are ignored.
   */
  def balance(chars: List[Char]): Boolean = {
    // Track the number of currently-open parentheses; it may never go negative.
    @annotation.tailrec
    def loop(rest: List[Char], open: Int): Boolean = rest match {
      case Nil => open == 0
      case '(' :: tail => loop(tail, open + 1)
      case ')' :: tail => if (open == 0) false else loop(tail, open - 1)
      case _ :: tail => loop(tail, open)
    }
    loop(chars, 0)
  }

  /**
   * Exercise 3: number of distinct ways to make `money` out of `coins`
   * (each denomination usable any number of times).
   */
  def countChange(money: Int, coins: List[Int]): Int =
    if (money < 0) 0
    else coins match {
      case Nil => if (money == 0) 1 else 0
      // Either skip this denomination entirely, or use it (at least) once more.
      case coin :: rest => countChange(money, rest) + countChange(money - coin, coins)
    }
}
| spolnik/scala-workspace | scala-learning/recfun/src/main/scala/recfun/Main.scala | Scala | apache-2.0 | 1,264 |
package io.finch.benchmarks.service
import org.json4s.{DefaultFormats, Formats}
import io.finch.json4s._
package object json4s {
  // json4s serialization needs an implicit Formats in scope; the defaults suffice here.
  implicit val formats: Formats = DefaultFormats
  // Fresh service instance backed by the json4s-encoded Finch endpoints.
  def userService: UserService = new FinchUserService
}
| peel/finch | benchmarks/src/main/scala/io/finch/benchmarks/service/json4s/package.scala | Scala | apache-2.0 | 237 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import org.scalatest.{FlatSpec, Matchers}
@com.intel.analytics.bigdl.tags.Parallel
class TransformerCriterionSpec extends FlatSpec with Matchers {
  "TransformerCriterion" should "work correctly" in {
    import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric.NumericFloat
    // TransformerCriterion squares both the input and the target (the two Square
    // transformers) before delegating to the wrapped MSE criterion.
    val criterion = TransformerCriterion[Float](MSECriterion[Float](),
      Some(Square[Float]()), Some(Square[Float]()))
    val input = Tensor(1, 3, 224, 224).rand()
    val target = Tensor(1, 3, 224, 224).rand()
    val loss = criterion.forward(input, target)
    val gradInput = criterion.backward(input, target)
    // Reference computation: square by hand, then apply plain MSE.
    val squaredInput = Tensor(1, 3, 224, 224).copy(input).square()
    val squaredTarget = Tensor(1, 3, 224, 224).copy(target).square()
    val referenceCriterion = MSECriterion()
    val expectedLoss = referenceCriterion.forward(squaredInput, squaredTarget)
    // Chain rule through x^2: dL/dx = dL/d(x^2) * 2x, hence cmul(input).mul(2).
    val expectedGradInput = referenceCriterion
      .backward(squaredInput, squaredTarget).cmul(input).mul(2.0f)
    loss should be (expectedLoss)
    gradInput should be (expectedGradInput)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/TransformerCriterionSpec.scala | Scala | apache-2.0 | 1,780 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.ui
import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}
import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.StreamingQueryProgress
class UIUtilsSuite extends SparkFunSuite with Matchers {

  test("streaming query started with no batch completed") {
    // No progress yet -> withNoProgress must return the supplied default (0).
    val uiData = mock(classOf[StreamingQueryUIData], RETURNS_SMART_NULLS)
    when(uiData.lastProgress).thenReturn(null)
    assert(0 == UIUtils.withNoProgress(uiData, 1, 0))
  }

  test("streaming query started with at least one batch completed") {
    // Progress available -> withNoProgress must evaluate and return the value (1).
    val uiData = mock(classOf[StreamingQueryUIData], RETURNS_SMART_NULLS)
    val lastProgress = mock(classOf[StreamingQueryProgress], RETURNS_SMART_NULLS)
    when(uiData.lastProgress).thenReturn(lastProgress)
    assert(1 == UIUtils.withNoProgress(uiData, 1, 0))
  }
}
| goldmedal/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/UIUtilsSuite.scala | Scala | apache-2.0 | 1,663 |
package fpinscala.laziness
import Stream._
import annotation.tailrec
trait Stream[+A] {
  // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second
  // argument by name and may choose not to evaluate it.
  // Not stack-safe: one stack frame per forced element, so deep folds on long
  // streams can overflow.
  def foldRight[B](z: => B)(f: (A, => B) => B): B =
    this match {
      // If `f` doesn't evaluate its second argument, the recursion never occurs.
      case Cons(h,t) => f(h(), t().foldRight(z)(f))
      case _ => z
    }
def exists(p: A => Boolean): Boolean =
// Here `b` is the unevaluated recursive step that folds the tail of the stream.
// If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early.
foldRight(false)((a, b) => p(a) || b)
  // First element satisfying `f`, if any. Tail-recursive, so safe on long streams;
  // only the searched prefix is forced.
  // NOTE(review): h() is forced twice on a match; harmless if the smart constructor
  // memoizes heads -- confirm against `cons` in the companion object.
  @tailrec
  final def find(f: A => Boolean): Option[A] = this match {
    case Empty => None
    case Cons(h, t) => if (f(h())) Some(h()) else t().find(f)
  }
// EX 5.1
// Write a function to convert a Stream to a List, which will force its evaluation
// and let you look at it in the REPL.
def toList(): List[A] =
foldRight[List[A]](Nil)((h, t) => h :: t)
  // EX 5.2
  // Write the function take(n) for returning the first n elements of a Stream, and
  // drop(n) for skipping the first n elements of a Stream.
  // Lazy: nothing is forced until the result is consumed; n <= 0 yields the empty stream.
  def take(n: Int): Stream[A] = this match {
    case Cons(h, t) if n > 0 => cons(h(), t().take(n-1))
    case _ => empty
  }
@tailrec
final def drop(n: Int): Stream[A] = this match {
case Cons(_, t) if n > 0 => t().drop(n-1)
case _ => this
}
  // EX 5.3
  // Write the function takeWhile for returning all starting elements of a Stream that
  // match the given predicate.
  // Stops (returns empty) at the first element failing `p`.
  def takeWhile(p: A => Boolean): Stream[A] = this match {
    case Cons(h, t) if p(h()) => cons(h(), t().takeWhile(p))
    case _ => empty
  }
// EX 5.4
// Implement forAll, which checks that all elements in the Stream match a given predicate. Your
// implementation should terminate the traversal as soon as it encounters a nonmatching value.
def forAll(p: A => Boolean): Boolean =
foldRight(true)((a, b) => p(a) && b)
  // EX 5.5
  // Use foldRight to implement takeWhile.
  // Because foldRight's accumulator is by-name, traversal stops at the first
  // element failing `p`.
  def takeWhileViaFoldRight(p: A => Boolean): Stream[A] =
    foldRight(empty[A])((h,t) => if (p(h)) cons(h,t) else empty)
  // EX 5.6
  // Implement headOption using foldRight.
  // The combining function never forces its second argument, so only the first
  // cell of the stream is ever evaluated.
  def headOption: Option[A] =
    foldRight[Option[A]](None)((h,_) => Some(h))
  // EX 5.7
  // Implement map, filter, append and flatMap using foldRight. The append method should be
  // non-strict in its argument. Part of the exercise is writing your own function signatures.
  // All four are incremental: elements are produced on demand as the result is forced.
  def map[B](f: A => B): Stream[B] =
    foldRight(empty[B])((h,t) => cons(f(h),t))
  // Keeps only the elements satisfying `p`.
  def filter(p: A => Boolean): Stream[A] =
    foldRight(empty[A])((h,t) => if (p(h)) cons(h,t) else t)
  // `s` is by-name, so the appended stream is not forced until this one is exhausted.
  def append[B >: A](s: => Stream[B]): Stream[B] =
    foldRight(s)((h,t) => cons(h,t))
  def flatMap[B](f: A => Stream[B]): Stream[B] =
    foldRight(empty[B])((h,t) => f(h) append t)
  // EX 5.13
  // Use unfold to implement map, take, takeWhile, zipWith and zipAll. The zipAll function
  // should continue the traversal as long as either stream has more elements ; it uses
  // Option to indicate whether each stream has been exhausted.
  // >> def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A]
  // State for map is simply the remaining stream.
  def mapViaUnfold[B](f: A => B): Stream[B] = unfold[B,Stream[A]] (this) {
    case Cons(h, t) => Some((f(h()), t()))
    case _ => None
  }
  // State pairs the remaining stream with the number of elements still wanted.
  def takeViaUnfold(n: Int): Stream[A] = unfold[A,(Stream[A],Int)] ((this,n)) {
    case (Cons(h, t),i) if i > 0 => Some(h(), (t(),i-1))
    case _ => None
  }
  def takeWhileViaUnfold(p: A => Boolean): Stream[A] = unfold[A,Stream[A]] (this) {
    case Cons(h, t) if p(h()) => Some((h(), t()))
    case _ => None
  }
  // Stops as soon as either stream runs out.
  def zipWithViaUnfold[B,C](s2: Stream[B])(zip: (A,B) => C): Stream[C] = unfold[C,(Stream[A],Stream[B])] ((this,s2)) {
    case (Cons(h1,t1), Cons(h2,t2)) => Some((zip(h1(),h2()), (t1(),t2())))
    case _ => None
  }
  // Continues while EITHER stream has elements; an exhausted side contributes None.
  def zipAllViaUnfold[B](s2: Stream[B]): Stream[(Option[A],Option[B])] = unfold[(Option[A],Option[B]),(Stream[A],Stream[B])] ((this,s2)) {
    case (Cons(h1,t1), Cons(h2,t2)) => Some(((Some(h1()),Some(h2())), (t1(),t2())))
    case (Cons(h1,t1), Empty) => Some(((Some(h1()),None), (t1(),Empty)))
    case (Empty, Cons(h2,t2)) => Some(((None,Some(h2())), (Empty,t2())))
    case _ => None
  }
// EX 5.14
// Implement startsWith using functions you’ve written. It should check if one Stream is a prefix of another.
  // True iff `s` is a prefix of this stream. zipAll pairs the streams; the
  // takeWhile keeps pairs only while `s` still has elements (so a longer
  // `this` is fine), then every kept pair must match element-wise. If `this`
  // is shorter than `s`, a (None, Some(_)) pair survives and forAll fails.
  def startsWith[B](s: Stream[B]): Boolean =
    zipAllViaUnfold(s).takeWhile { // takeWhile(!_._2.isEmpty)
      case (_,Some(_)) => true
      case _ => false
    } forAll {
      case (o1,o2) => o1 == o2
    }
// EX 5.15
// Implement tails using unfold. For a given Stream, tails returns the Stream of suffixes
// of the input sequence, starting with the original Stream. For example, given Stream(1,2,3),
// it would return Stream(Stream(1,2,3), Stream(2,3), Stream(3), Stream()).
// >> def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A]
  // All suffixes of this stream, longest first, ending with the empty stream
  // (appended explicitly because unfold stops when the state reaches Empty).
  // e.g. Stream(1,2,3).tails == Stream(Stream(1,2,3), Stream(2,3), Stream(3), Stream()).
  def tails: Stream[Stream[A]] = unfold[Stream[A],Stream[A]] (this) {
    case s @ Cons(_,t) => Some(s,t())
    case Empty => None
  } append Stream(Empty)
def hasSubsequence[A](s: Stream[A]): Boolean =
tails exists (_ startsWith s)
// EX 5.16
// Generalize tails to the function scanRight, which is like a foldRight that
// returns a stream of the intermediate results.
// >> def foldRight[B](z: => B)(f: (A, => B) => B): B
// >> def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A]
  // Like foldRight, but also emits every intermediate accumulator, ending
  // with `z` itself; e.g. Stream(1,2,3).scanRight(0)(_ + _) yields
  // Stream(6, 5, 3, 0). The accumulator pairs each running value with the
  // stream of results built so far.
  // NOTE(review): destructuring the pair in the `case` forces the by-name
  // accumulator at each step — presumably acceptable for these exercises,
  // but confirm if the fold must stay fully lazy.
  def scanRight[B](z: => B)(f: (A, => B) => B): Stream[B] =
    foldRight[(B,Stream[B])] ((z, Stream(z))) {
      case (a, (b,bs)) => { val b1 = f(a,b); (b1, cons(b1,bs)) }
    }._2
}
// The empty stream.
case object Empty extends Stream[Nothing]
// A non-empty cell. Head and tail are explicit thunks (`() => _`), so nothing
// is evaluated until forced with h() / t(); build via Stream.cons to memoise.
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
  // Smart constructor: caches head and tail in lazy vals so each is
  // evaluated at most once, no matter how often the thunks are forced.
  def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
    lazy val head = hd
    lazy val tail = tl
    Cons(() => head, () => tail)
  }
  // Typed empty stream; the annotation helps inference at call sites.
  def empty[A]: Stream[A] = Empty
  // Build a stream from explicit elements (evaluated strictly, as varargs).
  def apply[A](as: A*): Stream[A] =
    if (as.isEmpty) empty
    else cons(as.head, apply(as.tail: _*))
  // Infinite stream of 1s; the self-reference is legal because cons takes
  // its arguments by name.
  val ones: Stream[Int] = Stream.cons(1, ones)
  // EX 5.8
  // Generalize ones slightly to the function constant, which returns an infinite Stream of a given value.
  def constant[A](a: A): Stream[A] =
    cons(a, constant(a))
  // EX 5.9
  // Write a function that generates an infinite stream of integers, starting from n, then n+1, n+2, and so on.
  def from(n: Int): Stream[Int] =
    cons(n, from(n+1))
  // EX 5.10
  // Write a function fibs that generates the infinite stream of Fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, and so on.
  def fibs(): Stream[Int] = {
    // go carries the last two values of the sequence.
    def go(previous: Int, current: Int): Stream[Int] =
      cons(previous, go(current, previous+current))
    go(0, 1)
  }
  // EX 5.11
  // Write a more general stream-building function called unfold. It takes an initial state, and
  // a function for producing both the next state and the next value in the generated stream.
  // Option is used to indicate when the Stream should be terminated, if at all.
  def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = f(z) match {
    case None => empty
    case Some((a,s)) => cons(a, unfold(s)(f))
  }
  // EX 5.12
  // Write fibs, from, constant and ones in terms of unfold.
  def fibsViaUnfold(): Stream[Int] = unfold[Int,(Int,Int)] ((0,1)) {
    case (p,c) => Some((p, (c,p+c))) // §15.7 Case sequences as partial functions (pp 327-330)
  }
  def fromViaUnfold(n: Int): Stream[Int] = unfold[Int,Int] (n) (i => Some((i, i+1)))
  def constantViaUnfold[A](a: A): Stream[A] = unfold[A,A] (a) (_ => Some(a,a))
  val onesViaUnfold: Stream[Int] = unfold[Int,Int] (1) (_ => Some((1, 1)))
}
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.convert.text
import java.io._
import java.util.Locale
import java.util.concurrent.Executors
import com.google.common.collect.Queues
import com.typesafe.config.Config
import org.apache.commons.csv.{CSVFormat, QuoteMode}
import org.locationtech.geomesa.convert.Transformers.{EvaluationContext, Expr}
import org.locationtech.geomesa.convert.{Field, LinesToSimpleFeatureConverter, SimpleFeatureConverterFactory}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
/** Factory for [[DelimitedTextConverter]]s; selected when a converter config
  * declares type `delimited-text`. */
class DelimitedTextConverterFactory extends SimpleFeatureConverterFactory[String] {
  override def canProcess(conf: Config): Boolean = canProcessType(conf, "delimited-text")
  // Quoting variants beyond the stock commons-csv dialects.
  val QUOTED = CSVFormat.DEFAULT.withQuoteMode(QuoteMode.ALL)
  val QUOTE_ESCAPE = CSVFormat.DEFAULT.withEscape('"')
  val QUOTED_WITH_QUOTE_ESCAPE = QUOTE_ESCAPE.withQuoteMode(QuoteMode.ALL)
  /**
   * Builds a converter from a typesafe config.
   *
   * Reads `format` (CSV dialect name, case-insensitive), `fields`,
   * `id-field`, and the optional `options.skip-lines` / `options.pipe-size`.
   *
   * @throws IllegalArgumentException if `format` names an unknown dialect
   */
  def buildConverter(targetSFT: SimpleFeatureType, conf: Config): DelimitedTextConverter = {
    // Locale.ROOT keeps the case-normalisation of the dialect name stable
    // across JVM default locales (e.g. the Turkish dotted/dotless 'i').
    val baseFmt = conf.getString("format").toUpperCase(Locale.ROOT) match {
      case "CSV" | "DEFAULT"          => CSVFormat.DEFAULT
      case "EXCEL"                    => CSVFormat.EXCEL
      case "MYSQL"                    => CSVFormat.MYSQL
      case "TDF" | "TSV" | "TAB"      => CSVFormat.TDF
      case "RFC4180"                  => CSVFormat.RFC4180
      case "QUOTED"                   => QUOTED
      case "QUOTE_ESCAPE"             => QUOTE_ESCAPE
      case "QUOTED_WITH_QUOTE_ESCAPE" => QUOTED_WITH_QUOTE_ESCAPE
      case _ => throw new IllegalArgumentException("Unknown delimited text format")
    }
    // Optional tuning knobs, applied onto the mutable defaults.
    val opts = {
      import org.locationtech.geomesa.utils.conf.ConfConversions._
      val o = "options"
      val dOpts = new DelimitedOptions()
      conf.getIntOpt(s"$o.skip-lines").foreach(s => dOpts.skipLines = s)
      conf.getIntOpt(s"$o.pipe-size").foreach(p => dOpts.pipeSize = p)
      dOpts
    }
    val fields = buildFields(conf.getConfigList("fields"))
    val idBuilder = buildIdBuilder(conf.getString("id-field"))
    new DelimitedTextConverter(baseFmt, targetSFT, idBuilder, fields, opts, isValidating(conf))
  }
}
/** Mutable tuning knobs for [[DelimitedTextConverter]]: `skipLines` leading
  * input lines are dropped before parsing, and `pipeSize` is the buffer size
  * passed to the internal PipedReader. */
class DelimitedOptions(var skipLines: Int = 0, var pipeSize: Int = 16 * 1024)
/** Converts delimited-text lines into SimpleFeatures.
  *
  * Incoming lines are queued, written to a piped writer by a background
  * thread, and read back through a commons-csv parser on the caller's
  * thread. NOTE(review): instances are single-use and not thread-safe —
  * the queue, pipe and parser are shared mutable state; confirm callers
  * never share one converter across threads. */
class DelimitedTextConverter(format: CSVFormat,
                             val targetSFT: SimpleFeatureType,
                             val idBuilder: Expr,
                             val inputFields: IndexedSeq[Field],
                             val options: DelimitedOptions,
                             val validating: Boolean)
  extends LinesToSimpleFeatureConverter {
  var curString: String = null
  // Hand-off queue between the caller and the pipe-writing thread.
  val q = Queues.newArrayBlockingQueue[String](32)
  // if the record to write is bigger than the buffer size of the PipedReader
  // then the writer will block until the reader reads data off of the pipe.
  // For this reason, we have to separate the reading and writing into two
  // threads
  val writer = new PipedWriter()
  val reader = new PipedReader(writer, options.pipeSize) // record size
  // Streaming CSV parser over whatever the writer thread pushes into the pipe.
  val parser = format.parse(reader).iterator()
  val separator = format.getRecordSeparator
  // Single daemon-less worker that drains the queue into the pipe; stopped
  // via shutdownNow() in close(), which interrupts the blocking take().
  val es = Executors.newSingleThreadExecutor()
  es.submit(new Runnable {
    override def run(): Unit = {
      while (true) {
        val s = q.take()
        // make sure the input is not null and is nonempty...if it is empty the threads will deadlock
        if (s != null && s.nonEmpty) {
          writer.write(s)
          writer.write(separator)
          writer.flush()
        }
      }
    }
  })
  // Skips the configured number of header lines before delegating.
  override def processInput(is: Iterator[String], ec: EvaluationContext): Iterator[SimpleFeature] = {
    ec.counter.incLineCount(options.skipLines)
    super.processInput(is.drop(options.skipLines), ec)
  }
  // Parses one line into a single row: the raw line at index 0 followed by
  // the parsed column values.
  override def fromInputType(string: String): Seq[Array[Any]] = {
    // empty strings cause deadlock
    if (string == null || string.isEmpty) {
      throw new IllegalArgumentException("Invalid input (empty)")
    }
    q.put(string)
    val rec = parser.next()
    val len = rec.size()
    val ret = Array.ofDim[Any](len + 1)
    ret(0) = string
    var i = 0
    while (i < len) {
      ret(i+1) = rec.get(i)
      i += 1
    }
    Seq(ret)
  }
  // Interrupts the writer thread and releases both ends of the pipe.
  override def close(): Unit = {
    es.shutdownNow()
    writer.close()
    reader.close()
  }
}
| mdzimmerman/geomesa | geomesa-convert/geomesa-convert-text/src/main/scala/org/locationtech/geomesa/convert/text/DelimitedTextConverter.scala | Scala | apache-2.0 | 4,789 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
package chemist
package static
import java.io.File
import journal.Logger
import concurrent.duration._
import scalaz.concurrent.Task
import http.MonitoringServer
import knobs.{FileResource,ClassPathResource,Optional,Pattern,Required}
/** Entry point: loads chemist configuration from the classpath (with an
  * optional file override), starts a monitoring server, then blocks in the
  * chemist HTTP server until it stops. */
object Main {
  def main(args: Array[String]): Unit = {
    val chemist = new StaticChemist[Static]
    // Classpath config is required; the /usr/share override is optional.
    val k = knobs.load(
      Required(ClassPathResource("oncue/chemist.cfg")) ::
      Optional(FileResource(new File("/usr/share/oncue/etc/chemist.cfg"))) :: Nil).run
    val s = new Static { val config = Config.readConfig(k).run }
    // TIM: best I can tell, this didnt actually work beforehand
    // as we're running this on a prototype and it doesn't
    // seem to reload.
    // k.subscribe(Pattern("*.*"), {
    //   case (name, optvalue) => chemist.bootstrap.run(new Static {
    //     val config = Config.readConfig(k).run
    //   })
    // })
    val monitoring = MonitoringServer.start(Monitoring.default, 5775)
    // this is the edge of the world and will just block until its stopped
    Server.unsafeStart(new Server(chemist, s))
    // if we reach these then the server process has stopped and we need
    // to cleanup the associated resources.
    monitoring.stop()
    dispatch.Http.shutdown()
  }
}
| neigor/funnel | chemist-static/src/main/scala/Main.scala | Scala | apache-2.0 | 2,089 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.fixtures
import uk.gov.hmrc.auth.core.retrieve.Name
import uk.gov.hmrc.domain.Nino
import uk.gov.hmrc.nisp.controllers.auth.NispAuthedUser
import uk.gov.hmrc.nisp.helpers.TestAccountBuilder
import uk.gov.hmrc.nisp.models.UserName
import uk.gov.hmrc.nisp.models.citizen.CitizenDetailsResponse
/** Test fixture: builds a [[NispAuthedUser]] from the canned citizen-details
  * JSON associated with the given nino. */
object NispAuthedUserFixture {
  def user(nino: Nino): NispAuthedUser = {
    val details: CitizenDetailsResponse =
      TestAccountBuilder.directJsonResponse(nino, "citizen-details")
    val person = details.person
    // trustedHelper is always absent and isSa is false for these fixtures.
    NispAuthedUser(
      nino,
      dateOfBirth = person.dateOfBirth,
      name = UserName(Name(person.firstName, person.lastName)),
      address = details.address,
      None,
      isSa = false
    )
  }
}
| hmrc/nisp-frontend | test/uk/gov/hmrc/nisp/fixtures/NispAuthedUserFixture.scala | Scala | apache-2.0 | 1,418 |
package org.abstractbinary.unshuffle
// Playing-card suit (spelled "Suite" in this codebase). Sealed so matches
// over the four cases are checked for exhaustiveness; each case renders as
// its Unicode suit symbol.
sealed abstract class Suite
case object Clubs extends Suite {
  override def toString = "\u2663" // ♣
}
case object Spades extends Suite {
  override def toString = "\u2660" // ♠
}
case object Hearts extends Suite {
  override def toString = "\u2665" // ♥
}
case object Diamonds extends Suite {
  override def toString = "\u2666" // ♦
}
/** A playing card: rank 1 (ace) through 13 (king) plus a suit. */
case class Card(_number : Int, _suite : Suite) {
  // Renders as rank symbol followed by the suit glyph, e.g. "A♠" or "10♥".
  override def toString = {
    val rank = _number match {
      case 1            => "A"
      case n if n <= 10 => n.toString
      case 11           => "J"
      case 12           => "Q"
      case 13           => "K"
      case n            => "Invalid card number: %d".format(n)
    }
    rank + suite.toString
  }
  // Hearts and diamonds are the red suits; clubs and spades are black.
  def isRed : Boolean =
    _suite match {
      case Hearts | Diamonds => true
      case Clubs | Spades    => false
    }
  def isBlack : Boolean = !isRed
  // Public accessor for the suit.
  def suite = _suite
}
| scvalex/unshuffle | src/main/scala/Card.scala | Scala | gpl-3.0 | 994 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.{Dataset, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.expressions.codegen.{ByteCodeStats, CodeAndComment, CodeGenerator}
import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BroadcastNestedLoopJoinExec, ShuffledHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
// Disable AQE because the WholeStageCodegenExec is added when running QueryStageExec
class WholeStageCodegenSuite extends QueryTest with SharedSparkSession
with DisableAdaptiveExecutionSuite {
import testImplicits._
  // A simple range + filter + project pipeline should be fused into a single
  // WholeStageCodegenExec node, and still produce the correct result.
  test("range/filter should be combined") {
    val df = spark.range(10).filter("id = 1").selectExpr("id + 1")
    val plan = df.queryExecution.executedPlan
    assert(plan.find(_.isInstanceOf[WholeStageCodegenExec]).isDefined)
    assert(df.collect() === Array(Row(2)))
  }
  // A global (no grouping keys) HashAggregateExec must appear as the direct
  // child of a WholeStageCodegenExec, i.e. aggregation participates in codegen.
  test("Aggregate should be included in WholeStageCodegen") {
    val df = spark.range(10).groupBy().agg(max(col("id")), avg(col("id")))
    val plan = df.queryExecution.executedPlan
    assert(plan.find(p =>
      p.isInstanceOf[WholeStageCodegenExec] &&
        p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[HashAggregateExec]).isDefined)
    assert(df.collect() === Array(Row(9, 4.5)))
  }
testWithWholeStageCodegenOnAndOff("GenerateExec should be" +
" included in WholeStageCodegen") { codegenEnabled =>
import testImplicits._
val arrayData = Seq(("James", Seq("Java", "Scala"), Map("hair" -> "black", "eye" -> "brown")))
val df = arrayData.toDF("name", "knownLanguages", "properties")
// Array - explode
var expDF = df.select($"name", explode($"knownLanguages"), $"properties")
var plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF, Array(Row("James", "Java", Map("hair" -> "black", "eye" -> "brown")),
Row("James", "Scala", Map("hair" -> "black", "eye" -> "brown"))))
// Map - explode
expDF = df.select($"name", $"knownLanguages", explode($"properties"))
plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF,
Array(Row("James", List("Java", "Scala"), "hair", "black"),
Row("James", List("Java", "Scala"), "eye", "brown")))
// Array - posexplode
expDF = df.select($"name", posexplode($"knownLanguages"))
plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF,
Array(Row("James", 0, "Java"), Row("James", 1, "Scala")))
// Map - posexplode
expDF = df.select($"name", posexplode($"properties"))
plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF,
Array(Row("James", 0, "hair", "black"), Row("James", 1, "eye", "brown")))
// Array - explode , selecting all columns
expDF = df.select($"*", explode($"knownLanguages"))
plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF,
Array(Row("James", Seq("Java", "Scala"), Map("hair" -> "black", "eye" -> "brown"), "Java"),
Row("James", Seq("Java", "Scala"), Map("hair" -> "black", "eye" -> "brown"), "Scala")))
// Map - explode, selecting all columns
expDF = df.select($"*", explode($"properties"))
plan = expDF.queryExecution.executedPlan
assert(plan.find {
case stage: WholeStageCodegenExec =>
stage.find(_.isInstanceOf[GenerateExec]).isDefined
case _ => !codegenEnabled.toBoolean
}.isDefined)
checkAnswer(expDF,
Array(
Row("James", List("Java", "Scala"),
Map("hair" -> "black", "eye" -> "brown"), "hair", "black"),
Row("James", List("Java", "Scala"),
Map("hair" -> "black", "eye" -> "brown"), "eye", "brown")))
}
test("Aggregate with grouping keys should be included in WholeStageCodegen") {
val df = spark.range(3).groupBy(col("id") * 2).count().orderBy(col("id") * 2)
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[HashAggregateExec]).isDefined)
assert(df.collect() === Array(Row(0, 1), Row(2, 1), Row(4, 1)))
}
test("BroadcastHashJoin should be included in WholeStageCodegen") {
val rdd = spark.sparkContext.makeRDD(Seq(Row(1, "1"), Row(1, "1"), Row(2, "2")))
val schema = new StructType().add("k", IntegerType).add("v", StringType)
val smallDF = spark.createDataFrame(rdd, schema)
val df = spark.range(10).join(broadcast(smallDF), col("k") === col("id"))
assert(df.queryExecution.executedPlan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[BroadcastHashJoinExec]).isDefined)
assert(df.collect() === Array(Row(1, 1, "1"), Row(1, 1, "1"), Row(2, 2, "2")))
}
test("ShuffledHashJoin should be included in WholeStageCodegen") {
val df1 = spark.range(5).select($"id".as("k1"))
val df2 = spark.range(15).select($"id".as("k2"))
val df3 = spark.range(6).select($"id".as("k3"))
// test one shuffled hash join
val oneJoinDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2")
assert(oneJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true
}.size === 1)
checkAnswer(oneJoinDF, Seq(Row(0, 0), Row(1, 1), Row(2, 2), Row(3, 3), Row(4, 4)))
// test two shuffled hash joins
val twoJoinsDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2")
.join(df3.hint("SHUFFLE_HASH"), $"k1" === $"k3")
assert(twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true
}.size === 2)
checkAnswer(twoJoinsDF,
Seq(Row(0, 0, 0), Row(1, 1, 1), Row(2, 2, 2), Row(3, 3, 3), Row(4, 4, 4)))
}
test("Left/Right Outer SortMergeJoin should be included in WholeStageCodegen") {
val df1 = spark.range(10).select($"id".as("k1"))
val df2 = spark.range(4).select($"id".as("k2"))
val df3 = spark.range(6).select($"id".as("k3"))
// test one left outer sort merge join
val oneLeftOuterJoinDF = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", "left_outer")
assert(oneLeftOuterJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : SortMergeJoinExec) => true
}.size === 1)
checkAnswer(oneLeftOuterJoinDF, Seq(Row(0, 0), Row(1, 1), Row(2, 2), Row(3, 3), Row(4, null),
Row(5, null), Row(6, null), Row(7, null), Row(8, null), Row(9, null)))
// test one right outer sort merge join
val oneRightOuterJoinDF = df2.join(df3.hint("SHUFFLE_MERGE"), $"k2" === $"k3", "right_outer")
assert(oneRightOuterJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : SortMergeJoinExec) => true
}.size === 1)
checkAnswer(oneRightOuterJoinDF, Seq(Row(0, 0), Row(1, 1), Row(2, 2), Row(3, 3), Row(null, 4),
Row(null, 5)))
// test two sort merge joins
val twoJoinsDF = df3.join(df2.hint("SHUFFLE_MERGE"), $"k3" === $"k2", "left_outer")
.join(df1.hint("SHUFFLE_MERGE"), $"k3" === $"k1", "right_outer")
assert(twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : SortMergeJoinExec) => true
}.size === 2)
checkAnswer(twoJoinsDF,
Seq(Row(0, 0, 0), Row(1, 1, 1), Row(2, 2, 2), Row(3, 3, 3), Row(4, null, 4), Row(5, null, 5),
Row(null, null, 6), Row(null, null, 7), Row(null, null, 8), Row(null, null, 9)))
}
test("Left Semi SortMergeJoin should be included in WholeStageCodegen") {
val df1 = spark.range(10).select($"id".as("k1"))
val df2 = spark.range(4).select($"id".as("k2"))
val df3 = spark.range(6).select($"id".as("k3"))
// test one left semi sort merge join
val oneJoinDF = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", "left_semi")
assert(oneJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : SortMergeJoinExec)) => true
}.size === 1)
checkAnswer(oneJoinDF, Seq(Row(0), Row(1), Row(2), Row(3)))
// test two left semi sort merge joins
val twoJoinsDF = df3.join(df2.hint("SHUFFLE_MERGE"), $"k3" === $"k2", "left_semi")
.join(df1.hint("SHUFFLE_MERGE"), $"k3" === $"k1", "left_semi")
assert(twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : SortMergeJoinExec)) |
WholeStageCodegenExec(_ : SortMergeJoinExec) => true
}.size === 2)
checkAnswer(twoJoinsDF, Seq(Row(0), Row(1), Row(2), Row(3)))
}
test("Left Anti SortMergeJoin should be included in WholeStageCodegen") {
val df1 = spark.range(10).select($"id".as("k1"))
val df2 = spark.range(4).select($"id".as("k2"))
val df3 = spark.range(6).select($"id".as("k3"))
// test one left anti sort merge join
val oneJoinDF = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", "left_anti")
assert(oneJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : SortMergeJoinExec)) => true
}.size === 1)
checkAnswer(oneJoinDF, Seq(Row(4), Row(5), Row(6), Row(7), Row(8), Row(9)))
// test two left anti sort merge joins
val twoJoinsDF = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", "left_anti")
.join(df3.hint("SHUFFLE_MERGE"), $"k1" === $"k3", "left_anti")
assert(twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : SortMergeJoinExec)) |
WholeStageCodegenExec(_ : SortMergeJoinExec) => true
}.size === 2)
checkAnswer(twoJoinsDF, Seq(Row(6), Row(7), Row(8), Row(9)))
}
test("Inner/Cross BroadcastNestedLoopJoinExec should be included in WholeStageCodegen") {
val df1 = spark.range(4).select($"id".as("k1"))
val df2 = spark.range(3).select($"id".as("k2"))
val df3 = spark.range(2).select($"id".as("k3"))
Seq(true, false).foreach { codegenEnabled =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled.toString) {
// test broadcast nested loop join without condition
val oneJoinDF = df1.join(df2)
var hasJoinInCodegen = oneJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : BroadcastNestedLoopJoinExec) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(oneJoinDF,
Seq(Row(0, 0), Row(0, 1), Row(0, 2), Row(1, 0), Row(1, 1), Row(1, 2),
Row(2, 0), Row(2, 1), Row(2, 2), Row(3, 0), Row(3, 1), Row(3, 2)))
// test broadcast nested loop join with condition
val oneJoinDFWithCondition = df1.join(df2, $"k1" + 1 =!= $"k2")
hasJoinInCodegen = oneJoinDFWithCondition.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_ : BroadcastNestedLoopJoinExec) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(oneJoinDFWithCondition,
Seq(Row(0, 0), Row(0, 2), Row(1, 0), Row(1, 1), Row(2, 0), Row(2, 1),
Row(2, 2), Row(3, 0), Row(3, 1), Row(3, 2)))
// test two broadcast nested loop joins
val twoJoinsDF = df1.join(df2, $"k1" < $"k2").crossJoin(df3)
hasJoinInCodegen = twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(BroadcastNestedLoopJoinExec(
_: BroadcastNestedLoopJoinExec, _, _, _, _)) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(twoJoinsDF,
Seq(Row(0, 1, 0), Row(0, 2, 0), Row(1, 2, 0), Row(0, 1, 1), Row(0, 2, 1), Row(1, 2, 1)))
}
}
}
test("Left/Right outer BroadcastNestedLoopJoinExec should be included in WholeStageCodegen") {
val df1 = spark.range(4).select($"id".as("k1"))
val df2 = spark.range(3).select($"id".as("k2"))
val df3 = spark.range(2).select($"id".as("k3"))
val df4 = spark.range(0).select($"id".as("k4"))
Seq(true, false).foreach { codegenEnabled =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled.toString) {
// test left outer join
val leftOuterJoinDF = df1.join(df2, $"k1" > $"k2", "left_outer")
var hasJoinInCodegen = leftOuterJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_: BroadcastNestedLoopJoinExec) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(leftOuterJoinDF,
Seq(Row(0, null), Row(1, 0), Row(2, 0), Row(2, 1), Row(3, 0), Row(3, 1), Row(3, 2)))
// test right outer join
val rightOuterJoinDF = df1.join(df2, $"k1" < $"k2", "right_outer")
hasJoinInCodegen = rightOuterJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_: BroadcastNestedLoopJoinExec) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(rightOuterJoinDF, Seq(Row(null, 0), Row(0, 1), Row(0, 2), Row(1, 2)))
// test a combination of left outer and right outer joins
val twoJoinsDF = df1.join(df2, $"k1" > $"k2" + 1, "right_outer")
.join(df3, $"k1" <= $"k3", "left_outer")
hasJoinInCodegen = twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(BroadcastNestedLoopJoinExec(
_: BroadcastNestedLoopJoinExec, _, _, _, _)) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(twoJoinsDF,
Seq(Row(2, 0, null), Row(3, 0, null), Row(3, 1, null), Row(null, 2, null)))
// test build side is empty
val buildSideIsEmptyDF = df3.join(df4, $"k3" > $"k4", "left_outer")
hasJoinInCodegen = buildSideIsEmptyDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(_: BroadcastNestedLoopJoinExec) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(buildSideIsEmptyDF, Seq(Row(0, null), Row(1, null)))
}
}
}
test("Left semi/anti BroadcastNestedLoopJoinExec should be included in WholeStageCodegen") {
val df1 = spark.range(4).select($"id".as("k1"))
val df2 = spark.range(3).select($"id".as("k2"))
val df3 = spark.range(2).select($"id".as("k3"))
Seq(true, false).foreach { codegenEnabled =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled.toString) {
// test left semi join
val semiJoinDF = df1.join(df2, $"k1" + 1 <= $"k2", "left_semi")
var hasJoinInCodegen = semiJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : BroadcastNestedLoopJoinExec)) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(semiJoinDF, Seq(Row(0), Row(1)))
// test left anti join
val antiJoinDF = df1.join(df2, $"k1" + 1 <= $"k2", "left_anti")
hasJoinInCodegen = antiJoinDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, _ : BroadcastNestedLoopJoinExec)) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(antiJoinDF, Seq(Row(2), Row(3)))
// test a combination of left semi and left anti joins
val twoJoinsDF = df1.join(df2, $"k1" < $"k2", "left_semi")
.join(df3, $"k1" > $"k3", "left_anti")
hasJoinInCodegen = twoJoinsDF.queryExecution.executedPlan.collect {
case WholeStageCodegenExec(ProjectExec(_, BroadcastNestedLoopJoinExec(
_: BroadcastNestedLoopJoinExec, _, _, _, _))) => true
}.size === 1
assert(hasJoinInCodegen == codegenEnabled)
checkAnswer(twoJoinsDF, Seq(Row(0)))
}
}
}
  // SortExec should be wrapped directly by WholeStageCodegenExec, and sorting
  // a descending range must yield ascending output.
  test("Sort should be included in WholeStageCodegen") {
    val df = spark.range(3, 0, -1).toDF().sort(col("id"))
    val plan = df.queryExecution.executedPlan
    assert(plan.find(p =>
      p.isInstanceOf[WholeStageCodegenExec] &&
        p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[SortExec]).isDefined)
    assert(df.collect() === Array(Row(1), Row(2), Row(3)))
  }
test("MapElements should be included in WholeStageCodegen") {
import testImplicits._
val ds = spark.range(10).map(_.toString)
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[SerializeFromObjectExec]).isDefined)
assert(ds.collect() === 0.until(10).map(_.toString).toArray)
}
  // A typed (lambda-based) filter should still compile into a FilterExec
  // inside a WholeStageCodegenExec, not fall back to interpreted execution.
  test("typed filter should be included in WholeStageCodegen") {
    val ds = spark.range(10).filter(_ % 2 == 0)
    val plan = ds.queryExecution.executedPlan
    assert(plan.find(p =>
      p.isInstanceOf[WholeStageCodegenExec] &&
        p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[FilterExec]).isDefined)
    assert(ds.collect() === Array(0, 2, 4, 6, 8))
  }
test("back-to-back typed filter should be included in WholeStageCodegen") {
val ds = spark.range(10).filter(_ % 2 == 0).filter(_ % 3 == 0)
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[FilterExec]).isDefined)
assert(ds.collect() === Array(0, 6))
}
test("cache for primitive type should be in WholeStageCodegen with InMemoryTableScanExec") {
import testImplicits._
val dsInt = spark.range(3).cache()
dsInt.count()
val dsIntFilter = dsInt.filter(_ > 0)
val planInt = dsIntFilter.queryExecution.executedPlan
assert(planInt.collect {
case WholeStageCodegenExec(FilterExec(_,
ColumnarToRowExec(InputAdapter(_: InMemoryTableScanExec)))) => ()
}.length == 1)
assert(dsIntFilter.collect() === Array(1, 2))
// cache for string type is not supported for InMemoryTableScanExec
val dsString = spark.range(3).map(_.toString).cache()
dsString.count()
val dsStringFilter = dsString.filter(_ == "1")
val planString = dsStringFilter.queryExecution.executedPlan
assert(planString.collect {
case _: ColumnarToRowExec => ()
}.isEmpty)
assert(dsStringFilter.collect() === Array("1"))
}
test("SPARK-19512 codegen for comparing structs is incorrect") {
// this would raise CompileException before the fix
spark.range(10)
.selectExpr("named_struct('a', id) as col1", "named_struct('a', id+2) as col2")
.filter("col1 = col2").count()
// this would raise java.lang.IndexOutOfBoundsException before the fix
spark.range(10)
.selectExpr("named_struct('a', id, 'b', id) as col1",
"named_struct('a',id+2, 'b',id+2) as col2")
.filter("col1 = col2").count()
}
test("SPARK-21441 SortMergeJoin codegen with CodegenFallback expressions should be disabled") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
import testImplicits._
val df1 = Seq((1, 1), (2, 2), (3, 3)).toDF("key", "int")
val df2 = Seq((1, "1"), (2, "2"), (3, "3")).toDF("key", "str")
val df = df1.join(df2, df1("key") === df2("key"))
.filter("int = 2 or reflect('java.lang.Integer', 'valueOf', str) = 1")
.select("int")
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.children(0)
.isInstanceOf[SortMergeJoinExec]).isEmpty)
assert(df.collect() === Array(Row(1), Row(2)))
}
}
def genGroupByCode(caseNum: Int): CodeAndComment = {
val caseExp = (1 to caseNum).map { i =>
s"case when id > $i and id <= ${i + 1} then 1 else 0 end as v$i"
}.toList
val keyExp = List(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as double) as k2",
"cast(id & 1023 as int) as k3")
val ds = spark.range(10)
.selectExpr(keyExp:::caseExp: _*)
.groupBy("k1", "k2", "k3")
.sum()
val plan = ds.queryExecution.executedPlan
val wholeStageCodeGenExec = plan.find(p => p match {
case wp: WholeStageCodegenExec => wp.child match {
case hp: HashAggregateExec if (hp.child.isInstanceOf[ProjectExec]) => true
case _ => false
}
case _ => false
})
assert(wholeStageCodeGenExec.isDefined)
wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
}
def genCode(ds: Dataset[_]): Seq[CodeAndComment] = {
val plan = ds.queryExecution.executedPlan
val wholeStageCodeGenExecs = plan.collect { case p: WholeStageCodegenExec => p }
assert(wholeStageCodeGenExecs.nonEmpty, "WholeStageCodegenExec is expected")
wholeStageCodeGenExecs.map(_.doCodeGen()._2)
}
  // NOTE(review): this test is disabled (`ignore`); confirm whether the bytecode-size
  // thresholds below are still valid before re-enabling.
  ignore("SPARK-21871 check if we can get large code size when compiling too long functions") {
    val codeWithShortFunctions = genGroupByCode(3)
    val (_, ByteCodeStats(maxCodeSize1, _, _)) = CodeGenerator.compile(codeWithShortFunctions)
    assert(maxCodeSize1 < SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
    val codeWithLongFunctions = genGroupByCode(50)
    val (_, ByteCodeStats(maxCodeSize2, _, _)) = CodeGenerator.compile(codeWithLongFunctions)
    assert(maxCodeSize2 > SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
  }
  // NOTE(review): disabled; exercises a very wide (201-column) parquet scan under a
  // lowered huge-method limit, expecting the columnar scan path to still be used.
  ignore("bytecode of batch file scan exceeds the limit of WHOLESTAGE_HUGE_METHOD_LIMIT") {
    import testImplicits._
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val df = spark.range(10).select(Seq.tabulate(201) {i => ('id + i).as(s"c$i")} : _*)
      df.write.mode(SaveMode.Overwrite).parquet(path)
      withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "202",
        SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key -> "2000") {
        // wide table batch scan causes the byte code of codegen exceeds the limit of
        // WHOLESTAGE_HUGE_METHOD_LIMIT
        val df2 = spark.read.parquet(path)
        val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
        assert(fileScan2.asInstanceOf[FileSourceScanExec].supportsColumnar)
        checkAnswer(df2, df)
      }
    }
  }
test("Control splitting consume function by operators with config") {
import testImplicits._
val df = spark.range(10).select(Seq.tabulate(2) {i => ('id + i).as(s"c$i")} : _*)
Seq(true, false).foreach { config =>
withSQLConf(SQLConf.WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR.key -> s"$config") {
val plan = df.queryExecution.executedPlan
val wholeStageCodeGenExec = plan.find(p => p match {
case wp: WholeStageCodegenExec => true
case _ => false
})
assert(wholeStageCodeGenExec.isDefined)
val code = wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
assert(code.body.contains("project_doConsume") == config)
}
}
}
  test("Skip splitting consume function when parameter number exceeds JVM limit") {
    // since every field is nullable we have 2 params for each input column (one for the value
    // and one for the isNull variable); 128 columns -> 256 params, over the JVM's
    // 255-parameter method limit, so splitting must be skipped in that case.
    Seq((128, false), (127, true)).foreach { case (columnNum, hasSplit) =>
      withTempPath { dir =>
        val path = dir.getCanonicalPath
        spark.range(10).select(Seq.tabulate(columnNum) {i => lit(i).as(s"c$i")} : _*)
          .write.mode(SaveMode.Overwrite).parquet(path)
        withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "255",
          SQLConf.WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR.key -> "true") {
          val projection = Seq.tabulate(columnNum)(i => s"c$i + c$i as newC$i")
          val df = spark.read.parquet(path).selectExpr(projection: _*)
          val plan = df.queryExecution.executedPlan
          val wholeStageCodeGenExec = plan.find {
            case _: WholeStageCodegenExec => true
            case _ => false
          }
          assert(wholeStageCodeGenExec.isDefined)
          val code = wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
          // Split function is present iff the parameter count fits the JVM limit.
          assert(code.body.contains("project_doConsume") == hasSplit)
        }
      }
    }
  }
  test("codegen stage IDs should be preserved in transformations after CollapseCodegenStages") {
    // test case adapted from DataFrameSuite to trigger ReuseExchange
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2") {
      val df = spark.range(100)
      val join = df.join(df, "id")
      val plan = join.queryExecution.executedPlan
      // Stage ID 0 would indicate a stage whose ID was lost/reset by the transformation.
      assert(plan.find(p =>
        p.isInstanceOf[WholeStageCodegenExec] &&
          p.asInstanceOf[WholeStageCodegenExec].codegenStageId == 0).isEmpty,
        "codegen stage IDs should be preserved through ReuseExchange")
      checkAnswer(join, df.toDF)
    }
  }
  test("including codegen stage ID in generated class name should not regress codegen caching") {
    import testImplicits._
    withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME.key -> "true") {
      // the same query run twice should produce identical code, which would imply a hit in
      // the generated code cache.
      val ds1 = spark.range(3).select('id + 2)
      val code1 = genCode(ds1)
      val ds2 = spark.range(3).select('id + 2)
      val code2 = genCode(ds2) // same query shape as above, deliberately
      assert(code1 == code2, "Should produce same code")
    }
  }
  // NOTE(review): disabled; stacks 70 aggregations to stress codegen at runtime.
  ignore("SPARK-23598: Codegen working for lots of aggregation operations without runtime errors") {
    withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
      var df = Seq((8, "bat"), (15, "mouse"), (5, "horse")).toDF("age", "name")
      for (i <- 0 until 70) {
        df = df.groupBy("name").agg(avg("age").alias("age"))
      }
      assert(df.limit(1).collect() === Array(Row("bat", 8.0)))
    }
  }
  test("SPARK-25767: Lazy evaluated stream of expressions handled correctly") {
    // Join keys deliberately passed as a lazy Stream to exercise the regression.
    val a = Seq(1).toDF("key")
    val b = Seq((1, "a")).toDF("key", "value")
    val c = Seq(1).toDF("key")
    val ab = a.join(b, Stream("key"), "left")
    val abc = ab.join(c, Seq("key"), "left")
    checkAnswer(abc, Row(1, "a"))
  }
  test("SPARK-26680: Stream in groupBy does not cause StackOverflowError") {
    // Grouping columns passed as a lazy Stream to exercise the regression.
    val groupByCols = Stream(col("key"))
    val df = Seq((1, 2), (2, 3), (1, 3)).toDF("key", "value")
      .groupBy(groupByCols: _*)
      .max("value")
    checkAnswer(df, Seq(Row(1, 3), Row(2, 3)))
  }
  test("SPARK-26572: evaluate non-deterministic expressions for aggregate results") {
    withSQLConf(
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
      SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
      val baseTable = Seq(1, 1).toDF("idx")
      // BroadcastHashJoinExec with a HashAggregateExec child containing no aggregate expressions
      val distinctWithId = baseTable.distinct().withColumn("id", monotonically_increasing_id())
        .join(baseTable, "idx")
      assert(distinctWithId.queryExecution.executedPlan.collectFirst {
        case WholeStageCodegenExec(
          ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: HashAggregateExec, _, _))) => true
      }.isDefined)
      // The non-deterministic id must be evaluated once per aggregate result (both rows see 0).
      checkAnswer(distinctWithId, Seq(Row(1, 0), Row(1, 0)))
      // BroadcastHashJoinExec with a HashAggregateExec child containing a Final mode aggregate
      // expression
      val groupByWithId =
        baseTable.groupBy("idx").sum().withColumn("id", monotonically_increasing_id())
        .join(baseTable, "idx")
      assert(groupByWithId.queryExecution.executedPlan.collectFirst {
        case WholeStageCodegenExec(
          ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: HashAggregateExec, _, _))) => true
      }.isDefined)
      checkAnswer(groupByWithId, Seq(Row(1, 2, 0), Row(1, 2, 0)))
    }
  }
  test("SPARK-28520: WholeStageCodegen does not work properly for LocalTableScanExec") {
    // Case1: LocalTableScanExec is the root of a query plan tree.
    // In this case, WholeStageCodegenExec should not be inserted
    // as the direct parent of LocalTableScanExec.
    val df = Seq(1, 2, 3).toDF
    val rootOfExecutedPlan = df.queryExecution.executedPlan
    // Ensure WholeStageCodegenExec is not inserted and
    // LocalTableScanExec is still the root.
    assert(rootOfExecutedPlan.isInstanceOf[LocalTableScanExec],
      "LocalTableScanExec should be still the root.")
    // Case2: The parent of a LocalTableScanExec supports WholeStageCodegen.
    // In this case, the LocalTableScanExec should be within a WholeStageCodegen domain
    // and no more InputAdapter is inserted as the direct parent of the LocalTableScanExec.
    val aggregatedDF = Seq(1, 2, 3).toDF.groupBy("value").sum()
    val executedPlan = aggregatedDF.queryExecution.executedPlan
    // HashAggregateExec supports WholeStageCodegen and it's the parent of
    // LocalTableScanExec so LocalTableScanExec should be within a WholeStageCodegen domain.
    assert(
      executedPlan.find {
        case WholeStageCodegenExec(
          HashAggregateExec(_, _, _, _, _, _, _: LocalTableScanExec)) => true
        case _ => false
      }.isDefined,
      "LocalTableScanExec should be within a WholeStageCodegen domain.")
  }
  test("Give up splitting aggregate code if a parameter length goes over the limit") {
    withSQLConf(
      SQLConf.CODEGEN_SPLIT_AGGREGATE_FUNC.key -> "true",
      SQLConf.CODEGEN_METHOD_SPLIT_THRESHOLD.key -> "1",
      // Force every parameter list to be "too long" so splitting always fails.
      "spark.sql.CodeGenerator.validParamLength" -> "0") {
      withTable("t") {
        val expectedErrMsg = "Failed to split aggregate code into small functions"
        Seq(
          // Test case without keys
          "SELECT AVG(v) FROM VALUES(1) t(v)",
          // Test case with keys
          "SELECT k, AVG(v) FROM VALUES((1, 1)) t(k, v) GROUP BY k").foreach { query =>
          val errMsg = intercept[IllegalStateException] {
            sql(query).collect
          }.getMessage
          assert(errMsg.contains(expectedErrMsg))
        }
      }
    }
  }
  test("Give up splitting subexpression code if a parameter length goes over the limit") {
    withSQLConf(
      SQLConf.CODEGEN_SPLIT_AGGREGATE_FUNC.key -> "false",
      SQLConf.CODEGEN_METHOD_SPLIT_THRESHOLD.key -> "1",
      // Force every parameter list to be "too long" so splitting always fails.
      "spark.sql.CodeGenerator.validParamLength" -> "0") {
      withTable("t") {
        val expectedErrMsg = "Failed to split subexpression code into small functions"
        Seq(
          // Test case without keys
          "SELECT AVG(a + b), SUM(a + b + c) FROM VALUES((1, 1, 1)) t(a, b, c)",
          // Test case with keys
          "SELECT k, AVG(a + b), SUM(a + b + c) FROM VALUES((1, 1, 1, 1)) t(k, a, b, c) " +
            "GROUP BY k").foreach { query =>
          val e = intercept[Exception] {
            sql(query).collect
          }
          assert(e.isInstanceOf[IllegalStateException])
          assert(e.getMessage.contains(expectedErrMsg))
        }
      }
    }
  }
}
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala | Scala | apache-2.0 | 32,852 |
package frameless
import org.scalacheck.Prop
import org.scalacheck.Prop._
import scala.reflect.ClassTag
class TakeTests extends TypedDatasetSuite {
  test("take") {
    // take(n) on a TypedDataset must agree with Vector.take(n) for every n >= 0.
    def prop[A: TypedEncoder](n: Int, data: Vector[A]): Prop =
      (n >= 0) ==> (TypedDataset.create(data).take(n).run().toVector =? data.take(n))
    // Arrays use reference equality, so compare element-wise with sameElements
    // instead of relying on =? / ==.
    def propArray[A: TypedEncoder: ClassTag](n: Int, data: Vector[X1[Array[A]]]): Prop =
      (n >= 0) ==> {
        Prop {
          TypedDataset.create(data).take(n).run().toVector.zip(data.take(n)).forall {
            case (X1(l), X1(r)) => l sameElements r
          }
        }
      }
    check(forAll(prop[Int] _))
    check(forAll(prop[String] _))
    check(forAll(propArray[Int] _))
    check(forAll(propArray[String] _))
    check(forAll(propArray[Byte] _))
  }
}
| adelbertc/frameless | dataset/src/test/scala/frameless/forward/TakeTests.scala | Scala | apache-2.0 | 794 |
package example
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class implements a ScalaTest test suite for the methods in object
* `Lists` that need to be implemented as part of this assignment. A test
* suite is simply a collection of individual tests for some specific
* component of a program.
*
* A test suite is created by defining a class which extends the type
* `org.scalatest.FunSuite`. When running ScalaTest, it will automatically
* find this class and execute all of its tests.
*
* Adding the `@RunWith` annotation enables the test suite to be executed
* inside eclipse using the built-in JUnit test runner.
*
* You have two options for running this test suite:
*
* - Start the sbt console and run the "test" command
* - Right-click this file in eclipse and chose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class ListsSuite extends FunSuite {
  /**
   * Tests are written using the `test` operator which takes two arguments:
   *
   * - A description of the test. This description has to be unique, no two
   *   tests can have the same description.
   * - The test body, a piece of Scala code that implements the test
   *
   * The most common way to implement a test body is using the method `assert`
   * which tests that its argument evaluates to `true`. So one of the simplest
   * successful tests is the following:
   */
  test("one plus one is two")(assert(1 + 1 == 2))
  /**
   * In Scala, it is allowed to pass an argument to a method using the block
   * syntax, i.e. `{ argument }` instead of parentheses `(argument)`.
   *
   * This allows tests to be written in a more readable manner:
   */
  test("one plus one is three?") {
    assert(1 + 1 == 2) // Fixed: one plus one is two, so the test now passes.
  }
  /**
   * A plain `assert` only tells you that a test failed, not why. ScalaTest's
   * special equality operator `===` additionally reports the two unequal
   * values on failure, e.g.:
   *
   * {{{
   * [info] 2 did not equal 3 (ListsSuite.scala:67)
   * }}}
   *
   * We recommend to always use the `===` equality operator when writing tests.
   */
  test("details why one plus one is not three") {
    assert(1 + 1 === 2) // Fixed: asserts the true value; `===` reports details on mismatch.
  }
  /**
   * In order to test the exceptional behavior of a method, ScalaTest offers
   * the `intercept` operation: the test passes only if the body throws the
   * given exception type.
   *
   * In the following example, we test the fact that the method `intNotZero`
   * throws an `IllegalArgumentException` if its argument is `0`.
   */
  test("intNotZero throws an exception if its argument is 0") {
    intercept[IllegalArgumentException] {
      intNotZero(0)
    }
  }
  /** Returns `x` unchanged; rejects zero with an IllegalArgumentException. */
  def intNotZero(x: Int): Int = {
    if (x == 0) throw new IllegalArgumentException("zero is not allowed")
    else x
  }
  /**
   * Now we finally write some tests for the list functions that have to be
   * implemented for this assignment. We first import all members of the
   * `Lists` object.
   */
  import Lists._
  /**
   * We only provide two very basic tests for you. Write more tests to make
   * sure your `sum` and `max` methods work as expected.
   *
   * In particular, write tests for corner cases: negative numbers, zeros,
   * empty lists, lists with repeated elements, etc.
   *
   * It is allowed to have multiple `assert` statements inside one test,
   * however it is recommended to write an individual `test` statement for
   * every tested aspect of a method.
   */
  test("sum of a few numbers") {
    assert(sum(List(1, 2, 0)) === 3)
  }
  test("max of a few numbers") {
    assert(max(List(3, 7, 2)) === 7)
  }
}
| hsinhuang/codebase | reactive-002/example/src/test/scala/example/ListsSuite.scala | Scala | gpl-2.0 | 4,030 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.execution
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
import scala.reflect.BeanProperty
import org.apache.hadoop.io.Writable
import org.apache.spark.rdd.{RDD, UnionRDD}
import org.apache.spark.storage.StorageLevel
import shark.{SharkConfVars, SharkEnv}
import shark.execution.serialization.{OperatorSerializationWrapper, JavaSerializer}
import shark.memstore2._
/**
* Cache the RDD and force evaluate it (so the cache is filled).
*/
/**
 * Terminal operator that materializes query output into Shark's memory store
 * (Spark block manager or off-heap storage) as columnar TablePartitions, and
 * registers/updates the corresponding table metadata.
 */
class MemoryStoreSinkOperator extends TerminalOperator {
  // The initial capacity for ArrayLists used to construct the columnar storage. If -1,
  // the ColumnarSerde will obtain the partition size from a Configuration during execution
  // initialization (see ColumnarSerde#initialize()).
  @BeanProperty var partitionSize: Int = _
  // If true, columnar storage will use compression.
  @BeanProperty var shouldCompress: Boolean = _
  // For CTAS, this is the name of the table that is created. For INSERTs, this is the name of
  // the table that is modified.
  @BeanProperty var tableName: String = _
  // The Hive metastore DB that the `tableName` table belongs to.
  @BeanProperty var databaseName: String = _
  // Used only for commands that target Hive partitions. The partition key is a set of unique values
  // for the table's partitioning columns and identifies the partition (represented by an RDD)
  // that will be created or modified by the INSERT command being handled.
  @BeanProperty var hivePartitionKeyOpt: Option[String] = _
  // The memory storage used to store the output RDD - e.g., CacheType.HEAP refers to Spark's
  // block manager.
  @transient var cacheMode: CacheType.CacheType = _
  // Whether to compose a UnionRDD from the output RDD and a previous RDD. For example, for an
  // INSERT INTO <tableName> command, the previous RDD will contain the contents of the 'tableName'.
  @transient var isInsertInto: Boolean = _
  // The number of columns in the schema for the table corresponding to 'tableName'. Used only
  // to create an OffHeapTableWriter, if off-heap storage is used.
  @transient var numColumns: Int = _
  // Reads columnar-storage settings from the Shark configuration on the driver.
  override def initializeOnMaster() {
    super.initializeOnMaster()
    partitionSize = SharkConfVars.getIntVar(localHconf, SharkConfVars.COLUMN_BUILDER_PARTITION_SIZE)
    shouldCompress = SharkConfVars.getBoolVar(localHconf, SharkConfVars.COLUMNAR_COMPRESSION)
  }
  // Propagates the driver-side settings into the executor-local Hadoop conf so
  // ColumnarSerDe picks them up during serialization.
  override def initializeOnSlave() {
    super.initializeOnSlave()
    localHconf.setInt(SharkConfVars.COLUMN_BUILDER_PARTITION_SIZE.varname, partitionSize)
    localHconf.setBoolean(SharkConfVars.COLUMNAR_COMPRESSION.varname, shouldCompress)
  }
  /**
   * Serializes the parent operator's rows into columnar TablePartitions, forces
   * their materialization into the configured store (block manager or off-heap),
   * updates the table/partition metadata and stats, and returns the output RDD.
   */
  override def execute(): RDD[_] = {
    val inputRdd = if (parentOperators.size == 1) executeParents().head._2 else null
    // Accumulates (partitionIndex, stats) pairs on the driver as tasks complete.
    val statsAcc = SharkEnv.sc.accumulableCollection(ArrayBuffer[(Int, TablePartitionStats)]())
    val op = OperatorSerializationWrapper(this)
    val tableKey = MemoryMetadataManager.makeTableKey(databaseName, tableName)
    val offHeapWriter: OffHeapTableWriter =
      if (cacheMode == CacheType.OFFHEAP) {
        val offHeapClient = OffHeapStorageClient.client
        if (!isInsertInto && offHeapClient.tablePartitionExists(tableKey, hivePartitionKeyOpt)) {
          // For INSERT OVERWRITE, delete the old table or Hive partition directory, if it exists.
          offHeapClient.dropTablePartition(tableKey, hivePartitionKeyOpt)
        }
        // Use an additional row to store metadata (e.g. number of rows in each partition).
        offHeapClient.createTablePartitionWriter(tableKey, hivePartitionKeyOpt, numColumns + 1)
      } else {
        null
      }
    // Put all rows of the table into a set of TablePartition's. Each partition contains
    // only one TablePartition object.
    var outputRDD: RDD[TablePartition] = inputRdd.mapPartitionsWithIndex { case (part, iter) =>
      op.initializeOnSlave()
      val serde = new ColumnarSerDe
      serde.initialize(op.localHconf, op.localHiveOp.getConf.getTableInfo.getProperties)
      // Serialize each row into the builder object.
      // ColumnarSerDe will return a TablePartitionBuilder.
      var builder: Writable = null
      iter.foreach { row =>
        builder = serde.serialize(row.asInstanceOf[AnyRef], op.objectInspector)
      }
      if (builder == null) {
        // Empty partition.
        statsAcc += Tuple2(part, new TablePartitionStats(Array(), 0))
        Iterator(new TablePartition(0, Array()))
      } else {
        statsAcc += Tuple2(part, builder.asInstanceOf[TablePartitionBuilder].stats)
        Iterator(builder.asInstanceOf[TablePartitionBuilder].build)
      }
    }
    if (offHeapWriter != null) {
      // Put the table in off-heap storage.
      op.logInfo("Putting RDD for %s.%s in off-heap storage".format(databaseName, tableName))
      offHeapWriter.createTable()
      outputRDD = outputRDD.mapPartitionsWithIndex { case(part, iter) =>
        val partition = iter.next()
        partition.toOffHeap.zipWithIndex.foreach { case(buf, column) =>
          offHeapWriter.writeColumnPartition(column, part, buf)
        }
        Iterator(partition)
      }
      // Force evaluate so the data gets put into off-heap storage.
      outputRDD.context.runJob(
        outputRDD, (iter: Iterator[TablePartition]) => iter.foreach(_ => Unit))
    } else {
      // Run a job on the RDD that contains the query output to force the data into the memory
      // store. The statistics will also be collected by 'statsAcc' during job execution.
      if (cacheMode == CacheType.MEMORY) {
        outputRDD.persist(StorageLevel.MEMORY_AND_DISK)
      } else if (cacheMode == CacheType.MEMORY_ONLY) {
        outputRDD.persist(StorageLevel.MEMORY_ONLY)
      }
      outputRDD.context.runJob(
        outputRDD, (iter: Iterator[TablePartition]) => iter.foreach(_ => Unit))
    }
    // Put the table in Spark block manager or off-heap storage.
    op.logInfo("Putting %sRDD for %s.%s in %s store".format(
      if (isInsertInto) "Union" else "",
      databaseName,
      tableName,
      if (cacheMode == CacheType.NONE) "disk" else cacheMode.toString))
    // Record stats in the appropriate metadata entry and return the final map.
    val tableStats =
      if (cacheMode == CacheType.OFFHEAP) {
        offHeapWriter.setStats(statsAcc.value.toMap)
        statsAcc.value.toMap
      } else {
        val isHivePartitioned = SharkEnv.memoryMetadataManager.isHivePartitioned(
          databaseName, tableName)
        if (isHivePartitioned) {
          val partitionedTable = SharkEnv.memoryMetadataManager.getPartitionedTable(
            databaseName, tableName).get
          val hivePartitionKey = hivePartitionKeyOpt.get
          outputRDD.setName("%s.%s(%s)".format(databaseName, tableName, hivePartitionKey))
          if (isInsertInto) {
            // An RDD for the Hive partition already exists, so update its metadata entry in
            // 'partitionedTable'.
            partitionedTable.updatePartition(hivePartitionKey, outputRDD, statsAcc.value)
          } else {
            // This is a new Hive-partition. Add a new metadata entry in 'partitionedTable'.
            partitionedTable.putPartition(hivePartitionKey, outputRDD, statsAcc.value.toMap)
          }
          // Stats should be updated at this point.
          partitionedTable.getStats(hivePartitionKey).get
        } else {
          outputRDD.setName(tableName)
          // Create a new MemoryTable entry if one doesn't exist (i.e., this operator is for a CTAS).
          val memoryTable = SharkEnv.memoryMetadataManager.getMemoryTable(databaseName, tableName)
            .getOrElse(SharkEnv.memoryMetadataManager.createMemoryTable(
              databaseName, tableName, cacheMode))
          if (isInsertInto) {
            // Ok, an off-heap table should manage stats for each rdd, and never union the maps.
            memoryTable.update(outputRDD, statsAcc.value)
          } else {
            memoryTable.put(outputRDD, statsAcc.value.toMap)
          }
          memoryTable.getStats.get
        }
      }
    if (SharkConfVars.getBoolVar(localHconf, SharkConfVars.MAP_PRUNING_PRINT_DEBUG)) {
      tableStats.foreach { case(index, tablePartitionStats) =>
        println("Partition " + index + " " + tablePartitionStats.toString)
      }
    }
    return outputRDD
  }
  // Rows are never processed per-partition by this sink; all work happens in execute().
  // NOTE(review): the message references the old class name "CacheSinkOperator" —
  // consider updating it to "MemoryStoreSinkOperator".
  override def processPartition(split: Int, iter: Iterator[_]): Iterator[_] =
    throw new UnsupportedOperationException("CacheSinkOperator.processPartition()")
}
| lzshlzsh/shark | src/main/scala/shark/execution/MemoryStoreSinkOperator.scala | Scala | apache-2.0 | 9,105 |
/*
* Copyright 2006 - 2013
* Stefan Balev <stefan.balev@graphstream-project.org>
* Julien Baudry <julien.baudry@graphstream-project.org>
* Antoine Dutot <antoine.dutot@graphstream-project.org>
* Yoann Pigné <yoann.pigne@graphstream-project.org>
* Guilhelm Savin <guilhelm.savin@graphstream-project.org>
*
* This file is part of GraphStream <http://graphstream-project.org>.
*
* GraphStream is a library whose purpose is to handle static or dynamic
* graph, create them from scratch, file or any source and display them.
*
* This program is free software distributed under the terms of two licenses, the
* CeCILL-C license that fits European law, and the GNU Lesser General Public
* License. You can use, modify and/ or redistribute the software under the terms
* of the CeCILL-C license as circulated by CEA, CNRS and INRIA at the following
* URL <http://www.cecill.info> or under the terms of the GNU LGPL as published by
* the Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL-C and LGPL licenses and that you accept their terms.
*/
package org.graphstream.ui.j2dviewer
import org.graphstream.ui.geom.Point3
import java.util.ArrayList
import scala.collection.mutable.HashSet
import scala.collection.JavaConversions._
import scala.math._
import org.graphstream.graph.Node
import org.graphstream.ui.graphicGraph.stylesheet.Selector.Type._
import org.graphstream.ui.graphicGraph.{GraphicEdge, GraphicElement, GraphicGraph, GraphicNode, GraphicSprite}
import org.graphstream.ui.graphicGraph.stylesheet.{Style, Values}
import org.graphstream.ui.graphicGraph.stylesheet.StyleConstants._
import org.graphstream.ui.util.CubicCurve
import org.graphstream.ui.swingViewer.util.GraphMetrics
import org.graphstream.ui.geom.Point2
import org.graphstream.ui.geom.Vector2
import org.graphstream.ui.j2dviewer.renderer.{Skeleton, AreaSkeleton, ConnectorSkeleton}
import scala.math._
import org.graphstream.ui.graphicGraph.stylesheet.StyleConstants
/**
* Define a view of the graph or a part of the graph.
*
* The camera can be seen as an element in charge of projecting the graph elements in graph units
* (GU) into rendering space units, often in pixels. It defines the transformation, an affine
* matrix, to pass from the first to the second (in fact its the back-end that does it).
*
* It also contains the graph metrics. This is a set of values that give the overall dimensions of
* the graph in graph units, as well as the view port, the area on the screen (or any rendering
* surface) that will receive the results in pixels (or any rendering units). The two mains methods
* for this operation are [[Camera.pushView(Graphics2D,GraphicGraph)]] and [[Camera.popView()]].
*
* The user of the camera must set both the view port and the graph bounds in order for the
* camera to correctly project the graph view (the Renderer does that before using the Camera,
* at each frame). The camera model is as follows: the camera defines a center at which it
* always points. It can zoom on the graph (as if the camera angle of view was changing), pan in any
* direction by moving its center of view and rotate along the axe going from the center to the
* camera position (camera can rotate around two axes in 3D, but this is a 2D camera).
*
* There are two modes:
* - an "auto-fit" mode where the camera always show the whole graph even if it changes in size, by
* automatically changing the center and zoom values,
* - and a "user" mode where the camera center (looked-at point), zoom and panning are specified and
* will not be modified in the bounds of the graph change.
*
* The camera is also able to answer questions like: "what element is visible actually?", or "on
* what element is the mouse cursor actually?".
*
* The camera is also able to compute sprite positions according to their attachment, as well as
* maintaining a list of all elements out of the view, so that it is not needed to render them.
*/
class Camera(protected val graph:GraphicGraph) extends org.graphstream.ui.swingViewer.util.Camera {
    // Attribute
    /** Information on the graph overall dimension and position. */
    val metrics = new org.graphstream.ui.swingViewer.util.GraphMetrics
    /** Automatic centering of the view. */
    protected var autoFit = true
    /** The camera center of view. */
    protected val center = new Point3
    /** The camera zoom. */
    protected var zoom:Double = 1
    /** The rotation angle (along an axis perpendicular to the view). */
    protected var rotation:Double = 0
    /** Padding around the graph. */
    protected var padding = new Values(Units.GU, 0, 0, 0);
    /** The rendering back-end. */
    protected var bck:Backend = null
    /** Which node is visible. This allows to mark invisible nodes to fasten visibility tests for
      * nodes, attached sprites and edges. The visibility test is heavy, and we often need to test
      * for nodes visibility. This allows to do it only once per rendering step. Hence the storage
      * of the invisible nodes here. */
    protected val nodeInvisible = new HashSet[String]
    /** The graph view port, if any. The graph view port is a view inside the graph space. It allows
      * to compute the view according to a specified area of the graph space instead of the graph
      * dimensions. */
    protected var gviewport:Array[Double] = null
    // Access
    /** The graph metrics maintained by this camera. */
    def getMetrics() = metrics
    /** The view center (a point in graph units). */
    def viewCenter:Point3 = center
    def getViewCenter:Point3 = viewCenter
    /** The visible portion of the graph.
      * @return A real for which value 1 means the graph is fully visible and uses the whole
      * view port. */
    def viewPercent:Double = zoom
    def getViewPercent:Double = viewPercent
    /** The rotation angle in degrees.
      * @return The rotation angle in degrees. */
    def viewRotation:Double = rotation
    def getViewRotation:Double = viewRotation
    /** Diagonal of the graph bounding box, in graph units. */
    def getGraphDimension():Double = metrics.getDiagonal
override def toString():String = {
var builder = new StringBuilder( "Camera :%n".format() )
builder.append( " autoFit = %b%n".format( autoFit ) )
builder.append( " center = %s%n".format( center ) )
builder.append( " rotation = %f%n".format( rotation ) )
builder.append( " zoom = %f%n".format( zoom ) )
builder.append( " padding = %s%n".format( padding ) )
builder.append( " metrics = %s%n".format( metrics ) )
builder.toString
}
    /** True if the element is visible by the camera view (not out of view). The method used is
      * to transform the center of the element (which is always in graph units) using the camera
      * actual transformation to put it in pixel units. Then to look in the style sheet the size of
      * the element and to test if its enclosing rectangle intersects the view port. For edges, its
      * two nodes are used. If auto fitting is on, all elements are necessarily visible, this
      * method takes this in consideration.
      * @param element The element to test.
      * @return True if the element is visible and therefore must be rendered. */
    def isVisible(element:GraphicElement):Boolean = {
        if(autoFit) {
            // With auto-fit everything is in view; only hidden state/style can veto.
            ((! element.hidden) && (element.style.getVisibilityMode() != StyleConstants.VisibilityMode.HIDDEN))
        } else {
            if(styleVisible(element)) element.getSelectorType match {
                case NODE => ! nodeInvisible.contains(element.getId)
                case EDGE => isEdgeVisible(element.asInstanceOf[GraphicEdge])
                case SPRITE => isSpriteVisible(element.asInstanceOf[GraphicSprite])
                case _ => false
            } else false
        }
    }
    /** Return the given point in pixels converted in graph units (GU) using the inverse
      * transformation of the current projection matrix. The inverse matrix is computed only
      * once each time a new projection matrix is created.
      * @param x The source point abscissa in pixels.
      * @param y The source point ordinate in pixels.
      * @return The resulting points in graph units. */
    def transformPxToGu(x:Double, y:Double):Point3 = bck.inverseTransform(x, y, 0)
    /** Transform a point in graph units into pixels.
      * NOTE(review): the `z` argument is currently ignored (0 is passed to the
      * back-end) — presumably fine for this 2D camera, but confirm.
      * @return The transformed point. */
    def transformGuToPx(x:Double, y:Double, z:Double):Point3 = bck.transform(x, y, 0)
    /** Search for the first node or sprite (in that order) that contains the point at coordinates
      * (x, y).
      * NOTE(review): the implementation scans all nodes then all sprites without
      * stopping at the first hit, so it actually returns the LAST matching sprite
      * if any, else the LAST matching node — confirm whether "first" is intended.
      * @param graph The graph to search for.
      * @param x The point abscissa.
      * @param y The point ordinate.
      * @return The first node or sprite at the given coordinates or null if nothing found. */
    def findNodeOrSpriteAt(graph:GraphicGraph, x:Double, y:Double):GraphicElement = {
        var ge:GraphicElement = null
        graph.getEachNode.foreach { n =>
            val node = n.asInstanceOf[GraphicNode]
            if( nodeContains( node, x, y ) )
                ge = node
        }
        graph.spriteSet.foreach { sprite =>
            if( spriteContains( sprite, x, y ) )
                ge = sprite
        }
        ge
    }
    /** Search for all the nodes and sprites contained inside the rectangle (x1,y1)-(x2,y2).
      * @param graph The graph to search for.
      * @param x1 The rectangle lowest point abscissa.
      * @param y1 The rectangle lowest point ordinate.
      * @param x2 The rectangle highest point abscissa.
      * @param y2 The rectangle highest point ordinate.
      * @return The set of sprites and nodes in the given rectangle. */
    def allNodesOrSpritesIn(graph:GraphicGraph, x1:Double, y1:Double, x2:Double, y2:Double):ArrayList[GraphicElement] = {
        val elts = new ArrayList[GraphicElement]
        graph.getEachNode.foreach { node:Node =>
            if(isNodeIn(node.asInstanceOf[GraphicNode], x1, y1, x2, y2))
                elts.add( node.asInstanceOf[GraphicNode])
        }
        graph.spriteSet.foreach { sprite:GraphicSprite =>
            if(isSpriteIn(sprite, x1, y1, x2, y2))
                elts.add(sprite)
        }
        elts
    }
/** Compute the real position of a sprite according to its eventual attachment in graph units.
  * Dispatches to the node-attached, edge-attached or free positioning helper.
  * @param sprite The sprite.
  * @param pos Receiver for the sprite 2D position, can be null.
  * @param units The units in which the position must be computed (the sprite already contains units).
  * @return The same instance as the one given by parameter pos or a new one if pos was null,
  *         containing the computed position in the given units. */
def getSpritePosition(sprite:GraphicSprite, pos:Point3, units:Units):Point3 = {
    if( sprite.isAttachedToNode() )      getSpritePositionNode(sprite, pos, units)
    else if( sprite.isAttachedToEdge() ) getSpritePositionEdge(sprite, pos, units)
    else                                 getSpritePositionFree(sprite, pos, units)
}
/** The explicit graph viewport as (minx, miny, maxx, maxy) in graph units, or null if
  * none is installed. */
def graphViewport = gviewport
// Command
/** Install the rendering backend used for all coordinate transformations. */
def setBackend(backend:Backend) { bck = backend }
/** Force the visible area of the graph, in graph units. */
def setGraphViewport(minx:Double, miny:Double, maxx:Double, maxy:Double) {
    gviewport = Array( minx, miny, maxx, maxy )
}
/** Remove any explicit graph viewport; the whole graph bounds are used again. */
def removeGraphViewport() { gviewport = null }
/** Restore the auto-fit mode and cancel any view rotation. */
def resetView() {
    setAutoFitView(true)
    setViewRotation(0)
}
/** Set the camera view on the backend and backup the previous transform.
  * Call {@link #popView()} to restore the saved transform. You can only push one time
  * the view. Also refreshes the padding and the per-node visibility cache.
  * @param graph The graphic graph (used to check element visibility). */
def pushView(graph:GraphicGraph) {
    bck.pushTransform
    setPadding(graph)
    if(autoFit)
         autoFitView
    else userView
    checkVisibility(graph)
}
/** Restore the transform that was saved by {@link #pushView(GraphicGraph)}. */
def popView() { bck.popTransform }
/** Compute a transformation matrix that pass from graph units (user space) to pixel units
  * (device space) so that the whole graph is visible, and install it on the backend.
  * Also resets zoom to 1, recenters the view on the graph center and updates the
  * pixel-to-GU ratio and the visible bounds in the metrics. */
protected def autoFitView() {
    var sx = 0.0; var sy = 0.0
    var tx = 0.0; var ty = 0.0
    val padXgu = paddingXgu * 2
    val padYgu = paddingYgu * 2
    val padXpx = paddingXpx * 2
    val padYpx = paddingYpx * 2
    // Scale factors so that the padded graph fits the padded viewport.
    sx = (metrics.viewport(2) - padXpx) / (metrics.size.data(0) + padXgu) // Ratio along X
    sy = (metrics.viewport(3) - padYpx) / (metrics.size.data(1) + padYgu) // Ratio along Y
    tx = metrics.lo.x + (metrics.size.data(0) / 2)                       // Center of graph in X
    ty = metrics.lo.y + (metrics.size.data(1) / 2)                       // Center of graph in Y
    // Keep the aspect ratio by using the least of the two scale factors.
    if(sx > sy)     // The least ratio.
         sx = sy
    else sy = sx
    bck.beginTransform
    bck.setIdentity
    bck.translate(metrics.viewport(2)/2,
                  metrics.viewport(3)/2, 0)   // 4. Place the whole result at the center of the view port.
    if(rotation != 0)
        bck.rotate(rotation/(180.0/Pi), 0, 0, 1) // 3. Eventually apply a Z axis rotation.
    bck.scale(sx, -sy, 0)                     // 2. Scale the graph to pixels. Scale -y since we reverse the view (top-left to bottom-left).
    bck.translate(-tx, -ty, 0)                // 1. Move the graph so that its real center is at (0,0).
    bck.endTransform
    zoom = 1
    center.set(tx, ty, 0)
    metrics.ratioPx2Gu = sx
    metrics.loVisible.copy(metrics.lo)
    metrics.hiVisible.copy(metrics.hi)
}
/** Compute a transformation that pass from graph units (user space) to a pixel units
  * (device space) so that the view (zoom and center) requested by the user is produced,
  * and install it on the backend. Also updates the pixel-to-GU ratio and the visible
  * bounds in the metrics. */
protected def userView() {
    var sx = 0.0; var sy = 0.0
    var tx = 0.0; var ty = 0.0
    val padXgu = paddingXgu * 2
    val padYgu = paddingYgu * 2
    val padXpx = paddingXpx * 2
    val padYpx = paddingYpx * 2
    // Use the explicit graph viewport dimensions when installed, else the graph bounds.
    val gw     = if(gviewport ne null) gviewport(2)-gviewport(0) else metrics.size.data(0)
    val gh     = if(gviewport ne null) gviewport(3)-gviewport(1) else metrics.size.data(1)
    // The zoom factor shrinks (zoom > 1) or enlarges (zoom < 1) the scale.
    sx = (metrics.viewport(2) - padXpx) / ((gw + padXgu) * zoom)
    sy = (metrics.viewport(3) - padYpx) / ((gh + padYgu) * zoom)
    tx = center.x
    ty = center.y
    // Keep the aspect ratio by using the least of the two scale factors.
    if(sx > sy)     // The least ratio.
         sx = sy;
    else sy = sx;
    bck.beginTransform
    bck.setIdentity
    bck.translate(metrics.viewport(2)/2,
                  metrics.viewport(3)/2, 0)   // 4. Place the whole result at the center of the view port.
    if(rotation != 0)
        bck.rotate(rotation/(180.0/Pi), 0, 0, 1) // 3. Eventually apply a rotation.
    bck.scale(sx, -sy, 0)                     // 2. Scale the graph to pixels. Scale -y since we reverse the view (top-left to bottom-left).
    bck.translate(-tx, -ty, 0)                // 1. Move the graph so that the give center is at (0,0).
    bck.endTransform
    metrics.ratioPx2Gu = sx
    // Visible bounds: half the viewport converted back to graph units around the center.
    val w2 = (metrics.viewport(2) / sx) / 2f
    val h2 = (metrics.viewport(3) / sx) / 2f
    metrics.loVisible.set(center.x-w2, center.y-h2)
    metrics.hiVisible.set(center.x+w2, center.y+h2)
}
/** Enable or disable automatic adjustment of the view to see the entire graph.
  * When leaving auto-fit mode, the user view is initialized to match what auto-fit
  * showed: zoom 1 and the center placed at the middle of the graph bounds.
  * @param on If true, automatic adjustment is enabled. */
def setAutoFitView(on:Boolean) {
    if(autoFit && (! on)) {
        // We go from autoFit to user view, ensure the current center is at the
        // middle of the graph, and the zoom is at one.
        zoom = 1
        center.set(metrics.lo.x + (metrics.size.data(0) / 2),
                   metrics.lo.y + (metrics.size.data(1) / 2), 0);
    }
    autoFit = on
}
/** Set the center of the view (the looked at point). As the viewer is only 2D, the z value is
  * not required. Disables auto-fit as a side effect.
  * @param x The new position abscissa.
  * @param y The new position ordinate. */
def setViewCenter(x:Double, y:Double, z:Double) {
    setAutoFitView(false)
    center.set(x, y, z)
    graph.graphChanged = true
}
/** Set the zoom (or percent of the graph visible), 1 means the graph is fully visible.
  * Unlike setViewPercent, this setter does NOT disable auto-fit.
  * @param z The zoom. */
def viewPercent_=(z:Double) { zoom = z; graph.graphChanged = true }
/** Set the zoom and disable auto-fit.
  * @param z The zoom, 1 means the graph is fully visible. */
def setViewPercent(z:Double) {
    setAutoFitView(false)
    zoom = z
    graph.graphChanged = true
}
/** Set the rotation angle around the center.
  * @param angle The rotation angle in degrees. */
def viewRotation_=(angle:Double) { rotation = angle; graph.graphChanged = true }
def setViewRotation(angle:Double) { rotation = angle; graph.graphChanged = true }
/** Set the output view port position and size in pixels.
  * @param x The view port abscissa in pixels.
  * @param y The view port ordinate in pixels.
  * @param viewportWidth The width in pixels of the view port.
  * @param viewportHeight The height in pixels of the view port. */
def setViewport(x:Double, y:Double, viewportWidth:Double, viewportHeight:Double) { metrics.setViewport(x, y, viewportWidth, viewportHeight) }
/** Set the graphic graph bounds (the lowest and highest points).
  * @param minx Lowest abscissa.
  * @param miny Lowest ordinate.
  * @param minz Lowest depth.
  * @param maxx Highest abscissa.
  * @param maxy Highest ordinate.
  * @param maxz Highest depth. */
def setBounds(minx:Double, miny:Double, minz:Double, maxx:Double, maxy:Double, maxz:Double) = metrics.setBounds(minx, miny, minz, maxx, maxy, maxz)
/** Set the graphic graph bounds from the graphic graph (depth forced to 0, 2D view). */
def setBounds(graph:GraphicGraph) {
    setBounds(graph.getMinPos.x, graph.getMinPos.y, 0, graph.getMaxPos.x, graph.getMaxPos.y, 0)
}
// Utility
/** Set the graph padding from the graph style. Called in pushView.
  * @param graph The graphic graph. */
protected def setPadding(graph:GraphicGraph) { padding.copy(graph.getStyle.getPadding) }
/** Process each node to check if it is in the actual view port, and mark invisible nodes. This
  * method allows for fast node, sprite and edge visibility checking when drawing. This must be
  * called before each rendering (if the view port changed). Called in pushView.
  * A node is not visible if it is out of the view port, if it is deliberately hidden (its hidden
  * flag is set) or if it has not yet been positioned (has not yet received a (x,y,z) position).
  * If the auto fitting feature is activate the whole graph is always visible. */
protected def checkVisibility(graph:GraphicGraph) {
    nodeInvisible.clear

    if(! autoFit) {
        // If autoFit is on, we know the whole graph is visible anyway.
        val X:Double = metrics.viewport(0)
        val Y:Double = metrics.viewport(1)
        val W:Double = metrics.viewport(2)
        val H:Double = metrics.viewport(3)

        graph.getEachNode.foreach { node:Node =>
            val n:GraphicNode = node.asInstanceOf[GraphicNode]
            val visible = isNodeIn(n, X, Y, X+W, Y+H) && (!n.hidden) && n.positionned;
//            val visible = isNodeIn(n, 0, 0, W, H) && (!n.hidden) && n.positionned;

            if(! visible) {
                // Cache the node id so edges and attached sprites can be culled quickly.
                nodeInvisible += node.getId
            }
        }
    }
}
// Padding accessors: return the requested padding only when expressed in the matching
// units; the Y padding falls back to the X padding when only one value is given.
protected def paddingXgu:Double = if(padding.units == Units.GU && padding.size > 0) padding.get( 0 ) else 0
protected def paddingYgu:Double = if(padding.units == Units.GU && padding.size > 1) padding.get( 1 ) else paddingXgu
protected def paddingXpx:Double = if(padding.units == Units.PX && padding.size > 0) padding.get( 0 ) else 0
protected def paddingYpx:Double = if(padding.units == Units.PX && padding.size > 1) padding.get( 1 ) else paddingXpx
/** Check if a sprite is visible in the current view port.
  * @param sprite The sprite to check.
  * @return True if visible. */
protected def isSpriteVisible(sprite:GraphicSprite):Boolean = isSpriteIn(sprite, 0, 0, metrics.viewport(2), metrics.viewport(3))
/** Check if an edge is visible in the current view port.
  * An edge is invisible if either endpoint is not positioned yet, if its hidden flag
  * is set, or if BOTH endpoints are in the invisible-node cache (an edge with one
  * visible endpoint may still cross the view port).
  * @param edge The edge to check.
  * @return True if visible. */
protected def isEdgeVisible(edge:GraphicEdge):Boolean = {
    if((!edge.getNode0[GraphicNode].positionned)
    || (!edge.getNode1[GraphicNode].positionned)) {
        false
    } else if(edge.hidden) {
        false
    } else {
        val node0Invis = nodeInvisible.contains(edge.getNode0[Node].getId)
        val node1Invis = nodeInvisible.contains(edge.getNode1[Node].getId)

        ! (node0Invis && node1Invis)
    }
}
/** Is the given node visible in the given area.
  * The node position is projected to pixels and its bounding box (from its style size)
  * is tested for overlap with the given rectangle.
  * @param node The node to check.
  * @param X1 The min abscissa of the area.
  * @param Y1 The min ordinate of the area.
  * @param X2 The max abscissa of the area.
  * @param Y2 The max ordinate of the area.
  * @return True if the node lies in the given area. */
protected def isNodeIn(node:GraphicNode, X1:Double, Y1:Double, X2:Double, Y2:Double):Boolean = {
    val size = getNodeOrSpriteSize(node)//node.getStyle.getSize
    val w2   = metrics.lengthToPx(size, 0) / 2
    val h2   = if(size.size > 1) metrics.lengthToPx(size, 1)/2 else w2
    val src  = new Point3(node.getX, node.getY, 0)

    bck.transform(src)
//    Tx.transform( src, src )

    // Axis-aligned bounding-box overlap test.
    val x1 = src.x - w2
    val x2 = src.x + w2
    val y1 = src.y - h2
    val y2 = src.y + h2

    if(     x2 < X1) false
    else if(y2 < Y1) false
    else if(x1 > X2) false
    else if(y1 > Y2) false
    else true
}
/** Is the given sprite visible in the given area.
  * A sprite attached to an invisible node or edge is considered invisible; otherwise
  * its pixel bounding box is tested for overlap with the given rectangle.
  * @param sprite The sprite to check.
  * @param X1 The min abscissa of the area.
  * @param Y1 The min ordinate of the area.
  * @param X2 The max abscissa of the area.
  * @param Y2 The max ordinate of the area.
  * @return True if the node lies in the given area. */
protected def isSpriteIn( sprite:GraphicSprite, X1:Double, Y1:Double, X2:Double, Y2:Double ):Boolean = {
    if( sprite.isAttachedToNode && ( nodeInvisible.contains( sprite.getNodeAttachment.getId ) ) ) {
        false
    } else if( sprite.isAttachedToEdge && ! isEdgeVisible( sprite.getEdgeAttachment ) ) {
        false
    } else {
        val size = sprite.getStyle.getSize
        val w2   = metrics.lengthToPx( size, 0 ) / 2
        val h2   = if( size.size > 1 ) metrics.lengthToPx( size, 1 )/2 else w2
        val src  = spritePositionPx( sprite )

        // Axis-aligned bounding-box overlap test.
        val x1 = src.x - w2
        val x2 = src.x + w2
        val y1 = src.y - h2
        val y2 = src.y + h2

        if(      x2 < X1 ) false
        else if( y2 < Y1 ) false
        else if( x1 > X2 ) false
        else if( y1 > Y2 ) false
        else true
    }
}
/** The sprite position resolved through its eventual attachment, in pixels. */
protected def spritePositionPx(sprite:GraphicSprite):Point3 = getSpritePosition(sprite, new Point3, Units.PX)
/** Check if a node contains the given point (x,y).
  * The point is expected in pixels relative to the view port origin; the node position
  * is projected to pixels and shifted by the view port offset before the test.
  * @param elt The node.
  * @param x The point abscissa.
  * @param y The point ordinate.
  * @return True if (x,y) is in the given element. */
protected def nodeContains(elt:GraphicElement, x:Double, y:Double):Boolean = {
    val size = getNodeOrSpriteSize(elt) // elt.getStyle.getSize    // TODO use nodeinfo
    val w2   = metrics.lengthToPx(size, 0) / 2
    val h2   = if(size.size() > 1) metrics.lengthToPx(size, 1)/2 else w2
    val dst  = bck.transform(elt.getX, elt.getY, 0)

    // Express the projected position relative to the view port origin.
    dst.x -= metrics.viewport(0)
    dst.y -= metrics.viewport(1)

    val x1 = (dst.x) - w2
    val x2 = (dst.x) + w2
    val y1 = (dst.y) - h2
    val y2 = (dst.y) + h2

    if(     x < x1) false
    else if(y < y1) false
    else if(x > x2) false
    else if(y > y2) false
    else true
}
/** Check if a sprite contains the given point (x,y).
  * The point is expected in pixels relative to the view port origin; the sprite
  * position is resolved through its eventual attachment (node/edge/free) first.
  * @param elt The sprite.
  * @param x The point abscissa.
  * @param y The point ordinate.
  * @return True if (x,y) is in the given element. */
protected def spriteContains(elt:GraphicElement, x:Double, y:Double):Boolean = {
    val sprite = elt.asInstanceOf[GraphicSprite]
    val size   = getNodeOrSpriteSize(elt) //sprite.getStyle.getSize // TODO use nodeinfo
    val w2     = metrics.lengthToPx(size, 0) / 2
    val h2     = if(size.size() > 1) metrics.lengthToPx(size, 1)/2 else w2
    val dst    = spritePositionPx(sprite) // new Point2D.Double( sprite.getX(), sprite.getY() )
//    val dst = new Point2D.Double
//
//    Tx.transform( src, dst )
    // Express the position relative to the view port origin.
    dst.x -= metrics.viewport(0)
    dst.y -= metrics.viewport(1)

    val x1 = dst.x - w2
    val x2 = dst.x + w2
    val y1 = dst.y - h2
    val y2 = dst.y + h2

    if(     x < x1) false
    else if(y < y1) false
    else if(x > x2) false
    else if(y > y2) false
    else true
}
/** The rendered size of a node or sprite, in graph units.
  * Prefers the size computed by the renderer skeleton when available, falling back on
  * the style-sheet size otherwise. */
protected def getNodeOrSpriteSize(elt:GraphicElement):Values = {
    val info = elt.getAttribute(Skeleton.attributeName).asInstanceOf[AreaSkeleton]

    if(info ne null) {
        new Values(Units.GU, info.theSize.x, info.theSize.y)
    } else {
        elt.getStyle.getSize
    }
}
/** True if the element style allows it to be shown at the current zoom level.
  * NOTE(review): `values` used in the ZOOMS case is a field defined outside this view —
  * presumably the set of zoom levels at which the element is visible; confirm. */
protected def styleVisible(element:GraphicElement):Boolean = {
    val visibility = element.getStyle.getVisibility
    element.getStyle.getVisibilityMode match {
        case VisibilityMode.HIDDEN     => false
        case VisibilityMode.AT_ZOOM    => (zoom == visibility.get(0))
        case VisibilityMode.UNDER_ZOOM => (zoom <= visibility.get(0))
        case VisibilityMode.OVER_ZOOM  => (zoom >= visibility.get(0))
        case VisibilityMode.ZOOM_RANGE => if(visibility.size > 1) (zoom >= visibility.get(0) && zoom <= visibility.get(1)) else true
        case VisibilityMode.ZOOMS      => values.contains(visibility.get(0))
        case _                         => true
    }
}
/** True if the element style allows its text label to be shown at the current zoom level.
  * Mirrors [[styleVisible]] but for the text-visibility style properties. */
def isTextVisible(element:GraphicElement):Boolean = {
    val visibility = element.getStyle.getTextVisibility
    element.getStyle.getTextVisibilityMode match {
        case TextVisibilityMode.HIDDEN     => false
        case TextVisibilityMode.AT_ZOOM    => (zoom == visibility.get(0))
        case TextVisibilityMode.UNDER_ZOOM => (zoom <= visibility.get(0))
        case TextVisibilityMode.OVER_ZOOM  => (zoom >= visibility.get(0))
        case TextVisibilityMode.ZOOM_RANGE => if(visibility.size > 1) (zoom >= visibility.get(0) && zoom <= visibility.get(1)) else true
        case TextVisibilityMode.ZOOMS      => values.contains(visibility.get(0))
        case _                             => true
    }
}
/** The position of a node or sprite in graph units.
  * NOTE(review): the match only handles GraphicNode and GraphicSprite — any other
  * GraphicElement subtype (e.g. an edge) would raise a MatchError; confirm callers
  * never pass one.
  * @param elt The node or sprite.
  * @param pos Receiver for the position, a new Point3 is allocated when null.
  * @return The same instance as pos, or a new one if pos was null. */
def getNodeOrSpritePositionGU(elt:GraphicElement, pos:Point3):Point3 = {
    var p = pos
    if(p eq null) p = new Point3
    elt match {
        case node:GraphicNode     => { p.x = node.getX; p.y = node.getY; p }
        case sprite:GraphicSprite => { getSpritePosition(sprite, p, Units.GU) }
    }
}
/** Compute the position of a sprite if it is not attached.
  * The sprite coordinates are converted from the units they are stored in
  * (sprite.getUnits) into the requested units:
  *  - identical units are copied verbatim;
  *  - PX -> GU uses the inverse projection (see transformPxToGu);
  *  - GU -> PX uses the direct projection (see transformGuToPx);
  *  - PERCENTS are resolved against the graph bounds (for GU) or the view port (for PX).
  * @param sprite The sprite.
  * @param position Where to stored the computed position, if null, the position is created.
  * @param units The units the computed position must be given into.
  * @return The same instance as pos, or a new one if pos was null.
  * @throws RuntimeException for unit pairs that are not handled yet. */
protected def getSpritePositionFree(sprite:GraphicSprite, position:Point3, units:Units):Point3 = {
    var pos = position

    if(pos eq null)
        pos = new Point3

    if(sprite.getUnits == units) {
        pos.x = sprite.getX
        pos.y = sprite.getY
    } else if(units == Units.GU && sprite.getUnits == Units.PX) {
        pos.x = sprite.getX
        pos.y = sprite.getY
        // Bug fix: going from pixels to graph units requires the INVERSE projection,
        // not the direct one (the direct transform maps GU -> PX, see transformPxToGu).
        bck.inverseTransform(pos)
    } else if(units == Units.PX && sprite.getUnits == Units.GU) {
        pos.x = sprite.getX
        pos.y = sprite.getY
        bck.transform(pos)
    } else if(units == Units.GU && sprite.getUnits == Units.PERCENTS) {
        pos.x = metrics.lo.x + (sprite.getX/100f) * metrics.graphWidthGU
        pos.y = metrics.lo.y + (sprite.getY/100f) * metrics.graphHeightGU
    } else if(units == Units.PX && sprite.getUnits == Units.PERCENTS) {
        pos.x = (sprite.getX/100f) * metrics.viewport(2)
        pos.y = (sprite.getY/100f) * metrics.viewport(3)
    } else {
        throw new RuntimeException("Unhandled yet sprite positioning convertion %s to %s.".format(sprite.getUnits, units))
    }

    pos
}
/** Compute the position of a sprite if attached to a node.
  * The sprite (x) value is interpreted as a radius around the node and the sprite (z)
  * value as an angle in degrees on that circle.
  * @param sprite The sprite.
  * @param position Where to stored the computed position, if null, the position is created.
  * @param units The units the computed position must be given into.
  * @return The same instance as pos, or a new one if pos was null. */
protected def getSpritePositionNode(sprite:GraphicSprite, position:Point3, units:Units):Point3 = {
    var pos = position

    if(pos eq null)
        pos = new Point3

    val node   = sprite.getNodeAttachment
    val radius = metrics.lengthToGu( sprite.getX, sprite.getUnits )
    val z      = sprite.getZ * (Pi / 180.0)   // degrees -> radians

    pos.x = node.x + (cos(z) * radius)
    pos.y = node.y + (sin(z) * radius)

    if(units == Units.PX)
        bck.transform(pos)

    pos
}
/** Compute the position of a sprite if attached to an edge.
  * The sprite (x) value is a percent along the edge ([0..1], clamped) and the sprite
  * (y) value an offset perpendicular to the edge. When the edge has a connector
  * skeleton, the position is delegated to it (so curved edges are handled); otherwise
  * the edge is assumed to be a straight segment.
  * @param sprite The sprite.
  * @param position Where to store the computed position, if null, the position is created.
  * @param units The units the computed position must be given into.
  * @return The same instance as pos, or a new one if pos was null. */
protected def getSpritePositionEdge(sprite:GraphicSprite, position:Point3, units:Units):Point3 = {
    var pos = position

    if(pos eq null)
        pos = new Point3

    val edge = sprite.getEdgeAttachment.asInstanceOf[GraphicEdge]
    val info = edge.getAttribute(Skeleton.attributeName).asInstanceOf[ConnectorSkeleton]

    if(info ne null) {
        val o = metrics.lengthToGu(sprite.getY, sprite.getUnits)
        if(o==0) {
            val p = info.pointOnShape(sprite.getX)
            pos.x = p.x
            pos.y = p.y
        } else {
            val p = info.pointOnShapeAndPerpendicular(sprite.getX, o)
            pos.x = p.x
            pos.y = p.y
        }
    } else {
        var x  = 0.0
        var y  = 0.0
        var dx = 0.0
        var dy = 0.0
        var d  = sprite.getX                                        // Percent on the edge.
        val o  = metrics.lengthToGu(sprite.getY, sprite.getUnits)   // Offset from the position given by percent, perpendicular to the edge.

        x  = edge.from.x
        y  = edge.from.y
        dx = edge.to.x - x
        dy = edge.to.y - y

        // Clamp the percent to [0..1].
        d = if( d > 1 ) 1 else d
        d = if( d < 0 ) 0 else d

        x += dx * d
        y += dy * d

        if(o != 0) {
            // Normalize (dx,dy) and shift along the perpendicular (-dy,dx).
            d   = sqrt( dx*dx + dy*dy )
            dx /= d
            dy /= d

            x += -dy * o
            y +=  dx * o
        }

        pos.x = x
        pos.y = y
    }

    if(units == Units.PX)
        bck.transform(pos)

    pos
}
} | prismsoul/gedgraph | sources/prismsoul.genealogy.gedgraph/gs-ui/org/graphstream/ui/j2dviewer/Camera.scala | Scala | gpl-2.0 | 31,103 |
package ca.dubey.music.midi.event
import ca.dubey.music.theory.KeySignature
import ca.dubey.music.theory.Key
import ca.dubey.music.theory.Tonality
import ca.dubey.music.theory.Major
import ca.dubey.music.theory.Minor
import javax.sound.midi.MetaMessage
import javax.sound.midi.MidiEvent
/** Companion for [[KeySignatureEvent]]: parses MIDI key-signature meta messages and
  * builds events from a key and a tonality. */
object KeySignatureEvent extends EventCompanion[KeySignatureEvent] {
  // MIDI meta-event type for a key signature message.
  val EVENT_ID = 0x59

  /** Build a KeySignatureEvent from the 2-byte payload of a key-signature meta
    * message: data(0) is the (signed) number of accidentals, data(1) the tonality. */
  def fromMidiEventData(data : Array[Byte]) : Option[KeySignatureEvent] = {
    val tonality = Tonality.fromByte(data(1))
    val k = KeySignature.fromTonalityAndNumAccidentals(tonality, data(0))
    return Some(new KeySignatureEvent(k))
  }

  /** Convenience constructor from a key and a tonality. */
  def apply(n : Key, t : Tonality) : KeySignatureEvent = {
    new KeySignatureEvent(new KeySignature(n, t))
  }
}
/** A key-signature event wrapping a [[KeySignature]], convertible to a MIDI meta event.
  * The produced MidiEvent is placed at tick 0. */
class KeySignatureEvent(val keySignature : KeySignature) extends Event {
  override def toMidiEvent : MidiEvent = {
    return new MidiEvent(
      new MetaMessage(
        KeySignatureEvent.EVENT_ID,
        // Payload: number of accidentals (signed), then the tonality byte.
        Array[Byte](
          keySignature.numAccidentals.toByte,
          keySignature.tonality.toByte),
        2),
      0L)
  }
}
| adubey/music | src/main/scala/midi/event/KeySignatureEvent.scala | Scala | gpl-2.0 | 1,102 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.cluster.main
import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.Config
import org.apache.gearpump.cluster.TestUtil
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.Await
import scala.concurrent.duration._
/** Verifies that a MasterWatcher actor terminates itself when no quorum can be formed. */
class MasterWatcherSpec extends FlatSpec with Matchers {
  def config: Config = TestUtil.MASTER_CONFIG

  "MasterWatcher" should "kill itself when can not get a quorum" in {
    val system = ActorSystem("ForMasterWatcher", config)

    val actorWatcher = TestProbe()(system)

    val masterWatcher = system.actorOf(Props(classOf[MasterWatcher], "watcher"))
    // Watch the actor so its termination is observable within the timeout.
    actorWatcher watch masterWatcher
    actorWatcher.expectTerminated(masterWatcher, 5.seconds)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}
| manuzhang/incubator-gearpump | core/src/test/scala/org/apache/gearpump/cluster/main/MasterWatcherSpec.scala | Scala | apache-2.0 | 1,642 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.wiki.pages
import com.netflix.atlas.core.model.MathVocabulary
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.stacklang.Word
import com.netflix.atlas.wiki.StackWordPage
/** Wiki page describing the `dist-max` stack-language word from the math vocabulary. */
case object DistMax extends StackWordPage {
  val vocab: Vocabulary = MathVocabulary
  // Fails fast (NoSuchElementException) if the word is missing from the vocabulary.
  val word: Word = vocab.words.find(_.name == "dist-max").get

  override def signature: String = s"`Query -- TimeSeriesExpr`"

  override def summary: String =
    """
      |Compute the maximum recorded value for [timers] and [distribution summaries]. This
      |is a helper for aggregating by the max of the max statistic for the meter.
      |
      |[timers]: http://netflix.github.io/spectator/en/latest/intro/timer/
      |[distribution summaries]: http://netflix.github.io/spectator/en/latest/intro/dist-summary/
    """.stripMargin.trim
}
| copperlight/atlas | atlas-wiki/src/main/scala/com/netflix/atlas/wiki/pages/DistMax.scala | Scala | apache-2.0 | 1,466 |
/**
* Copyright (C) 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tdb.examples
import scala.collection.{GenIterable, GenMap}
import scala.collection.immutable.HashMap
import scala.collection.mutable.Map
import scala.collection.parallel.{ForkJoinTaskSupport, ParIterable}
import scala.concurrent.forkjoin.ForkJoinPool
import tdb._
import tdb.list._
import tdb.TDB._
import tdb.util._
/** Pure helpers for counting words and merging count maps.
  * Note: unqualified `Map` refers to `scala.collection.mutable.Map` (file-level import). */
object WCAlgorithm {
  /** Count the words of `s`, returning an immutable word -> occurrences map. */
  def wordcount(s: String): HashMap[String, Int] = {
    HashMap(mutableWordcount(s).toSeq: _*)
  }

  /** Count the words of `s` into `counts` (mutated in place) and return it.
    * A fresh map is allocated per call when `counts` is omitted (Scala evaluates
    * default arguments at each call site). */
  def mutableWordcount(s: String, counts: Map[String, Int] = Map[String, Int]())
    : Map[String, Int] = {
    for (word <- s.split("\\\\W+")) {
      if (counts.contains(word)) {
        counts(word) += 1
      } else {
        counts(word) = 1
      }
    }
    counts
  }

  /** Alias of [[mutableWordcount]] kept for API compatibility; delegates instead of
    * duplicating the counting loop. */
  def countReduce(s: String, counts: Map[String, Int]): Map[String, Int] =
    mutableWordcount(s, counts)

  /** Merge two immutable count maps, summing the counts of keys present in both. */
  def reduce(map1: HashMap[String, Int], map2: HashMap[String, Int])
    : HashMap[String, Int] = {
    map1.merged(map2)({ case ((k, v1), (_, v2)) => (k, v1 + v2) })
  }

  /** Merge two mutable count maps into a fresh map, summing common keys.
    * Neither argument is modified (`map2` is cloned first). */
  def mutableReduce(map1: Map[String, Int], map2: Map[String, Int])
    : Map[String, Int] = {
    val counts = map2.clone()

    for ((key, value) <- map1) {
      if (counts.contains(key)) {
        counts(key) += map1(key)
      } else {
        counts(key) = map1(key)
      }
    }

    counts
  }
}
/** Self-adjusting word count: maps each (title, body) page to its per-page word counts,
  * then reduces by merging the count maps pairwise. */
class WCAdjust(list: AdjustableList[String, String])
  extends Adjustable[Mod[(String, HashMap[String, Int])]] {

  def run(implicit c: Context) = {
    val counts = list.map {
      case (title, body) => (title, WCAlgorithm.wordcount(body))
    }
    // The reduce keeps an arbitrary title (key1) alongside the merged counts.
    counts.reduce {
      case ((key1, value1), (key2, value2)) =>
        (key1, WCAlgorithm.reduce(value1, value2))
    }
  }
}
/** Chunked variant of [[WCAdjust]]: counts a whole chunk of pages at once before the
  * pairwise reduce, trading granularity for fewer intermediate maps. */
class ChunkWCAdjust(list: AdjustableList[String, String])
  extends Adjustable[Mod[(String, HashMap[String, Int])]] {

  /** Count all pages of a chunk into a single map, keyed by the first page's title. */
  def chunkMapper(chunk: Iterable[(String, String)]) = {
    var counts = Map[String, Int]()

    for (page <- chunk) {
      counts = WCAlgorithm.mutableWordcount(page._2, counts)
    }

    (chunk.head._1, HashMap(counts.toSeq: _*))
  }

  def run(implicit c: Context) = {
    val counts = list.chunkMap(chunkMapper)
    counts.reduce {
      case ((key1, value1), (key2, value2)) =>
        (key1, WCAlgorithm.reduce(value1, value2))
    }
  }
}
/** Benchmark harness for the word-count algorithm: loads data (random or from file),
  * runs a naive parallel baseline and the self-adjusting version, and checks outputs. */
class WCAlgorithm(_conf: AlgorithmConf)
    extends Algorithm[Mod[(String, HashMap[String, Int])]](_conf) {
  val input = mutator.createList[String, String](conf.listConf)

  // Data source: random strings when no file is given, else a directory or single file.
  val data =
    if (conf.file == "") {
      if (Experiment.verbosity > 0) {
        println("Generating random data.")
      }
      new RandomStringData(
        input, conf.count, conf.mutations, Experiment.check, conf.runs)
    } else {
      if (Experiment.verbosity > 0) {
        println("Reading data from " + conf.file)
      }
      if (OS.isDir(conf.updateFile)) {
        new DirectoryData(input, conf.file, conf.updateFile, conf.runs, Experiment.check)
      } else {
        new FileData(input, conf.file, conf.updateFile, conf.runs, Experiment.check)
      }
    }

  // Chunked variant only pays off for chunk sizes > 1.
  val adjust =
    if (conf.listConf.chunkSize == 1)
      new WCAdjust(input.getAdjustableList())
    else
      new ChunkWCAdjust(input.getAdjustableList())

  var naiveTable: ParIterable[String] = _

  /** Snapshot the data into a parallel collection for the naive baseline. */
  def generateNaive() {
    data.generate()
    naiveTable = Vector(data.table.values.toSeq: _*).par
    naiveTable.tasksupport =
      new ForkJoinTaskSupport(new ForkJoinPool(OS.getNumCores() * 2))
  }

  def runNaive() {
    naiveHelper(naiveTable)
  }

  // Parallel aggregate: per-partition countReduce, then merge partial maps.
  private def naiveHelper(input: GenIterable[String] = naiveTable) = {
    input.aggregate(Map[String, Int]())((x, line) =>
      WCAlgorithm.countReduce(line, x), WCAlgorithm.mutableReduce)
  }

  def loadInitial() {
    data.load()
  }

  def hasUpdates() = data.hasUpdates()

  def loadUpdate() = data.update()

  /** Compare the self-adjusting output against a freshly-computed naive answer. */
  def checkOutput(output: Mod[(String, HashMap[String, Int])]) = {
    val answer = naiveHelper(data.table.values)
    val out = mutator.read(output)._2

    //println("answer = " + answer)
    //println("output = " + out)
    out == answer
  }
}
| twmarshall/tdb | core/src/main/scala/tdb/examples/WCAlgorithm.scala | Scala | apache-2.0 | 4,798 |
package org.sisioh.aws4s.sns.model
import com.amazonaws.services.sns.model.ConfirmSubscriptionRequest
import org.sisioh.aws4s.PimpedType
/** Factory methods mirroring the Java SDK constructors of ConfirmSubscriptionRequest. */
object ConfirmSubscriptionRequestFactory {

  def create(): ConfirmSubscriptionRequest =
    new ConfirmSubscriptionRequest()

  def create(topicArn: String, token: String): ConfirmSubscriptionRequest =
    new ConfirmSubscriptionRequest(topicArn, token)

  def create(topicArn: String, token: String, authenticateOnUnsubscribe: String): ConfirmSubscriptionRequest =
    new ConfirmSubscriptionRequest(topicArn, token, authenticateOnUnsubscribe)
}
/** Scala-friendly, Option-based wrapper around the SDK's ConfirmSubscriptionRequest.
  * Getters return None for absent values; setters accept Option and pass null through
  * `orNull` to the underlying Java object. Implemented as a value class (no allocation). */
class RichConfirmSubscriptionRequest(val underlying: ConfirmSubscriptionRequest)
    extends AnyVal
    with PimpedType[ConfirmSubscriptionRequest] {

  def topicArnOpt: Option[String] =
    Option(underlying.getTopicArn)

  def topicArnOpt_=(value: Option[String]): Unit =
    underlying.setTopicArn(value.orNull)

  def withTopicArnOpt(value: Option[String]): ConfirmSubscriptionRequest =
    underlying.withTopicArn(value.orNull)

  def tokenOpt: Option[String] =
    Option(underlying.getToken)

  def tokenOpt_=(value: Option[String]): Unit =
    underlying.setToken(value.orNull)

  def withTokenOpt(value: Option[String]): ConfirmSubscriptionRequest =
    underlying.withToken(value.orNull)

  def authenticateOnUnsubscribeOpt: Option[String] =
    Option(underlying.getAuthenticateOnUnsubscribe)

  // Consistency fix: declare the explicit `: Unit` result type like the sibling setters
  // (public-API members should carry explicit types).
  def authenticateOnUnsubscribeOpt_=(value: Option[String]): Unit =
    underlying.setAuthenticateOnUnsubscribe(value.orNull)

  def withAuthenticateOnUnsubscribeOpt(authenticateOnUnsubscribe: Option[String]): ConfirmSubscriptionRequest =
    underlying.withAuthenticateOnUnsubscribe(authenticateOnUnsubscribe.orNull)
}
| sisioh/aws4s | aws4s-sns/src/main/scala/org/sisioh/aws4s/sns/model/RichConfirmSubscriptionRequest.scala | Scala | mit | 1,709 |
import me.ivanyu.luscinia.ClusterInterface.{RequestVoteResponse, RequestVote}
import me.ivanyu.luscinia.NodeActor.{Candidate, Follower, FollowerData}
import me.ivanyu.luscinia.entities.Term
import me.ivanyu.luscinia.{NodeActor, TestBase}
import scala.concurrent.duration._
/** Raft follower behaviour tests: election timeout, vote denial on stale terms, and
  * vote denial when already committed to another candidate. */
class FollowerTest extends TestBase {
  test("Case 1: reaction to no cluster RPCs") {
    val (node, clusterInterfaceProbe, monitoringInterfaceProbe) = init(smallPeerList)

    // Must send RequestVote RPC to peers
    val sent = clusterInterfaceProbe.expectMsgAllClassOf(
      (electionTimeout.max + timingEpsilon).milliseconds,
      List.fill(smallPeerList.size)(classOf[RequestVote]):_*)
    assert(sent.length == smallPeerList.length)

    // Each peer must have received exactly one RequestVote.
    val receivers = sent.map(_.receiver).toSet
    assert((smallPeerList.toSet -- receivers).isEmpty)

    // The state must be Candidate
    assert(node.stateName == NodeActor.Candidate)
    node.stop()
  }

  test("Case 2: reaction to RequestVote RPC with lower term") {
    val (node, clusterInterfaceProbe, monitoringInterfaceProbe) = init(smallPeerList)

    // The node have the higher term
    node.setState(Follower, FollowerData(Term(5), emptyLog, None))

    // Receives RPC with lower term -> response with false and continue election countdown
    clusterInterfaceProbe.send(node, RequestVote(Term(1), 0, Term(0), node2, node1))

    // Expecting denial of vote
    clusterInterfaceProbe.expectMsgPF() {
      case RequestVoteResponse(term, voteGranted, rpcSender, receiver) =>
        term.t == 1 && !voteGranted && rpcSender == node1 && receiver == node2
    }

    // Expecting node1's RequestVote
    val sent = clusterInterfaceProbe.expectMsgAllClassOf(
      (electionTimeout.max + timingEpsilon).milliseconds,
      List.fill(smallPeerList.size)(classOf[RequestVote]):_*)
    assert(sent.length == smallPeerList.length)

    assert(node.stateName == Candidate)
    node.stop()
  }

  test("Case 3: equal term + voted for another peer") {
    val (node, clusterInterfaceProbe, monitoringInterfaceProbe) = init(smallPeerList)

    val t = Term(5)
    // Already voted for another peer
    node.setState(Follower, FollowerData(t, emptyLog, Some(node3)))

    clusterInterfaceProbe.send(node, RequestVote(t, 0, Term.start, node2, node1))

    // Expecting denial of vote
    clusterInterfaceProbe.expectMsgPF() {
      case RequestVoteResponse(term, voteGranted, rpcSender, receiver) =>
        term.t == 1 && !voteGranted && rpcSender == node1 && receiver == node2
    }

    // The vote commitment must be unchanged.
    assert(node.stateName == Follower)
    assert(node.stateData == FollowerData(t, emptyLog, Some(node3)))
  }

  /*
  test("Case 4: higher/equal term + hasn't voted for another peer + last log term smaller") {
    val (node, clusterInterfaceProbe, monitoringInterfaceProbe) = init(smallPeerList)

    val t = Term(5)
    // Hasn't voted for another peer
    node.setState(Follower, FollowerData(t, emptyLog, None))

    clusterInterfaceProbe.send(node, RequestVote(t, 0, Term(4), node2, node1))

    // Expecting denial of vote
    clusterInterfaceProbe.expectMsgPF() {
      case RequestVoteResponse(term, voteGranted, rpcSender, receiver) =>
        term.t == 1 && !voteGranted && rpcSender == node1 && receiver == node2
    }

    assert(node.stateName == Follower)
    // Should accept higher term
//    assert(node.stateData == FollowerData(t2, emptyLog, Some(node3)))
  }
  */
}
| ivanyu/luscinia | node/src/test/scala/me/ivanyu/luscinia/FollowerTest.scala | Scala | unlicense | 3,393 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.renewal
import jto.validation.forms.Rules._
import jto.validation.forms._
import jto.validation.{From, Rule, Write}
import models.FormTypes._
import play.api.libs.json.Json
// Raw (string) count of foreign-exchange transactions made in the last
// 12 months, as captured from the renewal form.
case class FXTransactionsInLast12Months(fxTransaction: String)
object FXTransactionsInLast12Months {
  import utils.MappingUtils.Implicits._
  // JSON (de)serialisation derived from the case class shape.
  implicit val format = Json.format[FXTransactionsInLast12Months]
  // Accepts 1 to 11 digits only; anything else maps to the "invalid" message key.
  private val txnAmountRegex = regexWithMsg("^[0-9]{1,11}$".r, "error.invalid.renewal.fx.transactions.in.12months")
  // Validation pipeline: strip surrounding whitespace, require non-empty,
  // then apply the digits-only regex.
  private val txnAmountType = notEmptyStrip andThen
    notEmpty.withMessage("error.required.renewal.fx.transactions.in.12months") andThen txnAmountRegex
  // Reads and validates the "fxTransaction" form field into the model.
  implicit val formRule: Rule[UrlFormEncoded, FXTransactionsInLast12Months] = From[UrlFormEncoded] { __ =>
    import jto.validation.forms.Rules._
    (__ \ "fxTransaction").read(txnAmountType) map FXTransactionsInLast12Months.apply
  }
  // Writes the model back to url-encoded form data (inverse of formRule).
  implicit val formWrites: Write[FXTransactionsInLast12Months, UrlFormEncoded] = Write { x =>
    Map("fxTransaction" -> Seq(x.fxTransaction))
  }
  // Bridges the renewal model to the money-service-business "next 12 months"
  // model, which carries the same single string field.
  implicit def convert(model: FXTransactionsInLast12Months): models.moneyservicebusiness.FXTransactionsInNext12Months = {
    models.moneyservicebusiness.FXTransactionsInNext12Months(model.fxTransaction)
  }
}
| hmrc/amls-frontend | app/models/renewal/FXTransactionsInLast12Months.scala | Scala | apache-2.0 | 1,873 |
import java.io.{InputStream, OutputStream}
import org.json4s.jackson.Serialization
/**
 * Base Lambda handler class to avoid boilerplate.
 *
 * By using this handler, conversions from/to case class can be unnecessary.
 * To use, define a simple class and extend. The function to be passed as
 * `delegate` parameter is typically companion object's function.
 *
 * To use from Lambda, specify derived class's `handler` function, e.g.
 * "DerivedClass::Handler".
 *
 * For further usages, see HandlerSpec.
 *
 * @param delegate function mapping the deserialized request to the response
 * @tparam I request type, deserialized from the Lambda input JSON
 * @tparam O response type, serialized back to JSON (must be a reference type)
 */
abstract class Handler[I, O <: AnyRef](delegate: I => O)(implicit im: Manifest[I], om: Manifest[O]) {
  // json4s formats used by Serialization.read/write below.
  implicit val formats = org.json4s.DefaultFormats
  /**
   * Entry point invoked by AWS Lambda: reads JSON from the request stream,
   * applies `delegate`, and writes the JSON-encoded result as UTF-8.
   *
   * @param inputStream Stream to read request body. Specification by Lambda
   * @param outputStream Stream to write response. Specification by Lambda
   */
  def handler(inputStream: InputStream, outputStream: OutputStream): Unit = {
    val input = Serialization.read[I](inputStream)
    val output = delegate(input)
    val json = Serialization.write[O](output)
    outputStream.write(json.getBytes("UTF-8"))
  }
}
| yamitzky/Scala-Lambda-Apex-Kuromoji | modules/core/src/main/scala/Handler.scala | Scala | mit | 1,122 |
// Copyright © 2010-2016, Esko Luontola <www.orfjackal.net>
// This software is released under the Apache License 2.0.
// The license text is at http://www.apache.org/licenses/LICENSE-2.0
package org.specsy.examples.scala
import org.specsy.scala.ScalaSpecsy
// Example spec showing how to keep a not-yet-implemented acceptance test in
// the suite without failing the build: wrap the failing body in
// AcceptanceTestHelpers.pendingUntilFixed.
class PendingUntilFixedExampleSpec extends ScalaSpecsy {
  "An acceptance test for an already implemented feature" >> {
    // Test code...
  }
  "An acceptance test whose feature has not yet been implemented" >> AcceptanceTestHelpers.pendingUntilFixed {
    // Test code which is still failing...
    assert(false, "this feature is not implemented")
  }
}
object AcceptanceTestHelpers {
  // Keeping this helper in one shared object makes every pending test easy to
  // locate: just search for usages of `pendingUntilFixed` in the IDE.
  //
  // Semantics: if `closure` throws, the test is considered "still pending" and
  // the failure is logged to stderr; if it succeeds, an AssertionError reminds
  // the developer to remove the pendingUntilFixed wrapper.
  def pendingUntilFixed(closure: => Unit): Unit = {
    val stillFailing =
      try {
        closure
        false
      } catch {
        case e: Throwable =>
          System.err.println("This test is pending until fixed:")
          e.printStackTrace()
          true
      }
    if (!stillFailing)
      throw new AssertionError("This test would now pass. Remove the 'pendingUntilFixed' tag.")
  }
}
| orfjackal/specsy | specsy-examples/src/test/scala/org/specsy/examples/scala/PendingUntilFixedExampleSpec.scala | Scala | apache-2.0 | 1,138 |
/*
* @author Genc Mazlami
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.dcop.evaluation
/**
 * This class is used to adapt the Input graphs such that they can be used
 * with the implementations of the BR and DSA algorithms, authored by Robin Hafen.
 *
 * NOTE(review): currently an empty placeholder — see BinaryConstraintGraphProvider
 * for where the adaptation logic is expected to live.
 */
class GraphInputAdapter {
  //look at BinaryConstraintGraphProvider !!
} | gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/dcop/evaluation/GraphInputAdapter.scala | Scala | apache-2.0 | 940 |
package provingground.translation
import provingground._
import ammonite.ops._
import edu.stanford.nlp.simple._
/**
 * Helper script that parses sentences with Stanford CoreNLP and appends the
 * results as markdown to the NLPtoHoTT notes file.
 */
object Script {
  // Target notes file, relative to the working directory (ammonite-ops `pwd`).
  val file = pwd / "notes" / "NLPtoHoTT.markdown"
  // Appends `s` to the notes file, surrounded by newlines.
  def save(s: String) = write.append(file, "\\n" + s + "\\n")
  // Appends `s` as a fenced markdown code block.
  def saveCode(s: String) = save(s"""```\\n$s\\n```""")
  // Appends `s` as a markdown bullet in inline-code style.
  def saveQuote(s: String) = save(s"""* ``$s``""")
  // Parses a sentence, prints its parse tree to stdout, records both the
  // sentence and its Penn-treebank tree in the notes file, and returns the tree.
  def parse(s: String) = {
    val sent = new Sentence(s)
    val tree = sent.parse
    tree.pennPrint
    saveQuote(sent.toString)
    saveCode(tree.pennString)
    tree
  }
}
| siddhartha-gadgil/ProvingGround | nlp/src/main/scala/provingground/nlp/Script.scala | Scala | mit | 527 |
package org.shapelogic.sc.polygon
/**
* Interface of an annotated shape.
*
* Polygon, MultiLine and ChainCodeHander implement this.<p>
*
* Currently there are a few base classes that implements the functionality in this.
* And the other subclass them .
*
* @author Sami Badawi
*
*/
import scala.collection.mutable.HashMap
import scala.collection.mutable.Map
import scala.collection.mutable.HashSet
import scala.collection.mutable.Set
trait AnnotatedShape {
  /** Mapping from an annotation key to the set of shapes carrying it. */
  def getMap(): Map[Object, Set[GeometricShape2D]]
  /** Attaches `annotation` to `shape`. */
  def putAnnotation(shape: GeometricShape2D, annotation: Object): Unit
  /** Initialises the internal annotation store. */
  def setup(): Unit
  /** The concrete annotation holder backing this shape. */
  def getAnnotatedShape(): AnnotatedShapeImplementation
  /** All shapes that carry the given annotation. */
  def getShapesForAnnotation(annotation: Object): Set[GeometricShape2D]
  /** All annotations attached to the given shape. */
  def getAnnotationForShapes(shape: GeometricShape2D): Set[Object]
  /** Attaches every annotation in `annotationKeySet` to `shape`. */
  def putAllAnnotation(shape: GeometricShape2D, annotationKeySet: Set[_ <: Object]): Unit
}
| sami-badawi/shapelogic-scala | src/main/scala/org/shapelogic/sc/polygon/AnnotatedShape.scala | Scala | mit | 900 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import org.dom4j._
import org.junit.Test
import org.orbeon.oxf.properties.PropertyStore
import org.orbeon.oxf.resources.URLFactory
import org.orbeon.oxf.test.DocumentTestBase
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.util.XPath
import org.orbeon.oxf.xml.Dom4j.elemToDocument
import org.orbeon.oxf.xml.TransformerUtils
import org.orbeon.saxon.om.{DocumentInfo, NodeInfo}
import org.scalatest.junit.AssertionsForJUnit
/**
 * Tests for ResourcesPatcher:
 *
 * - `patchingScenarios` checks that `oxf.fr.resource.*.*.<lang>.<path>`
 *   properties both override existing resource elements and create missing
 *   ones, with `*`-language values applied to every resource.
 * - `testResourcesConsistency` checks that every localized resource file has
 *   the same element structure as the English baseline.
 */
class ResourcesPatcherTest extends DocumentTestBase with AssertionsForJUnit {

    @Test def patchingScenarios(): Unit = {

        val propertySet = {
            val properties: Document =
                <properties xmlns:xs="http://www.w3.org/2001/XMLSchema">
                    <property as="xs:string" name="oxf.fr.resource.*.*.en.detail.buttons.existing" value="Existing"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.fr.detail.buttons.existing" value="Existant"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.de.detail.buttons.existing" value="Vorhanden"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.en.detail.labels.missing"   value="Missing"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.fr.detail.labels.missing"   value="Manquant"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.de.detail.labels.missing"   value="Vermisst"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.*.detail.buttons.acme"      value="Acme Existing"/>
                    <property as="xs:string" name="oxf.fr.resource.*.*.*.detail.labels.acme"       value="Acme Missing"/>
                </properties>
            new PropertyStore(properties).getGlobalPropertySet
        }

        def newDoc: Document =
            <resources>
                <resource xml:lang="en">
                    <buttons>
                        <existing>OVERRIDE ME</existing>
                        <acme>OVERRIDE ME</acme>
                    </buttons>
                </resource>
                <resource xml:lang="fr">
                    <buttons>
                        <existing>OVERRIDE ME</existing>
                        <acme>OVERRIDE ME</acme>
                    </buttons>
                </resource>
            </resources>

        val expected: Document =
            <resources>
                <resource xml:lang="en">
                    <buttons>
                        <existing>Existing</existing>
                        <acme>Acme Existing</acme>
                    </buttons>
                    <detail>
                        <labels>
                            <missing>Missing</missing>
                            <acme>Acme Missing</acme>
                        </labels>
                    </detail>
                </resource>
                <resource xml:lang="fr">
                    <buttons>
                        <existing>Existant</existing>
                        <acme>Acme Existing</acme>
                    </buttons>
                    <detail>
                        <labels>
                            <missing>Manquant</missing>
                            <acme>Acme Missing</acme>
                        </labels>
                    </detail>
                </resource>
            </resources>

        val initial = newDoc
        ResourcesPatcher.transform(initial, "*", "*")(propertySet)
        assertXMLDocumentsIgnoreNamespacesInScope(initial, expected)
    }

    @Test def testResourcesConsistency(): Unit = {

        import org.orbeon.scaxon.XML._

        // FIX: previously compared against the literal "en" and ignored the
        // `lang` parameter; both call sites pass "en" so behavior is unchanged.
        def hasLang(lang: String)(e: NodeInfo) = (e attValue "*:lang") == lang

        val urls = Seq(
            "oxf:/apps/fr/i18n/resources.xml",
            "oxf:/forms/orbeon/builder/form/resources.xml",
            "oxf:/xbl/orbeon/dialog-select/dialog-select-resources.xml"
        )

        // - allow "item" and "choices" because we use this for itemsets
        // - allow "type" because it's used for the FB list of types
        val AllowedDuplicateNames = Set("item", "choices", "type")

        for (url ← urls) {

            val doc =
                useAndClose(URLFactory.createURL(url).openStream()) { is ⇒
                    TransformerUtils.readTinyTree(XPath.GlobalConfiguration, is, null, false, false)
                }

            // Baseline is "en"
            val englishResource = doc / * / "resource" filter hasLang("en") head

            // Recursively compare element presence and order. All other nodes, including text and attribute nodes, are
            // ignored.
            def compareElements(left: NodeInfo, right: NodeInfo, lang: String): Boolean = (left, right) match {
                case (left: DocumentInfo, right: DocumentInfo) ⇒
                    compareElements(left.rootElement, right.rootElement, lang)
                case (left: NodeInfo, right: NodeInfo) if isElement(left) ⇒

                    def commonMessageSuffix = s" (url=$url and lang=$lang)"

                    assert(left.name === right.name, s"different names$commonMessageSuffix")

                    // Ignore children of "div" because it can contain XHTML which is different per language
                    left.name == right.name && (left.name == "div" || {
                        val leftChildren  = left / *
                        val rightChildren = right / *

                        val duplicates = findDuplicates(leftChildren map (_.name)) filterNot AllowedDuplicateNames

                        assert(
                            duplicates.isEmpty,
                            s"duplicate names under `${left.name}`: ${duplicates mkString ", "}$commonMessageSuffix"
                        )

                        def elemNames(elems: Seq[NodeInfo]) =
                            elems map (_.name) mkString ("[", ", ", "]")

                        // FIX: closing parenthesis was missing from this message.
                        def errorSuffix =
                            s"$commonMessageSuffix (left=${elemNames(leftChildren)}, right=${elemNames(rightChildren)})"

                        assert(leftChildren.size === rightChildren.size, s"different sizes$errorSuffix")
                        leftChildren.size == rightChildren.size && {
                            (leftChildren zip rightChildren) forall {
                                case (l, r) ⇒ compareElements(l, r, lang)
                            }
                        }
                    })
                case _ ⇒
                    // Ignore all other nodes
                    true
            }

            // Compare every non-English resource against the English baseline.
            for {
                resource ← doc / * / "resource" filterNot hasLang("en")
                lang     = resource attValue "*:lang"
            } locally {
                assert(compareElements(englishResource, resource, lang))
            }
        }
    }
}
| wesley1001/orbeon-forms | src/test/scala/org/orbeon/oxf/fr/ResourcesPatcherTest.scala | Scala | lgpl-2.1 | 6,480 |
import sbt.{ State => _, Configuration => _, Show => _, _ }
import Keys._
import scalaz._
import Scalaz.{ state => _, _}
import sbt.complete.DefaultParsers._
import sbt.complete._
import edu.gemini.osgi.tools.idea.{ IdeaModule, IdeaProject, IdeaProjectTask }
import edu.gemini.osgi.tools.app.{ Application, Configuration, AppBuilder }
import edu.gemini.osgi.tools.Version
import xml.PrettyPrinter
object OcsKeys // TODO: get rid of
// Setting and task keys shared across the OCS build: bundle/project graph
// queries, IDEA project generation, application packaging, and versioning.
// Mixed into build definitions alongside OcsBundleSettings.
trait OcsKey { this: OcsBundleSettings =>
  lazy val javaVersion = settingKey[String]("Java version to use for -source and -target, and IDEA projects.")
  lazy val ocsBootTime = settingKey[Long]("Time of last project reload, for caching.")
  lazy val ocsLibraryBundles = settingKey[List[File]]("List of all library bundles in the build.")
  lazy val ocsAllProjects = settingKey[List[ProjectRef]]("List of all projects in the build.")
  lazy val ocsAllBundleProjects = settingKey[List[ProjectRef]]("List of all bundle projects in the build.")
  lazy val ocsProjectDependencies = settingKey[Seq[ProjectRef]]("List of projects we depend on.")
  lazy val ocsProjectAggregate = settingKey[Seq[ProjectRef]]("List of projects we aggregate.")
  lazy val ocsDependencies = settingKey[Seq[ProjectRef]]("List of projects we depend on or aggregate.")
  lazy val ocsClosure = taskKey[Seq[ProjectRef]]("List of projects we depend on or aggregate, recursively.")
  lazy val ocsUsers = taskKey[Seq[ProjectRef]]("List of bundle projects that directly depend on us.")
  lazy val ocsBundleIdeaModuleAbstractPath = settingKey[File]("Abstract path of the [possibly non-existent] IDEA module for the current project.")
  lazy val ocsBundleIdeaModuleName = settingKey[String]("IDEA module name for the current project.")
  lazy val ocsBundleIdeaModule = taskKey[File]("Builds an IDEA module for the current project.")
  lazy val ocsBundleDependencies = taskKey[Unit]("Display a full tree of bundle dependencies.")
  lazy val ocsBundleDependencies0 = taskKey[Unit]("Display a list of direct bundle dependencies.")
  lazy val ocsBundleUsers = taskKey[Unit]("Display a full tree of bundle users.")
  lazy val ocsBundleUsers0 = taskKey[Unit]("Display a list of direct bundle users.")
  lazy val ocsBundleInfo = taskKey[OcsBundleInfo]("Show bundle info.")
  lazy val ocsAppManifest = settingKey[Application]("App manifest.")
  lazy val ocsAppInfo = taskKey[Unit]("Show information about the current app.")
  lazy val ocsJreDir = settingKey[File]("Directory where distribution JREs are stored.")
  lazy val ocsVersion = settingKey[OcsVersion]("OCS version for non-PIT bundles and applications.")
  lazy val pitVersion = settingKey[OcsVersion]("Version for PIT and its [unshared] bundles.")
}
// Isomorphic with the real spModel.core.Version, but used at build time
case class OcsVersion(semester: String, test: Boolean, xmlCompatibility: Int, serialCompatibility: Int, minor: Int) {
  // Year/half extractor for semesters like "2015B". The extractor-pattern val
  // below throws scala.MatchError at construction if `semester` is malformed.
  private val Pat = "(\\\\d{4})([AB])".r
  private val Pat(year, half) = semester
  // Semester half as a digit: "A" -> 0, "B" -> 1 (embedded in the OSGi version).
  private val halfDigit = if (half == "A") 0 else 1
  /** Convert to an OSGi-compatible version. */
  def toOsgiVersion: String =
    f"${year}%s${halfDigit}%d${xmlCompatibility}%02d.${serialCompatibility}%d.${minor}%d"
  // Parses the OSGi string back into the build-time Version type.
  def toBundleVersion: Version =
    Version.parse(toOsgiVersion)
  // Name of the Java source file generated by `toClass`.
  def sourceFileName = "CurrentVersion.java"
  // Java source generated into the build so runtime code can report the
  // version this build produced.
  def toClass = s"""
    |package edu.gemini.spModel.core;
    |
    |import java.text.ParseException;
    |
    |// AUTO-GENERATED; DO NOT MODIFY
    |
    |class CurrentVersion {
    |
    |  /** Current version, as generated from the build. N.B. bundles at this version have OSGi version ${toOsgiVersion} */
    |  static Version get() {
    |    try {
    |      return new Version(Semester.parse("$semester"), $test, $xmlCompatibility, $serialCompatibility, $minor);
    |    } catch (ParseException pe) {
    |      throw new Error("Bogus OCS Version; check the build.", pe);
    |    }
    |  }
    |
    |}
  """.trim.stripMargin
  // Human-readable form, e.g. "2015B.1.0.1" or "2015B-test.1.0.1".
  override def toString = {
    val testString = if (test) "-test" else ""
    s"${semester}${testString}.$xmlCompatibility.$serialCompatibility.$minor"
  }
} | spakzad/ocs | project/OcsKey.scala | Scala | bsd-3-clause | 4,113 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
// sjsonnew JsonFormat for SourcesResult: (de)serialises the single `items`
// field as a JSON object. NOTE: generated by sbt-contraband (see file
// header) — regenerate from the schema rather than editing by hand.
trait SourcesResultFormats { self: sbt.internal.bsp.codec.SourcesItemFormats with sjsonnew.BasicJsonProtocol =>
implicit lazy val SourcesResultFormat: JsonFormat[sbt.internal.bsp.SourcesResult] = new JsonFormat[sbt.internal.bsp.SourcesResult] {
  override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.internal.bsp.SourcesResult = {
    __jsOpt match {
      case Some(__js) =>
        unbuilder.beginObject(__js)
        val items = unbuilder.readField[Vector[sbt.internal.bsp.SourcesItem]]("items")
        unbuilder.endObject()
        sbt.internal.bsp.SourcesResult(items)
      case None =>
        // A JSON value is required; absence is a deserialization error.
        deserializationError("Expected JsObject but found None")
    }
  }
  override def write[J](obj: sbt.internal.bsp.SourcesResult, builder: Builder[J]): Unit = {
    builder.beginObject()
    builder.addField("items", obj.items)
    builder.endObject()
  }
}
}
| xuwei-k/xsbt | protocol/src/main/contraband-scala/sbt/internal/bsp/codec/SourcesResultFormats.scala | Scala | apache-2.0 | 1,102 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter13
import java.io._
import java.net.{MalformedURLException, URL}
/**
 * Exception-handling and loan-pattern examples (chapter 13).
 *
 * Running the app downloads a web page and prints its lines. `autoclose` and
 * `try_` are small loan-pattern helpers that guarantee the resource is closed
 * whether or not the body throws.
 */
object Exceptions extends App {
  autocloseExample()

  /** Classic nested try/finally resource handling: prints every line of the page. */
  def example2() = try {
    val url = new URL("http://baddotrobot.com")
    val reader = new BufferedReader(new InputStreamReader(url.openStream))
    try {
      // FIX: print-then-read. The previous read-then-print order skipped the
      // first line and printed a trailing "null".
      var line = reader.readLine
      while (line != null) {
        println(line)
        line = reader.readLine
      }
    } finally {
      reader.close()
    }
  } catch {
    case _: MalformedURLException => System.out.println("Bad URL")
    case e: IOException => System.out.println("Problem reading data from the web: " + e.getMessage)
  }

  /**
   * Loan pattern with a by-name body: evaluates `f` and always closes
   * `resource`, even if `f` throws.
   *
   * @return the value produced by `f`
   */
  def autoclose[A, B <: Closeable](resource: B)(f: => A): A =
    try {
      f
    } finally {
      resource.close()
    }

  /** Downloads and prints the page, using `autoclose` to manage the reader. */
  def autocloseExample() = {
    try {
      val url = new URL("http://baddotrobot.com")
      val reader = new BufferedReader(new InputStreamReader(url.openStream))
      autoclose(reader) {
        // FIX: print-then-read (see example2).
        var line = reader.readLine
        while (line != null) {
          println(line)
          line = reader.readLine
        }
      }
    } catch {
      case _: MalformedURLException => System.out.println("Bad URL")
      case e: IOException => System.out.println("Problem reading data from the web: " + e.getMessage)
    }
  }

  /**
   * Loan pattern that also passes the resource to the body, so the call site
   * needs no separate binding for it.
   */
  def try_[A, B <: Closeable](resource: B)(f: B => A): A =
    try {
      f(resource)
    } finally {
      resource.close()
    }

  /** Same as autocloseExample, but with `try_` supplying the reader. */
  def autocloseExample2() = {
    try {
      val url = new URL("http://baddotrobot.com")
      try_(new BufferedReader(new InputStreamReader(url.openStream))) {
        reader => {
          // FIX: print-then-read (see example2).
          var line = reader.readLine
          while (line != null) {
            println(line)
            line = reader.readLine
          }
        }
      }
    } catch {
      case _: MalformedURLException => System.out.println("Bad URL")
      case e: IOException => System.out.println("Problem reading data from the web: " + e.getMessage)
    }
  }
}
package com.github.mdr.mash.view.render.browser
import com.github.mdr.mash.view.render.{ KeyHint, LinesAndCursorPos, MashRenderingContext }
import com.github.mdr.mash.repl.browser.TextLinesBrowserState
import com.github.mdr.mash.screen.Style.StylableString
import com.github.mdr.mash.screen._
import com.github.mdr.mash.utils.Dimensions
import com.github.mdr.mash.utils.Utils._
/**
 * Renders the text-lines browser: a scrollable window of lines with the
 * selected row shown inverted, plus a status line (key hints or the
 * expression-input prompt when an expression is being edited).
 */
class TextLinesBrowserRenderer(state: TextLinesBrowserState,
                               terminalSize: Dimensions,
                               mashRenderingContext: MashRenderingContext)
  extends AbstractBrowserRenderer(state, terminalSize, mashRenderingContext) {
  // Visible window of lines; the selected row is rendered inverse (an empty
  // selected line becomes a single space so the inversion remains visible).
  protected def renderDataLines: Seq[Line] =
    for {
      (lineContents, index) ← state.model.renderedLines.zipWithIndex.window(state.firstRow, windowSize)
      isSelected = index == state.selectedRow && state.expressionStateOpt.isEmpty
      actualLineContents = if (isSelected && lineContents.isEmpty) " " else lineContents
    } yield Line(actualLineContents.style(Style(inverse = isSelected)))
  protected def renderLines: LinesAndCursorPos =
    combineUpperStatusLines(renderUpperStatusLines, renderDataLines ++ Seq(renderStatusLine))
  // "current/total (key hints)" status line shown during normal browsing.
  private def renderRegularStatusLine = {
    import KeyHint._
    val hints = Seq(Exit, Back, InsertWhole, NextParentResult, PreviousParentResult)
    val countChars = renderCount(state.selectedRow + 1, state.model.renderedLines.size)
    Line(countChars + " (".style + renderKeyHints(hints) + ")".style)
  }
  // Expression-input prompt takes precedence over the regular status line.
  private def renderStatusLine: Line =
    state.expressionStateOpt match {
      case Some(_) ⇒ StatusLineRenderers.renderExpressionInputStatusLine
      case None    ⇒ renderRegularStatusLine
    }
  protected val windowSize = state.windowSize(terminalSize.rows)
}
| mdr/mash | src/main/scala/com/github/mdr/mash/view/render/browser/TextLinesBrowserRenderer.scala | Scala | mit | 1,786 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.migration
import net.liftweb.common._
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import Migration_10_2_DATA_Other._
import Migration_10_2_DATA_Group._
import Migration_10_2_DATA_Directive._
import Migration_10_2_DATA_Rule._
import Migration_3_DATA_Other._
import Migration_3_DATA_Group._
import Migration_3_DATA_Directive._
import Migration_3_DATA_Rule._
import scala.xml.Elem
import com.normation.utils.XmlUtils
/**
 * Test individual event log data migration
 *
 * Checks that XmlMigration_10_2 rewrites each event log XML snippet from
 * fileFormat "1.0" to the expected fileFormat "2" form (fixtures come from
 * the Migration_10_2_DATA_* objects).
 */
@RunWith(classOf[JUnitRunner])
class TestXmlMigration_10_2 extends Specification with Loggable {
  val migration = new XmlMigration_10_2
  // Asserts the migration succeeded and the migrated XML matches the expected
  // one (whitespace-trimmed). `val Full(x) = b` throws MatchError — failing
  // the example — when the migration returns Empty/Failure.
  def compare(b:Box[Elem], e:Elem) = {
    val Full(x) = b
    XmlUtils.trim(x) must beEqualTo(XmlUtils.trim(e))
  }
  "rule migration from fileFormat '1.0' to '2'" should {
    "correctly rewrite add" in {
      compare(migration.rule(rule_add_10) , rule_add_2)
    }
    "correctly rewrite modify" in {
      compare(migration.rule(rule_modify_10), rule_modify_2)
    }
    "correctly rewrite delete" in {
      compare(migration.rule(rule_delete_10), rule_delete_2)
    }
  }
  "directive migration from fileFormat '1.0' to '2'" should {
    "correctly rewrite add" in {
      compare(migration.directive(directive_add_10), directive_add_2)
    }
    "correctly rewrite modify" in {
      compare(migration.directive(directive_modify_10), directive_modify_2)
    }
    "correctly rewrite delete" in {
      compare(migration.directive(directive_delete_10), directive_delete_2)
    }
  }
  "nodeGroup migration from fileFormat '1.0' to '2'" should {
    "correctly rewrite add" in {
      compare(migration.nodeGroup(nodeGroup_add_10), nodeGroup_add_2)
    }
    "correctly rewrite modify" in {
      compare(migration.nodeGroup(nodeGroup_modify_10), nodeGroup_modify_2)
    }
    "correctly rewrite delete" in {
      compare(migration.nodeGroup(nodeGroup_delete_10), nodeGroup_delete_2)
    }
  }
  "other migration from fileFormat '1.0' to '2'" should {
    "correctly rewrite 'add deployment status'" in {
      compare(migration.addPendingDeployment(addPendingDeployment_10), addPendingDeployment_2)
    }
    // introduced in 2.4 ?
    //    "correctly rewrite pending deployment status" in {
    //      migration.deploymentStatus(deploymentStatus_10) must beEqualTo(Full(deploymentStatus_2))
    //    }
    "correctly rewrite node acceptation status" in {
      compare(migration.node(node_accept_10), node_accept_2)
    }
  }
}
/**
 * Test individual event log data migration
 *
 * Checks that XmlMigration_2_3 rewrites each event log XML snippet from
 * fileFormat "2" to the expected fileFormat "3" form (fixtures come from the
 * Migration_3_DATA_* objects).
 */
@RunWith(classOf[JUnitRunner])
class TestXmlMigration_2_3 extends Specification with Loggable {
  val migration = new XmlMigration_2_3
  // Asserts the migration succeeded and the migrated XML matches the expected
  // one (whitespace-trimmed); fails with MatchError on Empty/Failure.
  def compare(b:Box[Elem], e:Elem) = {
    val Full(x) = b
    XmlUtils.trim(x) must beEqualTo(XmlUtils.trim(e))
  }
  "rule migration from fileFormat '2' to '3'" should {
    "correctly rewrite add" in {
      compare(migration.rule(rule_add_2) , rule_add_3)
    }
    "correctly rewrite modify" in {
      compare(migration.rule(rule_modify_2), rule_modify_3)
    }
    "correctly rewrite delete" in {
      compare(migration.rule(rule_delete_2), rule_delete_3)
    }
  }
  "directive migration from fileFormat '2' to '3'" should {
    "correctly rewrite add" in {
      compare(migration.other(directive_add_2), directive_add_3)
    }
    "correctly rewrite modify" in {
      compare(migration.other(directive_modify_2), directive_modify_3)
    }
    "correctly rewrite delete" in {
      compare(migration.other(directive_delete_2), directive_delete_3)
    }
  }
  "nodeGroup migration from fileFormat '2' to '3'" should {
    "correctly rewrite add" in {
      compare(migration.other(nodeGroup_add_2), nodeGroup_add_3)
    }
    "correctly rewrite modify" in {
      compare(migration.other(nodeGroup_modify_2), nodeGroup_modify_3)
    }
    "correctly rewrite delete" in {
      compare(migration.other(nodeGroup_delete_2), nodeGroup_delete_3)
    }
  }
  "other migration from fileFormat '2' to '3'" should {
    "correctly rewrite 'add deployment status'" in {
      compare(migration.other(addPendingDeployment_2), addPendingDeployment_3)
    }
    // introduced in 2.4 ?
    //    "correctly rewrite pending deployment status" in {
    //      migration.deploymentStatus(deploymentStatus_10) must beEqualTo(Full(deploymentStatus_2))
    //    }
    "correctly rewrite node acceptation status" in {
      compare(migration.other(node_accept_2), node_accept_3)
    }
  }
}
/**
 * Test individual event log data migration
 *
 * Checks the end-to-end migration from fileFormat "1.0" to "3" by chaining
 * XmlMigration_10_2 and XmlMigration_2_3 and comparing against the final
 * fileFormat "3" fixtures.
 */
@RunWith(classOf[JUnitRunner])
class TestXmlMigration_10_3 extends Specification with Loggable {
  val migration10_2 = new XmlMigration_10_2
  val migration2_3 = new XmlMigration_2_3
  // Kleisli-style composition of two migration steps: applies f, then g to
  // its result, short-circuiting on Empty/Failure.
  def compose(f: Elem => Box[Elem], g: Elem => Box[Elem], xml:Elem) : Box[Elem] = {
    for {
      fxml <- f(xml)
      gxml <- g(fxml)
    } yield {
      gxml
    }
  }
  // Asserts the composed migration succeeded and matches the expected XML
  // (whitespace-trimmed); fails with MatchError on Empty/Failure.
  def compare(b:Box[Elem], e:Elem) = {
    val Full(x) = b
    XmlUtils.trim(x) must beEqualTo(XmlUtils.trim(e))
  }
  "rule migration from fileFormat '1.0' to '3'" should {
    "correctly rewrite add" in {
      compare( compose(migration10_2.rule, migration2_3.rule, rule_add_10), rule_add_3)
    }
    "correctly rewrite modify" in {
      compare( compose(migration10_2.rule, migration2_3.rule, rule_modify_10), rule_modify_3)
    }
    "correctly rewrite delete" in {
      compare( compose(migration10_2.rule, migration2_3.rule, rule_delete_10), rule_delete_3)
    }
  }
  "directive migration from fileFormat '1.0' to '3'" should {
    "correctly rewrite add" in {
      compare( compose(migration10_2.directive, migration2_3.other, directive_add_10), directive_add_3)
    }
    "correctly rewrite modify" in {
      compare( compose(migration10_2.directive, migration2_3.other, directive_modify_10), directive_modify_3)
    }
    "correctly rewrite delete" in {
      compare( compose(migration10_2.directive, migration2_3.other, directive_delete_10), directive_delete_3)
    }
  }
  "nodeGroup migration from fileFormat '1.0' to '3'" should {
    "correctly rewrite add" in {
      compare( compose(migration10_2.nodeGroup, migration2_3.other, nodeGroup_add_10), nodeGroup_add_3)
    }
    "correctly rewrite modify" in {
      compare( compose(migration10_2.nodeGroup, migration2_3.other, nodeGroup_modify_10), nodeGroup_modify_3)
    }
    "correctly rewrite delete" in {
      compare( compose(migration10_2.nodeGroup, migration2_3.other, nodeGroup_delete_10), nodeGroup_delete_3)
    }
  }
  "other migration from fileFormat '1.0' to '3'" should {
    "correctly rewrite 'add deployment status'" in {
      compare( compose(migration10_2.addPendingDeployment, migration2_3.other, addPendingDeployment_10), addPendingDeployment_3)
    }
    // introduced in 2.4 ?
    //    "correctly rewrite pending deployment status" in {
    //      migration.deploymentStatus(deploymentStatus_10) must beEqualTo(Full(deploymentStatus_2))
    //    }
    "correctly rewrite node acceptation status" in {
      compare( compose(migration10_2.node, migration2_3.other, node_accept_10), node_accept_3)
    }
  }
}
| jooooooon/rudder | rudder-core/src/test/scala/com/normation/rudder/migration/TestXmlMigration_10_2.scala | Scala | agpl-3.0 | 8,765 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.collection
package mutable
import scala.reflect.ClassTag
/** A builder class for arrays.
 *
 *  Growth strategy: the backing array (`elems`, managed by the concrete
 *  subclass) is doubled from a minimum of 16 until it can hold the
 *  requested number of elements; `size` tracks how many have been added.
 *
 *  @tparam T    the type of the elements for the builder.
 */
@SerialVersionUID(3L)
sealed abstract class ArrayBuilder[T]
  extends ReusableBuilder[T, Array[T]]
    with Serializable {
  // Current length of the backing array (0 until first allocation).
  protected[this] var capacity: Int = 0
  // Backing array; allocated lazily by the concrete subclass.
  protected[this] def elems: Array[T]
  // Number of elements added so far.
  protected var size: Int = 0
  def length: Int = size
  override def knownSize: Int = size
  // Grows the backing array (doubling, starting at 16) until it can hold
  // `size` elements.
  protected[this] final def ensureSize(size: Int): Unit = {
    if (capacity < size || capacity == 0) {
      var newsize = if (capacity == 0) 16 else capacity * 2
      while (newsize < size) newsize *= 2
      resize(newsize)
    }
  }
  // sizeHint only ever grows the array; a smaller hint is ignored.
  override final def sizeHint(size: Int): Unit =
    if (capacity < size) resize(size)
  def clear(): Unit = size = 0
  // Reallocates the backing array to exactly `size`, preserving contents.
  protected[this] def resize(size: Int): Unit
  /** Add all elements of an array */
  def addAll(xs: Array[_ <: T]): this.type = addAll(xs, 0, xs.length)
  /** Add a slice of an array */
  def addAll(xs: Array[_ <: T], offset: Int, length: Int): this.type = {
    ensureSize(this.size + length)
    Array.copy(xs, offset, elems, this.size, length)
    size += length
    this
  }
  override def addAll(xs: IterableOnce[T]): this.type = {
    val k = xs.knownSize
    // Known size: reserve once and bulk-copy; unknown (k < 0): fall back to
    // element-by-element addition; k == 0: nothing to do.
    if (k > 0) {
      ensureSize(this.size + k)
      IterableOnce.copyElemsToArray(xs, elems, this.size)
      size += k
    } else if (k < 0) super.addAll(xs)
    this
  }
}
/** A companion object for array builders.
*/
object ArrayBuilder {
/** Creates a new arraybuilder of type `T`.
*
* @tparam T type of the elements for the array builder, with a `ClassTag` context bound.
* @return a new empty array builder.
*/
  @inline def make[T: ClassTag]: ArrayBuilder[T] = {
    val tag = implicitly[ClassTag[T]]
    // Dispatch on the runtime class so each primitive element type gets its
    // unboxed, specialized builder; any reference type falls through to ofRef.
    tag.runtimeClass match {
      case java.lang.Byte.TYPE      => new ArrayBuilder.ofByte().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Short.TYPE     => new ArrayBuilder.ofShort().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Character.TYPE => new ArrayBuilder.ofChar().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Integer.TYPE   => new ArrayBuilder.ofInt().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Long.TYPE      => new ArrayBuilder.ofLong().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Float.TYPE     => new ArrayBuilder.ofFloat().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Double.TYPE    => new ArrayBuilder.ofDouble().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Boolean.TYPE   => new ArrayBuilder.ofBoolean().asInstanceOf[ArrayBuilder[T]]
      case java.lang.Void.TYPE      => new ArrayBuilder.ofUnit().asInstanceOf[ArrayBuilder[T]]
      case _                        => new ArrayBuilder.ofRef[T with AnyRef]()(tag.asInstanceOf[ClassTag[T with AnyRef]]).asInstanceOf[ArrayBuilder[T]]
    }
  }
/** A class for array builders for arrays of reference types.
*
* This builder can be reused.
*
* @tparam T type of elements for the array builder, subtype of `AnyRef` with a `ClassTag` context bound.
*/
  @SerialVersionUID(3L)
  final class ofRef[T <: AnyRef](implicit ct: ClassTag[T]) extends ArrayBuilder[T] {
    // Backing buffer; null until first resize or after result() hands it off.
    protected var elems: Array[T] = _
    // Returns an array of exactly `size` with the current contents; reuses the
    // existing buffer when it already has exactly that size.
    private def mkArray(size: Int): Array[T] = {
      if (capacity == size && capacity > 0) elems
      else if (elems eq null) new Array[T](size)
      else java.util.Arrays.copyOf[T](elems, size)
    }
    protected[this] def resize(size: Int): Unit = {
      elems = mkArray(size)
      capacity = size
    }
    def addOne(elem: T): this.type = {
      ensureSize(size + 1)
      elems(size) = elem
      size += 1
      this
    }
    def result(): Array[T] = {
      // Buffer exactly full: hand it over and drop ownership (capacity = 0,
      // elems = null) so the builder stays safely reusable; otherwise copy out.
      if (capacity != 0 && capacity == size) {
        capacity = 0
        val res = elems
        elems = null
        res
      }
      else mkArray(size)
    }
    // Also nulls out retained references so cleared elements can be GC'd.
    override def clear(): Unit = {
      super.clear()
      if(elems ne null) java.util.Arrays.fill(elems.asInstanceOf[Array[AnyRef]], null)
    }
    // Note: `elems == x.elems` is reference equality on arrays.
    override def equals(other: Any): Boolean = other match {
      case x: ofRef[_] => (size == x.size) && (elems == x.elems)
      case _ => false
    }
    override def toString = "ArrayBuilder.ofRef"
  }
/** A class for array builders for arrays of `byte`s. It can be reused. */
  @SerialVersionUID(3L)
  final class ofByte extends ArrayBuilder[Byte] {
    // Backing buffer; null until first resize or after result() hands it off.
    protected var elems: Array[Byte] = _
    // Returns a fresh array of `size` containing the current contents.
    private def mkArray(size: Int): Array[Byte] = {
      val newelems = new Array[Byte](size)
      if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
      newelems
    }
    protected[this] def resize(size: Int): Unit = {
      elems = mkArray(size)
      capacity = size
    }
    def addOne(elem: Byte): this.type = {
      ensureSize(size + 1)
      elems(size) = elem
      size += 1
      this
    }
    def result(): Array[Byte] = {
      // Buffer exactly full: hand it over and drop ownership so the builder
      // can be reused without aliasing the returned array; otherwise copy out.
      if (capacity != 0 && capacity == size) {
        capacity = 0
        val res = elems
        elems = null
        res
      }
      else mkArray(size)
    }
    // Note: `elems == x.elems` is reference equality on arrays.
    override def equals(other: Any): Boolean = other match {
      case x: ofByte => (size == x.size) && (elems == x.elems)
      case _ => false
    }
    override def toString = "ArrayBuilder.ofByte"
  }
/** A class for array builders for arrays of `short`s. It can be reused. */
  @SerialVersionUID(3L)
  final class ofShort extends ArrayBuilder[Short] {
    // Backing buffer; null until first resize or after result() hands it off.
    protected var elems: Array[Short] = _
    // Returns a fresh array of `size` containing the current contents.
    private def mkArray(size: Int): Array[Short] = {
      val newelems = new Array[Short](size)
      if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
      newelems
    }
    protected[this] def resize(size: Int): Unit = {
      elems = mkArray(size)
      capacity = size
    }
    def addOne(elem: Short): this.type = {
      ensureSize(size + 1)
      elems(size) = elem
      size += 1
      this
    }
    def result(): Array[Short] = {
      // Buffer exactly full: hand it over and drop ownership so the builder
      // can be reused without aliasing the returned array; otherwise copy out.
      if (capacity != 0 && capacity == size) {
        capacity = 0
        val res = elems
        elems = null
        res
      }
      else mkArray(size)
    }
    // Note: `elems == x.elems` is reference equality on arrays.
    override def equals(other: Any): Boolean = other match {
      case x: ofShort => (size == x.size) && (elems == x.elems)
      case _ => false
    }
    override def toString = "ArrayBuilder.ofShort"
  }
/** A class for array builders for arrays of `char`s. It can be reused. */
  @SerialVersionUID(3L)
  final class ofChar extends ArrayBuilder[Char] {
    // Backing buffer; null until first resize or after result() hands it off.
    protected var elems: Array[Char] = _
    // Returns a fresh array of `size` containing the current contents.
    private def mkArray(size: Int): Array[Char] = {
      val newelems = new Array[Char](size)
      if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
      newelems
    }
    protected[this] def resize(size: Int): Unit = {
      elems = mkArray(size)
      capacity = size
    }
    def addOne(elem: Char): this.type = {
      ensureSize(size + 1)
      elems(size) = elem
      size += 1
      this
    }
    def result(): Array[Char] = {
      // Buffer exactly full: hand it over and drop ownership so the builder
      // can be reused without aliasing the returned array; otherwise copy out.
      if (capacity != 0 && capacity == size) {
        capacity = 0
        val res = elems
        elems = null
        res
      }
      else mkArray(size)
    }
    // Note: `elems == x.elems` is reference equality on arrays.
    override def equals(other: Any): Boolean = other match {
      case x: ofChar => (size == x.size) && (elems == x.elems)
      case _ => false
    }
    override def toString = "ArrayBuilder.ofChar"
  }
/** A class for array builders for arrays of `int`s. It can be reused. */
@SerialVersionUID(3L)
final class ofInt extends ArrayBuilder[Int] {
protected var elems: Array[Int] = _
private def mkArray(size: Int): Array[Int] = {
val newelems = new Array[Int](size)
if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
newelems
}
protected[this] def resize(size: Int): Unit = {
elems = mkArray(size)
capacity = size
}
def addOne(elem: Int): this.type = {
ensureSize(size + 1)
elems(size) = elem
size += 1
this
}
def result(): Array[Int] = {
if (capacity != 0 && capacity == size) {
capacity = 0
val res = elems
elems = null
res
}
else mkArray(size)
}
override def equals(other: Any): Boolean = other match {
case x: ofInt => (size == x.size) && (elems == x.elems)
case _ => false
}
override def toString = "ArrayBuilder.ofInt"
}
/** A class for array builders for arrays of `long`s. It can be reused. */
@SerialVersionUID(3L)
final class ofLong extends ArrayBuilder[Long] {
protected var elems: Array[Long] = _
private def mkArray(size: Int): Array[Long] = {
val newelems = new Array[Long](size)
if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
newelems
}
protected[this] def resize(size: Int): Unit = {
elems = mkArray(size)
capacity = size
}
def addOne(elem: Long): this.type = {
ensureSize(size + 1)
elems(size) = elem
size += 1
this
}
def result(): Array[Long] = {
if (capacity != 0 && capacity == size) {
capacity = 0
val res = elems
elems = null
res
}
else mkArray(size)
}
override def equals(other: Any): Boolean = other match {
case x: ofLong => (size == x.size) && (elems == x.elems)
case _ => false
}
override def toString = "ArrayBuilder.ofLong"
}
/** A class for array builders for arrays of `float`s. It can be reused. */
@SerialVersionUID(3L)
final class ofFloat extends ArrayBuilder[Float] {
protected var elems: Array[Float] = _
private def mkArray(size: Int): Array[Float] = {
val newelems = new Array[Float](size)
if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
newelems
}
protected[this] def resize(size: Int): Unit = {
elems = mkArray(size)
capacity = size
}
def addOne(elem: Float): this.type = {
ensureSize(size + 1)
elems(size) = elem
size += 1
this
}
def result(): Array[Float] = {
if (capacity != 0 && capacity == size) {
capacity = 0
val res = elems
elems = null
res
}
else mkArray(size)
}
override def equals(other: Any): Boolean = other match {
case x: ofFloat => (size == x.size) && (elems == x.elems)
case _ => false
}
override def toString = "ArrayBuilder.ofFloat"
}
/** A class for array builders for arrays of `double`s. It can be reused. */
@SerialVersionUID(3L)
final class ofDouble extends ArrayBuilder[Double] {
protected var elems: Array[Double] = _
private def mkArray(size: Int): Array[Double] = {
val newelems = new Array[Double](size)
if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
newelems
}
protected[this] def resize(size: Int): Unit = {
elems = mkArray(size)
capacity = size
}
def addOne(elem: Double): this.type = {
ensureSize(size + 1)
elems(size) = elem
size += 1
this
}
def result(): Array[Double] = {
if (capacity != 0 && capacity == size) {
capacity = 0
val res = elems
elems = null
res
}
else mkArray(size)
}
override def equals(other: Any): Boolean = other match {
case x: ofDouble => (size == x.size) && (elems == x.elems)
case _ => false
}
override def toString = "ArrayBuilder.ofDouble"
}
/** A class for array builders for arrays of `boolean`s. It can be reused. */
  @SerialVersionUID(3L)
  // NOTE(review): unlike the other primitive builders this class is not
  // `final` — confirm whether that is intentional.
  class ofBoolean extends ArrayBuilder[Boolean] {
    // Backing buffer; null until first resize or after result() hands it off.
    protected var elems: Array[Boolean] = _
    // Returns a fresh array of `size` containing the current contents.
    private def mkArray(size: Int): Array[Boolean] = {
      val newelems = new Array[Boolean](size)
      if (this.size > 0) Array.copy(elems, 0, newelems, 0, this.size)
      newelems
    }
    protected[this] def resize(size: Int): Unit = {
      elems = mkArray(size)
      capacity = size
    }
    def addOne(elem: Boolean): this.type = {
      ensureSize(size + 1)
      elems(size) = elem
      size += 1
      this
    }
    def result(): Array[Boolean] = {
      // Buffer exactly full: hand it over and drop ownership so the builder
      // can be reused without aliasing the returned array; otherwise copy out.
      if (capacity != 0 && capacity == size) {
        capacity = 0
        val res = elems
        elems = null
        res
      }
      else mkArray(size)
    }
    // Note: `elems == x.elems` is reference equality on arrays.
    override def equals(other: Any): Boolean = other match {
      case x: ofBoolean => (size == x.size) && (elems == x.elems)
      case _ => false
    }
    override def toString = "ArrayBuilder.ofBoolean"
  }
/** A class for array builders for arrays of `Unit` type. It can be reused. */
  @SerialVersionUID(3L)
  final class ofUnit extends ArrayBuilder[Unit] {
    // No buffer is kept: only the element count matters, since every Unit is ().
    protected def elems: Array[Unit] = throw new UnsupportedOperationException()
    def addOne(elem: Unit): this.type = {
      size += 1
      this
    }
    // Consumes the iterator just to count it.
    override def addAll(xs: IterableOnce[Unit]): this.type = {
      size += xs.iterator.size
      this
    }
    override def addAll(xs: Array[_ <: Unit], offset: Int, length: Int): this.type = {
      size += length
      this
    }
    // Materializes a fresh array of `size` unit values on every call.
    def result() = {
      val ans = new Array[Unit](size)
      var i = 0
      while (i < size) { ans(i) = (); i += 1 }
      ans
    }
    override def equals(other: Any): Boolean = other match {
      case x: ofUnit => (size == x.size)
      case _ => false
    }
    // Nothing to resize; no elements are stored.
    protected[this] def resize(size: Int): Unit = ()
    override def toString = "ArrayBuilder.ofUnit"
  }
}
| scala/scala | src/library/scala/collection/mutable/ArrayBuilder.scala | Scala | apache-2.0 | 13,731 |
package com.akkademy
import akka.actor.{ActorSystem, Props}
import akka.contrib.pattern.ClusterReceptionistExtension
import akka.routing.BalancingPool
// Application entry point: starts the actor system, the cluster controller,
// and a pool of article-parsing workers.
// NOTE(review): `extends App` relies on delayed initialization; consider an
// explicit `def main` for non-trivial startup.
object Main extends App {
  val system = ActorSystem("Akkademy")
  val clusterController = system.actorOf(Props[ClusterController], "clusterController")
  // BalancingPool of five ArticleParseActor instances; registered with the
  // cluster receptionist so it is reachable from outside the cluster.
  val workers = system.actorOf(BalancingPool(5).props(Props[ArticleParseActor]), "workers")
  ClusterReceptionistExtension(system).registerService(workers)
}
| jasongoodwin/learning-akka | ch6/akkademaid-scala/src/main/scala/com/akkademy/Akkamaid.scala | Scala | apache-2.0 | 467 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* Mass measure.
*
* @author Araik Grigoryan
*/
// Constructor is private: base measures are created via the companion's
// `apply`; derived measures via `composes`.
case class MassMeasure private(name: String, system: SystemOfUnits, base: Option[(MassMeasure, Double)]) extends Measure[MassMeasure]
{
  type D = MassDimension

  val dimension = Mass

  // Derives a new mass measure defined as `multiple` of this one.
  override def composes(name: String, system: SystemOfUnits, multiple: Double) = new MassMeasure(name, system, Some(this, multiple))

  override def toString = name
}
object MassMeasure
{
  // Creates a base (underived) mass measure with no reference measure.
  def apply(name: String, system: SystemOfUnits): MassMeasure = new MassMeasure(name, system, None)
}
| quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/MassMeasure.scala | Scala | apache-2.0 | 1,216 |
package com.recursivity.bowler
import org.scalatest.FunSuite
import stub.ListModelComponent
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: Nov 10, 2010
* Time: 10:55:38 PM
* To change this template use File | Settings | File Templates.
*/
class ListModelTest extends FunSuite{
  test("test ListModel"){
    val parent = new ListModelComponent
    // One <li> per tuple; populateItem adds both tuple fields to the item.
    parent.add(new ListModel[Tuple2[String, String]](Some("list"),{getList}){
      def populateItem(container: Container, listItem: (String, String)) = {
        container.add(new SimpleRenderable(Some("firstName"), {listItem._1}))
        container.add(new SimpleRenderable(Some("lastName"), {listItem._2}))
      }
    })
    assert(parent.render.equals("<ul><li>Wille Faler</li><li>John Doe</li><li>Jane Doe</li></ul>"))
  }

  // Fixture data: (firstName, lastName) pairs.
  def getList = List(("Wille", "Faler"), ("John", "Doe"), ("Jane", "Doe"))
}
} | rossabaker/Handlebar | src/test/scala/com/recursivity/bowler/ListModelTest.scala | Scala | bsd-3-clause | 869 |
package com.raquo.domtypes.fixtures.keys
import com.raquo.domtypes.generic.codecs.Codec
/**
 * This class represents an SVG element attribute — the key that can be set, not the whole key-value pair.
 *
 * @param name      the attribute name as it appears in the DOM
 * @param codec     converts between values of type `V` and their string form
 * @param namespace optional namespace for the attribute
 * @tparam V type of values that this Attribute can be set to
 */
class SvgAttr[V] (override val name: String, val codec: Codec[V, String], val namespace: Option[String]) extends Key
| raquo/scala-dom-types | shared/src/test/scala/com/raquo/domtypes/fixtures/keys/SvgAttr.scala | Scala | mit | 400 |
package ru.org.codingteam.horta.plugins.markov
/** Identifies a user by chat room and nickname within that room. */
case class UserIdentity(room: String, nickname: String)
package com.mesosphere.universe
/**
 * Package assets.
 *
 * @param uris      optional map of asset URIs — presumably name -> URI; see GitHub issue #58
 * @param container optional container description
 */
case class Assets(
  uris: Option[Map[String, String]], // GitHub issue #58
  container: Option[Container]
)
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/universe/Assets.scala | Scala | apache-2.0 | 142 |
package com.twitter.finagle.mux.transport
import com.twitter.finagle.{Failure, FailureFlags}
import com.twitter.io.{Buf, BufByteWriter}
import org.scalatest.FunSuite
class MuxFailureTest extends FunSuite {

  // Minimal FailureFlags carrier used to exercise flag conversion without a
  // full Failure instance; copyWithFlags is intentionally unimplemented.
  class FlaggedClass(val flags: Long) extends FailureFlags[FlaggedClass] {
    protected def copyWithFlags(f: Long): FlaggedClass = ???
  }

  test("Flag values") {
    // Pins the wire-level bit positions of the mux failure flags.
    assert(MuxFailure.Retryable == 1L << 0)
    assert(MuxFailure.Rejected == 1L << 1)
    assert(MuxFailure.NonRetryable == 1L << 2)
  }

  test("convert flags with c.t.f.FailureFlags") {
    // Pairs of (finagle flag set, equivalent mux flag set).
    val flagTests = Seq(
      (FailureFlags.Retryable | FailureFlags.Rejected, MuxFailure.Retryable | MuxFailure.Rejected),
      (FailureFlags.NonRetryable, MuxFailure.NonRetryable),
      (0L, 0L)
    )

    flagTests.foreach {
      case (finagle, mux) =>
        // Conversion must hold in both directions and for any FailureFlags impl.
        assert(MuxFailure(mux).finagleFlags == finagle)
        assert(MuxFailure.fromThrow(Failure(":(", finagle)).flags == mux)
        assert(MuxFailure.fromThrow(new FlaggedClass(finagle)).flags == mux)
    }
  }

  test("Convert to & from context pairs") {
    val muxFail = MuxFailure(MuxFailure.NonRetryable)
    // Flags are transported as an 8-byte big-endian long under key "MuxFailure".
    val expectedContext = Seq(
      (Buf.Utf8("MuxFailure"), BufByteWriter.fixed(8).writeLongBE(MuxFailure.NonRetryable).owned())
    )
    assert(muxFail.contexts.equals(expectedContext))
    // Round trip
    assert(MuxFailure.fromContexts(muxFail.contexts) == Some(muxFail))
    // Special case - No relevant info, so no need to pass context.
    assert(MuxFailure.Empty.contexts == Nil)
  }
}
| mkhq/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/transport/MuxFailureTest.scala | Scala | apache-2.0 | 1,541 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBigDecimal, Linked}
import uk.gov.hmrc.ct.ct600a.v2.A13
/** CT600 (v2) box B79 — "Tax payable under S419 ICTA 1988"; holds an optional monetary amount. */
case class B79(value: Option[BigDecimal]) extends CtBoxIdentifier("B79 - Tax payable under S419 ICTA 1988") with CtOptionalBigDecimal
/** B79 is a linked box: its value is copied verbatim from box A13. */
object B79 extends Linked[A13, B79] {
  override def apply(source: A13): B79 = B79(source.value)
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B79.scala | Scala | apache-2.0 | 984 |
package es.upm.oeg.pnk.transformations
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
/**
* Created by cbadenes on 08/08/15.
*/
@RunWith(classOf[JUnitRunner])
class CleanerTest extends FunSuite {

  /**
   * Cleans `input` with [[Cleaner]], prints the three strings exactly as the
   * original tests did, and asserts the cleaned output equals `expected`.
   * Extracted because all four tests repeated this body verbatim.
   */
  private def assertCleaned(input: String, expected: String): Unit = {
    val obtained: String = Cleaner(input)
    println(s"Input: " + input)
    println(s"Expected: " + expected)
    println(s"Obtained: " + obtained)
    assert(expected.equals(obtained))
  }

  test("Clean text") {
    assertCleaned(
      "\\fw\\n\\n\\fJUZGADO CENTRAL INSTRUCCION\\n\\nFecha ...... :\\n\\nOF.REG! REP DOS.CENT. INSTRUCCION\\n\\nHora\\n\\nMADRID( MADRID)\\n\\nUsuario Pet:\\n\\nju7500\\n\\nPENAL\\n\\nUsuarlo Reg:\\n\\nju7500",
      "w . Juzgado Central Instruccion . Fecha . Of.Reg! Rep Dos.Cent . Instruccion . Hora . Madrid Madrid . Usuario Pet: . ju7500 . Penal . Usuarlo Reg: . ju7500")
  }

  test("Inline parenthesis text") {
    assertCleaned("MADRID( MADRID)", "Madrid Madrid")
  }

  test("Final point text") {
    assertCleaned("en Madrid.", "en Madrid .")
  }

  test("Final colon text") {
    assertCleaned("en Madrid,", "en Madrid ,")
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.datastream
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.flink.table.functions.FunctionLanguage
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.datastream.DataStreamPythonCalc
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalCalc
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.plan.util.PythonUtil.containsFunctionOf
import scala.collection.JavaConverters._
/**
 * Converter rule that turns a [[FlinkLogicalCalc]] whose program calls Python
 * functions into a [[DataStreamPythonCalc]] in the DataStream convention.
 */
class DataStreamPythonCalcRule
  extends ConverterRule(
    classOf[FlinkLogicalCalc],
    FlinkConventions.LOGICAL,
    FlinkConventions.DATASTREAM,
    "DataStreamPythonCalcRule") {

  // Applies only when at least one expression in the calc program invokes a
  // Python-language function.
  override def matches(call: RelOptRuleCall): Boolean = {
    val calc: FlinkLogicalCalc = call.rel(0).asInstanceOf[FlinkLogicalCalc]
    val program = calc.getProgram
    program.getExprList.asScala.exists(containsFunctionOf(_, FunctionLanguage.PYTHON))
  }

  def convert(rel: RelNode): RelNode = {
    val calc: FlinkLogicalCalc = rel.asInstanceOf[FlinkLogicalCalc]
    // Move the node (and its input) into the DataStream physical convention.
    val traitSet: RelTraitSet = rel.getTraitSet.replace(FlinkConventions.DATASTREAM)
    val convInput: RelNode = RelOptRule.convert(calc.getInput, FlinkConventions.DATASTREAM)
    new DataStreamPythonCalc(
      rel.getCluster,
      traitSet,
      convInput,
      new RowSchema(convInput.getRowType),
      new RowSchema(rel.getRowType),
      calc.getProgram,
      "DataStreamPythonCalcRule")
  }
}
object DataStreamPythonCalcRule {
  /** Singleton instance of the rule. */
  val INSTANCE: RelOptRule = new DataStreamPythonCalcRule
}
| mbode/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamPythonCalcRule.scala | Scala | apache-2.0 | 2,498 |
// NOTE(review): looks like a minimal compiler test fixture — a singleton-type
// alias (`Foo.type`) declared before the object it refers to — confirm intent.
object Test
{
  type T = Foo.type
  object Foo
  def main(argv : Array[String]) : Unit = {
  }
}
| densh/dotty | tests/pos/t604.scala | Scala | bsd-3-clause | 122 |
package eu.shiftforward.deploylogger.entities
import DeployStatus._
import eu.shiftforward.deploylogger.entities.ModuleStatus._
import spray.json.{ RootJsonFormat, DefaultJsonProtocol }
/** Marker trait for response payloads returned by the deploy logger. */
sealed trait Response
/**
 * A module name/version pair and its current status.
 *
 * NOTE(review): unlike the other Response* case classes this one does not
 * extend `Response` — confirm whether that is intentional.
 */
case class ResponseModule(
  name: String,
  version: String,
  status: ModuleStatus
)
/**
 * Project metadata: name, description, creation timestamp (epoch-based —
 * TODO confirm seconds vs. millis) and git location.
 */
case class ResponseProject(
  name: String,
  description: String,
  createdAt: Long,
  git: String
) extends Response
/**
 * Full record of one deploy: who triggered it, the commit it was built from,
 * its event history, the affected modules and the configuration used.
 */
case class ResponseDeploy(
  user: String,
  timestamp: Long,
  commitBranch: String,
  commitHash: String,
  description: String,
  events: List[ResponseEvent],
  changelog: String,
  id: String,
  version: String,
  automatic: Boolean,
  client: String,
  modules: List[ResponseModule],
  configuration: String
) extends Response
/** A single status transition in a deploy's lifecycle. */
case class ResponseEvent(
  timestamp: Long,
  status: DeployStatus,
  description: String
) extends Response
/** Spray JSON (de)serialization for [[ResponseProject]]. */
object ResponseProject extends DefaultJsonProtocol {
  implicit val projFormat: RootJsonFormat[ResponseProject] = jsonFormat4(ResponseProject.apply)
}
/** Spray JSON (de)serialization for [[ResponseDeploy]]. */
object ResponseDeploy extends DefaultJsonProtocol {
  implicit val deployFormat: RootJsonFormat[ResponseDeploy] = jsonFormat13(ResponseDeploy.apply)
}
/** Spray JSON (de)serialization for [[ResponseEvent]]. */
object ResponseEvent extends DefaultJsonProtocol {
  implicit val eventFormat: RootJsonFormat[ResponseEvent] = jsonFormat3(ResponseEvent.apply)
}
/** Spray JSON (de)serialization for [[ResponseModule]]. */
object ResponseModule extends DefaultJsonProtocol {
  implicit val moduleFormat: RootJsonFormat[ResponseModule] = jsonFormat3(ResponseModule.apply)
}
| ShiftForward/malamute | src/main/scala/eu/shiftforward/deploylogger/entities/Response.scala | Scala | mit | 1,464 |
package com.github.apognu.kafkaconnectslack
import java.util.{List => JList, Map => JMap}
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.source.SourceConnector
/**
 * Kafka Connect source connector that produces Slack data to a Kafka topic.
 *
 * NOTE(review): `channelList` is never populated from the configuration, so
 * `taskConfigs` derives its task count from an empty string — confirm whether
 * a channel-list directive is missing from `start`.
 */
class SlackSourceConnector extends SourceConnector {
  var topic: String = ""
  var channelList: String = ""
  var apiKey: String = ""

  override def version(): String = "1.0"

  override val taskClass: Class[_ <: Task] = classOf[SlackSourceTask]

  /**
   * Reads the connector configuration and validates required settings.
   *
   * Bug fix: the original checked `Config.kafkaTopicDirective.isEmpty` and
   * `Config.slackApiKeyDirective.isEmpty` — the constant directive *names*,
   * which are never empty — so the validation could never fire. We now check
   * the configured *values*.
   */
  override def start(conf: JMap[String, String]) = {
    topic = conf.getOrDefault(Config.kafkaTopicDirective, "")
    apiKey = conf.getOrDefault(Config.slackApiKeyDirective, "")

    if (topic.isEmpty) throw new ConnectException(s"SlackSourceConnector configuration must include '${Config.kafkaTopicDirective}' setting")
    if (apiKey.isEmpty) throw new ConnectException(s"SlackSourceConnector configuration must include '${Config.slackApiKeyDirective}' setting")
  }

  override def stop() = { }

  /** Emits one identical task configuration per entry in `channelList`. */
  override def taskConfigs(maxTasks: Int): JList[JMap[String, String]] = {
    import scala.collection.JavaConverters._

    // The channel value itself is not used in the per-task config (yet).
    val configs = channelList.split(",").map { _ =>
      Map(
        Config.kafkaTopicDirective -> topic,
        Config.slackApiKeyDirective -> apiKey
      ).asJava
    }

    configs.toList.asJava
  }

  override def config(): ConfigDef = new ConfigDef()
    .define(Config.kafkaTopicDirective, Type.STRING, Importance.HIGH, "Destination Kafka topic")
    .define(Config.slackApiKeyDirective, Type.STRING, Importance.HIGH, "Slack API key")
}
| apognu/kafka-connect-slack | src/main/scala/com/github/apognu/kafkaconnectslack/SlackSourceConnector.scala | Scala | mit | 1,731 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala
package collection
package mutable
import generic._
import convert.Wrappers._
/** A hash map with references to entries which are weakly reachable. Entries are
* removed from this map when the key is no longer (strongly) referenced. This class wraps
* `java.util.WeakHashMap`.
*
* @tparam A type of keys contained in this map
* @tparam B type of values associated with the keys
*
* @since 2.8
* @see [[http://docs.scala-lang.org/overviews/collections/concrete-mutable-collection-classes.html#weak-hash-maps "Scala's Collection Library overview"]]
* section on `Weak Hash Maps` for more information.
*
* @define Coll `WeakHashMap`
* @define coll weak hash map
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
@SerialVersionUID(3L)
class WeakHashMap[A, B] extends JMapWrapper[A, B](new java.util.WeakHashMap)
                        with JMapWrapperLike[A, B, WeakHashMap, WeakHashMap[A, B]] {
  // A fresh, empty weak hash map of the same key/value types.
  override def empty = new WeakHashMap[A, B]
  override def mapFactory: MapFactory[WeakHashMap] = WeakHashMap
}
/** $factoryInfo
* @define Coll `WeakHashMap`
* @define coll weak hash map
*/
object WeakHashMap extends MapFactory[WeakHashMap] {
  def empty[K, V]: WeakHashMap[K,V] = new WeakHashMap[K, V]
  // Builds a map by growing an empty instance with all elements of `it`.
  def from[K, V](it: collection.IterableOnce[(K, V)]): WeakHashMap[K,V] = Growable.from(empty[K, V], it)
  def newBuilder[K, V](): Builder[(K, V), WeakHashMap[K,V]] = new GrowableBuilder(WeakHashMap.empty[K, V])
}
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/mutable/WeakHashMap.scala | Scala | apache-2.0 | 1,987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.input.config
import com.bwsw.sj.common.engine.core.config.EngineConfigNames.instance
/**
* Names of configurations for application config
*/
object InputEngineConfigNames {
  /** Root configuration key for the input instance, under the engine's instance key. */
  val inputInstance: String = s"$instance.input"
  /** Configuration key for the input instance's hosts. */
  val hosts: String = s"$inputInstance.hosts"
  /** Configuration key for the input instance's entry port. */
  val entryPort: String = s"$inputInstance.entry.port"
}
| bwsw/sj-platform | core/sj-input-streaming-engine/src/main/scala/com/bwsw/sj/engine/input/config/InputEngineConfigNames.scala | Scala | apache-2.0 | 1,145 |
package exam.national_center_test.xml
import scala.xml.NodeSeq
/**
* <pre>
* Created on 5/29/15.
* </pre>
* @param context context
* @param smallQuestions small questions
* @author K.Sakamoto
*/
class MiddleQuestion(val context: NodeSeq,
                     val smallQuestions: Seq[SmallQuestion]) {
  // Debug-style rendering of the context and its sub-questions.
  // NOTE(review): the final line of the interpolated string carries no margin
  // character, so stripMargin leaves a trailing indented blank line in the
  // output — confirm whether that is intended.
  override def toString: String = {
    s"""Context: $context
       |Small Questions: $smallQuestions
    """.stripMargin
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/exam/national_center_test/xml/MiddleQuestion.scala | Scala | apache-2.0 | 440 |
package io.udash.generator.plugins.core
import java.io.File
import io.udash.generator.plugins._
import io.udash.generator.plugins.sbt.SBTProjectFiles
import io.udash.generator.plugins.utils.{FrontendPaths, UtilPaths}
import io.udash.generator.utils._
import io.udash.generator.{FrontendOnlyProject, GeneratorPlugin, GeneratorSettings, StandardProject}
object CoreDemosPlugin extends GeneratorPlugin with SBTProjectFiles with FrontendPaths with UtilPaths {
  override val dependencies = Seq(CorePlugin)

  /**
   * Generates a data-binding demo view in the target project and links it
   * from the index view. Returns the settings unchanged.
   */
  override def run(settings: GeneratorSettings): GeneratorSettings = {
    // Resolve the frontend source root; standard projects keep the frontend
    // in its own subdirectory.
    val rootPck: File = settings.projectType match {
      case FrontendOnlyProject =>
        rootPackageInSrc(settings.rootDirectory, settings)
      case StandardProject(_, shared, frontend) =>
        rootPackageInSrc(settings.rootDirectory.subFile(frontend), settings)
    }

    val stateName = createDemoView(rootPck, settings)
    addIndexLink(rootPck, stateName)

    settings
  }

  // Appends two menu links for the demo state to the generated IndexView.
  private def addIndexLink(rootPackage: File, state: String): Unit = {
    val indexViewScala = viewsPackageInSrc(rootPackage).subFile("IndexView.scala")
    requireFilesExist(Seq(indexViewScala))

    appendOnPlaceholder(indexViewScala)(FrontendIndexMenuPlaceholder,
      s""",
         |        li(a(${FrontendStylesLinkBlackPlaceholder}href := $state().url)("Binding demo")),
         |        li(a(${FrontendStylesLinkBlackPlaceholder}href := $state("From index").url)("Binding demo with URL argument"))""".stripMargin)
  }

  /**
   * Creates the BindingDemo view source file and registers its routing state
   * in the generated states/routing/view-presenter registries.
   * Returns the name of the generated state class.
   */
  private def createDemoView(rootPackage: File, settings: GeneratorSettings): String = {
    val statesScala = rootPackage.subFile("states.scala")
    val routingRegistryDefScala = rootPackage.subFile("RoutingRegistryDef.scala")
    val statesToViewPresenterDefScala = rootPackage.subFile("StatesToViewPresenterDef.scala")
    val bindingDemoViewScala = viewsPackageInSrc(rootPackage).subFile("BindingDemoView.scala")

    val stateName = "BindingDemoState"

    requireFilesExist(Seq(viewsPackageInSrc(rootPackage), statesScala, routingRegistryDefScala, statesToViewPresenterDefScala))
    createFiles(Seq(bindingDemoViewScala), requireNotExists = true)

    // Register the state class.
    appendOnPlaceholder(statesScala)(FrontendStatesRegistryPlaceholder,
      s"""
         |
         |case class $stateName(urlArg: String = "") extends RoutingState(RootState)""".stripMargin)

    // Map the /binding URL (with and without an argument) to the state.
    appendOnPlaceholder(routingRegistryDefScala)(FrontendRoutingRegistryPlaceholder,
      s"""
         |    case "/binding" => $stateName("")
         |    case "/binding" /:/ arg => $stateName(arg)""".stripMargin)

    // Map the state to its view presenter.
    appendOnPlaceholder(statesToViewPresenterDefScala)(FrontendVPRegistryPlaceholder,
      s"""
         |    case $stateName(urlArg) => BindingDemoViewPresenter(urlArg)""".stripMargin)

    // Emit the demo view source itself.
    writeFile(bindingDemoViewScala)(
      s"""package ${settings.rootPackage.mkPackage()}.${settings.viewsSubPackage.mkPackage()}
         |
         |import io.udash._
         |import ${settings.rootPackage.mkPackage()}.$stateName
         |import org.scalajs.dom.Element$FrontendImportsPlaceholder
         |
         |case class BindingDemoViewPresenter(urlArg: String) extends DefaultViewPresenterFactory[$stateName](() => {
         |  import ${settings.rootPackage.mkPackage()}.Context._
         |
         |  val model = Property[String](urlArg)
         |  new BindingDemoView(model)
         |})
         |
         |class BindingDemoView(model: Property[String]) extends View {
         |  import scalatags.JsDom.all._
         |
         |  private val content = div(
         |    h2(
         |      "You can find this demo source code in: ",
         |      i("${settings.rootPackage.mkPackage()}.${settings.viewsSubPackage.mkPackage()}.BindingDemoView")
         |    ),
         |    h3("Example"),
         |    TextInput.debounced(model, placeholder := "Type something..."),
         |    p("You typed: ", bind(model)),
         |    h3("Read more"),
         |    a$FrontendStylesLinkBlackPlaceholder(href := "${settings.udashDevGuide}#/frontend/bindings", target := "_blank")("Read more in Udash Guide.")
         |  )
         |
         |  override def getTemplate: Modifier = content
         |
         |  override def renderChild(view: View): Unit = {}
         |}
         |""".stripMargin
    )

    stateName
  }
}
| UdashFramework/udash-generator | core/src/main/scala/io/udash/generator/plugins/core/CoreDemosPlugin.scala | Scala | apache-2.0 | 4,298 |
package system
import controllers.HitherS3Signer
import fly.play.s3.{S3Client, S3Configuration}
import play.api.libs.ws.WS
import play.api.{Application, Logger}
import system.index.S3Index
import system.registry.{Registry, S3Registry}
// Production wiring for the S3-backed index and registry.
object Production {
  // NOTE: `signer` is a strict val reading the lazy `s3Config`, so both are
  // initialised as soon as this object is first touched.
  private val signer: HitherS3Signer = new HitherS3Signer(s3Config.credentials, s3Config.region, Configuration.aws.proxy)

  private lazy val s3Config = S3Configuration.fromConfig(play.api.Play.current)

  lazy val s3Index = new S3Index {
    override def bucketName: String = Configuration.s3.bucketName
    override implicit def app: Application = play.api.Play.current
    override lazy val s3Client = new S3Client(WS.client, signer, s3Config)
    override lazy val logger = Logger
  }

  // Currently only "s3" storage is supported.
  lazy val index = Configuration.hither.storage match {
    case "s3" => s3Index
    case s => throw new IllegalArgumentException(s"Don't recognise storage type '$s'")
  }

  lazy val s3Registry = new S3Registry {
    override implicit def app: Application = play.api.Play.current
    override val bucketName: String = Configuration.s3.bucketName
    override def s3Client: S3Client = new S3Client(WS.client, signer, s3Config)
    override val logger = Logger

    // One-time startup logging; credentials are obfuscated before logging.
    Logger.debug("Initialising S3 registry")
    Logger.debug(s"Using aws.accessKeyId ${Configuration.aws.accessKeyId.map(s => obfuscate(s))}")
    Logger.debug(s"Using aws.secretKey ${Configuration.aws.secretKey.map(s => obfuscate(s))}")
    Logger.debug(s"Using region ${Configuration.s3.region}")
    Logger.debug(s"Using bucket $bucketName")

    Configuration.aws.proxy match {
      case Some(proxy) => Logger.debug(s"Using proxy server $proxy")
      case None => Logger.debug("Not using proxy server")
    }
  }

  // Masks all but the last `show` characters of `s` with '*'.
  // Strings of length <= `show` are masked entirely.
  def obfuscate(s: String, show: Int = 3): String = {
    val hide = if (s.length > show) s.length - show else s.length
    List.fill(hide)('*').mkString + s.substring(hide)
  }

  lazy val registry: Registry = Configuration.hither.storage match {
    case "s3" => s3Registry
    case s => throw new IllegalArgumentException(s"Don't recognise storage type '$s'")
  }
}
| WiredThing/hither | app/system/Production.scala | Scala | mit | 2,107 |
package core.pcomponents
import core.data.{ AbstractData, BoundData }
import core.{ InputConnector, OutputConnector, PComponent }
/**
* Created by OpenDataFlow on 31/12/15.
*/
/**
 * Identity processing component: declares one input ("sink") and one output
 * ("source") connector. The actual data-forwarding logic is not implemented yet.
 */
class Ident extends PComponent {
  addConnector(InputConnector("sink", new AbstractData, "Writes data somewhere"))
  addConnector(OutputConnector("source", new AbstractData, "Reads data from somewhere"))
  /**
   * run method is the one that actually performs the work
   */
  // NOTE(review): still a stub — calling run() throws NotImplementedError.
  override def run(): Unit = ???
}
}
| opendataflow/opendataflow | opendataflow-core/src/main/scala/core/pcomponents/Ident.scala | Scala | apache-2.0 | 491 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.Attachment
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 07/11/17.
*/
/**
* Attachment Repository
* @param session
* @param executionContext
*/
class AttachmentRepository(session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.AttachmentRepository[Attachment , Int]
    with AttachmentMapping {

  /**
    * Loads the attachment with the given id.
    * The returned future fails with NoSuchElementException if no row matches
    * (unchanged contract: `.headOption.get`).
    */
  def getById(id: Int): Future[Attachment] = {
    Future(run(queryAttachment.filter(_.attachmentId == lift(id))).headOption.get)
  }

  /** Loads the attachment with the given UUID; the future fails if absent. */
  def getByUUID(uuid: UUID): Future[Attachment] = {
    Future(run(queryAttachment.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }

  /**
    * Attachments matching the given attachment id.
    * Fix: the previous implementation ignored `id` and returned every row.
    */
  def getByAttachmentId(id : Int) : Future[List[Attachment]] = {
    Future(run(queryAttachment.filter(_.attachmentId == lift(id))))
  }

  /** All attachments in the table. */
  def getAll() : Future[List[Attachment]] = {
    Future(run(queryAttachment))
  }

  /**
    * One page of attachments plus the total row count.
    * Fix: a page now holds at most `pageSize` elements — previously
    * `selectAttachment` was handed `(page + 1) * pageSize` as the take count,
    * so later pages returned many pages' worth of rows.
    */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[Attachment]] = {
    val offset = page * pageSize
    for {
      count <- countAttachment()
      elements <- if (offset > count) Future.successful(Nil)
      else selectAttachment(offset, pageSize)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }

  // Total number of attachment rows.
  private def countAttachment() = {
    Future(run(queryAttachment.size).toInt)
  }

  // One page of rows. Fix: OFFSET/LIMIT are now part of the SQL query
  // (quill `drop`/`take` with lifted values) instead of fetching the whole
  // table and slicing it in memory.
  private def selectAttachment(offset: Int, limit: Int): Future[Seq[Attachment]] = {
    Future(run(queryAttachment.drop(lift(offset)).take(lift(limit))).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/AttachmentRepository.scala | Scala | gpl-3.0 | 2,740 |
package sttp.client3.akkahttp
import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods, HttpRequest}
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import sttp.client3.Request
import sttp.model.{Header, Method}
import scala.collection.immutable.Seq
import scala.util.{Failure, Success, Try}
private[akkahttp] object ToAkka {
  /** Builds an akka-http request carrying the sttp request's method, URI and parsed headers. */
  def request(r: Request[_, _]): Try[HttpRequest] = {
    val base = HttpRequest(uri = r.uri.toString, method = method(r.method))
    headers(r.headers).map(base.withHeaders)
  }

  /**
    * Converts sttp headers into akka-http headers. Content-Type and Content-Length
    * are skipped: akka requires those to be set via the body entity, not as headers.
    * Fails with a RuntimeException listing every header that could not be parsed.
    */
  def headers(headers: Seq[Header]): Try[Seq[HttpHeader]] = {
    val parseResults =
      headers.collect {
        case h if !Util.isContentType(h) && !Util.isContentLength(h) =>
          HttpHeader.parse(h.name, h.value)
      }
    val errors = parseResults.collect { case ParsingResult.Error(e) =>
      e
    }
    if (errors.nonEmpty) {
      Failure(new RuntimeException(s"Cannot parse headers: $errors"))
    } else {
      val parsedHeaders = parseResults.collect { case ParsingResult.Ok(h, _) =>
        h
      }
      Success(parsedHeaders.toList)
    }
  }

  /** Maps an sttp method onto the predefined akka-http method, or a custom one. */
  private def method(m: Method): HttpMethod =
    m match {
      case Method.GET     => HttpMethods.GET
      case Method.HEAD    => HttpMethods.HEAD
      case Method.POST    => HttpMethods.POST
      case Method.PUT     => HttpMethods.PUT
      case Method.DELETE  => HttpMethods.DELETE
      case Method.OPTIONS => HttpMethods.OPTIONS
      case Method.PATCH   => HttpMethods.PATCH
      case Method.CONNECT => HttpMethods.CONNECT
      case Method.TRACE   => HttpMethods.TRACE
      case _              => HttpMethod.custom(m.method)
    }
}
| softwaremill/sttp | akka-http-backend/src/main/scala/sttp/client3/akkahttp/ToAkka.scala | Scala | apache-2.0 | 1,727 |
package com.twitter.conversions.common
import com.twitter.common.zookeeper.ZooKeeperClient
import com.twitter.conversions.common.quantity.COMMON_FOREVER
import com.twitter.util.{Duration, FuturePool}
import com.twitter.zk.{CommonConnector, ZkClient}
import scala.language.implicitConversions
/** Adapters for common's ZooKeeperClient (and, later, serversets, etc) */
/** Adapters for common's ZooKeeperClient (and, later, serversets, etc) */
object zookeeper {
  /** Enriches a common ZooKeeperClient with conversions to util-zk types. */
  class CommonZkClientAdapter(zkc: ZooKeeperClient) {
    // Wraps the client in a CommonConnector; `timeout` defaults to COMMON_FOREVER.
    def toConnector(timeout: Duration = COMMON_FOREVER)
                   (implicit pool: FuturePool): CommonConnector = {
      new CommonConnector(zkc, timeout)
    }
    // Builds a ZkClient on top of the connector produced above.
    def toZkClient(timeout: Duration = COMMON_FOREVER)(implicit pool: FuturePool): ZkClient = {
      ZkClient(toConnector(timeout))
    }
  }
  /** Implicit conversion of ZooKeeperClient to CommonZkClient */
  implicit def commonZkClient(zkc: ZooKeeperClient): CommonZkClientAdapter =
    new CommonZkClientAdapter(zkc)
}
| tdyas/util | util-zk-common/src/main/scala/com/twitter/conversions/common/zookeeper.scala | Scala | apache-2.0 | 937 |
package org.scalex
package object search {
  // Score assigned by the search machinery — an Int alias.
  // NOTE(review): presumably a relevance score; confirm against call sites.
  type Score = Int
}
| kzys/scalex | src/main/scala/search/package.scala | Scala | mit | 65 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
/**
* An interface for reporting shuffle read metrics, for each shuffle. This interface assumes
* all the methods are called on a single-threaded, i.e. concrete implementations would not need
* to synchronize.
*
* All methods have additional Spark visibility modifier to allow public, concrete implementations
* that still have these methods marked as private[spark].
*/
private[spark] trait ShuffleReadMetricsReporter {
  /** Adds `v` to the remote-blocks-fetched counter. */
  private[spark] def incRemoteBlocksFetched(v: Long): Unit
  /** Adds `v` to the local-blocks-fetched counter. */
  private[spark] def incLocalBlocksFetched(v: Long): Unit
  /** Adds `v` bytes to the remote-bytes-read metric. */
  private[spark] def incRemoteBytesRead(v: Long): Unit
  /** Adds `v` bytes to the remote-bytes-read-to-disk metric. */
  private[spark] def incRemoteBytesReadToDisk(v: Long): Unit
  /** Adds `v` bytes to the local-bytes-read metric. */
  private[spark] def incLocalBytesRead(v: Long): Unit
  /** Adds `v` to the fetch-wait-time metric. */
  private[spark] def incFetchWaitTime(v: Long): Unit
  /** Adds `v` to the records-read counter. */
  private[spark] def incRecordsRead(v: Long): Unit
}
/**
* An interface for reporting shuffle write metrics. This interface assumes all the methods are
* called on a single-threaded, i.e. concrete implementations would not need to synchronize.
*
* All methods have additional Spark visibility modifier to allow public, concrete implementations
* that still have these methods marked as private[spark].
*/
private[spark] trait ShuffleWriteMetricsReporter {
  /** Adds `v` bytes to the bytes-written metric. */
  private[spark] def incBytesWritten(v: Long): Unit
  /** Adds `v` to the records-written counter. */
  private[spark] def incRecordsWritten(v: Long): Unit
  /** Adds `v` to the write-time metric. */
  private[spark] def incWriteTime(v: Long): Unit
  /** Subtracts `v` bytes from the bytes-written metric (used when a write is retracted). */
  private[spark] def decBytesWritten(v: Long): Unit
  /** Subtracts `v` from the records-written counter. */
  private[spark] def decRecordsWritten(v: Long): Unit
}
| maropu/spark | core/src/main/scala/org/apache/spark/shuffle/metrics.scala | Scala | apache-2.0 | 2,331 |
package cn.jpush
import org.apache.spark.SparkContext
/**
* Created by fengwu on 15/5/25.
*/
/**
 * Skeleton for message-receive statistics; all Spark logic is commented out,
 * so `main` currently does nothing.
 * NOTE(review): `main` is defined on a class, not an `object`, so as written it
 * cannot serve as a JVM entry point — confirm intent before wiring into a job.
 */
class MsgRecvStatsDemo {
  def main(args: Array[String]) {
//    val sc = new SparkContext()
//
//    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
//
//
//    val msgRecvLines = sc.textFile(args(0))
//    val pairsUniqe = msgRecvLines.map(x => {
//      val fields = x.split("\\t")
//      (fields(0) + "\\t" + fields(1), fields(2))
//    }).distinct()
//    val stattime = args(1)
//    val fields = stattime.split("\\t")
  }
}
| wufengwhu/offline | src/main/scala/cn/jpush/MsgRecvStatsDemo.scala | Scala | mit | 585 |
package com.sksamuel.elastic4s.delete
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.elasticsearch.index.VersionType
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.{FlatSpec, Matchers}
/**
 * Exercises alternative syntaxes of the delete DSL. No assertions and no ES
 * cluster: each case only builds a request, so a failure here means the DSL
 * expression no longer compiles/constructs.
 */
class DeleteApiTest extends FlatSpec with Matchers with TypeCheckedTripleEquals {
  import com.sksamuel.elastic4s.ElasticApi._
  "a delete by id request" should "accept tuple for from" in {
    delete(141212) from "places" -> "cities"
  }
  it should "parse slash indextype" in {
    delete(141212) from "index/type"
  }
  it should "accept index and type in dot syntax" in {
    // NOTE(review): statement duplicated verbatim — presumably meant to cover a
    // second variant; confirm.
    delete(123).from("places", "type1")
    delete(123).from("places", "type1")
  }
  it should "accept tuple in dot syntax" in {
    delete(123).from("places" -> "type1")
    delete(123).from("places" -> "type1")
  }
  it should "accept routing key" in {
    delete(141212).from("places" / "type1").routing("my-route")
  }
  it should "accept version and version type" in {
    delete(141212) from "places" / "type1" version 53423l versionType VersionType.EXTERNAL
  }
  it should "accept refresh" in {
    delete(141212) from "places" / "type1" refresh RefreshPolicy.IMMEDIATE
  }
  "a delete by query request" should "support the dsl syntax" in {
    // deleteIn("places").by("query")
  }
}
| FabienPennequin/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/delete/DeleteApiTest.scala | Scala | apache-2.0 | 1,315 |
/*****************************************
Emitting Generated Code
*******************************************/
class Snippet extends ((Int) => (Array[Int])) {
  /**
   * Fills an array of length `x0` with the sequence 8, 12, 16, 20, ...:
   * element 0 is 8, and each later element i is the previous odd step plus
   * the next odd step 2*(i+1)+3 — exactly what the generated code computed.
   */
  def apply(x0: Int): Array[Int] = {
    // `new Array` throws NegativeArraySizeException for x0 < 0, same as before.
    val out = new Array[Int](x0)
    if (x0 > 0) {
      out(0) = 8
      var prevStep = 5 // previous odd step, seeded with 2*1 + 3
      var i = 1
      while (i < x0) {
        val step = 2 * (i + 1) + 3
        out(i) = prevStep + step
        prevStep = step
        i += 1
      }
    }
    out
  }
}
/*****************************************
End of Generated Code
*******************************************/
| RomanTsegelskyi/lms-truffle | src/out/sliding1.check.scala | Scala | gpl-2.0 | 812 |
/*
* Copyright (c) 2011-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.util.Try
import org.junit.Test
import org.junit.Assert._
import newtype._, tag._, test._, testutil._
/**
 * Tests for shapeless type operators: tagged types, newtypes, `the` (implicit
 * materialisation with precise path-dependent types) and `TypeOf`.
 * Most checks are compile-time: `typed` asserts an expression's static type,
 * `illTyped` asserts a snippet does NOT compile.
 */
class TypeOperatorTests {
  import TypeOperatorTests._
  trait ATag
  object ATag {
    // In implicit scope for values tagged with ATag, so they convert to String.
    implicit def taggedToString[T](value: T with Tagged[ATag]): String = message
    val message = "This object has ATag tag type"
  }
  // Tagging puts the tag's companion in implicit scope: the conversion above fires.
  @Test
  def testImplicitScopeForTaggedType: Unit = {
    val x = tag[ATag](1)
    val s: String = x
    assertEquals(ATag.message, s)
  }
  // Newtype: ops are available, but the newtype is not interchangeable with String.
  @Test
  def testNewtype: Unit = {
    type MyString = Newtype[String, MyStringOps]
    def MyString(s : String) : MyString = newtype(s)
    case class MyStringOps(s : String) {
      def mySize = s.size
    }
    implicit val mkOps = MyStringOps
    val ms = MyString("foo")
    illTyped("""
      val s : String = ms
      """)
    illTyped("""
      val ms2 : MyString = "foo"
      """)
    illTyped("""
      ms.size
      """)
    assertEquals(3, ms.mySize)
    val s2 = "bar"
    val ms2 = MyString(s2)
    // Newtypes are unboxed: the newtype value is the same runtime object.
    assertTrue(ms2 eq (s2 : AnyRef))
  }
  trait Foo {
    type T
    val t: T
  }
  object Foo {
    implicit def mkFoo: Foo { type T = Int } = new Foo { type T = Int ; val t = 23 }
  }
  trait Foo2[U] {
    type T
    val t: T
  }
  object Foo2 {
    implicit def mkFoo2: Foo2[Char] { type T = Int } = new Foo2[Char] { type T = Int ; val t = 23 }
  }
  trait Bar[T] {
    type U
    val tu: Either[T, U]
  }
  object Bar {
    implicit def mkBar1: Bar[Boolean] { type U = Int } = new Bar[Boolean] { type U = Int ; val tu = Right(23) }
    implicit def mkBar2: Bar[String] { type U = Double } = new Bar[String] { type U = Double ; val tu = Right(13.0) }
  }
  case class Baz(i: Int, s: String)
  // `the` keeps the refined member types (unlike `implicitly`, which widens).
  @Test
  def testTheValues: Unit = {
    val foo = the[Foo]
    typed[Foo](foo)
    typed[Int](foo.t)
    val bar1 = the[Bar[Boolean]]
    typed[Bar[Boolean]](bar1)
    typed[Either[Boolean, Int]](bar1.tu)
    val bar2 = the[Bar[String]]
    typed[Bar[String]](bar2)
    typed[Either[String, Double]](bar2.tu)
  }
  // `the.Foo.T` / the.`Bar[X]`.U select member types of the materialised instance.
  @Test
  def testTheTypes: Unit = {
    val t: the.Foo.T = 23
    typed[Int](t)
    val tu1: Either[Boolean, the.`Bar[Boolean]`.U] = Right(23)
    typed[Either[Boolean, Int]](tu1)
    val tu2: Either[String, the.`Bar[String]`.U] = Right(23)
    typed[Either[String, Double]](tu2)
  }
  // A missing implicit surfaces the standard "No implicit ... defined" error.
  @Test
  def testTheErrors: Unit = {
    illTyped("the.`Ordering[Set[Int]]`.Ops", "No implicit Ordering defined for Set\\\\[Int].")
  }
  // `the` also works under quantification, preserving the refinement.
  @Test
  def testTheQuantifiers: Unit = {
    def bar0[T, U0](implicit b: Bar[T] { type U = U0 }): Bar[T] { type U = U0 } = {
      val res = the[Bar[T]]
      res
    }
    // Note: Slightly different method signature in TypeOperator211Tests
    def bar1[T, U0](implicit b: Bar[T] { type U = U0 }): Option[U0] = {
      val res: Option[the.`Bar[T]`.U] = None
      res
    }
    val b0 = bar0[Boolean, Int]
    typed[Bar[Boolean] { type U = Int }](b0)
    val b1 = bar1[Boolean, Int]
    typed[Option[Int]](b1)
  }
  // `TypeOf` computes the precise static type of an arbitrary expression.
  @Test
  def testTypeOf: Unit = {
    val t1: TypeOf.`Foo.mkFoo`.T = 23
    typed[Int](t1)
    val t2: TypeOf.`Foo.mkFoo: Foo`.T = 23
    typed[Int](t2)
    val tu1: Either[Boolean, TypeOf.`Bar.mkBar1: Bar[Boolean]`.U] = Right(23)
    typed[Either[Boolean, Int]](tu1)
    val tu2: Either[String, TypeOf.`the.apply: Bar[String]`.U] = Right(23)
    typed[Either[String, Double]](tu2)
    val tu3: Either[String, TypeOf.`the[Bar[String]]`.U] = Right(23)
    typed[Either[String, Double]](tu3)
    val indexedHList: TypeOf.`Generic[(String, Boolean)].to(("foo", true)).zipWithIndex`.type = {
      Generic[(String, Boolean)].to(("foo", true)).zipWithIndex
    }
    typed[(String, _0) :: (Boolean, Succ[_0]) :: HNil](indexedHList)
    implicit val genBaz: TypeOf.`Generic[Baz]`.type = cachedImplicit
    val reprBaz = genBaz.to(Baz(23, "foo"))
    typed[Int :: String :: HNil](reprBaz)
  }
  // Bogus selections on `the` must fail (at runtime here, at compile time below).
  @Test
  def testRejectBogus: Unit = {
    try {
      the.Foo
      assert(false)
    } catch {
      case _: Throwable => // OK
    }
    //the.Unit // illTyped fails for this expression
    implicit val u2: Unit = ()
    //the.Unit // illTyped fails for this expression
    //the.Int // illTyped fails for this expression
    implicit val i2: Int = 23
    //the.Int // illTyped fails for this expression
    illTyped("""
      val blah = the.`package wibble`
      """)
  }
  // `the` must also materialise value-class instances.
  @Test
  def testValueClass: Unit = {
    implicit val one: AValueClass = AValueClass(1L)
    val x = the[AValueClass]
    typed[AValueClass](x)
  }
}
object TypeOperatorTests {
  // Value class used by testValueClass; kept in the companion so it is a
  // top-level-equivalent definition (value classes cannot be nested in a class).
  final case class AValueClass(l: Long) extends AnyVal
}
| fthomas/shapeless | core/src/test/scala/shapeless/typeoperators.scala | Scala | apache-2.0 | 5,200 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package security.httpauth
import filters.Attrs
import k.grid.Grid
import play.api.mvc.Request
import security._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random
import scala.concurrent.duration._
/**
* Created by yaakov on 12/16/15.
*/
trait DigestHttpAuthentication {
  import cmwell.util.string.Hash.md5
  implicit val timeout = akka.util.Timeout(10.seconds)
  // Server's opaque value; the client only has to echo it back unchanged, so a constant is fine.
  val opaque = md5("any string will do. even 42.")
  // UserInfoton field holding HA1 (see the protocol description below).
  val userInfotonPropName = "digest2"
  // HA2 is constant because this flow only ever authenticates GET /_login.
  val ha2 = md5Concat("GET", "/_login")
  /** Builds the 401 challenge: registers a fresh one-time nonce and returns the Digest header. */
  def initialDigestHeader = {
    // Fix: `.mkString` materialises all 26 random characters. The previous
    // `.toString` rendered the lazy Stream itself (e.g. "Stream(a, ?)"), so only
    // one random character fed the hash, collapsing the nonce space to ~62 values.
    val newNonce = md5(Random.alphanumeric.take(26).mkString)
    Grid.serviceRef(classOf[NoncesManager].getName) ! AddNonce(newNonce)
    DigestServerHeader("cmwell", newNonce, opaque)
  }
  // This is a Server Side Implementation for HTTP Digest Authentication, according to RFC 2617
  // which is a "challenge-response" handshake, like this:
  //
  // Request:   GET /_login (no headers)
  // Response:  401 with initial Digest Header { realm, nonce, opaque }                          // that's the "challenge"
  // Request:   GET /_login with Digest Header { username, nonce, opaque, response }             // that's the "response"
  // Response:  200 / 403 according to `response` value
  //
  // Where:
  // realm is "cmwell"
  // opaque is md5 of a const string
  // nonce is md5 that should only be used once
  // response is: md5(s"$HA1:$nonce:$HA2") where HA1 is md5(s"$username:$realm:$password")
  //                                        and HA2 is md5("GET:/_login")
  //
  // We keep HA1 per user inside its UserInfoton as "digest2"
  //
  def digestAuthenticate(
    authCache: EagerAuthCache
  )(req: Request[_])(implicit ec: ExecutionContext): Future[DigestStatus] = {
    import akka.pattern.ask
    req.headers.get("Authorization") match {
      case None => Future.successful(DigestStatus(isAuthenticated = false, ""))
      case Some(authHeader) => {
        val header = DigestHeaderUtils.fromClientHeaderString(authHeader)
        // A nonce may be consumed at most once; the NoncesManager actor arbitrates that.
        (Grid.serviceRef(classOf[NoncesManager].getName) ? ConsumeNonce(header.nonce)).mapTo[NonceStatus].map {
          case NonceConsumed if header.opaque == opaque =>
            authCache.getUserInfoton(header.username) match {
              case None => DigestStatus(isAuthenticated = false, "")
              case Some(user) =>
                val ha1 = (user \\ userInfotonPropName).asOpt[String].getOrElse("")
                val calculatedResponse = md5Concat(ha1, header.nonce, ha2)
                DigestStatus(isAuthenticated = calculatedResponse == header.response, header.username)
            }
          case _ => DigestStatus(isAuthenticated = false, "")
        }
      }
    }
  }
  /** md5 of the arguments joined with ':' — the RFC 2617 hashing convention. */
  def md5Concat(values: String*) = md5(values.mkString(":"))
}
// Outcome of a digest check: whether the response hash matched, and the username it was for.
case class DigestStatus(isAuthenticated: Boolean, username: String)
| hochgi/CM-Well | server/cmwell-ws/app/security/httpauth/DigestHttpAuthentication.scala | Scala | apache-2.0 | 3,426 |
/*
Copyright 2016 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import algebra.{BoundedSemilattice, Semilattice}
/**
* Tracks the minimum wrapped instance of some ordered type `T`.
*
* [[Min]][T] is a [[Semigroup]] for all types `T`. If `T` has some
* maximum element (`Long` has `Long.MaxValue`, for example), then
* [[Min]][T] is a [[Monoid]].
*
* @param get wrapped instance of `T`
*/
case class Min[@specialized(Int, Long, Float, Double) +T](get: T) {
  /**
   * If this instance wraps a smaller `T` than `r`, returns this
   * instance, else returns `r`.
   *
   * Ties keep `this`: `Ordering.min` returns its first argument when the
   * two compare equal.
   *
   * @param r instance of `Min[U]` for comparison
   */
  def min[U >: T](r: Min[U])(implicit ord: Ordering[U]): Min[U] =
    Min.ordering.min(this, r)
  /**
   * Identical to [[min]].
   *
   * @param r instance of `Min[U]` for comparison
   */
  def +[U >: T](r: Min[U])(implicit ord: Ordering[U]): Min[U] = min(r)
}
/**
* Provides a set of operations and typeclass instances needed to use
* [[Min]] instances.
*/
object Min extends MinInstances {
  /**
   * Returns an [[Aggregator]] that selects the minimum instance of an
   * ordered type `T` in the aggregated stream.
   */
  def aggregator[T](implicit ord: Ordering[T]): MinAggregator[T] =
    MinAggregator()(ord)
  /**
   * Returns a [[Semigroup]] instance with a `plus` implementation
   * that always returns the minimum `T` argument.
   */
  def minSemigroup[T](implicit ord: Ordering[T]): Semigroup[T] with Semilattice[T] =
    new Semigroup[T] with Semilattice[T] {
      def plus(l: T, r: T) = ord.min(l, r)
    }
}
private[algebird] sealed abstract class MinInstances {
  // Equiv/Ordering on Min delegate to the wrapped value.
  implicit def equiv[T](implicit eq: Equiv[T]): Equiv[Min[T]] = Equiv.by(_.get)
  implicit def ordering[T: Ordering]: Ordering[Min[T]] = Ordering.by(_.get)
  /**
   * Returns a [[Monoid]] instance for [[Min]][T] that combines
   * instances using [[Min.min]] and uses `zero` for its identity.
   *
   * @param zero identity of the returned [[Monoid]] instance
   * @note `zero` must be `>=` every element of `T` for the returned instance to be lawful.
   */
  def monoid[T: Ordering](zero: => T): Monoid[Min[T]] with BoundedSemilattice[Min[T]] = {
    val z = zero // avoid confusion below when overriding zero
    new Monoid[Min[T]] with BoundedSemilattice[Min[T]] {
      val zero = Min(z)
      val ord = implicitly[Ordering[T]]
      // Ties keep the left argument (lteq is true on equality).
      def plus(l: Min[T], r: Min[T]): Min[T] =
        if (ord.lteq(l.get, r.get)) l else r
    }
  }
  /**
   * Returns a [[Semigroup]] instance for [[Min]][T]. The `plus`
   * implementation always returns the minimum `Min[T]` argument.
   */
  implicit def semigroup[T: Ordering]: Semigroup[Min[T]] with Semilattice[Min[T]] =
    new Semigroup[Min[T]] with Semilattice[Min[T]] {
      val ord = implicitly[Ordering[T]]
      // Ties keep the left argument, matching `monoid` above.
      def plus(l: Min[T], r: Min[T]): Min[T] =
        if (ord.lteq(l.get, r.get)) l else r
    }
  /** [[Monoid]] for [[Min]][Int] with `zero == Int.MaxValue` */
  implicit def intMonoid: Monoid[Min[Int]] with BoundedSemilattice[Min[Int]] =
    monoid(Int.MaxValue)
  /** [[Monoid]] for [[Min]][Long] with `zero == Long.MaxValue` */
  implicit def longMonoid: Monoid[Min[Long]] with BoundedSemilattice[Min[Long]] =
    monoid(Long.MaxValue)
  /**
   * [[Monoid]] for [[Min]][Double] with `zero == Double.MaxValue`
   * Note: MaxValue < PositiveInfinity, but people may
   * be relying on this emitting a non-infinite number. Sadness
   */
  implicit def doubleMonoid: Monoid[Min[Double]] with BoundedSemilattice[Min[Double]] =
    monoid(Double.MaxValue)
  /**
   * [[Monoid]] for [[Min]][Float] with `zero == Float.MaxValue`
   * Note: MaxValue < PositiveInfinity, but people may
   * be relying on this emitting a non-infinite number. Sadness
   */
  implicit def floatMonoid: Monoid[Min[Float]] with BoundedSemilattice[Min[Float]] =
    monoid(Float.MaxValue)
}
/**
* [[Aggregator]] that selects the minimum instance of `T` in the
* aggregated stream.
*/
case class MinAggregator[T](implicit ord: Ordering[T]) extends Aggregator[T, T, T] {
  // prepare/present are identities; the reduction itself comes from minSemigroup.
  def prepare(v: T) = v
  val semigroup = Min.minSemigroup[T]
  def present(v: T) = v
}
| nevillelyh/algebird | algebird-core/src/main/scala/com/twitter/algebird/Min.scala | Scala | apache-2.0 | 4,639 |
package models.mission
import java.util.UUID
import models.daos.slick.DBTableDefinitions.{DBUser, UserTable}
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import scala.slick.lifted.ForeignKeyQuery
case class MissionUser(missionUserId: Int, missionId: Int, userId: String)
/** Slick mapping for sidewalk.mission_user — associates users with missions. */
class MissionUserTable(tag: Tag) extends Table[MissionUser](tag, Some("sidewalk"), "mission_user") {
  def missionUserId = column[Int]("mission_user_id", O.PrimaryKey, O.AutoInc)
  def missionId = column[Int]("mission_id", O.NotNull)
  def userId = column[String]("user_id", O.NotNull)
  // Default projection <-> MissionUser case class.
  def * = (missionUserId, missionId, userId) <> ((MissionUser.apply _).tupled, MissionUser.unapply)
  def mission: ForeignKeyQuery[MissionTable, Mission] =
    foreignKey("mission_user_mission_id_fkey", missionId, TableQuery[MissionTable])(_.missionId)
  def user: ForeignKeyQuery[UserTable, DBUser] =
    foreignKey("mission_user_user_id_fkey", userId, TableQuery[UserTable])(_.userId)
}
object MissionUserTable {
  val db = play.api.db.slick.DB
  val missionUsers = TableQuery[MissionUserTable]

  /**
   * Checks whether the given user is already associated with the given mission.
   *
   * Fix: the previous implementation fetched the entire mission_user table into
   * memory (`missionUsers.list`) and counted matches client-side; the filter is
   * now evaluated by the database.
   *
   * @param missionId mission id
   * @param userId user id
   * @return true if a (missionId, userId) row exists
   */
  def exists(missionId: Int, userId: String): Boolean = db.withTransaction { implicit session =>
    missionUsers.filter(m => m.missionId === missionId && m.userId === userId).firstOption.isDefined
  }
  /**
   * Insert a new mission user
   * @param missionId mission id
   * @param userId user id
   * @return missionUserId
   */
  def save(missionId: Int, userId: String): Int = save(MissionUser(0, missionId, userId))
  /**
   * Insert a new mission user
   * @param missionUser A MissionUser object
   * @return missionUserId
   */
  def save(missionUser: MissionUser): Int = db.withTransaction { implicit session =>
    val missionUserId: Int =
      (missionUsers returning missionUsers.map(_.missionUserId)) += missionUser
    missionUserId
  }
}
| danZzyy/SidewalkWebpage | sidewalk-webpage/app/models/mission/MissionUserTable.scala | Scala | mit | 1,866 |
package org.dhash.util
import java.awt.event.WindowEvent
import java.io.File
import javax.swing.JFrame
import org.bytedeco.javacpp.indexer.FloatIndexer
import org.bytedeco.javacpp.opencv_core.{DMatchVector, KeyPoint, KeyPointVector, Mat, Point2fVector, Scalar, _}
import org.bytedeco.javacpp.opencv_features2d.{DrawMatchesFlags, drawKeypoints}
import org.bytedeco.javacpp.opencv_imgcodecs.imread
import org.bytedeco.javacpp.opencv_imgproc.{COLOR_BGR2GRAY, cvtColor}
import org.bytedeco.javacpp.opencv_xfeatures2d.SIFT
import org.bytedeco.javacv.{CanvasFrame, OpenCVFrameConverter}
import org.pmw.tinylog.Logger
/** Thin helpers around JavaCV/OpenCV: image loading, SIFT, and keypoint conversions. */
class opencv {
  /**
   * Loads an image from a classpath resource.
   * NOTE(review): `getClass.getResource` returns null for a missing resource,
   * which would surface here as a NullPointerException — confirm inputs.
   */
  def imopen(respath: String): Mat = {
    val imFile = new File(getClass.getResource(respath).toURI)
    val imName = imFile.toString
    imread(imName)
  }
  /** Detects SIFT keypoints and computes their descriptors in one pass (no mask). */
  def SIFT_features(im: Mat): (KeyPointVector, Mat) = {
    val pts = new KeyPointVector()
    val desc = new Mat
    SIFT.create().detectAndCompute(im, new Mat(), pts, desc)
    (pts, desc)
  }
  //These are essentially SerDes
  //lifted from https://github.com/bytedeco/javacv-examples/tree/master/OpenCV2_Cookbook
  //Chapter 10 and Utils
  /** Splits matched keypoints into two aligned Point2fVectors (query side, train side). */
  def KeyPointsToP2V(
      matches: DMatchVector,
      keyPoints1: KeyPointVector,
      keyPoints2: KeyPointVector): (Point2fVector, Point2fVector) = {
    // Extract keypoints from each match, separate Left and Right
    val size = matches.size.toInt
    val pointIndexes1 = new Array[Int](size)
    val pointIndexes2 = new Array[Int](size)
    for (i <- 0 until size) {
      pointIndexes1(i) = matches.get(i).queryIdx()
      pointIndexes2(i) = matches.get(i).trainIdx()
    }
    // Convert keypoints into Point2f
    val points1 = new Point2fVector()
    val points2 = new Point2fVector()
    KeyPoint.convert(keyPoints1, points1, pointIndexes1)
    KeyPoint.convert(keyPoints2, points2, pointIndexes2)
    (points1, points2)
  }
  /** Packs a Point2fVector into a 1xN two-channel float Mat. */
  def toMat(points: Point2fVector): Mat = {
    // Create Mat representing a vector of Points3f
    val size: Int = points.size.toInt
    // Argument to Mat constructor must be `Int` to mean sizes, otherwise it may be interpreted as content.
    val dest = new Mat(1, size, CV_32FC2)
    val indx = dest.createIndexer().asInstanceOf[FloatIndexer]
    for (i <- 0 until size) {
      val p = points.get(i)
      indx.put(0, i, 0, p.x)
      indx.put(0, i, 1, p.y)
    }
    dest
  }
}
/** Wraps a JavaCV CanvasFrame (a Swing JFrame) for showing Mats. */
class canvas(title: String) {
  var canvas = new CanvasFrame(title)
  // EXIT_ON_CLOSE terminates the whole JVM when the user closes the window.
  canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
  /** Renders the given Mat (defaults to an empty one) and returns the frame. */
  def imshow(mat: Mat = new Mat): CanvasFrame = {
    val converter = new OpenCVFrameConverter.ToMat()
    canvas.showImage(converter.convert(mat))
    canvas
  }
  /** Closes the window by dispatching a WINDOW_CLOSING event. */
  def close(): Unit = {
    canvas.dispatchEvent(new WindowEvent(canvas, WindowEvent.WINDOW_CLOSING))
  }
}
/** Manual, visual smoke tests — each opens Swing windows; not automated assertions. */
class tests {
  /** Opens the given classpath image in a new window. */
  def imshow_test(impath: String = "/lena.bmp"): Unit = {
    val opencv = new opencv
    val cvs = new canvas("Test image")
    val im = opencv.imopen(impath)
    cvs.imshow(im)
    Logger.trace(s"Showing $impath in a new JFrame")
    // NOTE(review): the log claims the JFrame is closed, but cvs.close() is never
    // called — the window stays open. Confirm whether a close() call is intended.
    Logger.trace(s"Test complete, closing JFrame")
  }
  /** Shows grayscale Lena, then a second window with rich SIFT keypoints drawn on it. */
  def SIFT_test(): Unit = {
    //show a gray scale image
    val cv = new opencv
    val gs_im = new Mat
    cvtColor(cv.imopen("/lena.bmp"), gs_im, COLOR_BGR2GRAY)
    val cvs = new canvas("Grayscale Lena")
    cvs.imshow(gs_im)
    //get SIFT keypoints and draw them on the initial image
    var SIFT_keypoints = new KeyPointVector()
    SIFT.create().detect(gs_im, SIFT_keypoints)
    val img_with_feats = new Mat()
    drawKeypoints(gs_im,
      SIFT_keypoints,
      img_with_feats,
      Scalar.all(-1),
      DrawMatchesFlags.DRAW_RICH_KEYPOINTS)
    //open another window with the drawn keypoints
    val cvs2 = new canvas("Image with SIFT keypoints")
    cvs2.imshow(img_with_feats)
  }
}
| DhashS/scala_comp_robo_sign_detection | src/main/scala-2.12/util.scala | Scala | gpl-3.0 | 3,807 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex
import laws.discipline._, arbitrary._
/** Law-checks the group and match decoder instances for BigDecimal (discipline suites). */
class BigDecimalDecoderTests extends DisciplineSuite {
  checkAll("GroupDecoder[BigDecimal]", GroupDecoderTests[BigDecimal].decoder[Int, Int])
  checkAll("MatchDecoder[BigDecimal]", MatchDecoderTests[BigDecimal].decoder[Int, Int])
}
| nrinaudo/kantan.regex | core/shared/src/test/scala/kantan/regex/BigDecimalDecoderTests.scala | Scala | apache-2.0 | 896 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.