code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.statsEstimation
import org.apache.spark.sql.catalyst.CatalystConf
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, AttributeReference, Literal}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.types.IntegerType
class BasicStatsEstimationSuite extends StatsEstimationTestBase {
// A single attribute with fully-known column statistics, shared by all limit/sample tests.
val attribute = attr("key")
val colStat = ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)
// Leaf test plan with 10 rows; each row is 8 bytes of overhead plus a 4-byte int column.
val plan = StatsTestPlan(
outputList = Seq(attribute),
attributeStats = AttributeMap(Seq(attribute -> colStat)),
rowCount = 10,
// row count * (overhead + column size)
size = Some(10 * (8 + 4)))
// GlobalLimit caps both rowCount and size; LocalLimit only drops column stats.
test("limit estimation: limit < child's rowCount") {
val localLimit = LocalLimit(Literal(2), plan)
val globalLimit = GlobalLimit(Literal(2), plan)
// LocalLimit's stats is just its child's stats except column stats
checkStats(localLimit, plan.stats(conf).copy(attributeStats = AttributeMap(Nil)))
checkStats(globalLimit, Statistics(sizeInBytes = 24, rowCount = Some(2)))
}
test("limit estimation: limit > child's rowCount") {
val localLimit = LocalLimit(Literal(20), plan)
val globalLimit = GlobalLimit(Literal(20), plan)
checkStats(localLimit, plan.stats(conf).copy(attributeStats = AttributeMap(Nil)))
// Limit is larger than child's rowCount, so GlobalLimit's stats is equal to its child's stats.
checkStats(globalLimit, plan.stats(conf).copy(attributeStats = AttributeMap(Nil)))
}
// A zero limit must still report a non-zero size (1 byte) so downstream math stays sane.
test("limit estimation: limit = 0") {
val localLimit = LocalLimit(Literal(0), plan)
val globalLimit = GlobalLimit(Literal(0), plan)
val stats = Statistics(sizeInBytes = 1, rowCount = Some(0))
checkStats(localLimit, stats)
checkStats(globalLimit, stats)
}
test("sample estimation") {
val sample = Sample(0.0, 0.5, withReplacement = false, (math.random * 1000).toLong, plan)()
// Sampling half of 10 rows of 12 bytes each => 5 rows, 60 bytes.
checkStats(sample, Statistics(sizeInBytes = 60, rowCount = Some(5)))
// Child doesn't have rowCount in stats
val childStats = Statistics(sizeInBytes = 120)
val childPlan = DummyLogicalPlan(childStats, childStats)
val sample2 =
Sample(0.0, 0.11, withReplacement = false, (math.random * 1000).toLong, childPlan)()
// Without a rowCount only sizeInBytes is scaled: ceil(120 * 0.11) = 14.
checkStats(sample2, Statistics(sizeInBytes = 14))
}
test("estimate statistics when the conf changes") {
val expectedDefaultStats =
Statistics(
sizeInBytes = 40,
rowCount = Some(10),
attributeStats = AttributeMap(Seq(
AttributeReference("c1", IntegerType)() -> ColumnStat(10, Some(1), Some(10), 0, 4, 4))),
isBroadcastable = false)
val expectedCboStats =
Statistics(
sizeInBytes = 4,
rowCount = Some(1),
attributeStats = AttributeMap(Seq(
AttributeReference("c1", IntegerType)() -> ColumnStat(1, Some(5), Some(5), 0, 4, 4))),
isBroadcastable = false)
// DummyLogicalPlan switches between the two stats objects based on conf.cboEnabled.
val plan = DummyLogicalPlan(defaultStats = expectedDefaultStats, cboStats = expectedCboStats)
checkStats(
plan, expectedStatsCboOn = expectedCboStats, expectedStatsCboOff = expectedDefaultStats)
}
/** Check estimated stats when cbo is turned on/off. */
private def checkStats(
plan: LogicalPlan,
expectedStatsCboOn: Statistics,
expectedStatsCboOff: Statistics): Unit = {
assert(plan.stats(conf.copy(cboEnabled = true)) == expectedStatsCboOn)
// Invalidate statistics
// Stats are cached on the plan, so the cache must be cleared before re-estimating.
plan.invalidateStatsCache()
assert(plan.stats(conf.copy(cboEnabled = false)) == expectedStatsCboOff)
}
/** Check estimated stats when it's the same whether cbo is turned on or off. */
private def checkStats(plan: LogicalPlan, expectedStats: Statistics): Unit =
checkStats(plan, expectedStats, expectedStats)
}
/**
 * Test-only logical plan used to unit-test the cbo switch: it returns one of two fixed
 * [[Statistics]] objects depending on whether cost-based optimization is enabled in the conf.
 *
 * @param defaultStats statistics reported when cbo is disabled
 * @param cboStats     statistics reported when cbo is enabled
 */
private case class DummyLogicalPlan(
    defaultStats: Statistics,
    cboStats: Statistics) extends LogicalPlan {

  // A dummy plan produces no output attributes and has no children.
  override def output: Seq[Attribute] = Nil
  override def children: Seq[LogicalPlan] = Nil

  // Fall back to the simple statistics unless the conf explicitly enables cbo.
  override def computeStats(conf: CatalystConf): Statistics =
    if (!conf.cboEnabled) defaultStats else cboStats
}
| jianran/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/statsEstimation/BasicStatsEstimationSuite.scala | Scala | apache-2.0 | 5,134 |
package org.jetbrains.plugins.scala
package lang
package completion
package filters.expression
import com.intellij.psi.filters.ElementFilter
import com.intellij.psi.{PsiElement, _}
import org.jetbrains.annotations.NonNls
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
 * Completion filter deciding whether the `match` keyword may be offered at the current
 * caret position: it is acceptable when the leaf at the caret sits inside an expression
 * that is itself part of an infix or postfix expression.
 *
 * @author Alexander Podkhalyuzin
 * Date: 28.05.2008
 */
class MatchFilter extends ElementFilter {
  def isAcceptable(element: Object, context: PsiElement): Boolean =
    // Never complete inside comments.
    !context.isInstanceOf[PsiComment] && {
      val leaf = getLeafByOffset(context.getTextRange.getStartOffset, context)
      // A null leaf (or non-expression parent) means `match` is not applicable here.
      Option(leaf).map(_.getParent).exists { parent =>
        parent.isInstanceOf[ScExpression] && (parent.getParent match {
          case _: ScInfixExpr | _: ScPostfixExpr => true
          case _ => false
        })
      }
    }

  // The filter applies regardless of the element class being completed.
  def isClassAcceptable(hintClass: java.lang.Class[_]): Boolean = true

  @NonNls
  override def toString: String = "'match' keyword filter"
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/completion/filters/expression/MatchFilter.scala | Scala | apache-2.0 | 1,082 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.driver.core
import com.datastax.driver.core.ColumnDefinitions.Definition
/**
 * Created by andrew@datamountaineer.com on 21/04/16.
 * stream-reactor
 *
 * Test helper building Cassandra column metadata covering every data type exercised
 * by the sink tests.
 */
object TestUtils {

  /** Returns a [[ColumnDefinitions]] for keyspace/table "sink_test" with one column per type. */
  def getColumnDefs: ColumnDefinitions = {
    // Column name paired with its Cassandra driver DataType.
    val columnTypes: Seq[(String, DataType)] = Seq(
      "uuidCol" -> DataType.uuid(),
      "inetCol" -> DataType.inet(),
      "asciiCol" -> DataType.ascii(),
      "textCol" -> DataType.text(),
      "varcharCol" -> DataType.varchar(),
      "booleanCol" -> DataType.cboolean(),
      "smallintCol" -> DataType.smallint(),
      "intCol" -> DataType.cint(),
      "decimalCol" -> DataType.decimal(),
      "floatCol" -> DataType.cfloat(),
      "counterCol" -> DataType.counter(),
      "bigintCol" -> DataType.bigint(),
      "varintCol" -> DataType.varint(),
      "doubleCol" -> DataType.cdouble(),
      "timeuuidCol" -> DataType.timeuuid(),
      "blobCol" -> DataType.blob(),
      "dateCol" -> DataType.date(),
      "timeCol" -> DataType.time(),
      "timestampCol" -> DataType.timestamp(),
      "mapCol" -> DataType.map(DataType.varchar(), DataType.varchar()),
      "listCol" -> DataType.list(DataType.varchar()),
      "setCol" -> DataType.set(DataType.varchar())
    )
    // Every definition lives in the same dummy keyspace and table.
    val definitions = columnTypes.map { pair =>
      new Definition("sink_test", "sink_test", pair._1, pair._2)
    }.toArray
    new ColumnDefinitions(definitions, CodecRegistry.DEFAULT_INSTANCE)
  }
}
| datamountaineer/stream-reactor | kafka-connect-cassandra/src/test/scala/com/datamountaineer/streamreactor/connect/cassandra/utils/TestUtils.scala | Scala | apache-2.0 | 1,996 |
import java.util.UUID
import kafka.admin.AdminUtils
import kafka.consumer.Whitelist
import kafka.producer.KeyedMessage
import kafka.serializer.StringDecoder
import org.scalatest.{ FunSpec, ShouldMatchers }
import utils.{AwaitCondition, KafkaAdminUtils, KafkaConsumerUtils, KafkaProducerUtils}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
class ConsumerGroupTest extends FunSpec with ShouldMatchers with AwaitCondition {
  describe("A consumer group") {
    /*
    Almost identical to ConsumerGroupTheGhettoWayTest, but in this case instead of refreshing the topic
    metadata we actually produce a key using the message number. Use this solution instead of the previous
    one to jump start a consumer group use case.
    */
    it("should consume messages in a balanced fashion, using keys") {
      val MessageCount = 25
      val topic = s"topic-${UUID.randomUUID()}"
      val consumerGroupId = UUID.randomUUID().toString
      KafkaAdminUtils.createTopic(topic, numPartitions = 3)

      // Produce MessageCount keyed messages asynchronously; the key spreads them across partitions.
      val producer = KafkaProducerUtils.create()
      val producerFuture = Future {
        (1 to MessageCount) foreach { number ⇒
          println(s"Producing Message $number")
          producer.send(new KeyedMessage[Array[Byte], Array[Byte]](topic, number.toString.getBytes("UTF-8"), s"Message $number".getBytes("UTF-8")))
          Thread.sleep(50) // N.B.: Unnecessary; it's here to show the parallelism in the tests
        }
      }.andThen { case _ ⇒
        println(s"Finished producing messages")
        producer.close()
      }

      // BUGFIX: this counter is incremented from three concurrent consumer futures and polled
      // by awaitCondition; a plain `var` with `+= 1` races and can lose increments, making the
      // test flaky. Use an atomic counter instead.
      val consumedMessages = new java.util.concurrent.atomic.AtomicInteger(0)
      val consumers = (1 to 3) map { n ⇒
        (n, KafkaConsumerUtils.create(consumerTimeoutMs = 5000, autoOffsetReset = "smallest", groupId = consumerGroupId))
      }
      // Each consumer pulls from the shared group until its timeout fires, counting messages.
      val consumerFutures = consumers map { case (n, consumer) =>
        Future {
          val stream = consumer.createMessageStreamsByFilter(new Whitelist(topic), 1, new StringDecoder, new StringDecoder).head
          println(s"Consumer Number $n begins consuming")
          stream foreach { item ⇒
            println(s"Consumer Number $n consumed ${item.message()}")
            consumedMessages.incrementAndGet()
          }
        }.andThen { case _ ⇒ println(s"Shut down Consumer Number $n"); consumer.shutdown() }
      }

      awaitCondition(s"Didn't consume $MessageCount messages!", 10.seconds) {
        consumedMessages.get shouldBe MessageCount
      }

      // Tear down: stop consumers, remove the topic, and wait for all async work to finish.
      val shutdownFutures = consumers map (t => Future ( t._2.shutdown() ) )
      KafkaAdminUtils.deleteTopic(topic)
      (consumerFutures ++ shutdownFutures :+ producerFuture) foreach (Await.ready(_, 10.second))
    }
  }
}
| MarianoGappa/kafka-examples | src/test/scala/ConsumerGroupTest.scala | Scala | mit | 2,726 |
package com.roundeights.tubeutil
import org.specs2.mutable._
import java.util.Date
class DateGenTest extends Specification {

  "A DateGen" should {

    "Parse a date" in {
      // The same wall-clock string parsed at +0000 vs +0800 differs by exactly 8 hours.
      val parsedUtc = DateGen.parse("2013-08-02T01:43:08+0000")
      parsedUtc must_== new Date( 1375407788000L )
      val parsedPlusEight = DateGen.parse("2013-08-02T01:43:08+0800")
      parsedPlusEight must_== new Date( 1375378988000L )
    }

    "Format a date in GMT" in {
      // Formatting is pinned to GMT, so the rendered offset is always +0000.
      val formatted = DateGen.format( new Date( 1375407788000L ) )
      formatted must_== "2013-08-02T01:43:08+0000"
    }
  }
}
| Nycto/TubeUtil | src/test/scala/DateGenTest.scala | Scala | mit | 567 |
import java.io.File
import java.util.concurrent.{ExecutorService, Executors}
import javax.servlet.ServletContext
import org.openeyes.api.controllers._
import org.openeyes.api.controllers.workflow._
import org.openeyes.api.handlers.OctScan
import org.scalatra._
// Scalatra lifecycle hook: mounts every REST controller when the servlet context starts.
class ScalatraBootstrap extends LifeCycle {
// Implicit so mounted controllers can register themselves with the Swagger API docs.
implicit val swagger = new OpenEyesSwagger
// val pool: ExecutorService = Executors.newFixedThreadPool(5)
// Mount each controller under its route; the commented-out pool/Handler code below is a
// disabled DICOM-file watcher kept for reference.
override def init(context: ServletContext) {
context.mount(new ApiDocsController, "/api-docs")
context.mount(new ElementController, "/Element", "Element")
context.mount(new EncounterController, "/Encounter", "Encounter")
context.mount(new PatientController, "/Patient", "Patient")
context.mount(new TicketController, "/Ticket", "Ticket")
context.mount(new WorkflowController, "/Workflow", "Workflow")
// try {
// pool.execute(new Handler())
// } catch {
// case e: Exception =>
// pool.shutdown()
// pool.execute(new Handler())
// }
}
// override def destroy(context: ServletContext) {
// pool.shutdown()
// }
//
// class Handler() extends Runnable {
// def run() {
// val args: Array[String] = Array("-b", "OPENEYES:11112", "--directory", "tmp/DICOMFiles/")
// OctScan.main(args, (file: File) => {
// OctScan.process(file)
// })
// }
// }
}
| openeyes/poc-backend | src/main/scala/ScalatraBootstrap.scala | Scala | gpl-3.0 | 1,361 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.language.implicitConversions
import scala.language.postfixOps
import scala.reflect.ClassTag
import org.mockito.{Matchers => mc}
import org.mockito.Mockito.{mock, times, verify, when}
import org.scalatest._
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.Timeouts._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.executor.DataReadMethod
import org.apache.spark.internal.config._
import org.apache.spark.memory.UnifiedMemoryManager
import org.apache.spark.network.{BlockDataManager, BlockTransferService}
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.netty.NettyBlockTransferService
import org.apache.spark.network.shuffle.{BlockFetchingListener, TempShuffleFileManager}
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.scheduler.LiveListenerBus
import org.apache.spark.security.{CryptoStreamUtils, EncryptionFunSuite}
import org.apache.spark.serializer.{JavaSerializer, KryoSerializer, SerializerManager}
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.storage.BlockManagerMessages.BlockManagerHeartbeat
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach
with PrivateMethodTester with LocalSparkContext with ResetSystemProperties
with EncryptionFunSuite {
import BlockManagerSuite._
// Per-test mutable state: assigned in beforeEach and stopped/nulled in afterEach.
var conf: SparkConf = null
var store: BlockManager = null
var store2: BlockManager = null
var store3: BlockManager = null
var rpcEnv: RpcEnv = null
var master: BlockManagerMaster = null
// Collaborators shared across all tests; each is built from its own throwaway SparkConf.
val securityMgr = new SecurityManager(new SparkConf(false))
val bcastManager = new BroadcastManager(true, new SparkConf(false), securityMgr)
val mapOutputTracker = new MapOutputTrackerMaster(new SparkConf(false), bcastManager, true)
val shuffleManager = new SortShuffleManager(new SparkConf(false))
// Reuse a serializer across tests to avoid creating a new thread-local buffer on each test
val serializer = new KryoSerializer(new SparkConf(false).set("spark.kryoserializer.buffer", "1m"))
// Implicitly convert strings to BlockIds for test clarity.
implicit def StringToBlockId(value: String): BlockId = new TestBlockId(value)
// Shorthand for building an RDDBlockId from (rddId, splitId).
def rdd(rddId: Int, splitId: Int): RDDBlockId = RDDBlockId(rddId, splitId)
// Builds and initializes a BlockManager registered with `master` under `name`, with both
// the on-heap test memory and the off-heap size limits set to `maxMem` bytes. Optional
// `transferService`/`testConf` let individual tests inject a mock transfer layer or extra conf.
private def makeBlockManager(
maxMem: Long,
name: String = SparkContext.DRIVER_IDENTIFIER,
master: BlockManagerMaster = this.master,
transferService: Option[BlockTransferService] = Option.empty,
testConf: Option[SparkConf] = None): BlockManager = {
// Layer the suite-level conf on top of any test-specific conf.
val bmConf = testConf.map(_.setAll(conf.getAll)).getOrElse(conf)
bmConf.set("spark.testing.memory", maxMem.toString)
bmConf.set("spark.memory.offHeap.size", maxMem.toString)
val serializer = new KryoSerializer(bmConf)
// Only create an encryption key when IO encryption is turned on in the conf.
val encryptionKey = if (bmConf.get(IO_ENCRYPTION_ENABLED)) {
Some(CryptoStreamUtils.createKey(bmConf))
} else {
None
}
val bmSecurityMgr = new SecurityManager(bmConf, encryptionKey)
// NOTE(review): the default transfer service is built from the outer suite-level
// `conf`/`securityMgr`, not `bmConf`/`bmSecurityMgr` — presumably intentional; confirm.
val transfer = transferService
.getOrElse(new NettyBlockTransferService(conf, securityMgr, "localhost", "localhost", 0, 1))
val memManager = UnifiedMemoryManager(bmConf, numCores = 1)
val serializerManager = new SerializerManager(serializer, bmConf)
val blockManager = new BlockManager(name, rpcEnv, master, serializerManager, bmConf,
memManager, mapOutputTracker, shuffleManager, transfer, bmSecurityMgr, 0)
// The memory store and manager reference each other, so wire them up before initialize.
memManager.setMemoryStore(blockManager.memoryStore)
blockManager.initialize("app-id")
blockManager
}
// Fresh SparkConf, RpcEnv, mocked SparkContext and BlockManagerMaster before every test.
override def beforeEach(): Unit = {
super.beforeEach()
// Set the arch to 64-bit and compressedOops to true to get a deterministic test-case
System.setProperty("os.arch", "amd64")
conf = new SparkConf(false)
.set("spark.app.id", "test")
.set("spark.testing", "true")
.set("spark.memory.fraction", "1")
.set("spark.memory.storageFraction", "1")
.set("spark.kryoserializer.buffer", "1m")
.set("spark.test.useCompressedOops", "true")
.set("spark.storage.unrollFraction", "0.4")
.set("spark.storage.unrollMemoryThreshold", "512")
rpcEnv = RpcEnv.create("test", "localhost", 0, conf, securityMgr)
conf.set("spark.driver.port", rpcEnv.address.port.toString)
// Mock SparkContext to reduce the memory usage of tests. It's fine since the only reason we
// need to create a SparkContext is to initialize LiveListenerBus.
sc = mock(classOf[SparkContext])
when(sc.conf).thenReturn(conf)
master = new BlockManagerMaster(rpcEnv.setupEndpoint("blockmanager",
new BlockManagerMasterEndpoint(rpcEnv, true, conf,
new LiveListenerBus(sc))), conf, true)
// Re-run SizeEstimator's private initialize() so it picks up the forced arch/oops settings.
val initialize = PrivateMethod[Unit]('initialize)
SizeEstimator invokePrivate initialize()
}
// Stops any stores the test created, shuts down the RpcEnv, and clears per-test state.
override def afterEach(): Unit = {
try {
conf = null
if (store != null) {
store.stop()
store = null
}
if (store2 != null) {
store2.stop()
store2 = null
}
if (store3 != null) {
store3.stop()
store3 = null
}
rpcEnv.shutdown()
rpcEnv.awaitTermination()
rpcEnv = null
master = null
} finally {
super.afterEach()
}
}
test("StorageLevel object caching") {
  // StorageLevel.apply interns instances: equal parameter combinations must yield the very
  // same object, and that identity must survive a Java serialization round trip.
  val levelA = StorageLevel(false, false, false, 3)
  val levelB = StorageLevel(false, false, false, 3) // same params -> same cached object
  val levelC = StorageLevel(false, false, false, 2) // different replication -> new object
  assert(levelB === levelA, "level2 is not same as level1")
  assert(levelB.eq(levelA), "level2 is not the same object as level1")
  assert(levelC != levelA, "level3 is same as level1")
  // Round-trip both interned levels through Java serialization.
  val roundTrippedA = Utils.deserialize[StorageLevel](Utils.serialize(levelA))
  val roundTrippedB = Utils.deserialize[StorageLevel](Utils.serialize(levelB))
  assert(roundTrippedA === levelA, "Deserialized level1 not same as original level1")
  assert(roundTrippedA.eq(levelA), "Deserialized level1 not the same object as original level2")
  assert(roundTrippedB === levelB, "Deserialized level2 not same as original level2")
  assert(roundTrippedB.eq(levelA), "Deserialized level2 not the same object as original level1")
}
test("BlockManagerId object caching") {
  // BlockManagerId.apply also interns: identical (executor, host, port) triples share one
  // object, and deserialization resolves back to the cached instance.
  val idA = BlockManagerId("e1", "XXX", 1)
  val idB = BlockManagerId("e1", "XXX", 1) // this should return the same object as id1
  val idC = BlockManagerId("e1", "XXX", 2) // this should return a different object
  assert(idB === idA, "id2 is not same as id1")
  assert(idB.eq(idA), "id2 is not the same object as id1")
  assert(idC != idA, "id3 is same as id1")
  // Round-trip both interned ids through Java serialization.
  val roundTrippedA = Utils.deserialize[BlockManagerId](Utils.serialize(idA))
  val roundTrippedB = Utils.deserialize[BlockManagerId](Utils.serialize(idB))
  assert(roundTrippedA === idA, "Deserialized id1 is not same as original id1")
  assert(roundTrippedA.eq(idA), "Deserialized id1 is not the same object as original id1")
  assert(roundTrippedB === idB, "Deserialized id2 is not same as original id2")
  assert(roundTrippedB.eq(idA), "Deserialized id2 is not the same object as original id1")
}
test("BlockManagerId.isDriver() backwards-compatibility with legacy driver ids (SPARK-6716)") {
  // Both the current and the legacy driver identifiers must be recognized as the driver;
  // anything else must not be.
  def idFor(executorId: String) = BlockManagerId(executorId, "XXX", 1)
  assert(idFor(SparkContext.DRIVER_IDENTIFIER).isDriver)
  assert(idFor(SparkContext.LEGACY_DRIVER_IDENTIFIER).isDriver)
  assert(!idFor("notADriverIdentifier").isDriver)
}
// A single BlockManager must report puts and drops to the master, except for blocks
// stored with tellMaster = false.
test("master + 1 manager interaction") {
store = makeBlockManager(20000)
val a1 = new Array[Byte](4000)
val a2 = new Array[Byte](4000)
val a3 = new Array[Byte](4000)
// Putting a1, a2 and a3 in memory and telling master only about a1 and a2
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY)
store.putSingle("a3", a3, StorageLevel.MEMORY_ONLY, tellMaster = false)
// Checking whether blocks are in memory
assert(store.getSingleAndReleaseLock("a1").isDefined, "a1 was not in store")
assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
assert(store.getSingleAndReleaseLock("a3").isDefined, "a3 was not in store")
// Checking whether master knows about the blocks or not
assert(master.getLocations("a1").size > 0, "master was not told about a1")
assert(master.getLocations("a2").size > 0, "master was not told about a2")
assert(master.getLocations("a3").size === 0, "master was told about a3")
// Drop a1 and a2 from memory; this should be reported back to the master
store.dropFromMemoryIfExists("a1", () => null: Either[Array[Any], ChunkedByteBuffer])
store.dropFromMemoryIfExists("a2", () => null: Either[Array[Any], ChunkedByteBuffer])
assert(store.getSingleAndReleaseLock("a1") === None, "a1 not removed from store")
assert(store.getSingleAndReleaseLock("a2") === None, "a2 not removed from store")
assert(master.getLocations("a1").size === 0, "master did not remove a1")
assert(master.getLocations("a2").size === 0, "master did not remove a2")
}
// Two managers must see each other as peers, and a 2x-replicated put must register
// both locations with the master.
test("master + 2 managers interaction") {
store = makeBlockManager(2000, "exec1")
store2 = makeBlockManager(2000, "exec2")
val peers = master.getPeers(store.blockManagerId)
assert(peers.size === 1, "master did not return the other manager as a peer")
assert(peers.head === store2.blockManagerId, "peer returned by master is not the other manager")
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
// MEMORY_ONLY_2 => replication factor 2, so each block ends up on both managers.
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_2)
store2.putSingle("a2", a2, StorageLevel.MEMORY_ONLY_2)
assert(master.getLocations("a1").size === 2, "master did not report 2 locations for a1")
assert(master.getLocations("a2").size === 2, "master did not report 2 locations for a2")
}
test("removing block") {
  // master.removeBlock must remove master-registered blocks from both the store and the
  // master; a block stored with tellMaster = false must survive the removal request, and
  // all memory must eventually be reported as reclaimed.
  store = makeBlockManager(20000)
  val a1 = new Array[Byte](4000)
  val a2 = new Array[Byte](4000)
  val a3 = new Array[Byte](4000)
  // Putting a1, a2 and a3 in memory and telling master only about a1 and a2
  store.putSingle("a1-to-remove", a1, StorageLevel.MEMORY_ONLY)
  store.putSingle("a2-to-remove", a2, StorageLevel.MEMORY_ONLY)
  store.putSingle("a3-to-remove", a3, StorageLevel.MEMORY_ONLY, tellMaster = false)
  // Checking whether blocks are in memory and memory size
  val memStatus = master.getMemoryStatus.head._2
  assert(memStatus._1 == 40000L, "total memory " + memStatus._1 + " should equal 40000")
  // BUGFIX: the failure message used to say "should <= 12000" while the condition checks
  // against 32000L; the message now matches the asserted bound.
  assert(memStatus._2 <= 32000L, "remaining memory " + memStatus._2 + " should <= 32000")
  assert(store.getSingleAndReleaseLock("a1-to-remove").isDefined, "a1 was not in store")
  assert(store.getSingleAndReleaseLock("a2-to-remove").isDefined, "a2 was not in store")
  assert(store.getSingleAndReleaseLock("a3-to-remove").isDefined, "a3 was not in store")
  // Checking whether master knows about the blocks or not
  assert(master.getLocations("a1-to-remove").size > 0, "master was not told about a1")
  assert(master.getLocations("a2-to-remove").size > 0, "master was not told about a2")
  assert(master.getLocations("a3-to-remove").size === 0, "master was told about a3")
  // Remove a1 and a2 and a3. Should be no-op for a3.
  master.removeBlock("a1-to-remove")
  master.removeBlock("a2-to-remove")
  master.removeBlock("a3-to-remove")
  eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
    assert(!store.hasLocalBlock("a1-to-remove"))
    master.getLocations("a1-to-remove") should have size 0
  }
  eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
    assert(!store.hasLocalBlock("a2-to-remove"))
    master.getLocations("a2-to-remove") should have size 0
  }
  eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
    // a3 was never registered with the master, so removal must not touch the local copy.
    assert(store.hasLocalBlock("a3-to-remove"))
    master.getLocations("a3-to-remove") should have size 0
  }
  eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
    val memStatus = master.getMemoryStatus.head._2
    memStatus._1 should equal (40000L)
    memStatus._2 should equal (40000L)
  }
}
// removeRdd must delete only the RDD's blocks (both async and blocking variants) and
// leave unrelated blocks untouched.
test("removing rdd") {
store = makeBlockManager(20000)
val a1 = new Array[Byte](4000)
val a2 = new Array[Byte](4000)
val a3 = new Array[Byte](4000)
// Putting a1, a2 and a3 in memory.
store.putSingle(rdd(0, 0), a1, StorageLevel.MEMORY_ONLY)
store.putSingle(rdd(0, 1), a2, StorageLevel.MEMORY_ONLY)
store.putSingle("nonrddblock", a3, StorageLevel.MEMORY_ONLY)
// Non-blocking removal: completion is observed via eventually().
master.removeRdd(0, blocking = false)
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingleAndReleaseLock(rdd(0, 0)) should be (None)
master.getLocations(rdd(0, 0)) should have size 0
}
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingleAndReleaseLock(rdd(0, 1)) should be (None)
master.getLocations(rdd(0, 1)) should have size 0
}
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingleAndReleaseLock("nonrddblock") should not be (None)
master.getLocations("nonrddblock") should have size (1)
}
// Blocking removal: the effects must be visible immediately after the call returns.
store.putSingle(rdd(0, 0), a1, StorageLevel.MEMORY_ONLY)
store.putSingle(rdd(0, 1), a2, StorageLevel.MEMORY_ONLY)
master.removeRdd(0, blocking = true)
store.getSingleAndReleaseLock(rdd(0, 0)) should be (None)
master.getLocations(rdd(0, 0)) should have size 0
store.getSingleAndReleaseLock(rdd(0, 1)) should be (None)
master.getLocations(rdd(0, 1)) should have size 0
}
// removeBroadcast must respect removeFromMaster (executor-only vs everywhere) and work
// in both blocking and async modes, including multi-piece broadcast blocks.
test("removing broadcast") {
store = makeBlockManager(2000)
val driverStore = store
val executorStore = makeBlockManager(2000, "executor")
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
val a4 = new Array[Byte](400)
val broadcast0BlockId = BroadcastBlockId(0)
val broadcast1BlockId = BroadcastBlockId(1)
val broadcast2BlockId = BroadcastBlockId(2)
// Broadcast 2 has a second named piece to verify all pieces of an id are removed.
val broadcast2BlockId2 = BroadcastBlockId(2, "_")
// insert broadcast blocks in both the stores
Seq(driverStore, executorStore).foreach { case s =>
s.putSingle(broadcast0BlockId, a1, StorageLevel.DISK_ONLY)
s.putSingle(broadcast1BlockId, a2, StorageLevel.DISK_ONLY)
s.putSingle(broadcast2BlockId, a3, StorageLevel.DISK_ONLY)
s.putSingle(broadcast2BlockId2, a4, StorageLevel.DISK_ONLY)
}
// verify whether the blocks exist in both the stores
Seq(driverStore, executorStore).foreach { case s =>
assert(s.hasLocalBlock(broadcast0BlockId))
assert(s.hasLocalBlock(broadcast1BlockId))
assert(s.hasLocalBlock(broadcast2BlockId))
assert(s.hasLocalBlock(broadcast2BlockId2))
}
// remove broadcast 0 block only from executors
master.removeBroadcast(0, removeFromMaster = false, blocking = true)
// only broadcast 0 block should be removed from the executor store
assert(!executorStore.hasLocalBlock(broadcast0BlockId))
assert(executorStore.hasLocalBlock(broadcast1BlockId))
assert(executorStore.hasLocalBlock(broadcast2BlockId))
// nothing should be removed from the driver store
assert(driverStore.hasLocalBlock(broadcast0BlockId))
assert(driverStore.hasLocalBlock(broadcast1BlockId))
assert(driverStore.hasLocalBlock(broadcast2BlockId))
// remove broadcast 0 block from the driver as well
master.removeBroadcast(0, removeFromMaster = true, blocking = true)
assert(!driverStore.hasLocalBlock(broadcast0BlockId))
assert(driverStore.hasLocalBlock(broadcast1BlockId))
// remove broadcast 1 block from both the stores asynchronously
// and verify all broadcast 1 blocks have been removed
master.removeBroadcast(1, removeFromMaster = true, blocking = false)
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
assert(!driverStore.hasLocalBlock(broadcast1BlockId))
assert(!executorStore.hasLocalBlock(broadcast1BlockId))
}
// remove broadcast 2 from both the stores asynchronously
// and verify all broadcast 2 blocks have been removed
master.removeBroadcast(2, removeFromMaster = true, blocking = false)
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
assert(!driverStore.hasLocalBlock(broadcast2BlockId))
assert(!driverStore.hasLocalBlock(broadcast2BlockId2))
assert(!executorStore.hasLocalBlock(broadcast2BlockId))
assert(!executorStore.hasLocalBlock(broadcast2BlockId2))
}
// Stop both stores explicitly; null out `store` so afterEach doesn't double-stop it.
executorStore.stop()
driverStore.stop()
store = null
}
// After the master forgets an executor, a heartbeat from that executor's BlockManager
// must come back asking it to reregister.
test("reregistration on heart beat") {
store = makeBlockManager(2000)
val a1 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
assert(store.getSingleAndReleaseLock("a1").isDefined, "a1 was not in store")
assert(master.getLocations("a1").size > 0, "master was not told about a1")
master.removeExecutor(store.blockManagerId.executorId)
assert(master.getLocations("a1").size == 0, "a1 was not removed from master")
// The heartbeat ask returns whether the master still knows this block manager; the
// negation is true when the manager is unknown and must reregister.
val reregister = !master.driverEndpoint.askSync[Boolean](
BlockManagerHeartbeat(store.blockManagerId))
assert(reregister == true)
}
// A block update after the master forgot the executor must trigger an async
// reregistration that restores knowledge of all previously stored blocks.
test("reregistration on block update") {
store = makeBlockManager(2000)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
assert(master.getLocations("a1").size > 0, "master was not told about a1")
master.removeExecutor(store.blockManagerId.executorId)
assert(master.getLocations("a1").size == 0, "a1 was not removed from master")
// Putting a2 should detect the stale registration and reregister asynchronously.
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY)
store.waitForAsyncReregister()
assert(master.getLocations("a1").size > 0, "a1 was not reregistered with master")
assert(master.getLocations("a2").size > 0, "master was not told about a2")
}
// Stress-runs concurrent puts and an explicit reregister 100 times to flush out any
// deadlock between block puts and the reregistration path.
test("reregistration doesn't dead lock") {
store = makeBlockManager(2000)
val a1 = new Array[Byte](400)
val a2 = List(new Array[Byte](400))
// try many times to trigger any deadlocks
for (i <- 1 to 100) {
master.removeExecutor(store.blockManagerId.executorId)
val t1 = new Thread {
override def run() {
store.putIterator(
"a2", a2.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
}
val t2 = new Thread {
override def run() {
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
}
}
val t3 = new Thread {
override def run() {
store.reregister()
}
}
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
// Clean up between iterations so each round starts from an empty memory store.
store.dropFromMemoryIfExists("a1", () => null: Either[Array[Any], ChunkedByteBuffer])
store.dropFromMemoryIfExists("a2", () => null: Either[Array[Any], ChunkedByteBuffer])
store.waitForAsyncReregister()
}
}
test("correct BlockResult returned from get() calls") {
  // get() must report where a block was read from (memory vs disk), how many bytes it
  // occupies, and the deserialized data itself.
  store = makeBlockManager(12000)
  val list1 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list2 = List(new Array[Byte](500), new Array[Byte](1000), new Array[Byte](1500))
  val list1SizeEstimate = SizeEstimator.estimate(list1.iterator.toArray)
  val list2SizeEstimate = SizeEstimator.estimate(list2.iterator.toArray)
  store.putIterator(
    "list1", list1.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  store.putIterator(
    "list2memory", list2.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  store.putIterator(
    "list2disk", list2.iterator, StorageLevel.DISK_ONLY, tellMaster = true)
  // Memory-resident block: exact size estimate and Memory read method.
  val list1Get = store.get("list1")
  assert(list1Get.isDefined, "list1 expected to be in store")
  assert(list1Get.get.data.size === 2)
  assert(list1Get.get.bytes === list1SizeEstimate)
  assert(list1Get.get.readMethod === DataReadMethod.Memory)
  val list2MemoryGet = store.get("list2memory")
  assert(list2MemoryGet.isDefined, "list2memory expected to be in store")
  assert(list2MemoryGet.get.data.size === 3)
  assert(list2MemoryGet.get.bytes === list2SizeEstimate)
  assert(list2MemoryGet.get.readMethod === DataReadMethod.Memory)
  val list2DiskGet = store.get("list2disk")
  // BUGFIX: failure message was a copy-paste of "list2memory"; this assertion is about list2disk.
  assert(list2DiskGet.isDefined, "list2disk expected to be in store")
  assert(list2DiskGet.get.data.size === 3)
  // We don't know the exact size of the data on disk, but it should certainly be > 0.
  assert(list2DiskGet.get.bytes > 0)
  assert(list2DiskGet.get.readMethod === DataReadMethod.Disk)
}
test("optimize a location order of blocks") {
// Registers two block managers on the local host and one on a remote host,
// then calls the private BlockManager.getLocations via ScalaTest's
// PrivateMethod reflection helper.
// NOTE(review): asserting on a Set verifies membership only, not that local
// hosts are ordered before remote ones — confirm whether order should be
// asserted here (the duplicate localHost in the Set literal is collapsed).
val localHost = Utils.localHostName()
val otherHost = "otherHost"
val bmMaster = mock(classOf[BlockManagerMaster])
val bmId1 = BlockManagerId("id1", localHost, 1)
val bmId2 = BlockManagerId("id2", localHost, 2)
val bmId3 = BlockManagerId("id3", otherHost, 3)
// The mocked master returns all three locations for any block id.
when(bmMaster.getLocations(mc.any[BlockId])).thenReturn(Seq(bmId1, bmId2, bmId3))
val blockManager = makeBlockManager(128, "exec", bmMaster)
val getLocations = PrivateMethod[Seq[BlockManagerId]]('getLocations)
val locations = blockManager invokePrivate getLocations(BroadcastBlockId(0))
assert(locations.map(_.host).toSet === Set(localHost, localHost, otherHost))
}
test("SPARK-9591: getRemoteBytes from another location when Exception throw") {
  // A remote fetch should fall back to another replica when one executor
  // dies, and return None (not throw) once no replica remains.
  conf.set("spark.shuffle.io.maxRetries", "0")
  store = makeBlockManager(8000, "executor1")
  store2 = makeBlockManager(8000, "executor2")
  store3 = makeBlockManager(8000, "executor3")
  val payload = List(new Array[Byte](4000))
  // Replicate the block on both executor2 and executor3.
  Seq(store2, store3).foreach { replica =>
    replica.putIterator(
      "list1", payload.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  }
  assert(store.getRemoteBytes("list1").isDefined, "list1Get expected to be fetched")
  store2.stop()
  store2 = null
  assert(store.getRemoteBytes("list1").isDefined, "list1Get expected to be fetched")
  store3.stop()
  store3 = null
  // Should return None instead of throwing an exception:
  assert(store.getRemoteBytes("list1").isEmpty)
}
test("SPARK-14252: getOrElseUpdate should still read from remote storage") {
  // getOrElseUpdate on a block that only exists on another executor must
  // fetch it remotely rather than invoking the local compute closure.
  store = makeBlockManager(8000, "executor1")
  store2 = makeBlockManager(8000, "executor2")
  val remoteOnlyData = List(new Array[Byte](4000))
  store2.putIterator(
    "list1", remoteOnlyData.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  val result = store.getOrElseUpdate(
    "list1",
    StorageLevel.MEMORY_ONLY,
    ClassTag.Any,
    () => throw new AssertionError("attempted to compute locally"))
  // Left means the value was obtained without running the closure.
  assert(result.isLeft)
}
// The three tests below run the shared LRU eviction scenario
// (testInMemoryLRUStorage) at different in-memory storage levels:
// deserialized, serialized, and serialized off-heap.
test("in-memory LRU storage") {
testInMemoryLRUStorage(StorageLevel.MEMORY_ONLY)
}
test("in-memory LRU storage with serialization") {
testInMemoryLRUStorage(StorageLevel.MEMORY_ONLY_SER)
}
test("in-memory LRU storage with off-heap") {
testInMemoryLRUStorage(StorageLevel(
useDisk = false,
useMemory = true,
useOffHeap = true,
deserialized = false, replication = 1))
}
// Shared LRU scenario: three 4000-byte blocks in a 12000-byte store.
// Touching a2 and a3 makes a1 least-recently-used, so re-inserting a1
// should evict a3 (the LRU block at that point).
private def testInMemoryLRUStorage(storageLevel: StorageLevel): Unit = {
store = makeBlockManager(12000)
val a1 = new Array[Byte](4000)
val a2 = new Array[Byte](4000)
val a3 = new Array[Byte](4000)
store.putSingle("a1", a1, storageLevel)
store.putSingle("a2", a2, storageLevel)
store.putSingle("a3", a3, storageLevel)
assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
assert(store.getSingleAndReleaseLock("a3").isDefined, "a3 was not in store")
assert(store.getSingleAndReleaseLock("a1") === None, "a1 was in store")
assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
// At this point a2 was gotten last, so LRU will get rid of a3
store.putSingle("a1", a1, storageLevel)
assert(store.getSingleAndReleaseLock("a1").isDefined, "a1 was not in store")
assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
assert(store.getSingleAndReleaseLock("a3") === None, "a3 was in store")
}
test("in-memory LRU for partitions of same RDD") {
  // Caching a partition must not evict other partitions of the same RDD;
  // instead the incoming partition is the one that fails to be cached.
  store = makeBlockManager(12000)
  val partition1 = new Array[Byte](4000)
  val partition2 = new Array[Byte](4000)
  val partition3 = new Array[Byte](4000)
  store.putSingle(rdd(0, 1), partition1, StorageLevel.MEMORY_ONLY)
  store.putSingle(rdd(0, 2), partition2, StorageLevel.MEMORY_ONLY)
  store.putSingle(rdd(0, 3), partition3, StorageLevel.MEMORY_ONLY)
  // Even though we accessed rdd_0_3 last, it should not have replaced
  // partitions 1 and 2 from the same RDD.
  assert(store.getSingleAndReleaseLock(rdd(0, 3)) === None, "rdd_0_3 was in store")
  assert(store.getSingleAndReleaseLock(rdd(0, 2)).isDefined, "rdd_0_2 was not in store")
  assert(store.getSingleAndReleaseLock(rdd(0, 1)).isDefined, "rdd_0_1 was not in store")
  // Check that rdd_0_3 doesn't replace them even after further accesses.
  (1 to 3).foreach { _ =>
    assert(store.getSingleAndReleaseLock(rdd(0, 3)) === None, "rdd_0_3 was in store")
  }
}
// Cross-RDD eviction: blocks of *other* RDDs are normal LRU candidates,
// while in-progress puts never evict blocks of the RDD being cached.
// The assertion sequence below is order-sensitive (each get updates LRU
// state), so statements must not be reordered.
test("in-memory LRU for partitions of multiple RDDs") {
store = makeBlockManager(12000)
store.putSingle(rdd(0, 1), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
store.putSingle(rdd(0, 2), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
store.putSingle(rdd(1, 1), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
// At this point rdd_1_1 should've replaced rdd_0_1
assert(store.memoryStore.contains(rdd(1, 1)), "rdd_1_1 was not in store")
assert(!store.memoryStore.contains(rdd(0, 1)), "rdd_0_1 was in store")
assert(store.memoryStore.contains(rdd(0, 2)), "rdd_0_2 was not in store")
// Do a get() on rdd_0_2 so that it is the most recently used item
assert(store.getSingleAndReleaseLock(rdd(0, 2)).isDefined, "rdd_0_2 was not in store")
// Put in more partitions from RDD 0; they should replace rdd_1_1
store.putSingle(rdd(0, 3), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
store.putSingle(rdd(0, 4), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
// Now rdd_1_1 should be dropped to add rdd_0_3, but then rdd_0_2 should *not* be dropped
// when we try to add rdd_0_4.
assert(!store.memoryStore.contains(rdd(1, 1)), "rdd_1_1 was in store")
assert(!store.memoryStore.contains(rdd(0, 1)), "rdd_0_1 was in store")
assert(!store.memoryStore.contains(rdd(0, 4)), "rdd_0_4 was in store")
assert(store.memoryStore.contains(rdd(0, 2)), "rdd_0_2 was not in store")
assert(store.memoryStore.contains(rdd(0, 3)), "rdd_0_3 was not in store")
}
encryptionTest("on-disk storage") { _conf =>
  // All three blocks are written DISK_ONLY, so nothing competes for memory
  // and every block must still be readable afterwards.
  store = makeBlockManager(1200, testConf = Some(_conf))
  val a1 = new Array[Byte](400)
  val a2 = new Array[Byte](400)
  val a3 = new Array[Byte](400)
  store.putSingle("a1", a1, StorageLevel.DISK_ONLY)
  store.putSingle("a2", a2, StorageLevel.DISK_ONLY)
  store.putSingle("a3", a3, StorageLevel.DISK_ONLY)
  // Fixed inverted failure messages: these asserts fire when the block is
  // *missing*, so the message must read "was not in store".
  assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
  assert(store.getSingleAndReleaseLock("a3").isDefined, "a3 was not in store")
  assert(store.getSingleAndReleaseLock("a1").isDefined, "a1 was not in store")
}
// These six variants previously passed the suite-level `conf` to
// testDiskAndMemoryStorage instead of the per-test `_conf` supplied by
// encryptionTest, so the encryption settings were silently ignored (contrast
// the other encryptionTest blocks, which correctly use `_conf`). Fixed to
// thread `_conf` through.
encryptionTest("disk and memory storage") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.MEMORY_AND_DISK, getAsBytes = false, testConf = _conf)
}
encryptionTest("disk and memory storage with getLocalBytes") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.MEMORY_AND_DISK, getAsBytes = true, testConf = _conf)
}
encryptionTest("disk and memory storage with serialization") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.MEMORY_AND_DISK_SER, getAsBytes = false, testConf = _conf)
}
encryptionTest("disk and memory storage with serialization and getLocalBytes") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.MEMORY_AND_DISK_SER, getAsBytes = true, testConf = _conf)
}
encryptionTest("disk and off-heap memory storage") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.OFF_HEAP, getAsBytes = false, testConf = _conf)
}
encryptionTest("disk and off-heap memory storage with getLocalBytes") { _conf =>
  testDiskAndMemoryStorage(StorageLevel.OFF_HEAP, getAsBytes = true, testConf = _conf)
}
// Shared body for the MEMORY_AND_DISK-style tests above.
//
// Stores three 4000-byte blocks at `storageLevel` in a 12000-byte store
// (forcing some to spill to disk), reads them all back — as values or as
// bytes depending on `getAsBytes` — and finally checks whether reading "a1"
// cached it back into the memory store.
def testDiskAndMemoryStorage(
storageLevel: StorageLevel,
getAsBytes: Boolean,
testConf: SparkConf): Unit = {
store = makeBlockManager(12000, testConf = Some(testConf))
// Eta-expanded method reference: both branches are functions from block id
// to an Option, so the reads below are uniform regardless of getAsBytes.
val accessMethod =
if (getAsBytes) store.getLocalBytesAndReleaseLock else store.getSingleAndReleaseLock
val a1 = new Array[Byte](4000)
val a2 = new Array[Byte](4000)
val a3 = new Array[Byte](4000)
store.putSingle("a1", a1, storageLevel)
store.putSingle("a2", a2, storageLevel)
store.putSingle("a3", a3, storageLevel)
assert(accessMethod("a2").isDefined, "a2 was not in store")
assert(accessMethod("a3").isDefined, "a3 was not in store")
assert(accessMethod("a1").isDefined, "a1 was not in store")
val dataShouldHaveBeenCachedBackIntoMemory = {
if (storageLevel.deserialized) {
// Reading bytes of a deserialized block does not re-cache the values.
!getAsBytes
} else {
// If the block's storage level is serialized, then always cache the bytes in memory, even
// if the caller requested values.
true
}
}
if (dataShouldHaveBeenCachedBackIntoMemory) {
assert(store.memoryStore.contains("a1"), "a1 was not in memory store")
} else {
assert(!store.memoryStore.contains("a1"), "a1 was in memory store")
}
}
encryptionTest("LRU with mixed storage levels") { _conf =>
  // Disk-only blocks must not trigger memory eviction; adding a block that
  // also uses memory evicts the least-recently-used in-memory block.
  store = makeBlockManager(12000, testConf = Some(_conf))
  def fourKb(): Array[Byte] = new Array[Byte](4000)
  // First store a1 and a2, both in memory, and a3, on disk only.
  store.putSingle("a1", fourKb(), StorageLevel.MEMORY_ONLY_SER)
  store.putSingle("a2", fourKb(), StorageLevel.MEMORY_ONLY_SER)
  store.putSingle("a3", fourKb(), StorageLevel.DISK_ONLY)
  // At this point LRU should not kick in because a3 is only on disk.
  assert(store.getSingleAndReleaseLock("a1").isDefined, "a1 was not in store")
  assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
  assert(store.getSingleAndReleaseLock("a3").isDefined, "a3 was not in store")
  // Now add a4, which uses both disk and memory; a1 should drop out.
  store.putSingle("a4", fourKb(), StorageLevel.MEMORY_AND_DISK_SER)
  assert(store.getSingleAndReleaseLock("a1") == None, "a1 was in store")
  assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
  assert(store.getSingleAndReleaseLock("a3").isDefined, "a3 was not in store")
  assert(store.getSingleAndReleaseLock("a4").isDefined, "a4 was not in store")
}
encryptionTest("in-memory LRU with streams") { _conf =>
  // Same LRU scenario as the putSingle-based tests, but exercised through
  // putIterator: list1 is evicted first, and after re-inserting list1 the
  // least-recently-used block (list3) is evicted instead.
  store = makeBlockManager(12000, testConf = Some(_conf))
  val list1 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list2 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list3 = List(new Array[Byte](2000), new Array[Byte](2000))
  store.putIterator(
    "list1", list1.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  store.putIterator(
    "list2", list2.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  store.putIterator(
    "list3", list3.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  assert(store.getAndReleaseLock("list3").isDefined, "list3 was not in store")
  assert(store.get("list3").get.data.size === 2)
  assert(store.getAndReleaseLock("list1") === None, "list1 was in store")
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  // At this point list2 was gotten last, so LRU will get rid of list3
  store.putIterator(
    "list1", list1.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
  assert(store.getAndReleaseLock("list1").isDefined, "list1 was not in store")
  assert(store.get("list1").get.data.size === 2)
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  // Fixed copy-paste error: this failure message previously said "list1".
  assert(store.getAndReleaseLock("list3") === None, "list3 was in store")
}
encryptionTest("LRU with mixed storage levels and streams") { _conf =>
  // putIterator analogue of "LRU with mixed storage levels": a disk-only
  // block does not trigger memory eviction, while a MEMORY_AND_DISK put
  // evicts the least-recently-used in-memory block (list1).
  // Removed dead code: a listForSizeEstimate/listSize computation whose
  // result was never used.
  store = makeBlockManager(12000, testConf = Some(_conf))
  val list1 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list2 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list3 = List(new Array[Byte](2000), new Array[Byte](2000))
  val list4 = List(new Array[Byte](2000), new Array[Byte](2000))
  // First store list1 and list2, both in memory, and list3, on disk only
  store.putIterator(
    "list1", list1.iterator, StorageLevel.MEMORY_ONLY_SER, tellMaster = true)
  store.putIterator(
    "list2", list2.iterator, StorageLevel.MEMORY_ONLY_SER, tellMaster = true)
  store.putIterator(
    "list3", list3.iterator, StorageLevel.DISK_ONLY, tellMaster = true)
  // At this point LRU should not kick in because list3 is only on disk
  assert(store.getAndReleaseLock("list1").isDefined, "list1 was not in store")
  assert(store.get("list1").get.data.size === 2)
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  assert(store.getAndReleaseLock("list3").isDefined, "list3 was not in store")
  assert(store.get("list3").get.data.size === 2)
  assert(store.getAndReleaseLock("list1").isDefined, "list1 was not in store")
  assert(store.get("list1").get.data.size === 2)
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  assert(store.getAndReleaseLock("list3").isDefined, "list3 was not in store")
  assert(store.get("list3").get.data.size === 2)
  // Now let's add in list4, which uses both disk and memory; list1 should drop out
  store.putIterator(
    "list4", list4.iterator, StorageLevel.MEMORY_AND_DISK_SER, tellMaster = true)
  assert(store.getAndReleaseLock("list1") === None, "list1 was in store")
  assert(store.getAndReleaseLock("list2").isDefined, "list2 was not in store")
  assert(store.get("list2").get.data.size === 2)
  assert(store.getAndReleaseLock("list3").isDefined, "list3 was not in store")
  assert(store.get("list3").get.data.size === 2)
  assert(store.getAndReleaseLock("list4").isDefined, "list4 was not in store")
  assert(store.get("list4").get.data.size === 2)
}
test("negative byte values in ByteBufferInputStream") {
  // read() must return unsigned values in 0-255 (not sign-extended
  // negatives) and signal end-of-stream with -1 in both overloads.
  val bytes = Array(254, 255, 0, 1, 2).map(_.toByte)
  val stream = new ByteBufferInputStream(ByteBuffer.wrap(bytes))
  val scratch = new Array[Byte](10)
  assert(stream.read() === 254, "unexpected byte read")
  assert(stream.read() === 255, "unexpected byte read")
  assert(stream.read() === 0, "unexpected byte read")
  assert(stream.read(scratch, 0, scratch.length) === 2, "unexpected number of bytes read")
  assert(stream.read() === -1, "end of stream not signalled")
  assert(stream.read(scratch, 0, scratch.length) === -1, "end of stream not signalled")
}
test("overly large block") {
  // A block larger than the whole memory store cannot be cached in memory;
  // with MEMORY_AND_DISK it should spill to disk and remain readable.
  store = makeBlockManager(5000)
  val memoryOnlyBlock = new Array[Byte](10000)
  store.putSingle("a1", memoryOnlyBlock, StorageLevel.MEMORY_ONLY)
  assert(store.getSingleAndReleaseLock("a1") === None, "a1 was in store")
  val spillableBlock = new Array[Byte](10000)
  store.putSingle("a2", spillableBlock, StorageLevel.MEMORY_AND_DISK)
  assert(!store.memoryStore.contains("a2"), "a2 was in memory store")
  assert(store.getSingleAndReleaseLock("a2").isDefined, "a2 was not in store")
}
test("block compression") {
  // Each spark.*.compress flag should control compression only for its own
  // block type; "other" blocks are never compressed. A fresh BlockManager is
  // created after every conf change because the setting is read at
  // construction time.
  try {
    conf.set("spark.shuffle.compress", "true")
    store = makeBlockManager(20000, "exec1")
    store.putSingle(
      ShuffleBlockId(0, 0, 0), new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(ShuffleBlockId(0, 0, 0)) <= 100,
      "shuffle_0_0_0 was not compressed")
    store.stop()
    store = null
    conf.set("spark.shuffle.compress", "false")
    store = makeBlockManager(20000, "exec2")
    store.putSingle(
      ShuffleBlockId(0, 0, 0), new Array[Byte](10000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(ShuffleBlockId(0, 0, 0)) >= 10000,
      "shuffle_0_0_0 was compressed")
    store.stop()
    store = null
    conf.set("spark.broadcast.compress", "true")
    store = makeBlockManager(20000, "exec3")
    store.putSingle(
      BroadcastBlockId(0), new Array[Byte](10000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(BroadcastBlockId(0)) <= 1000,
      "broadcast_0 was not compressed")
    store.stop()
    store = null
    conf.set("spark.broadcast.compress", "false")
    store = makeBlockManager(20000, "exec4")
    store.putSingle(
      BroadcastBlockId(0), new Array[Byte](10000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(BroadcastBlockId(0)) >= 10000, "broadcast_0 was compressed")
    store.stop()
    store = null
    conf.set("spark.rdd.compress", "true")
    store = makeBlockManager(20000, "exec5")
    store.putSingle(rdd(0, 0), new Array[Byte](10000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(rdd(0, 0)) <= 1000, "rdd_0_0 was not compressed")
    store.stop()
    store = null
    conf.set("spark.rdd.compress", "false")
    store = makeBlockManager(20000, "exec6")
    store.putSingle(rdd(0, 0), new Array[Byte](10000), StorageLevel.MEMORY_ONLY_SER)
    assert(store.memoryStore.getSize(rdd(0, 0)) >= 10000, "rdd_0_0 was compressed")
    store.stop()
    store = null
    // Check that any other block types are also kept uncompressed
    store = makeBlockManager(20000, "exec7")
    store.putSingle("other_block", new Array[Byte](10000), StorageLevel.MEMORY_ONLY)
    assert(store.memoryStore.getSize("other_block") >= 10000, "other_block was compressed")
    store.stop()
    store = null
  } finally {
    // Fixed ineffective cleanup: the settings were made via conf.set, so
    // System.clearProperty never undid them and the last-set values leaked
    // into subsequent tests. Remove them from the SparkConf instead.
    conf.remove("spark.shuffle.compress")
    conf.remove("spark.broadcast.compress")
    conf.remove("spark.rdd.compress")
  }
}
// A put of an unserializable value to disk must propagate the
// NotSerializableException and leave no partial block state behind
// (a subsequent get must return None without hanging on a stale lock).
// The BlockManager is wired manually here (instead of via makeBlockManager)
// so the Java serializer can be injected.
test("block store put failure") {
// Use Java serializer so we can create an unserializable error.
conf.set("spark.testing.memory", "1200")
val transfer = new NettyBlockTransferService(conf, securityMgr, "localhost", "localhost", 0, 1)
val memoryManager = UnifiedMemoryManager(conf, numCores = 1)
val serializerManager = new SerializerManager(new JavaSerializer(conf), conf)
store = new BlockManager(SparkContext.DRIVER_IDENTIFIER, rpcEnv, master,
serializerManager, conf, memoryManager, mapOutputTracker,
shuffleManager, transfer, securityMgr, 0)
memoryManager.setMemoryStore(store.memoryStore)
store.initialize("app-id")
// The put should fail since a1 is not serializable.
class UnserializableClass
val a1 = new UnserializableClass
intercept[java.io.NotSerializableException] {
store.putSingle("a1", a1, StorageLevel.DISK_ONLY)
}
// Make sure get a1 doesn't hang and returns None.
failAfter(1 second) {
assert(store.getSingleAndReleaseLock("a1").isEmpty, "a1 should not be in store")
}
}
// Verifies that task metrics record an updated-block-status entry for every
// block that a put (or removal) adds, evicts, drops to disk, or removes.
// The sequence is strictly order-dependent: each put changes which blocks
// the next put will evict, so statements must not be reordered.
test("updated block statuses") {
store = makeBlockManager(12000)
store.registerTask(0)
val list = List.fill(2)(new Array[Byte](2000))
val bigList = List.fill(8)(new Array[Byte](2000))
// Runs `task` under a fresh TaskContext and returns the block-status
// updates it recorded in the task's metrics.
def getUpdatedBlocks(task: => Unit): Seq[(BlockId, BlockStatus)] = {
val context = TaskContext.empty()
try {
TaskContext.setTaskContext(context)
task
} finally {
TaskContext.unset()
}
context.taskMetrics.updatedBlockStatuses
}
// 1 updated block (i.e. list1)
val updatedBlocks1 = getUpdatedBlocks {
store.putIterator(
"list1", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
assert(updatedBlocks1.size === 1)
assert(updatedBlocks1.head._1 === TestBlockId("list1"))
assert(updatedBlocks1.head._2.storageLevel === StorageLevel.MEMORY_ONLY)
// 1 updated block (i.e. list2)
val updatedBlocks2 = getUpdatedBlocks {
store.putIterator(
"list2", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
}
assert(updatedBlocks2.size === 1)
assert(updatedBlocks2.head._1 === TestBlockId("list2"))
// list2 fits in memory, so its effective level is reported as MEMORY_ONLY.
assert(updatedBlocks2.head._2.storageLevel === StorageLevel.MEMORY_ONLY)
// 2 updated blocks - list1 is kicked out of memory while list3 is added
val updatedBlocks3 = getUpdatedBlocks {
store.putIterator(
"list3", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
assert(updatedBlocks3.size === 2)
updatedBlocks3.foreach { case (id, status) =>
id match {
case TestBlockId("list1") => assert(status.storageLevel === StorageLevel.NONE)
case TestBlockId("list3") => assert(status.storageLevel === StorageLevel.MEMORY_ONLY)
case _ => fail("Updated block is neither list1 nor list3")
}
}
assert(store.memoryStore.contains("list3"), "list3 was not in memory store")
// 2 updated blocks - list2 is kicked out of memory (but put on disk) while list4 is added
val updatedBlocks4 = getUpdatedBlocks {
store.putIterator(
"list4", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
assert(updatedBlocks4.size === 2)
updatedBlocks4.foreach { case (id, status) =>
id match {
case TestBlockId("list2") => assert(status.storageLevel === StorageLevel.DISK_ONLY)
case TestBlockId("list4") => assert(status.storageLevel === StorageLevel.MEMORY_ONLY)
case _ => fail("Updated block is neither list2 nor list4")
}
}
assert(store.diskStore.contains("list2"), "list2 was not in disk store")
assert(store.memoryStore.contains("list4"), "list4 was not in memory store")
// No updated blocks - list5 is too big to fit in store and nothing is kicked out
val updatedBlocks5 = getUpdatedBlocks {
store.putIterator(
"list5", bigList.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
assert(updatedBlocks5.size === 0)
// memory store contains only list3 and list4
assert(!store.memoryStore.contains("list1"), "list1 was in memory store")
assert(!store.memoryStore.contains("list2"), "list2 was in memory store")
assert(store.memoryStore.contains("list3"), "list3 was not in memory store")
assert(store.memoryStore.contains("list4"), "list4 was not in memory store")
assert(!store.memoryStore.contains("list5"), "list5 was in memory store")
// disk store contains only list2
assert(!store.diskStore.contains("list1"), "list1 was in disk store")
assert(store.diskStore.contains("list2"), "list2 was not in disk store")
assert(!store.diskStore.contains("list3"), "list3 was in disk store")
assert(!store.diskStore.contains("list4"), "list4 was in disk store")
assert(!store.diskStore.contains("list5"), "list5 was in disk store")
// remove block - list2 should be removed from disk
val updatedBlocks6 = getUpdatedBlocks {
store.removeBlock(
"list2", tellMaster = true)
}
assert(updatedBlocks6.size === 1)
assert(updatedBlocks6.head._1 === TestBlockId("list2"))
assert(updatedBlocks6.head._2.storageLevel == StorageLevel.NONE)
assert(!store.diskStore.contains("list2"), "list2 was in disk store")
}
// getLocations and getBlockStatus on the master should agree for blocks
// reported with tellMaster = true; for unreported blocks, the master only
// learns about them when getBlockStatus is allowed to ask the slaves.
test("query block statuses") {
store = makeBlockManager(12000)
val list = List.fill(2)(new Array[Byte](2000))
// Tell master. By LRU, only list2 and list3 remains.
store.putIterator(
"list1", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
store.putIterator(
"list2", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
store.putIterator(
"list3", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
// getLocations and getBlockStatus should yield the same locations
assert(store.master.getLocations("list1").size === 0)
assert(store.master.getLocations("list2").size === 1)
assert(store.master.getLocations("list3").size === 1)
assert(store.master.getBlockStatus("list1", askSlaves = false).size === 0)
assert(store.master.getBlockStatus("list2", askSlaves = false).size === 1)
assert(store.master.getBlockStatus("list3", askSlaves = false).size === 1)
assert(store.master.getBlockStatus("list1", askSlaves = true).size === 0)
assert(store.master.getBlockStatus("list2", askSlaves = true).size === 1)
assert(store.master.getBlockStatus("list3", askSlaves = true).size === 1)
// This time don't tell master and see what happens. By LRU, only list5 and list6 remains.
store.putIterator(
"list4", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = false)
store.putIterator(
"list5", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = false)
store.putIterator(
"list6", list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = false)
// getLocations should return nothing because the master is not informed
// getBlockStatus without asking slaves should have the same result
// getBlockStatus with asking slaves, however, should return the actual block statuses
assert(store.master.getLocations("list4").size === 0)
assert(store.master.getLocations("list5").size === 0)
assert(store.master.getLocations("list6").size === 0)
assert(store.master.getBlockStatus("list4", askSlaves = false).size === 0)
assert(store.master.getBlockStatus("list5", askSlaves = false).size === 0)
assert(store.master.getBlockStatus("list6", askSlaves = false).size === 0)
// list4 was evicted entirely (MEMORY_ONLY), so even the slaves report 0.
assert(store.master.getBlockStatus("list4", askSlaves = true).size === 0)
assert(store.master.getBlockStatus("list5", askSlaves = true).size === 1)
assert(store.master.getBlockStatus("list6", askSlaves = true).size === 1)
}
// getMatchingBlockIds filters block ids by an arbitrary predicate; without
// askSlaves it only sees blocks reported with tellMaster = true, while with
// askSlaves it also discovers unreported blocks.
test("get matching blocks") {
store = makeBlockManager(12000)
val list = List.fill(2)(new Array[Byte](100))
// insert some blocks
store.putIterator(
"list1", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
store.putIterator(
"list2", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
store.putIterator(
"list3", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
// getLocations and getBlockStatus should yield the same locations
assert(store.master.getMatchingBlockIds(_.toString.contains("list"), askSlaves = false).size
=== 3)
assert(store.master.getMatchingBlockIds(_.toString.contains("list1"), askSlaves = false).size
=== 1)
// insert some more blocks; only newlist1 is reported to the master
store.putIterator(
"newlist1", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = true)
store.putIterator(
"newlist2", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = false)
store.putIterator(
"newlist3", list.iterator, StorageLevel.MEMORY_AND_DISK, tellMaster = false)
// getLocations and getBlockStatus should yield the same locations
assert(store.master.getMatchingBlockIds(_.toString.contains("newlist"), askSlaves = false).size
=== 1)
assert(store.master.getMatchingBlockIds(_.toString.contains("newlist"), askSlaves = true).size
=== 3)
val blockIds = Seq(RDDBlockId(1, 0), RDDBlockId(1, 1), RDDBlockId(2, 0))
blockIds.foreach { blockId =>
store.putIterator(
blockId, list.iterator, StorageLevel.MEMORY_ONLY, tellMaster = true)
}
// Predicates can match on structured block ids, not just their string form.
val matchedBlockIds = store.master.getMatchingBlockIds(_ match {
case RDDBlockId(1, _) => true
case _ => false
}, askSlaves = true)
assert(matchedBlockIds.toSet === Set(RDDBlockId(1, 0), RDDBlockId(1, 1)))
}
test("SPARK-1194 regression: fix the same-RDD rule for cache replacement") {
  // The "don't evict blocks of the RDD being cached" rule must not protect
  // blocks of *other* RDDs: caching rdd_0_1 should evict rdd_1_0 even
  // though rdd_1_0 was accessed more recently than rdd_0_0.
  store = makeBlockManager(12000)
  Seq(rdd(0, 0), rdd(1, 0)).foreach { id =>
    store.putSingle(id, new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
  }
  // Access rdd_1_0 to ensure it's not least recently used.
  assert(store.getSingleAndReleaseLock(rdd(1, 0)).isDefined, "rdd_1_0 was not in store")
  // According to the same-RDD rule, rdd_1_0 should be replaced here.
  store.putSingle(rdd(0, 1), new Array[Byte](4000), StorageLevel.MEMORY_ONLY)
  // rdd_1_0 should have been replaced, even though it's not least recently used.
  assert(store.memoryStore.contains(rdd(0, 0)), "rdd_0_0 was not in store")
  assert(store.memoryStore.contains(rdd(0, 1)), "rdd_0_1 was not in store")
  assert(!store.memoryStore.contains(rdd(1, 0)), "rdd_1_0 was in store")
}
// Unrolling a block into memory may evict other blocks; blocks stored at
// MEMORY_AND_DISK that get evicted must land in the disk store. A block too
// big to unroll returns Left(iterator) so the caller can spill it to disk.
// The eviction sequence is order-dependent; do not reorder statements.
test("safely unroll blocks through putIterator (disk)") {
store = makeBlockManager(12000)
val memoryStore = store.memoryStore
val diskStore = store.diskStore
val smallList = List.fill(40)(new Array[Byte](100))
val bigList = List.fill(40)(new Array[Byte](1000))
def smallIterator: Iterator[Any] = smallList.iterator.asInstanceOf[Iterator[Any]]
def bigIterator: Iterator[Any] = bigList.iterator.asInstanceOf[Iterator[Any]]
assert(memoryStore.currentUnrollMemoryForThisTask === 0)
store.putIterator("b1", smallIterator, StorageLevel.MEMORY_AND_DISK)
store.putIterator("b2", smallIterator, StorageLevel.MEMORY_AND_DISK)
// Unroll with not enough space. This should succeed but kick out b1 in the process.
// Memory store should contain b2 and b3, while disk store should contain only b1
val result3 = memoryStore.putIteratorAsValues("b3", smallIterator, ClassTag.Any)
assert(result3.isRight)
assert(!memoryStore.contains("b1"))
assert(memoryStore.contains("b2"))
assert(memoryStore.contains("b3"))
assert(diskStore.contains("b1"))
assert(!diskStore.contains("b2"))
assert(!diskStore.contains("b3"))
// Re-insert b3 through the BlockManager so subsequent bookkeeping is clean.
memoryStore.remove("b3")
store.putIterator("b3", smallIterator, StorageLevel.MEMORY_ONLY)
assert(memoryStore.currentUnrollMemoryForThisTask === 0)
// Unroll huge block with not enough space. This should fail and return an iterator so that
// the block may be stored to disk. During the unrolling process, block "b2" should be kicked
// out, so the memory store should contain only b3, while the disk store should contain
// b1, b2 and b4.
val result4 = memoryStore.putIteratorAsValues("b4", bigIterator, ClassTag.Any)
assert(result4.isLeft)
assert(!memoryStore.contains("b1"))
assert(!memoryStore.contains("b2"))
assert(memoryStore.contains("b3"))
assert(!memoryStore.contains("b4"))
}
// Blocks whose read locks are held (pinned) must be skipped by eviction,
// even when they are the least-recently-used candidates.
test("read-locked blocks cannot be evicted from memory") {
store = makeBlockManager(12000)
val arr = new Array[Byte](4000)
// Store a1 and a2 in memory; the getSingle calls below acquire (and hold)
// a read lock on each, pinning both blocks.
store.putSingle("a1", arr, StorageLevel.MEMORY_ONLY_SER)
store.putSingle("a2", arr, StorageLevel.MEMORY_ONLY_SER)
assert(store.getSingle("a1").isDefined, "a1 was not in store")
assert(store.getSingle("a2").isDefined, "a2 was not in store")
// This put should fail because both a1 and a2 should be read-locked:
store.putSingle("a3", arr, StorageLevel.MEMORY_ONLY_SER)
assert(store.getSingle("a3").isEmpty, "a3 was in store")
assert(store.getSingle("a1").isDefined, "a1 was not in store")
assert(store.getSingle("a2").isDefined, "a2 was not in store")
// Release both pins of block a2:
store.releaseLock("a2")
store.releaseLock("a2")
// Block a1 is the least-recently accessed, so an LRU eviction policy would evict it before
// block a2. However, a1 is still pinned so this put of a3 should evict a2 instead:
store.putSingle("a3", arr, StorageLevel.MEMORY_ONLY_SER)
assert(store.getSingle("a2").isEmpty, "a2 was in store")
assert(store.getSingle("a1").isDefined, "a1 was not in store")
assert(store.getSingle("a3").isDefined, "a3 was not in store")
}
// Shared body for the SPARK-15736 test below: stores one block at
// `storageLevel`, deletes its on-disk file out from under the BlockManager,
// and verifies that the first read fails with SparkException while also
// removing the stale metadata, so the second read cleanly returns None.
private def testReadWithLossOfOnDiskFiles(
storageLevel: StorageLevel,
readMethod: BlockManager => Option[_]): Unit = {
store = makeBlockManager(12000)
assert(store.putSingle("blockId", new Array[Byte](4000), storageLevel))
assert(store.getStatus("blockId").isDefined)
// Directly delete all files from the disk store, triggering failures when reading blocks:
store.diskBlockManager.getAllFiles().foreach(_.delete())
// The BlockManager still thinks that these blocks exist:
assert(store.getStatus("blockId").isDefined)
// Because the BlockManager's metadata claims that the block exists (i.e. that it's present
// in at least one store), the read attempts to read it and fails when the on-disk file is
// missing.
intercept[SparkException] {
readMethod(store)
}
// Subsequent read attempts will succeed; the block isn't present but we return an expected
// "block not found" response rather than a fatal error:
assert(readMethod(store).isEmpty)
// The reason why this second read succeeded is because the metadata entry for the missing
// block was removed as a result of the read failure:
assert(store.getStatus("blockId").isEmpty)
}
test("remove block if a read fails due to missing DiskStore files (SPARK-15736)") {
  // Every combination of on-disk storage level and local read method must
  // clean up the block's metadata after the underlying file disappears.
  val storageLevels = Seq(
    StorageLevel(useDisk = true, useMemory = false, deserialized = false),
    StorageLevel(useDisk = true, useMemory = false, deserialized = true))
  val readMethods = Map[String, BlockManager => Option[_]](
    "getLocalBytes" -> ((m: BlockManager) => m.getLocalBytes("blockId")),
    "getLocalValues" -> ((m: BlockManager) => m.getLocalValues("blockId"))
  )
  // Removed a redundant standalone call for (DISK_ONLY, getLocalBytes):
  // DISK_ONLY is identical to storageLevels.head, so that combination is
  // already exercised by the loop below.
  for ((readMethodName, readMethod) <- readMethods; storageLevel <- storageLevels) {
    withClue(s"$readMethodName $storageLevel") {
      testReadWithLossOfOnDiskFiles(storageLevel, readMethod)
    }
  }
}
test("SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)") {
  // With every fetch attempt failing, getRemoteBytes should give up after
  // the configured number of failures and return None instead of retrying
  // indefinitely.
  val maxFailures = conf.getInt("spark.block.failures.beforeLocationRefresh", 5)
  val mockBlockTransferService = new MockBlockTransferService(maxFailures)
  store = makeBlockManager(8000, "executor1", transferService = Option(mockBlockTransferService))
  store.putSingle("item", 999L, StorageLevel.MEMORY_ONLY, tellMaster = true)
  assert(store.getRemoteBytes("item").isEmpty)
}
// After hitting the failure threshold, the fetcher should re-query the
// master for fresh locations and then succeed; the two getLocations calls
// (initial + refresh) are verified on the mocked master.
test("SPARK-13328: refresh block locations (fetch should succeed after location refresh)") {
val maxFailuresBeforeLocationRefresh =
conf.getInt("spark.block.failures.beforeLocationRefresh", 5)
val mockBlockManagerMaster = mock(classOf[BlockManagerMaster])
val mockBlockTransferService =
new MockBlockTransferService(maxFailuresBeforeLocationRefresh)
// make sure we have more than maxFailuresBeforeLocationRefresh locations
// so that we have a chance to do location refresh
val blockManagerIds = (0 to maxFailuresBeforeLocationRefresh)
.map { i => BlockManagerId(s"id-$i", s"host-$i", i + 1) }
when(mockBlockManagerMaster.getLocations(mc.any[BlockId])).thenReturn(blockManagerIds)
store = makeBlockManager(8000, "executor1", mockBlockManagerMaster,
transferService = Option(mockBlockTransferService))
val block = store.getRemoteBytes("item")
.asInstanceOf[Option[ByteBuffer]]
assert(block.isDefined)
// One initial lookup plus one refresh after the failure threshold.
verify(mockBlockManagerMaster, times(2)).getLocations("item")
}
  test("SPARK-17484: block status is properly updated following an exception in put()") {
    // Transfer service whose uploadBlock always throws, simulating a failed replication.
    val mockBlockTransferService = new MockBlockTransferService(maxFailures = 10) {
      override def uploadBlock(
          hostname: String,
          port: Int, execId: String,
          blockId: BlockId,
          blockData: ManagedBuffer,
          level: StorageLevel,
          classTag: ClassTag[_]): Future[Unit] = {
        throw new InterruptedException("Intentional interrupt")
      }
    }
    store = makeBlockManager(8000, "executor1", transferService = Option(mockBlockTransferService))
    store2 = makeBlockManager(8000, "executor2", transferService = Option(mockBlockTransferService))
    // MEMORY_ONLY_2 requires replication to a peer, so the interrupted upload must
    // surface to the caller of putSingle ...
    intercept[InterruptedException] {
      store.putSingle("item", "value", StorageLevel.MEMORY_ONLY_2, tellMaster = true)
    }
    // ... and the failed put must leave no trace of the block: not locally, not in the
    // master's location map, and not fetchable from the peer.
    assert(store.getLocalBytes("item").isEmpty)
    assert(master.getLocations("item").isEmpty)
    assert(store2.getRemoteBytes("item").isEmpty)
  }
test("SPARK-17484: master block locations are updated following an invalid remote block fetch") {
store = makeBlockManager(8000, "executor1")
store2 = makeBlockManager(8000, "executor2")
store.putSingle("item", "value", StorageLevel.MEMORY_ONLY, tellMaster = true)
assert(master.getLocations("item").nonEmpty)
store.removeBlock("item", tellMaster = false)
assert(master.getLocations("item").nonEmpty)
assert(store2.getRemoteBytes("item").isEmpty)
assert(master.getLocations("item").isEmpty)
}
  /**
   * Test double for BlockTransferService. Synchronous single-block fetches fail for the
   * first `maxFailures` calls and then delegate to the real implementation; multi-block
   * fetches always "succeed" with a dummy 1-byte buffer; uploads complete immediately.
   */
  class MockBlockTransferService(val maxFailures: Int) extends BlockTransferService {
    // Number of fetchBlockSync invocations so far; the first maxFailures calls throw.
    var numCalls = 0

    override def init(blockDataManager: BlockDataManager): Unit = {}

    override def fetchBlocks(
        host: String,
        port: Int,
        execId: String,
        blockIds: Array[String],
        listener: BlockFetchingListener,
        tempShuffleFileManager: TempShuffleFileManager): Unit = {
      // Always report success with a dummy payload, regardless of the requested ids.
      listener.onBlockFetchSuccess("mockBlockId", new NioManagedBuffer(ByteBuffer.allocate(1)))
    }

    override def close(): Unit = {}

    override def hostName: String = { "MockBlockTransferServiceHost" }

    override def port: Int = { 63332 }

    override def uploadBlock(
        hostname: String,
        port: Int, execId: String,
        blockId: BlockId,
        blockData: ManagedBuffer,
        level: StorageLevel,
        classTag: ClassTag[_]): Future[Unit] = {
      // Complete immediately without doing anything; tests that need a failing upload
      // override this method.
      import scala.concurrent.ExecutionContext.Implicits.global
      Future {}
    }

    override def fetchBlockSync(
        host: String,
        port: Int,
        execId: String,
        blockId: String): ManagedBuffer = {
      numCalls += 1
      if (numCalls <= maxFailures) {
        throw new RuntimeException("Failing block fetch in the mock block transfer service")
      }
      // After exhausting the programmed failures, fall back to the default implementation
      // (which goes through the always-succeeding fetchBlocks above).
      super.fetchBlockSync(host, port, execId, blockId)
    }
  }
}
private object BlockManagerSuite {

  /**
   * Test-only convenience methods on BlockManager that take care of the read-lock
   * discipline: the real get* methods return while still holding a read lock, which
   * tests rarely want to manage by hand.
   */
  private implicit class BlockManagerTestUtils(store: BlockManager) {

    // Drops a block from memory if it exists, acquiring and correctly disposing of the
    // write lock depending on whether the block survives in some other store.
    def dropFromMemoryIfExists(
        blockId: BlockId,
        data: () => Either[Array[Any], ChunkedByteBuffer]): Unit = {
      store.blockInfoManager.lockForWriting(blockId).foreach { info =>
        val newEffectiveStorageLevel = store.dropFromMemory(blockId, data)
        if (newEffectiveStorageLevel.isValid) {
          // The block is still present in at least one store, so release the lock
          // but don't delete the block info
          store.releaseLock(blockId)
        } else {
          // The block isn't present in any store, so delete the block info so that the
          // block can be stored again
          store.blockInfoManager.removeBlock(blockId)
        }
      }
    }

    // Wraps a lock-holding getter so that the read lock is released as soon as the
    // result is obtained (only when something was actually found and locked).
    private def wrapGet[T](f: BlockId => Option[T]): BlockId => Option[T] = (blockId: BlockId) => {
      val result = f(blockId)
      if (result.isDefined) {
        store.releaseLock(blockId)
      }
      result
    }

    def hasLocalBlock(blockId: BlockId): Boolean = {
      getLocalAndReleaseLock(blockId).isDefined
    }

    // Lock-releasing variants of the BlockManager read paths used throughout the suite.
    val getLocalAndReleaseLock: (BlockId) => Option[BlockResult] = wrapGet(store.getLocalValues)
    val getAndReleaseLock: (BlockId) => Option[BlockResult] = wrapGet(store.get)
    val getSingleAndReleaseLock: (BlockId) => Option[Any] = wrapGet(store.getSingle)
    val getLocalBytesAndReleaseLock: (BlockId) => Option[ChunkedByteBuffer] = {
      val allocator = ByteBuffer.allocate _
      wrapGet { bid => store.getLocalBytes(bid).map(_.toChunkedByteBuffer(allocator)) }
    }
  }
}
| jlopezmalla/spark | core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala | Scala | apache-2.0 | 61,768 |
import com.jgdodson.rosalind.{ProteinString, RNAString}
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen
object RNAStringTest extends Properties("RNAStringTest") {

  /** Generator for non-empty strings drawn from the RNA alphabet ('A', 'C', 'G', 'U'). */
  val ACGUStrings: Gen[String] =
    Gen.containerOf[Vector, Char](Gen.oneOf(RNAString.alphabet))
      .map(_.mkString)
      .suchThat(_.nonEmpty)

  /** Generator for RNAString instances, built on top of the string generator above. */
  val RNAStrings: Gen[RNAString] = ACGUStrings.map(s => RNAString(s))

  // Wrapping a raw ACGU string must preserve its length.
  property("length") = forAll(ACGUStrings) { acgu => acgu.length == RNAString(acgu).length }

  // Reversal is length-preserving.
  property("reverse-length") = forAll(RNAStrings) { rna => rna.length == rna.reverse.length }

  // Converting back to a DNA string is length-preserving.
  property("toDNAString-length") = forAll(RNAStrings) { rna => rna.length == rna.toDNAString.length }

  // Translating any RNA string of at least one codon yields only valid amino-acid codes.
  property("valid-translation") = forAll(RNAStrings suchThat (_.length >= 3)) { rna =>
    rna.toProteinString.seq.forall(ch => ProteinString.alphabet.contains(ch))
  }

  // Every RNA string has strictly positive mass ...
  property("positive-mass") = forAll(RNAStrings) { rna => rna.mass > 0 }

  // ... and so does every individual nucleotide in the mass table.
  property("positive-masses") = RNAString.masses.forall(_._2 > 0)

  // A,C,G,U
  property("alphabet-size") = RNAString.alphabet.size == 4
}
| PilgrimShadow/Rosalind.scala | src/test/scala/RNAStringTest.scala | Scala | mit | 1,212 |
package io.github.mandar2812.dynaml.kernels
import breeze.linalg.{norm, DenseVector, DenseMatrix}
/**
 * Implementation of the Normalized Exponential Kernel
 *
 * K(x,y) = exp(beta*(x.y)/(||x||*||y||))
 *
 * Note: despite the historical name, the dot product is normalized by the L2 norms
 * of both vectors (i.e. cosine similarity) before scaling by beta — see evaluate().
 */
class ExponentialKernel(be: Double) extends SVMKernel[DenseMatrix[Double]]
with Serializable {

  /** The single tunable hyper-parameter of this kernel. */
  override val hyper_parameters = List("beta")

  private var beta: Double = be

  /** Mutates the beta (scale) hyper-parameter in place. */
  def setbeta(b: Double): Unit = {
    this.beta = b
  }

  /** Evaluates exp(beta * cosineSimilarity(x, y)). */
  override def evaluate(x: DenseVector[Double], y: DenseVector[Double]): Double =
    math.exp(beta*(x.t * y)/(norm(x,2)*norm(y,2)))

  override def buildKernelMatrix(mappedData: List[DenseVector[Double]],
                                 length: Int): KernelMatrix[DenseMatrix[Double]] =
    SVMKernel.buildSVMKernelMatrix(mappedData, length, this.evaluate)

  // Returns this so calls can be chained after updating beta from the given map.
  override def setHyperParameters(h: Map[String, Double]) = {
    assert(hyper_parameters.forall(h contains _),
      "All hyper parameters must be contained in the arguments")
    this.beta = h("beta")
    this
  }
}
| Koldh/DynaML | src/main/scala/io/github/mandar2812/dynaml/kernels/ExponentialKernel.scala | Scala | apache-2.0 | 1,006 |
package controllers.organization
import com.artclod.securesocial.TestUtils._
import models.DBTest.newFakeUser
import models.organization._
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import play.api.db.slick.Config.driver.simple._
import play.api.db.slick.DB
import play.api.test.Helpers._
import play.api.test._
import service.Logins
@RunWith(classOf[JUnitRunner])
class CoursesControllerSpec extends Specification {

  "Courses" should {

    // Note: the course owner is a *different* freshly created user than the one logged
    // in, so this verifies that non-owners can view a course.
    "allow any user to view a course" in new WithApplication {
      DB.withSession {
        implicit session: Session =>
          val user = newFakeUser
          val organization = Organizations.create(TestOrganization())
          val course = Courses.create(TestCourse(owner = newFakeUser.id, organizationId = organization.id))
          val page = route(FakeRequest(GET, "/orgs/" + organization.id.v + "/courses/" + course.id.v).withLoggedInUser(Logins(user.id).get)).get
          status(page) must equalTo(OK)
          contentType(page) must beSome.which(_ == "text/html")
          contentAsString(page) must contain(course.name)
      }
    }

    // Any logged-in user can reach the course-creation form for an organization.
    "allow any user to add a course" in new WithApplication {
      DB.withSession {
        implicit session: Session =>
          val user = newFakeUser
          val organization = Organizations.create(TestOrganization())
          val page = route(FakeRequest(GET, "/orgs/" + organization.id.v + "/courses/create").withLoggedInUser(Logins(user.id).get)).get
          status(page) must equalTo(OK)
          contentType(page) must beSome.which(_ == "text/html")
          // "Please" is presumably part of the creation-form copy — verify against the view.
          contentAsString(page) must contain("Please")
      }
    }
  }
}
| kristiankime/web-education-games | test/controllers/organization/CoursesControllerSpec.scala | Scala | mit | 1,682 |
package de.tu_berlin.formic.datastructure.json.client
import akka.actor.ActorRef
import de.tu_berlin.formic.common.controlalgo.ControlAlgorithmClient
import de.tu_berlin.formic.common.datastructure.FormicDataStructure.LocalOperationMessage
import de.tu_berlin.formic.common.datastructure.client.AbstractClientDataStructure
import de.tu_berlin.formic.common.datastructure.{DataStructureName, DataStructureOperation, OperationContext, OperationTransformer}
import de.tu_berlin.formic.common.message.{FormicMessage, OperationMessage}
import de.tu_berlin.formic.common.{ClientId, DataStructureInstanceId, OperationId}
import de.tu_berlin.formic.datastructure.json._
import de.tu_berlin.formic.datastructure.json.client.JsonClientDataStructure._
import de.tu_berlin.formic.datastructure.json.JsonFormicJsonDataStructureProtocol._
import de.tu_berlin.formic.datastructure.tree.{TreeDeleteOperation, TreeInsertOperation, TreeStructureOperation}
import upickle.default._
/**
* @author Ronny Bräunlich
*/
/**
 * Client-side JSON data structure. Maintains an ObjectNode as local state, applies
 * tree operations to it, and — before delegating to the generic client state machine —
 * translates user-facing JSON operations (addressed by JsonPath) into plain tree
 * operations (addressed by access path).
 */
class JsonClientDataStructure(id: DataStructureInstanceId,
                              controlAlgorithm: ControlAlgorithmClient,
                              val dataStructureName: DataStructureName,
                              initialData: Option[String],
                              lastOperationId: Option[OperationId],
                              outgoingConnection: ActorRef)
                             (implicit val writer: JsonTreeNodeWriter, val reader: JsonTreeNodeReader) extends AbstractClientDataStructure(id, controlAlgorithm, lastOperationId, outgoingConnection) {

  // Local replica state; starts from the serialized initial data or an empty object.
  private var privateData: ObjectNode = initialData.map(read[ObjectNode]).getOrElse(ObjectNode(null, List.empty))

  def data: ObjectNode = privateData

  override val transformer: OperationTransformer = new JsonTransformer

  /** Applies a (tree) operation to the local replica, replacing the root ObjectNode. */
  override def apply(op: DataStructureOperation): Unit = {
    log.debug(s"Applying operation: $op")
    privateData = data.applyOperation(op.asInstanceOf[TreeStructureOperation]).asInstanceOf[ObjectNode]
  }

  /** Re-creates the given operation with a new operation context, keeping all other fields. */
  override def cloneOperationWithNewContext(op: DataStructureOperation, context: OperationContext): DataStructureOperation = {
    op match {
      case TreeInsertOperation(path, tree, opId, _, clientId) => TreeInsertOperation(path, tree, opId, context, clientId)
      case TreeDeleteOperation(path, opId, _, clientId) => TreeDeleteOperation(path, opId, context, clientId)
      case JsonReplaceOperation(path, tree, opId, _, clientId) => JsonReplaceOperation(path, tree, opId, context, clientId)
    }
  }

  override def getDataAsJson: String = write(data)

  /**
   * Shared handler for the unacknowledged and acknowledged states: local JSON client
   * operations are first translated into general tree operations (resolving the
   * JsonPath against the current data), then forwarded to the state-specific behavior.
   *
   * `delegate` is by-name so the superclass behavior is re-evaluated per message,
   * exactly as the previous duplicated inline implementations did.
   */
  private def translatingReceive(delegate: => Receive): Receive = {
    case local: LocalOperationMessage =>
      local.op.operations.head match {
        case json: JsonClientOperation =>
          val newOperation = transformJsonOperationsIntoGeneralTreeOperations(json)
          val newMessage = OperationMessage(local.op.clientId, local.op.dataStructureInstanceId, local.op.dataStructure, List(newOperation))
          delegate.apply(LocalOperationMessage(newMessage))
        case _ => delegate.apply(local)
      }
    case rest: FormicMessage => delegate.apply(rest)
  }

  override def unacknowledged(callbackWrapper: ActorRef): Receive =
    translatingReceive(super.unacknowledged(callbackWrapper))

  override def acknowledged(callbackWrapper: ActorRef): Receive =
    translatingReceive(super.acknowledged(callbackWrapper))

  /**
   * Because the user is allowed to use a JSON path, the JSONClientOperations have to be translated into
   * basic tree operations including the translation of the path.
   */
  def transformJsonOperationsIntoGeneralTreeOperations(jsonOp: JsonClientOperation): TreeStructureOperation = {
    val newOperation = jsonOp match {
      case ins: JsonClientInsertOperation => TreeInsertOperation(data.translateJsonPathForInsertion(jsonOp.path), ins.tree, ins.id, ins.operationContext, ins.clientId)
      case del: JsonClientDeleteOperation => TreeDeleteOperation(data.translateJsonPath(jsonOp.path), del.id, del.operationContext, del.clientId)
      case rep: JsonClientReplaceOperation => JsonReplaceOperation(data.translateJsonPath(jsonOp.path), rep.tree, rep.id, rep.operationContext, rep.clientId)
    }
    newOperation
  }
}
object JsonClientDataStructure {

  /**
   * Marker interface for all the operations that are only restricted to the client and to the JSON data type.
   * Common for all of them is the JsonPath that has to be translated into an AccessPath.
   */
  sealed trait JsonClientOperation extends DataStructureOperation {
    val path: JsonPath
  }

  // Insert a subtree at the given JSON path.
  case class JsonClientInsertOperation(path: JsonPath, tree: JsonTreeNode[_], id: OperationId, operationContext: OperationContext, var clientId: ClientId) extends JsonClientOperation

  // Delete the node at the given JSON path.
  case class JsonClientDeleteOperation(path: JsonPath, id: OperationId, operationContext: OperationContext, var clientId: ClientId) extends JsonClientOperation

  // Replace the node at the given JSON path with the given subtree.
  case class JsonClientReplaceOperation(path: JsonPath, tree: JsonTreeNode[_], id: OperationId, operationContext: OperationContext, var clientId: ClientId) extends JsonClientOperation

  // Factory mirroring the class constructor (implicit writer/reader are resolved at call site).
  def apply(id: DataStructureInstanceId,
            controlAlgorithm: ControlAlgorithmClient,
            dataStructureName: DataStructureName,
            initialData: Option[String],
            lastOperationId: Option[OperationId],
            outgoingConnection: ActorRef): JsonClientDataStructure = new JsonClientDataStructure(id, controlAlgorithm, dataStructureName, initialData, lastOperationId, outgoingConnection)
}
| rbraeunlich/formic | json/shared/src/main/scala/de/tu_berlin/formic/datastructure/json/client/JsonClientDataStructure.scala | Scala | apache-2.0 | 6,109 |
package org.genericConfig.admin.shared.configTree.json
import play.api.libs.json._
import play.api.libs.functional.syntax.{unlift, _}
/**
* Copyright (C) 2016 Gennadi Heimann genaheimann@gmail.com
*
* Created by Gennadi Heimann 19.12.2016
*/
/**
 * JSON transport representation of a single component node in a configuration tree.
 *
 * @param componentId id of the component
 * @param nameToShow  display name shown to the user
 * @param kind        component kind discriminator
 * @param nextStepId  id of the following step, if any (serialized as null when absent)
 */
case class JsonConfigTreeComponent(
                                    componentId: String,
                                    nameToShow: String,
                                    kind: String,
                                    nextStepId: Option[String]
                                  )
object JsonConfigTreeComponent {

  // NOTE(review): these paths use `\\` (play-json recursive search) rather than the
  // usual `\` child selector. For a Writes/Reads path this is unusual — confirm it is
  // intentional and produces the JSON layout the client expects.
  implicit lazy val jsonConfigTreeComponentWrites: Writes[JsonConfigTreeComponent] = (
    (JsPath \\ "componentId").write[String] and
      (JsPath \\ "nameToShow").write[String] and
      (JsPath \\ "kind").write[String] and
      (JsPath \\ "nextStepId").write(Writes.optionWithNull[String])
    ) (unlift(JsonConfigTreeComponent.unapply))

  // Reads mirror of the Writes above; optionWithNull maps JSON null to None.
  implicit lazy val jsonConfigTreeComponentReads: Reads[JsonConfigTreeComponent] = (
    (JsPath \\ "componentId").read[String] and
      (JsPath \\ "nameToShow").read[String] and
      (JsPath \\ "kind").read[String] and
      (JsPath \\ "nextStepId").read(Reads.optionWithNull[String])
    ) (JsonConfigTreeComponent.apply _)
} | gennadij/admin | shared/src/main/scala/org/genericConfig/admin/shared/configTree/json/JsonConfigTreeComponent.scala | Scala | apache-2.0 | 1,252 |
package com.sksamuel.elastic4s.requests.searches.queries.span
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.exts.OptionImplicits._
/**
 * Span-not query: matches spans from `include` that are not overlapped by spans from
 * `exclude`. `pre`, `post` and `dist` control the tolerance (in tokens) before, after,
 * or on both sides of the excluded spans — see the Elasticsearch span_not query DSL.
 */
case class SpanNotQuery(include: SpanQuery,
                        exclude: SpanQuery,
                        dist: Option[Int] = None,
                        pre: Option[Int] = None,
                        post: Option[Int] = None,
                        boost: Option[Double] = None,
                        queryName: Option[String] = None)
    extends Query {

  // Fluent setters. All of them now consistently use `.some` from exts OptionImplicits
  // (previously `boost` alone used `Option(...)`, which is equivalent for a primitive
  // Double but inconsistent with the rest of the file).
  def boost(boost: Double): SpanNotQuery = copy(boost = boost.some)

  def queryName(queryName: String): SpanNotQuery = copy(queryName = queryName.some)
  def post(post: Int): SpanNotQuery              = copy(post = post.some)
  def pre(pre: Int): SpanNotQuery                = copy(pre = pre.some)
  def dist(dist: Int): SpanNotQuery              = copy(dist = dist.some)
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/span/SpanNotQuery.scala | Scala | apache-2.0 | 924 |
// Negative compilation test (scala/test/files/neg): the call `foo(bar)` below is
// EXPECTED to fail to compile — `foo` needs a type constructor D[_] such that
// Quux[Int] = D[D[Boolean]], which has no solution. Do not "fix" this file; the
// harness asserts that scalac rejects it.
object Test extends App {
  class L[A]
  class Quux0[B, CC[_]]
  class Quux[C] extends Quux0[C, L]

  def foo[D[_]](x: D[D[Boolean]]) = ???

  def bar: Quux[Int] = ???
  foo(bar)
}
| lrytz/scala | test/files/neg/t2712-8.scala | Scala | apache-2.0 | 181 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.stats
import java.util.Map.Entry
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.client.Scanner
import org.apache.accumulo.core.data.{Key, Mutation, Value}
import org.joda.time.format.DateTimeFormat
import org.locationtech.geomesa.accumulo.util.{CloseableIterator, SelfClosingIterator}
import scala.util.Random
/**
 * Base trait for all stat types
 */
trait Stat {

  /** Name of the feature type this stat was collected for (first part of the row key). */
  def featureName: String

  /** Time the stat refers to, as millis since the epoch (formatted into the row key). */
  def date: Long
}
/**
 * Trait for mapping stats to accumulo and back
 */
trait StatTransform[S <: Stat] extends Logging {

  // Row key is "<featureName>~<formatted date>", so rows sort by feature then time.
  protected def createMutation(stat: Stat) = new Mutation(s"${stat.featureName}~${StatTransform.dateFormat.print(stat.date)}")

  // Random 4-digit, zero-padded column family — presumably to spread concurrent writers
  // for the same row key across distinct CFs; confirm against the writing code.
  protected def createRandomColumnFamily = Random.nextInt(9999).formatted("%1$04d")

  /**
   * Convert a stat to a mutation
   *
   * @param stat the stat to persist
   * @return a mutation encoding the stat
   */
  def statToMutation(stat: S): Mutation

  /**
   * Convert accumulo scan results into a stat
   *
   * @param entries all key/value entries belonging to a single row
   * @return the stat decoded from the row
   */
  def rowToStat(entries: Iterable[Entry[Key, Value]]): S

  /**
   * Creates an iterator that returns Stats from accumulo scans
   *
   * One stat corresponds to one row, but a row spans several CQ/value entries, so this
   * iterator buffers the first entry of the *next* row in `last` while grouping.
   * Closing the returned iterator closes the scanner. Not thread-safe.
   *
   * @param scanner scanner positioned over stat rows
   * @return closeable iterator of decoded stats
   */
  def iterator(scanner: Scanner): CloseableIterator[S] = {
    val iter = scanner.iterator()
    val wrappedIter = new CloseableIterator[S] {
      // Look-ahead buffer: holds the first entry of the next row (read while grouping
      // the previous row) or None when the underlying iterator is exhausted.
      var last: Option[Entry[Key, Value]] = None
      override def close() = scanner.close()
      override def next() = {
        // get the data for the stat entry, which consists of a several CQ/values
        val entries = collection.mutable.ListBuffer.empty[Entry[Key, Value]]
        if (last.isEmpty) {
          last = Some(iter.next())
        }
        val lastRowKey = last.get.getKey.getRow.toString
        var next: Option[Entry[Key, Value]] = last
        // Accumulate entries while they share the same row key.
        while (next.isDefined && next.get.getKey.getRow.toString == lastRowKey) {
          entries.append(next.get)
          next = if (iter.hasNext) Some(iter.next()) else None
        }
        // Stash the first entry of the following row (or None at end of scan).
        last = next
        // use the row data to return a Stat
        rowToStat(entries)
      }
      override def hasNext = last.isDefined || iter.hasNext
    }
    SelfClosingIterator(wrappedIter)
  }
}
object StatTransform {
  // Row-key timestamp format, e.g. "20150102-13:45:30.123"; fixed to UTC so that keys
  // sort chronologically regardless of the writer's local time zone.
  val dateFormat = DateTimeFormat.forPattern("yyyyMMdd-HH:mm:ss.SSS").withZoneUTC()
}
| drackaer/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/stats/Stat.scala | Scala | apache-2.0 | 2,779 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \ | _ \ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \_\ |_____| \____| /__/ \____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.accounts
import com.precog.common.Path
import com.precog.common.accounts._
import com.precog.common.security.{TZDateTimeDecomposer => _, _}
import com.precog.util.PrecogUnit
import blueeyes._
import blueeyes.bkka._
import blueeyes.json._
import blueeyes.persistence.mongo._
import blueeyes.persistence.mongo.dsl._
import blueeyes.json.serialization.{ Extractor, Decomposer }
import blueeyes.json.serialization.DefaultSerialization._
import blueeyes.json.serialization.Extractor._
import akka.util.Timeout
import akka.dispatch.Future
import akka.dispatch.ExecutionContext
import org.joda.time.DateTime
import org.bson.types.ObjectId
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.DataUpdater
import org.slf4j.LoggerFactory
import org.streum.configrity.Configuration
import scalaz._
import scalaz.syntax.monad._
/** Configuration for [[ZKAccountIdSource]]: the ZooKeeper node used to generate ids. */
trait ZkAccountManagerSettings {
  def zkAccountIdPath: String
}
/** Mixin providing unique account ids backed by a ZooKeeper sequential node. */
trait ZKAccountIdSource extends AccountManager[Future] {
  implicit def M: Monad[Future]
  def zkc: ZkClient
  def settings: ZkAccountManagerSettings

  // Creates (if needed) the parent node, then creates a persistent-sequential child.
  // ZooKeeper appends a zero-padded 10-digit sequence number to the path; those last
  // 10 characters become the account id.
  def newAccountId: Future[String] = M.point {
    if (!zkc.exists(settings.zkAccountIdPath)) {
      zkc.createPersistent(settings.zkAccountIdPath, true)
    }

    val createdPath = zkc.createPersistentSequential(settings.zkAccountIdPath, Array.empty[Byte])
    createdPath.substring(createdPath.length - 10) //last 10 characters are a sequential int
  }
}
/** Collection names and operational settings for [[MongoAccountManager]]. */
trait MongoAccountManagerSettings {
  // Collection holding live accounts.
  def accounts: String
  // Collection that archives deleted accounts.
  def deletedAccounts: String
  // Timeout applied to all Mongo operations.
  def timeout: Timeout

  // Collection holding password-reset tokens.
  def resetTokens: String
  // Lifetime of a freshly generated reset token, in minutes.
  def resetTokenExpirationMinutes: Int
}
/**
 * Account manager backed by MongoDB. Live accounts, deleted-account archives and
 * password-reset tokens each live in their own collection (named via
 * [[MongoAccountManagerSettings]]).
 */
abstract class MongoAccountManager(mongo: Mongo, database: Database, settings: MongoAccountManagerSettings)(implicit val M: Monad[Future])
    extends AccountManager[Future] {
  import Account._

  // NOTE(review): this logger appears unused (the class logs via `logger`); kept to
  // avoid changing the class's initialization footprint.
  private lazy val mamLogger = LoggerFactory.getLogger("com.precog.accounts.MongoAccountManager")

  private implicit val impTimeout = settings.timeout

  // Ensure indices for account lookup on apiKey, accountId, or email
  database(ensureIndex("apiKey_index").on(".apiKey").in(settings.accounts))
  database(ensureIndex("accountId_index").on(".accountId").in(settings.accounts))
  database(ensureIndex("email_index").on(".email").in(settings.accounts))

  // Ensure reset token lookup by token Id
  database(ensureIndex("reset_token_index").on(".tokenId").in(settings.resetTokens))

  /** Produces a fresh, globally unique account id (see e.g. [[ZKAccountIdSource]]). */
  def newAccountId: Future[String]

  /**
   * Creates and persists a new account. The account's root path is /<accountId>, the
   * password is stored salted and hashed, and `f` is invoked with the new account id
   * to provision the account's API key before the document is written.
   */
  def createAccount(email: String, password: String, creationDate: DateTime, plan: AccountPlan, parent: Option[AccountId], profile: Option[JValue])(f: AccountId => Future[APIKey]): Future[Account] = {
    for {
      accountId <- newAccountId
      path = Path(accountId)
      apiKey <- f(accountId)
      account <- {
        val salt = randomSalt()
        val account0 = Account(
          accountId, email,
          saltAndHashSHA256(password, salt), salt,
          creationDate,
          apiKey, path, plan,
          parent, Some(creationDate), profile)

        database(insert(account0.serialize.asInstanceOf[JObject]).into(settings.accounts)) map {
          _ => account0
        }
      }
    } yield account
  }

  // Looks up at most one document in `collection` where `keyName` == `keyValue`.
  private def findOneMatching[A](keyName: String, keyValue: String, collection: String)(implicit extractor: Extractor[A]): Future[Option[A]] = {
    database(selectOne().from(collection).where(keyName === keyValue)) map {
      _.map(_.deserialize(extractor))
    }
  }

  // Looks up all documents in `collection` where `keyName` == `keyValue`.
  private def findAllMatching[A](keyName: String, keyValue: String, collection: String)(implicit extractor: Extractor[A]): Future[Set[A]] = {
    database(selectAll.from(collection).where(keyName === keyValue)) map {
      _.map(_.deserialize(extractor)).toSet
    }
  }

  // Returns every document of `collection`, deserialized.
  private def findAll[A](collection: String)(implicit extract: Extractor[A]): Future[Seq[A]] =
    database(selectAll.from(collection)) map {
      _.map(_.deserialize(extract)).toSeq
    }

  /** Generates a reset token with the default expiration from settings. */
  def generateResetToken(account: Account): Future[ResetTokenId] =
    generateResetToken(account, (new DateTime).plusMinutes(settings.resetTokenExpirationMinutes))

  /** Generates and persists a reset token for `account` with an explicit expiration. */
  def generateResetToken(account: Account, expiration: DateTime): Future[ResetTokenId] = {
    // Dashes are stripped so the token is a plain 32-char hex string.
    val tokenId = java.util.UUID.randomUUID.toString.replace("-","")

    val token = ResetToken(tokenId, account.accountId, account.email, expiration)

    logger.debug("Saving new reset token " + token)
    database(insert(token.serialize.asInstanceOf[JObject]).into(settings.resetTokens)).map { _ =>
      logger.debug("Save complete on reset token " + token)
      tokenId
    }
  }

  /** Marks a reset token as consumed by stamping its usedAt field. */
  def markResetTokenUsed(tokenId: ResetTokenId): Future[PrecogUnit] = {
    logger.debug("Marking reset token %s as used".format(tokenId))
    database(update(settings.resetTokens).set("usedAt" set (new DateTime).serialize).where("tokenId" === tokenId)).map {
      _ => logger.debug("Reset token %s marked as used".format(tokenId)); PrecogUnit
    }
  }

  /**
   * Finds a reset token by id, restricted to the given account.
   *
   * Fix: the previous implementation ignored `accountId` entirely, so a token issued
   * for one account would be returned when queried for any other account. The result
   * is now filtered to tokens actually belonging to `accountId`.
   */
  def findResetToken(accountId: AccountId, tokenId: ResetTokenId): Future[Option[ResetToken]] =
    findOneMatching[ResetToken]("tokenId", tokenId, settings.resetTokens) map {
      _.filter(_.accountId == accountId)
    }

  /** Resolves an API key to the id of the owning account, if any. */
  def findAccountByAPIKey(apiKey: String) = findOneMatching[Account]("apiKey", apiKey, settings.accounts).map(_.map(_.accountId))

  def findAccountById(accountId: String) = findOneMatching[Account]("accountId", accountId, settings.accounts)

  def findAccountByEmail(email: String) = findOneMatching[Account]("email", email, settings.accounts)

  /**
   * Overwrites the stored document for `account`. Returns false when no account with
   * that id exists (nothing is written in that case).
   */
  def updateAccount(account: Account): Future[Boolean] = {
    findAccountById(account.accountId).flatMap {
      case Some(existingAccount) =>
        database {
          val updateObj = account.serialize.asInstanceOf[JObject]
          update(settings.accounts).set(updateObj).where("accountId" === account.accountId)
        } map {
          _ => true
        }

      case None =>
        M.point(false)
    }
  }

  /**
   * Archives the account into the deleted-accounts collection, then removes it from
   * the live collection. Returns the deleted account, or None if it did not exist.
   * Note: archive-then-remove is not atomic; a crash in between leaves the account
   * present in both collections.
   */
  def deleteAccount(accountId: String): Future[Option[Account]] = {
    findAccountById(accountId).flatMap {
      case ot @ Some(account) =>
        for {
          _ <- database(insert(account.serialize.asInstanceOf[JObject]).into(settings.deletedAccounts))
          _ <- database(remove.from(settings.accounts).where("accountId" === accountId))
        } yield { ot }
      case None =>
        M.point(None)
    }
  }

  /** Disconnects from the database (best-effort) and then closes the Mongo client. */
  def close() = database.disconnect.fallbackTo(M.point(())).flatMap{_ => mongo.close}
}
| precog/platform | accounts/src/main/scala/com/precog/accounts/MongoAccountManager.scala | Scala | agpl-3.0 | 7,416 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.Locale
import org.apache.spark.sql.catalyst.expressions.aggregate.PivotFirst
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
class DataFramePivotSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("pivot courses") {
val expected = Row(2012, 15000.0, 20000.0) :: Row(2013, 48000.0, 30000.0) :: Nil
checkAnswer(
courseSales.groupBy("year").pivot("course", Seq("dotNET", "Java"))
.agg(sum($"earnings")),
expected)
checkAnswer(
courseSales.groupBy($"year").pivot($"course", Seq("dotNET", "Java"))
.agg(sum($"earnings")),
expected)
}
  test("pivot year") {
    val expected = Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
    checkAnswer(
      courseSales.groupBy("course").pivot("year", Seq(2012, 2013)).agg(sum($"earnings")),
      expected)
    // Same pivot using the Symbol-based column syntax.
    checkAnswer(
      courseSales.groupBy('course).pivot('year, Seq(2012, 2013)).agg(sum('earnings)),
      expected)
  }
  test("pivot courses with multiple aggregations") {
    // With two aggregates, each pivot value produces two output columns, in the order
    // (dotNET sum, dotNET avg, Java sum, Java avg).
    val expected = Row(2012, 15000.0, 7500.0, 20000.0, 20000.0) ::
      Row(2013, 48000.0, 48000.0, 30000.0, 30000.0) :: Nil
    checkAnswer(
      courseSales.groupBy($"year")
        .pivot("course", Seq("dotNET", "Java"))
        .agg(sum($"earnings"), avg($"earnings")),
      expected)
    checkAnswer(
      courseSales.groupBy($"year")
        .pivot($"course", Seq("dotNET", "Java"))
        .agg(sum($"earnings"), avg($"earnings")),
      expected)
  }
test("pivot year with string values (cast)") {
checkAnswer(
courseSales.groupBy("course").pivot("year", Seq("2012", "2013")).sum("earnings"),
Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
)
}
test("pivot year with int values") {
checkAnswer(
courseSales.groupBy("course").pivot("year", Seq(2012, 2013)).sum("earnings"),
Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
)
}
  test("pivot courses with no values") {
    // When no pivot values are supplied, they are discovered from the data and sorted.
    // Note Java comes before dotNet in sorted order
    val expected = Row(2012, 20000.0, 15000.0) :: Row(2013, 30000.0, 48000.0) :: Nil
    checkAnswer(
      courseSales.groupBy("year").pivot("course").agg(sum($"earnings")),
      expected)
    checkAnswer(
      courseSales.groupBy($"year").pivot($"course").agg(sum($"earnings")),
      expected)
  }
  test("pivot year with no values") {
    // Pivot values are inferred from the data (2012, 2013 in ascending order).
    val expected = Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
    checkAnswer(
      courseSales.groupBy("course").pivot("year").agg(sum($"earnings")),
      expected)
    checkAnswer(
      courseSales.groupBy($"course").pivot($"year").agg(sum($"earnings")),
      expected)
  }
test("pivot max values enforced") {
spark.conf.set(SQLConf.DATAFRAME_PIVOT_MAX_VALUES.key, 1)
intercept[AnalysisException](
courseSales.groupBy("year").pivot("course")
)
spark.conf.set(SQLConf.DATAFRAME_PIVOT_MAX_VALUES.key,
SQLConf.DATAFRAME_PIVOT_MAX_VALUES.defaultValue.get)
}
test("pivot with UnresolvedFunction") {
checkAnswer(
courseSales.groupBy("year").pivot("course", Seq("dotNET", "Java"))
.agg("earnings" -> "sum"),
Row(2012, 15000.0, 20000.0) :: Row(2013, 48000.0, 30000.0) :: Nil
)
}
// Tests for optimized pivot (with PivotFirst) below
  test("optimized pivot planned") {
    val df = courseSales.groupBy("year")
      // pivot with extra columns to trigger optimization
      .pivot("course", Seq("dotNET", "Java") ++ (1 to 10).map(_.toString))
      .agg(sum($"earnings"))
    val queryExecution = spark.sessionState.executePlan(df.queryExecution.logical)
    // The optimized path rewrites the pivot into PivotFirst aggregates, which show up
    // in the plan string as "pivotfirst".
    assert(queryExecution.simpleString.contains("pivotfirst"))
  }
  test("optimized pivot courses with literals") {
    checkAnswer(
      courseSales.groupBy("year")
        // pivot with extra columns to trigger optimization
        .pivot("course", Seq("dotNET", "Java") ++ (1 to 10).map(_.toString))
        .agg(sum($"earnings"))
        // Only the two real courses matter; the dummy columns are dropped here.
        .select("year", "dotNET", "Java"),
      Row(2012, 15000.0, 20000.0) :: Row(2013, 48000.0, 30000.0) :: Nil
    )
  }
  test("optimized pivot year with literals") {
    checkAnswer(
      courseSales.groupBy($"course")
        // pivot with extra columns to trigger optimization
        .pivot("year", Seq(2012, 2013) ++ (1 to 10))
        .agg(sum($"earnings"))
        // Only the two real years matter; the dummy columns are dropped here.
        .select("course", "2012", "2013"),
      Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
    )
  }
  test("optimized pivot year with string values (cast)") {
    checkAnswer(
      courseSales.groupBy("course")
        // pivot with extra columns to trigger optimization
        .pivot("year", Seq("2012", "2013") ++ (1 to 10).map(_.toString))
        .sum("earnings")
        // String pivot values are cast to int; dummy columns are dropped here.
        .select("course", "2012", "2013"),
      Row("dotNET", 15000.0, 48000.0) :: Row("Java", 20000.0, 30000.0) :: Nil
    )
  }
  test("optimized pivot DecimalType") {
    val df = courseSales.select($"course", $"year", $"earnings".cast(DecimalType(10, 2)))
      .groupBy("year")
      // pivot with extra columns to trigger optimization
      .pivot("course", Seq("dotNET", "Java") ++ (1 to 10).map(_.toString))
      .agg(sum($"earnings"))
      .select("year", "dotNET", "Java")

    assertResult(IntegerType)(df.schema("year").dataType)
    // sum over Decimal(10, 2) widens the result precision to Decimal(20, 2).
    assertResult(DecimalType(20, 2))(df.schema("Java").dataType)
    assertResult(DecimalType(20, 2))(df.schema("dotNET").dataType)

    // Expected values are unscaled longs with scale 2 (e.g. 1500000 -> 15000.00).
    checkAnswer(df, Row(2012, BigDecimal(1500000, 2), BigDecimal(2000000, 2)) ::
      Row(2013, BigDecimal(4800000, 2), BigDecimal(3000000, 2)) :: Nil)
  }
test("PivotFirst supported datatypes") {
val supportedDataTypes: Seq[DataType] = DoubleType :: IntegerType :: LongType :: FloatType ::
BooleanType :: ShortType :: ByteType :: Nil
for (datatype <- supportedDataTypes) {
assertResult(true)(PivotFirst.supportsDataType(datatype))
}
assertResult(true)(PivotFirst.supportsDataType(DecimalType(10, 1)))
assertResult(false)(PivotFirst.supportsDataType(null))
assertResult(false)(PivotFirst.supportsDataType(ArrayType(IntegerType)))
}
test("optimized pivot with multiple aggregations") {
checkAnswer(
courseSales.groupBy($"year")
// pivot with extra columns to trigger optimization
.pivot("course", Seq("dotNET", "Java") ++ (1 to 10).map(_.toString))
.agg(sum($"earnings"), avg($"earnings")),
Row(Seq(2012, 15000.0, 7500.0, 20000.0, 20000.0) ++ Seq.fill(20)(null): _*) ::
Row(Seq(2013, 48000.0, 48000.0, 30000.0, 30000.0) ++ Seq.fill(20)(null): _*) :: Nil
)
}
test("pivot with datatype not supported by PivotFirst") {
val expected = Row(Seq(1, 1, 1), Seq(2, 2, 2)) :: Nil
checkAnswer(
complexData.groupBy().pivot("b", Seq(true, false)).agg(max("a")),
expected)
checkAnswer(
complexData.groupBy().pivot('b, Seq(true, false)).agg(max('a)),
expected)
}
test("pivot with datatype not supported by PivotFirst 2") {
checkAnswer(
courseSales.withColumn("e", expr("array(earnings, 7.0d)"))
.groupBy("year")
.pivot("course", Seq("dotNET", "Java"))
.agg(min($"e")),
Row(2012, Seq(5000.0, 7.0), Seq(20000.0, 7.0)) ::
Row(2013, Seq(48000.0, 7.0), Seq(30000.0, 7.0)) :: Nil
)
}
// Output column naming: "<value>_<alias>" when the aggregate is aliased,
// "<value>_<agg expr>" otherwise.
test("pivot preserves aliases if given") {
assertResult(
Array("year", "dotNET_foo", "dotNET_avg(earnings)", "Java_foo", "Java_avg(earnings)")
)(
courseSales.groupBy($"year")
.pivot("course", Seq("dotNET", "Java"))
.agg(sum($"earnings").as("foo"), avg($"earnings")).columns
)
}
// Grouping by a derived, aliased expression rather than a plain column.
test("pivot with column definition in groupby") {
checkAnswer(
courseSales.groupBy(substring(col("course"), 0, 1).as("foo"))
.pivot("year", Seq(2012, 2013))
.sum("earnings"),
Row("d", 15000.0, 48000.0) :: Row("J", 20000.0, 30000.0) :: Nil
)
}
// Implicitly inferred pivot values that include null must not NPE.
test("pivot with null should not throw NPE") {
checkAnswer(
Seq(Tuple1(None), Tuple1(Some(1))).toDF("a").groupBy($"a").pivot("a").count(),
Row(null, 1, null) :: Row(1, null, 1) :: Nil)
}
// Null pivot value combined with an array-typed aggregate (non-PivotFirst path).
test("pivot with null and aggregate type not supported by PivotFirst returns correct result") {
checkAnswer(
Seq(Tuple1(None), Tuple1(Some(1))).toDF("a")
.withColumn("b", expr("array(a, 7)"))
.groupBy($"a").pivot("a").agg(min($"b")),
Row(null, Seq(null, 7), null) :: Row(1, null, Seq(1, 7)) :: Nil)
}
// Pivoted timestamp values must render as session-timezone timestamps in the
// generated column names, not as internal long values.
test("pivot with timestamp and count should not print internal representation") {
val ts = "2012-12-31 16:00:10.011"
val tsWithZone = "2013-01-01 00:00:10.011"
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "GMT") {
val df = Seq(java.sql.Timestamp.valueOf(ts)).toDF("a").groupBy("a").pivot("a").count()
val expected = StructType(
StructField("a", TimestampType) ::
StructField(tsWithZone, LongType) :: Nil)
assert(df.schema == expected)
// String representation of timestamp with timezone should take the time difference
// into account.
checkAnswer(df.select($"a".cast(StringType)), Row(tsWithZone))
}
}
// Pivoting on an expression over a nested (struct) column.
test("SPARK-24722: pivoting nested columns") {
val expected = Row(2012, 15000.0, 20000.0) :: Row(2013, 48000.0, 30000.0) :: Nil
val df = trainingSales
.groupBy($"sales.year")
.pivot(lower($"sales.course"), Seq("dotNet", "Java").map(_.toLowerCase(Locale.ROOT)))
.agg(sum($"sales.earnings"))
checkAnswer(df, expected)
}
// Pivot expression referencing two different columns.
test("SPARK-24722: references to multiple columns in the pivot column") {
val expected = Row(2012, 10000.0) :: Row(2013, 48000.0) :: Nil
val df = trainingSales
.groupBy($"sales.year")
.pivot(concat_ws("-", $"training", $"sales.course"), Seq("Experts-dotNET"))
.agg(sum($"sales.earnings"))
checkAnswer(df, expected)
}
// Pivoting by a constant literal column.
test("SPARK-24722: pivoting by a constant") {
val expected = Row(2012, 35000.0) :: Row(2013, 78000.0) :: Nil
val df1 = trainingSales
.groupBy($"sales.year")
.pivot(lit(123), Seq(123))
.agg(sum($"sales.earnings"))
checkAnswer(df1, expected)
}
// An aggregate function as the pivot column is rejected at analysis time.
test("SPARK-24722: aggregate as the pivot column") {
val exception = intercept[AnalysisException] {
trainingSales
.groupBy($"sales.year")
.pivot(min($"training"), Seq("Experts"))
.agg(sum($"sales.earnings"))
}
assert(exception.getMessage.contains("aggregate functions are not allowed"))
}
// Struct-valued pivot column with explicitly listed struct values.
test("pivoting column list with values") {
val expected = Row(2012, 10000.0, null) :: Row(2013, 48000.0, 30000.0) :: Nil
val df = trainingSales
.groupBy($"sales.year")
.pivot(struct(lower($"sales.course"), $"training"), Seq(
struct(lit("dotnet"), lit("Experts")),
struct(lit("java"), lit("Dummies")))
).agg(sum($"sales.earnings"))
checkAnswer(df, expected)
}
// Struct-valued pivot column with inferred values: fails at execution because
// the struct results cannot be converted to literals.
test("pivoting column list") {
val exception = intercept[RuntimeException] {
trainingSales
.groupBy($"sales.year")
.pivot(struct(lower($"sales.course"), $"training"))
.agg(sum($"sales.earnings"))
.collect()
}
assert(exception.getMessage.contains("Unsupported literal type"))
}
}
| michalsenkyr/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFramePivotSuite.scala | Scala | apache-2.0 | 12,175 |
package com.wavesplatform.lang
import cats.Id
import cats.kernel.Monoid
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.lang.directives.values._
import com.wavesplatform.lang.script.Script
import com.wavesplatform.lang.v1.CTX
import com.wavesplatform.lang.v1.compiler.Terms._
import com.wavesplatform.lang.v1.compiler.Types._
import com.wavesplatform.lang.v1.evaluator.Contextful.NoContext
import com.wavesplatform.lang.v1.evaluator.EvaluatorV1
import com.wavesplatform.lang.v1.evaluator.EvaluatorV1._
import com.wavesplatform.lang.v1.evaluator.ctx._
import com.wavesplatform.lang.v1.evaluator.ctx.impl.{EnvironmentFunctions, PureContext, _}
import com.wavesplatform.lang.v1.traits.domain.Recipient.Address
import com.wavesplatform.lang.v1.traits.domain.{BlockInfo, Recipient, ScriptAssetInfo, Tx}
import com.wavesplatform.lang.v1.traits.{DataType, Environment}
import monix.eval.Coeval
import scala.util.{Left, Right, Try}
// Shared fixtures for RIDE evaluator tests: sample case types and unions,
// a stub blockchain Environment, and address encoding/decoding helpers.
object Common {
import com.wavesplatform.lang.v1.evaluator.ctx.impl.converters._
// Value side of a DataEntry: any of the four RIDE data value types.
private val dataEntryValueType = UNION(LONG, BOOLEAN, BYTESTR, STRING)
val dataEntryType = CASETYPEREF("DataEntry", List("key" -> STRING, "value" -> dataEntryValueType))
// Context contributing only the DataEntry type (no variables, no functions).
val addCtx: CTX[NoContext] = CTX[NoContext](Seq(dataEntryType), Map.empty, Array.empty)
// Evaluates `expr` with EvaluatorV1; by default uses the pure V1 context
// combined with `addCtx` above.
def ev[T <: EVALUATED](
context: EvaluationContext[NoContext, Id] =
Monoid.combine(PureContext.build(V1, fixUnicodeFunctions = true, useNewPowPrecision = true).evaluationContext, addCtx.evaluationContext),
expr: EXPR
): Either[ExecutionError, T] =
new EvaluatorV1[Id, NoContext]().apply[T](context, expr)
// Sample native function "MULTIPLY" (id 10005) multiplying two LONG args;
// the Try guards the multiplication and maps any failure to its message.
val multiplierFunction: NativeFunction[NoContext] =
NativeFunction("MULTIPLY", 1L, 10005.toShort, LONG, ("x1", LONG), ("x2", LONG)) {
case CONST_LONG(x1: Long) :: CONST_LONG(x2: Long) :: Nil => Try(x1 * x2).map(CONST_LONG).toEither.left.map(_.toString)
case _ => ??? // suppress pattern match warning
}
// Case types and unions used by the pattern-matching/union tests.
val pointTypeA = CASETYPEREF("PointA", List("X" -> LONG, "YA" -> LONG))
val pointTypeB = CASETYPEREF("PointB", List("X" -> LONG, "YB" -> LONG))
val pointTypeC = CASETYPEREF("PointC", List("YB" -> LONG))
val pointTypeD = CASETYPEREF("PointD", List("YB" -> UNION(LONG, UNIT)))
val AorB = UNION(pointTypeA, pointTypeB)
val AorBorC = UNION(pointTypeA, pointTypeB, pointTypeC)
val BorC = UNION(pointTypeB, pointTypeC)
val CorD = UNION(pointTypeC, pointTypeD)
// Concrete instances of the case types above.
val pointAInstance = CaseObj(pointTypeA, Map("X" -> 3L, "YA" -> 40L))
val pointBInstance = CaseObj(pointTypeB, Map("X" -> 3L, "YB" -> 41L))
val pointCInstance = CaseObj(pointTypeC, Map("YB" -> 42L))
val pointDInstance1 = CaseObj(pointTypeD, Map("YB" -> 43L))
val pointDInstance2 = CaseObj(pointTypeD, Map("YB" -> unit))
// All sample types plus named unions for them (PointAB/PointBC/PointCD).
val sampleTypes = Seq(pointTypeA, pointTypeB, pointTypeC, pointTypeD) ++ Seq(
UNION.create(AorB.typeList, Some("PointAB")),
UNION.create(BorC.typeList, Some("PointBC")),
UNION.create(CorD.typeList, Some("PointCD"))
)
// Evaluation context exposing `instance` under the variable name "p".
def sampleUnionContext(instance: CaseObj) =
EvaluationContext.build(
Map.empty,
Map("p" -> LazyVal.fromEvaluated[Id](instance)),
Seq.empty[BaseFunction[NoContext]]
)
// Environment stub: only height, chainId, inputEntity and multiPaymentAllowed
// are usable; every other member is left unimplemented (???) and will throw
// NotImplementedError if a test accidentally touches it.
def emptyBlockchainEnvironment(h: Int = 1, in: Coeval[Environment.InputEntity] = Coeval(???), nByte: Byte = 'T'): Environment[Id] = new Environment[Id] {
override def height: Long = h
override def chainId: Byte = nByte
override def inputEntity = in()
override def transactionById(id: Array[Byte]): Option[Tx] = ???
override def transferTransactionById(id: Array[Byte]): Option[Tx.Transfer] = ???
override def transactionHeightById(id: Array[Byte]): Option[Long] = ???
override def assetInfoById(id: Array[Byte]): Option[ScriptAssetInfo] = ???
override def lastBlockOpt(): Option[BlockInfo] = ???
override def blockInfoByHeight(height: Int): Option[BlockInfo] = ???
override def data(recipient: Recipient, key: String, dataType: DataType): Option[Any] = ???
override def hasData(recipient: Recipient): Boolean = ???
override def resolveAlias(name: String): Either[String, Recipient.Address] = ???
override def accountBalanceOf(addressOrAlias: Recipient, assetId: Option[Array[Byte]]): Either[String, Long] = ???
override def accountWavesBalanceOf(addressOrAlias: Recipient): Either[String, Environment.BalanceDetails] = ???
override def tthis: Environment.Tthis = ???
override def multiPaymentAllowed: Boolean = true
override def txId: ByteStr = ???
override def transferTransactionFromProto(b: Array[Byte]): Option[Tx.Transfer] = ???
override def addressFromString(address: String): Either[String, Recipient.Address] = ???
def accountScript(addressOrAlias: Recipient): Option[Script] = ???
override def callScript(dApp: Address, func: String, args: List[EVALUATED], payments: Seq[(Option[Array[Byte]], Long)], remainingComplexity: Int, reentrant: Boolean): Coeval[(Either[ValidationError, EVALUATED], Int)] = ???
}
// Builds an address: version byte, chain id, truncated public-key hash,
// then a checksum over those bytes appended at the end.
def addressFromPublicKey(chainId: Byte, pk: Array[Byte], addressVersion: Byte = EnvironmentFunctions.AddressVersion): Array[Byte] = {
val publicKeyHash = Global.secureHash(pk).take(EnvironmentFunctions.HashLength)
val withoutChecksum = addressVersion +: chainId +: publicKeyHash
withoutChecksum ++ Global.secureHash(withoutChecksum).take(EnvironmentFunctions.ChecksumLength)
}
// Decodes a base58 address string (with optional prefix). Returns:
// Left(error) on a base58 decoding failure, Right(None) when the bytes do
// not form a valid address for `chainId`, Right(Some(bytes)) otherwise.
def addressFromString(chainId: Byte, str: String): Either[String, Option[Array[Byte]]] = {
val base58String = if (str.startsWith(EnvironmentFunctions.AddressPrefix)) str.drop(EnvironmentFunctions.AddressPrefix.length) else str
Global.base58Decode(base58String, Global.MaxAddressLength) match {
case Left(e) => Left(e)
case Right(addressBytes) =>
// Layout mirrors addressFromPublicKey: version byte, network byte, body, checksum.
val version = addressBytes.head
val network = addressBytes.tail.head
lazy val checksumCorrect = {
val checkSum = addressBytes.takeRight(EnvironmentFunctions.ChecksumLength)
val checkSumGenerated =
Global.secureHash(addressBytes.dropRight(EnvironmentFunctions.ChecksumLength)).take(EnvironmentFunctions.ChecksumLength)
checkSum sameElements checkSumGenerated
}
if (version == EnvironmentFunctions.AddressVersion && network == chainId && addressBytes.length == EnvironmentFunctions.AddressLength && checksumCorrect)
Right(Some(addressBytes))
else Right(None)
}
}
}
| wavesplatform/Waves | lang/testkit/src/main/scala/com/wavesplatform/lang/Common.scala | Scala | mit | 7,237 |
package fpinscala.parsing
import scala.util.matching.Regex
object ParserTypes { // 167
  // A parser is simply a function from an input position to a parse result.
  type Parser[+A] = Location => Result[A]

  trait Result[+A] {

    // Rewrites the error of a failed result with `f`; successes pass through. // 168
    def mapError(f: ParseError => ParseError): Result[A] = this match {
      case Failure(err, committed) => Failure(f(err), committed)
      case other                   => other
    }

    // Turns a committed failure back into an uncommitted one, so that `or`
    // may still try its alternative branch. // 169
    def uncommit: Result[A] = this match {
      case Failure(err, true) => Failure(err, isCommitted = false)
      case other              => other
    }

    // Marks a failure as committed when `isCommitted` is true (e.g. after
    // some input has already been consumed). // 170
    def addCommit(isCommitted: Boolean): Result[A] = this match {
      case Failure(err, committed) => Failure(err, committed || isCommitted)
      case other                   => other
    }

    // Shifts a success's consumed-character count forward by `n`. // 170
    def advanceSuccess(n: Int): Result[A] = this match {
      case Success(value, consumed) => Success(value, n + consumed)
      case other                    => other
    }
  }

  case class Success[+A](get: A, charsConsumed: Int) extends Result[A]
  case class Failure(get: ParseError, isCommitted: Boolean) extends Result[Nothing] // 169
}
// Partial Parsers implementation over ParserTypes.Parser; several primitives
// are still unimplemented stubs (???). Page numbers reference the book text.
object ParserImpl extends Parsers[ParserTypes.Parser] {
import ParserTypes._
// Runs `p` on `input` from position zero. Unimplemented.
override def run[A](p: Parser[A])(input: String): Either[ParseError,A] = // 149, 163, 170
???
// Parser recognizing the literal string `s`. Unimplemented.
override implicit def string(s: String): Parser[String] = // 149, 167
???
// Parser recognizing a match of regex `r`. Unimplemented.
override implicit def regex(r: Regex): Parser[String] = // 157, 167
???
// Returns the raw input slice consumed by `p`. Unimplemented.
override def slice[A](p: Parser[A]): Parser[String] = // 154, 167
???
// Replaces the error label on failure; see Result.mapError.
override def label[A](msg: String)(p: Parser[A]): Parser[A] = // 161
s => p(s).mapError(_.label(msg)) // 168
// Pushes (location, msg) onto the error stack on failure for nested context.
override def scope[A](msg: String)(p: Parser[A]): Parser[A] = // 162
loc => p(loc).mapError(_.push(loc, msg)) // 168
// Sequencing: on success of `p`, runs f(a) on the advanced input. The
// second parser's failure is committed iff `p` consumed input (n != 0),
// and its consumed count is increased by `p`'s.
override def flatMap[A,B](p: Parser[A])(f: A => Parser[B]): Parser[B] = // 157
s => p(s) match { // 169
case Success(a, n) => f(a)(s.advanceBy(n))
.addCommit(n != 0)
.advanceSuccess(n)
case e @ Failure(_, _) => e
}
// Converts a committed failure of `p` into an uncommitted one, allowing
// `or` to backtrack past it.
override def attempt[A](p: Parser[A]): Parser[A] = // 164
loc => p(loc).uncommit // 169
// Tries `s1`; runs `s2` only when `s1` fails *uncommitted*.
override def or[A](s1: Parser[A], s2: => Parser[A]): Parser[A] = // 149, 156
s => s1(s) match { // 169
case Failure(e, false) => s2(s)
case r => r
}
// Parser that always succeeds with `a`, consuming nothing. Unimplemented.
override def succeed[A](a: A): Parser[A] = // 153, 167
???
} | fpinscala-muc/fpinscala-seelmann | exercises/src/main/scala/fpinscala/parsing/ParserImpl.scala | Scala | mit | 2,143 |
package io.finch.request
import com.twitter.finagle.httpx.Request
import com.twitter.util.{Await, Future}
import io.finch._
import org.scalatest.{Matchers, FlatSpec}
import items._
class RequestReaderCompanionSpec extends FlatSpec with Matchers {

  "The RequestReaderCompanion" should "support a factory method based on a function that reads from the request" in {
    val req = Request(("foo", "5"))
    val result = RequestReader[Option[String]](_ => Some("5"))(req)
    Await.result(result) shouldBe Some("5")
  }

  it should "support a factory method based on a constant Future" in {
    val req = Request(("foo", ""))
    val result = RequestReader.const(1.toFuture)(req)
    Await.result(result) shouldBe 1
  }

  it should "support a factory method based on a constant value" in {
    val req = Request(("foo", ""))
    val result = RequestReader.value(1)(req)
    Await.result(result) shouldBe 1
  }

  it should "support a factory method based on a constant exception" in {
    val req = Request(("foo", ""))
    val result: Future[Int] = RequestReader.exception(NotPresent(BodyItem))(req)
    a [NotPresent] shouldBe thrownBy(Await.result(result))
  }
}
| bthuillier/finch | core/src/test/scala/io/finch/request/RequestReaderCompanionSpec.scala | Scala | apache-2.0 | 1,341 |
package biz.k11i.xgboost.spark.model
import biz.k11i.xgboost.spark.test.XGBoostPredictionTestBase
import org.scalatest.{FunSuite, Matchers}
class XGBoostRegressionModelTest
  extends FunSuite
  with XGBoostPredictionTestBase
  with Matchers {

  /**
   * Loads the regression model named `modelName`, scores the housing test set
   * and checks that the resulting "prediction" column matches the pre-computed
   * expectations within a 1e-5 tolerance.
   *
   * Shared by the libxgboost- and xgboost4j-spark-compatible model tests,
   * which previously duplicated this body verbatim.
   */
  private def assertHousingPrediction(modelName: String): Unit = {
    val model = XGBoostRegression.load(modelPath(modelName))
    val testDF = loadTestData("housing.test", denseVector = true)
    val predDF = model.transform(testDF)
    predDF.columns should contain("prediction")
    val expectedDF = loadExpectedData("housing.predict.snappy.parquet")
    assertColumnApproximateEquals(
      expectedDF, "prediction",
      predDF, "prediction",
      1e-5)
  }

  test("Regression using libxgboost-compatible model") {
    assertHousingPrediction("housing.model")
  }

  test("Regression using xgboost4j-spark-compatible model") {
    assertHousingPrediction("housing.model.spark")
  }

  // Leaf-index predictions are exact integers, so this compares for equality
  // rather than within a tolerance.
  test("Leaves prediction using xgboost4j-spark-compatible model") {
    val model = XGBoostRegression.load(modelPath("housing.model.spark"))
    val testDF = loadTestData("housing.test", denseVector = true)
    val predDF = model
      .setPredictLeaves(true)
      .transform(testDF)
    predDF.columns should contain("prediction")
    val expectedDF = loadExpectedData("housing.leaf.snappy.parquet")
    assertColumnExactEquals(
      expectedDF, "predLeaf",
      predDF, "prediction")
  }
}
| komiya-atsushi/xgboost-predictor-java | xgboost-predictor-spark/src/test/scala/biz/k11i/xgboost/spark/model/XGBoostRegressionModelTest.scala | Scala | apache-2.0 | 1,692 |
package colossus.extensions.util.bson.reader
import java.nio.ByteBuffer
import colossus.extensions.util.bson.element.BsonBoolean
/**
 * Reads a BSON boolean element from `buffer`.
 *
 * Wire format: a null-terminated element name (cstring) followed by a single
 * value byte, 0x00 (false) or 0x01 (true).
 */
case class BsonBooleanReader(buffer: ByteBuffer) extends Reader[BsonBoolean] {

  /**
   * Reads the element name and value byte from the current buffer position.
   *
   * Returns `None` when the value byte is neither 0x00 nor 0x01, which the
   * BSON spec treats as invalid. (The previous implementation used a
   * non-exhaustive match and threw a `MatchError` on such input.)
   */
  def read: Option[BsonBoolean] = {
    val name = readCString()
    buffer.get() match {
      case 0x00 => Some(BsonBoolean(name, false))
      case 0x01 => Some(BsonBoolean(name, true))
      case _    => None
    }
  }
}
| fehmicansaglam/colossus-extensions | mongo/src/main/scala/colossus/extensions/util/bson/reader/BsonBooleanReader.scala | Scala | apache-2.0 | 426 |
/*
* Copyright 2021 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.parquet.types
import java.{lang => jl}
import com.spotify.scio.ScioContext
import com.spotify.scio.io.TapSpec
import com.spotify.scio.testing.ScioIOSpec
import org.apache.commons.io.FileUtils
import org.apache.parquet.filter2.predicate.FilterApi
import org.scalatest.BeforeAndAfterAll
// Tests for Scio's typed Parquet IO: writes `Wide` records once (beforeAll)
// and reads them back with various combinations of projection (Narrow),
// predicate push-down, and the skipClone option.
class ParquetTypeIOTest extends ScioIOSpec with TapSpec with BeforeAndAfterAll {
private val dir = tmpDir
private val records = (1 to 10).map(newRecord)
// Write the fixture records to `dir` once, before any test runs.
override protected def beforeAll(): Unit = {
val sc = ScioContext()
sc.parallelize(records)
.saveAsTypedParquetFile(dir.toString)
sc.run()
()
}
override protected def afterAll(): Unit = FileUtils.deleteDirectory(dir)
// Record i: o = Some(i), r = [1..i].
private def newRecord(i: Int): Wide = Wide(i, i.toString, Some(i), (1 to i).toList)
// Push-down predicate: i <= 5 OR o >= 95 (the latter never matches the
// 10 fixture records, so only the i <= 5 branch selects rows here).
private val predicate = FilterApi.or(
FilterApi.ltEq(FilterApi.intColumn("i"), jl.Integer.valueOf(5)),
FilterApi.gtEq(FilterApi.intColumn("o"), jl.Integer.valueOf(95))
)
// Round-trip and JobTest wiring for the typed Parquet IO.
"ParquetTypeIO" should "work" in {
val xs = (1 to 100).map(newRecord)
testTap(xs)(_.saveAsTypedParquetFile(_))(".parquet")
testJobTest(xs)(ParquetTypeIO(_))(_.typedParquetFile[Wide](_))(
_.saveAsTypedParquetFile(_)
)
}
it should "read case classes" in {
val sc = ScioContext()
val data = sc.typedParquetFile[Wide](s"$dir/*.parquet")
data should containInAnyOrder(records)
sc.run()
()
}
it should "read case classes with skip clone disabled" in {
val sc = ScioContext()
val data = sc.typedParquetFile[Wide](s"$dir/*.parquet", skipClone = false)
data should containInAnyOrder(records)
sc.run()
()
}
// Reading as Narrow exercises column projection: only i and r are read.
it should "read case classes with projection" in {
val sc = ScioContext()
val data = sc.typedParquetFile[Narrow](s"$dir/*.parquet")
data should containInAnyOrder(records.map(r => Narrow(r.i, r.r)))
sc.run()
()
}
it should "read case classes with predicate" in {
val sc = ScioContext()
val data = sc.typedParquetFile[Wide](s"$dir/*.parquet", predicate = predicate)
data should containInAnyOrder(records.filter(t => t.i <= 5 || t.o.exists(_ >= 95)))
sc.run()
()
}
// Projection and predicate combined: note the predicate references column o,
// which is not part of the Narrow projection.
it should "read case classes with projection and predicate" in {
val sc = ScioContext()
val data = sc.typedParquetFile[Narrow](s"$dir/*.parquet", predicate = predicate)
val expected = records.filter(t => t.i <= 5 || t.o.exists(_ >= 95)).map(t => Narrow(t.i, t.r))
data should containInAnyOrder(expected)
sc.run()
()
}
}
// Test records: Wide is what gets written; Narrow is a field subset of it,
// used to exercise Parquet column projection on read.
case class Wide(i: Int, s: String, o: Option[Int], r: List[Int])
case class Narrow(i: Int, r: List[Int])
| spotify/scio | scio-parquet/src/test/scala/com/spotify/scio/parquet/types/ParquetTypeIOTest.scala | Scala | apache-2.0 | 3,228 |
package com.Alteryx.sparkGLM
import org.apache.spark.sql.test.TestSQLContext
import org.apache.spark.sql.functions._
import org.scalatest.FunSuite
import com.Alteryx.testUtils.data.testData._
class lmPredict$Test extends FunSuite {
  val sqlCtx = TestSQLContext

  // Fits y ~ x on `testDF` and checks the shape of the prediction output:
  // a DataFrame with `expectedPartitions` partitions, two columns and a
  // maximum index of 49 (the fixtures hold 50 zero-indexed rows).
  //
  // The two tests previously duplicated this body verbatim, mixing `.size`
  // and `.length`; they now share one helper using `.length` consistently.
  private def checkPredictions(
      testDF: org.apache.spark.sql.DataFrame,
      expectedPartitions: Int): Unit = {
    val x = testDF.select("intercept", "x")
    val y = testDF.select("y")
    val lmTest = LM.fit(x, y)
    val predicted = lmTest.predict(x)
    assert(predicted.getClass.getName == "org.apache.spark.sql.DataFrame")
    assert(predicted.rdd.partitions.length == expectedPartitions)
    assert(predicted.columns.length == 2)
    assert(predicted.agg(max("index")).collect.apply(0).get(0) == 49)
  }

  test("lmPredict with a single partition") {
    checkPredictions(testDFSinglePart, 1)
  }

  test("lmPredict with multiple partitions") {
    checkPredictions(testDFMultiPart, 4)
  }
}
| cafreeman/sparkGLM | src/test/scala/com/Alteryx/sparkGLM/lmPredict$Test.scala | Scala | apache-2.0 | 1,192 |
package pureconfig
import scala.compiletime.testing.{typeChecks, typeCheckErrors}
import scala.deriving.Mirror
import scala.language.higherKinds
import com.typesafe.config.{ConfigFactory, ConfigValueFactory, ConfigValueType}
import pureconfig._
import pureconfig.error.WrongType
import pureconfig.generic.derivation.{EnumConfigReader, EnumConfigReaderDerivation}
import pureconfig.error.CannotConvert
// Sample enum with a derived EnumConfigReader; the suite below expects its
// config representation in kebab-case ("rainy-blue", "sunny-yellow").
enum Color derives EnumConfigReader {
case RainyBlue, SunnyYellow
}
// Verifies the derived EnumConfigReader for `Color`: kebab-case strings map
// to enum values, unknown strings and non-string config values fail cleanly.
class EnumerationReaderDerivationSuite extends BaseSuite {
import Color._
behavior of "EnumConfigReader"
it should "provide methods to derive readers for enumerations encoded as sealed traits or enums" in {
// Valid kebab-case option names resolve to their enum values.
ConfigReader[Color].from(ConfigValueFactory.fromAnyRef("rainy-blue")) shouldBe Right(RainyBlue)
ConfigReader[Color].from(ConfigValueFactory.fromAnyRef("sunny-yellow")) shouldBe Right(SunnyYellow)
// A string that is not an option name fails with CannotConvert.
val unknownValue = ConfigValueFactory.fromAnyRef("blue")
ConfigReader[Color].from(unknownValue) should failWith(
CannotConvert("blue", "Color", "The value is not a valid enum option."),
"",
emptyConfigOrigin
)
// A non-string config value (an object here) fails with WrongType.
val conf = ConfigFactory.parseString("{ type: person, name: John, surname: Doe }")
ConfigReader[Color].from(conf.root()) should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.STRING)),
"",
stringConfigOrigin(1)
)
}
}
| melrief/pureconfig | tests/src/test/scala-3/pureconfig/EnumerationReaderDerivationSuite.scala | Scala | mpl-2.0 | 1,403 |
package edu.osu.cse.fathi.spark.ssb
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
//PART
// 0 P_PARTKEY:INTEGER
// 1 P_NAME:TEXT
// 2 P_MFGR:TEXT
// 3 P_CATEGORY:TEXT
// 4 P_BRAND1:TEXT
// 5 P_COLOR:TEXT
// 6 P_TYPE:TEXT
// 7 P_SIZE:INTEGER
// 8 P_CONTAINER:TEXT
//SUPPLIER
// 0 S_SUPPKEY:INTEGER
// 1 S_NAME:TEXT
// 2 S_ADDRESS:TEXT
// 3 S_CITY:TEXT
// 4 S_NATION:TEXT
// 5 S_REGION:TEXT
// 6 S_PHONE:TEXT
//CUSTOMER
// 0 C_CUSTKEY:INTEGER
// 1 C_NAME:TEXT
// 2 C_ADDRESS:TEXT
// 3 C_CITY:TEXT
// 4 C_NATION:TEXT
// 5 C_REGION:TEXT
// 6 C_PHONE:TEXT
// 7 C_MKTSEGMENT:TEXT
//LINEORDER
// 0 LO_ORDERKEY:INTEGER
// 1 LO_LINENUMBER:INTEGER
// 2 LO_CUSTKEY:INTEGER
// 3 LO_PARTKEY:INTEGER
// 4 LO_SUPPKEY:INTEGER
// 5 LO_ORDERDATE:DATE
// 6 LO_ORDERPRIORITY:TEXT
// 7 LO_SHIPPRIORITY:TEXT
// 8 LO_QUANTITY:INTEGER
// 9 LO_EXTENDEDPRICE:DECIMAL
//10 LO_ORDTOTALPRICE:DECIMAL
//11 LO_DISCOUNT:INTEGER
//12 LO_REVENUE:DECIMAL
//13 LO_SUPPLYCOST:DECIMAL
//14 LO_TAX:INTEGER
//15 L_COMMITDATE:DATE
//16 L_SHIPMODE:TEXT
//DDATE
// 0 D_DATEKEY:DATE
// 1 D_DATE:TEXT
// 2 D_DAYOFWEEK:TEXT
// 3 D_MONTH:TEXT
// 4 D_YEAR:INTEGER
// 5 D_YEARMONTHNUM:INTEGER
// 6 D_YEARMONTH:TEXT
// 7 D_DAYNUMINWEEK:INTEGER
// 8 D_DAYNUMINMONTH:INTEGER
// 9 D_DAYNUMINYEAR:INTEGER
//10 D_MONTHNUMINYEAR:INTEGER
//11 D_WEEKNUMINYEAR:INTEGER
//12 D_SELLINGSEASON:TEXT
//13 D_LASTDAYINWEEKFL:TEXT
//14 D_LASTDAYINMONTHFL:TEXT
//15 D_HOLIDAYFL:TEXT
//16 D_WEEKDAYFL:TEXT
//
/**
* Runs SSB all queries on parquet formats. Before executing queries, loads all data into memory.
*/
object SsbQueryRunnerOnSpark {
// Entry point: expects one argument — the directory holding the pipe-delimited
// SSB tables — then runs every query ITERATIONS times, printing one profiling
// line per run.
def main(args: Array[String]) {
val config = new SparkConf().setAppName("SSB Queries on Spark [Row Format]")
val sc = new SparkContext(config)
if (args.length != 1) {
Console.print("USAGE: SsbQueryRunnerOnSpark <db home dir>")
throw new RuntimeException("Not enough number of parameters")
}
val dbDir = args(0)
// Each query is a (SparkContext, dbDir) => (result RDD, query name) factory;
// hand_opt_* entries are hand-optimized counterparts of the generated ssb_* ones.
val allQueries = Seq[(SparkContext, String) => (RDD[_], String)](
ssb_1_1, hand_opt_ssb_1_1,
ssb_1_2, hand_opt_ssb_1_2,
ssb_1_3, hand_opt_ssb_1_3,
ssb_2_1, hand_opt_ssb_2_1,
ssb_2_2, hand_opt_ssb_2_2,
ssb_2_3, hand_opt_ssb_2_3,
ssb_3_1, hand_opt_ssb_3_1,
ssb_3_2, hand_opt_ssb_3_2,
ssb_3_3, hand_opt_ssb_3_3,
// /* ssb_3_4, */
ssb_4_1, hand_opt_ssb_4_1,
ssb_4_2, hand_opt_ssb_4_2,
ssb_4_3, hand_opt_ssb_4_3
)
val ITERATIONS = 20
println(f"[PROFILING RESULTS]:ITERATION,QueryName,Exectime,ResultRowCount")
allQueries.foreach { (query: (SparkContext, String) => (RDD[_], String)) =>
val (resultRdd: RDD[_], queryName) = query(sc, dbDir)
(1 to ITERATIONS).foreach { iterationNumber =>
val startTime = System.nanoTime()
// Forces evaluation of the full lineage; the RDD is not cached, so every
// iteration re-reads and re-processes the input files.
resultRdd.foreachPartition(partition => println("Running the partition"))
val endTime = System.nanoTime()
val executionTime = endTime - startTime
// count re-executes the lineage, but happens after the timing window closes.
val resultRowCount = resultRdd.count
println(f"[PROFILING RESULTS]:${iterationNumber}%d,${queryName}%s,${executionTime}%d,${resultRowCount}%d")
}
}
}
/**
 * Hand-optimized SSB Q1.1: projects/filters each table down to the needed
 * columns before the join.
 *
 * select sum(lo_extendedprice*lo_discount) as revenue
 * from lineorder,ddate
 * where lo_orderdate = d_datekey
 * and d_year = 1993
 * and lo_discount>=1
 * and lo_discount<=3
 * and lo_quantity<25
 */
val hand_opt_ssb_1_1 = (sc: SparkContext, dbDir: String) => {
// Dates of 1993 only, keyed by datekey (payload is a dummy 0).
val rddDate = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
val dateKey = columns(0).toInt
val year = columns(4).toInt
if (year == 1993) {
(dateKey, 0)
} else {
null
}
}).filter(_ != null)
// Lineorder rows keyed by orderdate; revenue precomputed per row.
val rddLo = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
val orderDate = columns(5).toInt
val quantity = columns(8).toInt
val extendedPrice = columns(9).toFloat
val discount = columns(11).toInt
val revenue = extendedPrice * discount
(orderDate, (revenue, quantity, discount))
})
val rddLoFilter = rddLo.filter{
case (orderDate, (revenue, quantity, discount)) =>
(discount >= 1) && (discount <= 3) && (quantity < 25)
}
// Join on orderdate implements the date-dimension filter; constant key 1
// funnels everything into a single global sum.
val rddLoDate = rddLoFilter.join(rddDate).map{
case (orderDate, ((revenue, quantity, discount), zero)) =>
(1, revenue)
}
val rddAggregate = rddLoDate.reduceByKey(_ + _)
(rddAggregate, "hand_opt_ssb_1_1")
}
// Generated (unoptimized) SSB Q1.1: parses all 17 lineorder columns and all
// 17 date columns, filters, joins on orderdate, then sums price*discount via
// a single-group groupBy.
val ssb_1_1 = (sc: SparkContext, dbDir: String) => {
val rdd000 = sc.textFile(dbDir + "/lineorder*")
val rdd001 = rdd000.map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// _12 = lo_discount, _9 = lo_quantity.
val rdd002 = rdd001.filter(x => ((x._12 >= 1) && (x._12 <= 3) && (x._9 < 25)))
// Project (extendedprice, discount, orderdate).
val rdd003 = rdd002.map(x => (x._10, x._12, x._6))
val rdd004 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// _5 = d_year.
val rdd005 = rdd004.filter(x => (x._5 == 1993))
val rdd006 = rdd005.map(x => Tuple1(x._1))
val rdd007 = rdd003.map(x => (x._3, x))
val rdd008 = rdd006.map(x => (x._1, x))
val rdd009 = rdd007.join(rdd008).map(x => x._2)
val rdd010 = rdd009.map(x => (1, x._1._1, x._1._2))
// Single-group aggregation: sum of extendedprice * discount.
val rdd011 = rdd010.groupBy(x => Tuple1(1))
val rdd012 = rdd011.map(x => (1, x._2.map(x => (x._2 * x._3)).sum))
(rdd012, "ssb_1_1")
}
/**
 * Hand-optimized SSB Q1.2 (note the data stores yearmonth as "Jan1994"
 * rather than the query literal '199401'):
 *
 * select sum(lo_extendedprice*lo_discount) as revenue
 * from lineorder,ddate
 * where lo_orderdate = d_datekey
 * and d_yearmonth = '199401'
 * and lo_discount>=4
 * and lo_discount<=6
 * and lo_quantity>=26
 * and lo_quantity<=35
 */
val hand_opt_ssb_1_2 = (sc: SparkContext, dbDir: String) => {
// Dates of January 1994 only, keyed by datekey (payload is a dummy 0).
val rddDate = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
val dateKey = columns(0).toInt
val yearMonth = columns(6)
if (yearMonth == "Jan1994") {
(dateKey, 0)
} else {
null
}
}).filter(_ != null)
// Lineorder rows keyed by orderdate; revenue precomputed per row.
val rddLo = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
val orderDate = columns(5).toInt
val quantity = columns(8).toInt
val extendedPrice = columns(9).toFloat
val discount = columns(11).toInt
val revenue = extendedPrice * discount
(orderDate, (revenue, quantity, discount))
})
val rddLoFilter = rddLo.filter{
case (orderDate, (revenue, quantity, discount)) =>
(discount >= 4 ) && (discount <= 6) && (quantity >= 26) && (quantity <= 35)
}
// Join implements the date filter; constant key 1 yields one global sum.
val rddLoDate = rddLoFilter.join(rddDate).map{
case (orderDate, ((revenue, quantity, discount), zero)) =>
(1, revenue)
}
val rddAggregate = rddLoDate.reduceByKey(_ + _)
(rddAggregate, "hand_opt_ssb_1_2")
}
// Generated (unoptimized) SSB Q1.2; same plan shape as ssb_1_1 with the
// Jan1994 / discount 4-6 / quantity 26-35 predicates.
val ssb_1_2 = (sc: SparkContext, dbDir: String) => {
val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// _12 = lo_discount, _9 = lo_quantity.
val rdd002 = rdd001.filter(x => ((x._12 >= 4) && (x._12 <= 6) && (x._9 >= 26) && (x._9 <= 35)))
val rdd003 = rdd002.map(x => (x._10, x._12, x._6))
val rdd004 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// _7 = d_yearmonth.
val rdd005 = rdd004.filter(x => (x._7 == "Jan1994"))
val rdd006 = rdd005.map(x => Tuple1(x._1))
val rdd007 = rdd003.map(x => (x._3, x))
val rdd008 = rdd006.map(x => (x._1, x))
val rdd009 = rdd007.join(rdd008).map(x => x._2)
val rdd010 = rdd009.map(x => (1, x._1._1, x._1._2))
// Single-group aggregation: sum of extendedprice * discount.
val rdd011 = rdd010.groupBy(x => Tuple1(1))
val rdd012 = rdd011.map(x => (1, x._2.map(x => (x._2 * x._3)).sum))
(rdd012, "ssb_1_2")
}
/**
 * Hand-optimized SSB Q1.3:
 *
 * select sum(lo_extendedprice*lo_discount) as revenue
 * from lineorder,ddate
 * where lo_orderdate = d_datekey
 * and d_weeknuminyear = 6
 * and d_year = 1994
 * and lo_discount>=5
 * and lo_discount<=7
 * and lo_quantity>=26
 * and lo_quantity<=35
 */
val hand_opt_ssb_1_3 = (sc: SparkContext, dbDir: String) => {
// Dates of week 6 of 1994 only, keyed by datekey (payload is a dummy 0).
val rddDate = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
val dateKey = columns(0).toInt
val year = columns(4).toInt
val weekNumInYear = columns(11).toInt
if ((year == 1994) && (weekNumInYear == 6)) {
(dateKey, 0)
} else {
null
}
}).filter(_ != null)
// Lineorder rows keyed by orderdate; revenue precomputed per row.
val rddLo = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
val orderDate = columns(5).toInt
val quantity = columns(8).toInt
val extendedPrice = columns(9).toFloat
val discount = columns(11).toInt
val revenue = extendedPrice * discount
(orderDate, (revenue, quantity, discount))
})
val rddLoFilter = rddLo.filter{
case (orderDate, (revenue, quantity, discount)) =>
(discount >= 5 ) && (discount <= 7) && (quantity >= 26) && (quantity <= 35)
}
// Join implements the date filter; constant key 1 yields one global sum.
val rddLoDate = rddLoFilter.join(rddDate).map{
case (orderDate, ((revenue, quantity, discount), zero)) =>
(1, revenue)
}
val rddAggregate = rddLoDate.reduceByKey(_ + _)
(rddAggregate, "hand_opt_ssb_1_3")
}
// Machine-generated RDD plan for SSB Q1.3 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1) of the
// '|'-delimited row.
val ssb_1_3 = (sc: SparkContext, dbDir: String) => {
// Parse the lineorder fact table into a 17-field tuple.
val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Predicate push-down: _12 = lo_discount in [5,7], _9 = lo_quantity in [26,35].
val rdd002 = rdd001.filter(x => ((x._12 >= 5) && (x._12 <= 7) && (x._9 >= 26) && (x._9 <= 35)))
// Project (extendedprice = _10, discount = _12, orderdate = _6).
val rdd003 = rdd002.map(x => (x._10, x._12, x._6))
// Parse the ddate dimension (17 fields).
val rdd004 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// _12 = d_weeknuminyear == 6 and _5 = d_year == 1994, per the query.
val rdd005 = rdd004.filter(x => ((x._12 == 6) && (x._5 == 1994)))
// Keep only the date key for the join.
val rdd006 = rdd005.map(x => Tuple1(x._1))
// Key the lineorder side by orderdate and the date side by datekey.
val rdd007 = rdd003.map(x => (x._3, x))
val rdd008 = rdd006.map(x => (x._1, x))
// Inner join on the date key; keep only the joined value pairs.
val rdd009 = rdd007.join(rdd008).map(x => x._2)
// Constant key 1: the query computes one global aggregate.
val rdd010 = rdd009.map(x => (1, x._1._1, x._1._2))
// groupBy + map emulate sum(lo_extendedprice * lo_discount) over all rows.
val rdd011 = rdd010.groupBy(x => Tuple1(1))
val rdd012 = rdd011.map(x => (1, x._2.map(x => (x._2 * x._3)).sum))
(rdd012, "ssb_1_3")
}
/**
*
* select sum(lo_revenue),d_year,p_brand1
* from lineorder,ddate,part,supplier
* where lo_orderdate = d_datekey
* and lo_partkey = p_partkey
* and lo_suppkey = s_suppkey
* and p_category = 'MFGR#12'
* and s_region = 'AMERICA'
* group by d_year,p_brand1
* order by d_year,p_brand1
*
*/
val hand_opt_ssb_2_1 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q2.1 (SQL above): revenue grouped by (d_year, p_brand1)
  // for parts in category MFGR#12 sold by AMERICA suppliers.
  // Date dimension: raw datekey string -> year.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0), cols(4).toInt)
  }
  // Fact table keyed by order date; value nests supplier key, part key, revenue.
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5), (cols(4).toInt, (cols(3).toInt, cols(12).toFloat)))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, (partKey, revenue)), year)) =>
      (suppKey, (partKey, revenue, year))
  }
  // Suppliers restricted to region AMERICA before the join.
  val americanSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(5))
  }.filter { case (_, region) => region == "AMERICA" }
  val withSupplier = withYear.join(americanSuppliers).map {
    case (_, ((partKey, revenue, year), _)) => (partKey, (year, revenue))
  }
  // Parts restricted to category MFGR#12; brand1 becomes part of the group key.
  val categoryParts = sc.textFile(dbDir + "/part*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(3), cols(4)))
  }.filter { case (_, (category, _)) => category == "MFGR#12" }
  val keyed = withSupplier.join(categoryParts).map {
    case (_, ((year, revenue), (_, brand1))) => ((year, brand1), revenue)
  }
  // Sum revenue per (year, brand1) and sort the keys into one partition.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_2_1")
}
// Machine-generated RDD plan for SSB Q2.1 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_2_1 = (sc: SparkContext, dbDir: String) => {
// Parse the lineorder fact table (17 fields).
val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, suppkey = _5, partkey = _4, orderdate = _6).
val rdd002 = rdd001.map(x => (x._13, x._5, x._4, x._6))
// Parse the ddate dimension (17 fields).
val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// Project (year = _5, datekey = _1).
val rdd004 = rdd003.map(x => (x._5, x._1))
// Join lineorder.orderdate with ddate.datekey.
val rdd005 = rdd002.map(x => (x._4, x))
val rdd006 = rdd004.map(x => (x._2, x))
val rdd007 = rdd005.join(rdd006).map(x => x._2)
// Re-shape to (year, revenue, suppkey, partkey).
val rdd008 = rdd007.map(x => (x._2._1, x._1._1, x._1._2, x._1._3))
// Parse part (9 fields); keep category MFGR#12 (_4); project (brand1 = _5, partkey = _1).
val rdd009 = sc.textFile(dbDir + "/part*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
})
val rdd010 = rdd009.filter(x => (x._4 == "MFGR#12"))
val rdd011 = rdd010.map(x => (x._5, x._1))
// Join on partkey; re-shape to (year, brand1, revenue, suppkey).
val rdd012 = rdd008.map(x => (x._4, x))
val rdd013 = rdd011.map(x => (x._2, x))
val rdd014 = rdd012.join(rdd013).map(x => x._2)
val rdd015 = rdd014.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse supplier (7 fields); keep region AMERICA (_6); keep only suppkey.
val rdd016 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd017 = rdd016.filter(x => (x._6 == "AMERICA"))
val rdd018 = rdd017.map(x => Tuple1(x._1))
// Semi-join on suppkey; drop the supplier side afterwards.
val rdd019 = rdd015.map(x => (x._4, x))
val rdd020 = rdd018.map(x => (x._1, x))
val rdd021 = rdd019.join(rdd020).map(x => x._2)
val rdd022 = rdd021.map(x => (x._1._1, x._1._2, x._1._3))
// Group by (year, brand1), sum revenue, and order by the group key.
val rdd023 = rdd022.groupBy(x => (x._1, x._2))
val rdd024 = rdd023.map(x => ((x._1._1, x._1._2), x._2.map(x => x._3).sum))
val rddOrderBy = rdd024.sortByKey(true, 1)
(rddOrderBy, "ssb_2_1")
}
/**
* select sum(lo_revenue),d_year,p_brand1
* from lineorder,ddate,part,supplier
* where lo_orderdate = d_datekey
* and lo_partkey = p_partkey
* and lo_suppkey = s_suppkey
* and p_brand1 >= 'MFGR#2221'
* and p_brand1 <= 'MFGR#2228'
* and s_region = 'ASIA'
* group by d_year,p_brand1
* order by d_year,p_brand1
*/
val hand_opt_ssb_2_2 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q2.2 (SQL above): revenue grouped by (d_year, p_brand1)
  // for parts with brand1 in [MFGR#2221, MFGR#2228] and ASIA suppliers.
  // Date dimension: raw datekey string -> year.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0), cols(4).toInt)
  }
  // Fact table keyed by order date; value nests supplier key, part key, revenue.
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5), (cols(4).toInt, (cols(3).toInt, cols(12).toFloat)))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, (partKey, revenue)), year)) =>
      (suppKey, (partKey, revenue, year))
  }
  // Suppliers restricted to region ASIA before the join.
  val asianSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(5))
  }.filter { case (_, region) => region == "ASIA" }
  val withSupplier = withYear.join(asianSuppliers).map {
    case (_, ((partKey, revenue, year), _)) => (partKey, (year, revenue))
  }
  // Parts restricted to the brand1 range (lexicographic comparison, as in the
  // original); brand1 becomes part of the group key.
  val brandParts = sc.textFile(dbDir + "/part*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(3), cols(4)))
  }.filter { case (_, (_, brand1)) =>
    brand1 >= "MFGR#2221" && brand1 <= "MFGR#2228"
  }
  val keyed = withSupplier.join(brandParts).map {
    case (_, ((year, revenue), (_, brand1))) => ((year, brand1), revenue)
  }
  // Sum revenue per (year, brand1) and sort the keys into one partition.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_2_2")
}
// Machine-generated RDD plan for SSB Q2.2 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_2_2 = (sc: SparkContext, dbDir: String) => {
// Parse the lineorder fact table (17 fields).
val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, suppkey = _5, partkey = _4, orderdate = _6).
val rdd002 = rdd001.map(x => (x._13, x._5, x._4, x._6))
// Parse the ddate dimension (17 fields).
val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// Project (year = _5, datekey = _1).
val rdd004 = rdd003.map(x => (x._5, x._1))
// Join lineorder.orderdate with ddate.datekey.
val rdd005 = rdd002.map(x => (x._4, x))
val rdd006 = rdd004.map(x => (x._2, x))
val rdd007 = rdd005.join(rdd006).map(x => x._2)
// Re-shape to (year, revenue, suppkey, partkey).
val rdd008 = rdd007.map(x => (x._2._1, x._1._1, x._1._2, x._1._3))
// Parse part (9 fields); keep brand1 (_5) in the lexicographic range; project (brand1, partkey).
val rdd009 = sc.textFile(dbDir + "/part*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
})
val rdd010 = rdd009.filter(x => ((x._5 >= "MFGR#2221") && (x._5 <= "MFGR#2228")))
val rdd011 = rdd010.map(x => (x._5, x._1))
// Join on partkey; re-shape to (year, brand1, revenue, suppkey).
val rdd012 = rdd008.map(x => (x._4, x))
val rdd013 = rdd011.map(x => (x._2, x))
val rdd014 = rdd012.join(rdd013).map(x => x._2)
val rdd015 = rdd014.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse supplier (7 fields); keep region ASIA (_6); keep only suppkey.
val rdd016 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd017 = rdd016.filter(x => (x._6 == "ASIA"))
val rdd018 = rdd017.map(x => Tuple1(x._1))
// Semi-join on suppkey; drop the supplier side afterwards.
val rdd019 = rdd015.map(x => (x._4, x))
val rdd020 = rdd018.map(x => (x._1, x))
val rdd021 = rdd019.join(rdd020).map(x => x._2)
val rdd022 = rdd021.map(x => (x._1._1, x._1._2, x._1._3))
// Group by (year, brand1), sum revenue, and order by the group key.
val rdd023 = rdd022.groupBy(x => (x._1, x._2))
val rdd024 = rdd023.map(x => ((x._1._1, x._1._2), x._2.map(x => x._3).sum))
val rddOrderBy = rdd024.sortByKey(true, 1)
(rddOrderBy, "ssb_2_2")
}
/**
* select sum(lo_revenue),d_year,p_brand1
* from lineorder,ddate,part,supplier
* where lo_orderdate = d_datekey
* and lo_partkey = p_partkey
* and lo_suppkey = s_suppkey
* and p_brand1 = 'MFGR#2239'
* and s_region = 'EUROPE'
* group by d_year,p_brand1
* order by d_year,p_brand1
*/
val hand_opt_ssb_2_3 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q2.3 (SQL above): revenue grouped by (d_year, p_brand1)
  // for the single brand MFGR#2239 and EUROPE suppliers.
  // Date dimension: raw datekey string -> year.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0), cols(4).toInt)
  }
  // Fact table keyed by order date; value nests supplier key, part key, revenue.
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5), (cols(4).toInt, (cols(3).toInt, cols(12).toFloat)))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, (partKey, revenue)), year)) =>
      (suppKey, (partKey, revenue, year))
  }
  // Suppliers restricted to region EUROPE before the join.
  val europeanSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(5))
  }.filter { case (_, region) => region == "EUROPE" }
  val withSupplier = withYear.join(europeanSuppliers).map {
    case (_, ((partKey, revenue, year), _)) => (partKey, (year, revenue))
  }
  // Parts restricted to brand1 MFGR#2239; brand1 becomes part of the group key.
  val brandParts = sc.textFile(dbDir + "/part*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(3), cols(4)))
  }.filter { case (_, (_, brand1)) => brand1 == "MFGR#2239" }
  val keyed = withSupplier.join(brandParts).map {
    case (_, ((year, revenue), (_, brand1))) => ((year, brand1), revenue)
  }
  // Sum revenue per (year, brand1) and sort the keys into one partition.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_2_3")
}
// Machine-generated RDD plan for SSB Q2.3 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_2_3 = (sc: SparkContext, dbDir: String) => {
// Parse the lineorder fact table (17 fields).
val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, suppkey = _5, partkey = _4, orderdate = _6).
val rdd002 = rdd001.map(x => (x._13, x._5, x._4, x._6))
// Parse the ddate dimension (17 fields).
val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
// Project (year = _5, datekey = _1).
val rdd004 = rdd003.map(x => (x._5, x._1))
// Join lineorder.orderdate with ddate.datekey.
val rdd005 = rdd002.map(x => (x._4, x))
val rdd006 = rdd004.map(x => (x._2, x))
val rdd007 = rdd005.join(rdd006).map(x => x._2)
// Re-shape to (year, revenue, suppkey, partkey).
val rdd008 = rdd007.map(x => (x._2._1, x._1._1, x._1._2, x._1._3))
// Parse part (9 fields); keep brand1 (_5) == MFGR#2239; project (brand1, partkey).
val rdd009 = sc.textFile(dbDir + "/part*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
})
val rdd010 = rdd009.filter(x => (x._5 == "MFGR#2239"))
val rdd011 = rdd010.map(x => (x._5, x._1))
// Join on partkey; re-shape to (year, brand1, revenue, suppkey).
val rdd012 = rdd008.map(x => (x._4, x))
val rdd013 = rdd011.map(x => (x._2, x))
val rdd014 = rdd012.join(rdd013).map(x => x._2)
val rdd015 = rdd014.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse supplier (7 fields); keep region EUROPE (_6); keep only suppkey.
val rdd016 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd017 = rdd016.filter(x => (x._6 == "EUROPE"))
val rdd018 = rdd017.map(x => Tuple1(x._1))
// Semi-join on suppkey; drop the supplier side afterwards.
val rdd019 = rdd015.map(x => (x._4, x))
val rdd020 = rdd018.map(x => (x._1, x))
val rdd021 = rdd019.join(rdd020).map(x => x._2)
val rdd022 = rdd021.map(x => (x._1._1, x._1._2, x._1._3))
// Group by (year, brand1), sum revenue, and order by the group key.
val rdd023 = rdd022.groupBy(x => (x._1, x._2))
val rdd024 = rdd023.map(x => ((x._1._1, x._1._2), x._2.map(x => x._3).sum))
val rddOrderBy = rdd024.sortByKey(true, 1)
(rddOrderBy, "ssb_2_3")
}
/**
* select c_nation,s_nation,d_year,sum(lo_revenue) as revenue
* from customer,lineorder,supplier,ddate
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_orderdate = d_datekey
* and c_region = 'ASIA'
* and s_region = 'ASIA'
* and d_year >=1992 and d_year <= 1997
* group by c_nation,s_nation,d_year
* order by d_year asc,revenue desc
*/
val hand_opt_ssb_3_1 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q3.1 (SQL above): revenue by (c_nation, s_nation, d_year)
  // for ASIA customers and suppliers, years 1992-1997.
  // Date dimension keyed by the integer datekey, pre-filtered to 1992-1997.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(4).toInt)
  }.filter { case (_, year) => year >= 1992 && year <= 1997 }
  // Fact table keyed by order date with (suppkey, custkey, revenue).
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5).toInt, (cols(4).toInt, cols(2).toInt, cols(12).toFloat))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, custKey, revenue), year)) =>
      (suppKey, (custKey, year, revenue))
  }
  // Suppliers in region ASIA; keep the nation for the group key.
  val asianSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(4), cols(5)))
  }.filter { case (_, (_, region)) => region == "ASIA" }
  val withSupplier = withYear.join(asianSuppliers).map {
    case (_, ((custKey, year, revenue), (suppNation, _))) =>
      (custKey, (year, suppNation, revenue))
  }
  // Customers in region ASIA; keep the nation for the group key.
  val asianCustomers = sc.textFile(dbDir + "/customer*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(4), cols(5)))
  }.filter { case (_, (_, region)) => region == "ASIA" }
  val keyed = withSupplier.join(asianCustomers).map {
    case (_, ((year, suppNation, revenue), (custNation, _))) =>
      ((custNation, suppNation, year), revenue)
  }
  // Sum revenue per group and sort by key into a single partition.
  // NOTE(review): the SQL asks for "d_year asc, revenue desc"; like the
  // original implementation, this sorts only by the (nation, nation, year) key.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_3_1")
}
// Machine-generated RDD plan for SSB Q3.1 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_3_1 = (sc: SparkContext, dbDir: String) => {
// Parse customer (8 fields); keep region ASIA (_6); project (nation = _5, custkey = _1).
val rdd001 = sc.textFile(dbDir + "/customer*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
})
val rdd002 = rdd001.filter(x => (x._6 == "ASIA"))
val rdd003 = rdd002.map(x => (x._5, x._1))
// Parse the lineorder fact table (17 fields).
val rdd004 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, orderdate = _6, suppkey = _5, custkey = _3).
val rdd005 = rdd004.map(x => (x._13, x._6, x._5, x._3))
// Join customer.custkey with lineorder.custkey.
val rdd006 = rdd003.map(x => (x._2, x))
val rdd007 = rdd005.map(x => (x._4, x))
val rdd008 = rdd006.join(rdd007).map(x => x._2)
// Re-shape to (custNation, revenue, orderdate, suppkey).
val rdd009 = rdd008.map(x => (x._1._1, x._2._1, x._2._2, x._2._3))
// Parse supplier (7 fields); keep region ASIA (_6); project (nation = _5, suppkey = _1).
val rdd010 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd011 = rdd010.filter(x => (x._6 == "ASIA"))
val rdd012 = rdd011.map(x => (x._5, x._1))
// Join on suppkey; re-shape to (custNation, suppNation, revenue, orderdate).
val rdd013 = rdd009.map(x => (x._4, x))
val rdd014 = rdd012.map(x => (x._2, x))
val rdd015 = rdd013.join(rdd014).map(x => x._2)
val rdd016 = rdd015.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse ddate (17 fields); keep years 1992-1997 (_5); project (year, datekey).
val rdd017 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
val rdd018 = rdd017.filter(x => ((x._5 >= 1992) && (x._5 <= 1997)))
val rdd019 = rdd018.map(x => (x._5, x._1))
// Join on orderdate = datekey; re-shape to (custNation, suppNation, year, revenue).
val rdd020 = rdd016.map(x => (x._4, x))
val rdd021 = rdd019.map(x => (x._2, x))
val rdd022 = rdd020.join(rdd021).map(x => x._2)
val rdd023 = rdd022.map(x => (x._1._1, x._1._2, x._2._1, x._1._3))
// Group by (custNation, suppNation, year), sum revenue, order by the key.
val rdd024 = rdd023.groupBy(x => (x._1, x._2, x._3))
val rdd025 = rdd024.map(x => ((x._1._1, x._1._2, x._1._3), x._2.map(x => x._4).sum))
val rddOrderBy = rdd025.sortByKey(true, 1)
(rddOrderBy, "ssb_3_1")
}
/**
* select c_city,s_city,d_year,sum(lo_revenue) as revenue
* from customer,lineorder,supplier,ddate
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_orderdate = d_datekey
* and c_nation = 'UNITED STATES'
* and s_nation = 'UNITED STATES'
* and d_year >=1992 and d_year <= 1997
* group by c_city,s_city,d_year
* order by d_year asc,revenue desc
*/
val hand_opt_ssb_3_2 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q3.2 (SQL above): revenue by (c_city, s_city, d_year)
  // for UNITED STATES customers and suppliers, years 1992-1997.
  // Date dimension keyed by the integer datekey, pre-filtered to 1992-1997.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(4).toInt)
  }.filter { case (_, year) => year >= 1992 && year <= 1997 }
  // Fact table keyed by order date with (suppkey, custkey, revenue).
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5).toInt, (cols(4).toInt, cols(2).toInt, cols(12).toFloat))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, custKey, revenue), year)) =>
      (suppKey, (custKey, year, revenue))
  }
  // Suppliers in nation UNITED STATES; keep the city for the group key.
  val usSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(3), cols(4)))
  }.filter { case (_, (_, nation)) => nation == "UNITED STATES" }
  val withSupplier = withYear.join(usSuppliers).map {
    case (_, ((custKey, year, revenue), (suppCity, _))) =>
      (custKey, (year, suppCity, revenue))
  }
  // Customers in nation UNITED STATES; keep the city for the group key.
  val usCustomers = sc.textFile(dbDir + "/customer*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(3), cols(4)))
  }.filter { case (_, (_, nation)) => nation == "UNITED STATES" }
  val keyed = withSupplier.join(usCustomers).map {
    case (_, ((year, suppCity, revenue), (custCity, _))) =>
      ((custCity, suppCity, year), revenue)
  }
  // Sum revenue per group and sort by key into a single partition.
  // NOTE(review): the SQL asks for "d_year asc, revenue desc"; like the
  // original implementation, this sorts only by the (city, city, year) key.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_3_2")
}
// Machine-generated RDD plan for SSB Q3.2 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_3_2 = (sc: SparkContext, dbDir: String) => {
// Parse customer (8 fields); keep nation UNITED STATES (_5); project (city = _4, custkey = _1).
val rdd001 = sc.textFile(dbDir + "/customer*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
})
val rdd002 = rdd001.filter(x => (x._5 == "UNITED STATES"))
val rdd003 = rdd002.map(x => (x._4, x._1))
// Parse the lineorder fact table (17 fields).
val rdd004 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, orderdate = _6, suppkey = _5, custkey = _3).
val rdd005 = rdd004.map(x => (x._13, x._6, x._5, x._3))
// Join customer.custkey with lineorder.custkey.
val rdd006 = rdd003.map(x => (x._2, x))
val rdd007 = rdd005.map(x => (x._4, x))
val rdd008 = rdd006.join(rdd007).map(x => x._2)
// Re-shape to (custCity, revenue, orderdate, suppkey).
val rdd009 = rdd008.map(x => (x._1._1, x._2._1, x._2._2, x._2._3))
// Parse supplier (7 fields); keep nation UNITED STATES (_5); project (city = _4, suppkey = _1).
val rdd010 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd011 = rdd010.filter(x => (x._5 == "UNITED STATES"))
val rdd012 = rdd011.map(x => (x._4, x._1))
// Join on suppkey; re-shape to (custCity, suppCity, revenue, orderdate).
val rdd013 = rdd009.map(x => (x._4, x))
val rdd014 = rdd012.map(x => (x._2, x))
val rdd015 = rdd013.join(rdd014).map(x => x._2)
val rdd016 = rdd015.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse ddate (17 fields); keep years 1992-1997 (_5); project (year, datekey).
val rdd017 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
val rdd018 = rdd017.filter(x => ((x._5 >= 1992) && (x._5 <= 1997)))
val rdd019 = rdd018.map(x => (x._5, x._1))
// Join on orderdate = datekey; re-shape to (custCity, suppCity, year, revenue).
val rdd020 = rdd016.map(x => (x._4, x))
val rdd021 = rdd019.map(x => (x._2, x))
val rdd022 = rdd020.join(rdd021).map(x => x._2)
val rdd023 = rdd022.map(x => (x._1._1, x._1._2, x._2._1, x._1._3))
// Group by (custCity, suppCity, year), sum revenue, order by the key.
val rdd024 = rdd023.groupBy(x => (x._1, x._2, x._3))
val rdd025 = rdd024.map(x => ((x._1._1, x._1._2, x._1._3), x._2.map(x => x._4).sum))
val rddOrderBy = rdd025.sortByKey(true, 1)
(rddOrderBy, "ssb_3_2")
}
/**
* select c_city,s_city,d_year,sum(lo_revenue) as revenue
* from customer,lineorder,supplier,ddate
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_orderdate = d_datekey
* and (c_city = 'UNITED KI1' or c_city = 'UNITED KI5')
* and (s_city = 'UNITED KI1' or s_city = 'UNITED KI5')
* and d_year >=1992 and d_year <= 1997
* group by c_city,s_city,d_year
* order by d_year asc,revenue desc
*/
val hand_opt_ssb_3_3 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q3.3 (SQL above): revenue by (c_city, s_city, d_year)
  // where both cities are UNITED KI1 or UNITED KI5, years 1992-1997.
  // Date dimension keyed by the integer datekey, pre-filtered to 1992-1997.
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(4).toInt)
  }.filter { case (_, year) => year >= 1992 && year <= 1997 }
  // Fact table keyed by order date with (suppkey, custkey, revenue).
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    (cols(5).toInt, (cols(4).toInt, cols(2).toInt, cols(12).toFloat))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, custKey, revenue), year)) =>
      (suppKey, (custKey, year, revenue))
  }
  // Suppliers whose city is UNITED KI1 or UNITED KI5.
  val kiSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(3))
  }.filter { case (_, city) => city == "UNITED KI1" || city == "UNITED KI5" }
  val withSupplier = withYear.join(kiSuppliers).map {
    case (_, ((custKey, year, revenue), suppCity)) =>
      (custKey, (year, suppCity, revenue))
  }
  // Customers whose city is UNITED KI1 or UNITED KI5.
  val kiCustomers = sc.textFile(dbDir + "/customer*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(3))
  }.filter { case (_, city) => city == "UNITED KI1" || city == "UNITED KI5" }
  val keyed = withSupplier.join(kiCustomers).map {
    case (_, ((year, suppCity, revenue), custCity)) =>
      ((custCity, suppCity, year), revenue)
  }
  // Sum revenue per group and sort by key into a single partition.
  // NOTE(review): the SQL asks for "d_year asc, revenue desc"; like the
  // original implementation, this sorts only by the (city, city, year) key.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_3_3")
}
// Machine-generated RDD plan for SSB Q3.3 (SQL in the preceding comment).
// Positional tuples throughout: _N corresponds to columns(N-1).
val ssb_3_3 = (sc: SparkContext, dbDir: String) => {
// Parse customer (8 fields); keep city (_4) in {UNITED KI1, UNITED KI5}; project (city, custkey).
val rdd001 = sc.textFile(dbDir + "/customer*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
})
val rdd002 = rdd001.filter(x => ((x._4 == "UNITED KI1") || (x._4 == "UNITED KI5")))
val rdd003 = rdd002.map(x => (x._4, x._1))
// Parse the lineorder fact table (17 fields).
val rdd004 = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
})
// Project (revenue = _13, orderdate = _6, suppkey = _5, custkey = _3).
val rdd005 = rdd004.map(x => (x._13, x._6, x._5, x._3))
// Join customer.custkey with lineorder.custkey.
val rdd006 = rdd003.map(x => (x._2, x))
val rdd007 = rdd005.map(x => (x._4, x))
val rdd008 = rdd006.join(rdd007).map(x => x._2)
// Re-shape to (custCity, revenue, orderdate, suppkey).
val rdd009 = rdd008.map(x => (x._1._1, x._2._1, x._2._2, x._2._3))
// Parse supplier (7 fields); keep city (_4) in {UNITED KI1, UNITED KI5}; project (city, suppkey).
val rdd010 = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
})
val rdd011 = rdd010.filter(x => ((x._4 == "UNITED KI1") || (x._4 == "UNITED KI5")))
val rdd012 = rdd011.map(x => (x._4, x._1))
// Join on suppkey; re-shape to (custCity, suppCity, revenue, orderdate).
val rdd013 = rdd009.map(x => (x._4, x))
val rdd014 = rdd012.map(x => (x._2, x))
val rdd015 = rdd013.join(rdd014).map(x => x._2)
val rdd016 = rdd015.map(x => (x._1._1, x._2._1, x._1._2, x._1._3))
// Parse ddate (17 fields); keep years 1992-1997 (_5); project (year, datekey).
val rdd017 = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
})
val rdd018 = rdd017.filter(x => ((x._5 >= 1992) && (x._5 <= 1997)))
val rdd019 = rdd018.map(x => (x._5, x._1))
// Join on orderdate = datekey; re-shape to (custCity, suppCity, year, revenue).
val rdd020 = rdd016.map(x => (x._4, x))
val rdd021 = rdd019.map(x => (x._2, x))
val rdd022 = rdd020.join(rdd021).map(x => x._2)
val rdd023 = rdd022.map(x => (x._1._1, x._1._2, x._2._1, x._1._3))
// Group by (custCity, suppCity, year), sum revenue, order by the key.
val rdd024 = rdd023.groupBy(x => (x._1, x._2, x._3))
val rdd025 = rdd024.map(x => ((x._1._1, x._1._2, x._1._3), x._2.map(x => x._4).sum))
val rddOrderBy = rdd025.sortByKey(true, 1)
(rddOrderBy, "ssb_3_3")
}
/**
* select d_year,c_nation,sum(lo_revenue-lo_supplycost) as profit
* from lineorder,ddate,customer,supplier,part
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_partkey = p_partkey
* and lo_orderdate = d_datekey
* and c_region = 'AMERICA'
* and s_region = 'AMERICA'
* and (p_mfgr = 'MFGR#1' or p_mfgr = 'MFGR#2')
* group by d_year,c_nation
* order by d_year,c_nation
*/
val hand_opt_ssb_4_1 = (sc: SparkContext, dbDir: String) => {
  // Hand-optimized SSB Q4.1 (SQL above): profit = lo_revenue - lo_supplycost,
  // grouped by (d_year, c_nation), for AMERICA customers and suppliers and
  // parts manufactured by MFGR#1 or MFGR#2.
  // Date dimension: integer datekey -> year (no filter in this query).
  val dates = sc.textFile(dbDir + "/ddate*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(4).toInt)
  }
  // Fact table keyed by order date; profit is computed at parse time.
  val lineOrders = sc.textFile(dbDir + "/lineorder*").map { line =>
    val cols = line.split("\\\\|")
    val profit = cols(12).toFloat - cols(13).toFloat
    (cols(5).toInt, (cols(4).toInt, cols(2).toInt, cols(3).toInt, profit))
  }
  // Attach the year, then re-key by supplier.
  val withYear = lineOrders.join(dates).map {
    case (_, ((suppKey, custKey, partKey, profit), year)) =>
      (suppKey, (custKey, partKey, profit, year))
  }
  // Suppliers restricted to region AMERICA.
  val americanSuppliers = sc.textFile(dbDir + "/supplier*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(5))
  }.filter { case (_, region) => region == "AMERICA" }
  val withSupplier = withYear.join(americanSuppliers).map {
    case (_, ((custKey, partKey, profit, year), _)) =>
      (custKey, (partKey, profit, year))
  }
  // Customers restricted to region AMERICA; keep the nation for the group key.
  val americanCustomers = sc.textFile(dbDir + "/customer*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, (cols(4), cols(5)))
  }.filter { case (_, (_, region)) => region == "AMERICA" }
  val withCustomer = withSupplier.join(americanCustomers).map {
    case (_, ((partKey, profit, year), (custNation, _))) =>
      (partKey, ((year, custNation), profit))
  }
  // Parts whose manufacturer is MFGR#1 or MFGR#2.
  val mfgrParts = sc.textFile(dbDir + "/part*").map { line =>
    val cols = line.split("\\\\|")
    (cols(0).toInt, cols(2))
  }.filter { case (_, mfgr) => mfgr == "MFGR#1" || mfgr == "MFGR#2" }
  // After the part semi-join only the ((year, nation), profit) pair survives.
  val keyed = withCustomer.join(mfgrParts).map {
    case (_, (yearNationProfit, _)) => yearNationProfit
  }
  // Sum profit per (year, nation) and sort the keys into one partition.
  val ordered = keyed.reduceByKey(_ + _).sortByKey(true, 1)
  (ordered, "hand_opt_ssb_4_1")
}
  // Naive (machine-translated) Spark version of SSB Q4.1: sums
  // lo_revenue - lo_supplycost grouped by (d_year, c_nation) with
  // c_region = 'AMERICA', s_region = 'AMERICA' and p_mfgr in {MFGR#1, MFGR#2}.
  // The rddNNN names mirror the generator's output order; tuples are accessed
  // positionally (_1.._17 correspond to the pipe-separated source columns).
  val ssb_4_1 = (sc: SparkContext, dbDir: String) => {
    // lineorder, fully parsed (17 columns).
    val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
    })
    // project: (lo_revenue, lo_supplycost, lo_partkey, lo_suppkey, lo_custkey, lo_orderdate)
    val rdd002 = rdd001.map(x => (x._13, x._14, x._4, x._5, x._3, x._6))
    // ddate, fully parsed.
    val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
    })
    // (d_year, d_datekey), then join lineorder with ddate on orderdate = datekey.
    val rdd004 = rdd003.map(x => (x._5, x._1))
    val rdd005 = rdd002.map(x => (x._6, x))
    val rdd006 = rdd004.map(x => (x._2, x))
    val rdd007 = rdd005.join(rdd006).map(x => x._2)
    // (d_year, revenue, supplycost, partkey, suppkey, custkey)
    val rdd008 = rdd007.map(x => (x._2._1, x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // customer, filtered to c_region = 'AMERICA'; keep (c_nation, c_custkey).
    val rdd009 = sc.textFile(dbDir + "/customer*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
    })
    val rdd010 = rdd009.filter(x => (x._6 == "AMERICA"))
    val rdd011 = rdd010.map(x => (x._5, x._1))
    val rdd012 = rdd008.map(x => (x._6, x))
    val rdd013 = rdd011.map(x => (x._2, x))
    val rdd014 = rdd012.join(rdd013).map(x => x._2)
    // (d_year, c_nation, revenue, supplycost, partkey, suppkey)
    val rdd015 = rdd014.map(x => (x._1._1, x._2._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // supplier, filtered to s_region = 'AMERICA'; only the key survives.
    val rdd016 = sc.textFile(dbDir + "/supplier*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
    })
    val rdd017 = rdd016.filter(x => (x._6 == "AMERICA"))
    val rdd018 = rdd017.map(x => Tuple1(x._1))
    val rdd019 = rdd015.map(x => (x._6, x))
    val rdd020 = rdd018.map(x => (x._1, x))
    val rdd021 = rdd019.join(rdd020).map(x => x._2)
    val rdd022 = rdd021.map(x => (x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // part, filtered to p_mfgr in {MFGR#1, MFGR#2}; only the key is used in the join.
    val rdd023 = sc.textFile(dbDir + "/part*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
    })
    val rdd024 = rdd023.filter(x => ((x._3 == "MFGR#1") || (x._3 == "MFGR#2")))
    val rdd025 = rdd024.map(x => (x._3, x._1))
    val rdd026 = rdd022.map(x => (x._5, x))
    val rdd027 = rdd025.map(x => (x._2, x))
    val rdd028 = rdd026.join(rdd027).map(x => x._2)
    // (d_year, c_nation, revenue, supplycost): group, sum profit, order by key.
    val rdd029 = rdd028.map(x => (x._1._1, x._1._2, x._1._3, x._1._4))
    val rdd030 = rdd029.groupBy(x => (x._1, x._2))
    val rdd031 = rdd030.map(x => ((x._1._1, x._1._2), x._2.map(x => (x._3 - x._4)).sum))
    val rddOrderBy = rdd031.sortByKey(true, 1)
    (rddOrderBy, "ssb_4_1")
  }
/**
* select d_year,s_nation,p_category,sum(lo_revenue-lo_supplycost) as profit
* from lineorder,ddate,customer,supplier,part
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_partkey = p_partkey
* and lo_orderdate = d_datekey
* and c_region = 'AMERICA'
* and s_region = 'AMERICA'
* and (d_year = 1997 or d_year = 1998)
* and (p_mfgr = 'MFGR#1' or p_mfgr = 'MFGR#2')
* group by d_year,s_nation, p_category
* order by d_year,s_nation, p_category
*/
val hand_opt_ssb_4_2 = (sc: SparkContext, dbDir: String) => {
val rddDate = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(4).toInt)
})
val rddDateFilter = rddDate.filter { case (dateKey, year) => year == 1997 || year == 1998}
val rddLineOrder = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
val orderDate = columns(5).toInt
val customerKey = columns(2).toInt
val supplierKey = columns(4).toInt
val partKey = columns(3).toInt
val revenue = columns(12).toFloat
val supplyCost = columns(13).toFloat
(orderDate, (supplierKey, customerKey, partKey, revenue - supplyCost))
})
val rddLoDate = rddLineOrder.join(rddDateFilter).map {
case (key, ((supplierKey, customerKey, partKey, profit), year)) =>
(supplierKey, (customerKey, partKey, profit, year))
}
val rddSupplier = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
val supplierKey = columns(0).toInt
val supplierNation = columns(4)
val supplierRegion = columns(5)
(supplierKey, (supplierNation, supplierRegion))
})
val rddSupplierFilter = rddSupplier.filter { case (supplierKey, (supplierNation, supplierRegion)) =>
supplierRegion == "AMERICA"
}
val rddLoDateSupplier = rddLoDate.join(rddSupplierFilter).map {
case (supplyKey, ((customerKey, partKey, profit, year), (supplierNation, supplierRegion))) =>
(customerKey, (partKey, profit, year, supplierNation))
}
val rddCustomer = sc.textFile(dbDir + "/customer*").map(line => {
val columns = line.split("\\\\|")
val customerKey = columns(0).toInt
val customerRegion = columns(5)
(customerKey, customerRegion)
})
val rddCustomerFilter = rddCustomer.filter {
case (customerKey, customerRegion) =>
customerRegion == "AMERICA"
}
val rddLoDateSupplierCustomer = rddLoDateSupplier.join(rddCustomerFilter).map {
case (customerKey, ((partKey, profit, year, supplierNation), customerNation)) =>
(partKey, (year, supplierNation, profit))
}
val rddPart = sc.textFile(dbDir + "/part*").map(line => {
val columns = line.split("\\\\|")
val partKey = columns(0).toInt
val partMfgr = columns(2)
val partCategory = columns(3)
(partKey, (partCategory, partMfgr))
})
val rddPartFilter = rddPart.filter {
case (partKey, (partCategory, partMfgr)) =>
partMfgr == "MFGR#1" || partMfgr == "MFGR#2"
}
val rddLoDateSupplierCustomerPart = rddLoDateSupplierCustomer.join(rddPartFilter).map {
case (partKey, ((year, supplierNation, profit), (partCategory, partMfgr))) =>
((year, supplierNation, partCategory), profit)
}
val rddAggregate = rddLoDateSupplierCustomerPart.reduceByKey(_ + _)
val rddOrderBy = rddAggregate.sortByKey(true, 1)
(rddOrderBy, "hand_opt_ssb_4_2")
}
  // Naive (machine-translated) Spark version of SSB Q4.2: sums
  // lo_revenue - lo_supplycost grouped by (d_year, s_nation, p_category) with
  // c_region = 'AMERICA', s_region = 'AMERICA', d_year in {1997, 1998} and
  // p_mfgr in {MFGR#1, MFGR#2}. Tuples are accessed positionally.
  val ssb_4_2 = (sc: SparkContext, dbDir: String) => {
    // lineorder, fully parsed (17 columns).
    val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
    })
    // project: (lo_revenue, lo_supplycost, lo_partkey, lo_suppkey, lo_custkey, lo_orderdate)
    val rdd002 = rdd001.map(x => (x._13, x._14, x._4, x._5, x._3, x._6))
    // ddate, fully parsed; filtered to d_year in {1997, 1998}.
    val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
    })
    val rdd004 = rdd003.filter(x => ((x._5 == 1997) || (x._5 == 1998)))
    val rdd005 = rdd004.map(x => (x._5, x._1))
    val rdd006 = rdd002.map(x => (x._6, x))
    val rdd007 = rdd005.map(x => (x._2, x))
    val rdd008 = rdd006.join(rdd007).map(x => x._2)
    // (d_year, revenue, supplycost, partkey, suppkey, custkey)
    val rdd009 = rdd008.map(x => (x._2._1, x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // customer, filtered to c_region = 'AMERICA'; only the key survives.
    val rdd010 = sc.textFile(dbDir + "/customer*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
    })
    val rdd011 = rdd010.filter(x => (x._6 == "AMERICA"))
    val rdd012 = rdd011.map(x => Tuple1(x._1))
    val rdd013 = rdd009.map(x => (x._6, x))
    val rdd014 = rdd012.map(x => (x._1, x))
    val rdd015 = rdd013.join(rdd014).map(x => x._2)
    val rdd016 = rdd015.map(x => (x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // supplier, filtered to s_region = 'AMERICA'; keep (s_nation, s_suppkey).
    val rdd017 = sc.textFile(dbDir + "/supplier*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
    })
    val rdd018 = rdd017.filter(x => (x._6 == "AMERICA"))
    val rdd019 = rdd018.map(x => (x._5, x._1))
    val rdd020 = rdd016.map(x => (x._5, x))
    val rdd021 = rdd019.map(x => (x._2, x))
    val rdd022 = rdd020.join(rdd021).map(x => x._2)
    // (d_year, s_nation, revenue, supplycost, partkey)
    val rdd023 = rdd022.map(x => (x._1._1, x._2._1, x._1._2, x._1._3, x._1._4))
    // part, filtered to p_mfgr in {MFGR#1, MFGR#2}; keep (p_category, p_mfgr, p_partkey).
    val rdd024 = sc.textFile(dbDir + "/part*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
    })
    val rdd025 = rdd024.filter(x => ((x._3 == "MFGR#1") || (x._3 == "MFGR#2")))
    val rdd026 = rdd025.map(x => (x._4, x._3, x._1))
    val rdd027 = rdd023.map(x => (x._5, x))
    val rdd028 = rdd026.map(x => (x._3, x))
    val rdd029 = rdd027.join(rdd028).map(x => x._2)
    // (d_year, s_nation, p_category, revenue, supplycost): group, sum, order by key.
    val rdd030 = rdd029.map(x => (x._1._1, x._1._2, x._2._1, x._1._3, x._1._4))
    val rdd031 = rdd030.groupBy(x => (x._1, x._2, x._3))
    val rdd032 = rdd031.map(x => ((x._1._1, x._1._2, x._1._3), x._2.map(x => (x._4 - x._5)).sum))
    val rddOrderBy = rdd032.sortByKey(true, 1)
    (rddOrderBy, "ssb_4_2")
  }
/**
* select d_year,s_city,p_brand1,sum(lo_revenue-lo_supplycost) as profit
* from lineorder,ddate,customer,supplier,part
* where lo_custkey = c_custkey
* and lo_suppkey = s_suppkey
* and lo_partkey = p_partkey
* and lo_orderdate = d_datekey
* and s_nation = 'UNITED STATES'
* and (d_year = 1997 or d_year = 1998)
* and p_category = 'MFGR#14'
* group by d_year,s_city,p_brand1
* order by d_year,s_city,p_brand1
*/
val hand_opt_ssb_4_3 = (sc: SparkContext, dbDir: String) => {
val rddDate = sc.textFile(dbDir + "/ddate*").map(line => {
val columns = line.split("\\\\|")
(columns(0).toInt, columns(4).toInt)
})
val rddDateFilter = rddDate.filter { case (dateKey, year) => year == 1997 || year == 1998}
val rddLineOrder = sc.textFile(dbDir + "/lineorder*").map(line => {
val columns = line.split("\\\\|")
val orderDate = columns(5).toInt
val customerKey = columns(2).toInt
val supplierKey = columns(4).toInt
val partKey = columns(3).toInt
val revenue = columns(12).toFloat
val supplyCost = columns(13).toFloat
(orderDate, (supplierKey, customerKey, partKey, revenue - supplyCost))
})
val rddLoDate = rddLineOrder.join(rddDateFilter).map {
case (key, ((supplierKey, customerKey, partKey, profit), year)) =>
(supplierKey, (customerKey, partKey, profit, year))
}
val rddSupplier = sc.textFile(dbDir + "/supplier*").map(line => {
val columns = line.split("\\\\|")
val supplierKey = columns(0).toInt
val supplierCity = columns(3)
val supplierNation = columns(4)
(supplierKey, (supplierNation, supplierCity))
})
val rddSupplierFilter = rddSupplier.filter { case (supplierKey, (supplierNation, supplierCity)) =>
supplierNation == "UNITED STATES"
}
val rddLoDateSupplier = rddLoDate.join(rddSupplierFilter).map {
case (supplyKey, ((customerKey, partKey, profit, year), (supplierNation, supplierCity))) =>
(customerKey, (partKey, profit, year, supplierCity))
}
val rddCustomer = sc.textFile(dbDir + "/customer*").map(line => {
val columns = line.split("\\\\|")
val customerKey = columns(0).toInt
(customerKey, 0)
})
val rddLoDateSupplierCustomer = rddLoDateSupplier.join(rddCustomer).map {
case (customerKey, ((partKey, profit, year, supplierCity), _)) =>
(partKey, (year, supplierCity, profit))
}
val rddPart = sc.textFile(dbDir + "/part*").map(line => {
val columns = line.split("\\\\|")
val partKey = columns(0).toInt
val partCategory = columns(3)
val partBrand1 = columns(4)
(partKey, (partBrand1, partCategory))
})
val rddPartFilter = rddPart.filter {
case (partKey, (partBrand1, partCategory)) =>
partCategory == "MFGR#14"
}
val rddLoDateSupplierCustomerPart = rddLoDateSupplierCustomer.join(rddPartFilter).map {
case (partKey, ((year, supplierNation, profit), (partBrand1, partCategory))) =>
((year, supplierNation, partBrand1), profit)
}
val rddAggregate = rddLoDateSupplierCustomerPart.reduceByKey(_ + _)
val rddOrderBy = rddAggregate.sortByKey(true, 1)
(rddOrderBy, "hand_opt_ssb_4_3")
}
  // Naive (machine-translated) Spark version of SSB Q4.3: sums
  // lo_revenue - lo_supplycost grouped by (d_year, s_city, p_brand1) with
  // s_nation = 'UNITED STATES', d_year in {1997, 1998} and
  // p_category = 'MFGR#14'. Tuples are accessed positionally.
  val ssb_4_3 = (sc: SparkContext, dbDir: String) => {
    // lineorder, fully parsed (17 columns).
    val rdd001 = sc.textFile(dbDir + "/lineorder*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1).toInt, columns(2).toInt, columns(3).toInt, columns(4).toInt, columns(5), columns(6), columns(7), columns(8).toInt, columns(9).toFloat, columns(10).toFloat, columns(11).toInt, columns(12).toFloat, columns(13).toFloat, columns(14).toInt, columns(15), columns(16))
    })
    // project: (lo_revenue, lo_supplycost, lo_partkey, lo_suppkey, lo_custkey, lo_orderdate)
    val rdd002 = rdd001.map(x => (x._13, x._14, x._4, x._5, x._3, x._6))
    // ddate, fully parsed; filtered to d_year in {1997, 1998}.
    val rdd003 = sc.textFile(dbDir + "/ddate*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0), columns(1), columns(2), columns(3), columns(4).toInt, columns(5).toInt, columns(6), columns(7).toInt, columns(8).toInt, columns(9).toInt, columns(10).toInt, columns(11).toInt, columns(12), columns(13), columns(14), columns(15), columns(16))
    })
    val rdd004 = rdd003.filter(x => ((x._5 == 1997) || (x._5 == 1998)))
    val rdd005 = rdd004.map(x => (x._5, x._1))
    val rdd006 = rdd002.map(x => (x._6, x))
    val rdd007 = rdd005.map(x => (x._2, x))
    val rdd008 = rdd006.join(rdd007).map(x => x._2)
    // (d_year, revenue, supplycost, partkey, suppkey, custkey)
    val rdd009 = rdd008.map(x => (x._2._1, x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // customer is joined without any predicate; only the key is used.
    val rdd010 = sc.textFile(dbDir + "/customer*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7))
    })
    val rdd011 = rdd010.map(x => Tuple1(x._1))
    val rdd012 = rdd009.map(x => (x._6, x))
    val rdd013 = rdd011.map(x => (x._1, x))
    val rdd014 = rdd012.join(rdd013).map(x => x._2)
    val rdd015 = rdd014.map(x => (x._1._1, x._1._2, x._1._3, x._1._4, x._1._5))
    // supplier, filtered to s_nation = 'UNITED STATES'; keep (s_city, s_suppkey).
    val rdd016 = sc.textFile(dbDir + "/supplier*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6))
    })
    val rdd017 = rdd016.filter(x => (x._5 == "UNITED STATES"))
    val rdd018 = rdd017.map(x => (x._4, x._1))
    val rdd019 = rdd015.map(x => (x._5, x))
    val rdd020 = rdd018.map(x => (x._2, x))
    val rdd021 = rdd019.join(rdd020).map(x => x._2)
    // (d_year, s_city, revenue, supplycost, partkey)
    val rdd022 = rdd021.map(x => (x._1._1, x._2._1, x._1._2, x._1._3, x._1._4))
    // part, filtered to p_category = 'MFGR#14'; keep (p_brand1, p_partkey).
    val rdd023 = sc.textFile(dbDir + "/part*").map(line => {
      val columns = line.split("\\\\|")
      (columns(0).toInt, columns(1), columns(2), columns(3), columns(4), columns(5), columns(6), columns(7).toInt, columns(8))
    })
    val rdd024 = rdd023.filter(x => (x._4 == "MFGR#14"))
    val rdd025 = rdd024.map(x => (x._5, x._1))
    val rdd026 = rdd022.map(x => (x._5, x))
    val rdd027 = rdd025.map(x => (x._2, x))
    val rdd028 = rdd026.join(rdd027).map(x => x._2)
    // (d_year, s_city, p_brand1, revenue, supplycost): group, sum, order by key.
    val rdd029 = rdd028.map(x => (x._1._1, x._1._2, x._2._1, x._1._3, x._1._4))
    val rdd030 = rdd029.groupBy(x => (x._1, x._2, x._3))
    val rdd031 = rdd030.map(x => ((x._1._1, x._1._2, x._1._3), x._2.map(x => (x._4 - x._5)).sum))
    val rddOrderBy = rdd031.sortByKey(true, 1)
    (rddOrderBy, "ssb_4_3")
  }
}
| meisam/spark-sql-performance | src/main/scala/edu/osu/cse/fathi/spark/ssb/SsbQueryRunnerOnSpark.scala | Scala | apache-2.0 | 61,229 |
/**
* Copyright (c) 2012-2013, Tomasz Kaczmarzyk.
*
* This file is part of BeanDiff.
*
* BeanDiff is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* BeanDiff is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with BeanDiff; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.beandiff.core
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import java.lang.Long
import java.lang.Double
import java.lang.Boolean
import java.lang.Byte
import org.beandiff.beans.SimpleEnum._
import java.lang.Float
import org.scalatest.junit.JUnitRunner
import org.beandiff.core.model.Path.EmptyPath
/**
 * Verifies that EndOnSimpleTypeStrategy halts the diff descent
 * (shouldProceed == false) for every "simple" leaf type: String, the boxed
 * Java primitives (Integer, Long, Float, Double, Boolean, Character, Byte)
 * and Java enums.
 */
@RunWith(classOf[JUnitRunner])
class EndOnSimpleTypeStrategyTest extends FunSuite with ShouldMatchers {

  test("should end on String") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, "aa", "bb") should be (false)
  }

  test("should end on java Integer") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Integer.valueOf(1), Integer.valueOf(2)) should be (false)
  }

  test("should end on java Long") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Long.valueOf(1), Long.valueOf(2)) should be (false)
  }

  test("should end on java Float") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Float.valueOf(1.0f), Float.valueOf(2.0f)) should be (false)
  }

  test("should end on java Double") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Double.valueOf(1.0), Double.valueOf(2.0)) should be (false)
  }

  test("should end on java Boolean") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Boolean.TRUE, Boolean.TRUE) should be (false)
  }

  test("should end on java Character") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Character.valueOf('t'), Character.valueOf('k')) should be (false)
  }

  test("should end on java Enum") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, ONE, TWO) should be (false)
  }

  test("should end on java Byte") {
    EndOnSimpleTypeStrategy.shouldProceed(EmptyPath, Byte.MIN_VALUE, Byte.MAX_VALUE) should be (false)
  }
} | tkaczmarzyk/beandiff | src/test/scala/org/beandiff/core/EndOnSimpleTypeStrategyTest.scala | Scala | lgpl-3.0 | 2,622 |
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.ddf.ddfTest
import io.ddf.types.AggregateTypes.AggregateFunction
import org.scalatest.Matchers
import scala.collection.JavaConversions._
/**
 * Behavioural tests for the DDF aggregation handler: simple aggregates,
 * grouping, two-step group+aggregate, error behaviour without groups, and
 * column correlation, all against the airline fixture DDF.
 */
trait AggregationSpec extends BaseSpec with Matchers {

  feature("Aggregation") {

    scenario("calculate simple aggregates") {
      val ddf = loadAirlineDDF()
      val aggregateResult = ddf.aggregate("Year, Month, min(ArrDelay), max(DepDelay)")
      // Result rows are keyed by the tab-joined group-by values.
      val result: Array[Double] = aggregateResult.get("2008\\t3")
      result.length should be(2)
      val colAggregate = ddf.getAggregationHandler.aggregateOnColumn(AggregateFunction.MAX, "Year")
      colAggregate should be(2010)
    }

    scenario("group data") {
      val ddf = loadAirlineDDF()
      val l1: java.util.List[String] = List("DayofMonth")
      val l2: java.util.List[String] = List("avg(DepDelay)")
      val avgDelayByDay = ddf.groupBy(l1, l2)
      avgDelayByDay.getColumnNames.map(col => col.toLowerCase()) should contain("dayofmonth")
      avgDelayByDay.getColumnNames.size() should be(2)
      val rows = avgDelayByDay.sql("select * from @this", "").getRows
      rows.head.split("\\t").head.toDouble should be(21.0 +- 1.0)
    }

    // FIX: scenario title was corrupted to "group and aggregate)2 steps"
    // (the file-wide " in " -> ")" corruption also hit the license header).
    scenario("group and aggregate in 2 steps") {
      val ddf = loadAirlineDDF()
      val ddf2 = ddf.getAggregationHandler.groupBy(List("DayofMonth"))
      val result = ddf2.getAggregationHandler.agg(List("mean=avg(ArrDelay)"))
      result.getColumnNames.map(col => col.toLowerCase) should (contain("mean") and contain("dayofmonth"))
      val rows = result.sql("select * from @this", "").getRows
      rows.head.split("\\t").head.toDouble should be(9.0 +- 1.0)
    }

    scenario("throw an error on aggregate without groups") {
      val airlineDDF = loadAirlineDDF()
      val ddf = manager.sql2ddf("select * from airline", engineName)
      intercept[Exception] {
        ddf.getAggregationHandler.agg(List("mean=avg(ArrDelay)"))
      }
    }

    scenario("calculate correlation") {
      val ddf = loadAirlineDDF()
      //0.8977184691827954
      // NOTE(review): a tolerance of +- 1 accepts nearly any value; presumably
      // a much tighter bound was intended — confirm before tightening.
      ddf.correlation("ArrDelay", "DepDelay") should be(0.89 +- 1)
    }
  }
}
| tuplejump/ddf-test | src/test/scala/io/ddf/ddfTest/AggregationSpec.scala | Scala | apache-2.0 | 2,902 |
/*
* Copyright 2015 Shao Tian-Chen (Austin)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.au9ustine.puzzles.s99
/**
* Problem 05: Reverse a list
*
* Created by au9ustine on 4/23/15.
*/
object P05 {
  /**
   * Returns the elements of `lst` in reverse order.
   *
   * A `null` argument raises a [[NullPointerException]], mirroring the
   * behaviour a direct `lst.reverse` call would have.
   */
  def reverse[A](lst: List[A]): List[A] =
    if (lst == null) throw new NullPointerException
    else lst.foldLeft(List.empty[A])((acc, elem) => elem :: acc)
}
| au9ustine/org.au9ustine.puzzles.s99 | src/main/scala/org/au9ustine/puzzles/s99/P05.scala | Scala | apache-2.0 | 863 |
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright 2016-2020 Daniel Urban and contributors listed in NOTICE.txt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.choam
package bench
package util
import java.util.concurrent.locks.ReentrantLock
/**
 * A counter whose single mutable cell is guarded by an explicit
 * [[ReentrantLock]]. The lock is public so that callers (e.g.
 * `LockedCounterN`) can acquire several counters' locks at once and use the
 * `unlocked*` variants while holding them.
 */
final class LockedCounter {

  // Mutable cell; all reads/writes of `cnt` must happen under `lck`.
  final class Holder[A](var cnt: A)

  private[this] val h: Holder[Long] =
    new Holder(0L)

  val lck: ReentrantLock =
    new ReentrantLock

  /** Atomically adds `n` and returns the previous count. */
  def add(n: Long): Long = {
    lck.lock()
    try {
      unlockedAdd(n)
    } finally {
      lck.unlock()
    }
  }

  /** Adds `n` without locking; the caller must hold `lck`. Returns the previous count. */
  def unlockedAdd(n: Long): Long = {
    val old = h.cnt
    h.cnt = old + n
    old
  }

  /**
   * Returns the current count.
   *
   * FIX: dropped the redundant `this.synchronized` wrapper that the original
   * placed around the lock acquisition — `add` never touches the object
   * monitor, so synchronizing on `this` here provided no additional mutual
   * exclusion (all state is guarded by `lck` alone) and made the locking
   * discipline inconsistent between `add` and `count`.
   */
  def count(): Long = {
    lck.lock()
    try {
      unlockedCount()
    } finally {
      lck.unlock()
    }
  }

  /** Reads the count without locking; the caller must hold `lck`. */
  def unlockedCount(): Long =
    h.cnt
}
/**
 * A bundle of `n` [[LockedCounter]]s updated together: every counter's lock
 * is taken before any of them is modified, so an observer holding any single
 * lock never sees a partially applied `add`.
 */
final class LockedCounterN(n: Int) {

  private[this] val ctrs =
    Array.fill(n)(new LockedCounter)

  /** Adds `n` to every counter while holding all counters' locks. */
  def add(n: Long): Unit = {
    for (c <- ctrs) c.lck.lock()
    try for (c <- ctrs) c.unlockedAdd(n)
    finally for (c <- ctrs) c.lck.unlock()
  }
}
| durban/exp-reagents | bench/src/main/scala/dev/tauri/choam/bench/util/lockedCounter.scala | Scala | apache-2.0 | 1,625 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.framework.model
/**
* @author mcuthbert
*/
/**
 * Connection settings for a Mapillary server.
 *
 * @param host     API host name
 * @param clientId client id used when calling the API
 * @param border   numeric border value — presumably a geographic search
 *                 buffer in degrees; TODO confirm against callers
 */
case class MapillaryServerInfo(host: String, clientId: String, border: Double)
/**
 * A single Mapillary image: its key, location, and pre-rendered image URLs.
 * The `url_*` suffixes presumably denote the image width in pixels
 * (320/640/1024/2048) — confirm against the Mapillary API.
 */
case class MapillaryImage(
    key: String,
    lat: Double,
    lon: Double,
    url_320: String,
    url_640: String,
    url_1024: String,
    url_2048: String
)
| mgcuthbert/maproulette2 | app/org/maproulette/framework/model/Mapillary.scala | Scala | apache-2.0 | 459 |
import sbt._
import Keys._
import play.Project._
// sbt build definition (legacy Build.scala style) for the oncue-service Play app.
object ApplicationBuild extends Build {

  // Artefact coordinates.
  val appName = "oncue-service"
  val appVersion = "1.0.6-SNAPSHOT"

  val appDependencies = Seq(
    // Add your project dependencies here,
    javaCore,
    javaJdbc,
    javaEbean
  )

  // Restrict LESS compilation to the two bootstrap entry points plus any
  // top-level stylesheet; other partials are presumably pulled in via
  // @import and must not be compiled standalone — confirm against assets.
  def customLessEntryPoints(base: File): PathFinder = (
    (base / "app" / "assets" / "stylesheets" / "bootstrap" * "bootstrap.less") +++
    (base / "app" / "assets" / "stylesheets" / "bootstrap" * "responsive.less") +++
    (base / "app" / "assets" / "stylesheets" * "*.less"))

  val main = play.Project(appName, appVersion, appDependencies).settings(
    lessEntryPoints <<= baseDirectory(customLessEntryPoints)
  )
}
| michaelmarconi/oncue | oncue-service/project/Build.scala | Scala | apache-2.0 | 714 |
package dao
import java.sql.Timestamp
import java.util.UUID
import base.PostgresDbSpec
import dao.helper.ModelAlreadyExists
import database._
import database.helper.LdapUserStatus._
import models._
import org.joda.time.{LocalDate, LocalTime}
import security.LWMRole
import slick.dbio.Effect.Write
import slick.jdbc.PostgresProfile.api._
import utils.date.DateTimeOps._
/**
 * Fixture factory for DAO specs: sizes, random pickers and `populate*`
 * builders that generate interdependent database rows.
 */
object AbstractDaoSpec {

  import scala.util.Random.{nextBoolean, nextInt, shuffle}

  // NOTE almost each population ignores business rules and can crash on abstractDao.existsQuery

  // Fixture sizes; the lazy vals at the bottom are built from these.
  lazy val maxDegrees = 10
  lazy val maxLabworks = 20
  lazy val maxSemesters = 10
  lazy val maxCourses = 10
  lazy val maxRooms = 10
  lazy val maxEmployees = 10
  lazy val maxBlacklists = 100
  lazy val maxTimetables = 10
  lazy val maxStudents = 100
  lazy val maxReportCardEntries = 100
  lazy val maxAuthorities = 10
  lazy val maxScheduleEntries = 100
  lazy val maxGroups = 4
  lazy val maxEvaluations = 100
  lazy val maxLabworkApplications = 20
  lazy val maxEvaluationPatterns = 4 * 5

  // Random pickers over the lazily populated fixture lists below.
  def randomSemester = semesters(nextInt(maxSemesters))

  def randomCourse = courses(nextInt(maxCourses))

  def randomDegree = degrees(nextInt(maxDegrees))

  // FIX: previously indexed with nextInt(maxDegrees) (= 10), which silently
  // restricted the pick to the first half of the maxLabworks (= 20) labworks.
  def randomLabwork = labworks(nextInt(maxLabworks))

  def randomRoom = rooms(nextInt(maxRooms))

  def randomEmployee = employees(nextInt(maxEmployees))

  def randomBlacklist = blacklists(nextInt(maxBlacklists))

  def randomStudent = students(nextInt(maxStudents))

  def randomRole = roles(nextInt(roles.length))

  def randomAuthority = authorities(nextInt(maxAuthorities))

  def randomGroup = groups(nextInt(maxGroups))

  // A random non-empty subset of all entry types for the given report card entry.
  def randomReportCardEntryTypes(reportCardEntry: UUID) =
    takeSomeOf(ReportCardEntryType.all).map { entryType =>
      ReportCardEntryTypeDb(reportCardEntry, entryType.entryType)
    }.toSet

  /**
   * Returns a random, non-empty prefix of `traversable` (never all elements
   * when there are two or more).
   *
   * FIX: the original fell through to `nextInt(traversable.size - 1)` for a
   * one-element input, and `Random.nextInt(0)` throws IllegalArgumentException
   * (the bound must be positive); single-element inputs are now returned as-is,
   * which matches what `take(1)` would have produced.
   */
  final def takeSomeOf[A](traversable: Traversable[A]) =
    if (traversable.size <= 1) traversable
    else traversable.take(nextInt(traversable.size - 1) + 1)

  // A uniformly random single element; crashes on empty input by design.
  final def takeOneOf[A](traversable: Traversable[A]) = shuffle(traversable).head

  final def populateBlacklists(amount: Int) = (0 until amount).map { i =>
    val global = nextBoolean
    val (date, start, end) = {
      val date = LocalDate.now.plusDays(i)
      if (global) {
        // Global blacklists always span the whole day.
        (date, Blacklist.startOfDay, Blacklist.endOfDay)
      } else {
        val start = LocalTime.now.withHourOfDay(nextInt(23))
        val end = start.plusHours(1)
        (date, start, end)
      }
    }

    BlacklistDb(i.toString, date.sqlDate, start.sqlTime, end.sqlTime, global)
  }.toList

  final def populateLabworks(amount: Int)(semesters: List[SemesterDb], courses: List[CourseDb], degrees: List[DegreeDb]) =
    (0 until amount).map { i =>
      LabworkDb(i.toString, i.toString, takeOneOf(semesters).id, takeOneOf(courses).id, takeOneOf(degrees).id)
    }.toList

  final def populateEmployees(amount: Int) = (0 until amount).map { i =>
    UserDb(i.toString, i.toString, i.toString, i.toString, i.toString, EmployeeStatus, None, None)
  }.toList

  final def populateStudents(amount: Int)(degrees: List[DegreeDb]) = (0 until amount).map { i =>
    UserDb(i.toString, i.toString, i.toString, i.toString, s"$i@th-koeln.de", StudentStatus, Some(i.toString), Some(takeOneOf(degrees).id))
  }.toList

  // Builds one timetable per labwork index; entries get consecutive hour slots.
  final def populateTimetables(amount: Int, numberOfEntries: Int)(users: List[UserDb], labworks: List[LabworkDb], blacklists: List[BlacklistDb]) =
    (0 until amount).map { i =>
      val entries = (0 until numberOfEntries).map { j =>
        TimetableEntry(
          takeSomeOf(users).map(_.id).toSet,
          randomRoom.id,
          nextInt(5),
          LocalTime.now.plusHours(j).withMillisOfSecond(0),
          LocalTime.now.plusHours(j + 1).withMillisOfSecond(0)
        )
      }

      TimetableDb(labworks(i).id, entries.toSet, LocalDate.now.plusDays(i).sqlDate, takeSomeOf(blacklists).map(_.id).toSet)
    }.toList

  // `amountForEachLabwork` groups per labwork; label is index + labwork label.
  final def populateGroups(amountForEachLabwork: Int)(labworks: List[LabworkDb], students: List[UserDb]) = {
    (for {
      i <- 0 until amountForEachLabwork
      l <- labworks
    } yield GroupDb(i.toString + l.label, l.id, takeSomeOf(students).map(_.id).toSet)).toList
  }

  final def populateDegrees(amount: Int) =
    (0 until amount).map(i => DegreeDb(i.toString, i.toString)).toList

  // Zips with employees, so at most employees.size courses are produced.
  final def populateCourses(amount: Int)(employees: List[UserDb])(semesterIndex: (Int) => Int) =
    (0 until amount).zip(employees).map {
      case (i, e) => CourseDb(i.toString, i.toString, i.toString, e.id, semesterIndex(i))
    }.toList

  // Builds back-to-back semesters of six months each, starting five years ago.
  final def populateSemester(amount: Int) = {
    val template = LocalDate.now.withDayOfWeek(1).withMonthOfYear(9).minusYears(5).plusMonths(6)

    (0 until amount).foldLeft((List.empty[SemesterDb], template)) {
      case ((list, t), i) =>
        val start = t.plusDays(1)
        val end = start.plusMonths(6)
        val exam = end.minusMonths(1)

        val current = SemesterDb(i.toString, i.toString, start.sqlDate, end.sqlDate, exam.sqlDate)
        (list.:+(current), end)
    }._1
  }

  // All entries share one randomly chosen labwork.
  def populateScheduleEntry(amount: Int)(labworks: List[LabworkDb], rooms: List[RoomDb], employees: List[UserDb], groups: List[GroupDb]) = {
    val labwork = takeOneOf(labworks).id

    (0 until amount).map { i =>
      val date = LocalDate.now.plusDays(i)
      val start = LocalTime.now.plusHours(i)
      val end = start.plusHours(1)

      ScheduleEntryDb(
        labwork,
        start.sqlTime,
        end.sqlTime,
        date.sqlDate,
        takeOneOf(rooms).id,
        takeSomeOf(employees).map(_.id).toSet,
        takeOneOf(groups).id
      )
    }
  }.toList

  def populateReportCardEntries(amount: Int, numberOfEntries: Int)(labworks: List[LabworkDb], students: List[UserDb]) = {
    var index = 0 // in order to satisfy uniqueness

    (0 until amount).flatMap { _ =>
      val student = takeOneOf(students).id
      val labwork = takeOneOf(labworks).id
      val room = randomRoom.id

      (0 until numberOfEntries).map { i =>
        val date = LocalDate.now.plusDays(i)
        val start = LocalTime.now.plusHours(i)
        val end = start.plusHours(1)
        val id = UUID.randomUUID
        val types = randomReportCardEntryTypes(id)
        index += 1

        ReportCardEntryDb(student, labwork, s"assignment $i", date.sqlDate, start.sqlTime, end.sqlTime, room, types, index, id = id)
      }
    }.toList
  }

  // Picks a student id different from `avoiding` (unless no other exists).
  @scala.annotation.tailrec
  def randomStudent(avoiding: UUID, applicants: List[UserDb]): UUID = {
    if (applicants.forall(_.id == avoiding))
      avoiding
    else {
      val app = takeOneOf(applicants).id
      if (app == avoiding) randomStudent(avoiding, applicants) else app
    }
  }

  // does not care about business rules such as only one applicant per labwork
  def populateLabworkApplications(amount: Int, withFriends: Boolean)(labworks: List[LabworkDb], applicants: List[UserDb]) =
    (0 until amount).map { _ =>
      val applicant = takeOneOf(applicants).id
      val friends = if (withFriends) Set(randomStudent(applicant, applicants)) else Set.empty[UUID]

      database.LabworkApplicationDb(takeOneOf(labworks).id, applicant, friends)
    }.toList

  def populateEvaluationPatterns(amount: Int)(labworks: List[LabworkDb]) = (0 until amount).map { i =>
    import models.helper.EvaluationProperty._

    ReportCardEvaluationPatternDb(takeOneOf(labworks).id, i.toString, nextInt(10) + 1, (if (nextBoolean) BoolBased else IntBased).toString)
  }.toList

  // Interdependent fixture lists, built on first access.
  lazy val semesters = populateSemester(maxSemesters)

  lazy val employees = populateEmployees(maxEmployees)

  lazy val courses = populateCourses(maxCourses)(employees)(_ % 6)

  lazy val degrees = populateDegrees(maxDegrees)

  lazy val authorities = (0 until maxAuthorities).map { i =>
    val role: RoleDb = roles((i * 3) % roles.length)
    AuthorityDb(employees(i % maxEmployees).id, role.id, None)
  }.toList

  lazy val roles = LWMRole.all.map(r => RoleDb(r.label))

  lazy val labworks = populateLabworks(maxLabworks)(semesters, courses, degrees)

  lazy val rooms = (0 until maxRooms).map(i => RoomDb(i.toString, i.toString, i)).toList

  lazy val blacklists = populateBlacklists(maxBlacklists)

  lazy val timetables = populateTimetables(maxTimetables, 6)(employees, labworks.drop(1), blacklists)

  lazy val students = populateStudents(maxStudents)(degrees)

  lazy val reportCardEntries = populateReportCardEntries(maxReportCardEntries, 8)(labworks, students)

  lazy val groups = populateGroups(maxGroups)(labworks, students) // remember to add groupMemberships also

  lazy val groupMemberships = groups.flatMap(g => g.members.map(m => GroupMembership(g.id, m)))

  lazy val scheduleEntries = populateScheduleEntry(maxScheduleEntries)(labworks, rooms, employees, groups)

  lazy val labworkApplications = populateLabworkApplications(maxLabworkApplications, withFriends = true)(labworks, students)

  lazy val reportCardEvaluationpatterns = populateEvaluationPatterns(maxEvaluationPatterns)(labworks)
}
/**
 * Generic CRUD contract spec for an [[AbstractDao]]: subclasses supply the
 * dao, fixture entities and the expected atom, and inherit the
 * create/get/update/delete test cases below.
 */
abstract class AbstractDaoSpec[T <: Table[
  DbModel
] with UniqueTable, DbModel <: UniqueDbEntity, LwmModel <: UniqueEntity]
    extends PostgresDbSpec {
  import scala.concurrent.ExecutionContext.Implicits.global

  // One shared timestamp per spec run.
  protected val lastModified: Timestamp = {
    import org.joda.time.DateTime
    import utils.date.DateTimeOps.DateTimeConverter

    DateTime.now.timestamp
  }

  // The dao under test.
  protected def dao: AbstractDao[T, DbModel, LwmModel]

  // Human-readable entity name used in the test descriptions.
  protected def name: String

  protected def dbEntity: DbModel // dbEntity should not expand

  protected def invalidDuplicateOfDbEntity
    : DbModel // invalidDuplicateOfDbEntity should not expand

  protected def invalidUpdateOfDbEntity
    : DbModel // invalidUpdateOfDbEntity should not expand

  protected def validUpdateOnDbEntity
    : DbModel // validUpdateOnDbEntity should not expand

  protected def dbEntities: List[DbModel] // dbEntities should not expand

  // The atomic (expanded) representation expected for dbEntity.
  protected def lwmAtom: LwmModel

  override protected def dependencies: DBIOAction[Unit, NoStream, Write]

  s"A AbstractDaoSpec with $name " should {

    s"create a $name" in {
      async(dao.create(dbEntity))(_ shouldBe dbEntity)
    }

    s"get a $name" in {
      // non-atomic returns the flat entity, atomic the expanded atom
      async(dao.getSingle(dbEntity.id, atomic = false))(
        _.value shouldBe dbEntity.toUniqueEntity
      )
      async(dao.getSingle(dbEntity.id))(_.value shouldBe lwmAtom)
    }

    s"not create a $name because model already exists" in {
      async(dao.create(invalidDuplicateOfDbEntity).failed)(
        _ shouldBe ModelAlreadyExists(invalidDuplicateOfDbEntity, Seq(dbEntity))
      )
    }

    s"not update a $name because model already exists" in {
      async(dao.update(invalidUpdateOfDbEntity).failed)(
        _ shouldBe ModelAlreadyExists(invalidUpdateOfDbEntity, dbEntity)
      )
    }

    s"update a $name properly" in {
      async(dao.update(validUpdateOnDbEntity))(_ shouldBe validUpdateOnDbEntity)
    }

    s"create many $name" in {
      async(dao.createMany(dbEntities))(
        _ should contain theSameElementsAs dbEntities
      )
      async(dao.getMany(dbEntities.map(_.id), atomic = false))(
        _.map(_.id) should contain theSameElementsAs dbEntities.map(_.id)
      )
    }

    s"delete a $name by invalidating it" in {
      // invalidation = soft delete: the entity must no longer be retrievable
      val deleted = for {
        _ <- dao.invalidate(dbEntity)
        e <- dao.getSingle(dbEntity.id)
      } yield e

      async(deleted)(_.isEmpty shouldBe true)
    }
  }
}
| THK-ADV/lwm-reloaded | test/dao/AbstractDaoSpec.scala | Scala | mit | 12,049 |
package org.http4s
package client
import java.net.InetSocketAddress
import javax.servlet.ServletOutputStream
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import cats.implicits._
import fs2._
import fs2.interop.cats._
import org.http4s.Uri.{Authority, RegName}
import org.http4s.client.testroutes.GetRoutes
import org.http4s.dsl._
import org.http4s.headers.{`Content-Length`, `Transfer-Encoding`}
import org.specs2.specification.core.Fragments
import scala.concurrent.duration._
/**
 * Reusable integration battery for http4s client backends: spins the canned
 * [[GetRoutes]] against a Jetty scaffold and checks GET/POST behaviour of the
 * supplied `client`. The client is shut down once all fragments have run.
 */
abstract class ClientRouteTestBattery(name: String, client: Client)
  extends Http4sSpec with JettyScaffold
{
  // Generous upper bound for any single round trip against the scaffold server.
  val timeout = 20.seconds

  // One GET test per canned route: fetch the path and compare the response
  // against the expected canned one.
  Fragments.foreach(GetRoutes.getPaths.toSeq) { case (path, expected) =>
    s"Execute GET: $path" in {
      val name = address.getHostName
      val port = address.getPort
      val req = Request(uri = Uri.fromString(s"http://$name:$port$path").yolo)
      client.fetch(req) { resp =>
        Task.delay(checkResponse(resp, expected))
      }.unsafeRunFor(timeout)
    }
  }

  name should {
    "Strip fragments from URI" in {
      // skipped() aborts the example here; the code below documents the
      // intended reproduction but never runs.
      skipped("Can only reproduce against external resource. Help wanted.")
      val uri = Uri.uri("https://en.wikipedia.org/wiki/Buckethead_discography#Studio_albums")
      val body = client.fetch(Request(uri = uri)) {
        case resp => Task.now(resp.status)
      }
      body must returnValue(Ok)
    }

    "Repeat a simple request" in {
      val path = GetRoutes.SimplePath
      def fetchBody = client.toService(_.as[String]).local { uri: Uri =>
        Request(uri = uri)
      }
      val url = Uri.fromString(s"http://${address.getHostName}:${address.getPort}$path").yolo
      // Ten parallel fetches must all yield a non-empty body.
      Task.parallelTraverse((0 until 10).toVector)(_ =>
        fetchBody.run(url).map(_.length)
      ).unsafeRunFor(timeout).forall(_ mustNotEqual 0)
    }

    // Fix: the POST examples below declared unused local `name`/`port` vals
    // that also shadowed the constructor parameter `name`; they are removed.
    "POST an empty body" in {
      val uri = Uri.fromString(s"http://${address.getHostName}:${address.getPort}/echo").yolo
      val req = POST(uri)
      val body = client.expect[String](req)
      body must returnValue("")
    }

    "POST a normal body" in {
      val uri = Uri.fromString(s"http://${address.getHostName}:${address.getPort}/echo").yolo
      val req = POST(uri, "This is normal.")
      val body = client.expect[String](req)
      body must returnValue("This is normal.")
    }

    "POST a chunked body" in {
      val uri = Uri.fromString(s"http://${address.getHostName}:${address.getPort}/echo").yolo
      val req = POST(uri, Stream.eval(Task.now("This is chunked.")))
      val body = client.expect[String](req)
      body must returnValue("This is chunked.")
    }
  }

  // Shut the client down after every fragment has executed.
  override def map(fs: => Fragments) =
    super.map(fs ^ step(client.shutdown.unsafeRun()))

  // Servlet backing the Jetty scaffold: GET serves the canned routes, POST echoes.
  def testServlet = new HttpServlet {
    override def doGet(req: HttpServletRequest, srv: HttpServletResponse): Unit = {
      GetRoutes.getPaths.get(req.getRequestURI) match {
        case Some(r) => renderResponse(srv, r)
        case None => srv.sendError(404)
      }
    }

    override def doPost(req: HttpServletRequest, srv: HttpServletResponse): Unit = {
      srv.setStatus(200)
      val s = scala.io.Source.fromInputStream(req.getInputStream).mkString
      srv.getOutputStream.print(s)
      srv.getOutputStream.flush()
    }
  }

  // Status, body, headers and HTTP version of `rec` must all match `expected`.
  private def checkResponse(rec: Response, expected: Response) = {
    val hs = rec.headers.toSeq
    rec.status must be_==(expected.status)
    collectBody(rec.body) must be_==(collectBody(expected.body))
    expected.headers.foreach(h => h must beOneOf(hs: _*))
    rec.httpVersion must be_==(expected.httpVersion)
  }

  // NOTE(review): private and not referenced anywhere in this class —
  // candidate for removal in a follow-up.
  private def translateTests(address: InetSocketAddress, method: Method, paths: Map[String, Response]): Map[Request, Response] = {
    val port = address.getPort()
    val name = address.getHostName()
    paths.map { case (s, r) =>
      (Request(method, uri = Uri.fromString(s"http://$name:$port$s").yolo), r)
    }
  }

  // Copies a canned Response onto the servlet response, streaming the body
  // byte by byte and flushing once done.
  private def renderResponse(srv: HttpServletResponse, resp: Response): Unit = {
    srv.setStatus(resp.status.code)
    resp.headers.foreach { h =>
      srv.addHeader(h.name.toString, h.value)
    }
    val os: ServletOutputStream = srv.getOutputStream
    val writeBody: Task[Unit] = resp.body
      .evalMap { byte => Task.delay(os.write(Array(byte))) }
      .run
    val flushOutputStream: Task[Unit] = Task.delay(os.flush())
    (writeBody >> flushOutputStream).unsafeRun()
  }

  private def collectBody(body: EntityBody): Array[Byte] = body.runLog.unsafeRun().toArray
}
| ZizhengTai/http4s | client/src/test/scala/org/http4s/client/ClientRouteTestBattery.scala | Scala | apache-2.0 | 4,711 |
package net.maffoo.jsonquote
case class JsonError(msg: String, position: Pos) extends Exception(msg)
object Util {
  /** Consumes the next token from the implicit iterator and fails with a [[JsonError]] unless it equals `a`. */
  def expect[A](a: A)(implicit it: Iterator[(A, Pos)]): Unit =
    it.next() match {
      case (`a`, _)     => ()
      case (found, pos) => throw JsonError(s"expected $a but got $found", pos)
    }
}
| maffoo/jsonquote | core/src/main/scala/net/maffoo/jsonquote/Util.scala | Scala | mit | 290 |
package com.sksamuel.scrimage
object IphoneOrientation {

  /**
   * Returns `image` rotated/flipped according to the EXIF orientation tag
   * (tag type 274) when exactly one such tag is present; otherwise the image
   * is returned unchanged.
   */
  def reorient(image: Image, metadata: ImageMetadata): Image = {
    val orientationTags = metadata.tagsBy(_.`type` == 274)
    if (orientationTags.size != 1) image
    else orientationTags.head.rawValue match {
      case "3" => image.flipY       // bottom, right side (rotate 180)
      case "6" => image.rotateRight // right side, top (rotate 90 CW)
      case "8" => image.rotateLeft  // left side, bottom (rotate 270 CW)
      case _   => image             // no info, or "1" = top, left side (normal)
    }
  }
}
| carlosFattor/scrimage | scrimage-core/src/main/scala/com/sksamuel/scrimage/IphoneOrientation.scala | Scala | apache-2.0 | 607 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network
import java.nio.ByteBuffer
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, BlockStoreClient, DownloadFileManager}
import org.apache.spark.storage.{BlockId, EncryptedManagedBuffer, StorageLevel}
import org.apache.spark.util.ThreadUtils
/**
* The BlockTransferService that used for fetching a set of blocks at time. Each instance of
* BlockTransferService contains both client and server inside.
*/
private[spark]
abstract class BlockTransferService extends BlockStoreClient {

  /**
   * Initialize the transfer service by giving it the BlockDataManager that can be used to fetch
   * local blocks or put local blocks. The fetchBlocks method in [[BlockStoreClient]] also
   * available only after this is invoked.
   */
  def init(blockDataManager: BlockDataManager): Unit

  /**
   * Port number the service is listening on, available only after [[init]] is invoked.
   */
  def port: Int

  /**
   * Host name the service is listening on, available only after [[init]] is invoked.
   */
  def hostName: String

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   */
  def uploadBlock(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Future[Unit]

  /**
   * A special case of [[fetchBlocks]], as it fetches only one block and is blocking.
   *
   * It is also only available after [[init]] is invoked.
   */
  def fetchBlockSync(
      host: String,
      port: Int,
      execId: String,
      blockId: String,
      tempFileManager: DownloadFileManager): ManagedBuffer = {
    // A monitor for the thread to wait on.
    // The promise is completed from the async fetch listener below.
    val result = Promise[ManagedBuffer]()
    fetchBlocks(host, port, execId, Array(blockId),
      new BlockFetchingListener {
        override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
          result.failure(exception)
        }
        override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
          data match {
            // File-backed and encrypted buffers are handed to the caller as-is
            // (no in-memory copy is made for these cases).
            case f: FileSegmentManagedBuffer =>
              result.success(f)
            case e: EncryptedManagedBuffer =>
              result.success(e)
            case _ =>
              // Any other buffer type is copied into a fresh in-memory buffer.
              // NOTE(review): presumably because `data` may be released once this
              // callback returns — confirm against the transport implementation.
              try {
                val ret = ByteBuffer.allocate(data.size.toInt)
                ret.put(data.nioByteBuffer())
                ret.flip()
                result.success(new NioManagedBuffer(ret))
              } catch {
                case e: Throwable => result.failure(e)
              }
          }
        }
      }, tempFileManager)
    // Block indefinitely until the single block arrives or the fetch fails.
    ThreadUtils.awaitResult(result.future, Duration.Inf)
  }

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   *
   * This method is similar to [[uploadBlock]], except this one blocks the thread
   * until the upload finishes.
   */
  @throws[java.io.IOException]
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
}
| maropu/spark | core/src/main/scala/org/apache/spark/network/BlockTransferService.scala | Scala | apache-2.0 | 4,341 |
package colang.ast.parsed.routines
import colang.ast.parsed.statement.Statement
import colang.ast.parsed.{LocalContext, RootNamespace, Variable}
import colang.ast.raw.{statement => raw}
import colang.issues.{Issue, Terms}
private[routines] object RegisterGlobalVariables {

  /**
   * Applies registerVariables to all global variable definitions
   * @param rootNamespace root namespace
   * @param rawDefs raw variables definition nodes
   * @return (new variables, initialization statements, encountered issues)
   */
  def registerGlobalVariables(rootNamespace: RootNamespace,
                              rawDefs: Seq[raw.VariablesDefinition]): (Seq[Variable], Seq[Statement], Seq[Issue]) = {
    // Local context here means the context initializers are evaluated in, so we use 'main' function description.
    val localContext = LocalContext(
      Terms.Function,
      expectedReturnType = Some(rootNamespace.voidType))

    val (variableSeqs, statementSeqs, issueSeqs) =
      rawDefs
        .map(RegisterVariables.registerVariables(rootNamespace, localContext, _))
        .unzip3

    (variableSeqs.flatten, statementSeqs.flatten, issueSeqs.flatten)
  }
}
| psenchanka/colang | src/main/scala/colang/ast/parsed/routines/RegisterGlobalVariables.scala | Scala | mit | 1,218 |
package basic._01.hello
/**
* Scalaの HelloWorld
*/
object ScalaMain {

  /**
   * Standard main entry point: prints the greeting to stdout.
   */
  def main(args: Array[String]): Unit =
    println("Hello Scala")
} | koooyooo/scala-java-comparison | scala-java-comparison/src/basic/_01/hello/ScalaMain.scala | Scala | apache-2.0 | 202 |
package io.aos.ebnf.spl.driver.es
import scala.collection.mutable
//TODO: using java.util.hashmap to do with strange behaviour of implicit conversions to mutable.map (from ES map). Revisit /JS
case class ElasticSearchMetadataResult(dataSources : Map[String, Seq[String]], objectFields: mutable.Map[String, java.util.HashMap[String, AnyRef]]) | echalkpad/t4f-data | parser/ebnf/src/main/scala/io/aos/ebnf/spl/driver/es/ElasticSearchMetadataResult.scala | Scala | apache-2.0 | 343 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.api.LeaderAndIsr
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.ApiError
import scala.collection._
/**
 * The create metadata maintained by the delayed create topic or create partitions operations.
 *
 * @param topic              name of the topic being created or expanded
 * @param replicaAssignments replica assignment keyed by partition id
 * @param error              error (if any) already recorded for this topic
 */
case class CreatePartitionsMetadata(topic: String, replicaAssignments: Map[Int, Seq[Int]], error: ApiError)
/**
 * A delayed create topic or create partitions operation that is stored in the topic purgatory.
 */
class DelayedCreatePartitions(delayMs: Long,
                              createMetadata: Seq[CreatePartitionsMetadata],
                              adminManager: AdminManager,
                              responseCallback: Map[String, ApiError] => Unit)
  extends DelayedOperation(delayMs) {

  /**
   * The operation can be completed if all of the topics that do not have an error exist and every partition has a
   * leader in the controller.
   * See KafkaController.onNewTopicCreation
   */
  override def tryComplete(): Boolean = {
    trace(s"Trying to complete operation for $createMetadata")

    // Count leaderless partitions across topics that are not already in error.
    val leaderlessPartitionCount = createMetadata.filter(_.error.isSuccess).foldLeft(0) { case (topicCounter, metadata) =>
      topicCounter + missingLeaderCount(metadata.topic, metadata.replicaAssignments.keySet)
    }

    if (leaderlessPartitionCount == 0) {
      trace("All partitions have a leader, completing the delayed operation")
      forceComplete()
    } else {
      trace(s"$leaderlessPartitionCount partitions do not have a leader, not completing the delayed operation")
      false
    }
  }

  /**
   * Check for partitions that are still missing a leader, update their error code and call the responseCallback
   */
  // Fix: replaced deprecated procedure syntax `def onComplete() {` with an explicit `: Unit =`.
  override def onComplete(): Unit = {
    trace(s"Completing operation for $createMetadata")
    val results = createMetadata.map { metadata =>
      // ignore topics that already have errors
      if (metadata.error.isSuccess && missingLeaderCount(metadata.topic, metadata.replicaAssignments.keySet) > 0)
        (metadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null))
      else
        (metadata.topic, metadata.error)
    }.toMap
    responseCallback(results)
  }

  override def onExpiration(): Unit = {}

  /** Number of partitions in `partitions` that still have no leader for `topic`. */
  private def missingLeaderCount(topic: String, partitions: Set[Int]): Int = {
    partitions.foldLeft(0) { case (counter, partition) =>
      if (isMissingLeader(topic, partition)) counter + 1 else counter
    }
  }

  /** True when the partition is unknown to the metadata cache or its leader is NoLeader. */
  private def isMissingLeader(topic: String, partition: Int): Boolean = {
    val partitionInfo = adminManager.metadataCache.getPartitionInfo(topic, partition)
    partitionInfo.isEmpty || partitionInfo.get.basePartitionState.leader == LeaderAndIsr.NoLeader
  }
}
| KevinLiLu/kafka | core/src/main/scala/kafka/server/DelayedCreatePartitions.scala | Scala | apache-2.0 | 3,555 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.sexp.formats
import org.ensime.sexp._
class LegacyFamilyFormatsSpec extends FormatSpec with LegacyFamilyFormats {
  case object Bloo

  // A lone case object serialises to nil.
  "LegacyFamilyFormats" should "support case objects" in {
    assertFormat(Bloo, SexpNil)
  }

  it should "support an example ADT" in {
    import DefaultSexpProtocol._
    import ExampleAst._

    // performance improvement - avoids creating afresh at each call
    // site (only possible for the non-recursive classes)
    // implicit val FieldTermF = SexpFormat[FieldTerm]
    // implicit val BoundedTermF = SexpFormat[BoundedTerm]
    // implicit val UnparsedF = SexpFormat[Unparsed]
    // implicit val IgnoredF = SexpFormat[Ignored]
    // implicit val UnclearF = SexpFormat[Unclear]
    // implicit val InTermF = SexpFormat[InTerm]
    // implicit val LikeTermF = SexpFormat[LikeTerm]
    // implicit val QualifierTokenF = SexpFormat[QualifierToken]

    /////////////////// START OF BOILERPLATE /////////////////
    // Hand-written trait format: dispatches on the runtime subtype when
    // writing and on the symbol hint when reading.
    implicit object TokenTreeFormat extends TraitFormat[TokenTree] {
      // get a performance improvement by creating as many implicit vals
      // for TypeHint[T] as possible, e.g.
      // implicit val FieldTermTH = typehint[FieldTerm]
      // implicit val BoundedTermTH = typehint[BoundedTerm]
      // implicit val UnparsedTH = typehint[Unparsed]
      // implicit val IgnoredTH = typehint[Ignored]
      // implicit val UnclearTH = typehint[Unclear]
      // implicit val InTermTH = typehint[InTerm]
      // implicit val LikeTermTH = typehint[LikeTerm]
      // implicit val OrConditionTH = typehint[OrCondition]
      // implicit val AndConditionTH = typehint[AndCondition]
      // implicit val PreferTokenTH = typehint[PreferToken]
      // implicit val QualifierTokenTH = typehint[QualifierToken]
      def write(obj: TokenTree): Sexp = obj match {
        case f: FieldTerm => wrap(f)
        case b: BoundedTerm => wrap(b)
        case u: Unparsed => wrap(u)
        case i: Ignored => wrap(i)
        case u: Unclear => wrap(u)
        case i: InTerm => wrap(i)
        case like: LikeTerm => wrap(like)
        case a: AndCondition => wrap(a)
        case o: OrCondition => wrap(o)
        case prefer: PreferToken => wrap(prefer)
        case q: QualifierToken => wrap(q)
        case SpecialToken => wrap(SpecialToken)
      }
      def read(hint: SexpSymbol, value: Sexp): TokenTree = hint match {
        case s if s == implicitly[TypeHint[FieldTerm]].hint =>
          value.convertTo[FieldTerm]
        case s if s == implicitly[TypeHint[BoundedTerm]].hint =>
          value.convertTo[BoundedTerm]
        case s if s == implicitly[TypeHint[Unparsed]].hint =>
          value.convertTo[Unparsed]
        case s if s == implicitly[TypeHint[Ignored]].hint =>
          value.convertTo[Ignored]
        case s if s == implicitly[TypeHint[Unclear]].hint =>
          value.convertTo[Unclear]
        case s if s == implicitly[TypeHint[InTerm]].hint =>
          value.convertTo[InTerm]
        case s if s == implicitly[TypeHint[LikeTerm]].hint =>
          value.convertTo[LikeTerm]
        case s if s == implicitly[TypeHint[AndCondition]].hint =>
          value.convertTo[AndCondition]
        case s if s == implicitly[TypeHint[OrCondition]].hint =>
          value.convertTo[OrCondition]
        case s if s == implicitly[TypeHint[PreferToken]].hint =>
          value.convertTo[PreferToken]
        case s if s == implicitly[TypeHint[QualifierToken]].hint =>
          value.convertTo[QualifierToken]
        case s if s == implicitly[TypeHint[SpecialToken.type]].hint =>
          value.convertTo[SpecialToken.type]
        // SAD FACE --- compiler doesn't catch typos on matches or missing impls
        case _ => deserializationError(hint)
      }
    }
    /////////////////// END OF BOILERPLATE /////////////////

    assertFormat(SpecialToken, SexpNil)
    assertFormat(SpecialToken: TokenTree, SexpList(SexpSymbol(":SpecialToken")))

    val fieldTerm = FieldTerm("thing is ten", DatabaseField("THING"), "10")
    val expectField = SexpData(
      SexpSymbol(":text") -> SexpString("thing is ten"),
      SexpSymbol(":field") -> SexpData(
        SexpSymbol(":column") -> SexpString("THING")
      ),
      SexpSymbol(":value") -> SexpString("10")
    )

    // confirm that the wrapper is picked up for a specific case class
    assertFormat(fieldTerm, expectField)
    val expectFieldTree = SexpData(SexpSymbol(":FieldTerm") -> expectField)
    // confirm that the trait level formatter works
    assertFormat(fieldTerm: TokenTree, expectFieldTree)

    // confirm recursive works
    val and = AndCondition(fieldTerm, fieldTerm, "wibble")
    val expectAnd = SexpData(
      SexpSymbol(":left") -> expectFieldTree,
      SexpSymbol(":right") -> expectFieldTree,
      SexpSymbol(":text") -> SexpString("wibble")
    )
    assertFormat(and, expectAnd)
    val expectAndTree = SexpData(SexpSymbol(":AndCondition") -> expectAnd)
    // and that the recursive type works as a trait
    assertFormat(and: TokenTree, expectAndTree)
  }
}
| yyadavalli/ensime-server | s-express/src/test/scala/org/ensime/sexp/formats/LegacyFamilyFormatsSpec.scala | Scala | gpl-3.0 | 5,258 |
/**
<slate_header>
author: Kishore Reddy
url: https://github.com/kishorereddy/scala-slate
copyright: 2015 Kishore Reddy
license: https://github.com/kishorereddy/scala-slate/blob/master/LICENSE.md
desc: a scala micro-framework
usage: Please refer to license on github for more info.
</slate_header>
*/
package slate.common
object Serializer {

  /**
   * converts the object supplied, into an html table of properties
   * @param cc
   * @return
   */
  def asHtmlTable(cc: AnyRef): String =
  {
    val info = Reflect.getFields(cc)
    asHtmlTable(info)
  }

  /**
   * converts the map supplied into an html table of key/value pairs.
   * Null values render as empty cells.
   * @param items
   * @return
   */
  def asHtmlTable(items: Map[String, Any]*): String =
  {
    // Fix: rows used to be closed with "<tr>" instead of "</tr>", producing
    // malformed HTML. Also switched var-based string concatenation to a builder.
    val text = new StringBuilder("<table>")
    for (item <- items; (k, v) <- item) {
      text.append("<tr>")
      text.append("<td>").append(k).append("</td>")
      text.append("<td>").append(if (v == null) "" else v.toString).append("</td>")
      text.append("</tr>")
    }
    text.append("</table>")
    text.toString
  }

  /**
   * converts the list of tuples ( string, object ) into a html table
   * @param items
   * @return
   */
  def asHtmlTable(items: List[(String, Any)]): String =
  {
    // Same "</tr>" fix as the Map overload above.
    val text = new StringBuilder("<table>")
    for ((name, value) <- items) {
      text.append("<tr>")
      text.append("<td>").append(name).append("</td>")
      text.append("<td>").append(if (value == null) "" else value.toString).append("</td>")
      text.append("</tr>")
    }
    text.append("</table>")
    text.toString
  }

  /**
   * converts the list of tuples ( string, object ) into a json object.
   * All values are rendered as strings; null becomes the empty string.
   * @param items
   * @return
   */
  def asJson(items: List[(String, Any)]): String =
    items.map { case (name, value) =>
      val rendered = if (value == null) "" else value.toString
      "\"" + name + "\":\"" + escapeJson(rendered) + "\""
    }.mkString("{", ",", "}")

  /**
   * Escapes backslashes and double quotes for embedding in a json string literal.
   * Fix: backslashes must be escaped FIRST — the previous order escaped quotes
   * first and then doubled the backslashes it had just introduced, producing
   * invalid output like `\\"` for an embedded quote.
   */
  private def escapeJson(text: String): String =
    text.replaceAllLiterally("\\", "\\\\").replaceAllLiterally("\"", "\\\"")
}
| kishorereddy/akka-http | src/main/scala/slate/common/Serializer.scala | Scala | mit | 2,136 |
package scutil.classpath.extension
import scutil.classpath._
object ClassClasspathImplicits extends ClassClasspathImplicits

trait ClassClasspathImplicits {
  implicit final class ClassResourceExt[T](peer: Class[T]) {

    /** Resolves a classpath resource; paths are relative to the class unless preceeded by a slash. */
    def classpathResource(path: String): Option[ClasspathResource] =
      Option(peer.getResource(path)).map(ClasspathResource.apply)

    /** Like [[classpathResource]] but fails loudly when the resource cannot be found. */
    def classpathResourceOrError(path: String): ClasspathResource =
      classpathResource(path).getOrElse(sys.error(s"cannot find classpath resource ${path}"))
  }
}
| ritschwumm/scutil | modules/jdk/src/main/scala/scutil/classpath/extension/ClassClasspathImplicits.scala | Scala | bsd-2-clause | 642 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.mkl.Memory
import com.intel.analytics.bigdl.dllib.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.nn.tf.ControlDependency
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.intermediate.{BlasToIR, IRGraph}
import com.intel.analytics.bigdl.dllib.utils.{Node, Util}
import com.intel.analytics.bigdl.dllib.optim.DistriOptimizer._
import scala.reflect.ClassTag
/**
* A graph container. The modules in the container are connected as a DAG graph.
*
* @param _inputs inputs modules, user can feed data into these modules in the forward method
* @param _outputs output modules
* @param _variables
* @tparam T Numeric type. Only support float/double now
*/
class StaticGraph[T: ClassTag](
    private val _inputs : Seq[ModuleNode[T]],
    private val _outputs : Seq[ModuleNode[T]],
    private val _variables: Option[(Array[Tensor[T]], Array[Tensor[T]])] = None,
    private val enableExcludeChecking: Boolean = true
  )(implicit ev: TensorNumeric[T]) extends Graph[T](_inputs, _outputs, _variables) {
  // Nodes in forward execution order (reversed topological sort of the forward graph).
  private val forwardExecution = forwardGraph.topologySort.reverse
  private var backwardExecution: Array[Node[AbstractModule[Activity, Activity, T]]] = _
  // Input activity per forward node, cached for reuse in backward/accGradParameters.
  private val inputCache = new Array[Activity](forwardExecution.length)
  // Maps an index in backwardExecution to the matching index in forwardExecution.
  private var backId2ForwardId: Array[Int] = _
  // gradOutput per backward node, cached by backwardExecution(...) for accGradParameters.
  private var gradOutputCache: Array[Activity] = _

  if (enableExcludeChecking) {
    excludeInvalidLayers(forwardExecution.map {_.element})
  }

  buildBackwardGraph()

  override def updateOutput(input: Activity): Activity = {
    // Run every node in topological order, caching each node's input.
    var i = 0
    while(i < forwardExecution.length) {
      val node = forwardExecution(i)
      val nodeInput = findInput(node, input)
      inputCache(i) = nodeInput
      node.element.forward(nodeInput)
      i += 1
    }

    output = dummyOutput.element.output
    output
  }

  override def backward(input: Activity, gradOutput: Activity): Activity = {
    val before = System.nanoTime()
    val gradients = backwardExecution(input, gradOutput, true)
    backwardTime += System.nanoTime() - before
    gradients
  }

  override def updateGradInput(input: Activity, gradOutput: Activity): Activity = {
    backwardExecution(input, gradOutput, false)
  }

  override def buildBackwardGraph(): this.type = {
    super.buildBackwardGraph()
    backwardExecution = backwardGraph.topologySort.reverse
    backId2ForwardId = new Array[Int](backwardExecution.length)
    gradOutputCache = new Array[Activity](backwardExecution.length)

    // Pair each backward node with its forward counterpart by layer name
    // (the last backward node is the dummy end and is skipped).
    var i = 0
    while(i < backwardExecution.length - 1) {
      var j = 0
      var find = false
      while(j < forwardExecution.length) {
        if (forwardExecution(j).element.getName() == backwardExecution(i).element.getName()) {
          backId2ForwardId(i) = j
          find = true
        }
        j += 1
      }
      require(find, "Cannot find backward layer in forward executions")
      i += 1
    }
    this
  }

  override def accGradParameters(input: Activity, gradOutput: Activity): Unit = {
    // Relies on inputCache/gradOutputCache populated by a prior backward pass.
    var i = 0
    while (i < backwardExecution.length - 1) {
      val curNode = backwardExecution(i)
      val curInput = inputCache(backId2ForwardId(i))
      curNode.element.accGradParameters(curInput, gradOutputCache(i))
      i += 1
    }
  }

  override def populateModules(): Unit = {
    modules.appendAll(
      forwardGraph.topologySort
        // todo: convert control dep node to edge
        .filterNot(_.element.isInstanceOf[ControlDependency[T]])
        .filter(n => !n.eq(dummyOutput)).map(_.element)
        .reverse
    )
    checkDuplicate()
  }

  // Shared driver for backward()/updateGradInput(); note it shadows the
  // field of the same name `backwardExecution`.
  private def backwardExecution(input: Activity, gradOutput: Activity,
    executeBackward: Boolean): Activity = {
    dummyOutputGrad.element.gradInput = gradOutput

    var i = 0
    while (i < backwardExecution.length - 1) {  // do not execute the dummy backward end
      val curNode = backwardExecution(i)
      val curGradOutput = findGradOutput(curNode, gradOutput)
      gradOutputCache(i) = curGradOutput
      val curInput = inputCache(backId2ForwardId(i))
      if (!isStopGradient(curNode.element)) {
        if (executeBackward) {
          curNode.element.backward(curInput, curGradOutput)
        } else {
          curNode.element.updateGradInput(curInput, curGradOutput)
        }
      } else if (executeBackward) {
        // stop-gradient nodes still accumulate parameter gradients
        curNode.element.accGradParameters(curInput, curGradOutput)
      }
      i += 1
    }

    gradInput = fetchModelGradInput()
    gradInput
  }

  /**
   * convert static graph to ir graph and build according to engine type
   * @return return ir graph if converted successfully, otherwise null
   */
  def toIRgraph() : IRGraph[T] = {
    val inFormats = if (inputsFormats == null) {
      logger.warn("Input formats NCHW by default, Please set explicitly if needed")
      Seq(Memory.Format.nchw)
    } else inputsFormats

    val outFormats = if (outputsFormats == null) {
      logger.warn("Output formats NC by default, Please set explicitly if needed")
      Seq(Memory.Format.nc)
    } else outputsFormats

    val allNodes = forwardExecution
    if (!BlasToIR[T].convertingCheck(allNodes)) return null
    val nodeMap = BlasToIR[T].convert(allNodes)

    val inputNodes = inputs.toArray.map(n => nodeMap.get(n).get)
    val outputNodes = outputs.toArray.map(n => nodeMap.get(n).get)

    val inputsIR = inputs.toArray.map(n => nodeMap.get(n).get)
    val outputsIR = outputs.toArray.map(n => nodeMap.get(n).get)

    val model = IRGraph(inputsIR, outputsIR, variables, true, inFormats, outFormats)
    model.build()
  }

  // Merge a nested StaticGraph into a non-nested one
  private[bigdl] def toSingleGraph(): StaticGraph[T] = {
    if (this.isNestedGraph()) {
      // Work on a clone so the original graph stays untouched, then splice each
      // nested sub-graph's nodes directly into the outer topology.
      val graph = this.cloneModule()
      val fwdExecution = graph.getSortedForwardExecutions()
      val dmOutput = fwdExecution(fwdExecution.length - 1).nextNodes(0)

      var i = 0
      while (i < fwdExecution.length) {
        if (fwdExecution(i).element.isInstanceOf[StaticGraph[T]]) {
          var g = fwdExecution(i).element.asInstanceOf[StaticGraph[T]].toSingleGraph()
          fwdExecution(i).element = g

          // Re-route predecessors of the nested graph node to the nested inputs.
          for (inputIndex <- 0 until fwdExecution(i).prevNodes.length) {
            val inputNode = g.inputs(inputIndex)
            inputNode.element = Identity()

            while (fwdExecution(i).prevNodes.length != 0) {
              val preNode = fwdExecution(i).prevNodes(0)
              preNode.delete(fwdExecution(i))
              preNode.add(inputNode)
            }
          }

          // Re-route successors of the nested graph node to the nested outputs.
          for (outputIndex <- 0 until g.outputs.length) {
            val outputNode = g.outputs(outputIndex)
            outputNode.removeNextEdges()
            while (fwdExecution(i).nextNodes.length != 0) {
              val nextNode = fwdExecution(i).nextNodes(0)
              fwdExecution(i).delete(nextNode)
              outputNode.add(nextNode)
            }
          }
        }
        i += 1
      }

      val resultOutputNodes = dmOutput.prevNodes
      resultOutputNodes.foreach(_.delete(dmOutput))
      new StaticGraph[T](Array(graph.inputs(0)), resultOutputNodes,
        enableExcludeChecking = this.enableExcludeChecking)
    } else {
      this
    }
  }

  private def isNestedGraph(): Boolean = {
    for (i <- 0 until forwardExecution.length) {
      if (forwardExecution(i).element.isInstanceOf[StaticGraph[T]]) {
        return true
      }
    }

    false
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/StaticGraph.scala | Scala | apache-2.0 | 8,227 |
package mesosphere.marathon
import javax.inject.{ Inject, Named }
import akka.actor.ActorSystem
import akka.event.EventStream
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.launcher.OfferProcessor
import mesosphere.marathon.core.task.update.TaskStatusUpdateProcessor
import mesosphere.marathon.event._
import mesosphere.util.state.{ FrameworkIdUtil, MesosLeaderInfo }
import org.apache.mesos.Protos._
import org.apache.mesos.{ Scheduler, SchedulerDriver }
import org.slf4j.LoggerFactory
import scala.concurrent.{ Await, Future }
import scala.util.control.NonFatal
/** Callbacks invoked by the scheduler in response to driver lifecycle events. */
trait SchedulerCallbacks {
  /** Invoked when the scheduler loses its connection to the Mesos master. */
  def disconnected(): Unit
}
/**
 * Marathon's implementation of the Mesos [[Scheduler]] callback interface.
 *
 * Translates driver callbacks into internal processing (offer matching via
 * [[OfferProcessor]], task status updates via [[TaskStatusUpdateProcessor]])
 * and publishes corresponding events on the internal event bus.
 */
class MarathonScheduler @Inject() (
    @Named(EventModule.busName) eventBus: EventStream,
    clock: Clock,
    offerProcessor: OfferProcessor,
    taskStatusProcessor: TaskStatusUpdateProcessor,
    frameworkIdUtil: FrameworkIdUtil,
    mesosLeaderInfo: MesosLeaderInfo,
    system: ActorSystem,
    config: MarathonConf,
    schedulerCallbacks: SchedulerCallbacks) extends Scheduler {

  private[this] val log = LoggerFactory.getLogger(getClass.getName)

  // NOTE(review): callbacks below run their async work on the global execution
  // context — confirm a dedicated pool is not required here.
  import scala.concurrent.ExecutionContext.Implicits.global

  implicit val zkTimeout = config.zkTimeoutDuration

  /** First successful registration: persist the framework id, remember the
    * master, and announce the registration on the event bus. */
  override def registered(
    driver: SchedulerDriver,
    frameworkId: FrameworkID,
    master: MasterInfo): Unit = {
    log.info(s"Registered as ${frameworkId.getValue} to master '${master.getId}'")
    frameworkIdUtil.store(frameworkId)
    mesosLeaderInfo.onNewMasterInfo(master)
    eventBus.publish(SchedulerRegisteredEvent(frameworkId.getValue, master.getHostname))
  }

  /** Re-registration after a master failover; the framework id is already stored. */
  override def reregistered(driver: SchedulerDriver, master: MasterInfo): Unit = {
    log.info("Re-registered to %s".format(master))
    mesosLeaderInfo.onNewMasterInfo(master)
    eventBus.publish(SchedulerReregisteredEvent(master.getHostname))
  }

  /** Hands each received offer to the [[OfferProcessor]] asynchronously. */
  override def resourceOffers(driver: SchedulerDriver, offers: java.util.List[Offer]): Unit = {
    import scala.collection.JavaConverters._
    offers.asScala.foreach { offer =>
      val processFuture = offerProcessor.processOffer(offer)
      // NOTE(review): this partial function covers only Success and
      // Failure(NonFatal(_)); a fatal failure would raise a MatchError inside
      // the callback — confirm that is intended.
      processFuture.onComplete {
        case scala.util.Success(_) => log.debug(s"Finished processing offer '${offer.getId.getValue}'")
        case scala.util.Failure(NonFatal(e)) => log.error(s"while processing offer '${offer.getId.getValue}'", e)
      }
    }
  }

  override def offerRescinded(driver: SchedulerDriver, offer: OfferID): Unit = {
    log.info("Offer %s rescinded".format(offer))
  }

  /** Forwards task status updates to the status processor; failures are logged only. */
  override def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
    log.info("Received status update for task %s: %s (%s)"
      .format(status.getTaskId.getValue, status.getState, status.getMessage))
    taskStatusProcessor.publish(status).onFailure {
      case NonFatal(e) =>
        log.error(s"while processing task status update $status", e)
    }
  }

  /** Publishes executor framework messages on the event bus. */
  override def frameworkMessage(
    driver: SchedulerDriver,
    executor: ExecutorID,
    slave: SlaveID,
    message: Array[Byte]): Unit = {
    log.info("Received framework message %s %s %s ".format(executor, slave, message))
    eventBus.publish(MesosFrameworkMessageEvent(executor.getValue, slave.getValue, message))
  }

  override def disconnected(driver: SchedulerDriver) {
    log.warn("Disconnected")
    eventBus.publish(SchedulerDisconnectedEvent())
    // Disconnection from the Mesos master has occurred.
    // Thus, call the scheduler callbacks.
    schedulerCallbacks.disconnected()
  }

  override def slaveLost(driver: SchedulerDriver, slave: SlaveID) {
    log.info(s"Lost slave $slave")
  }

  override def executorLost(
    driver: SchedulerDriver,
    executor: ExecutorID,
    slave: SlaveID,
    p4: Int) {
    // NOTE(review): the message interpolates `$p4` (the integer status) where
    // the slave id appears intended — confirm before relying on this log line.
    log.info(s"Lost executor $executor slave $p4")
  }

  /** Framework-level error from Mesos; may trigger process suicide, removing
    * the stored framework id only for the known "removed" error message. */
  override def error(driver: SchedulerDriver, message: String) {
    log.warn(s"Error: $message\\n" +
      s"In case Mesos does not allow registration with the current frameworkId, " +
      s"delete the ZooKeeper Node: ${config.zkPath}/state/framework:id\\n" +
      s"CAUTION: if you remove this node, all tasks started with the current frameworkId will be orphaned!")
    // Currently, it's pretty hard to disambiguate this error from other causes of framework errors.
    // Watch MESOS-2522 which will add a reason field for framework errors to help with this.
    // For now the frameworkId is removed based on the error message.
    val removeFrameworkId = message match {
      case "Framework has been removed" => true
      case _: String => false
    }
    suicide(removeFrameworkId)
  }

  /**
   * Exits the JVM process, optionally deleting Marathon's FrameworkID
   * from the backing persistence store.
   *
   * If `removeFrameworkId` is set, the next Marathon process elected
   * leader will fail to find a stored FrameworkID and invoke `register`
   * instead of `reregister`. This is important because on certain kinds
   * of framework errors (such as exceeding the framework failover timeout),
   * the scheduler may never re-register with the saved FrameworkID until
   * the leading Mesos master process is killed.
   */
  protected def suicide(removeFrameworkId: Boolean): Unit = {
    log.error(s"Committing suicide!")
    if (removeFrameworkId) Await.ready(frameworkIdUtil.expunge(), config.zkTimeoutDuration)
    // Asynchronously call sys.exit() to avoid deadlock due to the JVM shutdown hooks
    // scalastyle:off magic.number
    Future(sys.exit(9)).onFailure {
      case NonFatal(t) => log.error("Exception while committing suicide", t)
    }
    // scalastyle:on
  }
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/MarathonScheduler.scala | Scala | apache-2.0 | 5,618 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateSafeProjection
import org.apache.spark.sql.types._
/**
* Benchmark for the previous interpreted hash function(InternalRow.hashCode) vs codegened
* hash expressions (Murmur3Hash/xxHash64).
*/
object HashBenchmark {

  /**
   * Benchmarks four hash implementations over `numRows` randomly generated
   * rows of `schema`, each measured over `iters` passes: the interpreted
   * InternalRow.hashCode baseline, and codegen Murmur3, xxHash64 and HiveHash.
   */
  def test(name: String, schema: StructType, numRows: Int, iters: Int): Unit = {
    val generator = RandomDataGenerator.forType(schema, nullable = false).get
    val encoder = RowEncoder(schema)
    val attrs = schema.toAttributes
    val safeProjection = GenerateSafeProjection.generate(attrs, attrs)

    val rows = (1 to numRows).map(_ =>
      // The output of encoder is UnsafeRow, use safeProjection to turn in into safe format.
      safeProjection(encoder.toRow(generator().asInstanceOf[Row])).copy()
    ).toArray

    val benchmark = new Benchmark("Hash For " + name, iters * numRows.toLong)
    // Baseline: interpreted InternalRow.hashCode. The running `sum` exists
    // only so the hashing work cannot be optimized away.
    benchmark.addCase("interpreted version") { _: Int =>
      var sum = 0
      for (_ <- 0L until iters) {
        var i = 0
        while (i < numRows) {
          sum += rows(i).hashCode()
          i += 1
        }
      }
    }

    // Codegen Murmur3: hash computed through a generated unsafe projection.
    val getHashCode = UnsafeProjection.create(new Murmur3Hash(attrs) :: Nil, attrs)
    benchmark.addCase("codegen version") { _: Int =>
      var sum = 0
      for (_ <- 0L until iters) {
        var i = 0
        while (i < numRows) {
          sum += getHashCode(rows(i)).getInt(0)
          i += 1
        }
      }
    }

    // Codegen xxHash64 (64-bit); only the low 32 bits are accumulated here.
    val getHashCode64b = UnsafeProjection.create(new XxHash64(attrs) :: Nil, attrs)
    benchmark.addCase("codegen version 64-bit") { _: Int =>
      var sum = 0
      for (_ <- 0L until iters) {
        var i = 0
        while (i < numRows) {
          sum += getHashCode64b(rows(i)).getInt(0)
          i += 1
        }
      }
    }

    // Codegen HiveHash (Hive-compatible hashing).
    val getHiveHashCode = UnsafeProjection.create(new HiveHash(attrs) :: Nil, attrs)
    benchmark.addCase("codegen HiveHash version") { _: Int =>
      var sum = 0
      for (_ <- 0L until iters) {
        var i = 0
        while (i < numRows) {
          sum += getHiveHashCode(rows(i)).getInt(0)
          i += 1
        }
      }
    }

    benchmark.run()
  }

  /** Runs the benchmark for a range of schemas: primitives, a wide "normal"
    * row, nested arrays and nested maps. The comment blocks record previously
    * observed results for reference. */
  def main(args: Array[String]): Unit = {
    val singleInt = new StructType().add("i", IntegerType)
    /*
    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
    Hash For single ints:      Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
    ------------------------------------------------------------------------------------------------
    interpreted version             3262 / 3267        164.6           6.1       1.0X
    codegen version                 6448 / 6718         83.3          12.0       0.5X
    codegen version 64-bit          6088 / 6154         88.2          11.3       0.5X
    codegen HiveHash version        4732 / 4745        113.5           8.8       0.7X
    */
    test("single ints", singleInt, 1 << 15, 1 << 14)

    val singleLong = new StructType().add("i", LongType)
    /*
    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
    Hash For single longs:     Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
    ------------------------------------------------------------------------------------------------
    interpreted version             3716 / 3726        144.5           6.9       1.0X
    codegen version                 7706 / 7732         69.7          14.4       0.5X
    codegen version 64-bit          6370 / 6399         84.3          11.9       0.6X
    codegen HiveHash version        4924 / 5026        109.0           9.2       0.8X
    */
    test("single longs", singleLong, 1 << 15, 1 << 14)

    val normal = new StructType()
      .add("null", NullType)
      .add("boolean", BooleanType)
      .add("byte", ByteType)
      .add("short", ShortType)
      .add("int", IntegerType)
      .add("long", LongType)
      .add("float", FloatType)
      .add("double", DoubleType)
      .add("bigDecimal", DecimalType.SYSTEM_DEFAULT)
      .add("smallDecimal", DecimalType.USER_DEFAULT)
      .add("string", StringType)
      .add("binary", BinaryType)
      .add("date", DateType)
      .add("timestamp", TimestampType)
    /*
    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
    Hash For normal:           Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
    ------------------------------------------------------------------------------------------------
    interpreted version             2985 / 3013          0.7        1423.4       1.0X
    codegen version                 2422 / 2434          0.9        1155.1       1.2X
    codegen version 64-bit           856 /  920          2.5         408.0       3.5X
    codegen HiveHash version        4501 / 4979          0.5        2146.4       0.7X
    */
    test("normal", normal, 1 << 10, 1 << 11)

    val arrayOfInt = ArrayType(IntegerType)
    val array = new StructType()
      .add("array", arrayOfInt)
      .add("arrayOfArray", ArrayType(arrayOfInt))
    /*
    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
    Hash For array:            Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
    ------------------------------------------------------------------------------------------------
    interpreted version             3100 / 3555          0.0       23651.8       1.0X
    codegen version                 5779 / 5865          0.0       44088.4       0.5X
    codegen version 64-bit          4738 / 4821          0.0       36151.7       0.7X
    codegen HiveHash version        2200 / 2246          0.1       16785.9       1.4X
    */
    test("array", array, 1 << 8, 1 << 9)

    val mapOfInt = MapType(IntegerType, IntegerType)
    val map = new StructType()
      .add("map", mapOfInt)
      .add("mapOfMap", MapType(IntegerType, mapOfInt))
    /*
    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
    Hash For map:              Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
    ------------------------------------------------------------------------------------------------
    interpreted version                0 /    0         48.1          20.8       1.0X
    codegen version                  257 /  275          0.0       62768.7       0.0X
    codegen version 64-bit           226 /  240          0.0       55224.5       0.0X
    codegen HiveHash version          89 /   96          0.0       21708.8       0.0X
    */
    test("map", map, 1 << 6, 1 << 6)
  }
}
| michalsenkyr/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala | Scala | apache-2.0 | 7,688 |
package com.twitter.finagle.netty4
import com.twitter.finagle.{Service, Stack}
import com.twitter.finagle.client.utils.StringClient.StringClientPipeline
import com.twitter.finagle.netty4.ssl.Netty4SslTestComponents._
import com.twitter.finagle.server.utils.StringServer
import com.twitter.finagle.transport.Transport
import com.twitter.util.{Await, Awaitable, Future, Duration}
import io.netty.channel.local.LocalAddress
import org.scalatest.funsuite.AnyFunSuite
/**
* This test class uses Netty's `LocalAddress` to signal
* to Finagle client and server components that it should
* use a `LocalChannel` and `LocalServerChannel` respectively.
* Using a `LocalChannel` allows for testing of functionality
* without using actual sockets.
*/
class LocalChannelTest extends AnyFunSuite {

  // Generous upper bound; local-channel round trips complete almost instantly.
  private[this] def await[T](a: Awaitable[T]): T = Await.result(a, Duration.fromSeconds(1))

  // Echo service: replies with the request string unchanged.
  private[this] val service = Service.mk[String, String](Future.value)

  // To use a full Finagle client (i.e. StringClient) here
  // requires additional changes to `c.t.f.Address` to allow
  // it to accept a `SocketAddress` and not just an `InetSocketAddress`.
  // So, we test components of the client here instead as a substitute
  // for the time being.

  test("client components and server can communicate") {
    val addr = new LocalAddress("LocalChannelTestPlain")
    val server = StringServer.server.serve(addr, service)
    try {
      val transporter =
        Netty4Transporter.raw[String, String](StringClientPipeline, addr, Stack.Params.empty)
      val transport = await(transporter())
      // Await the write: the original fire-and-forget call silently discarded
      // write failures and could race the subsequent read.
      await(transport.write("Finagle"))
      try {
        val result = await(transport.read())
        assert(result == "Finagle")
      } finally {
        transport.close()
      }
    } finally {
      server.close()
    }
  }

  test("client components and server can communicate over SSL/TLS") {
    val addr = new LocalAddress("LocalChannelTestTls")
    val server = StringServer.server.withTransport.tls(serverConfig).serve(addr, service)
    try {
      val transporter =
        Netty4Transporter.raw[String, String](
          StringClientPipeline,
          addr,
          Stack.Params.empty + Transport.ClientSsl(Some(clientConfig)))
      val transport = await(transporter())
      // Await the write for the same reason as the plaintext test; a TLS
      // failure then surfaces here rather than being swallowed.
      await(transport.write("Finagle over TLS"))
      try {
        val result = await(transport.read())
        assert(result == "Finagle over TLS")
      } finally {
        transport.close()
      }
    } finally {
      server.close()
    }
  }
}
| twitter/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/LocalChannelTest.scala | Scala | apache-2.0 | 2,520 |
package com.twitter.util
import java.net.InetAddress
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
/** Unit tests for NetUtil's IPv4 parsing, private-range detection and CIDR block helpers. */
@RunWith(classOf[JUnitRunner])
class NetUtilTest extends WordSpec {
  "NetUtil" should {
    "isIpv4Address" in {
      // Every octet value 0-255 is accepted in each position.
      for (i <- 0.to(255)) {
        assert(NetUtil.isIpv4Address("%d.0.0.0".format(i)) === true)
        assert(NetUtil.isIpv4Address("0.%d.0.0".format(i)) === true)
        assert(NetUtil.isIpv4Address("0.0.%d.0".format(i)) === true)
        assert(NetUtil.isIpv4Address("0.0.0.%d".format(i)) === true)
        assert(NetUtil.isIpv4Address("%d.%d.%d.%d".format(i, i, i, i)) === true)
      }
      // Malformed, out-of-range, non-numeric, padded and partial inputs are rejected.
      assert(NetUtil.isIpv4Address("") === false)
      assert(NetUtil.isIpv4Address("no") === false)
      assert(NetUtil.isIpv4Address("::127.0.0.1") === false)
      assert(NetUtil.isIpv4Address("-1.0.0.0") === false)
      assert(NetUtil.isIpv4Address("256.0.0.0") === false)
      assert(NetUtil.isIpv4Address("0.256.0.0") === false)
      assert(NetUtil.isIpv4Address("0.0.256.0") === false)
      assert(NetUtil.isIpv4Address("0.0.0.256") === false)
      assert(NetUtil.isIpv4Address("x1.2.3.4") === false)
      assert(NetUtil.isIpv4Address("1.x2.3.4") === false)
      assert(NetUtil.isIpv4Address("1.2.x3.4") === false)
      assert(NetUtil.isIpv4Address("1.2.3.x4") === false)
      assert(NetUtil.isIpv4Address("1.2.3.4x") === false)
      assert(NetUtil.isIpv4Address(" 1.2.3.4") === false)
      assert(NetUtil.isIpv4Address("1.2.3.4 ") === false)
      assert(NetUtil.isIpv4Address(".") === false)
      assert(NetUtil.isIpv4Address("....") === false)
      assert(NetUtil.isIpv4Address("1....") === false)
      assert(NetUtil.isIpv4Address("1.2...") === false)
      assert(NetUtil.isIpv4Address("1.2.3.") === false)
      assert(NetUtil.isIpv4Address(".2.3.4") === false)
    }
    "isPrivate" in {
      // The 10/8, 172.16/12 and 192.168/16 ranges are private; public addresses are not.
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("0.0.0.0")) === false)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("199.59.148.13")) === false)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("10.0.0.0")) === true)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("10.255.255.255")) === true)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("172.16.0.0")) === true)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("172.31.255.255")) === true)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("192.168.0.0")) === true)
      assert(NetUtil.isPrivateAddress(InetAddress.getByName("192.168.255.255")) === true)
    }
    "ipToInt" in {
      // A dotted quad packs big-endian into a 32-bit int; invalid octets throw.
      assert(NetUtil.ipToInt("0.0.0.0") === 0)
      assert(NetUtil.ipToInt("255.255.255.255") === 0xFFFFFFFF)
      assert(NetUtil.ipToInt("255.255.255.0") === 0xFFFFFF00)
      assert(NetUtil.ipToInt("255.0.255.0") === 0xFF00FF00)
      assert(NetUtil.ipToInt("61.197.253.56") === 0x3dc5fd38)
      intercept[IllegalArgumentException] {
        NetUtil.ipToInt("256.0.255.0")
      }
    }
    "inetAddressToInt" in {
      // Same packing for InetAddress values; IPv6 addresses are rejected.
      assert(NetUtil.inetAddressToInt(InetAddress.getByName("0.0.0.0")) === 0)
      assert(NetUtil.inetAddressToInt(InetAddress.getByName("255.255.255.255")) === 0xFFFFFFFF)
      assert(NetUtil.inetAddressToInt(InetAddress.getByName("255.255.255.0")) === 0xFFFFFF00)
      assert(NetUtil.inetAddressToInt(InetAddress.getByName("255.0.255.0")) === 0xFF00FF00)
      assert(NetUtil.inetAddressToInt(InetAddress.getByName("61.197.253.56")) === 0x3dc5fd38)
      intercept[IllegalArgumentException] {
        NetUtil.inetAddressToInt(InetAddress.getByName("::1"))
      }
    }
    "cidrToIpBlock" in {
      // Yields (network, mask); when the prefix length is omitted, the mask
      // covers exactly the octets that were supplied.
      assert(NetUtil.cidrToIpBlock("127") === ((0x7F000000, 0xFF000000)))
      assert(NetUtil.cidrToIpBlock("127.0.0") === ((0x7F000000, 0xFFFFFF00)))
      assert(NetUtil.cidrToIpBlock("127.0.0.1") === ((0x7F000001, 0xFFFFFFFF)))
      assert(NetUtil.cidrToIpBlock("127.0.0.1/1") === ((0x7F000001, 0x80000000)))
      assert(NetUtil.cidrToIpBlock("127.0.0.1/4") === ((0x7F000001, 0xF0000000)))
      assert(NetUtil.cidrToIpBlock("127.0.0.1/32") === ((0x7F000001, 0xFFFFFFFF)))
      assert(NetUtil.cidrToIpBlock("127/24") === ((0x7F000000, 0xFFFFFF00)))
    }
    "isInetAddressInBlock" in {
      val block = NetUtil.cidrToIpBlock("192.168.0.0/16")
      assert(NetUtil.isInetAddressInBlock(InetAddress.getByName("192.168.0.1"), block) === true)
      assert(NetUtil.isInetAddressInBlock(InetAddress.getByName("192.168.255.254"), block) === true)
      assert(NetUtil.isInetAddressInBlock(InetAddress.getByName("192.169.0.1"), block) === false)
    }
    "isIpInBlocks" in {
      // Membership across a mix of full-, partial- and slash-notation blocks;
      // syntactically invalid addresses throw rather than returning false.
      val blocks = Seq(NetUtil.cidrToIpBlock("127"),
        NetUtil.cidrToIpBlock("10.1.1.0/24"),
        NetUtil.cidrToIpBlock("192.168.0.0/16"),
        NetUtil.cidrToIpBlock("200.1.1.1"),
        NetUtil.cidrToIpBlock("200.1.1.2/32"))
      assert(NetUtil.isIpInBlocks("127.0.0.1", blocks) === true)
      assert(NetUtil.isIpInBlocks("128.0.0.1", blocks) === false)
      assert(NetUtil.isIpInBlocks("127.255.255.255", blocks) === true)
      assert(NetUtil.isIpInBlocks("10.1.1.1", blocks) === true)
      assert(NetUtil.isIpInBlocks("10.1.1.255", blocks) === true)
      assert(NetUtil.isIpInBlocks("10.1.0.255", blocks) === false)
      assert(NetUtil.isIpInBlocks("10.1.2.0", blocks) === false)
      assert(NetUtil.isIpInBlocks("192.168.0.1", blocks) === true)
      assert(NetUtil.isIpInBlocks("192.168.255.255", blocks) === true)
      assert(NetUtil.isIpInBlocks("192.167.255.255", blocks) === false)
      assert(NetUtil.isIpInBlocks("192.169.0.0", blocks) === false)
      assert(NetUtil.isIpInBlocks("200.168.0.0", blocks) === false)
      assert(NetUtil.isIpInBlocks("200.1.1.1", blocks) === true)
      assert(NetUtil.isIpInBlocks("200.1.3.1", blocks) === false)
      assert(NetUtil.isIpInBlocks("200.1.1.2", blocks) === true)
      assert(NetUtil.isIpInBlocks("200.1.3.2", blocks) === false)
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("no", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("::127.0.0.1", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("-1.0.0.0", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("256.0.0.0", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("0.256.0.0", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("0.0.256.0", blocks)
      }
      intercept[IllegalArgumentException] {
        NetUtil.isIpInBlocks("0.0.0.256", blocks)
      }
    }
  }
}
| tdyas/util | util-core/src/test/scala/com/twitter/util/NetUtilTest.scala | Scala | apache-2.0 | 7,052 |
package net.badgerhunt.shares.render
import db.DB
import java.util.Date
import javax.servlet.http.HttpSession
import model.{User, Portfolio}
import xml.NodeSeq
import scweery.Scweery._
/**
 * Page that records a stock purchase from request parameters.
 *
 * The string parameters are validated into `price`, `quantity` and
 * `brokerage`; `render` either persists the buy and returns a confirmation
 * fragment, or redirects back to the buy form when any parameter is invalid.
 */
abstract class BuyingStock(val session: HttpSession, code: String, quantityS: String, priceS: String, brokerageS: String) extends Page {

  val user: User
  val portfolio: Portfolio

  val url = "/buying"

  /** Parses `s` as a Double, keeping it only when `valid` accepts it.
    * Uses Try so that only non-fatal failures are swallowed (the original
    * bare `case _ =>` caught every Throwable, including fatal errors). */
  def asDouble(s: String, valid: Double => Boolean): Option[Double] =
    scala.util.Try(s.toDouble).toOption.filter(valid)

  /** Parses `s` as an Int, keeping it only when `valid` accepts it. */
  def asInt(s: String, valid: Int => Boolean): Option[Int] =
    scala.util.Try(s.toInt).toOption.filter(valid)

  // Validated trade parameters; None when missing or invalid.
  val price = asDouble(priceS, _ > 0.0)
  val quantity = asInt(quantityS, _ > 0)
  val brokerage = asDouble(brokerageS, _ >= 0.0)

  /** Persists the purchase and renders a confirmation, or yields the buy-form
    * redirect when any parameter is invalid.
    *
    * Fixes vs. the original: `brokerage` is now validated too (previously
    * `brokerage.get` could throw on invalid input), and the confirmation
    * renders the unwrapped values rather than `Some(...)` Option wrappers.
    */
  def render: Either[String, NodeSeq] =
    (quantity, price, brokerage) match {
      case (Some(q), Some(p), Some(b)) =>
        use(DB.connection) { c =>
          // NOTE(review): SQL assembled by string formatting and `code` comes
          // from request input — vulnerable to SQL injection; switch to a
          // parameterized statement if the DB API supports one.
          val sql = "insert into buys (portfolio_id, company, day, quantity, price, brokerage) values (%s, '%s', '%s', %s, %s, %s)".format(
            portfolio.id, code, DB.dateFormat.format(new Date), q, p, b)
          println(sql)
          c.update(sql)
        }
        Right(<h1>Bought {q} of {code} at {p}</h1>)
      case _ =>
        Left("/buy")
    }
} | Synesso/tofucube | src/main/scala/net/badgerhunt/shares/render/BuyingStock.scala | Scala | bsd-2-clause | 1371 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.apache.org/licenses/LICENSE-2.0
package org.ensime.api
import java.io.File
import java.nio.file.Path
import scala.annotation.StaticAnnotation
/**
 * Indicates that something will be removed.
 *
 * WORKAROUND https://issues.scala-lang.org/browse/SI-7934
 *
 * @param detail optional note about the planned removal / replacement
 */
class deprecating(detail: String = "") extends StaticAnnotation
/** How a symbol was declared in source, tagged with its wire-protocol Symbol name. */
sealed abstract class DeclaredAs(val symbol: scala.Symbol)

object DeclaredAs {
  case object Method extends DeclaredAs('method)
  case object Trait extends DeclaredAs('trait)
  case object Interface extends DeclaredAs('interface)
  case object Object extends DeclaredAs('object)
  case object Class extends DeclaredAs('class)
  case object Field extends DeclaredAs('field)
  case object Nil extends DeclaredAs('nil)

  /** All variants; order is fixed and part of the published sequence. */
  def allDeclarations = Seq(Method, Trait, Interface, Object, Class, Field, Nil)
}
/** An edit to `file`: `text` replaces the character span [`from`, `to`]. */
sealed trait FileEdit extends Ordered[FileEdit] {
  def file: File
  def text: String
  def from: Int
  def to: Int

  // Required as of Scala 2.11 for reasons unknown - the companion to Ordered
  // should already be in implicit scope
  import scala.math.Ordered.orderingToOrdered

  /** Orders edits lexicographically by file, then span, then replacement text. */
  def compare(that: FileEdit): Int =
    (this.file, this.from, this.to, this.text).compare((that.file, that.from, that.to, that.text))
}
/** In-place replacement of a character span within an existing file. */
final case class TextEdit(file: File, from: Int, to: Int, text: String) extends FileEdit

// the next case classes have weird fields because we need the values in the protocol
final case class NewFile(file: File, from: Int, to: Int, text: String) extends FileEdit
object NewFile {
  // NOTE(review): for empty `text` this yields to = -1 — confirm the protocol expects that.
  def apply(file: File, text: String): NewFile = new NewFile(file, 0, text.length - 1, text)
}
final case class DeleteFile(file: File, from: Int, to: Int, text: String) extends FileEdit
object DeleteFile {
  // Same span convention as NewFile: the whole current content of the file.
  def apply(file: File, text: String): DeleteFile = new DeleteFile(file, 0, text.length - 1, text)
}
/** Severity of a compiler note: error, warning or informational. */
sealed trait NoteSeverity
case object NoteError extends NoteSeverity
case object NoteWarn extends NoteSeverity
case object NoteInfo extends NoteSeverity
object NoteSeverity {
  /**
   * Maps a numeric severity (2=error, 1=warn, 0=info) to its ADT value.
   *
   * @throws IllegalArgumentException for any other value; previously an
   *         unmatched value raised an opaque MatchError.
   */
  def apply(severity: Int): NoteSeverity = severity match {
    case 2 => NoteError
    case 1 => NoteWarn
    case 0 => NoteInfo
    case other => throw new IllegalArgumentException(s"Unknown note severity: $other")
  }
}
/** Fields of a refactoring request, tagged with their wire-protocol Symbol key. */
sealed abstract class RefactorLocation(val symbol: Symbol)
object RefactorLocation {
  case object QualifiedName extends RefactorLocation('qualifiedName)
  case object File extends RefactorLocation('file)
  case object NewName extends RefactorLocation('newName)
  case object Name extends RefactorLocation('name)
  case object Start extends RefactorLocation('start)
  case object End extends RefactorLocation('end)
  case object MethodName extends RefactorLocation('methodName)
}
/** Supported refactoring operations, tagged with their wire-protocol Symbol name. */
sealed abstract class RefactorType(val symbol: Symbol)
object RefactorType {
  case object Rename extends RefactorType('rename)
  case object ExtractMethod extends RefactorType('extractMethod)
  case object ExtractLocal extends RefactorType('extractLocal)
  case object InlineLocal extends RefactorType('inlineLocal)
  case object OrganizeImports extends RefactorType('organizeImports)
  case object AddImport extends RefactorType('addImport)

  /** All supported operations. */
  def allTypes = Seq(Rename, ExtractMethod, ExtractLocal, InlineLocal, OrganizeImports, AddImport)
}
/**
* Represents a source file that has a physical location (either a
* file or an archive entry) with (optional) up-to-date information in
* another file, or as a String.
*
* Clients using a wire protocol should prefer `contentsIn` for
* performance (string escaping), whereas in-process clients should
* use the `contents` variant.
*
* If both contents and contentsIn are provided, contents is
* preferred.
*/
final case class SourceFileInfo(
  file: EnsimeFile,                  // physical location (file or archive entry)
  contents: Option[String] = None,   // up-to-date contents, if unsaved
  contentsIn: Option[File] = None    // alternative: contents held in another file
) {
  // keep the log file sane for unsaved files
  override def toString = s"SourceFileInfo($file,${contents.map(_ => "...")},$contentsIn)"
}
/** A character offset range [from, to] within a source file. */
final case class OffsetRange(from: Int, to: Int)

@deprecating("move all non-model code out of the api")
object OffsetRange extends ((Int, Int) => OffsetRange) {
  /** A degenerate single-point range at `fromTo`. */
  def apply(fromTo: Int): OffsetRange = new OffsetRange(fromTo, fromTo)
}
/** A source location: either a plain file on disk or an entry inside an archive. */
sealed trait EnsimeFile

/** A plain file on the local filesystem. */
final case class RawFile(file: Path) extends EnsimeFile

/**
 * @param jar the container of entry (in nio terms, the FileSystem)
 * @param entry is relative to the container (this needs to be loaded by a FileSystem to be usable)
 */
final case class ArchiveFile(jar: Path, entry: String) extends EnsimeFile
| espinhogr/ensime-server | api/src/main/scala/org/ensime/api/common.scala | Scala | gpl-3.0 | 4,524 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import java.security.MessageDigest
/** Singleton exposing [[Encryption]]'s hashing without requiring a mixin. */
object Encryption extends Encryption

/** Password-hashing helpers. */
trait Encryption {

  /**
   * Computes the SHA-512 digest of `plainpassword` and returns it as a
   * lower-case hexadecimal string (128 hex characters).
   *
   * Fixes vs. the original: the input is encoded as UTF-8 explicitly (the
   * original used the platform default charset, making digests
   * environment-dependent), and the var-based recursive hex builder is
   * replaced by the idiomatic map/mkString pipeline.
   *
   * @param plainpassword the plain-text password to hash
   * @return the hex-encoded SHA-512 digest
   */
  def sha512(plainpassword: String): String = {
    val sha512 = MessageDigest.getInstance("SHA-512")
    val passhash: Array[Byte] = sha512.digest(plainpassword.getBytes("UTF-8"))
    passhash.map("%02x".format(_)).mkString
  }
}
| chrisjwwalker/cjww-diagnostics | app/security/Encryption.scala | Scala | apache-2.0 | 1,437 |
package com.sksamuel.scrimage
import org.scalatest.{FunSuite, Matchers}
/** @author Stephen Samuel */
/** Unit tests for PixelTools' ARGB component extraction and offset arithmetic. */
class PixelToolsTest extends FunSuite with Matchers {

  // ARGB constants matching the corresponding java.awt.Color values.
  val white = 0xFFFFFFFF
  val yellow = 0xFFFFFF00
  val pink = 0xFFFFAFAF

  test("non transparent alpha component") {
    assert(white === java.awt.Color.WHITE.getRGB)
    assert(PixelTools.alpha(white) === 255)
  }
  test("transparent alpha component") {
    assert(PixelTools.alpha(0xDD001122) === 221)
  }
  test("red component") {
    assert(pink === java.awt.Color.PINK.getRGB)
    assert(PixelTools.red(yellow) === 255)
    assert(PixelTools.red(pink) === 255)
  }
  test("blue component") {
    assert(pink === java.awt.Color.PINK.getRGB)
    assert(PixelTools.blue(yellow) === 0)
    assert(PixelTools.blue(pink) === 175)
  }
  test("green component") {
    assert(yellow === java.awt.Color.YELLOW.getRGB)
    assert(PixelTools.green(yellow) === 255)
    assert(PixelTools.green(pink) === 175)
  }
  test("rgb combination") {
    assert(0xFFFF00FF === PixelTools.rgb(255, 0, 255))
    assert(0xFF556677 === PixelTools.rgb(85, 102, 119))
  }
  // (x, y) with a row width maps to a flat offset and back.
  test("coordinate to offset") {
    assert(160 === PixelTools.coordinateToOffset(10, 3, 50))
    assert(10 === PixelTools.coordinateToOffset(10, 0, 50))
    assert(99 === PixelTools.coordinateToOffset(49, 1, 50))
  }
  test("offset to coordinate") {
    PixelTools.offsetToCoordinate(0, 100) shouldBe 0 -> 0
    PixelTools.offsetToCoordinate(100, 100) shouldBe 0 -> 1
    PixelTools.offsetToCoordinate(99, 100) shouldBe 99 -> 0
    PixelTools.offsetToCoordinate(199, 100) shouldBe 99 -> 1
    PixelTools.offsetToCoordinate(101, 100) shouldBe 1 -> 1
  }
}
| carlosFattor/scrimage | scrimage-core/src/test/scala/com/sksamuel/scrimage/PixelToolsTest.scala | Scala | apache-2.0 | 1,673 |
package lila.study
import chess.format.FEN
import lila.game.{ Namer, Pov }
import lila.user.User
/**
 * Builds a [[Study.WithChapter]] for a user, either from scratch or seeded
 * from an existing game (when the import form carries a game id).
 */
final private class StudyMaker(
    lightUserApi: lila.user.LightUserApi,
    gameRepo: lila.game.GameRepo,
    chapterMaker: ChapterMaker,
    pgnDump: lila.game.PgnDump
)(implicit ec: scala.concurrent.ExecutionContext) {

  /** Creates the study: game-seeded when the named game exists, otherwise from scratch. */
  def apply(data: StudyMaker.ImportGame, user: User): Fu[Study.WithChapter] =
    (data.form.gameId ?? gameRepo.gameWithInitialFen).flatMap {
      case Some((game, initialFen)) => createFromPov(data, Pov(game, data.form.orientation), initialFen, user)
      case None => createFromScratch(data, user)
    } map { sc =>
      // apply specified From if any
      sc.copy(study = sc.study.copy(from = data.from | sc.study.from))
    }

  // Fresh study with a single first chapter built from the form's FEN/PGN
  // inputs (or left blank when neither is supplied).
  private def createFromScratch(data: StudyMaker.ImportGame, user: User): Fu[Study.WithChapter] = {
    val study = Study.make(user, Study.From.Scratch, data.id, data.name, data.settings)
    chapterMaker.fromFenOrPgnOrBlank(
      study,
      ChapterMaker.Data(
        game = none,
        name = Chapter.Name("Chapter 1"),
        variant = data.form.variantStr,
        fen = data.form.fen,
        pgn = data.form.pgnStr,
        orientation = data.form.orientation.name,
        mode = ChapterMaker.Mode.Normal.key,
        initial = true
      ),
      order = 1,
      userId = user.id
    ) map { chapter =>
      Study.WithChapter(study withChapter chapter, chapter)
    }
  }

  // Study seeded from an existing game: the game is replayed into the chapter
  // tree and the chapter is named after the players.
  private def createFromPov(
      data: StudyMaker.ImportGame,
      pov: Pov,
      initialFen: Option[FEN],
      user: User
  ): Fu[Study.WithChapter] = {
    for {
      root <- chapterMaker.game2root(pov.game, initialFen)
      tags <- pgnDump.tags(pov.game, initialFen, none, withOpening = true, withRating = true)
      name <- Namer.gameVsText(pov.game, withRatings = false)(lightUserApi.async) dmap Chapter.Name.apply
      study = Study.make(user, Study.From.Game(pov.gameId), data.id, Study.Name("Game study").some)
      chapter = Chapter.make(
        studyId = study.id,
        name = name,
        setup = Chapter.Setup(
          gameId = pov.gameId.some,
          variant = pov.game.variant,
          orientation = pov.color
        ),
        root = root,
        tags = PgnTags(tags),
        order = 1,
        ownerId = user.id,
        practice = false,
        gamebook = false,
        conceal = None
      )
    } yield {
      Study.WithChapter(study withChapter chapter, chapter)
    }
  } addEffect { swc =>
    // Fire-and-forget side effect; presumably posts a chat notification for
    // the source game — confirm against ChapterMaker.notifyChat.
    chapterMaker.notifyChat(swc.study, pov.game, user.id)
  }
}
object StudyMaker {

  /** Parameters for creating a study, optionally seeded from an existing game.
    * Every field defaults to "unspecified" and may be filled in by the caller. */
  case class ImportGame(
      form: StudyForm.importGame.Data = StudyForm.importGame.Data(),
      id: Option[Study.Id] = None,
      name: Option[Study.Name] = None,
      settings: Option[Settings] = None,
      from: Option[Study.From] = None
  )
}
| luanlv/lila | modules/study/src/main/StudyMaker.scala | Scala | mit | 2,846 |
/*
Copyright 2012 Georgia Tech Research Institute
Author: lance.gatlin@gtri.gatech.edu
This file is part of org.gtri.util.iteratee library.
org.gtri.util.iteratee library is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
org.gtri.util.iteratee library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with org.gtri.util.iteratee library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gtri.util.iteratee.impl.plan3
import org.gtri.util.iteratee.api
import api.ImmutableBuffer
import org.gtri.util.issue.api.Issue
import org.gtri.util.iteratee.impl.enumerators._
/** Outcome of advancing a plan state: the next state, produced output,
  * unconsumed (overflow) input, and any issues raised. */
case class Result[I1,I2,O](
  next : api.Plan3.State[I1,I2,O],
  output : ImmutableBuffer[O],
  overflow : ImmutableBuffer[I2],
  issues : ImmutableBuffer[Issue]
) extends api.Plan3.State.Result[I1,I2,O] {
  // Delegate to the API's debug rendering helper.
  override def toString = this.toDebugString
} | gtri-iead/org.gtri.util.iteratee | impl/src/main/scala/org/gtri/util/iteratee/impl/plan3/Result.scala | Scala | gpl-3.0 | 1342 |
package scaffvis.configuration
import better.files.File
/**
* Define default database files locations and extract them from the environment variables.
*/
/**
 * Default database file locations, overridable through the environment
 * variables `SCAFFVIS_PUBCHEM_DIR` and `SCAFFVIS_HIERARCHY_DIR`.
 */
object Locations {

  /**
   * The location of PubChem files to be imported.
   * Defaults to ./pubchem; a missing directory is reported but still returned.
   */
  lazy val pubchemSdfDirectory = {
    val dirPath: String = sys.env.getOrElse("SCAFFVIS_PUBCHEM_DIR", "pubchem")
    val dir = File(dirPath)
    if(!dir.exists)
      println(s"Pubchem source directory ${dir.pathAsString} does not exist")
    dir
  }

  /**
   * All gzipped SDF files anywhere below the PubChem directory.
   *
   * Fix: the previous pattern was `**` followed by escaped backslashes, which
   * (in NIO glob syntax, as used by better-files) matches literal backslash
   * characters and could never match `*.sdf.gz` files in subdirectories.
   */
  lazy val pubchemSdfFiles = pubchemSdfDirectory.glob("**/*.sdf.gz").toVector

  /**
   * Directory containing the internal database files.
   * Defaults to ./hierarchy; a missing directory is reported but still returned.
   */
  lazy val hierarchyStoreDirectory = {
    val dirPath: String = sys.env.getOrElse("SCAFFVIS_HIERARCHY_DIR", "hierarchy")
    val dir = File(dirPath)
    if(!dir.exists)
      println(s"Hierarchy store directory ${dir.pathAsString} does not exist")
    dir
  }

  // MapDB store files kept inside the hierarchy directory.
  lazy val pubchemStore = hierarchyStoreDirectory / "pubchem.mapdb"
  lazy val scaffoldHierarchyStore = hierarchyStoreDirectory / "scaffoldHierarchy.mapdb"
  lazy val processingHierarchyStore = hierarchyStoreDirectory / "processingHierarchy.mapdb"
}
| velkoborsky/scaffvis | generator/src/main/scala/scaffvis/configuration/Locations.scala | Scala | gpl-3.0 | 1,162 |
/*
* Licensed under the Apache License, Version 2.0
* Copyright 2010-2012 Coda Hale and Yammer, Inc.
* https://github.com/dropwizard/metrics/blob/v3.1.0/LICENSE
*/
package org.http4s.server.jetty
import com.codahale.metrics.Counter
import com.codahale.metrics.Meter
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.RatioGauge
import com.codahale.metrics.RatioGauge._
import com.codahale.metrics.Timer
import org.eclipse.jetty.http.HttpMethod
import org.eclipse.jetty.server.AsyncContextState
import org.eclipse.jetty.server.Handler
import org.eclipse.jetty.server.HttpChannelState
import org.eclipse.jetty.server.Request
import org.eclipse.jetty.server.handler.HandlerWrapper
import javax.servlet.AsyncEvent
import javax.servlet.AsyncListener
import javax.servlet.ServletException
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import java.io.IOException
import java.util.concurrent.TimeUnit
import HttpMethod._
import com.codahale.metrics.MetricRegistry.{name => registryName}
/**
* A Jetty Handler which records various metrics about an underlying `Handler`
* instance.
*
* <a href="https://github.com/http4s/http4s/issues/204">See http4s/http4s#204</a>.
*/
protected final case class InstrumentedHandler(registry:MetricRegistry , _prefix:Option[String] = None) extends HandlerWrapper {
  // Handler name used in the metric prefix; assumed to be assigned externally
  // before doStart() is called — TODO confirm with the wiring code.
  var name:String = null

  // Helpers that register a metric under this handler's prefix.
  private val registerTimer:String => Timer = name => registry.timer(registryName(prefix, name))
  private val registerMeter:String => Meter = name => registry.meter(registryName(prefix, name))
  private val registerCounter:String => Counter = name => registry.counter(registryName(prefix, name))

  // Listener attached to suspended (async) requests so their completion is
  // still counted after handle() has returned.
  private val listener:AsyncListener = new AsyncListener() {
    def onTimeout(event:AsyncEvent ) = asyncTimeouts.mark()
    def onStartAsync(event:AsyncEvent) = event.getAsyncContext().addListener(this)
    def onError(event:AsyncEvent ) = ()
    def onComplete(event:AsyncEvent) = {
      val state:AsyncContextState = event.getAsyncContext().asInstanceOf[AsyncContextState]
      val request:Request = state.getRequest().asInstanceOf[Request]
      updateResponses(request)
      // Only decrement the suspended counter when the channel is no longer
      // dispatched; a dispatched channel is accounted for in handle().
      if (state.getHttpChannelState().getState() != HttpChannelState.State.DISPATCHED) {
        activeSuspended.dec()
      }
    }
  }

  // Metric name prefix: the explicit prefix if given, otherwise derived from
  // the wrapped handler's class. Lazy so that `name` is read at first use.
  private lazy val prefix = _prefix.fold(registryName(getHandler().getClass(), name) )(p => registryName(p, name))

  // the requests handled by this handler, excluding active
  private lazy val requests = registerTimer("requests")
  // the number of dispatches seen by this handler, excluding active
  private lazy val dispatches = registerTimer("dispatches")
  // the number of active requests
  private lazy val activeRequests = registerCounter("active-requests")
  // the number of active dispatches
  private lazy val activeDispatches = registerCounter("active-dispatches")
  // the number of requests currently suspended.
  private lazy val activeSuspended = registerCounter("active-suspended")
  // the number of requests that have been asynchronously dispatched
  private lazy val asyncDispatches = registerMeter("async-dispatches")
  // the number of requests that expired while suspended
  private lazy val asyncTimeouts = registerMeter( "async-timeouts")
  // per-status-class response meters, indexed by (status / 100) - 1
  private lazy val responses = Array[Meter](
    registerMeter( "1xx-responses"), // 1xx
    registerMeter( "2xx-responses"), // 2xx
    registerMeter( "3xx-responses"), // 3xx
    registerMeter( "4xx-responses"), // 4xx
    registerMeter( "5xx-responses")) // 5xx
  // per-HTTP-method request timers
  private lazy val getRequests = registerTimer("get-requests")
  private lazy val postRequests = registerTimer( "post-requests")
  private lazy val headRequests = registerTimer( "head-requests")
  private lazy val putRequests = registerTimer( "put-requests")
  private lazy val deleteRequests = registerTimer( "delete-requests")
  private lazy val optionsRequests = registerTimer( "options-requests")
  private lazy val traceRequests = registerTimer( "trace-requests")
  private lazy val connectRequests = registerTimer( "connect-requests")
  private lazy val moveRequests = registerTimer( "move-requests")
  private lazy val otherRequests = registerTimer( "other-requests")

  // Registers ratio gauges (4xx/5xx rate over total request rate at 1/5/15
  // minute windows) once the handler is started.
  override protected def doStart: Unit = {
    super.doStart()
    registry.register(registryName(prefix, "percent-4xx-1m"), new RatioGauge() {
      protected def getRatio() = {
        Ratio.of(responses(3).getOneMinuteRate(),
          requests.getOneMinuteRate())
      }
    })
    registry.register(registryName(prefix, "percent-4xx-5m"), new RatioGauge() {
      protected def getRatio() =
        Ratio.of(responses(3).getFiveMinuteRate(),requests.getFiveMinuteRate())
    })
    registry.register(registryName(prefix, "percent-4xx-15m"), new RatioGauge() {
      protected def getRatio() =
        Ratio.of(responses(3).getFifteenMinuteRate(),requests.getFifteenMinuteRate())
    })
    registry.register(registryName(prefix, "percent-5xx-1m"), new RatioGauge() {
      protected def getRatio() =
        Ratio.of(responses(4).getOneMinuteRate(),requests.getOneMinuteRate())
    })
    registry.register(registryName(prefix, "percent-5xx-5m"), new RatioGauge() {
      protected def getRatio() =
        Ratio.of(responses(4).getFiveMinuteRate(),requests.getFiveMinuteRate())
    })
    registry.register(registryName(prefix, "percent-5xx-15m"), new RatioGauge() {
      protected def getRatio() =
        Ratio.of(responses(4).getFifteenMinuteRate(),requests.getFifteenMinuteRate())
    })
    // discard the last register(...) return value so the method is Unit
    ()
  }

  // Wraps the delegate's handle() with metric bookkeeping for both fresh and
  // resumed (async) dispatches.
  override def handle(path:String,
    request:Request,
    httpRequest:HttpServletRequest,
    httpResponse:HttpServletResponse):Unit = {
    activeDispatches.inc()
    val state = request.getHttpChannelState()
    val start = if (state.isInitial()) {
      // new request
      activeRequests.inc()
      request.getTimeStamp()
    } else {
      // resumed request
      activeSuspended.dec()
      if (state.getState() == HttpChannelState.State.DISPATCHED) {
        asyncDispatches.mark()
      }
      System.currentTimeMillis()
    }
    try {
      super.handle(path, request, httpRequest, httpResponse)
    } finally {
      val now = System.currentTimeMillis()
      val dispatched = now - start
      activeDispatches.dec()
      dispatches.update(dispatched, TimeUnit.MILLISECONDS)
      if (state.isSuspended()) {
        if (state.isInitial()) {
          // First suspension of this request: track its completion via listener.
          state.addListener(listener)
        }
        activeSuspended.inc()
      } else if (state.isInitial()) {
        // Synchronous request finished entirely within this dispatch.
        updateResponses(request)
      }
      // else onCompletion will handle it.
    }
  }

  // Maps an HTTP method name to its dedicated timer; unknown methods fall
  // through to otherRequests.
  private def requestTimer( method:String) = HttpMethod.fromString(method) match {
    case GET => getRequests
    case POST => postRequests
    case PUT => putRequests
    case HEAD => headRequests
    case DELETE => deleteRequests
    case OPTIONS => optionsRequests
    case TRACE => traceRequests
    case CONNECT => connectRequests
    case MOVE => moveRequests
    case default => otherRequests
  }

  // Records the response status class, total/per-method latency, and closes
  // out the active-requests count for a finished request.
  private def updateResponses(request:Request) = {
    val response = request.getResponse().getStatus() / 100
    if (response >= 1 && response <= 5) {
      responses(response - 1).mark()
    }
    activeRequests.dec()
    val elapsedTime = System.currentTimeMillis() - request.getTimeStamp()
    requests.update(elapsedTime, TimeUnit.MILLISECONDS)
    requestTimer(request.getMethod()).update(elapsedTime, TimeUnit.MILLISECONDS)
  }
}
| hvesalai/http4s | jetty/src/main/scala/org/http4s/server/jetty/InstrumentedHandler.scala | Scala | apache-2.0 | 7,909 |
package org.openmole.gui.client.tool.plot
import org.openmole.plotlyjs._
import org.openmole.plotlyjs.all._
import org.openmole.plotlyjs.PlotlyImplicits._
import org.openmole.plotlyjs.plotlyConts._
import scala.scalajs.js.JSConverters._
/** One plot dimension: a column of string values plus an optional axis label. */
case class Dim(values: Seq[String], label: String = "") {
  // Build the plotly.js Dimension, converting the values to a JS array.
  def toDimension = Dimension.values(values.toJSArray).label(label)
}
/**
 * One data serie of a plot: the x dimension, any number of y dimensions, and
 * the plotly builders controlling trace type, marker style and colour scale.
 *
 * @param xValues         dimension used for the x axis (empty by default)
 * @param yValues         dimensions plotted against x
 * @param plotDataBuilder plotly trace builder; defaults to a line chart
 * @param markerBuilder   marker builder; defaults to the cross symbol
 * @param colorScale      colour scale; defaults to blues
 */
case class Serie(
  // dimensionSize: Int = 0,
  xValues: Dim = Dim(Seq()),
  yValues: Array[Dim] = Array(),
  plotDataBuilder: PlotDataBuilder = linechart.lines,
  markerBuilder: PlotMarkerBuilder = marker.symbol(cross),
  colorScale: ColorScale = colorscale.blues
) | openmole/openmole | openmole/gui/client/org.openmole.gui.client.tool/src/main/scala/org/openmole/gui/client/tool/plot/Serie.scala | Scala | agpl-3.0 | 705 |
/*
* Copyright (c) 2013, Scodec
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scodec
package codecs
import scodec.bits.BitVector
/**
 * A codec that always fails, with distinct errors for the two directions.
 *
 * @param encErr error returned by every `encode`
 * @param decErr error returned by every `decode`
 */
private[codecs] final class FailCodec[A](encErr: Err, decErr: Err) extends Codec[A]:
  // No size can be stated for a codec that never succeeds.
  override def sizeBound = SizeBound.unknown
  override def encode(a: A) = Attempt.failure(encErr)
  override def decode(b: BitVector) = Attempt.failure(decErr)
  override def toString = "fail"
| scodec/scodec | shared/src/main/scala/scodec/codecs/FailCodec.scala | Scala | bsd-3-clause | 1,920 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import com.netflix.atlas.core.stacklang.SimpleWord
import com.netflix.atlas.core.stacklang.StandardVocabulary
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.stacklang.Word
/**
 * Stack-language vocabulary of query words (`:eq`, `:re`, `:and`, ...) used to
 * build [[Query]] expressions for matching time series.
 */
object QueryVocabulary extends Vocabulary {

  import com.netflix.atlas.core.model.ModelExtractors._

  // Vocabulary identifier.
  val name: String = "query"

  // Vocabularies whose words must also be available.
  val dependsOn: List[Vocabulary] = List(StandardVocabulary)

  // All words contributed by this vocabulary.
  val words: List[Word] = List(
    True,
    False,
    HasKey,
    Equal,
    LessThan,
    LessThanEqual,
    GreaterThan,
    GreaterThanEqual,
    Regex,
    RegexIgnoreCase,
    In,
    And,
    Or,
    Not
  )

  // Pushes the constant Query.True onto the stack.
  case object True extends SimpleWord {
    override def name: String = "true"
    protected def matcher: PartialFunction[List[Any], Boolean] = { case _ => true }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case s => Query.True :: s
    }
    override def summary: String =
      """
        |Query expression that matches all input time series.
      """.stripMargin.trim
    override def signature: String = " -- Query"
    override def examples: List[String] = List("")
  }

  // Pushes the constant Query.False onto the stack.
  case object False extends SimpleWord {
    override def name: String = "false"
    protected def matcher: PartialFunction[List[Any], Boolean] = { case _ => true }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case s => Query.False :: s
    }
    override def summary: String =
      """
        |Query expression that will not match any input time series.
      """.stripMargin.trim
    override def signature: String = " -- Query"
    override def examples: List[String] = List("")
  }

  // Consumes a key string and pushes a has-key query.
  case object HasKey extends SimpleWord {
    override def name: String = "has"
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: String) :: _ => true
    }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case (k: String) :: s => Query.HasKey(k) :: s
    }
    override def summary: String =
      """
        |Query expression that matches time series with `tags.contains(k)`.
      """.stripMargin.trim
    override def signature: String = "k:String -- Query"
    override def examples: List[String] = List("a", "name", "ERROR:")
  }

  // Common shape for words that consume a key and a value and push a query;
  // subclasses only provide the query constructor.
  trait KeyValueWord extends SimpleWord {
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: String) :: (_: String) :: _ => true
    }
    def newInstance(k: String, v: String): Query
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case (v: String) :: (k: String) :: s => newInstance(k, v) :: s
    }
    override def signature: String = "k:String v:String -- Query"
    override def examples: List[String] = List(
      "a,b",
      "nf.node,silverlight-003e",
      "ERROR:name")
  }

  case object Equal extends KeyValueWord {
    override def name: String = "eq"
    def newInstance(k: String, v: String): Query = Query.Equal(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] == v`.
      """.stripMargin.trim
  }

  case object LessThan extends KeyValueWord {
    override def name: String = "lt"
    def newInstance(k: String, v: String): Query = Query.LessThan(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] < v`.
      """.stripMargin.trim
  }

  case object LessThanEqual extends KeyValueWord {
    override def name: String = "le"
    def newInstance(k: String, v: String): Query = Query.LessThanEqual(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] <= v`.
      """.stripMargin.trim
  }

  case object GreaterThan extends KeyValueWord {
    override def name: String = "gt"
    def newInstance(k: String, v: String): Query = Query.GreaterThan(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] > v`.
      """.stripMargin.trim
  }

  case object GreaterThanEqual extends KeyValueWord {
    override def name: String = "ge"
    def newInstance(k: String, v: String): Query = Query.GreaterThanEqual(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] >= v`.
      """.stripMargin.trim
  }

  case object Regex extends KeyValueWord {
    override def name: String = "re"
    def newInstance(k: String, v: String): Query = Query.Regex(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] =~ /^v/`.
        |
        |> :warning: Regular expressions without a clear prefix force a full scan and should be
        |avoided.
      """.stripMargin.trim
    override def examples: List[String] = List(
      "name,DiscoveryStatus_(UP|DOWN)",
      "name,discoverystatus_(Up|Down)",
      "ERROR:name")
  }

  case object RegexIgnoreCase extends KeyValueWord {
    override def name: String = "reic"
    def newInstance(k: String, v: String): Query = Query.RegexIgnoreCase(k, v)
    override def summary: String =
      """
        |Query expression that matches time series with `tags[k] =~ /^v/i`.
        |
        |> :warning: This operation requires a full scan and should be avoided it at all
        |possible.
      """.stripMargin.trim
    override def examples: List[String] = List(
      "name,DiscoveryStatus_(UP|DOWN)",
      "name,discoverystatus_(Up|Down)",
      "ERROR:name")
  }

  // Consumes a key and a list of values; degenerate lists are simplified at
  // construction time (empty -> False, single value -> Equal).
  case object In extends SimpleWord {
    override def name: String = "in"
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: List[_]) :: (_: String) :: _ => true
    }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case Nil :: (k: String) :: s => Query.False :: s
      case ((v: String) :: Nil) :: (k: String) :: s => Query.Equal(k, v) :: s
      case StringListType(vs) :: (k: String) :: s => Query.In(k, vs) :: s
    }
    override def summary: String =
      """
        |Query expression that matches time series with `vs.contains(tags[k])`.
      """.stripMargin.trim
    override def signature: String = "k:String vs:List -- Query"
    override def examples: List[String] = List(
      "name,(,sps,)",
      "name,(,requestsPerSecond,sps,)",
      "name,(,)",
      "ERROR:name,sps")
  }

  // Logical AND of the two queries on top of the stack, folding away the
  // True/False constants instead of building a redundant And node.
  case object And extends SimpleWord {
    override def name: String = "and"
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: Query) :: (_: Query) :: _ => true
    }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case (_: Query) :: Query.False :: s => Query.False :: s
      case Query.False :: (_: Query) :: s => Query.False :: s
      case (q: Query) :: Query.True :: s => q :: s
      case Query.True :: (q: Query) :: s => q :: s
      case (q2: Query) :: (q1: Query) :: s => Query.And(q1, q2) :: s
    }
    override def summary: String =
      """
        |Query expression that will match iff both sub queries match.
      """.stripMargin.trim
    override def signature: String = "Query Query -- Query"
    override def examples: List[String] =
      List(":false,:false", ":false,:true", ":true,:false", ":true,:true")
  }

  // Logical OR of the two queries on top of the stack, with the same
  // constant-folding as And.
  case object Or extends SimpleWord {
    override def name: String = "or"
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: Query) :: (_: Query) :: _ => true
    }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case (q: Query) :: Query.False :: s => q :: s
      case Query.False :: (q: Query) :: s => q :: s
      case (_: Query) :: Query.True :: s => Query.True :: s
      case Query.True :: (_: Query) :: s => Query.True :: s
      case (q2: Query) :: (q1: Query) :: s => Query.Or(q1, q2) :: s
    }
    override def summary: String =
      """
        |Query expression that will match if either sub query matches.
      """.stripMargin.trim
    override def signature: String = "Query Query -- Query"
    override def examples: List[String] =
      List(":false,:false", ":false,:true", ":true,:false", ":true,:true")
  }

  // Negates the query on top of the stack; constants are inverted directly.
  case object Not extends SimpleWord {
    override def name: String = "not"
    protected def matcher: PartialFunction[List[Any], Boolean] = {
      case (_: Query) :: _ => true
    }
    protected def executor: PartialFunction[List[Any], List[Any]] = {
      case Query.False :: s => Query.True :: s
      case Query.True :: s => Query.False :: s
      case (q: Query) :: s => Query.Not(q) :: s
    }
    override def summary: String =
      """
        |Query expression that will match if the sub query doesn't match.
      """.stripMargin.trim
    override def signature: String = "Query -- Query"
    override def examples: List[String] = List(":false", ":true")
  }
}
| jasimmk/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/model/QueryVocabulary.scala | Scala | apache-2.0 | 9,540 |
package org.scalaide.core.internal.quickassist
package changecase
import scala.reflect.internal.util.RangePosition
import org.eclipse.jface.text.IDocument
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.core.internal.quickassist.RelevanceValues
import org.scalaide.core.internal.statistics.Features.FixSpellingMistake
import org.scalaide.core.quickassist.BasicCompletionProposal
import org.scalaide.ui.ScalaImages
/*
* Find another member with the same spelling but different capitalization.
* Eg "asdf".subString would offer to change it to .substring instead.
*/
/**
 * Quick-assist proposal that replaces a misspelled member name with an
 * existing member differing only in capitalization (e.g. subString -> substring).
 *
 * @param originalName the name as currently written in the source
 * @param newName      the replacement with the existing capitalization
 * @param offset       start offset of the region ending with the name
 * @param length       length of that region
 */
case class ChangeCaseProposal(originalName: String, newName: String, offset: Int, length: Int) extends BasicCompletionProposal(
  feature = FixSpellingMistake,
  relevance = RelevanceValues.ChangeCaseProposal,
  displayString = s"Change to '${newName}'",
  image = ScalaImages.CORRECTION_RENAME.createImage()) {

  override def applyProposal(document: IDocument): Unit = {
    // The misspelled name occupies the last originalName.length chars of the region.
    val o = offset + length - originalName.length
    document.replace(o, originalName.length, newName)
  }
}
object ChangeCaseProposal {
  import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits._

  /**
   * Proposals for a member selection (e.g. `x.subString`): type-checks the
   * receiver range to enumerate its members and keeps those matching
   * `wrongName` case-insensitively.
   */
  def createProposals(icu: InteractiveCompilationUnit, offset: Int, length: Int, wrongName: String): List[ChangeCaseProposal] = {
    // Member names of the type covering [start, end); empty when the type
    // cannot be determined or is an error type.
    def membersAtRange(start: Int, end: Int): List[String] = {
      val memberNames = icu.withSourceFile { (srcFile, compiler) =>
        compiler asyncExec {
          val length = end - start
          /*
           * I wish we could use askTypeCompletion (similar to createProposalsWithCompletion),
           * but because of the error the compiler won't give it to us.
           * Because of this, a limitation is that we can't fix the capitalization when it must
           * be found via implicit conversion.
           */
          val context = compiler.doLocateContext(new RangePosition(srcFile, start, start, start + length))
          val tree = compiler.locateTree(new RangePosition(srcFile, start, start, start + length))
          val typer = compiler.analyzer.newTyper(context)
          val typedTree = typer.typed(tree)
          val tpe = typedTree.tpe.resultType.underlying
          if (tpe.isError) Nil else tpe.members.map(_.nameString).toList.distinct
        } getOption()
      }
      memberNames.flatten.getOrElse(Nil)
    }
    // Range of the receiver expression: everything before ".<wrongName>".
    val memberNames = membersAtRange(offset, offset + length - wrongName.length - 1)
    makeProposals(memberNames, wrongName, offset, length)
  }

  /**
   * Proposals for a plain identifier: uses scope completion at `offset` to
   * enumerate visible names and keeps case-insensitive matches of `wrongName`.
   */
  def createProposalsWithCompletion(icu: InteractiveCompilationUnit, offset: Int, length: Int, wrongName: String): List[ChangeCaseProposal] = {
    // Names of all symbols in scope at the given offset.
    def membersAtPosition(offset: Int): List[String] = {
      val memberNames = icu.withSourceFile { (srcFile, compiler) =>
        compiler.asyncExec {
          val completed = compiler.askScopeCompletion(new RangePosition(srcFile, offset, offset, offset))
          completed.getOrElse(Nil)().map(_.sym.nameString).distinct
        } getOption()
      }
      memberNames.flatten.getOrElse(Nil)
    }
    val memberNames = membersAtPosition(offset)
    makeProposals(memberNames, wrongName, offset, length)
  }

  // One proposal per candidate whose name equals wrongName ignoring case.
  private def makeProposals(memberNames: List[String], wrongName: String, offset: Int, length: Int): List[ChangeCaseProposal] = {
    val matchingMembers = memberNames.filter(_.equalsIgnoreCase(wrongName))
    for (newName <- matchingMembers) yield ChangeCaseProposal(wrongName, newName, offset, length)
  }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/quickassist/changecase/ChangeCaseProposal.scala | Scala | bsd-3-clause | 3,501 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{execution, AnalysisException, Strategy}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.MemoryPlanV2
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery}
import org.apache.spark.sql.types.StructType
/**
* Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting
* with the query planner and is not designed to be stable across spark releases. Developers
* writing libraries should instead consider using the stable APIs provided in
* [[org.apache.spark.sql.sources]]
*/
abstract class SparkStrategy extends GenericStrategy[SparkPlan] {
  // Defer planning of a logical subtree by wrapping it in a PlanLater node,
  // to be replaced with a real physical plan in a later planning pass.
  override protected def planLater(plan: LogicalPlan): SparkPlan = PlanLater(plan)
}
/**
 * Placeholder physical node for a logical subtree that has not been planned
 * yet. It exposes the child's output attributes but can never be executed.
 */
case class PlanLater(plan: LogicalPlan) extends LeafExecNode {

  override def output: Seq[Attribute] = plan.output

  protected override def doExecute(): RDD[InternalRow] = {
    // Must be replaced by a concrete physical plan before execution.
    throw new UnsupportedOperationException()
  }
}
abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
self: SparkPlanner =>
/**
* Plans special cases of limit operators.
*/
  object SpecialLimits extends Strategy {
    override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      // ReturnAnswer marks the root of a query whose result is collected to
      // the driver; limits directly under it can use collect-limit execs.
      case ReturnAnswer(rootPlan) => rootPlan match {
        // limit over sort: top-K sort when the limit is below the threshold
        case Limit(IntegerLiteral(limit), Sort(order, true, child))
            if limit < conf.topKSortFallbackThreshold =>
          TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
        // limit over project-over-sort: same, projecting the project list
        case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child)))
            if limit < conf.topKSortFallbackThreshold =>
          TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil
        // plain limit at the root: collect the first `limit` rows
        case Limit(IntegerLiteral(limit), child) =>
          CollectLimitExec(limit, planLater(child)) :: Nil
        case other => planLater(other) :: Nil
      }
      // Same top-K patterns when the limit is not at the collected root.
      case Limit(IntegerLiteral(limit), Sort(order, true, child))
          if limit < conf.topKSortFallbackThreshold =>
        TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
      case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child)))
          if limit < conf.topKSortFallbackThreshold =>
        TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil
      case _ => Nil
    }
  }
/**
* Select the proper physical plan for join based on joining keys and size of logical plan.
*
* At first, uses the [[ExtractEquiJoinKeys]] pattern to find joins where at least some of the
* predicates can be evaluated by matching join keys. If found, join implementations are chosen
* with the following precedence:
*
* - Broadcast hash join (BHJ):
* BHJ is not supported for full outer join. For right outer join, we only can broadcast the
* left side. For left outer, left semi, left anti and the internal join type ExistenceJoin,
* we only can broadcast the right side. For inner like join, we can broadcast both sides.
* Normally, BHJ can perform faster than the other join algorithms when the broadcast side is
* small. However, broadcasting tables is a network-intensive operation. It could cause OOM
* or perform worse than the other join algorithms, especially when the build/broadcast side
* is big.
*
* For the supported cases, users can specify the broadcast hint (e.g. the user applied the
* [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame) and session-based
* [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold to adjust whether BHJ is used and
* which join side is broadcast.
*
* 1) Broadcast the join side with the broadcast hint, even if the size is larger than
* [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]]. If both sides have the hint (only when the type
* is inner like join), the side with a smaller estimated physical size will be broadcast.
* 2) Respect the [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold and broadcast the side
* whose estimated physical size is smaller than the threshold. If both sides are below the
* threshold, broadcast the smaller side. If neither is smaller, BHJ is not used.
*
* - Shuffle hash join: if the average size of a single partition is small enough to build a hash
* table.
*
* - Sort merge: if the matching join keys are sortable.
*
* If there is no joining keys, Join implementations are chosen with the following precedence:
* - BroadcastNestedLoopJoin (BNLJ):
* BNLJ supports all the join types but the impl is OPTIMIZED for the following scenarios:
* For right outer join, the left side is broadcast. For left outer, left semi, left anti
* and the internal join type ExistenceJoin, the right side is broadcast. For inner like
* joins, either side is broadcast.
*
* Like BHJ, users still can specify the broadcast hint and session-based
* [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold to impact which side is broadcast.
*
* 1) Broadcast the join side with the broadcast hint, even if the size is larger than
* [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]]. If both sides have the hint (i.e., just for
* inner-like join), the side with a smaller estimated physical size will be broadcast.
* 2) Respect the [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold and broadcast the side
* whose estimated physical size is smaller than the threshold. If both sides are below the
* threshold, broadcast the smaller side. If neither is smaller, BNLJ is not used.
*
* - CartesianProduct: for inner like join, CartesianProduct is the fallback option.
*
* - BroadcastNestedLoopJoin (BNLJ):
* For the other join types, BNLJ is the fallback option. Here, we just pick the broadcast
* side with the broadcast hint. If neither side has a hint, we broadcast the side with
* the smaller estimated physical size.
*/
object JoinSelection extends Strategy with PredicateHelper {
/**
* Matches a plan whose output should be small enough to be used in broadcast join.
*/
    private def canBroadcast(plan: LogicalPlan): Boolean = {
      // Non-negative size guards against unknown/invalid estimates.
      plan.stats.sizeInBytes >= 0 && plan.stats.sizeInBytes <= conf.autoBroadcastJoinThreshold
    }
/**
* Matches a plan whose single partition should be small enough to build a hash table.
*
* Note: this assume that the number of partition is fixed, requires additional work if it's
* dynamic.
*/
    private def canBuildLocalHashMap(plan: LogicalPlan): Boolean = {
      // Average partition size = total size / numShufflePartitions; compare it
      // against the broadcast threshold without dividing.
      plan.stats.sizeInBytes < conf.autoBroadcastJoinThreshold * conf.numShufflePartitions
    }
/**
* Returns whether plan a is much smaller (3X) than plan b.
*
* The cost to build hash map is higher than sorting, we should only build hash map on a table
* that is much smaller than other one. Since we does not have the statistic for number of rows,
* use the size of bytes here as estimation.
*/
    private def muchSmaller(a: LogicalPlan, b: LogicalPlan): Boolean = {
      // "Much smaller" means at most a third of the other side's size estimate.
      a.stats.sizeInBytes * 3 <= b.stats.sizeInBytes
    }
    // Join types for which the right side may be used as the build/broadcast side.
    private def canBuildRight(joinType: JoinType): Boolean = joinType match {
      case _: InnerLike | LeftOuter | LeftSemi | LeftAnti | _: ExistenceJoin => true
      case _ => false
    }
    // Join types for which the left side may be used as the build/broadcast side.
    private def canBuildLeft(joinType: JoinType): Boolean = joinType match {
      case _: InnerLike | RightOuter => true
      case _ => false
    }
private def broadcastSide(
canBuildLeft: Boolean,
canBuildRight: Boolean,
left: LogicalPlan,
right: LogicalPlan): BuildSide = {
def smallerSide =
if (right.stats.sizeInBytes <= left.stats.sizeInBytes) BuildRight else BuildLeft
if (canBuildRight && canBuildLeft) {
// Broadcast smaller side base on its estimated physical size
// if both sides have broadcast hint
smallerSide
} else if (canBuildRight) {
BuildRight
} else if (canBuildLeft) {
BuildLeft
} else {
// for the last default broadcast nested loop join
smallerSide
}
}
private def canBroadcastByHints(joinType: JoinType, left: LogicalPlan, right: LogicalPlan)
: Boolean = {
val buildLeft = canBuildLeft(joinType) && left.stats.hints.broadcast
val buildRight = canBuildRight(joinType) && right.stats.hints.broadcast
buildLeft || buildRight
}
private def broadcastSideByHints(joinType: JoinType, left: LogicalPlan, right: LogicalPlan)
: BuildSide = {
val buildLeft = canBuildLeft(joinType) && left.stats.hints.broadcast
val buildRight = canBuildRight(joinType) && right.stats.hints.broadcast
broadcastSide(buildLeft, buildRight, left, right)
}
private def canBroadcastBySizes(joinType: JoinType, left: LogicalPlan, right: LogicalPlan)
: Boolean = {
val buildLeft = canBuildLeft(joinType) && canBroadcast(left)
val buildRight = canBuildRight(joinType) && canBroadcast(right)
buildLeft || buildRight
}
private def broadcastSideBySizes(joinType: JoinType, left: LogicalPlan, right: LogicalPlan)
: BuildSide = {
val buildLeft = canBuildLeft(joinType) && canBroadcast(left)
val buildRight = canBuildRight(joinType) && canBroadcast(right)
broadcastSide(buildLeft, buildRight, left, right)
}
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
// --- BroadcastHashJoin --------------------------------------------------------------------
// broadcast hints were specified
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBroadcastByHints(joinType, left, right) =>
val buildSide = broadcastSideByHints(joinType, left, right)
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, buildSide, condition, planLater(left), planLater(right)))
// broadcast hints were not specified, so need to infer it from size and configuration.
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBroadcastBySizes(joinType, left, right) =>
val buildSide = broadcastSideBySizes(joinType, left, right)
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, buildSide, condition, planLater(left), planLater(right)))
// --- ShuffledHashJoin ---------------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildRight(joinType) && canBuildLocalHashMap(right)
&& muchSmaller(right, left) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildLeft(joinType) && canBuildLocalHashMap(left)
&& muchSmaller(left, right) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))
// --- SortMergeJoin ------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if RowOrdering.isOrderable(leftKeys) =>
joins.SortMergeJoinExec(
leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil
// --- Without joining keys ------------------------------------------------------------
// Pick BroadcastNestedLoopJoin if one side could be broadcast
case j @ logical.Join(left, right, joinType, condition)
if canBroadcastByHints(joinType, left, right) =>
val buildSide = broadcastSideByHints(joinType, left, right)
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), buildSide, joinType, condition) :: Nil
case j @ logical.Join(left, right, joinType, condition)
if canBroadcastBySizes(joinType, left, right) =>
val buildSide = broadcastSideBySizes(joinType, left, right)
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), buildSide, joinType, condition) :: Nil
// Pick CartesianProduct for InnerJoin
case logical.Join(left, right, _: InnerLike, condition) =>
joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil
case logical.Join(left, right, joinType, condition) =>
val buildSide = broadcastSide(
left.stats.hints.broadcast, right.stats.hints.broadcast, left, right)
// This join could be very slow or OOM
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), buildSide, joinType, condition) :: Nil
// --- Cases where this strategy does not apply ---------------------------------------------
case _ => Nil
}
}
/**
* Used to plan streaming aggregation queries that are computed incrementally as part of a
* [[StreamingQuery]]. Currently this rule is injected into the planner
* on-demand, only when planning in a [[org.apache.spark.sql.execution.streaming.StreamExecution]]
*/
object StatefulAggregationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case _ if !plan.isStreaming => Nil
case EventTimeWatermark(columnName, delay, child) =>
EventTimeWatermarkExec(columnName, delay, planLater(child)) :: Nil
case PhysicalAggregation(
namedGroupingExpressions, aggregateExpressions, rewrittenResultExpressions, child) =>
if (aggregateExpressions.exists(PythonUDF.isGroupedAggPandasUDF)) {
throw new AnalysisException(
"Streaming aggregation doesn't support group aggregate pandas UDF")
}
aggregate.AggUtils.planStreamingAggregation(
namedGroupingExpressions,
aggregateExpressions.map(expr => expr.asInstanceOf[AggregateExpression]),
rewrittenResultExpressions,
planLater(child))
case _ => Nil
}
}
/**
* Used to plan the streaming deduplicate operator.
*/
object StreamingDeduplicationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case Deduplicate(keys, child) if child.isStreaming =>
StreamingDeduplicateExec(keys, planLater(child)) :: Nil
case _ => Nil
}
}
/**
* Used to plan the streaming global limit operator for streams in append mode.
* We need to check for either a direct Limit or a Limit wrapped in a ReturnAnswer operator,
* following the example of the SpecialLimits Strategy above.
* Streams with limit in Append mode use the stateful StreamingGlobalLimitExec.
* Streams with limit in Complete mode use the stateless CollectLimitExec operator.
* Limit is unsupported for streams in Update mode.
*/
case class StreamingGlobalLimitStrategy(outputMode: OutputMode) extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ReturnAnswer(rootPlan) => rootPlan match {
case Limit(IntegerLiteral(limit), child)
if plan.isStreaming && outputMode == InternalOutputModes.Append =>
StreamingGlobalLimitExec(limit, LocalLimitExec(limit, planLater(child))) :: Nil
case _ => Nil
}
case Limit(IntegerLiteral(limit), child)
if plan.isStreaming && outputMode == InternalOutputModes.Append =>
StreamingGlobalLimitExec(limit, LocalLimitExec(limit, planLater(child))) :: Nil
case _ => Nil
}
}
object StreamingJoinStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = {
plan match {
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if left.isStreaming && right.isStreaming =>
new StreamingSymmetricHashJoinExec(
leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil
case Join(left, right, _, _) if left.isStreaming && right.isStreaming =>
throw new AnalysisException(
"Stream-stream join without equality predicate is not supported", plan = Some(plan))
case _ => Nil
}
}
}
  /**
   * Used to plan the aggregate operator for expressions based on the AggregateFunction2 interface.
   * Aggregates must be homogeneous: either all JVM [[AggregateExpression]]s or all group
   * aggregate pandas UDFs; a mixture is rejected with an AnalysisException.
   */
  object Aggregation extends Strategy {
    def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      // Every aggregate is a regular (JVM) aggregate expression.
      case PhysicalAggregation(groupingExpressions, aggExpressions, resultExpressions, child)
        if aggExpressions.forall(expr => expr.isInstanceOf[AggregateExpression]) =>
        val aggregateExpressions = aggExpressions.map(expr =>
          expr.asInstanceOf[AggregateExpression])
        val (functionsWithDistinct, functionsWithoutDistinct) =
          aggregateExpressions.partition(_.isDistinct)
        if (functionsWithDistinct.map(_.aggregateFunction.children.toSet).distinct.length > 1) {
          // This is a sanity check. We should not reach here when we have multiple distinct
          // column sets. Our `RewriteDistinctAggregates` should take care this case.
          sys.error("You hit a query analyzer bug. Please report your query to " +
            "Spark user mailing list.")
        }
        val aggregateOperator =
          if (functionsWithDistinct.isEmpty) {
            aggregate.AggUtils.planAggregateWithoutDistinct(
              groupingExpressions,
              aggregateExpressions,
              resultExpressions,
              planLater(child))
          } else {
            // The sanity check above guarantees at most one distinct column set here.
            aggregate.AggUtils.planAggregateWithOneDistinct(
              groupingExpressions,
              functionsWithDistinct,
              functionsWithoutDistinct,
              resultExpressions,
              planLater(child))
          }
        aggregateOperator
      // Every aggregate is a group aggregate pandas UDF: evaluate in a Python worker.
      case PhysicalAggregation(groupingExpressions, aggExpressions, resultExpressions, child)
        if aggExpressions.forall(expr => expr.isInstanceOf[PythonUDF]) =>
        val udfExpressions = aggExpressions.map(expr => expr.asInstanceOf[PythonUDF])
        Seq(execution.python.AggregateInPandasExec(
          groupingExpressions,
          udfExpressions,
          resultExpressions,
          planLater(child)))
      case PhysicalAggregation(_, _, _, _) =>
        // If cannot match the two cases above, then it's an error
        throw new AnalysisException(
          "Cannot use a mixture of aggregate function and group aggregate pandas UDF")
      case _ => Nil
    }
  }
object Window extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalWindow(
WindowFunctionType.SQL, windowExprs, partitionSpec, orderSpec, child) =>
execution.window.WindowExec(
windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil
case PhysicalWindow(
WindowFunctionType.Python, windowExprs, partitionSpec, orderSpec, child) =>
execution.python.WindowInPandasExec(
windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil
case _ => Nil
}
}
  // Single-partition RDD holding exactly one empty InternalRow; backs leaf plans that produce
  // one row with no columns (see the OneRowRelation case in BasicOperators).
  protected lazy val singleRowRdd = sparkContext.parallelize(Seq(InternalRow()), 1)
object InMemoryScans extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalOperation(projectList, filters, mem: InMemoryRelation) =>
pruneFilterProject(
projectList,
filters,
identity[Seq[Expression]], // All filters still need to be evaluated.
InMemoryTableScanExec(_, filters, mem)) :: Nil
case _ => Nil
}
}
/**
* This strategy is just for explaining `Dataset/DataFrame` created by `spark.readStream`.
* It won't affect the execution, because `StreamingRelation` will be replaced with
* `StreamingExecutionRelation` in `StreamingQueryManager` and `StreamingExecutionRelation` will
* be replaced with the real relation using the `Source` in `StreamExecution`.
*/
object StreamingRelationStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case s: StreamingRelation =>
StreamingRelationExec(s.sourceName, s.output) :: Nil
case s: StreamingExecutionRelation =>
StreamingRelationExec(s.toString, s.output) :: Nil
case s: StreamingRelationV2 =>
StreamingRelationExec(s.sourceName, s.output) :: Nil
case _ => Nil
}
}
  /**
   * Strategy to convert [[FlatMapGroupsWithState]] logical operator to physical operator
   * in streaming plans. Conversion for batch plans is handled by [[BasicOperators]].
   */
  object FlatMapGroupsWithStateStrategy extends Strategy {
    override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      case FlatMapGroupsWithState(
        func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, stateEnc, outputMode, _,
        timeout, child) =>
        // State format version comes from config so existing checkpoints keep their layout.
        val stateVersion = conf.getConf(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION)
        // stateInfo (the `None` below), batchTimestampMs and eventTimeWatermark are left unset
        // here; presumably they are filled in per micro-batch during incremental execution —
        // TODO confirm against IncrementalExecution.
        val execPlan = FlatMapGroupsWithStateExec(
          func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, None, stateEnc, stateVersion,
          outputMode, timeout, batchTimestampMs = None, eventTimeWatermark = None, planLater(child))
        execPlan :: Nil
      case _ =>
        Nil
    }
  }
  /**
   * Catch-all strategy mapping the remaining logical operators one-to-one onto physical
   * counterparts. Operators the optimizer must rewrite beforehand (Distinct, Intersect, Except)
   * fail fast with an IllegalStateException if they reach this point.
   */
  object BasicOperators extends Strategy {
    def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      // --- Commands and in-memory sinks ---------------------------------------------------------
      case d: DataWritingCommand => DataWritingCommandExec(d, planLater(d.query)) :: Nil
      case r: RunnableCommand => ExecutedCommandExec(r) :: Nil
      case MemoryPlan(sink, output) =>
        val encoder = RowEncoder(sink.schema)
        // `.copy()` guards against row-object reuse by the encoder — presumably; confirm.
        LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil
      case MemoryPlanV2(sink, output) =>
        val encoder = RowEncoder(StructType.fromAttributes(output))
        LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil
      // --- Operators that must already have been rewritten by the optimizer ---------------------
      case logical.Distinct(child) =>
        throw new IllegalStateException(
          "logical distinct operator should have been replaced by aggregate in the optimizer")
      case logical.Intersect(left, right) =>
        throw new IllegalStateException(
          "logical intersect operator should have been replaced by semi-join in the optimizer")
      case logical.Except(left, right) =>
        throw new IllegalStateException(
          "logical except operator should have been replaced by anti-join in the optimizer")
      // --- Typed (object) operators --------------------------------------------------------------
      case logical.DeserializeToObject(deserializer, objAttr, child) =>
        execution.DeserializeToObjectExec(deserializer, objAttr, planLater(child)) :: Nil
      case logical.SerializeFromObject(serializer, child) =>
        execution.SerializeFromObjectExec(serializer, planLater(child)) :: Nil
      case logical.MapPartitions(f, objAttr, child) =>
        execution.MapPartitionsExec(f, objAttr, planLater(child)) :: Nil
      case logical.MapPartitionsInR(f, p, b, is, os, objAttr, child) =>
        execution.MapPartitionsExec(
          execution.r.MapPartitionsRWrapper(f, p, b, is, os), objAttr, planLater(child)) :: Nil
      case logical.FlatMapGroupsInR(f, p, b, is, os, key, value, grouping, data, objAttr, child) =>
        execution.FlatMapGroupsInRExec(f, p, b, is, os, key, value, grouping,
          data, objAttr, planLater(child)) :: Nil
      case logical.FlatMapGroupsInPandas(grouping, func, output, child) =>
        execution.python.FlatMapGroupsInPandasExec(grouping, func, output, planLater(child)) :: Nil
      case logical.MapElements(f, _, _, objAttr, child) =>
        execution.MapElementsExec(f, objAttr, planLater(child)) :: Nil
      case logical.AppendColumns(f, _, _, in, out, child) =>
        execution.AppendColumnsExec(f, in, out, planLater(child)) :: Nil
      case logical.AppendColumnsWithObject(f, childSer, newSer, child) =>
        execution.AppendColumnsWithObjectExec(f, childSer, newSer, planLater(child)) :: Nil
      case logical.MapGroups(f, key, value, grouping, data, objAttr, child) =>
        execution.MapGroupsExec(f, key, value, grouping, data, objAttr, planLater(child)) :: Nil
      // Batch path for FlatMapGroupsWithState: planned as a plain MapGroupsExec. The streaming
      // path is handled by FlatMapGroupsWithStateStrategy (see its scaladoc above).
      case logical.FlatMapGroupsWithState(
        f, key, value, grouping, data, output, _, _, _, timeout, child) =>
        execution.MapGroupsExec(
          f, key, value, grouping, data, output, timeout, planLater(child)) :: Nil
      case logical.CoGroup(f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, left, right) =>
        execution.CoGroupExec(
          f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr,
          planLater(left), planLater(right)) :: Nil
      // --- Basic relational operators ------------------------------------------------------------
      case logical.Repartition(numPartitions, shuffle, child) =>
        if (shuffle) {
          ShuffleExchangeExec(RoundRobinPartitioning(numPartitions), planLater(child)) :: Nil
        } else {
          // A non-shuffle repartition is executed as a coalesce.
          execution.CoalesceExec(numPartitions, planLater(child)) :: Nil
        }
      case logical.Sort(sortExprs, global, child) =>
        execution.SortExec(sortExprs, global, planLater(child)) :: Nil
      case logical.Project(projectList, child) =>
        execution.ProjectExec(projectList, planLater(child)) :: Nil
      case logical.Filter(condition, child) =>
        execution.FilterExec(condition, planLater(child)) :: Nil
      case f: logical.TypedFilter =>
        execution.FilterExec(f.typedCondition(f.deserializer), planLater(f.child)) :: Nil
      case e @ logical.Expand(_, _, child) =>
        execution.ExpandExec(e.projections, e.output, planLater(child)) :: Nil
      case logical.Sample(lb, ub, withReplacement, seed, child) =>
        execution.SampleExec(lb, ub, withReplacement, seed, planLater(child)) :: Nil
      case logical.LocalRelation(output, data, _) =>
        LocalTableScanExec(output, data) :: Nil
      case logical.LocalLimit(IntegerLiteral(limit), child) =>
        execution.LocalLimitExec(limit, planLater(child)) :: Nil
      case logical.GlobalLimit(IntegerLiteral(limit), child) =>
        execution.GlobalLimitExec(limit, planLater(child)) :: Nil
      case logical.Union(unionChildren) =>
        execution.UnionExec(unionChildren.map(planLater)) :: Nil
      case g @ logical.Generate(generator, _, outer, _, _, child) =>
        execution.GenerateExec(
          generator, g.requiredChildOutput, outer,
          g.qualifiedGeneratorOutput, planLater(child)) :: Nil
      case _: logical.OneRowRelation =>
        execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil
      case r: logical.Range =>
        execution.RangeExec(r) :: Nil
      case r: logical.RepartitionByExpression =>
        exchange.ShuffleExchangeExec(r.partitioning, planLater(r.child)) :: Nil
      case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil
      case r: LogicalRDD =>
        RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil
      // Hints carry no physical behavior of their own; plan the child directly.
      case h: ResolvedHint => planLater(h.child) :: Nil
      case _ => Nil
    }
  }
}
| tejasapatil/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala | Scala | apache-2.0 | 29,133 |
// diversity-maximization: Diversity maximization in Streaming and MapReduce
// Copyright (C) 2016 Matteo Ceccarello <ceccarel@dei.unipd.it>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package it.unipd.dei.diversity.wiki
import it.unipd.dei.diversity.Distance
import org.scalacheck._
import org.scalacheck.Prop.{BooleanOperators, all, forAll}
import org.scalacheck.Gen
import scala.util.Random
object WikiBagOfWordsProperties extends Properties("WikiBagOfWords") {
  // Generates one (word, score) pair: a non-empty alphabetic word with a score in [1, 100].
  def wordWithCount: Gen[(String, Double)] =
    for {
      chars <- Gen.nonEmptyListOf(Gen.alphaChar)
      score <- Gen.choose[Double](1, 100)
    } yield (chars.mkString(""), score)
  // Non-empty word -> score map, the raw material for all bag-of-words generators below.
  def wordCounts = Gen.nonEmptyMap(wordWithCount)
  // Builds a bag of words from a random subset of `wordScores`, with words kept sorted.
  // NOTE(review): the subset is drawn with scala.util.Random, outside ScalaCheck's control,
  // so failing cases are not reproducible from the ScalaCheck seed alone.
  def filteredBagOfWords(wordScores: Map[String, Double]) = {
    val (words, scores) = wordScores.filter(_ => Random.nextBoolean()).toSeq.sortBy(_._1).unzip
    if (words.isEmpty) {
      // The random filter dropped every word; fall back to a singleton bag so the result is
      // never empty.
      val (word, score) = wordScores.head
      new WikiBagOfWords("", Set.empty, Array(word), Array(score))
    } else {
      new WikiBagOfWords("", Set.empty, words.toArray, scores.toArray)
    }
  }
  // Builds a bag of words containing every entry of `wordCounts`, with words sorted.
  def buildBagOfWords(wordCounts: Map[String, Double]) = {
    val (words, counts) = wordCounts.toSeq.sortBy(_._1).unzip
    new WikiBagOfWords("", Set.empty, words.toArray, counts.toArray)
  }
  def bagOfWords =
    for {
      countsMap <- wordCounts
    } yield buildBagOfWords(countsMap)
  // Pair of bags with disjoint vocabularies (shared words removed from both sides), used to
  // exercise the zero-similarity / maximum-distance case.
  // NOTE(review): if one map's key set contains the other's, removing the intersection leaves
  // an empty map, producing a bag with no words — TODO confirm WikiBagOfWords tolerates that.
  def orthogonalBagOfWords = wordCounts.flatMap { countsMapA =>
    wordCounts.flatMap { countsMapB =>
      val intersection = countsMapA.keySet.intersect(countsMapB.keySet)
      val wordsA = countsMapA -- intersection
      val wordsB = countsMapB -- intersection
      (buildBagOfWords(wordsA), buildBagOfWords(wordsB))
    }
  }
  // Two independent random subsets of the SAME word map: overlapping but usually distinct bags.
  def bagOfWordsPair =
    for {
      countsMap <- wordCounts
    } yield (filteredBagOfWords(countsMap), filteredBagOfWords(countsMap))
  // Three bags drawn from the union of three word maps, for the triangle-inequality property.
  def bagOfWordsTriplet =
    for {
      countsMap1 <- wordCounts
      countsMap2 <- wordCounts
      countsMap3 <- wordCounts
    } yield (
      filteredBagOfWords(countsMap1 ++ countsMap2 ++ countsMap3),
      filteredBagOfWords(countsMap1 ++ countsMap2 ++ countsMap3),
      filteredBagOfWords(countsMap1 ++ countsMap2 ++ countsMap3)
    )
  // The specialized implementation must agree with the generic Distance.cosineSimilarity.
  property("cosine similarity conformance with generic implementation") =
    forAll(bagOfWordsPair) { case (a, b) =>
      val expected = Distance.cosineSimilarity(a, b)
      val actual = WikiBagOfWords.cosineSimilarity(a, b)
      doubleEquality(expected, actual) :|
        s"""
          | "A" words: ${a.wordsArray.zip(a.scoresArray).toSeq}
          | "B" words: ${b.wordsArray.zip(b.scoresArray).toSeq}
          | expected != actual
          | $expected != $actual
        """.stripMargin
    }
  // A bag compared with itself must have similarity exactly 1.
  property("cosine similarity of the same page") =
    forAll(bagOfWords) { bow =>
      val sim = WikiBagOfWords.cosineSimilarity(bow, bow)
      doubleEquality(sim, 1.0) :| s"Similarity is $sim instead of 1.0"
    }
  property("cosine distance of same bag of words") =
    forAll(bagOfWords) { bow =>
      val dist = WikiBagOfWords.cosineDistance(bow, bow)
      // here we use a lower precision in the equality comparison because of
      // the propagation of errors in floating point operations
      doubleEquality(dist, 0.0, 0.0000001) :| s"Distance is $dist instead of 0.0"
    }
  // Disjoint vocabularies have zero cosine similarity, hence distance 1.
  property("cosine distance of unrelated bag of words") =
    forAll(orthogonalBagOfWords) { case (a, b) =>
      val dist = WikiBagOfWords.cosineDistance(a, b)
      doubleEquality(dist, 1.0) :| s"Distance is $dist instead of 1.0"
    }
  // d(a, c) <= d(a, b) + d(b, c): required for the distance to be a metric.
  property("cosine distance triangle inequality") =
    forAll(bagOfWordsTriplet) { case (a, b, c) =>
      val dFn: (WikiBagOfWords, WikiBagOfWords) => Double = WikiBagOfWords.cosineDistance
      dFn(a, b) + dFn(b, c) >= dFn(a, c)
    }
  // The specialized Jaccard implementation must agree with the generic Distance.jaccard.
  property("generalized Jaccard distance conformance with generic implementation") =
    forAll(bagOfWordsPair) { case (a, b) =>
      val expected = Distance.jaccard(a, b)
      val actual = WikiBagOfWords.jaccard(a, b)
      doubleEquality(expected, actual) :|
        s"""
          | "A" words: ${a.wordsArray.zip(a.scoresArray).toSeq}
          | "B" words: ${b.wordsArray.zip(b.scoresArray).toSeq}
          | expected != actual
          | $expected != $actual
        """.stripMargin
    }
  // Absolute-difference floating point comparison with a configurable tolerance.
  def doubleEquality(a: Double, b: Double, precision: Double = 0.000000000001): Boolean = {
    math.abs(a-b) <= precision
  }
}
| Cecca/diversity-maximization | experiments/src/test/scala/it/unipd/dei/diversity/wiki/WikiBagOfWordsProperties.scala | Scala | gpl-3.0 | 5,034 |
/*-------------------------------------------------------------------------*\
** ScalaCheck **
** Copyright (c) 2007-2014 Rickard Nilsson. All rights reserved. **
** http://www.scalacheck.org **
** **
** This software is released under the terms of the Revised BSD License. **
** There is NO WARRANTY. See the file LICENSE for the full text. **
\*------------------------------------------------------------------------ */
package org.scalacheck
import language.reflectiveCalls
import util.Pretty
import org.scalatools.testing._
/**
 * sbt test-interface adapter for ScalaCheck: discovers Prop/Properties classes and objects,
 * runs them, and forwards results to sbt's event handler and loggers.
 */
class ScalaCheckFramework extends Framework {
  // Builds a fingerprint used by sbt to discover test classes (mod = false) or
  // objects/modules (mod = true) extending `cname`.
  private def mkFP(mod: Boolean, cname: String) =
    new SubclassFingerprint {
      val superClassName = cname
      val isModule = mod
    }
  val name = "ScalaCheck"
  // Both module and class flavors of Properties and Prop are discoverable.
  val tests = Array[Fingerprint](
    mkFP(true, "org.scalacheck.Properties"),
    mkFP(false, "org.scalacheck.Prop"),
    mkFP(false, "org.scalacheck.Properties"),
    mkFP(true, "org.scalacheck.Prop")
  )
  def testRunner(loader: ClassLoader, loggers: Array[Logger]) = new Runner2 {
    // Adapts a named ScalaCheck result to the sbt test-interface Event model.
    private def asEvent(nr: (String, Test.Result)) = nr match {
      case (n: String, r: Test.Result) => new Event {
        val testName = n
        val description = n
        val result = r.status match {
          case Test.Passed => Result.Success
          case _:Test.Proved => Result.Success
          case _:Test.Failed => Result.Failure
          case Test.Exhausted => Result.Skipped
          case _:Test.PropException => Result.Error
        }
        // Only exception-like statuses carry a Throwable; Failed is wrapped so sbt can
        // display the pretty-printed counterexample.
        val error = r.status match {
          case Test.PropException(_, e, _) => e
          case _:Test.Failed => new Exception(Pretty.pretty(r,Pretty.Params(0)))
          case _ => null
        }
      }
    }
    def run(testClassName: String, fingerprint: Fingerprint, handler: EventHandler, args: Array[String]) {
      val testCallback = new Test.TestCallback {
        override def onPropEval(n: String, w: Int, s: Int, d: Int) = {}
        override def onTestResult(n: String, r: Test.Result) = {
          for (l <- loggers) {
            import Pretty._
            val verbosityOpts = Set("-verbosity", "-v")
            // Scans args pairwise for "-verbosity <n>" / "-v <n>"; defaults to 0.
            // NOTE(review): grouped(2) assumes the flag and its value are adjacent and aligned
            // to even positions in args.
            val verbosity = args.grouped(2).filter(twos => verbosityOpts(twos.head)).toSeq.headOption.map(_.last).map(_.toInt).getOrElse(0)
            l.info(
              (if (r.passed) "+ " else "! ") + n + ": " + pretty(r, Params(verbosity))
            )
          }
          handler.handle(asEvent((n,r)))
        }
      }
      import Test.cmdLineParser.{Success, NoSuccess}
      val prms = Test.cmdLineParser.parseParams(args) match {
        case Success(params, _) =>
          params.withTestCallback(testCallback).withCustomClassLoader(Some(loader))
        // TODO: Maybe handle this a bit better than throwing exception?
        case e: NoSuccess => throw new Exception(e.toString)
      }
      // NOTE(review): no default case — an unexpected fingerprint type raises a MatchError.
      fingerprint match {
        case fp: SubclassFingerprint =>
          // Modules (objects) are loaded via their MODULE$ field; classes are instantiated.
          val obj =
            if(fp.isModule) Class.forName(testClassName + "$", true, loader).getField("MODULE$").get(null)
            else Class.forName(testClassName, true, loader).newInstance
          if(obj.isInstanceOf[Properties])
            Test.checkProperties(prms, obj.asInstanceOf[Properties])
          else
            handler.handle(asEvent((testClassName, Test.check(prms, obj.asInstanceOf[Prop]))))
      }
    }
  }
}
| jedws/scalacheck | src/main/scala/org/scalacheck/ScalaCheckFramework.scala | Scala | bsd-3-clause | 3,545 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.cogmath.algebra.real
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.MustMatchers
/** Test code.
*
*
* @author Greg Snider
*/
@RunWith(classOf[JUnitRunner])
class VectorSpec extends FunSuite with MustMatchers {
  // NOTE(review): assertions use `require` (IllegalArgumentException on failure) rather than the
  // mixed-in MustMatchers, so failures surface without diff-style messages.
  test("Constructors") {
    // Length-only constructor, varargs constructor, and deep copy.
    val v1 = new Vector(17)
    require(v1.length == 17)
    val v2 = new Vector(1f, 2f, 34.5f, 9f)
    require(v2.length == 4)
    val v3 = v2.copy
    for (i <- 0 until v2.length)
      require(v2(i) == v3(i))
  }
  test("ElementAccess") {
    // apply/update read-write semantics on single elements.
    val v = new Vector(1f, 2f, 34.5f, 9f)
    require(v(0) == 1 && v(1) == 2 && v(2) == 34.5f && v(3) == 9)
    v(1) = 123
    require(v(0) == 1 && v(1) == 123 && v(2) == 34.5f && v(3) == 9)
  }
  test("VectorScalarOperations") {
    // Pure scalar operators (+, -, *, /, unary -), then their in-place variants, then `:=`.
    val v = new Vector(2f, 3f, 7f)
    require(v + 1 === new Vector(3f, 4f, 8f))
    require(v - 1 === new Vector(1f, 2f, 6f))
    require(v * 2 === new Vector(4f, 6f, 14f))
    require(v / 2 === new Vector(1f, 1.5f, 3.5f))
    require(-v === new Vector(-2f, -3f, -7f))
    v += 1
    require(v === new Vector(3f, 4f, 8f))
    v -= 1
    require(v === new Vector(2f, 3f, 7f))
    v *= 2
    require(v === new Vector(4f, 6f, 14f))
    v /= 2
    require(v === new Vector(2f, 3f, 7f))
    val v2 = new Vector(3)
    v2 := v
    require(v2 === new Vector(2f, 3f, 7f))
  }
  test("VectorVectorOperations") {
    // Elementwise ops (:* and :/), concat, dot product, outer product, and in-place variants.
    val v1 = new Vector(2f, 3f, 5f)
    val v2 = new Vector(7f, 11f, 13f)
    require(v1 !=== v2)
    require(v1 + v2 === new Vector(9f, 14f, 18f))
    require(v1 - v2 === new Vector(-5f, -8f, -8f))
    require(v1 :* v2 === new Vector(14f, 33f, 65f))
    require(v1 :/ v2 === new Vector(2.0f/7f, 3.0f/11f, 5.0f/13f))
    val v3 = v1 concat v2
    require(v3 === new Vector(2f, 3f, 5f, 7f, 11f, 13f))
    val dotProduct = v1 dot v2
    require(dotProduct == 2*7 + 3*11 + 5*13)
    // Outer product: column vector (3x1) times row vector (1x3) yields a 3x3 matrix.
    val outer = v1.shape(v1.length, 1) * v2.transpose
    val outerExpected = Matrix(
      Array(14f, 22f, 26f),
      Array(21f, 33f, 39f),
      Array(35f, 55f, 65f)
    )
    //(outer - outerExpected).print
    require(outer === outerExpected)
    v1 += v2
    require(v1 === new Vector(9f, 14f, 18f))
    v1 -= v2
    require(v1 === new Vector(2f, 3f, 5f))
    v1 :*= v2
    require(v1 === new Vector(2*7f, 3*11f, 5*13f))
    v1 :/= v2
    require(v1 === new Vector(2f, 3f, 5f))
  }
  test("MatrixConversions") {
    // transpose (1xN row matrix), shape (reshape to rows x columns), and rectangularize.
    val v = new Vector(2f, 3f, 5f)
    val m = v.transpose
    require(m.rows == 1 && m.columns == v.length)
    require(m(0, 0) == 2 && m(0, 1) == 3 && m(0, 2) == 5)
    val v2 = new Vector(2f, 3, 5, 7, 11, 13)
    val m2 = v2.shape(2, 3)
    require(m2.rows == 2 && m2.columns == 3)
    require(m2(0, 0) == 2 && m2(0, 1) == 3 && m2(0, 2) == 5)
    require(m2(1, 0) == 7 && m2(1, 1) == 11 && m2(1, 2) == 13)
    // rectangularize does not specify orientation, so either 3x2 or 2x3 is accepted.
    val m3 = v2.rectangularize
    require((m3.rows == 3 && m3.columns == 2) ||
      (m3.rows == 2 && m3.columns == 3))
    // asArray exposes the backing store: the second require (after mutation) asserts aliasing,
    // i.e. that the returned array is NOT a defensive copy.
    var dataArray = v2.asArray
    require(dataArray(2) == v2(2))
    dataArray(2) = 4.0f
    require(dataArray(2) == v2(2))
  }
  test("MapReduce") {
    // map (pure), mapSelf (in-place), and reduce with + and *.
    val v1 = new Vector(2f, 3, 5)
    val v2 = v1.map(_ * 3)
    require(v2 === new Vector(6f, 9, 15))
    v1.mapSelf(_ * 2)
    require(v1 === new Vector(4f, 6, 10))
    val sum = v1.reduce(_ + _)
    require(sum == 4 + 6 + 10)
    val product = v1.reduce(_ * _)
    require(product == 4 * 6 * 10)
  }
  test("Subvector") {
    // Splits [0, Length) into two halves and checks element alignment of each slice.
    val Length = 25
    val v1 = new Vector(Length) {
      for (i <- 0 until length)
        this(i) = i
    }
    val vLow = v1.subvector(0 until Length / 2)
    require(vLow.length == Length / 2)
    for (i <- 0 until Length/2)
      require(vLow(i) == i)
    val vHigh = v1.subvector(Length / 2 until Length)
    require(vHigh.length == Length - (Length / 2) )
    for (i <- 0 until vHigh.length)
      require(vHigh(i) == i + Length / 2)
  }
  test("Miscellaneous") {
    // abs, sgn, rectify (negatives clamped to 0), L1/L2 norms, and argmax.
    val v1 = new Vector(2f, -3, 5, 0)
    require(v1.abs === new Vector(2f, 3, 5, 0))
    require(v1.sgn === new Vector(1f, -1, 1, 0))
    require(v1.rectify === new Vector(2f, 0, 5, 0))
    require(v1.normL1 == 2 + (-3).abs + 5)
    require(v1.normL2 == math.sqrt(2*2 + 3*3 + 5*5f).toFloat)
    require(v1.argmax == 2)
  }
  test("Sort") {
    // sort returns elements in ascending order.
    val v1 = new Vector(2f, -3, 5, 0)
    val sorted = v1.sort
    require(sorted === new Vector(-3f, 0, 2, 5))
  }
  test("Flip") {
    // flip reverses element order.
    val v1 = new Vector(2f, -3, 5, 0)
    val flipped = v1.flip
    val expect = new Vector(0f, 5, -3, 2)
    require(flipped === expect)
  }
}
package org.eobjects.analyzer.beans.transform
import java.util.regex.Pattern
import org.eobjects.analyzer.data.MockInputColumn
import org.eobjects.analyzer.data.MockInputRow
import org.junit.Assert.assertEquals
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
/**
 * Tests for PlainSearchReplaceTransformer: match-region replacement, whole-string replacement,
 * and validation of a replacement string that contains the search string.
 */
class PlainSearchReplaceTransformerTest extends AssertionsForJUnit {

  @Test
  def testTransformMatchRegion() {
    val col: MockInputColumn[String] = new MockInputColumn[String]("foobar", classOf[String])
    val transformer = new PlainSearchReplaceTransformer()
    transformer.valueColumn = col
    transformer.searchString = "foo"
    transformer.replacementString = "bar"

    assertEquals("OutputColumns[foobar (replaced 'foo')]", transformer.getOutputColumns().toString())

    // Null and empty inputs pass through unchanged.
    assertEquals("null", transformer.transform(new MockInputRow().put(col, null)).mkString(","))
    assertEquals("", transformer.transform(new MockInputRow().put(col, "")).mkString(","))

    // Values without the search string are untouched; every occurrence is replaced otherwise.
    assertEquals("bar baz bar", transformer.transform(new MockInputRow().put(col, "bar baz bar")).mkString(","))
    assertEquals("bar bar", transformer.transform(new MockInputRow().put(col, "foo foo")).mkString(","))
    assertEquals("bar Hello there world bar",
      transformer.transform(new MockInputRow().put(col, "foo Hello there world foo")).mkString(","))
  }

  @Test
  def testValidateReplaceTokenWithinSearchToken() {
    val col: MockInputColumn[String] = new MockInputColumn[String]("foobar", classOf[String])
    val transformer = new PlainSearchReplaceTransformer()
    transformer.valueColumn = col
    transformer.searchString = "foo"
    transformer.replacementString = "fooo"

    // Fix: the original called fail("Exception expected") INSIDE the try block, so the resulting
    // AssertionError was swallowed by the `case e: Throwable` catch-all and reported as
    // "Unexpected exception: java.lang.AssertionError". intercept (from ScalaTest Assertions,
    // mixed in via AssertionsForJUnit) fails explicitly when no exception is thrown or when the
    // exception has the wrong type, and returns the caught exception for message inspection.
    val e = intercept[IllegalArgumentException] {
      transformer.validate()
    }
    assertEquals(
      "Replacement string cannot contain the search string (implies an infinite replacement loop)",
      e.getMessage())
  }

  @Test
  def testTransformMatchEntireString() {
    val col: MockInputColumn[String] = new MockInputColumn[String]("foobar", classOf[String])
    val transformer = new PlainSearchReplaceTransformer()
    transformer.valueColumn = col
    transformer.searchString = "foo"
    transformer.replacementString = "bar"
    transformer.replaceEntireString = true

    assertEquals("OutputColumns[foobar (replaced 'foo')]", transformer.getOutputColumns().toString())
    assertEquals("null", transformer.transform(new MockInputRow().put(col, null)).mkString(","))
    assertEquals("", transformer.transform(new MockInputRow().put(col, "")).mkString(","))
    // A value without the search string is untouched...
    assertEquals("bar baz bar", transformer.transform(new MockInputRow().put(col, "bar baz bar")).mkString(","))
    // ...but any match replaces the ENTIRE value with the replacement string.
    assertEquals("bar", transformer.transform(new MockInputRow().put(col, "foo foo")).mkString(","))
    assertEquals("bar", transformer.transform(new MockInputRow().put(col, "hello foo")).mkString(","))
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.utils.{TableFunc0, TableTestBase}
import org.apache.flink.types.Row
import org.junit.Assert.{assertTrue, fail}
import org.junit.Test
/** Negative tests: invalid groupings, aggregations and LISTAGG arguments must be rejected. */
class AggregateValidationTest extends TableTestBase {
  private val util = scalaStreamTestUtil()
  @Test(expected = classOf[ValidationException])
  def testGroupingOnNonExistentField(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    val ds = table
      // must fail. '_foo is not a valid field
      .groupBy('_foo)
      .select('a.avg)
  }
  @Test(expected = classOf[ValidationException])
  def testGroupingInvalidSelection(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    table
      .groupBy('a, 'b)
      // must fail. 'c is not a grouping key or aggregation
      .select('c)
  }
  @Test(expected = classOf[ValidationException])
  def testInvalidAggregationInSelection(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    table
      .groupBy('a)
      .aggregate('b.sum as 'd)
      // must fail. Cannot use AggregateFunction in select right after aggregate
      .select('d.sum)
  }
  @Test(expected = classOf[ValidationException])
  def testInvalidWindowPropertiesInSelection(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    table
      .groupBy('a)
      .aggregate('b.sum as 'd)
      // must fail. Cannot use window properties in select right after aggregate
      .select('d.start)
  }
  @Test(expected = classOf[RuntimeException])
  def testTableFunctionInSelection(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    util.addFunction("func", new TableFunc0)
    val resultTable = table
      .groupBy('a)
      .aggregate('b.sum as 'd)
      // must fail. Cannot use TableFunction in select after aggregate
      .select("func('abc')")
    util.verifyPlan(resultTable)
  }
  @Test(expected = classOf[ValidationException])
  def testInvalidScalarFunctionInAggregate(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    table
      .groupBy('a)
      // must fail. Only AggregateFunction can be used in aggregate
      .aggregate('c.upperCase as 'd)
      .select('a, 'd)
  }
  @Test(expected = classOf[ValidationException])
  def testInvalidTableFunctionInAggregate(): Unit = {
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    util.addFunction("func", new TableFunc0)
    table
      .groupBy('a)
      // must fail. Only AggregateFunction can be used in aggregate
      .aggregate("func(c) as d")
      .select('a, 'd)
  }
  @Test(expected = classOf[RuntimeException])
  def testMultipleAggregateExpressionInAggregate(): Unit = {
    util.addFunction("func", new TableFunc0)
    val table = util.addTableSource[(Long, Int, String)]('a, 'b, 'c)
    table
      .groupBy('a)
      // must fail. Only one AggregateFunction can be used in aggregate
      .aggregate("sum(c), count(b)")
  }
  @Test
  def testIllegalArgumentForListAgg(): Unit = {
    util.addTableSource[(Long, Int, String, String)]("T", 'a, 'b, 'c, 'd)
    // If there are two parameters, second one must be character literal.
    expectExceptionThrown(
      "SELECT listagg(c, d) FROM T GROUP BY a",
      "Supported form(s): 'LISTAGG(<CHARACTER>)'\\n'LISTAGG(<CHARACTER>, <CHARACTER_LITERAL>)",
      classOf[ValidationException])
  }
  @Test
  def testIllegalArgumentForListAgg1(): Unit = {
    util.addTableSource[(Long, Int, String, String)]("T", 'a, 'b, 'c, 'd)
    // If there are two parameters, second one must be character literal.
    expectExceptionThrown(
      "SELECT LISTAGG(c, 1) FROM T GROUP BY a",
      "Supported form(s): 'LISTAGG(<CHARACTER>)'\\n'LISTAGG(<CHARACTER>, <CHARACTER_LITERAL>)",
      classOf[ValidationException])
  }
  // ----------------------------------------------------------------------------------------------
  /**
   * Translates `sql` to a stream and asserts that an exception of exactly `clazz` is thrown
   * whose message contains `keywords` (keyword check skipped when `keywords` is null).
   *
   * NOTE(review): if the query unexpectedly succeeds, the fail() inside the try is itself a
   * Throwable and is re-reported by the catch-all case with a misleading "Expected throw ..."
   * message; consider ScalaTest's intercept/assertThrows instead.
   */
  private def expectExceptionThrown(
      sql: String,
      keywords: String,
      clazz: Class[_ <: Throwable] = classOf[ValidationException])
    : Unit = {
    try {
      util.tableEnv.toAppendStream[Row](util.tableEnv.sqlQuery(sql))
      fail(s"Expected a $clazz, but no exception is thrown.")
    } catch {
      case e if e.getClass == clazz =>
        if (keywords != null) {
          assertTrue(
            s"The exception message '${e.getMessage}' doesn't contain keyword '$keywords'",
            e.getMessage.contains(keywords))
        }
      case e: Throwable => fail(s"Expected throw ${clazz.getSimpleName}, but is $e.")
    }
  }
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/validation/AggregateValidationTest.scala | Scala | apache-2.0 | 5,611 |
package scala.meta.tests
package scalameta
package prettyprinters
import org.scalatest._
import scala.reflect.runtime.universe._
import scala.meta.prettyprinters._
import scala.meta.internal.ast._
import scala.meta.dialects.Scala211
/** Pretty-printing of quasi (unquote) nodes of the internal scala.meta AST. */
class QuasiSuite extends FunSuite {
  // Deliberately shadows the implicit of the same name — presumably the
  // XtensionQuasiquoteTerm conversion from scala.meta quasiquotes — so that q"..."
  // below resolves against scala.meta.internal.ast instead. The string's content is
  // irrelevant; "scala.metq" looks like a typo for "scala.meta" (TODO confirm).
  val XtensionQuasiquoteTerm = "shadow scala.metq quasiquotes"
  // rank-0 quasi: a plain unquote ${x @ Term}
  test("$x") {
    assert(Term.Quasi(0, q"x").show[Syntax] === "${x @ Term}")
  }
  // rank-1 quasi wrapping a rank-0 quasi: a splice ..${xs @ Term}
  test("..$xs") {
    assert(Term.Quasi(1, Term.Quasi(0, q"xs")).show[Syntax] === "..${xs @ Term}")
  }
} | beni55/scalameta | scalameta/trees/src/test/scala/scala/meta/tests/prettyprinters/QuasiSuite.scala | Scala | bsd-3-clause | 521 |
//http://www.scala-lang.org/node/227
/* Defines a new method 'sort' for array objects */
/* Defines a new method 'sort' for array objects ("pimp my library" demo). */
object implicits extends App {
  // Modernized: `Application` is deprecated in favor of `App`, `ClassManifest` in
  // favor of `ClassTag`, and the old reflective structural-type wrapper is replaced
  // by an implicit class (same name, so `implicits.arrayWrapper(xs)` still works).
  implicit class arrayWrapper[A: scala.reflect.ClassTag](xs: Array[A]) {
    /** Stable in-place sort by predicate `p`; returns the same (mutated) array. */
    def sort(p: (A, A) => Boolean): Array[A] = {
      util.Sorting.stableSort(xs, p)
      xs
    }
  }

  val x = Array(2, 3, 1, 4)
  // mkString: bare Array.toString would print a JVM identity hash, not the contents.
  println("x = " + x.sort((x: Int, y: Int) => x < y).mkString("Array(", ", ", ")"))
} | ishandutta2007/test | public/js/ace/demo/kitchen-sink/docs/scala.scala | Scala | mit | 378 |
package org.jetbrains.plugins.scala
package lang
package resolve
import org.jetbrains.plugins.scala.lang.resolve.ResolveTargets._
/** Pre-built sets of resolve-target kinds used when resolving references. */
object StdKinds {

  /**
   * NOTE: other checks for "stable" are done here:
   * - [[lang.psi.api.toplevel.ScTypedDefinition.isStable]]
   * - [[lang.psi.impl.base.ScFieldIdImpl.isStable]]
   * - [[lang.psi.api.statements.ScValueOrVariable.isStable]]
   * - [[lang.psi.impl.toplevel.typedef.StableTermsCollector.isStable]]
   * - [[lang.psi.impl.toplevel.typedef.StableTermsCollector.mayContainStable]]
   */
  val stableQualRef: ResolveTargets.ValueSet = ValueSet(PACKAGE, OBJECT, VAL)
  val stableQualRef_Scala3: ResolveTargets.ValueSet = stableQualRef + METHOD + VAR // SCL-19477
  val stableQualOrClass: ResolveTargets.ValueSet = stableQualRef + CLASS

  val noPackagesClassCompletion: ResolveTargets.ValueSet = ValueSet(OBJECT, VAL, CLASS)

  val stableClass: ResolveTargets.ValueSet = ValueSet(CLASS)
  val stableClassOrObject: ResolveTargets.ValueSet = stableClass + OBJECT
  val objectOrValue: ResolveTargets.ValueSet = ValueSet(OBJECT, VAL)

  // Reference-expression kinds, built up from the value kinds.
  val varsRef: ResolveTargets.ValueSet = ValueSet(VAR)
  val valuesRef: ResolveTargets.ValueSet = varsRef + VAL
  val methodsOnly: ResolveTargets.ValueSet = ValueSet(METHOD)
  val methodRef: ResolveTargets.ValueSet = valuesRef + METHOD
  val refExprLastRef: ResolveTargets.ValueSet = methodRef + OBJECT
  val refExprQualRef: ResolveTargets.ValueSet = refExprLastRef + PACKAGE
  val stableImportSelector: ResolveTargets.ValueSet = refExprQualRef + CLASS

  val packageRef: ResolveTargets.ValueSet = ValueSet(PACKAGE)
  val annotCtor: ResolveTargets.ValueSet = ValueSet(CLASS, ANNOTATION)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/resolve/StdKinds.scala | Scala | apache-2.0 | 1,810 |
package scala.pickling
package spi
/** This is the interface used by picklers to register/handle circular references in picklees.
 *
 *  This interface is stateful, and we assume only one thread will talk to it at a time.
 */
trait RefPicklingRegistry {
  /** Returns the OID of the picklee.
   * @param picklee An object to tag/track
   * @return -1 if this object hasn't been seen since the last clear on this thread *OR*
   *         the previous OID if we have seen it before.
   */
  def registerPicklee(picklee: Any): Int
  /** Clears all registered objects out of this cache. */
  def clear(): Unit
}
/** This is the interface used by unpicklers to register/handle `Ref` types when unpickling.
 *
 *  We assume only one thread will be talking to a RefUnpicklingRegistry at a time.
 */
trait RefUnpicklingRegistry {
  /** Grabs the registration id for the next object. */
  def preregisterUnpicklee(): Int
  /** Registers an object to an id, after its FIRST deserialization.
   *  NOTE(review): the method name contains a typo ("regsiter"), but it is part of
   *  the published API and is therefore preserved here.
   */
  def regsiterUnpicklee(oid: Int, value: Any): Unit
  /** Looks up an unpicklee by its object id. Throws an exception if oid is not valid. */
  def lookupUnpicklee(oid: Int): Any
  /** Removes all instances from the registry. This should be done AFTER a top-level unpickle/pickle call. */
  def clear(): Unit
}
/** The owner of `Ref` registries. These are used to help detangle circular references/trees when pickling or to
 *  help optimise/reduce the amount of data pickled if an object shows up more than once.
 */
trait RefRegistry {
  /** Returns the pickling registry for this thread. */
  def pickle: RefPicklingRegistry
  /** Returns the unpickling registry for this thread. */
  def unpickle: RefUnpicklingRegistry
} | beni55/pickling | core/src/main/scala/scala/pickling/spi/RefRegistry.scala | Scala | bsd-3-clause | 1,731 |
import java.io.StringReader
import au.com.bytecode.opencsv.CSVReader
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
/** One voting record of the ESC 2015 grand-final results CSV: jury marks, rank, televote and total points. */
case class EuroData(val fromCountry: String, val toCountry: String, val juryA: Int, val juryB: Int, val juryC: Int,
val juryD: Int, val juryE: Int, val rank: Int, val televote: Int, val combined: Int, val points: Int)
object EurovisionContent {

  /** Default location of the ESC 2015 grand-final results CSV. */
  val DefaultInputFile = "ESC-2015-grand_final-full_results.csv"

  /** Backwards-compatible entry point reading the default CSV. */
  def winner: Unit = winner(DefaultInputFile)

  /**
   * Prints every country with its summed received points, ordered highest first.
   *
   * @param inputFile path of the results CSV to analyse
   */
  def winner(inputFile: String): Unit = {
    val sc = new SparkContext("local", "Simple App")
    try {
      // remove empty lines before parsing
      val input = sc.textFile(inputFile).filter(line => line.nonEmpty)
      // Parse each physical line as a single CSV record.
      val records = input.map { line =>
        val reader = new CSVReader(new StringReader(line))
        try reader.readNext() finally reader.close() // close the reader (was leaked before)
      }
      // Keep only complete rows (11 columns); shorter rows would break the toInt calls below.
      val fullData = records.filter(_.length == 11)
      val euData = fullData.map(x => EuroData(x(0), x(1), x(2).toInt, x(3).toInt, x(4).toInt, x(5).toInt,
        x(6).toInt, x(7).toInt, x(8).toInt, x(9).toInt, x(10).toInt))
      // Run the aggregation through Spark SQL:
      // sum points per receiving country, highest total first.
      val sqlContext = new SQLContext(sc)
      import sqlContext.sql
      val countries = sqlContext.createDataFrame(euData)
      countries.registerTempTable("votes")
      countries.cache()
      val highestRank = sql("SELECT toCountry,sum(points) points FROM votes Group by toCountry order by points desc")
      highestRank.collect().foreach(println(_))
    } finally {
      // Always release the local SparkContext (the original never stopped it).
      sc.stop()
    }
  }

  def main(args: Array[String]) {
    // Optional first argument overrides the default CSV location.
    winner(args.headOption.getOrElse(DefaultInputFile))
  }
}
| edvorkin/spark-eurovision | src/main/scala/EurovisionContent.scala | Scala | apache-2.0 | 1,767 |
/* Scala.js compiler
* Copyright 2013 LAMP/EPFL
* @author Sébastien Doeraene
*/
package org.scalajs.core.compiler
import scala.tools.nsc._
/** Core definitions for Scala.js
*
* @author Sébastien Doeraene
*/
trait JSDefinitions { self: JSGlobalAddons =>
  import global._

  // scalastyle:off line.size.limit

  /** Single shared instance of [[JSDefinitionsClass]]. */
  object jsDefinitions extends JSDefinitionsClass // scalastyle:ignore

  import definitions._
  import rootMirror._

  /** Lazily-resolved compiler symbols for the Scala.js runtime library.
   *  All members are lazy so that symbols are only looked up when first used.
   */
  class JSDefinitionsClass {

    // --- the scala.scalajs.js package and its top-level methods ---
    lazy val ScalaJSJSPackage = getPackage(newTermNameCached("scala.scalajs.js")) // compat 2.10/2.11
      lazy val JSPackage_typeOf = getMemberMethod(ScalaJSJSPackage, newTermName("typeOf"))
      lazy val JSPackage_constructorOf = getMemberMethod(ScalaJSJSPackage, newTermName("constructorOf"))
      lazy val JSPackage_debugger = getMemberMethod(ScalaJSJSPackage, newTermName("debugger"))
      lazy val JSPackage_native = getMemberMethod(ScalaJSJSPackage, newTermName("native"))

    lazy val JSNativeAnnotation = getRequiredClass("scala.scalajs.js.native")

    // --- the js.Any type hierarchy ---
    lazy val JSAnyClass = getRequiredClass("scala.scalajs.js.Any")
    lazy val JSDynamicClass = getRequiredClass("scala.scalajs.js.Dynamic")
    lazy val JSDictionaryClass = getRequiredClass("scala.scalajs.js.Dictionary")
      lazy val JSDictionary_delete = getMemberMethod(JSDictionaryClass, newTermName("delete"))
    lazy val JSObjectClass = getRequiredClass("scala.scalajs.js.Object")
    lazy val JSThisFunctionClass = getRequiredClass("scala.scalajs.js.ThisFunction")

    lazy val JSGlobalScopeClass = getRequiredClass("scala.scalajs.js.GlobalScope")

    lazy val UndefOrClass = getRequiredClass("scala.scalajs.js.UndefOr")

    lazy val JSArrayClass = getRequiredClass("scala.scalajs.js.Array")
      lazy val JSArray_apply = getMemberMethod(JSArrayClass, newTermName("apply"))
      lazy val JSArray_update = getMemberMethod(JSArrayClass, newTermName("update"))

    // js.FunctionN (0-22) and js.ThisFunctionN (0-21)
    lazy val JSFunctionClasses = (0 to 22) map (n => getRequiredClass("scala.scalajs.js.Function"+n))
    lazy val JSThisFunctionClasses = (0 to 21) map (n => getRequiredClass("scala.scalajs.js.ThisFunction"+n))
    lazy val AllJSFunctionClasses = JSFunctionClasses ++ JSThisFunctionClasses

    lazy val RuntimeExceptionClass = requiredClass[RuntimeException]
    lazy val JavaScriptExceptionClass = getClassIfDefined("scala.scalajs.js.JavaScriptException")

    // --- annotations steering JS interop and exports ---
    lazy val JSNameAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSName")
    lazy val JSFullNameAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSFullName")
    lazy val JSBracketAccessAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSBracketAccess")
    lazy val JSBracketCallAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSBracketCall")
    lazy val JSExportAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSExport")
    lazy val JSExportDescendentObjectsAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSExportDescendentObjects")
    lazy val JSExportDescendentClassesAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSExportDescendentClasses")
    lazy val JSExportAllAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSExportAll")
    lazy val JSExportNamedAnnotation = getRequiredClass("scala.scalajs.js.annotation.JSExportNamed")
    lazy val ScalaJSDefinedAnnotation = getRequiredClass("scala.scalajs.js.annotation.ScalaJSDefined")

    lazy val JSAnyTpe = JSAnyClass.toTypeConstructor
    lazy val JSObjectTpe = JSObjectClass.toTypeConstructor
    lazy val JSGlobalScopeTpe = JSGlobalScopeClass.toTypeConstructor

    lazy val JSFunctionTpes = JSFunctionClasses.map(_.toTypeConstructor)

    // --- companion modules and their members ---
    lazy val JSAnyModule = JSAnyClass.companionModule
      def JSAny_fromFunction(arity: Int): TermSymbol = getMemberMethod(JSAnyModule, newTermName("fromFunction"+arity))

    lazy val JSDynamicModule = JSDynamicClass.companionModule
      lazy val JSDynamic_newInstance = getMemberMethod(JSDynamicModule, newTermName("newInstance"))
    lazy val JSDynamicLiteral = getMemberModule(JSDynamicModule, newTermName("literal"))
      lazy val JSDynamicLiteral_applyDynamicNamed = getMemberMethod(JSDynamicLiteral, newTermName("applyDynamicNamed"))
      lazy val JSDynamicLiteral_applyDynamic = getMemberMethod(JSDynamicLiteral, newTermName("applyDynamic"))

    lazy val JSObjectModule = JSObjectClass.companionModule
      lazy val JSObject_hasProperty = getMemberMethod(JSObjectModule, newTermName("hasProperty"))
      lazy val JSObject_properties = getMemberMethod(JSObjectModule, newTermName("properties"))

    lazy val JSArrayModule = JSArrayClass.companionModule
      lazy val JSArray_create = getMemberMethod(JSArrayModule, newTermName("apply"))

    lazy val JSThisFunctionModule = JSThisFunctionClass.companionModule
      def JSThisFunction_fromFunction(arity: Int): TermSymbol = getMemberMethod(JSThisFunctionModule, newTermName("fromFunction"+arity))

    lazy val JSConstructorTagModule = getRequiredModule("scala.scalajs.js.ConstructorTag")
      lazy val JSConstructorTag_materialize = getMemberMethod(JSConstructorTagModule, newTermName("materialize"))

    lazy val RawJSTypeAnnot = getRequiredClass("scala.scalajs.js.annotation.RawJSType")
    lazy val ExposedJSMemberAnnot = getRequiredClass("scala.scalajs.js.annotation.ExposedJSMember")

    // --- scala.scalajs.runtime helpers ---
    lazy val RuntimeStringModule = getRequiredModule("scala.scalajs.runtime.RuntimeString")
    lazy val RuntimeStringModuleClass = RuntimeStringModule.moduleClass

    lazy val BooleanReflectiveCallClass = getRequiredClass("scala.scalajs.runtime.BooleanReflectiveCall")
    lazy val NumberReflectiveCallClass = getRequiredClass("scala.scalajs.runtime.NumberReflectiveCall")
    lazy val IntegerReflectiveCallClass = getRequiredClass("scala.scalajs.runtime.IntegerReflectiveCall")
    lazy val LongReflectiveCallClass = getRequiredClass("scala.scalajs.runtime.LongReflectiveCall")

    lazy val RuntimePackageModule = getPackageObject("scala.scalajs.runtime")
      lazy val Runtime_wrapJavaScriptException = getMemberMethod(RuntimePackageModule, newTermName("wrapJavaScriptException"))
      lazy val Runtime_unwrapJavaScriptException = getMemberMethod(RuntimePackageModule, newTermName("unwrapJavaScriptException"))
      lazy val Runtime_genTraversableOnce2jsArray = getMemberMethod(RuntimePackageModule, newTermName("genTraversableOnce2jsArray"))
      lazy val Runtime_jsTupleArray2jsObject = getMemberMethod(RuntimePackageModule, newTermName("jsTupleArray2jsObject"))
      lazy val Runtime_constructorOf = getMemberMethod(RuntimePackageModule, newTermName("constructorOf"))
      lazy val Runtime_newConstructorTag = getMemberMethod(RuntimePackageModule, newTermName("newConstructorTag"))
      lazy val Runtime_propertiesOf = getMemberMethod(RuntimePackageModule, newTermName("propertiesOf"))
      lazy val Runtime_environmentInfo = getMemberMethod(RuntimePackageModule, newTermName("environmentInfo"))
      lazy val Runtime_linkingInfo = getMemberMethod(RuntimePackageModule, newTermName("linkingInfo"))

    lazy val WrappedArrayClass = getRequiredClass("scala.scalajs.js.WrappedArray")
      lazy val WrappedArray_ctor = WrappedArrayClass.primaryConstructor

    // This is a def, since similar symbols (arrayUpdateMethod, etc.) are in runDefinitions
    // (rather than definitions) and we weren't sure if it is safe to make this a lazy val
    def ScalaRunTime_isArray: Symbol = getMemberMethod(ScalaRunTimeModule, newTermName("isArray")).suchThat(_.tpe.params.size == 2)

    lazy val BoxesRunTime_boxToCharacter = getMemberMethod(BoxesRunTimeModule, newTermName("boxToCharacter"))
    lazy val BoxesRunTime_unboxToChar = getMemberMethod(BoxesRunTimeModule, newTermName("unboxToChar"))
  }

  // scalastyle:on line.size.limit
}
| jasonchaffee/scala-js | compiler/src/main/scala/org/scalajs/core/compiler/JSDefinitions.scala | Scala | bsd-3-clause | 7,909 |
package core
import com.bryzek.apidoc.spec.v0.models.Enum
import java.net.{MalformedURLException, URL}
import scala.util.{Failure, Success, Try}
object Util {

  /** Accepted URI schemes, shared by isValidUri and validateUri. */
  private val ValidUriPrefixes = Seq("http://", "https://", "file://")

  /**
   * Selects the named parameters in a path template.
   * E.g. /:org/:service/foo returns Seq("org", "service").
   */
  def namedParametersInPath(path: String): Seq[String] = {
    path.split("/").flatMap { name =>
      if (name.startsWith(":")) {
        // Cut at the first '.' so ":service.json" yields "service".
        val dot = name.indexOf(".")
        val end = if (dot >= 0) dot else name.length
        Some(name.slice(1, end))
      } else {
        None
      }
    }
  }

  /** True when `value` is the name of one of `enum`'s declared values. */
  def isValidEnumValue(enum: Enum, value: String): Boolean = {
    enum.values.map(_.name).contains(value)
  }

  /** Cheap prefix-only check; see validateUri for the full validation. */
  def isValidUri(value: String): Boolean = {
    val formatted = value.trim.toLowerCase
    ValidUriPrefixes.exists(formatted.startsWith)
  }

  /** Validates a URI, returning Nil when valid or a list of error messages. */
  def validateUri(value: String): Seq[String] = {
    val formatted = value.trim.toLowerCase
    if (!ValidUriPrefixes.exists(formatted.startsWith)) {
      Seq(s"URI[$value] must start with http://, https://, or file://")
    } else if (formatted.endsWith("/")) {
      Seq(s"URI[$value] cannot end with a '/'")
    } else {
      // Bug fix: the previous inner match only handled MalformedURLException and
      // threw a MatchError for any other failure raised by `new URL(...)`.
      Try(new URL(value)) match {
        case Success(_) => Nil
        case Failure(e) => Seq(s"URL is not valid: ${e.getMessage}")
      }
    }
  }
}
| Seanstoppable/apidoc | core/src/main/scala/core/Util.scala | Scala | mit | 1,513 |
package structures
import simulacrum.typeclass
/** Type class for contravariant functors: contexts that consume values of their
 *  type parameter, so a `B => A` turns an `F[A]` into an `F[B]`.
 */
@typeclass trait Contravariant[F[_]] extends Any with Exponential[F] { self =>
  /** Re-targets `fa` from consuming `A`s to consuming `B`s via `f`. */
  def contramap[A, B](fa: F[A])(f: B => A): F[B]
  // xmap only needs the B => A direction for a contravariant context.
  override def xmap[A, B](fa: F[A])(f: A => B, g: B => A): F[B] =
    contramap(fa)(g)
  // Two contravariant layers cancel out, yielding a covariant Functor.
  def compose[G[_]: Contravariant]: Functor[Lambda[X => F[G[X]]]] =
    new Contravariant.Composite[F, G] {
      def F = self
      def G = Contravariant[G]
    }
  // Contravariant outside + covariant inside stays contravariant.
  override def composeWithFunctor[G[_]: Functor]: Contravariant[Lambda[X => F[G[X]]]] =
    new Contravariant.CovariantComposite[F, G] {
      def F = self
      def G = Functor[G]
    }
  override def composeWithContravariant[G[_]: Contravariant]: Functor[Lambda[X => F[G[X]]]] =
    compose[G]
}
object Contravariant {
  /** Composition of two contravariant layers; the result is covariant. */
  trait Composite[F[_], G[_]] extends Any with Functor[Lambda[X => F[G[X]]]] {
    def F: Contravariant[F]
    def G: Contravariant[G]
    def map[A, B](fga: F[G[A]])(f: A => B): F[G[B]] =
      F.contramap(fga)(gb => G.contramap(gb)(f))
  }
  /** Contravariant layer over a covariant layer; the result is contravariant. */
  trait CovariantComposite[F[_], G[_]] extends Any with Contravariant[Lambda[X => F[G[X]]]] {
    def F: Contravariant[F]
    def G: Functor[G]
    def contramap[A, B](fga: F[G[A]])(f: B => A): F[G[B]] =
      F.contramap(fga)(gb => G.map(gb)(f))
  }
}
| mpilquist/Structures | core/shared/src/main/scala/structures/Contravariant.scala | Scala | bsd-3-clause | 1,252 |
package sigmastate.helpers
import org.ergoplatform.SigmaConstants.ScriptCostLimit
import org.ergoplatform.ErgoLikeContext.Height
import org.ergoplatform._
import org.ergoplatform.validation.{ValidationRules, SigmaValidationSettings}
import sigmastate.AvlTreeData
import sigmastate.eval._
import sigmastate.interpreter.{ContextExtension, CryptoConstants}
import sigmastate.serialization.{SigmaSerializer, GroupElementSerializer}
import special.collection.Coll
import special.sigma.{Box, PreHeader, Header}
/** Factory helpers for building ErgoLikeContext instances in tests. */
object ErgoLikeContextTesting {
  /* NO HF PROOF:
  Changed: val dummyPubkey from `Array[Byte] = Array.fill(32)(0: Byte)` to `GroupElementSerializer.toBytes(CryptoConstants.dlogGroup.generator)`
  Motivation: to avoid exception on deserialization(wrong size, needs to be 33 bytes) and later in GroupElement.toString (infinity was not handled) and to provide more practical value in tests.
  Safety:
  Used only in tests and not used in ergo.
  Examined ergo code: all (with IDE's "find usages" action).
  */
  // Serialized generator point of the dlog group (33 bytes) — a valid, parseable pubkey.
  val dummyPubkey: Array[Byte] = GroupElementSerializer.toBytes(CryptoConstants.dlogGroup.generator)

  val noBoxes: IndexedSeq[ErgoBox] = IndexedSeq.empty[ErgoBox]
  val noHeaders: Coll[Header] = CostingSigmaDslBuilder.Colls.emptyColl[Header]

  /** A minimal pre-header at the given height with the given miner public key; all other fields dummy. */
  def dummyPreHeader(currentHeight: Height, minerPk: Array[Byte]): PreHeader = CPreHeader(0,
    parentId = Colls.emptyColl[Byte],
    timestamp = 3,
    nBits = 0,
    height = currentHeight,
    minerPk = GroupElementSerializer.parse(SigmaSerializer.startReader(minerPk)),
    votes = Colls.emptyColl[Byte]
  )

  /** Builds a context where `self` must be one of `boxesToSpend` (selfIndex derived via indexOf). */
  def apply(currentHeight: Height,
            lastBlockUtxoRoot: AvlTreeData,
            minerPubkey: Array[Byte],
            boxesToSpend: IndexedSeq[ErgoBox],
            spendingTransaction: ErgoLikeTransactionTemplate[_ <: UnsignedInput],
            self: ErgoBox,
            activatedVersion: Byte,
            extension: ContextExtension = ContextExtension.empty,
            vs: SigmaValidationSettings = ValidationRules.currentSettings): ErgoLikeContext =
    new ErgoLikeContext(
      lastBlockUtxoRoot, noHeaders, dummyPreHeader(currentHeight, minerPubkey), noBoxes,
      boxesToSpend, spendingTransaction, boxesToSpend.indexOf(self), extension, vs,
      ScriptCostLimit.value, initCost = 0L, activatedVersion)

  /** Variant taking data boxes and an explicit self index instead of the self box itself. */
  def apply(currentHeight: Height,
            lastBlockUtxoRoot: AvlTreeData,
            minerPubkey: Array[Byte],
            dataBoxes: IndexedSeq[ErgoBox],
            boxesToSpend: IndexedSeq[ErgoBox],
            spendingTransaction: ErgoLikeTransactionTemplate[_ <: UnsignedInput],
            selfIndex: Int,
            activatedVersion: Byte) =
    new ErgoLikeContext(
      lastBlockUtxoRoot, noHeaders, dummyPreHeader(currentHeight, minerPubkey),
      dataBoxes, boxesToSpend, spendingTransaction, selfIndex, ContextExtension.empty,
      ValidationRules.currentSettings, ScriptCostLimit.value,
      initCost = 0L, activatedVersion)

  /** Simplest possible context: height 0, dummy tree/pubkey, an empty transaction, spending only `selfDesc`. */
  def dummy(selfDesc: ErgoBox, activatedVersion: Byte): ErgoLikeContext =
    ErgoLikeContextTesting(currentHeight = 0,
      lastBlockUtxoRoot = AvlTreeData.dummy, dummyPubkey, boxesToSpend = IndexedSeq(selfDesc),
      spendingTransaction = ErgoLikeTransaction(IndexedSeq(), IndexedSeq()), self = selfDesc,
      activatedVersion = activatedVersion)

  val noInputs: Array[Box] = Array[Box]()
  val noOutputs: Array[Box] = Array[Box]()
}
/** Snapshot of chain state: the current height plus the last block's UTXO-set tree digest. */
case class BlockchainState(currentHeight: Height, lastBlockUtxoRoot: AvlTreeData)
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/test/scala/sigmastate/helpers/ErgoLikeContextTesting.scala | Scala | mit | 3,477 |
package exception
import play.api.libs.ws.WSResponse
/**
* @author Ruslan Gunawardana
*/
/**
 * Signals a failed call to the gifts service.
 *
 * Improvements: the offending response is now exposed (`val`) so callers can
 * inspect it, and a message is passed to Exception (getMessage was null before).
 *
 * @param response the HTTP response that triggered the failure
 * @author Ruslan Gunawardana
 */
class GiftsServiceException(val response: WSResponse)
  extends Exception(s"Gifts service request failed with HTTP status ${response.status}") {
}
| Azarieled/GiftRecommenderSystem | frontend/app/exception/GiftsServiceException.scala | Scala | apache-2.0 | 168 |
package com.v_standard.vsp.compiler
import com.v_standard.vsp.script.ScriptDefine
import java.io.{File, FileNotFoundException}
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
/**
 * Spec class for the template Token implementations
 * (StringToken, PrintToken, SyntaxToken, IncludeToken).
 * The describe/it texts below are runtime test names and are intentionally
 * kept in the original Japanese.
 */
class TokenSpec extends FunSpec with ShouldMatchers {
  // StringToken: accumulated text is emitted as an escaped string literal inside print(...).
  describe("StringToken.toScript") {
    it("追加した文字、文字列が文字列として print の引き数に与えられた文字列として返される") {
      val token = new StringToken
      token += 'a'
      token += "b\nc"
      token += "d\te"
      token += "f\rg"
      token += "\""
      token += 'h'
      token.toScript should be ("print(\"ab\\ncd\\tef\\rg\\\"h\");\n")
    }
  }
  // PrintToken: accumulated text is emitted as an expression, escaped and null-guarded.
  describe("PrintToken.toScript") {
    it("追加した文字、文字列が評価され print の引き数に与えられた文字列として返される") {
      val token = new PrintToken
      token += 'a'
      token += "b\nc"
      token += "d\te"
      token += "f\rg"
      token += "\""
      token += 'h'
      token.toScript should be ("print(" + ScriptDefine.SCRIPT_OBJ_NAME +
        ".escape((ab\ncd\tef\rg\"h) == null ? \"\" : (ab\ncd\tef\rg\"h)));\n")
    }
  }
  // SyntaxToken: accumulated text is emitted verbatim (raw script code).
  describe("SyntaxToken.toScript") {
    it("追加した文字、文字列がそのまま返される") {
      val token = new SyntaxToken
      token += 'a'
      token += "b\nc"
      token += "d\te"
      token += "f\rg"
      token += "\""
      token += 'h'
      token.toScript should be ("ab\ncd\tef\rg\"h\n")
    }
  }
  // IncludeToken: depth limiting, missing files, and nested include tracking.
  describe("IncludeToken.toScript") {
    describe("ScriptDefine.MAX_INCLUDE 以上の深さを指定") {
      it("IllegalStateException") {
        val context = ScriptConverterContext(null, ScriptDefine.MAX_INCLUDE)
        val token = new IncludeToken(context)
        evaluating {
          token.toScript
        } should produce [IllegalStateException]
      }
    }
    describe("存在しないファイルを指定") {
      it("FileNotFoundException") {
        val context = ScriptConverterContext(TokenParseConfig(new File("./"), '%'), 0)
        val token = new IncludeToken(context)
        token += "notfound.txt"
        evaluating {
          token.toScript
        } should produce [FileNotFoundException]
      }
    }
    describe("ネストしてインクルードしたファイルを指定") {
      it("FileNotFoundException") {
        val context = ScriptConverterContext(TokenParseConfig(new File("./templates"), '%'), 0)
        val token = new IncludeToken(context)
        token += "nest_include.html"
        val res = token.toScript
        // All transitively included files must be recorded in the context.
        context.includeFiles.size should be (3)
        context.includeFiles(new File("./templates/nest_include.html").getCanonicalFile) should be (true)
        context.includeFiles(new File("./templates/include_twice.html").getCanonicalFile) should be (true)
        context.includeFiles(new File("./templates/common.html").getCanonicalFile) should be (true)
      }
    }
  }
}
| VanishStandard/vsp | src/test/scala/com/v_standard/vsp/compiler/TokenSpec.scala | Scala | bsd-3-clause | 2,984 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.variable
/** The type of the domain of DoubleVariables.
    @author Andrew McCallum */
trait DoubleDomain extends Domain {
  type Value = Double
  // Full double range; concrete domains may narrow these bounds.
  def minValue = Double.MinValue
  def maxValue = Double.MaxValue
}
/** The domain of DoubleVariables.
    @author Andrew McCallum */
object DoubleDomain extends DoubleDomain
/** A Variable with a real (double) value.
    If you want a variable that holds a single double but also has a value that inherits from Tensor, then consider RealVar.
    @author Andrew McCallum */
trait DoubleVar extends ScalarVar {
  type Value = Double
  def domain: DoubleDomain = DoubleDomain
  @inline final def value: Double = doubleValue
  def doubleValue: Double
  // Truncates toward zero.
  def intValue: Int = doubleValue.toInt
  override def toString = printName + "(" + doubleValue.toString + ")"
}
/** A DoubleVar whose value can be mutated in place. */
trait MutableDoubleVar extends DoubleVar with MutableDoubleScalarVar with MutableIntScalarVar with MutableVar {
  override type Value = Double
}
/** A Variable with a mutable Double value.
    @author Andrew McCallum */
class DoubleVariable(initialValue: Double) extends MutableDoubleVar {
  def this() = this(0.0)
  private var _value: Double = initialValue
  @inline final def doubleValue = _value
  // NOTE(review): the arithmetic updates pass a null DiffList, so they are NOT undoable.
  def +=(x:Double) = set(_value + x)(null) // Should we allow non-null DiffLists?
  def -=(x:Double) = set(_value - x)(null)
  def *=(x:Double) = set(_value * x)(null)
  def /=(x:Double) = set(_value / x)(null)
  // Records a DoubleDiff only when the value actually changes and a DiffList was provided.
  def set(newValue: Double)(implicit d: DiffList): Unit = if (newValue != _value) {
    if (d ne null) d += new DoubleDiff(_value, newValue)
    _value = newValue
  }
  final def set(newValue:Int)(implicit d:DiffList): Unit = set(newValue.toDouble)
  //override def :=(newValue:Double): Unit = set(newValue)(null) // To avoid wrapping the Double when calling the generic method in MutableVar, but this method is final in MutableVar.
  /** Undoable record of a change from oldValue to newValue. */
  case class DoubleDiff(oldValue: Double, newValue: Double) extends Diff {
    def variable: DoubleVariable = DoubleVariable.this
    def redo() = _value = newValue
    def undo() = _value = oldValue
  }
}
| hlin117/factorie | src/main/scala/cc/factorie/variable/DoubleVariable.scala | Scala | apache-2.0 | 2,831 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.spark
import org.apache.spark.SparkContext
object JobProgressUtil {
  /** Overall task-completion percentage (0-100) across every stage of every job
    * in the given job group. Returns 0 when no tasks have been scheduled yet. */
  def progress(sc: SparkContext, jobGroup: String): Int = {
    val tracker = sc.statusTracker
    val stageInfos = tracker
      .getJobIdsForGroup(jobGroup)
      .flatMap(tracker.getJobInfo(_))
      .flatMap(_.stageIds().flatMap(tracker.getStageInfo))
    val totalTasks = stageInfos.map(_.numTasks).sum
    val completedTasks = stageInfos.map(_.numCompletedTasks).sum
    // Guard the division: a group with no scheduled tasks reports 0% progress.
    if (totalTasks == 0) 0 else (100 * completedTasks.toDouble / totalTasks).toInt
  }
}
| sergeymazin/zeppelin | spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/JobProgressUtil.scala | Scala | apache-2.0 | 1,432 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.metrics
import java.time.format.{DateTimeFormatterBuilder, SignStyle}
import java.time.{Instant, ZoneOffset, ZonedDateTime}
import java.util.Locale
/**
*/
/** Formats epoch-millisecond timestamps and ZonedDateTime values in two layouts:
  * a human-readable one ("yyyy-MM-dd HH:mm:ss+HHMM") and a filename-safe one
  * ("yyyy-MM-ddTHH:mm:ss.SSS+HHMM").
  */
object TimeStampFormatter {
  import java.time.temporal.ChronoField._

  // Shared "yyyy-MM-dd<sep>HH:mm:ss" prefix. A def (not val) so each public
  // formatter below composes its suffix onto a fresh builder instance.
  private def dateTimeBuilder(dateTimeSeparator: Char): DateTimeFormatterBuilder =
    new DateTimeFormatterBuilder()
      .parseCaseInsensitive()
      .appendValue(YEAR, 4, 10, SignStyle.EXCEEDS_PAD)
      .appendLiteral('-')
      .appendValue(MONTH_OF_YEAR, 2)
      .appendLiteral('-')
      .appendValue(DAY_OF_MONTH, 2)
      .appendLiteral(dateTimeSeparator)
      .appendValue(HOUR_OF_DAY, 2)
      .appendLiteral(':')
      .appendValue(MINUTE_OF_HOUR, 2)
      .appendLiteral(':')
      .appendValue(SECOND_OF_MINUTE, 2)

  // 'T' separator plus milliseconds; no spaces, so safe for file names.
  val noSpaceTimestampFormat =
    dateTimeBuilder('T')
      .appendLiteral('.')
      .appendValue(MILLI_OF_SECOND, 3)
      .appendOffset("+HHMM", "Z")
      .toFormatter(Locale.US)

  // Space separator, second precision.
  val humanReadableTimestampFormatter =
    dateTimeBuilder(' ')
      .appendOffset("+HHMM", "Z")
      .toFormatter(Locale.US)

  /** Human-readable rendering of an already-zoned date-time. */
  def formatTimestamp(time: ZonedDateTime): String =
    humanReadableTimestampFormatter.format(time)

  /** Human-readable rendering of epoch millis in the given (default: system) zone. */
  def formatTimestamp(timeMillis: Long, zone: ZoneOffset = systemTimeZone): String =
    humanReadableTimestampFormatter.format(
      ZonedDateTime.ofInstant(Instant.ofEpochMilli(timeMillis), zone))

  /** Filename-safe rendering of epoch millis in the system time zone. */
  def formatTimestampWithNoSpace(timeMillis: Long): String =
    noSpaceTimestampFormat.format(
      ZonedDateTime.ofInstant(Instant.ofEpochMilli(timeMillis), systemTimeZone))
}
| wvlet/airframe | airframe-metrics/.jvm/src/main/scala/wvlet/airframe/metrics/TimeStampFormatter.scala | Scala | apache-2.0 | 2,431 |
package scribe.benchmark.tester
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import cats.implicits._
import scribe.Logger
import scribe.cats._
import scribe.file._
import scribe.format._
/** Benchmark tester exercising scribe's cats-effect (IO) logging path,
    writing each message through an fs2 stream to a dedicated log file. */
class ScribeEffectLoggingTester extends LoggingTester {
  // Lazy so no file handle or logger is created until first use.
  private lazy val fileWriter = FileWriter("logs" / "scribe-effect.log")
  private lazy val formatter = formatter"$date $levelPaddedRight [$threadName] $messages"
  private lazy val logger = Logger.empty.orphan().withHandler(formatter = formatter, writer = fileWriter).f[IO]
  // Referencing the lazy val here forces logger (and its writer) to initialize
  // before the timed run, keeping setup cost out of the benchmark.
  override def init(): Unit = logger
  // Streams messages in chunks of 1000, logging each via an IO effect, then
  // blocks until the whole stream has been drained.
  override def run(messages: Iterator[String]): Unit = fs2.Stream
    .fromIterator[IO](messages, 1000)
    .evalTap(msg => logger.info(msg))
    .compile
    .drain
    .unsafeRunSync()
  override def dispose(): Unit = fileWriter.dispose()
} | outr/scribe | benchmarks/src/main/scala/scribe/benchmark/tester/ScribeEffectLoggingTester.scala | Scala | mit | 822
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.storage.kv
import java.io.File
import org.apache.samza.container.SamzaContainerContext
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.storage.kv._
import org.apache.samza.system.SystemStreamPartition
import org.rocksdb.WriteOptions
import org.apache.samza.config.StorageConfig._
class RocksDbKeyValueStorageEngineFactory[K, V] extends BaseKeyValueStorageEngineFactory[K, V] {
  /**
   * Return a KeyValueStore instance for the given store name
   * @param storeName Name of the store
   * @param storeDir The directory of the store
   * @param registry MetricsRegistry to which to publish store specific metrics.
   * @param changeLogSystemStreamPartition Samza stream partition from which to receive the changelog.
   * @param containerContext Information about the container in which the task is executing.
   * @return A valid KeyValueStore instance
   */
  override def getKVStore(storeName: String,
                          storeDir: File,
                          registry: MetricsRegistry,
                          changeLogSystemStreamPartition: SystemStreamPartition,
                          containerContext: SamzaContainerContext): KeyValueStore[Array[Byte], Array[Byte]] = {
    // Narrow the container config to this store's "stores.<name>." subtree.
    val storeScopedConfig = containerContext.config.subset("stores." + storeName + ".", true)
    // A store is "logged" when a changelog stream has been configured for it.
    val hasChangelog = containerContext.config.getChangelogStream(storeName).isDefined
    new RocksDbKeyValueStore(
      storeDir,
      RocksDbKeyValueStore.options(storeScopedConfig, containerContext),
      storeScopedConfig,
      hasChangelog,
      storeName,
      // WAL disabled: durability comes from the changelog, not RocksDB's log.
      new WriteOptions().setDisableWAL(true),
      new KeyValueStoreMetrics(storeName, registry))
  }
}
| davidzchen/samza | samza-kv-rocksdb/src/main/scala/org/apache/samza/storage/kv/RocksDbKeyValueStorageEngineFactory.scala | Scala | apache-2.0 | 2,624 |
package com.twitter.inject.thrift.integration.filters
import com.twitter.finagle.Filter
import com.twitter.finagle.Service
import com.twitter.scrooge
import com.twitter.test.thriftscala.EchoService.SetTimesToEcho
import com.twitter.util.Future
import com.twitter.util.logging.Logging
/** Logging filter for SetTimesToEcho calls: records the requested `times`
    value regardless of whether the request arrives wrapped in a scrooge
    Request envelope or as bare method Args, then forwards to the service. */
class SetTimesEchoTypeAgnosticFilter extends Filter.TypeAgnostic with Logging {
  def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] = new Filter[Req, Rep, Req, Rep] {
    def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
      // Unwrap the `times` argument from either request representation.
      val times = request match {
        case r: scrooge.Request[_] =>
          r.asInstanceOf[scrooge.Request[SetTimesToEcho.Args]].args.times
        case bare =>
          bare.asInstanceOf[SetTimesToEcho.Args].times
      }
      info("SetTimesToEcho called with times " + times)
      service(request)
    }
  }
}
| twitter/finatra | inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/filters/SetTimesEchoTypeAgnosticFilter.scala | Scala | apache-2.0 | 917 |
/*
* Copyright 2009 Mark Tye
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package net.liftweb.ext_api.appengine
import com.google.appengine.api.datastore.{Entity => GaeEntity}
import com.google.appengine.api.datastore.Blob
import com.google.appengine.api.datastore.Key
import com.google.appengine.api.datastore.Link
import com.google.appengine.api.datastore.ShortBlob
import com.google.appengine.api.datastore.Text
import com.google.appengine.api.users.{User => GaeUser}
import com.google.appengine.api.users.UserServiceFactory
import java.util.Date
import java.text.DateFormat
/** A typed property of a datastore Entity. The property's `kind` (its datastore
  * field name) is derived from the concrete subclass's name, and values are read
  * from / written to the owning entity's backing map.
  */
trait Property[T <: Any, Owner <: Entity[Owner]] {
  /** The entity this property belongs to. */
  val owner: Owner
  // Subclass simple names look like "Name$..." for anonymous/inner classes;
  // the last '$'-segment is used as the datastore field name.
  val kind = getClass.getSimpleName.split('$').toList.last
  /** The current value, or None when unset. */
  def asOption: Option[T] = owner.get(kind).asInstanceOf[Option[T]]
  /** Sets the value; a null argument clears the property. Returns the owner for chaining. */
  def apply(value: T): Owner = value match {
    case null => owner.set(kind, None)
    case _ => owner.set(kind, Some(value))
  }
  /** Sets the value from an Option; a null Option clears the property. */
  def apply(value: Option[T]): Owner = value match {
    case null => owner.set(kind, None)
    case _ => owner.set(kind, value)
  }
  /** String rendering of the value, or `default` when unset. */
  def asString(default: String): String = asOption.map(_.toString).getOrElse(default)
  def asText(default: String): xml.Text = xml.Text(asString(default))
  override def equals(other: Any): Boolean = other match {
    case that: Property[_, _] => (that canEqual this) && this.asOption == that.asOption
    // Bug fix: the match was non-exhaustive, so comparing against any
    // non-Property value threw MatchError instead of returning false.
    case _ => false
  }
  // equals is value-based, so hashCode must agree with it (previously missing).
  override def hashCode: Int = asOption.hashCode
  def canEqual(other: Any): Boolean = other.isInstanceOf[Property[_, _]]
}
object Property {
  // Lets a Property be used anywhere an Option of its value type is expected.
  implicit def propertyToOption[T, Owner <: Entity[Owner]](p: Property[T, Owner]): Option[T] = p.asOption
}
// Standard Scala/Java properties
// Each class simply fixes Property's value type parameter for a common type.
class BooleanProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Boolean, Owner]
class ByteProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Byte, Owner]
class ShortProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Short, Owner]
class IntProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Int, Owner]
class LongProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Long, Owner]
class FloatProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Float, Owner]
class DoubleProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Double, Owner]
class StringProperty[Owner <: Entity[Owner]](val owner:Owner) extends Property[String, Owner]
class DateProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Date, Owner] {
  // Sets the property to the current instant.
  def now() = apply(Some(new Date()))
  // Renders the date with the given format, or `default` when unset.
  def withFormat(default: String, format: DateFormat) = asOption.map(format.format(_)) getOrElse default
  def asText(default: String, format: DateFormat) = xml.Text(withFormat(default, format))
}
// Google API properties
// Property specializations for App Engine datastore value types.
class TextProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Text, Owner]
class ShortBlobProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[ShortBlob, Owner]
class BlobProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Blob, Owner]
class KeyProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Key, Owner]
class LinkProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[Link, Owner]
class UserProperty[Owner <: Entity[Owner]](val owner: Owner) extends Property[GaeUser, Owner] {
  // Sets the property to the currently signed-in user (clears it when none).
  def current(): Owner = apply(User.currentUser)
  def authDomain: Option[String] = asOption.map(_.getAuthDomain)
  def email: Option[String] = asOption.map(_.getEmail)
  def nickname: Option[String] = asOption.map(_.getNickname)
}
| mtye/lift-appengine | lift-appengine/src/main/scala/net/liftweb/ext_api/appengine/Property.scala | Scala | apache-2.0 | 3,995 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.File
import java.util.Date
import java.util.concurrent.TimeoutException
import scala.concurrent.duration._
import scala.language.postfixOps
import org.apache.hadoop.mapred._
import org.apache.hadoop.mapreduce.TaskType
import org.mockito.Matchers
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.BeforeAndAfter
import org.apache.spark._
import org.apache.spark.internal.io.{FileCommitProtocol, HadoopMapRedCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.rdd.{FakeOutputCommitter, RDD}
import org.apache.spark.util.{ThreadUtils, Utils}
/**
* Unit tests for the output commit coordination functionality.
*
* The unit test makes both the original task and the speculated task
* attempt to commit, where committing is emulated by creating a
* directory. If both tasks create directories then the end result is
* a failure.
*
* Note that there are some aspects of this test that are less than ideal.
* In particular, the test mocks the speculation-dequeuing logic to always
* dequeue a task and consider it as speculated. Immediately after initially
* submitting the tasks and calling reviveOffers(), reviveOffers() is invoked
* again to pick up the speculated task. This may be hacking the original
* behavior in too much of an unrealistic fashion.
*
* Also, the validation is done by checking the number of files in a directory.
* Ideally, an accumulator would be used for this, where we could increment
* the accumulator in the output committer's commitTask() call. If the call to
* commitTask() was called twice erroneously then the test would ideally fail because
* the accumulator would be incremented twice.
*
* The problem with this test implementation is that when both a speculated task and
* its original counterpart complete, only one of the accumulator's increments is
* captured. This results in a paradox where if the OutputCommitCoordinator logic
* was not in SparkHadoopWriter, the tests would still pass because only one of the
* increments would be captured even though the commit in both tasks was executed
* erroneously.
*
* See also: [[OutputCommitCoordinatorIntegrationSuite]] for integration tests that do
* not use mocks.
*/
class OutputCommitCoordinatorSuite extends SparkFunSuite with BeforeAndAfter {

  // Rebuilt for every test by the before/after hooks below.
  var outputCommitCoordinator: OutputCommitCoordinator = null
  var tempDir: File = null
  var sc: SparkContext = null

  before {
    tempDir = Utils.createTempDir()
    val conf = new SparkConf()
      .setMaster("local[4]")
      .setAppName(classOf[OutputCommitCoordinatorSuite].getSimpleName)
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
    sc = new SparkContext(conf) {
      override private[spark] def createSparkEnv(
          conf: SparkConf,
          isLocal: Boolean,
          listenerBus: LiveListenerBus): SparkEnv = {
        // Spy on the coordinator so individual tests can stub its responses.
        outputCommitCoordinator = spy(new OutputCommitCoordinator(conf, isDriver = true))
        // Use Mockito.spy() to maintain the default infrastructure everywhere else.
        // This mocking allows us to control the coordinator responses in test cases.
        SparkEnv.createDriverEnv(conf, isLocal, listenerBus,
          SparkContext.numDriverCores(master), Some(outputCommitCoordinator))
      }
    }
    // Use Mockito.spy() to maintain the default infrastructure everywhere else
    val mockTaskScheduler = spy(sc.taskScheduler.asInstanceOf[TaskSchedulerImpl])
    doAnswer(new Answer[Unit]() {
      override def answer(invoke: InvocationOnMock): Unit = {
        // Submit the tasks, then force the task scheduler to dequeue the
        // speculated task
        invoke.callRealMethod()
        mockTaskScheduler.backend.reviveOffers()
      }
    }).when(mockTaskScheduler).submitTasks(Matchers.any())
    doAnswer(new Answer[TaskSetManager]() {
      override def answer(invoke: InvocationOnMock): TaskSetManager = {
        val taskSet = invoke.getArguments()(0).asInstanceOf[TaskSet]
        new TaskSetManager(mockTaskScheduler, taskSet, 4) {
          // Report exactly one "speculative" copy of task 0, so both the
          // original and the speculated attempt race to commit.
          var hasDequeuedSpeculatedTask = false
          override def dequeueSpeculativeTask(
              execId: String,
              host: String,
              locality: TaskLocality.Value): Option[(Int, TaskLocality.Value)] = {
            if (!hasDequeuedSpeculatedTask) {
              hasDequeuedSpeculatedTask = true
              Some(0, TaskLocality.PROCESS_LOCAL)
            } else {
              None
            }
          }
        }
      }
    }).when(mockTaskScheduler).createTaskSetManager(Matchers.any(), Matchers.any())
    // Wire the spied scheduler into the context and a fresh DAGScheduler.
    sc.taskScheduler = mockTaskScheduler
    val dagSchedulerWithMockTaskScheduler = new DAGScheduler(sc, mockTaskScheduler)
    sc.taskScheduler.setDAGScheduler(dagSchedulerWithMockTaskScheduler)
    sc.dagScheduler = dagSchedulerWithMockTaskScheduler
  }

  after {
    sc.stop()
    tempDir.delete()
    outputCommitCoordinator = null
  }

  test("Only one of two duplicate commit tasks should commit") {
    val rdd = sc.parallelize(Seq(1), 1)
    sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully _,
      0 until rdd.partitions.size)
    // Each successful commit creates one directory; exactly one must exist.
    assert(tempDir.list().size === 1)
  }

  test("If commit fails, if task is retried it should not be locked, and will succeed.") {
    val rdd = sc.parallelize(Seq(1), 1)
    sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).failFirstCommitAttempt _,
      0 until rdd.partitions.size)
    assert(tempDir.list().size === 1)
  }

  test("Job should not complete if all commits are denied") {
    // Create a mock OutputCommitCoordinator that denies all attempts to commit
    doReturn(false).when(outputCommitCoordinator).handleAskPermissionToCommit(
      Matchers.any(), Matchers.any(), Matchers.any())
    val rdd: RDD[Int] = sc.parallelize(Seq(1), 1)
    def resultHandler(x: Int, y: Unit): Unit = {}
    val futureAction: SimpleFutureAction[Unit] = sc.submitJob[Int, Unit, Unit](rdd,
      OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully,
      0 until rdd.partitions.size, resultHandler, () => Unit)
    // It's an error if the job completes successfully even though no committer was authorized,
    // so throw an exception if the job was allowed to complete.
    intercept[TimeoutException] {
      ThreadUtils.awaitResult(futureAction, 5 seconds)
    }
    assert(tempDir.list().size === 0)
  }

  test("Only authorized committer failures can clear the authorized committer lock (SPARK-6614)") {
    val stage: Int = 1
    val partition: Int = 2
    val authorizedCommitter: Int = 3
    val nonAuthorizedCommitter: Int = 100
    outputCommitCoordinator.stageStart(stage, maxPartitionId = 2)
    // First asker wins the lock; subsequent askers are refused.
    assert(outputCommitCoordinator.canCommit(stage, partition, authorizedCommitter))
    assert(!outputCommitCoordinator.canCommit(stage, partition, nonAuthorizedCommitter))
    // The non-authorized committer fails
    outputCommitCoordinator.taskCompleted(
      stage, partition, attemptNumber = nonAuthorizedCommitter, reason = TaskKilled("test"))
    // New tasks should still not be able to commit because the authorized committer has not failed
    assert(
      !outputCommitCoordinator.canCommit(stage, partition, nonAuthorizedCommitter + 1))
    // The authorized committer now fails, clearing the lock
    outputCommitCoordinator.taskCompleted(
      stage, partition, attemptNumber = authorizedCommitter, reason = TaskKilled("test"))
    // A new task should now be allowed to become the authorized committer
    assert(
      outputCommitCoordinator.canCommit(stage, partition, nonAuthorizedCommitter + 2))
    // There can only be one authorized committer
    assert(
      !outputCommitCoordinator.canCommit(stage, partition, nonAuthorizedCommitter + 3))
  }

  test("Duplicate calls to canCommit from the authorized committer gets idempotent responses.") {
    val rdd = sc.parallelize(Seq(1), 1)
    sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).callCanCommitMultipleTimes _,
      0 until rdd.partitions.size)
  }

  test("SPARK-19631: Do not allow failed attempts to be authorized for committing") {
    val stage: Int = 1
    val partition: Int = 1
    val failedAttempt: Int = 0
    outputCommitCoordinator.stageStart(stage, maxPartitionId = 1)
    outputCommitCoordinator.taskCompleted(stage, partition, attemptNumber = failedAttempt,
      reason = ExecutorLostFailure("0", exitCausedByApp = true, None))
    assert(!outputCommitCoordinator.canCommit(stage, partition, failedAttempt))
    assert(outputCommitCoordinator.canCommit(stage, partition, failedAttempt + 1))
  }
}
/**
 * Class with methods that can be passed to runJob to test commits with a mock committer.
 */
private case class OutputCommitFunctions(tempDirPath: String) {

  // Wrapped in SerializableWritable so the Hadoop JobID survives closure serialization.
  private val jobId = new SerializableWritable(SparkHadoopWriterUtils.createJobID(new Date, 0))

  // Mock output committer that simulates a successful commit (after commit is authorized)
  private def successfulOutputCommitter = new FakeOutputCommitter {
    override def commitTask(context: TaskAttemptContext): Unit = {
      // "Committing" is emulated by creating a directory; tests count directories.
      Utils.createDirectory(tempDirPath)
    }
  }

  // Mock output committer that simulates a failed commit (after commit is authorized)
  private def failingOutputCommitter = new FakeOutputCommitter {
    override def commitTask(taskAttemptContext: TaskAttemptContext) {
      throw new RuntimeException
    }
  }

  // Job function: always commits successfully.
  def commitSuccessfully(iter: Iterator[Int]): Unit = {
    val ctx = TaskContext.get()
    runCommitWithProvidedCommitter(ctx, iter, successfulOutputCommitter)
  }

  // Job function: the first attempt's commit throws; retries succeed.
  def failFirstCommitAttempt(iter: Iterator[Int]): Unit = {
    val ctx = TaskContext.get()
    runCommitWithProvidedCommitter(ctx, iter,
      if (ctx.attemptNumber == 0) failingOutputCommitter else successfulOutputCommitter)
  }

  // Receiver should be idempotent for AskPermissionToCommitOutput
  def callCanCommitMultipleTimes(iter: Iterator[Int]): Unit = {
    val ctx = TaskContext.get()
    val canCommit1 = SparkEnv.get.outputCommitCoordinator
      .canCommit(ctx.stageId(), ctx.partitionId(), ctx.attemptNumber())
    val canCommit2 = SparkEnv.get.outputCommitCoordinator
      .canCommit(ctx.stageId(), ctx.partitionId(), ctx.attemptNumber())
    assert(canCommit1 && canCommit2)
  }

  // Shared plumbing: builds Hadoop task-attempt context around the provided
  // committer, then runs setupTask/commitTask through the commit protocol.
  private def runCommitWithProvidedCommitter(
      ctx: TaskContext,
      iter: Iterator[Int],
      outputCommitter: OutputCommitter): Unit = {
    def jobConf = new JobConf {
      override def getOutputCommitter(): OutputCommitter = outputCommitter
    }
    // Instantiate committer.
    val committer = FileCommitProtocol.instantiate(
      className = classOf[HadoopMapRedCommitProtocol].getName,
      jobId = jobId.value.getId.toString,
      outputPath = jobConf.get("mapred.output.dir"),
      isAppend = false)
    // Create TaskAttemptContext.
    // Hadoop wants a 32-bit task attempt ID, so if ours is bigger than Int.MaxValue, roll it
    // around by taking a mod. We expect that no task will be attempted 2 billion times.
    val taskAttemptId = (ctx.taskAttemptId % Int.MaxValue).toInt
    val attemptId = new TaskAttemptID(
      new TaskID(jobId.value, TaskType.MAP, ctx.partitionId), taskAttemptId)
    val taskContext = new TaskAttemptContextImpl(jobConf, attemptId)
    committer.setupTask(taskContext)
    committer.commitTask(taskContext)
  }
}
| aokolnychyi/spark | core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala | Scala | apache-2.0 | 12,246 |
package com.jdrews.logstation.config
import akka.actor.{Actor, ActorLogging}
import com.jdrews.logstation.utils.FixedList
import com.jdrews.logstation.{BufferLength, LogStationName, MaxLogLinesPerLog}
import net.liftweb.actor.LiftActor
/**
* Created by jdrews on 3/22/2015.
*
* Used to bridge between Lift and Scala
* Buffers up messages before a LiftActor connects and then hands over messages
* Stores configuration for the LiftActor
*/
/**
 * Bridges between the Akka world and a Lift actor.
 * Buffers up messages before a LiftActor connects, then replays them once one
 * registers; also stores configuration forwarded to the LiftActor on connect.
 */
class BridgeActor extends Actor with ActorLogging {
  // The registered LiftActor, once one has connected.
  private var target: Option[LiftActor] = None
  // only store n entries
  private var bufferLength = 12
  private var maxLogLinesPerLog = 120
  private var msgs = new FixedList[Any](bufferLength)
  private var logStationName = ""

  def receive = {
    case lift: LiftActor =>
      log.debug(s"received LiftActor: $lift")
      target = Some(lift)
      // send LogStationWebServer the maxLogLinesPerLog
      lift ! MaxLogLinesPerLog(maxLogLinesPerLog)
      lift ! LogStationName(logStationName)
      // Replay everything buffered before the LiftActor connected.
      if (msgs.nonEmpty) {
        log.debug("sending out buffered msgs")
        msgs.foreach { m =>
          log.debug(s"passing the following to $lift: $m")
          lift ! m
        }
        log.debug("done")
      }
    case mll: MaxLogLinesPerLog =>
      log.debug(s"received maxLogLinesPerLog: $mll")
      maxLogLinesPerLog = mll.myVal
    case bl: BufferLength =>
      log.debug(s"received bufferLength: $bl")
      bufferLength = bl.myVal
      // rebuild msgs list with new buffer length
      // NOTE(review): this drops any messages buffered so far — confirm intended.
      msgs = new FixedList[Any](bufferLength)
    case lsname: LogStationName =>
      // Bug fix: previously logged the stale field value instead of the one received.
      log.debug(s"received logStationName: ${lsname.myVal}")
      logStationName = lsname.myVal
    case msg =>
      if (target.isEmpty) {
        log.debug(s"buffering this message since target is empty... $msg")
        msgs.append(msg)
      } else {
        log.debug(s"passing the following to $target: $msg")
        target.foreach(_ ! msg)
      }
  }
}
| jdrews/logstation | src/main/scala/com/jdrews/logstation/config/BridgeActor.scala | Scala | apache-2.0 | 2,175 |
package com.faacets.qalg
import org.scalatest.{FunSuite, NonImplicitAssertions}
import spire.math.Rational
import spire.syntax.eq._
import spire.syntax.innerProductSpace._
import spire.std.long._
import algebra._
import algos._
import math._
import syntax.all._
import Matrix.packs._
import optional.matProductOrder._
/** Verifies Gram-Schmidt orthogonalization over rational matrices supplied by
    the implicit algebra pack. */
class GramSchmidtSuite(implicit val pack: PackField.ForMV[Matrix[Rational, Imm], Vector[Rational, Imm], Rational]) extends FunSuite with NonImplicitAssertions {
  // Shorthand for the pack's matrix builder.
  def M = pack.M
  test("Rational Gram Schmidt") {
    // 2x3 input with linearly independent rows.
    val m = M.rowMajor(2, 3)(
      3, 1, 1,
      2, 2, 1)
    // Expected: first row kept, second row made orthogonal to it (classical
    // Gram-Schmidt without normalization, hence the rational fractions).
    val res = M.rowMajor(2, 3)(
      3, 1, 1,
      Rational(-5, 11), Rational(13, 11), Rational(2, 11))
    assert(m.orthogonalized === res)
  }
  /*
  test("Integer Gram Schmidt") {
    val m = DenseM.forLong.build(2, 3,
      3, 1, 1,
      2, 2, 1)
    val res = DenseM.forLong.build(2, 3,
      3, 1, 1,
      -5, 13, 2)
    assert(euclideanGramSchmidt(m) === res)
  }*/
}
| denisrosset/qalg | tests/src/test/scala/qalg/algos/GramSchmidtSuite.scala | Scala | mit | 972 |
package korolev.blazeServer
import korolev.Async
import korolev.server.{KorolevServiceConfig, MimeTypes}
import org.http4s.blaze.http.HttpService
import scala.language.higherKinds
/**
* @author Aleksey Fomkin <aleksey.fomkin@gmail.com>
*/
final class BlazeServiceBuilder[F[+_]: Async, S, M](mimeTypes: MimeTypes) {
  // Builds a blaze HttpService from a Korolev service config, binding the
  // MIME-type table captured by this builder.
  def from(config: KorolevServiceConfig[F, S, M]): HttpService =
    blazeService(config, mimeTypes)
}
| PhilAndrew/JumpMicro | JMSangriaGraphql/src/main/scala/korolev/blazeServer/BlazeServiceBuilder.scala | Scala | mit | 425 |
package chandu0101.scalajs.react.components.models
import org.scalajs.dom.Element
// A 2-D point (x, y).
case class RPoint(x: Double, y: Double)
// A grid's width/height dimensions.
case class RGrid(width: Double, height: Double)
// Mirrors the shape of a DOM ClientRect; all fields default to 0.
case class RClientRect(top: Double = 0, left: Double = 0, right: Double = 0, bottom: Double = 0, height: Double = 0, width: Double = 0)
// Edge offsets (top/left/right/bottom), defaulting to 0.
case class RPosition(top: Double = 0, left: Double = 0, right: Double = 0, bottom: Double = 0)
case class RElementPosition(element: Element, top: Double = 0, left: Double = 0, right: Double = 0, bottom: Double = 0) | mproch/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/models/DomPositions.scala | Scala | apache-2.0 | 529 |
package com.sksamuel.elastic4s.requests.get
import com.sksamuel.elastic4s.requests.common.RefreshPolicy
import com.sksamuel.elastic4s.testkit.DockerTests
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers._
import org.scalatestplus.mockito.MockitoSugar
import scala.util.Try
/** Integration tests for the multiget API against a docker-backed Elasticsearch. */
class MultiGetTest extends AnyFlatSpec with MockitoSugar with DockerTests {

  // Best-effort cleanup: the index may not exist yet on a fresh cluster.
  Try {
    client.execute {
      deleteIndex("coldplay")
    }.await
  }

  client.execute {
    createIndex("coldplay").shards(2).mapping(
      mapping(
        textField("name").stored(true),
        intField("year").stored(true)
      )
    )
  }.await

  // Seed with sparse ids (1,3,5,7) so lookups of missing ids can be exercised.
  client.execute(
    bulk(
      indexInto("coldplay") id "1" fields("name" -> "parachutes", "year" -> 2000),
      indexInto("coldplay") id "3" fields("name" -> "x&y", "year" -> 2005),
      indexInto("coldplay") id "5" fields("name" -> "mylo xyloto", "year" -> 2011),
      indexInto("coldplay") id "7" fields("name" -> "ghost stories", "year" -> 2015)
    ).refresh(RefreshPolicy.Immediate)
  ).await

  "a multiget request" should "retrieve documents by id" in {
    val resp = client.execute(
      multiget(
        get("3").from("coldplay"),
        get("5") from "coldplay",
        get("7") from "coldplay"
      )
    ).await.result
    // Results come back in request order.
    resp.size shouldBe 3
    resp.items.head.id shouldBe "3"
    resp.items.head.exists shouldBe true
    resp.items(1).id shouldBe "5"
    resp.items(1).exists shouldBe true
    resp.items.last.id shouldBe "7"
    resp.items.last.exists shouldBe true
  }

  it should "set exists=false for missing documents" in {
    val resp = client.execute(
      multiget(
        get("3").from("coldplay"),
        get("711111") from "coldplay"
      )
    ).await.result
    resp.size shouldBe 2
    resp.items.head.exists shouldBe true
    resp.items.last.exists shouldBe false
  }

  it should "retrieve documents by id with selected fields" in {
    val resp = client.execute(
      multiget(
        get("3") from "coldplay" storedFields("name", "year"),
        get("5") from "coldplay" storedFields "name"
      )
    ).await.result
    resp.size shouldBe 2
    // Stored fields are returned as lists of values.
    resp.items.head.fields shouldBe Map("year" -> List(2005), "name" -> List("x&y"))
    resp.items.last.fields shouldBe Map("name" -> List("mylo xyloto"))
  }

  it should "retrieve documents by id with fetchSourceContext" in {
    val resp = client.execute(
      multiget(
        get("3") from "coldplay" fetchSourceContext Seq("name", "year"),
        get("5") from "coldplay" fetchSourceContext Seq("name")
      )
    ).await.result
    resp.size shouldBe 2
    // Source filtering returns scalar values (unlike stored fields above).
    resp.items.head.source shouldBe Map("year" -> 2005, "name" -> "x&y")
    resp.items.last.source shouldBe Map("name" -> "mylo xyloto")
  }

  it should "retrieve documents by id with routing spec" in {
    val resp = client.execute(
      multiget(get("3") from "coldplay" routing "3")
    ).await.result
    resp.size shouldBe 1
    resp.items.head.id shouldBe "3"
    resp.items.head.exists shouldBe true
  }
}
| sksamuel/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/requests/get/MultiGetTest.scala | Scala | apache-2.0 | 3,044 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.ElasticDsl._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.SpanSugar._
import org.scalatest.{Matchers, WordSpec}
import com.sksamuel.elastic4s.testkit.ElasticSugar
/** @author Stephen Samuel */
/** @author Stephen Samuel */
// Seeds an index for the term-vector API; the actual assertions are currently
// commented out below.
class TermVectorTest
  extends WordSpec
    with ElasticSugar
    with Matchers
    with ScalaFutures {

  // Generous timeout/interval because responses come from an embedded node.
  override implicit def patienceConfig: PatienceConfig = PatienceConfig(timeout = 10.seconds, interval = 1.seconds)

  client.execute {
    bulk(
      index into "termvectortest/startrek" fields("name" -> "james kirk", "rank" -> "captain") id 1,
      index into "termvectortest/startrek" fields("name" -> "jean luc picard", "rank" -> "captain") id 2,
      index into "termvectortest/startrek" fields("name" -> "will riker", "rank" -> "cmdr") id 3,
      index into "termvectortest/startrek" fields("name" -> "data", "rank" -> "ltr cmdr") id 4,
      index into "termvectortest/startrek" fields("name" -> "geordie la forge", "rank" -> "ltr cmdr") id 5
    )
  }.await

  //  "term vector api " should {
  //    "return number of terms for a field in " in {
  //
  //      val f = client.execute {
  //        termVector("termvectortest", "startrek", "5")
  //          .withTermStatistics(true)
  //          .withFields("name", "rank")
  //          .withFieldStatistics(true)
  //      }
  //
  //      whenReady(f) { resp =>
  //        val fields = resp.getFields
  //        val terms = fields.terms("rank").size shouldBe 2 // ltr cmdr
  //      }
  //    }
  //  }
}
| beni55/elastic4s | elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/TermVectorTest.scala | Scala | apache-2.0 | 1,524 |
package com.arcusys.learn.scorm.sequencing.storage.impl.liferay
import com.arcusys.learn.storage.impl.KeyedEntityStorage
import com.arcusys.learn.storage.impl.liferay.LiferayCommon
import com.arcusys.learn.persistence.liferay.service.LFSequencingLocalServiceUtil
import com.arcusys.learn.persistence.liferay.model.LFSequencing
import com.arcusys.learn.scorm.manifest.sequencing.storage.impl.SequencingFieldsMapper
import com.arcusys.valamis.lesson.scorm.model.manifest.Sequencing
/**
* User: Yulia.Glushonkova
* Date: 09.04.13
*/
/**
 * Liferay-backed [[KeyedEntityStorage]] for SCORM [[Sequencing]] entities.
 * Rows are addressed by the (packageID, activityID) parameter pair; the
 * generic CRUD operations that make no sense for this table deliberately
 * throw UnsupportedOperationException.
 */
trait LFSequencingStorageImpl extends KeyedEntityStorage[Sequencing] {

  /** Wipes every persisted sequencing row (storage renewal). */
  protected def doRenew() {
    LFSequencingLocalServiceUtil.removeAll()
  }

  /** Fetches the sequencing identified by packageID + activityID, if present. */
  def getOne(parameters: (String, Any)*) = {
    val lfSequencing = LFSequencingLocalServiceUtil.findByActivityIDAndPackageID(
      LiferayCommon.getParameter("packageID", parameters: _*).get,
      LiferayCommon.getParameter("activityID", parameters: _*).get)
    extract(lfSequencing)
  }

  /** Maps a Liferay row onto the domain mapper; returns None when the row is null. */
  private def extract(lfentity: LFSequencing): Option[Sequencing] = {
    // Brings `.toOption`-style conversions for nullable Liferay getters into scope.
    import com.arcusys.learn.storage.impl.liferay.LiferayCommon._
    if (lfentity == null) None
    else {
      val mapper = new SequencingFieldsMapper {
        def id = lfentity.getId.toInt
        def sharedId = lfentity.getSharedId.toOption
        def sharedSequencingIdReference = lfentity.getSharedSequencingIdReference.toOption
        // BUG FIX: these two accessors were swapped. createAndGetID below writes
        // attempt progress into CAttemptAttemptProgressChild and objective progress
        // into CAttemptObjectiveProgressChild, so reading them crosswise exchanged
        // the two flags on every read round trip.
        def onlyCurrentAttemptObjectiveProgressForChildren = lfentity.getCAttemptObjectiveProgressChild
        def onlyCurrentAttemptAttemptProgressForChildren = lfentity.getCAttemptAttemptProgressChild
        def attemptLimit = lfentity.getAttemptLimit.toOption
        def durationLimitInMilliseconds = lfentity.getDurationLimitInMilliseconds.toOption
        def preventChildrenActivation = lfentity.getPreventChildrenActivation
        def constrainChoice = lfentity.getConstrainChoice
      }
      Option(createSequencing(mapper))
    }
  }

  /** Factory implemented by concrete storages to build the domain object. */
  def createSequencing(mapper: SequencingFieldsMapper): Sequencing

  def getAll(parameters: (String, Any)*) = throw new UnsupportedOperationException("Not implemented")

  def create(parameters: (String, Any)*) {
    throw new UnsupportedOperationException("Not implemented")
  }

  def create(entity: Sequencing, parameters: (String, Any)*) {
    throw new UnsupportedOperationException("Not implemented")
  }

  /** Deletes the sequencing identified by packageID + activityID. */
  def delete(parameters: (String, Any)*) {
    LFSequencingLocalServiceUtil.removeByActivityIDAndPackageID(
      LiferayCommon.getParameter("packageID", parameters: _*).get,
      LiferayCommon.getParameter("activityID", parameters: _*).get)
  }

  def modify(parameters: (String, Any)*) {
    throw new UnsupportedOperationException("Not implemented")
  }

  def modify(entity: Sequencing, parameters: (String, Any)*) {
    throw new UnsupportedOperationException("Not implemented")
  }

  def getByID(id: Int, parameters: (String, Any)*) = throw new UnsupportedOperationException("Not implemented")

  /** Persists a new row from the domain entity and returns the generated id. */
  def createAndGetID(entity: Sequencing, parameters: (String, Any)*) = {
    // Implicit conversions (e.g. Option-accepting setters) come from LiferayCommon.
    import com.arcusys.learn.storage.impl.liferay.LiferayCommon._
    val newEntity = LFSequencingLocalServiceUtil.createLFSequencing()
    newEntity.setPackageID(LiferayCommon.getParameter("packageID", parameters: _*).get)
    newEntity.setActivityID(LiferayCommon.getParameter("activityID", parameters: _*).get)
    newEntity.setSharedId(entity.sharedId.getOrElse(""))
    newEntity.setSharedSequencingIdReference(entity.sharedSequencingIdReference.getOrElse(""))
    newEntity.setCAttemptAttemptProgressChild(entity.onlyCurrentAttemptAttemptProgressForChildren)
    newEntity.setCAttemptObjectiveProgressChild(entity.onlyCurrentAttemptObjectiveProgressForChildren)
    newEntity.setAttemptLimit(entity.attemptLimit)
    newEntity.setDurationLimitInMilliseconds(entity.durationLimitInMilliseconds)
    newEntity.setPreventChildrenActivation(entity.preventChildrenActivation)
    newEntity.setConstrainChoice(entity.constrainChoice)
    LFSequencingLocalServiceUtil.addLFSequencing(newEntity).getId.toInt
  }

  def execute(sqlKey: String, parameters: (String, Any)*) {
    throw new UnsupportedOperationException
  }

  def getAll(sqlKey: String, parameters: (String, Any)*) = throw new UnsupportedOperationException

  def getOne(sqlKey: String, parameters: (String, Any)*) = throw new UnsupportedOperationException

  def modify(sqlKey: String, parameters: (String, Any)*) {
    throw new UnsupportedOperationException
  }

  def createAndGetID(parameters: (String, Any)*) = throw new UnsupportedOperationException("Not implemented")
}
| ViLPy/Valamis | learn-persistence-liferay-wrapper/src/main/scala/com/arcusys/learn/scorm/sequencing/storage/impl/liferay/LFSequencingStorageImpl.scala | Scala | lgpl-3.0 | 4,566 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io.{File, NotSerializableException}
import java.lang.management.ManagementFactory
import java.net.URL
import java.nio.ByteBuffer
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import scala.collection.JavaConversions._
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.{DirectTaskResult, IndirectTaskResult, Task}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{StorageLevel, TaskResultBlockId}
import org.apache.spark.unsafe.memory.TaskMemoryManager
import org.apache.spark.util._
/**
* Spark executor, backed by a threadpool to run tasks.
*
* This can be used with Mesos, YARN, and the standalone scheduler.
* An internal RPC interface (at the moment Akka) is used for communication with the driver,
* except in the case of Mesos fine-grained mode.
*/
private[spark] class Executor(
    executorId: String,
    executorHostname: String,
    env: SparkEnv,
    userClassPath: Seq[URL] = Nil,
    isLocal: Boolean = false)
  extends Logging {

  logInfo(s"Starting executor ID $executorId on host $executorHostname")

  // Application dependencies (added through SparkContext) that we've fetched so far on this node.
  // Each map holds the master's timestamp for the version of that file or JAR we got.
  private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
  private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()

  private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))

  private val conf = env.conf

  // No ip or host:port - just hostname
  Utils.checkHost(executorHostname, "Expected executed slave to be a hostname")
  // must not have port specified.
  assert (0 == Utils.parseHostPort(executorHostname)._2)

  // Make sure the local hostname we report matches the cluster scheduler's name for this host
  Utils.setCustomHostname(executorHostname)

  if (!isLocal) {
    // Setup an uncaught exception handler for non-local mode.
    // Make any thread terminations due to uncaught exceptions kill the entire
    // executor process to avoid surprising stalls.
    Thread.setDefaultUncaughtExceptionHandler(SparkUncaughtExceptionHandler)
  }

  // Start worker thread pool
  private val threadPool = ThreadUtils.newDaemonCachedThreadPool("Executor task launch worker")
  private val executorSource = new ExecutorSource(threadPool, executorId)

  if (!isLocal) {
    env.metricsSystem.registerSource(executorSource)
    env.blockManager.initialize(conf.getAppId)
  }

  // Create an RpcEndpoint for receiving RPCs from the driver
  private val executorEndpoint = env.rpcEnv.setupEndpoint(
    ExecutorEndpoint.EXECUTOR_ENDPOINT_NAME, new ExecutorEndpoint(env.rpcEnv, executorId))

  // Whether to load classes in user jars before those in Spark jars
  private val userClassPathFirst = conf.getBoolean("spark.executor.userClassPathFirst", false)

  // Create our ClassLoader
  // do this after SparkEnv creation so can access the SecurityManager
  private val urlClassLoader = createClassLoader()
  private val replClassLoader = addReplClassLoaderIfNeeded(urlClassLoader)

  // Set the classloader for serializer
  env.serializer.setDefaultClassLoader(replClassLoader)

  // Akka's message frame size. If task result is bigger than this, we use the block manager
  // to send the result back.
  private val akkaFrameSize = AkkaUtils.maxFrameSizeBytes(conf)

  // Limit of bytes for total size of results (default is 1GB)
  private val maxResultSize = Utils.getMaxResultSize(conf)

  // Maintains the list of running tasks.
  private val runningTasks = new ConcurrentHashMap[Long, TaskRunner]

  // Executor for the heartbeat task.
  private val heartbeater = ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-heartbeater")

  startDriverHeartbeater()

  /** Wraps the serialized task in a [[TaskRunner]], registers it and submits it to the pool. */
  def launchTask(
      context: ExecutorBackend,
      taskId: Long,
      attemptNumber: Int,
      taskName: String,
      serializedTask: ByteBuffer): Unit = {
    val tr = new TaskRunner(context, taskId = taskId, attemptNumber = attemptNumber, taskName,
      serializedTask)
    runningTasks.put(taskId, tr)
    threadPool.execute(tr)
  }

  /** Asks the runner for `taskId` to kill itself, if the task is still running. */
  def killTask(taskId: Long, interruptThread: Boolean): Unit = {
    val tr = runningTasks.get(taskId)
    if (tr != null) {
      tr.kill(interruptThread)
    }
  }

  /** Shuts down heartbeating, the task thread pool and (in non-local mode) the SparkEnv. */
  def stop(): Unit = {
    env.metricsSystem.report()
    env.rpcEnv.stop(executorEndpoint)
    heartbeater.shutdown()
    heartbeater.awaitTermination(10, TimeUnit.SECONDS)
    threadPool.shutdown()
    if (!isLocal) {
      env.stop()
    }
  }

  /** Returns the total amount of time this JVM process has spent in garbage collection. */
  private def computeTotalGcTime(): Long = {
    ManagementFactory.getGarbageCollectorMXBeans.map(_.getCollectionTime).sum
  }

  /**
   * Runs one task to completion on a pool thread: deserializes it, executes it,
   * reports the (possibly indirect) result, and translates every failure mode
   * into a status update for the driver.
   */
  class TaskRunner(
      execBackend: ExecutorBackend,
      val taskId: Long,
      val attemptNumber: Int,
      taskName: String,
      serializedTask: ByteBuffer)
    extends Runnable {

    /** Whether this task has been killed. */
    @volatile private var killed = false

    /** How much the JVM process has spent in GC when the task starts to run. */
    @volatile var startGCTime: Long = _

    /**
     * The task to run. This will be set in run() by deserializing the task binary coming
     * from the driver. Once it is set, it will never be changed.
     */
    @volatile var task: Task[Any] = _

    def kill(interruptThread: Boolean): Unit = {
      logInfo(s"Executor is trying to kill $taskName (TID $taskId)")
      killed = true
      if (task != null) {
        task.kill(interruptThread)
      }
    }

    override def run(): Unit = {
      val taskMemoryManager = new TaskMemoryManager(env.executorMemoryManager)
      val deserializeStartTime = System.currentTimeMillis()
      Thread.currentThread.setContextClassLoader(replClassLoader)
      val ser = env.closureSerializer.newInstance()
      logInfo(s"Running $taskName (TID $taskId)")
      execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
      var taskStart: Long = 0
      startGCTime = computeTotalGcTime()
      try {
        val (taskFiles, taskJars, taskBytes) = Task.deserializeWithDependencies(serializedTask)
        updateDependencies(taskFiles, taskJars)
        task = ser.deserialize[Task[Any]](taskBytes, Thread.currentThread.getContextClassLoader)
        task.setTaskMemoryManager(taskMemoryManager)

        // If this task has been killed before we deserialized it, let's quit now. Otherwise,
        // continue executing the task.
        if (killed) {
          // Throw an exception rather than returning, because returning within a try{} block
          // causes a NonLocalReturnControl exception to be thrown. The NonLocalReturnControl
          // exception will be caught by the catch block, leading to an incorrect ExceptionFailure
          // for the task.
          throw new TaskKilledException
        }

        logDebug("Task " + taskId + "'s epoch is " + task.epoch)
        env.mapOutputTracker.updateEpoch(task.epoch)

        // Run the actual task and measure its runtime.
        taskStart = System.currentTimeMillis()
        var threwException = true
        val (value, accumUpdates) = try {
          val res = task.run(
            taskAttemptId = taskId,
            attemptNumber = attemptNumber,
            metricsSystem = env.metricsSystem)
          threwException = false
          res
        } finally {
          // Always release task-managed memory; leaks are fatal only when configured
          // so and the task itself did not already fail with an exception.
          val freedMemory = taskMemoryManager.cleanUpAllAllocatedMemory()
          if (freedMemory > 0) {
            val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, TID = $taskId"
            if (conf.getBoolean("spark.unsafe.exceptionOnMemoryLeak", false) && !threwException) {
              throw new SparkException(errMsg)
            } else {
              logError(errMsg)
            }
          }
        }
        val taskFinish = System.currentTimeMillis()

        // If the task has been killed, let's fail it.
        if (task.killed) {
          throw new TaskKilledException
        }

        val resultSer = env.serializer.newInstance()
        val beforeSerialization = System.currentTimeMillis()
        val valueBytes = resultSer.serialize(value)
        val afterSerialization = System.currentTimeMillis()

        for (m <- task.metrics) {
          // Deserialization happens in two parts: first, we deserialize a Task object, which
          // includes the Partition. Second, Task.run() deserializes the RDD and function to be run.
          m.setExecutorDeserializeTime(
            (taskStart - deserializeStartTime) + task.executorDeserializeTime)
          // We need to subtract Task.run()'s deserialization time to avoid double-counting
          m.setExecutorRunTime((taskFinish - taskStart) - task.executorDeserializeTime)
          m.setJvmGCTime(computeTotalGcTime() - startGCTime)
          m.setResultSerializationTime(afterSerialization - beforeSerialization)
          m.updateAccumulators()
        }

        val directResult = new DirectTaskResult(valueBytes, accumUpdates, task.metrics.orNull)
        val serializedDirectResult = ser.serialize(directResult)
        val resultSize = serializedDirectResult.limit

        // directSend = sending directly back to the driver
        // Three result channels: too big -> dropped (indirect marker only);
        // bigger than the Akka frame -> shipped via the BlockManager; else inline.
        val serializedResult: ByteBuffer = {
          if (maxResultSize > 0 && resultSize > maxResultSize) {
            logWarning(s"Finished $taskName (TID $taskId). Result is larger than maxResultSize " +
              s"(${Utils.bytesToString(resultSize)} > ${Utils.bytesToString(maxResultSize)}), " +
              s"dropping it.")
            ser.serialize(new IndirectTaskResult[Any](TaskResultBlockId(taskId), resultSize))
          } else if (resultSize >= akkaFrameSize - AkkaUtils.reservedSizeBytes) {
            val blockId = TaskResultBlockId(taskId)
            env.blockManager.putBytes(
              blockId, serializedDirectResult, StorageLevel.MEMORY_AND_DISK_SER)
            logInfo(
              s"Finished $taskName (TID $taskId). $resultSize bytes result sent via BlockManager)")
            ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
          } else {
            logInfo(s"Finished $taskName (TID $taskId). $resultSize bytes result sent to driver")
            serializedDirectResult
          }
        }

        execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)

      } catch {
        case ffe: FetchFailedException =>
          val reason = ffe.toTaskEndReason
          execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))

        case _: TaskKilledException | _: InterruptedException if task.killed =>
          logInfo(s"Executor killed $taskName (TID $taskId)")
          execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(TaskKilled))

        case CausedBy(cDE: CommitDeniedException) =>
          val reason = cDE.toTaskEndReason
          execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))

        case t: Throwable =>
          // Attempt to exit cleanly by informing the driver of our failure.
          // If anything goes wrong (or this was a fatal exception), we will delegate to
          // the default uncaught exception handler, which will terminate the Executor.
          logError(s"Exception in $taskName (TID $taskId)", t)
          val metrics: Option[TaskMetrics] = Option(task).flatMap { task =>
            task.metrics.map { m =>
              m.setExecutorRunTime(System.currentTimeMillis() - taskStart)
              m.setJvmGCTime(computeTotalGcTime() - startGCTime)
              m.updateAccumulators()
              m
            }
          }
          val serializedTaskEndReason = {
            try {
              ser.serialize(new ExceptionFailure(t, metrics))
            } catch {
              case _: NotSerializableException =>
                // t is not serializable so just send the stacktrace
                ser.serialize(new ExceptionFailure(t, metrics, false))
            }
          }
          execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskEndReason)

          // Don't forcibly exit unless the exception was inherently fatal, to avoid
          // stopping other tasks unnecessarily.
          if (Utils.isFatalError(t)) {
            SparkUncaughtExceptionHandler.uncaughtException(t)
          }

      } finally {
        runningTasks.remove(taskId)
      }
    }
  }

  /**
   * Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
   * created by the interpreter to the search path
   */
  private def createClassLoader(): MutableURLClassLoader = {
    // Bootstrap the list of jars with the user class path.
    val now = System.currentTimeMillis()
    userClassPath.foreach { url =>
      currentJars(url.getPath().split("/").last) = now
    }

    val currentLoader = Utils.getContextOrSparkClassLoader

    // For each of the jars in the jarSet, add them to the class loader.
    // We assume each of the files has already been fetched.
    val urls = userClassPath.toArray ++ currentJars.keySet.map { uri =>
      new File(uri.split("/").last).toURI.toURL
    }
    if (userClassPathFirst) {
      new ChildFirstURLClassLoader(urls, currentLoader)
    } else {
      new MutableURLClassLoader(urls, currentLoader)
    }
  }

  /**
   * If the REPL is in use, add another ClassLoader that will read
   * new classes defined by the REPL as the user types code
   */
  private def addReplClassLoaderIfNeeded(parent: ClassLoader): ClassLoader = {
    val classUri = conf.get("spark.repl.class.uri", null)
    if (classUri != null) {
      logInfo("Using REPL class URI: " + classUri)
      try {
        // Loaded reflectively because the repl module is optional at runtime.
        val _userClassPathFirst: java.lang.Boolean = userClassPathFirst
        val klass = Utils.classForName("org.apache.spark.repl.ExecutorClassLoader")
          .asInstanceOf[Class[_ <: ClassLoader]]
        val constructor = klass.getConstructor(classOf[SparkConf], classOf[String],
          classOf[ClassLoader], classOf[Boolean])
        constructor.newInstance(conf, classUri, parent, _userClassPathFirst)
      } catch {
        case _: ClassNotFoundException =>
          logError("Could not find org.apache.spark.repl.ExecutorClassLoader on classpath!")
          System.exit(1)
          null
      }
    } else {
      parent
    }
  }

  /**
   * Download any missing dependencies if we receive a new set of files and JARs from the
   * SparkContext. Also adds any new JARs we fetched to the class loader.
   */
  private def updateDependencies(newFiles: HashMap[String, Long], newJars: HashMap[String, Long]) {
    lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
    // Synchronized: multiple TaskRunner threads may update the dependency maps concurrently.
    synchronized {
      // Fetch missing dependencies
      for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
        logInfo("Fetching " + name + " with timestamp " + timestamp)
        // Fetch file with useCache mode, close cache for local mode.
        Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
          env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
        currentFiles(name) = timestamp
      }
      for ((name, timestamp) <- newJars) {
        val localName = name.split("/").last
        val currentTimeStamp = currentJars.get(name)
          .orElse(currentJars.get(localName))
          .getOrElse(-1L)
        if (currentTimeStamp < timestamp) {
          logInfo("Fetching " + name + " with timestamp " + timestamp)
          // Fetch file with useCache mode, close cache for local mode.
          Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
            env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
          currentJars(name) = timestamp
          // Add it to our class loader
          val url = new File(SparkFiles.getRootDirectory(), localName).toURI.toURL
          if (!urlClassLoader.getURLs().contains(url)) {
            logInfo("Adding " + url + " to class loader")
            urlClassLoader.addURL(url)
          }
        }
      }
    }
  }

  private val heartbeatReceiverRef =
    RpcUtils.makeDriverRef(HeartbeatReceiver.ENDPOINT_NAME, conf, env.rpcEnv)

  /** Reports heartbeat and metrics for active tasks to the driver. */
  private def reportHeartBeat(): Unit = {
    // list of (task id, metrics) to send back to the driver
    val tasksMetrics = new ArrayBuffer[(Long, TaskMetrics)]()
    val curGCTime = computeTotalGcTime()

    for (taskRunner <- runningTasks.values()) {
      if (taskRunner.task != null) {
        taskRunner.task.metrics.foreach { metrics =>
          metrics.updateShuffleReadMetrics()
          metrics.updateInputMetrics()
          metrics.setJvmGCTime(curGCTime - taskRunner.startGCTime)
          metrics.updateAccumulators()

          if (isLocal) {
            // JobProgressListener will hold an reference of it during
            // onExecutorMetricsUpdate(), then JobProgressListener can not see
            // the changes of metrics any more, so make a deep copy of it
            val copiedMetrics = Utils.deserialize[TaskMetrics](Utils.serialize(metrics))
            tasksMetrics += ((taskRunner.taskId, copiedMetrics))
          } else {
            // It will be copied by serialization
            tasksMetrics += ((taskRunner.taskId, metrics))
          }
        }
      }
    }

    val message = Heartbeat(executorId, tasksMetrics.toArray, env.blockManager.blockManagerId)
    try {
      val response = heartbeatReceiverRef.askWithRetry[HeartbeatResponse](message)
      if (response.reregisterBlockManager) {
        logInfo("Told to re-register on heartbeat")
        env.blockManager.reregister()
      }
    } catch {
      case NonFatal(e) => logWarning("Issue communicating with driver in heartbeater", e)
    }
  }

  /**
   * Schedules a task to report heartbeat and partial metrics for active tasks to driver.
   */
  private def startDriverHeartbeater(): Unit = {
    val intervalMs = conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s")

    // Wait a random interval so the heartbeats don't end up in sync
    // (initial delay is uniform in [intervalMs, 2 * intervalMs)).
    val initialDelay = intervalMs + (math.random * intervalMs).asInstanceOf[Int]

    val heartbeatTask = new Runnable() {
      override def run(): Unit = Utils.logUncaughtExceptions(reportHeartBeat())
    }
    heartbeater.scheduleAtFixedRate(heartbeatTask, initialDelay, intervalMs, TimeUnit.MILLISECONDS)
  }
}
| practice-vishnoi/dev-spark-1 | core/src/main/scala/org/apache/spark/executor/Executor.scala | Scala | apache-2.0 | 19,473 |
package scorex.network
import akka.actor.{Actor, ActorRef}
import scorex.transaction.History._
import scorex.utils.ScorexLogging
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
//todo: break a connection if no score message from remote for some time?
/**
 * Tracks the blockchain scores reported by connected peers and notifies the
 * history synchronizer whenever the best known network score changes.
 */
class ScoreObserver(historySynchronizer: ActorRef) extends Actor with ScorexLogging {

  import ScoreObserver._

  /** A peer's last reported score and the time (ms) we heard it. */
  private case class Candidate(peer: ConnectedPeer, score: BlockchainScore, seen: Long)

  private var candidates = Seq[Candidate]()

  /**
   * Keeps only the candidates sharing the best network score and returns
   * (best witness, all witnesses). Rewritten from a `Boolean` literal match
   * to the idiomatic if/else.
   */
  private def consider(candidates: Seq[Candidate]): (Option[Candidate], Seq[Candidate]) =
    if (candidates.isEmpty) (None, Seq())
    else {
      val bestNetworkScore = candidates.maxBy(_.score).score
      val witnesses = candidates.filter(_.score == bestNetworkScore)
      (witnesses.headOption, witnesses)
    }

  /** Drops candidates not heard from within the last minute. */
  private def clearOld(candidates: Seq[Candidate]): Seq[Candidate] = {
    //todo: make configurable?
    val threshold = System.currentTimeMillis() - 1.minute.toMillis
    candidates.filter(_.seen > threshold)
  }

  override def preStart: Unit = {
    //todo: make configurable?
    // Periodic self-tick so stale candidates are evicted even without new reports.
    context.system.scheduler.schedule(5.seconds, 5.seconds)(self ! UpdateScore(None))
  }

  override def receive: Receive = {
    case UpdateScore(scoreToAddOpt) =>
      val oldScore = candidates.headOption.map(_.score)
      candidates = clearOld(candidates)

      scoreToAddOpt.foreach { case (connectedPeer, value) =>
        // Replace any previous entry for this peer with its fresh score.
        candidates = candidates.filter(_.peer != connectedPeer)
        candidates = candidates :+ Candidate(connectedPeer, value, System.currentTimeMillis())
      }

      // Destructure the (best, witnesses) pair instead of using _1/_2.
      val (best, witnessCandidates) = consider(candidates)
      candidates = witnessCandidates
      val newScore = best.map(_.score)
      val witnesses = candidates.map(_.peer)

      // Only notify the synchronizer when the best known score actually changed.
      if (newScore.getOrElse(BigInt(0)) != oldScore.getOrElse(BigInt(0))) {
        historySynchronizer ! ConsideredValue(newScore, witnesses)
      }

    case GetScore =>
      candidates = clearOld(candidates)
      candidates.headOption.map(_.score) match {
        // No data yet: answer a bit later, giving peers a chance to report.
        case None => context.system.scheduler.scheduleOnce(1.second, sender(), ConsideredValue(None, Seq()))
        case score => sender() ! ConsideredValue(score, candidates.map(_.peer))
      }
  }
}
// Message protocol for the ScoreObserver actor.
object ScoreObserver {

  // Reports a peer's blockchain score; None is used by the periodic self-tick
  // (see preStart) to trigger eviction of stale candidates.
  case class UpdateScore(scoreToAdd: Option[(ConnectedPeer, BlockchainScore)])

  // Requests the currently-considered best score.
  case object GetScore

  // Reply/notification: the best known score plus the peers vouching for it.
  case class ConsideredValue(value: Option[BlockchainScore], witnesses: Seq[ConnectedPeer])
} | ScorexProject/Scorex | scorex-basics/src/main/scala/scorex/network/ScoreObserver.scala | Scala | cc0-1.0 | 2,513 |
package com.sksamuel.elastic4s
import org.elasticsearch.common.xcontent.XContentBuilder
/** @author liorh */
// Converts an untyped Map[String, Any] of document fields into the FieldValue
// tree that this file's case classes know how to serialise to XContent.
object FieldsMapper {

  // Recursively maps raw fields to FieldValues. Match order matters: specific
  // container shapes must precede the catch-all `Any` case, and the final
  // wildcard only ever sees null (a `x: Any` pattern does not match null).
  def mapFields(fields: Map[String, Any]): Seq[FieldValue] = {
    fields map {
      // nested object: recurse into the sub-map
      case (name: String, nest: Map[_, _]) =>
        val nestedFields = mapFields(nest.asInstanceOf[Map[String, Any]])
        NestedFieldValue(Some(name), nestedFields)
      // array of objects (JVM arrays keep their element class at runtime)
      case (name: String, nest: Array[Map[_, _]]) =>
        val nested = nest.map(n => new NestedFieldValue(None, mapFields(n.asInstanceOf[Map[String, Any]])))
        ArrayFieldValue(name, nested)
      // array of scalars
      case (name: String, arr: Array[Any]) =>
        val values = arr.map(new SimpleFieldValue(None, _))
        ArrayFieldValue(name, values)
      // any Iterable: peek at the first element to decide between an array of
      // objects and an array of scalars. Assumes a homogeneous collection —
      // a mixed Iterable would be mis-mapped; TODO confirm callers never do that.
      case (name: String, s: Iterable[_]) =>
        s.headOption match {
          case Some(m: Map[_, _]) =>
            val nested = s.map(n => new NestedFieldValue(None, mapFields(n.asInstanceOf[Map[String, Any]])))
            ArrayFieldValue(name, nested.toSeq)
          case Some(a: Any) =>
            val values = s.map(new SimpleFieldValue(None, _))
            ArrayFieldValue(name, values.toSeq)
          case _ =>
            // can't work out or empty - map to empty
            ArrayFieldValue(name, Seq.empty)
        }
      // plain scalar value
      case (name: String, a: Any) =>
        SimpleFieldValue(Some(name), a)
      // only a null value reaches here, since `a: Any` above matches all non-null
      case (name: String, _) =>
        NullFieldValue(name)
    }
  }.toSeq
}
// A single serialisable field; implementations write themselves into the
// supplied XContentBuilder.
trait FieldValue {
  def output(source: XContentBuilder): Unit
}
// Renders an explicit JSON null for the named field.
case class NullFieldValue(name: String) extends FieldValue {
  def output(source: XContentBuilder): Unit = {
    source.nullField(name)
  }
}
/**
 * A scalar field value. When `name` is present the value is written as a named
 * field; when absent it is written bare (e.g. as an element inside an array).
 */
case class SimpleFieldValue(name: Option[String], value: Any) extends FieldValue {
  def output(source: XContentBuilder): Unit = {
    name.fold(source.value(value))(n => source.field(n, value))
  }
}
object SimpleFieldValue {
  // Convenience constructors: named scalar, or bare value (array element).
  def apply(name: String, value: Any): SimpleFieldValue = apply(Some(name), value)
  def apply(value: Any): SimpleFieldValue = apply(None, value)
}
/** A named JSON array: opens the array, emits each element in order, closes it. */
case class ArrayFieldValue(name: String, values: Seq[FieldValue]) extends FieldValue {
  def output(source: XContentBuilder): Unit = {
    source.startArray(name)
    for (v <- values) v.output(source)
    source.endArray()
  }
}
/**
 * A JSON object, either named or anonymous (e.g. as an array element):
 * opens the object, emits its member fields in order, closes it.
 */
case class NestedFieldValue(name: Option[String], values: Seq[FieldValue]) extends FieldValue {
  def output(source: XContentBuilder): Unit = {
    name.fold(source.startObject())(n => source.startObject(n))
    values.foreach(_.output(source))
    source.endObject()
  }
}
object NestedFieldValue {
  // Convenience constructors: named object, or anonymous object (array element).
  def apply(name: String, values: Seq[FieldValue]): NestedFieldValue = apply(Some(name), values)
  def apply(values: Seq[FieldValue]): NestedFieldValue = apply(None, values)
}
| l15k4/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/fields.scala | Scala | apache-2.0 | 2,811 |
package failurewall.retry
import akka.actor.Scheduler
import akka.pattern.after
import failurewall.Failurewall
import failurewall.util.FailurewallHelper
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
/**
* A [[Failurewall]] implementing the retry pattern.
* Calls wrapped in [[AkkaRetryFailurewall]] can be retried if it fails.
* [[AkkaRetryFailurewall]] can delay retrials by Akka's [[Scheduler]].
*
* [[AkkaRetryFailurewall]] is recommended for resources which may become temporarily unavailable.
*
* @param maxTrialTimes max trial times
* @param strategy backoff strategy
* @param scheduler Akka's [[Scheduler]] to delay retrials
* @param feedback feedback logic to test whether this failurewall should retry calling or not
* @param executor ExecutionContext
*/
/**
 * A [[Failurewall]] implementing the retry pattern. Calls wrapped by this wall
 * are attempted up to `maxTrialTimes` times; each retrial is delayed via
 * Akka's [[Scheduler]] according to the configured backoff strategy.
 *
 * Recommended for resources which may become temporarily unavailable.
 *
 * @param maxTrialTimes max trial times
 * @param strategy backoff strategy
 * @param scheduler Akka's [[Scheduler]] to delay retrials
 * @param feedback decides from each outcome whether another attempt should be made
 * @param executor ExecutionContext
 */
final class AkkaRetryFailurewall[A](maxTrialTimes: Int,
                                    strategy: BackoffStrategy,
                                    scheduler: Scheduler,
                                    feedback: Try[A] => RetryFeedback,
                                    implicit private[this] val executor: ExecutionContext)
  extends Failurewall[A, A] {

  require(maxTrialTimes > 0, "`maxTrialTimes` should be greater than 0.")

  /**
   * Wraps a call which needs retrials if it fails.
   * @param body call that needs retrials if it fails
   * @return the last result of `body`
   */
  override def call(body: => Future[A]): Future[A] = {
    def attempt(trial: Int): Future[A] =
      FailurewallHelper.mapToTry(FailurewallHelper.callSafely(body)).flatMap { outcome =>
        // Stop when the feedback says so, or when the trial budget is exhausted;
        // otherwise schedule the next attempt after the strategy's delay.
        val finished = feedback(outcome) == ShouldNotRetry || trial == maxTrialTimes
        if (finished) Future.fromTry(outcome)
        else after(strategy.nextDelay(trial), scheduler)(attempt(trial + 1))
      }
    attempt(1)
  }
}
object AkkaRetryFailurewall {

  /**
   * Creates an [[AkkaRetryFailurewall]] that retries whenever the wrapped
   * [[Future]] fails and stops on the first success.
   *
   * @param maxTrialTimes max trial times
   * @param strategy backoff strategy
   * @param scheduler Akka's [[Scheduler]] to delay retrials
   * @param executor ExecutionContext
   */
  def apply[A](maxTrialTimes: Int,
               strategy: BackoffStrategy,
               scheduler: Scheduler,
               executor: ExecutionContext): AkkaRetryFailurewall[A] =
    withFeedback(maxTrialTimes, strategy, scheduler, executor) { result =>
      if (result.isSuccess) ShouldNotRetry else ShouldRetry
    }

  /**
   * Creates an [[AkkaRetryFailurewall]] with custom feedback logic.
   *
   * @param maxTrialTimes max trial times
   * @param strategy backoff strategy
   * @param scheduler Akka's [[Scheduler]] to delay retrials
   * @param executor ExecutionContext
   * @param feedback decides from each outcome whether another attempt should be made
   */
  def withFeedback[A](maxTrialTimes: Int,
                      strategy: BackoffStrategy,
                      scheduler: Scheduler,
                      executor: ExecutionContext)
                     (feedback: Try[A] => RetryFeedback): AkkaRetryFailurewall[A] =
    new AkkaRetryFailurewall[A](maxTrialTimes, strategy, scheduler, feedback, executor)
}
| failurewall/failurewall | failurewall-akka/src/main/scala/failurewall/retry/AkkaRetryFailurewall.scala | Scala | apache-2.0 | 3,265 |
package ecommerce.sales.app
import akka.actor._
import akka.kernel.Bootable
import com.typesafe.config.{Config, ConfigFactory}
import ecommerce.sales.{HttpService, SalesReadFrontConfiguration}
// Akka kernel entry point for the sales read-side HTTP frontend.
class SalesReadFrontApp extends Bootable {

  // Configuration loaded from the default application.conf chain.
  val config = ConfigFactory.load()
  val system = ActorSystem("sales-read-front", config)

  def startup() = {
    // Materialise the read-front configuration and spawn the HTTP service actor
    // bound with the configured interface/port/askTimeout.
    new SalesReadFrontConfiguration {
      override def config: Config = SalesReadFrontApp.this.config
      import httpService._
      system.actorOf(HttpService.props(interface, port, askTimeout), "http-service")
    }
  }

  def shutdown() = {
    system.terminate()
  }
} | dominhtung/ddd-leaven-akka-v2 | sales/read-front/src/main/scala/ecommerce/sales/app/SalesReadFrontApp.scala | Scala | mit | 639 |
package chess
package format.pgn
import chess.variant.Variant
import scala.util.parsing.combinator._
import cats.data.Validated
import cats.data.Validated.{ invalid, valid }
import cats.implicits._
// http://www.saremba.de/chessgml/standards/pgn/pgn-complete.htm
object Parser {
// One SAN token as lifted from the PGN move text, before board-aware parsing:
// the raw san string plus its attached NAG glyphs, comments and (recursive)
// variations.
case class StrMove(
    san: String,
    glyphs: Glyphs,
    comments: List[String],
    variations: List[List[StrMove]]
)

// Parses a complete PGN game (tag section + move text) into a ParsedPgn.
def full(pgn: String): Validated[String, ParsedPgn] =
  try {
    // Normalise the input before parsing:
    // - trim lines and drop PGN escape lines (starting with '%')
    // - strip forum-style [pgn]...[/pgn] wrappers
    // - normalise unicode hyphens/dashes to ASCII '-'
    // - drop "e.p." en-passant suffixes
    val preprocessed = augmentString(pgn).linesIterator
      .map(_.trim)
      .filterNot {
        _.headOption.contains('%')
      }
      .mkString("\n")
      .replace("[pgn]", "")
      .replace("[/pgn]", "")
      .replace("‑", "-")
      .replace("–", "-")
      .replace("e.p.", "") // silly en-passant notation
    for {
      splitted <- splitTagAndMoves(preprocessed)
      tagStr = splitted._1
      moveStr = splitted._2
      preTags <- TagParser(tagStr)
      parsedMoves <- MovesParser(moveStr)
      init = parsedMoves._1
      strMoves = parsedMoves._2
      resultOption = parsedMoves._3
      // A result found in the move text becomes a Result tag unless the tag
      // section already supplied one.
      tags = resultOption.filterNot(_ => preTags.exists(_.Result)).foldLeft(preTags)(_ + _)
      sans <- objMoves(strMoves, tags.variant | Variant.default)
    } yield ParsedPgn(init, tags, sans)
  } catch {
    case _: StackOverflowError =>
      // Deliberately caught: deeply nested variations can blow the stack in the
      // recursive parser; dump the offending input and fail loudly.
      println(pgn)
      sys error "### StackOverflowError ### in PGN parser"
  }
def moves(str: String, variant: Variant): Validated[String, Sans] =
moves(
str.split(' ').toList,
variant
)
def moves(strMoves: Iterable[String], variant: Variant): Validated[String, Sans] =
objMoves(
strMoves.map { StrMove(_, Glyphs.empty, Nil, Nil) }.to(List),
variant
)
// Validates every StrMove into a San, recursively validating nested
// variations; one invalid main-line move fails the whole sequence.
def objMoves(strMoves: List[StrMove], variant: Variant): Validated[String, Sans] =
strMoves.map {
case StrMove(san, glyphs, comments, variations) =>
(
MoveParser(san, variant) map { m =>
m withComments comments withVariations {
variations
.map { v =>
// unlike main-line moves, invalid variations are silently
// replaced with an empty line rather than failing the parse
objMoves(v, variant) getOrElse Sans.empty
}
.filter(_.value.nonEmpty)
} mergeGlyphs glyphs
}
// widen to Validated[String, San] so cats' sequence can traverse the list
): Validated[String, San]
}.sequence map { Sans.apply }
// Parser-debugging mix-in: when enabled, wraps a parser in
// scala-parser-combinators' log() so each parse attempt is traced.
trait Logging { self: Parsers =>
protected val loggingEnabled = false
protected def as[T](msg: String)(p: => Parser[T]): Parser[T] =
if (!loggingEnabled) p else log(p)(msg)
}
// Combinator parser for the movetext section: produces the initial-position
// comments, the raw StrMove list, and the game result if present.
object MovesParser extends RegexParsers with Logging {
override val whiteSpace = """(\s|\t|\r?\n)+""".r
private def cleanComments(comments: List[String]) = comments.map(_.trim).filter(_.nonEmpty)
// Entry point: parse the whole movetext, wrapping a textual result ("1-0" etc.)
// into a Result tag.
def apply(pgn: String): Validated[String, (InitialPosition, List[StrMove], Option[Tag])] =
parseAll(strMoves, pgn) match {
case Success((init, moves, result), _) =>
valid(
(
init,
moves,
result map { r =>
Tag(_.Result, r)
}
)
)
case err => invalid("Cannot parse moves: %s\n%s".format(err.toString, pgn))
}
// leading comments, then moves, then an optional result and trailing comments
def strMoves: Parser[(InitialPosition, List[StrMove], Option[String])] =
as("moves") {
(commentary *) ~ (strMove *) ~ (result ?) ~ (commentary *) ^^ {
case coms ~ sans ~ res ~ _ => (InitialPosition(cleanComments(coms)), sans, res)
}
}
// matches castling written with zeros, or a piece/pawn/drop SAN token,
// optionally suffixed by up to two annotation characters
val moveRegex =
"""(?:(?:0\-0(?:\-0|)[\+\#]?)|[PQKRBNOoa-h@][QKRBNa-h1-8xOo\-=\+\#\@]{1,6})[\?!□]{0,2}""".r
// one move: skip move numbers / comments, read the SAN with its glyphs,
// comments and variations, then discard any move extras
def strMove: Parser[StrMove] =
as("move") {
((number | commentary) *) ~>
(moveRegex ~ nagGlyphs ~ rep(commentary) ~ nagGlyphs ~ rep(variation)) <~
(moveExtras *) ^^ {
case san ~ glyphs ~ comments ~ glyphs2 ~ variations =>
StrMove(san, glyphs merge glyphs2, cleanComments(comments), variations)
}
}
// a move number such as "1." or "12..." (with trailing dots/spaces)
def number: Parser[String] = """[1-9]\d*[\s\.]*""".r
def moveExtras: Parser[Unit] =
as("moveExtras") {
commentary.^^^(())
}
// zero or more numeric NAGs ("$1", "$14", ...) mapped to known glyphs;
// unknown NAG numbers are dropped
def nagGlyphs: Parser[Glyphs] =
as("nagGlyphs") {
rep(nag) ^^ { nags =>
Glyphs fromList nags.flatMap { n =>
n.drop(1).toIntOption flatMap Glyph.find
}
}
}
def nag: Parser[String] =
as("nag") {
"""\$\d+""".r
}
// a parenthesised variation; its own result/initial comments are discarded
def variation: Parser[List[StrMove]] =
as("variation") {
"(" ~> strMoves <~ ")" ^^ { case (_, sms, _) => sms }
}
def commentary: Parser[String] = blockCommentary | inlineCommentary
def blockCommentary: Parser[String] =
as("block comment") {
"{" ~> """[^\}]*""".r <~ "}"
}
// a ';' comment runs to the end of the line
def inlineCommentary: Parser[String] =
as("inline comment") {
";" ~> """.+""".r
}
val result: Parser[String] = "*" | "1/2-1/2" | "½-½" | "0-1" | "1-0"
}
// Parses a single SAN token into a San (Std move, Drop or Castle).
// Fast paths: plain pawn destinations ("e4"), castling literals, and two
// regexes for standard moves and crazyhouse drops; anything else falls back
// to the full combinator grammar via slow().
object MoveParser extends RegexParsers with Logging {
override def skipWhitespace = false
// maps 'a'..'h' / '1'..'8' to 1-based file/rank indices
private def rangeToMap(r: Iterable[Char]) = r.zipWithIndex.to(Map).view.mapValues(_ + 1)
private val fileMap = rangeToMap('a' to 'h')
private val rankMap = rangeToMap('1' to '8')
// groups: role, disambiguation file, disambiguation rank, capture, dest,
// promotion, check, mate
private val MoveR = """^(N|B|R|Q|K|)([a-h]?)([1-8]?)(x?)([a-h][0-9])(=?[NBRQ]?)(\+?)(\#?)$""".r
// groups: dropped role, destination square, check, mate
private val DropR = """^([NBRQP])@([a-h][1-8])(\+?)(\#?)$""".r
def apply(str: String, variant: Variant): Validated[String, San] = {
// a bare square like "e4" is a pawn move to that square
if (str.length == 2) Pos.fromKey(str).fold(slow(str)) { pos =>
valid(Std(pos, Pawn))
}
else
str match {
case "O-O" | "o-o" | "0-0" => valid(Castle(KingSide))
case "O-O-O" | "o-o-o" | "0-0-0" => valid(Castle(QueenSide))
case MoveR(role, file, rank, capture, pos, prom, check, mate) =>
// empty role letter means a pawn; otherwise look it up per variant
role.headOption.fold[Option[Role]](Option(Pawn))(variant.rolesByPgn.get) flatMap { role =>
Pos fromKey pos map { dest =>
valid(
Std(
dest = dest,
role = role,
capture = capture != "",
file = if (file == "") None else fileMap get file.head,
rank = if (rank == "") None else rankMap get rank.head,
promotion = if (prom == "") None else variant.rolesPromotableByPgn get prom.last,
metas = Metas(
check = check.nonEmpty,
checkmate = mate.nonEmpty,
comments = Nil,
glyphs = Glyphs.empty,
variations = Nil
)
)
)
}
// unknown role or invalid square: retry with the full grammar
} getOrElse slow(str)
case DropR(roleS, posS, check, mate) =>
roleS.headOption flatMap variant.rolesByPgn.get flatMap { role =>
Pos fromKey posS map { pos =>
valid(
Drop(
role = role,
pos = pos,
metas = Metas(
check = check.nonEmpty,
checkmate = mate.nonEmpty,
comments = Nil,
glyphs = Glyphs.empty,
variations = Nil
)
)
)
}
} getOrElse invalid(s"Cannot parse drop: $str")
case _ => slow(str)
}
}
// full combinator fallback for tokens the regex fast path cannot handle
private def slow(str: String): Validated[String, San] =
parseAll(move, str) match {
case Success(san, _) => valid(san)
case err => invalid("Cannot parse move: %s\n%s".format(err.toString, str))
}
def move: Parser[San] = castle | standard
def castle =
(qCastle | kCastle) ~ suffixes ^^ {
case side ~ suf => Castle(side) withSuffixes suf
}
// queen-side must be tried before king-side: "O-O" is a prefix of "O-O-O"
val qCastle: Parser[Side] = ("O-O-O" | "o-o-o" | "0-0-0") ^^^ QueenSide
val kCastle: Parser[Side] = ("O-O" | "o-o" | "0-0") ^^^ KingSide
// alternation order matters: more specific forms are tried first
def standard: Parser[San] =
as("standard") {
(disambiguatedPawn | pawn | disambiguated | ambiguous | drop) ~ suffixes ^^ {
case std ~ suf => std withSuffixes suf
}
}
// e5
def pawn: Parser[Std] =
as("pawn") {
dest ^^ (de => Std(dest = de, role = Pawn))
}
// Bg5
def ambiguous: Parser[Std] =
as("ambiguous") {
role ~ x ~ dest ^^ {
case ro ~ ca ~ de => Std(dest = de, role = ro, capture = ca)
}
}
// B@g5
def drop: Parser[Drop] =
as("drop") {
role ~ "@" ~ dest ^^ {
case ro ~ _ ~ po => Drop(role = ro, pos = po)
}
}
// Bac3 Baxc3 B2c3 B2xc3 Ba2xc3
def disambiguated: Parser[Std] =
as("disambiguated") {
role ~ opt(file) ~ opt(rank) ~ x ~ dest ^^ {
case ro ~ fi ~ ra ~ ca ~ de =>
Std(
dest = de,
role = ro,
capture = ca,
file = fi,
rank = ra
)
}
}
// d7d5
def disambiguatedPawn: Parser[Std] =
as("disambiguated") {
opt(file) ~ opt(rank) ~ x ~ dest ^^ {
case fi ~ ra ~ ca ~ de =>
Std(
dest = de,
role = Pawn,
capture = ca,
file = fi,
rank = ra
)
}
}
// optional promotion, then checkmate, check and glyph annotations
def suffixes: Parser[Suffixes] =
opt(promotion) ~ checkmate ~ check ~ glyphs ^^ {
case p ~ cm ~ c ~ g => Suffixes(c, cm, p, g)
}
def glyphs: Parser[Glyphs] =
as("glyphs") {
rep(glyph) ^^ Glyphs.fromList
}
def glyph: Parser[Glyph] =
as("glyph") {
mapParser(
// longest symbols first so e.g. "!!" is not consumed as "!" twice
Glyph.MoveAssessment.all.sortBy(_.symbol.length).map { g =>
g.symbol -> g
},
"glyph"
)
}
val x = exists("x")
val check = exists("+")
val checkmate = ("#" | "++") ^^^ true | success(false)
val role = mapParser(Role.allByPgn, "role") | success(Pawn)
val file = mapParser(fileMap, "file")
val rank = mapParser(rankMap, "rank")
val promotable = Role.allPromotableByPgn mapKeys (_.toUpper)
// the '=' before the promoted piece is optional
val promotion = ("=" ?) ~> mapParser(promotable, "promotion")
val dest = mapParser(Pos.allKeys, "dest")
// parses literal c to true, otherwise succeeds with false
def exists(c: String): Parser[Boolean] = c ^^^ true | success(false)
// builds a parser matching any key of the pairs, yielding its value
def mapParser[A, B](pairs: Iterable[(A, B)], name: String): Parser[B] =
pairs.foldLeft(failure(name + " not found"): Parser[B]) {
case (acc, (a, b)) => a.toString ^^^ b | acc
}
}
// Combinator parser for the PGN tag-pair section, e.g. [Event "Casual game"].
object TagParser extends RegexParsers with Logging {
def apply(pgn: String): Validated[String, Tags] =
parseAll(all, pgn) match {
case f: Failure => invalid("Cannot parse tags: %s\n%s".format(f.toString, pgn))
case Success(tags, _) => valid(Tags(tags))
case err => invalid("Cannot parse tags: %s\n%s".format(err.toString, pgn))
}
// convenience: extract tags directly from a complete PGN
def fromFullPgn(pgn: String): Validated[String, Tags] =
splitTagAndMoves(pgn) flatMap {
case (tags, _) => apply(tags)
}
// all leading tags; any remaining input after the tag section is ignored
def all: Parser[List[Tag]] =
as("all") {
tags <~ """(.|\n)*""".r
}
def tags: Parser[List[Tag]] = rep(tag)
def tag: Parser[Tag] =
as("tag") {
tagName ~ tagValue ^^ {
case name ~ value => Tag(name, value)
}
}
val tagName: Parser[String] = "[" ~> """[a-zA-Z]+""".r
// a double-quoted string allowing backslash escapes; unquotes and
// unescapes embedded quotes
val tagValue: Parser[String] = """"(?:[^"\\]|\\.)*"""".r <~ "]" ^^ {
_.stripPrefix("\"").stripSuffix("\"").replace("\\\"", "\"")
}
}
// The movetext parser requires a line break after the tag section; insert one
// wherever a closing tag ("]) is directly followed by a move number.
private def ensureTagsNewline(pgn: String): String = {
val tagEndThenMoveNumber = """"\]\s*(\d+\.)""".r
tagEndThenMoveNumber.replaceAllIn(pgn, m => "\"]\n" + m.group(1))
}
// Splits a full PGN into (tag section, movetext): the leading run of
// non-empty trimmed lines starting with '[' is the tag section, the rest
// is the movetext. Always succeeds.
private def splitTagAndMoves(pgn: String): Validated[String, (String, String)] = {
val lines = augmentString(ensureTagsNewline(pgn)).linesIterator.to(List).map(_.trim).filter(_.nonEmpty)
val (tagLines, moveLines) = lines span (_.headOption contains '[')
valid(tagLines.mkString("\n") -> moveLines.mkString("\n"))
}
}
| niklasf/scalachess | src/main/scala/format/pgn/Parser.scala | Scala | mit | 12,254 |
package com.box.castle.router.kafkadispatcher
import com.box.castle.collections.immutable.LinkedHashMap
import com.box.castle.router.kafkadispatcher.processors.RequesterInfo
import org.slf4s.Logging
/**
* RequestQueue is a convenient wrapper around a set of key -> queue pairs where each queue is represented by
* a LinkedHashMap. For the RequestQueue "removing the head" is equivalent to removing a single element from
* each of the Subqueues associated with each key. Furthermore, RequestQueue guarantees that each Subqueue has
* at least one RequesterInfo element associated with the Subqueue Key.
* {{{
* For example if our RequestQueue has this shape:
* "key1" -> LinkedHashMap(20 -> RI1, 25 -> RI2, 26 -> RI3)
* "key2" -> LinkedHashMap(870 -> RI54, 890 -> RI55)
* "key3" -> LinkedHashMap(70 -> RI80)
*
* Calling "removeHead" would return a Map:
* "key1" -> (20, RI1)
* "key2" -> (870, RI54)
* "key3" -> (70, RI80)
*
* And the new request queue would look like this:
* "key1" -> LinkedHashMap(25 -> RI2, 26 -> RI3)
* "key2" -> LinkedHashMap(890 -> RI55)
*
* }}}
*/
// Immutable map of key -> ordered subqueue (project LinkedHashMap) of
// subqueueKey -> requester set. Construction validates the class invariant:
// no empty subqueues, and every subqueue entry has at least one requester.
class RequestQueue[A, B] private (val subqueues: Map[A, LinkedHashMap[B, Set[RequesterInfo]]]) extends Logging {
subqueues foreach {
case (key, subqueue) => {
require(!subqueue.isEmpty, s"subqueue for key $key must not be empty")
subqueue.foreach {
case (subqueueKey, requesters) =>
require(requesters.nonEmpty,
s"there must be at least one requester associated with key:$key, subqueueKey:$subqueueKey")
}
}
}
// Registers a requester under (key, subqueueKey); relies on the default
// values installed by RequestQueue.empty for absent keys.
def add(key: A, subqueueKey: B, requesterInfo: RequesterInfo): RequestQueue[A, B] = {
val existingSubqueue = subqueues(key)
val existingRequesters = existingSubqueue(subqueueKey)
val newRequesters = existingRequesters + requesterInfo
val newSubqueue = existingSubqueue + (subqueueKey -> newRequesters)
new RequestQueue(subqueues + (key -> newSubqueue))
}
/**
* Removes subqueueKey from the subqueue associated with the given key.
* If this action will result in an empty subqueue then the entire key is removed from the RequestQueue,
* preserving the "no empty subqueues" invariant.
* @param key the top-level queue key
* @param subqueueKey the entry to drop from that key's subqueue
* @return a new RequestQueue; this instance unchanged if the key is absent
*/
def remove(key: A, subqueueKey: B): RequestQueue[A, B] = {
subqueues.get(key) match {
case Some(subqueue) => {
val newSubqueue = subqueue - subqueueKey
if (newSubqueue.isEmpty) {
new RequestQueue(subqueues - key)
}
else {
new RequestQueue(subqueues + (key -> newSubqueue))
}
}
case None => this
}
}
/**
* Removes this key and all of its subqueues from the RequestQueue
*/
def remove(key: A): RequestQueue[A, B] = new RequestQueue(subqueues - key)
// Pops the head entry of every subqueue at once. Returns the popped
// (subqueueKey, requesters) per key, plus the new queue in which subqueues
// emptied by the pop have been dropped entirely.
def removeHead(): (Map[A, (B, Set[RequesterInfo])], RequestQueue[A, B]) = {
var newSubqueues = subqueues.empty
val requests = subqueues map {
case (topicAndPartition, subqueue) => {
assert(!subqueue.isEmpty, "subqueue must not be empty")
val (subqueueKey, requesterActorRefs, newSubqueue) = subqueue.removeHead()
if (!newSubqueue.isEmpty)
newSubqueues = newSubqueues + (topicAndPartition -> newSubqueue)
(topicAndPartition, (subqueueKey, requesterActorRefs))
}
}
(requests, new RequestQueue(newSubqueues))
}
def get(key: A): Option[LinkedHashMap[B, Set[RequesterInfo]]] = subqueues.get(key)
def isEmpty: Boolean = subqueues.isEmpty
override def toString: String = subqueues.toString()
}
object RequestQueue {
// Empty backing map with default values installed at both levels, so that
// add() can look up an absent key / subqueueKey and receive an empty
// subqueue / requester set instead of throwing.
private def emptySubqueue[A, B]: Map[A, LinkedHashMap[B, Set[RequesterInfo]]] =
Map.empty[A, LinkedHashMap[B, Set[RequesterInfo]]].withDefaultValue(
LinkedHashMap.empty[B, Set[RequesterInfo]].withDefaultValue(Set.empty[RequesterInfo]))
// Creates an empty RequestQueue.
def empty[A, B]: RequestQueue[A, B] = new RequestQueue(emptySubqueue[A, B])
} | Box-Castle/router | src/main/scala/com/box/castle/router/kafkadispatcher/RequestQueue.scala | Scala | apache-2.0 | 3,898 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.